Loader
def __init__ (self, * args, ** kw) :
self.pathes = []
self.pop_to_self (kw, "pre_load_cb", "x_context", prefix = "_")
self.__super.__init__ (* args, ** kw)
# end def __init__
def cook (self, value, cao = None) :
from _TFL.load_config_file import load_config_file
path = self.__super.cook (value, cao)
result = {}
if path :
if self._pre_load_cb and not self._pre_load_cb_run :
self._pre_load_cb ()
self._pre_load_cb_run = True
context = dict \
(C = cao._cmd if cao else None, ** self._x_context)
self.Loader (self, context, path, result)
return result
# end def cook
# end class Config
class Config_Bundle (_Config_, Bool) :
"""Option specifying a bundle of option values in a static config dict."""
def __init__ (self, config_dct, ** kw) :
self.config_dct = config_dct
if "name" not in kw :
kw ["name"] = self.__class__.__name__.lower ()
if "description" not in kw :
kw ["description"] = self.__class__.__doc__
self.__super.__init__ (** kw)
# end def __init__
def cook (self, value, cao = None) :
if self.__super.cook (value, cao) :
return self.config_dct
# end def cook
def raw_default (self, cao = None) :
if self.__super.raw_default (cao) :
return self.config_dct
return {}
# end def raw_default
def _help_items (self) :
yield from self.__super._help_items ()
yield self.config_dct
# end def _help_items
def _set_default (self, default) :
if default is None :
default = {}
return self.__super._set_default (default)
# end def _set_default
# end class Config_Bundle
class Percent (Float) :
"""Argument or option with a percentage value,
specified as integer or float value between 0 and 100.
Cooked value is float between 0.0 and 1.0.
"""
type_abbr = "%"
def _cook (self, value) :
if isinstance (value, pyk.string_types) :
value = int (value, 0)
if isinstance (value, (int, float)) :
value = value / 100.
if not (0.0 <= value <= 1.0) :
raise ValueError ("Invalid percentage value %s" % value)
return value
# end def _cook
# end class Percent
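# A short illustration of the cooking behaviour implemented by Percent._cook
# above (a standalone sketch, not part of the original module); raw values on
# the left, cooked values on the right:
#
#   "50" -> 0.5      (strings are first converted with int (value, 0))
#   12.5 -> 0.125
#   150  -> raises ValueError ("Invalid percentage value 1.5")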
class Set (_Spec_) :
"""Argument or option that specifies one element of a set of choices"""
def __init__ (self, choices, ** kw) :
self.choices = set (choices)
self.__super.__init__ (** kw)
# end def __init__
def cook (self, value, cao = None) :
return self.__super.cook (self._get_choice (value), cao)
# end def cook
# end class Set
class SHA (_User_Config_Entry_, Set) :
"""Name of secure hash algorithm to use."""
def __init__ (self, ** kw) :
import _TFL.Secure_Hash
self.__super.__init__ (choices = TFL.Secure_Hash.algorithms, ** kw)
# end def __init__
def cook (self, value, cao = None) :
import _TFL.Secure_Hash
result = self.__super.cook (value, cao)
return getattr (TFL.Secure_Hash, result)
# end def cook
# end class SHA
class Str (_Spec_) :
"""Argument or option with a string value"""
type_abbr = "S"
def cook (self, value, cao = None) :
result = pyk.decoded (value, self.user_config.input_encoding)
return result
# end def cook
# end class Str
class Str_AS (Str) :
"""Argument or option with a string value, auto-splitting"""
auto_split = ","
type_abbr = "T"
# end class Str_AS
class Unicode (Str) :
"""Argument or option with a string value"""
type_abbr = "U"
# end class Unicode
class _Regexp_Arg_Mixin_ (TFL.Meta.Object) :
R_Type_combined = Multi_Regexp
re_flags = dict \
( A = re.ASCII
, I = re.IGNORECASE
, M = re.MULTILINE
, S = re.DOTALL
, X = re.VERBOSE
)
def combine (self, values) :
if len (values) > 1 :
return self.R_Type_combined (* values)
elif values :
return values [0]
# end def combine
def _re_flags (self, fs) :
result = 0
for f in fs :
try :
v = self.re_flags [f.upper ()]
except KeyError :
raise \
( TFL.CAO.Err
( "Invalid flag `%s`; use one of: %s"
% (f, ", ".join (sorted (self.re_flags.keys ())))
)
)
else :
result |= v
return result
# end def _re_flags
# end class _Regexp_Arg_Mixin_
class _Regexp_Arg_ (_Regexp_Arg_Mixin_, Str) :
"""Argument or option specifying a Regexp."""
_real_name = "Regexp"
auto_split = "\n"
type_abbr = "~"
def cook (self, value, cao = None) :
if value :
result = self.__super.cook (value, cao)
return Regexp (result)
# end def cook
# end class _Regexp_Arg_
class _Regexp_Arg_D_ (_Regexp_Arg_Mixin_, Str) :
"""Argument or option specifying a delimited Regexp."""
_real_name = "Regexp_D"
auto_split = "\n"
def cook (self, value, cao = None) :
if value :
value = self.__super.cook (value, cao)
delim = value [0]
p, s, fs = rsplit_hst (value [1:], delim)
flags = self._re_flags (fs)
return Regexp (p, flags)
# end def cook
# end class _Regexp_Arg_D_
class _Re_Replacer_Arg_ (_Regexp_Arg_Mixin_, Str) :
"""Argument or option specifying a regexp replacement."""
_real_name = "Re_Replacer"
R_Type_combined = Multi_Re_Replacer
auto_split = "\n"
type_abbr = "/"
def cook (self, value, cao = None) :
if value :
value = self.__super.cook (value, cao)
if value.lower () == "$untabified" :
result = Untabified
else :
delim = value [0]
p, s, x = split_hst (value [1:], delim)
r, s, fs = split_hst (x, delim)
flags = self._re_flags (fs)
result = Re_Replacer (p, r, flags)
return result
# end def cook
# end class _Re_Replacer_Arg_
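# A hedged sketch of the raw value formats the two classes above expect,
# inferred from their parsing code; the concrete patterns shown are
# illustrative assumptions:
#
#   Regexp_D (delimited regexp): the first character is the delimiter,
#   followed by the pattern and optional flag letters, e.g.
#       "/^foo$/im"   -> Regexp ("^foo$", re.IGNORECASE | re.MULTILINE)
#
#   Re_Replacer: delimiter, pattern, delimiter, replacement, delimiter, flags:
#       "/foo/bar/i"  -> Re_Replacer ("foo", "bar", re.IGNORECASE)
#       "$untabified" -> Untabified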
class Time_Zone (_User_Config_Entry_) :
"""Time zone to use."""
def _get_default (self) :
return "UTC"
# end def _get_default
# end class Time_Zone
class Bundle (TFL.Meta.Object) :
"""Model a bundle of values for arguments and options.
A bundle is defined by creating an instance of :class:`Bundle` with
the arguments:
- the name of the bundle,
- a description of the bundle to be included in the `help`,
- a number of keyword arguments specifying values for the arguments
and options defined by the bundle.
"""
kind = "bundle"
def __init__ (self, _name, _description = "", ** _kw) :
self._name = _name
self._description = _description
self._kw = _kw
# end def __init__
def __call__ (self, value, cao) :
assert value == self._name
cao._use_args (self._kw)
# end def __call__
def __contains__ (self, item) :
return item in self._kw
# end def __contains__
def __getattr__ (self, name) :
try :
return self._kw [name]
except KeyError :
raise AttributeError (name)
# end def __getattr__
def __getitem__ (self, key) :
try :
return self._kw [key]
except AttributeError :
raise KeyError (key)
# end def __getitem__
# end class Bundle
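# A minimal sketch of defining and querying a bundle; the bundle name and the
# keyword values below are illustrative assumptions, not part of this module:
#
#   nightly = Bundle \
#       ( "nightly"
#       , "Argument and option values for the nightly batch run"
#       , verbose = True
#       , output  = "nightly.log"
#       )
#   "output" in nightly    # -> True            (__contains__)
#   nightly.output         # -> "nightly.log"   (__getattr__)
#   nightly ["verbose"]    # -> True            (__getitem__)
#
# On the command line, such a bundle would be selected by name, prefixed
# with `@`, e.g. `@nightly`.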
class Cmd (TFL.Meta.Object) :
"""Model a command with options, arguments, and a handler.
The canonical usage pattern for :class:`Cmd` is to define an
instance of `Cmd` at the module level which is then called without
arguments in the `__main__` clause of the module.
:meth:`Cmd.parse` and :meth:`Cmd.use` return an instance of
:class:`CAO`.
"""
_handler = None
_helper = None
def __init__ \
( self
, handler = None
, args = ()
, opts = ()
, buns = ()
, description = ""
, explanation = ""
, name = ""
, min_args = 0
, max_args = -1
, do_keywords = False
, put_keywords = False
, helper = None
, defaults = {}
) :
"""Specify options, arguments, and handler of a command.
:obj:`handler`
A callable that takes a :class:`CAO` instance as first argument
and implements the command in question.
:obj:`args`
A tuple of :class:`Arg`-class instances specifying the
possible arguments. One element of :obj:`args` can specify a
:class:`Cmd_Choice` with possible sub-commands.
:obj:`opts`
A tuple of :class:`Arg`- or :class:`Opt`-class instances
specifying the possible options.
:obj:`buns`
A tuple of :class:`Bundle` instances that define pre-packaged
bundles of argument- and option-values to support common usage
scenarios that can be specified simply by using the name of the
respective bundle (prefixed by a `@`).
:obj:`description`
A description of the command to be included in the `help`.
If the `description` argument is undefined,
:obj:`handler.__doc__` will be used if that is defined.
:obj:`explanation`
A more detailed explanation that can be shown in the `help`.
:obj:`name`
A `name` for the command.
By default, the name of the module defining the :class:`Cmd`
definition is used.
:obj:`min_args`
Specifies the minimum number of arguments required.
By default, no arguments are required.
:obj:`max_args`
Specifies the maximum number of arguments allowed.
The default -1 means an unlimited number is allowed.
:obj:`do_keywords`
Specifies whether keyword values are supported.
If `do_keywords` is True, command line arguments of the
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
from collections import OrderedDict
import torchvision.models as models
from torch.autograd import Variable
class unet(nn.Module):
def __init__(self):
super(unet, self).__init__()
self.encoder1 = nn.Conv2d(3, 32, 3, stride=1, padding=1) # b, 16, 10, 10
self.encoder2= nn.Conv2d(32, 64, 3, stride=1, padding=1) # b, 8, 3, 3
self.encoder3= nn.Conv2d(64, 128, 3, stride=1, padding=1)
self.encoder4= nn.Conv2d(128, 256, 3, stride=1, padding=1)
self.encoder5= nn.Conv2d(256, 512, 3, stride=1, padding=1)
self.decoder1 = nn.Conv2d(512, 256, 3, stride=1,padding=1) # b, 16, 5, 5
self.decoder2 = nn.Conv2d(256, 128, 3, stride=1, padding=1) # b, 8, 15, 1
self.decoder3 = nn.Conv2d(128, 64, 3, stride=1, padding=1) # b, 1, 28, 28
self.decoder4 = nn.Conv2d(64, 32, 3, stride=1, padding=1)
self.decoder5 = nn.Conv2d(32, 3, 3, stride=1, padding=1)
self.tan = nn.Tanh()
# self.soft = nn.Softmax(dim =1)
def forward(self, x):
out = F.relu(F.max_pool2d(self.encoder1(x),2,2))
t1 = out
out = F.relu(F.max_pool2d(self.encoder2(out),2,2))
t2 = out
out = F.relu(F.max_pool2d(self.encoder3(out),2,2))
t3 = out
out = F.relu(F.max_pool2d(self.encoder4(out),2,2))
t4 = out
out = F.relu(F.max_pool2d(self.encoder5(out),2,2))
# t2 = out
out = F.relu(F.interpolate(self.decoder1(out),scale_factor=(2,2),mode ='bilinear'))
# print(out.shape,t4.shape)
out = torch.add(out,t4)
out = F.relu(F.interpolate(self.decoder2(out),scale_factor=(2,2),mode ='bilinear'))
out = torch.add(out,t3)
out = F.relu(F.interpolate(self.decoder3(out),scale_factor=(2,2),mode ='bilinear'))
out = torch.add(out,t2)
out = F.relu(F.interpolate(self.decoder4(out),scale_factor=(2,2),mode ='bilinear'))
out = torch.add(out,t1)
out = F.relu(F.interpolate(self.decoder5(out),scale_factor=(2,2),mode ='bilinear'))
# print(out.shape)
# out = self.soft(out)
return self.tan(out)
class OUCD_lite(nn.Module):
def __init__(self):
super(OUCD_lite, self).__init__()
self.encoder1 = nn.Conv2d(3, 32, 3, stride=1, padding=1) # b, 16, 10, 10
self.bne1 = nn.InstanceNorm2d(32)
self.encoder2= nn.Conv2d(32, 64, 3, stride=1, padding=1) # b, 8, 3, 3
self.bne2 = nn.InstanceNorm2d(64)
self.encoder3= nn.Conv2d(64, 128, 3, stride=1, padding=1)
self.bne3 = nn.InstanceNorm2d(128)
self.encoder4= nn.Conv2d(128, 256, 3, stride=1, padding=1)
self.encoder5= nn.Conv2d(256, 512, 3, stride=1, padding=1)
self.encoder6= nn.Conv2d(512, 1024, 3, stride=1, padding=1)
self.decoder1= nn.Conv2d(1024,512, 3, stride=1, padding=1)
self.decoder2 = nn.Conv2d(512, 256, 3, stride=1, padding=1) # b, 1, 28, 28
self.bnd1 = nn.InstanceNorm2d(64)
self.decoder3 = nn.Conv2d(256, 128, 3, stride=1, padding=1)
self.bnd2 = nn.InstanceNorm2d(32)
self.decoder4 = nn.Conv2d(128, 64, 3, stride=1, padding=1)
self.bnd3 = nn.InstanceNorm2d(16)
self.decoder5 = nn.Conv2d(64, 32, 3, stride=1, padding=1)
self.decoder6 = nn.Conv2d(32, 16, 3, stride=1, padding=1)
# self.encoder5= nn.Conv2d(512, 1024, 3, stride=1, padding=1)
self.decoderf1 = nn.Conv2d(128, 64, 3, stride=1, padding=1)
self.bndf1 = nn.InstanceNorm2d(64)
self.decoderf2= nn.Conv2d(64, 32, 3, stride=1, padding=1)
self.bndf2 = nn.InstanceNorm2d(32)
self.decoderf3 = nn.Conv2d(32, 16, 3, stride=1, padding=1)
self.bndf3 = nn.InstanceNorm2d(16)
self.decoderf4 = nn.Conv2d(16, 16, 3, stride=1, padding=1)
self.decoderf5 = nn.Conv2d(16, 16, 3, stride=1, padding=1)
self.encoderf1 = nn.Conv2d(3, 32, 3, stride=1, padding=1)
self.bnef1 = nn.InstanceNorm2d(32)
self.encoderf2= nn.Conv2d(32, 64, 3, stride=1, padding=1)
self.bnef2 = nn.InstanceNorm2d(64)
self.encoderf3 = nn.Conv2d(64, 128, 3, stride=1, padding=1)
self.bnef3 = nn.InstanceNorm2d(128)
self.encoderf4 = nn.Conv2d(128, 128, 3, stride=1, padding=1)
self.encoderf5 = nn.Conv2d(128, 128, 3, stride=1, padding=1)
self.final = nn.Conv2d(16,3,1,stride=1,padding=0)
self.bnf = nn.InstanceNorm2d(3)
self.tmp1 = nn.Conv2d(64,32,1,stride=1,padding=0)
self.bnt1 = nn.InstanceNorm2d(32)
self.tmp2 = nn.Conv2d(128,32,1,stride=1,padding=0)
# self.bnt2 = nn.BatchNorm2d(32)
self.tmp3 = nn.Conv2d(64,32,1,stride=1,padding=0)
self.tmpf3 = nn.Conv2d(128,32,1,stride=1,padding=0)
self.tmpf2 = nn.Conv2d(64,32,1,stride=1,padding=0)
self.tan = nn.Tanh()
# self.soft = nn.Softmax(dim =1)
def forward(self, x):
out1 = F.relu(F.interpolate(self.encoderf1(x),scale_factor=(2,2),mode ='bilinear'))
t1 = F.interpolate(out1,scale_factor=(0.25,0.25),mode ='bilinear')
o1 = out1
out1 = F.relu(F.interpolate(self.encoderf2(out1),scale_factor=(2,2),mode ='bilinear'))
t2 = F.interpolate(out1,scale_factor=(0.125,0.125),mode ='bilinear')
o2 = out1
out1 = F.relu(F.interpolate(self.encoderf3(out1),scale_factor=(2,2),mode ='bilinear'))
t3 = F.interpolate(out1,scale_factor=(0.0625,0.0625),mode ='bilinear')
# U-NET encoder start
out = F.relu(F.max_pool2d(self.encoder1(x),2,2))
#Fusing all feature maps from K-NET
out = torch.add(out,torch.add(self.tmp2(t3),torch.add(t1,self.tmp1(t2))))
u1 = out
out = F.relu(F.max_pool2d(self.encoder2(out),2,2))
u2 = out
out = F.relu(F.max_pool2d(self.encoder3(out),2,2))
u3=out
out = F.relu(F.max_pool2d(self.encoder4(out),2,2))
u4=out
out = F.relu(F.max_pool2d(self.encoder5(out),2,2))
u5 = out
out = F.relu(F.max_pool2d(self.encoder6(out),2,2))
out = F.relu(F.interpolate(self.decoder1(out),scale_factor=(2,2),mode ='bilinear'))
out = torch.add(out,u5)
out = F.relu(F.interpolate(self.decoder2(out),scale_factor=(2,2),mode ='bilinear'))
out = torch.add(out,u4)
out = F.relu(F.interpolate(self.decoder3(out),scale_factor=(2,2),mode ='bilinear'))
out = torch.add(out,u3)
out = F.relu(F.interpolate(self.decoder4(out),scale_factor=(2,2),mode ='bilinear'))
out = torch.add(out,u2)
out = F.relu(F.interpolate(self.decoder5(out),scale_factor=(2,2),mode ='bilinear'))
out = torch.add(out,u1)
# out = F.relu(F.interpolate(self.decoder6(out),scale_factor=(2,2),mode ='bilinear'))
out1 = F.relu(F.max_pool2d(self.decoderf1(out1),2,2))
out1 = torch.add(out1,o2)
t2 = F.interpolate(out1,scale_factor=(0.125,0.125),mode ='bilinear')
out1 = F.relu(F.max_pool2d(self.decoderf2(out1),2,2))
out1 = torch.add(out1,o1)
t1 = F.interpolate(out1,scale_factor=(0.25,0.25),mode ='bilinear')
out1 = F.relu(F.max_pool2d(self.decoderf3(out1),2,2))
# Fusing all layers at the last layer of decoder
# print(out.shape,t1.shape,t2.shape,t3.shape)
out = torch.add(out,torch.add(self.tmpf3(t3),torch.add(t1,self.tmpf2(t2))))
out = F.relu(F.interpolate(self.decoder6(out),scale_factor=(2,2),mode ='bilinear'))
out = F.relu(self.final(out))
return self.tan(out)
class OUCD(nn.Module):
def __init__(self):
super(OUCD, self).__init__()
self.encoder1 = nn.Conv2d(3, 32, 3, stride=1, padding=1) # b, 16, 10, 10
self.bne1 = nn.InstanceNorm2d(32)
self.encoder2= nn.Conv2d(32, 64, 3, stride=1, padding=1) # b, 8, 3, 3
self.bne2 = nn.InstanceNorm2d(64)
self.encoder3= nn.Conv2d(64, 128, 3, stride=1, padding=1)
self.bne3 = nn.InstanceNorm2d(128)
self.encoder4= nn.Conv2d(128, 512, 3, stride=1, padding=1)
self.encoder5= nn.Conv2d(512, 1024, 3, stride=1, padding=1)
self.decoder1 = nn.Conv2d(1024, 512, 3, stride=1, padding=1) # b, 1, 28, 28
self.bnd1 = nn.InstanceNorm2d(64)
self.decoder2 = nn.Conv2d(512, 128, 3, stride=1, padding=1)
self.bnd2 = nn.InstanceNorm2d(32)
self.decoder3 = nn.Conv2d(128, 64, 3, stride=1, padding=1)
self.bnd3 = nn.InstanceNorm2d(16)
self.decoder4 = nn.Conv2d(64, 32, 3, stride=1, padding=1)
self.decoder5 = nn.Conv2d(32, 16, 3, stride=1, padding=1)
# self.decoderf1 = nn.Conv2d(16, 128, 2, stride=1, padding=1)
self.decoderf1 = nn.Conv2d(128, 64, 3, stride=1, padding=1)
self.bndf1 = nn.InstanceNorm2d(64)
self.decoderf2= nn.Conv2d(64, 32, 3, stride=1, padding=1)
self.bndf2 = nn.InstanceNorm2d(32)
self.decoderf3 = nn.Conv2d(32, 16, 3, stride=1, padding=1)
self.bndf3 = nn.InstanceNorm2d(16)
self.decoderf5 = nn.Conv2d(16, 16, 3, stride=1, padding=1)
# self.decoderf5 = nn.Conv2d(16, 16, 3, stride=1, padding=1)
self.encoderf1 = nn.Conv2d(3, 32, 3, stride=1, padding=1)
self.bnef1 = nn.InstanceNorm2d(32)
self.encoderf2= nn.Conv2d(32, 64, 3, stride=1, padding=1)
self.bnef2 = nn.InstanceNorm2d(64)
self.encoderf3 = nn.Conv2d(64, 128, 3, stride=1, padding=1)
self.bnef3 = nn.InstanceNorm2d(128)
self.encoderf4 = nn.Conv2d(128, 16, 3, stride=1, padding=1)
# self.encoderf5 = nn.Conv2d(128, 128, 3, stride=1, padding=1)
self.final = nn.Conv2d(16,3,1,stride=1,padding=0)
self.bnf = nn.InstanceNorm2d(3)
self.tmp1 = nn.Conv2d(16,32,1,stride=1,padding=0)
self.bnt1 = nn.InstanceNorm2d(32)
self.tmp2 = nn.Conv2d(32,32,1,stride=1,padding=0)
# self.bnt2 = nn.BatchNorm2d(32)
self.tmp3 = nn.Conv2d(64,32,1,stride=1,padding=0)
self.tmp4 = nn.Conv2d(16,32,1,stride=1,padding=0)
self.tmpf3 = nn.Conv2d(128,32,1,stride=1,padding=0)
self.tmpf2 = nn.Conv2d(64,32,1,stride=1,padding=0)
self.tmpf1 = nn.Conv2d(32,32,1,stride=1,padding=0)
self.tan = nn.Tanh()
self.sigmoid = nn.Sigmoid()
# self.soft = nn.Softmax(dim =1)
def forward(self, x):
out1 = F.relu(F.interpolate(self.encoderf1(x),scale_factor=(2,2),mode ='bilinear'))
t1 = F.interpolate(out1,scale_factor=(0.25,0.25),mode ='bilinear')
o1 = out1
out1 = F.relu(F.interpolate(self.encoderf2(out1),scale_factor=(2,2),mode ='bilinear'))
t2 = F.interpolate(out1,scale_factor=(0.125,0.125),mode ='bilinear')
o2 = out1
out1 = F.relu(F.interpolate(self.encoderf3(out1),scale_factor=(2,2),mode ='bilinear'))
t3 = F.interpolate(out1,scale_factor=(0.0625,0.0625),mode ='bilinear')
# U-NET encoder start
out = F.relu(F.max_pool2d(self.encoder1(x),2,2))
#Fusing all feature maps from K-NET
out = torch.add(out,torch.add(self.tmpf3(t3),torch.add(t1,self.tmpf2(t2))))
u1 = out
out = F.relu(F.max_pool2d(self.encoder2(out),2,2))
u2 = out
out = F.relu(F.max_pool2d(self.encoder3(out),2,2))
u3=out
out = F.relu(F.max_pool2d(self.encoder4(out),2,2))
u4=out
out = F.relu(F.max_pool2d(self.encoder5(out),2,2))
out = F.relu(F.interpolate(self.decoder1(out),scale_factor=(2,2),mode ='bilinear'))
out = torch.add(out,u4)
out = F.relu(F.interpolate(self.decoder2(out),scale_factor=(2,2),mode ='bilinear'))
out = torch.add(out,u3)
out = F.relu(F.interpolate(self.decoder3(out),scale_factor=(2,2),mode ='bilinear'))
out = torch.add(out,u2)
out = F.relu(F.interpolate(self.decoder4(out),scale_factor=(2,2),mode ='bilinear'))
out = torch.add(out,u1)
# Start K-Net decoder
out1 = F.relu(F.max_pool2d(self.decoderf1(out1),2,2))
out1 = torch.add(out1,o2)
t3 = F.interpolate(out1,scale_factor=(0.125,0.125),mode ='bilinear')
out1 = F.relu(F.max_pool2d(self.decoderf2(out1),2,2))
out1 = torch.add(out1,o1)
t2 = F.interpolate(out1,scale_factor=(0.25,0.25),mode ='bilinear')
out1 = F.relu(F.max_pool2d(self.decoderf3(out1),2,2))
t1 = F.interpolate(out1,scale_factor=(0.5,0.5),mode ='bilinear')
# Fusing all layers at the last layer of decoder
# print(t1.shape,t2.shape,t3.shape,out.shape)
out = torch.add(out,torch.add(self.tmp3(t3),torch.add(self.tmp1(t1),self.tmp2(t2))))
out = F.relu(F.interpolate(self.decoder5(out),scale_factor=(2,2),mode ='bilinear'))
out = torch.add(out,out1)
out = F.relu(self.final(out))
return self.tan(out)
class oucd_wo_msff_encoder(nn.Module):
def __init__(self):
super(oucd_wo_msff_encoder, self).__init__()
self.encoder1 = nn.Conv2d(3, 32, 3, stride=1, padding=1) # b, 16, 10, 10
self.bne1 = nn.InstanceNorm2d(32)
self.encoder2= nn.Conv2d(32, 64, 3, stride=1, padding=1) # b, 8, 3, 3
self.bne2 = nn.InstanceNorm2d(64)
self.encoder3= nn.Conv2d(64, 128, 3, stride=1, padding=1)
self.bne3 = nn.InstanceNorm2d(128)
self.encoder4= nn.Conv2d(128, 512, 3, stride=1, padding=1)
self.encoder5= nn.Conv2d(512, 1024, 3, stride=1, padding=1)
self.decoder1 = nn.Conv2d(1024, 512, 3, stride=1, padding=1) # b, 1, 28, 28
self.bnd1 = nn.InstanceNorm2d(64)
self.decoder2 = nn.Conv2d(512, 128, 3, stride=1, padding=1)
self.bnd2 = nn.InstanceNorm2d(32)
self.decoder3 = nn.Conv2d(128, 64, 3, stride=1, padding=1)
self.bnd3 = nn.InstanceNorm2d(16)
self.decoder4 = nn.Conv2d(64, 32, 3, stride=1, padding=1)
self.decoder5 = nn.Conv2d(32, 16, 3, stride=1, padding=1)
# self.decoderf1 = nn.Conv2d(16, 128, 2, stride=1, padding=1)
self.decoderf1 = nn.Conv2d(128, 64, 3, stride=1, padding=1)
self.bndf1 = nn.InstanceNorm2d(64)
self.decoderf2= nn.Conv2d(64, 32, 3, stride=1, padding=1)
self.bndf2 = nn.InstanceNorm2d(32)
self.decoderf3 = nn.Conv2d(32, 16, 3, stride=1, padding=1)
self.bndf3 = nn.InstanceNorm2d(16)
self.decoderf5 = nn.Conv2d(16, 16, 3, stride=1, padding=1)
# self.decoderf5 = nn.Conv2d(16, 16, 3, stride=1, padding=1)
self.encoderf1 = nn.Conv2d(3, 32, 3, stride=1, padding=1)
self.bnef1 = nn.InstanceNorm2d(32)
self.encoderf2= nn.Conv2d(32, 64, 3, stride=1, padding=1)
self.bnef2 = nn.InstanceNorm2d(64)
self.encoderf3 = nn.Conv2d(64, 128, 3, stride=1, padding=1)
self.bnef3 = nn.InstanceNorm2d(128)
self.encoderf4 = nn.Conv2d(128, 16, 3, stride=1, padding=1)
# self.encoderf5 = nn.Conv2d(128, 128, 3, stride=1, padding=1)
self.final = nn.Conv2d(16,3,1,stride=1,padding=0)
self.bnf = nn.InstanceNorm2d(3)
self.tmp1 = nn.Conv2d(16,32,1,stride=1,padding=0)
self.bnt1 = nn.InstanceNorm2d(32)
2.3
#bandpass_sig = iir_bandpass_filter_1(bandpass_sig, 0.8, 2, 20, 5, "cheby1") # Breath: 0.1 ~ 0.33 order=5, Heart: 0.8 ~ 2.3
#bandpass_sig = firwin_filter(new_phase_diff, 0.8, 2, 20, 5)
#bandpass_sig = lowpass_filter(bandpass_sig, 2, 2, 20, 5)
N = len(bandpass_sig)
T = 1 / 20
bps_fft = fft(bandpass_sig)
bps_fft_x = np.linspace(0, 1.0 / (T * 2), N // 2)
#print(np.argmax(2 / N * np.abs(bps_fft[:N // 2])) * (1.0 / (T * 2)) / (N // 2))
index_of_fftmax = np.argmax(2 / N * np.abs(bps_fft[:N // 2])) * (1.0 / (T * 2)) / (N // 2)
print(index_of_fftmax)
if index_of_fftmax < 0.215:
replace = True
# Smoothing signal
smoothing_signal = MLR(bandpass_sig, 2) # Breath = 9, Heart = 6, Delta = 1
# Try to make smoothing values (Sv) (Sv > 1 or Sv < -1)
# smoothing_signal = np.copy(smoothing_signal)
# for i in range(1, int(len(smoothing_signal))-1):
# if smoothing_signal[i] > 0:
# tmp_s = smoothing_signal[i] + 1
# smoothing_signal[i] = tmp_s
# elif smoothing_signal[i] < 0:
# tmp_s = smoothing_signal[i] - 1
# smoothing_signal[i] = tmp_s
# Feature detect
feature_peak, feature_valley, feature_sig = feature_detection(smoothing_signal)
# Feature compress
compress_peak, compress_valley = feature_compress(feature_peak, feature_valley, 22, smoothing_signal) # Br: 20 Hr: 6 ex: 25
# Feature sort
compress_feature = np.append(compress_peak, compress_valley)
compress_feature = np.sort(compress_feature)
# Candidate_search
NT_points, NB_points = candidate_search(smoothing_signal, compress_feature, 17) # breath = 18 heart = 4 ex7
# Breath rate
rate = caculate_breathrate(NT_points, NB_points)
print(f'Rate: {rate}')
if disp:
# Define
sampling_rate = 20
record_time = len(unw_phase) / sampling_rate
# Unwrap phase
plt.figure()
raw_x = np.linspace(0 + (record_time * count), record_time + (record_time * count), len(raw))
plt.plot(raw_x, raw)
plt.title('Unwrap phase')
plt.xlabel('Time (sec)')
plt.ylabel('Phase (radians)')
# Phase difference
plt.figure()
phase_diff_x = np.linspace(0 + (record_time * count), record_time + (record_time * count), len(phase_diff))
plt.plot(phase_diff_x, phase_diff, label="$sin(x)$")
plt.title('Phase difference')
plt.xlabel('Time (sec)')
plt.ylabel('Phase (radians)')
# RemoveImpulseNoise
plt.figure()
new_phase_diff_x = np.linspace(0 + (record_time * count), record_time + (record_time * count), len(new_phase_diff))
plt.plot(new_phase_diff_x, new_phase_diff, label="$sin(x)$", color='b')
plt.title('Remove Impulse Noise')
plt.xlabel('Time (sec)')
plt.ylabel('Phase (radians)')
# Bandpass signal (Butter worth)
plt.figure()
bandpass_sig_x = np.linspace(0 + (record_time * count), record_time + (record_time * count), len(bandpass_sig))
plt.plot(bandpass_sig_x, bandpass_sig)
plt.title('Bandpass signal')
plt.xlabel('Time (sec)')
plt.ylabel('Phase (radians)')
# Smoothing signal
plt.figure()
smoothing_signal_x = np.linspace(0 + (record_time * count), record_time + (record_time * count), len(smoothing_signal))
plt.plot(smoothing_signal_x, smoothing_signal)
plt.title('Smoothing signal')
plt.xlabel('Time (sec)')
plt.ylabel('Phase (radians)')
# Feature detect
plt.figure()
feature_peak_x = (record_time * count) + feature_peak/len(feature_sig) * record_time
feature_valley_x = (record_time * count) + feature_valley/len(feature_sig) * record_time
feature_sig_x = np.linspace(0 + (record_time * count), record_time + (record_time * count), len(feature_sig))
plt.plot(feature_sig_x, feature_sig)
plt.plot(feature_peak_x, feature_sig[feature_peak], 'bo')
plt.plot(feature_valley_x, feature_sig[feature_valley], 'ro')
plt.title('Feature detect')
plt.xlabel('Time (sec)')
plt.ylabel('Phase (radians)')
# Feature compress
plt.figure()
compress_peak_x = (record_time * count) + compress_peak/len(feature_sig) * record_time
compress_valley_x = (record_time * count) + compress_valley/len(feature_sig) * record_time
feature_sig_x = np.linspace(0 + (record_time * count), record_time + (record_time * count), len(feature_sig))
plt.plot(feature_sig_x, feature_sig)
plt.plot(compress_peak_x, feature_sig[compress_peak], 'bo')
plt.plot(compress_valley_x, feature_sig[compress_valley], 'ro')
plt.title('Feature compress')
plt.xlabel('Time (sec)')
plt.ylabel('Phase (radians)')
# Candidate_search
plt.figure()
candi_peak_x = (record_time * count) + NT_points/len(smoothing_signal) * record_time
candi_valley_x = (record_time * count) + NB_points/len(smoothing_signal) * record_time
candidate_search_x = np.linspace(0 + (record_time * count), record_time + (record_time * count), len(smoothing_signal))
plt.plot(candidate_search_x, smoothing_signal)
plt.plot(candi_peak_x, smoothing_signal[NT_points], 'bo')
plt.plot(candi_valley_x, smoothing_signal[NB_points], 'ro')
plt.title('Candidate_search')
plt.xlabel('Time (sec)')
plt.ylabel('Phase (radians)')
# ----------------------
# FFT (Before and After)
plt.figure()
# Before bandpass
N = len(new_phase_diff)
T = 1 / sampling_rate
ori_fft = fft(new_phase_diff)
ori_fft_x = np.linspace(0, 1.0 / (T * 2), N // 2)
# plt.subplot(2, 1, 1)
plt.plot(ori_fft_x, 2 / N * np.abs(ori_fft[:N // 2]))
# plt.legend(labels=['Phase diff FFT'], loc='upper right')
plt.title('Phase diff FFT')
plt.xlabel('Frequency (Hz)')
plt.ylabel('Amplitude')
# After bandpass
plt.figure()
N = len(bandpass_sig)
T = 1 / sampling_rate
bps_fft = fft(bandpass_sig)
bps_fft_x = np.linspace(0, 1.0 / (T * 2), N // 2)
# plt.subplot(2, 1, 2)
# plt.legend(labels=['Bandpassed FFT'], loc='upper right')
plt.title('Bandpassed FFT')
plt.xlabel('Frequency (Hz)')
plt.ylabel('Amplitude')
plt.plot(bps_fft_x, 2 / N * np.abs(bps_fft[:N // 2]))
print(np.argmax(2 / N * np.abs(bps_fft[:N // 2])) * (1.0 / (T * 2)) / (N // 2))
plt.show()
return rate, replace, index_of_fftmax
def plot_scatter(
all_index_of_fftmax,
all_gt_array,
all_confidenceMetricBreathOut_std,
all_confidenceMetricBreathOut_xCorr_std,
all_confidenceMetricBreathOut_mean,
all_confidenceMetricBreathOut_xCorr_mean,
all_breathingRateEst_FFT_std,all_breathingRateEst_FFT_mean,
all_breathingEst_xCorr_std, all_breathingEst_xCorr_mean,
all_breathingEst_peakCount_std, all_breathingEst_peakCount_mean,
all_sumEnergyBreathWfm_mean,
all_sumEnergyBreathWfm_std,
all_sumEnergyHeartWfm_mean,
all_sumEnergyHeartWfm_std):
plt.xlabel('all_breathingEst_xCorr_mean')
plt.ylabel('heartrate_groundtruth')
plt.scatter(all_breathingEst_xCorr_mean, all_gt_array)
plt.show()
#all_breathingRateEst_FFT_mean, all_breathingEst_xCorr_mean
def knn_test(
predict_array,
all_index_of_fftmax,
all_gt_array,
all_confidenceMetricBreathOut_std,
all_confidenceMetricBreathOut_xCorr_std,
all_confidenceMetricBreathOut_mean,
all_confidenceMetricBreathOut_xCorr_mean,
all_breathingRateEst_FFT_std,all_breathingRateEst_FFT_mean,
all_breathingEst_xCorr_std, all_breathingEst_xCorr_mean,
all_breathingEst_peakCount_std, all_breathingEst_peakCount_mean,
all_sumEnergyBreathWfm_mean,
all_sumEnergyBreathWfm_std,
all_sumEnergyHeartWfm_mean,
all_sumEnergyHeartWfm_std,
test_array1, test_array2, all_ti_og_br):
find_best = False
all_data = [
all_index_of_fftmax,
all_confidenceMetricBreathOut_std,
all_confidenceMetricBreathOut_xCorr_std,
all_confidenceMetricBreathOut_mean,
all_confidenceMetricBreathOut_xCorr_mean,
all_breathingRateEst_FFT_std,all_breathingRateEst_FFT_mean,
all_breathingEst_xCorr_std, all_breathingEst_xCorr_mean,
all_breathingEst_peakCount_std, all_breathingEst_peakCount_mean,
all_sumEnergyBreathWfm_mean,
all_sumEnergyBreathWfm_std,
all_sumEnergyHeartWfm_mean,
all_sumEnergyHeartWfm_std, all_ti_og_br]
all_data = [all_index_of_fftmax, all_breathingEst_xCorr_mean, all_breathingRateEst_FFT_mean]
all_data = np.array(all_data).transpose()
all_data = preprocessing.scale(all_data)
label_knn = []
for index, i in enumerate(all_gt_array): # build the labels for the ML classifier
if all_gt_array[index] <= 15:
label_knn.append(1)
else:
label_knn.append(0)
label_knn = np.array(label_knn)
test_array1 = np.array(test_array1)
test_array2 = np.array(test_array2)
all_gt_array = np.array(all_gt_array)
all_ti_og_br = np.array(all_ti_og_br)
kf = StratifiedKFold(n_splits = 3, random_state = 69, shuffle = True)
knn_p = []
svm_p = []
rf_p = []
ti_replace_result_kf_number = [] # used for the K-fold statistics
og_result_kf_number = []
for train_index, test_index in kf.split(all_data, label_knn):
X_train, X_test = all_data[train_index], all_data[test_index]
y_train, y_test = label_knn[train_index], label_knn[test_index]
og_result_kf = test_array1[test_index] # output of the original algorithm
ti_replace_result_kf = test_array2[test_index] # output with the TI replacement
gt_kf = all_gt_array[test_index] # ground truth
all_ti_og_kf = all_ti_og_br[test_index] # raw TI output
#print("TI Tr replacement:", calculate_l1_loss(gt_kf, ti_replace_result_kf))
ti_replace_result_kf_number.append(calculate_l1_loss(gt_kf, ti_replace_result_kf))
neigh = KNeighborsClassifier(n_neighbors = 5, weights = 'distance')
neigh.fit(X_train, y_train)
knn_p.append(neigh.score(X_test,y_test))
clf_rbf = svm.SVC(kernel="rbf", C = 5, gamma = 0.01, random_state = 69)
clf_rbf.fit(X_train, y_train) # train on the training split
svm_p.append(clf_rbf.score(X_test,y_test))
rf = RandomForestRegressor(n_estimators = 20, random_state = 69)
rf.fit(X_train, y_train)
rf_p.append(rf.score(X_test,y_test))
y_test_pre_rbf = clf_rbf.predict(X_test) # predict on the test split
for index, x in enumerate(y_test_pre_rbf):
if x == 1: # an SVM output of 1 means the ground-truth rate is below the threshold (<= 15 here)
og_result_kf[index] = all_ti_og_kf[index] # so replace the original algorithm output with the raw TI output
#print("TI SVM replacement:", calculate_l1_loss(gt_kf, og_result_kf))
og_result_kf_number.append(calculate_l1_loss(gt_kf, og_result_kf))
print("AVG knn 分類表現 ", np.mean(np.array(knn_p)))
print("AVG svm 分類表現 ", np.mean(np.array(svm_p)))
print("AVG rf 分類表現 ", np.mean(np.array(rf_p)))
print("AVG TI Tr取代:", np.mean(np.array(ti_replace_result_kf_number)))
print("AVG TI SVM取代:", np.mean(np.array(og_result_kf_number)))
clf_rbf = svm.SVC(kernel="rbf", C = 5, gamma = 0.01, random_state = 69)
clf_rbf.fit(all_data, label_knn)
with open('save/svm_br.pickle', 'wb') as f:
pickle.dump(clf_rbf, f)
if find_best:
parameters = {'gamma': [0.001, 0.01, 0.1, 1, 5, 10], 'C':[0.001, 0.01, 0.1, 1, 5, 10], 'degree':[3, 4, 5], 'kernel': ["rbf", "linear"]}
#parameters = {'kernel': ["rbf", "poly", "linear"]}
#n_jobs = -1 uses all CPU cores for a parallel multi-threaded search
gs = GridSearchCV(svm.SVC(), parameters, refit = True, cv = 3, verbose = 1, n_jobs = -1)
gs.fit(all_data, label_knn) #Run fit with all sets of parameters.
print('Best parameters: ', gs.best_params_)
print('Best score: ', gs.best_score_)
if __name__ == '__main__':
# Initial setting
count = 0
count_all = 0
absolute_error = 0
disp = False
diagram_disp = False # <added> whether to display the diagrams
scatter_disp = False
knn = True
all_pr_array = []
all_gt_array = []
all_ti_og_br = []
all_ti_og_hr = []
all_index_of_fftmax = []
all_std_of_phase_diff = []
all_breathingRateEst_FFT_std = []
all_breathingRateEst_FFT_mean = []
all_breathingEst_xCorr_std = []
all_breathingEst_xCorr_mean = []
all_breathingEst_peakCount_std = []
all_breathingEst_peakCount_mean = []
all_confidenceMetricBreathOut_std = []
all_confidenceMetricBreathOut_xCorr_std = []
all_confidenceMetricBreathOut_mean = []
all_confidenceMetricBreathOut_xCorr_mean = []
all_sumEnergyBreathWfm_mean = []
all_sumEnergyBreathWfm_std = []
all_sumEnergyHeartWfm_mean = []
all_sumEnergyHeartWfm_std = []
test_array1 = [] # output of the original algorithm
test_array2 = [] # TI output values (Tr)
sample_total = 0
acc_sample_total = 0
for user in tqdm(os.listdir("dataset")):
if os.path.isdir(os.path.join("dataset", user, "gt_br")):
predict_array = []
ground_truth_array = []
ti_predict_array = []
files_path = os.path.join("dataset", user, "0.8")
ground_truth_files_path = os.path.join("dataset", user, "gt_br")
files = os.listdir(files_path)
for name in os.listdir(ground_truth_files_path):
with open(os.path.join(ground_truth_files_path, name)) as f:
for line in f.readlines():
ground_truth_array.append(int(line))
all_gt_array.append(int(line))
for tmp in range(0, len(files)//2, 1):
file = files[tmp]
print(f'\nCurrent file: {file}')
datas_path = os.path.join(files_path, file)
vitial_sig = pd.read_csv(datas_path)
unwrapPhase = vitial_sig['unwrapPhasePeak_mm'].values
heart = vitial_sig['rsv[1]'].values
breath = vitial_sig['rsv[0]'].values
confidenceMetricBreathOut_std = np.std(vitial_sig['confidenceMetricBreathOut'].values)
confidenceMetricBreathOut_xCorr_std = np.std(vitial_sig['confidenceMetricBreathOut_xCorr'].values)
confidenceMetricBreathOut_mean = np.mean(vitial_sig['confidenceMetricBreathOut'].values)
confidenceMetricBreathOut_xCorr_mean = np.mean(vitial_sig['confidenceMetricBreathOut_xCorr'].values)
breathingRateEst_FFT_std = np.std(vitial_sig['breathingRateEst_FFT'].values)
breathingRateEst_FFT_mean = np.mean(vitial_sig['breathingRateEst_FFT'].values)
breathingEst_xCorr_std = np.std(vitial_sig['breathingEst_xCorr'].values)
breathingEst_xCorr_mean = np.mean(vitial_sig['breathingEst_xCorr'].values)
breathingEst_peakCount_std = np.std(vitial_sig['breathingEst_peakCount'].values)
breathingEst_peakCount_mean = np.mean(vitial_sig['breathingEst_peakCount'].values)
sumEnergyBreathWfm_mean = np.mean(vitial_sig['sumEnergyBreathWfm'].values)
sumEnergyBreathWfm_std = np.std(vitial_sig['sumEnergyBreathWfm'].values)
sumEnergyHeartWfm_mean = np.mean(vitial_sig['sumEnergyHeartWfm'].values)
sumEnergyHeartWfm_std = np.std(vitial_sig['sumEnergyHeartWfm'].values)
all_ti_og_br.append(int(np.mean(breath)))
ti_predict_array.append(int(np.mean(breath)))
sample_total += 1
for i in range (0, 800, 800): # 0, 600, 1200
result_rate, replace1, index_of_fftmax = detect_breath(unwrapPhase[0 + i: 800 + i], count, disp)
test_array1.append(round(result_rate))
if replace1:
result_rate = int(np.mean(breath))
all_index_of_fftmax.append(index_of_fftmax)
test_array2.append(round(result_rate))
all_confidenceMetricBreathOut_std.append(confidenceMetricBreathOut_std)
all_confidenceMetricBreathOut_xCorr_std.append(confidenceMetricBreathOut_xCorr_std)
all_confidenceMetricBreathOut_mean.append(confidenceMetricBreathOut_mean)
all_confidenceMetricBreathOut_xCorr_mean.append(confidenceMetricBreathOut_xCorr_mean)
all_breathingRateEst_FFT_std.append(breathingRateEst_FFT_std)
all_breathingRateEst_FFT_mean.append(breathingRateEst_FFT_mean)
all_breathingEst_xCorr_std.append(breathingEst_xCorr_std)
all_breathingEst_xCorr_mean.append(breathingEst_xCorr_mean)
all_breathingEst_peakCount_std.append(breathingEst_peakCount_std)
all_breathingEst_peakCount_mean.append(breathingEst_peakCount_mean)
all_sumEnergyBreathWfm_mean.append(sumEnergyBreathWfm_mean)
all_sumEnergyBreathWfm_std.append(sumEnergyBreathWfm_std)
all_sumEnergyHeartWfm_mean.append(sumEnergyHeartWfm_mean)
all_sumEnergyHeartWfm_std.append(sumEnergyHeartWfm_std)
predict_array.append(round(result_rate))
all_pr_array.append(round(result_rate))
if result_rate is not None:
absolute_error
"""
Base classes used in the `ChiantiPy.core.ion` and `ChiantiPy.core.spectrum`
classes. Mostly printing, plotting and saving routines.
"""
import copy
import time
import numpy as np
import matplotlib.pyplot as plt
import ChiantiPy.tools.util as util
import ChiantiPy.Gui as chGui
import ChiantiPy.tools.data as chdata
class ionTrails(object):
"""
Base class for `ChiantiPy.core.ion` and `ChiantiPy.core.spectrum`
"""
def argCheck(self, temperature=None, eDensity=None, pDensity='default', em = None, verbose=0):
''' Check the compatibility of the temperature, eDensity and pDensity
arguments, convert them to numpy arrays of at least one dimension,
and attach the results as attributes of the object.
'''
if temperature is not None:
self.Temperature = np.atleast_1d(temperature)
if isinstance(self.Temperature[0], str):
raise ValueError(' temperature can not be a string')
if np.any(self.Temperature <= 0.):
raise ValueError(' all temperatures must be positive')
self.Ntemp = self.Temperature.size
else:
raise ValueError('temperature not defined')
if pDensity == 'default':
self.p2eRatio()
if eDensity is not None:
self.EDensity = np.atleast_1d(eDensity)
if isinstance(self.EDensity[0], str):
raise ValueError(' EDensity can not be a string')
if np.any(self.EDensity <= 0.):
raise ValueError(' all densities must be positive')
self.Ndens = self.EDensity.size
# needed when doing ioneq.calculate()
else:
self.Ndens = 0
self.NTempDens = max(self.Ndens,self.Ntemp)
if self.Ndens > 1 and self.Ntemp == 1:
self.Temperature = np.tile(self.Temperature, self.NTempDens)
elif self.Ndens == 1 and self.Ntemp > 1:
self.EDensity = np.tile(self.EDensity, self.NTempDens)
if hasattr(self,'EDensity') and hasattr(self,'Temperature') and self.Temperature.size != self.EDensity.size:
raise ValueError('Temperature and density must be the same size.')
if pDensity is not None:
if pDensity == 'default' and eDensity is not None:
self.PDensity = self.ProtonDensityRatio*self.EDensity
else:
self.PDensity = np.atleast_1d(pDensity)
if self.PDensity.size < self.Ndens:
self.PDensity = np.tile(self.PDensity, self.Ndens)
self.NpDens = self.PDensity.size
if em is not None:
em = np.atleast_1d(em)
self.Em = em
if em.size == 1:
self.Em = np.tile(em,self.NTempDens)
elif em.size != self.NTempDens:
raise ValueError('the size of em must be either 1 or the size of the larger of temperature or density %5i'%(self.NTempDens))
else:
self.Em = np.ones_like(self.Temperature, np.float64)
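# A hedged illustration of the broadcasting performed by argCheck above
# (the numbers are examples only):
#
#   argCheck(temperature=1.e+6, eDensity=[1.e+9, 1.e+10, 1.e+11])
#     -> Ntemp = 1, Ndens = 3, NTempDens = 3
#     -> Temperature is tiled to [1.e+6, 1.e+6, 1.e+6]
#     -> Em defaults to np.ones(3) since no emission measure was given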
def intensityList(self, index=-1, wvlRange=None, wvlRanges=None, top=10, relative=0, outFile=0, rightDigits=4 ):
"""
List the line intensities. Checks to see if there is an existing Intensity attribute. If it exists, then those values are used.
Otherwise, the `intensity` method is called.
This method prints an ASCII table with the following columns:
1. Ion: the CHIANTI style notation for the ion, e.g. 'c_4' for C IV
2. lvl1: the lower level of the transition in the CHIANTI .elvlc file
3. lvl2: the upper level of the transition in the CHIANTI .elvlc file
4. lower: the notation, usually in LS coupling, of the lower fine
structure level
5. upper: the notation, usually in LS coupling, of the upper fine
structure level
6. Wvl(A): the wavelength of the transition in units as specified in
the chiantirc file.
7. Intensity
8. A value: the Einstein coefficient for spontaneous emission from
level 'j' to level 'i'
9. Obs: indicates whether the CHIANTI database considers this an
observed line or one obtained from theoretical energy levels
Regarding the intensity column, if 'flux' in the chiantirc file is set
to 'energy', the intensity is given by,
.. math::
I = \Delta E_{ij}n_jA_{ij}\mathrm{Ab}\\frac{1}{N_e}
\\frac{N(X^{+m})}{N(X)}\mathrm{EM},
in units of ergs cm\ :sup:`-2` s\ :sup:`-1` sr\ :sup:`-1`. If 'flux' is set to 'photon',
.. math::
I = n_jA_{ij}\mathrm{Ab}\\frac{1}{N_e}\\frac{N(X^{+m})}{N(X)}
\mathrm{EM},
where,
- :math:`\Delta E_{ij}` is the transition energy (ergs)
- :math:`n_j` is the fractions of ions in level :math:`j`
- :math:`A_{ij}` is the Einstein coefficient for spontaneous emission
from level :math:`j` to level :math:`i` (in s\ :sup:`-1`)
- :math:`\mathrm{Ab}` is the abundance of the specified element
relative to hydrogen
- :math:`N_e` is the electron density (in cm\ :sup:`-3`)
- :math:`N(X^{+m})/N(X)` is the fractional ionization of ion as a
function of temperature
- :math:`\mathrm{EM}` is the emission measure integrated along the
line-of-sight, :math:`\int\mathrm{d}l\,N_eN_H` (cm\ :sup:`-5`) where
:math:`N_H` is the density of hydrogen (neutral + ionized)
(cm\ :sup:`-3`)
Note that if `relative` is set, the line intensity is relative to the
strongest line and so the output will be unitless.
Parameters
-----------
index : `int`,optional
Index the temperature or eDensity array to use.
-1 (default) sets the specified value to the middle of the array
wvlRange : `tuple`
Wavelength range
wvlRanges : a tuple, list or array of 2-element tuples, lists or
arrays, so that multiple wavelength ranges can be
specified
top : `int`
Number of lines to plot, sorted by descending magnitude.
relative : `int`
specifies whether to normalize to strongest line
default (relative = 0) specified that the intensities should be
their calculated values
outFile : `str`
specifies the file that the intensities should be output to
default(outFile = 0) intensities are output to the terminal
rightDigits: `int`
specifies the format for the wavelengths for the number of digits
to right of the decimal place
"""
if not hasattr(self, 'Intensity'):
try:
self.intensity()
#TODO: specify what exception to catch! or omit the try, catch
except:
print(' intensities not calculated and emiss() is unable to calculate them')
print(' perhaps the temperature and/or eDensity are not set')
return
temperature = self.Temperature
eDensity = self.EDensity
em = self.Em
ndens = eDensity.size
ntemp = temperature.size
intens = copy.deepcopy(self.Intensity)
if 'errorMessage' in intens.keys():
print(' errorMessage: %s'%(intens['errorMessage']))
return
intensity = intens['intensity']
ionS = intens['ionS']
wvl = intens['wvl']
lvl1 = intens['lvl1']
lvl2 = intens['lvl2']
pretty1 = intens['pretty1']
pretty2 = intens['pretty2']
obs = intens['obs']
avalue = intens['avalue']
if ndens == 1 and ntemp == 1:
if index < 0:
index = 0
dstr = ' - Density = %10.2e (cm$^{-3}$)' %(eDensity[index])
tstr = ' - T = %10.2e (K)' %(temperature[index])
intensity = intensity[index]
print(' temperature = %10.2e eDensity = %10.2e'%(temperature[index], eDensity[index]))
elif ndens == 1 and ntemp > 1:
if index < 0:
index = ntemp//2
print('using index = %5i specifying temperature = %10.2e'%(index, temperature[index]))
self.Message = 'using index = %5i specifying temperature = %10.2e'%(index, temperature[index])
intensity=intensity[index]
elif ndens > 1 and ntemp == 1:
if index < 0:
index = ntemp//2
print('using index =%5i specifying eDensity = %10.2e'%(index, eDensity[index]))
self.Message = 'using index =%5i specifying eDensity = %10.2e'%(index, eDensity[index])
intensity=intensity[index]
elif ndens > 1 and ntemp > 1:
if index < 0:
index = ntemp//2
print('using index = %5i specifying temperature = %10.2e, eDensity = %10.2e em = %10.2e'%(index, temperature[index], eDensity[index], em[index]))
self.Message = 'using index = %5i specifying temperature = %10.2e, eDensity = %10.2e = %10.2e'%(index, temperature[index], eDensity[index], em[index])
intensity=intensity[index]
if wvlRange is None and wvlRanges is None:
wvlRange = self.WvlRange
wvlIndex=util.between(wvl,wvlRange)
elif wvlRange is not None:
wvlIndex=util.between(wvl,wvlRange)
elif wvlRanges is not None:
wvlIndex = []
for awvlRange in wvlRanges:
wvlIndex.extend(util.between(wvl,awvlRange))
else:
wvlIndex = range(wvl.size)
# get lines in the specified wavelength range
intensity = intensity[wvlIndex]
ionS = ionS[wvlIndex]
wvl = wvl[wvlIndex]
lvl1 = lvl1[wvlIndex]
lvl2 = lvl2[wvlIndex]
avalue = avalue[wvlIndex]
pretty1 = pretty1[wvlIndex]
pretty2 = pretty2[wvlIndex]
obs = obs[wvlIndex]
self.Error = 0
if wvl.size == 0:
print('No lines in this wavelength interval')
self.Error = 1
self.Message = 'No lines in this wavelength interval'
return
elif top == 0:
top = wvl.size
elif top > wvl.size:
top = wvl.size
# sort by intensity
isrt = np.argsort(intensity)
ionS = ionS[isrt[-top:]]
wvl = wvl[isrt[-top:]]
lvl1 = lvl1[isrt[-top:]]
lvl2 = lvl2[isrt[-top:]]
obs = obs[isrt[-top:]]
intensity = intensity[isrt[-top:]]
avalue = avalue[isrt[-top:]]
pretty1 = pretty1[isrt[-top:]]
pretty2 = pretty2[isrt[-top:]]
# must follow setting top
if relative:
intensity = intensity/intensity[:top].max()
idx = np.argsort(wvl)
fmt1 = '%5s %5s %5s %25s - %-25s %12s %12s %12s %3s'
fmt = '%5s %5i %5i %25s - %-25s %12.' + str(rightDigits) + \
'f %12.2e %12.2e %1s'
print(' ')
print(' ------------------------------------------')
print(' ')
print(fmt1%('Ion','lvl1','lvl2','lower','upper','Wvl(A)','Intensity','A value','Obs'))
for kdx in idx:
print(fmt%(ionS[kdx], lvl1[kdx], lvl2[kdx], pretty1[kdx], pretty2[kdx], wvl[kdx], intensity[kdx], avalue[kdx], obs[kdx]))
print(' ')
print(' ------------------------------------------')
print(' ')
#
self.Intensity['wvlTop'] = wvl[idx]
self.Intensity['intensityTop'] = intensity[idx]
if outFile:
fmt1a = '%5s %5s %5s %25s - %-25s %12s %12s %12s %3s \n'
fmt = '%5s %5i %5i %25s - %-25s %12.' + str(rightDigits) + \
'f %12.2e %12.2e %1s \n'
outpt = open(outFile, 'w')
outpt.write(fmt1a%('Ion','lvl1','lvl2','lower','upper','Wvl(A)','Intensity','A value','Obs'))
for kdx in idx:
outpt.write(fmt%(ionS[kdx], lvl1[kdx], lvl2[kdx], pretty1[kdx], pretty2[kdx], wvl[kdx], intensity[kdx], avalue[kdx], obs[kdx]))
outpt.close()
def intensityPlot(self, index=-1, wvlRange=None, top=10,
#emacs, this is -*-Python-*- mode
"""
There are several ways we want to acquire data:
A) From live cameras (for indefinite periods).
B) From full-frame .fmf files (of known length).
C) From small-frame .ufmf files (of unknown length).
D) From a live image generator (for indefinite periods).
E) From a point generator (for indefinite periods).
The processing chain normally consists of:
0) Grab images from ImageSource. (This is not actually part of the chain).
1) Processing the images in ProcessCamClass
2) Save images in SaveCamData.
3) Save small .ufmf images in SaveSmallData.
4) Display images in DisplayCamData.
In cases B-E, some form of image/data control (play, stop, set fps)
must be settable. Ideally, this would be possible from a Python API
(for automated testing) and from a GUI (for visual debugging).
"""
from __future__ import division
from __future__ import with_statement
import os
BENCHMARK = int(os.environ.get('FLYDRA_BENCHMARK',0))
FLYDRA_BT = int(os.environ.get('FLYDRA_BT',0)) # threaded benchmark
NAUGHTY_BUT_FAST = False
#DISABLE_ALL_PROCESSING = True
DISABLE_ALL_PROCESSING = False
from flydra_core.common_variables import near_inf
bright_non_gaussian_cutoff = 255
bright_non_gaussian_replacement = 5
import threading, time, socket, sys, struct, warnings, optparse
import traceback
import Queue
import numpy
import numpy as nx
import numpy as np
import errno
import scipy.misc
import numpy.dual
import json
import contextlib
import pkg_resources # needed to run motmot namespace packages
import motmot.ufmf.ufmf as ufmf
import motmot.realtime_image_analysis.slow
import flydra_core.flydra_socket as flydra_socket
#import flydra_core.debuglock
#DebugLock = flydra_core.debuglock.DebugLock
import motmot.FlyMovieFormat.FlyMovieFormat as FlyMovieFormat
import os
PYCI2 = int(os.environ.get('FLYDRA_PYCI2',0))
if PYCI2:
import pyci2.core as cam_iface
else:
import motmot.cam_iface.cam_iface_ctypes as cam_iface
import camnode_colors
import roslib;
roslib.load_manifest('sensor_msgs')
roslib.load_manifest('ros_flydra')
import sensor_msgs.msg
import std_msgs.msg
import ros_flydra.cv2_bridge
import ros_flydra.srv
from ros_flydra.srv import MainBrainGetVersion, \
MainBrainRegisterNewCamera, MainBrainGetListenAddress
import rospy
if BENCHMARK:
class NonExistantError(Exception):
pass
ConnectionClosedError = NonExistantError
import flydra_camnode.version
import flydra_core.rosutils
import flydra_core.data_descriptions
WIRE_ORDER_CUR_VAL_IDX = flydra_core.data_descriptions.WIRE_ORDER_CUR_VAL_IDX
WIRE_ORDER_MEAN_VAL_IDX = flydra_core.data_descriptions.WIRE_ORDER_MEAN_VAL_IDX
WIRE_ORDER_SUMSQF_VAL_IDX = flydra_core.data_descriptions.WIRE_ORDER_SUMSQF_VAL_IDX
import camnode_utils
import motmot.FastImage.FastImage as FastImage
#FastImage.set_debug(3)
import flydra_core.debuglock
DebugLock = flydra_core.debuglock.DebugLock
LOG = flydra_core.rosutils.Log(to_ros=True)
def ros_ensure_valid_name(name):
name = name.replace('-','_')
name = name.replace('Allied Vision Technologies','AVT')
return name
class SharedValue:
# in fview
def __init__(self):
self.evt = threading.Event()
self._val = None
def set(self,value):
# called from producer thread
self._val = value
self.evt.set()
def is_new_value_waiting(self):
return self.evt.isSet()
def get(self,*args,**kwargs):
# called from consumer thread
self.evt.wait(*args,**kwargs)
val = self._val
self.evt.clear()
return val
def get_nowait(self):
# race condition here -- see comments in fview.py
val = self._val
self.evt.clear()
return val
class SharedValue1(object):
# in trackem
def __init__(self,initial_value):
self._val = initial_value
#self.lock = DebugLock('SharedValue1')
self.lock = threading.Lock()
def get(self):
self.lock.acquire()
try:
val = self._val
finally:
self.lock.release()
return val
def set(self,new_value):
self.lock.acquire()
try:
self._val = new_value
finally:
self.lock.release()
class DummyMainBrain:
def __init__(self,*args,**kw):
self.set_image = self.noop
self.log_message = self.noop
self.close = self.noop
self.camno = 0
def noop(self,*args,**kw):
return
def register_new_camera(self,*args,**kw):
self.camno += 1
return 12345
def get_and_clear_commands(self,*args,**kw):
return {}
class ROSMainBrain:
def __init__(self,*args,**kw):
rospy.wait_for_service('/flydra_mainbrain/get_version')
self._get_version = rospy.ServiceProxy('/flydra_mainbrain/get_version',
MainBrainGetVersion)
rospy.wait_for_service('/flydra_mainbrain/register_new_camera')
self._register_new_camera = rospy.ServiceProxy('/flydra_mainbrain/register_new_camera',
MainBrainRegisterNewCamera)
rospy.wait_for_service('/flydra_mainbrain/get_listen_address')
self._get_listen_address = rospy.ServiceProxy('/flydra_mainbrain/get_listen_address',
MainBrainGetListenAddress)
rospy.wait_for_service('/flydra_mainbrain/get_and_clear_commands')
self._get_and_clear_commands = rospy.ServiceProxy('/flydra_mainbrain/get_and_clear_commands',
ros_flydra.srv.MainBrainGetAndClearCommands)
rospy.wait_for_service('/flydra_mainbrain/set_image')
self._set_image = rospy.ServiceProxy('/flydra_mainbrain/set_image',
ros_flydra.srv.MainBrainSetImage)
rospy.wait_for_service('/flydra_mainbrain/receive_missing_data')
self._receive_missing_data = rospy.ServiceProxy('/flydra_mainbrain/receive_missing_data',
ros_flydra.srv.MainBrainReceiveMissingData)
rospy.wait_for_service('/flydra_mainbrain/close_camera')
self._close_camera = rospy.ServiceProxy('/flydra_mainbrain/close_camera',
ros_flydra.srv.MainBrainCloseCamera)
rospy.wait_for_service('/flydra_mainbrain/log_message')
self._log_message = rospy.ServiceProxy('/flydra_mainbrain/log_message',
ros_flydra.srv.MainBrainLogMessage)
def log_message(self, cam_id, timestamp, message):
req = ros_flydra.srv.MainBrainLogMessageRequest()
req.cam_id = std_msgs.msg.String(cam_id)
req.timestamp = std_msgs.msg.Float32(timestamp)
req.message = std_msgs.msg.String(message)
self._log_message(req)
def get_version(self):
return self._get_version().version.data
def register_new_camera(self, cam_guid, scalar_control_info, camnode_ros_name):
hostname = flydra_core.rosutils.get_node_hostname( camnode_ros_name )
my_addrinfo = flydra_socket.make_addrinfo( host=flydra_socket.get_bind_address() )
req = ros_flydra.srv.MainBrainRegisterNewCameraRequest()
req.cam_guid = std_msgs.msg.String(cam_guid)
req.scalar_control_info_json = std_msgs.msg.String(json.dumps(scalar_control_info))
req.camnode_ros_name = std_msgs.msg.String(camnode_ros_name)
req.cam_hostname = std_msgs.msg.String(hostname)
self._register_new_camera(req)
def get_listen_address(self):
req = ros_flydra.srv.MainBrainGetListenAddressRequest()
response = self._get_listen_address(req)
result = json.loads(response.listen_address_json.data)
if isinstance(result, list):
# Cast to tuple. json likes to return list, but
# socket.socket wants tuple.
result = tuple(result)
return result
def get_and_clear_commands(self, cam_id):
req = ros_flydra.srv.MainBrainGetAndClearCommandsRequest()
req.cam_id = std_msgs.msg.String(cam_id)
response = self._get_and_clear_commands(req)
cmds_json = response.cmds_json.data
cmds = json.loads(cmds_json)
return cmds
def set_image(self, cam_id, lb, arr):
assert len(lb)==2
arr = np.array(arr,copy=False)
assert arr.ndim==2
assert arr.dtype==np.uint8
req = ros_flydra.srv.MainBrainSetImageRequest()
req.cam_id = std_msgs.msg.String(cam_id)
req.left = std_msgs.msg.Int32(lb[0])
req.bottom = std_msgs.msg.Int32(lb[1])
req.image = ros_flydra.cv2_bridge.numpy_to_imgmsg(arr)
self._set_image(req)
def receive_missing_data(self, cam_id, framenumber_offset, missing_data):
req = ros_flydra.srv.MainBrainReceiveMissingDataRequest(
std_msgs.msg.String(cam_id),
std_msgs.msg.Int64(framenumber_offset),
std_msgs.msg.String(json.dumps( missing_data )))
self._receive_missing_data(req)
def close(self, cam_id):
req = ros_flydra.srv.MainBrainCloseCameraRequest()
req.cam_id = std_msgs.msg.String(cam_id)
self._close_camera(req)
import flydra_core.common_variables
import motmot.realtime_image_analysis.realtime_image_analysis as realtime_image_analysis
if sys.platform == 'win32':
time_func = time.clock
else:
time_func = time.time
def TimestampEcho(timestamp_echo_receiver):
sendto_port = flydra_core.common_variables.timestamp_echo_gatherer_port
sender = None
fmt = flydra_core.common_variables.timestamp_echo_fmt_diff
while 1:
try:
buf, sender_sockaddr = timestamp_echo_receiver.recv(return_sender_sockaddr=True)
except socket.error as err:
if err.errno == errno.EINTR: # interrupted system call
continue
else:
LOG.warn('TimestampEcho errno (the exception about to come) is %s'%errno.errorcode[err.errno])
raise
if struct is None: # this line prevents bizarre interpreter shutdown errors
return
newbuf = buf + struct.pack( fmt, time.time() )
if sender is None:
addrinfo = flydra_socket.make_addrinfo(host=sender_sockaddr[0],port=sendto_port)
sender = flydra_socket.FlydraTransportSender( addrinfo )
sender.send(newbuf)
def stdout_write(x):
while 1:
try:
sys.stdout.write(x)
break
except IOError as err:
if err.args[0] == errno.EINTR: # interrupted system call
continue
while 1:
try:
sys.stdout.flush()
break
except IOError as err:
if err.args[0] == errno.EINTR: # interrupted system call
continue
L_i = nx.array([0,0,0,1,3,2])
L_j = nx.array([1,2,3,2,1,3])
def Lmatrix2Lcoords(Lmatrix):
return Lmatrix[L_i,L_j]
def pluecker_from_verts(A,B):
"""
See Hartley & Zisserman (2003) p. 70
"""
if len(A)==3:
A = A[0], A[1], A[2], 1.0
if len(B)==3:
B = B[0], B[1], B[2], 1.0
A=nx.reshape(A,(4,1))
B=nx.reshape(B,(4,1))
L = nx.dot(A,nx.transpose(B)) - nx.dot(B,nx.transpose(A))
return Lmatrix2Lcoords(L)
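# Minimal usage sketch (points chosen arbitrarily): the line through
# A = (0, 0, 0) and B = (1, 0, 0) has Pluecker coordinates
#
#   pluecker_from_verts((0., 0., 0.), (1., 0., 0.))
#   # -> array([ 0.,  0., -1.,  0.,  0.,  0.]) with the (L_i, L_j) index
#   #    convention defined above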
class PreallocatedBuffer(object):
def __init__(self,size,pool):
self._size = size
self._buf = FastImage.FastImage8u(size)
self._pool = pool
def get_size(self):
return self._size
def get_buf(self):
return self._buf
def get_pool(self):
return self._pool
class PreallocatedBufferPool(object):
"""One instance of this class for each camera. Threadsafe."""
def __init__(self,size):
self._lock = threading.Lock()
# start: vars access controlled by self._lock
self._allocated_pool = []
# end: vars access controlled by self._lock
self.set_size(size)
self._buffers_handed_out = 0 # self._zero_buffer_lock is set when this is 0
self._zero_buffer_lock = threading.Event()
self._zero_buffer_lock.set()
def set_size(self,size):
"""size is FastImage.Size() instance"""
assert isinstance(size,FastImage.Size)
with self._lock:
self._size = size
del self._allocated_pool[:]
def get_free_buffer(self):
with self._lock:
if len(self._allocated_pool):
buffer = self._allocated_pool.pop()
else:
buffer = PreallocatedBuffer(self._size,self)
self._buffers_handed_out += 1
self._zero_buffer_lock.clear()
return buffer
def return_buffer(self,buffer):
assert isinstance(buffer, PreallocatedBuffer)
with self._lock:
self._buffers_handed_out -= 1
if buffer.get_size() == self._size:
self._allocated_pool.append( buffer )
if self._buffers_handed_out == 0:
self._zero_buffer_lock.set()
def get_num_outstanding_buffers(self):
return self._buffers_handed_out
def wait_for_0_outstanding_buffers(self,*args):
self._zero_buffer_lock.wait(*args)
@contextlib.contextmanager
def get_free_buffer_from_pool(pool):
"""manage access to buffers from the pool"""
buf = pool.get_free_buffer()
buf._i_promise_to_return_buffer_to_the_pool = False
try:
yield buf
finally:
if not buf._i_promise_to_return_buffer_to_the_pool:
pool.return_buffer(buf)
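# Hedged usage sketch (illustrative only; FastImage.Size(w, h) is assumed to be
# the usual width/height constructor):
#
#     pool = PreallocatedBufferPool(FastImage.Size(640, 480))
#     with get_free_buffer_from_pool(pool) as buf:
#         fibuf = buf.get_buf()   # FastImage.FastImage8u to fill with a frame
#         # ... grab / process the frame here ...
#     # unless _i_promise_to_return_buffer_to_the_pool was set, buf is back in
#     # the pool at this point
#     pool.wait_for_0_outstanding_buffers()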
class ProcessCamClass(rospy.SubscribeListener):
def __init__(self,
coord_receiver_addrinfo=None,
cam_id=None,
log_message_queue=None,
max_num_points=None,
roi2_radius=None,
bg_frame_interval=None,
bg_frame_alpha=None,
cam_no=-1,
mask_image=None,
diff_threshold_shared=None,
clear_threshold_shared=None,
n_sigma_shared=None,
color_range_1_shared=None,
color_range_2_shared=None,
color_range_3_shared=None,
sat_thresh_shared=None,
red_only_shared=None,
framerate = None,
lbrt=None,
max_height=None,
max_width=None,
globals = None,
options = None,
initial_image_dict = None,
benchmark = False,
posix_scheduler = '',
):
self.ros_namespace = cam_id
self.pub_img_n_subscribed = 0
self.pub_img = rospy.Publisher('%s/image_raw'%self.ros_namespace,
sensor_msgs.msg.Image,
subscriber_listener=self,
queue_size=1,
tcp_nodelay=True)
self.pub_img_rate = float(options.rosrate)
self.pub_img_lasttime = time.time()
self.pub_rate = rospy.Publisher('%s/framerate'%self.ros_namespace,
std_msgs.msg.Float32,
queue_size=1,
tcp_nodelay=True)
self.pub_rate_lasttime = time.time()
self.pub_rate_lastframe = 0
self.benchmark = benchmark
self.posix_scheduler = posix_scheduler
self.options = options
self.globals = globals
if framerate is not None:
self.shortest_IFI = 1.0/framerate
else:
self.shortest_IFI = numpy.inf
if self.benchmark:
self.coord_socket = flydra_socket.get_dummy_sender()
else:
self.coord_socket = flydra_socket.FlydraTransportSender( coord_receiver_addrinfo )
if len(cam_id) > (flydra_core.common_variables.cam_id_count-1):
raise ValueError('cam_id %r is too long'%cam_id)
self.cam_id = cam_id
self.log_message_queue = log_message_queue
self.bg_frame_alpha = bg_frame_alpha
self.bg_frame_interval = bg_frame_interval
self.diff_threshold_shared = diff_threshold_shared
self.clear_threshold_shared = clear_threshold_shared
self.n_sigma_shared = n_sigma_shared
self.red_only_shared = red_only_shared
self.color_range_1_shared = color_range_1_shared
self.color_range_2_shared = color_range_2_shared
self.color_range_3_shared = color_range_3_shared
self.sat_thresh_shared = sat_thresh_shared
self.new_roi = threading.Event()
self.new_roi_data = None
self.new_roi_data_lock = threading.Lock()
self.max_height = max_height
self.max_width = max_width
if mask_image is None:
mask_image = numpy.zeros( (self.max_height,
self.max_width),
dtype=bool )
# mask is currently an array of bool
mask_image = mask_image.astype(numpy.uint8)*255
self.mask_image = mask_image
self.max_num_points=max_num_points
self.realtime_analyzer = realtime_image_analysis.RealtimeAnalyzer(lbrt,
self.max_width,
self.max_height,
self.max_num_points,
roi2_radius,
)
self.realtime_analyzer.diff_threshold = self.diff_threshold_shared.get_nowait()
self.realtime_analyzer.clear_threshold = self.clear_threshold_shared.get_nowait()
self._chain = camnode_utils.ChainLink()
self._initial_image_dict = initial_image_dict
def peer_subscribe(self, topic_name, topic_publish, peer_publish):
self.pub_img_n_subscribed += 1
def peer_unsubscribe(self, topic_name, num_peers):
self.pub_img_n_subscribed -= 1
def get_chain(self):
return self._chain
def get_roi(self):
return self.realtime_analyzer.roi
def set_roi(self, lbrt):
with self.new_roi_data_lock:
self.new_roi_data = lbrt
self.new_roi.set()
roi = property( get_roi, set_roi )
def _convert_to_wire_order(self, xpoints, hw_roi_frame, running_mean_im, sumsqf ):
"""the images passed in are already in roi coords, as are index_x and index_y.
convert to values for sending.
"""
points = []
hw_roi_frame = numpy.asarray( hw_roi_frame )
for xpt in xpoints:
try:
(x0_abs, y0_abs, area, slope, eccentricity, index_x, index_y) = xpt
except:
LOG.warn('converting xpt %s' % xpt)
raise
# Find values at location in image that triggered
# point. Cast to Python int and floats.
cur_val = int(hw_roi_frame[index_y,index_x])
mean_val = float(running_mean_im[index_y, index_x])
sumsqf_val = float(sumsqf[index_y, index_x])
slope_found = not numpy.isnan(slope)
if not slope_found:
# prevent nan going across network
slope = 0.0
elif numpy.isinf(slope):
# prevent inf going across network
slope = near_inf
if numpy.isinf(eccentricity):
# prevent inf going across network
eccentricity = near_inf
# see flydra_core.common_variables.recv_pt_fmt struct definition:
pt = (x0_abs, y0_abs, area, slope, eccentricity,
slope_found, cur_val,
# Copyright 2020 The Google AI Team, Stanford University and The HuggingFace Inc. team.
# Copyright (c) 2020 <NAME>. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import tensorflow as tf
# from transformers import TFGPT2MainLayer, TFBlock, TFBaseModelOutputWithPastAndCrossAttentions
# from transformers import TFGPT2PreTrainedModel
from modeling_utils import TFBlock
from configuration_ganzs import ElectraConfig
from file_utils import add_start_docstrings, add_start_docstrings_to_callable
from modeling_utils import ACT2FN, TFBertEncoder, TFGPT2PreTrainedModel
from modeling_utils import get_initializer, shape_list
from tokenization_utils import BatchEncoding
import pretrain_utils, collections
logger = logging.getLogger(__name__)
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_MAP = {
"google/electra-small-generator": "https://s3.amazonaws.com/models.huggingface.co/bert/google/electra-small-generator/tf_model.h5",
"google/electra-base-generator": "https://s3.amazonaws.com/models.huggingface.co/bert/google/electra-base-generator/tf_model.h5",
"google/electra-large-generator": "https://s3.amazonaws.com/models.huggingface.co/bert/google/electra-large-generator/tf_model.h5",
"google/electra-small-discriminator": "https://s3.amazonaws.com/models.huggingface.co/bert/google/electra-small-discriminator/tf_model.h5",
"google/electra-base-discriminator": "https://s3.amazonaws.com/models.huggingface.co/bert/google/electra-base-discriminator/tf_model.h5",
"google/electra-large-discriminator": "https://s3.amazonaws.com/models.huggingface.co/bert/google/electra-large-discriminator/tf_model.h5",
}
class TFElectraEmbeddings(tf.keras.layers.Layer):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.vocab_size = config.vocab_size
self.embedding_size = config.embedding_size
self.initializer_range = config.initializer_range
self.position_embeddings = tf.keras.layers.Embedding(
config.max_position_embeddings,
config.embedding_size,
embeddings_initializer=get_initializer(self.initializer_range),
name="position_embeddings",
)
self.token_type_embeddings = tf.keras.layers.Embedding(
config.type_vocab_size,
config.embedding_size,
embeddings_initializer=get_initializer(self.initializer_range),
name="token_type_embeddings",
)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)
self.amp = config.amp
def build(self, input_shape):
"""Build shared word embedding layer """
with tf.name_scope("word_embeddings"):
# Create and initialize weights. The random normal initializer was chosen
# arbitrarily, and works well.
self.word_embeddings = self.add_weight(
"weight",
shape=[self.vocab_size, self.embedding_size],
initializer=get_initializer(self.initializer_range),
)
super().build(input_shape)
def call(self, inputs, mode="embedding", training=False):
"""Get token embeddings of inputs.
Args:
inputs: list of four tensors (input_ids, position_ids, token_type_ids, inputs_embeds); the id tensors are int64 with shape [batch_size, length]
mode: string, a valid value is one of "embedding" and "linear".
Returns:
outputs: (1) If mode == "embedding", output embedding tensor, float32 with
shape [batch_size, length, embedding_size]; (2) mode == "linear", output
linear tensor, float32 with shape [batch_size, length, vocab_size].
Raises:
ValueError: if mode is not valid.
Shared weights logic adapted from
https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24
"""
if mode == "embedding":
return self._embedding(inputs, training=training)
elif mode == "linear":
return self._linear(inputs)
else:
raise ValueError("mode {} is not valid.".format(mode))
def _embedding(self, inputs, training=False):
"""Applies embedding based on inputs tensor."""
input_ids, position_ids, token_type_ids, inputs_embeds = inputs
if input_ids is not None:
input_shape = shape_list(input_ids)
else:
input_shape = shape_list(inputs_embeds)[:-1]
seq_length = input_shape[1]
if position_ids is None:
position_ids = tf.range(seq_length, dtype=tf.int32)[tf.newaxis, :]
if token_type_ids is None:
token_type_ids = tf.fill(input_shape, 0)
if inputs_embeds is None:
inputs_embeds = tf.gather(self.word_embeddings, input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
if self.amp:
embeddings = inputs_embeds + tf.cast(position_embeddings, tf.float16) + tf.cast(token_type_embeddings, tf.float16)
else:
embeddings = inputs_embeds + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings, training=training)
return embeddings
def _linear(self, inputs):
"""Computes logits by running inputs through a linear layer.
Args:
inputs: A float32 tensor with shape [batch_size, length, embedding_size]
Returns:
float32 tensor with shape [batch_size, length, vocab_size].
"""
batch_size = shape_list(inputs)[0]
length = shape_list(inputs)[1]
x = tf.reshape(inputs, [-1, self.embedding_size])
logits = tf.matmul(x, self.word_embeddings, transpose_b=True)
return tf.reshape(logits, [batch_size, length, self.vocab_size])
#DONE
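# Hedged sketch of the two call modes of TFElectraEmbeddings above (illustrative
# only; `config` is assumed to be a fully populated ElectraConfig and
# `input_ids` an int tensor of shape [batch, length]):
#
#     emb = TFElectraEmbeddings(config, name="embeddings")
#     hidden = emb([input_ids, None, None, None], mode="embedding")  # [batch, length, embedding_size]
#     logits = emb(hidden, mode="linear")                            # [batch, length, vocab_size]
#
# The "linear" mode reuses the word-embedding matrix (weight tying), so the
# generator's output projection shares parameters with the input embeddings.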
class TFElectraDiscriminatorPredictions(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.dense = tf.keras.layers.Dense(
config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense")
self.dense_prediction = tf.keras.layers.Dense(
1, kernel_initializer=get_initializer(config.initializer_range), name="dense_prediction")
self.config = config
def call(self, discriminator_hidden_states, training=False):
hidden_states = self.dense(discriminator_hidden_states)
hidden_states = ACT2FN[self.config.hidden_act](hidden_states)
logits = tf.squeeze(self.dense_prediction(hidden_states), axis=-1)
return logits
class TFElectraGeneratorPredictions(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
self.dense = tf.keras.layers.Dense(
config.embedding_size, kernel_initializer=get_initializer(config.initializer_range), name="dense")
def call(self, generator_hidden_states, training=False):
hidden_states = self.dense(generator_hidden_states)
hidden_states = ACT2FN["gelu"](hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class TFElectraPreTrainedModel(TFGPT2PreTrainedModel):
config_class = ElectraConfig
pretrained_model_archive_map = TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_MAP
base_model_prefix = "electra"
def get_extended_attention_mask(self, attention_mask, input_shape):
if attention_mask is None:
attention_mask = tf.fill(input_shape, 1)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
extended_attention_mask = attention_mask[:, tf.newaxis, tf.newaxis, :]
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = tf.cast(extended_attention_mask, tf.float32)
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
return extended_attention_mask
def get_head_mask(self, head_mask):
if head_mask is not None:
raise NotImplementedError
else:
head_mask = [None] * self.config.num_hidden_layers
return head_mask
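# Hedged worked example for get_extended_attention_mask (illustrative only;
# `model` stands for an instance of any subclass):
#
#     mask = tf.constant([[1, 1, 0]])   # last position is padding
#     ext = model.get_extended_attention_mask(mask, input_shape=(1, 3))
#     # ext has shape [1, 1, 1, 3] and ext[0, 0, 0] == [0., 0., -10000.],
#     # i.e. padded positions receive a large negative bias before the softmax.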
# SWAP WITH TFGPT2MainLayer
class TFElectraMainLayer(TFElectraPreTrainedModel):
config_class = ElectraConfig
def __init__(self, config, shared_embeddings=False, input_embeddings=None, **kwargs):
super().__init__(config, **kwargs)
if shared_embeddings and input_embeddings is not None:
self.wte = input_embeddings
else:
self.wte = TFElectraEmbeddings(config, name="embeddings")
#synonym
self.embeddings = self.wte
if config.embedding_size != config.hidden_size:
self.embeddings_project = tf.keras.layers.Dense(
config.hidden_size,
kernel_initializer=get_initializer(config.initializer_range),
name="embeddings_project")
# self.encoder = TFGPT2Model(config, name="encoder")
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
# self.use_cache = config.use_cache
# self.return_dict = config.use_return_dict
self.num_hidden_layers = config.n_layer
self.vocab_size = config.vocab_size
self.n_embd = config.n_embd
self.n_positions = config.n_positions
self.initializer_range = config.initializer_range
self.drop = tf.keras.layers.Dropout(config.embd_pdrop)
self.h = [TFBlock(config, scale=True, name=f"h_._{i}") for i in range(config.n_layer)]
self.ln_f = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="ln_f")
def build(self, input_shape):
with tf.name_scope("wpe"):
self.wpe = self.add_weight(
name="embeddings",
shape=[self.n_positions, self.n_embd],
initializer=get_initializer(self.initializer_range),
)
# super().build(input_shape)
def get_input_embeddings(self):
return self.embeddings
def _resize_token_embeddings(self, new_num_tokens):
raise NotImplementedError
def _prune_heads(self, heads_to_prune):
""" Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
See base class PreTrainedModel
"""
raise NotImplementedError
def call(
self,
# inputs,
input_ids=None,
past_key_values=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs,
):
# if isinstance(inputs, (tuple, list)):
# # FIXME!
# input_ids = inputs[0]
# past_key_values = inputs[1] if len(inputs) > 1 else past_key_values
# attention_mask = inputs[2] if len(inputs) > 2 else attention_mask
# token_type_ids = inputs[3] if len(inputs) > 3 else token_type_ids
# position_ids = inputs[4] if len(inputs) > 4 else position_ids
# head_mask=inputs[5] if len(inputs) > 5 else position_ids
# inputs_embeds = inputs[6] if len(inputs) > 6 else inputs_embeds
# encoder_hidden_states = inputs[7] if len(inputs) > 7 else encoder_hidden_states
# encoder_attention_mask = inputs[8] if len(inputs) > 8 else encoder_attention_mask
# use_cache = inputs[9] if len(inputs) > 9 else use_cache
# output_attentions = inputs[10] if len(inputs) > 10 else output_attentions
# output_hidden_states = inputs[11] if len(inputs) > 11 else output_hidden_states
# return_dict = inputs[12] if len(inputs) > 12 else return_dict
# assert len(inputs) <= 13, "Too many inputs."
# elif isinstance(inputs, (dict, BatchEncoding)):
# # FIXME!
# input_ids = inputs.get("input_ids")
# past_key_values = inputs.get("past_key_values", attention_mask)
# attention_mask = inputs.get("attention_mask", attention_mask)
# token_type_ids = inputs.get("token_type_ids", token_type_ids)
# position_ids = inputs.get("position_ids", position_ids)
# head_mask = inputs.get("head_mask", head_mask)
# inputs_embeds = inputs.get("inputs_embeds", inputs_embeds)
# encoder_hidden_states = inputs.get("encoder_hidden_states", encoder_hidden_states)
# encoder_attention_mask = inputs.get("encoder_hidden_mask", encoder_attention_mask)
# use_cache = inputs.get("use_cache", use_cache)
# output_attentions = inputs.get("output_attentions", output_attentions)
# output_hidden_states = inputs.get("output_hidden_states", output_hidden_states)
# return_dict = inputs.get("return_dict", return_dict)
# assert len(inputs) <= 13, "Too many inputs."
# else:
# input_ids = inputs
if input_ids is not None:
input_shape = shape_list(input_ids)
input_ids = tf.reshape(input_ids, [-1, input_shape[-1]])
if past_key_values is None:
past_length = 0
past_key_values = [None] * len(self.h)
else:
past_length = shape_list(past_key_values[0][0])[-2]
if position_ids is None:
position_ids = tf.expand_dims(tf.range(past_length, input_shape[-1] + past_length), axis=0)
if attention_mask is not None:
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
attention_mask_shape = shape_list(attention_mask)
attention_mask = tf.reshape(
attention_mask, (attention_mask_shape[0], 1, 1, attention_mask_shape[1])
)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
from sqlite3.dbapi2 import Cursor
from prettytable import PrettyTable
import vk_api
from vk_api.longpoll import VkLongPoll, VkEventType
from vk_api.keyboard import VkKeyboard, VkKeyboardColor
import random
import sqlite3
from game_math import RandomNumber
import psycopg2
import threading
import datetime
from vkcoinapi import *
import pikches
coin = VKCoin(key='<KEY>', merchantId=545851228)
coin.setShopName('7B SHOP')
DATABASE_URL = 'postgres://eauprxzosofunb:<EMAIL>@ec<EMAIL>:5432/dfrmm2t89jd2ag'
API_VERSION = '5.126'
ranked = 0
ranks_points = [300, 500, 1000, 1200, 1500, 2000, 2200, 2500, 3000, 3200, 3500, 4000, 5000, 6000, 10000]
ranks_names = ["<NAME>", "Железо 3", "Железо 2", "Железо 1", "Бронза 3", "Бронза 2", "Бронза 1", "Серебро 3",
"Серебро 2", "Серебро 1", "Платина 3", "Платина 2", "Платина 1", "Алмаз", "Титан", "Непобедимый"]
col_coins = 1
col_abs = 1
conn = psycopg2.connect(DATABASE_URL, sslmode='require')
cursor = conn.cursor()
id_send = False
send_sendr = 0
pr = ""
sendr = ""
table = ""
senders = []
senders_2 = []
resh = []
otvets = []
ban_list = []
wait_resh = []
num = 0
ob_send = False
def update_bases(id):
if id in senders:
senders.remove(id)
if id in senders_2:
senders_2.remove(id)
def update_bases_game(id):
if id in resh:
num = resh.index(id)
resh.remove(id)
otvets.pop(num)
num = 0
# Создание таблицы
try:
cursor.execute("""CREATE TABLE USERS (ID INT, COINS INT, BONUS INT)""")
except:
print("Database users already created")
try:
cursor.execute("""CREATE TABLE SENDS (ID INT, MST_T TEXT, MSG TEXT)""")
except:
print("Database sends already created")
conn.commit()
from rank_manager import *
def write_msg(user_id, message):
rand_id = random.getrandbits(64)
vk.method('messages.send', {'user_id': user_id, 'message': message, 'random_id': rand_id})
def write_msg_pik(user_id, message, attach):
rand_id = random.getrandbits(64)
vk.method('messages.send', {'user_id': user_id, 'message': message, 'random_id': rand_id, 'attachment': attach})
def write_msg_kb(user_id, message, keyboard):
rand_id = random.getrandbits(64)
vk.method('messages.send',
{'user_id': user_id, 'message': message, 'random_id': rand_id, 'keyboard': keyboard.get_keyboard()})
def game_event(event):
ob_send = False
initis(event.user_id)
col = 0
max = 0
closh = ""
update_bases_game(event.user_id)
id_send = False
update_bases(event.user_id)
rn = RandomNumber()
if get_points(event.user_id) < 300:
col = 3
max = 10
closh = "легко"
elif get_points(event.user_id) < 2000:
col = 3
max = 100
closh = "средне"
elif get_points(event.user_id) < 4000:
col = 4
max = 100
closh = "трудно"
else:
col = 4
max = 1000
closh = "очень трудно"
prim = rn.generate(1, max, col)
write_msg(event.user_id,
f"Игра \"Примеры\". Твоя задача - решить пример. \nСложность: {closh}. Ответ округляй до целого в меньшую сторону. \nПример: {prim}")
resh.insert(len(resh), event.user_id)
otvets.insert(len(otvets), eval(prim))
def get_payment():
pass
token = "secret"
vk = vk_api.VkApi(token=token, api_version=API_VERSION)
vk_conn = vk.get_api()
longpoll = VkLongPoll(vk)
kb_start = VkKeyboard(one_time=True, inline=False)
kb_start.add_button(color=VkKeyboardColor.POSITIVE, label="Меню", payload={"type": "0x002_menu"})
kb_menu = VkKeyboard(one_time=False, inline=False)
kb_menu.add_button(color=VkKeyboardColor.PRIMARY, label="Меню")
kb_menu.add_line()
kb_menu.add_button(color=VkKeyboardColor.POSITIVE, label="Профиль")
kb_menu.add_line()
kb_menu.add_button(color=VkKeyboardColor.NEGATIVE, label="Играть")
kb_menu.add_line()
kb_menu.add_button(color=VkKeyboardColor.POSITIVE, label="Магазин")
kb_menu.add_button(color=VkKeyboardColor.SECONDARY, label="Подать заявку")
kb_admin = VkKeyboard(one_time=False, inline=True)
kb_admin.add_button(color=VkKeyboardColor.NEGATIVE, label="Данные")
kb_admin.add_line()
kb_admin.add_button(color=VkKeyboardColor.NEGATIVE, label="Заявки")
kb_admin.add_line()
kb_admin.add_button(color=VkKeyboardColor.NEGATIVE, label="Объявление")
kb_pik = VkKeyboard(one_time=False, inline=True)
kb_pik.add_button(color=VkKeyboardColor.POSITIVE, label="Мои пикчи")
kb_sender = VkKeyboard(one_time=False, inline=True)
kb_sender.add_button(color=VkKeyboardColor.PRIMARY, label="Алгебра и геометрия")
kb_sender.add_line()
kb_sender.add_button(color=VkKeyboardColor.NEGATIVE, label="Литра и русский")
kb_sender.add_line()
kb_sender.add_button(color=VkKeyboardColor.POSITIVE, label="Биология")
kb_sender.add_line()
kb_sender.add_button(color=VkKeyboardColor.PRIMARY, label="Улучшения бота")
kb_shop = VkKeyboard(one_time=False, inline=True)
kb_shop.add_button(color=VkKeyboardColor.NEGATIVE, label="Пикча Алека!")
kb_shop.add_line()
kb_shop.add_button(color=VkKeyboardColor.NEGATIVE, label="Пикча для богатых")
kb_shop.add_line()
kb_shop.add_button(color=VkKeyboardColor.POSITIVE, label="Рандом Пикча!")
kb_shop.add_line()
kb_shop.add_button(color=VkKeyboardColor.POSITIVE, label="Взлом рандома пикч!")
kb_shop.add_line()
kb_shop.add_button(color=VkKeyboardColor.PRIMARY, label="МЕГА РАНДОМ ПИКЧ!")
def initis(id):
user_get = vk_conn.users.get(user_ids=id)[0]
cursor.execute(f"""SELECT COINS from USERS where id={id}""")
if not cursor.fetchall():
full_name = user_get['first_name'] + ' ' + user_get['last_name']
print(full_name)
cursor.execute(f"""INSERT INTO USERS (ID, COINS, BONUS) VALUES ({id}, 0, 0)""")
conn.commit()
cursor.execute(f"""SELECT COINS from USERS where id={id}""")
print(cursor.fetchone())
cursor.execute(f"""SELECT POINTS from RANKS where id={id}""")
if not cursor.fetchall():
full_name = user_get['first_name'] + ' ' + user_get['last_name']
print(full_name)
cursor.execute(f"""INSERT INTO ranks VALUES ({id}, 0)""")
conn.commit()
cursor.execute(f"""SELECT points FROM ranks WHERE id={id}""")
print(cursor.fetchone())
cursor.execute(f"""SELECT INV from PIK where ID={id}""")
if not cursor.fetchall():
full_name = user_get['first_name'] + ' ' + user_get['last_name']
print(full_name)
data_pik = "1111111111"
cursor.execute(f"""INSERT INTO PIK VALUES ({id}, {str(data_pik)})""")
conn.commit()
cursor.execute(f"""SELECT INV FROM PIK WHERE id={event.user_id}""")
print(cursor.fetchone())
cursor.execute(f"""SELECT INV from PIK where ID={id}""")
inv = str(cursor.fetchone()[0])
print(inv)
set = len(inv)
if set < 10:
get = 10 - set
print("GET: "+str(get))
get_inv = inv
data_r = get_inv + '1' * get
cursor.execute(f"""UPDATE PIK set INV={data_r} where ID={id}""")
conn.commit()
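# Note (added for clarity): PIK.INV is treated as a ten-character flag string,
# one slot per picture. The block above right-pads shorter stored values with
# '1' (e.g. "1111111" -> "1111111111"), presumably so the slot indices used by
# have_pik() and add_pik() from rank_manager stay valid.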
while True:
for event in longpoll.listen():
# Если пришло новое сообщение
if event.type == VkEventType.MESSAGE_NEW:
if event.to_me:
req_msg = event.text.lower()
req_msg_up = event.text
try:
initis(event.user_id)
if event.user_id in ban_list:
write_msg(event.user_id,
"Вы получили бан! Теперь вы не можете пользоваться ботом!\nДля разбана обращайтесь к администраторам!")
elif req_msg == "начать":
update_bases_game(event.user_id)
update_bases(event.user_id)
ob_send = False
id_send = False
send_sendr = 0
write_msg_kb(event.user_id,
"Привет! Это бот нашей группы (обновленный), и теперь вы сможете не просто подавать заявки, но и получать и накапливать монеты, повышать ранги и другое...",
kb_start)
user_get = vk_conn.users.get(user_ids=event.user_id)[0]
cursor.execute("""SELECT coins FROM users WHERE id={event.user_id}""")
if not cursor.fetchall():
full_name = user_get['first_name'] + ' ' + user_get['last_name']
print(full_name)
cursor.execute(f"""INSERT INTO users VALUES ({event.user_id}, 0, 0)""")
conn.commit()
cursor.execute(f"""SELECT coins FROM users WHERE id={event.user_id}""")
print(cursor.fetchone())
cursor.execute(f"""SELECT points FROM ranks WHERE id={event.user_id}""")
if not cursor.fetchall():
full_name = user_get['first_name'] + ' ' + user_get['last_name']
print(full_name)
cursor.execute(f"""INSERT INTO ranks VALUES ({event.user_id}, 0)""")
conn.commit()
cursor.execute(f"""SELECT points FROM ranks WHERE id={event.user_id}""")
print(cursor.fetchone())
elif req_msg == "меню":
ob_send = False
initis(event.user_id)
update_bases_game(event.user_id)
update_bases(event.user_id)
id_send = False
send_sendr = 0
write_msg_kb(event.user_id, "Меню. Выбери кнопку на панели под клавиатурой.", kb_menu)
if event.user_id == 502085595 or event.user_id == 545851228 or event.user_id == 13122641:
write_msg_kb(event.user_id,
"Тссс, я тут услышал что ты админ, так что пользуйся кнопкой админов:",
kb_admin)
elif req_msg == "заявки":
ob_send = False
initis(event.user_id)
update_bases_game(event.user_id)
update_bases(event.user_id)
table = ""
id_send = False
send_sendr = 0
if event.user_id == 502085595 or event.user_id == 545851228 or event.user_id == 13122641:
t = PrettyTable(["ID пользователя", "Тема", "Текст заявки"])
write_msg(event.user_id, r"Таблица заявок 📃, поданных учениками.")
cursor.execute("""SELECT * FROM sends""")
data_s = cursor.fetchall()
for row in data_s:
t.add_row([row[0], row[1], row[2]])
table += str(row[0]) + ": " + str(row[1]) + ", " + str(row[2]) + "\n\n"
print(t)
write_msg(event.user_id, table)
else:
write_msg(event.user_id, "Это место только для админов, тебе туда нельзя!")
elif req_msg == "данные":
ob_send = False
initis(event.user_id)
update_bases_game(event.user_id)
update_bases(event.user_id)
id_send = False
send_sendr = 0
if event.user_id == 502085595 or event.user_id == 545851228 or event.user_id == 13122641:
write_msg(event.user_id, "Введите ID пользователя, чтобы узнать о нем информацию.")
id_send = True
else:
write_msg(event.user_id, "Это место только для админов, тебе туда нельзя!")
elif req_msg == "объявление":
initis(event.user_id)
update_bases_game(event.user_id)
update_bases(event.user_id)
id_send = False
ob_send = False
send_sendr = 0
if event.user_id == 545851228:
write_msg(event.user_id,
"Введите текст объявления, который будет отправлен всем зарегистрированным в боте.")
ob_send = True
elif event.user_id == 502085595:
write_msg(event.user_id,
"Прости, Леша, но во имя безопастности и защиты от спама, тебе тоже сюда нельзя(")
else:
write_msg(event.user_id, "Это место только для админов, тебе туда нельзя!")
elif req_msg == "подать заявку":
ob_send = False
initis(event.user_id)
update_bases_game(event.user_id)
id_send = False
write_msg_kb(event.user_id,
r"Выберите предмет для заявки. Сейчас поддерживаются такие предметы:",
kb_sender)
senders.insert(len(senders), event.user_id)
elif req_msg == "играть":
ob_send = False
initis(event.user_id)
col = 0
max = 0
closh = ""
update_bases_game(event.user_id)
id_send = False
update_bases(event.user_id)
rn = RandomNumber()
if get_points(event.user_id) < 300:
col = 3
max = 10
closh = "легко"
elif get_points(event.user_id) < 2000:
col = 3
max = 100
closh = "средне"
elif get_points(event.user_id) < 4000:
col = 4
max = 100
closh = "трудно"
else:
col = 4
max = 1000
closh = "очень трудно"
prim = rn.generate(1, max, col)
write_msg(event.user_id,
f"Игра \"Примеры\". Твоя задача - решить пример. \nСложность: {closh}. Ответ округляй до целого в меньшую сторону. \nПример: {prim}")
resh.insert(len(resh), event.user_id)
otvets.insert(len(otvets), eval(prim))
elif req_msg == "профиль":
ob_send = False
initis(event.user_id)
update_bases_game(event.user_id)
update_bases(event.user_id)
id_send = False
send_sendr = 0
ranking, ranked = get_rank(event.user_id)
points = get_points(event.user_id)
if points >= 10000:
ranked_more: str = "∞"
else:
ranked_more = str(ranks_points[ranked])
if ranking == "Непобедимый" and not have_pik(event.user_id, 8):
write_msg_pik(event.user_id, "Ты получил максимальный ранг в игре! Лови пикчу, которую можно получить только за это!\n\nСпасибо за ранг Непобедимый, большой вклад...", pikches.not_win)
add_pik(event.user_id, 8)
cursor.execute(f"""SELECT coins FROM users WHERE id={event.user_id}""")
write_msg_kb(event.user_id,
f"Профиль:\n1. Монеты: {cursor.fetchone()[0]} 💰\n2. Твой ранг: {ranking} 🌟\n3. Всего очков ранга: {points}/{ranked_more}.", kb_pik)
# elif req_msg == "бонус":
# ob_send = False
# initis(event.user_id)
# update_bases_game(event.user_id)
# update_bases(event.user_id)
# id_send = False
# send_sendr = 0
# write_msg(event.user_id,
# r"Бонус для beta тестировщиков или датамайнеров (везунчиков, которые написали боту во время теста) - 100 💰")
# cursor.execute(f"""SELECT bonus FROM users WHERE id={event.user_id}""")
# if cursor.fetchone()[0] == 0:
# cursor.execute(f"""UPDATE users SET bonus = 1 WHERE id={event.user_id}""")
# write_msg(event.user_id, r"Ты тоже получил бонус! - 100 💰")
# cursor.execute(f"""SELECT coins FROM users WHERE id={event.user_id}""")
# cursor.execute(
# f"""UPDATE users SET coins = {int(cursor.fetchone()[0]) + 100} WHERE id={event.user_id}""")
# conn.commit()
# else:
# write_msg(event.user_id, r"Что, захотел еще деньжат? Нее, бонус можно получить только раз!")
elif req_msg == "магазин":
write_msg_kb(event.user_id, "Магазин.\n\nЗдесь ты можешь купить рандомную пикчу из групп 7 параллель, 7б и Квазар.\n\nВ честь недавних событий ты можешь поддержать Алека, потратив 100 монет и | |
#!/usr/bin/env python
# coding: utf-8
from xumm.resource import XummResource
from typing import Union, Dict, List
from ..misc import (
ReturnUrl,
Options,
Application,
Payload,
Response,
Result,
Next,
Refs,
)
xumm_tx_types = [
'SignIn'
]
xrpl_tx_types = [
'AccountDelete',
'AccountSet',
'CheckCancel',
'CheckCash',
'CheckCreate',
'DepositPreauth',
'EscrowCancel',
'EscrowCreate',
'EscrowFinish',
'NFTokenAcceptOffer',
'NFTokenBurn',
'NFTokenCancelOffer',
'NFTokenCreateOffer',
'NFTokenMint',
'OfferCancel',
'OfferCreate',
'Payment',
'PaymentChannelClaim',
'PaymentChannelCreate',
'PaymentChannelFund',
'SetRegularKey',
'SignerListSet',
'TicketCreate',
'TrustSet'
]
# XummTransactionType: str = xumm_tx_types[int]
# XrplTransactionType: str = xrpl_tx_types[int]
XummTransactionType: str = None
XrplTransactionType: str = None
class XummJsonTransaction(XummResource):
def refresh_from(cls, **kwargs):
cls._kwargs = kwargs
def init_from(
cls,
transaction_type: Union[
XummTransactionType,
XrplTransactionType
]
):
return {**cls._kwargs, **transaction_type}
class XummCustomMeta(XummResource):
"""
Attributes:
model_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
nullable = {
'identifier': True,
'blob': True,
'instruction': True
}
required = {
'identifier': True,
'blob': True,
'instruction': True
}
model_types = {
'identifier': str,
'blob': dict,
'instruction': str
}
attribute_map = {
'identifier': 'identifier',
'blob': 'blob',
'instruction': 'instruction'
}
def refresh_from(cls, **kwargs):
"""Returns the dict as a model
:param kwargs: A dict.
:type: dict
:return: The XummCustomMeta of this XummCustomMeta. # noqa: E501
:rtype: XummCustomMeta
"""
# cls.sanity_check(kwargs)
cls._identifier = None
cls._blob = None
cls._instruction = None
if 'identifier' in kwargs:
cls.identifier = kwargs['identifier']
if 'blob' in kwargs:
cls.blob = kwargs['blob']
if 'instruction' in kwargs:
cls.instruction = kwargs['instruction']
@property
def identifier(cls) -> str:
"""Gets the identifier of this XummCustomMeta.
:return: The identifier of this XummCustomMeta.
:rtype: str
"""
return cls._identifier
@identifier.setter
def identifier(cls, identifier: str):
"""Sets the identifier of this XummCustomMeta.
:param identifier: The identifier of this XummCustomMeta.
:type identifier: str
"""
cls._identifier = identifier
@property
def blob(cls) -> Dict[str, object]:
"""Gets the blob of this XummCustomMeta.
:return: The blob of this XummCustomMeta.
:rtype: Dict[str, object]
"""
return cls._blob
@blob.setter
def blob(cls, blob: Dict[str, object]):
"""Sets the blob of this XummCustomMeta.
:param blob: The blob of this XummCustomMeta.
:type blob: Dict[str, object]
"""
# if blob is None:
# raise ValueError("Invalid value for `blob`, must not be `None`") # noqa: E501
cls._blob = blob
@property
def instruction(cls) -> str:
"""Gets the instruction of this XummCustomMeta.
:return: The instruction of this XummCustomMeta.
:rtype: str
"""
return cls._instruction
@instruction.setter
def instruction(cls, instruction: str):
"""Sets the instruction of this XummCustomMeta.
:param instruction: The instruction of this XummCustomMeta.
:type instruction: str
"""
cls._instruction = instruction
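# Hedged usage sketch (illustrative; assumes the XummResource base class routes
# constructor kwargs through refresh_from, as the other SDK resources do):
#
#     meta = XummCustomMeta(
#         identifier="order-1337",
#         blob={"cart": ["item-a"]},
#         instruction="Thanks for your order",
#     )
#     meta.identifier  # -> "order-1337"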
class XummPayloadMeta(XummResource):
"""
Attributes:
model_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
nullable = {
'opened_by_deeplink': True,
'signers': True,
'return_url_app': True,
'return_url_web': True,
}
required = {
'exists': True,
'uuid': True,
'multisign': True,
'submit': True,
'destination': True,
'resolved_destination': True,
'resolved': True,
'signed': True,
'cancelled': True,
'expired': True,
'pushed': True,
'app_opened': True,
'opened_by_deeplink': True,
'signers': True,
'return_url_app': True,
'return_url_web': True,
'is_xapp': True
}
model_types = {
'exists': bool,
'uuid': str,
'multisign': bool,
'submit': bool,
'destination': str,
'resolved_destination': str,
'resolved': bool,
'signed': bool,
'cancelled': bool,
'expired': bool,
'pushed': bool,
'app_opened': bool,
'opened_by_deeplink': bool,
'signers': list,
'return_url_app': str,
'return_url_web': str,
'is_xapp': bool
}
attribute_map = {
'exists': 'exists',
'uuid': 'uuid',
'multisign': 'multisign',
'submit': 'submit',
'destination': 'destination',
'resolved_destination': 'resolved_destination',
'resolved': 'resolved',
'signed': 'signed',
'cancelled': 'cancelled',
'expired': 'expired',
'pushed': 'pushed',
'app_opened': 'app_opened',
'opened_by_deeplink': 'opened_by_deeplink',
'signers': 'signers',
'return_url_app': 'return_url_app',
'return_url_web': 'return_url_web',
'is_xapp': 'is_xapp'
}
def refresh_from(cls, **kwargs):
"""Returns the dict as a model
:param kwargs: A dict.
:type: dict
:return: The XummPayloadMeta of this XummPayloadMeta. # noqa: E501
:rtype: XummPayloadMeta
"""
cls.sanity_check(kwargs)
cls._exists = None
cls._uuid = None
cls._multisign = None
cls._submit = None
cls._destination = None
cls._resolved_destination = None
cls._resolved = None
cls._signed = None
cls._cancelled = None
cls._expired = None
cls._pushed = None
cls._app_opened = None
cls._opened_by_deeplink = None
cls._signers = None
cls._return_url_app = None
cls._return_url_web = None
cls._is_xapp = None
cls.exists = kwargs['exists']
cls.uuid = kwargs['uuid']
cls.multisign = kwargs['multisign']
cls.submit = kwargs['submit']
cls.destination = kwargs['destination']
cls.resolved_destination = kwargs['resolved_destination']
cls.resolved = kwargs['resolved']
cls.signed = kwargs['signed']
cls.cancelled = kwargs['cancelled']
cls.expired = kwargs['expired']
cls.pushed = kwargs['pushed']
cls.app_opened = kwargs['app_opened']
if 'opened_by_deeplink' in kwargs:
cls.opened_by_deeplink = kwargs['opened_by_deeplink']
if 'signers' in kwargs:
cls.signers = kwargs['signers']
if 'return_url_app' in kwargs:
cls.return_url_app = kwargs['return_url_app']
if 'return_url_web' in kwargs:
cls.return_url_web = kwargs['return_url_web']
cls.is_xapp = kwargs['is_xapp']
@property
def exists(cls) -> bool:
"""Gets the exists of this XummPayloadMeta.
:return: The exists of this XummPayloadMeta.
:rtype: bool
"""
return cls._exists
@exists.setter
def exists(cls, exists: bool):
"""Sets the exists of this XummPayloadMeta.
:param exists: The exists of this XummPayloadMeta.
:type exists: bool
"""
if exists is None:
raise ValueError("Invalid value for `exists`, must not be `None`") # noqa: E501
cls._exists = exists
@property
def uuid(cls) -> str:
"""Gets the uuid of this XummPayloadMeta.
:return: The uuid of this XummPayloadMeta.
:rtype: str
"""
return cls._uuid
@uuid.setter
def uuid(cls, uuid: str):
"""Sets the uuid of this XummPayloadMeta.
:param uuid: The uuid of this XummPayloadMeta.
:type uuid: str
"""
if uuid is None:
raise ValueError("Invalid value for `uuid`, must not be `None`") # noqa: E501
cls._uuid = uuid
@property
def multisign(cls) -> bool:
"""Gets the multisign of this XummPayloadMeta.
:return: The multisign of this XummPayloadMeta.
:rtype: bool
"""
return cls._multisign
@multisign.setter
def multisign(cls, multisign: bool):
"""Sets the multisign of this XummPayloadMeta.
:param multisign: The multisign of this XummPayloadMeta.
:type multisign: bool
"""
if multisign is None:
raise ValueError("Invalid value for `multisign`, must not be `None`") # noqa: E501
cls._multisign = multisign
@property
def submit(cls) -> bool:
"""Gets the submit of this XummPayloadMeta.
:return: The submit of this XummPayloadMeta.
:rtype: bool
"""
return cls._submit
@submit.setter
def submit(cls, submit: bool):
"""Sets the submit of this XummPayloadMeta.
:param submit: The submit of this XummPayloadMeta.
:type submit: bool
"""
if submit is None:
raise ValueError("Invalid value for `submit`, must not be `None`") # noqa: E501
cls._submit = submit
@property
def destination(cls) -> str:
"""Gets the destination of this XummPayloadMeta.
:return: The destination of this XummPayloadMeta.
:rtype: str
"""
return cls._destination
@destination.setter
def destination(cls, destination: str):
"""Sets the destination of this XummPayloadMeta.
:param destination: The destination of this XummPayloadMeta.
:type destination: str
"""
if destination is None:
raise ValueError("Invalid value for `destination`, must not be `None`") # noqa: E501
cls._destination = destination
@property
def resolved_destination(cls) -> str:
"""Gets the resolved_destination of this XummPayloadMeta.
:return: The resolved_destination of this XummPayloadMeta.
:rtype: str
"""
return cls._resolved_destination
@resolved_destination.setter
def resolved_destination(cls, resolved_destination: str):
"""Sets the resolved_destination of this XummPayloadMeta.
:param resolved_destination: The resolved_destination of this XummPayloadMeta. # noqa: E501
:type resolved_destination: str
"""
if resolved_destination is None:
raise ValueError("Invalid value for `resolved_destination`, must not be `None`") # noqa: E501
cls._resolved_destination = resolved_destination
@property
def resolved(cls) -> bool:
"""Gets the resolved of this XummPayloadMeta.
:return: The resolved of this XummPayloadMeta.
:rtype: bool
"""
return cls._resolved
@resolved.setter
def resolved(cls, resolved: bool):
"""Sets the resolved of this XummPayloadMeta.
:param resolved: The resolved of this XummPayloadMeta.
:type resolved: bool
"""
if resolved is None:
raise ValueError("Invalid value for `resolved`, must not be `None`") # noqa: E501
cls._resolved = resolved
@property
def signed(cls) -> bool:
"""Gets the signed of this XummPayloadMeta.
:return: The signed of this XummPayloadMeta.
:rtype: bool
"""
return cls._signed
@signed.setter
def signed(cls, signed: bool):
"""Sets the signed of this XummPayloadMeta.
:param signed: The signed of this XummPayloadMeta.
:type signed: bool
"""
if signed is None:
raise ValueError("Invalid value for `signed`, must not be `None`") # noqa: E501
cls._signed = signed
@property
def cancelled(cls) -> bool:
"""Gets the cancelled of this XummPayloadMeta.
:return: The cancelled of this XummPayloadMeta.
:rtype: bool
"""
return cls._cancelled
@cancelled.setter
def cancelled(cls, cancelled: bool):
"""Sets the cancelled of this XummPayloadMeta.
:param cancelled: The cancelled of this XummPayloadMeta.
:type cancelled: bool
"""
if cancelled is None:
raise ValueError("Invalid value for `cancelled`, must not be `None`") # noqa: E501
cls._cancelled = cancelled
@property
def expired(cls) -> bool:
"""Gets the expired of this XummPayloadMeta.
:return: The expired of this XummPayloadMeta.
:rtype: bool
"""
return cls._expired
@expired.setter
def expired(cls, expired: bool):
"for source node and destination node." \
"or three columns, the first column for source node, " \
"the second for destination node, " \
"and third for labels")
if edge_type != None and len(edge_type) != 3:
raise RuntimeError("edge_type should be None or a tuple of " \
"(src_type, relation_type, dst_type)")
if multilabel:
assert len(cols) == 3, "Multi-class label requires one column for labels"
assert separator is not None, "Multi-class label is supported, "\
"but a separator is required to split the labels"
src_nodes, dst_nodes, labels = \
self._load_labels(cols, multilabel, separator, rows)
if len(cols) == 3:
assert len(src_nodes) == len(labels), \
'Train nodes shape {} and labels shape {} mismatch'.format(len(src_nodes),
len(labels))
assert self._has_label is None or self._has_label is True, \
'For a single edge label loader, it can be has-label or no-label ' \
'but it can not be both'
self._has_label = True
else:
assert self._has_label is None or self._has_label is False, \
'For a single edge label loader, it can be has-label or no-label ' \
'but it can not be both'
self._has_label = False
assert self._is_multilabel is None or self._is_multilabel == multilabel, \
'For a single label loader, it can be multi-label or single-label ' \
'but it can not be both'
self._is_multilabel = multilabel
self._labels.append((edge_type,
src_nodes,
dst_nodes,
labels,
(1., 0., 0.)))
def addRelationalTrainSet(self, cols, src_node_type='node', dst_node_type='node', rows=None):
r"""Add Training Set with multiple relation types.
Three columns of the **input** are chosen. the first
two columns represent the column names of the source
nodes and destination nodes while the last column give
the relation type.
Parameters
-----------
cols: list of str or list of int
Which columns to use. Supported data formats are:
(1) [str, str, str] column names for source node, destination node and labels.
The first column is treated as source node name,
the second column is treated as destination node name and
the third column is treated as relation type.
(2) [int, int, int] column numbers for node and labels.
The first column is treated as source node name,
the second column is treated as destination node name and
the third column is treated as relation type.
src_node_type: str
Source node type.
Default: 'node'
dst_node_type: str
Destination node type.
Default: 'node'
rows: numpy.array or list of int
Which row(s) to load. None to load all.
Default: None
Notes
-----
This function can be used to load knowledge graphs.
Examples
--------
** Load train labels **
Example data of label.csv is as follows:
====== ======== ====
name movie rate
====== ======== ====
John StarWar1 5.0
Tim X-Man 3.5
Maggie StarWar1 4.5
====== ======== ====
>>> label_loader = dgl.data.EdgeLabelLoader(input='label.csv',
separator="\t")
>>> label_loader.addRelationalTrainSet(['name', 'movie', 'rate'],
src_node_type='name',
dst_node_type='movie',
rows=np.arange(start=0, stop=100))
"""
if not isinstance(cols, list):
raise RuntimeError("The cols should be a list of string or int")
if len(cols) != 3:
raise RuntimeError("addRelationalTrainSet accepts three columns " \
"for source node and destination node." \
"or three columns, the first column for source node, " \
"the second for destination node, " \
"and third for relation")
# TODO(xiangsx) add label/multilabel support in the future
rel_edges = self._load_relation_labels(cols, rows)
assert self._has_label is None or self._has_label is False, \
'For a single edge label loader, it can be has-label or no-label ' \
'but it can not be both.'
self._has_label = False
for rel_type, (src_nodes, dst_nodes) in rel_edges.items():
self._labels.append(((src_node_type, rel_type, dst_node_type),
src_nodes,
dst_nodes,
None,
(1., 0., 0.)))
def addValidSet(self, cols, multilabel=False, separator=None, rows=None, edge_type=None):
r"""Add Validation Set.
Two or three columns of the **input** are chosen.
If only two columns are provided, they represent the
column names of the source nodes and destination nodes.
This represents the existance of the edges.
If three columns are provided, the first two columns
represent the column names of the source nodes and
destination nodes while the last column give the labels.
Multi-label is supported, but a separator is required to
split the labels.
Parameters
-----------
cols: list of str or list of int
Which columns to use. Supported data formats are:
(1) [str, str] column names for source node, destination node.
(2) [int, int] column numbers for source node, destination node.
(3) [str, str, str] column names for source node, destination node and labels.
The first column is treated as source node name,
the second column is treated as destination node name and
the third column is treated as label.
(4) [int, int, int] column numbers for node and labels.
The first column is treated as source node name,
the second column is treated as destination node name and
the third column is treated as label.
multilabel: bool
Whether it is a multi-label task.
Default: False
separator: str, optional
Delimiter(separator) used to split label data.
Default: None
rows: numpy.array or list of int
Which row(s) to load. None to load all.
Default: None
edge_type: str
Canonical edge type. If None, default edge type is chosen.
Default: None
Examples
---------
** Load valid labels **
Example data of label.csv is as follows:
====== ======== ====
name movie rate
====== ======== ====
John StarWar1 5.0
Tim X-Man 3.5
Maggie StarWar1 4.5
====== ======== ====
>>> label_loader = dgl.data.EdgeLabelLoader(input='label.csv',
separator="\t")
>>> label_loader.addValidSet(['name', 'movie', 'rate'],
rows=np.arange(start=0, stop=100))
"""
if not isinstance(cols, list):
raise RuntimeError("The cols should be a list of string or int")
if len(cols) != 2 and len(cols) != 3:
raise RuntimeError("addValidSet accepts two columns " \
"for source node and destination node." \
"or three columns, the first column for source node, " \
"the second for destination node, " \
"and third for labels")
if edge_type != None and len(edge_type) != 3:
raise RuntimeError("edge_type should be None or a tuple of " \
"(src_type, relation_type, dst_type)")
if multilabel:
assert len(cols) == 3, "Multi-class label requires one column for labels"
assert separator is not None, "Multi-class label is supported, "\
"but a separator is required to split the labels"
src_nodes, dst_nodes, labels = \
self._load_labels(cols, multilabel, separator, rows)
if len(cols) == 3:
assert len(src_nodes) == len(labels), \
'Valid nodes shape {} and labels shape {} mismatch'.format(len(src_nodes),
len(labels))
assert self._has_label is None or self._has_label is True, \
'For a single edge label loader, it can be has-label or no-label ' \
'but it can not be both'
self._has_label = True
else:
assert self._has_label is None or self._has_label is False, \
'For a single edge label loader, it can be has-label or no-label ' \
'but it can not be both'
self._has_label = False
assert self._is_multilabel is None or self._is_multilabel == multilabel, \
'For a single label loader, it can be multi-label or single-label ' \
'but it can not be both'
self._is_multilabel = multilabel
self._labels.append((edge_type,
src_nodes,
dst_nodes,
labels,
(0., 1., 0.)))
def addRelationalValidSet(self, cols, src_node_type='node', dst_node_type='node', rows=None):
r"""Add Validation Set with multiple relation types.
Three columns of the **input** are chosen. the first
two columns represent the column names of the source
nodes and destination nodes while the last column gives
the relation type.
Parameters
-----------
cols: list of str or list of int
Which columns to use. Supported data formats are:
(1) [str, str, str] column names for source node, destination node and labels.
The first column is treated as source node name,
the second column is treated as destination node name and
the third column is treated as relation type.
(2) [int, int, int] column numbers for node and labels.
The first column is treated as source node name,
the second column is treated as destination node name and
the third column is treated as relation type.
src_node_type: str
Source node type.
Default: 'node'
dst_node_type: str
Destination node type.
Default: 'node'
rows: numpy.array or list of int
Which row(s) to load. None to load all.
Default: None
Notes
-----
This function can be used to load knowledge graphs.
Examples
--------
** Load valid labels **
"""Three-dimensional mobjects."""
from __future__ import annotations
__all__ = [
"ThreeDVMobject",
"Surface",
"ParametricSurface",
"Sphere",
"Dot3D",
"Cube",
"Prism",
"Cone",
"Arrow3D",
"Cylinder",
"Line3D",
"Torus",
]
from typing import *
import numpy as np
from colour import Color
from manim.mobject.opengl_compatibility import ConvertToOpenGL
from .. import config
from ..constants import *
from ..mobject.geometry import Circle, Square
from ..mobject.mobject import *
from ..mobject.opengl_mobject import OpenGLMobject
from ..mobject.types.vectorized_mobject import VGroup, VMobject
from ..utils.color import *
from ..utils.deprecation import deprecated
from ..utils.iterables import tuplify
from ..utils.space_ops import normalize, perpendicular_bisector, z_to_vector
class ThreeDVMobject(VMobject, metaclass=ConvertToOpenGL):
def __init__(self, shade_in_3d=True, **kwargs):
super().__init__(shade_in_3d=shade_in_3d, **kwargs)
class Surface(VGroup, metaclass=ConvertToOpenGL):
"""Creates a Parametric Surface using a checkerboard pattern.
Parameters
----------
func :
The function that defines the surface.
u_range :
The range of the ``u`` variable: ``(u_min, u_max)``.
v_range :
The range of the ``v`` variable: ``(v_min, v_max)``.
resolution :
The number of samples taken of the surface. A tuple
can be used to define different resolutions for ``u`` and
``v`` respectively.
Examples
--------
.. manim:: ParaSurface
:save_last_frame:
class ParaSurface(ThreeDScene):
def func(self, u, v):
return np.array([np.cos(u) * np.cos(v), np.cos(u) * np.sin(v), u])
def construct(self):
axes = ThreeDAxes(x_range=[-4,4], x_length=8)
surface = Surface(
lambda u, v: axes.c2p(*self.func(u, v)),
u_range=[-PI, PI],
v_range=[0, TAU]
)
self.set_camera_orientation(theta=70 * DEGREES, phi=75 * DEGREES)
self.add(axes, surface)
"""
def __init__(
self,
func: Callable[[float, float], np.ndarray],
u_range: Sequence[float] = [0, 1],
v_range: Sequence[float] = [0, 1],
resolution: Sequence[int] = 32,
surface_piece_config: dict = {},
fill_color: Color = BLUE_D,
fill_opacity: float = 1.0,
checkerboard_colors: Sequence[Color] = [BLUE_D, BLUE_E],
stroke_color: Color = LIGHT_GREY,
stroke_width: float = 0.5,
should_make_jagged: bool = False,
pre_function_handle_to_anchor_scale_factor: float = 0.00001,
**kwargs
) -> None:
self.u_range = u_range
self.v_range = v_range
super().__init__(**kwargs)
self.resolution = resolution
self.surface_piece_config = surface_piece_config
self.fill_color = fill_color
self.fill_opacity = fill_opacity
self.checkerboard_colors = checkerboard_colors
self.stroke_color = stroke_color
self.stroke_width = stroke_width
self.should_make_jagged = should_make_jagged
self.pre_function_handle_to_anchor_scale_factor = (
pre_function_handle_to_anchor_scale_factor
)
self.func = func
self._setup_in_uv_space()
self.apply_function(lambda p: func(p[0], p[1]))
if self.should_make_jagged:
self.make_jagged()
def _get_u_values_and_v_values(self):
res = tuplify(self.resolution)
if len(res) == 1:
u_res = v_res = res[0]
else:
u_res, v_res = res
u_values = np.linspace(*self.u_range, u_res + 1)
v_values = np.linspace(*self.v_range, v_res + 1)
return u_values, v_values
def _setup_in_uv_space(self):
u_values, v_values = self._get_u_values_and_v_values()
faces = VGroup()
for i in range(len(u_values) - 1):
for j in range(len(v_values) - 1):
u1, u2 = u_values[i : i + 2]
v1, v2 = v_values[j : j + 2]
face = ThreeDVMobject()
face.set_points_as_corners(
[
[u1, v1, 0],
[u2, v1, 0],
[u2, v2, 0],
[u1, v2, 0],
[u1, v1, 0],
],
)
faces.add(face)
face.u_index = i
face.v_index = j
face.u1 = u1
face.u2 = u2
face.v1 = v1
face.v2 = v2
faces.set_fill(color=self.fill_color, opacity=self.fill_opacity)
faces.set_stroke(
color=self.stroke_color,
width=self.stroke_width,
opacity=self.stroke_opacity,
)
self.add(*faces)
if self.checkerboard_colors:
self.set_fill_by_checkerboard(*self.checkerboard_colors)
def set_fill_by_checkerboard(self, *colors, opacity=None):
n_colors = len(colors)
for face in self:
c_index = (face.u_index + face.v_index) % n_colors
face.set_fill(colors[c_index], opacity=opacity)
return self
def set_fill_by_value(
self,
axes: Mobject,
colors: Union[Iterable[Color], Color],
axis: int = 2,
):
"""Sets the color of each mobject of a parametric surface to a color relative to its axis-value
Parameters
----------
axes :
The axes for the parametric surface, which will be used to map axis-values to colors.
colors :
A list of colors, ordered from lower axis-values to higher axis-values. If a list of tuples is passed
containing colors paired with numbers, then those numbers will be used as the pivots.
axis :
The chosen axis to use for the color mapping. (0 = x, 1 = y, 2 = z)
Returns
-------
:class:`~.Surface`
The parametric surface with a gradient applied by value. For chaining.
Examples
--------
.. manim:: FillByValueExample
:save_last_frame:
class FillByValueExample(ThreeDScene):
def construct(self):
resolution_fa = 42
self.set_camera_orientation(phi=75 * DEGREES, theta=-120 * DEGREES)
axes = ThreeDAxes(x_range=(0, 5, 1), y_range=(0, 5, 1), z_range=(-1, 1, 0.5))
def param_surface(u, v):
x = u
y = v
z = np.sin(x) * np.cos(y)
return z
surface_plane = Surface(
lambda u, v: axes.c2p(u, v, param_surface(u, v)),
resolution=(resolution_fa, resolution_fa),
v_range=[0, 5],
u_range=[0, 5],
)
surface_plane.set_style(fill_opacity=1)
surface_plane.set_fill_by_value(axes=axes, colors=[(RED, -0.4), (YELLOW, 0), (GREEN, 0.4)], axis = 1)
self.add(axes, surface_plane)
"""
ranges = [axes.x_range, axes.y_range, axes.z_range]
if type(colors[0]) is tuple:
new_colors, pivots = [[i for i, j in colors], [j for i, j in colors]]
else:
new_colors = colors
pivot_min = ranges[axis][0]
pivot_max = ranges[axis][1]
pivot_frequency = (pivot_max - pivot_min) / (len(new_colors) - 1)
pivots = np.arange(
start=pivot_min,
stop=pivot_max + pivot_frequency,
step=pivot_frequency,
)
for mob in self.family_members_with_points():
axis_value = axes.point_to_coords(mob.get_midpoint())[axis]
if axis_value <= pivots[0]:
mob.set_color(new_colors[0])
elif axis_value >= pivots[-1]:
mob.set_color(new_colors[-1])
else:
for i, pivot in enumerate(pivots):
if pivot > axis_value:
color_index = (axis_value - pivots[i - 1]) / (
pivots[i] - pivots[i - 1]
)
color_index = min(color_index, 1)
mob_color = interpolate_color(
new_colors[i - 1],
new_colors[i],
color_index,
)
if config.renderer == "opengl":
mob.set_color(mob_color, recurse=False)
else:
mob.set_color(mob_color, family=False)
break
return self
@deprecated(since="v0.10.0", replacement=Surface)
class ParametricSurface(Surface):
# shifts inheritance from Surface/OpenGLSurface depending on the renderer.
"""Creates a parametric surface"""
# Specific shapes
class Sphere(Surface):
"""A mobject representing a three-dimensional sphere.
Examples
--------
.. manim:: ExampleSphere
:save_last_frame:
class ExampleSphere(ThreeDScene):
def construct(self):
self.set_camera_orientation(phi=PI / 6, theta=PI / 6)
sphere1 = Sphere(
center=(3, 0, 0),
radius=1,
resolution=(20, 20),
u_range=[0.001, PI - 0.001],
v_range=[0, TAU]
)
sphere1.set_color(RED)
self.add(sphere1)
sphere2 = Sphere(center=(-1, -3, 0), radius=2, resolution=(18, 18))
sphere2.set_color(GREEN)
self.add(sphere2)
sphere3 = Sphere(center=(-1, 2, 0), radius=2, resolution=(16, 16))
sphere3.set_color(BLUE)
self.add(sphere3)
"""
def __init__(
self,
center=ORIGIN,
radius=1,
resolution=None,
u_range=(0, TAU),
v_range=(0, PI),
**kwargs
):
if config.renderer == "opengl":
res_value = (101, 51)
else:
res_value = (24, 12)
resolution = resolution if resolution is not None else res_value
self.radius = radius
super().__init__(
self.func,
resolution=resolution,
u_range=u_range,
v_range=v_range,
**kwargs,
)
self.shift(center)
def func(self, u, v):
return self.radius * np.array(
[np.cos(u) * np.sin(v), np.sin(u) * np.sin(v), -np.cos(v)],
)
class Dot3D(Sphere):
"""A spherical dot.
Parameters
----------
point : Union[:class:`list`, :class:`numpy.ndarray`], optional
The location of the dot.
radius : :class:`float`, optional
The radius of the dot.
color : :class:`~.Colors`, optional
The color of the :class:`Dot3D`
Examples
--------
.. manim:: Dot3DExample
:save_last_frame:
class Dot3DExample(ThreeDScene):
def construct(self):
self.set_camera_orientation(phi=75*DEGREES, theta=-45*DEGREES)
axes = ThreeDAxes()
dot_1 = Dot3D(point=axes.coords_to_point(0, 0, 1), color=RED)
dot_2 = Dot3D(point=axes.coords_to_point(2, 0, 0), radius=0.1, color=BLUE)
dot_3 = Dot3D(point=[0, 0, 0], radius=0.1, color=ORANGE)
self.add(axes, dot_1, dot_2,dot_3)
"""
def __init__(
self,
point=ORIGIN,
radius=DEFAULT_DOT_RADIUS,
color=WHITE,
resolution=(8, 8),
**kwargs
):
super().__init__(center=point, radius=radius, resolution=resolution, **kwargs)
self.set_color(color)
class Cube(VGroup):
def __init__(
self,
side_length=2,
fill_opacity=0.75,
fill_color=BLUE,
stroke_width=0,
**kwargs
):
self.side_length = side_length
super().__init__(
fill_color=fill_color,
fill_opacity=fill_opacity,
stroke_width=stroke_width,
**kwargs,
)
def generate_points(self):
for vect in IN, OUT, LEFT, RIGHT, UP, DOWN:
face = Square(
side_length=self.side_length,
shade_in_3d=True,
)
face.flip()
face.shift(self.side_length * OUT / 2.0)
face.apply_matrix(z_to_vector(vect))
self.add(face)
init_points = generate_points
class Prism(Cube):
"""A cuboid.
Examples
--------
.. manim:: ExamplePrism
:save_last_frame:
class ExamplePrism(ThreeDScene):
def construct(self):
self.set_camera_orientation(phi=60 * DEGREES, theta=150 * DEGREES)
prismSmall = Prism(dimensions=[1, 2, 3]).rotate(PI / 2)
prismLarge = Prism(dimensions=[1.5, 3, 4.5]).move_to([2, 0, 0])
self.add(prismSmall, prismLarge)
"""
def __init__(self, dimensions=[3, 2, 1], **kwargs):
self.dimensions = dimensions
super().__init__(**kwargs)
def generate_points(self):
super().generate_points()
for dim, value in enumerate(self.dimensions):
self.rescale_to_fit(value, dim, stretch=True)
class Cone(Surface):
"""A circular cone.
Can be defined using 2 parameters: its height and its base radius.
The polar angle, theta, can be calculated using arctan(base_radius /
height). The spherical radius, r, is calculated using the Pythagorean
theorem.
Examples
--------
.. manim:: ExampleCone
:save_last_frame:
class ExampleCone(ThreeDScene):
def construct(self):
axes = ThreeDAxes()
cone = Cone(direction=X_AXIS+Y_AXIS+2*Z_AXIS)
self.set_camera_orientation(phi=5*PI/11, theta=PI/9)
self.add(axes, cone)
Parameters
----------
base_radius : :class:`float`
The base radius from which the cone tapers.
height : :class:`float`
The height measured from the plane formed by the base_radius to the apex of the cone.
direction : :class:`numpy.array`
The direction of the apex.
show_base : :class:`bool`
Whether to show the base plane or not.
v_range : :class:`Sequence[float]`
The azimuthal angle to start and end at.
u_min : :class:`float`
The radius at the apex.
checkerboard_colors : :class:`bool`
Show checkerboard grid texture on the cone.
"""
def __init__(
self,
base_radius=1,
height=1,
direction=Z_AXIS,
show_base=False,
v_range=[0, TAU],
u_min=0,
checkerboard_colors=False,
**kwargs
):
self.direction = direction
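# Polar angle from the +Z axis; PI - arctan(base_radius / height), as described in the docstring.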
self.theta = PI - np.arctan(base_radius / height)
super().__init__(
self.func,
v_range=v_range,
u_range=[u_min, np.sqrt(base_radius ** 2 + height ** 2)],
checkerboard_colors=checkerboard_colors,
**kwargs,
)
# used for rotations
self._current_theta = 0
self._current_phi = 0
if show_base:
self.base_circle = Circle(
radius=base_radius,
color=self.fill_color,
fill_opacity=self.fill_opacity,
stroke_width=0,
)
self.base_circle.shift(height * IN)
self.add(self.base_circle)
| |
develop your own SSH server with paramiko for a certain
platform like Linux, you should call C{krb5_kuserok()} in your
local kerberos library to make sure that the krb5_principal has
an account on the server and is allowed to log in as a user.
:see: `http://www.unix.com/man-page/all/3/krb5_kuserok/`
"""
if gss_authenticated == AUTH_SUCCESSFUL:
return AUTH_SUCCESSFUL
return AUTH_FAILED
def check_auth_gssapi_keyex(self, username,
gss_authenticated=AUTH_FAILED,
cc_file=None):
"""
Authenticate the given user to the server if he is a valid krb5
principal and GSS-API Key Exchange was performed.
If GSS-API Key Exchange was not performed, this authentication method
won't be available.
:param str username: The username of the authenticating client
:param int gss_authenticated: The result of the krb5 authentication
:param str cc_file: The krb5 client credentials cache filename
:return: `.AUTH_FAILED` if the user is not authenticated otherwise
`.AUTH_SUCCESSFUL`
:rtype: int
:note: Kerberos credential delegation is not supported.
:see: `.ssh_gss` `.kex_gss`
:note: We are just checking in L{AuthHandler} that the given user is
a valid krb5 principal!
We don't check if the krb5 principal is allowed to log in on
the server, because there is no way to do that in python. So
if you develop your own SSH server with paramiko for a certain
platform like Linux, you should call C{krb5_kuserok()} in your
local kerberos library to make sure that the krb5_principal has
an account on the server and is allowed to log in as a user.
:see: `http://www.unix.com/man-page/all/3/krb5_kuserok/`
"""
if gss_authenticated == AUTH_SUCCESSFUL:
return AUTH_SUCCESSFUL
return AUTH_FAILED
def enable_auth_gssapi(self):
"""
Overwrite this function in your SSH server to enable GSSAPI
authentication.
The default implementation always returns false.
:return: True if GSSAPI authentication is enabled otherwise false
:rtype: Boolean
:see: `.ssh_gss`
"""
UseGSSAPI = False
GSSAPICleanupCredentials = False
return UseGSSAPI
def check_port_forward_request(self, address, port):
"""
Handle a request for port forwarding. The client is asking that
connections to the given address and port be forwarded back across
this ssh connection. An address of ``"0.0.0.0"`` indicates a global
address (any address associated with this server) and a port of ``0``
indicates that no specific port is requested (usually the OS will pick
a port).
The default implementation always returns ``False``, rejecting the
port forwarding request. If the request is accepted, you should return
the port opened for listening.
:param str address: the requested address
:param int port: the requested port
:return:
the port number (`int`) that was opened for listening, or ``False``
to reject
"""
return False
def cancel_port_forward_request(self, address, port):
"""
The client would like to cancel a previous port-forwarding request.
If the given address and port is being forwarded across this ssh
connection, the port should be closed.
:param str address: the forwarded address
:param int port: the forwarded port
"""
pass
def check_global_request(self, kind, msg):
"""
Handle a global request of the given ``kind``. This method is called
in server mode and client mode, whenever the remote host makes a global
request. If there are any arguments to the request, they will be in
``msg``.
There aren't any useful global requests defined, aside from port
forwarding, so usually this type of request is an extension to the
protocol.
If the request was successful and you would like to return contextual
data to the remote host, return a tuple. Items in the tuple will be
sent back with the successful result. (Note that the items in the
tuple can only be strings, ints, longs, or bools.)
The default implementation always returns ``False``, indicating that it
does not support any global requests.
.. note:: Port forwarding requests are handled separately, in
`check_port_forward_request`.
:param str kind: the kind of global request being made.
:param .Message msg: any extra arguments to the request.
:return:
``True`` or a `tuple` of data if the request was granted; ``False``
otherwise.
"""
return False
### Channel requests
def check_channel_pty_request(self, channel, term, width, height, pixelwidth, pixelheight,
modes):
"""
Determine if a pseudo-terminal of the given dimensions (usually
requested for shell access) can be provided on the given channel.
The default implementation always returns ``False``.
:param .Channel channel: the `.Channel` the pty request arrived on.
:param str term: type of terminal requested (for example, ``"vt100"``).
:param int width: width of screen in characters.
:param int height: height of screen in characters.
:param int pixelwidth:
width of screen in pixels, if known (may be ``0`` if unknown).
:param int pixelheight:
height of screen in pixels, if known (may be ``0`` if unknown).
:return:
``True`` if the pseudo-terminal has been allocated; ``False``
otherwise.
"""
return False
def check_channel_shell_request(self, channel):
"""
Determine if a shell will be provided to the client on the given
channel. If this method returns ``True``, the channel should be
connected to the stdin/stdout of a shell (or something that acts like
a shell).
The default implementation always returns ``False``.
:param .Channel channel: the `.Channel` the request arrived on.
:return:
``True`` if this channel is now hooked up to a shell; ``False`` if
a shell can't or won't be provided.
"""
return False
def check_channel_exec_request(self, channel, command):
"""
Determine if a shell command will be executed for the client. If this
method returns ``True``, the channel should be connected to the stdin,
stdout, and stderr of the shell command.
The default implementation always returns ``False``.
:param .Channel channel: the `.Channel` the request arrived on.
:param str command: the command to execute.
:return:
``True`` if this channel is now hooked up to the stdin, stdout, and
stderr of the executing command; ``False`` if the command will not
be executed.
.. versionadded:: 1.1
"""
return False
def check_channel_subsystem_request(self, channel, name):
"""
Determine if a requested subsystem will be provided to the client on
the given channel. If this method returns ``True``, all future I/O
through this channel will be assumed to be connected to the requested
subsystem. An example of a subsystem is ``sftp``.
The default implementation checks for a subsystem handler assigned via
`.Transport.set_subsystem_handler`.
If one has been set, the handler is invoked and this method returns
``True``. Otherwise it returns ``False``.
.. note:: Because the default implementation uses the `.Transport` to
identify valid subsystems, you probably won't need to override this
method.
:param .Channel channel: the `.Channel` the pty request arrived on.
:param str name: name of the requested subsystem.
:return:
``True`` if this channel is now hooked up to the requested
subsystem; ``False`` if that subsystem can't or won't be provided.
"""
handler_class, larg, kwarg = channel.get_transport()._get_subsystem_handler(name)
if handler_class is None:
return False
handler = handler_class(channel, name, self, *larg, **kwarg)
handler.start()
return True
def check_channel_window_change_request(self, channel, width, height, pixelwidth, pixelheight):
"""
Determine if the pseudo-terminal on the given channel can be resized.
This only makes sense if a pty was previously allocated on it.
The default implementation always returns ``False``.
:param .Channel channel: the `.Channel` the pty request arrived on.
:param int width: width of screen in characters.
:param int height: height of screen in characters.
:param int pixelwidth:
width of screen in pixels, if known (may be ``0`` if unknown).
:param int pixelheight:
height of screen in pixels, if known (may be ``0`` if unknown).
:return: ``True`` if the terminal was resized; ``False`` if not.
"""
return False
def check_channel_x11_request(self, channel, single_connection, auth_protocol, auth_cookie, screen_number):
"""
Determine if the client will be provided with an X11 session. If this
method returns ``True``, X11 applications should be routed through new
SSH channels, using `.Transport.open_x11_channel`.
The default implementation always returns ``False``.
:param .Channel channel: the `.Channel` the X11 request arrived on
:param bool single_connection:
``True`` if only a single X11 channel should be opened, else
``False``.
:param str auth_protocol: the protocol used for X11 authentication
:param str auth_cookie: the cookie used to authenticate to X11
:param int screen_number: | |
from bs4 import BeautifulSoup
import urllib.request
from dotenv import load_dotenv
import json
import requests
# import pyperclip
import time
import datetime
import os
import sys
import io
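# Re-wrap stdout/stderr as UTF-8 so non-ASCII (e.g. Korean) text prints without encoding errors.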
sys.stdout = io.TextIOWrapper(sys.stdout.detach(), encoding='utf-8')
sys.stderr = io.TextIOWrapper(sys.stderr.detach(), encoding='utf-8')
from selenium import webdriver
# Explicitly wait
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
# selenium
chromedriver_dir=r'C:\Users\multicampus\Downloads\chromedriver\chromedriver.exe'
load_dotenv(verbose=True)
# CGV INFO
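# Scrape the CGV iframe time table for one cinema and date and return the showtime records.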
def updateCGV(url_option, tdate, cinema_pk):
global onscreen_pk
global onscreen_movie
CGV_ONSCREEN = []
tg_date = makeCGVDate(tdate)
iframe_base = 'http://www.cgv.co.kr/common/showtimes/iframeTheater.aspx?'
CGV_URL = 'http://www.cgv.co.kr'
iframe_url = iframe_base + url_option + '&date=' + tg_date
iframe_html = urllib.request.urlopen(iframe_url)
soup = BeautifulSoup(iframe_html, 'lxml')
movie_list = soup.find_all('div', {'class': 'col-times'})
for movie in movie_list:
# Movie info (link to the movie detail page)
movie_info = movie.find('div', {'class': 'info-movie'})
movie_atag = movie_info.find('a')
movie_href = movie_atag.get('href')
movie_code = getCGVMovieIdx(movie_href)
movie_name = getCGVMovieName(movie_code)
if onscreen_movie.get(movie_name):
onscreen_movie[movie_name]['CGV'] = movie_code
else:
onscreen_movie[movie_name] = {
'CGV': movie_code
}
# Screening hall info
hall_list = movie.find_all('div', {'class': 'type-hall'})
for hall in hall_list:
hall_info = hall.find_all('li')
movie_d = getCGVStr(hall_info[0].text)
seat_total = getCGVStr(hall_info[2].text)[1:-1]
time_table = hall.find('div', {'class': 'info-timetable'})
atag_list = time_table.find_all('a')
for atag in atag_list:
new_onscreen_info = {
'pk': onscreen_pk,
'model': 'movies.onscreen',
}
new_onscreen_info_field = {}
atag_href = atag.get('href')
if atag_href == '/':
TICKET_URL = CGV_URL + '/ticket/?' + url_option + '&date=' + tg_date
seat_left = '준비중'
start_time = atag.find('em')
start_time = start_time.text
end_time = atag.find('span', {'class': 'end-time'}).text
end_time = deleteWord(end_time, 3, len(end_time))
info_hall = hall.find('div', {'class': 'info-hall'})
hall_name = info_hall.find_all('li')[1]
hall_name = getCGVStr(hall_name.text)
else:
TICKET_URL = CGV_URL + atag_href
start_time = atag.get('data-playstarttime')
start_time = makeStrtoTime(start_time)
end_time = atag.get('data-playendtime')
end_time = makeStrtoTime(end_time)
seat_left = atag.get('data-seatremaincnt')
hall_name = atag.get('data-screenkorname')
new_onscreen_info_field['cinema'] = cinema_pk
new_onscreen_info_field['movie'] = int(movie_code)
new_onscreen_info_field['cm_code'] = int(movie_code)
new_onscreen_info_field['date'] = tdate
new_onscreen_info_field['info'] = movie_d + ' | ' + hall_name
new_onscreen_info_field['start_time'] = start_time
new_onscreen_info_field['end_time'] = end_time
new_onscreen_info_field['total_seats'] = seat_total
new_onscreen_info_field['seats'] = seat_left
new_onscreen_info_field['url'] = TICKET_URL
new_onscreen_info['fields'] = new_onscreen_info_field
# print(new_onscreen_info)
CGV_ONSCREEN.append(new_onscreen_info)
onscreen_pk += 1
return CGV_ONSCREEN
def getCGVMovieName(tg_code):
CGV_MOVIE_DETAIL = 'http://www.cgv.co.kr/movies/detail-view/?midx='
detail_url = CGV_MOVIE_DETAIL + tg_code
detail_html = urllib.request.urlopen(detail_url)
detail_soup = BeautifulSoup(detail_html, 'lxml')
movie_name = detail_soup.find('div', {'class': 'title'})
res = movie_name.find('strong').text
return res
def getCGVStr(tg_text):
start_point = 0
tg_text_len = len(tg_text)
res = ''
for idx in range(tg_text_len):
if tg_text[idx] == ' ':
continue
elif tg_text[idx] == '\r':
continue
elif tg_text[idx] == '\n':
continue
else:
res += tg_text[idx]
return res
def getCGVMovieIdx(movie_url):
equal_idx = movie_url.index('=')
cgv_movie_code = movie_url[equal_idx+1:]
return cgv_movie_code
def makeStrtoTime(tg_str):
res = ''
tg_len = len(tg_str)
minute = tg_str[tg_len-2:]
hour = tg_str[:tg_len-2]
res = hour + ':' + minute
return res
def deleteWord(tg_str, st_idx, end_idx):
new_str = tg_str[st_idx:end_idx]
return new_str
# MEGABOX INFO
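# Scrape the MEGABOX booking page with Selenium, since its time table is rendered client-side.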
def updateMEGABOX(tg_url, tg_date, cinema_pk):
global onscreen_pk
global onscreen_movie
TICKET_BASE = 'https://www.megabox.co.kr/booking/seat?playSchdlNo='
driver.get(tg_url)
time.sleep(2)
# Look up showtimes for tomorrow's date
dotdate = getDotDate(tg_date)
dayxPath = '//*[@date-data=\"' + dotdate + '\"]'
tmr_btn = driver.find_element_by_xpath(dayxPath)
tmr_btn.click()
time.sleep(2)
source = driver.page_source
soup = BeautifulSoup(source, 'html.parser')
movie_list = soup.find_all('div', {'class': 'theater-list'})
MEGABOX_ONSCREEN = []
for movie_col in movie_list:
movie_info = movie_col.find('div', {'class': 'theater-tit'})
movie_name = checkMegaName(movie_info.find_all('p')[1].text)
theater_type_list = movie_col.find_all('div', {'class': 'theater-type-box'})
for box in theater_type_list:
theater_type = box.find('div', {'class': 'theater-type'})
hall_name = theater_type.find('p', {'class': 'theater-name'}).text
total_seat = theater_type.find('p', {'class': 'chair'}).text[2:-1]
theater_time = box.find('div', {'class': 'theater-time'})
movie_d = theater_time.find('div', {'class': 'theater-type-area'}).text
movie_info = movie_d + ' | ' + hall_name
movie_timetable = theater_time.find_all('td')
for movie_time in movie_timetable:
new_onscreen_info = {
'pk': onscreen_pk,
'model': 'movies.onscreen',
}
new_field = {
'cinema': cinema_pk,
'movie': '',
'date': tg_date,
'info': movie_info,
'start_time': '',
'end_time': '',
'total_seats': total_seat,
'seats': '',
'url': tg_url
}
if movie_time.get('play-de') != deleteSlash(tg_date):
return []
if 'end-time' in (movie_time.get('class') or []):  # BeautifulSoup returns the class attribute as a list
new_field['start_time'] = movie_time.find('p', {'class': 'time'}).text
new_field['seats'] = '매진'
else:
book_code = movie_time.get('play-schdl-no')
if book_code:
TICKET_URL = TICKET_BASE + book_code
else:
TICKET_URL = tg_url
movie_code = movie_time.get('rpst-movie-no')
# Register the movie in the onscreen catalogue
if movie_name and movie_code:
if onscreen_movie.get(movie_name):
onscreen_movie[movie_name]['MEGABOX'] = movie_code
else:
onscreen_movie[movie_name] = {
'MEGABOX': movie_code
}
play_info = movie_time.find('div', {'class': 'play-time'})
if play_info:
play_time = play_info.find('p').text
start_end = divideTime(play_time)
seat_left = movie_time.find('p', {'class': 'chair'}).text[:-1]
new_field['start_time'] = start_end[0]
new_field['end_time'] = start_end[1]
new_field['seats'] = seat_left
if movie_code:
new_field['movie'] = int(movie_code)
new_field['cm_code'] = int(movie_code)
else:
continue
new_field['url'] = TICKET_URL
new_onscreen_info['fields'] = new_field
MEGABOX_ONSCREEN.append(new_onscreen_info)
onscreen_pk += 1
return MEGABOX_ONSCREEN
def getDashDate(tg_date):
res = tg_date[:4] + '-' + tg_date[4:6] + '-' + tg_date[6:]
return res
def divideTime(tg_time):
divideIdx = tg_time.index('~')
res1 = tg_time[:divideIdx]
res2 = tg_time[divideIdx+1:]
return res1, res2
def makeCGVDate(tg_date):
res = ''
for idx in range(len(tg_date)):
if tg_date[idx] == '-':
continue
else:
res += tg_date[idx]
return res
def checkMegaName(tg_str):
if tg_str[0] == '[':
endIdx = tg_str.index(']')
return tg_str[endIdx+2:]
elif tg_str[0] == '(':
endIdx = tg_str.index(')')
return tg_str[endIdx+2:]
else:
return tg_str
def getDotDate(tdate):
res = ''
for idx in range(len(tdate)):
if tdate[idx] == '-':
res += '.'
else:
res += tdate[idx]
return res
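# LOTTE CINEMA INFO
# Scrape the Lotte Cinema schedule page with Selenium and return the showtime records.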
def updateLOTTE(tg_url, tg_date, cinema_pk):
global onscreen_pk
global onscreen_movie
driver.get(tg_url)
time.sleep(2)
ck_source = driver.page_source
ck_soup = BeautifulSoup(ck_source, 'html.parser')
ck_layer = ck_soup.find('div', {'id': 'layerGetPopup'})
if ck_layer.text:
popupLayer = driver.find_element_by_id('layerGetPopup')
ck_btn = popupLayer.find_element_by_class_name('btn_close.btnCloseLayer')
ck_btn.click()
time.sleep(1)
day_list = driver.find_elements_by_class_name('date')
ck_date = str(int(tg_date[-2:]))
LOTTE_ONSCREEN = []
# Look up showtimes for tomorrow's date
for day in day_list:
day_text = day.find_element_by_tag_name('strong').text
if day_text == ck_date:
tg_btn = day.find_element_by_tag_name('label')
tg_btn.click()
time.sleep(2)
break
source = driver.page_source
soup = BeautifulSoup(source, 'html.parser')
movie_list = soup.find_all('div', {'class': 'time_select_wrap ty2 timeSelect'})
for movie in movie_list:
movie_tit = movie.find('div', {'class': 'list_tit'})
movie_name = movie_tit.find('p').text
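# Skip Lotte's internal placeholder entry ('테스트콘텐츠' means 'test content').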
if movie_name == '테스트콘텐츠':
continue
movie_atag = movie_tit.find('a')
movie_href = movie_atag.get('href')
movie_code = findLotteCode(movie_href)
if onscreen_movie.get(movie_name):
onscreen_movie[movie_name]['LOTTE'] = movie_code
else:
onscreen_movie[movie_name] = {
'LOTTE': movie_code
}
movie_info_ul = movie.find('ul', {'class': 'list_hall mt20'})
movie_info_li = movie_info_ul.find_all('li')
movie_info_list = []
for info_li in movie_info_li:
movie_info_list.append(info_li.text)
movie_info = ' | '.join(movie_info_list)
timetable_ul = movie.find('ul', {'class': 'list_time'})
timetable_atag_list = timetable_ul.find_all('li')
for timetable_info in timetable_atag_list:
time_info = timetable_info.find('dd', {'class': 'time'})
start_time = time_info.find('strong').text
end_time_info = time_info.find('div', {'class': 'tooltip'}).text
end_time = strBeforeSpace(end_time_info)
seat_info = timetable_info.find('dd', {'class': 'seat'})
seat_left = seat_info.find('strong').text
seat_total = strBeforeSpace(seat_info.text)
hall_info = timetable_info.find('dd', {'class': 'hall'}).text
new_movie_info = movie_info + ' | ' + hall_info
new_onscreen_info = {
'pk': onscreen_pk,
'model': 'movies.onscreen',
'fields': {
'cinema': cinema_pk,
'movie': int(movie_code),
'date': tg_date,
'info': new_movie_info,
'start_time': start_time,
'end_time': end_time,
'total_seats': seat_total,
'seats': seat_left,
'url': tg_url,
'cm_code': int(movie_code)
}
}
onscreen_pk += 1
LOTTE_ONSCREEN.append(new_onscreen_info)
return LOTTE_ONSCREEN
def findLotteCode(tg_href):
idx = 0
for i in range(len(tg_href)):
if tg_href[i] == '=':
idx = i
break
if idx:
return tg_href[idx+1:]
def strBeforeSpace(tg_str):
idx = 0
for i in range(len(tg_str)-1, -1, -1):
if tg_str[i] == ' ':
idx = i+1
break
return tg_str[idx:]
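# ETC INFO (CINEQ for cinema_pk 75 and 84, otherwise YES24)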
def updateETC(tg_url, tg_date, cinema_pk):
global onscreen_pk
global onscreen_movie
if cinema_pk == 75 or cinema_pk == 84:
driver.get(tg_url)
time.sleep(3)
# Select tomorrow's date tab
tomorrow_btn = driver.find_element_by_xpath('//*[@id="content"]/div[2]/div/div[1]/ul/li[3]/a')
tomorrow_btn.click()
time.sleep(1)
source = driver.page_source
soup = BeautifulSoup(source, 'html.parser')
time_box = soup.find('div', {'class': 'theater-movie'})
movie_list = time_box.find_all('div', {'class': 'each-movie-time'})
CINEQ_ONSCREEN = []
for movie_div in movie_list:
movie_title = movie_div.find('div', {'class': 'title'})
movie_grade = movie_title.find('span').get('class')
movie_name = getMovieName(movie_title.text, movie_grade[0])
hall_list = movie_div.find_all('div', {'class': 'screen'})
for hall in hall_list:
hall_name = hall.find('div', {'class': 'screen-name'})
hall_info = hall_name.text
time_div = hall.find('div', {'class': 'time-block'})
time_list = time_div.find_all('div', {'class': 'time'})
for time_info in time_list:
movie_code = time_info.get('data-moviecode')
if not movie_code:
continue
else:
if onscreen_movie.get(movie_name):
onscreen_movie[movie_name]['CINEQ'] = str(int(movie_code))
else:
onscreen_movie[movie_name] = {
'CINEQ': str(int(movie_code))
}
end_time = time_info.find('span', {'class': 'to'}).text[3:]
seat_info = time_info.find('span', {'class': 'seats-status'}).text
seat_left, seat_total = getSeatInfo(seat_info)
start_text = time_info.find('a').text
start_time = getCineqTime(start_text)
new_onscreen_info = {
'pk': onscreen_pk,
'model': 'movies.onscreen',
'fields': {
'cinema': cinema_pk,
'movie': int(movie_code),
'date': tg_date,
'info': hall_info,
'start_time': start_time,
'end_time': end_time,
'total_seats': seat_total,
'seats': seat_left,
'url': tg_url,
'cm_code': int(movie_code)
}
}
onscreen_pk += 1
CINEQ_ONSCREEN.append(new_onscreen_info)
return CINEQ_ONSCREEN
else:
def getHallInfo(tg_str):
res1 = ''
res2 = ''
for i in range(len(tg_str)):
if tg_str[i] == '관' and res1 == '':
res1 = tg_str[:i+1]
elif tg_str[i] == ' ' and res2 == '':
res2 = tg_str[i+1:]
return res1, res2
def getEndTime(tg_str):
res = ''
for i in range(len(tg_str)):
if tg_str[i] == '~':
res = tg_str[i+2:]
break
return res
def renameYesTitle(tg_str):
res = tg_str
if res[len(tg_str)-1] == ')':
idx = res.index('(')
res = res[:idx-1]
if res[0] == '[':
idx = res.index(']')
res = res[idx+2:]
return res
TICKET_BASE = 'https://movie.yes24.com/Movie/Ticket?gId=&'
YES_ONSCREEN = []
driver.get(tg_url)
until_time = WebDriverWait(driver, 3).until(EC.presence_of_element_located((By.CLASS_NAME,"time_sel_cont")))
time.sleep(2)
source = driver.page_source
soup = BeautifulSoup(source, 'html.parser')
if not soup.find('div', {'class': | |
<filename>sarpy/utils/nitf_utils.py
"""
A utility for dumping a NITF header to the console. Contributed by <NAME> of L3/Harris.
To dump NITF header information to a text file from the command-line
>>> python -m sarpy.utils.nitf_utils <path to nitf file>
For a basic help on the command-line, check
>>> python -m sarpy.utils.nitf_utils --help
"""
__classification__ = "UNCLASSIFIED"
__author__ = "<NAME>, L3/Harris"
import argparse
import functools
import sys
from xml.dom import minidom
import os
from typing import Union, BinaryIO, TextIO, List, Dict
from io import StringIO
from sarpy.io.general.nitf import NITFDetails
from sarpy.io.general.nitf_elements.base import NITFElement, TRE, TREList, UserHeaderType
from sarpy.io.general.nitf_elements.des import DataExtensionHeader, DataExtensionHeader0, \
DESUserHeader
from sarpy.io.general.nitf_elements.graphics import GraphicsSegmentHeader
from sarpy.io.general.nitf_elements.image import ImageSegmentHeader, ImageSegmentHeader0, MaskSubheader
from sarpy.io.general.nitf_elements.label import LabelSegmentHeader
from sarpy.io.general.nitf_elements.nitf_head import NITFHeader, NITFHeader0
from sarpy.io.general.nitf_elements.res import ReservedExtensionHeader, ReservedExtensionHeader0, \
RESUserHeader
from sarpy.io.general.nitf_elements.symbol import SymbolSegmentHeader
from sarpy.io.general.nitf_elements.text import TextSegmentHeader, TextSegmentHeader0
from sarpy.io.general.nitf_elements.tres.tre_elements import TREElement
# Custom print function
print_func = print
############
# helper methods
def _filter_files(input_path):
"""
Determine if a given input path corresponds to a NITF 2.1 or 2.0 file.
Parameters
----------
input_path : str
Returns
-------
bool
"""
if not os.path.isfile(input_path):
return False
_, fext = os.path.splitext(input_path)
with open(input_path, 'rb') as fi:
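# A NITF file starts with a nine-byte version string.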
check = fi.read(9)
return check in [b'NITF02.10', b'NITF02.00']
def _create_default_output_file(input_file, output_directory=None):
if not isinstance(input_file, str):
if output_directory is None:
return os.path.expanduser('~/Desktop/header_dump.txt')
else:
return os.path.join(output_directory, 'header_dump.txt')
if output_directory is None:
return os.path.splitext(input_file)[0] + '.header_dump.txt'
else:
return os.path.join(output_directory, os.path.splitext(os.path.split(input_file)[1])[0] + '.header_dump.txt')
def _decode_effort(value):
# type: (bytes) -> Union[bytes, str]
# noinspection PyBroadException
try:
return value.decode()
except Exception:
return value
############
# printing methods
def _print_element_field(elem, field, prefix=''):
# type: (Union[None, NITFElement], Union[None, str], str) -> None
if elem is None or field is None:
return
value = getattr(elem, field, None)
if value is None:
value = ''
print_func('{}{} = {}'.format(prefix, field, value))
def _print_element(elem, prefix=''):
# type: (Union[None, NITFElement], str) -> None
if elem is None:
return
# noinspection PyProtectedMember
for field in elem._ordering:
_print_element_field(elem, field, prefix=prefix)
def _print_element_list(elem_list, prefix=''):
# type: (Union[None, List[NITFElement]], str) -> None
if elem_list is None:
return
for i, elem in enumerate(elem_list):
_print_element(elem, prefix='{}[{}].'.format(prefix, i))
def _print_tre_element(field, value, prefix=''):
# type: (Union[None, str], Union[str, int, bytes], str) -> None
if field is None:
return
if value is None:
value = ''
print_func('{}{} = {}'.format(prefix, field, value))
def _print_tre_list(elem_list, prefix=''):
# type: (Union[None, List, TREList], str) -> None
if elem_list is None:
return
for i, elem in enumerate(elem_list):
_print_tre_dict(elem, '{}[{}].'.format(prefix, i))
def _print_tre_dict(elem_dict, prefix=''):
# type: (Union[None, Dict], str) -> None
if elem_dict is None:
return
for field, value in elem_dict.items():
if isinstance(value, list):
_print_tre_list(value, '{}{}'.format(prefix, field))
else:
_print_tre_element(field, value, prefix)
def _print_tres(tres):
# type: (Union[TREList, List[TRE]]) -> None
for tre in tres:
print_func('')
if isinstance(tre.DATA, TREElement):
_print_tre_dict(tre.DATA.to_dict(), prefix='{}.'.format(tre.TAG))
else:
# Unknown TRE
_print_tre_element('DATA', _decode_effort(tre.DATA), prefix='{}.'.format(tre.TAG))
def _print_file_header(hdr):
# type: (Union[NITFHeader, NITFHeader0]) -> None
# noinspection PyProtectedMember
for field in hdr._ordering:
if field == 'Security':
_print_element(getattr(hdr, field, None), prefix='FS')
elif field == 'FBKGC':
value = getattr(hdr, field, None)
print_func('FBKGC = {} {} {}'.format(value[0], value[1], value[2]))
elif field in [
'ImageSegments', 'GraphicsSegments', 'SymbolSegments', 'LabelSegments',
'TextSegments', 'DataExtensions', 'ReservedExtensions']:
pass
elif field in ['UserHeader', 'ExtendedHeader']:
value = getattr(hdr, field, None)
assert(isinstance(value, UserHeaderType))
if value and value.data and value.data.tres:
_print_tres(value.data.tres)
else:
_print_element_field(hdr, field)
def _print_mask_header(hdr):
# type: (Union[None, MaskSubheader]) -> None
if hdr is None:
return
print_func('----- Mask Subheader (part of image data segment) -----')
# noinspection PyProtectedMember
for field in hdr._ordering:
if field in ['BMR', 'TMR']:
value = getattr(hdr, field, None)
if value is None:
continue
else:
for the_band, subarray in enumerate(value):
print_func('{}BND{} = {}'.format(field, the_band, subarray))
else:
_print_element_field(hdr, field, prefix='')
def _print_image_header(hdr):
# type: (Union[ImageSegmentHeader, ImageSegmentHeader0]) -> None
# noinspection PyProtectedMember
for field in hdr._ordering:
if field == 'Security':
_print_element(getattr(hdr, field, None), prefix='IS')
elif field in ['Comments', 'Bands']:
_print_element_list(getattr(hdr, field, None), prefix='{}'.format(field))
elif field in ['UserHeader', 'ExtendedHeader']:
value = getattr(hdr, field, None)
assert(isinstance(value, UserHeaderType))
if value and value.data and value.data.tres:
_print_tres(value.data.tres)
else:
_print_element_field(hdr, field)
_print_mask_header(hdr.mask_subheader)
def _print_basic_header(hdr, prefix):
# noinspection PyProtectedMember
for field in hdr._ordering:
if field == 'Security':
_print_element(getattr(hdr, field, None), prefix=prefix)
elif field in ['UserHeader', 'ExtendedHeader']:
value = getattr(hdr, field, None)
assert(isinstance(value, UserHeaderType))
if value and value.data and value.data.tres:
_print_tres(value.data.tres)
else:
_print_element_field(hdr, field)
def _print_graphics_header(hdr):
# type: (GraphicsSegmentHeader) -> None
_print_basic_header(hdr, 'SS')
def _print_symbol_header(hdr):
# type: (SymbolSegmentHeader) -> None
_print_basic_header(hdr, 'SS')
def _print_label_header(hdr):
# type: (LabelSegmentHeader) -> None
_print_basic_header(hdr, 'LS')
def _print_text_header(hdr):
# type: (Union[TextSegmentHeader, TextSegmentHeader0]) -> None
_print_basic_header(hdr, 'TS')
def _print_extension_header(hdr, prefix):
# noinspection PyProtectedMember
for field in hdr._ordering:
if field == 'Security':
_print_element(getattr(hdr, field, None), prefix=prefix)
elif field in ['UserHeader', 'ExtendedHeader']:
value = getattr(hdr, field, None)
if isinstance(value, (DESUserHeader, RESUserHeader)):
if value.data:
# Unknown user-defined subheader
print_func('{}SHF = {}'.format(prefix, _decode_effort(value.data)))
else:
# e.g., XMLDESSubheader
_print_element(value, prefix='{}SHF.'.format(prefix))
else:
_print_element_field(hdr, field)
def _print_des_header(hdr):
# type: (Union[DataExtensionHeader, DataExtensionHeader0]) -> None
_print_extension_header(hdr, 'DES')
def _print_res_header(hdr):
# type: (Union[ReservedExtensionHeader, ReservedExtensionHeader0]) -> None
_print_extension_header(hdr, 'RES')
def print_nitf(file_name, dest=sys.stdout):
"""
Worker function to dump the NITF header and various subheader details to the
provided destination.
Parameters
----------
file_name : str|BinaryIO
dest : TextIO
"""
# Configure print function for desired destination
# - e.g., stdout, string buffer, file
global print_func
print_func = functools.partial(print, file=dest)
details = NITFDetails(file_name)
if isinstance(file_name, str):
print_func('')
print_func('Details for file {}'.format(file_name))
print_func('')
print_func('----- File Header -----')
_print_file_header(details.nitf_header)
print_func('')
if details.img_subheader_offsets is not None:
for img_subhead_num in range(details.img_subheader_offsets.size):
print_func('----- Image {} -----'.format(img_subhead_num))
hdr = details.parse_image_subheader(img_subhead_num)
_print_image_header(hdr)
print_func('')
if details.graphics_subheader_offsets is not None:
for graphics_subhead_num in range(details.graphics_subheader_offsets.size):
print_func('----- Graphic {} -----'.format(graphics_subhead_num))
hdr = details.parse_graphics_subheader(graphics_subhead_num)
_print_graphics_header(hdr)
data = details.get_graphics_bytes(graphics_subhead_num)
print_func('GSDATA = {}'.format(_decode_effort(data)))
print_func('')
if details.symbol_subheader_offsets is not None:
for symbol_subhead_num in range(details.symbol_subheader_offsets.size):
print_func('----- Symbol {} -----'.format(symbol_subhead_num))
hdr = details.parse_symbol_subheader(symbol_subhead_num)
_print_symbol_header(hdr)
data = details.get_symbol_bytes(symbol_subhead_num)
print_func('SSDATA = {}'.format(_decode_effort(data)))
print_func('')
if details.label_subheader_offsets is not None:
for label_subhead_num in range(details.label_subheader_offsets.size):
print_func('----- Label {} -----'.format(label_subhead_num))
hdr = details.parse_label_subheader(label_subhead_num)
_print_label_header(hdr)
data = details.get_label_bytes(label_subhead_num)
print_func('LSDATA = {}'.format(_decode_effort(data)))
print_func('')
if details.text_subheader_offsets is not None:
for text_subhead_num in range(details.text_subheader_offsets.size):
print_func('----- Text {} -----'.format(text_subhead_num))
hdr = details.parse_text_subheader(text_subhead_num)
_print_text_header(hdr)
data = details.get_text_bytes(text_subhead_num)
print_func('TSDATA = {}'.format(_decode_effort(data)))
print_func('')
if details.des_subheader_offsets is not None:
for des_subhead_num in range(details.des_subheader_offsets.size):
print_func('----- DES {} -----'.format(des_subhead_num))
hdr = details.parse_des_subheader(des_subhead_num)
_print_des_header(hdr)
data = details.get_des_bytes(des_subhead_num)
des_id = hdr.DESID if details.nitf_version == '02.10' else hdr.DESTAG
if des_id.strip() in ['XML_DATA_CONTENT', 'SICD_XML', 'SIDD_XML']:
xml_str = minidom.parseString(
data.decode()).toprettyxml(indent=' ', newl='\n')
# NB: this may or may not exhibit platform-dependent choices of codec (i.e. latin-1 versus utf-8)
print_func('DESDATA =')
for line_num, xml_entry in enumerate(xml_str.splitlines()):
if line_num == 0:
# Remove xml that gets inserted by minidom, if it's not actually there
if (not data.startswith(b'<?xml version')) and xml_entry.startswith('<?xml version'):
continue
print_func(xml_entry)
elif xml_entry.strip() != '':
# Remove extra new lines if XML is already formatted
print_func(xml_entry)
elif des_id.strip() in ['TRE_OVERFLOW', 'Registered Extensions', 'Controlled Extensions']:
tres = TREList.from_bytes(data, 0)
print_func('DESDATA = ')
_print_tres(tres)
else:
# Unknown user-defined data
print_func('DESDATA = {}'.format(_decode_effort(data)))
print_func('')
if details.res_subheader_offsets is not None:
for res_subhead_num in range(details.res_subheader_offsets.size):
print_func('----- RES {} -----'.format(res_subhead_num))
hdr = details.parse_res_subheader(res_subhead_num)
_print_res_header(hdr)
data = details.get_res_bytes(res_subhead_num)
print_func('RESDATA = {}'.format(_decode_effort(data)))
print_func('')
##########
# method for dumping file using the print method(s)
def dump_nitf_file(file_name, dest, over_write=True):
"""
Utility to dump the NITF header and various subheader details to a configurable
destination.
Parameters
----------
file_name : str|BinaryIO
The path to or file-like object containing a NITF 2.1 or 2.0 file.
dest : str
'stdout', 'string', 'default' (will use `file_name+'.header_dump.txt'`),
or the path to an output file.
over_write : bool
If `True`, then overwrite the destination file, otherwise append to the
file.
Returns
-------
None|str
There is only a return value if `dest=='string'`.
"""
if dest == 'stdout':
print_nitf(file_name, dest=sys.stdout)
return
if dest == 'string':
out = StringIO()
print_nitf(file_name, dest=out)
value = out.getvalue()
out.close() # free the buffer
return value
the_out_file = _create_default_output_file(file_name) if dest == 'default' else dest
if not os.path.exists(the_out_file) or over_write:
with open(the_out_file, 'w') as the_file:
print_nitf(file_name, dest=the_file)
else:
with open(the_out_file, 'a') as the_file:
print_nitf(file_name, dest=the_file)
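# Illustrative usage (the file name below is hypothetical):
#     dump_nitf_file('example.ntf', 'string')   # returns the header dump as a str
#     dump_nitf_file('example.ntf', 'default')  # writes example.header_dump.txt next to the input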
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Utility to dump NITF 2.1 or 2.0 headers.',
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
'input_file',
help='The path to a nitf file, or directory to search for NITF files.')
parser.add_argument(
'-o', '--output', default='default',
help="'default', 'stdout', or the path for an output file.\n"
"* 'default', the output will be at '<input path>.header_dump.txt' \n"
" This will be overwritten, if it exists.\n"
"* 'stdout' will print the information to standard out.\n"
"* Otherwise, "
" if | |
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'text/json', 'application/xml', 'text/xml']) # noqa: E501
# Authentication setting
auth_settings = ['faststats_auth'] # noqa: E501
return self.api_client.call_api(
'/{dataViewName}/StaticResources/{resourceCategory}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResourceCategoryDetails', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def static_resources_get_static_resource_file(self, data_view_name, resource_category, resource_name, **kwargs): # noqa: E501
"""Returns a resource file (such as an image file) for the given category and system # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.static_resources_get_static_resource_file(data_view_name, resource_category, resource_name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str data_view_name: The name of the DataView to find the resource for (required)
:param str resource_category: The category of the resource to return (required)
:param str resource_name: The name of the resource to return (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.static_resources_get_static_resource_file_with_http_info(data_view_name, resource_category, resource_name, **kwargs) # noqa: E501
def static_resources_get_static_resource_file_with_http_info(self, data_view_name, resource_category, resource_name, **kwargs): # noqa: E501
"""Returns a resource file (such as an image file) for the given category and system # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.static_resources_get_static_resource_file_with_http_info(data_view_name, resource_category, resource_name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str data_view_name: The name of the DataView to find the resource for (required)
:param str resource_category: The category of the resource to return (required)
:param str resource_name: The name of the resource to return (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['data_view_name', 'resource_category', 'resource_name'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
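# Reject any keyword argument that this generated endpoint does not recognise.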
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method static_resources_get_static_resource_file" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'data_view_name' is set
if ('data_view_name' not in local_var_params or
local_var_params['data_view_name'] is None):
raise ApiValueError("Missing the required parameter `data_view_name` when calling `static_resources_get_static_resource_file`") # noqa: E501
# verify the required parameter 'resource_category' is set
if ('resource_category' not in local_var_params or
local_var_params['resource_category'] is None):
raise ApiValueError("Missing the required parameter `resource_category` when calling `static_resources_get_static_resource_file`") # noqa: E501
# verify the required parameter 'resource_name' is set
if ('resource_name' not in local_var_params or
local_var_params['resource_name'] is None):
raise ApiValueError("Missing the required parameter `resource_name` when calling `static_resources_get_static_resource_file`") # noqa: E501
collection_formats = {}
path_params = {}
if 'data_view_name' in local_var_params:
path_params['dataViewName'] = local_var_params['data_view_name'] # noqa: E501
if 'resource_category' in local_var_params:
path_params['resourceCategory'] = local_var_params['resource_category'] # noqa: E501
if 'resource_name' in local_var_params:
path_params['resourceName'] = local_var_params['resource_name'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/{dataViewName}/StaticResources/{resourceCategory}/Resources/{resourceName}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def static_resources_get_static_resource_file_details(self, data_view_name, resource_category, resource_name, **kwargs): # noqa: E501
"""Requires OrbitAdmin: Returns the details of a resource file (such as an image file) for the given category and system # noqa: E501
This endpoint is only available for users with the OrbitAdmin role # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.static_resources_get_static_resource_file_details(data_view_name, resource_category, resource_name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str data_view_name: The name of the DataView to find the resource for (required)
:param str resource_category: The category of the resource to return (required)
:param str resource_name: The name of the resource to return (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: ResourceDetails
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.static_resources_get_static_resource_file_details_with_http_info(data_view_name, resource_category, resource_name, **kwargs) # noqa: E501
def static_resources_get_static_resource_file_details_with_http_info(self, data_view_name, resource_category, resource_name, **kwargs): # noqa: E501
"""Requires OrbitAdmin: Returns the details of a resource file (such as an image file) for the given category and system # noqa: E501
This endpoint is only available for users with the OrbitAdmin role # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.static_resources_get_static_resource_file_details_with_http_info(data_view_name, resource_category, resource_name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str data_view_name: The name of the DataView to find the resource for (required)
:param str resource_category: The category of the resource to return (required)
:param str resource_name: The name of the resource to return (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(ResourceDetails, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['data_view_name', 'resource_category', 'resource_name'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method static_resources_get_static_resource_file_details" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'data_view_name' is set
if ('data_view_name' not in local_var_params or
local_var_params['data_view_name'] is None):
raise ApiValueError("Missing the required parameter `data_view_name` when calling `static_resources_get_static_resource_file_details`") # noqa: E501
# verify the required parameter 'resource_category' is set
if ('resource_category' not in local_var_params or
local_var_params['resource_category'] is None):
raise ApiValueError("Missing the required parameter `resource_category` when calling `static_resources_get_static_resource_file_details`") # noqa: E501
# verify the required parameter 'resource_name' is set
if ('resource_name' not in local_var_params or
local_var_params['resource_name'] is None):
raise ApiValueError("Missing the required parameter `resource_name` when calling `static_resources_get_static_resource_file_details`") # noqa: E501
collection_formats = {}
path_params = {}
if 'data_view_name' in local_var_params:
path_params['dataViewName'] = local_var_params['data_view_name'] # noqa: E501
if 'resource_category' in local_var_params:
path_params['resourceCategory'] = local_var_params['resource_category'] # noqa: E501
if 'resource_name' in local_var_params:
path_params['resourceName'] = local_var_params['resource_name'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'text/json', 'application/xml', 'text/xml']) # noqa: E501
# Authentication setting
auth_settings = ['faststats_auth'] # noqa: E501
return self.api_client.call_api(
'/{dataViewName}/StaticResources/{resourceCategory}/Resources/{resourceName}/Details', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResourceDetails', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def static_resources_get_static_resources_for_category(self, data_view_name, resource_category, **kwargs): # noqa: E501
"""Requires OrbitAdmin: Returns a list of details for the resource files (such as image files) in the given resource category and system # noqa: E501
This endpoint is only available for users with the OrbitAdmin role # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> | |
<filename>rs/localization_files/AR.py
# -*- coding: utf-8 -*-
################################################################################
# LexaLink Copyright information - do not remove this copyright notice
# Copyright (C) 2012
#
# Lexalink - a free social network and dating platform for the Google App Engine.
#
# Original author: <NAME>
# Documentation and additional information: http://www.LexaLink.com
# Git source code repository: https://github.com/lexalink/LexaLink.git
#
# Please consider contributing your enhancements and modifications to the LexaLink community,
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
# Argentina
AR_regions = [
((u'C', u'Buenos Aires (Capital Federal)'),[
(u'1', u'Agronomía'),
(u'2', u'Almagro'),
(u'3', u'Balvanera'),
(u'4', u'Barracas'),
(u'5', u'Belgrano'),
(u'6', u'Boedo'),
(u'7', u'Caballito'),
(u'8', u'Chacarita'),
(u'9', u'Coghlan'),
(u'10', u'Colegiales'),
(u'11', u'Constitución'),
(u'12', u'Flores'),
(u'13', u'Floresta'),
(u'14', u'La Boca'),
(u'15', u'La Paternal'),
(u'16', u'Liniers'),
(u'17', u'Mataderos'),
(u'18', u'<NAME>'),
(u'19', u'Montserrat'),
(u'20', u'<NAME>'),
(u'21', u'Núñez'),
(u'22', u'Palermo'),
(u'23', u'<NAME>'),
(u'24', u'<NAME>'),
(u'25', u'<NAME>'),
(u'26', u'<NAME>'),
(u'27', u'<NAME>'),
(u'28', u'Recoleta'),
(u'29', u'Retiro'),
(u'30', u'Saavedra'),
(u'31', u'San Cristóbal'),
(u'32', u'San Nicolás'),
(u'33', u'San Telmo'),
(u'34', u'<NAME>'),
(u'35', u'Versalles'),
(u'36', u'Villa Crespo'),
(u'37', u'Villa del Parque'),
(u'38', u'Villa Devoto'),
(u'39', u'Villa Lugano'),
(u'40', u'Villa Luro'),
(u'41', u'Villa Mitre'),
(u'42', u'Villa Ortúzar'),
(u'43', u'Villa Pueyrredón'),
(u'44', u'Villa Real'),
(u'45', u'Villa Riachuelo'),
(u'46', u'Villa Santa Rita'),
(u'47', u'Villa Soldati'),
(u'48', u'Villa Urquiza'),
]),
((u'B', u'Buenos Aires (Provincia)'),[
(u'1', u'<NAME> (Carhué)'),
(u'2', u'<NAME> Chaves'),
(u'3', u'Alberti'),
(u'4', u'<NAME> (Adrogué)'),
(u'5', u'Arrecifes'),
(u'6', u'Avellaneda'),
(u'7', u'Ayacucho'),
(u'8', u'Azul'),
(u'9', u'<NAME>'),
(u'10', u'Balcarce'),
(u'11', u'Baradero'),
(u'12', u'<NAME>'),
(u'13', u'Berazategui'),
(u'14', u'Berisso'),
(u'15', u'Bolívar (San Carlos de Bolívar)'),
(u'16', u'Bragado'),
(u'17', u'Brandsen'),
(u'18', u'Campana'),
(u'19', u'Cañuelas'),
(u'20', u'<NAME>'),
(u'21', u'<NAME>'),
(u'22', u'<NAME>'),
(u'23', u'<NAME>'),
(u'24', u'Castelli'),
(u'25', u'Chacabuco'),
(u'26', u'Chascomús'),
(u'27', u'Chivilcoy'),
(u'28', u'Colón'),
(u'29', u'<NAME>'),
(u'30', u'<NAME>'),
(u'31', u'<NAME> (Punta Alta)'),
(u'32', u'<NAME>'),
(u'33', u'Daireaux'),
(u'34', u'Dolores'),
(u'35', u'Ensenada'),
(u'36', u'Escobar (Belén de Escobar)'),
(u'37', u'<NAME> (Monte Grande)'),
(u'38', u'Exaltación de la Cruz (Capilla del Señor)'),
(u'39', u'Ezeiza (<NAME>)'),
(u'40', u'<NAME>'),
(u'41', u'<NAME>'),
(u'42', u'General Alvarado (Miramar)'),
(u'43', u'General Alvear'),
(u'44', u'General Arenales'),
(u'45', u'General Belgrano'),
(u'46', u'General Guido'),
(u'47', u'General La Madrid'),
(u'48', u'General Las Heras'),
(u'49', u'General Lavalle'),
(u'50', u'General Madariaga (General Juan Madariaga)'),
(u'51', u'General Paz (Ranchos)'),
(u'52', u'General Pinto'),
(u'53', u'General Pueyrredón (Mar del Plata)'),
(u'54', u'General Rodríguez'),
(u'55', u'General San Martín'),
(u'56', u'General Viamonte'),
(u'57', u'General Villegas'),
(u'58', u'Guaminí'),
(u'59', u'<NAME> (Henderson)'),
(u'60', u'Hurlingham'),
(u'61', u'Ituzaingo'),
(u'62', u'<NAME>'),
(u'63', u'Junín'),
(u'64', u'La Costa (Mar del Tuyú)'),
(u'65', u'La Matanza (San Justo)'),
(u'66', u'La Plata'),
(u'67', u'Lanús'),
(u'68', u'Laprida'),
(u'69', u'<NAME>'),
(u'70', u'<NAME> (Vedia)'),
(u'71', u'Lezama'),
(u'72', u'Lincoln'),
(u'73', u'Lobería'),
(u'74', u'Lobos'),
(u'75', u'<NAME>'),
(u'76', u'Luján'),
(u'77', u'Magdalena'),
(u'78', u'Maipú'),
(u'79', u'<NAME>entinas (Los Polvorines)'),
(u'80', u'<NAME> (Coronel Vidal)'),
(u'81', u'<NAME>'),
(u'82', u'Mercedes'),
(u'83', u'Merlo'),
(u'84', u'<NAME>'),
(u'85', u'Moreno'),
(u'86', u'Morón'),
(u'87', u'Navarro'),
(u'88', u'Necochea'),
(u'89', u'<NAME>'),
(u'90', u'Olavarría'),
(u'91', u'Patagones (Carmen de Patagones)'),
(u'92', u'Pehuajó'),
(u'93', u'Pellegrini'),
(u'94', u'Pergamino'),
(u'95', u'Pila'),
(u'96', u'Pilar'),
(u'97', u'Pinamar'),
(u'98', u'Presidente Perón (Guernica)'),
(u'99', u'Puán'),
(u'100', u'Punta Indio (Verónica)'),
(u'101', u'Quilmes'),
(u'102', u'Ramallo'),
(u'103', u'Rauch'),
(u'104', u'Rivadavia (América)'),
(u'105', u'Rojas'),
(u'106', u'<NAME>'),
(u'107', u'Saavedra (Pigüé)'),
(u'108', u'Saladillo'),
(u'109', u'Salto'),
(u'110', u'Salliqueló'),
(u'111', u'<NAME>'),
(u'112', u'San Antonio de Areco'),
(u'113', u'San Cayetano'),
(u'114', u'<NAME>'),
(u'115', u'San Isidro'),
(u'116', u'San Miguel'),
(u'117', u'San Mig<NAME> (Monte)'),
(u'118', u'San Nicolás (San Nicolás de los Arroyos)'),
(u'119', u'<NAME>'),
(u'120', u'San Vicente'),
(u'121', u'Suipacha'),
(u'122', u'Tandil'),
(u'123', u'Tapalqué'),
(u'124', u'Tigre'),
(u'125', u'Tordillo (General Conesa)'),
(u'126', u'Tornquist'),
(u'127', u'<NAME>'),
(u'128', u'<NAME>'),
(u'129', u'Tres de Febrero (Caseros)'),
(u'130', u'<NAME>'),
(u'131', u'Veinticinco de Mayo'),
(u'132', u'<NAME> (Olivos)'),
(u'133', u'Villa Gesell'),
(u'134', u'Villarino (Médanos)'),
(u'135', u'Zárate'),
]),
((u"K", u"Catamarca"), [
(u"AM", u"Ambato"),
(u"AC", u"Ancasti"),
(u"AG", u"Andalgalá"),
(u"AS", u"Antofagasta de la Sierra"),
(u"BE", u"Belén"),
(u"CA", u"Capayán"),
(u"CT", u"Capital (Catamarca)"),
(u"EA", u"El Alto"),
(u"FE", u"<NAME>"),
(u"LP", u"La Paz"),
(u"PA", u"Paclín"),
(u"PO", u"Pomán"),
(u"SM", u"Santa María"),
(u"SR", u"Santa Rosa"),
(u"TI", u"Tinogasta"),
(u"VV", u"<NAME>"),
]),
((u"H", u"Chaco"), [
(u"AB", u"<NAME>"),
(u"BE", u"Bermejo"),
(u"CH", u"Chacabuco"),
(u"CF", u"<NAME>"),
(u"DC", u"Doce de Octubre"),
(u"DA", u"Dos de Abril"),
(u"FJ", u"<NAME> María <NAME>"),
(u"GB", u"General Belgrano"),
(u"GD", u"General Donovan"),
(u"GG", u"General Güemes"),
(u"IN", u"Independencia"),
(u"LI", u"Libertad"),
(u"LM", u"Libertador General San Martín"),
(u"MA", u"Maipú"),
(u"MF", u"<NAME> <NAME>"),
(u"NJ", u"Nueve de Julio"),
(u"OH", u"O'Higgins"),
(u"PP", u"Presidencia de la Plaza"),
(u"PM", u"Primero de Mayo"),
(u"QU", u"Quitilipi"),
(u"SF", u"San Fernando"),
(u"SL", u"San Lorenzo"),
(u"SC", u"Sargento Cabral"),
(u"TA", u"Tapenagá"),
(u"VM", u"Veinticinco de Mayo"),
]),
((u"U", u"Chubut"), [
(u"BI", u"Biedma"),
(u"CU", u"Cushamen"),
(u"ES", u"Escalante"),
(u"FA", u"<NAME>"),
(u"FU", u"Futaleufú"),
(u"GM", u"Gaiman"),
(u"GS", u"Gastre"),
(u"LA", u"Languiñeo"),
(u"MA", u"Mártires"),
(u"PI", u"Paso de Indios"),
(u"RA", u"Rawson"),
(u"RS", u"<NAME>"),
(u"SA", u"Sarmiento"),
(u"TH", u"Tehuelches"),
(u"TN", u"Telsen"),
]),
((u"W", u"Corrientes"), [
(u"BV", u"Bella Vista"),
(u"BA", u"Berón de Astrada"),
(u"CN", u"Capital (Corrientes)"),
(u"CO", u"Concepción"),
(u"CC", u"Curuzú Cuatiá"),
(u"EM", u"Empedrado"),
(u"ES", u"Esquina"),
(u"GA", u"General Alvear"),
(u"GP", u"General Paz"),
(u"GO", u"Goya"),
(u"IT", u"Itatí"),
(u"IZ", u"Ituzaingó"),
(u"LA", u"Lavalle"),
(u"MB", u"Mburucuyá"),
(u"ME", u"Mercedes"),
(u"MC", u"Monte Caseros"),
(u"PL", u"Paso de los Libres"),
(u"SL", u"Saladas"),
(u"SC", u"San Cosme"),
(u"SP", u"San Luis del Palmar"),
(u"SM", u"San Martín"),
(u"SG", u"San Miguel"),
(u"SR", u"San Roque"),
(u"ST", u"Santo Tomé"),
(u"SA", u"Sauce"),
]),
((u"X", u"Córdoba"), [
(u"CA", u"Calamuchita"),
(u"CB", u"Capital (Córdoba)"),
(u"CO", u"Colón"),
(u"CE", u"Cruz del Eje"),
(u"GR", u"General Roca"),
(u"GM", u"General San Martín"),
(u"IS", u"Ischilín"),
(u"JC", u"<NAME>"),
(u"MJ", u"<NAME>"),
(u"MI", u"Minas"),
(u"PO", u"Pocho"),
(u"PP", u"Presidente Roque Sáenz Peña"),
(u"PU", u"Punilla"),
(u"RC", u"Río Cuarto"),
(u"RP", u"Río Primero"),
(u"RS", u"Río Seco"),
(u"RG", u"Río Segundo"),
(u"SA", u"San Alberto"),
(u"SV", u"San Javier"),
(u"SJ", u"San Justo"),
(u"SM", u"Santa María"),
(u"SO", u"Sobremonte"),
(u"TA", u"Tercero Arriba"),
(u"TO", u"Totoral"),
(u"TU", u"Tulumba"),
(u"UN", u"Unión"),
]),
((u"E", u"Entre Ríos"), [
(u"CO", u"Colón"),
(u"CC", u"Concordia"),
(u"DI", u"Diamante"),
(u"FC", u"Federación"),
(u"FE", u"Federal"),
(u"FL", u"Feliciano"),
(u"GG", u"Gualeguay"),
(u"GC", u"Gualeguaychú"),
(u"II", u"Islas del Ibicuy"),
(u"LP", u"La Paz"),
(u"NO", u"Nogoyá"),
(u"PA", u"Paraná"),
(u"SS", u"San Salvador"),
(u"TA", u"Tala"),
(u"UR", u"Uruguay"),
(u"VC", u"Victoria"),
(u"VG", u"Villaguay"),
]),
((u"P", u"Formosa"), [
(u"BE", u"Bermejo"),
(u"FO", u"Formosa"),
(u"LA", u"Laishi"),
(u"MA", u"Matacos"),
(u"PA", u"Patiño"),
(u"PG", u"Pilagás"),
(u"PM", u"Pilcomayo"),
(u"PR", u"Pirané"),
(u"RL", u"<NAME>"),
]),
((u"Y", u"Jujuy"), [
(u"CO", u"Cochinoca"),
(u"DB", u"Doctor <NAME>"),
(u"EC", u"El Carmen"),
(u"HU", u"Humahuaca"),
(u"LE", u"Ledesma"),
(u"PA", u"Palpalá"),
(u"RI", u"Rinconada"),
(u"SA", u"San Antonio"),
(u"SP", u"San Pedro"),
(u"SB", u"Santa Bárbara"),
(u"SC", u"Santa Catalina"),
(u"SU", u"Susques"),
(u"TI", u"Tilcara"),
(u"TU", u"Tumbaya"),
(u"VG", u"<NAME>"),
(u"YA", u"Yavi"),
]),
((u"L", u"La Pampa"), [
(u"AT", u"Atreucó"),
(u"CL", u"Caleu Caleu"),
(u"SR", u"Capital (Santa Rosa)"),
(u"CA", u"Catriló"),
(u"CH", u"Chalileo"),
(u"CP", u"Chapaleufú"),
(u"CC", u"<NAME>"),
(u"CO", u"Conhelo"),
(u"CU", u"Curacó"),
(u"GU", u"Guatraché"),
(u"HU", u"Hucal"),
(u"UC", u"<NAME>"),
(u"LM", u"<NAME>"),
(u"LO", u"Loventué"),
(u"MA", u"Maracó"),
(u"PU", u"Puelén"),
(u"QQ", u"<NAME>"),
(u"RA", u"Rancul"),
(u"RE", u"Realicó"),
(u"TO", u"Toay"),
(u"TR", u"Trenel"),
(u"UT", u"Utracán"),
]),
((u"F", u"La Rioja"), [
(u"AR", u"Arauco"),
(u"LR", u"Capital (La Rioja)"),
(u"CB", u"<NAME>"),
(u"CM", u"Chamical"),
(u"CL", u"Chilecito"),
(u"CV", u"<NAME>"),
(u"FA", u"Famatina"),
(u"GP", u"General Angel Vicente Peñaloza"),
(u"GB", u"General Belgrano"),
(u"GQ", u"General Juan Facundo Quiroga"),
(u"GL", u"General Lamadrid"),
(u"GO", u"General Ocampo"),
(u"GM", u"General San Martín"),
(u"IN", u"Independencia"),
(u"RP", u"Rosario Vera Peñaloza"),
(u"SS", u"San Blas de los Sauces"),
(u"SA", u"Sanagasta"),
(u"VI", u"Vinchina"),
]),
((u"M", u"Mendoza"), [
(u"MZ", u"Capital (Mendoza)"),
(u"GA", u"General Alvear"),
(u"GC", u"Godoy Cruz"),
(u"GU", u"Guaymallén"),
(u"JU", u"Junín"),
(u"LP", u"La Paz"),
(u"LH", u"Las Heras"),
(u"LA", u"Lavalle"),
(u"LC", u"Luján de Cuyo"),
(u"MP", u"Maipú"),
(u"ML", u"Malargüe"),
(u"RI", u"Rivadavia"),
(u"SC", u"San Carlos"),
(u"SM", u"San Martín"),
(u"SR", u"San Rafael"),
(u"ST", u"Santa Rosa"),
(u"TN", u"Tunuyán"),
(u"TP", u"Tupungato"),
]),
((u"N", u"Misiones"), [
(u"AP", u"Apóstoles"),
(u"CG", u"Cainguás"),
(u"CD", u"Candelaria"),
(u"PS", u"Capital (Posadas)"),
(u"CO", u"Concepción"),
(u"ED", u"Eldorado"),
(u"GB", u"<NAME>"),
(u"GU", u"Guaraní"),
(u"IG", u"Iguazú"),
(u"LA", u"<NAME>"),
(u"LM", u"Libertador General San Martín"),
(u"MO", u"Montecarlo"),
(u"OB", u"Oberá"),
(u"SI", u"San Ignacio"),
(u"SJ", u"San Javier"),
(u"SP", u"San Pedro"),
(u"VM", u"Veinticinco de Mayo"),
]),
((u"Q", u"Neuquén"), [
(u"AL", u"Aluminé"),
(u"AN", u"Añelo"),
(u"CL", u"<NAME>"),
(u"CM", u"<NAME>"),
(u"CC", u"<NAME>"),
(u"CO", u"Confluencia"),
(u"HU", u"Huiliches"),
(u"LA", u"Lacar"),
(u"LO", u"Loncopué"),
import re
import os
import tempfile
import shutil
import copy
import time
import random
import socket
import hashlib
from .log import LoggerFactory
from .gossip import gossiper
from .kv import kvmgr
from .stop import stopper
from .threadmgr import threader
from .perfdata import PerfDatas
from .evaluater import evaluater
from .ts import tsmgr
from .handlermgr import handlermgr
from .topic import topiker, TOPIC_MONITORING
from .basemanager import BaseManager
from .jsonmgr import jsoner
from .util import exec_command
# Global logger for this part
logger = LoggerFactory.create_logger('monitoring')
CHECK_STATES = ['ok', 'warning', 'critical', 'unknown', 'pending']
STATE_ID_COLORS = {0: 'green', 2: 'red', 1: 'yellow', 3: 'cyan'}
STATE_COLORS = {'ok': 'green', 'warning': 'yellow', 'critical': 'red', 'unknown': 'grey', 'pending': 'grey'}
class MonitoringManager(BaseManager):
history_directory_suffix = 'monitoring'
def __init__(self):
super(MonitoringManager, self).__init__()
self.logger = logger
self.checks = {}
self.services = {}
# keep a list of the check names that match our groups
self.active_checks = []
# Compile the macro pattern once
self.macro_pat = re.compile(r'(\$ *(.*?) *\$)+')
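# Illustrative note (added, not from the original code): for a raw value like
# "load is $hostname$", macro_pat.search(...).group(2) would capture "hostname".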
def load(self, cfg_dir, cfg_data):
self.cfg_dir = cfg_dir
self.cfg_data = cfg_data
# Load and sanitize a check object in our configuration
def import_check(self, check, fr, name, mod_time=0, service='', pack_name='', pack_level=''):
check['from'] = fr
check['pack_name'] = pack_name
check['pack_level'] = pack_level
check['id'] = check['name'] = name
defaults_ = {'interval' : '10s', 'script': '', 'ok_output': '', 'critical_if': '',
'critical_output': '', 'warning_if': '', 'warning_output': '', 'last_check': 0,
'notes' : ''}
for (k, v) in defaults_.items():
if k not in check:
check[k] = v
if service:
check['service'] = service
if 'if_group' not in check:
# we take the basename of this check's directory for the if_group
# and if it is '/', take '*' (meaning: all groups)
if_group = os.path.basename(os.path.dirname(name))
if not if_group:
if_group = '*'
check['if_group'] = if_group
if 'display_name' in check:
check['display_name'] = '[%s]' % check.get('display_name')
else:
check['display_name'] = name.split('/')[-1]
check['modification_time'] = mod_time
check['state'] = 'pending'
check['state_id'] = 3
check['old_state'] = 'pending'
check['old_state_id'] = 3
check['output'] = ''
check['variables'] = check.get('variables', {})
check['computed_variables'] = {}
self.checks[check['id']] = check
# We got a check deletion from the HTTP API: remove it from disk and from memory
def delete_check(self, cname):
p = os.path.normpath(os.path.join(self.cfg_dir, cname + '.json'))
if not p.startswith(self.cfg_dir):
raise Exception("Bad file path for your script, won't be in the cfg directory tree")
# clean on disk
if os.path.exists(p):
os.unlink(p)
# Now clean in memory too
if cname in self.checks:
del self.checks[cname]
self.link_checks()
# We have a new check from the HTTP API, save it where it needs to be
def save_check(self, cname, check):
p = os.path.normpath(os.path.join(self.cfg_dir, cname + '.json'))
if not p.startswith(self.cfg_dir):
raise Exception("Bad file path for your script, won't be in the cfg directory tree")
# Check that the file's directory exists, and create it if it does not
p_dir = os.path.dirname(p)
if not os.path.exists(p_dir):
os.makedirs(p_dir)
# import a copy, so we don't mess with the fields we need to save
to_import = copy.copy(check)
# Now import it in our running part
self.import_check(to_import, 'from:http', cname)
# and put the new one in the active running checks, maybe
self.link_checks()
# Now we can save the received entry, but first clean useless props
to_remove = ['from', 'last_check', 'modification_time', 'state', 'output', 'state_id', 'id', 'old_state', 'old_state_id']
for prop in to_remove:
try:
del check[prop]
except KeyError:
pass
o = {'check': check}
logger.debug('HTTP check saving the object %s into the file %s' % (o, p))
buf = jsoner.dumps(o, sort_keys=True, indent=4)
tempdir = tempfile.mkdtemp()
f = open(os.path.join(tempdir, 'temp.json'), 'w')
f.write(buf)
f.close()
shutil.move(os.path.join(tempdir, 'temp.json'), p)
shutil.rmtree(tempdir)
def import_service(self, service, fr, sname, mod_time=0, pack_name='', pack_level=''):
service['from'] = fr
service['pack_name'] = pack_name
service['pack_level'] = pack_level
service['name'] = service['id'] = sname
if 'notes' not in service:
service['notes'] = ''
if 'if_group' not in service:
# we take the basename of this check directory for the if_group
# and if /, take the service name
if_group = os.path.basename(os.path.dirname(sname))
if not if_group:
if_group = service['name']
service['if_group'] = service['name']
if_group = service['if_group']
if 'check' in service:
check = service['check']
cname = 'service:%s' % sname
# for the same if_group of the check as ourself
check['if_group'] = if_group
self.import_check(check, fr, cname, mod_time=mod_time, service=service['id'], pack_name=pack_name, pack_level=pack_level)
# Put the default state to unknown, retention will load
# the old data
service['state_id'] = 3
service['modification_time'] = mod_time
service['incarnation'] = 0
# Add it into the services list
self.services[service['id']] = service
def load_check_retention(self, check_retention):
if not os.path.exists(check_retention):
return
logger.log('CHECK loading check retention file %s' % check_retention)
with open(check_retention, 'r') as f:
loaded = jsoner.loads(f.read())
for (cid, c) in loaded.items():
if cid in self.checks:
check = self.checks[cid]
to_load = ['last_check', 'output', 'state', 'state_id', 'old_state', 'old_state_id']
for prop in to_load:
check[prop] = c[prop]
def load_service_retention(self, service_retention):
if not os.path.exists(service_retention):
return
logger.log('Service loading service retention file %s' % service_retention)
with open(service_retention, 'r') as f:
loaded = jsoner.loads(f.read())
for (cid, c) in loaded.items():
if cid in self.services:
service = self.services[cid]
to_load = ['state_id', 'incarnation']
for prop in to_load:
service[prop] = c[prop]
# We have a new service from the HTTP API, save it where it needs to be
def save_service(self, sname, service):
p = os.path.normpath(os.path.join(self.cfg_dir, sname + '.json'))
if not p.startswith(self.cfg_dir):
raise Exception("Bad file path for your script, won't be in the cfg directory tree")
# Check that the file's directory exists, and create it if it does not
p_dir = os.path.dirname(p)
if not os.path.exists(p_dir):
os.makedirs(p_dir)
# import a copy, so we don't mess with the fields we need to save
to_import = copy.copy(service)
# Now import it in our running part
self.import_service(to_import, 'from:http', sname)
# and put the new one in the active running checks, maybe
self.link_services()
# We may have a new service, so export this data to everyone via gossip :)
gossiper.increase_incarnation_and_broadcast()
# Now we can save the received entry, but first clean useless props
to_remove = ['from', 'last_check', 'modification_time', 'state', 'output', 'state_id', 'id']
for prop in to_remove:
try:
del service[prop]
except KeyError:
pass
o = {'service': service}
logger.debug('HTTP service saving the object %s into the file %s' % (o, p))
buf = jsoner.dumps(o, sort_keys=True, indent=4)
tempdir = tempfile.mkdtemp()
f = open(os.path.join(tempdir, 'temp.json'), 'w')
f.write(buf)
f.close()
shutil.move(os.path.join(tempdir, 'temp.json'), p)
shutil.rmtree(tempdir)
# We got a service deletion from the HTTP API: remove it from disk and from memory
def delete_service(self, sname):
p = os.path.normpath(os.path.join(self.cfg_dir, sname + '.json'))
if not p.startswith(self.cfg_dir):
raise Exception("Bad file path for your script, won't be in the cfg directory tree")
# clean on disk
if os.path.exists(p):
os.unlink(p)
# Now clean in memory too
if sname in self.services:
del self.services[sname]
self.link_services()
# We may have one service less, so export this data to everyone via gossip :)
gossiper.increase_incarnation_and_broadcast()
# Look at our services dict and link the ones whose if_group matches our groups,
# so the other nodes are aware of our groups/services
def link_services(self):
logger.debug('LINK my services and my node entry')
node = gossiper.get(gossiper.uuid)
with gossiper.nodes_lock:
groups = node['groups']
for (sname, service) in self.services.items():
if_group = service.get('if_group', '')
if if_group and if_group in groups:
node['services'][sname] = service
# For checks we will only populate our active_checks list
# with the names of the checks whose if_group matches our groups
def link_checks(self):
logger.debug('LOOKING FOR our checks that match our groups')
node = gossiper.get(gossiper.uuid)
with gossiper.nodes_lock:
groups = node['groups']
active_checks = []
for (cname, check) in self.checks.items():
if_group = check.get('if_group', '*')
if if_group == '*' or if_group in groups:
active_checks.append(cname)
self.active_checks = active_checks
# Also update our checks list in KV space
self.update_checks_kv()
# and in our own node object
checks_entry = {}
for (cname, check) in self.checks.items():
if cname not in active_checks:
continue
checks_entry[cname] = {'state_id': check['state_id']}  # by default states are unknown
node['checks'] = checks_entry
def __get_variables(self, check):
variables = check['variables']
# We need to evaluate our variables if there are some
computed_variables = {}
for (k, expr) in variables.items():
try:
computed_variables[k] = evaluater.eval_expr(expr)
except Exception as exp:
raise Exception('the variable %s expr %s did fail to evaluate: %s' % (k, expr, exp))
# supra/GUI/Dialogs/SolutionGUI.py
################################################
# Credits:
# <NAME> - Supervisor
# <NAME> - General coding
# <NAME> - Ballistic code, WMPL
# <NAME> - Supracenter code
# <NAME> - Updated Supracenter code, Geminus
# <NAME> - Advice on atmospheric profiles
# Stack Overflow - Frequent care and support
# Western Meteor Python Group
#################################################
import os
import time
import datetime
import copy
import webbrowser
import zipfile
import pickle
from mpl_toolkits.basemap import Basemap
from netCDF4 import Dataset
from PyQt5.QtWidgets import *
from functools import partial
import sys
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from pyqtgraph.Qt import QtGui, QtCore
import pyqtgraph as pg
from matplotlib.colors import Normalize
import matplotlib.pyplot as plt
import numpy as np
import obspy
import scipy.signal
from scipy.fft import fft
import pyqtgraph.exporters
from matplotlib.backends.backend_qt5agg import FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
import pyximport
pyximport.install(setup_args={'include_dirs':[np.get_include()]})
from supra.Fireballs.SeismicTrajectory import timeOfArrival, trajSearch, estimateSeismicTrajectoryAzimuth, plotStationsAndTrajectory, waveReleasePointWindsContour
from supra.Supracenter.slowscan2 import cyscan as slowscan
from supra.Supracenter.psoSearch import psoSearch
from supra.Supracenter.fetchCopernicus import copernicusAPI
from supra.Supracenter.cyscan5 import cyscan
# from supra.Supracenter.cyscanVectors import cyscan as cyscanV
from supra.Supracenter.propegateBackwards import propegateBackwards
from supra.GUI.Dialogs.AnnoteWindow import AnnoteWindow
from supra.GUI.Dialogs.Preferences import PreferenceWindow
from supra.GUI.Dialogs.Yields import Yield
from supra.GUI.Dialogs.FragStaff import FragmentationStaff
from supra.GUI.Dialogs.TrajSpace import TrajSpace
from supra.GUI.Dialogs.AllWaveformView import AllWaveformViewer
from supra.GUI.Dialogs.TrajInterp import TrajInterpWindow
from supra.GUI.Dialogs.StationList import StationList
from supra.GUI.Dialogs.ParticleMot import ParticleMotion
from supra.GUI.Dialogs.Polmap import Polmap
from supra.GUI.Dialogs.BandpassGUI import BandpassWindow
from supra.GUI.Dialogs.ReportDialog import ReportWindow
from supra.GUI.Dialogs.RayTraceView import rtvWindowDialog
from supra.GUI.Dialogs.GLMReader import glmWindowDialog
from supra.GUI.Dialogs.RotatePol import RotatePolWindow
from supra.GUI.Tools.GUITools import *
from supra.GUI.Tools.Theme import theme
from supra.GUI.Tools.WidgetBuilder import *
from supra.GUI.Tools.htmlLoader import htmlBuilder
from supra.GUI.Tools.Errors import errorCodes
from supra.GUI.Tabs.SupracenterSearch import supSearch
from supra.GUI.Tabs.TrajectorySearch import trajectorySearch
from supra.Stations.Filters import *
from supra.Stations.ProcessStation import procTrace, procStream, findChn
from supra.Stations.CalcAllTimes4 import calcAllTimes
from supra.Stations.CalcAllSigs import calcAllSigs
from supra.Stations.StationObj import Polarization, AnnotationList
from wmpl.Utils.TrajConversions import datetime2JD, jd2Date
from wmpl.Utils.Earth import greatCircleDistance
from supra.Utils.AngleConv import loc2Geo, chauvenet, angle2NDE
from supra.Utils.Formatting import *
from supra.Utils.Classes import Position, Constants, Pick, RectangleItem, Color, Plane, Annote
from supra.Utils.TryObj import *
from supra.Utils.pso import pso
from supra.Files.SaveObjs import Prefs, BAMFile
from supra.Files.SaveLoad import save, load, loadSourcesIntoBam
from supra.Atmosphere.Parse import parseWeather
from supra.Atmosphere.radiosonde import downloadRadio
from supra.Geminus.geminusGUI import Geminus
from supra.Supracenter.l137 import estPressure
from supra.Atmosphere.NRLMSISE import getAtmDensity
from supra.Atmosphere.HWM93 import getHWM
from wmpl.Utils.TrajConversions import date2JD
from wmpl.Utils.OSTools import mkdirP
HEIGHT_SOLVER_DIV = 250
THEO = False
PEN = [(0 *255, 0.4470*255, 0.7410*255),
(0.8500*255, 0.3250*255, 0.0980*255),
(0.9290*255, 0.6940*255, 0.1250*255),
(0.4940*255, 0.1840*255, 0.5560*255),
(0.4660*255, 0.6740*255, 0.1880*255),
(0.3010*255, 0.7450*255, 0.9330*255),
(0.6350*255, 0.0780*255, 0.1840*255)]
consts = Constants()
# Main Window
class SolutionGUI(QMainWindow):
def __init__(self):
super().__init__()
##############################
# Load system-wide preferences
##############################
qtRectangle = self.frameGeometry()
centerPoint = QDesktopWidget().availableGeometry().center()
qtRectangle.moveCenter(centerPoint)
self.move(qtRectangle.topLeft())
self.prefs = Prefs()
try:
with open(os.path.join('supra', 'Misc', 'BAMprefs.bam'), 'rb') as f:
self.prefs = pickle.load(f)
except FileNotFoundError as e:
# Prefs file missing - use default settings
print(printMessage("status"), "Preferences file not found (Was deleted, or fresh install) - Generating a default preference file.")
with open(os.path.join('supra', 'Misc', 'BAMprefs.bam'), 'wb') as f:
pickle.dump(self.prefs, f)
self.bam = BAMFile()
self.color = Color()
# Initialize all of the pyqt things in the GUI
initMainGUI(self)
initMainGUICosmetic(self)
# Add widgets to the floating box
self.addIniDockWidgets()
def geminus(self):
if not hasattr(self.bam.setup, "trajectory"):
errorMessage('No trajectory found!', 2, detail="Please include a trajectory in the source tab before using Geminus!")
return None
self.geminus_gui = Geminus(self.bam, self.prefs)
self.geminus_gui.setGeometry(QRect(100, 100, 1000, 800))
self.geminus_gui.show()
def fPar(self):
file_name = fileSearch(['CSV (*.csv)'], None)
#read csv
t = []
fpar = []
with open(file_name, "r+") as f:
for line in f:
a = line.split(',')
time = None
try:
time = datetime.datetime.strptime(a[0], "%Y-%m-%dT%H:%M:%S.%fZ")
fpar.append(float(a[1]))
except:
pass
if time is not None:
shift = float(self.f_shift_edits.text())
t.append((time - self.bam.setup.fireball_datetime).total_seconds() + shift)
### scale fpar
min_fpar = np.min(fpar)
max_fpar = np.max(fpar)
fpar = fpar - min_fpar
axY = self.make_picks_waveform_canvas.getAxis('left')
waveform_min, waveform_max = axY.range
fpar = fpar/max_fpar*waveform_max
# print(t, fpar)
self.fpar_waveform = pg.PlotDataItem(x=t, y=fpar, pen='r')
self.make_picks_waveform_canvas.addItem(self.fpar_waveform)
def viewToolbar(self):
# Toggles the toolbar
self.ini_dock.toggleViewAction().trigger()
def viewFullscreen(self):
# Toggles fullscreen
if self.windowState() & QtCore.Qt.WindowFullScreen:
self.showNormal()
else:
self.showFullScreen()
def quitApp(self):
# Begins quit sequence
reply = QMessageBox.question(self, 'Quit Program', 'Are you sure you want to quit?', QMessageBox.Yes, QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
qApp.quit()
else:
return None
def openGit(self):
webbrowser.open_new_tab("https://github.com/dvida/Supracenter")
def openDocs(self):
# docs are a locally stored html file
webbrowser.open_new_tab(self.doc_file)
def genReport(self):
self.gr = ReportWindow(self.bam, self.prefs)
self.gr.setGeometry(QRect(500, 400, 500, 400))
self.gr.show()
def stndownloadDialog(self):
self.sd = StationList()
self.sd.setGeometry(QRect(500, 400, 500, 400))
self.sd.show()
def preferencesDialog(self):
self.p = PreferenceWindow()
self.p.setGeometry(QRect(500, 400, 500, 400))
self.p.show()
def trajInterpDialog(self):
self.t = TrajInterpWindow(self.bam, self)
self.t.setGeometry(QRect(500, 400, 500, 400))
self.t.show()
def rtvWindow(self):
self.rtv = rtvWindowDialog(self.bam, self.prefs)
self.rtv.setGeometry(QRect(100, 100, 1200, 700))
self.rtv.show()
def trajSpace(self):
self.ts = TrajSpace(self.bam)
self.ts.setGeometry(QRect(100, 100, 1200, 700))
self.ts.show()
def glmviewer(self):
self.glm = glmWindowDialog(self.bam)
self.glm.setGeometry(QRect(100, 100, 1200, 700))
self.glm.show()
def csvLoad(self, table):
""" Loads csv file into a table
"""
dlg = QFileDialog()
dlg.setFileMode(QFileDialog.AnyFile)
dlg.setNameFilters(['CSV File (*.csv)'])
dlg.exec_()
filename = dlg.selectedFiles()
try:
with open(filename[0]) as f:
data_table = []
next(f)
for line in f:
a = line.split(',')
if len(a) != 9:
errorMessage('Wrong number of columns for a picks file!', 1, info='Make sure a picks file is imported!')
return None
data_table.append(a)
except IsADirectoryError as e:
errorMessage('Please select a valid file to load', 1, detail='{:}'.format(e))
return None
defTable(self.csv_table, 0, 9, headers=['Pick Group', 'Network', 'Code', 'Latitude', 'Longitude', 'Elevation', 'Pick JD', 'Pick Time', 'Station Number'])
toTable(table, data_table)
def csvSave(self, table):
""" Saves a table to a csv
"""
dlg = QFileDialog.getSaveFileName(self, 'Save File')
file_name = checkExt(dlg[0], '.csv')
data_set = fromTable(table)
# Open the output CSV
with open(os.path.join(file_name), 'w') as f:
# Write the header
f.write('Pick group, Network, Code, Lat, Lon, Elev, Pick JD, Pick time, station_number \n')
# Go through all picks
for line in data_set:
line[-1] = int(line[-1])
# Write the CSV entry
f.write("{:}, {:}, {:}, {:}, {:}, {:}, {:}, {:}, {:}\n".format(*line))
errorMessage('Output to CSV!', 0, title='Exported!', detail='Filename: {:}'.format(file_name))
def supSearchSetup(self, manual):
supSearch(self.bam, self.prefs, manual=manual, results_print=False, obj=self)
def trajSearchSetup(self):
x, fopt, geo, stat_names, stat_picks = trajectorySearch(self.bam, self.prefs)
# x, fopt = trajectorySearch(self.bam, self.prefs)
##########
# Display
##########
### Solution Table
defTable(self.seis_table, 9, 2, headers=['Parameter', 'Value'])
data_table = [['Error', fopt],
['X', x[0]],
['Y', x[1]],
['Time', x[2]],
['Velocity', x[3]],
['Azimuth', x[4]],
['Zenith', x[5]],
['Latitude', geo.lat],
['Longitude', geo.lon]]
toTable(self.seis_table, data_table)
### Residual Table
defTable(self.seis_resids, 0, 2, headers=['Station', 'Residual'])
res_table = []
for ss in range(len(stat_names)):
res_table.append([stat_names[ss], stat_picks[ss]])
toTable(self.seis_resids, res_table)
def rayTrace(self):
A = Position(float(self.ray_lat_edits.text()), float(self.ray_lon_edits.text()), float(self.ray_height_edits.text()))
B = Position(self.ray_pick_point[0], self.ray_pick_point[1], self.ray_pick_point[2])
A.pos_loc(B)
B.pos_loc(B)
try:
sounding = parseWeather(self.setup)
except:
errorMessage('Error reading weather profile in rayTrace', 2)
return None
if self.prefs.debug:
print("Starting and End points of Ray Trace")
print(A)
print(B)
if self.setup.perturb_times == 0:
self.setup.perturb_times = 1
trace_data = [None]*self.setup.perturb_times
trace_var = [None]*self.setup.perturb_times
t_arrival = [None]*self.setup.perturb_times
t_arrival_cy = [None]*self.setup.perturb_times
err = [None]*self.setup.perturb_times
#plt.style.use('dark_background')
fig = plt.figure(figsize=plt.figaspect(0.5))
fig.set_size_inches(5, 5)
ax = fig.add_subplot(1, 1, 1, projection='3d')
if self.setup.perturb_method == 'ensemble':
ensemble_file = self.setup.perturbation_spread_file
else:
ensemble_file = ''
x_var = []
y_var = []
z_var = []
p_var = []
t_var = []
error_list = []
for ptb_n in range(self.setup.perturb_times):
trace_data = []
trace_var = []
if ptb_n > 0 and self.ray_enable_perts.isChecked():
if self.prefs.debug:
print(printMessage("status"), "Perturbation {:}".format(ptb_n))
# generate a perturbed sounding profile
sounding_p = perturb(self.setup, sounding, self.setup.perturb_method, \
spread_file=self.setup.perturbation_spread_file, lat=self.setup.lat_centre, lon=self.setup.lon_centre, ensemble_file=ensemble_file, ensemble_no=ptb_n)
else:
# if not using perturbations on this current step, then return the original sounding profile
sounding_p = sounding
z_profile, _ = getWeather(np.array([A.x, A.y, A.z]), np.array([B.x, B.y, B.z]), \
self.setup.weather_type, A, copy.copy(sounding_p))
z_profile = zInterp(B.z, A.z, z_profile, div=100)
a, b, c, E, trace_data = slowscan(A.xyz, B.xyz, z_profile, wind=True, n_theta=self.setup.n_theta, n_phi=self.setup.n_theta, h_tol=self.setup.h_tol, v_tol=self.setup.v_tol)
if trace_data == trace_data:
if self.ray_enable_vars.isChecked():
last_k = 0
N = 15
m, n = np.shape(trace_var[0][0])
for i in range(m//N):
for j in range(n//N):
for line in trace_var:
k = line[3]
if k != last_k:
#c = (0, 0, (t_var[0] - np.pi/2)/np.pi/2%1)
ax.plot3D(x_var, y_var, z_var, c='r')
x_var = []
y_var = []
z_var = []
p_var = []
t_var = []
x_var.append(line[0][i*N, j*N])
y_var.append(line[1][i*N, j*N])
z_var.append(line[2][i*N, j*N])
p_var.append(line[4][i*N, j*N])
t_var.append(line[5][i*N, j*N])
last_k = k
ax.plot3D(x_var, y_var, z_var, c='r')
if ptb_n == 0:
xline = []
yline = []
zline = []
try:
for line in trace_data:
#line[0], line[1], line[2] = loc2Geo(A.lat, A.lon, A.elev, [line[0], line[1], line[2]])
xline.append(line[0])
yline.append(line[1])
zline.append(line[2])
ax.plot3D(np.array(xline)/1000, np.array(yline)/1000, np.array(zline)/1000, 'black')
#ax.scatter(xline, yline, zline, 'blue', marker='o')
#ax.scatter(0, 0, 0, 'orange', marker='^')
except IndexError:
pass
except TypeError:
pass
# ax.set_xlim3d(B.x, A.x)
# ax.set_ylim3d(B.y, A.y)
# ax.set_zlim3d(B.z, A.z)
x_pts = [None]*len(xline)
y_pts = [None]*len(xline)
for i in range(len(xline)):
x_pts[i], y_pts[i], _ = loc2Geo(B.lat, B.lon, B.elev, [xline[i], yline[i], zline[i]])
#!/usr/bin/env python
"""
Convert DICOM files to corresponding PNG files.
NOTE: Only works with DICOM files containing a single
layer!
"""
import sys, os, collections, toml, asyncio, hashlib, time, platform, queue
import numpy as np, png, pydicom, multiprocessing
from pathlib import Path
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtWidgets import (
QMainWindow,
QApplication,
QPushButton,
QWidget,
QLabel,
QGridLayout,
QPlainTextEdit,
QFileDialog,
)
from PyQt5.QtCore import QObject, QThread, pyqtSignal, pyqtSlot, QSize, QTimer
def trap_exc_during_debug(*args):
# when app raises uncaught exception, print info
print(args)
sys.exit(1)
def getConfigFileName(path=None):
if path is None:
path = os.path.dirname(sys.argv[0])
config_file_name = ".dicom_to_png.conf"
full_path = os.path.join(path, config_file_name)
return full_path
def readConfigFile(path=None):
config_file = getConfigFileName(path)
if not os.path.isfile(config_file):
initializeConfigFile(path)
config = ""
with open(config_file, "r") as fin:
config = toml.loads(fin.read())
return config
def initializeConfigFile(path=None):
config_file = getConfigFileName(path)
default_config = {
"output_path": os.path.join(os.path.expanduser("~"), "png_files_from_dicom")
}
saveConfigToFile(default_config)
def saveConfigToFile(config, path=None):
config_file = getConfigFileName(path)
with open(config_file, "w") as fout:
fout.write(toml.dumps(config))
def needs_rescale(hdr):
return hasattr(hdr, "RescaleSlope") or hasattr(hdr, "RescaleIntercept")
def rescale_image(img, hdr):
"""Apply rescale formula from DICOM header, if that information is available."""
if not needs_rescale(hdr):
return (img, hdr)
if type(hdr) == type([]):
hdr = hdr[0]
img = np.array(img)
img_type = img.dtype
# Get the scaling info
rescale_slope = float(getattr(hdr, "RescaleSlope", 1))
rescale_intercept = float(getattr(hdr, "RescaleIntercept", 0))
# Re-Scale
img = img.astype(np.float64) * rescale_slope + rescale_intercept
img = img.astype(img_type)
# Update the header
setattr(hdr, "RescaleSlope", 1.0)
setattr(hdr, "RescaleIntercept", 0.0)
return (img, hdr)
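# --- Illustrative sketch (added, not part of the original module): the rescale
# step above is a plain linear map, stored_value * RescaleSlope + RescaleIntercept.
# With the made-up header values below, a stored pixel of 100 becomes
# 100 * 1.0 + (-1024.0) = -924.0.
def _example_rescale_image():
    from pydicom.dataset import Dataset
    hdr = Dataset()
    hdr.RescaleSlope = 1.0          # hypothetical CT-style rescale values
    hdr.RescaleIntercept = -1024.0
    img = np.array([0, 100, 2000], dtype=np.int16)
    out, hdr = rescale_image(img, hdr)
    print(out)                      # [-1024  -924   976], still int16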
def apply_LUT(img, hdr):
"""
Apply LUT specified in header to the image, if the header specifies one.
Specification:
http://dicom.nema.org/medical/dicom/2017a/output/chtml/part03/sect_C.11.2.html#sect_C.11.2.1.1
"""
lut_seq = getattr(hdr, "VOILUTSequence", None)
if lut_seq is None:
# print("No LUT for image {}".format(generate_unique_filename(hdr)))
return img, hdr
# Use the first available LUT:
lut_desc = getattr(lut_seq[0], "LUTDescriptor", None)
lut_data = getattr(lut_seq[0], "LUTData", None)
if lut_desc is None or lut_data is None:
return img, hdr
first_value = None
try:
    first_value = int(lut_desc[1])
except (TypeError, ValueError):
    # If the descriptor's first value is unusable, fall back to img.min() below
    pass
bit_depth = int(lut_desc[2])
sign_selector = "u" if type(first_value) == int and first_value >= 0 else ""
type_selector = 8
while type_selector < bit_depth and type_selector < 64:
type_selector *= 2
orig_type = img.dtype
img = np.round(img)
if type(first_value) != int:
first_value = img.min()
LUT = {
int(v): lut_data[j]
for j, v in [(i, first_value + i) for i in range(len(lut_data))]
}
img2 = np.array(img)
img2 = img2.astype("{}int{}".format(sign_selector, type_selector))
# Clamp to the LUT input range *before* mapping, so every value has a LUT entry
# (values above the range would otherwise raise a KeyError in the lookup).
img2[img < first_value] = first_value
img2[img >= (first_value + len(lut_data))] = first_value + len(lut_data) - 1
img2 = np.vectorize(lambda x: LUT[int(x)])(img2)
del hdr.VOILUTSequence
return img2.astype(orig_type), hdr
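# --- Illustrative sketch (added): a minimal, hypothetical VOI LUT showing what
# apply_LUT does. A 4-entry, 8-bit LUT whose first mapped input value is 0:
# inputs below the range clamp to the first entry, inputs above it to the last.
def _example_apply_LUT():
    from pydicom.dataset import Dataset
    from pydicom.sequence import Sequence
    item = Dataset()
    item.LUTDescriptor = [4, 0, 8]      # 4 entries, first input value 0, 8 bits
    item.LUTData = [0, 85, 170, 255]    # made-up ramp
    hdr = Dataset()
    hdr.VOILUTSequence = Sequence([item])
    img = np.array([[-1, 0, 2, 9]])
    out, hdr = apply_LUT(img, hdr)
    # Expected mapping: -1 -> 0 (clamped), 0 -> 0, 2 -> 170, 9 -> 255 (clamped)
    print(out)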
def apply_window(img, hdr):
"""
Apply intensity window as defined in the DICOM header to the image (if any window
is defined).
This is applied after any LUT and rescale/intercept.
See https://www.dabsoft.ch/dicom/3/C.11.2.1.2/
This implementation will set the output range (min, max) equal to the
input range (original min, max). If scaling is desired, do that after calling
this function.
"""
window_center = getattr(hdr, "WindowCenter", None)
if window_center is None:
return img, hdr
y_min = img.min()
y_max = img.max()
window_width = getattr(hdr, "WindowWidth", None)
if window_width is None:
    return img, hdr
window_center, window_width = float(window_center), float(window_width)
img_out = np.zeros_like(img)
# y = ((x - (c - 0.5)) / (w - 1) + 0.5) * (y_max - y_min) + y_min
img_out = ((img - (window_center - 0.5)) / (window_width - 1) + 0.5) * (
y_max - y_min
) + y_min
# if (x <= c - 0.5 - (w-1)/2), then y = y min
img_out[img <= (window_center - 0.5 - (window_width - 1) / 2.0)] = y_min
# else if (x > c - 0.5 + (w-1)/2), then y = y max ,
img_out[img > (window_center - 0.5 + (window_width - 1) / 2.0)] = y_max
return img_out, hdr
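# --- Illustrative sketch (added): a worked example of the windowing formula
# with made-up values. With WindowCenter=40 and WindowWidth=400, inputs at or
# below 40 - 0.5 - 399/2 = -160 map to the output minimum, inputs above
# 40 - 0.5 + 399/2 = 239 map to the output maximum, and values in between are
# scaled linearly (the output range here is kept equal to the input range).
def _example_apply_window():
    from pydicom.dataset import Dataset
    hdr = Dataset()
    hdr.WindowCenter = 40       # hypothetical soft-tissue-style window
    hdr.WindowWidth = 400
    img = np.array([-1000.0, -160.0, 40.0, 239.0, 3000.0])
    out, hdr = apply_window(img, hdr)
    print(out)                  # extremes are clamped to img.min() / img.max()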
def read_dicom_raw(file_path):
dicom = pydicom.read_file(file_path)
img = dicom.pixel_array
return img, dicom
def read_dicom(file_path):
img, hdr = read_dicom_raw(file_path)
img, hdr = rescale_image(img, hdr)
img, hdr = apply_LUT(img, hdr)
img, hdr = apply_window(img, hdr)
return img, hdr
def path_to_list(file_path):
if file_path == "" or file_path == os.path.sep:
return []
rest, last = os.path.split(file_path)
# Check to see if we have hit a "root" (or "drive"), bail out if so.
if rest == file_path:
return [rest]
return path_to_list(rest) + [last] if last != "" else path_to_list(rest)
def abbreviate_path(file_path, length=2):
path_list = path_to_list(file_path)
abbrev = file_path
if len(path_list) > length + 1:
abbrev = os.path.join("...", *path_list[-length:])
return abbrev
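# Illustrative note (added): with the default length of 2, a long path keeps
# only its last two components, e.g. on a POSIX-style path
#   abbreviate_path("/home/user/data/study/series")  ->  ".../study/series"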
def win_safe_path(path):
"""
Remove leading 'slash' in Windows paths, which should be relative or begin with
a drive letter, not a slash.
"""
if path is None or path == '':
return None
# Sometimes in Windows, you end up with a path like '/C:/foo/bar' --- not sure why.
if platform.system().lower() == "windows":
if path[0] in ["/", "\\"]:
return path[1:]
return path
def generate_unique_filename(dicom_header, extension=".dcm"):
"""
Generates and returns a unique filename based on the Patient ID and
internal unique idientifier contained in the DICOM header metadata.
Unique names are created in the following format:
```text
{PatientID}_{InstanceHash}{Extension}
```
where `{PatientID}` is the `Patient ID` field from the DICOM header,
`{InstanceHash}` is the initial 16 characters of the hexadecimal encoding
of the *sha-1* hash of the `SOP Instance UID` field from the DICOM header,
and `{Extension}` is the file extension e.g. `.dcm` for DICOM files and
`.png` for PNG format.
"""
patient_id = dicom_header.PatientID
sop_instance_uid = dicom_header.SOPInstanceUID
instance_hash = hashlib.sha1(sop_instance_uid.encode("utf-8")).hexdigest()[:16]
return "{}_{}{}".format(patient_id, instance_hash, extension)
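# --- Illustrative sketch (added): usage with made-up identifiers. The result
# is "<PatientID>_<first 16 hex chars of sha1(SOPInstanceUID)><extension>".
def _example_generate_unique_filename():
    from pydicom.dataset import Dataset
    hdr = Dataset()
    hdr.PatientID = "PAT001"            # hypothetical values
    hdr.SOPInstanceUID = "1.2.840.1.1"
    print(generate_unique_filename(hdr, ".png"))   # e.g. "PAT001_<16 hex chars>.png"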
class ConverterWindow(QMainWindow):
NUM_THREADS = multiprocessing.cpu_count()
sig_abort_workers = pyqtSignal()
def __init__(self):
self.platform = platform.system().lower()
QMainWindow.__init__(self)
self.config = readConfigFile()
self.setWindowTitle("DICOM to PNG")
self.outputDir = self.config["output_path"]
# we want to drop files onto this window:
self.setAcceptDrops(True)
# keep it large enough to be an easy target
self.setMinimumSize(QSize(400, 600))
self.responseLines = collections.deque(maxlen=20)
self.queue = queue.Queue()
self.working = {}
self.convertedCount = 0
self.didAbort = False
centralWidget = QWidget(self)
gridLayout = QGridLayout()
centralWidget.setLayout(gridLayout)
self.setCentralWidget(centralWidget)
self.configButton = QPushButton("Save to...")
self.configButton.clicked.connect(self.setOutputDirectory)
self.configButton.setToolTip(
"Click to select the folder where PNG images should be placed."
)
self.addFilesButton = QPushButton("Add Files")
self.addFilesButton.clicked.connect(lambda: self.addFilesDialog())
self.addFilesButton.setToolTip("Click to add one or more DICOM files.")
self.addDirButton = QPushButton("Add Folder")
self.addDirButton.clicked.connect(lambda: self.addDirectoryDialog())
self.addDirButton.setToolTip("Click to add a DICOM folder or folder tree.")
self.stopButton = QPushButton("Stop")
self.stopButton.setDisabled(True)
self.stopButton.clicked.connect(self.abortWorkers)
self.stopButton.setToolTip("Click to stop all conversions already in progress.")
self.exitButton = QPushButton("Exit")
self.exitButton.clicked.connect(self.stopAndExit)
self.exitButton.setToolTip("Click to exit the application.")
gridLayout.addWidget(self.configButton, 0, 0, 1, 1)
gridLayout.addWidget(self.stopButton, 0, 2, 1, 1)
gridLayout.addWidget(self.exitButton, 0, 3, 1, 1)
gridLayout.addWidget(self.addFilesButton, 1, 0, 1, 2)
gridLayout.addWidget(self.addDirButton, 1, 2, 1, 2)
self.indicateThreadsRunning(False)
# Give some simple instructions:
self.instructions = QLabel(
"Drop DICOM files or directories\nhere to convert to PNG.", self
)
self.instructions.setAlignment(QtCore.Qt.AlignCenter)
gridLayout.addWidget(self.instructions, 2, 0, 1, 4)
self.log = QPlainTextEdit()
self.log.setReadOnly(True)
gridLayout.addWidget(self.log, 3, 0, 1, 4)
self.setStatusBar("Output Folder: {}".format(abbreviate_path(self.outputDir)))
@pyqtSlot()
def stopAndExit(self):
self.exitButton.setText("Exiting...")
self.exitButton.setEnabled(False)
self.setStatusBar("Exiting....")
self.abortWorkers()
app.quit()
def addResponse(self, msg):
self.log.appendPlainText(msg)
print(msg)
def setResponse(self, msg):
self.setStatusBar(msg)
self.addResponse(msg)
def setStatusBar(self, msg):
self.statusBar().showMessage(msg)
def indicateThreadsRunning(self, flag):
self.stopButton.setEnabled(flag)
self.exitButton.setEnabled(not flag)
self.configButton.setEnabled(not flag)
msg = "Working..." if flag else "Ready."
self.setStatusBar(msg)
def addFilesDialog(self):
options = QFileDialog.Options()
# options |= QFileDialog.DontUseNativeDialog
files, _ = QFileDialog.getOpenFileNames(
self,
"Select DICOM Files",
"",
"DICOM Files (*.dcm *.dicom *);;All Files (*)",
options=options,
)
new_items = {"files": files, "dirs": []}
self.processNewItems(new_items)
def showDirectoryDialog(self, caption="Select DICOM Folder."):
options = QFileDialog.Options()
# options |= QFileDialog.DontUseNativeDialog
dirname = QFileDialog.getExistingDirectory(
self, caption, os.getcwd(), options=options
)
return win_safe_path(dirname)
def addDirectoryDialog(self):
dirname = self.showDirectoryDialog()
new_items = {"files": [], "dirs": [dirname]}
self.processNewItems(new_items)
def setOutputDirectory(self):
dialog = QFileDialog(self, "Select destination folder for output images.")
dialog.setFileMode(QFileDialog.Directory)
dialog.setDirectory(self.outputDir)
dialog.setOption(QFileDialog.ShowDirsOnly, True)
# [KLUDGE] alert: This one can't use the system dialog on Mac OS because for
# some reason the "New Folder" button will crash the app. So on Mac, use the
# non-native dialog instead for now.
# Re-visit this in the future to try to remove it...
if self.platform == "darwin":
dialog.setOption(QFileDialog.DontUseNativeDialog, True)
if dialog.exec():
dirname = win_safe_path(dialog.selectedFiles()[0])
self.config["output_path"] = dirname
self.outputDir = dirname
saveConfigToFile(self.config)
self.setStatusBar(
"Output Folder: {}".format(abbreviate_path(self.outputDir))
)
else:
print("Set output folder failed.")
# --------------------------------------------------
# Originally inspired by:
# https://stackoverflow.com/a/8580720
#
def dragEnterEvent(self, event):
if event.mimeData().hasUrls():
event.accept()
else:
event.ignore()
def dropEvent(self, event):
new_items = {"files": [], "dirs": []}
for url in event.mimeData().urls():
path = win_safe_path(url.path())
if os.path.isdir(path):
self.addResponse(
"Folder: {}".format(os.path.basename(os.path.normpath(path)))
)
new_items["dirs"].append(path)
else:
path = url.toLocalFile()
if os.path.isfile(path):
self.addResponse("File: {}".format(os.path.basename(path)))
new_items["files"].append(path)
self.setStatusBar(
"Added {} files and {} directories.".format(
len(new_items["files"]), len(new_items["dirs"])
)
)
self.processNewItems(new_items)
# --------------------------------------------------
= p
# now go there...
bestp = bestp * scale
self.SetPos( self.pos + bestp )
return bestp
def DrawMore(self, f):
# make current
vv.figure(f.nr)
if not hasattr(self, '_patch') or not hasattr(self, '_kw'):
return
a=vv.subplot(311)
vv.volshow(self._patch)
a.daspect = 1,-1,-1
a=vv.subplot(312)
vv.volshow(self._kw)
a.daspect = 1,-1,-1
a=vv.subplot(313)
vv.volshow(self._patch2)
a.daspect = 1,-1,-1
tmp = Pointset(3)
sam = self._manager.data.sampling
shape = self._patch.shape
c = Point(shape[2],shape[1],shape[0]) * Point(sam[2],sam[1],sam[0]) * 0.5
tmp.append(c)
tmp.append(c+self.dir*4)
p=vv.plot(tmp)
p.alpha = 0.5
class DirComWithRingWalker2D(DirComWalker2D):
""" A better version of the DirComWalker.
It uses a second direction term based on sampling local maxima
in a ring around the current position.
The final direction is a linear combination of the walkDir, ringDir
and ringDir, and is thereafter limited.
Also, the kernel is used differently, but for this one needs to
change the if-statement in DirComWalker2D.DirCom().
"""
def DirRing(self):
""" Find the position of the wire penetrating a ring
around the current position. If there are two such
positions, return the sum of the vectors to them. """
# get data and its scale vector
data = self._manager.data
sam = self._manager.data.sampling
scale = Point(sam[1],sam[0])
# get COM kernels
sigma = self._manager.params.scale
sigma2size = 2
g = self._manager.GetGaussianKernel(sigma, sigma2size, (0,0) )
# calculate sze's
szes = [(s-1)/2 for s in g.shape]
sze_y, sze_x = szes[0], szes[1]
# get patch
patch = self.GetPatch( tuple(szes) )
if patch is None:
self.Kill("Out of bounds in getting patch for DirRing.")
return
# only keep the edges
# todo: this mask can be calculated beforehand
patch = Aarray(patch+0, sampling=sam)
patch.origin = -sze_y * sam[0], -sze_x * sam[1]
dref = patch.index_to_point(sze_y,0).norm()
for y in range(patch.shape[0]):
for x in range(patch.shape[1]):
d = patch.index_to_point(y, x).norm()
if d < dref-1 or d >= dref:
patch[y,x] = 0
# get high local maxima.
mask = ( patch - sp.ndimage.filters.maximum_filter(patch,3) ) == 0
patch[mask==0] = 0
patch[patch<self._manager.params.th1] = 0
# show
self._patch2 = patch
# if there are two pixels, create a vector!
p = Point(0,0)
Iy, Ix = np.where(patch>0)
if len(Iy) >= 2 and len(Iy) < 3:
for i in range(len(Ix)):
tmp = patch.index_to_point(Iy[i], Ix[i])
p = p + tmp.normalize()
# Done
return p
def Move(self):
""" Overloaded move method. """
# apply directional center of mass operator
comDir = self.DirCom(self.viewDir)
if comDir is None:
return
# get full center of mass
ringDir = self.DirRing()
# get walkdir
if not self.history:
walkdir = self.viewDir
else:
refpos = self.history[-2:][0]
walkdir = self.pos - refpos
if walkdir.norm()>0:
walkdir = walkdir.normalize()
# combine
oldViewDir = self.viewDir
params = self._manager.params
w0, w1, w2 = params.comWeight, params.ringWeight, params.historyWeight
self.viewDir = comDir*w0 + ringDir*w1 + walkdir*w2
self.viewDir = self.viewDir.normalize()
# apply limit to angle
limitingAngle = self._manager.params.limitingAngle
self.viewDir = self._LimitAngle(self.viewDir, oldViewDir, limitingAngle)
limitingAngle = self._manager.params.limitingAngle2
self.viewDir = self._LimitAngle(self.viewDir, walkdir, limitingAngle)
# Do a step in that direction
self.viewDir = self.viewDir.normalize()
stepDir = self.DoStep(self.viewDir)
# combining walkdir and fullcom: the walkdir is "reset" each time
# by rounding to the voxel, and therefore does not bend along.
# what do we visualize?
self.dir = self.viewDir
#self.dir = self.DirCom(self.viewDir) * 0.01
#self.dir = walkdir #(not used anymore)
# test if we are ok here...
# There are two thresholds. th1 says below which intensity we
# should start to worry. th2 says below which intensity we can
# be sure it is background. An error measure is calculated
# which indicates where between th2 and th1 the value is now.
# The square of that value is subtracted from a food supply.
# When this supply reaches 0, the walker is killed.
# Each time we encounter a sample above th1, the food supply
# is reset to 1.0.
val = self._manager.data.sample(self.pos)
th1, th2 = self._manager.params.th1, self._manager.params.th2
if val < th1:
portion = (th1 - val ) / (th1-th2)
self._foodsupply -= portion**2
if self._foodsupply <= 0:
self.Kill("Ran in too low intensity pixels")
else:
self._foodsupply = 1.0
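# Illustrative note (added, hypothetical numbers): with th1 = 500, th2 = 300
# and a sampled value of 400, portion = (500 - 400) / (500 - 300) = 0.5, so the
# food supply drops by 0.5 ** 2 = 0.25; four such consecutive samples (without
# any sample above th1 in between) would kill the walker.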
class DirComWithRingWalker3D(DirComWalker3D):
def DirRing(self):
""" Find the position of the wire penetrating a ring
around the current position. If there are two such
positions, return the sum of the vectors to them. """
# get data and its scale vector
data = self._manager.data
sam = self._manager.data.sampling
scale = Point(sam[2], sam[1],sam[0])
# get COM kernels
sigma = self._manager.params.scale
sigma2size = 2
g = self._manager.GetGaussianKernel(sigma, sigma2size, (0,0,0) )
# calculate sze's
szes = [(s-1)/2 for s in g.shape]
sze_z, sze_y, sze_x = szes[0], szes[1], szes[2]
# get patch
patch = self.GetPatch( tuple(szes) )
if patch is None:
self.Kill("Out of bounds in getting patch for DirRing.")
return
# only keep the edges
# todo: this mask can be calculated beforehand
patch = Aarray(patch+0, sampling=sam)
patch.origin = -sze_z * sam[0], -sze_y * sam[1], -sze_x * sam[2]
dref = patch.index_to_point(sze_y,0,0).norm()
for z in range(patch.shape[0]):
for y in range(patch.shape[1]):
for x in range(patch.shape[2]):
d = patch.index_to_point(z,y,x).norm()
if d < dref-1 or d >= dref:
patch[z,y,x] = 0
# get high local maxima.
mask = ( patch - sp.ndimage.filters.maximum_filter(patch,3) ) == 0
patch[mask==0] = 0
patch[patch<self._manager.params.th1] = 0
# show
self._patch2 = Aarray(patch, self._manager.data.sampling)
# if there are two pixels, create a vector!
p = Point(0,0,0)
Iz, Iy, Ix = np.where(patch>0)
if len(Iy) >= 2 and len(Iy) < 3:
for i in range(len(Ix)):
tmp = patch.index_to_point(Iz[i], Iy[i], Ix[i])
p = p + tmp.normalize()
# Done
return p
def Move(self):
# "inherit" from walker2D
Testing2D.Move.im_func(self)
class MPCWalker2D(BaseWalker2D):
def __init__(self, manager, p):
BaseWalker2D.__init__(self, manager, p)
# get params
ctvalues2double = self._manager.params.ctvalues2double
mcpDistance = self._manager.params.mcpDistance
# create mcp object if required
if not hasattr(self._manager, 'mcp'):
speed = 1/2**(self._manager.data/ctvalues2double)
self._manager.mcp = mcp.McpDistance(speed, 0, mcpDistance)
# dont show a vector
self.dir = None
# keep a path to walk
self._future = Pointset(self._manager.data.ndim)
self._distance = 0
def SetPos(self, pos):
# set this pos
if pos is not None:
#self.RememberPos(pos)
BaseWalker2D.SetPos(self, pos)
def RememberPos(self, pos, sze=0):
""" Remember this pos as a position where we've been,
so we cannot go there again. """
mask = self._manager.mask
iy, ix = self._manager.data.point_to_index(pos)
for dy in range(-sze,sze+1):
for dx in range(-sze,sze+1):
y, x = iy+dy, ix+dx
if y<0 or x<0 or y>=mask.shape[0] or x>=mask.shape[1]:
continue
mask[y,x] = 1
def Move(self):
# todo: only in patch (but nasty to take edges into account...)
# do we have some path to walk left over?
maxdist = self._manager.params.mcpDistance/2.0
if len(self._future) and self._distance < maxdist:
p = self._future.pop()
self._distance += p.distance(self.pos)
self.SetPos( p )
return
else:
self._distance = 0
m = self._manager.mcp
# reset mcp object
m.Reset(self.pos)
# freeze the voxels that we came from
if self._history:
for pos in self._history[-20:]:
ii = m.MakeIntPos(pos)
m.nindex_f[ ii ] = - abs(m.nindex_f[ ii ])
ii = m.MakeIntPos(self._history[-1])
m.nindex_f[ ii ] = - abs(m.nindex_f[ ii ])
for n in m.GetNeighbors(ii):
m.nindex_f[ n ] = - abs(m.nindex_f[ n ])
# lets go!
m.EvolveFront()
if m._endpoint is None:
self.Kill("No stent to follow.")
return
path = m.GetPathAsPoints(m._endpoint)
# store
self._future = path[:-1]
self.Move() # do one step
# add to history
# d = 0
# for p in reversed(path[:-1]):
# if p == self.pos:
# print('same one')
# self.SetPos(p)
# d += p.distance(self.pos)
# if d > self._manager.params.mcpDistance/2.0:
# break
class MPCWalker3D(BaseWalker3D):
def __init__(self, manager, p):
BaseWalker3D.__init__(self, manager, p)
# get params
ctvalues2double = self._manager.params.ctvalues2double
mcpDistance = self._manager.params.mcpDistance
# create mcp object if required
if not hasattr(self._manager, 'mcp'):
speed = 1/2**(self._manager.data/ctvalues2double)
self._manager.mcp = mcp.McpDistance(speed, 0, mcpDistance)
# don't show a direction vector
self.dir = None
# keep a path to walk
self._future = Pointset(self._manager.data.ndim)
self._distance = 0
# Repository: Qm-Dev/figures-calculator
import math
import time
version = "Alpha 7.1"
lang = "ES"
redo = 0
while redo == 0:
try:
def main():
print("\n\n\n")
print("\t _____ _ _ ")
time.sleep(0.3)
print("\t/ __ \ (_) (_) ")
time.sleep(0.3)
print("\t| / \/ ___ _ __ ___ _ __ _ __ _ ___ _ _ _ _ __ ___ ")
time.sleep(0.3)
print("\t| | / _ \| '_ \ / _ \ '__| '_ \| |/ __| | | | | '_ ` _ \ ")
time.sleep(0.3)
print("\t| \__/\ (_) | |_) | __/ | | | | | | (__| | |_| | | | | | |")
time.sleep(0.3)
print("\t \____/\___/| .__/ \___|_| |_| |_|_|\___|_|\__,_|_| |_| |_|")
time.sleep(0.3)
print("\t | | ")
time.sleep(1)
print("\nBienvenido al programa. A continuación, escoja la opción que estime conveniente.")
print("\t1 - Polígonos (2D)")
print("\t2 - Poliedros (3D)")
print("\t3 - Créditos")
print("\t4 - Salir")
opcion = int(input("Opción: "))
# Figuras 2D
if opcion == 1:
def fig_2d():
print("\nSeleccione un polígono.")
print("1 - Cuadriláteros")
print("2 - Circunferencia")
print("3 - Triángulo")
print("4 - Pentágono")
print("5 - Volver al menú principal")
option = int(input("Opción: "))
# Cuadriláteros
if option == 1:
def cuadrilateros():
print("\nSeleccione una figura:")
print("1 - Cuadrado")
print("2 - Rectángulo")
print("3 - Rombo")
print("4 - Romboide")
print("5 - Trapecio")
print("6 - Trapezoide")
print("7 - Escoger otro polígono")
option = int(input("Opción: "))
# Cuadrado
if option == 1:
print("\nSeleccione aspecto a calcular.")
print("1 - Área")
print("2 - Perímetro")
print("3 - Diagonal")
print("4 - Escoger otro cuadrilátero")
option = int(input("Opción: "))
# Área de un cuadrado
if option == 1:
lado = float(input("Ingrese la medida de uno de sus lados: "))
area_sq = lado ** 2
if lado <= 0:
print("\nError: El lado alberga un valor negativo o igual a cero.")
time.sleep(2)
print("Volviendo al menú de polígonos...")
time.sleep(1)
fig_2d()
else:
print(f"\nLa medida del área de este cuadrado es de {area_sq} unidades cuadradas.")
time.sleep(4)
print("Volviendo al menú de polígonos...")
time.sleep(2)
fig_2d()
# Perímetro de un cuadrado
elif option == 2:
lado = float(input("Ingrese la medida de uno de sus lados: "))
perim_sq = lado * 4
if lado <= 0:
print("\nError: El lado alberga un valor negativo o igual a cero.")
time.sleep(3)
print("Volviendo al menú de polígonos...")
time.sleep(2)
fig_2d()
else:
print(f"\nEl perímetro de este cuadrado es de {perim_sq} unidades.")
time.sleep(4)
print("Volviendo al menú de polígonos...")
time.sleep(2)
fig_2d()
# Diagonal de un cuadrado
elif option == 3:
lado = float(input("Ingrese la medida de uno de sus lados: "))
diag_sq = ((lado ** 2) + (lado ** 2))
diag_sq_final = (math.sqrt(diag_sq))
if lado <= 0:
print("\nError: El valor ingresado no puede tener valor negativo o igual a cero.")
time.sleep(3)
print("Volviendo al menú de polígonos...")
time.sleep(2)
fig_2d()
else:
print(f"\nLa diagonal de este cuadrado es de {diag_sq_final:.2f} unidades.")
time.sleep(4)
print("Volviendo al menú de polígonos...")
time.sleep(2)
fig_2d()
# Escoger otro cuadrilátero
elif option == 4:
cuadrilateros()
# Otro valor
else:
print("\nError: Operación incorrecta.")
time.sleep(2)
print("Regresando al menú principal...")
time.sleep(1)
main()
# Rectángulo
elif option == 2:
print("\nSeleccione aspecto a calcular.")
print("1 - Área")
print("2 - Perímetro")
print("3 - Diagonal")
print("4 - Escoger otro cuadrilátero")
option = int(input("Opción: "))
# Área de un rectángulo
if option == 1:
print("\nA continuación ingrese las medidas de ancho y largo del rectángulo.")
time.sleep(0.5)
ancho = float(input("\nAncho: "))
largo = float(input("Largo: "))
area_rect = ancho * largo
print(f"\nEl área de este rectángulo es de {area_rect} unidades cuadradas.")
time.sleep(4)
print("Volviendo al menú de polígonos...")
time.sleep(2)
fig_2d()
# Perímetro de un rectángulo
elif option == 2:
print("\nA continuación ingrese las medidas de ancho y largo del rectángulo.")
ancho = float(input("Ancho: "))
largo = float(input("Largo: "))
perim_rect = (ancho * 2) + (largo * 2)
print(f"\nEl perímetro de este rectángulo es de {perim_rect} unidades.")
time.sleep(4)
print("Volviendo al menú de polígonos...")
time.sleep(2)
fig_2d()
# Diagonal de un rectángulo
elif option == 3:
print("\nA continuación ingrese las medidas de ancho y largo del rectángulo.")
ancho = float(input("Ancho: "))
largo = float(input("Largo: "))
diag_rect = ((ancho ** 2) + (largo ** 2))
diag_rect_final = (math.sqrt(diag_rect))
print(
f"\nLa diagonal aproximada de este rectángulo es de {diag_rect_final:.2f} unidades.")
time.sleep(4)
print("Volviendo al menú de polígonos...")
time.sleep(2)
fig_2d()
# Escoger otro cuadrilátero
elif option == 4:
cuadrilateros()
# Otro valor
else:
print("\nError: Operación incorrecta.")
time.sleep(2)
print("Regresando al menú principal...")
time.sleep(1)
main()
# Rombo
elif option == 3:
print("\nSeleccione aspecto a calcular.")
print("1 - Área")
print("2 - Perímetro")
print("3 - Escoger otro cuadrilátero")
option = int(input("Opción: "))
# Área de un rombo
if option == 1:
D = float(input("\nIngrese el valor de la diagonal mayor: "))
d = float(input("Ingrese el valor de la diagonal menor: "))
area_romb = (D * d) / 2
if D > 0 and d > 0:
print(f"\nEl área del rombo es de {area_romb:.2f} unidades cuadradas.")
time.sleep(3)
print("Regresando al menú de polígonos...")
time.sleep(2)
fig_2d()
else:
print(
"\nError: Los valores ingresados no pueden albergar números negativos o iguales a"
" cero.")
time.sleep(3)
print("Regresando al menú de polígonos...")
time.sleep(2)
fig_2d()
        # Perimeter of a rhombus
elif option == 2:
D = float(input("\nIngrese el valor de la diagonal mayor: "))
d = float(input("Ingrese el valor de la diagonal menor: "))
perim_romb = 2 * (math.sqrt((D ** 2) + (d ** 2)))
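            # The diagonals of a rhombus bisect each other at right angles, so each side
            # is sqrt((D/2)**2 + (d/2)**2) and the perimeter 4 * side simplifies to
            # 2 * sqrt(D**2 + d**2); e.g. D = 8, d = 6 gives 2 * sqrt(100) = 20 units.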
if D > 0 and d > 0:
print(f"\nEl perímetro de este rombo es de {perim_romb:.2f} unidades.")
time.sleep(3)
print("Regresando al menú de polígonos...")
time.sleep(2)
fig_2d()
else:
print(
"\nError: Los valores ingresados no pueden albergar números negativos o iguales a"
" cero.")
time.sleep(3)
print("Regresando al menú de polígonos...")
time.sleep(2)
fig_2d()
        # Choose another figure
elif option == 3:
cuadrilateros()
        # Any other value
else:
print("\nError: Operación incorrecta.")
time.sleep(2)
print("Regresando al menú principal...")
time.sleep(1)
main()
    # Rhomboid
elif option == 4:
print("\nSeleccione aspecto a calcular.")
print("1 - Área")
print("2 - Perímetro")
print("3 - Escoger otro cuadrilátero")
option = int(input("Opción: "))
        # Area of a rhomboid
if option == 1:
b = float(input("\nIngrese el valor del lado que actúa como base: "))
h = float(input("Ingrese el valor de la altura relativa: "))
area_romboid = b * h
if b <= 0 or h <= 0:
print(
"\nError: Los valores ingresados albergan números negativos o iguales a cero.")
time.sleep(3)
print("Regresando al menú de polígonos...")
time.sleep(2)
fig_2d()
else:
print(f"\nEl área del romboide es de {area_romboid:.2f} unidades cuadradas.")
time.sleep(3)
print("Regresando al menú de polígonos...")
time.sleep(2)
fig_2d()
        # Perimeter of a rhomboid
elif option == 2:
a = float(input("\nIngrese la medida de un lado: "))
b = float(input("Ingrese la medida del otro lado: "))
perim_romboid = 2 * (a + b)
if a <= 0 or b <= 0:
print(
"\nError: Los valores ingresados albergan números negativos o iguales a cero.")
time.sleep(3)
print("Regresando al menú de polígonos...")
time.sleep(2)
fig_2d()
else:
print(f"\nEl perímetro del romboide es de {perim_romboid:.2f} unidades.")
time.sleep(3)
print("Regresando al menú de polígonos...")
time.sleep(2)
fig_2d()
        # Choose another figure
elif option == 3:
cuadrilateros()
        # Any other value
else:
print("\nError: Operación incorrecta.")
time.sleep(2)
print("Regresando al menú principal...")
time.sleep(1)
main()
    # Trapezoid
elif option == 5:
print("\nEn construcción...")
time.sleep(2)
main()
    # Trapezium (quadrilateral with no parallel sides)
elif option == 6:
print("\nEn construcción...")
time.sleep(2)
main()
    # Choose another polygon
elif option == 7:
fig_2d()
    # Any other value
else:
print("\nError: Operación incorrecta.")
time.sleep(2)
print("Regresando al menú principal...")
time.sleep(1)
main()
cuadrilateros()
    # Circle
elif option == 2:
print("\nSeleccione aspecto a calcular.")
print("1 - Área")
print("2 - Perímetro")
print("3 - Diámetro")
print("4 - Escoger otra figura")
option = int(input("Opción: "))
        # Area of the circle
if option == 1:
print("\nA continuación ingrese la medida del radio del círculo.")
radio = float(input("Radio: "))
area_cir = math.pi * (radio ** 2)
print(f"\nLa medida del área del círculo es de {area_cir:.3f} unidades cuadradas.")
print(f"Con π como incógnita: {radio ** 2}π unidades cuadradas.")
time.sleep(3)
print("Volviendo al menú de polígonos...")
time.sleep(2)
fig_2d()
        # Perimeter (circumference) of the circle
elif option == 2:
print("\nA continuación ingrese la medida del radio del círculo.")
radio = float(input("Radio: "))
perim_circ = 2 * radio * math.pi
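            # Circumference C = 2 * pi * r; e.g. radio = 5 gives about 31.42 units.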
| |
import os
import sys
sys.path.append('.')
sys.path.append('..')
import warnings
warnings.filterwarnings("ignore")
from datetime import datetime
import matplotlib
matplotlib.use("TkAgg")
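# The TkAgg backend is selected here, before any figure or canvas is created, so that
# matplotlib renders into the tkinter window built below.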
import matplotlib.lines as lines
import matplotlib.image as mpimg
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
import tkinter as tk
from tools.get_dates_umich import get_dates_umich
from tools.staticmap_for_gps import map_for_gps
from tools.data_manager import DataManager
from tools.view_lidar import hokuyo_plot
from tools.view_lidar import threshold_lidar_pts
class VisualizerFrame(tk.Frame):
"""
This is the main window where the robot data is seen by the user.
"""
def __init__(self, parent):
tk.Frame.__init__(self, parent)
self.parent = parent
self.label = None
self.ax_map = None
self.ax_gps = None
self.ax_lidar = None
self.map_plot = None
self.gps_plot = None
self.lidar_plot = None
self.canvas = None
self.data_manager = None
self.gps_data = None
self.lidar_data = None
self.gps_on = False
self.map_on = False
self.lidar_on = False
self.map_image = None
self.widgets()
def widgets(self):
"""
Set up widgets for the frame.
:return: None
"""
self.label = tk.Label(self, text="Viewer")
self.label.pack(side=tk.TOP)
self.fig = Figure(figsize=(5, 4), dpi=100)
self.ax_map = self.fig.add_subplot(111)
self.ax_gps = self.fig.add_subplot(111)
self.ax_lidar = self.fig.add_subplot(111)
self.canvas = FigureCanvasTkAgg(self.fig, master=self.master)
self.canvas.draw()
self.canvas.get_tk_widget().pack(fill=tk.BOTH, expand=True)
def callback_initialize_data_manager(self):
"""
This callback responds to the *Load Data* button.
:return: None
"""
date = self.parent.toolbar.date.get()
if self.data_manager is None:
self.setup_data(date)
else:
            if self.data_manager.date != date:  # compare values; 'is not' only checked object identity
os.chdir('../..') # TODO patched here - add this to end of load_gps() / load_lidar() functions
self.setup_data(date)
else:
pass
def setup_data(self, date):
"""
This function sets up all of the data (except lidar) needed by the application.
:param date: Determines which date from the robotics dataset to use.
:type date: str.
:return: None
"""
if self.data_manager is not None:
os.chdir(self.data_manager.owd)
self.ax_gps.clear()
self.ax_map.clear()
self.ax_lidar.clear()
self.canvas.draw()
self.gps_on = False
self.map_on = False
self.lidar_on = False
self.parent.set_status('DM_START', hold=True)
self.data_manager = DataManager(date)
self.data_manager.setup_data_files('sensor_data')
self.data_manager.load_gps()
x_coords, y_coords = map_for_gps(self.data_manager.data_dict, self.data_manager.data_dir)
self.lidar_data = None
self.gps_data = [x_coords, y_coords] # in image coords
self.map_image = mpimg.imread(os.path.join(self.data_manager.data_dir, 'map.png'))
self.label.config(text='Viewer')
self.parent.set_status('DM_READY')
def callback_gps_on(self):
"""
This callback responds to the *On* button under the *GPS Control* menu.
:return: None
"""
if not self.lidar_on:
if not self.gps_on:
self.gps_on = True
self.parent.set_status('GPS_START')
idx = self.get_idx_for_gps_update()
self.update_timestamp(idx)
self.gps_plot = self.ax_gps.plot(self.gps_data[0][:idx], self.gps_data[1][:idx], 'r')[0]
                self.canvas.draw()  # FigureCanvasTkAgg.show() was removed from matplotlib; draw() is used elsewhere in this class
self.parent.set_status('GPS_READY')
else:
pass
else:
self.callback_lidar_off()
self.callback_gps_on()
def callback_gps_off(self):
"""
This callback responds to the *Off* button under the *GPS Control* menu.
:return: None
"""
if self.gps_on:
self.gps_on = False
self.update_gps(0)
self.label.config(text='Viewer')
self.parent.set_status('GPS_REMOVE')
else:
pass
def callback_gps_slider_changed(self, event):
"""
This callback responds to the scale position changing under the *GPS Control* menu.
:return: None
"""
self.gps_on = True
idx = self.get_idx_for_gps_update()
self.update_gps(idx)
self.update_timestamp(idx)
self.parent.set_status('GPS_UPDATE')
def update_gps(self, idx):
"""
This function updates the GPS data that is displayed in the main viewing window.
:param idx: Index into the array of GPS data that is to be displayed.
:type idx: int.
:return: None
"""
if self.gps_data is not None:
self.gps_plot.set_xdata(self.gps_data[0][:idx])
self.gps_plot.set_ydata(self.gps_data[1][:idx])
self.canvas.draw()
else:
pass
def update_timestamp(self, idx):
"""
This function updates the timestamp in the main viewing window.
:param idx: Index into the array of GPS data to be used for retrieval of the time stamp.
:type idx: int.
:return: None
"""
curr_tstamp = self.get_timestamp_for_gps_update(idx)
self.label.config(text=str('time stamp: ' + curr_tstamp))
def get_idx_for_gps_update(self):
"""
This function returns the index to be used for updating the GPS data.
:return: int -- the index to be used for the GPS update
"""
slider_val = self.parent.control.gps_control.selection_scale.get()
idx_ratio = len(self.gps_data[0]) / 100
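        # The slider runs 0-100, so idx_ratio rescales it onto the GPS array;
        # e.g. (illustrative numbers) slider = 50 with 2000 samples maps to index 1000.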
return int(slider_val * idx_ratio)
def get_timestamp_for_gps_update(self, gps_data_idx):
"""
This function returns the timestamp in a readable format for the given GPS data index.
:param gps_data_idx: Index into the array of GPS data to be used for retrieval of the time stamp.
:return: str -- the timestamp
"""
idx_ratio = len(self.data_manager.data_dict['gps']['tstamp']) / len(self.gps_data[0])
idx = int(gps_data_idx * idx_ratio) - 1
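        # The raw tstamp values appear to be microseconds since the epoch (an assumption
        # based on this dataset), hence the division by 1e6 below.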
ts = int(self.data_manager.data_dict['gps']['tstamp'][idx] / 1000000)
return datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
def callback_map_on(self):
"""
This callback responds to the *On* button under the *Map Control* menu.
:return: None
"""
if not self.lidar_on:
if not self.map_on:
self.map_on = True
if self.map_image is not None:
self.ax_map.imshow(self.map_image)
# draw scale on the map
map_scale = self.get_map_scale()
line = lines.Line2D([0, 200], [0, 0], linewidth=4, color='b')
self.ax_map.add_line(line)
distance = map_scale * 200
if distance > 1000:
scale_str = "scale = " + str(float("%.2f" % (distance / 1000))) + " kilometers"
else:
scale_str = "scale = " + str(float("%.2f" % (distance))) + " meters"
self.ax_map.text(0, -10, scale_str, fontsize=8)
self.canvas.draw()
self.parent.set_status('MAP_READY')
else:
self.parent.set_status('MAP_ERROR')
else:
pass
else:
self.callback_lidar_off()
self.callback_map_on()
def callback_map_off(self):
"""
This callback responds to the *Off* button under the *Map Control* menu.
:return: None
"""
if self.map_on:
self.map_on = False
self.ax_map.clear()
if self.gps_on:
self.gps_on = False
self.callback_gps_on() # because the previous line clears both map and gps
self.canvas.draw()
else:
pass
def callback_date_changed(self):
"""
This callback responds to a change in the date selection menu in the toolbar.
:return: None
"""
new_date = self.parent.toolbar.date.get() # Need to call get() because this is a StringVar object
if self.parent.toolbar.date is not new_date:
self.parent.toolbar.date.set(new_date)
else:
pass
def get_map_scale(self):
"""
This function calculates the map scale in units of meters per pixel.
:return: float64 -- map scale (m/px)
"""
k = 111000 # meters per degree of latitude (approx.)
lat_range = self.data_manager.data_dict['gps_range'][0]
d_lat_range = abs(lat_range[0] - lat_range[1])
d_x_pixels = abs(max(self.gps_data[0]) - min(self.gps_data[0]))
map_scale = d_lat_range * k / d_x_pixels
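        # Worked example with made-up numbers: a 0.05 degree latitude span covering
        # 500 px gives 0.05 * 111000 / 500 ~= 11.1 m/px, so the 200 px scale bar drawn
        # in callback_map_on would represent roughly 2.2 km.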
return map_scale # units of meters per pixel
def callback_lidar_slider_changed(self, event):
"""
This callback responds to the scale position changing under the *Lidar Control* menu.
:return: None
"""
self.lidar_on = True
idx = self.get_idx_for_lidar_update()
self.update_lidar(idx)
# self.update_timestamp(idx)
self.parent.set_status('Lidar updated')
def get_idx_for_lidar_update(self):
"""
This function returns the index to be used for updating the Lidar data.
:return: int -- the index to be used for the Lidar update
"""
slider_val = self.parent.control.lidar_control.selection_scale.get()
idx_ratio = len(self.lidar_data) / 100
return max(int(slider_val * idx_ratio) - 1, 0)
def update_lidar(self, idx):
"""
This function updates the Lidar data that is displayed in the main viewing window.
:param idx: Index into the array of Lidar data that is to be displayed.
:type idx: int.
:return: None
"""
if self.lidar_data is not None:
yt, xt, _ = threshold_lidar_pts(self.lidar_data[idx])
self.lidar_plot.set_xdata(xt)
self.lidar_plot.set_ydata(yt)
self.canvas.draw()
else:
pass
def callback_lidar_on(self):
"""
This callback responds to the *On* button under the *Lidar Control* menu.
:return: None
"""
if not self.lidar_on:
self.lidar_on = True
self.callback_map_off()
self.callback_gps_off()
if self.data_manager is None:
self.callback_initialize_data_manager()
            if 'lidar' not in self.data_manager.data_dict:
self.data_manager.setup_data_files('hokuyo')
pickled = True
delete_pickle = False
self.data_manager.load_lidar(4000, pickled, delete_pickle) # TODO - global constant for lidar samples
self.lidar_data = self.data_manager.data_dict['lidar']
xlimits, ylimits = [-32, 32], [-32, 32]
self.ax_lidar.set_xlim(xlimits)
self.ax_lidar.set_ylim(ylimits)
hokuyo_plot(self.ax_lidar)
yt, xt, _ = threshold_lidar_pts(self.lidar_data[0])
self.lidar_plot = self.ax_lidar.plot(xt, yt, 'r.')[0]
            self.canvas.draw()  # draw() replaces the removed FigureCanvasTkAgg.show()
else:
pass
def callback_lidar_off(self):
"""
This callback responds to the *Off* button under the *Lidar Control* menu.
:return: None
"""
if self.lidar_on:
self.lidar_on = False
self.ax_lidar.clear()
self.canvas.draw()
else:
pass
class ToolbarFrame(tk.Frame):
"""
This class represents the toolbar at the top of the window.
"""
def __init__(self, parent):
tk.Frame.__init__(self, parent)
self.parent = parent
self.date = None
self.dates = get_dates_umich()
self.load_button = None
self.option_menu = None
self.widgets()
def widgets(self):
"""
Set up widgets for the frame.
:return: None
"""
self.dates = get_dates_umich()
self.load_button = tk.Button(self, text="Load Data")
self.load_button.pack(side=tk.LEFT, padx=2, pady=2)
self.date = tk.StringVar(self)
self.date.set(self.dates[24])
self.option_menu = tk.OptionMenu(self, self.date, *self.dates, command=self.callback_date_changed)
self.option_menu.pack(side=tk.LEFT, padx=2, pady=2)
def bind_widgets(self):
"""
Bind widgets to their callback functions.
:return: None
"""
self.load_button.config(command=self.parent.window.callback_initialize_data_manager)
def callback_date_changed(self, event):
self.parent.window.callback_date_changed()
class ControlFrame(tk.Frame):
"""
This class represents the controls on the right hand side of the main
window. There are two nested classes for the slam and map controls.
"""
def __init__(self, parent):
tk.Frame.__init__(self, parent, width=400)
self.parent = parent
self.root = parent
self.slam_control = None
self.map_control = None
self.lidar_control = None
self.widgets()
class GpsControlFrame(tk.Frame):
def __init__(self, parent, root):
tk.Frame.__init__(self, parent, width=400)
self.parent = parent
self.root = root
self.selection_scale = None
self.scale_val = None
self.on_button = None
self.off_button = None
self.widgets()
def widgets(self):
"""
Set up widgets for the frame.
:return: None
"""
label = tk.Label(self, text="GPS Control", bg="blue", fg="white")
label.pack(side=tk.TOP, fill=tk.X)
self.selection_scale = tk.Scale(self, orient=tk.HORIZONTAL, to=100, variable=self.scale_val)
self.selection_scale.set(100)
self.selection_scale.pack(side=tk.TOP)
self.on_button = tk.Button(self, text="On", bg="green", fg="white")
self.on_button.pack(side=tk.LEFT)
self.off_button = tk.Button(self, text="Off", bg="red", fg="white")
self.off_button.pack(side=tk.RIGHT)
def bind_widgets(self):
"""
Bind widgets to their callback functions.
:return: None
"""
self.on_button.config(command=self.root.window.callback_gps_on)
self.off_button.config(command=self.root.window.callback_gps_off)
self.selection_scale.bind("<ButtonRelease-1>", self.root.window.callback_gps_slider_changed)
| |
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 03 13:46:22 2016
@author: kbefus
"""
# -*- coding: utf-8 -*-
"""
Created on Mon May 16 09:19:36 2016
@author: kbefus
"""
import sys,os
import numpy as np
import time
from shutil import copyfile
res_dir = r'/mnt/data2/CloudStation'
code_dir = os.path.join(res_dir,r'ca_slr/scripts')
sys.path.insert(1,code_dir)
from cgw_model import cgw_package_tools as cpt
from cgw_model.cgw_utils import cgw_general_utils as cgu
from cgw_model.cgw_utils import cgw_raster_utils as cru
from cgw_model.cgw_utils import cgw_feature_utils as cfu
from cgw_model.cgw_modflow import cgw_mf_tools as cmft
from cgw_model.cgw_modflow import cgw_mf_utils as cmfu
from cgw_model.cgw_zonebudget import cgw_zb_tools as czbt
import geopandas as gpd
#from cgw_model.cgw_modpath import cgw_mp_tools as cmpt
#%% California inputs
all_start = time.time()
ca_regions = ['norca','paca','sfbay','cenca','soca']
out_research_dir = 'None'#r'C:\research\CloudStation\research\coastal_gw\ca_slr'
#out_main_model_dir=os.path.join(out_research_dir,'model')
research_dir_main = os.path.join(res_dir,'ca_slr')
#research_dir = r'C:\research\kbefus\ca_slr'
#research_dir = out_research_dir
research_dir = r'/mnt/762D83B545968C9F'
main_model_dir = os.path.join(research_dir,'model_lmsl_noghb')
out_main_model_dir = main_model_dir
data_dir = os.path.join(research_dir_main,'data')
nc_dir = os.path.join(main_model_dir,'nc_inputs')
ref_name = 'usgs.model.reference'
dem_date = '11Feb19'
elev_dir = os.path.join(data_dir,'gw_dems{}'.format(dem_date))
nmodel_domains_shp = os.path.join(data_dir,'ca_{}_slr_gw_domains_{}.shp'.format('n',dem_date))
ndomain_df = gpd.read_file(nmodel_domains_shp)
smodel_domains_shp = os.path.join(data_dir,'ca_{}_slr_gw_domains_{}.shp'.format('s',dem_date))
sdomain_df = gpd.read_file(smodel_domains_shp)
nallmodels = sdomain_df.shape[0]+ndomain_df.shape[0]
id_col = 'Id'
sealevel_elevs = np.hstack([np.arange(0,2.25,.25),2.5,3.,5.])# m
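# i.e. 0.00-2.00 m in 0.25 m steps plus 2.5, 3.0 and 5.0 m: 12 sea-level scenarios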
# ----------- Model run options --------------
overwrite_switch = False # Delete model directory and all contents prior to run, ***use at own risk***
force_write_inputs= True # Force writing of modflow input files
plot_results = True # Plot results of modflow model run
prep_zb,run_zb = False,False # Prepare and run zonebudget analysis
run_mf = True
run_mp = False
clear_all = True
load_nc_grids_bool=True # Tries to load grid files from previous run
save_nc_grids=True # automatically switches to false if nc_grid is loaded
model_type = 'fw' # 'swi' or 'fw'
# swi options
run_swi = False
plt_swi=False
zb_swi = False
run_swi_model = False
solver = 'nwt'
use_solver_dir = False
# ----------- Model parameterization -----------
# Flow options
#v_ani = None # if provided by spatial datasets
v_ani = 10. # kh/kv, vertical anisotropy
porosity = 0.2
Kh_vals = [0.1,1.,10.]
#Kh = 1. #m/day, use None if supplying raster data
min_hk = 1e-4
k_decay = 1.
# Discretization options
nlay = 1#None for all, len(layer_thick)
cell_spacing = 10. # meters
layer_thick = 50.
elev_thick_min = -50# if negative: elevation, if positive: thickness, or None
min_surface_elev = None#-100.
min_zthick = .3048
# Time options
nyears = 10. # rough time in years, will be a few days more than this to get an integer number of days
ndays = np.ceil(nyears*365.25)
nper,nstp = 1, 1
perlen = int(np.ceil(ndays/nstp)*nstp)  # builtin int; np.int was removed in newer NumPy releases
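# e.g. nyears = 10 -> ndays = ceil(10 * 365.25) = 3653, and with nstp = 1 the single
# steady-state stress period is 3653 days long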
steady=True
rchg_ratio = 1.
# ZoneBudget options
max_zb_layer = 1#m_maker.dict_obj.zbot.shape[0] # None, or 1 to nlay
# Sea level and salinity inputs
datum_type = 'LMSL'
cs_sl_sal = 'NAD83'
sl_fname = os.path.join(data_dir,'sea_level','CA_sl_{}_12Feb18.txt'.format(datum_type))
sl_data,_ = cru.read_txtgrid(sl_fname)
sl_data = np.array(sl_data) # lon, lat, sl
sal_fname = os.path.join(data_dir,'salinity','CA_sal_12Feb18.txt')
sal_data,_ = cru.read_txtgrid(sal_fname)
sal_data = np.array(sal_data) # lon, lat, density
rerun=True
rerun_sl_dens = True
rerun_older_date = "Sep 12 2019 06:00AM"
date_fmt = '%b %d %Y %I:%M%p'
rr_date = time.strptime(rerun_older_date,date_fmt)
rr_date_s = time.mktime(rr_date)
# Make model_management file to keep track of currently and previously run models
active_date = '24Oct19'
model_name_fmt = '{0:s}_{1:d}_{2}_slr{3:3.2f}m_Kh{4:3.2f}_{5:.0f}m'
other_model_name_fmt = '{0:s}_{1:d}_{2}_slr{3:3.2f}m_Kh{4:3.1f}_{5:.0f}m'
dirname_fmt = '_{0}_res{1}m_sl{2:3.2f}m_Kh{3:3.2f}'
other_dirname_fmt = '_{0}_res{1}m_sl{2:3.2f}m_Kh{3:3.1f}'
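# Example with illustrative values: model_name_fmt.format('sfbay', 3, 'LMSL', 1.0, 10.0, 10.)
# yields 'sfbay_3_LMSL_slr1.00m_Kh10.00_10m'.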
#%%
model_mng_file = os.path.join(main_model_dir,'model_management_{0}.txt'.format(active_date))
for Kh in Kh_vals:
# if Kh==Kh_vals[0]:
# fmt = model_name_fmt
# otherfmt = other_model_name_fmt
# dirfmt = dirname_fmt
# else:
fmt = other_model_name_fmt
otherfmt = model_name_fmt
dirfmt = other_dirname_fmt
for sealevel_elev in sealevel_elevs:
for ca_region in ca_regions: # loop through CA regions
# ----------- Region directory information -----------
region_dir = os.path.join(main_model_dir,ca_region)
out_region_dir = os.path.join(out_main_model_dir,ca_region)
results_dir = os.path.join(region_dir,'output')
use_other_dir = dirfmt.format(datum_type,cell_spacing,sealevel_elev,Kh)#'_{}lay'.format(nlay)
if use_solver_dir:
model_inputs_dir = os.path.join(region_dir,'model{}'.format(solver))
model_outputs_dir = os.path.join(region_dir,'output{}'.format(solver))
elif use_other_dir is not None:
model_inputs_dir = os.path.join(region_dir,'model{}'.format(use_other_dir))
model_outputs_dir = os.path.join(region_dir,'output{}'.format(use_other_dir))
else:
model_inputs_dir = os.path.join(region_dir,'model')
model_outputs_dir = os.path.join(region_dir,'output')
figs_dir = os.path.join(model_outputs_dir,'figures')
for temp_dir in [figs_dir,nc_dir,model_inputs_dir,model_outputs_dir]:
if not os.path.isdir(temp_dir):
os.makedirs(temp_dir)
# Define model information for region
if ca_region in ['soca']:
domain_df = sdomain_df.copy()
model_domains_shp = smodel_domains_shp
r_prefix = 's'
else:
domain_df = ndomain_df.copy()
model_domains_shp = nmodel_domains_shp
r_prefix = 'n'
# Select only models for current region
active_models = domain_df.loc[domain_df['ca_region']==ca_region,id_col].values
nmodels = domain_df.shape[0]
budget_outfile = os.path.join(model_outputs_dir,'{}_budget_summary.csv'.format(ca_region))
if os.path.isfile(budget_outfile):
model_budget_df = czbt.zbu.pd.read_csv(budget_outfile)
model_budget_df.set_index('model_name',inplace=True)
else:
model_budget_df = czbt.zbu.pd.DataFrame()
# ----------- Supporting spatial data -----------
rchg_fname = os.path.join(data_dir,"{}_wcoast_rc_eff_0011_utm.tif".format(r_prefix))
# Set model projection for region
elev_fname = os.path.join(elev_dir,'{0}_{1:02.0f}_dem_landfel.tif'.format(ca_region,active_models[0]))
temp_proj = cru.gdal.Open(elev_fname)
in_proj = temp_proj.GetProjectionRef()
temp_proj = None
# Project salinity and sea-level data
sal_xy_proj = cru.projectXY(sal_data[:,:2],inproj=cs_sl_sal,outproj=in_proj)
sl_xy_proj = cru.projectXY(sl_data[:,:2],inproj=cs_sl_sal,outproj=in_proj)
sal_data_proj = np.column_stack([sal_xy_proj,sal_data[:,2]])
sl_data_proj = np.column_stack([sl_xy_proj,sl_data[:,2]])
for active_domain in active_models: # or in active_domains
model_start = time.time()
active_domain_data = domain_df.loc[domain_df[id_col]==active_domain,:]
# Set model output directories
model_name = fmt.format(ca_region,
active_domain,datum_type,
sealevel_elev,Kh,cell_spacing)
model_in_dir = os.path.join(model_inputs_dir,model_name)
model_out_dir = os.path.join(model_outputs_dir,model_name)
# Print loop info
print('------------- Model {} of {} -------------'.format(active_domain+1,nallmodels))
print('Model: {}, sea level = {} m'.format(model_name,sealevel_elev))
print('Start time: {}'.format(time.strftime("%Y-%m-%d %H:%M:%S",time.localtime())))
main_hds = os.path.join(model_out_dir,'{}.hds'.format(model_name))
out_hds = os.path.join(out_region_dir,os.path.basename(model_outputs_dir),model_name,'{}.hds'.format(model_name))
new_nc_fname = os.path.join(model_in_dir,'{}.nc'.format(model_name))
new_nc_folder = os.path.join(nc_dir,model_name)
if os.path.isfile(main_hds):
if os.stat(main_hds).st_size>0 and os.path.getmtime(main_hds) > rr_date_s:
running_bool,model_list = cgu.update_txt(model_mng_file,model_name) # write model_name into management file
print('Model already run. Moving on to next')
print('--------------------------------------------\n')
if os.path.isfile(new_nc_fname) and not os.path.isfile(os.path.join(new_nc_folder,ref_name)): # copy over nc file to nc_folder
# Copy nc file into general shared folder
if not os.path.isdir(new_nc_folder):
os.makedirs(new_nc_folder)
store_nc_fname = os.path.join(new_nc_folder,os.path.basename(new_nc_fname))
copyfile(new_nc_fname,store_nc_fname)
copyfile(os.path.join(model_in_dir,ref_name),os.path.join(new_nc_folder,ref_name))
continue # skip this file
elif os.path.isfile(out_hds): # and not rerun
if os.stat(out_hds).st_size>0 and os.path.getmtime(main_hds) > rr_date_s:
print('Model already run. Moving on to next')
print('--------------------------------------------\n')
continue # skip this file
# Check management file
if os.path.isfile(model_mng_file):
running_bool,model_list = cgu.update_txt(model_mng_file,model_name)
if running_bool:
print('Model already run or running. Moving on to next')
print('--------------------------------------------\n')
continue
other_model_name = otherfmt.format(ca_region,
active_domain,datum_type,
sealevel_elev,Kh,cell_spacing)
running_bool,model_list = cgu.update_txt(model_mng_file,other_model_name)
if running_bool:
print('Model already run or running. Moving on to next')
print('--------------------------------------------\n')
continue
else:
# Make new file
running_bool,model_list = cgu.update_txt(model_mng_file,model_name)
for temp_dir in [model_in_dir,model_out_dir]:
if not os.path.exists(temp_dir):
os.makedirs(temp_dir)
# See if nc file exists for other Kh models and copy nc file if so
nc_found = False
if not os.path.isfile(new_nc_fname):
for other_kh in Kh_vals:
for other_sl in sealevel_elevs:
for mnamefmt in [otherfmt,fmt]:
if not nc_found:
other_model_name = mnamefmt.format(ca_region,
active_domain,datum_type,
other_sl,other_kh,cell_spacing)
model_nc_dir = os.path.join(nc_dir,other_model_name)
if os.path.isdir(model_nc_dir):
model_nc_fname=os.path.join(model_nc_dir,'{}.nc'.format(other_model_name))
if os.path.exists(model_nc_fname):
# copy files
copyfile(model_nc_fname,new_nc_fname)
copyfile(os.path.join(model_nc_dir,ref_name),os.path.join(model_in_dir,ref_name))
nc_found=True
# ----------- Develop model domain -----------
elev_fname = os.path.join(elev_dir,'{0}_{1:02.0f}_dem_landfel.tif'.format(ca_region,active_domain))
if ca_region in ['soca']:
active_domain = active_domain-domain_df.iloc[0][id_col]
domain_dict = {'cell_spacing':cell_spacing,'input_dir':data_dir,'domain_shp':domain_df,
'active_domain':active_domain,'elev_fname':elev_fname,'rchg_fname':rchg_fname,
'k_fnames':None,'model_in_dir':model_in_dir,'sea_level':sealevel_elev,
'in_proj':in_proj,'use_ll':False}
start_time = time.time()
m_domain = cpt.Model_domain_driver(**domain_dict)
m_domain.run_make_domain(load_vk=False,save_nc_grids=save_nc_grids,
load_nc_grids_bool=load_nc_grids_bool)
print('Grid import took {0:4.1f} min'.format((time.time()-start_time)/60.))
if not hasattr(m_domain,'density') or rerun_sl_dens:
# Make density and sea level grids
print('Loading sea level and seawater density data...')
g_dict = {'xi':(m_domain.cc_proj[0],m_domain.cc_proj[1]),'method':'nearest'}
# if active_domain in [0]:
# g_dict['method'] = 'nearest'
# else:
# g_dict['method'] = 'linear'
buffer0 = 8e3 # m buffer around model
temp_extent = [m_domain.cc_proj[0].min(),m_domain.cc_proj[0].max(),
m_domain.cc_proj[1].min(),m_domain.cc_proj[1].max()]
inpts = (sl_data_proj[:,:1]<=temp_extent[1]+buffer0) & (sl_data_proj[:,:1]>=temp_extent[0]-buffer0) \
& (sl_data_proj[:,1:2]<=temp_extent[3]+buffer0) & (sl_data_proj[:,1:2]>=temp_extent[2]-buffer0)
m_domain.sea_level = cru.griddata(sl_data_proj[inpts.ravel(),:2],sl_data_proj[inpts.ravel(),2:],**g_dict).squeeze()+sealevel_elev
#m_domain.sea_level = np.median(sl_data_proj[inpts.ravel(),2:])+sealevel_elev
# g_dict['method'] = 'nearest'
inpts = (sal_data_proj[:,:1]<=temp_extent[1]+buffer0) & (sal_data_proj[:,:1]>=temp_extent[0]-buffer0) \
& (sal_data_proj[:,1:2]<=temp_extent[3]+buffer0) & (sal_data_proj[:,1:2]>=temp_extent[2]-buffer0)
m_domain.density = cru.griddata(sal_data_proj[inpts.ravel(),:2],sal_data_proj[inpts.ravel(),2:],**g_dict).squeeze()
#m_domain.density = np.median(sal_data_proj[inpts.ravel(),2:])
# Assign cell types
assign_dict = {'domain_obj':m_domain,'ws_shp':None}
m_assignment = cpt.Assign_cell_types(**assign_dict)
m_assignment.run_assignment(assign_wb=False,use_ws=False)
#%%
# ----------- Create flopy objects -----------
# Model information
m_info_dict = {'workspace':model_in_dir,'model_name':model_name}
m_info = cmft.Model_info(**m_info_dict)
# Develop discretization in space and time
m_dis_dict = {'cell_spacing':cell_spacing,'nlay':nlay,'nrow':m_domain.nrow,
'ncol':m_domain.ncol,'delv':layer_thick,
'zthick_elev_min':elev_thick_min, 'min_elev':min_surface_elev,
}
m_dis = cmft.Model_dis(**m_dis_dict)
m_dis_time_dict = {'nper':nper,'perlen':perlen,'nstp':nstp,'steady':steady}
m_dis.time_dis(**m_dis_time_dict)
# Model specific changes to aid convergence
# if active_domain in [8]:
# # Set upper limit on recharge
# max_rchg = 0.2
# m_domain.recharge[m_domain.recharge>max_rchg] = max_rchg
#%%
# Make flopy package inputs
m_dicts_in = {'dis_obj':m_dis,'cell_types':m_assignment.cell_types,
'elev_array':m_domain.elevation,'rchg_array':m_domain.recharge,
'hk':Kh,'k_decay':k_decay,'porosity':porosity,'run_swi':run_swi,
'solver':solver,'rho_s':m_domain.density,'sea_level':m_domain.sea_level}
dis_kwargs = {'zthick':None,'min_zthick':min_zthick,
'smooth_zbot':True,'nlayer_continuous':1}
drn_kwargs = {'dampener':1e4,'elev_damp':1e0}
ghb_kwargs = {'skip_ghb':True}
rchg_kwargs = {'rchg_units':[2,5],'reduce_lowk_rchg':True,
'lowk_rchg_ratio':1,'lowk_val':1e-3,'rchg_ratio':rchg_ratio} # recharge in m/yr
bcf_kwargs = {'v_ani_ratio':v_ani,'iwdflg':1, 'smooth_k':True,
'hk_in':None,
'hk_botm':None,
'min_hk':min_hk,'nan_lower':False,'propkdown':True,
'hk_extent_clip':True}
bas_kwargs = {'use_fweq_head':False,'set_marine_to_constant':True,
'min_thick_calc':None,'ibound_thick_threshold':False,
'ibound_minelev_threshold':0.,'check_inactive':False}
gmg_kwargs = {'rclose':1e-2,'hclose':1e-2,
'mxiter':100,'iiter':100,
'isc':1,'ism':0} # loose convergance for iterations
nwt_kwargs = {'options':'COMPLEX','iprnwt':1,'headtol':1e-2,'maxiterout':1000}#,'linmeth':2}
if solver in ['nwt']:
upw_kwargs = bcf_kwargs.copy()
del upw_kwargs['iwdflg'] # Remove iwdflg
run_all_dicts_inputs = {'drn_kwargs':drn_kwargs,
'ghb_kwargs':ghb_kwargs,
'rchg_kwargs':rchg_kwargs,
'upw_kwargs':upw_kwargs,
'nwt_kwargs':nwt_kwargs,
'dis_kwargs':dis_kwargs,
'bas_kwargs':bas_kwargs}
else:
run_all_dicts_inputs = {'drn_kwargs':drn_kwargs,
'ghb_kwargs':ghb_kwargs,
'rchg_kwargs':rchg_kwargs,
'bcf_kwargs':bcf_kwargs,
'gmg_kwargs':gmg_kwargs,
'dis_kwargs':dis_kwargs,
'bas_kwargs':bas_kwargs}
m_dicts = cmft.Model_dict(**m_dicts_in)
m_dicts.run_all(**run_all_dicts_inputs)
# Force layers to no flow
# m_dicts.bas_dict['ibound'][1:,:,:] = 0
# m_dicts.bas_dict['ibound'][0,(m_dicts.ztop-m_dicts.zbot[0])<10.] = 0
# Make flopy packages
maker_dict = {'dict_obj':m_dicts,'info_obj':m_info,
'external_path':None,'output_path':model_out_dir}
m_maker = cmft.Model_maker(**maker_dict)
m_maker.run()
# setting inputs_exist=True will not overwrite inputs, only re-run model
run_dict = {'model_obj':m_maker,'run_mf':True,'inputs_exist':False}
m_run_obj = cmft.Model_run(**run_dict)
#%%
if force_write_inputs:
m_run_obj.inputs_exist=False
else:
# see if files already written, ghb written last
if os.path.isfile(os.path.join(model_in_dir,'{}.ghb'.format(model_name))):
m_run_obj.inputs_exist=True
if hasattr(m_dicts,'conversion_mask'):
# Need to reduce extent of zones
cgu.recursive_applybydtype(m_domain,
func=cgu.shrink_ndarray,
func_args={'mask_in':m_maker.dict_obj.conversion_mask,
'shape_out':m_maker.dict_obj.cell_types.shape})
# Re-write grid data if the size of the grids changed
m_domain.load_griddata(save_nc_grids=save_nc_grids)
ref_dict = {'model_info_dict':{'model_ws':model_in_dir,'model_name':model_name,
'xul':m_domain.cc_proj[0].data[0,0],# should | |
<filename>performance/benchmarks/bm_unpack_sequence.py
"""Microbenchmark for Python's sequence unpacking."""
import perf
from six.moves import xrange
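# A perf Runner would typically drive this function, e.g.
# runner.bench_time_func('unpack_sequence', do_unpacking, tuple(range(10)));
# the driver code is not included in this excerpt, so that call is only an illustrative sketch.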
def do_unpacking(loops, to_unpack):
range_it = xrange(loops)
t0 = perf.perf_counter()
for _ in range_it:
# 400 unpackings
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, | |
"""
Services module forms
"""
from django import forms
from django.db.models import Q
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
from anaf.core.conf import settings
from anaf.identities.models import Contact
from anaf.core.decorators import preprocess_form
from anaf.core.models import Object, ModuleSetting
from anaf.core.rendering import get_template_source
from anaf.messaging.models import Message
from anaf.messaging.emails import EmailMessage
from anaf.services.models import Ticket, TicketRecord, ServiceAgent, TicketStatus, Service, ServiceLevelAgreement, \
TicketQueue
preprocess_form()
class SettingsForm(forms.Form):
""" Administration settings form """
default_ticket_status = forms.ModelChoiceField(
label='Default Ticket Status', queryset=[])
default_ticket_queue = forms.ModelChoiceField(
label='Default Queue', queryset=[])
send_email_to_caller = forms.ChoiceField(label="Notify Caller By E-mail", choices=((True, _('Yes')),
(False, _('No'))),
required=False)
send_email_template = forms.CharField(
label="E-mail Template", widget=forms.Textarea, required=False)
def __init__(self, user, *args, **kwargs):
"""Sets choices and initial value"""
super(SettingsForm, self).__init__(*args, **kwargs)
# Translate
self.fields['default_ticket_status'].label = _('Default Ticket Status')
self.fields['default_ticket_queue'].label = _('Default Queue')
self.fields['send_email_to_caller'].label = _(
"Notify Caller By E-mail")
self.fields['send_email_template'].label = _("E-mail Template")
self.fields['default_ticket_status'].queryset = Object.filter_permitted(
user, TicketStatus.objects, mode='x')
self.fields['default_ticket_queue'].queryset = Object.filter_permitted(
user, TicketQueue.objects, mode='x')
try:
conf = ModuleSetting.get_for_module(
'anaf.services', 'default_ticket_status')[0]
default_ticket_status = TicketStatus.objects.get(
pk=int(conf.value))
self.fields[
'default_ticket_status'].initial = default_ticket_status.id
except Exception:
pass
try:
conf = ModuleSetting.get_for_module(
'anaf.services', 'default_ticket_queue')[0]
default_ticket_queue = TicketQueue.objects.get(pk=int(conf.value))
self.fields[
'default_ticket_queue'].initial = default_ticket_queue.id
except Exception:
pass
try:
conf = ModuleSetting.get_for_module(
'anaf.services', 'send_email_to_caller')[0]
self.fields['send_email_to_caller'].initial = conf.value
        except Exception:
self.fields[
'send_email_to_caller'].initial = settings.ANAF_SEND_EMAIL_TO_CALLER
# notification template
try:
conf = ModuleSetting.get_for_module(
'anaf.services', 'send_email_template')[0]
self.fields['send_email_template'].initial = conf.value
except Exception:
self.fields['send_email_template'].initial = get_template_source(
'services/emails/notify_caller.html')
def save(self):
"Form processor"
try:
ModuleSetting.set_for_module('default_ticket_status',
self.cleaned_data[
'default_ticket_status'].id,
'anaf.services')
ModuleSetting.set_for_module('default_ticket_queue',
self.cleaned_data[
'default_ticket_queue'].id,
'anaf.services')
ModuleSetting.set_for_module('send_email_to_caller',
self.cleaned_data[
'send_email_to_caller'],
'anaf.services')
ModuleSetting.set_for_module('send_email_template',
self.cleaned_data[
'send_email_template'],
'anaf.services')
return True
except Exception:
return False
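# A minimal usage sketch for SettingsForm, assuming a typical Django view; the view name,
# template path and render import below are illustrative and not part of this module:
#
#     def settings_edit(request):
#         form = SettingsForm(request.user, request.POST or None)
#         if request.method == 'POST' and form.is_valid():
#             form.save()  # persists the four ModuleSetting entries
#         return render(request, 'services/settings_edit.html', {'form': form})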
class MassActionForm(forms.Form):
""" Mass action form for Tickets """
status = forms.ModelChoiceField(queryset=[], required=False)
service = forms.ModelChoiceField(queryset=[], required=False)
queue = forms.ModelChoiceField(queryset=[], required=False)
delete = forms.ChoiceField(label=_("Delete"), choices=(('', '-----'), ('delete', _('Delete Completely')),
('trash', _('Move to Trash'))), required=False)
instance = None
def __init__(self, user, *args, **kwargs):
"""Sets allowed values"""
if 'instance' in kwargs:
self.instance = kwargs['instance']
del kwargs['instance']
super(MassActionForm, self).__init__(*args, **kwargs)
self.fields['status'].queryset = Object.filter_permitted(
user, TicketStatus.objects, mode='x')
self.fields['status'].label = _("Status")
self.fields['service'].queryset = Object.filter_permitted(
user, Service.objects, mode='x')
self.fields['service'].label = _("Service")
self.fields['queue'].queryset = Object.filter_permitted(
user, TicketQueue.objects, mode='x')
self.fields['queue'].label = _("Queue")
self.fields['delete'] = forms.ChoiceField(label=_("Delete"), choices=(('', '-----'),
('delete', _(
'Delete Completely')),
('trash', _('Move to Trash'))),
required=False)
def save(self, *args, **kwargs):
"""Process form"""
if self.instance and self.is_valid():
if self.cleaned_data['service']:
self.instance.service = self.cleaned_data['service']
if self.cleaned_data['status']:
self.instance.status = self.cleaned_data['status']
if self.cleaned_data['queue']:
self.instance.queue = self.cleaned_data['queue']
self.instance.save()
if self.cleaned_data['delete']:
if self.cleaned_data['delete'] == 'delete':
self.instance.delete()
if self.cleaned_data['delete'] == 'trash':
self.instance.trash = True
self.instance.save()
class TicketForm(forms.ModelForm):
""" Ticket form """
name = forms.CharField(
label='Title', widget=forms.TextInput(attrs={'size': '50'}))
def __init__(self, user, queue, agent, *args, **kwargs):
"Sets allowed values"
super(TicketForm, self).__init__(*args, **kwargs)
# Filter allowed selections for TicketForm
self.fields['reference'].required = False
self.fields['reference'].label = _("Reference")
self.fields['caller'].queryset = Object.filter_permitted(
user, Contact.objects)
self.fields['caller'].widget.attrs.update({'class': 'autocomplete',
'callback': reverse('contacts:location_add')})
self.fields['caller'].widget.attrs.update(
{'popuplink': reverse('contacts:contact-add')})
self.fields['caller'].label = _("Caller")
self.fields['assigned'].queryset = Object.filter_permitted(
user, ServiceAgent.objects, mode='x')
self.fields['assigned'].label = _("Assigned to")
self.fields['assigned'].help_text = ""
self.fields['assigned'].widget.attrs.update({'class': 'multicomplete',
'callback': reverse('services_ajax_agent_lookup')})
self.fields['assigned'].widget.attrs.update(
{'popuplink': reverse('services_agent_add')})
self.fields['status'].queryset = Object.filter_permitted(
user, TicketStatus.objects, mode='x')
self.fields['status'].label = _("Status")
self.fields['service'].queryset = Object.filter_permitted(
user, Service.objects, mode='x')
self.fields['service'].label = _("Service")
self.fields['queue'].queryset = Object.filter_permitted(
user, TicketQueue.objects, mode='x')
self.fields['queue'].label = _("Queue")
self.fields['sla'].queryset = Object.filter_permitted(
user, ServiceLevelAgreement.objects, mode='x')
self.fields['sla'].label = _("Service Level Agreement")
self.fields['resolution'].label = _("Resolution")
# Set default values if not editing
if 'instance' not in kwargs:
try:
self.fields['caller'].initial = user.get_contact().id
except Exception:
pass
if queue:
self.fields['queue'].initial = queue.id
if queue.default_ticket_status and queue.default_ticket_status in self.fields['status'].queryset:
self.fields[
'status'].initial = queue.default_ticket_status_id
else:
try:
conf = ModuleSetting.get_for_module(
'anaf.services', 'default_ticket_status')[0]
self.fields['status'].initial = int(conf.value)
                    except Exception:
pass
if queue.default_ticket_priority:
self.fields[
'priority'].initial = queue.default_ticket_priority
if queue.default_service:
self.fields['service'].initial = queue.default_service_id
try:
default_sla = ServiceLevelAgreement.objects.get(
service=queue.default_service, default=True)
if default_sla:
self.fields['sla'].initial = default_sla.id
                    except Exception:
pass
else:
try:
conf = ModuleSetting.get_for_module(
'anaf.services', 'default_ticket_status')[0]
self.fields['status'].initial = int(conf.value)
                except Exception:
pass
try:
conf = ModuleSetting.get_for_module(
'anaf.services', 'default_ticket_queue')[0]
self.fields['queue'].initial = int(conf.value)
                except Exception:
pass
self.fields['name'].label = _("Name")
self.fields['name'].widget.attrs.update({'class': 'duplicates',
'callback': reverse('services_ajax_ticket_lookup')})
self.fields['priority'].label = _("Priority")
self.fields['priority'].choices = ((5, _('Highest')), (
4, _('High')), (3, _('Normal')), (2, _('Low')), (1, _('Lowest')))
self.fields['urgency'].label = _("Urgency")
self.fields['urgency'].choices = ((5, _('Highest')), (
4, _('High')), (3, _('Normal')), (2, _('Low')), (1, _('Lowest')))
self.fields['details'].label = _("Details")
if not agent:
del self.fields['caller']
del self.fields['reference']
del self.fields['priority']
del self.fields['status']
del self.fields['queue']
del self.fields['sla']
del self.fields['assigned']
del self.fields['resolution']
class Meta:
"Ticket specified as model"
model = Ticket
fields = ('name', 'reference', 'caller', 'assigned', 'urgency', 'priority',
'status', 'service', 'sla', 'queue', 'details', 'resolution')
class TicketStatusForm(forms.ModelForm):
""" TicketStatus form """
name = forms.CharField(widget=forms.TextInput(attrs={'size': '30'}))
def __init__(self, user, *args, **kwargs):
"Sets allowed values"
super(TicketStatusForm, self).__init__(*args, **kwargs)
class Meta:
"TicketStatus specified as model"
model = TicketStatus
fields = ('name', 'active', 'hidden', 'details')
class TicketRecordForm(forms.ModelForm):
""" TicketRecord form """
def __init__(self, agent, ticket, *args, **kwargs):
super(TicketRecordForm, self).__init__(*args, **kwargs)
self.ticket = ticket
self.fields['body'].label = _("body")
self.fields['body'].required = True
self.fields['notify'].label = _("Notify caller")
self.fields['resolution'] = forms.BooleanField(
label=_("Set as Resolution"), required=False)
if not agent:
del self.fields['notify']
del self.fields['resolution']
def save(self, *args, **kwargs):
"Set Resolution if selected"
instance = super(TicketRecordForm, self).save(*args, **kwargs)
ticket = self.ticket
if 'resolution' in self.cleaned_data and self.cleaned_data['resolution']:
ticket.resolution = self.cleaned_data['body']
ticket.save()
# Send update if notify clicked
if 'notify' in self.cleaned_data and self.cleaned_data['notify'] and ticket.caller:
toaddr = ticket.caller.get_email()
if ticket.message or toaddr:
reply = Message()
reply.author = instance.sender
reply.body = instance.body
reply.auto_notify = False
if ticket.message:
reply.stream = ticket.message.stream
reply.reply_to = ticket.message
else:
reply.stream = ticket.queue.message_stream if ticket.queue else None
reply.title = "[#{0!s}] {1!s}".format(ticket.reference, ticket.name)
reply.save()
if not ticket.message:
ticket.message = reply
reply.recipients.add(ticket.caller)
email = EmailMessage(reply)
email.send_email()
return instance
class Meta:
"TicketRecord specified as model"
model = TicketRecord
fields = ['body', 'notify']
class QueueForm(forms.ModelForm):
""" Queue form """
name = forms.CharField(widget=forms.TextInput(attrs={'size': '50'}))
def __init__(self, user, *args, **kwargs):
"Sets allowed values"
super(QueueForm, self).__init__(*args, **kwargs)
manager = TicketQueue.objects
if 'instance' in kwargs:
instance = kwargs['instance']
manager = manager.exclude(Q(parent=instance) & Q(pk=instance.id))
self.fields['parent'].queryset = Object.filter_permitted(
user, manager, mode='x')
self.fields['default_service'].queryset = Object.filter_permitted(
user, Service.objects, mode='x')
self.fields['waiting_time'].help_text = "seconds"
self.fields['name'].label = _("Name")
self.fields['active'].label = _("Active")
self.fields['parent'].label = _("Parent")
self.fields['default_ticket_status'].label = _("Default ticket status")
self.fields['default_ticket_priority'].label = _(
"Default ticket priority")
self.fields['default_service'].label = _("Default service")
self.fields['waiting_time'].label = _("Waiting time")
self.fields['next_queue'].queryset = Object.filter_permitted(
user, TicketQueue.objects, mode='x')
self.fields['next_queue'].label = _("Next queue")
self.fields['ticket_code'].label = _("Ticket code")
self.fields['message_stream'].label = _("Message stream")
self.fields['message_stream'].widget.attrs.update(
{'popuplink': reverse('messaging_stream_add')})
self.fields['details'].label = _("Details")
class Meta:
"TicketQueue specified as model"
model = TicketQueue
fields = ('name', 'active', 'parent', 'default_ticket_status',
'default_ticket_priority', 'default_service', 'waiting_time',
'next_queue', 'ticket_code', 'message_stream', 'details')
class ServiceForm(forms.ModelForm):
""" Service form """
name = forms.CharField(widget=forms.TextInput(attrs={'size': '50'}))
def __init__(self, user, *args, **kwargs):
"Sets allowed values"
super(ServiceForm, self).__init__(*args, **kwargs)
manager = Service.objects
if 'instance' in kwargs:
instance = kwargs['instance']
manager = manager.exclude(Q(parent=instance) & Q(pk=instance.id))
self.fields['parent'].queryset = Object.filter_permitted(
user, manager, mode='x')
self.fields['name'].label = _("Name")
self.fields['parent'].label = _("Parent")
self.fields['details'].label = _("Details")
class Meta:
"Service specified as model"
model = Service
fields = ('name', 'parent', 'details')
class ServiceLevelAgreementForm(forms.ModelForm):
""" ServiceLevelAgreement form """
name = forms.CharField(widget=forms.TextInput(attrs={'size': '50'}))
def __init__(self, user, *args, **kwargs):
"Sets allowed values"
super(ServiceLevelAgreementForm, self).__init__(*args, **kwargs)
self.fields['name'].label = _("Name")
self.fields['response_time'].help_text = 'minutes'
self.fields['response_time'].widget.attrs.update({'size': 10})
self.fields['response_time'].label = _("Response time")
self.fields['uptime_rate'].help_text = 'percent'
self.fields['uptime_rate'].widget.attrs.update({'size': 5})
self.fields['uptime_rate'].label = _("Uptime rate")
self.fields['service'].queryset = Object.filter_permitted(
user, Service.objects, mode='x')
self.fields['service'].label = _("Service")
self.fields['client'].queryset = Object.filter_permitted(
user, Contact.objects, mode='x')
self.fields['client'].widget.attrs.update({'class': 'autocomplete',
'callback': reverse('contacts:location_add')})
self.fields['client'].widget.attrs.update(
{'popuplink': reverse('contacts:contact-add')})
self.fields['client'].label = _("Client")
self.fields['provider'].queryset = Object.filter_permitted(
user, Contact.objects, mode='x')
self.fields['provider'].widget.attrs.update({'class': 'autocomplete',
'callback': reverse('contacts:location_add')})
self.fields['provider'].widget.attrs.update(
{'popuplink': reverse('contacts:contact-add')})
self.fields['provider'].label = _("Provider")
self.fields['available_from'].initial = "09:00"
self.fields['available_from'].widget.attrs.update({'size': 10})
self.fields['available_from'].label = _("Available from")
self.fields['available_to'].initial = "18:00"
self.fields['available_to'].widget.attrs.update({'size': 10})
self.fields['available_to'].label = _("Available to")
contact = user.default_group.get_contact()
if contact:
self.fields['provider'].initial = contact.id
class Meta:
"ServiceLevelAgreement specified as model"
model = ServiceLevelAgreement
fields = ('name', 'service', 'client', 'provider', 'response_time', 'uptime_rate', 'available_from',
'available_to')
class AgentForm(forms.ModelForm):
""" Agent form """
def __init__(self, user, *args, **kwargs):
"Sets allowed values"
super(AgentForm, self).__init__(*args, **kwargs)
self.fields['related_user'].label = _("Related user")
self.fields['related_user'].widget.attrs.update({'class': 'autocomplete',
'callback': reverse('contacts:ajax_user_lookup')})
self.fields['active'].label = _("Active")
self.fields['occupied'].label = _("Occupied")
self.fields['available_from'].label = _("Available from")
self.fields['available_to'].label = _("Available to")
class Meta:
"Agent specified as model"
model = ServiceAgent
fields = ('related_user', 'active', 'occupied',
'available_from', 'available_to')
class FilterForm(forms.ModelForm):
""" Ticket Filters definition """
def __init__(self, user, skip=None, *args, **kwargs):
"Sets allowed values"
if skip is None:
skip = []
super(FilterForm, self).__init__(*args, **kwargs)
if 'caller' in skip:
del self.fields['caller']
else:
self.fields['caller'].queryset = Object.filter_permitted(
user, Contact.objects, mode='x')
self.fields['caller'].widget.attrs.update({'class': 'autocomplete',
'callback': reverse('contacts:location_add')})
self.fields['caller'].label = _("Caller")
if 'status' in skip:
del self.fields['status']
else:
self.fields['status'].queryset = Object.filter_permitted(
user, TicketStatus.objects, mode='x')
self.fields['status'].label = _("Status")
self.fields['service'].queryset = Object.filter_permitted(
user, Service.objects, mode='x')
self.fields['service'].label = _("Service")
self.fields['sla'].queryset = Object.filter_permitted(
user, ServiceLevelAgreement.objects, mode='x')
| |
<filename>gget/gget_seq.py
import logging
# Add and format time stamp in logging messages
logging.basicConfig(
format="%(asctime)s %(levelname)s %(message)s",
level=logging.INFO,
datefmt="%c",
)
# Mute numexpr threads info
logging.getLogger("numexpr").setLevel(logging.WARNING)
import numpy as np
# Custom functions
from .utils import rest_query, get_uniprot_seqs
from .gget_info import info
# Constants
from .constants import ENSEMBL_REST_API, UNIPROT_REST_API
def seq(
ens_ids,
transcribe=False,
seqtype=None,
isoforms=False,
save=False,
):
"""
Fetch nucleotide or amino acid sequence (FASTA) of a gene
(and all its isoforms) or transcript by Ensembl, WormBase or FlyBase ID.
Args:
- ens_ids One or more Ensembl IDs (passed as string or list of strings).
Also supports WormBase and FlyBase IDs.
- transcribe True/False (default: False -> returns nucleotide sequences).
Defines whether nucleotide or amino acid sequences are returned.
Nucleotide sequences are fetched from the Ensembl REST API server.
Amino acid sequences are fetched from the UniProt REST API server.
- isoforms If True, returns the sequences of all known transcripts (default: False).
(Only for gene IDs.)
- save If True, saves output FASTA to current directory (default: False).
Returns a list (or FASTA file if 'save=True') containing the requested sequences.
Deprecated arguments: 'seqtype' (use True/False flag 'transcribe' instead.)
"""
# Handle deprecated arguments
if seqtype:
logging.error(
"'seqtype' argument deprecated! Please use True/False argument 'transcribe' instead."
)
return
## Clean up arguments
# Clean up Ensembl IDs
# If single Ensembl ID passed as string, convert to list
if type(ens_ids) == str:
ens_ids = [ens_ids]
# Remove Ensembl ID version if passed
ens_ids_clean = []
temp = 0
for ensembl_ID in ens_ids:
# But only for Ensembl ID (and not for flybase/wormbase IDs)
if ensembl_ID.startswith("ENS"):
ens_ids_clean.append(ensembl_ID.split(".")[0])
if "." in ensembl_ID and temp == 0:
logging.info(
"We noticed that you may have passed a version number with your Ensembl ID.\n"
"Please note that gget seq will return information linked to the latest Ensembl ID version."
)
                temp += 1
else:
ens_ids_clean.append(ensembl_ID)
# Initiate empty 'fasta'
fasta = []
    ## Fetch nucleotide sequence
if transcribe is False:
# Define Ensembl REST API server
server = ENSEMBL_REST_API
# Define type of returned content from REST
content_type = "application/json"
# Initiate dictionary to save results for all IDs in
master_dict = {}
# Query REST APIs from https://rest.ensembl.org/
for ensembl_ID in ens_ids_clean:
# Create dict to save query results
results_dict = {ensembl_ID: {}}
# If isoforms False, just fetch sequences of passed Ensembl ID
if isoforms == False:
# sequence/id/ query: Request sequence by stable identifier
query = "sequence/id/" + ensembl_ID + "?"
# Try if query valid
try:
# Submit query; this will throw RuntimeError if ID not found
df_temp = rest_query(server, query, content_type)
# Delete superfluous entries
keys_to_delete = ["query", "id", "version", "molecule"]
for key in keys_to_delete:
# Pop keys, None -> do not raise an error if key to delete not found
df_temp.pop(key, None)
# Add results to main dict
results_dict[ensembl_ID].update({"seq": df_temp})
logging.info(
f"Requesting nucleotide sequence of {ensembl_ID} from Ensembl."
)
except RuntimeError:
logging.error(
f"ID {ensembl_ID} not found. "
"Please double-check spelling/arguments and try again."
)
# If isoforms true, fetch sequences of isoforms instead
if isoforms == True:
# Get ID type (gene, transcript, ...) using gget info
info_df = info(ensembl_ID, verbose=False)
# Check if Ensembl ID was found
                if info_df is None:
logging.warning(
f"ID '{ensembl_ID}' not found. Please double-check spelling/arguments."
)
continue
ens_ID_type = info_df.loc[ensembl_ID]["object_type"]
# If the ID is a gene, get the IDs of all its transcripts
if ens_ID_type == "Gene":
logging.info(
f"Requesting nucleotide sequences of all transcripts of {ensembl_ID} from Ensembl."
)
for transcipt_id in info_df.loc[ensembl_ID]["all_transcripts"]:
# Remove version number for Ensembl IDs (not for flybase/wormbase IDs)
if transcipt_id.startswith("ENS"):
transcipt_id = transcipt_id.split(".")[0]
# Try if query is valid
try:
# Define the REST query
query = "sequence/id/" + transcipt_id + "?"
# Submit query
df_temp = rest_query(server, query, content_type)
# Delete superfluous entries
keys_to_delete = ["query", "version", "molecule"]
for key in keys_to_delete:
# Pop keys, None -> do not raise an error if key to delete not found
df_temp.pop(key, None)
# Add results to main dict
results_dict[ensembl_ID].update(
{f"{transcipt_id}": df_temp}
)
except RuntimeError:
logging.error(
f"ID {transcipt_id} not found. "
"Please double-check spelling/arguments and try again."
)
                # If isoforms is True but the ID is not a gene, ignore the isoforms parameter
else:
# Try if query is valid
try:
# Define the REST query
query = "sequence/id/" + ensembl_ID + "?"
# Submit query
df_temp = rest_query(server, query, content_type)
# Delete superfluous entries
keys_to_delete = ["query", "id", "version", "molecule"]
for key in keys_to_delete:
# Pop keys, None -> do not raise an error if key to delete not found
df_temp.pop(key, None)
# Add results to main dict
results_dict[ensembl_ID].update({"seq": df_temp})
logging.info(
f"Requesting nucleotide sequence of {ensembl_ID} from Ensembl."
)
logging.warning("The isoform option only applies to gene IDs.")
except RuntimeError:
logging.error(
f"ID {ensembl_ID} not found. "
"Please double-check spelling/arguments and try again."
)
# Add results to master dict
master_dict.update(results_dict)
# Build FASTA file
for ens_ID in master_dict:
for key in master_dict[ens_ID].keys():
if key == "seq":
fasta.append(">" + ens_ID + " " + master_dict[ens_ID][key]["desc"])
fasta.append(master_dict[ens_ID][key]["seq"])
else:
fasta.append(
">"
+ master_dict[ens_ID][key]["id"]
+ " "
+ master_dict[ens_ID][key]["desc"]
)
fasta.append(master_dict[ens_ID][key]["seq"])
## Fetch amino acid sequences from UniProt
if transcribe:
if isoforms is False:
# List to collect transcript IDs
trans_ids = []
for ensembl_ID in ens_ids_clean:
# Get ID type (gene, transcript, ...) using gget info
info_df = info(ensembl_ID, verbose=False)
# Check that Ensembl ID was found
                if info_df is None:
logging.warning(
f"ID '{ensembl_ID}' not found. Please double-check spelling/arguments."
)
continue
ens_ID_type = info_df.loc[ensembl_ID]["object_type"]
# If the ID is a gene, use the ID of its canonical transcript
if ens_ID_type == "Gene":
# Get ID of canonical transcript
can_trans = info_df.loc[ensembl_ID]["canonical_transcript"]
if ensembl_ID.startswith("ENS"):
# Remove Ensembl ID version from transcript IDs and append to transcript IDs list
temp_trans_id = can_trans.split(".")[0]
trans_ids.append(temp_trans_id)
elif ensembl_ID.startswith("WB"):
# Remove added "." at the end of transcript IDs
temp_trans_id1 = ".".join(can_trans.split(".")[:-1])
# For WormBase transcript IDs, also remove the version number for submission to UniProt API
temp_trans_id = ".".join(temp_trans_id1.split(".")[:-1])
trans_ids.append(temp_trans_id)
else:
# Remove added "." at the end of other transcript IDs
temp_trans_id = ".".join(can_trans.split(".")[:-1])
trans_ids.append(temp_trans_id)
logging.info(
f"Requesting amino acid sequence of the canonical transcript {temp_trans_id} of gene {ensembl_ID} from UniProt."
)
# If the ID is a transcript, append the ID directly
elif ens_ID_type == "Transcript":
# For WormBase transcript IDs, remove the version number for submission to UniProt API
if ensembl_ID.startswith("T"):
trans_ids.append(".".join(ensembl_ID.split(".")[:-1]))
else:
trans_ids.append(ensembl_ID)
logging.info(
f"Requesting amino acid sequence of {ensembl_ID} from UniProt."
)
else:
logging.warning(
f"{ensembl_ID} not recognized as either a gene or transcript ID. It will not be included in the UniProt query."
)
                # Check if this is a WormBase ID:
if ensembl_ID.startswith("WB") or ensembl_ID.startswith("T"):
id_type = "wormbase"
# Check if this is a flybase ID:
elif ensembl_ID.startswith("FB"):
id_type = "flybase"
else:
id_type = "ensembl"
# Fetch the amino acid sequences of the transcript Ensembl IDs
df_uniprot = get_uniprot_seqs(UNIPROT_REST_API, trans_ids, id_type=id_type)
if isoforms == True:
# List to collect transcript IDs
trans_ids = []
for ensembl_ID in ens_ids_clean:
# Get ID type (gene, transcript, ...) using gget info
info_df = info(ensembl_ID, verbose=False)
# Check that Ensembl ID was found
                if info_df is None:
logging.warning(
f"ID '{ensembl_ID}' not found. Please double-check spelling/arguments."
)
continue
ens_ID_type = info_df.loc[ensembl_ID]["object_type"]
# If the ID is a gene, get the IDs of all isoforms
if ens_ID_type == "Gene":
# Get the IDs of all transcripts from the gget info results
for transcipt_id in info_df.loc[ensembl_ID]["all_transcripts"]:
if ensembl_ID.startswith("ENS"):
# Append transcript ID (without Ensembl version number) to list of transcripts to fetch
trans_ids.append(transcipt_id.split(".")[0])
elif ensembl_ID.startswith("WB"):
# For WormBase transcript IDs, remove the version number for submission to UniProt API
temp_trans_id = ".".join(transcipt_id.split(".")[:-1])
trans_ids.append(temp_trans_id)
else:
# Note: No need to remove the added "." at the end of unversioned transcripts here, because "all_transcripts" are returned without it
trans_ids.append(transcipt_id)
logging.info(
f"Requesting amino acid sequences of all transcripts of gene {ensembl_ID} from UniProt."
)
elif ens_ID_type == "Transcript":
# For WormBase | |
from copy import deepcopy
import json
import logging
import os
import random
import re
import string
import subprocess
import time
import urllib
import boto3
from boto3.dynamodb.conditions import Attr
from citrination_client import CitrinationClient
import globus_sdk
import jsonschema
import mdf_toolbox
import requests
from mdf_connect_server import CONFIG
logger = logging.getLogger(__name__)
# SQS setup
SQS_CLIENT = boto3.resource('sqs',
aws_access_key_id=CONFIG["AWS_KEY"],
aws_secret_access_key=CONFIG["AWS_SECRET"],
region_name="us-east-1")
SQS_QUEUE_NAME = CONFIG["SQS_QUEUE"]
assert SQS_QUEUE_NAME.endswith(".fifo")
SQS_ATTRIBUTES = {
"FifoQueue": 'true',
"ContentBasedDeduplication": 'true',
"ReceiveMessageWaitTimeSeconds": '20'
}
SQS_GROUP = CONFIG["SQS_GROUP_ID"]
# DynamoDB setup
DMO_CLIENT = boto3.resource('dynamodb',
aws_access_key_id=CONFIG["AWS_KEY"],
aws_secret_access_key=CONFIG["AWS_SECRET"],
region_name="us-east-1")
DMO_TABLES = {
"status": CONFIG["DYNAMO_STATUS_TABLE"],
"curation": CONFIG["DYNAMO_CURATION_TABLE"]
}
DMO_SCHEMA = {
# "TableName": DMO_TABLE,
"AttributeDefinitions": [{
"AttributeName": "source_id",
"AttributeType": "S"
}],
"KeySchema": [{
"AttributeName": "source_id",
"KeyType": "HASH"
}],
"ProvisionedThroughput": {
"ReadCapacityUnits": 20,
"WriteCapacityUnits": 20
}
}
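# Illustrative only: a sketch of how a table-creation helper elsewhere in the codebase
# might use the schema above (the helper itself is an assumption; the calls mirror the
# boto3 DynamoDB resource API):
#
#   table = DMO_CLIENT.create_table(TableName=DMO_TABLES["status"], **DMO_SCHEMA)
#   table.wait_until_exists()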
STATUS_STEPS = (
("sub_start", "Submission initialization"),
("old_cancel", "Cancellation of previous submissions"),
("data_download", "Connect data download"),
("data_transfer", "Data transfer to primary destination"),
("extracting", "Metadata extraction"),
("curation", "Dataset curation"),
("ingest_search", "MDF Search ingestion"),
("ingest_backup", "Data transfer to secondary destinations"),
("ingest_publish", "MDF Publish publication"),
("ingest_citrine", "Citrine upload"),
("ingest_mrr", "Materials Resource Registration"),
("ingest_cleanup", "Post-processing cleanup")
)
# Status codes indicating some form of not-failure,
# defined as "the step is over, and processing is continuing"
SUCCESS_CODES = [
"S",
"M",
"L",
"R",
"N"
]
def authenticate_token(token, groups, require_all=False):
# Function should be called from api_utils instead
raise NotImplementedError("Calling deprecated version")
"""Authenticate a token.
Arguments:
token (str): The token to authenticate with.
groups (str or list of str): The Globus Group UUIDs to require the user belong to.
The special value "public" is also allowed to always pass this check.
require_all (bool): When True, the user must be in all groups to succeed the
group check.
When False, the user must be in at least one group to succeed.
Default False.
Returns:
dict: Token and user info.
"""
if not token:
return {
"success": False,
"error": "Not Authenticated",
"error_code": 401
}
try:
token = token.replace("Bearer ", "")
auth_client = globus_sdk.ConfidentialAppAuthClient(CONFIG["API_CLIENT_ID"],
CONFIG["API_CLIENT_SECRET"])
auth_res = auth_client.oauth2_token_introspect(token, include="identities_set")
except Exception as e:
logger.error("Error authenticating token: {}".format(repr(e)))
return {
"success": False,
"error": "Authentication could not be completed",
"error_code": 500
}
if not auth_res:
return {
"success": False,
"error": "Token could not be validated",
"error_code": 401
}
# Check that token is active
if not auth_res["active"]:
return {
"success": False,
"error": "Token expired",
"error_code": 403
}
# Check correct scope and audience
if (CONFIG["API_SCOPE"] not in auth_res["scope"]
or CONFIG["API_SCOPE_ID"] not in auth_res["aud"]):
return {
"success": False,
"error": "Not authorized to MDF Connect scope",
"error_code": 401
}
# Finally, verify user is in appropriate group(s)
if isinstance(groups, str):
groups = [groups]
# Groups setup
groups_auth = deepcopy(CONFIG["GLOBUS_CREDS"])
groups_auth["services"] = ["groups"]
try:
nexus = mdf_toolbox.confidential_login(**groups_auth)["groups"]
except Exception as e:
logger.error("NexusClient creation error: {}".format(repr(e)))
return {
"success": False,
"error": "Unable to connect to Globus Groups",
"error_code": 500
}
# Globus Groups does not take UUIDs, only usernames, but Globus Auth uses UUIDs
# for identity-aware applications. Therefore, for Connect to be identity-aware,
# we must convert the UUIDs into usernames.
# However, the GlobusID "username" is not the email-like address, just the prefix.
user_usernames = set([iden["username"].replace("@globusid.org", "")
for iden in auth_client.get_identities(
ids=auth_res["identities_set"])["identities"]])
auth_succeeded = False
missing_groups = [] # Used for require_all compliance
group_roles = []
for grp in groups:
# public always succeeds
if grp.lower() == "public":
group_roles.append("member")
auth_succeeded = True
else:
            # Translate the extract/convert and admin group aliases
if grp.lower() == "extract" or grp.lower() == "convert":
grp = CONFIG["EXTRACT_GROUP_ID"]
elif grp.lower() == "admin":
grp = CONFIG["ADMIN_GROUP_ID"]
# Group membership checks - each identity with each group
for user_identifier in user_usernames:
try:
member_info = nexus.get_group_membership(grp, user_identifier)
assert member_info["status"] == "active"
group_roles.append(member_info["role"])
# Not in group or not active
except (globus_sdk.GlobusAPIError, AssertionError):
# Log failed groups
missing_groups.append(grp)
# Error getting membership
except Exception as e:
logger.error("NexusClient fetch error: {}".format(repr(e)))
return {
"success": False,
"error": "Unable to connect to Globus Groups",
"error_code": 500
}
else:
auth_succeeded = True
# If must be in all groups, fail out if any groups missing
if require_all and missing_groups:
logger.debug("Auth rejected: require_all set, user '{}' not in '{}'"
.format(user_usernames, missing_groups))
return {
"success": False,
"error": "You cannot access this service or organization",
"error_code": 403
}
if not auth_succeeded:
logger.debug("Auth rejected: User '{}' not in any group: '{}'"
.format(user_usernames, groups))
return {
"success": False,
"error": "You cannot access this service or organization",
"error_code": 403
}
# Admin membership check (allowed to fail)
is_admin = False
for user_identifier in user_usernames:
try:
admin_info = nexus.get_group_membership(CONFIG["ADMIN_GROUP_ID"], user_identifier)
assert admin_info["status"] == "active"
# Username is not active admin, which is fine
except (globus_sdk.GlobusAPIError, AssertionError):
pass
# Error getting membership
except Exception as e:
logger.error("NexusClient admin fetch error: {}".format(repr(e)))
return {
"success": False,
"error": "Unable to connect to Globus Groups",
"error_code": 500
}
# Successful check, is admin
else:
is_admin = True
return {
"success": True,
"token_info": auth_res,
"user_id": auth_res["sub"],
"username": user_identifier,
"name": auth_res["name"] or "Not given",
"email": auth_res["email"] or "Not given",
"identities_set": auth_res["identities_set"],
"group_roles": group_roles,
"is_admin": is_admin
}
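# Hedged usage sketch for the deprecated helper above (the token string is a
# placeholder; current code is expected to go through the api_utils equivalent):
#
#   auth = authenticate_token("Bearer <token>", groups=["extract"])
#   if not auth["success"]:
#       return auth["error"], auth["error_code"]
#   submitter = auth["user_id"]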
def make_source_id(title, author, test=False, index=None, sanitize_only=False):
# Function should be called from api_utils instead
raise NotImplementedError("Calling deprecated version")
"""Make a source name out of a title."""
if index is None:
index = (CONFIG["INGEST_TEST_INDEX"] if test else CONFIG["INGEST_INDEX"])
# Stopwords to delete from the source_name
    # Not using NLTK to avoid an entire package dependency for one minor feature,
# and the NLTK stopwords are unlikely to be in a dataset title ("your", "that'll", etc.)
delete_words = [
"a",
"an",
"and",
"as",
"data",
"dataset",
"for",
"from",
"in",
"of",
"or",
"study",
"test", # Clears test flag from new source_id
"that",
"the",
"this",
"to",
"very",
"with"
]
# Remove any existing version number from title
title = split_source_id(title)["source_name"]
# Tokenize title and author
# Valid token separators are space and underscore
# Discard empty tokens
title_tokens = [t for t in title.strip().replace("_", " ").split() if t]
author_tokens = [t for t in author.strip().replace("_", " ").split() if t]
# Clean title tokens
title_clean = []
for token in title_tokens:
# Clean token is lowercase and alphanumeric
# TODO: After Py3.7 upgrade, use .isascii()
clean_token = "".join([char for char in token.lower() if char.isalnum()])
# and char.isascii()])
if clean_token and clean_token not in delete_words:
title_clean.append(clean_token)
# Clean author tokens, merge into one word
author_word = ""
for token in author_tokens:
clean_token = "".join([char for char in token.lower() if char.isalnum()])
# and char.isascii()])
author_word += clean_token
# Remove author_word from title, if exists (e.g. from previous make_source_id())
while author_word in title_clean and not sanitize_only:
title_clean.remove(author_word)
# Select words from title for source_name
# Use up to the first two words + last word
if len(title_clean) >= 1:
word1 = title_clean[0]
else:
# Must have at least one word
raise ValueError("Title '{}' invalid: Must have at least one word that is not "
"the author name (the following words do not count: '{}')"
.format(title, delete_words))
if len(title_clean) >= 2:
word2 = title_clean[1]
else:
word2 = ""
if len(title_clean) >= 3:
word3 = title_clean[-1]
else:
word3 = ""
# Assemble source_name
# Strip trailing underscores from missing words
if sanitize_only:
source_name = "_".join(title_clean).strip("_")
else:
source_name = "{}_{}_{}_{}".format(author_word, word1, word2, word3).strip("_")
# Add test flag if necessary
if test:
source_name = "_test_" + source_name
# Determine version number to add
# Get last Search version
search_creds = mdf_toolbox.dict_merge(CONFIG["GLOBUS_CREDS"], {"services": ["search"]})
search_client = mdf_toolbox.confidential_login(**search_creds)["search"]
old_q = {
"q": "mdf.source_name:{} AND mdf.resource_type:dataset".format(source_name),
"advanced": True,
"limit": 2, # Should only ever be one, if two are returned there's a problem
"offset": 0
}
old_search = mdf_toolbox.gmeta_pop(search_client.post_search(
mdf_toolbox.translate_index(index), old_q))
if len(old_search) == 0:
search_version = 1
elif len(old_search) == 1:
search_version = old_search[0]["mdf"]["version"] + 1
else:
logger.error("{}: {} dataset entries found in Search: {}"
.format(source_name, len(old_search), old_search))
raise ValueError("Dataset entry in Search has error")
# Get old submission information
scan_res = scan_table(table_name="status", fields=["source_id", "user_id"],
filters=[("source_id", "^", source_name)])
if not scan_res["success"]:
logger.error("Unable to scan status database for '{}': '{}'"
.format(source_name, scan_res["error"]))
raise ValueError("Dataset status has error")
user_ids = set([sub["user_id"] for sub in scan_res["results"]])
# Get most recent previous source_id and info
old_search_version = 0
old_sub_version = 0
for old_sid in scan_res["results"]:
old_sid_info = split_source_id(old_sid["source_id"])
# If found more recent Search version, | |
# mrjob/runner.py
# Copyright 2009-2012 Yelp and Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for all runners."""
from __future__ import with_statement
import copy
import datetime
import getpass
import logging
import os
import random
import re
import shutil
import sys
from subprocess import CalledProcessError
from subprocess import Popen
from subprocess import PIPE
from subprocess import check_call
import tempfile
try:
from cStringIO import StringIO
StringIO # quiet "redefinition of unused ..." warning from pyflakes
except ImportError:
from StringIO import StringIO
from mrjob import compat
from mrjob.conf import combine_cmds
from mrjob.conf import combine_dicts
from mrjob.conf import combine_envs
from mrjob.conf import combine_local_envs
from mrjob.conf import combine_lists
from mrjob.conf import combine_paths
from mrjob.conf import combine_path_lists
from mrjob.conf import load_opts_from_mrjob_confs
from mrjob.conf import OptionStore
from mrjob.fs.local import LocalFilesystem
from mrjob.util import cmd_line
from mrjob.util import file_ext
from mrjob.util import tar_and_gzip
log = logging.getLogger('mrjob.runner')
# used to detect globs and break them into the part before and after the glob
GLOB_RE = re.compile(r'^(.*?)([\[\*\?].*)$')
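# Worked illustration of the pattern above:
#   GLOB_RE.match('data/2012-*/part-*').groups() == ('data/2012-', '*/part-*')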
#: cleanup options:
#:
#: * ``'ALL'``: delete local scratch, remote scratch, and logs
#: * ``'LOCAL_SCRATCH'``: delete local scratch only
#: * ``'LOGS'``: delete logs only
#: * ``'NONE'``: delete nothing
#: * ``'REMOTE_SCRATCH'``: delete remote scratch only
#: * ``'SCRATCH'``: delete local and remote scratch, but not logs
#: The same values are accepted for ``cleanup_on_failure``.
CLEANUP_CHOICES = ['ALL', 'LOCAL_SCRATCH', 'LOGS', 'NONE', 'REMOTE_SCRATCH',
'SCRATCH']
_STEP_RE = re.compile(r'^M?C?R?$')
# buffer for piping files into sort on Windows
_BUFFER_SIZE = 4096
class RunnerOptionStore(OptionStore):
ALLOWED_KEYS = OptionStore.ALLOWED_KEYS.union(set([
'base_tmp_dir',
'bootstrap_mrjob',
'cleanup',
'cleanup_on_failure',
'cmdenv',
'hadoop_extra_args',
'hadoop_streaming_jar',
'hadoop_version',
'jobconf',
'label',
'owner',
'python_archives',
'python_bin',
'setup_cmds',
'setup_scripts',
'steps_python_bin',
'upload_archives',
'upload_files',
]))
COMBINERS = combine_dicts(OptionStore.COMBINERS, {
'base_tmp_dir': combine_paths,
'cmdenv': combine_envs,
'hadoop_extra_args': combine_lists,
'jobconf': combine_dicts,
'python_archives': combine_path_lists,
'python_bin': combine_cmds,
'setup_cmds': combine_lists,
'setup_scripts': combine_path_lists,
'steps_python_bin': combine_cmds,
'upload_archives': combine_path_lists,
'upload_files': combine_path_lists,
})
def __init__(self, alias, opts, conf_paths):
"""
:param alias: Runner alias (e.g. ``'local'``)
:param opts: Options from the command line
:param conf_paths: Either a file path or an iterable of paths to config
files
"""
super(RunnerOptionStore, self).__init__()
# sanitize incoming options and issue warnings for bad keys
opts = self.validated_options(
opts, 'Got unexpected keyword arguments: %s')
unsanitized_opt_dicts = load_opts_from_mrjob_confs(
alias, conf_paths=conf_paths)
for path, mrjob_conf_opts in unsanitized_opt_dicts:
self.cascading_dicts.append(self.validated_options(
mrjob_conf_opts,
'Got unexpected opts from %s: %%s' % path))
self.cascading_dicts.append(opts)
if (len(self.cascading_dicts) > 2 and
all(len(d) == 0 for d in self.cascading_dicts[2:-1])):
log.warning('No configs specified for %s runner' % alias)
self.populate_values_from_cascading_dicts()
self._validate_cleanup()
def default_options(self):
super_opts = super(RunnerOptionStore, self).default_options()
try:
owner = getpass.getuser()
except:
owner = None
return combine_dicts(super_opts, {
'base_tmp_dir': tempfile.gettempdir(),
'bootstrap_mrjob': True,
'cleanup': ['ALL'],
'cleanup_on_failure': ['NONE'],
'hadoop_version': '0.20',
'owner': owner,
'python_bin': ['python'],
'steps_python_bin': [sys.executable or 'python'],
})
def _validate_cleanup(self):
# old API accepts strings for cleanup
# new API wants lists
for opt_key in ('cleanup', 'cleanup_on_failure'):
if isinstance(self[opt_key], basestring):
self[opt_key] = [self[opt_key]]
def validate_cleanup(error_str, opt_list):
for choice in opt_list:
if choice not in CLEANUP_CHOICES:
raise ValueError(error_str % choice)
if 'NONE' in opt_list and len(set(opt_list)) > 1:
raise ValueError(
'Cannot clean up both nothing and something!')
cleanup_error = ('cleanup must be one of %s, not %%s' %
', '.join(CLEANUP_CHOICES))
validate_cleanup(cleanup_error, self['cleanup'])
cleanup_failure_error = (
'cleanup_on_failure must be one of %s, not %%s' %
', '.join(CLEANUP_CHOICES))
validate_cleanup(cleanup_failure_error,
self['cleanup_on_failure'])
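# A hedged illustration of the normalization above (option values are examples only;
# a real store also merges defaults and any mrjob.conf files it finds):
#
#   store = RunnerOptionStore('local', {'cleanup': 'LOGS'}, conf_paths=[])
#   store['cleanup']   # -> ['LOGS'] (bare strings are wrapped in a list)
#   # {'cleanup': ['NONE', 'LOGS']} would raise ValueError instead.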
class MRJobRunner(object):
"""Abstract base class for all runners"""
#: alias for this runner; used for picking section of
    #: :py:mod:`mrjob.conf` to load; one of ``'local'``, ``'emr'``,
    #: or ``'hadoop'``
alias = None
OPTION_STORE_CLASS = RunnerOptionStore
### methods to call from your batch script ###
def __init__(self, mr_job_script=None, conf_path=None,
extra_args=None, file_upload_args=None,
hadoop_input_format=None, hadoop_output_format=None,
input_paths=None, output_dir=None, partitioner=None,
stdin=None, conf_paths=None, **opts):
"""All runners take the following keyword arguments:
:type mr_job_script: str
:param mr_job_script: the path of the ``.py`` file containing the
:py:class:`~mrjob.job.MRJob`. If this is None,
you won't actually be able to :py:meth:`run` the
job, but other utilities (e.g. :py:meth:`ls`)
will work.
:type conf_path: str, None, or False
:param conf_path: Deprecated. Alternate path to read configs from, or
``False`` to ignore all config files. Use
*conf_paths* instead.
:type conf_paths: None or list
:param conf_paths: List of config files to combine and use, or None to
search for mrjob.conf in the default locations.
:type extra_args: list of str
:param extra_args: a list of extra cmd-line arguments to pass to the
mr_job script. This is a hook to allow jobs to take
additional arguments.
:param file_upload_args: a list of tuples of ``('--ARGNAME', path)``.
The file at the given path will be uploaded
to the local directory of the mr_job script
when it runs, and then passed into the script
with ``--ARGNAME``. Useful for passing in
SQLite DBs and other configuration files to
your job.
:type hadoop_input_format: str
:param hadoop_input_format: name of an optional Hadoop ``InputFormat``
class. Passed to Hadoop along with your
first step with the ``-inputformat``
option. Note that if you write your own
class, you'll need to include it in your
own custom streaming jar (see
*hadoop_streaming_jar*).
:type hadoop_output_format: str
:param hadoop_output_format: name of an optional Hadoop
``OutputFormat`` class. Passed to Hadoop
along with your first step with the
``-outputformat`` option. Note that if you
write your own class, you'll need to
include it in your own custom streaming
jar (see *hadoop_streaming_jar*).
:type input_paths: list of str
:param input_paths: Input files for your job. Supports globs and
recursively walks directories (e.g.
``['data/common/', 'data/training/*.gz']``). If
this is left blank, we'll read from stdin
:type output_dir: str
:param output_dir: An empty/non-existent directory where Hadoop
streaming should put the final output from the job.
If you don't specify an output directory, we'll
output into a subdirectory of this job's temporary
directory. You can control this from the command
line with ``--output-dir``. This option cannot be
set from configuration files. If used with the
hadoop runner, this path does not need to be fully
qualified with ``hdfs://`` URIs because it's
understood that it has to be on HDFS.
:type partitioner: str
        :param partitioner: Optional name of a Hadoop partitioner class, e.g.
``'org.apache.hadoop.mapred.lib.HashPartitioner'``.
Hadoop streaming will use this to determine how
mapper output should be sorted and distributed
to reducers.
:param stdin: an iterable (can be a ``StringIO`` or even a list) to use
as stdin. This is a hook for testing; if you set
``stdin`` via :py:meth:`~mrjob.job.MRJob.sandbox`, it'll
get passed through to the runner. If for some reason
your lines are missing newlines, we'll add them;
this makes it easier to write automated tests.
"""
if conf_path is not None:
if conf_paths is not None:
raise ValueError("Can't specify both conf_path and conf_paths")
else:
log.warn("The conf_path argument to MRJobRunner() is"
" deprecated. Use conf_paths instead.")
if conf_path is False:
conf_paths = []
else:
conf_paths = [conf_path]
self._opts = self.OPTION_STORE_CLASS(self.alias, opts, conf_paths)
self._fs = None
# we potentially have a lot of files to copy, so we keep track
# of them as a list of dictionaries, with the following keys:
#
# 'path': the path to the file on the local system
# 'name': a unique name for the file when we copy it into HDFS etc.
# if this is blank, we'll pick one
# 'cache': if 'file', copy into mr_job_script's working directory
# on the Hadoop nodes. If 'archive', uncompress the file
self._files = []
# add the script to our list of files (don't actually commit to
# uploading it)
if mr_job_script:
self._script = {'path': mr_job_script}
self._files.append(self._script)
self._ran_job = False
else:
self._script = None
self._ran_job = True # don't allow user to call run()
# setup cmds and wrapper script
self._setup_scripts = []
for path in self._opts['setup_scripts']:
file_dict = self._add_file_for_upload(path)
self._setup_scripts.append(file_dict)
# we'll create the wrapper script later
self._wrapper_script = None
# extra args to our job
self._extra_args = list(extra_args) if extra_args else []
# extra file arguments to our job
self._file_upload_args = []
if file_upload_args:
for arg, path in file_upload_args:
file_dict = self._add_file_for_upload(path)
self._file_upload_args.append((arg, file_dict))
# set up | |
data = {'content': "{'lr': 0.1}"}
assert self.queryset.first().params is None
with patch('scheduler.tasks.experiments.experiments_build.apply_async') as mock_fct:
resp = self.auth_client.post(self.url, data)
assert resp.status_code == status.HTTP_400_BAD_REQUEST
assert mock_fct.call_count == 0
assert self.queryset.count() == 1
@pytest.mark.experiments_mark
class TestResumeExperimentViewV1(BaseViewTest):
serializer_class = ExperimentSerializer
model_class = Experiment
factory_class = ExperimentFactory
HAS_AUTH = True
def setUp(self):
super().setUp()
project = ProjectFactory(user=self.auth_client.user)
self.object = self.factory_class(project=project)
self.url = '/{}/{}/{}/experiments/{}/resume'.format(
API_V1,
project.user.username,
project.name,
self.object.id)
self.queryset = self.model_class.objects.all()
def test_resume(self):
data = {}
assert self.queryset.count() == 1
with patch('scheduler.tasks.experiments.experiments_build.apply_async') as mock_fct:
resp = self.auth_client.post(self.url, data)
assert resp.status_code == status.HTTP_201_CREATED
assert mock_fct.call_count == 1
assert self.queryset.count() == 2
last_experiment = self.queryset.last()
assert last_experiment.is_clone is True
assert last_experiment.is_restart is False
assert last_experiment.is_copy is False
assert last_experiment.is_resume is True
assert last_experiment.original_experiment == self.object
assert last_experiment.original_unique_name == self.object.unique_name
def test_resume_patch_config(self):
data = {'content': "{'params': {'lr': 0.1}}"}
assert self.queryset.first().params is None
with patch('scheduler.tasks.experiments.experiments_build.apply_async') as mock_fct:
resp = self.auth_client.post(self.url, data)
assert resp.status_code == status.HTTP_201_CREATED
assert mock_fct.call_count == 1
assert self.queryset.count() == 2
assert self.queryset.first().params is None
assert self.queryset.last().params == {'lr': 0.1}
last_experiment = self.queryset.last()
assert last_experiment.is_clone is True
assert last_experiment.is_restart is False
assert last_experiment.is_copy is False
assert last_experiment.is_resume is True
assert last_experiment.original_experiment == self.object
assert last_experiment.original_unique_name == self.object.unique_name
def test_resume_patch_wrong_config_raises(self):
data = {'content': "{'lr': 0.1}"}
assert self.queryset.first().params is None
with patch('scheduler.tasks.experiments.experiments_build.apply_async') as mock_fct:
resp = self.auth_client.post(self.url, data)
assert resp.status_code == status.HTTP_400_BAD_REQUEST
assert mock_fct.call_count == 0
assert self.queryset.count() == 1
@pytest.mark.experiments_mark
class TestCopyExperimentViewV1(BaseViewTest):
serializer_class = ExperimentSerializer
model_class = Experiment
factory_class = ExperimentFactory
HAS_AUTH = True
DISABLE_RUNNER = False
DISABLE_EXECUTOR = False
def setUp(self):
super().setUp()
project = ProjectFactory(user=self.auth_client.user)
self.object = self.factory_class(project=project)
self.url = '/{}/{}/{}/experiments/{}/copy'.format(
API_V1,
project.user.username,
project.name,
self.object.id)
self.queryset = self.model_class.objects.all()
    def test_copy(self):
data = {}
assert self.queryset.count() == 1
with patch('scheduler.tasks.experiments.experiments_build.apply_async') as mock_fct:
resp = self.auth_client.post(self.url, data)
assert resp.status_code == status.HTTP_201_CREATED
assert mock_fct.call_count == 1
assert self.queryset.count() == 2
last_experiment = self.queryset.last()
assert last_experiment.is_clone is True
assert last_experiment.is_restart is False
assert last_experiment.is_copy is True
assert last_experiment.is_resume is False
assert last_experiment.original_experiment == self.object
assert last_experiment.original_unique_name == self.object.unique_name
    def test_copy_patch_config(self):
data = {'content': "{'params': {'lr': 0.1}}"}
assert self.queryset.first().params is None
with patch('scheduler.tasks.experiments.experiments_build.apply_async') as mock_fct:
resp = self.auth_client.post(self.url, data)
assert resp.status_code == status.HTTP_201_CREATED
assert mock_fct.call_count == 1
assert self.queryset.count() == 2
assert self.queryset.first().params is None
assert self.queryset.last().params == {'lr': 0.1}
last_experiment = self.queryset.last()
assert last_experiment.is_clone is True
assert last_experiment.is_restart is False
assert last_experiment.is_copy is True
assert last_experiment.is_resume is False
assert last_experiment.original_experiment == self.object
assert last_experiment.original_unique_name == self.object.unique_name
    def test_copy_patch_wrong_config_raises(self):
data = {'content': "{'lr': 0.1}"}
assert self.queryset.first().params is None
with patch('scheduler.tasks.experiments.experiments_build.apply_async') as mock_fct:
resp = self.auth_client.post(self.url, data)
assert resp.status_code == status.HTTP_400_BAD_REQUEST
assert mock_fct.call_count == 0
assert self.queryset.count() == 1
@pytest.mark.experiments_mark
class TestStopExperimentViewV1(BaseViewTest):
model_class = Experiment
factory_class = ExperimentFactory
HAS_AUTH = True
def setUp(self):
super().setUp()
project = ProjectFactory(user=self.auth_client.user)
self.object = self.factory_class(project=project)
self.url = '/{}/{}/{}/experiments/{}/stop'.format(
API_V1,
project.user.username,
project.name,
self.object.id)
self.queryset = self.model_class.objects.all()
def test_stop(self):
data = {}
assert self.queryset.count() == 1
with patch('scheduler.tasks.experiments.experiments_stop.apply_async') as mock_fct:
resp = self.auth_client.post(self.url, data)
assert mock_fct.call_count == 1
assert resp.status_code == status.HTTP_200_OK
assert self.queryset.count() == 1
@pytest.mark.experiments_mark
class TestStopExperimentManyViewV1(BaseViewTest):
model_class = Experiment
factory_class = ExperimentFactory
HAS_AUTH = True
def setUp(self):
super().setUp()
project = ProjectFactory(user=self.auth_client.user)
self.objects = [self.factory_class(project=project) for _ in range(3)]
self.url = '/{}/{}/{}/experiments/stop'.format(
API_V1,
project.user.username,
project.name)
self.queryset = self.model_class.objects.all()
def test_stop_many(self):
data = {}
assert self.queryset.count() == 3
with patch('scheduler.tasks.experiments.experiments_stop.apply_async') as mock_fct:
resp = self.auth_client.post(self.url, data)
assert resp.status_code == status.HTTP_200_OK
assert mock_fct.call_count == 0
data = {'ids': [obj.id for obj in self.objects]}
with patch('scheduler.tasks.experiments.experiments_stop.apply_async') as mock_fct:
resp = self.auth_client.post(self.url, data)
assert resp.status_code == status.HTTP_200_OK
assert mock_fct.call_count == 3
assert self.queryset.count() == 3
@pytest.mark.experiments_mark
class TestDeleteExperimentManyViewV1(BaseViewTest):
model_class = Experiment
factory_class = ExperimentFactory
HAS_AUTH = True
def setUp(self):
super().setUp()
project = ProjectFactory(user=self.auth_client.user)
self.objects = [self.factory_class(project=project) for _ in range(3)]
self.url = '/{}/{}/{}/experiments/delete'.format(
API_V1,
project.user.username,
project.name)
self.queryset = self.model_class.objects.all()
def test_delete_many(self):
data = {}
assert self.queryset.count() == 3
resp = self.auth_client.delete(self.url, data)
assert resp.status_code == status.HTTP_200_OK
assert self.queryset.count() == 3
data = {'ids': [obj.id for obj in self.objects]}
resp = self.auth_client.delete(self.url, data)
assert resp.status_code == status.HTTP_200_OK
assert self.queryset.count() == 0
@pytest.mark.experiments_mark
class TestExperimentLogsViewV1(BaseViewTest):
num_log_lines = 10
HAS_AUTH = True
def setUp(self):
super().setUp()
project = ProjectFactory(user=self.auth_client.user)
self.experiment = ExperimentFactory(project=project)
self.logs = []
self.url = '/{}/{}/{}/experiments/{}/logs'.format(
API_V1,
project.user.username,
project.name,
self.experiment.id)
self.stream_url = '/{}/{}/{}/experiments/{}/logs/stream'.format(
API_V1,
project.user.username,
project.name,
self.experiment.id)
self.ws_url = '/{}/{}/{}/experiments/{}/logs'.format(
WS_V1,
project.user.username,
project.name,
self.experiment.id)
def create_logs(self, temp):
log_path = stores.get_experiment_logs_path(
experiment_name=self.experiment.unique_name,
temp=temp)
stores.create_experiment_logs_path(experiment_name=self.experiment.unique_name, temp=temp)
fake = Faker()
self.logs = []
for _ in range(self.num_log_lines):
self.logs.append(fake.sentence())
with open(log_path, 'w') as file:
for line in self.logs:
file.write(line)
file.write('\n')
def test_get_done_experiment(self):
self.experiment.set_status(ExperimentLifeCycle.SUCCEEDED)
self.assertTrue(self.experiment.is_done)
# No logs
resp = self.auth_client.get(self.url)
assert resp.status_code == status.HTTP_404_NOT_FOUND
        # Check that it does not return the temp file
self.create_logs(temp=True)
resp = self.auth_client.get(self.url)
assert resp.status_code == status.HTTP_404_NOT_FOUND
# Check returns the correct file
self.create_logs(temp=False)
resp = self.auth_client.get(self.url)
assert resp.status_code == status.HTTP_200_OK
data = [i for i in resp._iterator] # pylint:disable=protected-access
data = [d for d in data[0].decode('utf-8').split('\n') if d]
assert len(data) == len(self.logs)
assert data == self.logs
@patch('api.experiments.views.process_logs')
def test_get_non_done_experiment(self, _):
self.assertFalse(self.experiment.is_done)
# No logs
resp = self.auth_client.get(self.url)
assert resp.status_code == status.HTTP_404_NOT_FOUND
        # Check that it does not return the non-temp file
self.create_logs(temp=False)
resp = self.auth_client.get(self.url)
assert resp.status_code == status.HTTP_404_NOT_FOUND
# Check returns the correct file
self.create_logs(temp=True)
resp = self.auth_client.get(self.url)
assert resp.status_code == status.HTTP_200_OK
data = [i for i in resp._iterator] # pylint:disable=protected-access
data = [d for d in data[0].decode('utf-8').split('\n') if d]
assert len(data) == len(self.logs)
assert data == self.logs
def test_post_logs(self):
resp = self.auth_client.post(self.url)
assert resp.status_code == status.HTTP_400_BAD_REQUEST
data = 'logs here'
with patch('logs_handlers.tasks.logs_handle_experiment_job.apply_async') as mock_fct:
resp = self.auth_client.post(self.url, data)
assert resp.status_code == status.HTTP_200_OK
assert mock_fct.call_count == 1
data = ['logs here', 'dfg dfg']
with patch('logs_handlers.tasks.logs_handle_experiment_job.apply_async') as mock_fct:
resp = self.auth_client.post(self.url, data)
assert resp.status_code == status.HTTP_200_OK
assert mock_fct.call_count == 1
def test_stream_redirects_to_internal_service(self):
response = self.auth_client.get(self.stream_url)
self.assertEqual(response.status_code, 200)
self.assertTrue(ProtectedView.NGINX_REDIRECT_HEADER in response)
self.assertEqual(response[ProtectedView.NGINX_REDIRECT_HEADER], self.ws_url)
@pytest.mark.experiments_mark
class TestExperimentOutputsTreeViewV1(BaseFilesViewTest):
num_log_lines = 10
HAS_AUTH = True
def setUp(self):
super().setUp()
project = ProjectFactory(user=self.auth_client.user)
experiment = ExperimentFactory(project=project)
self.url = '/{}/{}/{}/experiments/{}/outputs/tree'.format(
API_V1,
project.user.username,
project.name,
experiment.id)
outputs_path = stores.get_experiment_outputs_path(
persistence=experiment.persistence_outputs,
experiment_name=experiment.unique_name,
original_name=experiment.original_unique_name,
cloning_strategy=experiment.cloning_strategy)
stores.create_experiment_outputs_path(
persistence=experiment.persistence_outputs,
experiment_name=experiment.unique_name)
self.create_paths(path=outputs_path, url=self.url)
def test_get(self):
resp = self.auth_client.get(self.url)
assert resp.status_code == status.HTTP_200_OK
self.assert_same_content(resp.data['files'], self.top_level['files'])
self.assert_same_content(resp.data['dirs'], self.top_level['dirs'])
resp = self.auth_client.get(self.url_second_level)
assert resp.status_code == status.HTTP_200_OK
self.assert_same_content(resp.data['files'], self.second_level['files'])
self.assert_same_content(resp.data['dirs'], self.second_level['dirs'])
resp = self.auth_client.get(self.url_second_level2)
assert resp.status_code == status.HTTP_200_OK
self.assert_same_content(resp.data['files'], self.second_level['files'])
self.assert_same_content(resp.data['dirs'], self.second_level['dirs'])
@pytest.mark.experiments_mark
class TestExperimentOutputsFilesViewV1(BaseFilesViewTest):
num_log_lines = 10
HAS_AUTH = True
def setUp(self):
super().setUp()
project = ProjectFactory(user=self.auth_client.user)
experiment = ExperimentFactory(project=project)
self.url = '/{}/{}/{}/experiments/{}/outputs/files'.format(
API_V1,
project.user.username,
project.name,
experiment.id)
outputs_path = stores.get_experiment_outputs_path(
persistence=experiment.persistence_outputs,
experiment_name=experiment.unique_name,
original_name=experiment.original_unique_name,
cloning_strategy=experiment.cloning_strategy)
stores.create_experiment_outputs_path(
persistence=experiment.persistence_outputs,
experiment_name=experiment.unique_name)
self.create_paths(path=outputs_path, url=self.url)
def test_get(self):
for file_content in self.top_level_files:
resp = self.auth_client.get(self.url + '?path={}'.format(file_content['file']))
assert resp.status_code == status.HTTP_200_OK
data = [i for i in resp._iterator] # pylint:disable=protected-access
assert data[0].decode('utf-8') == file_content['data']
for file_content in self.second_level_files:
resp = self.auth_client.get(self.url + '?path={}'.format(file_content['file']))
assert resp.status_code == status.HTTP_200_OK
data = [i for i in resp._iterator] # pylint:disable=protected-access
assert data[0].decode('utf-8') == file_content['data']
@pytest.mark.experiments_mark
class DownloadExperimentOutputsViewTest(BaseViewTest):
model_class = Experiment
factory_class = ExperimentFactory
HAS_AUTH = True
HAS_INTERNAL = True
def setUp(self):
super().setUp()
self.project = ProjectFactory(user=self.auth_client.user)
self.experiment = self.factory_class(project=self.project)
self.download_url = '/{}/{}/{}/experiments/{}/outputs/download'.format(
API_V1,
self.project.user.username,
self.project.name,
self.experiment.id)
self.experiment_outputs_path = stores.get_experiment_outputs_path(
persistence=self.experiment.persistence_outputs,
experiment_name=self.experiment.unique_name)
self.url = self.download_url
def create_tmp_outputs(self):
stores.create_experiment_outputs_path(
persistence=self.experiment.persistence_outputs,
experiment_name=self.experiment.unique_name)
for i in range(4):
open('{}/{}'.format(self.experiment_outputs_path, i), '+w')
def test_redirects_nginx_to_file(self):
self.create_tmp_outputs()
        # Assert that the experiment outputs path exists
self.assertTrue(os.path.exists(self.experiment_outputs_path))
response = self.auth_client.get(self.download_url)
self.assertEqual(response.status_code, 200)
self.assertTrue(ProtectedView.NGINX_REDIRECT_HEADER in response)
self.assertEqual(response[ProtectedView.NGINX_REDIRECT_HEADER],
'{}/{}.tar.gz'.format(conf.get(ARCHIVES_ROOT_ARTIFACTS),
self.experiment.unique_name.replace('.', '_')))
@pytest.mark.experiments_mark
class TestExperimentEphemeralTokenViewV1(BaseViewTest):
HAS_AUTH = False
factory_class = ExperimentFactory
def setUp(self):
super().setUp()
self.auth_user = self.auth_client.user
self.project = ProjectFactory(user=self.auth_client.user)
self.experiment = self.factory_class(project=self.project)
self.other_experiment = self.factory_class(project=self.project)
self.url = '/{}/{}/{}/experiments/{}/ephemeraltoken'.format(
API_V1,
self.project.user.username,
self.project.name,
self.experiment.id)
self.other_url = '/{}/{}/{}/experiments/{}/ephemeraltoken'.format(
API_V1,
self.project.user.username,
self.project.name,
self.other_experiment.id)
@staticmethod
def create_ephemeral_token(experiment, **kwargs):
scope = RedisEphemeralTokens.get_scope(user=experiment.user.id,
model='experiment',
object_id=experiment.id)
return RedisEphemeralTokens.generate(scope=scope, **kwargs)
def test_is_forbidden_for_non_running_or_scheduled_experiment(self):
ephemeral_token = self.create_ephemeral_token(self.experiment)
token = RedisEphemeralTokens.create_header_token(ephemeral_token)
ephemeral_client = EphemeralClient(token=token)
resp = ephemeral_client.post(self.url)
assert resp.status_code == status.HTTP_403_FORBIDDEN
self.assertEqual(ephemeral_token.get_state(), None)
def test_using_other_experiment_token(self):
ephemeral_token = self.create_ephemeral_token(self.other_experiment)
token = RedisEphemeralTokens.create_header_token(ephemeral_token)
ephemeral_client = EphemeralClient(token=token)
resp = ephemeral_client.post(self.url)
assert resp.status_code == status.HTTP_403_FORBIDDEN
self.assertEqual(ephemeral_token.get_state(), None)
def test_using_timed_out_experiment_token(self):
self.experiment.set_status(status=JobLifeCycle.RUNNING)
ephemeral_token = self.create_ephemeral_token(self.experiment, ttl=1)
token = RedisEphemeralTokens.create_header_token(ephemeral_token)
ephemeral_client = EphemeralClient(token=token)
time.sleep(1.1)
resp = ephemeral_client.post(self.url)
assert resp.status_code == status.HTTP_401_UNAUTHORIZED
self.assertEqual(ephemeral_token.get_state(), None)
def test_using_used_experiment_token(self):
self.experiment.set_status(status=JobLifeCycle.RUNNING)
ephemeral_token = self.create_ephemeral_token(self.experiment)
token = RedisEphemeralTokens.create_header_token(ephemeral_token)
ephemeral_token.clear()
ephemeral_client = EphemeralClient(token=token)
resp = ephemeral_client.post(self.url)
assert resp.status_code == status.HTTP_401_UNAUTHORIZED
self.assertEqual(ephemeral_token.get_state(), None)
def test_using_scheduled_experiment_token(self):
self.experiment.set_status(status=ExperimentLifeCycle.SCHEDULED)
ephemeral_token = self.create_ephemeral_token(self.experiment)
token = RedisEphemeralTokens.create_header_token(ephemeral_token)
ephemeral_client = EphemeralClient(token=token)
resp | |
# Copyright (c) 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements the DriverBase abstract class for Marconi storage drivers."""
import abc
import six
DEFAULT_QUEUES_PER_PAGE = 10
DEFAULT_MESSAGES_PER_PAGE = 10
DEFAULT_SHARDS_PER_PAGE = 10
DEFAULT_MESSAGES_PER_CLAIM = 10
@six.add_metaclass(abc.ABCMeta)
class DriverBase(object):
"""Base class for both data and control plane drivers
:param conf: Configuration containing options for this driver.
:type conf: `oslo.config.ConfigOpts`
:param cache: Cache instance to use for reducing latency
for certain lookups.
:type cache: `marconi.common.cache.backends.BaseCache`
"""
def __init__(self, conf, cache):
self.conf = conf
self.cache = cache
@six.add_metaclass(abc.ABCMeta)
class DataDriverBase(DriverBase):
"""Interface definition for storage drivers.
Data plane storage drivers are responsible for implementing the
core functionality of the system.
Connection information and driver-specific options are
loaded from the config file or the shard catalog.
:param conf: Configuration containing options for this driver.
:type conf: `oslo.config.ConfigOpts`
:param cache: Cache instance to use for reducing latency
for certain lookups.
:type cache: `marconi.common.cache.backends.BaseCache`
"""
def __init__(self, conf, cache):
super(DataDriverBase, self).__init__(conf, cache)
@abc.abstractmethod
def is_alive(self):
"""Check whether the storage is ready."""
raise NotImplementedError
@abc.abstractproperty
def queue_controller(self):
"""Returns the driver's queue controller."""
raise NotImplementedError
@abc.abstractproperty
def message_controller(self):
"""Returns the driver's message controller."""
raise NotImplementedError
@abc.abstractproperty
def claim_controller(self):
"""Returns the driver's claim controller."""
raise NotImplementedError
@six.add_metaclass(abc.ABCMeta)
class ControlDriverBase(DriverBase):
"""Interface definition for control plane storage drivers.
Storage drivers that work at the control plane layer allow one to
modify aspects of the functionality of the system. This is ideal
for administrative purposes.
Allows access to the shard registry through a catalogue and a
shard controller.
:param conf: Configuration containing options for this driver.
:type conf: `oslo.config.ConfigOpts`
:param cache: Cache instance to use for reducing latency
for certain lookups.
:type cache: `marconi.common.cache.backends.BaseCache`
"""
@abc.abstractproperty
def catalogue_controller(self):
"""Returns the driver's catalogue controller."""
raise NotImplementedError
@abc.abstractproperty
def shards_controller(self):
"""Returns storage's shard management controller."""
raise NotImplementedError
class ControllerBase(object):
"""Top-level class for controllers.
:param driver: Instance of the driver
instantiating this controller.
"""
def __init__(self, driver):
self.driver = driver
@six.add_metaclass(abc.ABCMeta)
class Queue(ControllerBase):
"""This class is responsible for managing queues.
Queue operations include CRUD, monitoring, etc.
Storage driver implementations of this class should
be capable of handling high workloads and huge
numbers of queues.
"""
@abc.abstractmethod
def list(self, project=None, marker=None,
limit=DEFAULT_QUEUES_PER_PAGE, detailed=False):
"""Base method for listing queues.
:param project: Project id
:param marker: The last queue name
:param limit: (Default 10) Max number of queues to return
:param detailed: Whether metadata is included
:returns: An iterator giving a sequence of queues
and the marker of the next page.
"""
raise NotImplementedError
@abc.abstractmethod
def get_metadata(self, name, project=None):
"""Base method for queue metadata retrieval.
:param name: The queue name
:param project: Project id
:returns: Dictionary containing queue metadata
:raises: DoesNotExist
"""
raise NotImplementedError
@abc.abstractmethod
def create(self, name, project=None):
"""Base method for queue creation.
:param name: The queue name
:param project: Project id
:returns: True if a queue was created and False
if it was updated.
"""
raise NotImplementedError
@abc.abstractmethod
def exists(self, name, project=None):
"""Base method for testing queue existence.
:param name: The queue name
:param project: Project id
:returns: True if a queue exists and False
if it does not.
"""
raise NotImplementedError
@abc.abstractmethod
def set_metadata(self, name, metadata, project=None):
"""Base method for updating a queue metadata.
:param name: The queue name
:param metadata: Queue metadata as a dict
:param project: Project id
:raises: DoesNotExist
"""
raise NotImplementedError
@abc.abstractmethod
def delete(self, name, project=None):
"""Base method for deleting a queue.
:param name: The queue name
:param project: Project id
"""
raise NotImplementedError
@abc.abstractmethod
def stats(self, name, project=None):
"""Base method for queue stats.
:param name: The queue name
:param project: Project id
:returns: Dictionary with the
queue stats
"""
raise NotImplementedError
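# A hedged sketch of how a transport layer typically drives a concrete queue
# controller ('driver' stands for some DataDriverBase implementation; queue and
# project names are illustrative):
#
#   queues = driver.queue_controller
#   queues.create('orders', project='acme')
#   metadata = queues.get_metadata('orders', project='acme')
#   stats = queues.stats('orders', project='acme')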
@six.add_metaclass(abc.ABCMeta)
class Message(ControllerBase):
"""This class is responsible for managing message CRUD."""
@abc.abstractmethod
def list(self, queue, project=None, marker=None,
limit=DEFAULT_MESSAGES_PER_PAGE,
echo=False, client_uuid=None,
include_claimed=False):
"""Base method for listing messages.
:param queue: Name of the queue to get the
message from.
:param project: Project id
:param marker: Tail identifier
:param limit: (Default 10) Max number of messages to return.
:type limit: Maybe int
:param echo: (Default False) Boolean expressing whether
or not this client should receive its own messages.
:param client_uuid: A UUID object. Required when echo=False.
        :param include_claimed: whether to include claimed messages in the listing
:type include_claimed: bool
:returns: An iterator giving a sequence of messages and
the marker of the next page.
"""
raise NotImplementedError
@abc.abstractmethod
def first(self, queue, project=None, sort=1):
"""Get first message in the queue (including claimed).
:param queue: Name of the queue to list
:param sort: (Default 1) Sort order for the listing. Pass 1 for
ascending (oldest message first), or -1 for descending (newest
message first).
:returns: First message in the queue, or None if the queue is
empty
"""
raise NotImplementedError
@abc.abstractmethod
def get(self, queue, message_id, project=None):
"""Base method for getting a message.
:param queue: Name of the queue to get the
message from.
:param project: Project id
:param message_id: Message ID
:returns: Dictionary containing message data
:raises: DoesNotExist
"""
raise NotImplementedError
@abc.abstractmethod
def bulk_get(self, queue, message_ids, project=None):
"""Base method for getting multiple messages.
:param queue: Name of the queue to get the
message from.
:param project: Project id
:param message_ids: A sequence of message IDs.
:returns: An iterable, yielding dicts containing
message details
"""
raise NotImplementedError
@abc.abstractmethod
def post(self, queue, messages, client_uuid, project=None):
"""Base method for posting one or more messages.
Implementations of this method should guarantee
and preserve the order, in the returned list, of
incoming messages.
:param queue: Name of the queue to post message to.
:param messages: Messages to post to queue, an iterable
yielding 1 or more elements. An empty iterable
results in undefined behavior.
:param client_uuid: A UUID object.
:param project: Project id
:returns: List of message ids
"""
raise NotImplementedError
@abc.abstractmethod
def delete(self, queue, message_id, project=None, claim=None):
"""Base method for deleting a single message.
:param queue: Name of the queue to post
message to.
:param message_id: Message to be deleted
:param project: Project id
:param claim: Claim this message
belongs to. When specified, claim must
be valid and message_id must belong to
it.
"""
raise NotImplementedError
@abc.abstractmethod
def bulk_delete(self, queue, message_ids, project=None):
"""Base method for deleting multiple messages.
:param queue: Name of the queue to post
message to.
:param message_ids: A sequence of message IDs
to be deleted.
:param project: Project id
"""
raise NotImplementedError
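# A hedged sketch of posting and claiming messages through concrete controllers
# (message and claim bodies follow the usual ttl/grace shape, but the exact fields
# accepted are up to the driver; everything below is illustrative):
#
#   import uuid
#   messages = driver.message_controller
#   ids = messages.post('orders', [{'ttl': 300, 'body': {'event': 'created'}}],
#                       client_uuid=uuid.uuid4(), project='acme')
#   claims = driver.claim_controller
#   claim_id, claimed = claims.create('orders', {'ttl': 120, 'grace': 60},
#                                     project='acme')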
@six.add_metaclass(abc.ABCMeta)
class Claim(ControllerBase):
@abc.abstractmethod
def get(self, queue, claim_id, project=None):
"""Base method for getting a claim.
:param queue: Name of the queue this
claim belongs to.
:param claim_id: The claim id
:param project: Project id
:returns: (Claim's metadata, claimed messages)
:raises: DoesNotExist
"""
raise NotImplementedError
@abc.abstractmethod
def create(self, queue, metadata, project=None,
limit=DEFAULT_MESSAGES_PER_CLAIM):
"""Base method for creating a claim.
:param queue: Name of the queue this
claim belongs to.
:param metadata: Claim's parameters
to be stored.
:param project: Project id
:param limit: (Default 10) Max number
of messages to claim.
:returns: (Claim ID, claimed messages)
"""
raise NotImplementedError
@abc.abstractmethod
def update(self, queue, claim_id, metadata, project=None):
"""Base method for updating a claim.
:param queue: Name of the queue this
claim belongs to.
:param claim_id: Claim to be updated
:param metadata: Claim's parameters
to be updated.
:param project: Project id
"""
raise NotImplementedError
@abc.abstractmethod
def delete(self, queue, claim_id, project=None):
"""Base method for deleting a claim.
:param queue: Name of the queue this
claim belongs to.
:param claim_id: Claim to be deleted
:param project: Project id
"""
raise NotImplementedError
@six.add_metaclass(abc.ABCMeta)
class ShardsBase(ControllerBase):
"""A controller for managing shards."""
@abc.abstractmethod
def list(self, marker=None, limit=DEFAULT_SHARDS_PER_PAGE,
detailed=False):
"""Lists all registered shards.
:param marker: used to determine which shard to start with
:type marker: | |
# Copyright 2009 <NAME>. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
from bisect import bisect_left
from whoosh.compat import iteritems, xrange
from whoosh.filedb.compound import CompoundStorage
from whoosh.filedb.fieldcache import FieldCache, DefaultFieldCachingPolicy
from whoosh.matching import FilterMatcher
from whoosh.reading import IndexReader, TermNotFound
from whoosh.store import OverlayStorage
from whoosh.support import dawg
SAVE_BY_DEFAULT = True
# Reader class
class SegmentReader(IndexReader):
GZIP_CACHES = False
def __init__(self, storage, schema, segment, generation=None, codec=None):
self.storage = storage
self.schema = schema
self.segment = segment
self._gen = generation
self.is_closed = False
# Copy info from underlying segment
self._has_deletions = segment.has_deletions()
self._dc = segment.doc_count()
self._dc_all = segment.doc_count_all()
if hasattr(self.segment, "segment_id"):
self.segid = self.segment.segment_id()
else:
from whoosh.codec.base import Segment
self.segid = Segment._random_id()
# self.files is a storage object from which to load the segment files.
# This is different from the general storage (which will be used for
# caches) if the segment is in a compound file.
if segment.is_compound():
# Use an overlay here instead of just the compound storage because
# in rare circumstances a segment file may be added after the
# segment is written
self.files = OverlayStorage(segment.open_compound_file(storage),
self.storage)
else:
self.files = storage
# Get microreaders from codec
if codec is None:
from whoosh.codec import default_codec
codec = default_codec()
self._codec = codec
self._terms = codec.terms_reader(self.files, self.segment)
self._lengths = codec.lengths_reader(self.files, self.segment)
self._stored = codec.stored_fields_reader(self.files, self.segment)
self._vectors = None # Lazy open with self._open_vectors()
self._graph = None # Lazy open with self._open_dawg()
self.set_caching_policy()
def _open_vectors(self):
if self._vectors:
return
self._vectors = self._codec.vector_reader(self.files, self.segment)
def _open_dawg(self):
if self._graph:
return
self._graph = self._codec.graph_reader(self.files, self.segment)
def has_deletions(self):
return self._has_deletions
def doc_count(self):
return self._dc
def doc_count_all(self):
return self._dc_all
def is_deleted(self, docnum):
return self.segment.is_deleted(docnum)
def generation(self):
return self._gen
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, self.segment)
def __contains__(self, term):
return term in self._terms
def close(self):
self._terms.close()
self._stored.close()
if self._lengths:
self._lengths.close()
if self._vectors:
self._vectors.close()
if self._graph:
self._graph.close()
self.files.close()
self.caching_policy = None
self.is_closed = True
def stored_fields(self, docnum):
assert docnum >= 0
schema = self.schema
return dict(item for item in iteritems(self._stored[docnum])
if item[0] in schema)
def all_stored_fields(self):
is_deleted = self.segment.is_deleted
sf = self.stored_fields
for docnum in xrange(self._dc_all):
if not is_deleted(docnum):
yield sf(docnum)
def field_length(self, fieldname):
return self._lengths.field_length(fieldname)
def min_field_length(self, fieldname):
return self._lengths.min_field_length(fieldname)
def max_field_length(self, fieldname):
return self._lengths.max_field_length(fieldname)
def doc_field_length(self, docnum, fieldname, default=0):
return self._lengths.doc_field_length(docnum, fieldname,
default=default)
def has_vector(self, docnum, fieldname):
if self.schema[fieldname].vector:
try:
self._open_vectors()
except (NameError, IOError):
return False
return (docnum, fieldname) in self._vectors
else:
return False
def _test_field(self, fieldname):
if fieldname not in self.schema:
raise TermNotFound("No field %r" % fieldname)
if self.schema[fieldname].format is None:
raise TermNotFound("Field %r is not indexed" % fieldname)
def all_terms(self):
schema = self.schema
return ((fieldname, text) for fieldname, text in self._terms.keys()
if fieldname in schema)
def terms_from(self, fieldname, prefix):
self._test_field(fieldname)
schema = self.schema
return ((fname, text) for fname, text
in self._terms.keys_from((fieldname, prefix))
if fname in schema)
def term_info(self, fieldname, text):
self._test_field(fieldname)
try:
return self._terms[fieldname, text]
except KeyError:
raise TermNotFound("%s:%r" % (fieldname, text))
def _texts_in_fieldcache(self, fieldname, prefix=''):
# The first value in a fieldcache is the default
texts = self.fieldcache(fieldname).texts[1:]
if prefix:
i = bisect_left(texts, prefix)
while i < len(texts) and texts[i].startswith(prefix):
yield texts[i]
i += 1
else:
for text in texts:
yield text
def expand_prefix(self, fieldname, prefix):
self._test_field(fieldname)
# If a fieldcache for the field is already loaded, we already have the
# values for the field in memory, so just yield them from there
if self.fieldcache_loaded(fieldname):
return self._texts_in_fieldcache(fieldname, prefix)
else:
# Call super
return IndexReader.expand_prefix(self, fieldname, prefix)
def lexicon(self, fieldname):
self._test_field(fieldname)
# If a fieldcache for the field is already loaded, we already have the
# values for the field in memory, so just yield them from there
if self.fieldcache_loaded(fieldname):
return self._texts_in_fieldcache(fieldname)
else:
# Call super
return IndexReader.lexicon(self, fieldname)
def __iter__(self):
schema = self.schema
return ((term, terminfo) for term, terminfo in self._terms.items()
if term[0] in schema)
def iter_from(self, fieldname, text):
schema = self.schema
self._test_field(fieldname)
for term, terminfo in self._terms.items_from((fieldname, text)):
if term[0] not in schema:
continue
yield (term, terminfo)
def frequency(self, fieldname, text):
self._test_field(fieldname)
try:
return self._terms.frequency((fieldname, text))
except KeyError:
return 0
def doc_frequency(self, fieldname, text):
self._test_field(fieldname)
try:
return self._terms.doc_frequency((fieldname, text))
except KeyError:
return 0
def postings(self, fieldname, text, scorer=None):
if fieldname not in self.schema:
raise TermNotFound("No field %r" % fieldname)
format_ = self.schema[fieldname].format
matcher = self._terms.matcher(fieldname, text, format_, scorer=scorer)
deleted = self.segment.deleted
if deleted:
matcher = FilterMatcher(matcher, deleted, exclude=True)
return matcher
def vector(self, docnum, fieldname):
if fieldname not in self.schema:
raise TermNotFound("No field %r" % fieldname)
vformat = self.schema[fieldname].vector
if not vformat:
raise Exception("No vectors are stored for field %r" % fieldname)
self._open_vectors()
return self._vectors.matcher(docnum, fieldname, vformat)
# DAWG methods
def has_word_graph(self, fieldname):
if fieldname not in self.schema:
return False
if not self.schema[fieldname].spelling:
return False
try:
self._open_dawg()
except (NameError, IOError, dawg.FileVersionError):
return False
return self._graph.has_root(fieldname)
def word_graph(self, fieldname):
if not self.has_word_graph(fieldname):
raise KeyError("No word graph for field %r" % fieldname)
return dawg.Node(self._graph, self._graph.root(fieldname))
def terms_within(self, fieldname, text, maxdist, prefix=0):
if not self.has_word_graph(fieldname):
# This reader doesn't have a graph stored, use the slow method
return IndexReader.terms_within(self, fieldname, text, maxdist,
prefix=prefix)
return dawg.within(self._graph, text, k=maxdist, prefix=prefix,
address=self._graph.root(fieldname))
# Field cache methods
def supports_caches(self):
return True
def set_caching_policy(self, cp=None, save=True, storage=None):
"""This method lets you control the caching policy of the reader. You
can either pass a :class:`whoosh.filedb.fieldcache.FieldCachingPolicy`
as the first argument, *or* use the `save` and `storage` keywords to
alter the default caching policy::
# Use a custom field caching policy object
reader.set_caching_policy(MyPolicy())
# Use the default caching policy but turn off saving caches to disk
reader.set_caching_policy(save=False)
# Use the default caching policy but save caches to a custom
# storage
from whoosh.filedb.filestore import FileStorage
mystorage = FileStorage("path/to/cachedir")
reader.set_caching_policy(storage=mystorage)
:param cp: a :class:`whoosh.filedb.fieldcache.FieldCachingPolicy`
object. If this argument is not given, the default caching policy
is used.
:param save: save field caches to disk for re-use. If a caching policy
object is specified using `cp`, this argument is ignored.
:param storage: a custom :class:`whoosh.store.Storage` object to use
for saving field caches. If a caching policy object is specified
using `cp` or `save` is `False`, this argument is ignored.
"""
if not cp:
if save and storage is None:
storage = self.storage
elif not save:
storage = None
cp = DefaultFieldCachingPolicy(self.segment.segment_id(),
storage=storage)
if type(cp) is type:
cp = cp()
self.caching_policy = cp
def _fieldkey(self, fieldname):
return "%s/%s" % (self.segid, fieldname)
def fieldcache(self, fieldname, save=SAVE_BY_DEFAULT):
"""Returns a :class:`whoosh.filedb.fieldcache.FieldCache` object for
the given field.
:param fieldname: the name of the field to get a cache for.
:param save: if True (the default), the cache is saved to disk if it
doesn't already exist.
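Example (illustrative; assumes an open reader ``r`` on an index with a
``"tag"`` field)::

    fc = r.fieldcache("tag")   # built on first use, then reused
    if r.fieldcache_loaded("tag"):
        print("the cache is now held in memory")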
"""
key = self._fieldkey(fieldname)
fc = self.caching_policy.get(key)
if not fc:
fc = FieldCache.from_field(self, fieldname)
self.caching_policy.put(key, fc, save=save)
return fc
def fieldcache_available(self, fieldname):
"""Returns True if | |
be set to False. Note that " +
"the container image property must not be specified via this " +
"attribute."
),
),
"export_configs": attr.bool(
doc = (
"Specifies whether to copy generated configs to the 'output_base' " +
"of the 'toolchain_config_suite_spec' (if configs are generated) " +
"If set to False, a configs.tar file will also be produced in the " +
("external repo. This tar file can be then published to a URL and " +
" e.g., be used via an 'http_archive' rule from an arbitrary repo." +
"Default is False.")
),
mandatory = True,
),
"java_home": attr.string(
doc = ("Optional. The location of java_home in the container. For " +
"example , '/usr/lib/jvm/java-8-openjdk-amd64'. Only " +
"relevant if 'create_java_configs' is true. If 'create_java_configs' is " +
"true, the execution of the rule generates configs, and this attribute " +
"is not set, the rule will attempt to read the " +
"JAVA_HOME env var from the container. If that is not set, the rule " +
"will fail."),
),
"java_version": attr.string(
doc = ("Optional. The Java release version in the container. For " +
" example, 11. Should only be set if java_home is set."),
),
"os_family": attr.string(
doc = ("Optional. The os_family to generate the config for. For example, " +
"Linux or Windows (Mac OS X is not supported at this time). The default is " +
"OS Bazel runs on."),
),
"registry": attr.string(
doc = ("Optional. The registry to pull the container from. For example, " +
"marketplace.gcr.io. The default is the value for the selected " +
"toolchain_config_suite_spec (rbe-ubuntu16-04 image for " +
"default_toolchain_config_suite_spec, if no toolchain_config_suite_spec was selected)."),
),
"repository": attr.string(
doc = ("Optional. The repository to pull the container from. For example, " +
"google/ubuntu. The default is the " +
"value for the selected toolchain_config_suite_spec (rbe-ubuntu16-04 image for " +
"default_toolchain_config_suite_spec, if no toolchain_config_suite_spec was selected)."),
),
"toolchain_config_suite_spec": attr.string_dict(
doc = ("Set by rbe_autoconfig macro. Dict containing values to identify a " +
"toolchain container + GitHub repo where configs are " +
"stored. Must include keys: 'repo_name' (name of the " +
"external repo, 'output_base' (relative location of " +
"the output base in the GitHub repo where configs are " +
"located), and 'container_repo', 'container_registry', " +
"'container_name' (describing the location of the " +
"base toolchain container)"),
allow_empty = False,
mandatory = True,
),
"setup_cmd": attr.string(
default = "cd .",
doc = ("Optional. Pass an additional command that will be executed " +
"(inside the container) before running bazel to generate the " +
"toolchain configs"),
),
"tag": attr.string(
doc = ("Optional. The tag of the image to pull, e.g. latest."),
),
"target_compatible_with": attr.string_list(
doc = ("The list of constraints that will be added to the " +
"toolchain in its target_compatible_with attribute. For " +
"example, [\"@bazel_tools//platforms:linux\"]."),
),
"use_checked_in_confs": attr.string(
default = CHECKED_IN_CONFS_TRY,
doc = ("Default: 'Try'. Try to look for checked in configs " +
"before generating them. If set to 'False' (string) the " +
"rule will allways attempt to generate the configs " +
"by pulling a toolchain container and running Bazel inside. " +
"If set to 'Force' rule will error out if no checked-in" +
"configs were found."),
values = CHECKED_IN_CONFS_VALUES,
),
"use_legacy_platform_definition": attr.bool(
doc = (
"Specifies whether the underlying platform uses the " +
"remote_execution_properties property (if use_legacy_platform_definition " +
"is True) or the exec_properties property. The reason why this " +
"is important is because a platform that inherits from this " +
"platform and wishes to add execution properties must use the " +
"same field remote_execution_properties/exec_properties that " +
"the parent platform uses. This attribute must be set to False if the " +
"exec_properties attribute is set."
),
mandatory = True,
),
},
environ = [
AUTOCONF_ROOT,
DOCKER_PATH,
],
implementation = _rbe_autoconfig_impl,
local = True,
)
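# Illustrative WORKSPACE usage of the rbe_autoconfig macro defined below.
# The load path and target name are assumptions, not taken from this file:
#
#     load("@bazel_toolchains//rules:rbe_repo.bzl", "rbe_autoconfig")
#
#     rbe_autoconfig(
#         name = "rbe_default",
#         # With the default use_checked_in_confs = "Try", checked-in configs
#         # are reused when a matching Bazel version and container are found;
#         # otherwise the toolchain container is pulled and configs generated.
#     )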
def rbe_autoconfig(
name,
base_container_digest = None,
bazel_version = None,
bazel_rc_version = None,
toolchain_config_spec_name = None,
config_repos = None,
create_cc_configs = True,
create_java_configs = True,
create_testdata = False,
create_versions = True,
detect_java_home = False,
digest = None,
env = None,
exec_compatible_with = None,
exec_properties = None,
export_configs = False,
java_home = None,
java_version = None,
os_family = None,
tag = None,
toolchain_config_suite_spec = default_toolchain_config_suite_spec(),
registry = None,
repository = None,
target_compatible_with = None,
use_checked_in_confs = CHECKED_IN_CONFS_TRY,
use_legacy_platform_definition = True):
""" Creates a repository with toolchain configs generated for a container image.
This macro wraps (and simplifies) invocation of _rbe_autoconfig rule.
Use this macro in your WORKSPACE.
Args:
name: Name of the rbe_autoconfig repository target.
base_container_digest: Optional. If the container to use for the RBE build
extends from the container defined in the toolchain_config_suite_spec
(by default, the rbe-ubuntu16-04 image), you can pass the digest
(sha256 sum) of the base container using this attr.
The rule will try to use of checked-in configs, if possible.
bazel_version: The version of Bazel to use to generate toolchain configs.
Use only (major, minor, patch), e.g., '0.20.0'. Default is "local"
which means the same version of Bazel that is currently running will
be used. If local is a non-release version, rbe_autoconfig will fall back
to using the latest release version (see _BAZEL_VERSION_FALLBACK).
Note, if configs are not found for a patch version, rule will attempt
to find ones for the corresponding x.x.0 version. So if you are using
Bazel 0.25.2, and configs are not found for that version, but are
available for 0.25.0, those will be used instead. Note: this is only
the case if use_checked_in_confs != "False" (string 'False').
bazel_rc_version: The rc (for the given version of Bazel) to use.
Must be published in https://releases.bazel.build. E.g. 2.
toolchain_config_spec_name: Optional. String. Override default config
defined in toolchain_config_suite_spec.
If export_configs is True, this value is used to set the name of the
toolchain config spec to be generated.
config_repos: Optional. List of additional external repos corresponding to
configure like repo rules that need to be produced in addition to
local_config_cc.
create_cc_configs: Optional. Specifies whether to generate C/C++ configs.
Defaults to True.
create_java_configs: Optional. Specifies whether to generate java configs.
Defaults to True.
create_testdata: Optional. Specifies whether to generate additional testing
only outputs. Defaults to False.
create_versions: Specifies whether to generate a versions.bzl
file in 'output_base' of the 'toolchain_config_suite_spec'.
This option is temporary while the migration to using the
file generated by this rule is taking place.
Defaults to True.
digest: Optional. The digest of the image to pull.
Should not be set if 'tag' is used.
Must be set together with 'registry' and 'repository'.
detect_java_home: Optional. Default False. Should only be set
to True if 'create_java_configs' is also True. If set to True the rule
will attempt to read the JAVA_HOME env var from the container.
Note: if java_home is not set and this is set to False, the rule will
attempt to find a value of java_home in a compatible
'toolchain_config_spec', fall back to using the 'default_java_home' in
the 'toolchain_config_suite_spec', fall back to turning on 'detect_java_home'
(unless use_checked_in_confs = Force was set), or otherwise fail with an
informative error.
env: dict. Optional. Additional environment variables that will be set when
running the Bazel command to generate the toolchain configs.
Set to values for marketplace.gcr.io/google/rbe-ubuntu16-04 container.
Note: Do not pass a custom JAVA_HOME via env, use java_home attr instead.
exec_compatible_with: Optional. List of constraints to add to the produced
toolchain/platform targets (e.g., ["@bazel_tools//platforms:linux"] in the
exec_compatible_with/constraint_values attrs, respectively.
exec_properties: Optional. A string->string dict containing execution
properties to be used when creating the underlying platform. When
providing this attribute use_legacy_platform_definition must be set
to False. Note that the container image property must not be specified
via this attribute.
export_configs: Optional, default False. Whether to copy generated configs
(if they are generated) to the 'output_base' defined in
'toolchain_config_suite_spec'. If set to False, a configs.tar file
# System library imports
from __future__ import print_function
from shutil import copyfile
import pickle
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from PyQt5 import QtWidgets, QtCore
import sys, smtplib, json, ssl, random, string, webbrowser, csv
from datetime import datetime
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.utils import formataddr
from email.header import Header
from string import Template
# Form imports
from form_mainForm import Ui_mainForm
from form_dialogJSONCreds import Ui_formJSONCreds
from form_emailSettings import Ui_form_emailSettings
from form_checkEmail import Ui_form_checkEmail
from form_editUser import Ui_form_editUser
from form_regUser import Ui_form_regUser
from form_loader import Ui_form_loader
# Constant initialization
SCOPES = [
'https://www.googleapis.com/auth/admin.directory.user',
'https://www.googleapis.com/auth/admin.directory.orgunit'
]
CREDENTIALS = 'credentials.json'
SETTINGS = 'settings.json'
DIRECTORY_API = None
PAYLOAD = {
'orgUnits': None,
'settings': None
}
EDITABLE_ID = None
application = None
_dialogJSON = None
_emailSettings = None
_checkEmail = None
_dialogEditUser = None
_dialogRegUser = None
_loader = None
# Function returning the full path to a file
getFullPath = lambda _path = '': os.path.join(os.path.dirname(__file__), _path)
# Function generating a random string
getRandomString = lambda _length = 8: ''.join(random.choice(string.ascii_letters) for i in range(_length))
# Function showing a confirmation dialog and returning a boolean answer
confirm = lambda _header = 'HEADER_IS_NOT_SET', _message = 'MESSAGE_IS_NOT_SET': QtWidgets.QMessageBox.question(None, _header, _message, QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No) == QtWidgets.QMessageBox.Yes
# Function showing a file-open dialog (returns the full path to the file)
getFile = lambda _header = 'HEADER_IS_NOT_SET', _type = '': QtWidgets.QFileDialog.getOpenFileName(None, _header, '', _type, options=QtWidgets.QFileDialog.Options())
# Function showing a file-save dialog (returns the full path to the file)
saveFile = lambda _header = 'HEADER_IS_NOT_SET', _type = '': QtWidgets.QFileDialog.getSaveFileName(None, _header, '', _type, options=QtWidgets.QFileDialog.Options())
# Procedure showing an alert window
def alert(_header:str = 'HEADER_IS_NOT_SET', _message:str = 'MESSAGE_IS_NOT_SET', _type:str = 'information'):
if _type == 'information':
QtWidgets.QMessageBox.information(None, _header, _message, QtWidgets.QMessageBox.Ok)
elif _type == 'warning':
QtWidgets.QMessageBox.warning(None, _header, _message, QtWidgets.QMessageBox.Ok)
elif _type == 'critical':
QtWidgets.QMessageBox.critical(None, _header, _message, QtWidgets.QMessageBox.Ok)
# Recursive function collecting organizational units
def collectOrgUnits(_orgUnits = None, _qTree = None):
_returned = []
for orgUnit in _orgUnits:
_secReturn = {
'name': orgUnit['name'],
'orgUnitId': orgUnit['orgUnitId'],
'orgUnitPath': orgUnit['orgUnitPath'],
'in': None
}
_newTree = QtWidgets.QTreeWidgetItem(_qTree)
_newTree.setText(0, orgUnit['name'])
_check = DIRECTORY_API.orgunits().list(customerId='my_customer', orgUnitPath=orgUnit.get('orgUnitPath')).execute()
if _check.get('organizationUnits'):
_secReturn.update({'in': collectOrgUnits(_orgUnits = _check.get('organizationUnits'), _qTree = _newTree)})
_returned.append(_secReturn)
return _returned
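# Illustrative shape of the structure returned by collectOrgUnits()
# (the unit names are made up for the example):
#
#     [{'name': 'Staff', 'orgUnitId': 'id:...', 'orgUnitPath': '/Staff',
#       'in': [{'name': 'IT', 'orgUnitId': 'id:...',
#               'orgUnitPath': '/Staff/IT', 'in': None}]}]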
# Function checking whether a file exists (relative paths)
checkFileExsist = lambda _file: os.path.isfile(getFullPath(_file))
# Initialization of the Directory API connection (Google Workspace)
def directoryAPI_exec():
if (checkFileExsist(CREDENTIALS)):
creds = None
if os.path.exists('rick.pickle'):
with open('rick.pickle', 'rb') as token:
creds = pickle.load(token)
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(CREDENTIALS, SCOPES)
creds = flow.run_local_server(port=0)
with open('rick.pickle', 'wb') as token:
pickle.dump(creds, token)
return build('admin', 'directory_v1', credentials=creds)
else:
return False
# Function substituting text into templates
def getFormattedText(_filename:str = None, _args:dict = None):
if (_filename != None):
with open(_filename, 'r') as _f:
return _f.read() if _args == None else Template(_f.read()).substitute(_args)
else:
return False
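# Example (illustrative): substitution into the mail template used by
# sendMail() below; the keys match the $mainbody and $year placeholders.
#
#     getFormattedText('email/template.mail',
#                      {'mainbody': '<p>Hello</p>', 'year': '2021'})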
# Function for sending e-mail via TLS SMTP
def sendMail(_payloads:list = None):
if (PAYLOAD.get('settings') != None) & (_payloads != None):
_email = PAYLOAD['settings']['email']
try:
_server = smtplib.SMTP_SSL(_email['address'], int(_email['port']))
_server.login(_email['login'], _email['password'])
for _unit in _payloads:
_message = MIMEMultipart('alternative')
_message['Subject'] = _unit['subject']
_message['From'] = formataddr((str(Header(PAYLOAD['settings']['names']['from'], 'utf-8')), _email['login']))
_message['To'] = _unit['to']
_message.attach(MIMEText(getFormattedText('email/template.mail', {'mainbody': _unit['message'], 'year': str(datetime.now().year)}), _subtype='html'))
_server.sendmail(_email['login'], _unit['to'], _message.as_string())
alert('Успешно!', 'Почта в очереди была отправлена!')
return True
except Exception as e:
alert('Ошибка!', str(e), 'warning')
return False
finally:
_server.quit()
else:
return False
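# Example (illustrative; the address is a placeholder): each queued item
# needs 'to', 'subject' and 'message' keys.
#
#     sendMail([{'to': 'user@example.org',
#                'subject': 'Test message',
#                'message': '<p>Hello</p>'}])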
# Function transliterating Cyrillic text
def transliterateCyrilic(_string:str = ''):
_alphabet = {
'а': 'a',
'б': 'b',
'в': 'v',
'г': 'g',
'д': 'd',
'е': 'e',
'ё': 'e',
'ж': 'zh',
'з': 'z',
'и': 'i',
'й': 'ii',
'к': 'k',
'л': 'l',
'м': 'm',
'н': 'n',
'о': 'o',
'п': 'p',
'р': 'r',
'с': 's',
'т': 't',
'у': 'y',
'ф': 'f',
'х': 'x',
'ц': 'c',
'ч': 'ch',
'ш': 'sh',
'щ': 'sh',
'ъ': '',
'ы': 'i',
'ь': '',
'э': 'e',
'ю': 'yu',
'я': 'ya',
' ': '.',
'-': '.'
}
if len(_string) != 0:
_returned = ''
for _char in _string.lower():
_returned += _alphabet[_char]
return _returned
else:
return False
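# Examples (illustrative); note that characters outside the table above
# (Latin letters, digits, punctuation other than space and '-') raise KeyError:
#
#     transliterateCyrilic('Иванов')  # -> 'ivanov'
#     transliterateCyrilic('Пётр')    # -> 'petr'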
# Initialization of the main form
class execute(QtWidgets.QMainWindow):
def __init__(self):
super(execute, self).__init__()
self.ui = Ui_mainForm()
self.ui.setupUi(self)
def actionUpdateData_triggered(self):
application.ui.treeOrgUnits.clear()
results = DIRECTORY_API.orgunits().list(customerId='my_customer').execute()
PAYLOAD.update({'orgUnits': collectOrgUnits(_orgUnits=results.get('organizationUnits'), _qTree = application.ui.treeOrgUnits)})
def treeOrgUnits_itemSelected(self, _item, _id):
_orgPath = []
_subItem = _item
while _subItem != None:
_orgPath.append(_subItem.text(0))
_subItem = _subItem.parent()
_orgPath.reverse()
_orgPath = '/' + '/'.join(_orgPath)
# print(_orgPath)
_users = DIRECTORY_API.users().list(customer='my_customer', query='orgUnitPath=\'{}\''.format(_orgPath)).execute()
self.ui.tableUsers.setRowCount(0)
for _user in _users['users']:
_position = self.ui.tableUsers.rowCount()
self.ui.tableUsers.insertRow(_position)
self.ui.tableUsers.setItem(_position, 0, QtWidgets.QTableWidgetItem(_user['name']['familyName']))
self.ui.tableUsers.setItem(_position, 1, QtWidgets.QTableWidgetItem(_user['name']['givenName']))
self.ui.tableUsers.setItem(_position, 2, QtWidgets.QTableWidgetItem(_user['primaryEmail']))
self.ui.tableUsers.setItem(_position, 3, QtWidgets.QTableWidgetItem(_user['id']))
def actionEmailSettings_triggered(self):
_emailSettings.exec()
def actionEmailCheckConnect_triggered(self):
_checkEmail.exec()
def buttonAddOrgUnit_clicked(self):
print('Placeholder for buttonAddOrgUnit_clicked()')
def buttonAddUser_clicked(self):
_dialogRegUser.exec()
def itemUsers_doubleClicked(self, _x, _y):
EDITABLE_ID = int(self.ui.tableUsers.item(_x, 3).text())
_user = DIRECTORY_API.users().get(userKey=EDITABLE_ID).execute()
_dialogEditUser.ui.editLastName.setText(_user['name']['familyName'])
_dialogEditUser.ui.editFirstName.setText(_user['name']['givenName'])
_dialogEditUser.ui.editPrimaryEmail.setText(_user['primaryEmail'])
_dialogEditUser.ui.editOrgUnitPath.setText(_user['orgUnitPath'])
_subRecEmail = '' if _user.get('recoveryEmail') == None else _user['recoveryEmail']
_dialogEditUser.ui.editRecoveryEmail.setText(_subRecEmail)
_subRecPhone = '' if _user.get('phones') == None else _user['phones'][0]['value']
_dialogEditUser.ui.editRecoveryMobilePhone.setText(_subRecPhone)
_extId = '' if _user.get('externalIds') == None else _user['externalIds'][0]['value']
_dialogEditUser.ui.editEmployeeId.setText(_extId)
_addresses = {
'work': '',
'home': ''
}
if _user.get('addresses') != None:
for _address in _user['addresses']:
_addresses.update({_address['type']: '{}, "{}"'.format(_addresses[_address['type']], _address['formatted'])})
_addresses.update({'work': _addresses['work'][2:]})
_addresses.update({'home': _addresses['home'][2:]})
_dialogEditUser.ui.editWorkAddress.setText(_addresses['work'])
_dialogEditUser.ui.editHomeAddress.setText(_addresses['home'])
if _user['suspended']:
_dialogEditUser.ui.comboEmployeeStatus.setCurrentIndex(1)
elif _user['archived']:
_dialogEditUser.ui.comboEmployeeStatus.setCurrentIndex(2)
else:
_dialogEditUser.ui.comboEmployeeStatus.setCurrentIndex(0)
if _user['changePasswordAtNextLogin']:
_dialogEditUser.ui.comboChangePassword.setCurrentIndex(1)
else:
_dialogEditUser.ui.comboChangePassword.setCurrentIndex(0)
_dialogEditUser.ui.editPassword.setText('****')
_dialogEditUser.exec()
# Initialization of the e-mail settings form
class dialogEmailSettings(QtWidgets.QDialog):
def __init__(self):
super(dialogEmailSettings, self).__init__()
self.ui = Ui_form_emailSettings()
self.ui.setupUi(self)
def buttonSave_clicked(self):
if PAYLOAD.get('settings') == None:
if (self.ui.editLogin.text() != '') & (self.ui.editPassword.text() != '') & (self.ui.editSMTPAddress.text() != '') & (self.ui.editSMTPPort.text() != ''):
_settingsJSON = open(SETTINGS, mode='w')
_settings = {
'email': {
'address': self.ui.editSMTPAddress.text(),
'port': self.ui.editSMTPPort.text(),
'login': self.ui.editLogin.text(),
'password': self.ui.editPassword.text()
}
}
_settingsJSON.write(json.dumps(_settings))
PAYLOAD.update({'settings': _settings})
_settingsJSON.close()
self.close()
application.show()
else:
alert('Внимание!', 'Все поля должны быть заполнены для сохранения!', 'warning')
else:
_settings = PAYLOAD['settings']
_settings.update({
'email': {
'address': self.ui.editSMTPAddress.text(),
'port': self.ui.editSMTPPort.text(),
'login': self.ui.editLogin.text(),
'password': self.ui.editPassword.text()
}
})
PAYLOAD.update({'settings': _settings})
_settingsJSON = open(SETTINGS, mode='w')
_settingsJSON.write(json.dumps(_settings))
_settingsJSON.close()
# Initialization of the credentials (CREDS) file upload form
class dialogJSONCreds(QtWidgets.QDialog):
def __init__(self):
super(dialogJSONCreds, self).__init__()
self.ui = Ui_formJSONCreds()
self.ui.setupUi(self)
def buttonFile_clicked(self):
_filename = getFile('Открыть файл полномочий', 'JSON (*.json)')
if _filename:
self.ui.lineEditFile.setText(_filename[0])
copyfile(_filename[0], CREDENTIALS)
def buttonSave_clicked(self):
try:
directoryAPI_exec()
except ValueError:
alert('Ошибка!', 'Предоставленный файл полномочий не обладает всеми полномочиями, либо он некорректный! Попробуйте ещё раз.', 'critical')
os.remove(CREDENTIALS)
else:
self.close()
# Initialization of the e-mail service connection test form
class dialogCheckEmail(QtWidgets.QDialog):
def __init__(self):
super(dialogCheckEmail, self).__init__()
self.ui = Ui_form_checkEmail()
self.ui.setupUi(self)
def buttonSend_clicked(self):
if self.ui.editEmail.text() != '':
sendMail([{
'to': self.ui.editEmail.text(),
'subject': 'Проверка подключения к SMTP-серверу',
'message': getFormattedText('email/checkConnect.mail')
}])
else:
alert('Внимание!', 'Для того, чтобы проверить подключение, нужно ввести корректный адрес электронной почты!', 'warning')
# Initialization of the user editing form
class dialogEditUser(QtWidgets.QDialog):
def __init__(self):
super(dialogEditUser, self).__init__()
self.ui = Ui_form_editUser()
self.ui.setupUi(self)
def buttonSave_clicked(self):
print('buttonSave_clicked')
def buttonCreatePassword_clicked(self):
if confirm('Подтвердите действие', 'Вы уверены, что хотите изменить пароль пользователю? Если у пользователя указан запасной адрес электронной почты, то ему придет письмо с новым паролем.\nТакже, пункт "Сменить пароль при следующем входе" будет установлен в значении "Да".'):
self.ui.editPassword.setText(getRandomString(10))
self.ui.comboChangePassword.setCurrentIndex(1)
def buttonCreateEmployeeId_clicked(self):
if PAYLOAD['settings'].get('employeeIdMask') != None:
print('create employee id...')
else:
alert('Внимание!', 'Вы не можете создать уникальный идентификатор работника, т.к. у вас нет маски генерации уникального идентификатора работника!', 'warning')
# Initialization of the form for registering one or more users
class dialogRegUser(QtWidgets.QDialog):
def __init__(self):
super(dialogRegUser, self).__init__()
self.ui = Ui_form_regUser()
self.ui.setupUi(self)
def buttonRegistration_clicked(self):
self.setCursor(QtCore.Qt.BusyCursor)
if self.ui.labelCSVSearch.text() != '':
if checkFileExsist(self.ui.labelCSVSearch.text()):
with open(self.ui.labelCSVSearch.text(), 'r') as _csv:
_reader = csv.DictReader(_csv)
_triggers = {
'primaryEmail': '',
'employeeId': '',
'isFirst': True
}
_formattedUsers = []
for _user in _reader:
if _triggers['isFirst']:
if (_user.get('lastname') != None) & (_user.get('firstname') != None) & (_user.get('recoveryEmail') != None) & (_user.get('primaryEmail') != None) & (_user.get('orgUnitPath') != None):
_triggers.update({
'primaryEmail': _user['primaryEmail'] if _user['primaryEmail'].find('@') == -1 else False,
'employeeId': False if _user.get('employeeId') == None else ('auto' if _user['employeeId'] == 'auto' else 'manual')
})
_triggers.update({'isFirst': False})
else:
alert('Внимание!', 'Не все обязательные поля указаны в таблице!', 'warning')
break
_usersPayload = {
'lastname': _user['lastname'][0:1].upper() + _user['lastname'][1:].lower(),
'firstname': _user['firstname'][0:1].upper() + _user['firstname'][1:].lower(),
'recoveryEmail': _user['recoveryEmail'],
'primaryEmail': '{}.{}@{}'.format(transliterateCyrilic(_user['lastname']), transliterateCyrilic(_user['firstname']), _triggers['primaryEmail']) if _triggers['primaryEmail'] else _user['primaryEmail'],
'orgUnitPath': _user['orgUnitPath'],
'password': getRandomString(),
'recoveryPhone': _user['recoveryPhone'] if _user.get('recoveryPhone') != None else '',
'employeeId': '' if not _triggers['employeeId'] else ((_user['employeeId'] if _user.get('employeeId') != None else '') if _triggers['employeeId'] == 'manual' else 'autoDef'),
'workAddress': _user['workAddress'] if _user.get('workAddress') != None else '',
'homeAddress': _user['homeAddress'] if _user.get('homeAddress') != None else '',
'changePassword': True if _user.get('changePassword') == None else (True if _user['changePassword'] == "TRUE" else False),
'employeeStatus': 'Active' if _user.get('employeeStatus') == None else ('Active' if _user['employeeStatus'] == '' else _user['employeeStatus'][0:1].upper() + _user['employeeStatus'][1:].lower())
}
if (_usersPayload['lastname'] != '') & (_usersPayload['firstname'] != '') & (_usersPayload['recoveryEmail'] != '') & (_usersPayload['primaryEmail'] != '') & (_usersPayload['orgUnitPath'] != ''):
_formattedUsers.append(_usersPayload)
else:
alert('Внимание!', 'У регистрируемых пользователей не хватает данных! Перепроверьте данные в таблице.', 'warning')
_registratedUsers = []
_sendMail = []
_loader.setProgressBar(0, len(_formattedUsers))
_loader.exec()
_counter = 0
# Copyright (c) 2020, <NAME>, Honda Research Institute Europe GmbH, and
# Technical University of Darmstadt.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of <NAME>, Honda Research Institute Europe GmbH,
# or Technical University of Darmstadt, nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <NAME>, HONDA RESEARCH INSTITUTE EUROPE GMBH,
# OR TECHNICAL UNIVERSITY OF DARMSTADT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from typing import List, Optional, Tuple, Union
import torch as to
import torch.nn as nn
from torch.jit import ScriptModule, export, script
import pyrado
from pyrado.policies.base import Policy
from pyrado.policies.initialization import init_param
from pyrado.utils.data_processing import correct_atleast_2d
from pyrado.utils.data_types import EnvSpec
class PolySplineTimePolicy(Policy):
"""A purely time-based policy, were the output is determined by a polynomial function satisfying given conditions"""
name: str = "pst"
def __init__(
self,
spec: EnvSpec,
dt: float,
t_end: float,
cond_lvl: str,
cond_final: Optional[Union[to.Tensor, List[float], List[List[float]]]] = None,
cond_init: Optional[Union[to.Tensor, List[float], List[List[float]]]] = None,
t_init: float = 0.0,
overtime_behavior: str = "hold",
init_param_kwargs: Optional[dict] = None,
use_cuda: bool = False,
):
"""
Constructor
:param spec: environment specification
:param dt: time step [s]
:param t_end: final time [s], relative to `t_init`
:param cond_lvl: highest level of the condition, so far, only velocity 'vel' and acceleration 'acc' level
conditions on the polynomial are supported. These need to be consistent with the actions.
:param cond_final: final condition for the least squares problem, needs to be of shape [X, dim_act] where X is
2 if `cond_lvl == 'vel'` and 4 if `cond_lvl == 'acc'`
:param cond_init: initial condition for the least squares problem, needs to be of shape [X, dim_act] where X is
2 if `cond_lvl == 'vel'` and 4 if `cond_lvl == 'acc'`
:param t_init: initial time [s], also used on calling `reset()`, relative to `t_end`
:param overtime_behavior: determines how the policy acts when `t > t_end`, e.g. 'hold' to keep the last action
:param init_param_kwargs: additional keyword arguments for the policy parameter initialization
:param use_cuda: `True` to move the policy to the GPU, `False` (default) to use the CPU
"""
if t_end <= t_init:
raise pyrado.ValueErr(given=t_end, g_constraint=t_init)
if not overtime_behavior.lower() in ["hold", "zero"]:
raise pyrado.ValueErr(given=overtime_behavior, eq_constraint=("hold", "zero"))
# Call Policy's constructor
super().__init__(spec, use_cuda)
self._dt = float(dt)
self._t_end = float(t_end)
self._t_init = float(t_init)
self._t_curr = float(t_init)
self._overtime_behavior = overtime_behavior.lower()
# Determine the initial and final conditions used to compute the coefficients of the polynomials
if cond_lvl.lower() == "vel":
self._order = 3
elif cond_lvl.lower() == "acc":
self._order = 5
else:
raise pyrado.ValueErr(given=cond_lvl, eq_constraint="'vel' or 'acc'")
num_cond = (self._order + 1) // 2
if cond_final is not None:
# Given initialization
rand_init = False
cond_final = to.as_tensor(cond_final, dtype=to.get_default_dtype())
cond_final = correct_atleast_2d(to.atleast_2d(cond_final))
if cond_final.shape != (num_cond, spec.act_space.flat_dim):
raise pyrado.ShapeErr(given=cond_final, expected_match=(num_cond, spec.act_space.flat_dim))
else:
# Empty initialization
rand_init = True
cond_final = to.empty(num_cond, spec.act_space.flat_dim)
if cond_init is not None:
# Given initialization
cond_init = to.as_tensor(cond_init, dtype=to.get_default_dtype())
cond_init = correct_atleast_2d(to.atleast_2d(cond_init))
if cond_init.shape != (num_cond, spec.act_space.flat_dim):
raise pyrado.ShapeErr(given=cond_init, expected_match=(num_cond, spec.act_space.flat_dim))
else:
# Zero initialization
cond_init = to.zeros(num_cond, spec.act_space.flat_dim)
conds = to.cat([cond_init, cond_final], dim=0)
assert conds.shape[0] in [4, 6]
# Define the policy parameters
self.conds = nn.Parameter(conds, requires_grad=False)
# Store the polynomial coefficients for each output dimension in a matrix
self.coeffs = to.empty(self._order + 1, spec.act_space.flat_dim, device=self.device)
if rand_init:
# Call custom initialization function after PyTorch network parameter initialization
init_param_kwargs = init_param_kwargs if init_param_kwargs is not None else dict()
self.init_param(None, **init_param_kwargs)
else:
# Compute the coefficients to match the given (initial and) final conditions
self._compute_coefficients()
self.to(self.device)
@to.no_grad()
def _compute_coefficients(self):
"""
Compute the coefficients of the polynomial spline, and set them into the internal linear layer for storing.
"""
# Treat each action dimension separately
for idx_act in range(self.env_spec.act_space.flat_dim):
# Get the feature matrices for both points in time
feats = to.cat([self._compute_feats(self._t_init), self._compute_feats(self._t_end)], dim=0)
# Solve least squares problem
coeffs = to.lstsq(self.conds[:, idx_act], feats).solution
# Store
self.coeffs[:, idx_act] = coeffs.squeeze()
@to.no_grad()
def _compute_feats(self, t: float) -> to.Tensor:
"""
Compute the feature matrix depending on the time and the number of conditions.
:param t: time to evaluate at [s]
:return: feature matrix, either of shape [2, 4], or shape [3, 6]
"""
if self._order == 3:
# 3rd order polynomials, i.e. position and velocity level constraints
feats = to.tensor(
[
[1.0, t, t ** 2, t ** 3],
[0.0, 1.0, 2 * t, 3 * t ** 2],
]
)
else:
# 5th order polynomials, i.e. position, velocity, and acceleration level constraints
feats = to.tensor(
[
[1.0, t, t ** 2, t ** 3, t ** 4, t ** 5],
[0.0, 1.0, 2 * t, 3 * t ** 2, 4 * t ** 3, 5 * t ** 4],
[0.0, 0.0, 2.0, 6 * t, 12 * t ** 2, 20 * t ** 3],
]
)
return feats
def init_param(self, init_values: to.Tensor = None, **kwargs):
if init_values is None:
init_param(self.conds, **kwargs)
else:
self.param_values = init_values # ignore the IntelliJ warning
def reset(self, **kwargs):
self._t_curr = self._t_init
def forward(self, obs: Optional[to.Tensor] = None) -> to.Tensor:
# Check in which time regime the policy/environment is currently in
if self._t_curr < self._t_end:
# Get a vector of powers of the current time
t_powers = to.tensor(
[self._t_curr ** o for o in range(self._order + 1)], dtype=self.coeffs.dtype, device=self.device
)
# Compute the action
act = to.mv(self.coeffs.T, t_powers)
elif self._overtime_behavior == "hold":
# Get a vector of powers of the current time
t_powers = to.tensor(
[self._t_end ** o for o in range(self._order + 1)], dtype=self.coeffs.dtype, device=self.device
)
# Compute the action
act = to.mv(self.coeffs.T, t_powers)
else: # self._overtime_behavior == "zero"
act = to.zeros(self.env_spec.act_space.shape, dtype=self.coeffs.dtype)
# Advance the internal time counter
self._t_curr += self._dt
return to.atleast_1d(act)
def script(self) -> ScriptModule:
cond_lvl = "vel" if self._order == 3 else "acc"
cond_init, cond_final = to.chunk(self.conds, 2)
return script(
TraceablePolySplineTimePolicy(
spec=self.env_spec,
dt=self._dt,
t_end=self._t_end,
cond_lvl=cond_lvl,
cond_final=cond_final,
cond_init=cond_init,
t_init=self._t_init,
overtime_behavior=self._overtime_behavior,
)
)
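# Illustrative usage sketch (not from this module): a 1-dimensional action
# driven from 0 to 1 within 2 s on velocity level. The environment spec, the
# BoxSpace constructor and its arguments are assumptions of this sketch.
#
#     spec = EnvSpec(obs_space=..., act_space=BoxSpace(-1.0, 1.0, shape=1))
#     policy = PolySplineTimePolicy(
#         spec, dt=0.01, t_end=2.0, cond_lvl="vel",
#         cond_final=[[1.0], [0.0]],  # reach position 1 with zero velocity
#         cond_init=[[0.0], [0.0]],   # start at rest in 0
#     )
#     act = policy(None)  # purely time-based, the observation is ignored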
class TraceablePolySplineTimePolicy(nn.Module):
"""
A scriptable version of `PolySplineTimePolicy`.
We could try to make `PolySplineTimePolicy` itself scriptable, but that won't work anyways due to `Policy` not
being scriptable. Better to just write another class.
"""
name: str = "pst"
# Attributes
input_size: int
output_size: int
dt: float
t_end: float
t_init: float
t_curr: float
overtime_behavior: str
act_space_shape: Tuple[
int,
]
act_space_flat_dim: int
def __init__(
self,
spec: EnvSpec,
dt: float,
t_end: float,
cond_lvl: str,
cond_final: Union[to.Tensor, List[float], List[List[float]]],
cond_init: Union[to.Tensor, List[float], List[List[float]]],
t_init: float = 0.0,
overtime_behavior: str = "hold",
):
"""
In contrast to PolySplineTimePolicy, this constructor needs to be called with learned / working values for
`cond_final` and `cond_init`.
:param spec: environment specification
:param dt: time step [s]
:param t_end: final time [s], relative to `t_init`
:param cond_lvl: highest level of the condition, so far, only velocity 'vel' and acceleration 'acc' level
conditions on the polynomial are supported. These need to be consistent with the actions.
:param cond_final: final condition for the least squares problem, needs to
be of shape [X, dim_act] where X is 2 if `cond_lvl == 'vel'` and 4 if
`cond_lvl == 'acc'`
"""
from math import pi
from cmath import sqrt, log, atan
import pkgutil
import numpy as np
from io import StringIO
import scipy.interpolate
from flavio.physics.running import running
from flavio.physics import ckm
from flavio.math.functions import li2, zeta
from functools import lru_cache
from flavio.config import config
# functions for C9eff
def h(s, mq, mu):
"""Fermion loop function as defined e.g. in eq. (11) of hep-ph/0106067v2."""
if mq == 0.:
return 8/27. + (4j*pi)/9. + (8 * log(mu))/9. - (4 * log(s))/9.
if s == 0.:
return -4/9. * (1 + log(mq**2/mu**2))
z = 4 * mq**2/s
if z > 1:
A = atan(1/sqrt(z-1))
else:
A = log((1+sqrt(1-z))/sqrt(z)) - 1j*pi/2.
return (-4/9. * log(mq**2/mu**2) + 8/27. + 4/9. * z
-4/9. * (2 + z) * sqrt(abs(z - 1)) * A)
def Y(q2, wc, par, scale, qiqj):
"""Function $Y$ that contains the contributions of the matrix
elements of four-quark operators to the effective Wilson coefficient
$C_9^{\mathrm{eff}}=C_9 + Y(q^2)$.
See e.g. eq. (10) of 0811.1214v5."""
mb = running.get_mb_pole(par)
mc = running.get_mc_pole(par)
F_c = 4/3.*wc['C1_'+qiqj] + wc['C2_'+qiqj] + 6*wc['C3_'+qiqj] + 60*wc['C5_'+qiqj]
F_b = 7*wc['C3_'+qiqj] + 4/3.*wc['C4_'+qiqj] + 76*wc['C5_'+qiqj] + 64/3.*wc['C6_'+qiqj]
F_u = wc['C3_'+qiqj] + 4/3.*wc['C4_'+qiqj] + 16*wc['C5_'+qiqj] + 64/3.*wc['C6_'+qiqj]
F_4 = 4/3.*wc['C3_'+qiqj] + 64/9.*wc['C5_'+qiqj] + 64/27.*wc['C6_'+qiqj]
return ( h(s=q2, mq=mc, mu=scale) * F_c
- 1/2. * h(s=q2, mq=mb, mu=scale) * F_b
- 1/2. * h(s=q2, mq=0., mu=scale) * F_u
+ F_4 )
# eq. (43) of hep-ph/0412400v1
def Yu(q2, wc, par, scale, qiqj):
mc = running.get_mc_pole(par)
return ( (4/3.*wc['C1_'+qiqj] + wc['C2_'+qiqj])
* ( h(s=q2, mq=mc, mu=scale) - h(s=q2, mq=0, mu=scale) ))
# NNLO matrix elements of C_1 and C_2 needed for semi-leptonic B decays
_f_string = pkgutil.get_data('flavio.physics', 'data/arXiv-0810-4077v3/f_12_79.dat')
_f_array = np.loadtxt(StringIO(_f_string.decode('utf-8')))
_f_x = _f_array[::51*11,0]
_f_y = _f_array[:51*11:51,1]
_f_z = _f_array[:51,2]
_f_val_17 = _f_array[:,3].reshape(11,11,51) + 1j*_f_array[:,4].reshape(11,11,51)
_f_val_19 = _f_array[:,5].reshape(11,11,51) + 1j*_f_array[:,6].reshape(11,11,51)
_f_val_27 = _f_array[:,7].reshape(11,11,51) + 1j*_f_array[:,8].reshape(11,11,51)
_f_val_29 = _f_array[:,9].reshape(11,11,51) + 1j*_f_array[:,10].reshape(11,11,51)
_F_17 = scipy.interpolate.RegularGridInterpolator((_f_x, _f_y, _f_z), _f_val_17, bounds_error=False, fill_value=None)
_sh_F_19 = scipy.interpolate.RegularGridInterpolator((_f_x, _f_y, _f_z), _f_val_19, bounds_error=False, fill_value=None)
_F_27 = scipy.interpolate.RegularGridInterpolator((_f_x, _f_y, _f_z), _f_val_27, bounds_error=False, fill_value=None)
_sh_F_29 = scipy.interpolate.RegularGridInterpolator((_f_x, _f_y, _f_z), _f_val_29, bounds_error=False, fill_value=None)
@lru_cache(maxsize=config['settings']['cache size'])
def F_17(muh, z, sh):
"""Function $F_1^{(7)}$ giving the contribution of $O_7$ to the matrix element
of $O_1$, as defined in arXiv:0810.4077.
- `muh` is $\hat \mu=mu/m_b$,
- `z` is $z=m_c^2/m_b^2$,
- `sh` is $\hat s=q^2/m_b^2$.
"""
return _F_17([muh, z, sh])[0]
@lru_cache(maxsize=config['settings']['cache size'])
def F_19(muh, z, sh):
"""Function $F_1^{(9)}$ giving the contribution of $O_9$ to the matrix element
of $O_1$, as defined in arXiv:0810.4077.
- `muh` is $\hat \mu=mu/m_b$,
- `z` is $z=m_c^2/m_b^2$,
- `sh` is $\hat s=q^2/m_b^2$.
"""
if sh == 0:
return 0
return _sh_F_19([muh, z, sh])[0] / sh
@lru_cache(maxsize=config['settings']['cache size'])
def F_27(muh, z, sh):
"""Function $F_2^{(7)}$ giving the contribution of $O_7$ to the matrix element
of $O_2$, as defined in arXiv:0810.4077.
- `muh` is $\hat \mu=mu/m_b$,
- `z` is $z=m_c^2/m_b^2$,
- `sh` is $\hat s=q^2/m_b^2$.
"""
return _F_27([muh, z, sh])[0]
@lru_cache(maxsize=config['settings']['cache size'])
def F_29(muh, z, sh):
"""Function $F_2^{(9)}$ giving the contribution of $O_9$ to the matrix element
of $O_2$, as defined in arXiv:0810.4077.
- `muh` is $\hat \mu=mu/m_b$,
- `z` is $z=m_c^2/m_b^2$,
- `sh` is $\hat s=q^2/m_b^2$.
"""
if sh == 0:
return 0
return _sh_F_29([muh, z, sh])[0] / sh
def F_89(Ls, sh):
"""Function $F_8^{(9)}$ giving the contribution of $O_9$ to the matrix element
of $O_8$, as given in eq. (39) of hep-ph/0312063.
- `sh` is $\hat s=q^2/m_b^2$,
- `Ls` is $\ln(\hat s)$.
"""
return (104/9. - 32/27. * pi**2 + (1184/27. - 40/9. * pi**2) * sh
+ (14212/135. - 32/3 * pi**2) * sh**2 + (193444/945.
- 560/27. * pi**2) * sh**3 + 16/9. * Ls * (1 + sh + sh**2 + sh**3))
def F_87(Lmu, sh):
"""Function $F_8^{(7)}$ giving the contribution of $O_7$ to the matrix element
of $O_8$, as given in eq. (40) of hep-ph/0312063.
- `sh` is $\hat s=q^2/m_b^2$,
"""
if sh==0.:
return (-4*(33 + 24*Lmu + 6j*pi - 2*pi**2))/27.
return (-32/9. * Lmu + 8/27. * pi**2 - 44/9. - 8/9. * 1j * pi
+ (4/3. * pi**2 - 40/3.) * sh + (32/9. * pi**2 - 316/9.) * sh**2
+ (200/27. * pi**2 - 658/9.) * sh**3 - 8/9. * log(sh) * (sh + sh**2 + sh**3))
# Functions for the two-loop virtual corrections to the matrix elements of
# O1,2 in b->dl+l- (also needed for doubly Cabibbo-suppressed contributions
# to b>sl+l-). Taken from hep-ph/0403185v2 (Seidel)
def acot(x):
return pi/2.-atan(x)
@lru_cache(maxsize=config['settings']['cache size'])
def SeidelA(q2, mb, mu):
"""Function $A(s\equiv q^2)$ defined in eq. (29) of hep-ph/0403185v2.
"""
if q2==0:
return 1/729. * (833 + 120j*pi - 312 * log(mb**2/mu**2))
sh = min(q2/mb**2, 0.999)
z = 4 / sh
return (-(104)/(243) * log((mb**2)/(mu**2)) + (4 * sh)/(27 * (1 - sh)) *
(li2(sh) + log(sh) * log( 1 - sh)) + (1)/(729 * (1 - sh)**2) * (6 * sh *
(29 - 47 * sh) * log(sh) + 785 - 1600 * sh + 833 * sh**2 + 6 * pi * 1j * (20 -
49 * sh + 47 * sh**2)) - (2)/(243 * (1 - sh)**3) * (2 * sqrt( z - 1) * (-4 +
9 * sh - 15 * sh**2 + 4 * sh**3) * acot(sqrt(z - 1)) + 9 * sh**3 *
log(sh)**2 + 18 * pi * 1j * sh * (1 - 2 * sh) * log(sh)) + (2 * sh)/(243 *
(1 - sh)**4) * (36 * acot( sqrt(z - 1))**2 + pi**2 * (-4 + 9 * sh - 9 *
sh**2 + 3 * sh**3)))
@lru_cache(maxsize=config['settings']['cache size'])
def SeidelB(q2, mb, mu):
"""Function $A(s\equiv q^2)$ defined in eq. (30) of hep-ph/0403185v2.
"""
sh = min(q2/mb**2, 0.999)
z = 4 / sh
x1 = 1/2 + 1j/2 * sqrt(z - 1)
x2 = 1/2 - 1j/2 * sqrt(z - 1)
x3 = 1/2 + 1j/(2 * sqrt(z - 1))
x4 = 1/2 - 1j/(2 * sqrt(z - 1))
return ((8)/(243 * sh) * ((4 - 34 * sh - 17 * pi * 1j * sh) *
log((mb**2)/(mu**2)) + 8 * sh * log((mb**2)/(mu**2))**2 + 17 * sh * log(sh) *
log((mb**2)/(mu**2))) + ((2 + sh) * sqrt( z - 1))/(729 * sh) * (-48 *
log((mb**2)/(mu**2)) * acot( sqrt(z - 1)) - 18 * pi * log(z - 1) + 3 * 1j *
log(z - 1)**2 - 24 * 1j * li2(-x2/x1) - 5 * pi**2 * 1j + 6 * 1j * (-9 *
log(x1)**2 + log(x2)**2 - 2 * log(x4)**2 + 6 * log(x1) * log(x2) - 4 * log(x1) *
log(x3) + 8 * log(x1) * log(x4)) - 12 * pi * (2 * log(x1) + log(x3) + log(x4))) -
(2)/(243 * sh * (1 - sh)) * (4 * sh * (-8 + 17 * sh) * (li2(sh) + log(sh) *
log(1 - sh)) + 3 * (2 + sh) * (3 - sh) * log(x2/x1)**2 + 12 * pi * (-6 - sh +
sh**2) * acot( sqrt(z - 1))) + (2)/(2187 * sh * (1 - sh)**2) * (-18 * sh * (120 -
211 * sh + 73 * sh**2) * log(sh) - 288 - 8 * sh + 934 * sh**2 - 692 * sh**3 + 18 *
pi * 1j * sh * (82 - 173 * sh + 73 * sh**2)) - (4)/(243 * sh * (1 - sh)**3) *
(-2 * sqrt( z - 1) * (4 - 3 * sh - 18 * sh**2 + 16 * sh**3 - 5 * sh**4) * acot(
sqrt(z - 1)) - 9 * sh**3 * log(sh)**2 + 2 * pi * 1j * sh * (8 - 33 * sh + 51 *
sh**2 - 17 * sh**3) * log( sh)) + (2)/(729 * sh * (1 - sh)**4) * (72 * (3 - 8 *
sh + 2 * sh**2) * acot( sqrt(z - 1))**2 - pi**2 * (54 - 53 * sh - 286 * sh**2 +
612 * sh**3 - 446 * sh**4 + 113 * sh**5)) )
def SeidelC(q2, mb, mu):
"""Function $A(s\equiv q^2)$ defined in | |
# -*- coding: utf-8 -*-
import csv
import copy
import datetime
import logging
import io
import json
import re
import time
import zipfile
import codecs
from collections import defaultdict, deque, OrderedDict
from django.contrib import messages
from django.contrib.auth.decorators import login_required, user_passes_test
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.core.paginator import Paginator, InvalidPage, EmptyPage
from django.core.urlresolvers import reverse
from django.db import IntegrityError, transaction, connection
from django.db.models import Q
from django.db.models.expressions import RawSQL
from django.forms import ValidationError
from django.http import HttpResponse, JsonResponse, HttpResponseRedirect, \
Http404, QueryDict
from django.shortcuts import redirect, render, render_to_response
from django.template import RequestContext
from django.template import Template
from django.db.models.query_utils import DeferredAttribute
from django.core.exceptions import ObjectDoesNotExist
from django.views.decorators.csrf import csrf_protect
from django.views.generic.edit import FormView
from django.utils.decorators import method_decorator
from reversion.models import Revision, Version
from cobl.settings import LIMIT_TO, META_TAGS
from cobl.forms import AddCitationForm, \
AddCogClassTableForm, \
AddLanguageListForm, \
AddLanguageListTableForm, \
AddMeaningListForm, \
AddLexemeForm, \
AuthorCreationForm, \
AuthorDeletionForm, \
AuthorTableForm, \
AuthorRowForm, \
ChooseCognateClassForm, \
CladeCreationForm, \
CladeDeletionForm, \
CladeTableForm, \
CloneLanguageForm, \
CognateJudgementSplitTable, \
EditCitationForm, \
AddLanguageForm, \
EditLanguageListForm, \
EditLanguageListMembersForm, \
EditLexemeForm, \
EditMeaningForm, \
EditMeaningListForm, \
EditMeaningListMembersForm, \
EditSourceForm, \
LanguageListProgressForm, \
EditSingleLanguageForm, \
LexemeTableEditCognateClassesForm, \
LexemeTableLanguageWordlistForm, \
LexemeTableViewMeaningsForm, \
MeaningListTableForm, \
MergeCognateClassesForm, \
SearchLexemeForm, \
SndCompCreationForm, \
SndCompDeletionForm, \
SndCompTableForm, \
make_reorder_languagelist_form, \
make_reorder_meaninglist_form, \
AddMissingLexemsForLanguageForm, \
RemoveEmptyLexemsForLanguageForm, \
CognateClassEditForm, \
SourceDetailsForm, \
SourceEditForm, \
UploadBiBTeXFileForm, \
CognateJudgementFormSet, \
CognateClassFormSet, \
LexemeFormSet, \
AssignCognateClassesFromLexemeForm, \
LanguageDistributionTableForm, \
TwoLanguageWordlistTableForm
from cobl.lexicon.models import Author, \
Clade, \
CognateClass, \
CognateClassCitation, \
CognateJudgement, \
CognateJudgementCitation, \
Language, \
LanguageClade, \
LanguageList, \
LanguageListOrder, \
Lexeme, \
LexemeCitation, \
Meaning, \
MeaningList, \
SndComp, \
Source, \
NexusExport, \
MeaningListOrder, \
RomanisedSymbol
from cobl.lexicon.defaultModels import getDefaultLanguage, \
getDefaultLanguageId, \
getDefaultLanguagelist, \
getDefaultLanguagelistId, \
getDefaultMeaning, \
getDefaultMeaningId, \
getDefaultWordlist, \
getDefaultWordlistId, \
getDefaultSourceLanguage, \
setDefaultLanguage, \
setDefaultLanguageId, \
setDefaultLanguagelist, \
setDefaultMeaning, \
setDefaultMeaningId, \
setDefaultWordlist, \
setDefaultSourceLanguage
from cobl.shortcuts import render_template
from cobl.utilities import next_alias, \
anchored, oneline, logExceptions, fetchMarkdown
from cobl.languageCladeLogic import updateLanguageCladeRelations
from cobl.tables import SourcesTable, SourcesUpdateTable
import bibtexparser
from bibtexparser.bparser import BibTexParser
from bibtexparser.bwriter import BibTexWriter
from bibtexparser.bibdatabase import BibDatabase
from django_tables2 import RequestConfig
from dal import autocomplete
# -- Database input, output and maintenance functions ------------------------
@logExceptions
def view_changes(request, username=None, revision_id=None, object_id=None):
"""Recent changes"""
boring_models = [LanguageListOrder, LanguageList, MeaningList]
boring_model_ids = [ContentType.objects.get_for_model(m).id for m in
boring_models]
def interesting_versions(self):
return self.version_set.exclude(content_type_id__in=boring_model_ids)
Revision.add_to_class("interesting_versions", interesting_versions)
if not username:
recent_changes = Revision.objects.all().order_by("-id")
else:
recent_changes = Revision.objects.filter(
user__username=username).order_by("-id")
paginator = Paginator(recent_changes, 50)
try: # Make sure page request is an int. If not, deliver first page.
page = int(request.GET.get('page', '1'))
except ValueError:
page = 1
try: # If page request is out of range, deliver last page of results.
changes = paginator.page(page)
except (EmptyPage, InvalidPage):
changes = paginator.page(paginator.num_pages)
userIds = set(Revision.objects.values_list("user", flat=True).distinct())
contributors = sorted([(User.objects.get(id=user_id),
Revision.objects.filter(user=user_id).count())
for user_id in userIds
if user_id is not None],
key=lambda x: -x[1])
return render_template(request, "view_changes.html",
{"changes": changes,
"contributors": contributors})
@login_required
@logExceptions
def revert_version(request, revision_id):
"""Roll back the object saved in a Version to the previous Version"""
referer = request.META.get("HTTP_REFERER", "/")
revision_obj = Revision.objects.get(pk=revision_id)
revision_obj.revert() # revert all associated objects too
msg = "Rolled back revision %s" % (revision_obj.id)
messages.info(request, msg)
return HttpResponseRedirect(referer)
@logExceptions
def view_object_history(request, version_id):
version = Version.objects.get(pk=version_id)
obj = version.content_type.get_object_for_this_type(id=version.object_id)
fields = [field.name for field in obj._meta.fields]
versions = [[v.field_dict[f] for f in fields] + [v.id] for v in
Version.objects.get_for_object(obj).order_by(
"revision__date_created")]
return render_template(request, "view_object_history.html",
{"object": obj,
"versions": versions,
"fields": fields})
# -- General purpose queries and functions -----------------------------------
@logExceptions
def get_canonical_meaning(meaning):
"""Identify meaning from id number or partial name"""
try:
if meaning.isdigit():
meaning = Meaning.objects.get(id=meaning)
else:
meaning = Meaning.objects.get(gloss=meaning)
except Meaning.DoesNotExist:
raise Http404("Meaning '%s' does not exist" % meaning)
return meaning
@logExceptions
def get_canonical_language(language, request=None):
"""Identify language from id number or partial name"""
if not language:
raise Language.DoesNotExist
if language.isdigit():
language = Language.objects.get(id=language)
else:
try:
language = Language.objects.get(ascii_name=language)
except Language.DoesNotExist:
try:
language = Language.objects.get(
ascii_name__istartswith=language)
except Language.MultipleObjectsReturned:
if request:
messages.info(
request,
"There are multiple languages matching"
" '%s' in the database" % language)
language = Language.objects.get(ascii_name=Language.DEFAULT)
except Language.DoesNotExist:
if request:
messages.info(
request,
"There is no language named or starting"
" with '%s' in the database" % language)
language = Language.objects.get(ascii_name=Language.DEFAULT)
return language
@logExceptions
def get_prev_and_next_languages(request, current_language, language_list=None):
if language_list is None:
language_list = LanguageList.objects.get(
name=getDefaultLanguagelist(request))
elif isinstance(language_list, str):
language_list = LanguageList.objects.get(name=language_list)
ids = list(language_list.languages.exclude(
level0=0).values_list("id", flat=True))
try:
current_idx = ids.index(current_language.id)
except ValueError:
current_idx = 0
try:
prev_language = Language.objects.get(id=ids[current_idx - 1])
except IndexError:
try:
prev_language = Language.objects.get(id=ids[len(ids) - 1])
except IndexError:
prev_language = current_language
try:
next_language = Language.objects.get(id=ids[current_idx + 1])
except IndexError:
try:
next_language = Language.objects.get(id=ids[0])
except IndexError:
next_language = current_language
return (prev_language, next_language)
@logExceptions
def get_prev_and_next_meanings(request, current_meaning, meaning_list=None):
if meaning_list is None:
meaning_list = MeaningList.objects.get(
name=getDefaultWordlist(request))
elif isinstance(meaning_list, str):
meaning_list = MeaningList.objects.get(name=meaning_list)
meanings = list(meaning_list.meanings.all().order_by("meaninglistorder"))
ids = [m.id for m in meanings]
try:
current_idx = ids.index(current_meaning.id)
except ValueError:
current_idx = 0
try:
prev_meaning = meanings[current_idx - 1]
except IndexError:
prev_meaning = meanings[len(meanings) - 1]
try:
next_meaning = meanings[current_idx + 1]
except IndexError:
next_meaning = meanings[0]
return (prev_meaning, next_meaning)
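# A minimal sketch of the wrap-around behaviour above, assuming a hypothetical three-meaning list:
#
#   meanings = [m10, m20, m30]          # ids [10, 20, 30]
#   current is m10 (index 0) -> prev_meaning = meanings[0 - 1] = meanings[-1] = m30   (negative indexing, no IndexError)
#   current is m30 (index 2) -> meanings[2 + 1] raises IndexError, so next_meaning falls back to meanings[0] = m10
#
# "Previous" therefore wraps silently via negative indexing, while "next" wraps via the IndexError handler.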
@logExceptions
def get_prev_and_next_lexemes(request, current_lexeme):
"""Get the previous and next lexeme from the same language, ordered
by meaning and then alphabetically by form"""
lexemes = list(Lexeme.objects.filter(
language=current_lexeme.language).order_by(
"meaning", "phon_form", "romanised", "id"))
ids = [l.id for l in lexemes]
try:
current_idx = ids.index(current_lexeme.id)
except ValueError:
current_idx = 0
prev_lexeme = lexemes[current_idx - 1]
try:
next_lexeme = lexemes[current_idx + 1]
except IndexError:
next_lexeme = lexemes[0]
return (prev_lexeme, next_lexeme)
@logExceptions
def update_object_from_form(model_object, form):
"""Update an object with data from a form."""
assert set(form.cleaned_data).issubset(set(model_object.__dict__))
model_object.__dict__.update(form.cleaned_data)
model_object.save()
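# A minimal usage sketch for update_object_from_form, assuming a hypothetical form whose
# cleaned_data keys (e.g. 'gloss', 'notes') are all attributes of the Meaning instance being edited:
#
#   form = EditMeaningForm(request.POST)
#   if form.is_valid():
#       update_object_from_form(meaning, form)   # copies cleaned_data onto the instance and saves it
#
# The assert rejects forms carrying keys that are not attributes of the model object.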
# -- /language(s)/ ----------------------------------------------------------
@logExceptions
def get_canonical_language_list(language_list=None, request=None):
"""Returns a LanguageList object"""
try:
if language_list is None:
language_list = LanguageList.objects.get(name=LanguageList.DEFAULT)
elif language_list.isdigit():
language_list = LanguageList.objects.get(id=language_list)
else:
language_list = LanguageList.objects.get(name=language_list)
except LanguageList.DoesNotExist:
if request:
messages.info(
request,
"There is no language list matching"
" '%s' in the database" % language_list)
language_list = LanguageList.objects.get(name=LanguageList.DEFAULT)
return language_list
@logExceptions
def get_canonical_meaning_list(meaning_list=None, request=None):
"""Returns a MeaningList object"""
try:
if meaning_list is None:
meaning_list = MeaningList.objects.get(name=MeaningList.DEFAULT)
elif meaning_list.isdigit():
meaning_list = MeaningList.objects.get(id=meaning_list)
else:
meaning_list = MeaningList.objects.get(name=meaning_list)
except MeaningList.DoesNotExist:
if request:
messages.info(
request,
"There is no meaning list matching"
" '%s' in the database" % meaning_list)
meaning_list = MeaningList.objects.get(name=MeaningList.DEFAULT)
return meaning_list
@csrf_protect
@logExceptions
def view_language_list(request, language_list=None):
if request.user.is_authenticated:
current_list = get_canonical_language_list(language_list, request)
setDefaultLanguagelist(request, current_list.name)
else:
current_list = LanguageList.objects.get(name=LanguageList.PUBLICDEFAULT)
request.session['defaultLanguagelist'] = LanguageList.PUBLICDEFAULT
languages = current_list.languages.all().prefetch_related(
"lexeme_set", "lexeme_set__meaning",
"languageclade_set", "clades")
if (request.method == 'POST') and ('langlist_form' in request.POST):
languageListTableForm = AddLanguageListTableForm(request.POST)
try:
languageListTableForm.validate()
except Exception as e:
logging.exception(
'Exception in POST validation for view_language_list')
messages.error(request, 'Sorry, the form data sent '
'did not pass server side validation: %s' % e)
return HttpResponseRedirect(
reverse("view-language-list", args=[current_list.name]))
# Updating languages and gathering clades to update:
updateClades = languageListTableForm.handle(request)
        # Updating clade relations for changed languages:
if updateClades:
updateLanguageCladeRelations(languages=updateClades)
# Redirecting so that UA makes a GET.
return HttpResponseRedirect(
reverse("view-language-list", args=[current_list.name]))
elif (request.method == 'POST') and ('cloneLanguage' in request.POST):
# Cloning language and lexemes:
form = CloneLanguageForm(request.POST)
try:
form.validate()
form.handle(request, current_list)
# Redirect to newly created language:
messages.success(request, 'Language cloned.')
return HttpResponseRedirect(
reverse("view-language-list", args=[current_list.name]))
except Exception as e:
logging.exception('Problem cloning Language in view_language_list')
            messages.error(request, 'Sorry, a problem occurred '
'when cloning the language: %s' % e)
return HttpResponseRedirect(
reverse("view-language-list", args=[current_list.name]))
elif (request.method == 'GET') and ('exportCsv' in request.GET):
# Handle csv export iff desired:
return exportLanguageListCsv(request, languages)
meaningList = MeaningList.objects.get(name=getDefaultWordlist(request))
languages_editabletable_form = AddLanguageListTableForm()
exportMethod = ''
if request.method == 'GET':
if 'onlyexport' in request.path.split('/'):
exportMethod = 'onlyexport'
elif 'onlynotexport' in request.path.split('/'):
exportMethod = 'onlynotexport'
for lang in languages:
lang.idField = lang.id
lang.computeCounts(meaningList, exportMethod)
languages_editabletable_form.langlist.append_entry(lang)
otherLanguageLists = LanguageList.objects.exclude(name=current_list).all()
return render_template(request, "language_list.html",
{"languages": languages,
'lang_ed_form': languages_editabletable_form,
"current_list": current_list,
"otherLanguageLists": otherLanguageLists,
"wordlist": getDefaultWordlist(request),
"clades": Clade.objects.all()})
@csrf_protect
@logExceptions
def exportLanguageListCsv(request, languages=[]):
"""
@param languages :: [Language]
"""
fields = request.GET['exportCsv'].split(',')
rows = [l.getCsvRow(*fields) for l in languages]
rows.insert(0, ['"' + f + '"' for f in fields]) # Add headline
# Composing .csv data:
data = '\n'.join([','.join(row) for row in rows])
# Filename:
filename = "%s.%s.csv" % \
(datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d'),
getDefaultLanguagelist(request))
# Answering request:
response = HttpResponse(data)
response['Content-Disposition'] = ('attachment;filename="%s"' % filename)
    response['Content-Type'] = 'text/csv; charset=utf-8'
response['Pragma'] = 'public'
response['Expires'] = 0
response['Cache-Control'] = 'must-revalidate, post-check=0, pre-check=0'
return response
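# A minimal sketch of the payload produced above, assuming a request with
# ?exportCsv=ascii_name,iso_code and Language.getCsvRow returning plain string values
# (the field names here are illustrative):
#
#   "ascii_name","iso_code"
#   English,eng
#   German,deu
#
# Only the header row is quoted; fields are joined with ',' and rows with '\n'.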
@csrf_protect
@logExceptions
def view_clades(request):
if request.method == 'POST':
# Updating existing clades:
if 'clades' in request.POST:
cladeTableForm = CladeTableForm(request.POST)
# Flag to see if a clade changed:
cladeChanged = False
# Updating individual clades:
try:
cladeTableForm.validate()
cladeChanged = cladeTableForm.handle(request)
except Exception as e:
                logging.exception('Problem updating clades')
# tf_agents/replay_buffers/tfrecord_replay_buffer_test.py
# coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfrecord_replay_buffer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import time
import uuid
import weakref
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tf_agents.replay_buffers import tfrecord_replay_buffer
from tf_agents.trajectories import time_step
StepType = time_step.StepType
Data = collections.namedtuple('Data', ('step_type', 'value'))
# Pulls out the 'value' from Data encoded in a FeatureList, and reads the first
# (floating point) entry.
_key_by_first_value = lambda episode: episode[0].feature[1].float_list.value[0]
def read_feature_lists(fn):
feature_lists = []
for record in tf.compat.v1.io.tf_record_iterator(fn):
feature_list = tf.train.FeatureList.FromString(record)
feature_lists.append(feature_list)
return feature_lists
def create_feature_list(*value_lists):
feature_list = tf.train.FeatureList()
for values in value_lists:
values = np.array(values)
if values.dtype.kind == 'i':
feature_list.feature.add(
int64_list=tf.train.Int64List(value=values.astype(np.int64)))
elif values.dtype.kind == 'f':
feature_list.feature.add(
float_list=tf.train.FloatList(value=values.astype(np.float32)))
else:
feature_list.feature.add(
bytes_list=tf.train.BytesList(value=values.astype(np.bytes_)))
return feature_list
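# A small sketch of what create_feature_list builds for one frame of the test data
# (StepType.FIRST has the integer value 0):
#
#   fl = create_feature_list([StepType.FIRST], [1.0, -1.0])
#   # fl.feature[0] -> int64_list {value: [0]}
#   # fl.feature[1] -> float_list {value: [1.0, -1.0]}
#
# _key_by_first_value(episode) then keys an episode by feature[1].float_list.value[0], i.e. 1.0 here.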
class TFRecordReplayBufferTest(tf.test.TestCase, parameterized.TestCase):
_simple_data_spec = Data(step_type=tf.TensorSpec(shape=(), dtype=tf.int32),
value=tf.TensorSpec(shape=(2,), dtype=tf.float64))
def setUp(self):
super(TFRecordReplayBufferTest, self).setUp()
tf.compat.v1.enable_resource_variables()
self._tempdir = self.create_tempdir()
self._file_prefix = '%s/test-%s/' % (self._tempdir.full_path, uuid.uuid4())
def tearDown(self):
super(TFRecordReplayBufferTest, self).tearDown()
del self._tempdir
def compareEpisode(self, expected, received):
self.assertLen(expected, len(received))
for expected_frame, received_frame in zip(expected, received):
self.assertProtoEquals(expected_frame, received_frame)
def testCreateAndDoNothing(self):
rb = tfrecord_replay_buffer.TFRecordReplayBuffer(
experiment_id='exp',
file_prefix=self._file_prefix,
episodes_per_file=10,
data_spec=self._simple_data_spec)
rb = weakref.ref(rb) # RB should be garbage collected
try:
self.assertFalse(tf.io.gfile.glob(self._file_prefix + '*'))
except tf.errors.NotFoundError: # Some fs impls raise this error.
pass
self.assertFalse(rb())
def testFailsWithDifferingBatchSize(self):
data_0 = Data(
step_type=tf.constant([StepType.FIRST]),
value=tf.constant([[1.0, -1.0]], dtype=tf.float64))
data_1 = Data(
step_type=tf.constant([StepType.MID, StepType.LAST]),
value=tf.constant([[3.0, -3.0], [4.0, -4.0]], dtype=tf.float64))
rb = tfrecord_replay_buffer.TFRecordReplayBuffer(
experiment_id='exp',
file_prefix=self._file_prefix,
episodes_per_file=10,
data_spec=self._simple_data_spec)
self.evaluate(rb.add_batch(data_0))
with self.assertRaisesOpError(
r'Batch size does not match previous batch size: 2 vs. 1'):
self.evaluate(rb.add_batch(data_1))
def testAddBatch(self):
data_0 = Data(
step_type=tf.constant([StepType.FIRST, StepType.MID]),
value=tf.constant([[1.0, -1.0], [2.0, -2.0]], dtype=tf.float64))
data_1 = Data(
step_type=tf.constant([StepType.MID, StepType.LAST]),
value=tf.constant([[3.0, -3.0], [4.0, -4.0]], dtype=tf.float64))
rb = tfrecord_replay_buffer.TFRecordReplayBuffer(
experiment_id='exp',
file_prefix=self._file_prefix,
episodes_per_file=10,
data_spec=self._simple_data_spec)
self.evaluate(rb.add_batch(data_0))
self.evaluate(rb.add_batch(data_1))
rb = weakref.ref(rb) # RB should be garbage collected
files = tf.io.gfile.glob(self._file_prefix + '*')
self.assertLen(files, 2)
self.assertFalse(rb())
episode_0, episode_1 = sorted(
[read_feature_lists(fn) for fn in files],
key=_key_by_first_value)
expected_episode_0 = [
create_feature_list([StepType.FIRST], [1.0, -1.0]),
create_feature_list([StepType.MID], [3.0, -3.0])
]
expected_episode_1 = [
create_feature_list([StepType.MID], [2.0, -2.0]),
create_feature_list([StepType.LAST], [4.0, -4.0])
]
self.compareEpisode(expected_episode_0, episode_0)
self.compareEpisode(expected_episode_1, episode_1)
def testAsContextManager(self):
data_0 = Data(
step_type=tf.constant([StepType.FIRST, StepType.MID]),
value=tf.constant([[1.0, -1.0], [2.0, -2.0]], dtype=tf.float64))
rb = tfrecord_replay_buffer.TFRecordReplayBuffer(
experiment_id='exp',
file_prefix=self._file_prefix,
episodes_per_file=10,
data_spec=self._simple_data_spec)
with rb:
self.evaluate(rb.add_batch(data_0))
files = tf.io.gfile.glob(self._file_prefix + '*')
self.assertLen(files, 2)
def testAddBatchTwiceWithNewEpisode(self):
data_0 = Data(
step_type=tf.constant([StepType.FIRST, StepType.MID]),
value=tf.constant([[1.0, -1.0], [2.0, -2.0]], dtype=tf.float64))
data_1 = Data(
step_type=tf.constant([StepType.MID, StepType.FIRST]),
value=tf.constant([[3.0, -3.0], [4.0, -4.0]], dtype=tf.float64))
rb = tfrecord_replay_buffer.TFRecordReplayBuffer(
experiment_id='exp',
file_prefix=self._file_prefix,
episodes_per_file=1,
data_spec=self._simple_data_spec)
self.evaluate(rb.add_batch(data_0))
self.evaluate(rb.add_batch(data_1))
rb = weakref.ref(rb) # RB should be garbage collected
# There should be exactly 3 files because we force 1 episode per file.
files = tf.io.gfile.glob(self._file_prefix + '*')
self.assertLen(files, 3)
self.assertFalse(rb())
episode_0, episode_1, episode_2 = sorted(
[read_feature_lists(fn) for fn in files],
key=_key_by_first_value)
expected_episode_0 = [
create_feature_list([StepType.FIRST], [1.0, -1.0]),
create_feature_list([StepType.MID], [3.0, -3.0]),
]
expected_episode_1 = [
create_feature_list([StepType.MID], [2.0, -2.0])
]
expected_episode_2 = [
create_feature_list([StepType.FIRST], [4.0, -4.0])
]
self.compareEpisode(expected_episode_0, episode_0)
self.compareEpisode(expected_episode_1, episode_1)
self.compareEpisode(expected_episode_2, episode_2)
def testAddBatchTwiceWithNewFrameLimitPerFile(self):
data_0 = Data(
step_type=tf.constant([StepType.FIRST, StepType.MID]),
value=tf.constant([[1.0, -1.0], [2.0, -2.0]], dtype=tf.float64))
data_1 = Data(
step_type=tf.constant([StepType.MID, StepType.LAST]),
value=tf.constant([[3.0, -3.0], [4.0, -4.0]], dtype=tf.float64))
rb = tfrecord_replay_buffer.TFRecordReplayBuffer(
experiment_id='exp',
file_prefix=self._file_prefix,
episodes_per_file=10,
time_steps_per_file=1,
data_spec=self._simple_data_spec)
self.evaluate(rb.add_batch(data_0))
self.evaluate(rb.add_batch(data_1))
del rb
    # There should be exactly 4 files because we force 1 time step per file.
files = tf.io.gfile.glob(self._file_prefix + '*')
self.assertLen(files, 4)
episode_0, episode_1, episode_2, episode_3 = sorted(
[read_feature_lists(fn) for fn in files],
key=_key_by_first_value)
expected_episode_0 = [create_feature_list([StepType.FIRST], [1.0, -1.0])]
expected_episode_1 = [create_feature_list([StepType.MID], [2.0, -2.0])]
expected_episode_2 = [create_feature_list([StepType.MID], [3.0, -3.0])]
expected_episode_3 = [create_feature_list([StepType.LAST], [4.0, -4.0])]
self.compareEpisode(expected_episode_0, episode_0)
self.compareEpisode(expected_episode_1, episode_1)
self.compareEpisode(expected_episode_2, episode_2)
self.compareEpisode(expected_episode_3, episode_3)
def testAsDatasetFromOneFile(self):
episode_values = [
([StepType.FIRST], [1.0, -1.0]),
([StepType.MID], [2.0, -2.0]),
([StepType.MID], [3.0, -3.0]),
([StepType.FIRST], [4.0, -4.0]),
([StepType.LAST], [5.0, -5.0])
]
episode = [create_feature_list(*value) for value in episode_values]
# Maps e.g. 1 => ([StepType.FIRST], [1.0, -1.0])
# 2 => ([StepType.MID], [2.0, -2.0])
episode_map = dict((int(x[1][0]), x) for x in episode_values)
tf.io.gfile.makedirs(self._file_prefix[:self._file_prefix.rfind('/')])
with tf.io.TFRecordWriter(self._file_prefix + '_exp_0') as wr:
for step in episode:
wr.write(step.SerializeToString())
self._evaluate_written_records(episode_map, num_episodes=1)
@parameterized.parameters(
{'keep_prob': 0.0},
{'keep_prob': 0.25},
{'keep_prob': 0.5},
{'keep_prob': 0.9999})
def testAsDatasetBlockKeepProb(self, keep_prob):
episode_values = [
([StepType.FIRST], [1.0, -1.0]),
([StepType.MID], [2.0, -2.0]),
([StepType.MID], [3.0, -3.0]),
([StepType.MID], [4.0, -4.0]),
([StepType.LAST], [5.0, -5.0])
]
episode = [create_feature_list(*value) for value in episode_values]
tf.io.gfile.makedirs(self._file_prefix[:self._file_prefix.rfind('/')])
with tf.io.TFRecordWriter(self._file_prefix + '_exp_0') as wr:
for step in episode:
wr.write(step.SerializeToString())
rb = tfrecord_replay_buffer.TFRecordReplayBuffer(
experiment_id='exp',
file_prefix=self._file_prefix,
episodes_per_file=10,
seed=12345,
dataset_block_keep_prob=keep_prob,
data_spec=self._simple_data_spec)
batch_size = 1
num_steps = 2
ds = rb.as_dataset(sample_batch_size=batch_size, num_steps=num_steps)
# Get enough samples to form statistically significant counts
ds = ds.repeat(1000)
evaluate_gn = self.get_iterator_callback(ds)
frames = []
while True:
try:
frames.append(evaluate_gn())
except (tf.errors.OutOfRangeError, StopIteration):
break
# The total number of windows per file is 4:
# [[1.0, -1.0], [2.0, -2.0]]
# [[2.0, -2.0], [3.0, -3.0]]
# [[3.0, -3.0], [4.0, -4.0]]
# [[4.0, -4.0], [5.0, -5.0]]
#
    # We ask for 1000 repeats of a full read of the file, so we expect
    # 4 * 1000 = 4000 candidate windows in total.  The windows come back
    # shuffled.
if keep_prob == 0.0:
self.assertEmpty(frames)
else:
self.assertNear(
1.0 * len(frames) / 4000, keep_prob, err=0.025)
# Pull out the first values from block tensors, e.g. if the block value is
# [[1.0, -1.0], [2.0, -2.0]],
# then the first value is `1.0`.
first_values = np.asarray([x.value[0, 0, 0] for x in frames])
for allowed in (1.0, 2.0, 3.0, 4.0):
self.assertNear(1.0 * np.sum(first_values == allowed) / len(frames),
0.25,
err=0.025,
msg=('Expected to see value %g about 1/4 of the time, '
                           'but saw %d such occurrences of %d frames total.'
% (allowed, np.sum(first_values == allowed),
len(frames))))
def testAsDatasetFrom10Files(self):
episode_map = {}
for i in range(10):
c = 10 * i
episode_values = [
([StepType.FIRST], [c + 1.0, -c - 1.0]),
([StepType.MID], [c + 2.0, -c - 2.0]),
([StepType.MID], [c + 3.0, -c - 3.0]),
([StepType.FIRST], [c + 4.0, -c - 4.0]),
([StepType.LAST], [c + 5.0, -c - 5.0])
]
episode = [create_feature_list(*value) for value in episode_values]
# Maps e.g. 1 => ([StepType.FIRST], [1.0, -1.0])
# 2 => ([StepType.MID], [2.0, -2.0])
episode_map.update(
dict((int(x[1][0]), x) for x in episode_values))
tf.io.gfile.makedirs(self._file_prefix[:self._file_prefix.rfind('/')])
with tf.io.TFRecordWriter(self._file_prefix + '_exp_%d' % i) as wr:
for step in episode:
wr.write(step.SerializeToString())
self._evaluate_written_records(episode_map, num_episodes=10)
def get_iterator_callback(self, ds):
if tf.executing_eagerly():
it = iter(ds)
evaluate_gn = lambda: tf.nest.map_structure(lambda t: t.numpy(), next(it))
else:
it = tf.compat.v1.data.make_initializable_iterator(ds)
self.evaluate(it.initializer)
gn = it.get_next()
evaluate_gn = lambda: self.evaluate(gn)
return evaluate_gn
def _evaluate_written_records(self, episode_map, num_episodes):
rb = tfrecord_replay_buffer.TFRecordReplayBuffer(
experiment_id='exp',
file_prefix=self._file_prefix,
episodes_per_file=10,
seed=12345,
data_spec=self._simple_data_spec)
batch_size = 2
num_steps = 3
ds = rb.as_dataset(sample_batch_size=batch_size, num_steps=num_steps)
ds = ds.repeat() # Repeat forever to get a good statistical sample.
evaluate_gn = self.get_iterator_callback(ds)
def check_shape_dtype(val, spec):
self.assertEqual(val.dtype, spec.dtype.as_numpy_dtype)
self.assertEqual(
val.shape,
(batch_size, num_steps) + tuple(spec.shape.as_list()))
starting_time_step_counter = collections.defaultdict(lambda: 0)
num_trials = 512
for _ in range(num_trials):
gn_value = evaluate_gn()
tf.nest.map_structure(check_shape_dtype, gn_value, self._simple_data_spec)
# Flatten the batched gn_values, then unstack each component of Data()
# individually, and group the results into a batch-size list of Data().
flat_gn_value = tf.nest.flatten(gn_value)
squeezed = []
for y in flat_gn_value:
squeezed.append([np.squeeze(x, 0) for x in np.split(y, batch_size)])
for batch_item in zip(*squeezed):
# batch_item is now a Data() containing one batch entry.
batch_item = tf.nest.pack_sequence_as(gn_value, batch_item)
# Identify the frame associated with each of num_steps' value[0]
which_frame = batch_item.value[:, 0].squeeze().astype(np.int32)
# Add the first frame's value[0] to a counter for later statistical
# testing of evenness of sampling.
starting_time_step_counter[which_frame[0]] += 1
# Ensure frames are increasing in order (since value[0]s are in
# increasing order)
self.assertAllEqual(
np.diff(which_frame),
[1] * (num_steps - 1))
# Ensure values are correct in the form float([x, -x])
self.assertAllEqual(
batch_item.value,
np.vstack((which_frame, -which_frame)).T)
# Ensure step_type is the correct step_type matching this starting
# frame.
self.assertAllEqual(
batch_item.step_type,
# Look up the step type from episode_values.
[episode_map[x][0][0] for x in which_frame])
      # blocks start with value 1, 2, 3 (in multi-episode files also 11, 12, 13, and so on)
the System.Windows.Forms.ToolStripItem.RightToLeftChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnTextChanged(self, *args):
"""
OnTextChanged(self: ToolStripItem,e: EventArgs)
Raises the System.Windows.Forms.ToolStripItem.TextChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnVisibleChanged(self, *args):
"""
OnVisibleChanged(self: ToolStripItem,e: EventArgs)
Raises the System.Windows.Forms.ToolStripItem.VisibleChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def PerformClick(self):
"""
PerformClick(self: ToolStripItem)
Activates the System.Windows.Forms.ToolStripItem when it is clicked with the mouse.
"""
pass
def ProcessCmdKey(self, *args):
"""
ProcessCmdKey(self: ToolStripItem,m: Message,keyData: Keys) -> (bool,Message)
Processes a command key.
m: A System.Windows.Forms.Message,passed by reference,that represents the window message to
process.
keyData: One of the System.Windows.Forms.Keys values that represents the key to process.
Returns: false in all cases.
"""
pass
def ProcessDialogKey(self, *args):
"""
ProcessDialogKey(self: ToolStripItem,keyData: Keys) -> bool
Processes a dialog key.
keyData: One of the System.Windows.Forms.Keys values that represents the key to process.
Returns: true if the key was processed by the item; otherwise,false.
"""
pass
def ProcessMnemonic(self, *args):
"""
ProcessMnemonic(self: ToolStripItem,charCode: Char) -> bool
Processes a mnemonic character.
charCode: The character to process.
Returns: true in all cases.
"""
pass
def ResetBackColor(self):
"""
ResetBackColor(self: ToolStripItem)
This method is not relevant to this class.
"""
pass
def ResetDisplayStyle(self):
"""
ResetDisplayStyle(self: ToolStripItem)
This method is not relevant to this class.
"""
pass
def ResetFont(self):
"""
ResetFont(self: ToolStripItem)
This method is not relevant to this class.
"""
pass
def ResetForeColor(self):
"""
ResetForeColor(self: ToolStripItem)
This method is not relevant to this class.
"""
pass
def ResetImage(self):
"""
ResetImage(self: ToolStripItem)
This method is not relevant to this class.
"""
pass
def ResetMargin(self):
"""
ResetMargin(self: ToolStripItem)
This method is not relevant to this class.
"""
pass
def ResetPadding(self):
"""
ResetPadding(self: ToolStripItem)
This method is not relevant to this class.
"""
pass
def ResetRightToLeft(self):
"""
ResetRightToLeft(self: ToolStripItem)
This method is not relevant to this class.
"""
pass
def ResetTextDirection(self):
"""
ResetTextDirection(self: ToolStripItem)
This method is not relevant to this class.
"""
pass
def Select(self):
"""
Select(self: ToolStripItem)
Selects the item.
"""
pass
def SetBounds(self, *args):
"""
SetBounds(self: ToolStripItem,bounds: Rectangle)
Sets the size and location of the item.
bounds: A System.Drawing.Rectangle that represents the size and location of the
System.Windows.Forms.ToolStripItem
"""
pass
def SetVisibleCore(self, *args):
"""
SetVisibleCore(self: ToolStripItem,visible: bool)
Sets the System.Windows.Forms.ToolStripItem to the specified visible state.
visible: true to make the System.Windows.Forms.ToolStripItem visible; otherwise,false.
"""
pass
def ToString(self):
"""
ToString(self: ToolStripItem) -> str
Returns: A System.String containing the name of the System.ComponentModel.Component,if any,or null if
the System.ComponentModel.Component is unnamed.
"""
pass
def __enter__(self, *args):
"""
__enter__(self: IDisposable) -> object
Provides the implementation of __enter__ for objects which implement IDisposable.
"""
pass
def __exit__(self, *args):
"""
__exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object)
Provides the implementation of __exit__ for objects which implement IDisposable.
"""
pass
def __init__(self, *args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self, *args): # cannot find CLR constructor
"""
__new__(cls: type)
__new__(cls: type,text: str,image: Image,onClick: EventHandler)
__new__(cls: type,text: str,image: Image,onClick: EventHandler,name: str)
"""
pass
def __str__(self, *args):
pass
AccessibilityObject = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets the System.Windows.Forms.AccessibleObject assigned to the control.
Get: AccessibilityObject(self: ToolStripItem) -> AccessibleObject
"""
AccessibleDefaultActionDescription = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets or sets the default action description of the control for use by accessibility client applications.
Get: AccessibleDefaultActionDescription(self: ToolStripItem) -> str
Set: AccessibleDefaultActionDescription(self: ToolStripItem)=value
"""
AccessibleDescription = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets or sets the description that will be reported to accessibility client applications.
Get: AccessibleDescription(self: ToolStripItem) -> str
Set: AccessibleDescription(self: ToolStripItem)=value
"""
AccessibleName = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets or sets the name of the control for use by accessibility client applications.
Get: AccessibleName(self: ToolStripItem) -> str
Set: AccessibleName(self: ToolStripItem)=value
"""
AccessibleRole = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets or sets the accessible role of the control,which specifies the type of user interface element of the control.
Get: AccessibleRole(self: ToolStripItem) -> AccessibleRole
Set: AccessibleRole(self: ToolStripItem)=value
"""
Alignment = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets or sets a value indicating whether the item aligns towards the beginning or end of the System.Windows.Forms.ToolStrip.
Get: Alignment(self: ToolStripItem) -> ToolStripItemAlignment
Set: Alignment(self: ToolStripItem)=value
"""
AllowDrop = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets or sets a value indicating whether drag-and-drop and item reordering are handled through events that you implement.
Get: AllowDrop(self: ToolStripItem) -> bool
Set: AllowDrop(self: ToolStripItem)=value
"""
Anchor = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets or sets the edges of the container to which a System.Windows.Forms.ToolStripItem is bound and determines how a System.Windows.Forms.ToolStripItem is resized with its parent.
Get: Anchor(self: ToolStripItem) -> AnchorStyles
Set: Anchor(self: ToolStripItem)=value
"""
AutoSize = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets or sets a value indicating whether the item is automatically sized.
Get: AutoSize(self: ToolStripItem) -> bool
Set: AutoSize(self: ToolStripItem)=value
"""
AutoToolTip = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets or sets a value indicating whether to use the System.Windows.Forms.ToolStripItem.Text property or the System.Windows.Forms.ToolStripItem.ToolTipText property for the System.Windows.Forms.ToolStripItem ToolTip.
Get: AutoToolTip(self: ToolStripItem) -> bool
Set: AutoToolTip(self: ToolStripItem)=value
"""
Available = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets or sets a value indicating whether the System.Windows.Forms.ToolStripItem should be placed on a System.Windows.Forms.ToolStrip.
Get: Available(self: ToolStripItem) -> bool
Set: Available(self: ToolStripItem)=value
"""
BackColor = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets or sets the background color for the item.
Get: BackColor(self: ToolStripItem) -> Color
Set: BackColor(self: ToolStripItem)=value
"""
BackgroundImage = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets or sets the background image displayed in the item.
Get: BackgroundImage(self: ToolStripItem) -> Image
Set: BackgroundImage(self: ToolStripItem)=value
"""
BackgroundImageLayout = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets or sets the background image layout used for the System.Windows.Forms.ToolStripItem.
Get: BackgroundImageLayout(self: ToolStripItem) -> ImageLayout
Set: BackgroundImageLayout(self: ToolStripItem)=value
"""
Bounds = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets the size and location of the item.
Get: Bounds(self: ToolStripItem) -> Rectangle
"""
CanRaiseEvents = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets a value indicating whether the component can raise an event.
"""
CanSelect = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets a value indicating whether the item can be selected.
Get: CanSelect(self: ToolStripItem) -> bool
"""
ContentRectangle = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets the area where content,such as text and icons,can be placed within a System.Windows.Forms.ToolStripItem without overwriting background borders.
Get: ContentRectangle(self: ToolStripItem) -> Rectangle
"""
DefaultAutoToolTip = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets a value indicating whether to display the System.Windows.Forms.ToolTip that is defined as the default.
"""
DefaultDisplayStyle = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets a value indicating what is displayed on the System.Windows.Forms.ToolStripItem.
"""
DefaultMargin = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets the default margin of an item.
"""
DefaultPadding = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets the internal spacing characteristics of the item.
"""
DefaultSize = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets the default size of the item.
"""
DesignMode = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets a value that indicates whether the System.ComponentModel.Component is currently in design mode.
"""
DismissWhenClicked = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets a value indicating whether items on a System.Windows.Forms.ToolStripDropDown are hidden after they are clicked.
"""
DisplayStyle = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    )
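# A minimal IronPython usage sketch for the ToolStripItem members described above; it assumes the
# System.Windows.Forms assembly is loaded via clr and uses ToolStripButton, a concrete subclass,
# since no public ToolStripItem constructor is exposed here; handle_save is a placeholder handler:
#
#   import clr
#   clr.AddReference("System.Windows.Forms")
#   from System.Windows.Forms import ToolStrip, ToolStripButton
#
#   button = ToolStripButton("Save")
#   button.ToolTipText = "Save the current document"
#   button.Click += lambda sender, args: handle_save()
#   strip = ToolStrip()
#   strip.Items.Add(button)
#   button.PerformClick()          # raises the Click event programmatically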
if figures == 'window': # possibly save the output
pass
elif figures == "png":
f.savefig(f"{png_path}/{fig_title}.png")
plt.close()
elif figures == 'png+window':
f.savefig(f"{png_path}/{fig_title}.png")
else:
pass
#%%
def signals_to_master_signal_comparison(signals, master_signal, density = False):
""" Given an array of signals (as row vectors), compare it to a single signal and plot a kernel
density estimate, and calculate a line of best fit through the points with R**2 value.
Inputs:
signals | rank 2 | signals as rows. Even if there's only 1 signal, it still needs to be rank 2
master_signal | rank 2 | signal as a row, but has to be rank 2
density | boolean | If True, gaussian kernel density estimate for the points. Can be slow.
Returns:
signal_to_msignal_comparison | dict | keys:
xyzs | list of rank 2 arrays | entry in the list for each signal, xyz are rows.
line_xys | list of rank 2 arrays | entry in the list for each signal, xy are points to plot for the lines of best fit
cor_coefs | list | correlation coefficients between each signal and the master signal.
History:
2021_04_22 | MEG | Written.
2021_04_26 | MEG | Add check that the signals are of the same length.
"""
import numpy as np
from scipy.stats import gaussian_kde
import numpy.polynomial.polynomial as poly # used for lines of best fit through dem/source plots
n_signals, n_pixels = signals.shape # each signal is a row, observations of that are columns.
if n_pixels != master_signal.shape[1]:
raise Exception(f"The signals aren't of the same length (2nd dimension), as 'signals' is {n_pixels} long, but 'master_signal' is {master_signal.shape[1]} long. Exiting. ")
xyzs = [] # initiate
line_xys = []
cor_coefs = []
print(f"Starting to calculate the 2D kernel density estimates for the signals. Completed ", end = '')
for signal_n, signal in enumerate(signals): # signal is a row of signals, and loop through them.
# 1: Do the kernel density estimate
xy = np.vstack((master_signal, signal[np.newaxis,:])) # master signal will be on X and be the top row.
x = xy[:1,:]
y = xy[1:2,:]
if density:
z = gaussian_kde(xy)(xy)
idx = z.argsort() # need to be sorted so that when plotted, those with the highest z value go on top.
x, y, z = x[0,idx], y[0,idx], z[idx]
xyzs.append(np.vstack((x,y,z))) # 3 rows, for each of x,y and z
else:
xyzs.append(np.vstack((x,y,np.zeros(n_pixels)))) # if we're not doing the kernel density estimate, z value is just zeros.
# 2: calculate the lines of best fit
line_coefs = poly.polyfit(x, y, 1) # polynomial of order 1 (i.e. a line of best fit)
line_yvals = (poly.polyval(x, line_coefs)) # calculate the lines yvalues for all x points
line_xys.append(np.vstack((x, line_yvals))) # x vals are first row, y vals are 2nd row
# 3: And the correlation coefficient
cor_coefs.append(np.corrcoef(x, y)[1,0]) # which is a 2x2 matrix, but we just want the off diagonal (as thats the correlation coefficient between the signals)
print(f"{signal_n} ", end = '')
print('\n')
signal_to_msignal_comparison = {'xyzs' : xyzs,
'line_xys' : line_xys,
'cor_coefs' : cor_coefs}
return signal_to_msignal_comparison
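# A minimal usage sketch for signals_to_master_signal_comparison, with made-up synthetic data:
#
#   import numpy as np
#   n_pixels = 500
#   master = np.random.randn(1, n_pixels)                                        # must be rank 2 (one row)
#   signals = np.vstack((2 * master + 0.1 * np.random.randn(1, n_pixels),        # correlated with master
#                        np.random.randn(1, n_pixels)))                          # uncorrelated
#   comparison = signals_to_master_signal_comparison(signals, master, density = False)
#   print(comparison['cor_coefs'])        # first value close to 1, second close to 0
#
# With density = True, each entry of comparison['xyzs'] also carries a kernel density value per point.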
#%%
def create_all_ifgs(ifgs_r2, ifg_dates, max_n_all_ifgs = 1000):
"""Given a rank 2 of incremental ifgs, calculate all the possible ifgs that still step forward in time (i.e. if deformation is positive in all incremental ifgs,
it remains positive in all the returned ifgs.) If acquisition dates are provided, the tmeporal baselines of all the possible ifgs can also be found.
Inputs:
ifgs_r2 | rank 2 array | Interferograms as row vectors.
ifg_dates | list of strings | dates in the form YYYYMMDD_YYYYMMDD. As the interferograms are incremental, this should be the same length as the number of ifgs
Returns:
        ifgs_r2 | rank 2 array | Only the ifgs that are non-zero (the diagonal of the cube of pairwise differences is zero) and in the lower left corner (so deformation isn't reversed. )
History:
2021_04_13 | MEG | Written
        2021_04_19 | MEG | add functionality to calculate the temporal baselines of all possible ifgs.
2021_04_29 | MEG | Add functionality to handle networks with breaks in them.
"""
import numpy as np
from datetime import datetime, timedelta
import random
from icasar.aux2 import acquisitions_from_ifg_dates
def triange_lower_left_indexes(side_length):
""" For a square matrix of size side_length, get the index of all the values that are in the lower
left quadrant (i.e. all to the lower left of the diagonals).
Inputs:
side_length | int | side length of the square. e.g. 5 for a 5x5
Returns:
lower_left_indexes | rank 2 array | indexes of all elements below the diagonal.
History:
2021_04_13 | MEG | Written.
"""
import numpy as np
        zeros_array = np.ones((side_length, side_length))                                             # initiate as ones so none will be selected.
zeros_array = np.triu(zeros_array) # set the lower left to 0s
lower_left_indexes = np.argwhere(zeros_array == 0) # select only the lower lefts
return lower_left_indexes
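    # A small worked example of the helper above: for side_length = 3, np.triu leaves zeros only
    # strictly below the diagonal, so triange_lower_left_indexes(3) returns
    #     [[1, 0],
    #      [2, 0],
    #      [2, 1]]
    # i.e. the (row, column) positions of the ifgs that step forwards in time.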
n_ifgs, n_pixs = ifgs_r2.shape
# 1: Determine if the network is continuous, and if not split it into lists
ifg_dates_continuous = [] # list of the dates for a continuous network
ifgs_r2_continuous = [] # and the incremental interferograms in that network.
start_continuous_run = 0
for ifg_n in range(n_ifgs-1):
if (ifg_dates[ifg_n][9:] != ifg_dates[ifg_n+1][:8]): # if the dates don't agree
ifg_dates_continuous.append(ifg_dates[start_continuous_run:ifg_n+1]) # +1 as want to include the last date in the selection
ifgs_r2_continuous.append(ifgs_r2[start_continuous_run:ifg_n+1,])
start_continuous_run = ifg_n+1 # +1 so that when we index the next time, it doesn't include ifg_n
if ifg_n == n_ifgs -2: # of if we've got to the end of the list.
ifg_dates_continuous.append(ifg_dates[start_continuous_run:]) # select to the end.
ifgs_r2_continuous.append(ifgs_r2[start_continuous_run:,])
n_networks = len(ifg_dates_continuous) # get the number of connected networks.
# for item in ifgs_r2_continuous:
# print(item.shape)
# for item in ifg_dates_continuous:
# print(item)
# print('\n')
# import copy
# ifg_dates_copy = copy.copy(ifg_dates)
# for ifg_list in ifg_dates_continuous:
# for ifg in ifg_list:
# try:
# del ifg_dates_copy[ifg_dates_copy.index(ifg)]
# except:
# pass
# print(ifg_dates_copy)
# 2: Loop through each continuous network and make all possible ifgs.
ifgs_all_r2 = []
dates_all_r1 = []
for n_network in range(n_networks):
ifgs_r2_temp = ifgs_r2_continuous[n_network]
ifg_dates_temp = ifg_dates_continuous[n_network]
n_acq = ifgs_r2_temp.shape[0] + 1
# 2a: convert from daisy chain of incremental to a relative to a single master at the start of the time series.
acq1_def = np.zeros((1, n_pixs)) # deformation is 0 at the first acquisition
ifgs_cs = np.cumsum(ifgs_r2_temp, axis = 0) # convert from incremental to cumulative.
ifgs_cs = np.vstack((acq1_def, ifgs_cs)) # add the 0 at first time ifg to the other cumulative ones.
# 2b: create all possible ifgs
ifgs_cube = np.zeros((n_acq, n_acq, n_pixs)) # cube to store all possible ifgs in
for i in range(n_acq): # used to loop through each column
ifgs_cube[:,i,] = ifgs_cs - ifgs_cs[i,] # make one column (ie all the rows) by taking all the ifgs and subtracting one time from it
# 2c: Get only the positive ones (ie the lower left quadrant)
        lower_left_indexes = triange_lower_left_indexes(n_acq)                                       # get the indexes of the ifgs in the lower left corner (ie. non 0, and with unreversed deformation. )
ifgs_all_r2.append(ifgs_cube[lower_left_indexes[:,0], lower_left_indexes[:,1], :]) # get those ifgs and store as row vectors.
# 2d: Calculate the dates that the new ifgs run between.
acq_dates = acquisitions_from_ifg_dates(ifg_dates_temp) # get the acquisitions from the ifg dates.
        ifg_dates_all_r2 = np.empty([n_acq, n_acq], dtype='U17')                               # initiate an array that can hold unicode strings.
for row_n, date1 in enumerate(acq_dates): # loop through rows
for col_n, date2 in enumerate(acq_dates): # loop through columns
ifg_dates_all_r2[row_n, col_n] = f"{date2}_{date1}"
ifg_dates_all_r1 = list(ifg_dates_all_r2[lower_left_indexes[:,0], lower_left_indexes[:,1]]) # just get the lower left corner (like for the ifgs)
dates_all_r1.append(ifg_dates_all_r1)
# 3: convert lists back to a single matrix of all interferograms.
    ifgs_all_r2 = np.vstack(ifgs_all_r2)                                                              # list of rank 2 arrays to a single rank 2 array (ifgs as rows)
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import numpy as np
import os
import cv2
import torch
import tqdm
from iopath.common.file_io import g_pathmgr
import slowfast.utils.checkpoint as cu
import slowfast.utils.logging as logging
from slowfast.datasets.ava_helper import parse_bboxes_file
from slowfast.datasets.cv2_transform import scale, scale_boxes
from slowfast.datasets.utils import get_sequence
from slowfast.models import build_model
from slowfast.utils import misc
from slowfast.visualization.utils import process_cv2_inputs
# from slowfast.visualization.video_visualizer import VideoVisualizer
logger = logging.get_logger(__name__)
class AVAVisualizerWithPrecomputedBox:
"""
Visualize action predictions for videos or folder of images with precomputed
and ground-truth boxes in AVA format.
"""
def __init__(self, cfg):
"""
Args:
cfg (CfgNode): configs. Details can be found in
slowfast/config/defaults.py
"""
self.source = g_pathmgr.get_local_path(path=cfg.DEMO.INPUT_VIDEO)
self.fps = None
if g_pathmgr.isdir(self.source):
self.fps = cfg.DEMO.FPS
self.video_name = self.source.split("/")[-1]
self.source = os.path.join(
self.source, "{}_%06d.jpg".format(self.video_name)
)
else:
self.video_name = self.source.split("/")[-1]
self.video_name = self.video_name.split(".")[0]
self.cfg = cfg
self.cap = cv2.VideoCapture(self.source)
if self.fps is None:
self.fps = self.cap.get(cv2.CAP_PROP_FPS)
self.total_frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
self.display_width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
self.display_height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
if not self.cap.isOpened():
raise IOError("Video {} cannot be opened".format(self.source))
self.output_file = None
if cfg.DEMO.OUTPUT_FILE != "":
self.output_file = self.get_output_file(cfg.DEMO.OUTPUT_FILE)
self.pred_boxes, self.gt_boxes = load_boxes_labels(
cfg,
self.video_name,
self.fps,
self.display_width,
self.display_height,
)
self.seq_length = cfg.DATA.NUM_FRAMES * cfg.DATA.SAMPLING_RATE
self.no_frames_repeat = cfg.DEMO.SLOWMO
def get_output_file(self, path):
"""
Return a video writer object.
Args:
path (str): path to the output video file.
"""
return cv2.VideoWriter(
filename=path,
fourcc=cv2.VideoWriter_fourcc(*"mp4v"),
fps=float(30),
frameSize=(self.display_width, self.display_height),
isColor=True,
)
def get_input_clip(self, keyframe_idx):
"""
Get input clip from the video/folder of images for a given
keyframe index.
Args:
keyframe_idx (int): index of the current keyframe.
Returns:
clip (list of tensors): formatted input clip(s) corresponding to
the current keyframe.
"""
seq = get_sequence(
keyframe_idx,
self.seq_length // 2,
self.cfg.DATA.SAMPLING_RATE,
self.total_frames,
)
clip = []
for frame_idx in seq:
self.cap.set(cv2.CAP_PROP_POS_FRAMES, frame_idx)
was_read, frame = self.cap.read()
if was_read:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame = scale(self.cfg.DATA.TEST_CROP_SIZE, frame)
clip.append(frame)
else:
logger.error(
"Unable to read frame. Duplicating previous frame."
)
clip.append(clip[-1])
clip = process_cv2_inputs(clip, self.cfg)
return clip
def get_predictions(self):
"""
Predict and append prediction results to each box in each keyframe in
`self.pred_boxes` dictionary.
"""
# Set random seed from configs.
np.random.seed(self.cfg.RNG_SEED)
torch.manual_seed(self.cfg.RNG_SEED)
# Setup logging format.
logging.setup_logging(self.cfg.OUTPUT_DIR)
# Print config.
logger.info("Run demo with config:")
logger.info(self.cfg)
assert (
self.cfg.NUM_GPUS <= 1
), "Cannot run demo visualization on multiple GPUs."
# Build the video model and print model statistics.
model = build_model(self.cfg)
model.eval()
logger.info("Start loading model info")
misc.log_model_info(model, self.cfg, use_train_input=False)
logger.info("Start loading model weights")
cu.load_test_checkpoint(self.cfg, model)
logger.info("Finish loading model weights")
logger.info("Start making predictions for precomputed boxes.")
for keyframe_idx, boxes_and_labels in tqdm.tqdm(
self.pred_boxes.items()
):
inputs = self.get_input_clip(keyframe_idx)
boxes = boxes_and_labels[0]
boxes = torch.from_numpy(np.array(boxes)).float()
box_transformed = scale_boxes(
self.cfg.DATA.TEST_CROP_SIZE,
boxes,
self.display_height,
self.display_width,
)
# Pad frame index for each box.
box_inputs = torch.cat(
[
torch.full((box_transformed.shape[0], 1), float(0)),
box_transformed,
],
axis=1,
)
if self.cfg.NUM_GPUS:
# Transfer the data to the current GPU device.
if isinstance(inputs, (list,)):
for i in range(len(inputs)):
inputs[i] = inputs[i].cuda(non_blocking=True)
else:
inputs = inputs.cuda(non_blocking=True)
box_inputs = box_inputs.cuda()
preds = model(inputs, box_inputs)
preds = preds.detach()
if self.cfg.NUM_GPUS:
preds = preds.cpu()
boxes_and_labels[1] = preds
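    # After get_predictions() has run, each entry of self.pred_boxes has the following shape
    # (a sketch, assuming one keyframe with two person boxes):
    #
    #   self.pred_boxes[keyframe_idx] == [boxes, preds]
    #     boxes : two [x1, y1, x2, y2] lists in the original display resolution
    #     preds : a detached CPU tensor of shape (2, num_classes) with per-box action scores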
# def draw_video(self):
# """
# Draw predicted and ground-truth (if provided) results on the video/folder of images.
# Write the visualized result to a video output file.
# """
# all_boxes = merge_pred_gt_boxes(self.pred_boxes, self.gt_boxes)
# common_classes = (
# self.cfg.DEMO.COMMON_CLASS_NAMES
# if len(self.cfg.DEMO.LABEL_FILE_PATH) != 0
# else None
# )
# video_vis = VideoVisualizer(
# num_classes=self.cfg.MODEL.NUM_CLASSES,
# class_names_path=self.cfg.DEMO.LABEL_FILE_PATH,
# top_k=self.cfg.TENSORBOARD.MODEL_VIS.TOPK_PREDS,
# thres=self.cfg.DEMO.COMMON_CLASS_THRES,
# lower_thres=self.cfg.DEMO.UNCOMMON_CLASS_THRES,
# common_class_names=common_classes,
# colormap=self.cfg.TENSORBOARD.MODEL_VIS.COLORMAP,
# mode=self.cfg.DEMO.VIS_MODE,
# )
# all_keys = sorted(all_boxes.keys())
# # Draw around the keyframe for 2/10 of the sequence length.
# # This is chosen using heuristics.
# draw_range = [
# self.seq_length // 2 - self.seq_length // 10,
# self.seq_length // 2 + self.seq_length // 10,
# ]
# draw_range_repeat = [
# draw_range[0],
# (draw_range[1] - draw_range[0]) * self.no_frames_repeat
# + draw_range[0],
# ]
# prev_buffer = []
# prev_end_idx = 0
# logger.info("Start Visualization...")
# for keyframe_idx in tqdm.tqdm(all_keys):
# pred_gt_boxes = all_boxes[keyframe_idx]
# # Find the starting index of the clip. If start_idx exceeds the beginning
# # of the video, we only choose valid frame from index 0.
# start_idx = max(0, keyframe_idx - self.seq_length // 2)
# # Number of frames from the start of the current clip and the
# # end of the previous clip.
# dist = start_idx - prev_end_idx
# # If there are unwritten frames in between clips.
# if dist >= 0:
# # Get the frames in between previous clip and current clip.
# frames = self._get_frame_range(prev_end_idx, dist)
# # We keep a buffer of frames for overlapping visualization.
# # Write these to the output file.
# for frame in prev_buffer:
# frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
# self.display(frame)
# # Write them to output file without any visualization
# # since they don't have any corresponding keyframes.
# for frame in frames:
# self.display(frame)
# prev_buffer = []
# num_new_frames = self.seq_length
# # If there are overlapping frames in between clips.
# elif dist < 0:
# # Flush all ready frames.
# for frame in prev_buffer[:dist]:
# frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
# self.display(frame)
# prev_buffer = prev_buffer[dist:]
# num_new_frames = self.seq_length + dist
# # Obtain new frames for the current clip from the input video file.
# new_frames = self._get_frame_range(
# max(start_idx, prev_end_idx), num_new_frames
# )
# new_frames = [
# cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) for frame in new_frames
# ]
# clip = prev_buffer + new_frames
# # Calculate the end of this clip. This will be `prev_end_idx` for the
# # next iteration.
# prev_end_idx = max(start_idx, prev_end_idx) + len(new_frames)
# # For each precomputed or gt boxes.
# for i, boxes in enumerate(pred_gt_boxes):
# if i == 0:
# repeat = self.no_frames_repeat
# current_draw_range = draw_range
# else:
# repeat = 1
# current_draw_range = draw_range_repeat
# # Make sure draw range does not fall out of end of clip.
# current_draw_range[1] = min(
# current_draw_range[1], len(clip) - 1
# )
# ground_truth = boxes[0]
# bboxes = boxes[1]
# label = boxes[2]
# # Draw predictions.
# clip = video_vis.draw_clip_range(
# clip,
# label,
# bboxes=torch.Tensor(bboxes),
# ground_truth=ground_truth,
# draw_range=current_draw_range,
# repeat_frame=repeat,
# )
# # Store the current clip as buffer.
# prev_buffer = clip
# # Write the remaining buffer to output file.
# for frame in prev_buffer:
# frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
# self.display(frame)
# # If we still have some remaining frames in the input file,
# # write those to the output file as well.
# if prev_end_idx < self.total_frames:
# dist = self.total_frames - prev_end_idx
# remaining_clip = self._get_frame_range(prev_end_idx, dist)
# for frame in remaining_clip:
# self.display(frame)
def __call__(self):
self.get_predictions()
self.draw_video()
def display(self, frame):
"""
Either display a single frame (BGR image) to a window or write to
an output file if output path is provided.
"""
if self.output_file is None:
cv2.imshow("SlowFast", frame)
else:
self.output_file.write(frame)
def _get_keyframe_clip(self, keyframe_idx):
"""
Return a clip corresponding to a keyframe index for visualization.
Args:
keyframe_idx (int): keyframe index.
"""
start_idx = max(0, keyframe_idx - self.seq_length // 2)
clip = self._get_frame_range(start_idx, self.seq_length)
return clip
def _get_frame_range(self, start_idx, num_frames):
"""
Return a clip of `num_frames` frames starting from `start_idx`. If not enough frames
from `start_idx`, return the remaining frames from `start_idx`.
Args:
start_idx (int): starting idx.
num_frames (int): number of frames in the returned clip.
"""
was_read = True
assert start_idx < self.total_frames, "Start index out of range."
self.cap.set(cv2.CAP_PROP_POS_FRAMES, start_idx)
all_frames = []
for _ in range(num_frames):
was_read, frame = self.cap.read()
if was_read:
all_frames.append(frame)
else:
break
return all_frames
def merge_pred_gt_boxes(pred_dict, gt_dict=None):
"""
Merge data from precomputed and ground-truth boxes dictionaries.
Args:
pred_dict (dict): a dict which maps from `frame_idx` to a list of `boxes`
and `labels`. Each `box` is a list of 4 box coordinates. `labels[i]` is
a list of labels for `boxes[i]`.
gt_dict (Optional[dict]): a dict which maps from `frame_idx` to a list of `boxes`
            and `labels`. Each `box` is a list of 4 box coordinates. `labels[i]` is
            a list of labels for `boxes[i]`.
from collections import deque
class Process:
def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
self.process_name = process_name # process name
self.arrival_time = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
self.stop_time = arrival_time
self.burst_time = burst_time # remaining burst time
self.waiting_time = 0 # total time of the process wait in ready queue
self.turnaround_time = 0 # time from arrival time to completion time
class MLFQ:
"""
MLFQ(Multi Level Feedback Queue)
https://en.wikipedia.org/wiki/Multilevel_feedback_queue
MLFQ has a lot of queues that have different priority
In this MLFQ,
    The first queue (0) through the second-to-last queue (N-2) of the MLFQ use the Round Robin algorithm
    The last queue (N-1) uses the First Come, First Served algorithm
"""
def __init__(
self,
number_of_queues: int,
time_slices: list[int],
queue: deque[Process],
current_time: int,
) -> None:
# total number of mlfq's queues
self.number_of_queues = number_of_queues
# time slice of queues that round robin algorithm applied
self.time_slices = time_slices
# unfinished process is in this ready_queue
self.ready_queue = queue
# current time
self.current_time = current_time
# finished process is in this sequence queue
self.finish_queue: deque[Process] = deque()
def calculate_sequence_of_finish_queue(self) -> list[str]:
"""
This method returns the sequence of finished processes
>>> P1 = Process("P1", 0, 53)
>>> P2 = Process("P2", 0, 17)
>>> P3 = Process("P3", 0, 68)
>>> P4 = Process("P4", 0, 24)
>>> mlfq = MLFQ(3, [17, 25], deque([P1, P2, P3, P4]), 0)
>>> _ = mlfq.multi_level_feedback_queue()
>>> mlfq.calculate_sequence_of_finish_queue()
['P2', 'P4', 'P1', 'P3']
"""
sequence = []
for i in range(len(self.finish_queue)):
sequence.append(self.finish_queue[i].process_name)
return sequence
def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
"""
This method calculates waiting time of processes
>>> P1 = Process("P1", 0, 53)
>>> P2 = Process("P2", 0, 17)
>>> P3 = Process("P3", 0, 68)
>>> P4 = Process("P4", 0, 24)
>>> mlfq = MLFQ(3, [17, 25], deque([P1, P2, P3, P4]), 0)
>>> _ = mlfq.multi_level_feedback_queue()
>>> mlfq.calculate_waiting_time([P1, P2, P3, P4])
[83, 17, 94, 101]
"""
waiting_times = []
for i in range(len(queue)):
waiting_times.append(queue[i].waiting_time)
return waiting_times
def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
"""
This method calculates turnaround time of processes
>>> P1 = Process("P1", 0, 53)
>>> P2 = Process("P2", 0, 17)
>>> P3 = Process("P3", 0, 68)
>>> P4 = Process("P4", 0, 24)
>>> mlfq = MLFQ(3, [17, 25], deque([P1, P2, P3, P4]), 0)
>>> _ = mlfq.multi_level_feedback_queue()
>>> mlfq.calculate_turnaround_time([P1, P2, P3, P4])
[136, 34, 162, 125]
"""
turnaround_times = []
for i in range(len(queue)):
turnaround_times.append(queue[i].turnaround_time)
return turnaround_times
def calculate_completion_time(self, queue: list[Process]) -> list[int]:
"""
This method calculates completion time of processes
>>> P1 = Process("P1", 0, 53)
>>> P2 = Process("P2", 0, 17)
>>> P3 = Process("P3", 0, 68)
>>> P4 = Process("P4", 0, 24)
>>> mlfq = MLFQ(3, [17, 25], deque([P1, P2, P3, P4]), 0)
>>> _ = mlfq.multi_level_feedback_queue()
        >>> mlfq.calculate_completion_time([P1, P2, P3, P4])
[136, 34, 162, 125]
"""
completion_times = []
for i in range(len(queue)):
completion_times.append(queue[i].stop_time)
return completion_times
def calculate_remaining_burst_time_of_processes(
self, queue: deque[Process]
) -> list[int]:
"""
        This method calculates the remaining burst time of processes
>>> P1 = Process("P1", 0, 53)
>>> P2 = Process("P2", 0, 17)
>>> P3 = Process("P3", 0, 68)
>>> P4 = Process("P4", 0, 24)
>>> mlfq = MLFQ(3, [17, 25], deque([P1, P2, P3, P4]), 0)
>>> finish_queue, ready_queue = mlfq.round_robin(deque([P1, P2, P3, P4]), 17)
>>> mlfq.calculate_remaining_burst_time_of_processes(mlfq.finish_queue)
[0]
>>> mlfq.calculate_remaining_burst_time_of_processes(ready_queue)
[36, 51, 7]
>>> finish_queue, ready_queue = mlfq.round_robin(ready_queue, 25)
>>> mlfq.calculate_remaining_burst_time_of_processes(mlfq.finish_queue)
[0, 0]
>>> mlfq.calculate_remaining_burst_time_of_processes(ready_queue)
[11, 26]
"""
return [q.burst_time for q in queue]
def update_waiting_time(self, process: Process) -> int:
"""
This method updates waiting times of unfinished processes
>>> P1 = Process("P1", 0, 53)
>>> P2 = Process("P2", 0, 17)
>>> P3 = Process("P3", 0, 68)
>>> P4 = Process("P4", 0, 24)
>>> mlfq = MLFQ(3, [17, 25], deque([P1, P2, P3, P4]), 0)
>>> mlfq.current_time = 10
>>> P1.stop_time = 5
>>> mlfq.update_waiting_time(P1)
5
"""
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
"""
FCFS(First Come, First Served)
FCFS will be applied to MLFQ's last queue
        The process that arrived first is finished first
>>> P1 = Process("P1", 0, 53)
>>> P2 = Process("P2", 0, 17)
>>> P3 = Process("P3", 0, 68)
>>> P4 = Process("P4", 0, 24)
>>> mlfq = MLFQ(3, [17, 25], deque([P1, P2, P3, P4]), 0)
>>> _ = mlfq.first_come_first_served(mlfq.ready_queue)
>>> mlfq.calculate_sequence_of_finish_queue()
['P1', 'P2', 'P3', 'P4']
"""
finished: deque[Process] = deque() # sequence deque of finished process
while len(ready_queue) != 0:
cp = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
# update waiting time of current process
self.update_waiting_time(cp)
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
cp.burst_time = 0
# set the process's turnaround time because it is finished
cp.turnaround_time = self.current_time - cp.arrival_time
# set the completion time
cp.stop_time = self.current_time
# add the process to queue that has finished queue
finished.append(cp)
self.finish_queue.extend(finished) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def round_robin(
self, ready_queue: deque[Process], time_slice: int
) -> tuple[deque[Process], deque[Process]]:
"""
RR(Round Robin)
RR will be applied to MLFQ's all queues except last queue
        No process may occupy the CPU for longer than time_slice at once
        If a process uses up its whole time_slice, it goes back to the ready queue
>>> P1 = Process("P1", 0, 53)
>>> P2 = Process("P2", 0, 17)
>>> P3 = Process("P3", 0, 68)
>>> P4 = Process("P4", 0, 24)
>>> mlfq = MLFQ(3, [17, 25], deque([P1, P2, P3, P4]), 0)
>>> finish_queue, ready_queue = mlfq.round_robin(mlfq.ready_queue, 17)
>>> mlfq.calculate_sequence_of_finish_queue()
['P2']
"""
finished: deque[Process] = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for i in range(len(ready_queue)):
cp = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(cp)
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
cp.stop_time = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(cp)
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
cp.burst_time = 0
# set the finish time
cp.stop_time = self.current_time
# update the process' turnaround time because it is finished
cp.turnaround_time = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(cp)
self.finish_queue.extend(finished) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def multi_level_feedback_queue(self) -> deque[Process]:
"""
MLFQ(Multi Level Feedback Queue)
>>> P1 = Process("P1", 0, 53)
>>> P2 = Process("P2", 0, 17)
>>> P3 = Process("P3", 0, 68)
>>> P4 = Process("P4", 0, 24)
>>> mlfq = MLFQ(3, [17, 25], deque([P1, P2, P3, P4]), 0)
>>> finish_queue = mlfq.multi_level_feedback_queue()
>>> mlfq.calculate_sequence_of_finish_queue()
['P2', 'P4', 'P1', 'P3']
"""
# all queues except last one have round_robin algorithm
for i in range(self.number_of_queues - 1):
finished, self.ready_queue = self.round_robin(
self.ready_queue, self.time_slices[i]
)
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue)
return self.finish_queue
if __name__ == "__main__":
import doctest
P1 = Process("P1", 0, 53)
P2 = Process("P2", 0, 17)
P3 = Process("P3", 0, 68)
P4 = Process("P4", 0, 24)
number_of_queues = 3
time_slices = [17, 25]
queue = deque([P1, P2, P3, P4])
    if len(time_slices) != number_of_queues - 1:
        raise SystemExit("len(time_slices) must equal number_of_queues - 1")
doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})
P1 = Process("P1", 0, 53)
P2 = Process("P2", 0, 17)
P3 = Process("P3", 0, 68)
P4 = Process("P4", 0, 24)
number_of_queues = 3
time_slices = [17, 25]
queue = deque([P1, P2, P3, P4])
    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()
    print("sequence of finished processes:", mlfq.calculate_sequence_of_finish_queue())
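    # Worked example (these are the numbers the doctests above assert): with
    # time slices [17, 25] and all four processes arriving at t=0,
    #   RR(17): P1 -> 36 left (t=17), P2 finishes at t=34, P3 -> 51 left (t=51), P4 -> 7 left (t=68)
    #   RR(25): P1 -> 11 left (t=93), P3 -> 26 left (t=118), P4 finishes at t=125
    #   FCFS:   P1 finishes at t=136, P3 finishes at t=162
    # which yields the finish order ['P2', 'P4', 'P1', 'P3'].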
# Copyright 2019 Rackspace US Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from operator import itemgetter
from uuid import UUID
from oslo_serialization import jsonutils
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions
from octavia_tempest_plugin.common import constants as const
from octavia_tempest_plugin.tests import test_base
CONF = config.CONF
class FlavorProfileAPITest(test_base.LoadBalancerBaseTest):
"""Test the flavor profile object API."""
@decorators.idempotent_id('d0e3a08e-d58a-4460-83ed-34307ca04cde')
def test_flavor_profile_create(self):
"""Tests flavor profile create and basic show APIs.
* Tests that users without the loadbalancer admin role cannot
create flavor profiles.
* Create a fully populated flavor profile.
* Validate the response reflects the requested values.
"""
# We have to do this here as the api_version and clients are not
# setup in time to use a decorator or the skip_checks mixin
if not self.lb_admin_flavor_profile_client.is_version_supported(
self.api_version, '2.6'):
raise self.skipException('Flavor profiles are only available on '
'Octavia API version 2.6 or newer.')
flavor_profile_name = data_utils.rand_name(
"lb_admin_flavorprofile1-create")
flavor_data = {const.LOADBALANCER_TOPOLOGY: const.SINGLE}
flavor_data_json = jsonutils.dumps(flavor_data)
flavor_profile_kwargs = {
const.NAME: flavor_profile_name,
const.PROVIDER_NAME: CONF.load_balancer.provider,
const.FLAVOR_DATA: flavor_data_json
}
# Test that a user without the load balancer admin role cannot
# create a flavor profile
if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
self.assertRaises(
exceptions.Forbidden,
self.os_primary.flavor_profile_client.create_flavor_profile,
**flavor_profile_kwargs)
# Happy path
flavor_profile = (
self.lb_admin_flavor_profile_client.create_flavor_profile(
**flavor_profile_kwargs))
self.addCleanup(
self.lb_admin_flavor_profile_client.cleanup_flavor_profile,
flavor_profile[const.ID])
UUID(flavor_profile[const.ID])
self.assertEqual(flavor_profile_name, flavor_profile[const.NAME])
self.assertEqual(CONF.load_balancer.provider,
flavor_profile[const.PROVIDER_NAME])
self.assertEqual(flavor_data_json, flavor_profile[const.FLAVOR_DATA])
@decorators.idempotent_id('c4e17fdf-849a-4132-93ae-dfca21ce4444')
def test_flavor_profile_list(self):
"""Tests flavor profile list API and field filtering.
* Create three flavor profiles.
* Validates that non-admin accounts cannot list the flavor profiles.
* List the flavor profiles using the default sort order.
* List the flavor profiles using descending sort order.
* List the flavor profiles using ascending sort order.
* List the flavor profiles returning one field at a time.
* List the flavor profiles returning two fields.
* List the flavor profiles filtering to one of the three.
* List the flavor profiles filtered, one field, and sorted.
"""
# We have to do this here as the api_version and clients are not
# setup in time to use a decorator or the skip_checks mixin
if not self.lb_admin_flavor_profile_client.is_version_supported(
self.api_version, '2.6'):
raise self.skipException('Flavor profiles are only available on '
'Octavia API version 2.6 or newer.')
# Create flavor profile 1
flavor_profile1_name = data_utils.rand_name(
"lb_admin_flavorprofile-list-1")
flavor_data1 = {const.LOADBALANCER_TOPOLOGY: const.SINGLE}
flavor_data1_json = jsonutils.dumps(flavor_data1)
flavor_profile1_kwargs = {
const.NAME: flavor_profile1_name,
const.PROVIDER_NAME: CONF.load_balancer.provider,
const.FLAVOR_DATA: flavor_data1_json
}
flavor_profile1 = (
self.lb_admin_flavor_profile_client.create_flavor_profile(
**flavor_profile1_kwargs))
self.addCleanup(
self.lb_admin_flavor_profile_client.cleanup_flavor_profile,
flavor_profile1[const.ID])
# Create flavor profile 2
flavor_profile2_name = data_utils.rand_name(
"lb_admin_flavorprofile-list-2")
flavor_data2 = {const.LOADBALANCER_TOPOLOGY: const.ACTIVE_STANDBY}
flavor_data2_json = jsonutils.dumps(flavor_data2)
flavor_profile2_kwargs = {
const.NAME: flavor_profile2_name,
const.PROVIDER_NAME: CONF.load_balancer.provider,
const.FLAVOR_DATA: flavor_data2_json
}
flavor_profile2 = (
self.lb_admin_flavor_profile_client.create_flavor_profile(
**flavor_profile2_kwargs))
self.addCleanup(
self.lb_admin_flavor_profile_client.cleanup_flavor_profile,
flavor_profile2[const.ID])
# Create flavor profile 3
flavor_profile3_name = data_utils.rand_name(
"lb_admin_flavorprofile-list-3")
flavor_data3 = {const.LOADBALANCER_TOPOLOGY: const.SINGLE}
flavor_data3_json = jsonutils.dumps(flavor_data3)
flavor_profile3_kwargs = {
const.NAME: flavor_profile3_name,
const.PROVIDER_NAME: CONF.load_balancer.provider,
const.FLAVOR_DATA: flavor_data3_json
}
flavor_profile3 = (
self.lb_admin_flavor_profile_client.create_flavor_profile(
**flavor_profile3_kwargs))
self.addCleanup(
self.lb_admin_flavor_profile_client.cleanup_flavor_profile,
flavor_profile3[const.ID])
# default sort order (by ID) reference list
ref_id_list_asc = [flavor_profile1[const.ID],
flavor_profile2[const.ID],
flavor_profile3[const.ID]]
ref_id_list_dsc = copy.deepcopy(ref_id_list_asc)
ref_id_list_asc.sort()
ref_id_list_dsc.sort(reverse=True)
# Test that a user without the load balancer admin role cannot
# list flavor profiles.
if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
self.assertRaises(
exceptions.Forbidden,
self.os_primary.flavor_profile_client.list_flavor_profiles)
# Check the default sort order (by ID)
profiles = self.lb_admin_flavor_profile_client.list_flavor_profiles()
# Remove flavor profiles not used in this test
profiles = [prof for prof in profiles
if 'lb_admin_flavorprofile-list' in prof[const.NAME]]
self.assertEqual(3, len(profiles))
self.assertEqual(ref_id_list_asc[0], profiles[0][const.ID])
self.assertEqual(ref_id_list_asc[1], profiles[1][const.ID])
self.assertEqual(ref_id_list_asc[2], profiles[2][const.ID])
# Check the descending sort order by name
profiles = self.lb_admin_flavor_profile_client.list_flavor_profiles(
query_params='{sort}={name}:{order}'.format(
sort=const.SORT, name=const.NAME, order=const.DESC))
# Remove flavor profiles not used in this test
profiles = [prof for prof in profiles
if 'lb_admin_flavorprofile-list' in prof[const.NAME]]
self.assertEqual(3, len(profiles))
self.assertEqual(flavor_profile3_name, profiles[0][const.NAME])
self.assertEqual(flavor_profile2_name, profiles[1][const.NAME])
self.assertEqual(flavor_profile1_name, profiles[2][const.NAME])
# Check the ascending sort order by name
profiles = self.lb_admin_flavor_profile_client.list_flavor_profiles(
query_params='{sort}={name}:{order}'.format(
sort=const.SORT, name=const.NAME, order=const.ASC))
# Remove flavor profiles not used in this test
profiles = [prof for prof in profiles
if 'lb_admin_flavorprofile-list' in prof[const.NAME]]
self.assertEqual(3, len(profiles))
self.assertEqual(flavor_profile1_name, profiles[0][const.NAME])
self.assertEqual(flavor_profile2_name, profiles[1][const.NAME])
self.assertEqual(flavor_profile3_name, profiles[2][const.NAME])
ref_profiles = [flavor_profile1, flavor_profile2, flavor_profile3]
sorted_profiles = sorted(ref_profiles, key=itemgetter(const.ID))
# Test fields
flavor_profile_client = self.lb_admin_flavor_profile_client
for field in const.SHOW_FLAVOR_PROFILE_FIELDS:
profiles = flavor_profile_client.list_flavor_profiles(
query_params='{fields}={field}&{fields}={name}'.format(
fields=const.FIELDS, field=field, name=const.NAME))
# Remove flavor profiles not used in this test
profiles = [prof for prof in profiles
if 'lb_admin_flavorprofile-list' in prof[const.NAME]]
self.assertEqual(3, len(profiles))
self.assertEqual(sorted_profiles[0][field], profiles[0][field])
self.assertEqual(sorted_profiles[1][field], profiles[1][field])
self.assertEqual(sorted_profiles[2][field], profiles[2][field])
# Test filtering
profile = self.lb_admin_flavor_profile_client.list_flavor_profiles(
query_params='{name}={prof_name}'.format(
name=const.NAME,
prof_name=flavor_profile2[const.NAME]))
self.assertEqual(1, len(profile))
self.assertEqual(flavor_profile2[const.ID], profile[0][const.ID])
# Test combined params
profiles = self.lb_admin_flavor_profile_client.list_flavor_profiles(
query_params='{provider_name}={provider}&{fields}={name}&'
'{sort}={ID}:{desc}'.format(
provider_name=const.PROVIDER_NAME,
provider=CONF.load_balancer.provider,
fields=const.FIELDS, name=const.NAME,
sort=const.SORT, ID=const.ID,
desc=const.DESC))
# Remove flavor profiles not used in this test
profiles = [prof for prof in profiles
if 'lb_admin_flavorprofile-list' in prof[const.NAME]]
self.assertEqual(3, len(profiles))
self.assertEqual(1, len(profiles[0]))
self.assertEqual(sorted_profiles[2][const.NAME],
profiles[0][const.NAME])
self.assertEqual(sorted_profiles[1][const.NAME],
profiles[1][const.NAME])
self.assertEqual(sorted_profiles[0][const.NAME],
profiles[2][const.NAME])
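    # For reference, the combined query built above renders roughly as
    #   ?provider_name=<provider>&fields=name&sort=id:desc
    # where the exact parameter names come from the octavia constants module
    # and <provider> is whatever CONF.load_balancer.provider is set to.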
@decorators.idempotent_id('a2c2ff9a-fce1-42fd-8cfd-56dea31610f6')
def test_flavor_profile_show(self):
"""Tests flavor profile show API.
* Create a fully populated flavor profile.
* Show flavor profile details.
* Validate the show reflects the requested values.
* Validates that non-lb-admin accounts cannot see the flavor profile.
"""
# We have to do this here as the api_version and clients are not
# setup in time to use a decorator or the skip_checks mixin
if not self.lb_admin_flavor_profile_client.is_version_supported(
self.api_version, '2.6'):
raise self.skipException('Flavor profiles are only available on '
'Octavia API version 2.6 or newer.')
flavor_profile_name = data_utils.rand_name(
"lb_admin_flavorprofile1-show")
flavor_data = {const.LOADBALANCER_TOPOLOGY: const.ACTIVE_STANDBY}
flavor_data_json = jsonutils.dumps(flavor_data)
flavor_profile_kwargs = {
const.NAME: flavor_profile_name,
const.PROVIDER_NAME: CONF.load_balancer.provider,
const.FLAVOR_DATA: flavor_data_json
}
flavor_profile = (
self.lb_admin_flavor_profile_client.create_flavor_profile(
**flavor_profile_kwargs))
self.addCleanup(
self.lb_admin_flavor_profile_client.cleanup_flavor_profile,
flavor_profile[const.ID])
# Test that a user without the load balancer admin role cannot
# show a flavor profile
if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
self.assertRaises(
exceptions.Forbidden,
self.os_primary.flavor_profile_client.show_flavor_profile,
flavor_profile[const.ID])
result = (
self.lb_admin_flavor_profile_client.show_flavor_profile(
flavor_profile[const.ID]))
self.assertEqual(flavor_profile_name, result[const.NAME])
self.assertEqual(CONF.load_balancer.provider,
result[const.PROVIDER_NAME])
self.assertEqual(flavor_data_json, result[const.FLAVOR_DATA])
@decorators.idempotent_id('32a2e285-8dfc-485f-a450-a4d450d3c3ec')
def test_flavor_profile_update(self):
"""Tests flavor profile update API.
* Create a fully populated flavor profile.
* Show flavor profile details.
* Validate the show reflects the initial values.
* Validates that non-admin accounts cannot update the flavor profile.
* Update the flavor profile details.
* Show flavor profile details.
* Validate the show reflects the updated values.
"""
# We have to do this here as the api_version and clients are not
# setup in time to use a decorator or the skip_checks mixin
if not self.lb_admin_flavor_profile_client.is_version_supported(
self.api_version, '2.6'):
raise self.skipException('Flavor profiles are only available on '
'Octavia API version 2.6 or newer.')
flavor_profile_name = data_utils.rand_name(
"lb_admin_flavorprofile1-update")
flavor_data = {const.LOADBALANCER_TOPOLOGY: const.SINGLE}
flavor_data_json = jsonutils.dumps(flavor_data)
flavor_profile_kwargs = {
const.NAME: flavor_profile_name,
const.PROVIDER_NAME: CONF.load_balancer.provider,
const.FLAVOR_DATA: flavor_data_json
}
flavor_profile = (
self.lb_admin_flavor_profile_client.create_flavor_profile(
**flavor_profile_kwargs))
self.addCleanup(
self.lb_admin_flavor_profile_client.cleanup_flavor_profile,
flavor_profile[const.ID])
self.assertEqual(flavor_profile_name, flavor_profile[const.NAME])
self.assertEqual(CONF.load_balancer.provider,
flavor_profile[const.PROVIDER_NAME])
self.assertEqual(flavor_data_json, flavor_profile[const.FLAVOR_DATA])
flavor_profile_name2 = data_utils.rand_name(
"lb_admin_flavorprofile1-update2")
flavor_data2 = {const.LOADBALANCER_TOPOLOGY: const.ACTIVE_STANDBY}
flavor_data2_json = jsonutils.dumps(flavor_data2)
# TODO(johnsom) Figure out a reliable second provider
flavor_profile_updated_kwargs = {
const.NAME: flavor_profile_name2,
const.PROVIDER_NAME: CONF.load_balancer.provider,
const.FLAVOR_DATA: flavor_data2_json
}
# Test that a user without the load balancer admin role cannot
# create a flavor profile
if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
self.assertRaises(
exceptions.Forbidden,
self.os_primary.flavor_profile_client.update_flavor_profile,
flavor_profile[const.ID], **flavor_profile_updated_kwargs)
result = self.lb_admin_flavor_profile_client.update_flavor_profile(
flavor_profile[const.ID], **flavor_profile_updated_kwargs)
self.assertEqual(flavor_profile_name2, result[const.NAME])
self.assertEqual(CONF.load_balancer.provider,
result[const.PROVIDER_NAME])
self.assertEqual(flavor_data2_json, result[const.FLAVOR_DATA])
# Check that a show reflects the new values
get_result = (
self.lb_admin_flavor_profile_client.show_flavor_profile(
flavor_profile[const.ID]))
self.assertEqual(flavor_profile_name2, get_result[const.NAME])
self.assertEqual(CONF.load_balancer.provider,
get_result[const.PROVIDER_NAME])
self.assertEqual(flavor_data2_json, get_result[const.FLAVOR_DATA])
@decorators.idempotent_id('4c2eaacf-c2c8-422a-b7dc-a30ceba6bcd4')
def test_flavor_profile_delete(self):
"""Tests flavor profile create and delete APIs.
* Creates a flavor profile.
* Validates that other accounts cannot delete the flavor profile.
* Deletes the flavor profile.
* Validates the flavor profile is in the DELETED state.
"""
# We have to do this here as the api_version and clients are not
# setup in time to use a decorator or the skip_checks mixin
if not self.lb_admin_flavor_profile_client.is_version_supported(
self.api_version, '2.6'):
raise self.skipException('Flavor profiles are only available on '
'Octavia API version 2.6 or newer.')
flavor_profile_name = data_utils.rand_name(
"lb_admin_flavorprofile1-delete")
flavor_data = {const.LOADBALANCER_TOPOLOGY: const.SINGLE}
flavor_data_json = jsonutils.dumps(flavor_data)
flavor_profile_kwargs = {
const.NAME: flavor_profile_name,
const.PROVIDER_NAME: CONF.load_balancer.provider,
const.FLAVOR_DATA: flavor_data_json
}
flavor_profile = (
self.lb_admin_flavor_profile_client.create_flavor_profile(
| |
        A NewCallerid event is sent when the caller ID of a channel changes.
Args:
event (dict): A NewCallerid event.
"""
channel = self._channels[event['Uniqueid']]
channel.caller_id = channel.caller_id.replace(
name=event['CallerIDName'],
num=event['CallerIDNum'],
)
channel.cid_calling_pres = event['CID-CallingPres']
def _on_new_connected_line(self, event):
"""
A NewConnectedLine event is sent when the ConnectedLine changes.
Args:
event (dict): A NewConnectedLine event.
"""
channel = self._channels[event['Uniqueid']]
channel.connected_line = channel.connected_line.replace(
name=event['ConnectedLineName'],
num=event['ConnectedLineNum'],
)
def _on_new_accountcode(self, event):
"""
A NewAccountCode is sent when the AccountCode of a channel changes.
Args:
event (dict): A NewAccountCode event.
"""
channel = self._channels[event['Uniqueid']]
channel.account_code = event['AccountCode']
# ===================================================================
# Actual event handlers you can override
# ===================================================================
def on_state_change(self, channel, old_state):
"""
Handle the change of a ChannelState.
If the status goes from DOWN to RING, then it means the calling party
hears a dialing tone. If the status goes from DOWN to RINGING or UP,
then it means the phone of a called party is starting to ring (or has
been answered immediately without ringing).
Args:
channel (Channel): The channel being changed.
old_state (int): The state before the state change.
"""
if channel.is_local:
return
new_state = channel.state
if old_state == AST_STATE_DOWN:
if new_state == AST_STATE_RING:
self.on_a_dial(channel)
elif new_state in (AST_STATE_RINGING, AST_STATE_UP):
self.on_b_dial(channel)
def on_a_dial(self, channel):
"""
Handle the event where the caller phone hears the ring tone.
We don't want this. It's work to get all the values right, and
when we do, this would look just like on_b_dial.
Args:
channel (Channel):
"""
pass
def on_b_dial(self, channel):
"""
Handle the event where a callee phone starts to ring.
In our case, we check if a dial has already been set up for the
channel. If so, we may want to send a ringing event.
Args:
channel (Channel): The channel of the B side.
"""
if channel.back_dial:
self.on_b_dial_ringing(channel)
def on_dial_begin(self, channel, destination):
"""
Handle an event where a dial is set up.
In our case, we check if the channel already has state ringing.
If so, we may want to send a ringing event.
Args:
channel (Channel): The channel initiating the dial.
destination (Channel): The channel being dialed.
"""
        if not destination.is_local and destination.state == AST_STATE_RINGING:
self.on_b_dial_ringing(destination)
def on_b_dial_ringing(self, channel):
"""
        Check a ringing channel and send a ringing event if required.
By default, this function will only be called if the destination
channel:
- Has an open dial (so a way to trace back how it's being called).
- Has state "ringing".
Args:
channel (Channel): The channel being dialed.
"""
if 'ignore_b_dial' in channel.custom:
# Notifications were already sent for this channel.
# Unset the flag and move on.
del (channel.custom['ignore_b_dial'])
return
a_chan = channel.get_dialing_channel()
if 'raw_blind_transfer' in a_chan.custom:
# This is an interesting exception: we got a Blind Transfer
# message earlier and recorded it in this attribute. We'll
# translate this b_dial to first a on_b_dial and then the
# on_transfer event.
transferer = a_chan.custom.pop('raw_blind_transfer')
target_chans = a_chan.get_dialed_channels()
for target in target_chans:
# To prevent notifications from being sent multiple times,
# we set a flag on all other channels except for the one
# starting to ring right now.
if target != channel:
target.custom['ignore_b_dial'] = True
self._reporter.on_blind_transfer(
caller=a_chan.as_namedtuple(),
transferer=transferer.as_namedtuple(),
targets=[chan.as_namedtuple() for chan in target_chans],
)
elif (
a_chan.is_originated and
a_chan.fwd_dials and a_chan.fwd_local_bridge
):
# Calls setup through Originate are harder to track.
# The Channel passed to the Originate has two semis. The Context
# channel is called first, and when it's up, put in a bridge
# with semi 2. Next, semi 1 will dial out to the other party.
# To make it look like a normal call, we will show the call from
# the Context as the calling party and the call from Channel as
# the called party.
originating_chan = a_chan
a_bridge = originating_chan.fwd_local_bridge.bridge
a_chan = [peer for peer in a_bridge.peers
if not peer.is_local][0]
if not a_chan.is_local:
called_exten = originating_chan.fwd_dials[0].exten
a_chan.exten = called_exten
a_chan.is_calling = True
if not a_chan.has_extension:
self._logger.error(
'Caller (Originate) did not have an extension: '
'{}'.format(channel))
self._reporter.on_b_dial(
caller=a_chan.as_namedtuple(),
targets=[channel.as_namedtuple()],
)
elif not a_chan.is_local:
# We'll want to send one ringing event for all targets, so send
# one notification and mark the rest as already notified.
open_dials = a_chan.get_dialed_channels()
targets = [dial.as_namedtuple() for dial in open_dials]
if not a_chan.has_extension:
self._logger.error(
'Caller (Dial) did not have an extension: {}'.format({
'caller': a_chan.as_namedtuple(),
'destination': channel.as_namedtuple(),
}))
if not targets:
self._logger.error(
'Caller (Dial) did not have any dialed channels: '
'{}'.format({
'caller': a_chan.as_namedtuple(),
'destination': channel.as_namedtuple(),
}))
self._reporter.on_b_dial(
caller=a_chan.as_namedtuple(),
targets=targets,
)
for b_chan in open_dials:
if b_chan != channel:
# To prevent notifications from being sent multiple
# times, we set a flag on all other channels except
# for the one starting to ring right now.
b_chan.custom['ignore_b_dial'] = True
def on_bridge_enter(self, channel, bridge):
"""
Post-process a BridgeEnter event to notify of a call in progress.
This function will check if the bridge already contains other SIP
channels. If so, it's interpreted as a call between two channels
being connected.
WARNING: This function does not behave as desired for
bridges with 3+ parties, e.g. conference calls.
Args:
channel (Channel): The channel entering the bridge.
bridge (Bridge): The bridge the channel enters.
"""
sip_peers = channel.get_bridge_peers_recursive()
if len(sip_peers) < 2:
# There are not enough interesting channels to form a call.
return
callers = set([peer for peer in sip_peers if peer.is_calling])
targets = sip_peers - callers
if len(callers) > 1:
# Hmm, we have multiple callers. This can happen on an
# AB-CB-AC transfer. Let's do something ugly.
# Our oldest caller is going to be the new caller.
sorted_callers = sorted(
callers, key=lambda chan: chan.name.rsplit('-', 1)[1])
caller = sorted_callers.pop(0)
            # The rest will be marked as targets.
for non_caller in sorted_callers:
targets.add(non_caller)
non_caller.is_calling = False
elif len(callers) < 1:
# A call should always have a caller.
self._logger.warning('Call {} has too few callers: {}'.format(
channel.linkedid, len(callers)))
return
else:
caller = next(iter(callers))
if len(targets) != 1:
# This can happen with a conference call, but is not supported.
self._logger.warning('Call {} has {} targets.'.format(
channel.linkedid, len(targets)))
return
else:
target = next(iter(targets))
# Check and set a flag to prevent the event from being fired again.
if 'is_picked_up' not in caller.custom:
caller.custom['is_picked_up'] = True
self._reporter.on_up(
caller=caller.as_namedtuple(),
target=target.as_namedtuple(),
)
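    # Illustration of the "oldest caller" sort used above (an assumption about
    # typical channel naming): Asterisk channel names usually end in a
    # monotonically increasing hexadecimal sequence, so e.g.
    #   sorted(['SIP/203-0000001b', 'SIP/201-00000014'],
    #          key=lambda name: name.rsplit('-', 1)[1])[0]
    # evaluates to 'SIP/201-00000014', i.e. the channel that was created first.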
def on_attended_transfer(self, orig_transferer, second_transferer, event):
"""
Gets invoked when an attended transfer is completed.
In an attended transfer, one of the participants of a conversation
calls a third participant, waits for the third party to answer, talks
to the third party and then transfers their original conversation
partner to the third party.
Args:
orig_transferer (Channel): The original channel is the channel
which the redirector used to talk with the person who's being
transferred.
second_transferer (Channel): The target channel is the channel
which the redirector used to set up the call to the person to
whom the call is being transferred.
event (dict): The data of the AttendedTransfer event.
"""
if 'TransfereeUniqueid' in event and 'TransferTargetUniqueid' in event:
# Nice, Asterisk just told us who the transferee and transfer
# target are. Let's just do what Asterisk says.
transferee = self._channels[event['TransfereeUniqueid']]
target = self._channels[event['TransferTargetUniqueid']]
else:
# Ouch, Asterisk didn't tell us who is the transferee and who is
# the target, which means we need to figure it out ourselves.
# We can find both channels in the Destination Bridge.
target_bridge = self._bridges[event['DestBridgeUniqueid']]
if len(target_bridge) < 2:
self._logger.warning(
'Attn Xfer DestBridge does not have enough peers for '
'event: {!r}'.format(event))
return
peer_one, peer_two = target_bridge.peers
# The next challenge now is to figure out which channel is the
# transferee and which one is the target..
if peer_one.linkedid == event['OrigTransfererLinkedid']:
# Peer one has the same linkedid as the call before the
# transfer, so it must be the transferee.
transferee = peer_one
target = peer_two
elif peer_two.linkedid == event['OrigTransfererLinkedid']:
transferee = peer_two
target = peer_one
else:
raise NotImplementedError(
                    'Could not determine caller and target after '
                    'attended transfer: {!r}'.format(event))
#!/usr/bin/env python
import numpy as np
from scipy.linalg import expm
from scipy.linalg import logm
from scipy.linalg import inv
import rospy
import rosbag
import rospkg
from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint
from cw2q4.youbotKineKDL import YoubotKinematicKDL
import PyKDL
from visualization_msgs.msg import Marker
from itertools import permutations
class YoubotTrajectoryPlanning(object):
def __init__(self):
# Initialize node
rospy.init_node('youbot_traj_cw2', anonymous=True)
# Save question number for check in main run method
self.kdl_youbot = YoubotKinematicKDL()
# Create trajectory publisher and a checkpoint publisher to visualize checkpoints
self.traj_pub = rospy.Publisher('/EffortJointInterface_trajectory_controller/command', JointTrajectory,
queue_size=5)
self.checkpoint_pub = rospy.Publisher("checkpoint_positions", Marker, queue_size=100)
def run(self):
"""This function is the main run function of the class. When called, it runs question 6 by calling the q6()
function to get the trajectory. Then, the message is filled out and published to the /command topic.
"""
print("run q6a")
rospy.loginfo("Waiting 5 seconds for everything to load up.")
rospy.sleep(2.0)
traj = self.q6()
traj.header.stamp = rospy.Time.now()
traj.joint_names = ["arm_joint_1", "arm_joint_2", "arm_joint_3", "arm_joint_4", "arm_joint_5"]
self.traj_pub.publish(traj)
def q6(self):
""" This is the main q6 function. Here, other methods are called to create the shortest path required for this
question. Below, a general step-by-step is given as to how to solve the problem.
Returns:
traj (JointTrajectory): A list of JointTrajectory points giving the robot joint positions to achieve in a
given time period.
"""
# Steps to solving Q6.
# 1. Load in targets from the bagfile (checkpoint data and target joint positions).
# 2. Compute the shortest path achievable visiting each checkpoint Cartesian position.
# 3. Determine intermediate checkpoints to achieve a linear path between each checkpoint and have a full list of
# checkpoints the robot must achieve. You can publish them to see if they look correct. Look at slides 39 in lecture 7
# 4. Convert all the checkpoints into joint values using an inverse kinematics solver.
# 5. Create a JointTrajectory message.
# Your code starts here ------------------------------
# TODO
#Create object (not necessary) youbot_traj_plan = YoubotTrajectoryPlanning()
#Load targets from bagfile
[target_cart_tf, target_joint_positions] = self.load_targets()
#Sort targets to find shortest path
[sorted_order, min_dist, index_shortest_dist] = self.get_shortest_path(target_cart_tf)
        # Find intermediate points between checkpoints to ensure a straight-line path
#num_points = 5, 5 intermediate points between checkpoints, for smooth straight movement
full_checkpoint_tfs = self.intermediate_tfs(index_shortest_dist, target_cart_tf, 5)
#This function gets a np.ndarray of transforms and publishes them in a color coded fashion to show how the
#Cartesian path of the robot end-effector.
self.publish_traj_tfs(full_checkpoint_tfs)
#This function converts checkpoint transformations (including intermediates) into joint positions
init_joint_position = np.array(target_joint_positions[:,0])
q_checkpoints = self.full_checkpoints_to_joints(full_checkpoint_tfs, init_joint_position) #What is init_joint_position in this?
traj = JointTrajectory()
dt = 2
t = 10
for i in range(q_checkpoints.shape[1]):
traj_point = JointTrajectoryPoint()
traj_point.positions = q_checkpoints[:, i]
t += dt
traj_point.time_from_start.secs = t
traj.points.append(traj_point)
#This function converts joint positions to a kdl array
#kdl_array = self.list_to_kdl_jnt_array(q_checkpoints) # is this traj??no
# Your code ends here ------------------------------
assert isinstance(traj, JointTrajectory)
return traj
def load_targets(self):
"""This function loads the checkpoint data from the 'data.bag' file. In the bag file, you will find messages
relating to the target joint positions. You need to use forward kinematics to get the goal end-effector position.
Returns:
target_cart_tf (4x4x5 np.ndarray): The target 4x4 homogenous transformations of the checkpoints found in the
bag file. There are a total of 5 transforms (4 checkpoints + 1 initial starting cartesian position).
target_joint_positions (5x5 np.ndarray): The target joint values for the 4 checkpoints + 1 initial starting
position.
"""
# Defining ros package path
rospack = rospkg.RosPack()
path = rospack.get_path('cw2q6')
# Initialize arrays for checkpoint transformations and joint positions
target_joint_positions = np.zeros((5, 5))
# Create a 4x4 transformation matrix, then stack 6 of these matrices together for each checkpoint
target_cart_tf = np.repeat(np.identity(4), 5, axis=1).reshape((4, 4, 5))
# Load path for selected question
bag = rosbag.Bag(path + '/bags/data.bag')
# Get the current starting position of the robot
target_joint_positions[:, 0] = self.kdl_youbot.kdl_jnt_array_to_list(self.kdl_youbot.current_joint_position)
# Initialize the first checkpoint as the current end effector position
target_cart_tf[:, :, 0] = self.kdl_youbot.forward_kinematics(target_joint_positions[:, 0])
# Your code starts here ------------------------------
#if len(sys.argv) != 2:
# sys.stderr.write('[ERROR] This script only takes input bag file as argument.n')
#else:
# inputFileName = sys.argv[1]
# print "[OK] Found bag: %s" % inputFileName
topicList = []
i = 1
for topic, msgs, t in bag.read_messages(['joint_data']):
target_joint_positions[:,i] = msgs.position
target_cart_tf[:,:,i] = self.kdl_youbot.forward_kinematics(target_joint_positions[:,i], 5)
i+=1
my_pt = JointTrajectoryPoint()
if topicList.count(topic) == 0:
topicList.append(topic)
#print '{0} topics found:'.format(len(topicList))
#print(target_cart_tf)
# Your code ends here ------------------------------
# Close the bag
bag.close()
assert isinstance(target_cart_tf, np.ndarray)
assert target_cart_tf.shape == (4, 4, 5)
assert isinstance(target_joint_positions, np.ndarray)
assert target_joint_positions.shape == (5, 5)
return target_cart_tf, target_joint_positions
def get_shortest_path(self, checkpoints_tf):
"""This function takes the checkpoint transformations and computes the order of checkpoints that results
in the shortest overall path.
Args:
checkpoints_tf (np.ndarray): The target checkpoint 4x4 transformations.
Returns:
sorted_order (np.array): An array of size 5 indicating the order of checkpoint
min_dist: (float): The associated distance to the sorted order giving the total estimate for travel
distance.
"""
# Your code starts here ------------------------------
#Calculate the distance between all points, then choose the shortest distances in the cost matrix
#print(checkpoints_tf.shape)
checkpoints = []
perm = permutations(checkpoints_tf)
for i in range(checkpoints_tf.shape[2]):
#checkpoints[i] = checkpoints_tf[0:3, 3, i]
#print(checkpoints_tf[0:3,3])
checkpoints.append(checkpoints_tf[0:3, 3, i])
# get checkpoint coordinates from checkpoint transformation matrix, rows 1-3 of last column
# Calculate cost matrix, distance between all n points, giving n x n matrix
checkpoints= np.array(checkpoints)
cost_matrix = np.zeros((checkpoints.shape[0], checkpoints.shape[0]))
for i in range(checkpoints.shape[0]):
for j in range(checkpoints.shape[0]):
cost_matrix[i,j] = np.sqrt((checkpoints[i][0] - checkpoints[j][0])**2 + (checkpoints[i][1] - checkpoints[j][1])**2 + (checkpoints[i][2] - checkpoints[j][2])**2)
#Make diagonals infinite so that distance between one point and itself isnt chosen
cost_matrix[i,i] = np.inf
# distance between each cartesian point
        # Find shortest path using a greedy nearest-neighbour heuristic.
        # Start from checkpoint 0 (the current end-effector pose) and always
        # move to the closest checkpoint that has not been visited yet.
        index_shortest_dist = [0]
        index = 0
        min_dist = 0.0
        cost_matrix[:, 0] = np.inf  # never return to the starting checkpoint
        for _ in range(checkpoints.shape[0] - 1):
            # distance from the current checkpoint to the nearest unvisited one
            shortest_dist = cost_matrix[index, :].min()
            next_index = int(np.argmin(cost_matrix[index, :]))
            index_shortest_dist.append(next_index)
            cost_matrix[:, next_index] = np.inf  # mark the checkpoint as visited
            min_dist += shortest_dist  # accumulate the total travel distance
            index = next_index
#Sort checkpoints into order dictated by index_shortest_dist
sorted_order = []
for i in range(5):
sorted_order.append(checkpoints[index_shortest_dist[i]])
# this will Append and sort checkpoints in order of shortest path
# Your code ends here ------------------------------
#assert isinstance(sorted_order, np.ndarray)
#assert sorted_order.shape == (5,)
assert isinstance(min_dist, float)
#return sorted_order
return sorted_order, min_dist, index_shortest_dist
def publish_traj_tfs(self, tfs):
"""This function gets a np.ndarray of transforms and publishes them in a color coded fashion to show how the
Cartesian path of the robot end-effector.
Args:
tfs (np.ndarray): A array of 4x4xn homogenous transformations specifying the end-effector trajectory.
"""
id = 0
        for i in range(0, tfs.shape[2]):  # one marker per checkpoint transform
marker = Marker()
marker.id = id
id += 1
marker.header.frame_id = 'base_link'
marker.header.stamp = rospy.Time.now()
marker.type = marker.SPHERE
marker.action = marker.ADD
marker.scale.x = 0.01
marker.scale.y = 0.01
marker.scale.z = 0.01
marker.color.a = 1.0
marker.color.r = 0.0
marker.color.g = 0.0 + id * 0.05
marker.color.b = 1.0 - id * 0.05
marker.pose.orientation.w = 1.0
marker.pose.position.x = tfs[0,-1, i]
marker.pose.position.y = tfs[1,-1,i]
marker.pose.position.z = tfs[2,-1, i]
self.checkpoint_pub.publish(marker)
def intermediate_tfs(self, sorted_checkpoint_idx, target_checkpoint_tfs, num_points):
"""This function takes the target checkpoint transforms and the desired order based on the shortest path sorting,
and calls the decoupled_rot_and_trans() function.
Args:
sorted_checkpoint_idx (list): List describing order of checkpoints to follow.
target_checkpoint_tfs (np.ndarray): the state of the robot joints. In a youbot those are revolute
num_points (int): Number of intermediate points between checkpoints.
Returns:
full_checkpoint_tfs: 4x4x(4xnum_points+5) homogeneous transformations matrices describing the full desired
poses of the end-effector position.
"""
# Your code starts here ------------------------------
#TODO
        full_checkpoint_tfs = np.repeat(np.identity(4), 4 * num_points + 5,
                                        axis=1).reshape((4, 4, 4 * num_points + 5))
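        # A possible way (an assumption, not the original decoupled_rot_and_trans
        # helper) to interpolate between two 4x4 poses with decoupled rotation
        # and translation: translation is interpolated linearly, rotation via
        # expm/logm, both already imported at the top of this file.
        def _decoupled_rot_and_trans_sketch(tf_a, tf_b, n):
            tfs = []
            for k in range(1, n + 1):
                s = float(k) / (n + 1)
                tf = np.identity(4)
                # linear interpolation of the translation component
                tf[0:3, 3] = (1 - s) * tf_a[0:3, 3] + s * tf_b[0:3, 3]
                # geodesic interpolation of the rotation component; logm can
                # return a tiny imaginary residue, so keep the real part only
                rot = expm(s * logm(inv(tf_a[0:3, 0:3]).dot(tf_b[0:3, 0:3])))
                tf[0:3, 0:3] = tf_a[0:3, 0:3].dot(np.real(rot))
                tfs.append(tf)
            return tfs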
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
try:
import jsonresults
from jsonresults import *
except ImportError:
print "ERROR: Add the TestResultServer, google_appengine and yaml/lib directories to your PYTHONPATH"
raise
import json
import logging
import unittest
FULL_RESULT_EXAMPLE = """ADD_RESULTS({
"seconds_since_epoch": 1368146629,
"tests": {
"media": {
"encrypted-media": {
"encrypted-media-v2-events.html": {
"bugs": ["crbug.com/1234"],
"expected": "TIMEOUT",
"actual": "TIMEOUT",
"time": 6.0
},
"encrypted-media-v2-syntax.html": {
"expected": "TIMEOUT",
"actual": "TIMEOUT"
}
},
"progress-events-generated-correctly.html": {
"expected": "PASS FAIL IMAGE TIMEOUT CRASH MISSING",
"actual": "TIMEOUT",
"time": 6.0
},
"W3C": {
"audio": {
"src": {
"src_removal_does_not_trigger_loadstart.html": {
"expected": "PASS",
"actual": "PASS",
"time": 3.5
}
}
},
"video": {
"src": {
"src_removal_does_not_trigger_loadstart.html": {
"expected": "PASS",
"actual": "PASS",
"time": 1.1
},
"notrun.html": {
"expected": "NOTRUN",
"actual": "SKIP",
"time": 1.1
}
}
}
},
"unexpected-skip.html": {
"expected": "PASS",
"actual": "SKIP"
},
"unexpected-fail.html": {
"expected": "PASS",
"actual": "FAIL"
},
"flaky-failed.html": {
"expected": "PASS FAIL",
"actual": "FAIL"
},
"media-document-audio-repaint.html": {
"expected": "IMAGE",
"actual": "IMAGE",
"time": 0.1
},
"unexpected-leak.html": {
"expected": "PASS",
"actual": "LEAK"
}
}
},
"skipped": 2,
"num_regressions": 0,
"build_number": "3",
"interrupted": false,
"layout_tests_dir": "\/tmp\/cr\/src\/third_party\/WebKit\/LayoutTests",
"version": 3,
"builder_name": "Webkit",
"num_passes": 10,
"pixel_tests_enabled": true,
"blink_revision": "1234",
"has_pretty_patch": true,
"fixable": 25,
"num_flaky": 0,
"num_failures_by_type": {
"CRASH": 3,
"MISSING": 0,
"TEXT": 3,
"IMAGE": 1,
"PASS": 10,
"SKIP": 2,
"TIMEOUT": 16,
"IMAGE+TEXT": 0,
"FAIL": 2,
"AUDIO": 0,
"LEAK": 1
},
"has_wdiff": true,
"chromium_revision": "5678"
});"""
JSON_RESULTS_OLD_TEMPLATE = (
'{"[BUILDER_NAME]":{'
'"allFixableCount":[[TESTDATA_COUNT]],'
'"blinkRevision":[[TESTDATA_WEBKITREVISION]],'
'"buildNumbers":[[TESTDATA_BUILDNUMBERS]],'
'"chromeRevision":[[TESTDATA_CHROMEREVISION]],'
'"failure_map": %s,'
'"fixableCount":[[TESTDATA_COUNT]],'
'"fixableCounts":[[TESTDATA_COUNTS]],'
'"secondsSinceEpoch":[[TESTDATA_TIMES]],'
'"tests":{[TESTDATA_TESTS]}'
'},'
'"version":[VERSION]'
'}') % json.dumps(CHAR_TO_FAILURE)
JSON_RESULTS_COUNTS = '{"' + '":[[TESTDATA_COUNT]],"'.join([char for char in CHAR_TO_FAILURE.values()]) + '":[[TESTDATA_COUNT]]}'
JSON_RESULTS_TEMPLATE = (
'{"[BUILDER_NAME]":{'
'"blinkRevision":[[TESTDATA_WEBKITREVISION]],'
'"buildNumbers":[[TESTDATA_BUILDNUMBERS]],'
'"chromeRevision":[[TESTDATA_CHROMEREVISION]],'
'"failure_map": %s,'
'"num_failures_by_type":%s,'
'"secondsSinceEpoch":[[TESTDATA_TIMES]],'
'"tests":{[TESTDATA_TESTS]}'
'},'
'"version":[VERSION]'
'}') % (json.dumps(CHAR_TO_FAILURE), JSON_RESULTS_COUNTS)
JSON_RESULTS_COUNTS_TEMPLATE = '{"' + '":[TESTDATA],"'.join([char for char in CHAR_TO_FAILURE]) + '":[TESTDATA]}'
JSON_RESULTS_TEST_LIST_TEMPLATE = '{"Webkit":{"tests":{[TESTDATA_TESTS]}}}'
class MockFile(object):
def __init__(self, name='results.json', data=''):
self.master = 'MockMasterName'
self.builder = 'MockBuilderName'
self.test_type = 'MockTestType'
self.name = name
self.data = data
def save(self, data):
self.data = data
return True
class JsonResultsTest(unittest.TestCase):
def setUp(self):
self._builder = "Webkit"
self.old_log_level = logging.root.level
logging.root.setLevel(logging.ERROR)
def tearDown(self):
logging.root.setLevel(self.old_log_level)
# Use this to get better error messages than just string compare gives.
def assert_json_equal(self, a, b):
self.maxDiff = None
a = json.loads(a) if isinstance(a, str) else a
b = json.loads(b) if isinstance(b, str) else b
self.assertEqual(a, b)
def test_strip_prefix_suffix(self):
json = "['contents']"
self.assertEqual(JsonResults._strip_prefix_suffix("ADD_RESULTS(" + json + ");"), json)
self.assertEqual(JsonResults._strip_prefix_suffix(json), json)
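    # Illustration: uploaded full results are JSONP-wrapped, e.g.
    #   ADD_RESULTS({"version": 3, ...});
    # (see FULL_RESULT_EXAMPLE above); _strip_prefix_suffix() peels off that
    # wrapper so the payload can be handed to json.loads().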
def _make_test_json(self, test_data, json_string=JSON_RESULTS_TEMPLATE, builder_name="Webkit"):
if not test_data:
return ""
builds = test_data["builds"]
tests = test_data["tests"]
if not builds or not tests:
return ""
counts = []
build_numbers = []
webkit_revision = []
chrome_revision = []
times = []
for build in builds:
counts.append(JSON_RESULTS_COUNTS_TEMPLATE.replace("[TESTDATA]", build))
build_numbers.append("1000%s" % build)
webkit_revision.append("2000%s" % build)
chrome_revision.append("3000%s" % build)
times.append("100000%s000" % build)
json_string = json_string.replace("[BUILDER_NAME]", builder_name)
json_string = json_string.replace("[TESTDATA_COUNTS]", ",".join(counts))
json_string = json_string.replace("[TESTDATA_COUNT]", ",".join(builds))
json_string = json_string.replace("[TESTDATA_BUILDNUMBERS]", ",".join(build_numbers))
json_string = json_string.replace("[TESTDATA_WEBKITREVISION]", ",".join(webkit_revision))
json_string = json_string.replace("[TESTDATA_CHROMEREVISION]", ",".join(chrome_revision))
json_string = json_string.replace("[TESTDATA_TIMES]", ",".join(times))
version = str(test_data["version"]) if "version" in test_data else "4"
json_string = json_string.replace("[VERSION]", version)
json_string = json_string.replace("{[TESTDATA_TESTS]}", json.dumps(tests, separators=(',', ':'), sort_keys=True))
return json_string
def _test_merge(self, aggregated_data, incremental_data, expected_data, max_builds=jsonresults.JSON_RESULTS_MAX_BUILDS):
aggregated_results = self._make_test_json(aggregated_data, builder_name=self._builder)
incremental_json, _ = JsonResults._get_incremental_json(self._builder, self._make_test_json(incremental_data, builder_name=self._builder), is_full_results_format=False)
merged_results, status_code = JsonResults.merge(self._builder, aggregated_results, incremental_json, num_runs=max_builds, sort_keys=True)
if expected_data:
expected_results = self._make_test_json(expected_data, builder_name=self._builder)
self.assert_json_equal(merged_results, expected_results)
self.assertEqual(status_code, 200)
else:
self.assertTrue(status_code != 200)
def _test_get_test_list(self, input_data, expected_data):
input_results = self._make_test_json(input_data)
expected_results = JSON_RESULTS_TEST_LIST_TEMPLATE.replace("{[TESTDATA_TESTS]}", json.dumps(expected_data, separators=(',', ':')))
actual_results = JsonResults.get_test_list(self._builder, input_results)
self.assert_json_equal(actual_results, expected_results)
def test_update_files_empty_aggregate_data(self):
small_file = MockFile(name='results-small.json')
large_file = MockFile(name='results.json')
incremental_data = {
"builds": ["2", "1"],
"tests": {
"001.html": {
"results": [[200, TEXT]],
"times": [[200, 0]],
}
}
}
incremental_string = self._make_test_json(incremental_data, builder_name=small_file.builder)
self.assertTrue(JsonResults.update_files(small_file.builder, incremental_string, small_file, large_file, is_full_results_format=False))
self.assert_json_equal(small_file.data, incremental_string)
self.assert_json_equal(large_file.data, incremental_string)
def test_update_files_null_incremental_data(self):
small_file = MockFile(name='results-small.json')
large_file = MockFile(name='results.json')
aggregated_data = {
"builds": ["2", "1"],
"tests": {
"001.html": {
"results": [[200, TEXT]],
"times": [[200, 0]],
}
}
}
aggregated_string = self._make_test_json(aggregated_data, builder_name=small_file.builder)
small_file.data = large_file.data = aggregated_string
incremental_string = ""
self.assertEqual(JsonResults.update_files(small_file.builder, incremental_string, small_file, large_file, is_full_results_format=False),
('No incremental JSON data to merge.', 403))
self.assert_json_equal(small_file.data, aggregated_string)
self.assert_json_equal(large_file.data, aggregated_string)
def test_update_files_empty_incremental_data(self):
small_file = MockFile(name='results-small.json')
large_file = MockFile(name='results.json')
aggregated_data = {
"builds": ["2", "1"],
"tests": {
"001.html": {
"results": [[200, TEXT]],
"times": [[200, 0]],
}
}
}
aggregated_string = self._make_test_json(aggregated_data, builder_name=small_file.builder)
small_file.data = large_file.data = aggregated_string
incremental_data = {
"builds": [],
"tests": {}
}
incremental_string = self._make_test_json(incremental_data, builder_name=small_file.builder)
self.assertEqual(JsonResults.update_files(small_file.builder, incremental_string, small_file, large_file, is_full_results_format=False),
('No incremental JSON data to merge.', 403))
self.assert_json_equal(small_file.data, aggregated_string)
self.assert_json_equal(large_file.data, aggregated_string)
def test_merge_with_empty_aggregated_results(self):
incremental_data = {
"builds": ["2", "1"],
"tests": {
"001.html": {
"results": [[200, TEXT]],
"times": [[200, 0]],
}
}
}
incremental_results, _ = JsonResults._get_incremental_json(self._builder, self._make_test_json(incremental_data), is_full_results_format=False)
aggregated_results = ""
merged_results, _ = JsonResults.merge(self._builder, aggregated_results, incremental_results, num_runs=jsonresults.JSON_RESULTS_MAX_BUILDS, sort_keys=True)
self.assert_json_equal(merged_results, incremental_results)
def test_failures_by_type_added(self):
aggregated_results = self._make_test_json({
"builds": ["2", "1"],
"tests": {
"001.html": {
"results": [[100, TEXT], [100, FAIL]],
"times": [[200, 0]],
}
}
}, json_string=JSON_RESULTS_OLD_TEMPLATE)
incremental_results = self._make_test_json({
"builds": ["3"],
"tests": {
"001.html": {
"results": [[1, TEXT]],
"times": [[1, 0]],
}
}
}, json_string=JSON_RESULTS_OLD_TEMPLATE)
incremental_json, _ = JsonResults._get_incremental_json(self._builder, incremental_results, is_full_results_format=False)
merged_results, _ = JsonResults.merge(self._builder, aggregated_results, incremental_json, num_runs=201, sort_keys=True)
self.assert_json_equal(merged_results, self._make_test_json({
"builds": ["3", "2", "1"],
"tests": {
"001.html": {
"results": [[101, TEXT], [100, FAIL]],
"times": [[201, 0]],
}
}
}))
def test_merge_full_results_format(self):
expected_incremental_results = {
"Webkit": {
"blinkRevision": ["1234"],
"buildNumbers": ["3"],
"chromeRevision": ["5678"],
"failure_map": CHAR_TO_FAILURE,
"num_failures_by_type": {"AUDIO": [0], "CRASH": [3], "FAIL": [2], "IMAGE": [1], "IMAGE+TEXT": [0], "MISSING": [0], "PASS": [10], "SKIP": [2], "TEXT": [3], "TIMEOUT": [16], "LEAK": [1]},
"secondsSinceEpoch": [1368146629],
"tests": {
"media": {
"W3C": {
"audio": {
"src": {
"src_removal_does_not_trigger_loadstart.html": {
"results": [[1, PASS]],
"times": [[1, 4]],
}
}
}
},
"encrypted-media": {
"encrypted-media-v2-events.html": {
"bugs": ["crbug.com/1234"],
"expected": "TIMEOUT",
"results": [[1, TIMEOUT]],
"times": [[1, 6]],
},
"encrypted-media-v2-syntax.html": {
"expected": "TIMEOUT",
"results": [[1, TIMEOUT]],
"times": [[1, 0]],
}
},
"media-document-audio-repaint.html": {
"expected": "IMAGE",
"results": [[1, IMAGE]],
"times": [[1, 0]],
},
"progress-events-generated-correctly.html": {
"expected": "PASS FAIL IMAGE TIMEOUT CRASH MISSING",
"results": [[1, TIMEOUT]],
"times": [[1, 6]],
},
"flaky-failed.html": {
"expected": "PASS FAIL",
"results": [[1, FAIL]],
"times": [[1, 0]],
},
"unexpected-fail.html": {
"results": [[1, FAIL]],
"times": [[1, 0]],
},
"unexpected-leak.html": {
"results": [[1, LEAK]],
"times": [[1, 0]],
},
}
}
},
"version": 4
}
aggregated_results = ""
incremental_json, _ = JsonResults._get_incremental_json(self._builder, FULL_RESULT_EXAMPLE, is_full_results_format=True)
merged_results, _ = JsonResults.merge("Webkit", aggregated_results, incremental_json, num_runs=jsonresults.JSON_RESULTS_MAX_BUILDS, sort_keys=True)
self.assert_json_equal(merged_results, expected_incremental_results)
def test_merge_empty_aggregated_results(self):
# No existing aggregated results.
# Merged results == new incremental results.
self._test_merge(
# Aggregated results
None,
# Incremental results
{"builds": ["2", "1"],
"tests": {"001.html": {
"results": [[200, TEXT]],
"times": [[200, 0]]}}},
# Expected result
{"builds": ["2", "1"],
"tests": {"001.html": {
"results": [[200, TEXT]],
"times": [[200, 0]]}}})
def test_merge_duplicate_build_number(self):
self._test_merge(
# Aggregated results
{"builds": ["2", "1"],
"tests": {"001.html": {
"results": [[100, TEXT]],
"times": [[100, 0]]}}},
# Incremental results
{"builds": ["2"],
"tests": {"001.html": {
"results": [[1, TEXT]],
"times": [[1, 0]]}}},
# Expected results
None)
def test_merge_incremental_single_test_single_run_same_result(self):
        # Incremental results has the same result for the test as the
        # aggregated results.
"""
This module defines common methods used in simulator specific build modules
@author <NAME>
"""
##########################################################################
#
# Copyright 2011 Okinawa Institute of Science and Technology (OIST), Okinawa
#
##########################################################################
from __future__ import absolute_import
from builtins import object
from future.utils import PY3
import platform
import os
import subprocess as sp
import time
from itertools import chain
from copy import deepcopy
import shutil
from os.path import join
from jinja2 import Environment, FileSystemLoader, StrictUndefined
from future.utils import with_metaclass
from abc import ABCMeta, abstractmethod
import sympy
from nineml import units
from nineml.exceptions import NineMLNameError, NineMLSerializationError
from pype9.exceptions import (
Pype9BuildError, Pype9CommandNotFoundError, Pype9RuntimeError)
from ..cells.with_synapses import read
import pype9.annotations
from pype9.annotations import PYPE9_NS, BUILD_PROPS
from os.path import expanduser
import re
from nineml.serialization import url_re
import sysconfig
from pype9 import __version__
from pype9.utils.paths import remove_ignore_missing
from pype9.utils.logging import logger
BASE_BUILD_DIR = os.path.join(
expanduser("~"),
'.pype9',
'build',
'v{}'.format(__version__),
'python{}'.format(sysconfig.get_config_var('py_version')))
class BaseCodeGenerator(with_metaclass(ABCMeta, object)):
"""
Parameters
----------
base_dir : str | None
The base directory for the generated code. If None a directory
will be created in user's home directory.
"""
BUILD_MODE_OPTIONS = ['lazy', # Build iff source has been updated
'force', # Build always
'require', # Don't build, requires pre-built
'build_only', # Only build
'generate_only', # Only generate source files
'purge' # Remove all configure files and rebuild
]
_PARAMS_DIR = 'params'
_SRC_DIR = 'src'
_INSTL_DIR = 'install'
_CMPL_DIR = 'compile' # Ignored for NEURON but used for NEST
_BUILT_COMP_CLASS = 'built_component_class.xml'
# Python functions and annotations to be made available in the templates
_globals = dict(
[('len', len), ('zip', zip), ('enumerate', enumerate),
('range', range), ('next', next), ('chain', chain), ('sorted',
sorted), ('hash', hash), ('deepcopy', deepcopy), ('units', units),
('hasattr', hasattr), ('set', set), ('list', list), ('None', None),
('sympy', sympy)] +
[(n, v) for n, v in list(pype9.annotations.__dict__.items())
if n != '__builtins__'])
# Derived classes should provide mapping from 9ml dimensions to default
# units
DEFAULT_UNITS = {}
def __init__(self, base_dir=None, **kwargs): # @UnusedVariable
if base_dir is None:
base_dir = BASE_BUILD_DIR
self._base_dir = os.path.join(
base_dir, self.SIMULATOR_NAME + self.SIMULATOR_VERSION)
def __repr__(self):
return "{}CodeGenerator(base_dir='{}')".format(
self.SIMULATOR_NAME.capitalize(), self.base_dir)
def __eq__(self, other):
try:
return (self.SIMULATOR_NAME == other.SIMULATOR_NAME and
self.base_dir == other.base_dir)
except AttributeError:
return False
def __ne__(self, other):
return not self.__eq__(other)
@property
def base_dir(self):
return self._base_dir
@abstractmethod
def generate_source_files(self, dynamics, src_dir, name, **kwargs):
"""
Generates the source files for the relevant simulator
"""
pass
def configure_build_files(self, name, src_dir, compile_dir, install_dir,
**kwargs):
"""
Configures the build files before compiling
"""
pass
@abstractmethod
def compile_source_files(self, compile_dir, name):
pass
def generate(self, component_class, build_mode='lazy', url=None, **kwargs):
"""
Generates and builds the required simulator-specific files for a given
NineML cell class
Parameters
----------
component_class : nineml.Dynamics
9ML Dynamics object
name : str
Name of the generated cell class
install_dir : str
Path to the directory where the NMODL files
will be generated and compiled
build_mode : str
Available build options:
lazy - only build if files are modified
force - always generate and build
purge - remove all config files, generate and rebuild
require - require built binaries are present
build_only - build and then quit
generate_only - generate src and then quit
                recompile - don't generate src but compile (note: not in
                    BUILD_MODE_OPTIONS, so currently rejected by this method)
build_version : str
A suffix appended to the cell build name to distinguish
it from other code generated from the component class
url : str
The URL where the component class is stored (used to form the
build path)
kwargs : dict
A dictionary of (potentially simulator- specific) template
arguments
"""
# Save original working directory to reinstate it afterwards (just to
# be polite)
name = component_class.name
orig_dir = os.getcwd()
if url is None:
url = component_class.url
# Calculate compile directory path within build directory
src_dir = self.get_source_dir(name, url)
compile_dir = self.get_compile_dir(name, url)
install_dir = self.get_install_dir(name, url)
# Path of the build component class
built_comp_class_pth = os.path.join(src_dir, self._BUILT_COMP_CLASS)
# Determine whether the installation needs rebuilding or whether there
# is an existing library module to use.
if build_mode == 'purge':
remove_ignore_missing(src_dir)
remove_ignore_missing(install_dir)
remove_ignore_missing(compile_dir)
generate_source = compile_source = True
elif build_mode in ('force', 'build_only'): # Force build
generate_source = compile_source = True
elif build_mode == 'require': # Just check that prebuild is present
generate_source = compile_source = False
elif build_mode == 'generate_only': # Only generate
generate_source = True
compile_source = False
elif build_mode == 'lazy': # Generate if source has been modified
compile_source = True
if not os.path.exists(built_comp_class_pth):
generate_source = True
else:
try:
built_component_class = read(built_comp_class_pth)[name]
if built_component_class.equals(component_class,
annotations_ns=[PYPE9_NS]):
generate_source = False
logger.info("Found existing source in '{}' directory, "
"code generation skipped (set 'build_mode'"
" argument to 'force' or 'build_only' to "
"enforce regeneration)".format(src_dir))
else:
generate_source = True
logger.info("Found existing source in '{}' directory, "
"but the component classes differ so "
"regenerating sources".format(src_dir))
except (NineMLNameError, NineMLSerializationError):
generate_source = True
logger.info("Found existing source in '{}' directory, "
"but could not find '{}' component class so "
"regenerating sources".format(name, src_dir))
        else:
            raise Pype9BuildError(
                "Unrecognised build option '{}', must be one of ('{}')"
                .format(build_mode, "', '".join(self.BUILD_MODE_OPTIONS)))
        # For 'require', check that the prebuilt installation is present
        if build_mode == 'require' and not os.path.exists(install_dir):
            raise Pype9BuildError(
                "Prebuilt installation directory '{}' is not "
                "present, and is required for 'require' build option"
                .format(install_dir))
# Generate source files from NineML code
if generate_source:
self.clean_src_dir(src_dir, name)
self.generate_source_files(
name=name,
component_class=component_class,
src_dir=src_dir,
compile_dir=compile_dir,
install_dir=install_dir,
**kwargs)
component_class.write(built_comp_class_pth,
preserve_order=True, version=2.0)
if compile_source:
# Clean existing compile & install directories from previous builds
if generate_source:
self.clean_compile_dir(compile_dir,
purge=(build_mode == 'purge'))
self.configure_build_files(
name=name, src_dir=src_dir, compile_dir=compile_dir,
install_dir=install_dir, **kwargs)
self.clean_install_dir(install_dir)
self.compile_source_files(compile_dir, name)
# Switch back to original dir
os.chdir(orig_dir)
# Cache any dimension maps that were calculated during the generation
# process
return install_dir
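    # Illustrative usage sketch (not part of the class): a simulator-specific
    # subclass that implements generate_source_files/compile_source_files
    # could be driven roughly as follows; `MyCodeGenerator` and 'model.xml'
    # are placeholder names, not pype9 API:
    #
    #   gen = MyCodeGenerator()
    #   install_dir = gen.generate(component_class, build_mode='lazy',
    #                              url='model.xml')
    #
    # 'lazy' regenerates sources only if the stored built_component_class.xml
    # differs from the component class passed in; 'purge' wipes the src,
    # compile and install directories first.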
def get_build_dir(self, name, url):
return os.path.join(self.base_dir, self.url_build_path(url), name)
def get_source_dir(self, name, url):
return os.path.abspath(os.path.join(
self.get_build_dir(name, url), self._SRC_DIR))
def get_compile_dir(self, name, url):
return os.path.abspath(os.path.join(
self.get_build_dir(name, url), self._CMPL_DIR))
def get_install_dir(self, name, url):
return os.path.abspath(os.path.join(
self.get_build_dir(name, url), self._INSTL_DIR))
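    # Resulting on-disk layout (illustrative), for a component named 'MyCell':
    #
    #   <base_dir>/<url_build_path(url)>/MyCell/
    #       src/        generated sources (+ built_component_class.xml)
    #       compile/    compilation artefacts (ignored for NEURON, used for NEST)
    #       install/    installed build products returned by generate()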
def clean_src_dir(self, src_dir, component_name): # @UnusedVariable
# Clean existing src directories from previous builds.
shutil.rmtree(src_dir, ignore_errors=True)
try:
os.makedirs(src_dir)
except OSError as e:
raise Pype9BuildError(
"Could not create source directory ({}), please check the "
"required permissions or specify a different \"build dir"
"base\" ('build_dir_base'):\n{}".format(src_dir, e))
def clean_compile_dir(self, compile_dir, purge=False): # @UnusedVariable
# Clean existing compile & install directories from previous builds
shutil.rmtree(compile_dir, ignore_errors=True)
try:
os.makedirs(compile_dir)
except OSError as e:
raise Pype9BuildError(
"Could not create compile directory ({}), please check the "
"required permissions or specify a different \"build dir"
"base\" ('build_dir_base'):\n{}".format(compile_dir, e))
def clean_install_dir(self, install_dir):
# Clean existing compile & install directories from previous builds
shutil.rmtree(install_dir, ignore_errors=True)
try:
os.makedirs(install_dir)
except OSError as e:
raise Pype9BuildError(
"Could not create install directory ({}), please check the "
"required permissions or specify a different \"build dir"
"base\" ('build_dir_base'):\n{}".format(install_dir, e))
def render_to_file(self, template, args, filename, directory, switches={},
post_hoc_subs={}):
# Initialise the template loader to include the flag directories
template_paths = [
self.BASE_TMPL_PATH,
os.path.join(self.BASE_TMPL_PATH, 'includes')]
# Add include paths for various switches (e.g. solver type)
for name, value in list(switches.items()):
if value is not None:
template_paths.append(os.path.join(self.BASE_TMPL_PATH,
'includes', name, value))
# Add default path for template includes
template_paths.append(
os.path.join(self.BASE_TMPL_PATH, 'includes', 'default'))
# Initialise the Jinja2 environment
jinja_env = Environment(loader=FileSystemLoader(template_paths),
trim_blocks=True, lstrip_blocks=True,
undefined=StrictUndefined)
# Add some globals used by the template code
jinja_env.globals.update(**self._globals)
# Actually render the contents
contents = jinja_env.get_template(template).render(**args)
for old, new in list(post_hoc_subs.items()):
contents = contents.replace(old, new)
# Write the contents to file
with open(os.path.join(directory, filename), 'w') as f:
f.write(contents)
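    # Illustrative call (template and argument names are hypothetical):
    #
    #   self.render_to_file('cell.tmpl', {'component_name': name},
    #                       'MyCell.mod', src_dir,
    #                       switches={'solver': 'cvode'})
    #
    # With switches={'solver': 'cvode'}, templates are looked up first under
    # BASE_TMPL_PATH/includes/solver/cvode, then under includes/default.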
def path_to_utility(self, utility_name, env_var='', **kwargs): # @UnusedVariable @IgnorePep8
"""
Returns the full path to an executable by searching the "PATH"
environment variable
Parameters
----------
utility_name : str
Name of executable to search the execution path
env_var : str
Name of a environment variable to lookup first before searching
path
default : str | None
The default value to assign to the path if it cannot be found.
Returns
-------
utility_path : str
Full path to executable
"""
if kwargs and list(kwargs) != ['default']:
raise Pype9RuntimeError(
"Should only provide 'default' as kwarg to path_to_utility "
"provided ({})".format(kwargs))
try:
utility_path = os.environ[env_var]
except KeyError:
if platform.system() == 'Windows':
utility_name += '.exe'
# Get the system path
system_path = os.environ['PATH'].split(os.pathsep)
# Append NEST_INSTALL_DIR/NRNHOME if present
system_path.extend(self.simulator_specific_paths())
            # Check the system path for the utility executable
size=1)
>>> cdll[::-1]
CDLList(head=Node(value=3, left=<class 'ds.cdll.Node'>, right=<class 'ds.cdll.Node'>), size=3)
>>> cdll[:3:2]
CDLList(head=Node(value=1, left=<class 'ds.cdll.Node'>, right=<class 'ds.cdll.Node'>), size=2)
>>> list(cdll[:3:2])
[1, 3]
>>> cdll[::'w']
Traceback (most recent call last):
...
ds.errors.InvalidIntegerSliceError: slice indices must be integers or None or have an __index__ method
Orig: <class 'TypeError'>
"""
if isinstance(index, int):
try:
return self.peek(index, node=False, errors='raise')
except IndexError:
raise IndexError(
f"{index=} out of range, "
f"for CDLList of {len(self)=} items"
)
# slice
err = _validate_integer_slice(index)
if err is not None:
raise InvalidIntegerSliceError(orig=err) from None
start, stop, step = index.indices(self.size)
return self.__class__.from_iterable(
self[i] for i in range(start, stop, step)
)
@overload
def __setitem__(self, index: int, value: T):
"""If index is int, set item at given index to value.
If index is out of range, raise IndexError.
"""
@overload
def __setitem__(self, index: slice, value: Iterable[T]):
"""If index is slice, set items in given range to values.
If extended slice length is greater than value length,
raise ValueError.
"""
def __setitem__(self, index, value):
"""
Set item(s) at given index(es).
If index is int, set item at given index to value.
If index is out of range, raise IndexError.
If index is slice, set items in given range to values.
If extended slice length is greater than value length,
raise ValueError.
If slice is not a valid integer slice, raise InvalidIntegerSliceError.
Usage:
>>> from ds.cdll import CDLList
>>> cdll = CDLList([1, 2, 3])
>>> cdll[0] = 4
>>> cdll[1] = 5
>>> cdll[-1] = 7
>>> cdll[3] = 8
Traceback (most recent call last):
...
IndexError: Index 3 out of range
>>> cdll[1:2] = [9, 10]
>>> list(cdll)
[4, 9, 10, 7]
>>> cdll[::-1] = [11, 12, 13, 14]
>>> list(cdll)
[14, 13, 12, 11]
>>> cdll[:3:2] = [-1, -2]
>>> list(cdll)
[-1, 13, -2, 11]
>>> cdll[::-1] = [15, 16]
Traceback (most recent call last):
...
ValueError: attempt to assign sequence of size 2 to extended slice of size 4
>>> cdll[2:] = []
>>> list(cdll)
[-1, 13]
>>> cdll[:] = []
>>> list(cdll)
[]
"""
# breakpoint()
if isinstance(index, int):
try:
self.peek(index, node=True, errors='raise').value = value
return
except (IndexError, ValueError) as exc:
# @TODO: add logging
raise exc from None
# handle slice
err = _validate_integer_slice(index)
if err is not None:
raise InvalidIntegerSliceError(orig=err) from None
points = range(*index.indices(self.size))
slice_size = len(points)
start, stop, step = points.start, points.stop, points.step
new = self.__class__.from_iterable(value)
if slice_size == self.size:
if step < 0 and new.size != self.size:
raise ValueError(
f"attempt to assign sequence of size {new.size} "
f"to extended slice of size {self.size}"
)
self.clear()
if step < 0:
value = reversed(value)
self.extend(value)
return
if step == 1:
del self[start:stop:step]
if new.size == 0:
del new
return
set_after = self.peek(start - 1, node=True, errors='raise')
set_until = set_after.right
set_after.right = new.head
set_until.left = new.tail
new.tail.right = set_until
new.head.left = set_after
self.size += new.size
del new
return
# handle extended slice
if slice_size != len(new):
raise ValueError(
f"attempt to assign sequence of size {len(new)} "
f"to extended slice of size {slice_size}"
)
        # iterate over the already-built `new` list rather than `value`, which
        # may be a one-shot iterator and would otherwise be shadowed here
        for i, v in zip(points, new, strict=True):
            self[i] = v
@assert_types(index = int | slice)
def __delitem__(self, index: int | slice):
"""If index is int, delete item at given index.
If index is out of range, raise IndexError.
If index is slice, delete items in given range.
If slice is not a valid integer slice, raise InvalidIntegerSliceError.
Usage:
>>> from ds.cdll import CDLList
>>> cdll = CDLList([1, 2, 3, 4, 5, 6, 7])
>>> del cdll[0]
>>> cdll
CDLList(head=Node(value=2, left=<class 'ds.cdll.Node'>, right=<class 'ds.cdll.Node'>), size=6)
>>> del cdll[1:3]
>>> cdll
CDLList(head=Node(value=2, left=<class 'ds.cdll.Node'>, right=<class 'ds.cdll.Node'>), size=4)
>>> del cdll[:2]
>>> cdll
CDLList(head=Node(value=6, left=<class 'ds.cdll.Node'>, right=<class 'ds.cdll.Node'>), size=2)
>>> del cdll[5:4]
>>> cdll
CDLList(head=Node(value=6, left=<class 'ds.cdll.Node'>, right=<class 'ds.cdll.Node'>), size=2)
>>> del cdll[:]
>>> cdll
CDLList(empty, size=0)
"""
if isinstance(index, int):
try:
self.pop(index)
return
except IndexError as exc:
# @TODO: Add logging.
raise exc from None
# handle slice
err = _validate_integer_slice(index)
if err is not None:
raise InvalidIntegerSliceError(orig=err) from None
points = range(*index.indices(self.size))
slice_size = len(points)
if slice_size == self.size:
self.clear()
return
if slice_size == 0 or self.size == 0:
return
start, stop, step = points.start, points.stop, points.step
if step == 1 or step == -1:
start, stop = (start, stop - 1) if step == 1 else (stop + 1, start)
del_from = self.peek(start, node=True, errors='raise')
del_till = self.peek(stop, node=True, errors='raise')
del_from.left.right = del_till.right
del_till.right.left = del_from.left
self.size -= slice_size
# assign correct head if head was deleted
if 0 in points and self.size > 0:
self.head = del_till.right
return
# handle extended slice
# if start index is less than stop index,
# reverse the range to preserve order
preserve_order = reversed if start < stop else lambda x: x
# if points.start < points.stop:
# points = reversed(points) # type ignore
for i in preserve_order(points):
self.pop(i)
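    # A further illustrative example of an extended-slice delete (assuming the
    # pop()-based loop above behaves like list deletion):
    #
    #   >>> cdll = CDLList([1, 2, 3, 4, 5, 6, 7])
    #   >>> del cdll[::2]          # removes indices 0, 2, 4, 6
    #   >>> list(cdll)
    #   [2, 4, 6]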
@assert_types(by=int)
def __rshift__(self, by: int) -> 'CDLList[T]':
"""Moves head to right by given amount,
returns the CDLList with items shifted by given amount.
Modifies in-place.
        If amount is negative, shift items to the left instead.
        If amount is greater than the length of the CDLList, it is reduced
        modulo self.size before the right shift is applied.
Usage:
>>> from ds.cdll import CDLList
>>> cdll = CDLList([1, 2, 3])
>>> cdll
CDLList(head=Node(value=1, left=<class 'ds.cdll.Node'>, right=<class 'ds.cdll.Node'>), size=3)
>>> cdll.head
Node(value=1, left=<class 'ds.cdll.Node'>, right=<class 'ds.cdll.Node'>)
>>> cdll >> 2
CDLList(head=Node(value=3, left=<class 'ds.cdll.Node'>, right=<class 'ds.cdll.Node'>), size=3)
>>> cdll.head
Node(value=3, left=<class 'ds.cdll.Node'>, right=<class 'ds.cdll.Node'>)
>>> cdll >> -2 # equivalent to cdll << 2
CDLList(head=Node(value=1, left=<class 'ds.cdll.Node'>, right=<class 'ds.cdll.Node'>), size=3)
>>> cdll.head
Node(value=1, left=<class 'ds.cdll.Node'>, right=<class 'ds.cdll.Node'>)
>>> # cdll >> 4 is equivalent to: cdll >> 1 (because 4 % 3 == 1)
>>> cdll >> 4
CDLList(head=Node(value=2, left=<class 'ds.cdll.Node'>, right=<class 'ds.cdll.Node'>), size=3)
"""
if by == 0:
return self.copy()
# if by negative, shift left by negative amount
if by < 0:
return self << -by
# if shift is greater than length, shift by modulo
if by >= self.size:
return self >> (by % self.size)
# if shift is greater than half length, shift left by length - shift
if by > self.size // 2:
return self << (self.size - by)
self.head = self.peek(by, node=True, errors='raise')
return self
@assert_types(by=int)
def __lshift__(self, by: int) -> 'CDLList[T]':
"""Moves head to left by given amount,
returns the CDLList with items shifted by given amount.
Modifies in-place.
        If amount is negative, shift items to the right instead.
        If amount is greater than the length of the CDLList, it is reduced
        modulo self.size before the left shift is applied.
Usage:
>>> from ds.cdll import CDLList
>>> cdll = CDLList([1, 2, 3])
>>> cdll
CDLList(head=Node(value=1, left=<class 'ds.cdll.Node'>, right=<class 'ds.cdll.Node'>), size=3)
>>> cdll.head
Node(value=1, left=<class 'ds.cdll.Node'>, right=<class 'ds.cdll.Node'>)
>>> cdll << 2
CDLList(head=Node(value=2, left=<class 'ds.cdll.Node'>, right=<class 'ds.cdll.Node'>), size=3)
>>> cdll.head
Node(value=2, left=<class 'ds.cdll.Node'>, right=<class 'ds.cdll.Node'>)
>>> cdll << -2 # equivalent to cdll >> 2
CDLList(head=Node(value=1, left=<class 'ds.cdll.Node'>, right=<class 'ds.cdll.Node'>), size=3)
>>> cdll.head
Node(value=1, left=<class 'ds.cdll.Node'>, right=<class 'ds.cdll.Node'>)
>>> # cdll << 4 is equivalent to: cdll << 1 (because 4 % 3 == 1)
>>> cdll << 4
CDLList(head=Node(value=3, left=<class 'ds.cdll.Node'>, right=<class 'ds.cdll.Node'>), size=3)
"""
if by == 0:
return self.copy()
# if by negative, shift right by negative amount
if by < 0:
return self >> -by
# if shift is greater than length, shift by modulo
if by >= self.size:
return self << (by % self.size)
# if shift is greater than half length, shift right by length - shift
if by > self.size // 2:
return self >> (self.size - by)
self.head = self.peek(-by, node=True, errors='raise')
return self
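    # Note on the size // 2 short-cuts above: a shift is always rewritten as
    # the cheaper of the two directions, e.g. for a 10-element list
    # `cdll >> 7` is executed as `cdll << 3`, presumably so that peek() never
    # has to walk more than half the ring.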
def __contains__(self, x: Any) -> bool:
"""Check if value in CDLList.
Usage:
>>> from ds.cdll import CDLList
>>> cdll = CDLList([1, 2, 3])
>>> 2 in cdll
True
>>> 4 in cdll
False
"""
XZ plane
zgrid = np.linspace(*zbounds, nperax)
xgrid = np.linspace(*xbounds, nperax)
fig = self.plot_phi_map(xgrid, zgrid, contour=contour, **kwargs)
ax = fig.axes[0]
# Add marker for stim position
ax.scatter([xstim * 1e-3], [zstim * 1e-3], label='electrode')
# Add markers for axon nodes
xnodes = self.axon.xnodes + self.axon.pos[0] # um
xnodes = xnodes[np.logical_and(xnodes >= xbounds[0], xnodes <= xbounds[-1])]
znodes = np.ones(xnodes.size) * zaxon # um
ax.axhline(zaxon * 1e-3, c='silver', lw=4, label='axon axis')
ax.scatter(xnodes * 1e-3, znodes * 1e-3, zorder=80, color='k', label='nodes')
ax.legend()
return fig
def plot_vprofile(self, ax=None, update=False, redraw=False):
'''
Plot the spatial distribution of the extracellular potential along the axon
:param ax (optional): axis on which to plot
:return: figure handle
'''
if ax is None:
fig, ax = plt.subplots(figsize=(6, 3))
ax.set_xlabel(AX_POS_MM)
sns.despine(ax=ax)
else:
fig = ax.get_figure()
xnodes = self.axon.xnodes # um
phinodes = self.get_phi(xnodes, I=self.stim.I) # mV
if update:
line = ax.get_lines()[0]
line.set_xdata(xnodes * 1e-3)
line.set_ydata(phinodes)
ax.relim()
ax.autoscale_view()
else:
ax.set_title('potential distribution along axon')
ax.set_ylabel('φ (mV)')
ax.plot(xnodes * 1e-3, phinodes)
if update and redraw:
fig.canvas.draw()
return fig
def plot_activating_function(self, ax=None, update=False, redraw=False):
'''
Plot the spatial distribution of the activating function along the axon
:param ax (optional): axis on which to plot
:return: figure handle
'''
if ax is None:
fig, ax = plt.subplots(figsize=(6, 3))
ax.set_xlabel(AX_POS_MM)
sns.despine(ax=ax)
else:
fig = ax.get_figure()
xnodes = self.axon.xnodes # um
phinodes = self.get_phi(xnodes, I=self.stim.I) # mV
        d2phidx2 = self.get_activating_function(xnodes * 1e-3, phinodes)  # mV/mm2
if update:
line = ax.get_lines()[0]
line.set_xdata(xnodes * 1e-3)
line.set_ydata(d2phidx2)
ax.relim()
ax.autoscale_view()
else:
ax.set_title('activating function along axon')
            ax.set_ylabel('d2φ/dx2 (mV/mm2)')
ax.plot(xnodes * 1e-3, d2phidx2)
if update and redraw:
fig.canvas.draw()
return fig
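    # Hedged sketch of what get_activating_function (defined elsewhere in this
    # class) is presumed to compute: the discrete second spatial derivative of
    # the extracellular potential along the axon, in mV/mm2. For instance:
    #
    #   import numpy as np
    #   def activating_function_sketch(x_mm, phi_mv):
    #       # two applications of np.gradient approximate d2(phi)/dx2 on a
    #       # possibly non-uniform grid
    #       return np.gradient(np.gradient(phi_mv, x_mm), x_mm)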
def plot_profiles(self, fig=None):
'''
Plot profiles of extracellular potential and activating function along the axon
:return: figure handle
'''
# Get figure
if fig is None:
fig, axes = plt.subplots(2, figsize=(8, 4), sharex=True)
update = False
else:
axes = fig.axes
update = True
self.plot_vprofile(ax=axes[0], update=update, redraw=False)
self.plot_activating_function(ax=axes[1], update=update, redraw=False)
if not update:
for ax in axes[:-1]:
sns.despine(ax=ax, bottom=True)
ax.xaxis.set_ticks_position('none')
sns.despine(ax=axes[-1])
axes[-1].set_xlabel(AX_POS_MM)
else:
fig.canvas.draw()
return fig
def plot_vmap(self, tvec, vnodes, ax=None, update=False, redraw=True, add_rec_locations=False):
'''
Plot 2D colormap of membrane potential across nodes and time
:param tvec: time vector (ms)
:param vnodes: 2D array of membrane voltage of nodes and time
:param ax (optional): axis on which to plot
:param update: whether to update an existing figure or not
:param redraw: whether to redraw figure upon update
:param add_rec_locations: whether to add recruitment locations (predicted from
            activating function) on the map
:return: figure handle
'''
y = np.arange(self.axon.nnodes)
if ax is None:
fig, ax = plt.subplots(figsize=(np.ptp(tvec), np.ptp(y) / 50))
ax.set_xlabel(TIME_MS)
sns.despine(ax=ax, offset={'left': 10., 'bottom': 10})
else:
fig = ax.get_figure()
# Get normalizer and scalar mapable
vlims = (min(vnodes.min(), V_LIMS[0]), max(vnodes.max(), V_LIMS[1]))
norm = plt.Normalize(*vlims)
sm = cm.ScalarMappable(norm=norm, cmap='viridis')
if not update:
# Plot map
ax.set_ylabel('# nodes')
self.pm = ax.pcolormesh(tvec, y, vnodes, norm=norm, cmap='viridis')
fig.subplots_adjust(right=0.8)
pos = ax.get_position()
self.cax = fig.add_axes([pos.x1 + .02, pos.y0, 0.02, pos.y1 - pos.y0])
self.cbar = fig.colorbar(sm, cax=self.cax)
else:
self.pm.set_array(vnodes)
self.cbar.update_normal(sm)
# Add colorbar
self.cbar.set_ticks(vlims)
if not update:
self.cbar.set_label(V_MV, labelpad=-15)
if add_rec_locations:
# Compute activating function profile
xnodes = self.axon.xnodes # um
phinodes = self.get_phi(xnodes, I=self.stim.I) # mV
            d2phidx2 = self.get_activating_function(xnodes * 1e-3, phinodes)  # mV/mm2
# Infer recruitment location(s) from maximum point(s) of activating function
psimax = np.max(d2phidx2)
if psimax > 0.:
irecnodes = np.where(np.isclose(d2phidx2, psimax))
xrec = xnodes[irecnodes]
# Remove previous lines
if update:
lines = ax.get_lines()
while lines:
l = lines.pop(0)
l.remove()
# Add current lines
for x in xrec:
ax.axhline(x * 1e-3, c='r', ls='--')
if update and redraw:
fig.canvas.draw()
return fig
def plot_vtraces(self, tvec, vnodes, ax=None, inodes=None, update=False, redraw=True, mark_spikes=False):
'''
Plot membrane potential traces at specific nodes
:param tvec: time vector (ms)
:param vnodes: 2D array of membrane voltage of nodes and time
:param ax (optional): axis on which to plot
:param inodes (optional): specific node indexes
:param update: whether to update an existing figure or not
:param redraw: whether to redraw figure upon update
:return: figure handle
'''
if ax is None:
fig, ax = plt.subplots(figsize=(np.ptp(tvec), 3))
ax.set_xlabel(TIME_MS)
sns.despine(ax=ax)
else:
fig = ax.get_figure()
nnodes = vnodes.shape[0]
if inodes is None:
inodes = [0, nnodes // 2, nnodes - 1]
vtraces = {f'node {inode}': vnodes[inode, :] for inode in inodes}
if update:
for line, (label, vtrace) in zip(ax.get_lines(), vtraces.items()):
line.set_xdata(tvec)
line.set_ydata(vtrace)
ax.relim()
ax.autoscale_view()
else:
for label, vtrace in vtraces.items():
ax.plot(tvec, vtrace, label=label)
if mark_spikes:
ispikes = self.detect_spikes(tvec, vtrace)
if len(ispikes) > 0:
ax.scatter(tvec[ispikes], vtrace[ispikes] + 10, marker='v')
ax.legend(loc=9, bbox_to_anchor=(0.95, 0.9))
ax.set_ylabel(V_MV)
ax.set_xlim([tvec[0], tvec[-1]])
ax.autoscale(True)
ylims = ax.get_ylim()
ax.set_ylim(min(ylims[0], V_LIMS[0]), max(ylims[1], V_LIMS[1]))
if update and redraw:
fig.canvas.draw()
return fig
def plot_Itrace(self, ax=None, update=False, redraw=True):
'''
Plot stimulus time profile
:param ax (optional): axis on which to plot
:param update: whether to update an existing figure or not
:param redraw: whether to redraw figure upon update
:return: figure handle
'''
if ax is None:
fig, ax = plt.subplots(figsize=(self.tstop, 3))
ax.set_xlabel(TIME_MS)
sns.despine(ax=ax)
else:
fig = ax.get_figure()
tstim, Istim = self.stim.stim_profile()
if tstim[-1] > self.tstop:
Istim = Istim[tstim < self.tstop]
tstim = tstim[tstim < self.tstop]
tstim = np.hstack((tstim, [self.tstop]))
Istim = np.hstack((Istim, [Istim[-1]]))
if update:
line = ax.get_lines()[0]
line.set_xdata(tstim)
line.set_ydata(Istim)
ax.relim()
ax.autoscale_view()
else:
ax.plot(tstim, Istim, color='k')
ax.set_ylabel(f'Istim ({self.stim.unit})')
if update and redraw:
fig.canvas.draw()
return fig
def plot_results(self, tvec, vnodes, inodes=None, fig=None, mark_spikes=False):
'''
Plot simulation results.
:param tvec: time vector (ms)
:param vnodes: 2D array of membrane voltage of nodes and time
:param ax (optional): axis on which to plot
:param inodes (optional): specific node indexes
:param fig (optional): existing figure to use for rendering
:return: figure handle
'''
# Get figure
if fig is None:
fig, axes = plt.subplots(3, figsize=(7, 5), sharex=True)
update = False
else:
axes = fig.axes
update = True
# Plot results
self.plot_vmap(tvec, vnodes, ax=axes[0], update=update, redraw=False)
self.plot_vtraces(tvec, vnodes, ax=axes[1], inodes=inodes, update=update, redraw=False, mark_spikes=mark_spikes)
self.plot_Itrace(ax=axes[2], update=update, redraw=False)
# Adjust axes and figure
if not update:
for ax in axes[:-1]:
sns.despine(ax=ax, bottom=True)
ax.xaxis.set_ticks_position('none')
sns.despine(ax=axes[-1])
axes[-1].set_xlabel(TIME_MS)
else:
fig.canvas.draw()
# Return figure
return fig
def detect_spikes(self, t, v):
'''
Detect spikes in simulation output data.
:param t: time vector
:param v: 1D or 2D membrane potential array
:return: time indexes of detected spikes:
- If a 1D voltage array is provided, a single list is returned.
- If a 2D voltage array is provided, a list of lists is returned (1 list per node)
Example use:
ispikes = sim.detect_spikes(tvec, vnodes)
'''
if v.ndim > 2:
raise ValueError('cannot work with potential arrays of more than 2 dimensions')
if v.ndim == 2:
ispikes = [self.detect_spikes(t, vv) for vv in v]
if all(len(i) == len(ispikes[0]) for i in ispikes):
ispikes = np.array(ispikes)
return ispikes
return find_peaks(v, height=0., prominence=50.)[0]
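    # find_peaks criterion used above: a sample counts as a spike if the
    # membrane potential peaks above 0 mV with a prominence of at least 50 mV,
    # which filters out sub-threshold fluctuations.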
def copy_slider(slider, **kwargs):
'''
Copy an ipywidgets slider object
:param slider: reference slider
:param kwargs: attributes to be overwritten
:return: slider copy
'''
# Get slider copy
if isinstance(slider, FloatSlider):
s = FloatSlider(
description=slider.description,
min=slider.min, max=slider.max, value=slider.value, step=slider.step,
continuous_update=slider.continuous_update, layout=slider.layout)
elif isinstance(slider, FloatLogSlider):
s = FloatLogSlider(
description=slider.description,
base=slider.base, min=slider.min, max=slider.max, value=slider.value, step=slider.step,
continuous_update=slider.continuous_update, layout=slider.layout)
else:
raise ValueError(f'cannot copy {slider} object')
# Overwrite specified attributes
for k, v in kwargs.items():
setattr(s, k, v)
return s
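# Illustrative use of copy_slider (names and values are hypothetical): any
# keyword argument overrides the copied attribute, e.g.
#
#   amp_slider = copy_slider(ref_slider, description='I (mA)', value=0.5)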
def interactive_display(sim, updatefunc, *refsliders):
'''
Start an interactive display
:param sim: simulation object
:param updatefunc: update function that takes the slider values as input and creates/updates a figure
:param refsliders: list of reference slider objects
:return: interactive display
'''
# Check that number of input sliders corresponds to update function signature
params = inspect.signature(updatefunc).parameters
sparams = [k for k, v in params.items() if v.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD][1:]
assert len(sparams) == len(refsliders), 'number of sliders does not match update signature'
# Reset simulation object
sim.reset()
# Create a copy of reference sliders for this interactive simulation
sliders = [copy_slider(rs) for rs in refsliders]
# Call update once to generate initial figure
fig = updatefunc(sim, *[s.value for s in sliders])
# examples/python/routing_example.py
# # Respecting Architecture Connectivity Constraints - Advanced Routing in tket
# Very few current or planned quantum architectures have all-to-all qubit connectivity. In consequence, quantum circuits must be modified before execution to ensure that every multi-qubit gate in a circuit corresponds to a set of interactions that are permitted by the architecture. The problem is solved for arbitrary architectures by adding ```SWAP``` gates and distributed ```CX``` gates, and through translation of multi-qubit gates in to architecture permitted ones.
#
# In this tutorial we will show how this routing problem is solved automatically in tket. The basic examples require only the installation of pytket, ```pip install pytket```, while further examples require the installation of some supported subpackages, ```pytket_qiskit``` & ```pytket_cirq```.
#
# Let's start by importing the Architecture class from ```pytket```:
from pytket.routing import Architecture
# The Architecture class is used in ```pytket``` to hold information about a quantum architectures connectivity constraints. An Architecture object requires a coupling map to be created i.e. a list of edges between qubits which defines where two-qubit primitives may be executed. A coupling map can be produced naively by the integer indexing of nodes and edges in your architecture. We also use networkx and matplotlib to draw a graph representation of our Architecture.
import networkx as nx
import matplotlib.pyplot as plt
def draw_graph(coupling_map):
coupling_graph = nx.Graph(coupling_map)
nx.draw(coupling_graph, labels={node: node for node in coupling_graph.nodes()})
simple_coupling_map = [(0, 1), (1, 2), (2, 3)]
simple_architecture = Architecture(simple_coupling_map)
draw_graph(simple_coupling_map)
# Alternatively we could use the `Node` class to assign our nodes - you will see why this can be helpful later. Lets create an Architecture with an identical graph in this manner.
from pytket.circuit import Node
node_0 = Node("example_register", 0)
node_1 = Node("example_register", 1)
node_2 = Node("example_register", 2)
node_3 = Node("example_register", 3)
id_coupling_map = [(node_0, node_1), (node_1, node_2), (node_2, node_3)]
id_architecture = Architecture(id_coupling_map)
draw_graph(id_coupling_map)
# We can also create an ID with an arbitrary-dimensional index. Lets make a 2x2x2 cube:
node_000 = Node("cube", [0, 0, 0])
node_001 = Node("cube", [0, 0, 1])
node_010 = Node("cube", [0, 1, 0])
node_011 = Node("cube", [0, 1, 1])
node_100 = Node("cube", [1, 0, 0])
node_101 = Node("cube", [1, 0, 1])
node_110 = Node("cube", [1, 1, 0])
node_111 = Node("cube", [1, 1, 1])
cube_coupling_map = [
(node_000, node_001),
(node_000, node_010),
(node_010, node_011),
(node_001, node_011),
(node_000, node_100),
(node_001, node_101),
(node_010, node_110),
(node_011, node_111),
(node_100, node_101),
(node_100, node_110),
(node_110, node_111),
(node_101, node_111),
]
cube_architecture = Architecture(cube_coupling_map)
draw_graph(cube_coupling_map)
# To avoid that tedium though we could just use our SquareGrid Architecture:
from pytket.routing import SquareGrid
alternative_cube_architecture = SquareGrid(2, 2, 2)
draw_graph(alternative_cube_architecture.coupling)
# In many cases, we are interested in the architectures of real devices. These are available directly from the device backends, available within tket's respective extension packages.
# In reality a Quantum Device has much more information to it than just its connectivity constraints. This includes information we can use in noise-aware methods such as gate errors and readout errors for each qubit. These methods can improve circuit performance when running on real hardware. If available from hardware providers, a device Backend will store this information via the `backend_info` attribute.
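# (Illustrative alternative, not run here.) Recent pytket-qiskit versions also
# expose the same connectivity through the backend object itself, roughly:
#
#     from pytket.extensions.qiskit import IBMQBackend
#     backend = IBMQBackend("ibmq_quito")
#     draw_graph(backend.backend_info.architecture.coupling)
#
# Exact attribute names depend on the installed pytket version.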
from qiskit import IBMQ
IBMQ.load_account()
# We can produce an IBMQ Backend object using ```process_characterisation```. This returns a dictionary containing characterisation information provided by IBMQ, including t1 times, t2 times, qubit frequencies and gate times, along with the coupling graph of the device as a pytket ```Architecture```.
from pytket.circuit import OpType
from pytket.extensions.qiskit.qiskit_convert import process_characterisation
provider = IBMQ.providers()[0]
quito_backend = provider.get_backend("ibmq_quito")
quito_characterisation = process_characterisation(quito_backend)
draw_graph(quito_characterisation["Architecture"].coupling)
# This characterisation contains a range of information such as gate fidelities. Let's look at two-qubit gate errors.
for key, val in quito_characterisation["EdgeErrors"].items():
print(key, val)
# We've now seen how to create custom Architectures using indexing and nodes, how to use our built-in Architecture generators for typical connectivity graphs and how to access characterisation information using the ```process_characterisation``` method.
#
# Let's now see how we can use these objects are used for Routing circuits - we create a circuit for Routing to our original architectures and assume the only primitive constraint is the ```CX``` gate, which can only be executed on an edge in our coupling map.
from pytket import Circuit
example_circuit = Circuit(4)
example_circuit.CX(0, 1).CX(0, 2).CX(1, 2).CX(3, 2).CX(0, 3)
for gate in example_circuit:
print(gate)
# We can also visualise the `Circuit` using the `render_circuit_jupyter` method.
from pytket.circuit.display import render_circuit_jupyter
render_circuit_jupyter(example_circuit)
# This circuit cannot be executed on any of our Architectures without modification. We can see this by looking at the circuit's interaction graph, a graph where nodes are logical qubits and edges are some two-qubit gate.
interaction_edges = [(0, 1), (0, 2), (1, 2), (3, 2), (0, 3)]
draw_graph(interaction_edges)
draw_graph(simple_coupling_map)
# Sometimes we can route a circuit just by relabelling its qubits to nodes of our Architecture such that the interaction graph matches a subgraph of the Architecture - unfortunately that isn't possible here.
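# As a quick, illustrative check of that claim we can ask networkx whether the
# interaction graph embeds into the coupling graph at all (subgraph
# monomorphism; requires a reasonably recent networkx):
from networkx.algorithms import isomorphism as iso
matcher = iso.GraphMatcher(nx.Graph(simple_coupling_map), nx.Graph(interaction_edges))
print(matcher.subgraph_is_monomorphic())  # False: no relabelling alone suffices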
#
# Let's call ```pytket```'s automatic routing method, route our circuit for the first Architecture we made, and have a look at our new circuit:
from pytket.routing import route
simple_modified_circuit = route(example_circuit, simple_architecture)
for gate in simple_modified_circuit:
print(gate)
render_circuit_jupyter(simple_modified_circuit)
draw_graph(id_architecture.coupling)
# The route method has relabelled the qubits in our old circuit to nodes in simple_architecture, and has added ```SWAP``` gates that permute logical qubits on nodes of our Architecture.
#
# Let's repeat this for id_architecture:
id_modified_circuit = route(example_circuit, id_architecture)
for gate in id_modified_circuit:
print(gate)
render_circuit_jupyter(id_modified_circuit)
# Both simple_architecture and id_architecture had the same graph structure, and so we can see that the qubits have been relabelled and ```SWAP``` gates added identically - the only difference is the preservation of the node labelling of id_architecture.
#
# Let's repeat this one more time for cube_architecture:
cube_modified_circuit = route(example_circuit, cube_architecture)
for gate in cube_modified_circuit:
print(gate)
cmc_copy = cube_modified_circuit.copy()
cmc_copy.flatten_registers()
render_circuit_jupyter(cmc_copy)
# Similarly the circuit's qubits have been relabelled and ```SWAP``` gates added. In this example though ```route``` is able to utilise the extra connectivity of cube_architecture to reduce the number of ```SWAP``` gates added from 3 to 1.
#
# We also route for the Quito architecture.
quito_modified_circuit = route(example_circuit, quito_characterisation["Architecture"])
for gate in quito_modified_circuit:
print(gate)
render_circuit_jupyter(quito_modified_circuit)
# The ```route``` method comes with a set of parameters that can be modified to tune the performance of routing for a circuit to a given Architecture.
#
# The main parameters are as follows:
# - (int) **swap_lookahead**, the depth of lookahead employed when trialling ```SWAP``` gates during Routing, default 50.
# - (int) **bridge_lookahead**, the depth of lookahead employed when comparing ```BRIDGE``` gates to ```SWAP``` gates during Routing, default 2.
# - (int) **bridge_interactions**, the number of interactions considered in a slice of multi-qubit gates when comparing ```BRIDGE``` gates to ```SWAP``` gates during routing, default 1.
# - (float) **bridge_exponent**, effects the weighting placed on future slices when comparing ```BRIDGE``` gates to ```SWAP``` gates, default 0.
# Let's change some of our basic routing parameters:
basic_parameters = dict(bridge_lookahead=4, bridge_interactions=4, swap_lookahead=0)
id_basic_modified_circuit = route(example_circuit, id_architecture, **basic_parameters)
for gate in id_basic_modified_circuit:
print(gate)
# By changing the basic routing parameters we return a different routed circuit. To assess performance we must know the CX decomposition of both the ```SWAP``` and ```BRIDGE``` gates.
SWAP_c = Circuit(2)
SWAP_c.SWAP(0, 1)
SWAP_decomp_c = Circuit(2)
SWAP_decomp_c.CX(0, 1).CX(1, 0).CX(0, 1)
BRIDGE_c = Circuit(3)
BRIDGE_c.CX(0, 2)
BRIDGE_decomp_c = Circuit(3)
BRIDGE_decomp_c.CX(0, 1).CX(1, 2).CX(0, 1).CX(1, 2)
render_circuit_jupyter(SWAP_c)
print("\n=\n")
render_circuit_jupyter(SWAP_decomp_c)
render_circuit_jupyter(BRIDGE_c)
print("\n=\n")
render_circuit_jupyter(BRIDGE_decomp_c)
# The ```BRIDGE``` (or Distributed-CX gate distance 2) and ```SWAP``` both introduce a net three ```CX``` gates to the circuit.
#
# Considering this, by changing our basic parameters our routed circuit has one less gate added, and so should have net three fewer ```CX``` gates. We can confirm this by calling a ```Transformation``` pass that will decompose our additional gates to ```CX``` gates for us.
from pytket.transform import Transform
Transform.DecomposeSWAPtoCX().apply(id_modified_circuit)
Transform.DecomposeSWAPtoCX().apply(id_basic_modified_circuit)
Transform.DecomposeBRIDGE().apply(id_basic_modified_circuit)
print(
"CX gates in id_modified_circuit: ", id_modified_circuit.n_gates_of_type(OpType.CX)
)
print(
"CX gates in id_basic_modified_circuit: ",
id_basic_modified_circuit.n_gates_of_type(OpType.CX),
)
# So, by changing the parameters we've managed to produce another suitable routed solution with three fewer ```CX``` gates.
#
# We may be able to reduce the number of ```CX``` gates in our routed circuits by using the ```RemoveRedundancies``` ```Transformation``` pass, which replaces any adjacent identical ```CX``` gates with the identity and removes them.
Transform.RemoveRedundancies().apply(id_modified_circuit)
Transform.RemoveRedundancies().apply(id_basic_modified_circuit)
print(
"CX gates in id_modified_circuit: ", id_modified_circuit.n_gates_of_type(OpType.CX)
)
print(
"CX gates in id_basic_modified_circuit: ",
id_basic_modified_circuit.n_gates_of_type(OpType.CX),
)
# By changing the routing parameters and cleaning up our circuits after routing we've managed to reduce the number of ```CX``` gates in the final circuit by 5!
render_circuit_jupyter(id_modified_circuit)
render_circuit_jupyter(id_basic_modified_circuit)
# We can also confirm their validity:
print(id_modified_circuit.valid_connectivity(id_architecture, False))
print(id_basic_modified_circuit.valid_connectivity(id_architecture, False))
# Some circuits may not
self.scrollArea_chosen = QtWidgets.QScrollArea(self.groupBox_sage)
self.scrollArea_chosen.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.scrollArea_chosen.setWidgetResizable(True)
self.scrollArea_chosen.setObjectName("scrollArea_chosen")
self.scrollArea_chosen.setFocusPolicy(QtCore.Qt.ClickFocus)
self.scrollAreaWidgetContents_2 = QtWidgets.QWidget()
self.scrollAreaWidgetContents_2.setGeometry(QtCore.QRect(0, 0, 389, 323))
self.scrollAreaWidgetContents_2.setObjectName("scrollAreaWidgetContents_2")
self.scrollAreaWidgetContents_2.setFocusPolicy(QtCore.Qt.ClickFocus)
self.gridLayout_8 = QtWidgets.QGridLayout(self.scrollAreaWidgetContents_2)
self.gridLayout_8.setObjectName("gridLayout_8")
self.scrollArea_chosen.setWidget(self.scrollAreaWidgetContents_2)
self.gridLayout_5.addWidget(self.scrollArea_chosen, 5, 0, 1, 7)
# self.line_seperator = QtWidgets.QFrame(self.scrollAreaWidgetContents_2)
# self.line_seperator.setFrameShape(QtWidgets.QFrame.HLine)
# self.line_seperator.setFrameShadow(QtWidgets.QFrame.Raised)
# self.line_seperator.setObjectName("line_seperator")
# self.line_seperator.setFocusPolicy(QtCore.Qt.NoFocus)
# self.line_seperator.setLineWidth(3)
# self.line_seperator.hide()
self.groupBox_notenschl = QtWidgets.QGroupBox(self.groupBox_sage)
self.groupBox_notenschl.setObjectName("groupBox_notenschl")
self.gridLayout_6 = QtWidgets.QGridLayout(self.groupBox_notenschl)
self.gridLayout_6.setObjectName("gridLayout_6")
self.spinBox_3 = SpinBox_noWheel(self.groupBox_notenschl)
self.spinBox_3.setMaximumSize(QtCore.QSize(55, 20))
self.spinBox_3.setProperty("value", 80)
self.spinBox_3.setObjectName("spinBox_3")
self.spinBox_3.valueChanged.connect(self.punkte_changed)
self.spinBox_3.setFocusPolicy(QtCore.Qt.ClickFocus)
self.gridLayout_6.addWidget(self.spinBox_3, 0, 4, 1, 1)
self.label_sg_pkt = QtWidgets.QLabel(self.groupBox_notenschl)
self.label_sg_pkt.setObjectName("label_sg_pkt")
self.gridLayout_6.addWidget(self.label_sg_pkt, 0, 2, 1, 1)
self.label_g_pkt = QtWidgets.QLabel(self.groupBox_notenschl)
self.label_g_pkt.setObjectName("label_g_pkt")
self.gridLayout_6.addWidget(self.label_g_pkt, 0, 5, 1, 1)
self.label_g = QtWidgets.QLabel(self.groupBox_notenschl)
self.label_g.setMaximumSize(QtCore.QSize(54, 20))
self.label_g.setObjectName("label_g")
self.gridLayout_6.addWidget(self.label_g, 0, 3, 1, 1)
self.label_sg = QtWidgets.QLabel(self.groupBox_notenschl)
self.label_sg.setMaximumSize(QtCore.QSize(64, 20))
self.label_sg.setObjectName("label_sg")
self.gridLayout_6.addWidget(self.label_sg, 0, 0, 1, 1)
self.spinBox_2 = SpinBox_noWheel(self.groupBox_notenschl)
self.spinBox_2.setMaximumSize(QtCore.QSize(55, 20))
self.spinBox_2.setProperty("value", 91)
self.spinBox_2.setObjectName("spinBox_2")
self.spinBox_2.valueChanged.connect(self.punkte_changed)
self.spinBox_2.setFocusPolicy(QtCore.Qt.ClickFocus)
self.gridLayout_6.addWidget(self.spinBox_2, 0, 1, 1, 1)
self.label_b = QtWidgets.QLabel(self.groupBox_notenschl)
self.label_b.setMaximumSize(QtCore.QSize(80, 20))
self.label_b.setObjectName("label_b")
self.gridLayout_6.addWidget(self.label_b, 1, 0, 1, 1)
self.spinBox_4 = SpinBox_noWheel(self.groupBox_notenschl)
self.spinBox_4.setMaximumSize(QtCore.QSize(55, 20))
self.spinBox_4.setProperty("value", 64)
self.spinBox_4.setObjectName("spinBox_4")
self.spinBox_4.valueChanged.connect(self.punkte_changed)
self.spinBox_4.setFocusPolicy(QtCore.Qt.ClickFocus)
self.gridLayout_6.addWidget(self.spinBox_4, 1, 1, 1, 1)
self.label_b_pkt = QtWidgets.QLabel(self.groupBox_notenschl)
self.label_b_pkt.setObjectName("label_b_pkt")
self.gridLayout_6.addWidget(self.label_b_pkt, 1, 2, 1, 1)
self.label_g_2 = QtWidgets.QLabel(self.groupBox_notenschl)
self.label_g_2.setMaximumSize(QtCore.QSize(80, 20))
self.label_g_2.setObjectName("label_g_2")
self.gridLayout_6.addWidget(self.label_g_2, 1, 3, 1, 1)
self.label_g_pkt_2 = QtWidgets.QLabel(self.groupBox_notenschl)
self.label_g_pkt_2.setObjectName("label_g_pkt_2")
self.gridLayout_6.addWidget(self.label_g_pkt_2, 1, 5, 1, 1)
self.spinBox_5 = SpinBox_noWheel(self.groupBox_notenschl)
self.spinBox_5.setMaximumSize(QtCore.QSize(55, 20))
self.spinBox_5.setProperty("value", 50)
self.spinBox_5.setObjectName("spinBox_5")
self.spinBox_5.valueChanged.connect(self.punkte_changed)
self.spinBox_5.setFocusPolicy(QtCore.Qt.ClickFocus)
self.gridLayout_6.addWidget(self.spinBox_5, 1, 4, 1, 1)
self.gridLayout_5.addWidget(self.groupBox_notenschl, 6, 0, 1, 7)
self.groupBox_notenschl.setTitle(
_translate("MainWindow", "Notenschlüssel", None)
)
self.label_sg_pkt.setText(_translate("MainWindow", "% (ab 0)", None))
self.label_g_pkt.setText(_translate("MainWindow", "% (ab 0)", None))
self.label_g.setText(_translate("MainWindow", "Gut:", None))
self.label_sg.setText(_translate("MainWindow", "Sehr Gut:", None))
self.label_b.setText(_translate("MainWindow", "Befriedigend:", None))
self.label_b_pkt.setText(_translate("MainWindow", "% (ab 0)", None))
self.label_g_2.setText(_translate("MainWindow", "Genügend:", None))
self.label_g_pkt_2.setText(_translate("MainWindow", "% (ab 0)", None))
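        # Default grading thresholds set above (editable via the spinboxes):
        # Sehr Gut from 91 %, Gut from 80 %, Befriedigend from 64 % and
        # Genügend from 50 % of the total points; each valueChanged signal is
        # wired to punkte_changed(), which presumably recomputes the grade
        # boundaries.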
### Groupbox Beurteilungsraster #####
self.groupBox_beurteilungsra = QtWidgets.QGroupBox(self.groupBox_sage)
self.groupBox_beurteilungsra.setObjectName("groupBox_beurteilungsra")
self.gridLayout_6 = QtWidgets.QGridLayout(self.groupBox_beurteilungsra)
self.gridLayout_6.setObjectName("gridLayout_6")
self.label_typ1_pkt = QtWidgets.QLabel(self.groupBox_beurteilungsra)
self.label_typ1_pkt.setObjectName("label_typ1_pkt")
self.gridLayout_6.addWidget(self.label_typ1_pkt, 0, 0, 1, 1)
# self.label_typ1_pkt.setText(_translate("MainWindow", "Punkte Typ 1: 0",None))
self.label_typ2_pkt = QtWidgets.QLabel(self.groupBox_beurteilungsra)
self.label_typ2_pkt.setObjectName("label_typ2_pkt")
self.gridLayout_6.addWidget(self.label_typ2_pkt, 1, 0, 1, 1)
self.label_ausgleich_pkt = QtWidgets.QLabel(self.groupBox_beurteilungsra)
self.label_ausgleich_pkt.setObjectName("label_ausgleich_pkt")
self.gridLayout_6.addWidget(self.label_ausgleich_pkt, 2, 0, 1, 1)
# self.label_ausgleich_pkt.setText(_translate("MainWindow", "Ausgleichspunkte: 0",None))
# self.label_typ2_pkt.setText(_translate("MainWindow", "Punkte Typ 2: 0",None))
self.groupBox_beurteilungsra.setTitle(
_translate("MainWindow", "Beurteilungsraster", None)
)
self.groupBox_beurteilungsra.hide()
### Zusammenfassung d. SA ###
self.label_gesamtbeispiele = QtWidgets.QLabel(self.groupBox_sage)
self.gridLayout_5.addWidget(self.label_gesamtbeispiele, 7, 0, 1, 3)
self.label_gesamtbeispiele.setObjectName("label_gesamtbeispiele")
self.label_gesamtbeispiele.setText(
_translate(
"MainWindow", "Anzahl der Aufgaben: 0 (Typ1: 0 / Typ2: 0) ", None
)
)
self.label_gesamtpunkte = QtWidgets.QLabel(self.groupBox_sage)
self.gridLayout_5.addWidget(self.label_gesamtpunkte, 8, 0, 1, 1)
self.label_gesamtpunkte.setObjectName("label_gesamtpunkte")
self.label_gesamtpunkte.setText(
_translate("MainWindow", "Gesamtpunkte: 0", None)
)
self.cb_solution_sage = QtWidgets.QCheckBox(self.centralwidget)
self.cb_solution_sage.setObjectName(_fromUtf8("cb_solution"))
self.cb_solution_sage.setText(
_translate("MainWindow", "Lösungen anzeigen", None)
)
self.cb_solution_sage.setChecked(True)
self.cb_solution_sage.setFocusPolicy(QtCore.Qt.ClickFocus)
self.gridLayout_5.addWidget(
self.cb_solution_sage, 7, 4, 2, 1, QtCore.Qt.AlignRight
)
self.pushButton_vorschau = QtWidgets.QPushButton(self.groupBox_sage)
self.pushButton_vorschau.setMaximumSize(QtCore.QSize(90, 16777215))
self.pushButton_vorschau.setObjectName("pushButton_vorschau")
self.pushButton_vorschau.setText(_translate("MainWindow", "Vorschau", None))
self.pushButton_vorschau.setShortcut(_translate("MainWindow", "Return", None))
self.gridLayout_5.addWidget(
self.pushButton_vorschau, 7, 5, 1, 2, QtCore.Qt.AlignRight
)
self.pushButton_vorschau.clicked.connect(
partial(self.pushButton_vorschau_pressed, "vorschau", 0, 0)
)
self.pushButton_vorschau.setFocusPolicy(QtCore.Qt.ClickFocus)
self.gridLayout.addWidget(self.groupBox_sage, 1, 2, 8, 3)
self.pushButton_erstellen = QtWidgets.QPushButton(self.groupBox_sage)
self.pushButton_erstellen.setMaximumSize(QtCore.QSize(90, 16777215))
self.pushButton_erstellen.setObjectName("pushButton_erstellen")
self.pushButton_erstellen.setText(_translate("MainWindow", "Erstellen", None))
self.pushButton_erstellen.setFocusPolicy(QtCore.Qt.ClickFocus)
self.pushButton_erstellen.clicked.connect(self.pushButton_erstellen_pressed)
self.gridLayout_5.addWidget(
self.pushButton_erstellen, 8, 5, 1, 2, QtCore.Qt.AlignRight
)
self.groupBox_sage.hide()
################################################################
################################################################
########### FEEDBACK #############################################
#######################################################################
self.comboBox_at_fb = QtWidgets.QComboBox(self.centralwidget)
self.comboBox_at_fb.setObjectName("comboBox_at_fb")
self.comboBox_at_fb.addItem("")
self.comboBox_at_fb.addItem("")
self.comboBox_at_fb.addItem("")
self.gridLayout.addWidget(self.comboBox_at_fb, 0, 0, 1, 1)
self.comboBox_at_fb.setItemText(0, _translate("MainWindow", "Typ 1", None))
self.comboBox_at_fb.setItemText(1, _translate("MainWindow", "Typ 2", None))
self.comboBox_at_fb.setItemText(
2, _translate("MainWindow", "Allgemeine Rückmeldung", None)
)
self.comboBox_at_fb.currentIndexChanged.connect(self.comboBox_at_fb_changed)
self.comboBox_at_fb.setFocusPolicy(QtCore.Qt.ClickFocus)
self.comboBox_at_fb.hide()
self.label_example = QtWidgets.QLabel(self.centralwidget)
self.label_example.setObjectName(_fromUtf8("label_example"))
# self.label_update.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
self.label_example.setText(
_translate("MainWindow", "Ausgewählte Aufgabe: -", None)
)
self.gridLayout.addWidget(self.label_example, 0, 1, 1, 1)
self.label_example.hide()
self.groupBox_alle_aufgaben_fb = QtWidgets.QGroupBox(self.centralwidget)
self.groupBox_alle_aufgaben_fb.setMinimumSize(QtCore.QSize(140, 16777215))
self.groupBox_alle_aufgaben_fb.setMaximumSize(QtCore.QSize(180, 16777215))
self.groupBox_alle_aufgaben_fb.setObjectName("groupBox_alle_aufgaben_fb")
self.verticalLayout_fb = QtWidgets.QVBoxLayout(self.groupBox_alle_aufgaben_fb)
self.verticalLayout_fb.setObjectName("verticalLayout_fb")
self.comboBox_fb = QtWidgets.QComboBox(self.groupBox_alle_aufgaben_fb)
self.comboBox_fb.setObjectName("comboBox_fb")
list_comboBox_fb = ["", "AG", "FA", "AN", "WS", "K5", "K6", "K7", "K8"]
index = 0
for all in list_comboBox_fb:
self.comboBox_fb.addItem("")
self.comboBox_fb.setItemText(index, _translate("MainWindow", all, None))
index += 1
self.comboBox_fb.currentIndexChanged.connect(
partial(self.comboBox_gk_changed, "feedback")
)
self.comboBox_fb.setFocusPolicy(QtCore.Qt.ClickFocus)
self.verticalLayout_fb.addWidget(self.comboBox_fb)
self.comboBox_fb_num = QtWidgets.QComboBox(self.groupBox_alle_aufgaben_fb)
self.comboBox_fb_num.setObjectName("comboBox_gk_num")
self.comboBox_fb_num.currentIndexChanged.connect(
partial(self.comboBox_gk_num_changed, "feedback")
)
self.comboBox_fb_num.setFocusPolicy(QtCore.Qt.ClickFocus)
self.verticalLayout_fb.addWidget(self.comboBox_fb_num)
self.lineEdit_number_fb = QtWidgets.QLineEdit(self.groupBox_alle_aufgaben_fb)
self.lineEdit_number_fb.setObjectName("lineEdit_number_fb")
self.lineEdit_number_fb.textChanged.connect(
partial(self.lineEdit_number_changed, "feedback")
)
self.verticalLayout_fb.addWidget(self.lineEdit_number_fb)
        self.listWidget_fb = QtWidgets.QListWidget(self.groupBox_alle_aufgaben_fb)
        self.listWidget_fb.setObjectName("listWidget_fb")
self.verticalLayout_fb.addWidget(self.listWidget_fb)
self.gridLayout.addWidget(self.groupBox_alle_aufgaben_fb, 1, 0, 3, 1)
self.groupBox_alle_aufgaben_fb.setTitle(
_translate("MainWindow", "Aufgaben", None)
)
self.groupBox_alle_aufgaben_fb.hide()
self.groupBox_fehlertyp = QtWidgets.QGroupBox(self.centralwidget)
self.groupBox_fehlertyp.setObjectName("groupBox_fehlertyp")
self.gridLayout_fehlertyp = QtWidgets.QGridLayout(self.groupBox_fehlertyp)
self.gridLayout_fehlertyp.setObjectName("gridLayout_feedback")
self.groupBox_fehlertyp.setTitle(_translate("MainWindow", "Betreff", None))
self.comboBox_fehlertyp = QtWidgets.QComboBox(self.groupBox_fehlertyp)
self.comboBox_fehlertyp.setObjectName("comboBox_pruefungstyp")
self.comboBox_fehlertyp.addItem("")
self.comboBox_fehlertyp.addItem("")
self.comboBox_fehlertyp.addItem("")
self.comboBox_fehlertyp.addItem("")
self.comboBox_fehlertyp.addItem("")
self.comboBox_fehlertyp.addItem("")
self.comboBox_fehlertyp.addItem("")
self.comboBox_fehlertyp.addItem("")
self.comboBox_fehlertyp.addItem("")
self.comboBox_fehlertyp.setItemText(
1, _translate("MainWindow", "Feedback", None)
)
self.comboBox_fehlertyp.setItemText(
2, _translate("MainWindow", "Fehler in der Angabe", None)
)
self.comboBox_fehlertyp.setItemText(
3, _translate("MainWindow", "Fehler in der Lösung", None)
)
self.comboBox_fehlertyp.setItemText(
4, _translate("MainWindow", "Bild wird nicht (richtig) angezeigt", None)
)
self.comboBox_fehlertyp.setItemText(
5, _translate("MainWindow", "Grafik ist unleserlich/fehlerhaft", None)
)
self.comboBox_fehlertyp.setItemText(
6, _translate("MainWindow", "Aufgabe ist doppelt vorhanden", None)
)
self.comboBox_fehlertyp.setItemText(
7,
_translate(
"MainWindow",
"Falsche Kodierung (Grundkompetenz, Aufgabenformat, ...)",
None,
),
)
self.comboBox_fehlertyp.setItemText(
8, _translate("MainWindow", "Sonstiges", None)
)
self.comboBox_fehlertyp.setFocusPolicy(QtCore.Qt.ClickFocus)
self.gridLayout_fehlertyp.addWidget(self.comboBox_fehlertyp, 0, 0, 1, 1)
self.gridLayout.addWidget(self.groupBox_fehlertyp, 1, 1, 1, 3)
self.groupBox_fehlertyp.hide()
self.groupBox_feedback = QtWidgets.QGroupBox(self.centralwidget)
self.groupBox_feedback.setObjectName(_fromUtf8("groupBox_feedback"))
self.gridLayout_fb = QtWidgets.QGridLayout(self.groupBox_feedback)
self.gridLayout_fb.setObjectName(_fromUtf8("gridLayout_fb"))
self.plainTextEdit_fb = QtWidgets.QPlainTextEdit(self.groupBox_feedback)
self.plainTextEdit_fb.setObjectName(_fromUtf8("plainTextEdit_fb"))
self.gridLayout_fb.addWidget(self.plainTextEdit_fb, 0, 0, 1, 1)
self.gridLayout.addWidget(self.groupBox_feedback, 2, 1, 1, 3)
self.groupBox_feedback.setTitle(
_translate("MainWindow", "Feedback bzw. Problembeschreibung", None)
)
self.groupBox_feedback.hide()
self.groupBox_email = QtWidgets.QGroupBox(self.centralwidget)
self.groupBox_email.setObjectName("groupBox_email")
# self.groupBox_klasse.setMaximumSize(QtCore.QSize(200, 16777215))
self.verticalLayout_email = QtWidgets.QVBoxLayout(self.groupBox_email)
self.verticalLayout_email.setObjectName("verticalLayout_email")
self.lineEdit_email = QtWidgets.QLineEdit(self.groupBox_email)
self.lineEdit_email.setObjectName("lineEdit_email")
self.groupBox_email.setTitle(
_translate("MainWindow", "E-Mail Adresse für Nachfragen (optional)", None)
)
self.verticalLayout_email.addWidget(self.lineEdit_email)
self.gridLayout.addWidget(self.groupBox_email, 3, 1, 1, 3)
self.groupBox_email.hide()
self.pushButton_send = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_send.setObjectName(_fromUtf8("pushButton_send"))
self.gridLayout.addWidget(
self.pushButton_send, 4, 3, 1, 1, QtCore.Qt.AlignRight
)
self.pushButton_send.setText(_translate("MainWindow", "Senden", None))
self.pushButton_send.clicked.connect(self.pushButton_send_pressed)
self.pushButton_send.hide()
####################################################################
#####################################################################
######################################################################
#####################################################################
self.gridLayout_11.addWidget(self.tab_widget_gk, 0, 0, 1, 1)
self.gridLayout.addWidget(self.groupBox_gk, 1, 3, 2, 1)
MainWindow.setCentralWidget(self.centralwidget)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
MainWindow.setStatusBar(self.statusbar)
# self.actionReset = QtWidgets.QAction(MainWindow)
# self.actionReset.setObjectName(_fromUtf8("actionReset"))
self.retranslateUi(MainWindow)
self.tabWidget.setCurrentIndex(0)
self.tab_widget_gk_cr.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
############################################################################
############## Commands ####################################################
############################################################################
# self.btn_refreshddb.clicked.connect(self.refresh_ddb)
self.btn_k5.clicked.connect(self.btn_k5_pressed)
self.btn_k6.clicked.connect(self.btn_k6_pressed)
self.btn_k7.clicked.connect(self.btn_k7_pressed)
self.btn_k8.clicked.connect(self.btn_k8_pressed)
self.btn_ag_all.clicked.connect(self.btn_ag_all_pressed)
self.btn_an_all.clicked.connect(self.btn_an_all_pressed)
self.btn_fa_all.clicked.connect(self.btn_fa_all_pressed)
self.btn_ws_all.clicked.connect(self.btn_ws_all_pressed)
self.btn_suche.clicked.connect(self.PrepareTeXforPDF)
self.actionExit.triggered.connect(self.close_app)
self.actionFeedback.triggered.connect(self.send_feedback)
self.actionRefresh_Database.triggered.connect(
self.refresh_ddb
) # self.label_aufgabentyp.text()[-1]
self.actionReset.triggered.connect(self.suchfenster_reset)
self.actionReset_sage.triggered.connect(self.reset_sage)
self.actionLoad.triggered.connect(partial(self.sage_load, False))
self.actionSave.triggered.connect(partial(self.sage_save, ""))
self.actionAufgaben_Typ1.triggered.connect(self.chosen_aufgabenformat_typ1)
self.actionAufgaben_Typ2.triggered.connect(self.chosen_aufgabenformat_typ2)
self.actionInfo.triggered.connect(self.show_info)
self.actionNeu.triggered.connect(self.neue_aufgabe_erstellen)
self.actionSage.triggered.connect(self.neue_schularbeit_erstellen)
self.actionSuche.triggered.connect(self.aufgaben_suchen)
self.actionBild_einf_gen.triggered.connect(self.add_picture)
self.actionBild_konvertieren_jpg_eps.triggered.connect(self.convert_imagetoeps)
self.comboBox_aufgabentyp_cr.currentIndexChanged.connect(
self.chosen_aufgabenformat_cr
)
self.pushButton_save.clicked.connect(self.save_file)
self.pushButton_titlepage.clicked.connect(
partial(self.titlepage_clicked, self.dict_titlepage)
)
for all in ag_beschreibung:
x = eval("self.cb_" + all)
x.stateChanged.connect(self.cb_checked)
for all in fa_beschreibung:
x = eval("self.cb_" + all)
x.stateChanged.connect(self.cb_checked)
for all in an_beschreibung:
x = eval("self.cb_" + all)
x.stateChanged.connect(self.cb_checked)
for all in ws_beschreibung:
x = eval("self.cb_" + all)
x.stateChanged.connect(self.cb_checked)
for g in range(5, 9):
for all in eval("k%s_beschreibung" % g):
x = eval("self.cb_k%s_" % g + all)
x.stateChanged.connect(self.cb_rest_checked)
for all in {
**ag_beschreibung,
**fa_beschreibung,
**an_beschreibung,
**ws_beschreibung,
}:
x = eval("self.cb_" + all + "_cr")
x.stateChanged.connect(lambda: self.gk_checked_cr("gk"))
for g in range(5, 9):
for all in eval("k%s_beschreibung" % g):
x = eval("self.cb_k%s_cr_" % g + all + "_cr")
x.stateChanged.connect(lambda: self.gk_checked_cr("klasse"))
if loaded_lama_file_path != "":
self.sage_load(True)
############################################################################################
##############################################################################################
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(
_translate(
"LaMA - LaTeX Mathematik Assistent",
"LaMA - LaTeX Mathematik Assistent",
None,
)
)
self.menuDateityp.setTitle(_translate("MainWindow", "Aufgabentyp", None))
self.menuDatei.setTitle(_translate("MainWindow", "Datei", None))
self.menuNeu.setTitle(_translate("MainWindow", "Neue Aufgabe", None))
self.menuSage.setTitle(_translate("MainWindow", "Neue Schularbeit", None))
self.menuSuche.setTitle(_translate("MainWindow", "Aufgabensuche", None))
self.menuBild_einf_gen.setTitle(_translate("MainWindow", "Bild einfügen", None))
self.menuFeedback.setTitle(_translate("MainWindow", "Feedback && Fehler", None))
self.actionBild_einf_gen.setText(
_translate("MainWindow", "Durchsuchen...", None)
)
self.actionBild_konvertieren_jpg_eps.setText(
_translate("MainWindow", "Grafik konvertieren (jpg/png zu eps)", None)
)
self.menuHelp.setTitle(_translate("MainWindow", "?", None))
self.actionReset.setText(_translate("MainWindow", "Reset", None))
self.actionReset_sage.setText(
_translate("MainWindow", "Reset Schularbeit", None)
)
self.actionReset.setShortcut("F4")
self.actionLoad.setText(_translate("MainWindow", "Öffnen", None))
self.actionLoad.setShortcut("Ctrl+O")
self.actionSave.setText(_translate("MainWindow", "Speichern", None))
self.actionSave.setShortcut("Ctrl+S")
self.actionFeedback.setText(
_translate("MainWindow", "Feedback oder Fehler senden ...", None)
)
self.actionAufgaben_Typ1.setText(
_translate("MainWindow", "Typ 1 Aufgaben", None)
)
self.actionAufgaben_Typ1.setShortcut("Ctrl+1")
self.actionAufgaben_Typ2.setText(
_translate("MainWindow", "Typ 2 Aufgaben", None)
)
self.actionAufgaben_Typ2.setShortcut("Ctrl+2")
self.actionInfo.setText(_translate("MainWindow", "Über LaMA", None))
self.actionNeu.setText(
_translate("MainWindow", "Neue Aufgabe erstellen...", None)
)
self.actionNeu.setShortcut("F3")
self.actionSage.setText(
_translate("MainWindow", "Neue Schularbeit erstellen...", None)
)
self.actionSage.setShortcut("F2")
self.actionSuche.setText(_translate("MainWindow", "Aufgaben suchen...", None))
self.actionSuche.setShortcut("F1")
self.actionExit.setText(_translate("MainWindow", "Exit", None))
self.actionRefresh_Database.setText(
_translate("MainWindow", "Refresh Database", None)
)
self.actionRefresh_Database.setShortcut("F5")
self.label_aufgabentyp.setText(
_translate("MainWindow", "Aufgabentyp: Typ 1", None)
)
self.groupBox_ausgew_gk.setTitle(
_translate("MainWindow", "Ausgewählte Grundkompetenzen", None)
)
self.groupBox_titelsuche.setTitle(_translate("MainWindow", "Titelsuche:", None))
self.groupBox_klassen.setTitle(_translate("MainWindow", "Klassen", None))
self.cb_k7.setText(_translate("MainWindow", "7. Klasse", None))
self.cb_k5.setText(_translate("MainWindow", "5. Klasse", None))
self.cb_k6.setText(_translate("MainWindow", "6. Klasse", None))
self.cb_k8.setText(_translate("MainWindow", "8. Klasse", None))
self.cb_mat.setText(_translate("MainWindow", "Matura", None))
self.cb_solution.setText(_translate("MainWindow", "Lösungen anzeigen", None))
try:
log_file = os.path.join(path_programm, "Teildokument", "log_file_1")
self.label_update.setText(
_translate(
"MainWindow",
"Letztes Update: "
+ self.modification_date(log_file).strftime("%d.%m.%y - %H:%M"),
None,
)
)
except FileNotFoundError:
self.label_update.setText(
_translate("MainWindow", "Letztes Update: ---", None)
)
self.btn_suche.setText(_translate("MainWindow", "Suche starten", None))
# self.btn_refreshddb.setText(_translate("MainWindow", "Refresh Database", None))
# self.menu_aufgabentyp.setItemText(0, _translate("MainWindow", "Typ 1", None))
# self.menu_aufgabentyp.setItemText(1, _translate("MainWindow", "Typ 2", None))
self.combobox_searchtype.setItemText(
0,
_translate(
"MainWindow",
"Alle Dateien ausgeben, die zumindest ein Suchkriterium enthalten",
None,
),
)
##### ONLY NEEDED for Typ1 #####
self.groupBox_af = QtWidgets.QGroupBox(self.centralwidget)
self.groupBox_af.setMaximumSize(QtCore.QSize(375, 16777215))
self.groupBox_af.setObjectName(_fromUtf8("groupBox_af"))
# self.groupBox_af.setMaximumHeight(80)
self.gridLayout_af = QtWidgets.QGridLayout(self.groupBox_af)
self.gridLayout_af.setObjectName(_fromUtf8("gridLayout_af"))
self.cb_af_zo = QtWidgets.QCheckBox(self.groupBox_af)
self.cb_af_zo.setObjectName(_fromUtf8("cb_af_zo"))
self.gridLayout_af.addWidget(self.cb_af_zo, 0, 2, 1, 1)
self.cb_af_mc = QtWidgets.QCheckBox(self.groupBox_af)
self.cb_af_mc.setObjectName(_fromUtf8("cb_af_mc"))
self.gridLayout_af.addWidget(self.cb_af_mc, 0, 0, 1, 2)
self.cb_af_oa = QtWidgets.QCheckBox(self.groupBox_af)
self.cb_af_oa.setObjectName(_fromUtf8("cb_af_oa"))
self.gridLayout_af.addWidget(self.cb_af_oa, 1, 2, 1, 1)
self.cb_af_lt = QtWidgets.QCheckBox(self.groupBox_af)
self.cb_af_lt.setObjectName(_fromUtf8("cb_af_lt"))
self.gridLayout_af.addWidget(self.cb_af_lt, 1, 0, 1, 1)
self.gridLayout.addWidget(self.groupBox_af, 4, 0, 1, 1)
# #################
# ##### ONLY NEEDED for Typ1 #####
self.groupBox_af.setTitle(
_translate("MainWindow", "Gesuchte Aufgabenformate:", None)
)
self.cb_af_zo.setText(_translate("MainWindow", "Zuordnungsformat (ZO)", None))
self.cb_af_mc.setText(_translate("MainWindow", "Multiplechoice (MC)", None))
self.cb_af_oa.setText(
_translate("MainWindow", "Offenes Antwortformat (OA)", None)
)
self.cb_af_lt.setText(_translate("MainWindow", "Lückentext (LT)", None))
#########################
### Typ1
# self.combobox_searchtype.setItemText(1, _translate("MainWindow", "Alle Dateien ausgeben, die alle Suchkriterien enthalten", None))
######
### Typ2
self.combobox_searchtype.setItemText(
1,
_translate(
"MainWindow",
"Alle Dateien ausgeben, die | |
# File: tensorflow_federated/python/research/optimization/shared/fed_avg_schedule.py
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An implementation of the FedAvg algorithm with learning rate schedules.
This is intended to be a somewhat minimal implementation of Federated
Averaging that allows for client and server learning rate scheduling.
The original FedAvg is based on the paper:
Communication-Efficient Learning of Deep Networks from Decentralized Data
<NAME>, <NAME>, <NAME>,
<NAME>, <NAME>. AISTATS 2017.
https://arxiv.org/abs/1602.05629
"""
# TODO(b/147626125): Merge with fed_avg.py to allow for learning rate schedules
# in the reparameterized federated averaging framework.
# TODO(b/149402127): Implement a check to zero out client updates if any value
# is non-finite.
import collections
from typing import Callable, Optional, Union
import attr
import tensorflow as tf
import tensorflow_federated as tff
from tensorflow_federated.python.tensorflow_libs import tensor_utils
# Convenience type aliases.
ModelBuilder = Callable[[], tff.learning.Model]
OptimizerBuilder = Callable[[float], tf.keras.optimizers.Optimizer]
ClientWeightFn = Callable[..., float]
LRScheduleFn = Callable[[int], float]
ModelWeights = collections.namedtuple('ModelWeights', 'trainable non_trainable')
def _initialize_optimizer_vars(model: tff.learning.Model,
optimizer: tf.keras.optimizers.Optimizer):
"""Ensures variables holding the state of `optimizer` are created."""
  # Create a placeholder for the model's delta (zeros shaped like the trainable weights).
delta = tf.nest.map_structure(tf.zeros_like, _get_weights(model).trainable)
model_weights = _get_weights(model)
grads_and_vars = tf.nest.map_structure(lambda x, v: (x, v), delta,
model_weights.trainable)
optimizer.apply_gradients(grads_and_vars, name='server_update')
assert optimizer.variables()
def _get_weights(model: tff.learning.Model) -> ModelWeights:
return ModelWeights(
trainable=tuple(model.trainable_variables),
non_trainable=tuple(model.non_trainable_variables))
@attr.s(eq=False, order=False, frozen=True)
class ServerState(object):
"""Structure for state on the server.
Fields:
- `model`: A dictionary of the model's trainable and non-trainable
weights.
- `optimizer_state`: The server optimizer variables.
- `round_num`: The current training round, as a float.
"""
model = attr.ib()
optimizer_state = attr.ib()
round_num = attr.ib()
# This is a float to avoid type incompatibility when calculating learning rate
# schedules.
@classmethod
def assign_weights_to_keras_model(cls, reference_model: ModelWeights,
keras_model: tf.keras.Model):
"""Assign the model weights to the weights of a `tf.keras.Model`.
Args:
reference_model: the `ModelWeights` object to assign weights from.
keras_model: the `tf.keras.Model` object to assign weights to.
"""
if not isinstance(reference_model, ModelWeights):
raise TypeError('The reference model must be an instance of '
'fed_avg_schedule.ModelWeights.')
def assign_weights(keras_weights, tff_weights):
for k, w in zip(keras_weights, tff_weights):
k.assign(w)
    # Assign the weights of the model that was just sent to the model held by this client.
assign_weights(keras_model.trainable_weights, reference_model.trainable)
assign_weights(keras_model.non_trainable_weights,
reference_model.non_trainable)
@tf.function
def server_update(model, server_optimizer, server_state, weights_delta):
"""Updates `server_state` based on `weights_delta`, increase the round number.
Args:
model: A `tff.learning.Model`.
server_optimizer: A `tf.keras.optimizers.Optimizer`.
server_state: A `ServerState`, the state to be updated.
weights_delta: An update to the trainable variables of the model.
Returns:
An updated `ServerState`.
"""
model_weights = _get_weights(model)
tff.utils.assign(model_weights, server_state.model)
# Server optimizer variables must be initialized prior to invoking this
tff.utils.assign(server_optimizer.variables(), server_state.optimizer_state)
weights_delta, has_non_finite_weight = (
tensor_utils.zero_all_if_any_non_finite(weights_delta))
if has_non_finite_weight > 0:
return server_state
# Apply the update to the model. We must multiply weights_delta by -1.0 to
# view it as a gradient that should be applied to the server_optimizer.
grads_and_vars = [
(-1.0 * x, v) for x, v in zip(weights_delta, model_weights.trainable)
]
server_optimizer.apply_gradients(grads_and_vars)
# Create a new state based on the updated model.
return tff.utils.update_state(
server_state,
model=model_weights,
optimizer_state=server_optimizer.variables(),
round_num=server_state.round_num + 1.0)
@attr.s(eq=False, order=False, frozen=True)
class ClientOutput(object):
"""Structure for outputs returned from clients during federated optimization.
Fields:
- `weights_delta`: A dictionary of updates to the model's trainable
variables.
- `client_weight`: Weight to be used in a weighted mean when
aggregating `weights_delta`.
- `model_output`: A structure matching
`tff.learning.Model.report_local_outputs`, reflecting the results of
training on the input dataset.
- `optimizer_output`: Additional metrics or other outputs defined by the
optimizer.
"""
weights_delta = attr.ib()
client_weight = attr.ib()
model_output = attr.ib()
optimizer_output = attr.ib()
def create_client_update_fn():
"""Returns a tf.function for the client_update.
This "create" fn is necesessary to prevent
"ValueError: Creating variables on a non-first call to a function decorated
with tf.function" errors due to the client optimizer creating variables. This
is really only needed because we test the client_update function directly.
"""
@tf.function
def client_update(model,
dataset,
initial_weights,
client_optimizer,
client_weight_fn=None):
"""Updates client model.
Args:
model: A `tff.learning.Model`.
dataset: A 'tf.data.Dataset'.
initial_weights: A `tff.learning.Model.weights` from server.
client_optimizer: A `tf.keras.optimizer.Optimizer` object.
client_weight_fn: Optional function that takes the output of
`model.report_local_outputs` and returns a tensor that provides the
weight in the federated average of model deltas. If not provided, the
default is the total number of examples processed on device.
Returns:
A 'ClientOutput`.
"""
model_weights = _get_weights(model)
tff.utils.assign(model_weights, initial_weights)
num_examples = tf.constant(0, dtype=tf.int32)
for batch in dataset:
with tf.GradientTape() as tape:
output = model.forward_pass(batch)
grads = tape.gradient(output.loss, model_weights.trainable)
grads_and_vars = zip(grads, model_weights.trainable)
client_optimizer.apply_gradients(grads_and_vars)
num_examples += tf.shape(output.predictions)[0]
aggregated_outputs = model.report_local_outputs()
weights_delta = tf.nest.map_structure(lambda a, b: a - b,
model_weights.trainable,
initial_weights.trainable)
weights_delta, has_non_finite_weight = (
tensor_utils.zero_all_if_any_non_finite(weights_delta))
if has_non_finite_weight > 0:
client_weight = tf.constant(0, dtype=tf.float32)
elif client_weight_fn is None:
client_weight = tf.cast(num_examples, dtype=tf.float32)
else:
client_weight = client_weight_fn(aggregated_outputs)
return ClientOutput(
weights_delta, client_weight, aggregated_outputs,
collections.OrderedDict([('num_examples', num_examples)]))
return client_update
def build_server_init_fn(
model_fn: ModelBuilder,
server_optimizer_fn: Callable[[], tf.keras.optimizers.Optimizer]):
"""Builds a `tff.tf_computation` that returns the initial `ServerState`.
The attributes `ServerState.model` and `ServerState.optimizer_state` are
initialized via their constructor functions. The attribute
`ServerState.round_num` is set to 0.0.
Args:
model_fn: A no-arg function that returns a `tff.learning.Model`.
server_optimizer_fn: A no-arg function that returns a
`tf.keras.optimizers.Optimizer`.
Returns:
A `tff.tf_computation` that returns initial `ServerState`.
"""
@tff.tf_computation
def server_init_tf():
server_optimizer = server_optimizer_fn()
model = model_fn()
_initialize_optimizer_vars(model, server_optimizer)
return ServerState(
model=_get_weights(model),
optimizer_state=server_optimizer.variables(),
round_num=0.0)
return server_init_tf
def build_fed_avg_process(
model_fn: ModelBuilder,
client_optimizer_fn: OptimizerBuilder,
client_lr: Union[float, LRScheduleFn] = 0.1,
server_optimizer_fn: OptimizerBuilder = tf.keras.optimizers.SGD,
server_lr: Union[float, LRScheduleFn] = 1.0,
client_weight_fn: Optional[ClientWeightFn] = None,
dataset_preprocess_comp: Optional[tff.Computation] = None,
) -> tff.templates.IterativeProcess:
"""Builds the TFF computations for optimization using federated averaging.
Args:
model_fn: A no-arg function that returns a `tff.learning.Model`.
client_optimizer_fn: A function that accepts a `learning_rate` keyword
argument and returns a `tf.keras.optimizers.Optimizer` instance.
client_lr: A scalar learning rate or a function that accepts a float
`round_num` argument and returns a learning rate.
server_optimizer_fn: A function that accepts a `learning_rate` argument and
returns a `tf.keras.optimizers.Optimizer` instance.
server_lr: A scalar learning rate or a function that accepts a float
`round_num` argument and returns a learning rate.
client_weight_fn: Optional function that takes the output of
`model.report_local_outputs` and returns a tensor that provides the weight
in the federated average of model deltas. If not provided, the default is
the total number of examples processed on device.
dataset_preprocess_comp: Optional `tff.Computation` that sets up a data
      pipeline on the clients. The computation must take a sequence of values
and return a sequence of values, or in TFF type shorthand `(U* -> V*)`. If
`None`, no dataset preprocessing is applied.
Returns:
A `tff.templates.IterativeProcess`.
"""
client_lr_schedule = client_lr
if not callable(client_lr_schedule):
client_lr_schedule = lambda round_num: client_lr
server_lr_schedule = server_lr
if not callable(server_lr_schedule):
server_lr_schedule = lambda round_num: server_lr
dummy_model = model_fn()
server_init_tf = build_server_init_fn(
model_fn,
# Initialize with the learning rate for round zero.
lambda: server_optimizer_fn(server_lr_schedule(0)))
server_state_type = server_init_tf.type_signature.result
model_weights_type = server_state_type.model
round_num_type = server_state_type.round_num
if dataset_preprocess_comp is not None:
tf_dataset_type = dataset_preprocess_comp.type_signature.parameter
model_input_type = tff.SequenceType(dummy_model.input_spec)
preprocessed_dataset_type = dataset_preprocess_comp.type_signature.result
if not model_input_type.is_assignable_from(preprocessed_dataset_type):
raise TypeError('Supplied `dataset_preprocess_comp` does not yield '
'batches that are compatible with the model constructed '
'by `model_fn`. Model expects type {m}, but dataset '
'yields type {d}.'.format(
m=model_input_type, d=preprocessed_dataset_type))
else:
tf_dataset_type = tff.SequenceType(dummy_model.input_spec)
model_input_type = tff.SequenceType(dummy_model.input_spec)
@tff.tf_computation(model_input_type, model_weights_type, round_num_type)
def client_update_fn(tf_dataset, initial_model_weights, round_num):
client_lr = client_lr_schedule(round_num)
client_optimizer = client_optimizer_fn(client_lr)
client_update = create_client_update_fn()
return client_update(model_fn(), tf_dataset, initial_model_weights,
client_optimizer, client_weight_fn)
@tff.tf_computation(server_state_type, model_weights_type.trainable)
def server_update_fn(server_state, model_delta):
model = model_fn()
server_lr = server_lr_schedule(server_state.round_num)
server_optimizer = server_optimizer_fn(server_lr)
# We initialize the server optimizer variables to avoid creating them
# within the scope of the tf.function server_update.
_initialize_optimizer_vars(model, server_optimizer)
return server_update(model, server_optimizer, server_state, model_delta)
@tff.federated_computation(
tff.FederatedType(server_state_type, tff.SERVER),
tff.FederatedType(tf_dataset_type, tff.CLIENTS))
def run_one_round(server_state, federated_dataset):
"""Orchestration logic for one round of computation.
Args:
server_state: A `ServerState`.
federated_dataset: A federated `tf.Dataset` with placement `tff.CLIENTS`.
Returns:
A tuple of updated `ServerState` and the result of
`tff.learning.Model.federated_output_computation`.
"""
client_model = tff.federated_broadcast(server_state.model)
client_round_num = tff.federated_broadcast(server_state.round_num)
if dataset_preprocess_comp is not None:
federated_dataset = tff.federated_map(dataset_preprocess_comp,
federated_dataset)
client_outputs = tff.federated_map(
client_update_fn,
(federated_dataset, client_model, client_round_num))
client_weight = client_outputs.client_weight
model_delta = tff.federated_mean(
client_outputs.weights_delta, weight=client_weight)
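# Usage sketch (not part of the original module): one way the iterative process
# returned by `build_fed_avg_process` above might be driven. `model_fn` and
# `federated_train_data` are assumed placeholders, not names defined in this
# file; the learning-rate lambda only illustrates the scheduling hook.
def _example_scheduled_fed_avg(model_fn, federated_train_data, num_rounds=5):
  """Runs a few rounds of the scheduled FedAvg process (illustrative only)."""
  fed_avg = build_fed_avg_process(
      model_fn=model_fn,
      client_optimizer_fn=tf.keras.optimizers.SGD,
      client_lr=lambda round_num: 0.1 * (0.99 ** round_num),
      server_optimizer_fn=tf.keras.optimizers.SGD,
      server_lr=1.0)
  state = fed_avg.initialize()
  metrics = None
  for _ in range(num_rounds):
    # `federated_train_data` is expected to be a list of client datasets that
    # match the model's input spec.
    state, metrics = fed_avg.next(state, federated_train_data)
  return state, metrics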
# Repo: MattGreav/test
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
testing for reactors.py
"""
# pylint: disable=missing-function-docstring,missing-class-docstring,abstract-method,protected-access
import copy
import os
import unittest
from six.moves import cPickle
from numpy.testing import assert_allclose, assert_equal
from armi import operators
from armi import runLog
from armi import settings
from armi import tests
from armi.materials import uZr
from armi.reactor.flags import Flags
from armi.reactor import assemblies
from armi.reactor import blocks
from armi.reactor import grids
from armi.reactor import locations
from armi.reactor import geometry
from armi.reactor import reactors
from armi.reactor.components import Hexagon, Rectangle
from armi.reactor.converters import geometryConverters
from armi.tests import TEST_ROOT, ARMI_RUN_PATH
from armi.utils import directoryChangers
from armi.physics.neutronics import isotopicDepletion
TEST_REACTOR = None # pickled string of test reactor (for fast caching)
def buildOperatorOfEmptyHexBlocks(customSettings=None):
"""
    Builds an operator w/ a reactor object with some hex assemblies and blocks, but all are empty
Doesn't depend on inputs and loads quickly.
Params
------
customSettings : dict
Dictionary of off-default settings to update
"""
settings.setMasterCs(None) # clear
cs = settings.getMasterCs() # fetch new
cs["db"] = False # stop use of database
if customSettings is not None:
cs.update(customSettings)
r = tests.getEmptyHexReactor()
r.core.setOptionsFromCs(cs)
o = operators.Operator(cs)
o.initializeInterfaces(r)
a = assemblies.HexAssembly("fuel")
a.spatialGrid = grids.axialUnitGrid(1)
b = blocks.HexBlock("TestBlock")
b.setType("fuel")
dims = {"Tinput": 600, "Thot": 600, "op": 16.0, "ip": 1, "mult": 1}
c = Hexagon("fuel", uZr.UZr(), **dims)
b.add(c)
a.add(b)
a.spatialLocator = r.core.spatialGrid[1, 0, 0]
o.r.core.add(a)
return o
def buildOperatorOfEmptyCartesianBlocks(customSettings=None):
"""
    Builds an operator w/ a reactor object with some Cartesian assemblies and blocks, but all are empty
Doesn't depend on inputs and loads quickly.
Params
------
customSettings : dict
Dictionary of off-default settings to update
"""
settings.setMasterCs(None) # clear
cs = settings.getMasterCs() # fetch new
cs["db"] = False # stop use of database
if customSettings is not None:
cs.update(customSettings)
r = tests.getEmptyCartesianReactor()
r.core.setOptionsFromCs(cs)
o = operators.Operator(cs)
o.initializeInterfaces(r)
a = assemblies.CartesianAssembly("fuel")
a.spatialGrid = grids.axialUnitGrid(1)
b = blocks.CartesianBlock("TestBlock")
b.setType("fuel")
dims = {
"Tinput": 600,
"Thot": 600,
"widthOuter": 16.0,
"lengthOuter": 10.0,
"widthInner": 1,
"lengthInner": 1,
"mult": 1,
}
c = Rectangle("fuel", uZr.UZr(), **dims)
b.add(c)
a.add(b)
a.spatialLocator = r.core.spatialGrid[1, 0, 0]
o.r.core.add(a)
return o
def loadTestReactor(
inputFilePath=TEST_ROOT, customSettings=None, inputFileName="armiRun.yaml"
):
r"""
Loads a test reactor. Can be used in other test modules.
Parameters
----------
inputFilePath : str
Path to the directory of the armiRun.yaml input file.
customSettings : dict with str keys and values of any type
For each key in customSettings, the cs which is loaded from the
armiRun.yaml will be overwritten to the value given in customSettings
for that key.
Returns
-------
o : Operator
r : Reactor
"""
# TODO: it would be nice to have this be more stream-oriented. Juggling files is
# devilishly difficult.
global TEST_REACTOR
fName = os.path.join(inputFilePath, inputFileName)
customSettings = customSettings or {}
isPickeledReactor = fName == ARMI_RUN_PATH and customSettings == {}
assemblies.resetAssemNumCounter()
if isPickeledReactor and TEST_REACTOR:
# return test reactor only if no custom settings are needed.
o, r, assemNum = cPickle.loads(TEST_REACTOR)
assemblies.setAssemNumCounter(assemNum)
settings.setMasterCs(o.cs)
o.reattach(r, o.cs)
return o, r
cs = settings.Settings(fName=fName)
# Overwrite settings if desired
if customSettings:
for settingKey, settingVal in customSettings.items():
cs[settingKey] = settingVal
if "verbosity" not in customSettings:
runLog.setVerbosity("error")
settings.setMasterCs(cs)
cs["stationaryBlocks"] = []
cs["nCycles"] = 3
o = operators.factory(cs)
r = reactors.loadFromCs(cs)
o.initializeInterfaces(r)
# put some stuff in the SFP too.
for a in range(10):
a = o.r.blueprints.constructAssem(o.cs, name="feed fuel")
o.r.core.sfp.add(a)
o.r.core.regenAssemblyLists()
if isPickeledReactor:
# cache it for fast load for other future tests
# protocol=2 allows for classes with __slots__ but not __getstate__ to be pickled
TEST_REACTOR = cPickle.dumps((o, o.r, assemblies.getAssemNum()), protocol=2)
return o, o.r
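# Illustrative sketch (not part of the original test module): how
# `loadTestReactor` can be driven directly with a custom setting. Only the
# "verbosity" key is assumed here; it is handled explicitly inside
# loadTestReactor above.
def _exampleLoadTestReactor():
    """Loads the shared test reactor once and reports its size (sketch only)."""
    o, r = loadTestReactor(customSettings={"verbosity": "info"})
    # At this point the operator and reactor are attached to each other.
    return o, len(r.core)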
class ReactorTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
# prepare the input files. This is important so the unit tests run from wherever
# they need to run from.
cls.directoryChanger = directoryChangers.DirectoryChanger(TEST_ROOT)
cls.directoryChanger.open()
@classmethod
def tearDownClass(cls):
cls.directoryChanger.close()
class HexReactorTests(ReactorTests):
def setUp(self):
self.o, self.r = loadTestReactor(self.directoryChanger.destination)
def testGetTotalParam(self):
# verify that the block params are being read.
val = self.r.core.getTotalBlockParam("power")
val2 = self.r.core.getTotalBlockParam("power", addSymmetricPositions=True)
self.assertEqual(val2 / self.r.core.powerMultiplier, val)
def test_geomType(self):
self.assertTrue(self.r.core.geomType == geometry.GeomType.HEX)
def test_growToFullCore(self):
nAssemThird = len(self.r.core)
self.assertEqual(self.r.core.powerMultiplier, 3.0)
self.assertFalse(self.r.core.isFullCore)
self.r.core.growToFullCore(self.o.cs)
aNums = []
for a in self.r.core.getChildren():
self.assertNotIn(a.getNum(), aNums)
aNums.append(a.getNum())
bNames = [b.getName() for b in self.r.core.getBlocks()]
for bName in bNames:
self.assertEqual(bNames.count(bName), 1)
self.assertEqual(self.r.core.powerMultiplier, 1.0)
self.assertTrue(self.r.core.isFullCore)
nAssemFull = len(self.r.core)
self.assertEqual(nAssemFull, (nAssemThird - 1) * 3 + 1)
def test_getBlocksByIndices(self):
indices = [(1, 1, 1), (3, 2, 2)]
actualBlocks = self.r.core.getBlocksByIndices(indices)
actualNames = [b.getName() for b in actualBlocks]
expectedNames = ["B0022-001", "B0043-002"]
self.assertListEqual(expectedNames, actualNames)
def test_getAllXsSuffixes(self):
actualSuffixes = self.r.core.getAllXsSuffixes()
expectedSuffixes = ["AA"]
self.assertListEqual(expectedSuffixes, actualSuffixes)
def test_countBlocksOfType(self):
numControlBlocks = self.r.core.countBlocksWithFlags([Flags.DUCT, Flags.CONTROL])
self.assertEqual(numControlBlocks, 3)
numControlBlocks = self.r.core.countBlocksWithFlags(
[Flags.DUCT, Flags.CONTROL, Flags.FUEL], Flags.CONTROL
)
self.assertEqual(numControlBlocks, 3)
def test_countFuelAxialBlocks(self):
numFuelBlocks = self.r.core.countFuelAxialBlocks()
self.assertEqual(numFuelBlocks, 3)
def test_getFirstFuelBlockAxialNode(self):
firstFuelBlock = self.r.core.getFirstFuelBlockAxialNode()
self.assertEqual(firstFuelBlock, 1)
def test_getMaxAssembliesInHexRing(self):
maxAssems = self.r.core.getMaxAssembliesInHexRing(3)
self.assertEqual(maxAssems, 4)
def test_getMaxNumPins(self):
numPins = self.r.core.getMaxNumPins()
self.assertEqual(169, numPins)
def test_addMoreNodes(self):
originalMesh = self.r.core.p.axialMesh
bigMesh = list(originalMesh)
bigMesh[2] = 30.0
smallMesh = originalMesh[0:2] + [40.0, 47.0] + originalMesh[2:]
newMesh1, originalMeshGood = self.r.core.addMoreNodes(originalMesh)
newMesh2, bigMeshGood = self.r.core.addMoreNodes(bigMesh)
newMesh3, smallMeshGood = self.r.core.addMoreNodes(smallMesh)
expectedMesh = [0.0, 25.0, 50.0, 75.0, 100.0, 118.75, 137.5, 156.25, 175.0]
expectedBigMesh = [
0.0,
25.0,
30.0,
36.75,
75.0,
100.0,
118.75,
137.5,
156.25,
175.0,
]
expectedSmallMesh = [
0.0,
25.0,
40.0,
47.0,
50.0,
53.75,
75.0,
100.0,
118.75,
137.5,
156.25,
175.0,
]
self.assertListEqual(expectedMesh, newMesh1)
self.assertListEqual(expectedBigMesh, newMesh2)
self.assertListEqual(expectedSmallMesh, newMesh3)
self.assertTrue(originalMeshGood)
self.assertFalse(bigMeshGood)
self.assertFalse(smallMeshGood)
def test_findAxialMeshIndexOf(self):
numMeshPoints = (
len(self.r.core.p.axialMesh) - 2
) # -1 for typical reason, -1 more because mesh includes 0
self.assertEqual(self.r.core.findAxialMeshIndexOf(0.0), 0)
self.assertEqual(self.r.core.findAxialMeshIndexOf(0.1), 0)
self.assertEqual(
self.r.core.findAxialMeshIndexOf(self.r.core[0].getHeight()), numMeshPoints
)
self.assertEqual(
self.r.core.findAxialMeshIndexOf(self.r.core[0].getHeight() - 0.1),
numMeshPoints,
)
self.assertEqual(
self.r.core.findAxialMeshIndexOf(self.r.core[0][0].getHeight() + 0.1), 1
)
def test_findAllAxialMeshPoints(self):
mesh = self.r.core.findAllAxialMeshPoints(applySubMesh=False)
self.assertEqual(mesh[0], 0)
self.assertAlmostEqual(mesh[-1], self.r.core[0].getHeight())
blockMesh = self.r.core.getFirstAssembly(Flags.FUEL).spatialGrid._bounds[2]
assert_allclose(blockMesh, mesh)
def test_findAllAziMeshPoints(self):
aziPoints = self.r.core.findAllAziMeshPoints()
expectedPoints = [
-50.7707392969,
-36.2648137835,
-21.7588882701,
-7.2529627567,
7.2529627567,
21.7588882701,
36.2648137835,
50.7707392969,
65.2766648103,
79.7825903236,
94.288515837,
108.7944413504,
123.3003668638,
]
assert_allclose(expectedPoints, aziPoints)
def test_findAllRadMeshPoints(self):
radPoints = self.r.core.findAllRadMeshPoints()
expectedPoints = [
-12.5625,
-4.1875,
4.1875,
12.5625,
20.9375,
29.3125,
37.6875,
46.0625,
54.4375,
62.8125,
71.1875,
79.5625,
87.9375,
96.3125,
104.6875,
113.0625,
121.4375,
129.8125,
138.1875,
146.5625,
]
assert_allclose(expectedPoints, radPoints)
def test_findNeighbors(self):
loc = self.r.core.spatialGrid.getLocatorFromRingAndPos(1, 1)
a = self.r.core.childrenByLocator[loc]
neighbs = self.r.core.findNeighbors(
a, duplicateAssembliesOnReflectiveBoundary=True
)
locs = [a.spatialLocator.getRingPos() for a in neighbs]
self.assertEqual(len(neighbs), 6)
self.assertIn((2, 1), locs)
self.assertIn((2, 2), locs)
self.assertEqual(locs.count((2, 1)), 3)
loc = self.r.core.spatialGrid.getLocatorFromRingAndPos(1, 1)
a = self.r.core.childrenByLocator[loc]
neighbs = self.r.core.findNeighbors(
a, duplicateAssembliesOnReflectiveBoundary=True
)
locs = [a.spatialLocator.getRingPos() for a in neighbs]
self.assertEqual(locs, [(2, 1), (2, 2)] * 3, 6)
loc = self.r.core.spatialGrid.getLocatorFromRingAndPos(2, 2)
a = self.r.core.childrenByLocator[loc]
neighbs = self.r.core.findNeighbors(
a, duplicateAssembliesOnReflectiveBoundary=True
)
locs = [a.spatialLocator.getRingPos() for a in neighbs]
self.assertEqual(len(neighbs), 6)
self.assertEqual(locs, [(3, 2), (3, 3), (3, 12), (2, 1), (1, 1), (2, 1)])
# try with edge assemblies
# With edges, the neighbor is the one that's actually next to it.
converter = geometryConverters.EdgeAssemblyChanger()
converter.addEdgeAssemblies(self.r.core)
loc = self.r.core.spatialGrid.getLocatorFromRingAndPos(2, 2)
a = self.r.core.childrenByLocator[loc]
neighbs = self.r.core.findNeighbors(
a, duplicateAssembliesOnReflectiveBoundary=True
)
locs = [a.spatialLocator.getRingPos() for a in neighbs]
self.assertEqual(len(neighbs), 6)
# in this case no locations that aren't actually in the core should be returned
self.assertEqual(locs, [(3, 2), (3, 3), (3, 4), (2, 1), (1, 1), (2, 1)])
converter.removeEdgeAssemblies(self.r.core)
# try with full core
self.r.core.growToFullCore(self.o.cs)
loc = self.r.core.spatialGrid.getLocatorFromRingAndPos(3, 4)
a = self.r.core.childrenByLocator[loc]
neighbs = self.r.core.findNeighbors(a)
self.assertEqual(len(neighbs), 6)
locs = [a.spatialLocator.getRingPos() for a in neighbs]
for loc in [(2, 2), (2, 3), (3, 3), (3, 5), (4, 5), (4, 6)]:
self.assertIn(loc, locs)
loc = self.r.core.spatialGrid.getLocatorFromRingAndPos(2, 2)
a = self.r.core.childrenByLocator[loc]
neighbs = self.r.core.findNeighbors(a)
locs = [a.spatialLocator.getRingPos() for a in neighbs]
for loc in [(1, 1), (2, 1), (2, 3), (3, 2), (3, 3), (3, 4)]:
self.assertIn(loc, locs)
# Try the duplicate option in full core as well
loc = self.r.core.spatialGrid.getLocatorFromRingAndPos(2, 2)
a = self.r.core.childrenByLocator[loc]
neighbs = self.r.core.findNeighbors(
a, duplicateAssembliesOnReflectiveBoundary=True
)
locs = [a.spatialLocator.getRingPos() for a in neighbs]
self.assertEqual(len(neighbs), 6)
        self.assertEqual(locs, [(3, 2), (3, 3), (3, 4),
# Repo: rieder/limepy, file: limepy/spes.py
# -*- coding: utf-8 -*-
import numpy
import scipy
from scipy.interpolate import BPoly, interp1d, UnivariateSpline
from numpy import exp, sqrt, pi, sin, cos, log10
from scipy.special import gamma, gammainc, gammaincc, hyp1f1
from scipy.integrate import ode, simps
from math import factorial, sinh
# Authors: <NAME> & <NAME> (Surrey 2018)
class spes:
def __init__(self, phi0, **kwargs):
r"""
SPES models
Spherical Potential Escapers Stitched models
This code solves the models presented in Claydon et al. 2019 (C19)
and calculates radial profiles for some useful quantities. The
models are defined by the distribution function (DF) of eq. (3) in
C19.
Argument (required):
====================
phi0 : scalar, required
Central dimensionless potential
Named arguments (required):
===========================
B : scalar, required
            Reduction of the DF at truncation [0-1]
eta : scalar, required
Velocity dispersion of PEs in model units [0-1]
Input for scaling:
==================
G : scalar, optional
Gravitational const; 0.004302 (km/s)^2 pc/Msun if both M and
scale radius are set (see below)
M : scalar, optional
Total mass of bound stars and PEs < rt
r0, rh, rv, rt : scalar, optional
Final scaled radius; default=rh=3
Options:
========
project : bool, optional
Compute model properties in projection; default=False
nrt : scalar, optional
Solve model until nrt*rt; default=2
potonly : bool, optional
Fast solution by solving potential only; default=False
max_step : scalar, optional
Maximum step size for ode output; default=1e10
verbose : bool, optional
Print diagnostics; default=False
ode_atol : absolute tolerance parameter for ode solver; default=1e-7
ode_rtol : relative tolerance parameter for ode solver; default=1e-7
Output variables:
=================
All models:
-----------
rhat, phihat, rhohat : radius, potential and density in model units
r, phi, rho : as above, in scaled units
v2 : total mean-square velocity
mc : enclosed mass profile
r0, rh, rv, rt : radii (King, half-mass, virial, truncation)
K : kinetic energy
U, Q : potential energy, virial ratio
volume : phase-space volume occupied by model
nstep : number of integration steps (depends on ode_rtol&ode_atol)
converged : bool flag to indicate whether model was solved
Projected models:
-----------------
Sigma : surface (mass) density
v2p : line-of-sight mean-square velocity
v2R, v2T : radial and tangential component of mean-square velocity
on plane of the sky (same as v2p)
Examples:
=========
Construct a SPES model with W0=7, B=0.99, eta=0.1 for physical
parameters M=1e5 Msun, rh=3 pc. Solve until 2rt and project on sky.
>>> s = limepy.spes(7,B=0.99,eta=0.1,nrt=2,M=1e5,rh=3,project=True)
Plot surface density profile
>>> plt.plot(s.r, s.Sigma)
"""
# Set parameters
self._set_kwargs(phi0, **kwargs)
# Required constants
self.rhoint0 = [self._rhoint(self.phi0, 0)]
self.r0 = 1
self._poisson(self.potonly)
# Optional scaling
if (self.scale): self._scale()
# Optional computation of model properties in projection
if (self.project): self._project()
# Optional output
if (self.verbose):
print("\n Model properties: ")
print(" ----------------- ")
print(" phi0 = %5.2f; B = %12.6e; eta = %10.4e"%(self.phi0, self.B, self.eta))
print(" fpe = %10.4e"%self.fpe)
print(" Converged = %s"%(self.converged))
if (self.potonly):
print(" M = %10.3f; U = %10.4f "%(self.M, self.U))
else:
pV = self.rho[self.nbound-1]*self.v2[self.nbound-1]*(4.*pi/3)*self.rt**3
out1 = (self.U,self.K,pV, -self.K/(self.U+3*pV))
print(" M = %10.3e "%self.M)
frm = " U = %9.3e; K = %9.3e; p_eV_e = %9.3e; "
frm += "Q = -K/(U+3p_eV_e) = %5.3f "
print(frm%out1)
out2 = (self.rv/self.rh, self.rh/self.r0, self.rt/self.r0)
frm = " rv/rh = %4.3f; rh/r0 = %6.3f; rt/r0 = %7.3f"
print(frm%out2)
def _set_kwargs(self, phi0, **kwargs):
""" Set parameters and scales """
self.model = "spes"
self.phi0 = phi0
self.B, self.C, self.eta, self.fpe = None, None, None, None
self._MS, self._RS, self._GS = None, None, None
self.scale_radius = None
self.scale = False
self.project = False
self.maxr = 1e99
self.rt = 1e99
self.max_step = self.maxr
self.max_arg_exp = 700 # Maximum argument for exponent
self.minimum_phi = 1e-8 # Stop criterion for integrator
self.ode_atol = 1e-7
self.ode_rtol = 1e-7
self.nmbin = 1 # for future expansion to multimass
self.G = 9.0/(4.0*pi)
self.mu, self.alpha = numpy.array([1.0]),numpy.array([1.0])
self.s2 = 1.0
self.s2j = numpy.array([1.0])
self.potonly, self.multi, self.verbose = [False]*3
self.nstep=1
self.converged=True
self._interpolator_set=False
# Spes specific
self.nrt = 2
if kwargs is not None:
for key, value in kwargs.items():
# Check for scaling input (similar to LIMEPY)
                if key == 'G':
self._GS, self.scale = value, True
                elif key == 'M':
self._MS, self.scale = value, True
                elif key == 'r0':
if self.scale_radius is None:
self._RS,self.scale_radius,self.scale = value,'r0', True
else:
error="Can not set scale radius to r0,already set to %s"
raise ValueError(error%self.scale_radius)
                elif key == 'rh':
if self.scale_radius is None:
self._RS, self.scale_radius, self.scale=value,'rh', True
else:
error="Can not set scale radius to rh,already set to %s"
raise ValueError(error%self.scale_radius)
                elif key == 'rv':
if self.scale_radius is None:
self._RS, self.scale_radius, self.scale=value,'rv', True
else:
error="Can not set scale radius to rv,already set to %s"
raise ValueError(error%self.scale_radius)
                elif key == 'rt':
if self.scale_radius is None:
self._RS, self.scale_radius, self.scale=value,'rt', True
else:
error="Can not set scale radius to rt,already set to %s"
raise ValueError(error%self.scale_radius)
# SPES specific checks
                elif key == 'C':
error="Can not set C, use B & eta. C and fpe are computed internally"
raise ValueError(error)
else:
# Set input parameters
setattr(self, key, value)
# Check if 2 required spes parameters are set
npar = 0
for par in [self.B, self.eta, self.fpe]:
if (par != None): npar +=1
if npar<2:
error="Two model parameters from {B, eta, fpe} are needed"
raise ValueError(error)
# There are 3 options to define the model
if (self.B != None) and (self.eta != None):
# this is most straightforward, fpe is computed after solving
self.C = 1 + (self.B-1)/self.eta**2
else:
# if fpe is set in input, iteration is required
self._iterate_on_B()
if (self.scale):
if self._MS is None:
self._MS = 1e5
if (self.verbose):
print(" No mass-scale provided, set to default M = 1e5")
if self._RS is None:
self._RS, self.scale_radius = 3, 'rh'
if (self.verbose):
print(" No radius-scale provided, set to default rh = 3")
if self._GS is None:
self._GS = 0.004302
if (self.verbose):
print(" No G provided, set to default: G = 0.004302")
if (self.verbose):
vars=(self._GS, self._MS, self.scale_radius, self._RS)
print(" Model scaled to: G = %s, M = %s, %s = %s"%vars)
return
def _iterate_on_B(self):
# Iteratively find B based on input fpe and eta
frac = 0
# Check fpe input range
if (self.fpe < 0) or (self.fpe > 1):
error="error: fpe needs to be between 0 and 1"
raise ValueError(error)
# Option 1: eta & fpe are set, find B
if (self.eta != None):
# Check value of eta within allowed range
if (self.eta < 0) or (self.eta > 1):
error="error: eta needs to be between 0 and 1"
raise ValueError(error)
self.B = 0.999 # trial
self.C = 1 + (self.B-1)/self.eta**2
# Find B within 1% (TBD: user defined accuracy)
i = 0
while abs(frac - self.fpe)/self.fpe > 0.001:
self.nstep=1
# Set parameters and scales
if frac > self.fpe:
self.B += 0.5*(1-self.B)
else:
self.B -= 0.5*(1-self.B)
self.C = 1 + (self.B-1)/self.eta**2
self.rhoint0 = [self._rhoint(self.phi0, 0)]
self.r0 = 1.0
# Solve Poisson equation to get the potential
self._poisson(self.potonly)
frac = self.Mpe/self.M
self.rhohat = self._rhohat(self.phihat, self.r, 0)
i+=1
# Option 2: B is set, find eta
else:
# Check value of B within range
if (self.B < 0) or (self.B > 1):
error="error: B needs to be between 0 and 1"
raise ValueError(error)
# TBD
raise ValueError("Combo B & fpe TBD")
# self.eta = sqrt((1.-self.B)/(1.0-self.C))
def _logcheck(self, t, y):
""" Logs steps and checks for final values """
if (y[0]>self.minimum_phi):
if (t>0): self.r, self._y = numpy.r_[self.r, t], numpy.c_[self._y, y]
self.nstep+=1
return 0
else:
return -1
def _logcheck2(self, t, y):
""" Logs steps and checks for final values """
# Ensure that tidal radius value is not added again
if (t>self.rt):
self.r, self._y = numpy.r_[self.r, t], numpy.c_[self._y, y]
self.nstep+=1
return 0 if (t<=self.nrt*self.rt) else -1
def _poisson(self, potonly):
""" Solves Poisson equation """
# y = [phi, u_j, U, K_j], where u = -M(<r)/G
# Initialize
self.r = numpy.array([0])
self._y = numpy.r_[self.phi0, numpy.zeros(2*self.nmbin+1)]
if (not potonly): self._y = numpy.r_[self._y,numpy.zeros(self.nmbin)]
| |
m.c7583 = Constraint(expr= - m.b436 + m.b444 - m.b494 <= 0)
m.c7584 = Constraint(expr= - m.b436 + m.b445 - m.b495 <= 0)
m.c7585 = Constraint(expr= - m.b436 + m.b446 - m.b496 <= 0)
m.c7586 = Constraint(expr= - m.b436 + m.b447 - m.b497 <= 0)
m.c7587 = Constraint(expr= - m.b437 + m.b438 - m.b498 <= 0)
m.c7588 = Constraint(expr= - m.b437 + m.b439 - m.b499 <= 0)
m.c7589 = Constraint(expr= - m.b437 + m.b440 - m.b500 <= 0)
m.c7590 = Constraint(expr= - m.b437 + m.b441 - m.b501 <= 0)
m.c7591 = Constraint(expr= - m.b437 + m.b442 - m.b502 <= 0)
m.c7592 = Constraint(expr= - m.b437 + m.b443 - m.b503 <= 0)
m.c7593 = Constraint(expr= - m.b437 + m.b444 - m.b504 <= 0)
m.c7594 = Constraint(expr= - m.b437 + m.b445 - m.b505 <= 0)
m.c7595 = Constraint(expr= - m.b437 + m.b446 - m.b506 <= 0)
m.c7596 = Constraint(expr= - m.b437 + m.b447 - m.b507 <= 0)
m.c7597 = Constraint(expr= - m.b438 + m.b439 - m.b508 <= 0)
m.c7598 = Constraint(expr= - m.b438 + m.b440 - m.b509 <= 0)
m.c7599 = Constraint(expr= - m.b438 + m.b441 - m.b510 <= 0)
m.c7600 = Constraint(expr= - m.b438 + m.b442 - m.b511 <= 0)
m.c7601 = Constraint(expr= - m.b438 + m.b443 - m.b512 <= 0)
m.c7602 = Constraint(expr= - m.b438 + m.b444 - m.b513 <= 0)
m.c7603 = Constraint(expr= - m.b438 + m.b445 - m.b514 <= 0)
m.c7604 = Constraint(expr= - m.b438 + m.b446 - m.b515 <= 0)
m.c7605 = Constraint(expr= - m.b438 + m.b447 - m.b516 <= 0)
m.c7606 = Constraint(expr= - m.b439 + m.b440 - m.b517 <= 0)
m.c7607 = Constraint(expr= - m.b439 + m.b441 - m.b518 <= 0)
m.c7608 = Constraint(expr= - m.b439 + m.b442 - m.b519 <= 0)
m.c7609 = Constraint(expr= - m.b439 + m.b443 - m.b520 <= 0)
m.c7610 = Constraint(expr= - m.b439 + m.b444 - m.b521 <= 0)
m.c7611 = Constraint(expr= - m.b439 + m.b445 - m.b522 <= 0)
m.c7612 = Constraint(expr= - m.b439 + m.b446 - m.b523 <= 0)
m.c7613 = Constraint(expr= - m.b439 + m.b447 - m.b524 <= 0)
m.c7614 = Constraint(expr= - m.b440 + m.b441 - m.b525 <= 0)
m.c7615 = Constraint(expr= - m.b440 + m.b442 - m.b526 <= 0)
m.c7616 = Constraint(expr= - m.b440 + m.b443 - m.b527 <= 0)
m.c7617 = Constraint(expr= - m.b440 + m.b444 - m.b528 <= 0)
m.c7618 = Constraint(expr= - m.b440 + m.b445 - m.b529 <= 0)
m.c7619 = Constraint(expr= - m.b440 + m.b446 - m.b530 <= 0)
m.c7620 = Constraint(expr= - m.b440 + m.b447 - m.b531 <= 0)
m.c7621 = Constraint(expr= - m.b441 + m.b442 - m.b532 <= 0)
m.c7622 = Constraint(expr= - m.b441 + m.b443 - m.b533 <= 0)
m.c7623 = Constraint(expr= - m.b441 + m.b444 - m.b534 <= 0)
m.c7624 = Constraint(expr= - m.b441 + m.b445 - m.b535 <= 0)
m.c7625 = Constraint(expr= - m.b441 + m.b446 - m.b536 <= 0)
m.c7626 = Constraint(expr= - m.b441 + m.b447 - m.b537 <= 0)
m.c7627 = Constraint(expr= - m.b442 + m.b443 - m.b538 <= 0)
m.c7628 = Constraint(expr= - m.b442 + m.b444 - m.b539 <= 0)
m.c7629 = Constraint(expr= - m.b442 + m.b445 - m.b540 <= 0)
m.c7630 = Constraint(expr= - m.b442 + m.b446 - m.b541 <= 0)
m.c7631 = Constraint(expr= - m.b442 + m.b447 - m.b542 <= 0)
m.c7632 = Constraint(expr= - m.b443 + m.b444 - m.b543 <= 0)
m.c7633 = Constraint(expr= - m.b443 + m.b445 - m.b544 <= 0)
m.c7634 = Constraint(expr= - m.b443 + m.b446 - m.b545 <= 0)
m.c7635 = Constraint(expr= - m.b443 + m.b447 - m.b546 <= 0)
m.c7636 = Constraint(expr= - m.b444 + m.b445 - m.b547 <= 0)
m.c7637 = Constraint(expr= - m.b444 + m.b446 - m.b548 <= 0)
m.c7638 = Constraint(expr= - m.b444 + m.b447 - m.b549 <= 0)
m.c7639 = Constraint(expr= - m.b445 + m.b446 - m.b550 <= 0)
m.c7640 = Constraint(expr= - m.b445 + m.b447 - m.b551 <= 0)
m.c7641 = Constraint(expr= - m.b446 + m.b447 - m.b552 <= 0)
m.c7642 = Constraint(expr= - m.b448 + m.b449 - m.b462 <= 0)
m.c7643 = Constraint(expr= - m.b448 + m.b450 - m.b463 <= 0)
m.c7644 = Constraint(expr= - m.b448 + m.b451 - m.b464 <= 0)
m.c7645 = Constraint(expr= - m.b448 + m.b452 - m.b465 <= 0)
m.c7646 = Constraint(expr= - m.b448 + m.b453 - m.b466 <= 0)
m.c7647 = Constraint(expr= - m.b448 + m.b454 - m.b467 <= 0)
m.c7648 = Constraint(expr= - m.b448 + m.b455 - m.b468 <= 0)
m.c7649 = Constraint(expr= - m.b448 + m.b456 - m.b469 <= 0)
m.c7650 = Constraint(expr= - m.b448 + m.b457 - m.b470 <= 0)
m.c7651 = Constraint(expr= - m.b448 + m.b458 - m.b471 <= 0)
m.c7652 = Constraint(expr= - m.b448 + m.b459 - m.b472 <= 0)
m.c7653 = Constraint(expr= - m.b448 + m.b460 - m.b473 <= 0)
m.c7654 = Constraint(expr= - m.b448 + m.b461 - m.b474 <= 0)
m.c7655 = Constraint(expr= - m.b449 + m.b450 - m.b475 <= 0)
m.c7656 = Constraint(expr= - m.b449 + m.b451 - m.b476 <= 0)
m.c7657 = Constraint(expr= - m.b449 + m.b452 - m.b477 <= 0)
m.c7658 = Constraint(expr= - m.b449 + m.b453 - m.b478 <= 0)
m.c7659 = Constraint(expr= - m.b449 + m.b454 - m.b479 <= 0)
m.c7660 = Constraint(expr= - m.b449 + m.b455 - m.b480 <= 0)
m.c7661 = Constraint(expr= - m.b449 + m.b456 - m.b481 <= 0)
m.c7662 = Constraint(expr= - m.b449 + m.b457 - m.b482 <= 0)
m.c7663 = Constraint(expr= - m.b449 + m.b458 - m.b483 <= 0)
m.c7664 = Constraint(expr= - m.b449 + m.b459 - m.b484 <= 0)
m.c7665 = Constraint(expr= - m.b449 + m.b460 - m.b485 <= 0)
m.c7666 = Constraint(expr= - m.b449 + m.b461 - m.b486 <= 0)
m.c7667 = Constraint(expr= - m.b450 + m.b451 - m.b487 <= 0)
m.c7668 = Constraint(expr= - m.b450 + m.b452 - m.b488 <= 0)
m.c7669 = Constraint(expr= - m.b450 + m.b453 - m.b489 <= 0)
m.c7670 = Constraint(expr= - m.b450 + m.b454 - m.b490 <= 0)
m.c7671 = Constraint(expr= - m.b450 + m.b455 - m.b491 <= 0)
m.c7672 = Constraint(expr= - m.b450 + m.b456 - m.b492 <= 0)
m.c7673 = Constraint(expr= - m.b450 + m.b457 - m.b493 <= 0)
m.c7674 = Constraint(expr= - m.b450 + m.b458 - m.b494 <= 0)
m.c7675 = Constraint(expr= - m.b450 + m.b459 - m.b495 <= 0)
m.c7676 = Constraint(expr= - m.b450 + m.b460 - m.b496 <= 0)
m.c7677 = Constraint(expr= - m.b450 + m.b461 - m.b497 <= 0)
m.c7678 = Constraint(expr= - m.b451 + m.b452 - m.b498 <= 0)
m.c7679 = Constraint(expr= - m.b451 + m.b453 - m.b499 <= 0)
m.c7680 = Constraint(expr= - m.b451 + m.b454 - m.b500 <= 0)
m.c7681 = Constraint(expr= - m.b451 + m.b455 - m.b501 <= 0)
m.c7682 = Constraint(expr= - m.b451 + m.b456 - m.b502 <= 0)
m.c7683 = Constraint(expr= - m.b451 + m.b457 - m.b503 <= 0)
m.c7684 = Constraint(expr= - m.b451 + m.b458 - m.b504 <= 0)
m.c7685 = Constraint(expr= - m.b451 + m.b459 - m.b505 <= 0)
m.c7686 = Constraint(expr= - m.b451 + m.b460 - m.b506 <= 0)
m.c7687 = Constraint(expr= - m.b451 + m.b461 - m.b507 <= 0)
m.c7688 = Constraint(expr= - m.b452 + m.b453 - m.b508 <= 0)
m.c7689 = Constraint(expr= - m.b452 + m.b454 - m.b509 <= 0)
m.c7690 = Constraint(expr= - m.b452 + m.b455 - m.b510 <= 0)
m.c7691 = Constraint(expr= - m.b452 + m.b456 - m.b511 <= 0)
m.c7692 = Constraint(expr= - m.b452 + m.b457 - m.b512 <= 0)
m.c7693 = Constraint(expr= - m.b452 + m.b458 - m.b513 <= 0)
m.c7694 = Constraint(expr= - m.b452 + m.b459 - m.b514 <= 0)
m.c7695 = Constraint(expr= - m.b452 + m.b460 - m.b515 <= 0)
m.c7696 = Constraint(expr= - m.b452 + m.b461 - m.b516 <= 0)
m.c7697 = Constraint(expr= - m.b453 + m.b454 - m.b517 <= 0)
m.c7698 = Constraint(expr= - m.b453 + m.b455 - m.b518 <= 0)
m.c7699 = Constraint(expr= - m.b453 + m.b456 - m.b519 <= 0)
m.c7700 = Constraint(expr= - m.b453 + m.b457 - m.b520 <= 0)
m.c7701 = Constraint(expr= - m.b453 + m.b458 - m.b521 <= 0)
m.c7702 = Constraint(expr= - m.b453 + m.b459 - m.b522 <= 0)
m.c7703 = Constraint(expr= - m.b453 + m.b460 - m.b523 <= 0)
m.c7704 = Constraint(expr= - m.b453 + m.b461 - m.b524 <= 0)
m.c7705 = Constraint(expr= - m.b454 + m.b455 - m.b525 <= 0)
m.c7706 = Constraint(expr= - m.b454 + m.b456 - m.b526 <= 0)
m.c7707 = Constraint(expr= - m.b454 + m.b457 - m.b527 <= 0)
m.c7708 = Constraint(expr= - m.b454 + m.b458 - m.b528 <= 0)
m.c7709 = Constraint(expr= - m.b454 + m.b459 - m.b529 <= 0)
m.c7710 = Constraint(expr= - m.b454 + m.b460 - m.b530 <= 0)
# File: polo2/polo_mallet.py
import os
import time
import re
import pandas as pd
from itertools import combinations
from lxml import etree
from scipy import stats
from polo2 import PoloDb
from polo2 import PoloFile
from polo2 import PoloMath as pm
class PoloMallet(PoloDb):
def __init__(self, config, trial='trial1'):
"""Initialize MALLET with trial name"""
if trial not in config.trials:
raise ValueError("Invalid trail name `{}`.format(trial)")
self.config = config
self.trial = trial
self.config.set_config_attributes(self) # Prefixes keys with cfg_
self.config.set_config_attributes(self, self.trial)
# todo: Put this in config.ini
self.cfg_tw_quantile = 0.8
# Temporary hack to handle casting
for key in "num_topics num_iterations optimize_interval num_threads num_top_words".split():
att = 'cfg_{}'.format(key)
setattr(self, att, int(getattr(self, att)))
self.cfg_thresh = float(self.cfg_thresh)
# Get replacment files
# todo: Fix order; higher ngrams should go first ... argues for sortable names
self.replacement_files = self.cfg_replacements
for filename in os.listdir('corpus'):
if 'replacements_' in filename:
self.replacement_files += ' corpus/' + filename
self.trial_name = self.trial # HACK
self.file_prefix = '{}/{}'.format(self.cfg_mallet_out_dir, self.trial_name)
self.mallet = {'import-file': {}, 'train-topics': {}}
self.mallet_init()
dbfile = self.config.generate_model_db_file_path(self.trial)
PoloDb.__init__(self, dbfile)
# todo: Remove or replace
def generate_trial_name(self):
"""Generate trial name based on metadata"""
ts = time.time()
self.trial_name = '{}-model-t{}-i{}-{}'.format(self.trial, self.cfg_num_topics,
self.cfg_num_iterations, int(ts))
def mallet_init(self):
"""Initialize command line arguments for MALLET"""
        # todo: Consider putting this in the init for the object itself
if not os.path.exists(self.cfg_mallet_path):
raise ValueError('Mallet cannot be found.')
print('Import file:', self.cfg_mallet_corpus_input)
self.mallet['import-file']['input'] = self.cfg_mallet_corpus_input
self.mallet['import-file']['output'] = '{}/mallet-corpus.mallet'.format(self.cfg_mallet_out_dir) # Put this in corpus?
self.mallet['import-file']['keep-sequence'] = 'TRUE' # todo: Control this by config
self.mallet['import-file']['remove-stopwords'] = 'FALSE' # todo: Control this by config
self.mallet['import-file']['replacement-files'] = self.replacement_files
self.mallet['train-topics']['num-topics'] = self.cfg_num_topics
self.mallet['train-topics']['num-top-words'] = self.cfg_num_top_words
self.mallet['train-topics']['num-iterations'] = self.cfg_num_iterations
self.mallet['train-topics']['optimize-interval'] = self.cfg_optimize_interval
self.mallet['train-topics']['num-threads'] = self.cfg_num_threads
self.mallet['train-topics']['input'] = self.mallet['import-file']['output']
self.mallet['train-topics']['output-topic-keys'] = '{}-topic-keys.txt'.format(self.file_prefix)
self.mallet['train-topics']['output-doc-topics'] = '{}-doc-topics.txt'.format(self.file_prefix)
self.mallet['train-topics']['word-topic-counts-file'] = '{}-word-topic-counts.txt'.format(self.file_prefix)
self.mallet['train-topics']['topic-word-weights-file'] = '{}-topic-word-weights.txt'.format(self.file_prefix)
self.mallet['train-topics']['xml-topic-report'] = '{}-topic-report.xml'.format(self.file_prefix)
self.mallet['train-topics']['xml-topic-phrase-report'] = '{}-topic-phrase-report.xml'.format(self.file_prefix)
self.mallet['train-topics']['diagnostics-file'] = '{}-diagnostics.xml'.format(self.file_prefix)
# self.mallet['train-topics']['output-topic-docs'] = '{}-topic-docs.txt'.format(self.file_prefix)
# self.mallet['train-topics']['doc-topics-threshold'] = self.config.thresh
self.mallet['train-topics']['output-state'] = '{}-state.gz'.format(self.file_prefix)
self.mallet['train-topics']['num-top-docs'] = self.cfg_num_topics
self.mallet['train-topics']['doc-topics-max'] = self.cfg_doc_topics_max
self.mallet['train-topics']['show-topics-interval'] = self.cfg_show_topics_interval
def mallet_run_command(self, op):
"""Run a MALLET command (e.g. import-file or train-topics)"""
my_args = ['--{} {}'.format(arg,self.mallet[op][arg]) for arg in self.mallet[op]]
my_cmd = '{} {} {}'.format(self.cfg_mallet_path, op, ' '.join(my_args))
print(my_cmd)
try:
os.system(my_cmd)
except:
raise ValueError('Command would not execute:', my_cmd)
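    # For illustration only (paths below are assumed, not taken from a real
    # config): with op='import-file', the loop above turns self.mallet[op]
    # into a single shell command roughly of the form
    #
    #   /path/to/mallet import-file --input corpus/corpus.csv \
    #       --output mallet-out/mallet-corpus.mallet --keep-sequence TRUE \
    #       --remove-stopwords FALSE --replacement-files corpus/replacements.txt
    #
    # i.e. every key/value pair in self.mallet[op] becomes a "--key value"
    # flag appended after the mallet binary path and the operation name.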
def mallet_import(self):
"""Import contents of MALLET output files into Polo DB"""
self.mallet_run_command('import-file')
def mallet_train(self):
"""Train MALLET by running train-topics"""
self.mallet_run_command('train-topics')
def clean_up(self):
"""Clean up files created by MALLET"""
file_mask = '{}-*.*'.format(self.file_prefix)
my_cmd = 'rm {}'.format(file_mask)
try:
os.system(my_cmd)
except:
raise ValueError('Unable to delete files: {}'.format(file_mask))
# TABLE IMPORT METHODS
def tables_to_db(self):
"""Import core tables from MALLET files into Polo DB"""
self.import_table_config()
self.import_table_state()
self.import_table_topic()
self.import_tables_topicword_and_word()
self.import_table_doctopic()
self.import_table_topicphrase()
def import_table_state(self, src_file=None):
"""Import the state file into docword table"""
if not src_file:
src_file = self.mallet['train-topics']['output-state']
import gzip
with gzip.open(src_file, 'rt') as f:  # text mode so the split fields are str rather than bytes
docword = pd.DataFrame([line.split() for line in f.readlines()[3:]],
columns=['doc_id', 'src', 'word_pos', 'word_id', 'word_str', 'topic_id'])
docword = docword[['doc_id', 'word_id', 'word_pos', 'topic_id']]
docword = docword.astype('int')
docword.set_index(['doc_id', 'word_id'], inplace=True)
self.put_table(docword, 'docword', index=True)
def import_table_topic(self, src_file=None):
"""Import data into topic table"""
if not src_file: src_file = self.mallet['train-topics']['output-topic-keys']
topic = pd.read_csv(src_file, sep='\t', header=None, index_col=False,
names=['topic_id', 'topic_alpha', 'topic_words'])
topic.set_index('topic_id', inplace=True)
topic['topic_alpha_zscore'] = stats.zscore(topic.topic_alpha)
topic['topic_gloss'] = 'TBA'
self.put_table(topic, 'topic', index=True)
def import_tables_topicword_and_word(self, src_file=None):
"""Import data into topicword and word tables"""
if not src_file: src_file = self.mallet['train-topics']['word-topic-counts-file']
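# Each line of the word-topic-counts file looks like
#   <word_id> <word_str> <topic_id>:<count> <topic_id>:<count> ...
# so the first two fields feed the word table and the remaining topic:count
# pairs are split out into topicword rows.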
WORD = []
TOPICWORD = []
src = PoloFile(src_file)
for line in src.read_lines():
row = line.strip().split()
(word_id, word_str) = row[0:2]
WORD.append((int(word_id), word_str))
for item in row[2:]:
(topic_id, word_count) = item.split(':')
TOPICWORD.append((int(word_id), int(topic_id), int(word_count)))
word = pd.DataFrame(WORD, columns=['word_id', 'word_str'])
topicword = pd.DataFrame(TOPICWORD, columns=['word_id', 'topic_id', 'word_count'])
word.set_index('word_id', inplace=True)
topicword.set_index(['word_id', 'topic_id'], inplace=True)
self.put_table(word, 'word', index=True)
self.put_table(topicword, 'topicword', index=True)
def import_table_doctopic(self, src_file=None):
"""Import data into doctopic table"""
if not src_file: src_file = self.mallet['train-topics']['output-doc-topics']
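# MALLET writes doc-topics in two layouts: a sparse one (doc id, doc name, then
# topic/weight pairs) when --doc-topics-threshold is passed, and a wide one with
# one weight column per topic otherwise; the two branches below handle these
# cases respectively.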
if 'doc-topics-threshold' in self.mallet['train-topics']:
DOC = []
DOCTOPIC = []
src = PoloFile(src_file)
for line in src.read_lines()[1:]:  # skip the header row; read_lines() for consistency with the other import methods
row = line.split('\t')
row.pop() # Pretty sure this is right
doc_id = row[0]
src_doc_id = int(row[1].split(',')[0])
doc_label = row[1].split(',')[1]
DOC.append([doc_id, src_doc_id, doc_label])
for i in range(2, len(row), 2):
topic_id = row[i]
topic_weight = row[i + 1]
DOCTOPIC.append([doc_id, topic_id, topic_weight])
doctopic = pd.DataFrame(DOCTOPIC, columns=['doc_id', 'topic_id', 'topic_weight'])
doctopic.set_index(['doc_id', 'topic_id'], inplace=True)
doctopic['topic_weight_zscore'] = stats.zscore(doctopic.topic_weight)
self.computed_thresh = round(doctopic.topic_weight.quantile(self.cfg_tw_quantile), 3)
doc = pd.DataFrame(DOC, columns=['doc_id', 'src_doc_id', 'doc_label'])
doc.set_index('doc_id', inplace=True)
self.put_table(doctopic, 'doctopic', index=True)
self.put_table(doc, 'doc', index=True)
else:
doctopic = pd.read_csv(src_file, sep='\t', header=None)
doc = pd.DataFrame(doctopic.iloc[:, 1])
doc.columns = ['doc_tmp']
doc['src_doc_id'] = doc.doc_tmp.apply(lambda x: int(x.split(',')[0]))
doc['doc_label'] = doc.doc_tmp.apply(lambda x: x.split(',')[1])
doc = doc[['src_doc_id', 'doc_label']]
doc.index.name = 'doc_id'
self.put_table(doc, 'doc', index=True)
doctopic.drop(1, axis = 1, inplace=True)
doctopic.rename(columns={0:'doc_id'}, inplace=True)
y = [col for col in doctopic.columns[1:]]
doctopic_narrow = pd.lreshape(doctopic, {'topic_weight': y})
doctopic_narrow['topic_id'] = [i for i in range(self.cfg_num_topics)
for doc_id in doctopic['doc_id']]
doctopic_narrow = doctopic_narrow[['doc_id', 'topic_id', 'topic_weight']]
doctopic_narrow.set_index(['doc_id', 'topic_id'], inplace=True)
doctopic_narrow['topic_weight_zscore'] = stats.zscore(doctopic_narrow.topic_weight)
self.computed_thresh = round(doctopic_narrow.topic_weight\
.quantile(self.cfg_tw_quantile), 3)
self.put_table(doctopic_narrow, 'doctopic', index=True)
# todo: Revisit this; is this the best place to do it?
self.set_config_item('computed_thresh', self.computed_thresh)
def import_table_topicphrase(self, src_file=None):
"""Import data into topicphrase table"""
if not src_file: src_file = self.mallet['train-topics']['xml-topic-phrase-report']
TOPICPHRASE = []
src = PoloFile(src_file)
tree = etree.parse(src.file)
for topic in tree.xpath('/topics/topic'):
topic_id = int(topic.xpath('@id')[0])
for phrase in topic.xpath('phrase'):
phrase_weight = float(phrase.xpath('@weight')[0])
phrase_count = int(phrase.xpath('@count')[0])
topic_phrase = phrase.xpath('text()')[0]
TOPICPHRASE.append((topic_id, topic_phrase, phrase_weight, phrase_count))
topicphrase = pd.DataFrame(TOPICPHRASE, columns=['topic_id', 'topic_phrase',
'phrase_weight', 'phrase_count'])
topicphrase.set_index(['topic_id', 'topic_phrase'], inplace=True)
self.put_table(topicphrase, 'topicphrase', index=True)
def add_topic_glosses(self):
"""Add glosses to topic table"""
sql = """
SELECT topic_id, topic_phrase AS topic_gloss,
MAX(phrase_weight) AS max_phrase_weight
FROM topicphrase
GROUP BY topic_id
"""
topicphrase = pd.read_sql_query(sql, self.conn)
topicphrase.set_index('topic_id', inplace=True)
topic = self.get_table('topic', set_index=True)
topic['topic_gloss'] = topicphrase.topic_gloss
self.put_table(topic, 'topic', index=True)
def import_table_config(self):
"""Import data into config table"""
# fixme: Make this automatic; find a way to dump all values
cfg = {}
cfg['trial'] = self.trial
cfg['dbfile'] = self.dbfile
cfg['thresh'] = self.cfg_thresh
cfg['slug'] = self.cfg_slug
cfg['num_topics'] = self.cfg_num_topics
cfg['base_path'] = self.cfg_base_path
cfg['file_prefix'] = self.file_prefix
config = pd.DataFrame({'key': list(cfg.keys()), 'value': list(cfg.values())})
self.put_table(config, 'config')
def add_diagnostics(self, src_file=None):
"""Add diagnostics data to topics and topicword_diags tables"""
if not src_file: src_file = self.mallet['train-topics']['diagnostics-file']
TOPIC = []
TOPICWORD = []
tkeys = ['id', 'tokens', 'document_entropy', 'word-length', 'coherence',
'uniform_dist', 'corpus_dist',
'eff_num_words', 'token-doc-diff', 'rank_1_docs',
'allocation_ratio', 'allocation_count',
'exclusivity']
tints = ['id', 'tokens']
wkeys = ['rank', 'count', 'prob', 'cumulative', 'docs', 'word-length', 'coherence',
'uniform_dist', 'corpus_dist', 'token-doc-diff', 'exclusivity']
wints = ['rank', 'count', 'docs', 'word-length']
src = PoloFile(src_file)
tree = etree.parse(src.file)
for topic in tree.xpath('/model/topic'):
tvals = []
for key in tkeys:
xpath = '@{}'.format(key)
if key in tints:
tvals.append(int(float(topic.xpath(xpath)[0])))
else:
tvals.append(float(topic.xpath(xpath)[0]))
TOPIC.append(tvals)
for word in topic.xpath('word'):
wvals = []
topic_id = tvals[0] # Hopefully
wvals.append(topic_id)
word_str = word.xpath('text()')[0]
wvals.append(word_str)
for key in wkeys:
xpath = '@{}'.format(key)
if key in wints:
wvals.append(int(float(word.xpath(xpath)[0])))
else:
wvals.append(float(word.xpath(xpath)[0]))
TOPICWORD.append(wvals)
tkeys = ['topic_{}'.format(re.sub('-', '_', k)) for k in tkeys]
wkeys = ['topic_id', 'word_str'] + wkeys
wkeys = [re.sub('-', '_', k) for k in wkeys]
topic_diags = pd.DataFrame(TOPIC, columns=tkeys)
topic_diags.set_index('topic_id', inplace=True)
topics = self.get_table('topic', set_index=True)
topics = pd.concat([topics, topic_diags], axis=1)
self.put_table(topics, 'topic', index=True)
topicword_diags = pd.DataFrame(TOPICWORD, columns=wkeys)
topicword_diags.set_index(['topic_id', 'word_str'], inplace=True)
word = self.get_table('word')
word.set_index('word_str', inplace=True)
topicword_diags = topicword_diags.join(word, how='inner')
topicword_diags.reset_index(inplace=True)
topicword_diags.set_index(['topic_id', 'word_id'], inplace=True)
self.put_table(topicword_diags, 'topicword_diag', index=True)
# fixme: Deleting mallet files seems not to be working
def del_mallet_files(self):
"""Delete MALLET files"""
file_keys = ['output-topic-keys', 'output-doc-topics',
'word-topic-counts-file', 'xml-topic-report',
'xml-topic-phrase-report', 'diagnostics-file',
'topic-word-weights-file']
for fk in file_keys:
if os.path.isfile(self.mallet['train-topics'][fk]):
print("Deleting {}".format(fk))
os.remove(str(self.mallet['train-topics'][fk]))
# UPDATE OR ADD TABLES WITH STATS
# todo: Consider moving into method that creates doc and doctopic tables
def add_topic_entropy(self):
"""Add entropy to topic table"""
doctopic = self.get_table('doctopic')
doc = self.get_table('doc')
#topic_entropy = doctopic.groupby('doc_id')['topic_weight'].apply(lambda x: pm.entropy(x))
#doc['topic_entropy'] = topic_entropy
doc['topic_entropy'] = doctopic.groupby('doc_id')['topic_weight'].apply(pm.entropy)
doc['topic_entropy_zscore'] = stats.zscore(doc.topic_entropy)
doc.set_index('doc_id', inplace=True)
self.put_table(doc, 'doc', index=True)
def create_table_topicpair(self):
"""Create topicpair table"""
thresh = self.get_thresh()
# thresh = self.cfg_thresh
# if thresh == 0: # fixme: Why is this zero?
# thresh = .5
# Get doc count to calculate topic frequencies
r = self.conn.execute("select count() from doc")
doc_num = int(r.fetchone()[0])
# Create the doctopic matrix dataframe
# todo: Find out if this can pull from an existing table
doctopic = self.get_table('doctopic', set_index=True)
dtm = doctopic['topic_weight'].unstack()
if dtm.columns.nlevels == 2:
dtm.columns = dtm.columns.droplevel()
del doctopic
# Add topic frequency data to topic table
topic = self.get_table('topic', set_index=True)
topic['topic_freq'] = topic.apply(lambda x: len(dtm[dtm[x.name] >= thresh]), axis=1)
topic['topic_rel_freq'] = topic.apply(lambda x: x.topic_freq / doc_num, axis=1)
self.put_table(topic, 'topic', index=True)
# Create topicword
import numpy as np
import sys
import math
from matplotlib import pyplot as plt
import time
np.random.seed(3141592)
# d_f(z) = dz/dx in terms of z = f(x)
def relu(z):
return np.maximum(z, 0.0)
def d_relu(z):
return np.where(z > 0, 1.0, 0.0)
def sigmoid(z):
return 1 / (1 + np.exp(-z))
def d_sigmoid(z):
return z * (1 - z)
class NeuralNetwork:
"""
Parameters
----------
batch_size: batch size for gradient descent
features: number of features in the data, also the size of input layer
architecture: list of hidden layer sizes
target_classes: number of target classes, also the size of output layer
due to one-hot encoding
activation: list of activation functions for each hidden, output layer
"""
def __init__(self,
batch_size,
features,
architecture,
target_classes,
activation,
learning_rate,
eps=1e-4,
adaptive=False,
max_iter=1000):
# indexing:
# 0: input layer,
# 1 - num_hidden_layers: hidden layers,
# num_hidden_layers + 1: output
# input validation
assert len(activation) == len(architecture) + 1
assert eps > 0
assert batch_size > 0
assert features > 0
assert target_classes > 0
assert learning_rate > 0
assert max_iter > 0
# architecture structure
self.num_hidden_layers = len(architecture)
self.features = features
self.architecture = [features] + architecture + [target_classes] # changed
self.target_classes = target_classes
# activation functions and derivatives
self.activation = [None for i in range(self.num_hidden_layers + 2)]
self.d_activation = [None for i in range(self.num_hidden_layers + 2)]
for i in range(len(activation)):
if activation[i] == 'relu':
self.activation[i + 1] = relu
self.d_activation[i + 1] = d_relu
elif activation[i] == 'sigmoid':
self.activation[i + 1] = sigmoid
self.d_activation[i + 1] = d_sigmoid
else:
raise ValueError('Unsupported activation function,'
'choose one of relu and sigmoid')
# He initialization (variance of 2/(number of units in the previous layer))
self.theta = [None] + [np.random.uniform(-1, 1, (n+1, k)) * math.sqrt(6/n) for (n, k) in zip(self.architecture[:-1], self.architecture[1:])]
# SGD parameters
self.batch_size = batch_size
self.learning_rate = learning_rate
self.eps = eps
self.adaptive = adaptive
self.max_iter = max_iter
def train(self, _x_train, _y_train):
# reformatting data
m = _x_train.shape[0]
X_train = np.concatenate((np.ones((m, 1)), _x_train), axis=1)
y_train = np.concatenate((np.ones((m, 1)), _y_train), axis=1)
# variables to keep track of SGD
prev_error = math.inf
epoch = 1
# for each layer, keep track of outputs of that layer
# as well as the computed deltas
layer_outputs = [None for _ in range(len(self.architecture))]
delta = [None for _ in range(len(self.architecture))]
while True:
# max number of epochs - to prevent infinite loop
# however this is never triggered in any of the runs
if epoch == self.max_iter:
break
# choosing the learning rate
learning_rate = self.learning_rate
if self.adaptive:
learning_rate /= math.sqrt(epoch)
# shuffle X_train and y_train first
p = np.random.permutation(m)
X_train, y_train = X_train[p], y_train[p]
# initialize variables related to SGD
average_error = 0
M = self.batch_size
B = m // M
for i in range(B):
# extract mini-batch from the data
input_batch_X = X_train[i * M : (i + 1) * M]
input_batch_y = y_train[i * M : (i + 1) * M][:, 1:]
# forward propagate and keep track of outputs of each unit
layer_outputs[0] = input_batch_X
for layer in range(1, len(self.architecture)):
layer_outputs[layer] = np.concatenate((np.ones((M, 1)), self.activation[layer](layer_outputs[layer - 1] @ self.theta[layer])), axis=1)
last_output = layer_outputs[-1][:, 1:]
last_d_activation = self.d_activation[-1]
# compute loss
average_error += np.sum((input_batch_y - last_output) ** 2) / (2 * M)
# compute deltas using backpropagation
delta[-1] = (input_batch_y - last_output).T * last_d_activation(last_output.T) / M
for layer in range(len(self.architecture) - 2, 0, -1): # theta, layer_outputs
delta[layer] = (self.theta[layer + 1][1:, :] @ delta[layer + 1]) * self.d_activation[layer](layer_outputs[layer][:, 1:].T)
# using deltas find gradient for each theta[layer] and
# do batch update on theta
for layer in range(1, len(self.architecture)):
self.theta[layer] += learning_rate * (delta[layer] @ layer_outputs[layer - 1]).T
# average loss over this epoch
average_error /= B
#print('Iteration:', epoch, 'loss:', average_error)
# main convergence criteria
if abs(average_error - prev_error) < self.eps:
return epoch, average_error
prev_error = average_error
epoch += 1
return epoch, prev_error
def predict(self, x_test):
# prepend a bias column so the test data matches the training-time layout
m = x_test.shape[0]
layer_output = np.concatenate((np.array([np.ones(m)]).T, x_test), axis=1)
# feedforwarding
for layer in range(1, len(self.architecture)):
layer_output = self.activation[layer](layer_output @ self.theta[layer])
layer_output = np.concatenate((np.array([np.ones(m)]).T, layer_output), axis=1)
# returning predictions as class labels (not one-hot encoding)
return np.argmax(layer_output[:, 1:], axis=1)
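# Minimal usage sketch for the class above (not part of the original script): it
# trains on a small synthetic dataset and returns the predictions. All shapes,
# names and hyperparameters here are illustrative only.
def _example_neural_network_usage():
    rng = np.random.RandomState(0)
    X = rng.rand(100, 20)                      # 100 samples, 20 features
    y = rng.randint(0, 3, size=100)            # 3 target classes
    y_onehot = np.zeros((100, 3))
    y_onehot[np.arange(100), y] = 1            # one-hot encode the labels
    nn = NeuralNetwork(batch_size=10,
                       features=20,
                       architecture=[16],      # one hidden layer of 16 units
                       target_classes=3,
                       activation=['sigmoid', 'sigmoid'],
                       learning_rate=0.1)
    epochs, error = nn.train(X, y_onehot)
    return nn.predict(X), epochs, error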
def one_hot_encoder(y, num_classes):
b = np.zeros((y.shape[0], num_classes))
b[np.arange(y.shape[0]), y] = 1
return b
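# e.g. one_hot_encoder(np.array([0, 2, 1]), 3) returns
#   [[1, 0, 0],
#    [0, 0, 1],
#    [0, 1, 0]]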
def compressor(x):
return np.reshape(x, (x.shape[0], x.shape[1] * x.shape[2]))
def mainB():
# extracting data
X_train, y_train = compressor(np.load(sys.argv[1])), np.load(sys.argv[2])
X_test, y_test = compressor(np.load(sys.argv[3])), np.load(sys.argv[4])
X_train = X_train.astype('float32') / 255
X_test = X_test.astype('float32') / 255
# statistics
units = []
test_accuracies = []
train_accuracies = []
elapsed_time = []
# possible values for hidden layer units
experimental_values = [1, 10, 50, 100, 500]
# iterating over all choices for hidden layer units
for hidden_layer_units in experimental_values:
# parameters for the neural network
num_hidden_layers = 1
features = 784
batch_size = 100
activation = ['sigmoid' for i in range(num_hidden_layers + 1)]
architecture = [hidden_layer_units] * num_hidden_layers
target_classes = 10
learning_rate = 0.1 # or 0.001
eps = 1e-4
# initializing the neural network
nn = NeuralNetwork(batch_size=batch_size,
features=features,
architecture=architecture,
target_classes=target_classes,
activation=activation,
learning_rate=learning_rate,
eps=eps)
# training the data
t = time.time()
epoch, average_error = nn.train(X_train, one_hot_encoder(y_train, target_classes))
# prediction on test and train data
y_pred_test = nn.predict(X_test)
y_pred_train = nn.predict(X_train)
# statistics
elapsed_time.append(time.time() - t)
units.append(hidden_layer_units)
test_accuracies.append(100 * y_pred_test[y_pred_test == y_test].shape[0] / y_pred_test.shape[0])
train_accuracies.append(100 * y_pred_train[y_pred_train == y_train].shape[0] / y_pred_train.shape[0])
# printing stats
print('hidden layer units:', hidden_layer_units)
print('test accuracy:', test_accuracies[-1], '%')
print('train accuracy:', train_accuracies[-1], '%')
print('time taken:', elapsed_time[-1])
print('number of epochs:', epoch)
print('average error:', average_error)
# plotting the graphs
plt.xscale('log')
plt.title('Accuracy plot')
plt.xlabel('Hidden layer units')
plt.ylabel('Accuracy (in %)')
plt.plot(units, test_accuracies, label='Test accuracies')
plt.plot(units, train_accuracies, label='Train accuracies')
plt.savefig('nn_accuracy_plot_nonadaptive.png')
plt.close()
plt.xscale('log')
plt.title('Time taken')
plt.xlabel('Hidden layer units')
plt.ylabel('Time taken (in s)')
plt.plot(units, elapsed_time)
plt.savefig('nn_time_plot_nonadaptive.png')
def mainC():
# extracting data
X_train, y_train = compressor(np.load(sys.argv[1])), np.load(sys.argv[2])
X_test, y_test = compressor(np.load(sys.argv[3])), np.load(sys.argv[4])
X_train = X_train.astype('float32') / 255
X_test = X_test.astype('float32') / 255
# statistics
units = []
test_accuracies = []
train_accuracies = []
elapsed_time = []
# possible values for hidden layer units
experimental_values = [1, 10, 50, 100, 500]
# common parameters for neural network
num_hidden_layers = 1
features = 784
batch_size = 100
activation = ['sigmoid' for i in range(num_hidden_layers + 1)]
target_classes = 10
learning_rate = 0.5
eps = 1e-4
# iterating over all hidden layer unit values
for hidden_layer_units in experimental_values:
# architecture
architecture = [hidden_layer_units] * num_hidden_layers
# initializing the neural network
nn = NeuralNetwork(batch_size=batch_size,
features=features,
architecture=architecture,
target_classes=target_classes,
activation=activation,
learning_rate=learning_rate,
eps=eps,
adaptive=True)
t = time.time()
# training
epoch, average_error = nn.train(np.copy(X_train), one_hot_encoder(y_train, target_classes))
# prediction on test and train data
y_pred_test = nn.predict(np.copy(X_test))
y_pred_train = nn.predict(np.copy(X_train))
# statistics
elapsed_time.append(time.time() - t)
units.append(hidden_layer_units)
test_accuracies.append(100 * y_pred_test[y_pred_test == y_test].shape[0] / y_pred_test.shape[0])
train_accuracies.append(100 * y_pred_train[y_pred_train == y_train].shape[0] / y_pred_train.shape[0])
# printing statistics
print('hidden layer units:', hidden_layer_units)
print('test accuracy:', test_accuracies[-1], '%')
print('train accuracy:', train_accuracies[-1], '%')
print('time taken:', elapsed_time[-1])
print('number of epochs:', epoch)
print('average error:', average_error)
# plotting
plt.xscale('log')
plt.title('Accuracy plot')
plt.xlabel('Hidden layer units')
plt.ylabel('Accuracy (in %)')
plt.plot(units, test_accuracies, label='Test accuracies')
plt.plot(units, train_accuracies, label='Train accuracies')
plt.savefig('nn_accuracy_plot_adaptive.png')
plt.close()
plt.xscale('log')
plt.title('Time taken')
plt.xlabel('Hidden layer units')
plt.ylabel('Time taken (in s)')
plt.plot(units, elapsed_time)
plt.savefig('nn_time_plot_adaptive.png')
def mainD():
# extracting data
X_train, y_train = compressor(np.load(sys.argv[1])), np.load(sys.argv[2])
X_test, y_test = compressor(np.load(sys.argv[3])), np.load(sys.argv[4])
X_train = X_train.astype('float32') / 255
X_test = X_test.astype('float32') / 255
# statistics
units = []
test_accuracies = []
train_accuracies = []
elapsed_time = []
# parameters for the neural networks
num_hidden_layers = 2
hidden_layer_units = 100
architecture = [hidden_layer_units] * num_hidden_layers
features = 784
batch_size = 100
relu_activation = ['relu' for i in range(num_hidden_layers + 1)]
relu_activation[-1] = 'sigmoid'
sigmoid_activation = ['sigmoid' for i in range(num_hidden_layers + 1)]
target_classes = 10
learning_rate = 0.5
eps = 1e-4
# iterating over both architectures
for activation in [relu_activation, sigmoid_activation]:
# initializing the neural network
nn = NeuralNetwork(batch_size=batch_size,
features=features,
architecture=architecture,
target_classes=target_classes,
activation=activation,
learning_rate=learning_rate,
eps=eps,
adaptive=True)
t = time.time()
# training
epoch, average_error = nn.train(np.copy(X_train), one_hot_encoder(y_train, target_classes))
# prediction on test and training data
y_pred_test = nn.predict(np.copy(X_test))
y_pred_train = nn.predict(np.copy(X_train))
# statistics
elapsed_time.append(time.time() - t)
units.append(hidden_layer_units)
# Generated from StlParser.g4 by ANTLR 4.5.1
# encoding: utf-8
from __future__ import print_function
from antlr4 import *
from io import StringIO
def serializedATN():
with StringIO() as buf:
buf.write(u"\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\3")
buf.write(u"H\u010c\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t")
buf.write(u"\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r")
buf.write(u"\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4")
buf.write(u"\23\t\23\4\24\t\24\4\25\t\25\4\26\t\26\3\2\3\2\3\2\3")
buf.write(u"\2\3\2\3\2\3\3\3\3\5\3\65\n\3\3\3\3\3\5\39\n\3\5\3;\n")
buf.write(u"\3\3\4\3\4\3\5\3\5\3\5\3\5\3\5\5\5D\n\5\3\5\3\5\3\5\5")
buf.write(u"\5I\n\5\3\5\3\5\3\5\5\5N\n\5\3\5\3\5\3\5\5\5S\n\5\3\5")
buf.write(u"\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5")
buf.write(u"\3\5\3\5\3\5\3\5\3\5\3\5\5\5i\n\5\3\5\3\5\3\5\3\5\3\5")
buf.write(u"\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5")
buf.write(u"\3\5\3\5\3\5\3\5\5\5\u0081\n\5\3\5\3\5\3\5\3\5\5\5\u0087")
buf.write(u"\n\5\3\5\3\5\3\5\3\5\5\5\u008d\n\5\3\5\7\5\u0090\n\5")
buf.write(u"\f\5\16\5\u0093\13\5\3\6\3\6\3\6\3\7\5\7\u0099\n\7\3")
buf.write(u"\7\7\7\u009c\n\7\f\7\16\7\u009f\13\7\3\7\3\7\7\7\u00a3")
buf.write(u"\n\7\f\7\16\7\u00a6\13\7\3\7\6\7\u00a9\n\7\r\7\16\7\u00aa")
buf.write(u"\3\b\3\b\3\b\3\t\3\t\3\t\3\t\3\t\3\n\3\n\5\n\u00b7\n")
buf.write(u"\n\3\n\3\n\3\13\3\13\5\13\u00bd\n\13\3\f\3\f\3\f\3\r")
buf.write(u"\3\r\3\r\3\r\3\r\3\r\3\r\3\16\5\16\u00ca\n\16\3\16\3")
buf.write(u"\16\3\16\5\16\u00cf\n\16\3\17\3\17\3\17\3\17\3\17\3\17")
buf.write(u"\3\20\3\20\3\20\3\20\5\20\u00db\n\20\3\21\3\21\3\22\3")
buf.write(u"\22\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\5\23\u00e9")
buf.write(u"\n\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3")
buf.write(u"\23\3\23\3\23\7\23\u00f7\n\23\f\23\16\23\u00fa\13\23")
buf.write(u"\3\24\3\24\3\24\3\24\3\24\3\24\5\24\u0102\n\24\3\25\3")
buf.write(u"\25\3\25\3\25\5\25\u0108\n\25\3\26\3\26\3\26\2\4\b$\27")
buf.write(u"\2\4\6\b\n\f\16\20\22\24\26\30\32\34\36 \"$&(*\2\6\3")
buf.write(u"\2\16\17\3\2\23\26\4\2\37\"DD\3\2\32\33\u012a\2,\3\2")
buf.write(u"\2\2\4:\3\2\2\2\6<\3\2\2\2\bh\3\2\2\2\n\u0094\3\2\2\2")
buf.write(u"\f\u0098\3\2\2\2\16\u00ac\3\2\2\2\20\u00af\3\2\2\2\22")
buf.write(u"\u00b6\3\2\2\2\24\u00bc\3\2\2\2\26\u00be\3\2\2\2\30\u00c1")
buf.write(u"\3\2\2\2\32\u00c9\3\2\2\2\34\u00d0\3\2\2\2\36\u00da\3")
buf.write(u"\2\2\2 \u00dc\3\2\2\2\"\u00de\3\2\2\2$\u00e8\3\2\2\2")
buf.write(u"&\u0101\3\2\2\2(\u0107\3\2\2\2*\u0109\3\2\2\2,-\7\13")
buf.write(u"\2\2-.\5\4\3\2./\t\2\2\2/\60\5\4\3\2\60\61\7\f\2\2\61")
buf.write(u"\3\3\2\2\2\62\64\5(\25\2\63\65\5\6\4\2\64\63\3\2\2\2")
buf.write(u"\64\65\3\2\2\2\65;\3\2\2\2\668\7D\2\2\679\5\6\4\28\67")
buf.write(u"\3\2\2\289\3\2\2\29;\3\2\2\2:\62\3\2\2\2:\66\3\2\2\2")
buf.write(u";\5\3\2\2\2<=\t\3\2\2=\7\3\2\2\2>?\b\5\1\2?@\7\'\2\2")
buf.write(u"@i\5\b\5\23AC\7/\2\2BD\5\2\2\2CB\3\2\2\2CD\3\2\2\2DE")
buf.write(u"\3\2\2\2Ei\5\b\5\rFH\7\60\2\2GI\5\2\2\2HG\3\2\2\2HI\3")
buf.write(u"\2\2\2IJ\3\2\2\2Ji\5\b\5\fKM\7\63\2\2LN\5\2\2\2ML\3\2")
buf.write(u"\2\2MN\3\2\2\2NO\3\2\2\2Oi\5\b\5\tPR\7\64\2\2QS\5\2\2")
buf.write(u"\2RQ\3\2\2\2RS\3\2\2\2ST\3\2\2\2Ti\5\b\5\bUV\7\67\2\2")
buf.write(u"Vi\5\b\5\4WX\7\66\2\2Xi\5\b\5\3Yi\5$\23\2Z[\7\7\2\2[")
buf.write(u"\\\5\b\5\2\\]\7\b\2\2]i\3\2\2\2^_\7-\2\2_`\7\7\2\2`a")
buf.write(u"\5\b\5\2ab\7\b\2\2bi\3\2\2\2cd\7.\2\2de\7\7\2\2ef\5\b")
buf.write(u"\5\2fg\7\b\2\2gi\3\2\2\2h>\3\2\2\2hA\3\2\2\2hF\3\2\2")
buf.write(u"\2hK\3\2\2\2hP\3\2\2\2hU\3\2\2\2hW\3\2\2\2hY\3\2\2\2")
buf.write(u"hZ\3\2\2\2h^\3\2\2\2hc\3\2\2\2i\u0091\3\2\2\2jk\f\25")
buf.write(u"\2\2kl\5&\24\2lm\5\b\5\26m\u0090\3\2\2\2no\f\22\2\2o")
buf.write(u"p\7(\2\2p\u0090\5\b\5\23qr\f\21\2\2rs\7)\2\2s\u0090\5")
buf.write(u"\b\5\22tu\f\20\2\2uv\7+\2\2v\u0090\5\b\5\21wx\f\17\2")
buf.write(u"\2xy\7*\2\2y\u0090\5\b\5\20z{\f\16\2\2{|\7,\2\2|\u0090")
buf.write(u"\5\b\5\17}~\f\13\2\2~\u0080\7\61\2\2\177\u0081\5\2\2")
buf.write(u"\2\u0080\177\3\2\2\2\u0080\u0081\3\2\2\2\u0081\u0082")
buf.write(u"\3\2\2\2\u0082\u0090\5\b\5\f\u0083\u0084\f\n\2\2\u0084")
buf.write(u"\u0086\7\62\2\2\u0085\u0087\5\2\2\2\u0086\u0085\3\2\2")
buf.write(u"\2\u0086\u0087\3\2\2\2\u0087\u0088\3\2\2\2\u0088\u0090")
buf.write(u"\5\b\5\13\u0089\u008a\f\7\2\2\u008a\u008c\7\65\2\2\u008b")
buf.write(u"\u008d\5\2\2\2\u008c\u008b\3\2\2\2\u008c\u008d\3\2\2")
buf.write(u"\2\u008d\u008e\3\2\2\2\u008e\u0090\5\b\5\b\u008fj\3\2")
buf.write(u"\2\2\u008fn\3\2\2\2\u008fq\3\2\2\2\u008ft\3\2\2\2\u008f")
buf.write(u"w\3\2\2\2\u008fz\3\2\2\2\u008f}\3\2\2\2\u008f\u0083\3")
buf.write(u"\2\2\2\u008f\u0089\3\2\2\2\u0090\u0093\3\2\2\2\u0091")
buf.write(u"\u008f\3\2\2\2\u0091\u0092\3\2\2\2\u0092\t\3\2\2\2\u0093")
buf.write(u"\u0091\3\2\2\2\u0094\u0095\5\f\7\2\u0095\u0096\7\2\2")
buf.write(u"\3\u0096\13\3\2\2\2\u0097\u0099\5\16\b\2\u0098\u0097")
buf.write(u"\3\2\2\2\u0098\u0099\3\2\2\2\u0099\u009d\3\2\2\2\u009a")
buf.write(u"\u009c\5\20\t\2\u009b\u009a\3\2\2\2\u009c\u009f\3\2\2")
buf.write(u"\2\u009d\u009b\3\2\2\2\u009d\u009e\3\2\2\2\u009e\u00a4")
buf.write(u"\3\2\2\2\u009f\u009d\3\2\2\2\u00a0\u00a3\5\24\13\2\u00a1")
buf.write(u"\u00a3\5\26\f\2\u00a2\u00a0\3\2\2\2\u00a2\u00a1\3\2\2")
buf.write(u"\2\u00a3\u00a6\3\2\2\2\u00a4\u00a2\3\2\2\2\u00a4\u00a5")
buf.write(u"\3\2\2\2\u00a5\u00a8\3\2\2\2\u00a6\u00a4\3\2\2\2\u00a7")
buf.write(u"\u00a9\5\22\n\2\u00a8\u00a7\3\2\2\2\u00a9\u00aa\3\2\2")
buf.write(u"\2\u00aa\u00a8\3\2\2\2\u00aa\u00ab\3\2\2\2\u00ab\r\3")
buf.write(u"\2\2\2\u00ac\u00ad\7%\2\2\u00ad\u00ae\7D\2\2\u00ae\17")
buf.write(u"\3\2\2\2\u00af\u00b0\7&\2\2\u00b0\u00b1\7D\2\2\u00b1")
buf.write(u"\u00b2\7\31\2\2\u00b2\u00b3\7D\2\2\u00b3\21\3\2\2\2\u00b4")
buf.write(u"\u00b5\7D\2\2\u00b5\u00b7\7>\2\2\u00b6\u00b4\3\2\2\2")
buf.write(u"\u00b6\u00b7\3\2\2\2\u00b7\u00b8\3\2\2\2\u00b8\u00b9")
buf.write(u"\5\b\5\2\u00b9\23\3\2\2\2\u00ba\u00bd\5\32\16\2\u00bb")
buf.write(u"\u00bd\5\34\17\2\u00bc\u00ba\3\2\2\2\u00bc\u00bb\3\2")
buf.write(u"\2\2\u00bd\25\3\2\2\2\u00be\u00bf\7\21\2\2\u00bf\u00c0")
buf.write(u"\5\30\r\2\u00c0\27\3\2\2\2\u00c1\u00c2\7\30\2\2\u00c2")
buf.write(u"\u00c3\7\7\2\2\u00c3\u00c4\7D\2\2\u00c4\u00c5\7\17\2")
buf.write(u"\2\u00c5\u00c6\7D\2\2\u00c6\u00c7\7\b\2\2\u00c7\31\3")
buf.write(u"\2\2\2\u00c8\u00ca\5\"\22\2\u00c9\u00c8\3\2\2\2\u00c9")
buf.write(u"\u00ca\3\2\2\2\u00ca\u00cb\3\2\2\2\u00cb\u00cc\5 \21")
buf.write(u"\2\u00cc\u00ce\7D\2\2\u00cd\u00cf\5\36\20\2\u00ce\u00cd")
buf.write(u"\3\2\2\2\u00ce\u00cf\3\2\2\2\u00cf\33\3\2\2\2\u00d0\u00d1")
buf.write(u"\7\35\2\2\u00d1\u00d2\5 \21\2\u00d2\u00d3\7D\2\2\u00d3")
buf.write(u"\u00d4\7>\2\2\u00d4\u00d5\5(\25\2\u00d5\35\3\2\2\2\u00d6")
buf.write(u"\u00d7\7>\2\2\u00d7\u00db\5(\25\2\u00d8\u00d9\7>\2\2")
buf.write(u"\u00d9\u00db\5\b\5\2\u00da\u00d6\3\2\2\2\u00da\u00d8")
buf.write(u"\3\2\2\2\u00db\37\3\2\2\2\u00dc\u00dd\t\4\2\2\u00dd!")
buf.write(u"\3\2\2\2\u00de\u00df\t\5\2\2\u00df#\3\2\2\2\u00e0\u00e1")
buf.write(u"\b\23\1\2\u00e1\u00e9\7D\2\2\u00e2\u00e9\5(\25\2\u00e3")
buf.write(u"\u00e4\7\22\2\2\u00e4\u00e5\7\7\2\2\u00e5\u00e6\5$\23")
buf.write(u"\2\u00e6\u00e7\7\b\2\2\u00e7\u00e9\3\2\2\2\u00e8\u00e0")
buf.write(u"\3\2\2\2\u00e8\u00e2\3\2\2\2\u00e8\u00e3\3\2\2\2\u00e9")
buf.write(u"\u00f8\3\2\2\2\u00ea\u00eb\f\7\2\2\u00eb\u00ec\7\4\2")
buf.write(u"\2\u00ec\u00f7\5$\23\b\u00ed\u00ee\f\6\2\2\u00ee\u00ef")
buf.write(u"\7\3\2\2\u00ef\u00f7\5$\23\7\u00f0\u00f1\f\5\2\2\u00f1")
buf.write(u"\u00f2\7\5\2\2\u00f2\u00f7\5$\23\6\u00f3\u00f4\f\4\2")
buf.write(u"\2\u00f4\u00f5\7\6\2\2\u00f5\u00f7\5$\23\5\u00f6\u00ea")
buf.write(u"\3\2\2\2\u00f6\u00ed\3\2\2\2\u00f6\u00f0\3\2\2\2\u00f6")
buf.write(u"\u00f3\3\2\2\2\u00f7\u00fa\3\2\2\2\u00f8\u00f6\3\2\2")
buf.write(u"\2\u00f8\u00f9\3\2\2\2\u00f9%\3\2\2\2\u00fa\u00f8\3\2")
buf.write(u"\2\2\u00fb\u0102\7;\2\2\u00fc\u0102\7:\2\2\u00fd\u0102")
buf.write(u"\7=\2\2\u00fe\u0102\7<\2\2\u00ff\u0102\78\2\2\u0100\u0102")
buf.write(u"\79\2\2\u0101\u00fb\3\2\2\2\u0101\u00fc\3\2\2\2\u0101")
buf.write(u"\u00fd\3\2\2\2\u0101\u00fe\3\2\2\2\u0101\u00ff\3\2\2")
buf.write(u"\2\u0101\u0100\3\2\2\2\u0102\'\3\2\2\2\u0103\u0108\7")
buf.write(u"B\2\2\u0104\u0108\7C\2\2\u0105\u0106\7\3\2\2\u0106\u0108")
buf.write(u"\5(\25\2\u0107\u0103\3\2\2\2\u0107\u0104\3\2\2\2\u0107")
buf.write(u"\u0105\3\2\2\2\u0108)\3\2\2\2\u0109\u010a\7D\2\2\u010a")
buf.write(u"+\3\2\2\2\36\648:CHMRh\u0080\u0086\u008c\u008f\u0091")
buf.write(u"\u0098\u009d\u00a2\u00a4\u00aa\u00b6\u00bc\u00c9\u00ce")
buf.write(u"\u00da\u00e8\u00f6\u00f8\u0101\u0107")
return buf.getvalue()
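# Minimal driver sketch (not produced by ANTLR): it assumes the matching lexer
# class StlLexer, generated from the companion StlLexer.g4 grammar, is importable
# as a sibling module, and uses `specification_file` (rule 4 in ruleNames below)
# as the entry rule; adjust if a different start rule is needed.
def _parse_stl_text(text):
    from StlLexer import StlLexer             # assumed generated lexer module
    tokens = CommonTokenStream(StlLexer(InputStream(text)))
    parser = StlParser(tokens)
    return parser.specification_file()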
class StlParser ( Parser ):
grammarFileName = "StlParser.g4"
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
sharedContextCache = PredictionContextCache()
literalNames = [ u"<INVALID>", u"'-'", u"'+'", u"'*'", u"'/'", u"'('",
u"')'", u"'{'", u"'}'", u"'['", u"']'", u"';'", u"':'",
u"','", u"'.'", u"'@'", u"'abs'", u"'s'", u"'ms'",
u"'us'", u"'ns'", u"'ps'", u"'topic'", u"'import'",
u"'input'", u"'output'", u"'internal'", u"'const'",
u"'real'", u"'float'", u"'long'", u"'complex'", u"'int'",
u"'bool'", u"'assertion'", u"'specification'", u"'from'",
u"<INVALID>", u"<INVALID>", u"<INVALID>", u"<INVALID>",
u"<INVALID>", u"'xor'", u"'rise'", u"'fall'", u"<INVALID>",
u"<INVALID>", u"<INVALID>", u"<INVALID>", u"<INVALID>",
u"<INVALID>", u"<INVALID>", u"<INVALID>", u"<INVALID>",
u"'=='", u"'!=='", u"'>='", u"'<='", u"'>'", u"'<'",
u"'='" ]
symbolicNames = [ u"<INVALID>", u"MINUS", u"PLUS", u"TIMES", u"DIVIDE",
u"LPAREN", u"RPAREN", u"LBRACE", u"RBRACE", u"LBRACK",
u"RBRACK", u"SEMICOLON", u"COLON", u"COMMA", u"DOT",
u"AT", u"ABS", u"SEC", u"MSEC", u"USEC", u"NSEC",
u"PSEC", u"ROS_Topic", u"Import", u"Input", u"Output",
u"Internal", u"Constant", u"DomainTypeReal", u"DomainTypeFloat",
u"DomainTypeLong", u"DomainTypeComplex", u"DomainTypeInt",
u"DomainTypeBool", u"Assertion", u"Specification",
u"From", u"NotOperator", u"OrOperator", u"AndOperator",
u"IffOperator", u"ImpliesOperator", u"XorOperator",
u"RiseOperator", u"FallOperator", u"AlwaysOperator",
u"EventuallyOperator", u"UntilOperator", u"UnlessOperator",
u"HistoricallyOperator", u"OnceOperator", u"SinceOperator",
u"NextOperator", u"PreviousOperator", u"EqualOperator",
u"NotEqualOperator", u"GreaterOrEqualOperator", u"LesserOrEqualOperator",
u"GreaterOperator", u"LesserOperator", u"EQUAL", u"BooleanLiteral",
u"TRUE", u"FALSE", u"IntegerLiteral", u"RealLiteral",
u"Identifier", u"LINE_TERMINATOR", u"WHITESPACE",
u"COMMENT", u"LINE_COMMENT" ]
RULE_interval = 0
RULE_intervalTime = 1
RULE_unit = 2
RULE_expression = 3
RULE_specification_file = 4
RULE_specification = 5
RULE_spec = 6
RULE_modimport = 7
RULE_assertion = 8
RULE_declaration = 9
RULE_annotation = 10
RULE_annotation_type = 11
RULE_variableDeclaration = 12
RULE_constantDeclaration = 13
RULE_assignment = 14
RULE_domainType = 15
RULE_ioType = 16
RULE_real_expression = 17
RULE_comparisonOp = 18
RULE_literal = 19
RULE_identifier = 20
ruleNames = [ u"interval", u"intervalTime", u"unit", u"expression",
u"specification_file", u"specification", u"spec", u"modimport",
u"assertion", u"declaration", u"annotation", u"annotation_type",
u"variableDeclaration", u"constantDeclaration", u"assignment",
u"domainType", u"ioType", u"real_expression", u"comparisonOp",
u"literal", u"identifier" ]
EOF = Token.EOF
MINUS=1
PLUS=2
TIMES=3
DIVIDE=4
LPAREN=5
RPAREN=6
LBRACE=7
RBRACE=8
LBRACK=9
RBRACK=10
SEMICOLON=11
COLON=12
COMMA=13
DOT=14
AT=15
ABS=16
SEC=17
MSEC=18
USEC=19
NSEC=20
PSEC=21
ROS_Topic=22
Import=23
Input=24
Output=25
Internal=26
Constant=27
DomainTypeReal=28
DomainTypeFloat=29
DomainTypeLong=30
DomainTypeComplex=31
DomainTypeInt=32
DomainTypeBool=33
Assertion=34
Specification=35
From=36
NotOperator=37
OrOperator=38
AndOperator=39
IffOperator=40
ImpliesOperator=41
XorOperator=42
RiseOperator=43
FallOperator=44
AlwaysOperator=45
EventuallyOperator=46
UntilOperator=47
UnlessOperator=48
HistoricallyOperator=49
OnceOperator=50
SinceOperator=51
NextOperator=52
PreviousOperator=53
EqualOperator=54
NotEqualOperator=55
GreaterOrEqualOperator=56
LesserOrEqualOperator=57
GreaterOperator=58
LesserOperator=59
EQUAL=60
BooleanLiteral=61
TRUE=62
FALSE=63
IntegerLiteral=64
RealLiteral=65
Identifier=66
LINE_TERMINATOR=67
WHITESPACE=68
COMMENT=69
LINE_COMMENT=70
def __init__(self, input):
super(StlParser, self).__init__(input)
self.checkVersion("4.5.1")
self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)
self._predicates = None
class IntervalContext(ParserRuleContext):
def __init__(self, parser, parent=None, invokingState=-1):
super(StlParser.IntervalContext, self).__init__(parent, invokingState)
self.parser = parser
def LBRACK(self):
return self.getToken(StlParser.LBRACK, 0)
def intervalTime(self, i=None):
if i is None:
return self.getTypedRuleContexts(StlParser.IntervalTimeContext)
else:
return self.getTypedRuleContext(StlParser.IntervalTimeContext,i)
def RBRACK(self):
return self.getToken(StlParser.RBRACK, 0)
def COLON(self):
return self.getToken(StlParser.COLON, 0)
def COMMA(self):
return self.getToken(StlParser.COMMA, 0)
def getRuleIndex(self):
return StlParser.RULE_interval
def accept(self, visitor):
if hasattr(visitor, "visitInterval"):
return visitor.visitInterval(self)
else:
return visitor.visitChildren(self)
def interval(self):
localctx = StlParser.IntervalContext(self, self._ctx, self.state)
self.enterRule(localctx, 0, self.RULE_interval)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 42
self.match(StlParser.LBRACK)
self.state = 43
self.intervalTime()
self.state = 44
_la = self._input.LA(1)
if not(_la==StlParser.COLON or _la==StlParser.COMMA):
self._errHandler.recoverInline(self)
else:
self.consume()
self.state = 45
self.intervalTime()
self.state = 46
self.match(StlParser.RBRACK)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class IntervalTimeContext(ParserRuleContext):
def __init__(self, parser, parent=None, invokingState=-1):
super(StlParser.IntervalTimeContext, self).__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return StlParser.RULE_intervalTime
def copyFrom(self, ctx):
super(StlParser.IntervalTimeContext, self).copyFrom(ctx)
class IntervalTimeLiteralContext(IntervalTimeContext):
def __init__(self, parser, ctx): # actually a StlParser.IntervalTimeContext)
super(StlParser.IntervalTimeLiteralContext, self).__init__(parser)
self.copyFrom(ctx)
def literal(self):
return self.getTypedRuleContext(StlParser.LiteralContext,0)
def unit(self):
return self.getTypedRuleContext(StlParser.UnitContext,0)
def accept(self, visitor):
if hasattr(visitor, "visitIntervalTimeLiteral"):
return visitor.visitIntervalTimeLiteral(self)
else:
return visitor.visitChildren(self)
class ConstantTimeLiteralContext(IntervalTimeContext):
def __init__(self, parser, ctx): # actually a StlParser.IntervalTimeContext)
super(StlParser.ConstantTimeLiteralContext, self).__init__(parser)
self.copyFrom(ctx)
def Identifier(self):
return self.getToken(StlParser.Identifier, 0)
def unit(self):
return self.getTypedRuleContext(StlParser.UnitContext,0)
def accept(self, visitor):
if hasattr(visitor, "visitConstantTimeLiteral"):
return visitor.visitConstantTimeLiteral(self)
else:
return visitor.visitChildren(self)
def intervalTime(self):
localctx = StlParser.IntervalTimeContext(self, self._ctx, self.state)
self.enterRule(localctx, 2, self.RULE_intervalTime)
self._la = 0 # Token type
try:
self.state = 56
token = self._input.LA(1)
if token in [StlParser.MINUS, StlParser.IntegerLiteral, StlParser.RealLiteral]:
localctx = StlParser.IntervalTimeLiteralContext(self, localctx)
self.enterOuterAlt(localctx, 1)
self.state = 48
self.literal()
self.state = 50
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << StlParser.SEC) | (1 << StlParser.MSEC) | (1 << StlParser.USEC) | (1 << StlParser.NSEC))) != 0):
self.state = 49
self.unit()
elif token in [StlParser.Identifier]:
localctx = StlParser.ConstantTimeLiteralContext(self, localctx)
self.enterOuterAlt(localctx, 2)
self.state = 52
self.match(StlParser.Identifier)
self.state = 54
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << StlParser.SEC) | (1 << StlParser.MSEC) | (1 << StlParser.USEC) | (1 << StlParser.NSEC))) != 0):
self.state = 53
self.unit()
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class UnitContext(ParserRuleContext):
def __init__(self, parser, parent=None, invokingState=-1):
super(StlParser.UnitContext, self).__init__(parent, invokingState)
self.parser = parser
def SEC(self):
return self.getToken(StlParser.SEC, 0)
def MSEC(self):
return self.getToken(StlParser.MSEC, 0)
def USEC(self):
return self.getToken(StlParser.USEC, 0)
def NSEC(self):
return self.getToken(StlParser.NSEC, 0)
def getRuleIndex(self):
return StlParser.RULE_unit
def accept(self, visitor):
if hasattr(visitor, "visitUnit"):
return visitor.visitUnit(self)
else:
return visitor.visitChildren(self)
def unit(self):
localctx = StlParser.UnitContext(self, self._ctx, self.state)
self.enterRule(localctx, 4, self.RULE_unit)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 58
_la = self._input.LA(1)
if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << StlParser.SEC) | (1 << StlParser.MSEC) | (1 << StlParser.USEC) | (1 << StlParser.NSEC))) != 0)):
self._errHandler.recoverInline(self)
else:
self.consume()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ExpressionContext(ParserRuleContext):
def __init__(self, parser, parent=None, invokingState=-1):
super(StlParser.ExpressionContext, self).__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return StlParser.RULE_expression
def copyFrom(self, ctx):
super(StlParser.ExpressionContext, self).copyFrom(ctx)
class ExprSinceContext(ExpressionContext):
def __init__(self, parser, ctx): # actually a StlParser.ExpressionContext)
super(StlParser.ExprSinceContext, self).__init__(parser)
self.copyFrom(ctx)
def expression(self, i=None):
if i is None:
return self.getTypedRuleContexts(StlParser.ExpressionContext)
else:
return self.getTypedRuleContext(StlParser.ExpressionContext,i)
def SinceOperator(self):
return self.getToken(StlParser.SinceOperator, 0)
def interval(self):
return self.getTypedRuleContext(StlParser.IntervalContext,0)
def accept(self, visitor):
if hasattr(visitor, "visitExprSince"):
return visitor.visitExprSince(self)
else:
return visitor.visitChildren(self)
class ExprParenContext(ExpressionContext):
def __init__(self, parser, ctx): # actually a StlParser.ExpressionContext)
super(StlParser.ExprParenContext, self).__init__(parser)
self.copyFrom(ctx)
def LPAREN(self):
return self.getToken(StlParser.LPAREN, 0)
def expression(self):
return self.getTypedRuleContext(StlParser.ExpressionContext,0)
def RPAREN(self):
return self.getToken(StlParser.RPAREN, 0)
def accept(self, visitor):
if hasattr(visitor, "visitExprParen"):
return visitor.visitExprParen(self)
else:
return visitor.visitChildren(self)
class ExprIffContext(ExpressionContext):
def __init__(self, parser, ctx): # actually a StlParser.ExpressionContext)
super(StlParser.ExprIffContext, self).__init__(parser)
self.copyFrom(ctx)
def expression(self, i=None):
if i is None:
return self.getTypedRuleContexts(StlParser.ExpressionContext)
else:
return self.getTypedRuleContext(StlParser.ExpressionContext,i)
def IffOperator(self):
return self.getToken(StlParser.IffOperator, 0)
def accept(self, visitor):
if hasattr(visitor, "visitExprIff"):
return visitor.visitExprIff(self)
else:
return visitor.visitChildren(self)
class ExpreOnceContext(ExpressionContext):
def __init__(self, parser, ctx): # actually a StlParser.ExpressionContext)
super(StlParser.ExpreOnceContext, self).__init__(parser)
self.copyFrom(ctx)
def OnceOperator(self):
return self.getToken(StlParser.OnceOperator, 0)
def expression(self):
return self.getTypedRuleContext(StlParser.ExpressionContext,0)
def interval(self):
return self.getTypedRuleContext(StlParser.IntervalContext,0)
def accept(self, visitor):
if hasattr(visitor, "visitExpreOnce"):
return visitor.visitExpreOnce(self)
else:
return visitor.visitChildren(self)
class ExprEvContext(ExpressionContext):
def __init__(self, parser, ctx): # actually a StlParser.ExpressionContext)
super(StlParser.ExprEvContext, self).__init__(parser)
self.copyFrom(ctx)
def EventuallyOperator(self):
return self.getToken(StlParser.EventuallyOperator, 0)
def expression(self):
return self.getTypedRuleContext(StlParser.ExpressionContext,0)
def interval(self):
return self.getTypedRuleContext(StlParser.IntervalContext,0)
def accept(self, visitor):
if hasattr(visitor, "visitExprEv"):
return visitor.visitExprEv(self)
else:
return visitor.visitChildren(self)
class ExprImpliesContext(ExpressionContext):
def __init__(self, parser, ctx): # actually a StlParser.ExpressionContext)
super(StlParser.ExprImpliesContext, self).__init__(parser)
self.copyFrom(ctx)
def expression(self, i=None):
if i is None:
return self.getTypedRuleContexts(StlParser.ExpressionContext)
else:
return self.getTypedRuleContext(StlParser.ExpressionContext,i)
def ImpliesOperator(self):
return self.getToken(StlParser.ImpliesOperator, 0)
def accept(self, visitor):
if hasattr(visitor, "visitExprImplies"):
return visitor.visitExprImplies(self)
else:
return visitor.visitChildren(self)
class ExprUntilContext(ExpressionContext):
def __init__(self, parser, ctx): # actually a StlParser.ExpressionContext)
super(StlParser.ExprUntilContext, self).__init__(parser)
self.copyFrom(ctx)
def expression(self, i=None):
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portals_id_designs_nk_folder_get(id, nk, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Portal id (required)
:param str nk: Foreign key for designs. (required)
:param bool refresh:
:return: DesignFolder
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.portals_id_designs_nk_folder_get_with_http_info(id, nk, **kwargs)
else:
(data) = self.portals_id_designs_nk_folder_get_with_http_info(id, nk, **kwargs)
return data
def portals_id_designs_nk_folder_get_with_http_info(self, id, nk, **kwargs):
"""
Fetches belongsTo relation folder.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portals_id_designs_nk_folder_get_with_http_info(id, nk, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Portal id (required)
:param str nk: Foreign key for designs. (required)
:param bool refresh:
:return: DesignFolder
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'nk', 'refresh']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method portals_id_designs_nk_folder_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `portals_id_designs_nk_folder_get`")
# verify the required parameter 'nk' is set
if ('nk' not in params) or (params['nk'] is None):
raise ValueError("Missing the required parameter `nk` when calling `portals_id_designs_nk_folder_get`")
collection_formats = {}
resource_path = '/Portals/{id}/designs/{nk}/folder'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
if 'nk' in params:
path_params['nk'] = params['nk']
query_params = {}
if 'refresh' in params:
query_params['refresh'] = params['refresh']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DesignFolder',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
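# Note on the generated methods in this class: each endpoint comes as a pair, a
# thin public wrapper (returning the data directly, or the request thread when a
# `callback` is supplied) and a `*_with_http_info` variant that validates the
# parameters and delegates to self.api_client.call_api with the resource path,
# HTTP verb, and auth settings, as seen above and repeated below.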
def portals_id_designs_nk_members_count_get(self, id, nk, **kwargs):
"""
Counts members of Design.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portals_id_designs_nk_members_count_get(id, nk, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Portal id (required)
:param str nk: Foreign key for designs. (required)
:param str where: Criteria to match model instances
:return: InlineResponse2001
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.portals_id_designs_nk_members_count_get_with_http_info(id, nk, **kwargs)
else:
(data) = self.portals_id_designs_nk_members_count_get_with_http_info(id, nk, **kwargs)
return data
def portals_id_designs_nk_members_count_get_with_http_info(self, id, nk, **kwargs):
"""
Counts members of Design.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portals_id_designs_nk_members_count_get_with_http_info(id, nk, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Portal id (required)
:param str nk: Foreign key for designs. (required)
:param str where: Criteria to match model instances
:return: InlineResponse2001
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'nk', 'where']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method portals_id_designs_nk_members_count_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `portals_id_designs_nk_members_count_get`")
# verify the required parameter 'nk' is set
if ('nk' not in params) or (params['nk'] is None):
raise ValueError("Missing the required parameter `nk` when calling `portals_id_designs_nk_members_count_get`")
collection_formats = {}
resource_path = '/Portals/{id}/designs/{nk}/members/count'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
if 'nk' in params:
path_params['nk'] = params['nk']
query_params = {}
if 'where' in params:
query_params['where'] = params['where']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InlineResponse2001',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def portals_id_designs_nk_members_delete(self, id, nk, **kwargs):
"""
Deletes all members of this model.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portals_id_designs_nk_members_delete(id, nk, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Portal id (required)
:param str nk: Foreign key for designs. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.portals_id_designs_nk_members_delete_with_http_info(id, nk, **kwargs)
else:
(data) = self.portals_id_designs_nk_members_delete_with_http_info(id, nk, **kwargs)
return data
def portals_id_designs_nk_members_delete_with_http_info(self, id, nk, **kwargs):
"""
Deletes all members of this model.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portals_id_designs_nk_members_delete_with_http_info(id, nk, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Portal id (required)
:param str nk: Foreign key for designs. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'nk']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method portals_id_designs_nk_members_delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `portals_id_designs_nk_members_delete`")
# verify the required parameter 'nk' is set
if ('nk' not in params) or (params['nk'] is None):
raise ValueError("Missing the required parameter `nk` when calling `portals_id_designs_nk_members_delete`")
collection_formats = {}
resource_path = '/Portals/{id}/designs/{nk}/members'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
if 'nk' in params:
path_params['nk'] = params['nk']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def portals_id_designs_nk_members_fk_delete(self, id, nk, fk, **kwargs):
"""
Delete a related item by id for members.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portals_id_designs_nk_members_fk_delete(id, nk, fk, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Portal id (required)
:param str nk: Foreign key for designs. (required)
:param str fk: Foreign key for members (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.portals_id_designs_nk_members_fk_delete_with_http_info(id, nk, fk, **kwargs)
else:
(data) = self.portals_id_designs_nk_members_fk_delete_with_http_info(id, nk, fk, **kwargs)
return data
def portals_id_designs_nk_members_fk_delete_with_http_info(self, id, nk, fk, **kwargs):
"""
Delete a related item by id for members.
This method makes
matricide matricides mattock mattocks maturation maturely
matzo matzoh matzohs matzos matzot matzoth maunder maundered
maundering maunders maven mavens maw mawed mawing mawkish mawkishly
maws maxed maxes maxilla maxillae maxillary maximally maximals maxing
mayday maydays mayflies mayflower mayflowers mayfly mayo mayoral
mayoralty maypole maypoles mead meadowlark meadowlarks meagerly
meagerness mealtime mealtimes meaningfully meanly meanness measurably
measureless meatball meatballs meatier meatiest meatloaf meatloaves
meaty mecca meccas mechanistic medial medians medic medicinally medics
meditative meditatively medulla medullas meetinghouse meetinghouses
meg megacycle megacycles megahertz megalith megaliths megalomania
megalomaniacs megalopolis megalopolises megs melancholia melancholic
melancholics melange melanges melanin melanoma melanomas meld melded
melding melds melee melees mellifluous mellifluously mellowness
melodically melodiously melodiousness melodramatically meltdown
meltdowns membranous memorabilia menacingly menage menages mendacious
mendacity mender menders mendicant mendicants menfolk menhaden
menially meningitis menopausal menorah menorahs menservants menses
menswear mentholated merchantman merchantmen mercurial mercuric
meretricious merganser mergansers merino merinos meritocracies
meritocracy meritorious meritoriously merman mermen merriness
merrymaker merrymakers merrymaking mesa mesas mescal mescaline mescals
mesdemoiselles mesmerism mesquite mesquites messiah messiahs messieurs
messily messiness mestizo mestizos metacarpal metacarpals metacarpi
metacarpus metallurgical metallurgist metallurgists metamorphic
metamorphism metamorphosed metamorphosing metastases metastasis
metatarsal metatarsals meteoroid meteoroids meteorological methadone
methane methanol methinks methodically methodological methodologies
methought meticulously meticulousness metier metiers metrical
metrically metrication metrics metronome metronomes mettlesome mewl
mewled mewling mewls mi miasma miasmas mica microbiologist
microbiologists microchip microchips microcosm microcosms
microeconomics micron microns microprocessors microscopically
microscopy microsurgery mid midair middies middlebrow middlebrows
middleweight middleweights middling middy midge midges midland
midlands midmost midmosts midpoint midpoints midshipman midshipmen
midterm midterms midtown midweek midweeks midwiferies midwifery
midwinter midyear midyears miff miffed miffing miffs mightily
mightiness mil milch mildness milepost mileposts miler milers milieu
milieus militantly militarism militarist militaristic militarists
militiaman militiamen milkiness milkmaid milkmaids milkshake milksop
milksops milkweed milkweeds millage millennial millennium millenniums
millet millipede millipedes millrace millraces millstone millstones
milquetoast milquetoasts mils mimeograph mimeographed mimeographing
mimeographs mimetic mimosa mimosas minaret minarets minatory mindfully
mindfulness mindlessness minefields mineralogist mineralogists
mineralogy minestrone minesweeper minesweepers mini miniaturist
miniaturists minibike minibikes minicam minicams minicomputers minim
minimalists minims minis miniseries miniskirt miniskirts ministrant
ministrants ministration ministrations minivan minivans minster
mintier mintiest minty minuend minuends minutely minuteman minutemen
minuteness minutia minutiae minx minxes mirthful mirthfully mirthless
misalignment misalliance misalliances misanthrope misanthropes
misanthropic misanthropist misanthropists misanthropy misapplication
misapplied misapplies misapply misapplying misapprehend misapprehended
misapprehending misapprehends misapprehensions misbegotten
miscalculate miscalculated miscalculates miscalculating miscalculation
miscalculations miscall miscalled miscalling miscalls miscast
miscasting miscasts miscegenation miscellanies mischance mischanced
mischances mischancing mischievously mischievousness misconceive
misconceived misconceives misconceiving misconstruction
misconstructions miscount miscounted miscounting miscounts miscreant
miscreants miscue miscued miscues miscuing misdeal misdealing misdeals
misdealt misdiagnose misdiagnosed misdiagnoses misdiagnosing
misdiagnosis misdid misdo misdoes misdoing misdoings misdone
miserliness misfeasance misfire misfired misfires misfiring misgovern
misgoverned misgoverning misgoverns misguidedly mishandle mishandled
mishandles mishandling mishmash mishmashes misidentified misidentifies
misidentify misidentifying misinterpretations mismanage mismanaged
mismanages mismanaging misogynist misogynistic misogynists misogyny
misplay misplayed misplaying misplays mispronounce mispronounced
mispronounces mispronouncing mispronunciation mispronunciations
misquotation misquotations misreadings misrule misruled misrules
misruling missal missals missilery misspend misspending misspends
misspent misstate misstated misstatement misstatements misstates
misstating misstep misstepped misstepping missteps mister misters
mistily mistime mistimed mistimes mistiming mistiness mistranslated
mistreat mistreated mistreating mistreatment mistreats mistrial
mistrials mistrustful mistypes mitigation mitosis mizzen mizzenmast
mizzenmasts mizzens mobster mobsters mocha mocker mockers mockingly
modals modem modems modernism modernist modernistic modernists
modifiable modish modishly mods modulator modulators modulus mogul
moguls moieties moiety moire moires moistly moistness molder moldered
moldering molders moldiness molehill molehills moleskin molestation
molester molesters moll mollification molls mollycoddle mollycoddled
mollycoddles mollycoddling molybdenum momentousness momma mommas
mommies mommy monarchic monarchical monarchism monarchist monarchists
monasticism monaural monetarily moneybag moneybags moneyed moneymaker
moneymakers moneymaking monger mongered mongering mongers mongolism
mongooses moniker monikers monkeyshine monkeyshines mono monochromatic
monochromes monocle monocles monocotyledon monocotyledons monograph
monographs monolingual monolinguals monolith monoliths monomania
monomaniac monomaniacs mononucleosis monophonic monopolist
monopolistic monopolists monosyllabic monotheism monotheist
monotheistic monotheists monotone monotoned monotones monotonic
monotoning monotonously monoxide monoxides monsieur monsignor
monsignors monstrance monstrances monstrously montage montages
monumentally mooch mooched moocher moochers mooches mooching moodiness
moonlighter moonlighters moonlit moonscape moonscapes moonshine
moonshines moonshot moonshots moonstone moonstones moonstruck moorland
mopeds moppet moppets moraine moraines moralistic moray morays
morbidity morbidly mordant mordants mores moribund morocco morosely
moroseness morpheme morphemed morphemes morpheming morphological
morrow morrows mortarboard mortarboards mortgagee mortgagees mortgagor
mortgagors mortician morticians mortise mortised mortises mortising
moses mosey moseyed moseying moseys mossed mossing mote motes
motherboard motherboards motherfucker motherfuckers motherfucking
motherland motherlands motherless motherliness motile motiles
motivational motivator motivators motocross motocrosses motorbiked
motorbiking motorboat motorboats motorcar motorcars motorcyclist
motorcyclists motorman motormen motormouth motormouths mottle mottled
mottles mottling moult moulted moulting moults mountainside
mountainsides mountaintop mountaintops mountebank mountebanks
mountings mournfully mournfulness mouser mousers mousetrap
mousetrapped mousetrapping mousetraps mousiness mouthwash mouthwashes
mouthwatering movingly mozzarella mucilage muckier muckiest muckrake
muckraked muckraker muckrakers muckrakes muckraking mucky muddiness
mudguard mudguards mudslide mudslides mudslinger mudslingers
mudslinging muesli muezzin muezzins mufti muftis muggings mukluk
mukluks mulatto mulattoes mulberries mulberry muleteer muleteers
mulish mulishly mulishness mullah mullahs mullet mullets mulligatawny
mullion mullions multicultural multiculturalism multidimensional
multifaceted multifarious multifariousness multilateral multilingual
multimedia multimillionaire multimillionaires multiplex multiplexed
multiplexer multiplexers multiplexes multiplexing multiplicand
multiplicands multiplier multipliers multipurpose multiracial
multitudinous multivariate multivitamin multivitamins mumbler mumblers
mummer mummers mummery mummification munchies mundanely municipally
munificence munificent munition munitions muralist muralists murderess
murderesses murderously murk murkily murkiness murks muscat muscatel
muscatels muscularity musculature mushiness musicale musicales
musicianship musicologist musicologists musicology musings muskellunge
muskellunges musketeer musketeers musketry muskier muskiest muskiness
muskmelon muskmelons muskrat muskrats musky muslin mussier mussiest
mussy mustiness mutability mutable muteness mutineer mutineered
mutineering mutineers mutinously mutuality muumuu muumuus myna mynas
myopia myrrh myrtle myrtles mysteriousness mystically mystification
mystique mythic mythologist mythologists n nabob nabobs nacho nachos
nacre nadir nadirs naiad naiads nailbrush nailbrushes nakedly nannied
nannies nanny nannying nanosecond nanoseconds naphtha naphthalene
nappier nappiest narc narced narcing narcissism narcissist
narcissistic narcissists narcissus narcosis narcs narwhal narwhals
nary nasally nascent nasturtium nasturtiums natal nattily naturalism
naturalistic nauseatingly nautically nautilus nautiluses nave naves
navigability navigational naysayer naysayers nearness neath nebular
necromancer necromancers necromancy necrosis needful needfuls
neediness needlepoint nefarious nefariously nefariousness negativity
neglectfully negligibly negs nematode nematodes nemeses nemesis
neoclassic neoclassical neoclassicism neocolonialism neodymium
neologism neologisms neonatal neonate neonates neoprene nephritis
neptunium nerd nerdier nerdiest nerds nerdy nerveless nervelessly
nervier nerviest nervy nethermost nettlesome neuralgia neuralgic
neuritis neurological neurosurgery neurotically neurotransmitter
neurotransmitters neutrally neutrino neutrinos nevermore newel newels
newlywed newlyweds newness newsboy newsboys newsflash newsman newsmen
newspaperman newspapermen newspaperwoman newspaperwomen newsreel
newsreels newsworthier newsworthiest newsworthy newtons nexus nexuses
niacin nib nibbed nibbing nibbler nibblers nibs niceness nickelodeon
nickelodeons niggard niggarded niggarding niggardliness niggardly
niggards nigger niggers niggle niggled niggles niggling nigglings
nigher nighest nightcap nightcaps nightclothes nighthawk nighthawks
nightie nighties nightlife nightshade nightshades nightshirt
nightshirts nightstick nightsticks nihilism nihilist nihilistic
nihilists nimbi nimbleness nimbus ninepin ninepins ninja ninjas nipper
nippered nippering nippers nirvana nitpick nitpicked nitpicker
nitpickers nitpicking nitpicks nitrogenous nitroglycerin nix nixed
nixes nixing nobleness nocturnally nocturne nocturnes nodal noddy
nodular nodule nodules noel noels noggin noggins noiselessness
noisemaker noisemakers noisome nonabrasive nonabsorbent nonabsorbents
nonagenarian nonagenarians nonalcoholic nonalcoholics nonaligned
nonbeliever nonbelievers nonbreakable nonce noncom noncombatant
noncombatants noncommercial noncommercials noncommittally
noncompetitive noncompliance noncoms nonconductor nonconductors
nonconformity noncontagious noncooperation nondairy nondeductible
nondenominational nondrinker nondrinkers nonempty nonessential
nonesuch nonesuches nonevent nonevents nonexempt nonexistence
nonexistent nonfat nonfatal nongovernmental nonhazardous nonhuman
nonindustrial noninterference nonintervention nonjudgmental nonliving
nonmalignant nonmember nonmembers nonnegotiable nonobjective nonpareil
nonpareils nonpayment nonpayments nonphysical nonplus nonpluses
nonplussed nonplussing nonpoisonous nonpolitical nonpolluting
nonprescription nonproductive nonprofessional nonprofessionals
nonproliferation nonrefillable nonrefundable nonrenewable
nonrepresentational nonrestrictive nonreturnable nonreturnables
nonrigid nonscheduled nonseasonal nonsectarian nonsensically nonsexist
nonskid nonsmoker nonsmokers nonsmoking nonstick nonsupport nontaxable
nontechnical nontoxic nontransferable nonunion nonuser nonusers
nonverbal nonviolent nonvoting nonwhite nonwhites nonzero noonday
noontime nope nopes normalcy normative northbound northeaster
northeasters northeastward northerner northerners northernmost
northwards northwesterly northwestward nosedive nosedived nosedives
nosediving nosegay nosegays nosh noshed noshes noshing nosiness
nostalgically nostrum nostrums notaries notary notepad notepaper
notionally nous novae novas novelette novelettes novella novellas
novitiate novitiates noway noways nowise nth nu nuanced nub nubile
nubs nucleic nudism nudist nudists nuke nuked nukes nuking
nullification nullity numberless numbly numeracy numerated numerates
numerating numeration numerations numerology numismatic numismatics
numismatist numismatists numskull numskulls nuncio nuncios nunneries
nunnery nurseryman nurserymen nuthatch nuthatches nutmeat nutmeats
nutria nutrias nutritionally nutritionist nutritionists nutritive
nuttiness nylons nymphomania nymphomaniac nymphomaniacs o oafish oaken
oakum oarlock oarlocks oarsman oarsmen oat oaten oats obduracy
obdurate obdurated obdurately obdurates obdurating obeisance
obeisances obeisant obfuscate obfuscated obfuscates obfuscating obit
obits objectionably objectiveness oblate oblation oblations obligingly
obliquely obliqueness obliviously obliviousness obloquy obnoxiously
oboist oboists obscenely obscurely obsequies obsequious obsequiously
obsequiousness obsequy observably observantly observational
obsessively obsessives obsidian obstetric obstetrical obstinately
obstreperous obstructionist obstructionists obstructively
obstructiveness obstructives obtrude obtruded obtrudes obtruding
obtrusively obtrusiveness obtusely obtuseness obverse obverses obviate
obviated obviates obviating obviousness ocarina ocarinas occidental
occidentals occlude occluded occludes occluding occlusion occlusions
occult oceangoing oceanographer oceanographers oceanographic ocelot
ocelots octane octet octets octogenarian octogenarians oculist
oculists oddball oddballs oddness odiously odium odoriferous odorous
odyssey odysseys offal offensively offertories offertory offhandedly
officeholder officeholders officialdom officiously officiousness
offside offsides oft oftentimes ofter oftest oho ohos oilcloth
oilcloths oilfield oilfields oiliness oilskin oink oinked oinking
oinks oldie oldies oleaginous oleander oleanders oleo oleomargarine
oligarch oligarchic oligarchies oligarchs oligarchy ombudsman
ombudsmen omegas omnibuses omnipresence omniscience omnivore omnivores
omnivorous oncology oneness onetime ongoings onionskin onomatopoeic
onrushing onshore onyx onyxes oops oopses opacity opalescence
opalescent opaquely opaqueness openhanded openwork operable
operationally operetta operettas ophthalmic
<filename>python/Strobe/StrobeFancy.py
"""
An example implementation of STROBE.
The key tree may be patented. Also, it may be easy to violate other
patents with this code, so be careful.
Copyright (c) <NAME>, Cryptography Research, 2015-2016.
I will need to contact legal to get a license for this; in the meantime
it is for example purposes only.
"""
from .Keccak import KeccakF
from .ControlWord import *
from collections import namedtuple
import base64
import threading
import itertools
class StrobeException(Exception):
def __init__(self,*args,**kwargs):
Exception.__init__(self,*args,**kwargs)
class AuthenticationFailed(StrobeException):
def __init__(self,*args,**kwargs):
StrobeException.__init__(self,*args,**kwargs)
class ProtocolError(StrobeException):
def __init__(self,*args,**kwargs):
StrobeException.__init__(self,*args,**kwargs)
def zeros():
while True: yield 0
class Strobe(object):
"""
STROBE protocol framework
"""
version = "v0.7"
PAD = 0x04
CSHAKE_PAD = 0x80
def __init__(self,proto,dir=None,F=None,rate=None,steg=0,copy_from=None,over_rate=None,doInit=True,verbose=False):
if copy_from is not None:
self.F = copy_from.F
self.rate = copy_from.rate
self.proto = copy_from.proto
self.off = copy_from.off
self.prev_mark = copy_from.prev_mark
self.dir = copy_from.dir
self.st = bytearray(copy_from.st)
self.steg = copy_from.steg
self.over_rate = copy_from.over_rate
self.verbose = verbose
else:
if F is None: F = KeccakF()
if rate is None: rate = F.nbytes - 32 - 2
self.F = F
self.rate = rate
self.proto = proto
self.off = self.prev_mark = 0
self.dir = dir
self.steg = steg
self.over_rate = rate + 2
self.verbose = verbose
if doInit: self.init(proto,over_rate)
else: self.st = bytearray(self.F.nbytes)
def __str__(self):
if self.dir is None: dir = "None"
elif self.dir == DIR_CLIENT: dir = "DIR_CLIENT"
elif self.dir == DIR_SERVER: dir = "DIR_SERVER"
return "%s(%s,dir=%s,F=%s)" % (
self.__class__.__name__,self.proto,dir,self.F
)
def copy(self):
return Strobe(proto=self.proto,copy_from=self)
def init(self,proto,over_rate=None):
"""
The initialization routine sets up the state in a way that is
unique to this Strobe protocol. Unlike SHA-3, the protocol
and rate are distinguished up front in the first call to the
F-function.
"""
self.st = bytearray(self.F.nbytes)
# Initialize according to cSHAKE. TODO: check that this is correct
aString = "STROBE " + self.__class__.version
        cShakeD = bytearray([1,self.over_rate,1,len(aString)]) + aString.encode("ascii") + bytearray([1,0])
self.st[0:len(cShakeD)] = cShakeD
self.st = self.F(self.st)
self.duplex(FLAG_A|FLAG_M,proto)
def _run_f(self):
"""
Pad out blocks and run the sponge's F-function
"""
self.st[self.off] ^= self.prev_mark
self.st[self.off+1] ^= self.PAD
self.st[self.over_rate-1] ^= self.CSHAKE_PAD
# if self.verbose:
# print "**** IN ****"
# print "".join(("%02x" % b for b in self.st))
self.st = self.F(self.st)
# if self.verbose:
# print "**** OU ****"
# print "".join(("%02x" % b for b in self.st))
self.off = self.prev_mark = 0
def _set_mode(self, mode):
"""
Put a delimiter in the hash state.
"""
self.st[self.off] ^= self.prev_mark
self.off += 1
self.prev_mark = self.off
if self.off >= self.rate: self._run_f()
# Adjust the mode for initiator vs responder
if mode & FLAG_T:
if self.dir is None:
self.dir = mode & FLAG_I
mode ^= self.dir
self.st[self.off] ^= mode
self.off += 1
if self.off >= self.rate or (mode & (FLAG_C | FLAG_K)):
self._run_f()
def duplex(self,op,data=None,length=None,as_iter=False):
"""
The main STROBE duplex operation.
"""
# steg support: if would send/recv in the clear, send/recv encrypted instead.
if op & FLAG_T: op |= self.steg
self._set_mode(op)
(I,T,C,A,K) = (bool(op & f) for f in [FLAG_I,FLAG_T,FLAG_C,FLAG_A,FLAG_K])
        if isinstance(data,str): data = bytearray(data, "utf-8")
# compute flags
yield_anything = (A and I) or (T and not I)
read_anything = (T and I) or (A and not I)
verify_mac = (I,T,A) == (True,True,False)
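        # Added illustrative note (not from the original author): data is
        # yielded back to the caller either when application data is being
        # received (A and I) or when transport bytes are being produced to
        # send (T and not I); input is consumed when transport bytes are being
        # received (T and I) or when the application supplies data to absorb
        # (A and not I); and a received transport operation that is not handed
        # to the application (I, T, not A) is treated as a MAC and verified.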
if data is None or not read_anything:
if length is None: data = ()
else: data = zeros()
if length is not None:
data = itertools.islice(data,length)
if self.verbose: print("Duplex mode=0x%02x:\n " % op, end=' ')
out = self._duplex_iter((I,T,A,C,K),data)
if yield_anything:
# Return the iterator
if as_iter: return out
return bytearray(out)
elif verify_mac:
# Asked to verify a MAC
res = 0
for o in out: res |= o
if res: raise AuthenticationFailed()
return ()
else:
# The data is not used
for o in out: pass
return ()
def _duplex_iter(self, op, data):
"""
Duplexing sponge construction, iterator-version.
"""
(I,T,A,C,K) = op
res = 0
if C: s2o = 0x00FF
else: s2o = 0
s2s = 0xFFFF
if T and not I: s2s ^= s2o
if K:
# The DPA-resistant key tree is a CRI design to mitigate differential
# power analysis at a protocol level.
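            # (Added illustrative note: with keytreebits = 2 below, key
            # material appears to be absorbed only two bits per call to F,
            # so at most two unknown key bits mix with the state between
            # permutations, which limits the signal available to a DPA
            # attacker.)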
if self.off != 0:
# Since we call self.mark(C or K) above, this is only possible through
# misuse of "more"
raise Exception("Bug: user called keytree with off != 0")
keytreebits = 2
assert keytreebits > 0 and 8 % keytreebits == 0 and self.PAD << keytreebits < 256
mask = (1<<keytreebits)-1
s2o >>= 8-keytreebits
s2s >>= 8-keytreebits
for byte in data:
for bpos in range(0,8,keytreebits):
byte ^= (self.st[0] & s2o) << bpos
self.st[0] &= s2s
self.st[0] ^= (byte >> bpos) & mask
self.st[1] ^= self.PAD<<keytreebits
self.st[self.over_rate-1] ^= self.CSHAKE_PAD
self.st = self.F(self.st)
yield byte
else:
# Not the keytree
for byte in data:
if self.verbose: print("%02x" % byte, end=' ')
byte ^= self.st[self.off] & s2o
self.st[self.off] &= s2s
self.st[self.off] ^= byte
self.off += 1
if self.off >= self.rate: self._run_f()
yield byte
if self.verbose: print()
def begin_steg(self):
"""
Begin steganography.
"""
self.steg = FLAG_C
@staticmethod
def i2o_le(number,length):
"""
Encode a non-negative integer to bytes, little-endian, of the given length.
"""
if number < 0 or number >= 1 << (8*length):
raise ProtocolError("Cannot encode number %d in %d bytes"
% (number, length))
return [ 0xFF & number >> (8*i)
for i in range(length) ]
@staticmethod
def o2i_le(enc_number):
"""
Decode a non-negative integer from bytes, little-endian.
"""
return sum(( int(x)<<(8*i) for (i,x) in enumerate(enc_number) ))
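    # A quick illustrative round trip for the two helpers above (added
    # example, assuming a hypothetical 3-byte length field):
    #
    #     Strobe.i2o_le(0x0201, 3)           -> [0x01, 0x02, 0x00]
    #     Strobe.o2i_le([0x01, 0x02, 0x00])  -> 0x0201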
def outbound(self,cw,data=(),length=None,**kwargs):
"""
Send or inject data with the given control-word.
"""
        if length is not None and data != ():
raise ProtocolError("Explicit length set with data")
if cw.length_bytes == 0:
encoded_length = ()
if length is None: length = cw.length
else:
# determine the length
if length is None: length = cw.length
if length is None:
try: length = len(data)
except TypeError:
data = bytearray(data)
length = len(data)
# encode it
encoded_length = self.i2o_le(length,cw.length_bytes)
cw_bytes = itertools.chain(cw.bytes, encoded_length)
s1 = self.duplex(cw.cmode, cw_bytes)
s2 = self.duplex(cw.dmode, data, length=length, **kwargs)
return bytearray(s1) + bytearray(s2)
def send(self,cw,*args,**kwargs):
"""
Same as .outbound, but assert that mode includes actually sending
data to the wire.
(It is possible that no data will be sent if the length is 0.)
"""
if not (cw.dmode | cw.cmode) & FLAG_T:
raise ProtocolError(
"Used .send on non-T control word; use .inject or .outbound instead"
)
return self.outbound(cw,*args,**kwargs)
def inject(self,cw,*args,**kwargs):
"""
Same as .outbound, but assert that the mode does not include
sending data to the wire.
"""
if (cw.dmode | cw.cmode) & FLAG_T:
raise ProtocolError(
"Used .inject on T control word; use .send or .outbound instead"
)
self.outbound(cw,*args,**kwargs)
def recv_cw(self,data,possible_cws):
"""
        Receive a control word from a list of possible control words.
        Return the control word and its length, or raise an error.
"""
# create stream data
cm = FLAG_I|FLAG_A|FLAG_T|FLAG_M
stream = self.duplex(cm,data,as_iter=True)
poss = list(possible_cws)
i = 0
dr = []
def can_begin_with(cw,bs):
if len(bs) > len(cw.bytes) + cw.length_bytes: return False
lencmp = min(len(bs),len(cw.bytes))
return bytearray(cw.bytes[0:lencmp]) == bytearray(bs[0:lencmp])
while len(poss) > 1:
b = next(stream)
dr.append(b)
poss = [cw for cw in poss if can_begin_with(cw,dr)]
if len(poss) == 0:
# oops, eliminated all possibilities
raise ProtocolError("None of the expected CWs received")
# read extra bytes to finish the control word
cw = poss[0]
extra = len(cw.bytes) + cw.length_bytes - len(dr)
dr.extend(itertools.islice(stream,extra))
if cw.length_bytes > 0:
actual_length = self.o2i_le(dr[-cw.length_bytes:])
# Sanity-check length
if cw.length is not None and cw.length != actual_length:
raise ProtocolError("Received length %d doesn't matched expected length %d"
% (actual_length, cw.length))
elif cw.min_length is not None and cw.min_length > actual_length:
raise ProtocolError("Received length %d less than expected min-length %d"
% (actual_length, cw.min_length))
elif cw.max_length is not None and cw.max_length < actual_length:
raise ProtocolError("Received length %d greater than expected max-length %d"
% (actual_length, cw.max_length))
return cw, actual_length
else:
return cw, cw.length
def inbound_data(self,cw,data,**kwargs):
"""
        Take data from a
= (list(self.params.sixtrack.keys()) +
list(self.params.phasespace.keys()) +
['wu_id', 'preprocess_id'])
# create a dict for each row. This is bulky due to the json.loads :(
six_wu_entries = []
for row in self.db.select('sixtrack_wu', six_keys):
d = {}
for k, v in zip(six_keys, row):
if isinstance(v, str):
try:
v = json.loads(v)
except json.JSONDecodeError: # json custom exception
# print(f'json.loads({v}) failed.')
pass
d[k] = v
six_wu_entries.append(d)
# prepare a map of preprocess_id to task_id
madx_wu_task = self.db.select('preprocess_task',
['wu_id', 'task_id'])
pre_to_task_id = dict(madx_wu_task)
calc_out_to_be_updated = []
where_to_be_updated = []
# run the calculations
for row in six_wu_entries:
result = self.params.calc(row,
task_id=pre_to_task_id[row['preprocess_id']],
get_val_db=self.db,
require='all')
if result:
# only update the columns which need updating
update_cols = {}
for k, v in result.items():
if k in row.keys():
update_cols[k] = v
calc_out_to_be_updated.append(update_cols)
where_to_be_updated.append({'wu_id': row['wu_id']})
                self._logger.info('Queued update of sixtrack_wu/wu_id:'
f'{row["wu_id"]} with {update_cols}.')
# update everything at once
self._logger.info(f'Updating {len(calc_out_to_be_updated)} rows of '
'sixtrack_wu.')
# turn list of dicts into dict of lists
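        # e.g. [{'wu_id': 1, 'turns': 10}, {'wu_id': 2, 'turns': 20}] becomes
        # {'wu_id': [1, 2], 'turns': [10, 20]} (illustrative keys and values)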
calc_out_to_be_updated = {k: [dic[k] for dic in calc_out_to_be_updated]
for k in calc_out_to_be_updated[0]}
where_to_be_updated = {k: [dic[k] for dic in where_to_be_updated]
for k in where_to_be_updated[0]}
self.db.updatem('sixtrack_wu',
calc_out_to_be_updated,
where=where_to_be_updated)
def update_db(self, db_check=False):
        '''Update the database with the user-defined parameters'''
temp = self.paths["templates"]
cont = os.listdir(temp)
require = []
require.append(self.sixtrack_input['fort_file'])
require.append(self.madx_input["mask_file"])
for r in require:
if r not in cont:
content = "The required file %s isn't found in %s!" % (r, temp)
raise FileNotFoundError(content)
outputs = self.db.select('templates', self.tables['templates'].keys())
tab = {}
for key, value in self.madx_input.items():
value = os.path.join(self.study_path, value)
tab[key] = utils.compress_buf(value)
value = os.path.join(self.study_path, self.sixtrack_input['fort_file'])
tab['fort_file'] = utils.compress_buf(value)
if self.collimation:
for key in self.collimation_input.keys():
val = os.path.join(self.study_path,
self.collimation_input[key])
tab[key] = utils.compress_buf(val)
if 'additional_input' in self.sixtrack_input.keys():
inp = self.sixtrack_input['additional_input']
for key in inp:
value = os.path.join(self.study_path, key)
tab[key] = utils.compress_buf(value)
if not outputs:
self.db.insert('templates', tab)
else:
self.db.update('templates', tab)
outputs = self.db.select('boinc_vars', self.boinc_vars.keys())
if not outputs:
self.db.insert('boinc_vars', self.boinc_vars)
else:
self.db.update('boinc_vars', self.boinc_vars)
# update the parameters
outputs = self.db.select('env', self.paths.keys())
envs = {}
envs.update(self.paths)
envs.update(self.env)
if not outputs:
self.db.insert('env', envs)
else:
self.db.update('env', envs)
# update preprocess_wu and sixtrack_wu with parameter combinations.
self._update_db_params()
def info(self, job=2, verbose=False, where=None):
'''Print the status information of this study.
job=
0: print madx, oneturn sixtrack job
1: print sixtrack job
2: print madx, oneturn sixtrack and sixtrack jobs
where: the filter condition for database query, e.g.
"status='complete'""
'''
query_list = ['wu_id', 'job_name', 'status', 'unique_id']
typ = ['preprocess_wu', 'sixtrack_wu']
titles = ['madx and one turn sixtrack jobs:', 'Sixtrack jobs:']
def query(index):
wus = self.db.select(typ[int(index)], query_list, where)
content = '\n'+titles[int(index)] + '\n'
comp = []
subm = []
incm = []
if wus:
results = dict(zip(query_list, zip(*wus)))
for ele in results['status']:
if ele == 'complete':
comp.append(ele)
if ele == 'submitted':
subm.append(ele)
if ele == 'incomplete':
incm.append(ele)
content += f'complete: {len(comp)} \n'
content += f'submitted: {len(subm)} \n'
content += f'incomplete: {len(incm)}\n'
self._logger.info(content)
if verbose:
print(query_list)
for i in wus:
print(i)
if job == 0 or job == 2:
query(0)
if job == 1 or job == 2:
query(1)
def submit(self, typ, trials=5, *args, **kwargs):
        '''Submit the preprocess or sixtrack jobs to HTCondor.
@type(0,1 or 2) The job type, 0 is preprocess job, 1 is sixtrack job,
2 is collimation job
@trials The maximum number of trials of submission
'''
if typ == 0:
input_path = self.paths['preprocess_in']
jobname = 'preprocess'
table_name = 'preprocess_wu'
elif typ == 1:
input_path = self.paths['sixtrack_in']
jobname = 'sixtrack'
table_name = 'sixtrack_wu'
else:
content = ("Unknown job type %s, must be either 0 "
"(preprocess job) or 1 (tracking job)") % typ
raise ValueError(content)
batch_name = os.path.join(self.study_path, jobname)
where = "batch_name like '%s_%%'" % batch_name
que_out = self.db.select(table_name, 'batch_name', where,
DISTINCT=True)
ibatch = len(que_out)
ibatch += 1
batch_name = batch_name + '_' + str(ibatch)
status, out = self.submission.submit(input_path,
batch_name,
self.max_jobsubmit,
trials,
*args,
**kwargs)
if status:
content = "Submit %s job successfully!" % jobname
self._logger.info(content)
table = {}
table['status'] = 'submitted'
self._logger.info(f"Updating the {table_name} table for job status.....")
bar = utils.ProgressBar(len(out))
for ky, vl in out.items():
bar.update()
keys = ky.split('-')
for k in keys:
where = 'task_id=%s' % k
table['unique_id'] = vl
table['batch_name'] = batch_name
self.db.update(table_name, table, where)
else:
content = "Failed to submit %s job!" % jobname
self._logger.error(content)
def collect_result(self, typ, boinc=False):
'''Collect the results of preprocess or sixtrack jobs'''
config = {}
info_sec = {}
config['info'] = info_sec
config['db_setting'] = self.db_settings
config['db_info'] = self.db_info
if typ == 0:
if self.oneturn:
# The section name should be same with the table name
config['oneturn_sixtrack_results'] = self.tables[
'oneturn_sixtrack_results']
info_sec['path'] = self.paths['preprocess_out']
fileout = list(self.preprocess_output.values())
info_sec['outs'] = Table.result_table(fileout)
elif typ == 1:
config['six_results'] = self.tables['six_results']
info_sec['path'] = self.paths['sixtrack_out']
info_sec['boinc_results'] = self.env['boinc_results']
info_sec['boinc'] = boinc
info_sec['st_pre'] = self.st_pre
info_sec['outs'] = Table.result_table(self.sixtrack_output)
if self.collimation:
config['aperture_losses'] = self.tables['aperture_losses']
config['collimation_losses'] = self.tables['collimation_losses']
config['init_state'] = self.tables['init_state']
config['final_state'] = self.tables['final_state']
else:
content = "Unkown job type %s" % typ
raise ValueError(content)
try:
gather.run(typ, config, self.submission)
except Exception as e:
raise e
def prepare_sixtrack_input(self, resubmit=False, boinc=False, groupby=None,
*args, **kwargs):
'''Prepare the input files for sixtrack job'''
# Prepares the sixtrack config dict, in the self.sixtrack_config
# attribute
self._prep_sixtrack_cfg()
# Run any input paramter based calculations and update the sixtrack_wu
# with the results.
self._run_calcs()
self._logger.info("Going to prepare input files for sixtrack jobs....")
if self.checkpoint_restart:
self.prepare_cr()
where = "status='complete'"
preprocess_outs = self.db.select('preprocess_wu', ['wu_id'], where)
if not preprocess_outs:
content = "There isn't complete madx job!"
self._logger.warning(content)
return
preprocess_outs = list(zip(*preprocess_outs))
if resubmit:
constraints = "status='submitted'"
action = 'resubmit'
else:
constraints = "status='incomplete' and preprocess_id in %s" % str(
preprocess_outs[0])
action = 'submit'
names = self.tables['sixtrack_wu'].keys()
outputs = self.db.select('sixtrack_wu',
names,
constraints)
if not outputs:
content = f"There isn't available sixtrack job to {action}!"
self._logger.info(content)
return
outputs = dict(zip(names, zip(*outputs)))
outputs['boinc'] = ['false'] * len(outputs['wu_id'])
if boinc:
outputs['boinc'] = ['true'] * len(outputs['wu_id'])
task_table = {}
wu_table = {}
task_ids = OrderedDict()
mtime = int(time.time() * 1E7)
task_table['wu_id'] = []
task_table['last_turn'] = []
task_table['mtime'] = []
self._logger.info("creating new lines in sixtrack_task table.....")
bar = utils.ProgressBar(len(outputs['last_turn']))
for wu_id, last_turn in zip(outputs['wu_id'], outputs['last_turn']):
bar.update()
where = f'wu_id={wu_id} and last_turn={last_turn} and status is null'
chck = self.db.select('sixtrack_task', ['task_id'], where)
if chck:
task_id = chck[0][0]
task_ids[task_id] = (wu_id, last_turn)
else:
task_table['wu_id'].append(wu_id)
task_table['last_turn'].append(last_turn)
task_table['mtime'].append(mtime)
if task_table['wu_id']:
self.db.insertm('sixtrack_task', task_table)
reviews = self.db.select('sixtrack_task',
['task_id', 'wu_id', 'last_turn'],
where=f'mtime={mtime}')
for ti, wi, lt in reviews:
task_ids[ti] = (wi, lt)
wu_table['task_id'] = list(task_ids.keys())
wu_table['mtime'] = [int(time.time() * 1E7), ]*len(task_ids)
where = {}
where['wu_id'] = []
where['last_turn'] = []
for i in task_ids.values():
where['wu_id'].append(i[0])
where['last_turn'].append(i[1]) # wu_id is not unique now
self.db.updatem('sixtrack_wu', wu_table, where)
task_ids = list(task_ids.keys())
outputs['task_id'] = task_ids
db_info = {}
db_info.update(self.db_info)
tran_input = []
if db_info['db_type'].lower() == 'sql':
sub_name = os.path.join(self.paths['sixtrack_in'], 'sub.db')
if os.path.exists(sub_name):
os.remove(sub_name) # remove the old one
db_info['db_name'] = sub_name
sub_db = SixDB(db_info, settings=self.db_settings, create=True)
sub_db.create_table('preprocess_wu', self.tables['preprocess_wu'],
self.table_keys['preprocess_wu'])
sub_db.create_table('preprocess_task',
self.tables['preprocess_task'],
self.table_keys['preprocess_task'])
sub_db.create_table('sixtrack_wu_tmp', self.tables['sixtrack_wu'],
self.table_keys['sixtrack_wu'])
sub_db.create_table('sixtrack_wu', self.tables['sixtrack_wu'],
self.table_keys['sixtrack_wu'])
sub_db.create_table('env', self.tables['env'])
sub_db.create_table('templates', self.tables['templates'])
env_outs = self.db.select('env')
names = list(self.tables['env'].keys())
env_ins = dict(zip(names, zip(*env_outs)))
sub_db.insertm('env', env_ins)
temp_outs = self.db.select('templates')
names = list(self.tables['templates'].keys())
temp_ins = dict(zip(names, zip(*temp_outs)))
sub_db.insertm('templates', temp_ins)
constr = "wu_id in (%s)" % (','.join(map(str, outputs['preprocess_id'])))
pre_outs = self.db.select('preprocess_wu', where=constr)
names = list(self.tables['preprocess_wu'].keys())
pre_ins = dict(zip(names, zip(*pre_outs)))
sub_db.insertm('preprocess_wu', pre_ins)
pre_task_ids = pre_ins['task_id']
constr = "task_id in (%s)" % (','.join(map(str, pre_task_ids)))
pre_task_outs = self.db.select('preprocess_task', where=constr)
names = list(self.tables['preprocess_task'].keys())
pre_task_ins = dict(zip(names, zip(*pre_task_outs)))
if resubmit:
constr = "first_turn is not null and status='submitted'"
else:
constr = "first_turn is not null and status='incomplete'"
cr_ids = self.db.select('sixtrack_wu', ['wu_id', 'first_turn'],
where=constr)
if cr_ids:
sub_db.create_table('sixtrack_task',
self.tables['sixtrack_task'])
cr_ids = list(zip(*cr_ids))
constr = "wu_id in (%s) and last_turn in (%s)" % (
','.join(map(str, cr_ids[0])),
','.join(map(str, map(lambda x: x-1, cr_ids[1]))))
cr_wu_outputs = self.db.select('sixtrack_wu', where=constr)
if cr_wu_outputs:
names = list(self.tables['sixtrack_wu'].keys())
cr_wu_ins = dict(zip(names, zip(*cr_wu_outputs)))
cr_task_ids = cr_wu_ins['task_id']
constr = "task_id in (%s)" % (','.join(map(str,
cr_task_ids)))
task_outputs = self.db.select('sixtrack_task',
where=constr)
names = list(self.tables['sixtrack_task'].keys())
task_ins = dict(zip(names, zip(*task_outputs)))
sub_db.insertm('sixtrack_wu', cr_wu_ins)
sub_db.insertm('sixtrack_task', task_ins)
sub_db.insertm('preprocess_task', pre_task_ins)
sub_db.insertm('sixtrack_wu_tmp', outputs)
sub_db.close()
db_info['db_name'] = 'sub.db'
content = "The submitted db %s is ready!" % db_info['db_name']
self._logger.info(content)
tran_input.append(sub_name)
else:
job_table = {}
where = "task_id in (%s)" % | |
# In[12]:
# writer = SummaryWriter()
z_sample = 0.5 * torch.randn(64, z_dim).to(device)
    _x, _ = next(iter(test_loader))
_x = _x.to(device)
for epoch in range(1, epochs + 1):
train_loss = train(epoch)
test_loss = test(epoch)
recon = plot_reconstrunction(_x[:8])
sample = plot_image_from_latent(z_sample)
# writer.add_scalar('train_loss', train_loss.item(), epoch)
# writer.add_scalar('test_loss', test_loss.item(), epoch)
#
# writer.add_images('Image_from_latent', sample, epoch)
# writer.add_images('Image_reconstrunction', recon, epoch)
#
# writer.close()
# In[ ]:
# !/usr/bin/env python
# coding: utf-8
# # MVAE
@pytest.mark.performance
def test_run_mvae():
# * Original paper: Multimodal Generative Models for Scalable Weakly-Supervised Learning (https://papers.nips.cc/paper/7801-multimodal-generative-models-for-scalable-weakly-supervised-learning.pdf)
# * Original code: https://github.com/mhw32/multimodal-vae-public
#
# ### MVAE summary
    # Multimodal variational autoencoder (MVAE) uses a product-of-experts inference network and a sub-sampled training paradigm to solve the multi-modal inference problem.
# - Product-of-experts
    # In the multimodal setting we assume the N modalities, $x_{1}, x_{2}, ..., x_{N}$, are conditionally independent given the common latent variable, z. That is, we assume a generative model of the form $p_{\theta}(x_{1}, x_{2}, ..., x_{N}, z) = p(z)p_{\theta}(x_{1}|z)p_{\theta}(x_{2}|z) \cdots p_{\theta}(x_{N}|z)$. The conditional independence assumptions in the generative model imply a relation among joint- and single-modality posteriors. That is, the joint posterior is a product of individual posteriors, with an additional quotient by the prior.
#
# - Sub-sampled training
    # MVAE sub-samples which ELBO terms to optimize at every gradient step, in order to capture the relationships between modalities and to train the individual inference networks.
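    # (Added note: with only two modalities here, the image x and the label y,
    # and no modality subsets left to sample, the sub-sampled objective reduces
    # to the sum ELBO(x, y) + ELBO(x) + ELBO(y), which is how the loss is
    # assembled further below.)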
# In[1]:
# In[2]:
# MNIST
# treat labels as a second modality
# root = '../data'
# transform = transforms.Compose([transforms.ToTensor(),
# transforms.Lambda(lambd=lambda x: x.view(-1))])
# kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
#
# train_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=True, transform=transform, download=True),
# shuffle=True, **kwargs)
# test_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=False, transform=transform),
# shuffle=False, **kwargs)
kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
train_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=False, **kwargs)
# In[3]:
from pixyz.utils import print_latex
# ## Define probability distributions
# ### In the original paper
# Modalities: $x_{1}, x_{2}, ..., x_{N}$
# Generative model:
#
# $p_{\theta}\left(x_{1}, x_{2}, \ldots, x_{N}, z\right)=p(z) p_{\theta}\left(x_{1} | z\right) p_{\theta}\left(x_{2} | z\right) \cdots p_{\theta}\left(x_{N} | z\right)$
#
# Inference:
#
# $p\left(z | x_{1}, \ldots, x_{N}\right) \propto \frac{\prod_{i=1}^{N} p\left(z | x_{i}\right)}{\prod_{i=1}^{N-1} p(z)} \approx \frac{\prod_{i=1}^{N}\left[\tilde{q}\left(z | x_{i}\right) p(z)\right]}{\prod_{i=1}^{N-1} p(z)}=p(z) \prod_{i=1}^{N} \tilde{q}\left(z | x_{i}\right)$
#
# ### MNIST settings
# Modalities:
# - x for image modality
# - y for label modality
#
# Prior: $p(z) = \cal N(z; \mu=0, \sigma^2=1)$
# Generators:
# $p_{\theta}(x|z) = \cal B(x; \lambda = g_x(z))$ for image modality
# $p_{\theta}(y|z) = \cal Cat(y; \lambda = g_y(z))$ for label modality
# $p_{\theta}\left(x, y, z\right)=p(z) p_{\theta}(x| z) p_{\theta}(y | z)$
#
# Inferences:
# $q_{\phi}(z|x) = \cal N(z; \mu=fx_\mu(x), \sigma^2=fx_{\sigma^2}(x))$ for image modality
# $q_{\phi}(z|y) = \cal N(z; \mu=fy_\mu(y), \sigma^2=fy_{\sigma^2}(y))$ for label modality
# $p(z)q_{\phi}(z|x)q_{\phi}(z|y)$
#
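    # (Added note, not part of the original notebook: a product of Gaussian
    # densities is again Gaussian. With a standard-normal prior and experts
    # $\tilde{q}(z|x) = \cal N(\mu_x, \sigma_x^2)$ and $\tilde{q}(z|y) = \cal N(\mu_y, \sigma_y^2)$,
    # the combined posterior has precision $T = 1 + 1/\sigma_x^2 + 1/\sigma_y^2$
    # and mean $(\mu_x/\sigma_x^2 + \mu_y/\sigma_y^2)/T$. This closed form is what
    # makes the product-of-experts inference network cheap to evaluate.)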
# In[4]:
from pixyz.distributions import Normal, Bernoulli, Categorical, ProductOfNormal
x_dim = 784
y_dim = 10
z_dim = 64
# inference model q(z|x) for image modality
class InferenceX(Normal):
def __init__(self):
super(InferenceX, self).__init__(cond_var=["x"], var=["z"], name="q")
self.fc1 = nn.Linear(x_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc31 = nn.Linear(512, z_dim)
self.fc32 = nn.Linear(512, z_dim)
def forward(self, x):
h = F.relu(self.fc1(x))
h = F.relu(self.fc2(h))
return {"loc": self.fc31(h), "scale": F.softplus(self.fc32(h))}
# inference model q(z|y) for label modality
class InferenceY(Normal):
def __init__(self):
super(InferenceY, self).__init__(cond_var=["y"], var=["z"], name="q")
self.fc1 = nn.Linear(y_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc31 = nn.Linear(512, z_dim)
self.fc32 = nn.Linear(512, z_dim)
def forward(self, y):
h = F.relu(self.fc1(y))
h = F.relu(self.fc2(h))
return {"loc": self.fc31(h), "scale": F.softplus(self.fc32(h))}
# generative model p(x|z)
class GeneratorX(Bernoulli):
def __init__(self):
super(GeneratorX, self).__init__(cond_var=["z"], var=["x"], name="p")
self.fc1 = nn.Linear(z_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc3 = nn.Linear(512, x_dim)
def forward(self, z):
h = F.relu(self.fc1(z))
h = F.relu(self.fc2(h))
return {"probs": torch.sigmoid(self.fc3(h))}
# generative model p(y|z)
class GeneratorY(Categorical):
def __init__(self):
super(GeneratorY, self).__init__(cond_var=["z"], var=["y"], name="p")
self.fc1 = nn.Linear(z_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc3 = nn.Linear(512, y_dim)
def forward(self, z):
h = F.relu(self.fc1(z))
h = F.relu(self.fc2(h))
return {"probs": F.softmax(self.fc3(h), dim=1)}
# prior model p(z)
prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.),
var=["z"], features_shape=[z_dim], name="p_{prior}").to(device)
p_x = GeneratorX().to(device)
p_y = GeneratorY().to(device)
p = p_x * p_y
q_x = InferenceX().to(device)
q_y = InferenceY().to(device)
# equation (4) in the paper
# "we can use a product of experts (PoE), including a “prior expert”, as the approximating distribution for the joint-posterior"
# Pixyz docs: https://docs.pixyz.io/en/latest/distributions.html#pixyz.distributions.ProductOfNormal
q = ProductOfNormal([q_x, q_y], name="q").to(device)
# In[5]:
print(q)
print_latex(q)
# In[6]:
print(p)
print_latex(p)
# ## Define Loss function
# $\cal L = \mathrm{ELBO}\left(x_{1}, \ldots, x_{N}\right)+\sum_{i=1}^{N} \mathrm{ELBO}\left(x_{i}\right)+\sum_{j=1}^{k} \mathrm{ELBO}\left(X_{j}\right)$
# In[7]:
from pixyz.losses import KullbackLeibler
from pixyz.losses import LogProb
from pixyz.losses import Expectation as E
# In[8]:
ELBO = -E(q, LogProb(p)) + KullbackLeibler(q, prior)
ELBO_x = -E(q_x, LogProb(p_x)) + KullbackLeibler(q_x, prior)
ELBO_y = -E(q_y, LogProb(p_y)) + KullbackLeibler(q_y, prior)
loss = ELBO.mean() + ELBO_x.mean() + ELBO_y.mean()
print_latex(loss) # Note: Terms in the printed loss may be reordered
# ## Define MVAE model using Model Class
# In[9]:
from pixyz.models import Model
model = Model(loss=loss, distributions=[p_x, p_y, q_x, q_y],
optimizer=optim.Adam, optimizer_params={"lr": 1e-3})
print(model)
print_latex(model)
# ## Define Train and Test loop using model
# In[10]:
def train(epoch):
train_loss = 0
for x, y in tqdm(train_loader):
x = x.to(device)
y = torch.eye(10)[y].to(device)
loss = model.train({"x": x, "y": y})
train_loss += loss
train_loss = train_loss * train_loader.batch_size / len(train_loader.dataset)
print('Epoch: {} Train loss: {:.4f}'.format(epoch, train_loss))
return train_loss
# In[11]:
def test(epoch):
test_loss = 0
for x, y in test_loader:
x = x.to(device)
y = torch.eye(10)[y].to(device)
loss = model.test({"x": x, "y": y})
test_loss += loss
test_loss = test_loss * test_loader.batch_size / len(test_loader.dataset)
print('Test loss: {:.4f}'.format(test_loss))
return test_loss
# ## Reconstruction and generation
# In[12]:
def plot_reconstrunction_missing_label_modality(x):
with torch.no_grad():
# infer from x (image modality) only
z = q_x.sample({"x": x}, return_all=False)
# generate image from latent variable
recon_batch = p_x.sample_mean(z).view(-1, 1, 28, 28)
comparison = torch.cat([x.view(-1, 1, 28, 28), recon_batch]).cpu()
return comparison
def plot_image_from_label(x, y):
with torch.no_grad():
x_all = [x.view(-1, 1, 28, 28)]
for i in range(7):
# infer from y (label modality) only
z = q_y.sample({"y": y}, return_all=False)
# generate image from latent variable
recon_batch = p_x.sample_mean(z).view(-1, 1, 28, 28)
x_all.append(recon_batch)
comparison = torch.cat(x_all).cpu()
return comparison
def plot_reconstrunction(x, y):
with torch.no_grad():
# infer from x and y
z = q.sample({"x": x, "y": y}, return_all=False)
# generate image from latent variable
recon_batch = p_x.sample_mean(z).view(-1, 1, 28, 28)
comparison = torch.cat([x.view(-1, 1, 28, 28), recon_batch]).cpu()
return comparison
# In[13]:
# for visualising in TensorBoard
# writer = SummaryWriter()
plot_number = 1
# set-aside observation for watching generative model improvement
    _x, _y = next(iter(test_loader))
_x = _x.to(device)
_y = torch.eye(10)[_y].to(device)
for epoch in range(1, epochs + 1):
train_loss = train(epoch)
test_loss = test(epoch)
recon = plot_reconstrunction(_x[:8], _y[:8])
sample = plot_image_from_label(_x[:8], _y[:8])
recon_missing = plot_reconstrunction_missing_label_modality(_x[:8])
# writer.add_scalar('train_loss', train_loss.item(), epoch)
# writer.add_scalar('test_loss', test_loss.item(), epoch)
#
# writer.add_images('Image_from_label', sample, epoch)
# writer.add_images('Image_reconstrunction', recon, epoch)
# writer.add_images('Image_reconstrunction_missing_label', recon_missing, epoch)
#
# writer.close()
# In[ ]:
# !/usr/bin/env python
# coding: utf-8
# # A toy example of variational inference with normalizing flow (using the VI class)
@pytest.mark.performance
def test_run_normalizing_flow_toy():
# In[1]:
# In[2]:
from pixyz.distributions import CustomProb, Normal, TransformedDistribution
from pixyz.models import VI
from pixyz.flows import PlanarFlow, FlowList
from pixyz.utils import print_latex
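    # (Added note: a planar flow in the sense of Rezende & Mohamed (2015)
    # transforms z as f(z) = z + u * h(w^T z + b) with learnable u, w, b and a
    # smooth nonlinearity h, and its log-det-Jacobian is
    # log|1 + u^T h'(w^T z + b) w|, so the change-of-variables term stays cheap.
    # The PlanarFlow layer imported above is used here under that assumption.)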
# In[3]:
# def plot_samples(points):
# X_LIMS = (-4, 4)
# Y_LIMS = (-4, 4)
#
# fig = plt.figure(figsize=(4, 4))
# ax = fig.add_subplot(111)
# ax.scatter(points[:, 0], points[:, 1], alpha=0.7, s=25)
# ax.set_xlim(*X_LIMS)
# ax.set_ylim(*Y_LIMS)
# ax.set_xlabel("p(z)")
#
# plt.show()
# In[4]:
import torch
x_dim = 2
def log_prob(z):
z1, z2 = torch.chunk(z, chunks=2, dim=1)
norm = torch.sqrt(z1 ** 2 + z2 ** 2)
exp1 = torch.exp(-0.5 * ((z1 - 2) / 0.6) ** 2)
exp2 = torch.exp(-0.5 * ((z1 + 2) / 0.6) ** 2)
        u = 0.5 * ((norm - 2) / 0.4) ** 2 - torch.log(exp1
self.first_learner.model1.transform(df)
.withColumn("preds1", split_udf(pred_col))
.select([*df_cols, "preds0", "preds1"])
)
df = df.withColumn(
"D",
F.when(
F.col(self.treatment_colname) == self.treatment_value, F.col(self.target_colname) - F.col("preds0")
).otherwise(F.col("preds1") - F.col(self.target_colname)),
)
return df.select([*df_cols, "D"])
class CVTEstimator:
"""Estimates treatment effect by transforming the target variable into a new target variable Z, such that the treatment effect tau(X) = 2 * E[Z | X] - 1.
This transformation results in a classification problem and is, thus, slightly different from the TransformedOutcomeEstimator, which results in a regression problem.
Can only be used with 50-50 treatment vs. control RCT data.
The Class Variable Transformation technique was proposed in Jaskowski and Jaroszewicz (2012) (http://people.cs.pitt.edu/~milos/icml_clinicaldata_2012/Papers/Oral_Jaroszewitz_ICML_Clinical_2012.pdf).
"""
def __init__(
self,
base_model_class: Any,
base_model_params: Dict,
predictors_colname: str = "features",
treatment_colname: str = "treatment",
target_colname: str = "outcome",
output_colname: str = "score",
):
"""Initializes the CVTEstimator.
Args:
base_model_class (pyspark.ml): the model class to instantiate the CVTEstimator with
base_model_params (dict): parameters and their values for the model
            predictors_colname (str, optional): the column name that contains the predictor variables
treatment_colname (str, optional): the column name that contains the treatment indicators
target_colname (str, optional): the column name that contains the target
output_colname (str, optional): the column name for the estimator output
"""
base_model_params["featuresCol"] = predictors_colname
base_model_params["labelCol"] = "cvt_label"
self.model = base_model_class(**base_model_params)
self.predictors_colname = predictors_colname
self.treatment_colname = treatment_colname
self.target_colname = target_colname
self.output_colname = output_colname
def fit(self, df_train: pyspark.sql.DataFrame, df_val: Optional[Any] = None) -> None:
"""Trains the CVT model by transforming the target variable and fitting a classifier on the transformed targets.
Args:
df_train (pyspark.sql.DataFrame): a dataframe containing the treatment indicators, the observed outcomes, and predictors
df_val (pyspark.sql.DataFrame): a dataframe containing the treatment indicators, the observed outcomes, and predictors
"""
df_train = df_train.withColumn(
"cvt_label", F.when(F.col(self.treatment_colname) == F.col(self.target_colname), 1).otherwise(0)
)
self.model = self.model.fit(df_train)
def predict(self, df: pyspark.sql.DataFrame) -> pyspark.sql.DataFrame:
"""Applies the CVT model and returns treatment effect predictions.
Args:
df (pyspark.sql.DataFrame): a dataframe containing predictors
Returns:
df (pyspark.sql.DataFrame): a dataframe containing treatment effect predictions
"""
df_cols = df.columns
df = df.withColumn(
"cvt_label", F.when(F.col(self.treatment_colname) == F.col(self.target_colname), 1).otherwise(0)
)
df = self.model.transform(df)
split_udf = udf(lambda value: value[1].item(), FloatType())
df = df.withColumn("prob", split_udf("probability"))
df = df.withColumn(self.output_colname, 2 * F.col("prob") - 1)
df = df.select([*df_cols, self.output_colname])
return df
class TransformedOutcomeEstimator:
"""Estimates treatment effect by transforming the outcome, such that the expectation of the transformed outcome corresponds to the treatment effect.
This transformation results in a regression problem and is, thus, slightly different from the CVTEstimator, which results in a classification problem.
The Transformed Outcome technique was proposed in Athey and Imbens (2015) (https://pdfs.semanticscholar.org/86ce/004214845a1683d59b64c4363a067d342cac.pdf).
"""
def __init__(
self,
base_model_class: Any,
base_model_params: Dict,
predictors_colname: str = "features",
propensity_model_class: Any = None,
propensity_model_params: Dict = None,
treatment_colname: str = "treatment",
target_colname: str = "outcome",
treatment_value: int = 1,
control_value: int = 0,
output_colname: str = "score",
):
"""Initializes the TransformedOutcomeEstimator.
Args:
base_model_class (pyspark.ml): the model class to instantiate the TransformedOutcomeEstimator with
base_model_params (dict): parameters and their values for the model. The model must be a regressor.
            predictors_colname (str, optional): the column name that contains the predictor variables
propensity_model_class (, optional): the model class to instantiate the propensity model. If None, propensity will be estimated as the overall proportion of treated users in the training data.
propensity_model_params (dict, optional): parameters and their values for the propensity model. Not used if <propensity_model_class> is None.
treatment_colname (str, optional): the column name that contains the treatment indicators
target_colname (str, optional): the column name that contains the target
treatment_value (str or int, optional): the value in column <treatment_colname> that refers to the treatment group
control_value (str or int, optional): the value in column <treatment_colname> that refers to the control group
output_colname (str, optional): the column name for the estimator output
"""
base_model_params["featuresCol"] = predictors_colname
base_model_params["labelCol"] = "D"
self.model = base_model_class(**base_model_params)
self.propensity_estimator = PropensityEstimator(
propensity_model_class, propensity_model_params, predictors_colname, treatment_colname, treatment_value
)
self.predictors_colname = predictors_colname
self.treatment_colname = treatment_colname
self.target_colname = target_colname
self.control_value = control_value
self.treatment_value = treatment_value
self.output_colname = output_colname
def fit(self, df_train: pyspark.sql.DataFrame, df_val: Optional[Any] = None) -> None:
"""Trains the Transformed Outcome model by first fitting a propensity model, retrieving the propensity scores for each instance,
computing the transformed outcomes, and finally fitting a regressor on the transformed outcomes.
Args:
df_train (pyspark.sql.DataFrame): a dataframe containing the treatment indicators, the observed outcomes, and predictors
df_val (pyspark.sql.DataFrame): a dataframe containing the treatment indicators, the observed outcomes, and predictors
"""
self.propensity_estimator.fit(df_train)
df_train = self._get_transformed_outcomes(df_train)
self.model = self.model.fit(df_train)
def predict(self, df: pyspark.sql.DataFrame) -> pyspark.sql.DataFrame:
"""Applies the Transformed Outcome Estimator and returns treatment effect predictions.
Args:
df (pyspark.sql.DataFrame): a dataframe containing predictors
Returns:
df (pyspark.sql.DataFrame): a dataframe containing treatment effect predictions
"""
df_cols = df.columns
df = self._get_transformed_outcomes(df)
df = self.model.transform(df)
split_udf = udf(lambda value: value, FloatType())
pred_col = "prediction"
df = df.withColumn(self.output_colname, split_udf(pred_col))
df = df.select([*df_cols, self.output_colname])
return df
def _get_transformed_outcomes(self, df: pyspark.sql.DataFrame) -> pyspark.sql.DataFrame:
"""Applies the propensity model, computes and returns the transformed outcomes.
Args:
df (pyspark.sql.DataFrame): a dataframe containing the treatment indicators, the observed outcomes, and predictors
Returns:
df (pyspark.sql.DataFrame): a dataframe containing the transformed outcomes
"""
df = self.propensity_estimator.predict(df)
df = df.withColumn(
"D",
F.when(
F.col(self.treatment_colname) == self.treatment_value, F.col(self.target_colname) / F.col("propensity")
).otherwise(-1.0 * F.col(self.target_colname) / (1 - F.col("propensity"))),
)
return df
class PropensityEstimator:
"""Estimates treatment propensities, either as the simple treatment proportions E[T] or by training a model for E[T | X]."""
def __init__(
self,
base_model_class: Any = None,
base_model_params: Dict = None,
predictors_colname: str = None,
treatment_colname: str = "treatment",
treatment_value: int = 1,
control_value: int = 0,
output_colname: str = "propensity",
):
"""Initializes the propensity model.
Args:
base_model_class (pyspark.ml, optional): the model class to instantiate the propensity model. If None, propensity will be estimated as the overall proportion of treated users in the training data.
base_model_params (dict, optional): parameters and their values for the model. Not used if <base_model_class> is None.
            predictors_colname (str, optional): the column name that contains the predictor variables. Not used if <base_model_class> is None.
treatment_colname (str, optional): the column name that contains the treatment indicators
treatment_value (str or int, optional): the value in column <treatment_colname> that refers to the treatment group
output_colname (str, optional): the column name for the estimator output
"""
if base_model_class is not None:
base_model_params["featuresCol"] = predictors_colname # type: ignore
base_model_params["labelCol"] = treatment_colname # type: ignore
self.model = base_model_class(**base_model_params)
self.predictors_colname = predictors_colname
else:
self.model = None
self.propensity = 0.5
self.treatment_colname = treatment_colname
self.treatment_value = treatment_value
self.control_value = control_value
self.output_colname = output_colname
def fit(self, df_train: pyspark.sql.DataFrame, df_val: Optional[Any] = None) -> None:
"""Fits a propensity model.
If self.model is None, uses the proportion of treated instances in df_train to estimate E[T], independent of X.
If self.model is instantiated, fits a full propensity model E[T | X].
Args:
df_train (pyspark.sql.DataFrame): a dataframe containing the treatment indicators and predictors
df_val (pyspark.sql.DataFrame): a dataframe containing the treatment indicators and predictors
"""
if self.model is not None:
self.model = self.model.fit(df_train)
def predict(self, df: pyspark.sql.DataFrame) -> pyspark.sql.DataFrame:
"""Applies the propensity model and returns treatment assignment predictions.
If self.model is None, uses the pre-calculated treatment proportion for all instances.
If self.model is instantiated, applies the model to get estimates E[T | X].
Args:
df (pyspark.sql.DataFrame): a dataframe containing predictors
Returns:
df (pyspark.sql.DataFrame): a dataframe containing treatment assignment predictions
"""
df_cols = df.columns
if self.model is None:
treat_count = df.where(F.col(self.treatment_colname) == self.treatment_value).count()
control_count = df.where(F.col(self.treatment_colname) == self.control_value).count()
df = df.withColumn(
self.output_colname,
F.when(
F.col(self.treatment_colname) == self.treatment_value,
F.lit(treat_count / (treat_count + control_count)),
).otherwise(F.lit(control_count / (treat_count + control_count))),
)
else:
df = self.model.transform(df)
split_udf = udf(lambda value: value[1].item(), FloatType())
df = df.withColumn(self.output_colname, split_udf("probability"))
df = df.select([*df_cols, self.output_colname])
return df
class RetrospectiveEstimator:
"""Estimates E[T | Y=1, X], which corresponds to estimating the relative treatment effect E[Y | T=1, X] / E[Y | T=0, X] in case of 50-50 treatment vs. control RCT data.
    This estimator can also be used as the greedy solution for maximizing incrementality under ROI constraints, as described in Goldenberg et al. (2020) (preprint: https://drive.google.com/file/d/1E0KQ_sT09q1bpnlt9gZFFSbrx-YgGcqF/view).
"""
def __init__(
self,
base_model_class: Any,
        base_model_params:
# src/test/python/test_scc_pacs.py
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright (c) 2020, Sandflow Consulting LLC
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit tests for the SCC PACs"""
# pylint: disable=R0201,C0115,C0116
import unittest
from ttconv.scc.codes.preambles_address_codes import SccPreambleAddressCode
from ttconv.style_properties import TextDecorationType, NamedColors, FontStyleType
class SCCPreambleAddressCodesTest(unittest.TestCase):
def test_scc_pac_values(self):
channel_1_byte_1 = [0x11, 0x12, 0x15, 0x16, 0x17, 0x10, 0x13, 0x14]
channel_2_byte_1 = [0x19, 0x1A, 0x1D, 0x1E, 0x1F, 0x18, 0x1B, 0x1C]
all_range = list(range(0x00, 0XFF))
byte_2_range = range(0x40, 0x80)
other_bytes_1 = [item for item in all_range
if item not in channel_1_byte_1 and item not in channel_2_byte_1]
other_bytes_2 = [item for item in all_range if item not in list(byte_2_range)]
for b1 in channel_1_byte_1:
for b2 in byte_2_range:
pac = SccPreambleAddressCode.find(b1, b2)
if b2 > 0x5F and b1 % 0x08 == 0: # row 11 case
self.assertIsNone(pac)
else:
self.assertIsNotNone(pac)
for b2 in other_bytes_2:
self.assertIsNone(SccPreambleAddressCode.find(b1, b2))
for b1 in channel_2_byte_1:
for b2 in byte_2_range:
pac = SccPreambleAddressCode.find(b1, b2)
if b2 > 0x5F and b1 % 0x08 == 0: # row 11 case
self.assertIsNone(pac)
else:
self.assertIsNotNone(pac)
for b2 in other_bytes_2:
self.assertIsNone(SccPreambleAddressCode.find(b1, b2))
for b1 in other_bytes_1:
for b2 in range(0x00, 0xFF):
self.assertIsNone(SccPreambleAddressCode.find(b1, b2))
def check_scc_pac_attributes(self, pac, channel, row, indent, color, font_style, text_decoration):
self.assertEqual(channel, pac.get_channel())
self.assertEqual(row, pac.get_row())
self.assertEqual(indent, pac.get_indent())
self.assertEqual(color, pac.get_color())
self.assertEqual(font_style, pac.get_font_style())
self.assertEqual(text_decoration, pac.get_text_decoration())
def test_scc_pac_white(self):
self.check_scc_pac_attributes(SccPreambleAddressCode(0x11, 0x40), 1, 1, None, NamedColors.white.value, None, None)
self.check_scc_pac_attributes(SccPreambleAddressCode(0x11, 0x60), 1, 2, None, NamedColors.white.value, None, None)
self.check_scc_pac_attributes(SccPreambleAddressCode(0x12, 0x40), 1, 3, None, NamedColors.white.value, None, None)
self.check_scc_pac_attributes(SccPreambleAddressCode(0x12, 0x60), 1, 4, None, NamedColors.white.value, None, None)
self.check_scc_pac_attributes(SccPreambleAddressCode(0x15, 0x40), 1, 5, None, NamedColors.white.value, None, None)
self.check_scc_pac_attributes(SccPreambleAddressCode(0x15, 0x60), 1, 6, None, NamedColors.white.value, None, None)
self.check_scc_pac_attributes(SccPreambleAddressCode(0x16, 0x40), 1, 7, None, NamedColors.white.value, None, None)
self.check_scc_pac_attributes(SccPreambleAddressCode(0x16, 0x60), 1, 8, None, NamedColors.white.value, None, None)
self.check_scc_pac_attributes(SccPreambleAddressCode(0x17, 0x40), 1, 9, None, NamedColors.white.value, None, None)
self.check_scc_pac_attributes(SccPreambleAddressCode(0x17, 0x60), 1, 10, None, NamedColors.white.value, None, None)
self.check_scc_pac_attributes(SccPreambleAddressCode(0x10, 0x40), 1, 11, None, NamedColors.white.value, None, None)
self.check_scc_pac_attributes(SccPreambleAddressCode(0x13, 0x40), 1, 12, None, NamedColors.white.value, None, None)
self.check_scc_pac_attributes(SccPreambleAddressCode(0x13, 0x60), 1, 13, None, NamedColors.white.value, None, None)
self.check_scc_pac_attributes(SccPreambleAddressCode(0x14, 0x40), 1, 14, None, NamedColors.white.value, None, None)
self.check_scc_pac_attributes(SccPreambleAddressCode(0x14, 0x60), 1, 15, None, NamedColors.white.value, None, None)
self.check_scc_pac_attributes(SccPreambleAddressCode(0x19, 0x40), 2, 1, None, NamedColors.white.value, None, None)
self.check_scc_pac_attributes(SccPreambleAddressCode(0x19, 0x60), 2, 2, None, NamedColors.white.value, None, None)
self.check_scc_pac_attributes(SccPreambleAddressCode(0x1A, 0x40), 2, 3, None, NamedColors.white.value, None, None)
self.check_scc_pac_attributes(SccPreambleAddressCode(0x1A, 0x60), 2, 4, None, NamedColors.white.value, None, None)
self.check_scc_pac_attributes(SccPreambleAddressCode(0x1D, 0x40), 2, 5, None, NamedColors.white.value, None, None)
self.check_scc_pac_attributes(SccPreambleAddressCode(0x1D, 0x60), 2, 6, None, NamedColors.white.value, None, None)
self.check_scc_pac_attributes(SccPreambleAddressCode(0x1E, 0x40), 2, 7, None, NamedColors.white.value, None, None)
self.check_scc_pac_attributes(SccPreambleAddressCode(0x1E, 0x60), 2, 8, None, NamedColors.white.value, None, None)
self.check_scc_pac_attributes(SccPreambleAddressCode(0x1F, 0x40), 2, 9, None, NamedColors.white.value, None, None)
self.check_scc_pac_attributes(SccPreambleAddressCode(0x1F, 0x60), 2, 10, None, NamedColors.white.value, None, None)
self.check_scc_pac_attributes(SccPreambleAddressCode(0x18, 0x40), 2, 11, None, NamedColors.white.value, None, None)
self.check_scc_pac_attributes(SccPreambleAddressCode(0x1B, 0x40), 2, 12, None, NamedColors.white.value, None, None)
self.check_scc_pac_attributes(SccPreambleAddressCode(0x1B, 0x60), 2, 13, None, NamedColors.white.value, None, None)
self.check_scc_pac_attributes(SccPreambleAddressCode(0x1C, 0x40), 2, 14, None, NamedColors.white.value, None, None)
self.check_scc_pac_attributes(SccPreambleAddressCode(0x1C, 0x60), 2, 15, None, NamedColors.white.value, None, None)
def test_scc_pac_white_underline(self):
self.check_scc_pac_attributes(SccPreambleAddressCode(0x11, 0x41), 1, 1, None, NamedColors.white.value, None,
TextDecorationType(underline=True))
self.check_scc_pac_attributes(SccPreambleAddressCode(0x11, 0x61), 1, 2, None, NamedColors.white.value, None,
TextDecorationType(underline=True))
self.check_scc_pac_attributes(SccPreambleAddressCode(0x12, 0x41), 1, 3, None, NamedColors.white.value, None,
TextDecorationType(underline=True))
self.check_scc_pac_attributes(SccPreambleAddressCode(0x12, 0x61), 1, 4, None, NamedColors.white.value, None,
TextDecorationType(underline=True))
self.check_scc_pac_attributes(SccPreambleAddressCode(0x15, 0x41), 1, 5, None, NamedColors.white.value, None,
TextDecorationType(underline=True))
self.check_scc_pac_attributes(SccPreambleAddressCode(0x15, 0x61), 1, 6, None, NamedColors.white.value, None,
TextDecorationType(underline=True))
self.check_scc_pac_attributes(SccPreambleAddressCode(0x16, 0x41), 1, 7, None, NamedColors.white.value, None,
TextDecorationType(underline=True))
self.check_scc_pac_attributes(SccPreambleAddressCode(0x16, 0x61), 1, 8, None, NamedColors.white.value, None,
TextDecorationType(underline=True))
self.check_scc_pac_attributes(SccPreambleAddressCode(0x17, 0x41), 1, 9, None, NamedColors.white.value, None,
TextDecorationType(underline=True))
self.check_scc_pac_attributes(SccPreambleAddressCode(0x17, 0x61), 1, 10, None, NamedColors.white.value, None,
TextDecorationType(underline=True))
self.check_scc_pac_attributes(SccPreambleAddressCode(0x10, 0x41), 1, 11, None, NamedColors.white.value, None,
TextDecorationType(underline=True))
self.check_scc_pac_attributes(SccPreambleAddressCode(0x13, 0x41), 1, 12, None, NamedColors.white.value, None,
TextDecorationType(underline=True))
self.check_scc_pac_attributes(SccPreambleAddressCode(0x13, 0x61), 1, 13, None, NamedColors.white.value, None,
TextDecorationType(underline=True))
self.check_scc_pac_attributes(SccPreambleAddressCode(0x14, 0x41), 1, 14, None, NamedColors.white.value, None,
TextDecorationType(underline=True))
self.check_scc_pac_attributes(SccPreambleAddressCode(0x14, 0x61), 1, 15, None, NamedColors.white.value, None,
TextDecorationType(underline=True))
self.check_scc_pac_attributes(SccPreambleAddressCode(0x19, 0x41), 2, 1, None, NamedColors.white.value, None,
TextDecorationType(underline=True))
self.check_scc_pac_attributes(SccPreambleAddressCode(0x19, 0x61), 2, 2, None, NamedColors.white.value, None,
TextDecorationType(underline=True))
self.check_scc_pac_attributes(SccPreambleAddressCode(0x1A, 0x41), 2, 3, None, NamedColors.white.value, None,
TextDecorationType(underline=True))
self.check_scc_pac_attributes(SccPreambleAddressCode(0x1A, 0x61), 2, 4, None, NamedColors.white.value, None,
TextDecorationType(underline=True))
self.check_scc_pac_attributes(SccPreambleAddressCode(0x1D, 0x41), 2, 5, None, NamedColors.white.value, None,
TextDecorationType(underline=True))
self.check_scc_pac_attributes(SccPreambleAddressCode(0x1D, 0x61), 2, 6, None, NamedColors.white.value, None,
TextDecorationType(underline=True))
self.check_scc_pac_attributes(SccPreambleAddressCode(0x1E, 0x41), 2, 7, None, NamedColors.white.value, None,
TextDecorationType(underline=True))
self.check_scc_pac_attributes(SccPreambleAddressCode(0x1E, 0x61), 2, 8, None, NamedColors.white.value, None,
TextDecorationType(underline=True))
self.check_scc_pac_attributes(SccPreambleAddressCode(0x1F, 0x41), 2, 9, None, NamedColors.white.value, None,
TextDecorationType(underline=True))
self.check_scc_pac_attributes(SccPreambleAddressCode(0x1F, 0x61), 2, 10, None, NamedColors.white.value, None,
TextDecorationType(underline=True))
self.check_scc_pac_attributes(SccPreambleAddressCode(0x18, 0x41), 2, 11, None, NamedColors.white.value, None,
TextDecorationType(underline=True))
self.check_scc_pac_attributes(SccPreambleAddressCode(0x1B, 0x41), 2, 12, None, NamedColors.white.value, None,
TextDecorationType(underline=True))
self.check_scc_pac_attributes(SccPreambleAddressCode(0x1B, 0x61), 2, 13, None, NamedColors.white.value, None,
TextDecorationType(underline=True))
self.check_scc_pac_attributes(SccPreambleAddressCode(0x1C, 0x41), 2, 14, None, NamedColors.white.value, None,
TextDecorationType(underline=True))
self.check_scc_pac_attributes(SccPreambleAddressCode(0x1C, 0x61), 2, 15, None, NamedColors.white.value, None,
TextDecorationType(underline=True))
def test_scc_pac_green(self):
self.check_scc_pac_attributes(SccPreambleAddressCode(0x11, 0x42), 1, 1, None, NamedColors.green.value, None, None)
self.check_scc_pac_attributes(SccPreambleAddressCode(0x11, 0x62), 1, 2, None, NamedColors.green.value, None, None)
self.check_scc_pac_attributes(SccPreambleAddressCode(0x12, 0x42), 1, 3, None, NamedColors.green.value, None, None)
self.check_scc_pac_attributes(SccPreambleAddressCode(0x12, 0x62), 1, 4, None, NamedColors.green.value, None, None)
self.check_scc_pac_attributes(SccPreambleAddressCode(0x15, 0x42), 1, 5, None, NamedColors.green.value, None, None)
self.check_scc_pac_attributes(SccPreambleAddressCode(0x15, 0x62), 1, 6, None, NamedColors.green.value, None, None)
self.check_scc_pac_attributes(SccPreambleAddressCode(0x16, 0x42), 1, 7, None, NamedColors.green.value, None, None)
self.check_scc_pac_attributes(SccPreambleAddressCode(0x16, 0x62), 1, 8, None, NamedColors.green.value, None, None)
self.check_scc_pac_attributes(SccPreambleAddressCode(0x17, 0x42), 1, 9, None, NamedColors.green.value, None, None)
self.check_scc_pac_attributes(SccPreambleAddressCode(0x17, 0x62), 1, 10, None, NamedColors.green.value, None, None)
self.check_scc_pac_attributes(SccPreambleAddressCode(0x10, 0x42), 1, 11, None, NamedColors.green.value, None, None)
self.check_scc_pac_attributes(SccPreambleAddressCode(0x13, 0x42), 1, 12, None, NamedColors.green.value, None, None)
self.check_scc_pac_attributes(SccPreambleAddressCode(0x13, 0x62), 1, 13, None, NamedColors.green.value, None, None)
self.check_scc_pac_attributes(SccPreambleAddressCode(0x14, 0x42), 1, 14, None, NamedColors.green.value, None, None)
self.check_scc_pac_attributes(SccPreambleAddressCode(0x14, 0x62), 1, 15, None, NamedColors.green.value, None, None)
self.check_scc_pac_attributes(SccPreambleAddressCode(0x19, 0x42), 2, 1, None, NamedColors.green.value, None, None)
self.check_scc_pac_attributes(SccPreambleAddressCode(0x19, 0x62), 2, 2, None, NamedColors.green.value, None, None)
self.check_scc_pac_attributes(SccPreambleAddressCode(0x1A, 0x42), 2, 3, None, NamedColors.green.value, None, None)
self.check_scc_pac_attributes(SccPreambleAddressCode(0x1A, 0x62), 2, 4, None, NamedColors.green.value, None, None)
self.check_scc_pac_attributes(SccPreambleAddressCode(0x1D, 0x42), 2, 5, None, NamedColors.green.value, None, None)
self.check_scc_pac_attributes(SccPreambleAddressCode(0x1D, 0x62), 2, 6, None, NamedColors.green.value, None, None)
self.check_scc_pac_attributes(SccPreambleAddressCode(0x1E, 0x42), 2, 7, None, NamedColors.green.value, None, None)
self.check_scc_pac_attributes(SccPreambleAddressCode(0x1E, 0x62), 2, 8, None, NamedColors.green.value, None, None)
self.check_scc_pac_attributes(SccPreambleAddressCode(0x1F, 0x42), 2, 9, None, NamedColors.green.value, None, None)
self.check_scc_pac_attributes(SccPreambleAddressCode(0x1F, 0x62), 2, 10, None, NamedColors.green.value, None, None)
self.check_scc_pac_attributes(SccPreambleAddressCode(0x18, 0x42), 2, 11, None, NamedColors.green.value, None, None)
self.check_scc_pac_attributes(SccPreambleAddressCode(0x1B, 0x42), 2, 12, None, NamedColors.green.value, None, None)
self.check_scc_pac_attributes(SccPreambleAddressCode(0x1B, 0x62), 2, 13, None, NamedColors.green.value, None, None)
self.check_scc_pac_attributes(SccPreambleAddressCode(0x1C, 0x42), 2, 14, None, NamedColors.green.value, None, None)
self.check_scc_pac_attributes(SccPreambleAddressCode(0x1C, 0x62), 2, 15, None, NamedColors.green.value, None, None)
def test_scc_pac_green_underline(self):
self.check_scc_pac_attributes(SccPreambleAddressCode(0x11, 0x43), 1, 1, None, NamedColors.green.value, None,
TextDecorationType(underline=True))
self.check_scc_pac_attributes(SccPreambleAddressCode(0x11, 0x63), 1, 2, None, NamedColors.green.value, None,
TextDecorationType(underline=True))
self.check_scc_pac_attributes(SccPreambleAddressCode(0x12, 0x43), 1, 3, None, NamedColors.green.value, None,
TextDecorationType(underline=True))
self.check_scc_pac_attributes(SccPreambleAddressCode(0x12, 0x63), 1, 4, None, NamedColors.green.value, None,
TextDecorationType(underline=True))
self.check_scc_pac_attributes(SccPreambleAddressCode(0x15, 0x43), 1, 5, None, NamedColors.green.value, None,
TextDecorationType(underline=True))
self.check_scc_pac_attributes(SccPreambleAddressCode(0x15, 0x63), 1, 6, None, NamedColors.green.value, None,
TextDecorationType(underline=True))
self.check_scc_pac_attributes(SccPreambleAddressCode(0x16, 0x43), 1, 7, None, NamedColors.green.value, None,
TextDecorationType(underline=True))
self.check_scc_pac_attributes(SccPreambleAddressCode(0x16, 0x63), 1, 8, None, NamedColors.green.value, None,
TextDecorationType(underline=True))
self.check_scc_pac_attributes(SccPreambleAddressCode(0x17, 0x43), 1, 9, None, NamedColors.green.value, None,
TextDecorationType(underline=True))
self.check_scc_pac_attributes(SccPreambleAddressCode(0x17, 0x63), 1, 10, None, NamedColors.green.value, None,
TextDecorationType(underline=True))
self.check_scc_pac_attributes(SccPreambleAddressCode(0x10, 0x43), 1, 11, None, NamedColors.green.value, None,
TextDecorationType(underline=True))
self.check_scc_pac_attributes(SccPreambleAddressCode(0x13, 0x43), 1, 12, None, NamedColors.green.value, None,
TextDecorationType(underline=True))
self.check_scc_pac_attributes(SccPreambleAddressCode(0x13, 0x63), 1, 13, None, NamedColors.green.value, None,
TextDecorationType(underline=True))
self.check_scc_pac_attributes(SccPreambleAddressCode(0x14, 0x43), 1, 14, None, NamedColors.green.value, None,
TextDecorationType(underline=True))
self.check_scc_pac_attributes(SccPreambleAddressCode(0x14, 0x63), 1, 15, None, NamedColors.green.value, None,
TextDecorationType(underline=True))
self.check_scc_pac_attributes(SccPreambleAddressCode(0x19, 0x43), 2, 1, None, NamedColors.green.value, None,
TextDecorationType(underline=True))
self.check_scc_pac_attributes(SccPreambleAddressCode(0x19, 0x63), 2, 2, None, NamedColors.green.value, None,
TextDecorationType(underline=True))
self.check_scc_pac_attributes(SccPreambleAddressCode(0x1A, 0x43), 2, 3, None, NamedColors.green.value, None,
TextDecorationType(underline=True))
self.check_scc_pac_attributes(SccPreambleAddressCode(0x1A, 0x63), 2, 4, None, NamedColors.green.value, None,
TextDecorationType(underline=True))
self.check_scc_pac_attributes(SccPreambleAddressCode(0x1D, 0x43), 2, 5, None, NamedColors.green.value, None,
TextDecorationType(underline=True))
self.check_scc_pac_attributes(SccPreambleAddressCode(0x1D, 0x63), 2, 6, None, NamedColors.green.value, None,
TextDecorationType(underline=True))
self.check_scc_pac_attributes(SccPreambleAddressCode(0x1E, 0x43), 2, 7, None, NamedColors.green.value, None,
TextDecorationType(underline=True))
    self.check_scc_pac_attributes(SccPreambleAddressCode(0x1E, 0x63), 2, 8, None, NamedColors.green.value, None,
                                  TextDecorationType(underline=True))
from __future__ import division
from typing_extensions import ParamSpecArgs
from django.shortcuts import render,redirect
from django.http import Http404, request
from .models import *
from django.db.models import Q
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth.models import auth
from django.contrib.auth import login,logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.shortcuts import get_current_site
from django.core.mail import EmailMessage
from django.http import HttpResponse
from django.template.loader import render_to_string
from django.utils.encoding import force_bytes
from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.core.files.storage import FileSystemStorage
from django.core.exceptions import ObjectDoesNotExist
import datetime
import _datetime
from django.contrib import messages
from django.db.models.query import EmptyQuerySet
from django.db.models import Count
from django.http import JsonResponse
from django.utils import timezone
from .serializers import *
from rest_framework.response import Response
from rest_framework import status
from rest_framework.decorators import *
from rest_framework.permissions import IsAuthenticated
import requests
from rest_framework import generics, permissions
from rest_framework.authentication import TokenAuthentication
from rest_framework.authtoken.models import Token
from django.views.decorators.csrf import csrf_exempt
import json
import math, random
from django.utils.datastructures import MultiValueDictKeyError
from itertools import chain
from twilio.rest import Client
from lifestyles.settings import TWILIO_ACCOUNT_SID, TWILIO_AUTH_TOKEN
from django.http import HttpResponse
def index(request):
headtitle = "Life Styles | Home"
user = request.user
    # distinguish employees from customers via is_staff
    if user.is_staff == False:
        # check whether the visitor is logged in
        if user.is_authenticated:
            usertype = "Customer"
        else:
            # usertype stays None if the user is not logged in
            usertype = None
    # check if the user is an active employee
    elif user.is_active == True and user.is_staff == True:
        try:
            # try getting the employee object from the employee table
            emp = employeecontrol.objects.get(id=user)
            # store the employee type in usertype
            usertype = emp.employeetype
        except ObjectDoesNotExist:
            # the employee object does not exist
            usertype = None
    else:
        # inactive staff accounts: nothing to show, and this avoids an unbound usertype below
        usertype = None
parms = {
'title':headtitle,
'usertype':usertype,
}
return render(request,'index.html',parms)
#BMI Calculation Function --
def bmicalc(weight,height):
bmi = (weight/(height**2))
    if bmi < 16:
        state = "Severe Thinness"
    elif bmi < 17:
        state = "Moderate Thinness"
    elif bmi < 18:
        state = "Mild Thinness"
    elif bmi < 25:
        state = "Normal"
    elif bmi < 30:
        state = "Overweight"
    elif bmi < 35:
        state = "Obese Class I"
    elif bmi < 40:
        state = "Obese Class II"
    else:
        state = "Obese Class III"
context = [state,bmi]
return context
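# Quick sanity check for bmicalc (illustrative, not part of the original views):
# weight is expected in kilograms and height in metres, e.g. bmicalc(70, 1.75)
# returns ["Normal", 22.857...] because 70 / 1.75**2 ≈ 22.86.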
#Customer Dashboard - shows customer previous BMI, find its alloted dietician, nutritionist and trainer according to plan
#shows grocery list of that user, meeting links of that user
#Required - streak feature
def dashboard(request):
title = "Life Styles | Dashboard"
parms = {
'title':title,
}
user = request.user
#checks for user logged in and user is not any kind of employee
if user.is_authenticated and user.is_staff == False:
#get all bmi object of that particular user and then get the latest bmi of that user
bmii = bmi.objects.filter(us=user).order_by('-id')[0]
bmrr = bmr.objects.filter(us=user).order_by('-id')[0]
if user.allotdieti:
#get user dietician if he is alloted one.
finddieti = employeecontrol.objects.get(Q(employeetype="Dietician") & Q(alloted=user))
else:
#making that variable none in else part
finddieti = None
if user.allotnutri:
#get user nutritionist if he is alloted one
findnutri = employeecontrol.objects.get(Q(employeetype="Nutritionist") & Q(alloted=user))
else:
#else part making that var none
findnutri = None
if user.allottrain:
#get user trainer if he is alloted one.
findtrain = employeecontrol.objects.get(Q(employeetype="Fitness Trainer") & Q(alloted=user))
else:
#else making that var none
findtrain = None
#creating a list for storing grocery items
grolist = []
if user.sub.plan != "Free Plan":
try:
#trying to check for grocery list object
grocery = grocerylist.objects.filter(groid=user.id).first()
#if object does not exist then list will be none
except ObjectDoesNotExist:
grocery = None
#if list is not none then get all the items from that object of grocery and store in list
if grocery != None:
grolist = grocery.items.all()
#get all the meeting objects of that user
meet = user.lives.all()
live = []
emps = []
usem = []
for per in meet:
obj = MyUser.objects.filter(lives=per.id)
for ob in obj:
if ob.mobno != user.mobno:
emp = employeecontrol.objects.get(id=ob)
usem.append(ob)
live.append(per)
emps.append(emp)
#make a flag variable for checking if meet object is empty or not
flag = False
#if meet count is 0 flag is true
if meet.count() == 0:
flag = True
        # now build today's diet plan for the user
currday = datetime.datetime.today().weekday()
currweek = ['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday','Sunday']
curday = currweek[currday]
        try:
            dietplans = user.diets.get(day=curday)
        except ObjectDoesNotExist:
            dietplans = None
        # if there is no plan for today, fall back to empty meal lists instead of
        # dereferencing None below
        premeal = list(dietplans.preworkout.all()) if dietplans else []
        postmeal = list(dietplans.postworkout.all()) if dietplans else []
        lunch = list(dietplans.lunch.all()) if dietplans else []
        snacks = list(dietplans.snacks.all()) if dietplans else []
        dinner = list(dietplans.dinner.all()) if dietplans else []
d = datetime.date.today()
try:
logg = logs.objects.get(Q(date=d) & Q(us=user))
except ObjectDoesNotExist:
logg = logs.objects.create(us=user,date=d)
#lunch
freelunch = []
loglunch = logg.lunch.all()
for i in lunch:
if i not in loglunch:
freelunch.append(i)
#premeal
freepre = []
logpre = logg.preworkout.all()
for i in premeal:
if i not in logpre:
freepre.append(i)
#postmeal
freepost = []
logpost = logg.postworkout.all()
for i in postmeal:
if i not in logpost:
freepost.append(i)
#snacks
freesnacks = []
logsnacks = logg.snacks.all()
for i in snacks:
if i not in logsnacks:
freesnacks.append(i)
#dinner
freedinner = []
logdinner = logg.dinner.all()
for i in dinner:
if i not in logdinner:
freedinner.append(i)
#exercise
try:
exe = user.fitness.get(day=curday)
exena = exe.exercisename.all()
freeex = []
try:
exlog = exlogs.objects.get(Q(date=d) & Q(us=user))
except exlogs.DoesNotExist:
exlog = exlogs.objects.create(us=user,date=d)
logex = exlog.exercisename.all()
for i in exena:
if i not in logex:
freeex.append(i)
except ObjectDoesNotExist:
exe = None
freeex = []
logex = []
if request.method == "POST":
if 'yess' in request.POST:
tag = request.POST['tag']
if tag == 'Lunch':
l = list(set(chain(freelunch,loglunch)))
for i in l:
try:
check = request.POST["l"+str(i.fooditem)]
if check == "on":
if i not in loglunch:
logg.lunch.add(i)
logg.save()
messages.success(request,"Added to logs")
except MultiValueDictKeyError:
logg.lunch.remove(i)
logg.save()
return redirect("dashboard")
if tag == "Pre workout meal":
l = list(set(chain(freepre,logpre)))
for i in l:
try:
check = request.POST["pr" + str(i.fooditem)]
if check == "on":
if i not in logpre:
logg.preworkout.add(i)
logg.save()
messages.success(request,"Added to logs")
except MultiValueDictKeyError:
logg.preworkout.remove(i)
logg.save()
return redirect("dashboard")
if tag == "Post workout meal":
l = list(set(chain(freepost,logpost)))
for i in l:
try:
check = request.POST["po" + str(i.fooditem)]
if check == "on":
if i not in logpost:
logg.postworkout.add(i)
logg.save()
messages.success(request,"Added to logs")
except MultiValueDictKeyError:
logg.postworkout.remove(i)
logg.save()
return redirect("dashboard")
if tag == "Snacks":
l = list(set(chain(freesnacks,logsnacks)))
for i in l:
try:
check = request.POST["s" + str(i.fooditem)]
if check == "on":
if i not in logsnacks:
logg.snacks.add(i)
logg.save()
messages.success(request,"Added to logs")
except MultiValueDictKeyError:
logg.snacks.remove(i)
logg.save()
return redirect("dashboard")
if tag == "Dinner":
l = list(set(chain(freedinner,logdinner)))
for i in l:
try:
check = request.POST["d" + str(i.fooditem)]
if check == "on":
if i not in logdinner:
logg.dinner.add(i)
logg.save()
messages.success(request,"Added to logs")
except MultiValueDictKeyError:
logg.dinner.remove(i)
logg.save()
return redirect("dashboard")
if 'exyes' in request.POST:
l = list(set(chain(freeex,logex)))
for i in l:
try:
check = request.POST[str(i.id)]
if check == "on":
if i not in logex:
exlog.exercisename.add(i)
exlog.save()
messages.success(request,"Added to Exercise logs")
except MultiValueDictKeyError:
exlog.exercisename.remove(i)
exlog.save()
return redirect("dashboard")
parms = {
'title':"DASHBOARD | KOWI Lifestyles",
'bmi':bmii,
'bmr':bmrr,
'grolist':grolist,
'zip':zip(emps,live,usem),
'flag':flag,
'findnutri':findnutri,
'finddieti':finddieti,
'findtrain':findtrain,
'curday':curday,
'dietplans':dietplans,
'premeal':freepre,
'logpre':logpre,
'postmeal':freepost,
'logpost':logpost,
'lunch':freelunch,
'loglunch':loglunch,
'snacks':freesnacks,
'logsnacks':logsnacks,
'dinner':freedinner,
'logdinner':logdinner,
'date':d,
'exer':freeex,
'logex':logex,
}
return render(request,'dashboard.html',parms)
#if user is not logged in then it will redirect to login
else:
return redirect(login)
return render(request,'dashboard.html',parms)
##Login / Signup ###
#logout function
def logoutuser(request):
logout(request)
return redirect('login')
# account activation endpoint, reached from the confirmation link e-mailed to the user
def activate(request, uidb64, token):
try:
uid = urlsafe_base64_decode(uidb64).decode()
user = MyUser.objects.get(pk=uid)
except(TypeError, ValueError, OverflowError, MyUser.DoesNotExist):
user = None
if user is not None and default_token_generator.check_token(user, token):
user.is_active = True
user.save()
return HttpResponse('Thank you for your email confirmation. Now you can login your account.')
else:
return HttpResponse('Activation link is invalid!')
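# For reference, a hedged sketch of how the matching activation e-mail could be built
# (the project's real registration/signup view is not shown in this excerpt, so the
# subject line, URL pattern, and template name below are assumptions):
#
#     uid = urlsafe_base64_encode(force_bytes(user.pk))
#     token = default_token_generator.make_token(user)
#     domain = get_current_site(request).domain
#     body = render_to_string('activation_email.html', {'domain': domain, 'uid': uid, 'token': token})
#     EmailMessage('Activate your account', body, to=[user.email]).send()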
#login function
def login(request):
title = "Login | Lifestyles"
if request.method == 'POST':
#login with username and password
username = request.POST['username']
password = request.POST['password']
user = auth.authenticate(username=username,password=password)
        # also require is_staff == False so that employees cannot log in as customers
if user is not None and user.is_staff == False:
auth.login(request,user)
            messages.info(request,'Logged In Successfully')
#redirects to dashboard
return redirect('dashboard')
else:
messages.info(request,'Invalid Credentials')
1, 2.3, -12, None, False, '']
elif param_name == 'CategoriesListFilter':
valid = [self.CategoriesListFilter]
invalid = ['a', 1, 2.3, -12, None, False, '']
else:
raise ValueError(f"test_data_validation(): parameter test not implemented: '{param_name}'. Please remove or implement it")
for data in valid:
self.assert_request_param_good(valid_payload=payload, testing_param_name=param_name, testing_data=data)
for data in invalid:
self.assert_request_param_bad(valid_payload=payload, testing_param_name=param_name, testing_data=data)
class TestAPIUserPermissionsPanelApiUpdateData(HttpPostTestCase):
@classmethod
def setUpClass(self):
tear_down()
set_up_permissions()
self.api_name = 'user_permissions_panel_api_update_data'
self.post_response_json_key_specifications = []
self.valid_permission_id = UserPermissions.objects.using('PerInd').filter(
user__login__exact=TEST_WINDOWS_USERNAME
)[0].user_permission_id
self.valid_table = 'UserPermissions'
self.valid_column = 'Active'
self.new_value = 'True'
self.valid_payloads = [
{
'id' : self.valid_permission_id,
'table' : self.valid_table,
'column' : self.valid_column,
'new_value' : self.new_value
}
]
@classmethod
def tearDownClass(self):
tear_down()
def test_api_accept_only_admins(self):
remove_admin_status()
payload = self.valid_payloads[0]
content = self.post_and_get_json_response(payload)
self.assertTrue((content['post_success']==False) and ("not an admin" in content['post_msg']),
f"api should have detected that user is not an admin and fail\n{content['post_msg']}")
def test_with_valid_data(self):
grant_admin_status()
for payload in self.valid_payloads:
self.assert_post_with_valid_payload_is_success(payload=payload)
## Check if data was saved correctly
saved_object = UserPermissions.objects.using('PerInd').get(
user_permission_id=self.valid_permission_id
)
self.assert_post_key_update_equivalence(key_name=payload['column'], key_value=payload['new_value'], db_value=str(saved_object.active))
def test_data_validation(self):
grant_admin_status()
payload = self.valid_payloads[0]
parameters = [
# Parameter name # Accepted type
'id' # str/int -> string formatted int or int: primary key of a row in the Permission table
,'table' # str -> Table name
,'column' # str -> Column name of the table
,'new_value' # str -> the new value to be saved
]
for param_name in parameters:
if param_name == 'id':
valid = [self.valid_permission_id]
invalid = ['a', '-1', '-1.2', '11.567', '2.2', '4.45', 5.46, -1, None, False, True, '']
elif param_name == 'table':
valid = [self.valid_table]
invalid = [1, 2.3, False, None, 'sdf', '']
elif param_name == 'column':
valid = [self.valid_column]
invalid = ['a', 1, 2.3, '-1', '-1.2', '11.567', '2.2', '4.45', None, False, True, '']
elif param_name == 'new_value':
valid = [self.new_value]
invalid = ['a', '-1', '-1.2', '11.567', '2.2', '4.45', 1000, -1, None, False, True, '']
else:
raise ValueError(f"test_data_validation(): parameter test not implemented: '{param_name}'. Please remove or implement it")
for data in valid:
self.assert_request_param_good(valid_payload=payload, testing_param_name=param_name, testing_data=data)
for data in invalid:
self.assert_request_param_bad(valid_payload=payload, testing_param_name=param_name, testing_data=data)
class TestAPIUserPermissionsPanelApiAddRow(HttpPostTestCase):
@classmethod
def setUpClass(self):
tear_down()
self.api_name = 'user_permissions_panel_api_add_row'
self.post_response_json_key_specifications = [
{'name': 'permission_id' , 'null': False}
,{'name': 'first_name' , 'null': False}
,{'name': 'last_name' , 'null': False}
,{'name': 'active_user' , 'null': False}
,{'name': 'login' , 'null': False}
,{'name': 'category_name' , 'null': False}
]
self.valid_login_selection = TEST_WINDOWS_USERNAME
self.valid_category_selection = DEFAULT_CATEGORY
self.valid_payloads = [
{
'login_selection' : self.valid_login_selection,
'category_selection' : self.valid_category_selection,
}
]
@classmethod
def tearDownClass(self):
tear_down()
def test_api_accept_only_admins(self):
remove_admin_status()
payload = self.valid_payloads[0]
content = self.post_and_get_json_response(payload)
self.assertTrue((content['post_success']==False) and ("not an admin" in content['post_msg']),
f"api should have detected that user is not an admin and fail\n{content['post_msg']}")
def test_with_valid_data(self):
grant_admin_status()
for payload in self.valid_payloads:
tear_down_permissions() ## Remove the existing permission of our test user
response_json = self.assert_post_with_valid_payload_is_success(payload=payload)
## Check if data was saved correctly
saved_object = UserPermissions.objects.using('PerInd').get(
user_permission_id=response_json['post_data']['permission_id']
)
self.assert_post_key_update_equivalence(key_name='login_selection', key_value=payload['login_selection'], db_value=str(saved_object.user.login))
self.assert_post_key_update_equivalence(key_name='category_selection', key_value=payload['category_selection'], db_value=str(saved_object.category.category_name))
self.assert_post_key_update_equivalence(key_name='active', key_value=True, db_value=str(saved_object.active))
def test_data_validation(self):
grant_admin_status()
payload = self.valid_payloads[0]
parameters = [
# Parameter name # Accepted type
'login_selection' # str -> windows username
,'category_selection' # str -> a category name
]
for param_name in parameters:
if param_name == 'login_selection':
valid = [self.valid_login_selection]
invalid = ['a', 1, 2.3, '-1', '-1.2', '11.567', '2.2', '4.45', None, False, True, '']
elif param_name == 'category_selection':
valid = [self.valid_category_selection]
invalid = ['a', '-1', '-1.2', '11.567', '2.2', '4.45', 1000, -1, None, False, True, '']
else:
raise ValueError(f"test_data_validation(): parameter test not implemented: '{param_name}'. Please remove or implement it")
for data in valid:
tear_down_permissions() ## Remove the existing permission of our test user
self.assert_request_param_good(valid_payload=payload, testing_param_name=param_name, testing_data=data)
for data in invalid:
tear_down_permissions() ## Remove the existing permission of our test user
self.assert_request_param_bad(valid_payload=payload, testing_param_name=param_name, testing_data=data)
class TestAPIUserPermissionsPanelApiDeleteRow(HttpPostTestCase):
@classmethod
def setUpClass(self):
tear_down()
self.api_name = 'user_permissions_panel_api_delete_row'
self.post_response_json_key_specifications = []
self.user_permission_id = None
self.valid_payloads = [
{
'user_permission_id': None,
}
]
@classmethod
def tearDownClass(self):
tear_down()
def test_api_accept_only_admins(self):
remove_admin_status()
payload = self.valid_payloads[0]
content = self.post_and_get_json_response(payload)
self.assertTrue((content['post_success']==False) and ("not an admin" in content['post_msg']),
f"api should have detected that user is not an admin and fail\n{content['post_msg']}")
def test_with_valid_data(self):
grant_admin_status()
for payload in self.valid_payloads:
## Set up a temp permission to delete
set_up_permissions(windows_username=TEST_WINDOWS_USERNAME, categories=[DEFAULT_CATEGORY])
self.valid_permission_id = str(UserPermissions.objects.using('PerInd').get(
user__login=TEST_WINDOWS_USERNAME
,category__category_name=DEFAULT_CATEGORY
).user_permission_id)
payload['user_permission_id'] = self.valid_permission_id
response_json = self.assert_post_with_valid_payload_is_success(payload=payload)
## Check if data was deleted correctly
try:
saved_object = UserPermissions.objects.using('PerInd').get(user_permission_id=self.valid_permission_id)
except ObjectDoesNotExist as e:
... ## Good, do nothing
except Exception as e:
raise ValueError(f"test_with_valid_data(): {e}")
else:
self.assertTrue(False, f"user_permission_id {saved_object.user_permission_id} still exists in the database, unable to delete permission")
def test_data_validation(self):
grant_admin_status()
payload = self.valid_payloads[0]
parameters = [
# Parameter name # Accepted type
'user_permission_id' # str -> string formatted int
]
for param_name in parameters:
## Set up a temp permission to delete
set_up_permissions(windows_username=TEST_WINDOWS_USERNAME, categories=[DEFAULT_CATEGORY])
self.valid_permission_id = str(UserPermissions.objects.using('PerInd').get(
user__login=TEST_WINDOWS_USERNAME
,category__category_name=DEFAULT_CATEGORY
).user_permission_id)
payload['user_permission_id'] = self.valid_permission_id
if param_name == 'user_permission_id':
valid = [str(self.valid_permission_id)]
invalid = ['a', 5.6, -2.6, '-1', '-1.2', '11.567', '2.2', '4.45', None, False, True, '']
else:
raise ValueError(f"test_data_validation(): parameter test not implemented: '{param_name}'. Please remove or implement it")
for data in valid:
self.assert_request_param_good(valid_payload=payload, testing_param_name=param_name, testing_data=data)
for data in invalid:
self.assert_request_param_bad(valid_payload=payload, testing_param_name=param_name, testing_data=data)
class TestAPIUsersPanelApiAddRow(HttpPostTestCase):
@classmethod
def setUpClass(self):
tear_down()
self.user_obj = grant_admin_status()
self.api_name = 'users_panel_api_add_row'
self.post_response_json_key_specifications = [
{'name': 'user_id' , 'null': False}
,{'name': 'first_name' , 'null': False}
,{'name': 'last_name' , 'null': False}
,{'name': 'active_user' , 'null': False}
,{'name': 'login' , 'null': False}
]
self.valid_first_name = 'some_random_fname'
self.valid_last_name = 'some_random_lname'
self.valid_login = 'some_random_login'
self.valid_payloads = [
{
'first_name_input' : self.valid_first_name,
'last_name_input' : self.valid_last_name,
'login_input' : self.valid_login,
}
]
@classmethod
def tearDownClass(self):
tear_down()
self.remove_test_user_if_exists(self)
def test_api_accept_only_admins(self):
remove_admin_status()
payload = self.valid_payloads[0]
content = self.post_and_get_json_response(payload)
self.assertTrue((content['post_success']==False) and ("not an admin" in content['post_msg']),
f"api should have detected that user is not an admin and fail\n{content['post_msg']}")
def remove_test_user_if_exists(self):
try:
new_user = Users.objects.using('PerInd').get(login__exact=self.valid_login)
        except ObjectDoesNotExist:
            ...  # Do nothing
else:
new_user.delete(using='PerInd')
def test_with_valid_data(self):
grant_admin_status()
for payload in self.valid_payloads:
self.remove_test_user_if_exists()
self.assert_post_with_valid_payload_is_success(payload=payload)
## Check if data was saved correctly
saved_object = Users.objects.using('PerInd').get(login__exact=self.valid_login)
self.assert_post_key_update_equivalence(key_name='first_name_input', key_value=payload['first_name_input'], db_value=saved_object.first_name)
self.assert_post_key_update_equivalence(key_name='last_name_input', key_value=payload['last_name_input'], db_value=saved_object.last_name)
self.assert_post_key_update_equivalence(key_name='login_input', key_value=payload['login_input'], db_value=saved_object.login)
self.assert_post_key_update_equivalence(key_name='active_user', key_value=True, db_value=saved_object.active_user)
def test_data_validation(self):
grant_admin_status()
payload = self.valid_payloads[0]
parameters = [
# Parameter name # Accepted type
"first_name_input" # str -> first name
,"last_name_input" # str -> last name
,"login_input" # str -> windows username
]
for param_name in parameters:
if param_name == 'first_name_input':
valid = [self.valid_first_name]
invalid = [1, 2.3, False, None]
elif param_name == 'last_name_input':
valid = [self.valid_last_name]
invalid = [1, 2.3, False, None]
elif param_name == 'login_input':
valid = [self.valid_login]
invalid = [1, 2.3, False, None]
else:
raise ValueError(f"test_data_validation(): parameter test not implemented: '{param_name}'. Please remove or implement it")
for data in valid:
self.remove_test_user_if_exists()
self.assert_request_param_good(valid_payload=payload, testing_param_name=param_name, testing_data=data)
for data in invalid:
self.remove_test_user_if_exists()
self.assert_request_param_bad(valid_payload=payload, testing_param_name=param_name, testing_data=data)
class TestAPIUsersPanelApiDeleteRow(HttpPostTestCase):
@classmethod
def setUpClass(self):
tear_down()
self.api_name = 'users_panel_api_delete_row'
self.post_response_json_key_specifications = []
self.valid_first_name = 'some_random_first_name'
self.valid_last_name = 'some_random_last_name'
self.valid_username = 'some_random_login'
self.valid_payloads = [
{
'user_id': None,
}
]
@classmethod
def tearDownClass(self):
tear_down()
try:
test_user = Users.objects.using('PerInd').get(login__exact=self.valid_username)
except ObjectDoesNotExist as e:
... ## Good, do nothing
except:
raise
else:
test_user.delete(using='PerInd')
def test_api_accept_only_admins(self):
remove_admin_status()
payload = self.valid_payloads[0]
content = self.post_and_get_json_response(payload)
self.assertTrue((content['post_success']==False) and ("not an admin" in content['post_msg']),
f"api should have detected that user is not an admin and fail\n{content['post_msg']}")
def add_test_user_if_not_exists(self):
test_user = Users.objects.using('PerInd').get_or_create(
login=self.valid_username
,first_name=self.valid_first_name
,last_name=self.valid_last_name
)[0]
test_user.save(using='PerInd')
return test_user
def test_with_valid_data(self):
grant_admin_status()
for payload in self.valid_payloads:
user_obj = self.add_test_user_if_not_exists()
payload['user_id'] = user_obj.user_id
self.assert_post_with_valid_payload_is_success(payload=payload)
## Check if data was deleted correctly
try:
saved_object = Users.objects.using('PerInd').get(login__exact=self.valid_username)
except ObjectDoesNotExist as e:
... ## Good, do nothing
except Exception as e:
raise ValueError(f"test_with_valid_data(): {e}")
else:
self.assertTrue(False, f"{saved_object.login} still exists in the database, unable to delete user")
def test_data_validation(self):
grant_admin_status()
payload = self.valid_payloads[0]
parameters = [
# Parameter name # Accepted type
"user_id" # str -> user id
]
for param_name in parameters:
if param_name == 'user_id':
valid = [self.add_test_user_if_not_exists().user_id]
invalid = [1, 2.3, False, None]
else:
raise ValueError(f"test_data_validation(): parameter test not implemented: '{param_name}'. Please remove or implement it")
for data in valid:
self.add_test_user_if_not_exists()
self.assert_request_param_good(valid_payload=payload, testing_param_name=param_name, testing_data=data)
for data in invalid:
self.add_test_user_if_not_exists()
self.assert_request_param_bad(valid_payload=payload, testing_param_name=param_name, testing_data=data)
class TestAPIUsersPanelApiUpdateData(HttpPostTestCase):
@classmethod
def setUpClass(self):
tear_down()
self.api_name = 'users_panel_api_update_row'
self.post_response_json_key_specifications = []
self.valid_first_name = 'some_random_first_name'
self.valid_last_name = 'some_random_last_name'
self.valid_username = 'some_random_login'
self.valid_first_name_input = 'hello_first'
self.valid_last_name_input = 'hello_last'
self.valid_payloads = [
{
'id' : None,
'table' : "Users",
                'column'
import io
import time
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
from scipy.ndimage import convolve
# numpy representation of the RPi camera module v1 Bayer Filter
bayerGrid = np.zeros((1944, 2592, 3), dtype=np.uint8)
bayerGrid[1::2, 0::2, 0] = 1  # Red
bayerGrid[0::2, 0::2, 1] = 1  # Green
bayerGrid[1::2, 1::2, 1] = 1  # Green
bayerGrid[0::2, 1::2, 2] = 1  # Blue
def get_bayer_grid(width: int, height: int, dtype=float):
    """Create a Numpy bayer grid representation for a given size.

    Uses the BGGR pattern of the Raspberry Pi camera sensor.
    """
    bayerGrid = np.zeros((height, width, 3), dtype=dtype)
    bayerGrid[1::2, 0::2, 0] = 1  # Red
    bayerGrid[0::2, 0::2, 1] = 1  # Green
    bayerGrid[1::2, 1::2, 1] = 1  # Green
    bayerGrid[0::2, 1::2, 2] = 1  # Blue
    return bayerGrid
def debayerize(bayer_data, bayer_grid=None):
"""Interpolate bayer data of an image using nearest-neighbor."""
if bayer_grid is None:
h, w, d = bayer_data.shape
bayer_grid = get_bayer_grid(width=w, height=h, dtype=bayer_data.dtype)
    kernel = np.ones((3, 3), dtype=float)  # np.float was removed from NumPy; use the builtin float
data_conv = rgb_convolve(bayer_data, kernel)
grid_conv = rgb_convolve(bayer_grid, kernel)
interpolated = data_conv / grid_conv
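    # grid_conv counts, per pixel and channel, how many of the 3x3 neighbours carry a
    # sample of that colour (missing cells are 0), so the ratio is a local average of
    # the available samples rather than a plain box blur.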
# return interpolated
# fill in missing data in bayer_data with interpolated
result = bayer_data.copy()
result[bayer_grid == 0] = interpolated[bayer_grid == 0]
return result
def get_rgb_array(fp, dtype=np.uint64, width: int = None, height: int = None):
"""Return a 3-dimensional RGB numpy array of an image."""
im = Image.open(fp)
if height is not None or width is not None:
cwidth, cheight = im.size
if width is None:
width = int(height * cwidth/cheight)
elif height is None:
height = int(width * cheight/cwidth)
im = im.resize((width, height))
return np.array(im, dtype=dtype)
def get_bw_array(fp, dtype=np.uint64, width: int = None, height: int = None):
"""Return a 2-dimensional black-and-white numpy array of an image."""
a = get_rgb_array(fp, dtype=dtype, width=width, height=height)
return np.mean(a, axis=2)
def rgb_convolve(image, kernel, mode='constant', cval=0.0, **kwargs):
"""Apply a convolution kernel to the RGB layers of an image independently.
This applies scipy.ndimage.convolve with any additional parameters to the R,
G, and B slices of array `image`.
:param image: 3-dimensional numpy array of the image.
:param kernel: 2-dimensional numpy array to convolve with the image.
"""
res = np.zeros(image.shape, dtype=image.dtype)
for i in range(3):
res[:,:,i] = convolve(image[:,:,i], kernel, mode=mode, cval=cval, **kwargs)
return res
def getBayer(filename: str, ver: int = 1):
"""Return the Bayer data from an RPi camera image.
Note: this requires the Bayer output to be appended to the end of the
image file. This can be done from the commandline by passing the `--raw`
flag into the `raspistill` program and from the `picamera` Python module by
passing `bayer=True` into the `PiCamera.capture` function.
This uses code from the `picamera` module's documentation section on "Raw
Bayer data captures". See https://picamera.readthedocs.io/en/release-1.13/recipes2.html#raw-bayer-data-captures
:param ver: Version of the Raspberry Pi camera. Either 1 or 2.
"""
offset = {
1: 6404096,
2: 10270208,
}[ver]
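    # These offsets are the sizes of the raw block that raspistill appends after the
    # JPEG data: 6,404,096 bytes for the V1 (OV5647) sensor and 10,270,208 bytes for
    # the V2 (IMX219) sensor, so keeping the last `offset` bytes isolates that block.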
# open file and extract bayer data
with open(filename, 'rb') as f:
data = f.read()[-offset:]
assert data[:4] == b'BRCM', "Could not find bayer data header"
data = data[32768:] # strip header data
data = np.frombuffer(data, dtype=np.uint8)
# For the V1 module, the data consists of 1952 rows of 3264 bytes of data.
# The last 8 rows of data are unused (they only exist because the maximum
# resolution of 1944 rows is rounded up to the nearest 16).
#
# For the V2 module, the data consists of 2480 rows of 4128 bytes of data.
# There's actually 2464 rows of data, but the sensor's raw size is 2466
# rows, rounded up to the nearest multiple of 16: 2480.
#
# Likewise, the last few bytes of each row are unused (why?). Here we
# reshape the data and strip off the unused bytes.
reshape, crop = {
1: ((1952, 3264), (1944, 3240)),
2: ((2480, 4128), (2464, 4100)),
}[ver]
data = data.reshape(reshape)[:crop[0], :crop[1]]
# Horizontally, each row consists of 10-bit values. Every four bytes are
# the high 8-bits of four values, and the 5th byte contains the packed low
# 2-bits of the preceding four values. In other words, the bits of the
# values A, B, C, D and arranged like so:
#
# byte 1 byte 2 byte 3 byte 4 byte 5
# AAAAAAAA BBBBBBBB CCCCCCCC DDDDDDDD AABBCCDD
#
# Here, we convert our data into a 16-bit array, shift all values left by
# 2-bits and unpack the low-order bits from every 5th byte in each row,
# then remove the columns containing the packed bits
data = data.astype(np.uint16) << 2
for byte in range(4):
data[:, byte::5] |= ((data[:, 4::5] >> ((4 - byte) * 2)) & 0b11)
data = np.delete(data, np.s_[4::5], 1)
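    # Worked example (illustrative values only): take one group with 10-bit samples
    # A=0b1010101011, B=0b1111000001, C=0b0000111110, D=0b1100110000. The sensor stores
    # bytes 0b10101010, 0b11110000, 0b00001111, 0b11001100 plus the packed fifth byte
    # 0b11011000 (low bit-pairs of A, B, C, D from high to low). After the `<< 2` above,
    # the packed value is 0b1101100000; shifting it right by 8, 6, 4 or 2 and masking
    # with 0b11 yields 0b11, 0b01, 0b10, 0b00, which are OR-ed back into A..D to
    # reconstruct the full 10-bit samples.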
# Now to split the data up into its red, green, and blue components. The
# Bayer pattern of the OV5647 sensor is BGGR. In other words the first
# row contains alternating green/blue elements, the second row contains
# alternating red/green elements, and so on as illustrated below:
#
# GBGBGBGBGBGBGB
# RGRGRGRGRGRGRG
# GBGBGBGBGBGBGB
# RGRGRGRGRGRGRG
#
# Please note that if you use vflip or hflip to change the orientation
# of the capture, you must flip the Bayer pattern accordingly
rgb = np.zeros(data.shape + (3,), dtype=data.dtype)
    rgb[1::2, 0::2, 0] = data[1::2, 0::2]  # Red
    rgb[0::2, 0::2, 1] = data[0::2, 0::2]  # Green
    rgb[1::2, 1::2, 1] = data[1::2, 1::2]  # Green
    rgb[0::2, 1::2, 2] = data[0::2, 1::2]  # Blue
uint16_to_uint8 = lambda a: (a * (255/1023)).astype(np.uint8) # note, this only works b/c the values are actually 10-bit
# uint16_to_uint8 = lambda a: (a >> 2).astype(np.uint8) # or bit-shift as suggested at the end
rgb8 = uint16_to_uint8(rgb)
np.max(rgb8)
return rgb8
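# Hedged usage sketch (the file name is an assumption; the image must have been taken
# with `raspistill --raw` or `PiCamera.capture(..., bayer=True)` so that the Bayer
# block is appended to it):
#
#     raw = getBayer("capture.jpg", ver=1).astype(np.float64)
#     rgb = debayerize(raw)              # nearest-neighbour style interpolation
#     plt.imshow(rgb.astype(np.uint8))
#     plt.show()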
def debayer_malvar(img):
"""Debayer an image using the method proposed in Malvar et al.
This method uses different filters for different cells in the
The default values for alpha, beta, and gamma are the approximate Wiener
values proposed by the paper.
"""
def norm(k):
return k / np.sum(k)
# kernels for processing
GatR = np.array([[0,0,-1,0,0],
[0,0,2,0,0],
[-1,2,4,2,-1],
[0,0,2,0,0],
[0,0,-1,0,0]]) # Green at red pixels
GatB = GatR
RatGRB = np.array([[0,0,.5,0,0],
[0,-1,0,-1,0],
[-1,4,5,4,-1],
[0,-1,0,-1,0],
[0,0,.5,0,0]]) # Red at Green, in Red row, Blue column
RatGBR = RatGRB.T
BatGBR = RatGRB
BatGRB = RatGBR
RatB = np.array([[0,0,-1.5,0,0],
[0,2,0,2,0],
[-1.5,0,6,0,-1.5],
[0,2,0,2,0],
[0,0,-1.5,0,0]])
BatR = RatB
# slices for grabbing specific colors
Grows1 = slice(None,None,2)
Gcols1 = Grows1
Grows2 = slice(1,None,2)
Gcols2 = Grows2
Rrows = slice(1,None,2)
Rcols = slice(None,None,2)
Brows = slice(None,None,2)
Bcols = slice(1,None,2)
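    # With the BGGR layout used throughout this file (even rows G B G B ..., odd rows
    # R G R G ...): red lives at [1::2, 0::2], blue at [0::2, 1::2], Grows1/Gcols1 pick
    # the greens on the blue rows and Grows2/Gcols2 the greens on the red rows.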
# indices for colors (cols, rows)
# ideally these could be used directly, but the star operator doesn't seem
# to work with slicing
# iGatR = (Rcols,Rrows)
# iGatB = (Bcols,Brows)
# iRatGRB = (Gcols1,Grows1)
# iBatGRB = iRatGRB
# iRatGBR = (Gcols2,Grows2)
# iBatGBR = iRatGBR
# iRatB = (Bcols,Brows)
# iBatR = (Rcols,Rrows)
# actual demosaicing
b = img.copy().sum(axis=2) # flatten bayer data into 2-dimensional array
debayered = img.copy() # array to return, initially bayer data
dGatR = convolve(b, norm(GatR)) # data for GatR and GatB
debayered[Rrows,Rcols,1] = dGatR[Rrows,Rcols]
debayered[Brows,Bcols,1] = dGatR[Brows,Bcols]
dRatB = convolve(b, norm(RatB)) # data for RatB and BatR
debayered[Brows,Bcols,0] = dRatB[Brows,Bcols]
debayered[Rrows,Rcols,2] = dRatB[Rrows,Rcols]
dRatGRB = convolve(b, norm(RatGRB)) # data for RatGRB and BatGBR
debayered[Grows1,Gcols1,0] = dRatGRB[Grows1,Gcols1]
debayered[Grows2,Gcols2,2] = dRatGRB[Grows2,Gcols2]
dRatGBR = convolve(b, norm(RatGBR)) # data for RatGBR and BatGRB
debayered[Grows2,Gcols2,0] = dRatGBR[Grows2,Gcols2]
debayered[Grows1,Gcols1,2] = dRatGBR[Grows1,Gcols1]
return debayered
def psnr_rgb(control, test, max_value=1.0):
"""Calculate the peak signal-to-noise ratio in dB between two RGB images."""
def mse_rgb(control, test):
"""Calculate the mean squared error between two images."""
        return np.sum((control - test) ** 2) / control.size  # divide by the number of elements, not the sum of the shape
assert control.shape == test.shape, 'Images are not the same size.'
assert not np.array_equal(control, test), 'Images are identical.'
return 10 * np.log10((max_value*3)**2 / mse_rgb(control, test))
def debayer_nearest_neighbor(bayer_data):
"""Demosaic an image using the nearest neighbor algorithm."""
h, w, d = bayer_data.shape
    bayer_grid = get_bayer_grid(width=w, height=h, dtype=bool)  # boolean mask so `~` is valid
    debayered = bayer_data.copy()
    roll1 = np.roll(debayered, shift=1, axis=0)
    debayered[~bayer_grid] = roll1[~bayer_grid]
    post_roll_grid = np.roll(bayer_grid, shift=1, axis=0) | bayer_grid
    roll2 = np.roll(debayered, shift=1, axis=1)  # shift again for R and B values
    debayered[~post_roll_grid] = roll2[~post_roll_grid]
from __future__ import division
from builtins import str
from builtins import range
from past.utils import old_div
import proteus
from proteus.mprans.cNCLS import *
class SubgridError(proteus.SubgridError.SGE_base):
def __init__(self, coefficients, nd):
proteus.SubgridError.SGE_base.__init__(self, coefficients, nd, False)
def initializeElementQuadrature(self, mesh, t, cq):
for ci in range(self.nc):
cq[('dH_sge', ci, ci)] = cq[('dH', ci, ci)]
def calculateSubgridError(self, q):
pass
def updateSubgridErrorHistory(self, initializationPhase=False):
pass
class ShockCapturing(proteus.ShockCapturing.ShockCapturing_base):
def __init__(self, coefficients, nd, shockCapturingFactor=0.25, lag=True, nStepsToDelay=None):
proteus.ShockCapturing.ShockCapturing_base.__init__(self, coefficients, nd, shockCapturingFactor, lag)
self.nStepsToDelay = nStepsToDelay
self.nSteps = 0
if self.lag:
logEvent("NCLS.ShockCapturing: lagging requested but must lag the first step; switching lagging off and delaying")
self.nStepsToDelay = 1
self.lag = False
def initializeElementQuadrature(self, mesh, t, cq):
self.mesh = mesh
self.numDiff = []
self.numDiff_last = []
for ci in range(self.nc):
self.numDiff.append(cq[('numDiff', ci, ci)])
self.numDiff_last.append(cq[('numDiff', ci, ci)])
def updateShockCapturingHistory(self):
self.nSteps += 1
if self.lag:
for ci in range(self.nc):
self.numDiff_last[ci][:] = self.numDiff[ci]
if self.lag == False and self.nStepsToDelay is not None and self.nSteps > self.nStepsToDelay:
logEvent("NCLS.ShockCapturing: switched to lagged shock capturing")
self.lag = True
self.numDiff_last = []
for ci in range(self.nc):
self.numDiff_last.append(self.numDiff[ci].copy())
logEvent("NCLS: max numDiff %e" % (globalMax(self.numDiff_last[0].max()),))
class NumericalFlux(proteus.NumericalFlux.HamiltonJacobi_DiagonalLesaintRaviart):
def __init__(self, vt, getPointwiseBoundaryConditions,
getAdvectiveFluxBoundaryConditions,
getDiffusiveFluxBoundaryConditions,
getPeriodicBoundaryConditions=None):
proteus.NumericalFlux.HamiltonJacobi_DiagonalLesaintRaviart.__init__(self, vt, getPointwiseBoundaryConditions,
getAdvectiveFluxBoundaryConditions,
getDiffusiveFluxBoundaryConditions)
class RKEV(proteus.TimeIntegration.SSP):
"""
Wrapper for SSPRK time integration using EV
... more to come ...
"""
def __init__(self, transport, timeOrder=1, runCFL=0.1, integrateInterpolationPoints=False):
SSP.__init__(self, transport, integrateInterpolationPoints=integrateInterpolationPoints)
self.runCFL = runCFL
self.dtLast = None
self.isAdaptive = True
# About the cfl
assert transport.coefficients.STABILIZATION_TYPE > 0, "SSP method just works for edge based EV methods; i.e., STABILIZATION_TYPE>0"
assert hasattr(transport, 'edge_based_cfl'), "No edge based cfl defined"
self.cfl = transport.edge_based_cfl
# Stuff particular for SSP
self.timeOrder = timeOrder # order of approximation
self.nStages = timeOrder # number of stages total
self.lstage = 0 # last stage completed
# storage vectors
self.u_dof_last = {}
# per component stage values, list with array at each stage
self.u_dof_stage = {}
for ci in range(self.nc):
if ('m', ci) in transport.q:
self.u_dof_last[ci] = transport.u[ci].dof.copy()
self.u_dof_stage[ci] = []
for k in range(self.nStages + 1):
self.u_dof_stage[ci].append(transport.u[ci].dof.copy())
def choose_dt(self):
maxCFL = 1.0e-6
maxCFL = max(maxCFL, globalMax(self.cfl.max()))
self.dt = old_div(self.runCFL, maxCFL)
if self.dtLast is None:
self.dtLast = self.dt
self.t = self.tLast + self.dt
self.substeps = [self.t for i in range(self.nStages)] # Manuel is ignoring different time step levels for now
def initialize_dt(self, t0, tOut, q):
"""
Modify self.dt
"""
self.tLast = t0
self.choose_dt()
self.t = t0 + self.dt
def setCoefficients(self):
"""
beta are all 1's here
mwf not used right now
"""
self.alpha = numpy.zeros((self.nStages, self.nStages), 'd')
self.dcoefs = numpy.zeros((self.nStages), 'd')
def updateStage(self):
"""
Need to switch to use coefficients
"""
self.lstage += 1
assert self.timeOrder in [1, 2, 3]
assert self.lstage > 0 and self.lstage <= self.timeOrder
if self.timeOrder == 3:
if self.lstage == 1:
logEvent("First stage of SSP33 method", level=4)
for ci in range(self.nc):
self.u_dof_stage[ci][self.lstage][:] = self.transport.u[ci].dof
# update u_dof_old
self.transport.u_dof_old[:] = self.u_dof_stage[ci][self.lstage]
elif self.lstage == 2:
logEvent("Second stage of SSP33 method", level=4)
for ci in range(self.nc):
self.u_dof_stage[ci][self.lstage][:] = self.transport.u[ci].dof
self.u_dof_stage[ci][self.lstage] *= old_div(1., 4.)
self.u_dof_stage[ci][self.lstage] += 3. / 4. * self.u_dof_last[ci]
# Update u_dof_old
self.transport.u_dof_old[:] = self.u_dof_stage[ci][self.lstage]
elif self.lstage == 3:
logEvent("Third stage of SSP33 method", level=4)
for ci in range(self.nc):
self.u_dof_stage[ci][self.lstage][:] = self.transport.u[ci].dof
self.u_dof_stage[ci][self.lstage] *= old_div(2.0, 3.0)
self.u_dof_stage[ci][self.lstage] += 1.0 / 3.0 * self.u_dof_last[ci]
# update u_dof_old
self.transport.u_dof_old[:] = self.u_dof_last[ci]
# update solution to u[0].dof
self.transport.u[ci].dof[:] = self.u_dof_stage[ci][self.lstage]
elif self.timeOrder == 2:
if self.lstage == 1:
logEvent("First stage of SSP22 method", level=4)
for ci in range(self.nc):
self.u_dof_stage[ci][self.lstage][:] = self.transport.u[ci].dof
# Update u_dof_old
self.transport.u_dof_old[:] = self.transport.u[ci].dof
elif self.lstage == 2:
logEvent("Second stage of SSP22 method", level=4)
for ci in range(self.nc):
self.u_dof_stage[ci][self.lstage][:] = self.transport.u[ci].dof
self.u_dof_stage[ci][self.lstage][:] *= old_div(1., 2.)
self.u_dof_stage[ci][self.lstage][:] += 1. / 2. * self.u_dof_last[ci]
# update u_dof_old
self.transport.u_dof_old[:] = self.u_dof_last[ci]
# update solution to u[0].dof
self.transport.u[ci].dof[:] = self.u_dof_stage[ci][self.lstage]
else:
assert self.timeOrder == 1
for ci in range(self.nc):
self.u_dof_stage[ci][self.lstage][:] = self.transport.u[ci].dof[:]
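# Descriptive summary of the stage updates above (assuming transport.u[ci].dof
# holds the forward-Euler update of the current stage and u_dof_last holds u^n):
#   SSP22: u^(1) = u^n + dt*L(u^n)
#          u^(n+1) = 1/2*u^n + 1/2*(u^(1) + dt*L(u^(1)))
#   SSP33: u^(1) = u^n + dt*L(u^n)
#          u^(2)  = 3/4*u^n + 1/4*(u^(1) + dt*L(u^(1)))
#          u^(n+1) = 1/3*u^n + 2/3*(u^(2) + dt*L(u^(2)))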
def initializeTimeHistory(self, resetFromDOF=True):
"""
Push necessary information into time history arrays
"""
for ci in range(self.nc):
self.u_dof_last[ci][:] = self.transport.u[ci].dof[:]
def updateTimeHistory(self, resetFromDOF=False):
"""
assumes successful step has been taken
"""
self.t = self.tLast + self.dt
for ci in range(self.nc):
self.u_dof_last[ci][:] = self.transport.u[ci].dof[:]
for k in range(self.nStages):
self.u_dof_stage[ci][k][:] = self.transport.u[ci].dof[:]
self.lstage = 0
self.dtLast = self.dt
self.tLast = self.t
def generateSubsteps(self, tList):
"""
create list of substeps over time values given in tList. These correspond to stages
"""
self.substeps = []
tLast = self.tLast
for t in tList:
dttmp = t - tLast
self.substeps.extend([tLast + dttmp for i in range(self.nStages)])
tLast = t
def resetOrder(self, order):
"""
initialize data structures for stage updates
"""
self.timeOrder = order # order of approximation
self.nStages = order # number of stages total
self.lstage = 0 # last stage completed
# storage vectors
# per component stage values, list with array at each stage
self.u_dof_stage = {}
for ci in range(self.nc):
if ('m', ci) in self.transport.q:
self.u_dof_stage[ci] = []
for k in range(self.nStages + 1):
self.u_dof_stage[ci].append(self.transport.u[ci].dof.copy())
self.substeps = [self.t for i in range(self.nStages)]
def setFromOptions(self, nOptions):
"""
allow classes to set various numerical parameters
"""
if 'runCFL' in dir(nOptions):
self.runCFL = nOptions.runCFL
flags = ['timeOrder']
for flag in flags:
if flag in dir(nOptions):
val = getattr(nOptions, flag)
setattr(self, flag, val)
if flag == 'timeOrder':
self.resetOrder(self.timeOrder)
class Coefficients(proteus.TransportCoefficients.TC_base):
from proteus.ctransportCoefficients import ncLevelSetCoefficientsEvaluate
def __init__(self,
V_model=0,
RD_model=None,
ME_model=1,
checkMass=True,
epsFact=1.5,
useMetrics=0.0,
sc_uref=1.0,
sc_beta=1.0,
waterline_interval=-1,
movingDomain=False,
PURE_BDF=False,
# PARAMETERS FOR EV
STABILIZATION_TYPE=0,
LUMPED_MASS_MATRIX=False,
ENTROPY_TYPE=1, # polynomial, u=0.5*u^2
cE=1.0,
# COUPEZ AND REDISTANCING PARAMETERS
DO_SMOOTHING=False,
DO_REDISTANCING=False,
pure_redistancing=False,
COUPEZ=False,
SATURATED_LEVEL_SET=False,
epsCoupez=0.1,
epsFactRedistancing=0.33, # For the signed distance function
redistancing_tolerance=0.1,
maxIter_redistancing=3,
lambda_coupez=0.1,
cfl_redistancing=1.0,
# OUTPUT quantDOFs
outputQuantDOFs=False,
# NULLSPACE Info
nullSpace='NoNullSpace',
initialize=True):
self.PURE_BDF=PURE_BDF
self.DO_SMOOTHING = DO_SMOOTHING
self.COUPEZ = COUPEZ
self.SATURATED_LEVEL_SET = SATURATED_LEVEL_SET
self.DO_REDISTANCING = DO_REDISTANCING
self.ENTROPY_TYPE = ENTROPY_TYPE
self.cE = cE
self.LUMPED_MASS_MATRIX = LUMPED_MASS_MATRIX
self.STABILIZATION_TYPE = STABILIZATION_TYPE
self.epsFactRedistancing = epsFactRedistancing
self.pure_redistancing = pure_redistancing
self.maxIter_redistancing = maxIter_redistancing
self.redistancing_tolerance = redistancing_tolerance
self.cfl_redistancing = cfl_redistancing
self.epsCoupez = epsCoupez
self.lambda_coupez = lambda_coupez
self.outputQuantDOFs = outputQuantDOFs
self.movingDomain = movingDomain
self.useMetrics = useMetrics
self.epsFact = epsFact
self.variableNames = ['phi']
self.flowModelIndex = V_model
self.modelIndex = ME_model
self.RD_modelIndex = RD_model
self.checkMass = checkMass
self.sc_uref = sc_uref
self.sc_beta = sc_beta
self.waterline_interval = waterline_interval
self.nullSpace = nullSpace
if initialize:
self.initialize()
def initialize(self):
nc = 1
mass = {0: {0: 'linear'}}
hamiltonian = {0: {0: 'linear'}}
advection = {}
diffusion = {}
potential = {}
reaction = {}
TC_base.__init__(self,
nc,
mass,
advection,
diffusion,
potential,
reaction,
hamiltonian,
['phi'],
movingDomain=self.movingDomain)
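# Note: the dictionaries above declare a single-component ('phi') Hamilton-Jacobi
# level-set equation, phi_t + v.grad(phi) = 0, with linear mass and a linear
# Hamiltonian; advection, diffusion, potential and reaction terms are left empty.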
def attachModels(self, modelList):
# the level set model
self.model = modelList[self.modelIndex]
# the velocity
if self.flowModelIndex >= 0:
self.flowModel = modelList[self.flowModelIndex]
self.q_v = modelList[self.flowModelIndex].q[('velocity', 0)]
self.ebqe_v = modelList[self.flowModelIndex].ebqe[('velocity', 0)]
if ('velocity', 0) in modelList[self.flowModelIndex].ebq:
self.ebq_v = modelList[self.flowModelIndex].ebq[('velocity', 0)]
else:
self.ebq_v = None
if ('u', 0) not in self.model.ebq and ('u', 0) in self.flowModel.ebq:
self.model.ebq[('u', 0)] = numpy.zeros(self.flowModel.ebq[('u', 0)].shape, 'd')
self.model.ebq[('grad(u)', 0)] = numpy.zeros(self.flowModel.ebq[('grad(u)', 0)].shape, 'd')
if ('v', 1) in self.flowModel.ebq:
self.model.u[0].getValuesTrace(self.flowModel.ebq[('v', 1)], self.model.ebq[('u', 0)])
self.model.u[0].getGradientValuesTrace(self.flowModel.ebq[('grad(v)', 1)], self.model.ebq[('grad(u)', 0)])
if self.RD_modelIndex is not None:
# print self.RD_modelIndex,len(modelList)
self.rdModel = modelList[self.RD_modelIndex]
self.ebqe_rd_u = self.rdModel.ebqe[('u',0)]
def initializeElementQuadrature(self, t, cq):
if self.flowModelIndex is None:
self.q_v = numpy.ones(cq[('grad(u)', 0)].shape, 'd')
def initializeElementBoundaryQuadrature(self, t, cebq, cebq_global):
if self.flowModelIndex is None:
self.ebq_v = numpy.ones(cebq[('grad(u)', 0)].shape, 'd')
def initializeGlobalExteriorElementBoundaryQuadrature(self, t, cebqe):
if self.flowModelIndex is None:
self.ebqe_v = numpy.ones(cebqe[('grad(u)', 0)].shape, 'd')
if self.RD_modelIndex is None:
self.ebqe_rd_u = cebqe[('u',0)]
def preStep(self, t, firstStep=False):
# SAVE OLD SOLUTION #
self.model.u_dof_old[:] = self.model.u[0].dof
# COMPUTE NEW VELOCITY (if given by user) #
if self.model.hasVelocityFieldAsFunction:
self.model.updateVelocityFieldAsFunction()
if self.checkMass:
self.m_pre = Norms.scalarSmoothedHeavisideDomainIntegral(self.epsFact,
self.model.mesh.elementDiametersArray,
self.model.q['dV'],
self.model.q[('m',0)],
self.model.mesh.nElements_owned)
logEvent("Phase 0 mass before NCLS step = %12.5e" % (self.m_pre,),level=2)
self.m_last = Norms.scalarSmoothedHeavisideDomainIntegral(self.epsFact,
self.model.mesh.elementDiametersArray,
self.model.q['dV'],
self.model.timeIntegration.m_last[0],
self.model.mesh.nElements_owned)
logEvent("Phase 0 mass before NCLS step (m_last) = %12.5e" % (self.m_last,),level=2)
copyInstructions = {}
return copyInstructions
def postStep(self, t, firstStep=False):
self.model.q['dV_last'][:] = self.model.q['dV']
if self.checkMass:
self.m_post = Norms.scalarSmoothedHeavisideDomainIntegral(self.epsFact,
self.model.mesh.elementDiametersArray,
self.model.q['dV'],
self.model.q[('u',0)],
self.model.mesh.nElements_owned)
logEvent("Phase 0 mass after NCLS step = %12.5e" % (self.m_post,),level=2)
# #need a flux here not a velocity
# self.fluxIntegral = Norms.fluxDomainBoundaryIntegralFromVector(self.flowModel.ebqe['dS'],
# self.flowModel.ebqe[('velocity',0)],
# self.flowModel.ebqe['n'],
# self.model.mesh)
# logEvent("Flux integral = %12.5e" % (self.fluxIntegral,),level=2)
# logEvent("Phase 0 mass conservation after NCLS step = %12.5e" % (self.m_post - self.m_last + | |
case
h = self.t_bound - t
y_new = y + h * self.f
self.h_previous = h
self.y_old = y
self.t = self.t_bound
self.y = y_new
self.f = None # signals _dense_output_impl
logging.warning(
'Linear extrapolation was used in the final step.')
return None, min_step
return h_abs, min_step
def _estimate_error(self, K, h):
# exclude K[-1] if not FSAL. It could contain nan or inf
return h * (K[:self.n_stages + self.FSAL].T @
self.E[:self.n_stages + self.FSAL])
def _estimate_error_norm(self, K, h, scale):
return norm(self._estimate_error(K, h) / scale)
def _comp_sol_err(self, y, h):
"""Compute solution and error.
The calculation of `scale` differs from scipy: the average of abs(y) of
the old and new solution values is used instead of the maximum.
"""
y_new = y + h * (self.K[:self.n_stages].T @ self.B)
scale = self.atol + self.rtol * 0.5*(np.abs(y) + np.abs(y_new))
if self.FSAL:
# do FSAL evaluation if needed for error estimate
self.K[self.n_stages, :] = self.fun(self.t + h, y_new)
error_norm = self._estimate_error_norm(self.K, h, scale)
return y_new, error_norm
def _rk_stage(self, h, i):
"""compute a single RK stage"""
dy = h * (self.K[:i, :].T @ self.A[i, :i])
self.K[i] = self.fun(self.t + self.C[i] * h, self.y + dy)
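# This is the generic explicit Runge-Kutta stage
#   K[i] = f(t + c_i*h, y + h * sum_{j<i} a_ij * K[j]),
# with A strictly lower triangular, so only previously computed stages enter.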
def _dense_output_impl(self):
"""return denseOutput, detect if step was extrapolated linearly"""
if self.f is None:
# output was extrapolated linearly
return LinearDenseOutput(self.t_old, self.t, self.y_old, self.y)
# normal output
Q = self.K.T @ self.P
return HornerDenseOutput(self.t_old, self.t, self.y_old, Q)
def _diagnose_stiffness(self):
"""Stiffness detection.
Test only if there are many recent step failures, or after many
function evaluations have been done.
Warn the user if the problem is diagnosed as stiff.
Original source: RKSuite.f, https://www.netlib.org/ode/rksuite/
"""
if self.nfev_stiff_detect == 0:
return
self.okstp += 1
h = self.h_previous
self.havg = 0.9 * self.havg + 0.1 * h # exp moving average
# reset after the first 20 steps to:
# - get stepsize on scale
# - reduce the effect of a possible initial transient
if self.okstp == 20:
self.havg = h
self.jflstp = 0
# There are lots of failed steps (lotsfl = True) if 10 or more step
# failures occurred in the last 40 successful steps.
if self.okstp % 40 == 39:
lotsfl = self.jflstp >= 10
self.jflstp = 0 # reset each 40 steps
else:
lotsfl = False
# Test for stiffness after every nfev_stiff_detect function evaluations;
# in that case toomch = True
many_steps = self.nfev_stiff_detect//self.n_stages
toomch = self.okstp % many_steps == many_steps - 1
# If either too much work has been done or there are lots of failed
# steps, test for stiffness.
if toomch or lotsfl:
# Regenerate weight vector
avgy = 0.5 * (np.abs(self.y) + np.abs(self.y_old))
tiny = np.finfo(self.y.dtype).tiny
wt = np.maximum(avgy, sqrt(tiny))
# and error vector, which is a good initial perturbation vector
v0 = np.atleast_1d(self._estimate_error(self.K, self.h_previous))
# stiff_a determines whether the problem is stiff. In some
# circumstances it is UNSURE. The decision depends on two things:
# whether the step size is being restricted on grounds of stability
# and whether the integration to t_bound can be completed in no
# more than nfev_stiff_detect function evaluations.
stif, rootre = stiff_a(
self.fun, self.t, self.y, self.h_previous, self.havg,
self.t_bound, self.nfev_stiff_detect, wt, self.f, v0,
self.n_stages, self.stbrad, self.tanang)
# inform the user about stiffness with warning messages
# the messages about remaining work have been removed from the
# original code.
if stif is None:
# unsure about stiffness
if rootre is None:
# no warning is given
logging.info('Stiffness detection did not converge')
if not rootre:
# A complex pair of roots has been found near the imaginary
# axis, where the stability boundary of the method is not
# well defined.
# A warning is given only if there are many failed steps.
# This reduces the chance of a false positive diagnosis.
if lotsfl:
warn('Your problem has a complex pair of dominant '
'roots near the imaginary axis. There are '
'many recently failed steps. You should '
'probably change to a code intended for '
'oscillatory problems.')
else:
logging.info(
'The problem has a complex pair of dominant roots '
'near the imaginary axis. There are not many '
'failed steps.')
else:
# this should not happen
logging.warning(
'stif=None, rootre=True; this should not happen')
elif stif:
# the problem is stiff
if rootre is None:
# this should not happen
logging.warning(
'stif=True, rootre=None; this should not happen')
elif rootre:
warn('Your problem has a real dominant root '
'and is diagnosed as stiff. You should probably '
'change to a code intended for stiff problems.')
else:
warn('Your problem has a complex pair of dominant roots '
'and is diagnosed as stiff. You should probably '
'change to a code intended for stiff problems.')
else:
# stif == False
# no warning is given
if rootre is None:
logging.info(
'Stiffness detection has diagnosed the problem as '
'non-stiff, without performing power iterations')
elif rootre:
logging.info(
'The problem has a real dominant root '
'and is not stiff')
else:
logging.info(
'The problem has a complex pair of dominant roots '
'and is not stiff')
def h_start(df, a, b, y, yprime, morder, rtol, atol):
"""h_shart computes a starting step size to be used in solving initial
value problems in ordinary differential equations.
This method is developed by <NAME> and described in [1]_. This function
is a Python translation of the Fortran source code [2]_. The two main
modifications are:
using the RMS norm from scipy.integrate
allowing for complex valued input
Parameters
----------
df : callable
Right-hand side of the system. The calling signature is fun(t, y).
Here t is a scalar. The ndarray y has shape (n,) and fun must
return array_like with the same shape (n,).
a : float
This is the initial point of integration.
b : float
This is a value of the independent variable used to define the
direction of integration. A reasonable choice is to set `b` to the
first point at which a solution is desired. You can also use `b`, if
necessary, to restrict the length of the first integration step, because
the algorithm will not compute a starting step length which is bigger
than abs(b-a), unless `b` has been chosen too close to `a`. (It is
presumed that h_start has been called with `b` different from `a` on
the machine being used.)
y : array_like, shape (n,)
This is the vector of initial values of the n solution components at
the initial point `a`.
yprime : array_like, shape (n,)
This is the vector of derivatives of the n solution components at the
initial point `a`. (defined by the differential equations in
subroutine `df`)
morder : int
This is the order of the formula which will be used by the initial
value method for taking the first integration step.
rtol : float
Relative tolerance used by the differential equation method.
atol : float or array_like
Absolute tolerance used by the differential equation method.
Returns
-------
float
An appropriate starting step size to be attempted by the differential
equation method.
References
----------
.. [1] <NAME>, "Starting step size for an ODE solver", Journal of
Computational and Applied Mathematics, Vol. 9, No. 2, 1983,
pp. 177-191, ISSN 0377-0427.
https://doi.org/10.1016/0377-0427(83)90040-7
.. [2] Slatec Fortran code dstrt.f.
https://www.netlib.org/slatec/src/
"""
# needed to pass scipy unit test:
if y.size == 0:
return np.inf
# compensate for modified call list
neq = y.size
spy = np.empty_like(y)
pv = np.empty_like(y)
etol = atol + rtol * np.abs(y)
# `small` is a small positive machine dependent constant which is used for
# protecting against computations with numbers which are too small relative
# to the precision of floating point arithmetic. `small` should be set to
# (approximately) the smallest positive DOUBLE PRECISION number such that
# (1. + small) > 1. on the machine being used. The quantity small**(3/8)
# is used in computing increments of | |
in ['CatalogNode']:
# blocks with header marker
pass
elif current_block in ['CatalogUGen', 'Parameters']:
# blocks without header marker
pass
elif current_block in ['Areas', 'Sites']:
# blocks without header marker
pass
elif current_block in ['Nodes', 'Branches', 'CatalogBranch']: # blocks without header marker
# repack the data with headers
for tpe in data_structures[current_block].keys():
hdr = __headers__[current_block][tpe][1:]
data = data_structures[current_block][tpe]
try:
data = np.array(data)[:, :len(hdr)] # truncate to the length of hdr
except Exception:
# rows have inconsistent lengths (irregular file format), so pad them to the longest
data2 = list()
# determine the maximum length
lmax = 0
for i in range(len(data)):
l = len(data[i])
if l > lmax:
lmax = l
# pad all rows to length lmax
for i in range(len(data)):
line = data[i]
l = len(line)
d = lmax - l
fill = [0] * d
data2.append(line + fill)
data = np.array(data2)[:, :len(hdr)]
# extend the data
if data.shape[1] < len(hdr):
d = len(hdr) - data.shape[1]
data = np.c_[data, np.zeros((data.shape[0], d))]
df = pd.DataFrame(data=data, columns=hdr)
data_structures[current_block][tpe] = df
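# Net effect of this branch: ragged rows are zero-padded to the longest row,
# truncated or zero-extended to the header length, and wrapped in a DataFrame
# keyed by equipment type (a summary of the code above, no added behaviour).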
if verbose:
print('\n', current_block, ' -> ', tpe)
print(df)
elif current_block in ['DrawObjs', 'Panels']:
# blocks without header marker
pass
else:
logger.append('Block ' + current_block + ' unknown')
return data_structures, logger
def load_dpx(file_name, contraction_factor=1000) -> MultiCircuit:
"""
Read DPX file
:param file_name: file name
:param contraction_factor: divisor applied to the graphical coordinates (GX, GY)
:return: MultiCircuit
"""
circuit = MultiCircuit()
Sbase = 100
circuit.Sbase = Sbase
SQRT3 = np.sqrt(3)
# read the raw data into a structured dictionary
print('Reading file...')
structures_dict, logger = read_dpx_data(file_name=file_name)
# format the read data
print('Packing data...')
data_structures, logger = repack(data_structures=structures_dict, logger=logger)
buses_id_dict = dict()
# create nodes
for tpe in data_structures['Nodes']:
# Airline support post
# __headers__['Nodes']['APOIO'] = ['CLASS', 'ID', 'NAME', 'VBASE', 'GX', 'GY', 'SX', 'SY', 'EXIST']
# __headers__['Nodes']['ARM'] = ['CLASS', 'ID', 'NAME', 'VBASE', 'GX', 'GY', 'SX', 'SY', 'EXIST', 'YEAR']
# __headers__['Nodes']['CX'] = ['CLASS', 'ID', 'NAME', 'VBASE', 'GX', 'GY', 'SX', 'SY', 'EXIST']
# __headers__['Nodes']['CXN'] = ['CLASS', 'ID', 'NAME', 'VBASE', 'GX', 'GY', 'SX', 'SY', 'EXIST']
# __headers__['Nodes']['LOAD'] = ['CLASS', 'ID', 'NAME', 'VBASE', 'GX', 'GY', 'SX', 'SY', 'EXIST', 'VMIN', 'VMAX', 'NCMPLAN'] # fill to fit...
if tpe in ['APOIO', 'ARM', 'CX', 'CXN', 'LOAD']:
df = data_structures['Nodes'][tpe]
for i in range(df.shape[0]):
name = 'B' + str(len(circuit.buses)+1) + '_' + str(df['NAME'].values[i])
Vnom = float(df['VBASE'].values[i])
x = float(df['GX'].values[i]) / contraction_factor
y = float(df['GY'].values[i]) / contraction_factor
id_ = df['ID'].values[i]
bus = Bus(name=name, vnom=Vnom, xpos=x, ypos=y, height=40, width=60)
circuit.add_bus(bus)
buses_id_dict[id_] = bus
# Network Equivalent
# __headers__['Nodes']['EQUIV'] = ['CLASS', 'ID', 'NAME', 'VBASE', 'GX', 'GY', 'SX', 'SY', 'VMIN', 'VMAX', 'ZONE',
# 'SEPNET', 'AUTOUP', 'P', 'Q', 'ELAST', 'SIMUL', 'HTYP', 'HARM5', 'HARM7',
# 'HARM11',
# 'HARM13', 'NOGRW', 'RS', 'XS', 'R1', 'X1', 'R2', 'X2', 'RH', 'XH', 'COM']
elif tpe == 'EQUIV':
df = data_structures['Nodes'][tpe]
for i in range(df.shape[0]):
name = 'B' + str(len(circuit.buses) + 1) + '_' + str(df['NAME'].values[i])
Vnom = float(df['VBASE'].values[i])
x = float(df['GX'].values[i]) / contraction_factor
y = float(df['GY'].values[i]) / contraction_factor
id_ = df['ID'].values[i]
bus = Bus(name=name, vnom=Vnom, xpos=x, ypos=y, height=40, width=60, is_slack=True)
circuit.add_bus(bus)
buses_id_dict[id_] = bus
name = 'LD' + str(len(circuit.buses)) + '_' + str(df['NAME'].values[i])
p = float(df['P'].values[i]) * Sbase
q = float(df['Q'].values[i]) * Sbase
load = Load(name=name, P=p, Q=q)
circuit.add_load(bus, load)
# Generator
# __headers__['Nodes']['GEN'] = ['CLASS', 'ID', 'NAME', 'VBASE', 'GX', 'GY', 'SX', 'SY', 'EXIST', 'MODEL', 'VMIN',
# 'VMAX',
# 'V', 'ENAB', 'P', 'Q', 'QMIN', 'QMAX', 'ELAST', 'HTYP', 'HARM5', 'HARM7',
# 'HARM11',
# 'HARM13', 'VNOM', 'RAT', 'TGEN', 'COST', 'YEAR']
elif tpe == 'GEN':
df = data_structures['Nodes'][tpe]
for i in range(df.shape[0]):
name = 'B' + str(len(circuit.buses) + 1) + '_' + str(df['NAME'].values[i])
Vnom = float(df['VBASE'].values[i])
x = float(df['GX'].values[i]) / contraction_factor
y = float(df['GY'].values[i]) / contraction_factor
id_ = df['ID'].values[i]
bus = Bus(name=name, vnom=Vnom, xpos=x, ypos=y, height=40, width=60)
circuit.add_bus(bus)
buses_id_dict[id_] = bus
mode = int(df['MODEL'].values[i])
if mode == 1:
name = 'GEN' + str(len(circuit.buses)) + '_' + str(df['NAME'].values[i])
p = float(df['P'].values[i]) * Sbase
q = float(df['Q'].values[i]) * Sbase
v = float(df['V'].values[i]) # p.u.
gen = Generator(name=name, active_power=p, voltage_module=v)
circuit.add_generator(bus, gen)
else:
name = 'GENSTAT' + str(len(circuit.buses)) + '_' + str(df['NAME'].values[i])
p = float(df['P'].values[i]) * Sbase
q = float(df['Q'].values[i]) * Sbase
gen = StaticGenerator(name=name, P=p, Q=q)
circuit.add_static_generator(bus, gen)
# Transformation station
# __headers__['Nodes']['PT'] = ['CLASS', 'ID', 'NAME', 'VBASE', 'GX', 'GY', 'SX', 'SY', 'EXIST', 'VMIN', 'VMAX',
# 'ZONE',
# 'ENAB', 'P', 'Q', 'ELAST', 'SIMUL', 'HTYP', 'HARM5', 'HARM7', 'HARM11', 'HARM13',
# 'NOGRW',
# 'EQEXIST', 'EQPOSS1', 'MCOST1', 'ICOST1', 'EQPOSS2', 'MCOST2', 'ICOST2',
# 'EQPOSS3', 'MCOST3',
# 'ICOST3', 'NCLI', 'EQTYPE', 'YEAR', 'COM', 'INFOCOM', 'ID_AUX']
elif tpe in ['PT', 'PTC']:
df = data_structures['Nodes'][tpe]
for i in range(df.shape[0]):
name = 'B' + str(len(circuit.buses) + 1) + '_' + str(df['NAME'].values[i])
Vnom = float(df['VBASE'].values[i])
x = float(df['GX'].values[i]) / contraction_factor
y = float(df['GY'].values[i]) / contraction_factor
id_ = df['ID'].values[i]
bus = Bus(name=name, vnom=Vnom, xpos=x, ypos=y, height=40, width=60)
name = 'LD' + str(len(circuit.buses) + 1) + '_' + str(df['NAME'].values[i])
p = float(df['P'].values[i]) * Sbase
q = float(df['Q'].values[i]) * Sbase
load = Load(name=name, P=p, Q=q)
circuit.add_bus(bus)
circuit.add_load(bus, load)
buses_id_dict[id_] = bus
# Reference node
# __headers__['Nodes']['REF'] = ['CLASS', 'ID', 'NAME', 'VBASE', 'GX', 'GY', 'SX', 'SY', 'VREF', 'RAT',
# 'COST', 'TGEN', 'YEAR']
elif tpe == 'REF':
df = data_structures['Nodes'][tpe]
for i in range(df.shape[0]):
name = 'B' + str(len(circuit.buses) + 1) + '_' + str(df['NAME'].values[i])
Vnom = float(df['VBASE'].values[i])
x = float(df['GX'].values[i]) / contraction_factor
y = float(df['GY'].values[i]) / contraction_factor
id_ = df['ID'].values[i]
bus = Bus(name=name, vnom=Vnom, xpos=x, ypos=y, height=40, width=60, is_slack=True)
circuit.add_bus(bus)
buses_id_dict[id_] = bus
# Voltage Transformer
# __headers__['Nodes']['TT'] = ['CLASS', 'ID', 'NAME', 'VBASE', 'GX', 'GY', 'SX', 'SY', 'EXIST', 'VMIN', 'VMAX',
# 'DISABLE', 'HARM5', 'HARM7', 'HARM11', 'HARM13', 'EQEXIST', 'TAP', 'YEAR',
# 'ID_AUX']
elif tpe == 'TT':
df = data_structures['Nodes'][tpe]
for i in range(df.shape[0]):
name = 'B' + str(len(circuit.buses) + 1) + '_' + str(df['NAME'].values[i])
Vnom = float(df['VBASE'].values[i])
x = float(df['GX'].values[i]) / contraction_factor
y = float(df['GY'].values[i]) / contraction_factor
id_ = df['ID'].values[i]
bus = Bus(name=name, vnom=Vnom, xpos=x, ypos=y, height=40, width=60)
circuit.add_bus(bus)
buses_id_dict[id_] = bus
else:
logger.append(tpe + ' not recognised under Nodes')
# create branches
for tpe in data_structures['Branches']:
# Condenser series or shunt
# __headers__['Branches']['CAP'] = ['CLASS', 'ID', 'NAME', 'ID1', 'ID2', 'EXIST', 'STAT', 'PERM', 'EQ', 'YEAR']
if tpe in ['CAP', 'IND']:
df = data_structures['Branches'][tpe]
for i in range(df.shape[0]):
name = df['NAME'].values[i]
id1 = df['ID1'].values[i]
id2 = df['ID2'].values[i]
b1 = buses_id_dict[id1]
b2 = buses_id_dict[id2]
# get equipment reference in the catalogue
eq_id = df['EQ'].values[i]
df_cat = data_structures['CatalogBranch'][tpe]
cat_elm = df_cat[df_cat['EQ'] == eq_id]
try:
x = float(cat_elm['REAC'].values[0]) * Sbase
except Exception:
x = 1e-20  # reactance missing or non-numeric in the catalogue
br = Branch(bus_from=b1, bus_to=b2, name=name, x=x, branch_type=BranchType.Branch)
circuit.add_branch(br)
# Estimator
# __headers__['Branches']['ESTIM'] = ['CLASS', 'ID', 'NAME', 'ID1', 'ID2', 'INDEP', 'I', 'SIMULT']
if tpe in ['ESTIM']:
df = data_structures['Branches'][tpe]
for i in range(df.shape[0]):
name = df['NAME'].values[i]
id1 = df['ID1'].values[i]
id2 = df['ID2'].values[i]
b1 = buses_id_dict[id1]
b2 = buses_id_dict[id2]
br = Branch(bus_from=b1, bus_to=b2, name=name, branch_type=BranchType.Branch)
circuit.add_branch(br)
# Breaker
# __headers__['Branches']['DISJ'] = ['CLASS', 'ID', 'NAME', 'ID1', 'ID2', 'EXIST', 'STAT', 'PERM', 'FAILRT',
# 'TISOL', 'TRECONF', 'TREPAIR', 'EQ', 'YEAR', 'CONTROL']
# Fuse
# __headers__['Branches']['FUS'] = ['CLASS', 'ID', 'NAME', 'ID1', 'ID2', 'EXIST', 'STAT', 'PERM', 'FAILRT',
# 'TISOL','TRECONF', 'TREPAIR', 'EQ', 'YEAR']
# Switch
# __headers__['Branches']['INTR'] = ['CLASS', 'ID', 'NAME', 'ID1', 'ID2', 'EXIST', 'STAT', 'PERM', 'FAILRT',
# 'TISOL', 'TRECONF', 'TREPAIR', 'EQ', 'YEAR', 'DRIVE', 'CONTROL']
# Disconnector
# __headers__['Branches']['SECC'] = ['CLASS', 'ID', 'NAME', 'ID1', 'ID2', 'EXIST', 'STAT', 'PERM', 'FAILRT',
# 'TISOL', 'TRECONF', 'TREPAIR', 'EQ', 'YEAR', 'DRIVE', 'CONTROL']
if tpe in ['DISJ', 'FUS', 'INTR', 'SECC']:
df = data_structures['Branches'][tpe]
for i in range(df.shape[0]):
name = df['NAME'].values[i]
id1 = df['ID1'].values[i]
id2 = df['ID2'].values[i]
state = bool(int(df['STAT'].values[i]))
b1 = buses_id_dict[id1]
b2 = buses_id_dict[id2]
br = Branch(bus_from=b1, bus_to=b2, name=name, active=state, branch_type=BranchType.Switch)
circuit.add_branch(br)
# Lines, cables and bars
# fill until it fits or truncate the data
# __headers__['Branches']['LINE'] = ['CLASS', 'ID', 'NAME', 'ID1', 'ID2', 'EXIST', 'COLOR', 'GEOLEN', 'LEN',
# 'STAT',
# 'PERM', 'FAILRT', 'TISOL', 'TRECONF', 'TREPAIR', 'RERAT', 'EQEXIST', 'NPOSS',
# 'CHOOSEQ', 'INSRTCOST', 'EQPOSS1', 'MATCOST1', 'EQPOSS2', 'MATCOST2',
# 'EQPOSS3',
# 'MATCOST3', 'NCOOG', 'GX1', 'GY1', 'GX2', 'GY2']
if tpe in ['LINE']:
df = data_structures['Branches'][tpe]
for i in range(df.shape[0]):
name = df['NAME'].values[i]
id1 = df['ID1'].values[i]
id2 = df['ID2'].values[i]
b1 = buses_id_dict[id1]
b2 = buses_id_dict[id2]
length = float(df['LEN'].values[i])
# | |
'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\H1_90_md5_7973783ac2786f9d521a4b8b4cf5d68d.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\V_90_md5_eeb6b515ad2f25a3ad76099e07e030bc.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_carbon_fuelwood_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\H1_90_md5_7973783ac2786f9d521a4b8b4cf5d68d.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\W_90_md5_de1e7dc33c7227cdbcda5b7e6f9919bb.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_carbon_fwfish_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\H1_90_md5_7973783ac2786f9d521a4b8b4cf5d68d.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\X_90_md5_0cc1f3aeb8e1a566a6b220bf9986b828.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_carbon_grazing_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\H1_90_md5_7973783ac2786f9d521a4b8b4cf5d68d.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\Z_90_md5_1b9d0deb1e16f6975dc3402aacf4846e.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_carbon_natureaccess_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\H1_90_md5_7973783ac2786f9d521a4b8b4cf5d68d.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A1_90_md5_1fb33de8a6ced1d1f54dcc7debed3c6c.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_carbon_nitrogen_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\H1_90_md5_7973783ac2786f9d521a4b8b4cf5d68d.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\B1_90_md5_14484122eba5a970559c57a48621d3fd.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_carbon_pollination_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\H1_90_md5_7973783ac2786f9d521a4b8b4cf5d68d.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\D1_90_md5_ee81ad59355f2309c2ecb882e788454a.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_carbon_sediment_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\H1_90_md5_7973783ac2786f9d521a4b8b4cf5d68d.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\C1_90_md5_3246d7fc06267a18f59ca9a8decf64fe.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_carbon_reeftourism_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\H1_90_md5_7973783ac2786f9d521a4b8b4cf5d68d.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\I1_90_md5_54ad2f227abc1cf66ed23cc6d3b72d47.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_carbon_moisture_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\I1_90_md5_54ad2f227abc1cf66ed23cc6d3b72d47.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\T_90_md5_6a0142de25bb3b5a107f7abae694c5b0.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_moisture_timber_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\I1_90_md5_54ad2f227abc1cf66ed23cc6d3b72d47.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\U_90_md5_258160b638e742e91b84979e6b2c748f.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_moisture_flood_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\I1_90_md5_54ad2f227abc1cf66ed23cc6d3b72d47.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\V_90_md5_eeb6b515ad2f25a3ad76099e07e030bc.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_moisture_fuelwood_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\I1_90_md5_54ad2f227abc1cf66ed23cc6d3b72d47.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\W_90_md5_de1e7dc33c7227cdbcda5b7e6f9919bb.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_moisture_fwfish_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\I1_90_md5_54ad2f227abc1cf66ed23cc6d3b72d47.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\X_90_md5_0cc1f3aeb8e1a566a6b220bf9986b828.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_moisture_grazing_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\I1_90_md5_54ad2f227abc1cf66ed23cc6d3b72d47.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\Z_90_md5_1b9d0deb1e16f6975dc3402aacf4846e.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_moisture_natureaccess_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\I1_90_md5_54ad2f227abc1cf66ed23cc6d3b72d47.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A1_90_md5_1fb33de8a6ced1d1f54dcc7debed3c6c.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_moisture_nitrogen_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\I1_90_md5_54ad2f227abc1cf66ed23cc6d3b72d47.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\B1_90_md5_14484122eba5a970559c57a48621d3fd.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_moisture_pollination_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\I1_90_md5_54ad2f227abc1cf66ed23cc6d3b72d47.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\D1_90_md5_ee81ad59355f2309c2ecb882e788454a.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_moisture_sediment_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\S_90_md5_5d18924c69519ec76993f4d58a7b2687.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\T_90_md5_6a0142de25bb3b5a107f7abae694c5b0.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_coastal_timber_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\S_90_md5_5d18924c69519ec76993f4d58a7b2687.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\U_90_md5_258160b638e742e91b84979e6b2c748f.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_coastal_flood_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\S_90_md5_5d18924c69519ec76993f4d58a7b2687.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\V_90_md5_eeb6b515ad2f25a3ad76099e07e030bc.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_coastal_fuelwood_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\S_90_md5_5d18924c69519ec76993f4d58a7b2687.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\W_90_md5_de1e7dc33c7227cdbcda5b7e6f9919bb.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_coastal_fwfish_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\S_90_md5_5d18924c69519ec76993f4d58a7b2687.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\X_90_md5_0cc1f3aeb8e1a566a6b220bf9986b828.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_coastal_grazing_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\S_90_md5_5d18924c69519ec76993f4d58a7b2687.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\Y_90_md5_f8393b73f3548658f610ac47acea72e7.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_coastal_marinefish_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\S_90_md5_5d18924c69519ec76993f4d58a7b2687.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\Z_90_md5_1b9d0deb1e16f6975dc3402aacf4846e.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_coastal_natureaccess_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\S_90_md5_5d18924c69519ec76993f4d58a7b2687.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A1_90_md5_1fb33de8a6ced1d1f54dcc7debed3c6c.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_coastal_nitrogen_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\S_90_md5_5d18924c69519ec76993f4d58a7b2687.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\B1_90_md5_14484122eba5a970559c57a48621d3fd.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_coastal_pollination_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\S_90_md5_5d18924c69519ec76993f4d58a7b2687.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\D1_90_md5_ee81ad59355f2309c2ecb882e788454a.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_coastal_sediment_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\S_90_md5_5d18924c69519ec76993f4d58a7b2687.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\C1_90_md5_3246d7fc06267a18f59ca9a8decf64fe.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_coastal_reeftourism_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\C1_90_md5_3246d7fc06267a18f59ca9a8decf64fe.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\Y_90_md5_f8393b73f3548658f610ac47acea72e7.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_marinefish_reeftourism_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\T_90_md5_6a0142de25bb3b5a107f7abae694c5b0.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\U_90_md5_258160b638e742e91b84979e6b2c748f.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_timber_flood_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\T_90_md5_6a0142de25bb3b5a107f7abae694c5b0.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\V_90_md5_eeb6b515ad2f25a3ad76099e07e030bc.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_timber_fuelwood_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\T_90_md5_6a0142de25bb3b5a107f7abae694c5b0.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\W_90_md5_de1e7dc33c7227cdbcda5b7e6f9919bb.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_timber_fwfish_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\T_90_md5_6a0142de25bb3b5a107f7abae694c5b0.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\Z_90_md5_1b9d0deb1e16f6975dc3402aacf4846e.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_timber_natureaccess_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\T_90_md5_6a0142de25bb3b5a107f7abae694c5b0.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A1_90_md5_1fb33de8a6ced1d1f54dcc7debed3c6c.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_timber_nitrogen_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\T_90_md5_6a0142de25bb3b5a107f7abae694c5b0.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\B1_90_md5_14484122eba5a970559c57a48621d3fd.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_timber_pollination_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\T_90_md5_6a0142de25bb3b5a107f7abae694c5b0.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\D1_90_md5_ee81ad59355f2309c2ecb882e788454a.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_timber_sediment_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\U_90_md5_258160b638e742e91b84979e6b2c748f.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\V_90_md5_eeb6b515ad2f25a3ad76099e07e030bc.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_flood_fuelwood_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\U_90_md5_258160b638e742e91b84979e6b2c748f.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\W_90_md5_de1e7dc33c7227cdbcda5b7e6f9919bb.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_flood_fwfish_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\U_90_md5_258160b638e742e91b84979e6b2c748f.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\X_90_md5_0cc1f3aeb8e1a566a6b220bf9986b828.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_flood_grazing_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\U_90_md5_258160b638e742e91b84979e6b2c748f.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\Z_90_md5_1b9d0deb1e16f6975dc3402aacf4846e.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_flood_natureaccess_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\U_90_md5_258160b638e742e91b84979e6b2c748f.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A1_90_md5_1fb33de8a6ced1d1f54dcc7debed3c6c.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_flood_nitrogen_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\U_90_md5_258160b638e742e91b84979e6b2c748f.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\B1_90_md5_14484122eba5a970559c57a48621d3fd.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_flood_pollination_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\U_90_md5_258160b638e742e91b84979e6b2c748f.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\D1_90_md5_ee81ad59355f2309c2ecb882e788454a.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_flood_sediment_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\V_90_md5_eeb6b515ad2f25a3ad76099e07e030bc.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\W_90_md5_de1e7dc33c7227cdbcda5b7e6f9919bb.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_fuelwood_fwfish_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\V_90_md5_eeb6b515ad2f25a3ad76099e07e030bc.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\Z_90_md5_1b9d0deb1e16f6975dc3402aacf4846e.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_fuelwood_natureaccess_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\V_90_md5_eeb6b515ad2f25a3ad76099e07e030bc.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A1_90_md5_1fb33de8a6ced1d1f54dcc7debed3c6c.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_fuelwood_nitrogen_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\V_90_md5_eeb6b515ad2f25a3ad76099e07e030bc.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\B1_90_md5_14484122eba5a970559c57a48621d3fd.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_fuelwood_pollination_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\V_90_md5_eeb6b515ad2f25a3ad76099e07e030bc.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\D1_90_md5_ee81ad59355f2309c2ecb882e788454a.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_fuelwood_sediment_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\W_90_md5_de1e7dc33c7227cdbcda5b7e6f9919bb.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\X_90_md5_0cc1f3aeb8e1a566a6b220bf9986b828.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_fwfish_grazing_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\W_90_md5_de1e7dc33c7227cdbcda5b7e6f9919bb.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\Z_90_md5_1b9d0deb1e16f6975dc3402aacf4846e.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_fwfish_natureaccess_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\W_90_md5_de1e7dc33c7227cdbcda5b7e6f9919bb.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A1_90_md5_1fb33de8a6ced1d1f54dcc7debed3c6c.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_fwfish_nitrogen_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\W_90_md5_de1e7dc33c7227cdbcda5b7e6f9919bb.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\B1_90_md5_14484122eba5a970559c57a48621d3fd.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_fwfish_pollination_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\W_90_md5_de1e7dc33c7227cdbcda5b7e6f9919bb.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\D1_90_md5_ee81ad59355f2309c2ecb882e788454a.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_fwfish_sediment_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\X_90_md5_0cc1f3aeb8e1a566a6b220bf9986b828.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\Z_90_md5_1b9d0deb1e16f6975dc3402aacf4846e.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_grazing_natureaccess_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\X_90_md5_0cc1f3aeb8e1a566a6b220bf9986b828.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A1_90_md5_1fb33de8a6ced1d1f54dcc7debed3c6c.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_grazing_nitrogen_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\X_90_md5_0cc1f3aeb8e1a566a6b220bf9986b828.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\B1_90_md5_14484122eba5a970559c57a48621d3fd.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_grazing_pollination_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\X_90_md5_0cc1f3aeb8e1a566a6b220bf9986b828.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\D1_90_md5_ee81ad59355f2309c2ecb882e788454a.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_grazing_sediment_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\Z_90_md5_1b9d0deb1e16f6975dc3402aacf4846e.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A1_90_md5_1fb33de8a6ced1d1f54dcc7debed3c6c.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_natureaccess_nitrogen_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\Z_90_md5_1b9d0deb1e16f6975dc3402aacf4846e.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\B1_90_md5_14484122eba5a970559c57a48621d3fd.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_natureaccess_pollination_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\Z_90_md5_1b9d0deb1e16f6975dc3402aacf4846e.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\D1_90_md5_ee81ad59355f2309c2ecb882e788454a.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_natureaccess_sediment_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A1_90_md5_1fb33de8a6ced1d1f54dcc7debed3c6c.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\B1_90_md5_14484122eba5a970559c57a48621d3fd.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_nitrogen_pollination_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\A1_90_md5_1fb33de8a6ced1d1f54dcc7debed3c6c.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\D1_90_md5_ee81ad59355f2309c2ecb882e788454a.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_nitrogen_sediment_90.tif",
},
{
'expression': 'raster1*raster2',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\B1_90_md5_14484122eba5a970559c57a48621d3fd.tif",
'raster2': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\D1_90_md5_ee81ad59355f2309c2ecb882e788454a.tif",
},
'target_nodata': 0,
'target_raster_path': "overlap_pollination_sediment_90.tif",
},
]
for calculation in calculation_list:
raster_calculations_core.evaluate_calculation(
calculation, TASK_GRAPH, WORKSPACE_DIR)
TASK_GRAPH.join()
TASK_GRAPH.close()
return
# POPULATION STUFF
calculation_list = [ # Trying at a finer resolution (10s A90) -- there are significantly fewer people, so this is probably the correct way to do it
#{
# 'expression': 'raster1*(raster2)',
# 'symbol_to_path_map': {
# 'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\wgs\A_90_WARPED_near_md5_0ed997ee57533433c6372e070592e880.tif",
# 'raster2': r"C:\Users\Becky\Documents\lspop2017_md5_eafa6a4724f3d3a6675687114d4de6ba.tif",
# },
# 'target_nodata': -9999,
# 'target_pixel_size': (0.008333333333333333218,-0.008333333333333333218),
# 'resample_method': 'near',
# 'target_raster_path': "lspop2017_on_10sA90.tif",
#},
#{
# 'expression': 'raster1*(raster2)',
# 'symbol_to_path_map': {
# 'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\wgs\B_90_WARPED_near_md5_2b44cf1e234acbd8d12156068ba8ce2e.tif",
# 'raster2': r"C:\Users\Becky\Documents\lspop2017_md5_eafa6a4724f3d3a6675687114d4de6ba.tif",
# },
# 'target_nodata': -9999,
# 'target_pixel_size': (0.008333333333333333218,-0.008333333333333333218),
# 'resample_method': 'near',
# 'target_raster_path': "lspop2017_on_10sB90.tif",
#},
{
'expression': 'raster1*(raster2)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\wgs\C_90_WARPED_near_md5_6a33ab63b7ac8fb9a679e192741bcac5.tif",
'raster2': r"C:\Users\Becky\Documents\lspop2017_md5_eafa6a4724f3d3a6675687114d4de6ba.tif",
},
'target_nodata': -9999,
'target_pixel_size': (0.008333333333333333218,-0.008333333333333333218),
'resample_method': 'near',
'target_raster_path': "lspop2017_on_10sC90.tif",
},
]
for calculation in calculation_list:
raster_calculations_core.evaluate_calculation(
calculation, TASK_GRAPH, WORKSPACE_DIR)
TASK_GRAPH.join()
TASK_GRAPH.close()
return
calculation_list = [
{
'expression': 'raster1*(raster2)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\wgs\A_90_WARPED_near_MASKED_land_2km_md5_66c8b850ace04761abef3a1d7a02f04a.tif",
'raster2': r"C:\Users\Becky\Documents\lspop2017_md5_eafa6a4724f3d3a6675687114d4de6ba.tif",
},
'target_nodata': -9999,
'target_pixel_size': (0.008333333333333333218,-0.008333333333333333218),
'resample_method': 'average',
'target_raster_path': "lspop2017_on_A90.tif",
},
{
'expression': 'raster1*(raster2)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\wgs\B_90_WARPED_near_MASKED_land_2km_md5_8e7a1e1badc25b30b5dd20d9c8ae4c85.tif",
'raster2': r"C:\Users\Becky\Documents\lspop2017_md5_eafa6a4724f3d3a6675687114d4de6ba.tif",
},
'target_nodata': -9999,
'target_pixel_size': (0.008333333333333333218,-0.008333333333333333218),
'resample_method': 'average',
'target_raster_path': "lspop2017_on_B90.tif",
},
{
'expression': 'raster1*(raster2)',
'symbol_to_path_map': {
'raster1': r"C:\Users\Becky\Documents\cnc_project\optimization\critical-natural-capital-optimizations\stitched_solutions\8-21\wgs\C_90_WARPED_near_2km_md5_f54c83a0078f91a2c5cb98c9bd23b22f.tif",
'raster2': r"C:\Users\Becky\Documents\lspop2017_md5_eafa6a4724f3d3a6675687114d4de6ba.tif",
},
'target_nodata': -9999,
'target_pixel_size': (0.008333333333333333218,-0.008333333333333333218),
'resample_method': 'average',
'target_raster_path': "lspop2017_on_C90.tif",
},
#{
# 'expression': 'raster1*(raster2)',
# 'symbol_to_path_map': {
# 'raster1': r"C:\Users\Becky\Documents\raster_calculations\align-to-mask-and-normalize\workspace\A_90_WARPED_near_md5_1e9f19fadc8ba5e2b32c5c11bb4154cf.tif",
# 'raster2': r"C:\Users\Becky\Documents\cnc_project\supporting_layers\poverty\chi_relative_wealth_index.tif",
# },
# 'target_nodata': -9999,
# 'target_pixel_size': (0.02222222222222399943,-0.02222222222222399943),
# 'resample_method': 'near',
# 'target_raster_path': "chi_relative_wealth_on_A90.tif",
#},
#{
# 'expression': 'raster1*(raster2)',
# 'symbol_to_path_map': {
# 'raster1': r"C:\Users\Becky\Documents\raster_calculations\align-to-mask-and-normalize\workspace\B_90_WARPED_near_md5_27f59aaa7d7e4abf71b3f80567bb66db.tif",
# 'raster2': r"C:\Users\Becky\Documents\cnc_project\supporting_layers\poverty\chi_relative_wealth_index.tif",
# },
# 'target_nodata': -9999,
# 'target_pixel_size': (0.02222222222222399943,-0.02222222222222399943),
# 'resample_method': 'near',
# 'target_raster_path': "chi_relative_wealth_on_B90.tif",
#},
#{
# 'expression': 'raster1*(raster2)',
<filename>databroker/v1.py
from collections import defaultdict
from datetime import datetime
import pandas
import re
import warnings
import time
import humanize
import jinja2
import os
from types import SimpleNamespace
import xarray
import event_model
# Toolz and CyToolz have identical APIs -- same test suite, docstrings.
try:
from cytoolz.dicttoolz import merge
except ImportError:
from toolz.dicttoolz import merge
from tiled.client import from_profile
from tiled.client.utils import ClientError
from tiled.queries import FullText
from .queries import RawMongo, TimeRange
from .utils import ALL, get_fields, wrap_in_deprecated_doct
# The v2 API is expected to grow more options for filled than just True/False
# (e.g. 'delayed') so it expects a string instead of a boolean.
_FILL = {True: "yes", False: "no"}
def temp_config():
raise NotImplementedError("Use temp() instead, which returns a v1.Broker.")
def temp():
from .v2 import temp
catalog = temp()
return Broker(catalog)
class Registry:
"""
An accessor that serves as a backward-compatible shim for Broker.reg
"""
def __init__(self, catalog):
self._catalog = catalog
@property
def handler_reg(self):
warnings.warn(
"In databroker 2.x, there are separate notions of 'server' and 'client', "
"and root_map is not visible to the client. Likely these "
"details are handled for you on the server side, so you should not worry "
"about this message unless you encounter trouble loading large array data."
)
@property
def root_map(self):
warnings.warn(
"In databroker 2.x, there are separate notions of 'server' and 'client', "
"and root_map is not visible to the client. Likely these "
"details are handled for you on the server side, so you should not worry "
"about this message unless you encounter trouble loading large array data."
)
def register_handler(self, *args, **kwargs):
warnings.warn(
"In databroker 2.x, there are separate notions of 'server' and 'client', "
"and register_handler(...) has no effect on the client. Likely this "
"is being done for you on the server side, so you should not worry "
"about this message unless you encounter trouble loading large array data."
)
def deregister_handler(self, key):
warnings.warn(
"In databroker 2.x, there are separate notions of 'server' and 'client', "
"and deregister_handler(...) has no effect on the client. Likely this "
"is being done for you on the server side, so you should not worry "
"about this message unless you encounter trouble loading large array data."
)
def copy_files(
self,
resource,
new_root,
verify=False,
file_rename_hook=None,
run_start_uid=None,
):
raise NotImplementedError(
"The copy_files functionality is not supported via a client."
)
def _no_aliases():
raise NotImplementedError("Aliases have been removed. Use search instead.")
def _no_filters():
raise NotImplementedError(
"""Filters have been removed. Chain searches instead like
>>> db.v2.search(...).search(...)""")
class Broker:
"""
This supports the original Broker API but implemented on intake.Catalog.
"""
def __init__(self, catalog):
self._catalog = catalog
self.prepare_hook = wrap_in_deprecated_doct
self.v2._Broker__v1 = self
self._reg = Registry(catalog)
# When the user asks for a Serializer, give a RunRouter
# that will generate a fresh Serializer instance for each
# run.
def factory(name, doc):
return [self._catalog.get_serializer()], []
self._run_router = event_model.RunRouter([factory])
@property
def aliases(self):
_no_aliases()
@aliases.setter
def aliases(self, value):
_no_aliases()
@property
def filters(self):
_no_filters()
@filters.setter
def filters(self, value):
_no_filters()
def add_filter(self, *args, **kwargs):
_no_filters()
def clear_filters(self, *args, **kwargs):
_no_filters()
@property
def _serializer(self):
return self._run_router
@property
def reg(self):
"Registry of externally-stored data"
return self._reg
@property
def name(self):
return self._catalog.metadata.get("name")
@property
def v1(self):
"A self-reference. This makes v1.Broker and v2.Broker symmetric."
return self
@property
def v2(self):
"Accessor to the version 2 API."
return self._catalog
@classmethod
def from_config(cls, config, auto_register=None, name=None):
raise NotImplementedError(
"""Old-style databroker configuration is not supported.
"To construct from tiled profile, use:
>>> from tiled.client import from_config
>>> Broker(from_config({...}))
""")
def get_config(self):
raise NotImplementedError("No longer supported")
@classmethod
def named(cls, name, auto_register=None, try_raw=True):
"""
Create a new Broker instance using the Tiled profile of this name.
See https://blueskyproject.io/tiled/how-to/profiles.html
Special Case: The name ``'temp'`` creates a new, temporary
configuration. Subsequent calls to ``Broker.named('temp')`` will
create separate configurations. Any data saved using this temporary
configuration will not be accessible once the ``Broker`` instance has
been deleted.
Parameters
----------
name : string
auto_register : boolean, optional
By default, automatically register built-in asset handlers (classes
that handle I/O for externally stored data). Set this to ``False``
to do all registration manually.
try_raw: boolean, optional
This is a backward-compatibility shim. Raw data has been moved from
"xyz" to "xyz/raw" in many deployments. If true, check to see if an item
named "raw" is contained in this node and, if so, use that.
Returns
-------
db : Broker
"""
if auto_register is not None:
warnings.warn(
"The parameter auto_register is now ignored. "
"Handlers are now a concern of the service and not configurable "
"from the client."
)
if name == "temp":
raise NotImplementedError("databroker 2.0.0 does not yet support 'temp' Broker")
client = from_profile(name)
if try_raw:
try:
client = client["raw"]
except (ClientError, KeyError):
pass
return Broker(client)
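# A usage sketch, not part of the library: "example_profile" is a hypothetical
# Tiled profile name that would have to exist on the local system, and the run
# uid is a placeholder.
#
#     db = Broker.named("example_profile")
#     header = db["<run uid>"]   # __getitem__ below returns a Header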
@property
def fs(self):
warnings.warn("fs is deprecated, use `db.reg` instead", stacklevel=2)
return self.reg
def stream_names_given_header(self):
return list(self._catalog)
def _patch_state(self, catalog):
"Copy references to v1 state."
catalog.v1.prepare_hook = self.prepare_hook
def __call__(self, text_search=None, **kwargs):
results_catalog = self._catalog
since = kwargs.pop("since", None) or kwargs.pop("start_time", None)
until = kwargs.pop("until", None) or kwargs.pop("stop_time", None)
if (since is not None) or (until is not None):
results_catalog = results_catalog.search(
TimeRange(since=since, until=until)
)
if "data_key" in kwargs:
raise NotImplementedError("Search by data key is no longer implemented.")
if kwargs:
results_catalog = results_catalog.search(RawMongo(start=kwargs))
if text_search:
results_catalog = results_catalog.search(FullText(text_search))
self._patch_state(results_catalog)
return Results(results_catalog)
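# A search sketch with illustrative values: time bounds are routed through
# TimeRange, leftover keyword filters through RawMongo, and a positional string
# through FullText, as implemented above. "plan_name" is a typical start-document
# key, used here only as an example.
#
#     results = db(since="2021-01-01", until="2021-02-01", plan_name="count")
#     results = db("tungsten")   # full-text search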
def __getitem__(self, key):
result = self._catalog[key]
if isinstance(result, list):
# self[a, b, c] -> List[BlueskyRun]
return [Header(run, self) for run in result]
else:
# self[a] -> BlueskyRun
return Header(result, self)
get_fields = staticmethod(get_fields)
def get_documents(
self, headers, stream_name=ALL, fields=None, fill=False, handler_registry=None
):
"""
Get all documents from one or more runs.
Parameters
----------
headers : Header or iterable of Headers
The headers to fetch the events for
stream_name : str, optional
Get events from only "event stream" with this name.
Default is `ALL` which yields documents for all streams.
fields : List[str], optional
whitelist of field names of interest; if None, all are returned
Default is None
fill : bool or Iterable[str], optional
Which fields to fill. If `True`, fill all
possible fields.
Each event will have the data filled for the intersection
of its external keys and the fields requested.
Default is False
handler_registry : dict, optional
mapping asset specs (strings) to handlers (callable classes)
Yields
------
name : str
The name of the kind of document
doc : dict
The payload, may be RunStart, RunStop, EventDescriptor, or Event.
Raises
------
ValueError if any key in `fields` is not in at least one descriptor
per header.
"""
if handler_registry is not None:
raise NotImplementedError(
"The handler_registry must be set when "
"the Broker is initialized, usually specified "
"in a configuration file."
)
headers = _ensure_list(headers)
no_fields_filter = False
if fields is None:
no_fields_filter = True
fields = []
fields = set(fields)
comp_re = _compile_re(fields)
for header in headers:
uid = header.start["uid"]
descs = header.descriptors
per_desc_discards = {}
per_desc_extra_data = {}
per_desc_extra_ts = {}
for d in descs:
(
all_extra_dk,
all_extra_data,
all_extra_ts,
discard_fields,
) = _extract_extra_data(
header.start, header.stop, d, fields, comp_re, no_fields_filter
)
per_desc_discards[d["uid"]] = discard_fields
per_desc_extra_data[d["uid"]] = all_extra_data
per_desc_extra_ts[d["uid"]] = all_extra_ts
d = d.copy()
dict.__setitem__(d, "data_keys", d["data_keys"].copy())
for k in discard_fields:
del d["data_keys"][k]
d["data_keys"].update(all_extra_dk)
if not len(d["data_keys"]) and not len(all_extra_data):
continue
def merge_config_into_event(event):
# Mutate event in place, adding in data and timestamps from the
# descriptor's 'configuration' key.
event_data = event["data"] # cache for perf
desc = event["descriptor"]
event_timestamps = event["timestamps"]
event_data.update(per_desc_extra_data[desc])
event_timestamps.update(per_desc_extra_ts[desc])
discard_fields = per_desc_discards[desc]
for field in discard_fields:
del event_data[field]
del event_timestamps[field]
get_documents_router = _GetDocumentsRouter(
self.prepare_hook, merge_config_into_event, stream_name=stream_name
)
for name, doc in self._catalog[uid].documents(fill=fill):
yield from get_documents_router(name, doc)
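# An iteration sketch (the header and stream name are illustrative):
# get_documents yields (name, doc) pairs in the order the run router emits them.
#
#     for name, doc in db.get_documents(header, stream_name="primary"):
#         print(name)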
def get_events(
self,
headers,
stream_name="primary",
fields=None,
fill=False,
handler_registry=None,
):
"""
Get Event documents from one or more runs.
Parameters
----------
headers : Header or iterable of Headers
The headers to fetch the events for
stream_name : str, optional
Get events from only "event stream" with this name.
Default is 'primary'
fields : List[str], optional
whitelist of field names of interest; if None, all are returned
Default is None
fill : bool or Iterable[str], optional
Which fields to fill. If `True`, fill all possible fields.
# API for accessing the core plugin of SublimePapyrus
import sublime, sublime_plugin, sys, os, json, threading, time
PYTHON_VERSION = sys.version_info
SUBLIME_VERSION = None
if PYTHON_VERSION[0] == 2:
SUBLIME_VERSION = int(sublime.version())
import imp
root, module = os.path.split(os.getcwd())
coreModule = "SublimePapyrus"
# SublimePapyrus core module
mainPackage = os.path.join(root, coreModule, "Plugin.py")
imp.load_source("SublimePapyrus", mainPackage)
del mainPackage
import SublimePapyrus
# Skyrim linter module
linterPackage = os.path.join(root, module, "Linter.py")
imp.load_source("Linter", linterPackage)
del linterPackage
import Linter
# Cleaning up
del root
del module
del coreModule
elif PYTHON_VERSION[0] >= 3:
from SublimePapyrus import Plugin as SublimePapyrus
from . import Linter
VALID_SCOPE = "source.papyrus.skyrim"
def plugin_loaded():
global SUBLIME_VERSION
SUBLIME_VERSION = int(sublime.version())
# Completion generation
class SublimePapyrusSkyrimGenerateCompletionsCommand(sublime_plugin.WindowCommand):
def run(self):
view = self.window.active_view()
if view:
self.paths = SublimePapyrus.GetSourcePaths(view)
if self.paths:
if PYTHON_VERSION[0] == 2:
self.window.show_quick_panel(self.paths, self.on_select, 0, -1)
elif PYTHON_VERSION[0] >= 3:
self.window.show_quick_panel(self.paths, self.on_select, 0, -1, None)
def on_select(self, index):
if index >= 0:
self.path = self.paths[index]
thread = threading.Thread(target=self.generate_completions)
thread.start()
def generate_completions(self):
outputFolder = os.path.join(sublime.packages_path(), "User")
lex = Linter.Lexical()
syn = Linter.Syntactic()
sem = Linter.Semantic()
files = [f for f in os.listdir(self.path) if ".psc" in f]
if len(files) > 100:
if not sublime.ok_cancel_dialog("You are about to generate static completions for %d scripts.\n\nAre you sure you want to continue?" % len(files)):
return
for file in files:
path = os.path.join(self.path, file)
scriptName = file[:-4]
scriptContents = ""
try:
with open(path) as fi:
scriptContents = fi.read()
except UnicodeDecodeError:
with open(path, encoding="utf8") as fi:
scriptContents = fi.read()
if scriptContents:
lines = []
tokens = []
try:
for token in lex.Process(scriptContents):
if token.type == lex.NEWLINE:
if tokens:
lines.append(tokens)
tokens = []
elif token.type != lex.COMMENT_LINE and token.type != lex.COMMENT_BLOCK:
tokens.append(token)
except Linter.LexicalError as e:
if PYTHON_VERSION[0] == 2:
print("SublimePapyrus - Lexical error on line %d, column %d in '%s': %s" % (e.line, e.column, path, e.message))
elif PYTHON_VERSION[0] >= 3:
SublimePapyrus.ShowMessage("Error on line %d, column %d in '%s': %s" % (e.line, e.column, path, e.message))
return
if lines:
statements = []
for line in lines:
try:
stat = syn.Process(line)
if stat and (stat.type == sem.STAT_FUNCTIONDEF or stat.type == sem.STAT_EVENTDEF):
statements.append(stat)
except Linter.SyntacticError as e:
if PYTHON_VERSION[0] == 2:
print("SublimePapyrus - Syntactic error on line %d in '%s': %s" % (e.line, path, e.message))
elif PYTHON_VERSION[0] >= 3:
SublimePapyrus.ShowMessage("Error on line %d in '%s': %s" % (e.line, path, e.message))
return
scriptNameLower = scriptName.lower()
completions = [{"trigger": "%s\t%s" % (scriptNameLower, "script"), "contents": scriptName}]
for stat in statements:
if stat.type == sem.STAT_FUNCTIONDEF:
temp = SublimePapyrus.MakeFunctionCompletion(stat, sem, script=scriptNameLower)
completions.append({"trigger": temp[0], "contents": temp[1]})
elif stat.type == sem.STAT_EVENTDEF:
temp = SublimePapyrus.MakeEventCompletion(stat, sem, calling=False, script=scriptNameLower)
completions.append({"trigger": temp[0], "contents": temp[1]})
output = {
"scope": VALID_SCOPE,
"completions": completions
}
with open(os.path.join(outputFolder, "SublimePapyrus - Skyrim - %s.sublime-completions" % scriptName), "w") as fo:
json.dump(output, fo, indent=2)
print("SublimePapyrus - Finished generating completions for scripts in '%s'" % self.path)
linterCache = {}
completionCache = {}
cacheLock = threading.RLock()
lex = Linter.Lexical()
syn = Linter.Syntactic()
sem = Linter.Semantic()
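# The shared lexer, parser, and semantic analyzer above feed both the completion
# and linting code below. A rough sketch of the pipeline, mirroring how Linter()
# uses it (the Papyrus source string and 'paths' are illustrative):
#
#     tokens = [t for t in lex.Process("Int x = 1")
#               if t.type not in (lex.NEWLINE, lex.COMMENT_LINE, lex.COMMENT_BLOCK)]
#     statement = syn.Process(tokens)              # one statement per line
#     script = sem.Process([statement], paths)     # 'paths' = script source folders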
class EventListener(sublime_plugin.EventListener):
def __init__(self):
super(EventListener,self).__init__()
self.linterQueue = 0
self.linterRunning = False
self.linterErrors = {}
self.completionRunning = False
self.validScope = "source.papyrus.skyrim"
self.completionKeywordAs = ("as\tcast", "As ",)
self.completionKeywordAuto = ("auto\tkeyword", "Auto",)
self.completionKeywordAutoReadOnly = ("autoreadonly\tkeyword", "AutoReadOnly",)
self.completionKeywordConditional = ("conditional\tkeyword", "Conditional",)
self.completionKeywordExtends = ("extends\tkeyword", "Extends ",)
self.completionKeywordGlobal = ("global\tkeyword", "Global",)
self.completionKeywordHidden = ("hidden\tkeyword", "Hidden",)
self.completionKeywordNative = ("native\tkeyword", "Native",)
self.completionKeywordParent = ("parent\tkeyword", "Parent",)
self.completionKeywordSelf = ("self\tkeyword", "Self",)
self.completionKeywordFalse = ("false\tkeyword", "False",)
self.completionKeywordNone = ("none\tkeyword", "None",)
self.completionKeywordTrue = ("true\tkeyword", "True",)
self.scriptContents = None
# Clear cache in order to force an update
def on_close(self, view):
if self.IsValidScope(view):
bufferID = view.buffer_id()
if bufferID:
if self.linterErrors.get(bufferID, None):
del self.linterErrors[bufferID]
self.ClearLinterCache(bufferID)
# Linter
def on_post_save(self, view):
if self.IsValidScope(view):
settings = SublimePapyrus.GetSettings()
if settings and settings.get("linter_on_save", True):
filePath = view.file_name()
if filePath:
folderPath, fileName = os.path.split(filePath)
scriptName = fileName[:fileName.rfind(".")].upper()
if self.linterRunning:
return
self.ClearSemanticAnalysisCache(scriptName)
self.ClearCompletionCache(scriptName)
self.bufferID = view.buffer_id()
if self.bufferID:
self.linterQueue += 1
lineNumber, columnNumber = view.rowcol(view.sel()[0].begin())
lineNumber += 1
self.Linter(view, lineNumber)
def on_modified(self, view):
if self.IsValidScope(view):
settings = SublimePapyrus.GetSettings()
global SUBLIME_VERSION
tooltipParameters = settings.get("tooltip_function_parameters", True)
tooltipDocstring = settings.get("tooltip_function_docstring", True)
if SUBLIME_VERSION >= 3070 and (tooltipParameters or tooltipDocstring):
if self.linterRunning:
return
elif self.completionRunning:
return
global cacheLock
global lex
global syn
global sem
with cacheLock:
locations = [view.sel()[0].begin()]
prefix = view.word(locations[0])
line, column = view.rowcol(locations[0])
line += 1
lineString = view.substr(sublime.Region(view.line(locations[0]).begin(), locations[0]-len(prefix))).strip()
bufferID = view.buffer_id()
if bufferID:
currentScript = self.GetScript(bufferID)
if currentScript:
try:
sem.GetContext(currentScript, line)
except Linter.FunctionDefinitionCancel as e:
tokens = []
try:
for token in lex.Process(lineString):
if token.type != lex.NEWLINE:
tokens.append(token)
if tokens and tokens[-1].type != lex.COMMENT_LINE:
try:
syn.Process(tokens)
except Linter.ExpectedIdentifierError as f:
if tokens[-1].type != lex.OP_DOT:
stack = syn.stack[:]
arguments = []
for item in reversed(stack):
if item.type == sem.NODE_FUNCTIONCALLARGUMENT:
arguments.insert(0, stack.pop())
elif item.type == sem.LEFT_PARENTHESIS:
break
stackLength = len(stack)
func = None
if stackLength >= 2 and stack[-2].type == sem.IDENTIFIER:
name = stack[-2].value.upper()
if stackLength >= 4 and stack[-3].type == sem.OP_DOT:
try:
result = sem.NodeVisitor(stack[-4])
if result.type != sem.KW_SELF:
try:
script = sem.GetCachedScript(result.type)
if script:
func = script.functions.get(name, None)
except Linter.SemanticError as e:
return
else:
for scope in reversed(e.functions):
func = scope.get(name, None)
if func:
break
except Linter.SemanticError as e:
return
else:
for scope in reversed(e.functions):
func = scope.get(name, None)
if func:
break
for imp in e.imports:
script = sem.GetCachedScript(imp)
temp = script.functions.get(name, None)
if temp:
if func:
func = None
else:
func = temp
break
if func:# and func.data.parameters:
self.ShowFunctionInfo(view, tokens, func, len(arguments), tooltipParameters, tooltipDocstring)
except Linter.SyntacticError as f:
pass
except Linter.LexicalError as f:
pass
except Linter.SemanticError as e:
pass
if settings and settings.get("linter_on_modified", True):
self.QueueLinter(view)
def ShowFunctionInfo(self, aView, aTokens, aFunction, aArgumentCount, aParameters, aDocstring):
funcName = aFunction.data.identifier
currentParameter = None
if len(aTokens) > 2 and aTokens[-1].type == lex.OP_ASSIGN and aTokens[-2].type == lex.IDENTIFIER:
currentParameter = aTokens[-2].value.upper()
paramIndex = 0
funcParameters = []
if aParameters:
for param in aFunction.data.parameters:
paramName = param.identifier
paramType = param.typeIdentifier
if param.array:
paramType = "%s[]" % paramType
paramContent = None
if param.expression:
paramDefaultValue = sem.GetLiteral(param.expression, True)
paramContent = "%s %s = %s" % (paramType, paramName, paramDefaultValue)
else:
paramContent = "%s %s" % (paramType, paramName)
if currentParameter:
if currentParameter == paramName.upper():
paramContent = "<b>%s</b>" % paramContent
else:
if paramIndex == aArgumentCount:
paramContent = "<b>%s</b>" % paramContent
paramIndex += 1
funcParameters.append(paramContent)
docstring = ""
if aDocstring:
if aFunction.data.docstring:
if funcParameters:
docstring = "<br><br>%s" % "<br>".join(aFunction.data.docstring.data.value.split("\n"))
else:
docstring = "<br>".join(aFunction.data.docstring.data.value.split("\n"))
settings = SublimePapyrus.GetSettings()
backgroundColor = settings.get("tooltip_background_color", "#393939")
bodyTextColor = settings.get("tooltip_body_text_color", "#747369")
bodyFontSize = settings.get("tooltip_font_size", "12")
boldTextColor = settings.get("tooltip_bold_text_color", "#ffffff")
headingTextColor = settings.get("tooltip_heading_text_color", "#bfbfbf")
headingFontSize = settings.get("tooltip_heading_font_size", "14")
css = """<style>
html {
background-color: %s;
}
body {
font-size: %spx;
color: %s;
}
b {
color: %s;
}
h1 {
color: %s;
font-size: %spx;
}
</style>""" % (backgroundColor, bodyFontSize, bodyTextColor, boldTextColor, headingTextColor, headingFontSize)
content = "%s<h1>%s</h1>%s%s" % (css, funcName, "<br>".join(funcParameters), docstring)
if aView.is_popup_visible():
aView.update_popup(content)
else:
aView.show_popup(content, flags=sublime.COOPERATE_WITH_AUTO_COMPLETE, max_width=int(settings.get("tooltip_max_width", 600)), max_height=int(settings.get("tooltip_max_height", 300)))
def QueueLinter(self, view):
if self.linterRunning: # If an instance of the linter is running, then cancel
return
self.linterQueue += 1 # Add to queue
settings = SublimePapyrus.GetSettings()
delay = 0.500
if settings:
delay = settings.get("linter_delay", 500)/1000.0
if delay < 0.050:
delay = 0.050
self.bufferID = view.buffer_id()
if self.bufferID:
lineNumber, columnNumber = view.rowcol(view.sel()[0].begin())
lineNumber += 1
if PYTHON_VERSION[0] == 2:
self.scriptContents = view.substr(sublime.Region(0, view.size()))
self.sourcePaths = SublimePapyrus.GetSourcePaths(view)
SublimePapyrus.ClearLinterHighlights(view)
t = threading.Timer(delay, self.Linter, kwargs={"view": None, "lineNumber": lineNumber})
t.daemon = True
t.start()
elif PYTHON_VERSION[0] >= 3:
t = threading.Timer(delay, self.Linter, kwargs={"view": view, "lineNumber": lineNumber})
t.daemon = True
t.start()
def Linter(self, view, lineNumber):
self.linterQueue -= 1 # Remove from queue
if self.linterQueue > 0: # If there is a queue, then cancel
return
elif self.completionRunning: # If completions are being generated, then cancel
return
self.linterRunning = True # Block further attempts to run the linter until this instance has finished
if view:
SublimePapyrus.ClearLinterHighlights(view)
#start = None #DEBUG
def Exit():
#print("Linter: Finished in %f milliseconds and releasing lock..." % ((time.time()-start)*1000.0)) #DEBUG
self.linterRunning = False
return False
global cacheLock
global lex
global syn
global sem
global SUBLIME_VERSION
with cacheLock:
if not self.linterErrors.get(self.bufferID, None):
self.linterErrors[self.bufferID] = {}
#start = time.time() #DEBUG
settings = None
if view:
settings = SublimePapyrus.GetSettings()
if SUBLIME_VERSION >= 3103 and view.is_auto_complete_visible(): # If a list of completions is visible, then cancel
return Exit()
if view:
SublimePapyrus.SetStatus(view, "sublimepapyrus-linter", "The linter is running...")
#lexSynStart = time.time() #DEBUG
scriptContents = None
if view:
scriptContents = view.substr(sublime.Region(0, view.size()))
else:
scriptContents = self.scriptContents
if not scriptContents:
return Exit()
lineCount = scriptContents.count("\n") + 1
statements = []
lines = []
tokens = []
currentLine = None
try:
for token in lex.Process(scriptContents):
if token.type == lex.NEWLINE:
if tokens:
if currentLine:
stat = syn.Process(tokens)
if stat:
statements.append(stat)
elif token.line >= lineNumber:
currentLine = syn.Process(tokens)
if currentLine:
while lines:
stat = syn.Process(lines.pop(0))
if stat:
statements.append(stat)
statements.append(currentLine)
currentLine = True
else:
lines.append(tokens)
tokens = []
else:
if token.line >= lineNumber:
while lines:
stat = syn.Process(lines.pop(0))
if stat:
statements.append(stat)
currentLine = True
elif token.type != lex.COMMENT_LINE and token.type != lex.COMMENT_BLOCK:
tokens.append(token)
except Linter.LexicalError as e:
if view:
error = self.linterErrors[self.bufferID].get(e.message, None)
if error and error.message == e.message and abs(error.line - e.line) < settings.get("linter_error_line_threshold", 2) + 1:
SublimePapyrus.HighlightLinter(view, e.line, e.column, False)
else:
SublimePapyrus.HighlightLinter(view, e.line, e.column)
self.linterErrors[self.bufferID][e.message] = e
SublimePapyrus.SetStatus(view, "sublimepapyrus-linter", "Error on line %d, column %d: %s" % (e.line, e.column, e.message))
if settings.get("linter_panel_error_messages", False):
view.window().show_quick_panel([[e.message, "Line %d, column %d" % (e.line, e.column)]], None)
return Exit()
except Linter.SyntacticError as e:
if view:
error = self.linterErrors[self.bufferID].get(e.message, None)
if error and error.message == e.message and abs(error.line - e.line) < settings.get("linter_error_line_threshold", 2) + 1:
SublimePapyrus.HighlightLinter(view, e.line, center=False)
else:
SublimePapyrus.HighlightLinter(view, e.line)
self.linterErrors[self.bufferID][e.message] = e
SublimePapyrus.SetStatus(view, "sublimepapyrus-linter", "Error on line %d: %s" % (e.line, e.message))
if settings.get("linter_panel_error_messages", False):
view.window().show_quick_panel([[e.message, "Line %d" % e.line]], None)
return Exit()
#print("Linter: Finished lexical and syntactic in %f milliseconds..." % ((time.time()-lexSynStart)*1000.0)) #DEBUG
#semStart = time.time() #DEBUG
if statements:
try:
script = None
if view:
script = sem.Process(statements, SublimePapyrus.GetSourcePaths(view))
else:
script = sem.Process(statements, self.sourcePaths)
if script:
self.SetScript(self.bufferID, script)
except Linter.SemanticError as e:
if view:
error = self.linterErrors[self.bufferID].get(e.message, None)
if error and error.message == e.message and abs(error.line - e.line) < settings.get("linter_error_line_threshold", 2) + 1:
SublimePapyrus.HighlightLinter(view, e.line, center=False)
else:
SublimePapyrus.HighlightLinter(view, e.line)
self.linterErrors[self.bufferID][e.message] = e
SublimePapyrus.SetStatus(view, "sublimepapyrus-linter", "Error on line %d: %s" % (e.line, e.message))
if settings.get("linter_panel_error_messages", False):
window = view.window()
if window: # Has to be checked in ST2 due to only active views returning values other than None.
window.show_quick_panel([[e.message, "Line %d" % e.line]], None)
return Exit()
#print("Linter: Finished semantic in %f milliseconds..." % ((time.time()-semStart)*1000.0)) #DEBUG
if view:
SublimePapyrus.ClearStatus(view, "sublimepapyrus-linter")
if self.linterErrors.get(self.bufferID, None):
del self.linterErrors[self.bufferID]
return Exit()
# Completions
def on_query_completions(self, view, prefix, locations):
if self.IsValidScope(view):
settings = SublimePapyrus.GetSettings()
if settings and settings.get("intelligent_code_completion", True):
if self.completionRunning:
return
elif self.linterRunning:
return
self.completionRunning = True
#start = time.time() #DEBUG
completions = None
if not view.find("scriptname", 0, sublime.IGNORECASE):
path = view.file_name()
if path:
_, name = os.path.split(path)
completions = [("scriptname\tscript header", "ScriptName %s" % name[:name.rfind(".")],)]
else:
completions = [("scriptname\tscript header", "ScriptName ",)]
else:
completions = self.Completions(view, prefix, locations)
if completions:
completions = list(set(completions))
elif completions == None:
completions = []
completions = (completions, sublime.INHIBIT_WORD_COMPLETIONS|sublime.INHIBIT_EXPLICIT_COMPLETIONS,)
#print("Completions: Finished in %f milliseconds and releasing lock..." % ((time.time()-start)*1000.0)) #DEBUG
self.completionRunning = False
return completions
def Completions(self, view, prefix, locations):
SublimePapyrus.ClearLinterHighlights(view)
completions = []
flags
# Natural Language Toolkit: Viterbi Probabilistic Parser
#
# Copyright (C) 2001-2020 NLTK Project
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
from functools import reduce
from nltk.tree import Tree, ProbabilisticTree
from nltk.parse.api import ParserI
##//////////////////////////////////////////////////////
## Viterbi PCFG Parser
##//////////////////////////////////////////////////////
class ViterbiParser(ParserI):
"""
A bottom-up ``PCFG`` parser that uses dynamic programming to find
the single most likely parse for a text. The ``ViterbiParser`` parser
parses texts by filling in a "most likely constituent table".
This table records the most probable tree representation for any
given span and node value. In particular, it has an entry for
every start index, end index, and node value, recording the most
likely subtree that spans from the start index to the end index,
and has the given node value.
The ``ViterbiParser`` parser fills in this table incrementally. It starts
by filling in all entries for constituents that span one element
of text (i.e., entries where the end index is one greater than the
start index). After it has filled in all table entries for
constituents that span one element of text, it fills in the
entries for constituents that span two elements of text. It
continues filling in the entries for constituents spanning larger
and larger portions of the text, until the entire table has been
filled. Finally, it returns the table entry for a constituent
spanning the entire text, whose node value is the grammar's start
symbol.
In order to find the most likely constituent with a given span and
node value, the ``ViterbiParser`` parser considers all productions that
could produce that node value. For each production, it finds all
children that collectively cover the span and have the node values
specified by the production's right hand side. If the probability
of the tree formed by applying the production to the children is
greater than the probability of the current entry in the table,
then the table is updated with this new tree.
A pseudo-code description of the algorithm used by
``ViterbiParser`` is:
| Create an empty most likely constituent table, *MLC*.
| For width in 1...len(text):
| For start in 1...len(text)-width:
| For prod in grammar.productions:
| For each sequence of subtrees [t[1], t[2], ..., t[n]] in MLC,
| where t[i].label()==prod.rhs[i],
| and the sequence covers [start:start+width]:
| old_p = MLC[start, start+width, prod.lhs]
| new_p = P(t[1])P(t[2])...P(t[n])P(prod)
| if new_p > old_p:
| new_tree = Tree(prod.lhs, t[1], t[2], ..., t[n])
| MLC[start, start+width, prod.lhs] = new_tree
| Return MLC[0, len(text), start_symbol]
:type _grammar: PCFG
:ivar _grammar: The grammar used to parse sentences.
:type _trace: int
:ivar _trace: The level of tracing output that should be generated
when parsing a text.
"""
def __init__(self, grammar, trace=0):
"""
Create a new ``ViterbiParser`` parser, that uses ``grammar`` to
parse texts.
:type grammar: PCFG
:param grammar: The grammar used to parse texts.
:type trace: int
:param trace: The level of tracing that should be used when
parsing a text. ``0`` will generate no tracing output;
and higher numbers will produce more verbose tracing
output.
"""
self._grammar = grammar
self._trace = trace
def grammar(self):
return self._grammar
def trace(self, trace=2):
"""
Set the level of tracing output that should be generated when
parsing a text.
:type trace: int
:param trace: The trace level. A trace level of ``0`` will
generate no tracing output; and higher trace levels will
produce more verbose tracing output.
:rtype: None
"""
self._trace = trace
def parse(self, tokens):
# Inherit docs from ParserI
tokens = list(tokens)
self._grammar.check_coverage(tokens)
# The most likely constituent table. This table specifies the
# most likely constituent for a given span and type.
# Constituents can be either Trees or tokens. For Trees,
# the "type" is the Nonterminal for the tree's root node
# value. For Tokens, the "type" is the token's type.
# The table is stored as a dictionary, since it is sparse.
constituents = {}
# Initialize the constituents dictionary with the words from
# the text.
if self._trace:
print(("Inserting tokens into the most likely" + " constituents table..."))
for index in range(len(tokens)):
token = tokens[index]
constituents[index, index + 1, token] = token
if self._trace > 1:
self._trace_lexical_insertion(token, index, len(tokens))
# Consider each span of length 1, 2, ..., n; and add any trees
# that might cover that span to the constituents dictionary.
for length in range(1, len(tokens) + 1):
if self._trace:
print(
(
"Finding the most likely constituents"
+ " spanning %d text elements..." % length
)
)
for start in range(len(tokens) - length + 1):
span = (start, start + length)
self._add_constituents_spanning(span, constituents, tokens)
# Return the tree that spans the entire text & have the right cat
tree = constituents.get((0, len(tokens), self._grammar.start()))
if tree is not None:
yield tree
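# A commented usage sketch; the toy grammar, probabilities, and sentence are
# illustrative only.
#
#     from nltk import PCFG
#     toy_grammar = PCFG.fromstring("""
#         S -> NP VP [1.0]
#         NP -> 'I' [0.4] | 'telescopes' [0.6]
#         VP -> V NP [1.0]
#         V -> 'saw' [1.0]
#     """)
#     parser = ViterbiParser(toy_grammar)
#     for tree in parser.parse("I saw telescopes".split()):
#         print(tree)   # most likely parse, p = 1.0 * 0.4 * 1.0 * 1.0 * 0.6 = 0.24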
def _add_constituents_spanning(self, span, constituents, tokens):
"""
Find any constituents that might cover ``span``, and add them
to the most likely constituents table.
:rtype: None
:type span: tuple(int, int)
:param span: The section of the text for which we are
trying to find possible constituents. The span is
specified as a pair of integers, where the first integer
is the index of the first token that should be included in
the constituent; and the second integer is the index of
the first token that should not be included in the
constituent. I.e., the constituent should cover
``text[span[0]:span[1]]``, where ``text`` is the text
that we are parsing.
:type constituents: dict(tuple(int,int,Nonterminal) -> ProbabilisticToken or ProbabilisticTree)
:param constituents: The most likely constituents table. This
table records the most probable tree representation for
any given span and node value. In particular,
``constituents(s,e,nv)`` is the most likely
``ProbabilisticTree`` that covers ``text[s:e]``
and has a node value ``nv.symbol()``, where ``text``
is the text that we are parsing. When
``_add_constituents_spanning`` is called, ``constituents``
should contain all possible constituents that are shorter
than ``span``.
:type tokens: list of tokens
:param tokens: The text we are parsing. This is only used for
trace output.
"""
# Since some of the grammar productions may be unary, we need to
# repeatedly try all of the productions until none of them add any
# new constituents.
changed = True
while changed:
changed = False
# Find all ways instantiations of the grammar productions that
# cover the span.
instantiations = self._find_instantiations(span, constituents)
# For each production instantiation, add a new
# ProbabilisticTree whose probability is the product
# of the childrens' probabilities and the production's
# probability.
for (production, children) in instantiations:
subtrees = [c for c in children if isinstance(c, Tree)]
p = reduce(lambda pr, t: pr * t.prob(), subtrees, production.prob())
node = production.lhs().symbol()
tree = ProbabilisticTree(node, children, prob=p)
# If it's a new constituent, then add it to the
# constituents dictionary.
c = constituents.get((span[0], span[1], production.lhs()))
if self._trace > 1:
if c is None or c != tree:
if c is None or c.prob() < tree.prob():
print(" Insert:", end=" ")
else:
print(" Discard:", end=" ")
self._trace_production(production, p, span, len(tokens))
if c is None or c.prob() < tree.prob():
constituents[span[0], span[1], production.lhs()] = tree
changed = True
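# A worked example of the probability update above (numbers are illustrative):
# for a production NP -> Det N with probability 0.5 whose two children have
# subtree probabilities 0.8 and 0.6, the candidate tree gets
#     p = 0.5 * 0.8 * 0.6 = 0.24
# and replaces the current table entry only if that entry's probability is lower.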
def _find_instantiations(self, span, constituents):
"""
:return: a list of the production instantiations that cover a
given span of the text. A "production instantiation" is
a tuple containing a production and a list of children,
where the production's right hand side matches the list of
children; and the children cover ``span``.
:rtype: list of pair of ``Production`` and list of
(``ProbabilisticTree`` or token)
:type span: tuple(int, int)
:param span: The section of the text for which we are
trying to find production instantiations. The span is
specified as a pair of integers, where the first integer
is the index of the first token that should be covered by
the production instantiation; and the second integer is
the index of the first token that should not be covered by
the production instantiation.
:type constituents: dict(tuple(int,int,Nonterminal) -> ProbabilisticToken or ProbabilisticTree)
<filename>src/panoptes/utils/images/fits.py
import os
import shutil
import subprocess
from warnings import warn
from astropy import units as u
from astropy.io import fits
from astropy.wcs import WCS
from panoptes.utils import error
from panoptes.utils.logging import logger
def solve_field(fname, timeout=15, solve_opts=None, *args, **kwargs):
""" Plate solves an image.
Note: This is a low-level wrapper around the underlying `solve-field`
program. See `get_solve_field` for more typical usage and examples.
Args:
fname(str, required): Filename to solve in .fits extension.
timeout(int, optional): Timeout for the solve-field command,
defaults to 60 seconds.
solve_opts(list, optional): List of options for solve-field.
"""
solve_field_script = shutil.which('solve-field')
if solve_field_script is None: # pragma: no cover
raise error.InvalidSystemCommand(f"Can't find solve-field, is astrometry.net installed?")
# Add the options for solving the field
if solve_opts is not None:
options = solve_opts
else:
# Default options
options = [
'--guess-scale',
'--cpulimit', str(timeout),
'--no-verify',
'--crpix-center',
'--temp-axy',
'--index-xyls', 'none',
'--solved', 'none',
'--match', 'none',
'--rdls', 'none',
'--corr', 'none',
'--downsample', '4',
'--no-plots',
]
if 'ra' in kwargs:
options.append('--ra')
options.append(str(kwargs.get('ra')))
if 'dec' in kwargs:
options.append('--dec')
options.append(str(kwargs.get('dec')))
if 'radius' in kwargs:
options.append('--radius')
options.append(str(kwargs.get('radius')))
# Gather all the kwargs that start with `--` and are not already present.
logger.debug(f'Adding kwargs: {kwargs!r}')
def _modify_opt(opt, val):
if isinstance(val, bool):
opt_string = str(opt)
else:
opt_string = f'{opt}={val}'
return opt_string
options.extend([_modify_opt(opt, val)
for opt, val
in kwargs.items()
if opt.startswith('--') and opt not in options and not isinstance(val, bool)])
cmd = [solve_field_script] + options + [fname]
logger.debug(f'Solving with: {cmd}')
try:
proc = subprocess.Popen(cmd,
universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except Exception as e:
raise error.PanError(f"Problem plate-solving in solve_field: {e!r}")
return proc
def get_solve_field(fname, replace=True, overwrite=True, timeout=30, **kwargs):
"""Convenience function to wait for `solve_field` to finish.
This function merely passes the `fname` of the image to be solved along to `solve_field`,
which returns a subprocess.Popen object. This function then waits for that command
to complete, populates a dictionary with the EXIF information and returns. This is often
more useful than the raw `solve_field` function.
Example:
>>> from panoptes.utils.images import fits as fits_utils
>>> # Get our fits filename.
>>> fits_fn = getfixture('unsolved_fits_file')
>>> # Perform the solve.
>>> solve_info = fits_utils.get_solve_field(fits_fn)
>>> # Show solved filename.
>>> solve_info['solved_fits_file']
'.../unsolved.fits'
>>> # Pass a suggested location.
>>> ra = 15.23
>>> dec = 90
>>> radius = 5 # deg
>>> solve_info = fits_utils.solve_field(fits_fn, ra=ra, dec=dec, radius=radius)
>>> # Pass kwargs to `solve-field` program.
>>> solve_kwargs = {'--pnm': '/tmp/awesome.bmp', '--overwrite': True}
>>> solve_info = fits_utils.get_solve_field(fits_fn, **solve_kwargs, skip_solved=False)
>>> assert os.path.exists('/tmp/awesome.bmp')
Args:
fname ({str}): Name of FITS file to be solved.
replace (bool, optional): Saves the WCS back to the original file,
otherwise output base filename with `.new` extension. Default True.
overwrite (bool, optional): Clobber file, default True. Required if `replace=True`.
timeout (int, optional): The timeout for solving, default 30 seconds.
**kwargs ({dict}): Options to pass to `solve_field` should start with `--`.
Returns:
dict: Keyword information from the solved field.
"""
skip_solved = kwargs.get('skip_solved', True)
out_dict = {}
output = None
errs = None
header = getheader(fname)
wcs = WCS(header)
# Check for solved file
if skip_solved and wcs.is_celestial:
logger.info(f"Skipping solved file (use skip_solved=False to solve again): {fname}")
out_dict.update(header)
out_dict['solved_fits_file'] = fname
return out_dict
# Pass the overwrite flag through to solve-field.
if overwrite:
kwargs['--overwrite'] = True
# Use unpacked version of file.
was_compressed = False
if fname.endswith('.fz'):
logger.debug(f'Uncompressing {fname}')
fname = funpack(fname)
logger.debug(f'Using {fname} for solving')
was_compressed = True
logger.debug(f'Solving with: {kwargs!r}')
proc = solve_field(fname, **kwargs)
try:
output, errs = proc.communicate(timeout=timeout)
except subprocess.TimeoutExpired:
proc.kill()
output, errs = proc.communicate()
raise error.Timeout(f'Timeout while solving: {output!r} {errs!r}')
else:
if proc.returncode != 0:
logger.debug(f'Returncode: {proc.returncode}')
for log in [output, errs]:
if log and log > '':
logger.debug(f'Output on {fname}: {log}')
if proc.returncode == 3:
raise error.SolveError(f'solve-field not found: {output}')
new_fname = fname.replace('.fits', '.new')
if replace:
logger.debug(f'Overwriting original {fname}')
os.replace(new_fname, fname)
else:
fname = new_fname
try:
header = getheader(fname)
header.remove('COMMENT', ignore_missing=True, remove_all=True)
header.remove('HISTORY', ignore_missing=True, remove_all=True)
out_dict.update(header)
except OSError:
logger.warning(f"Can't read fits header for: {fname}")
# Check it was solved.
if WCS(header).is_celestial is False:
raise error.SolveError('File not properly solved, no WCS header present.')
# Remove WCS file.
os.remove(fname.replace('.fits', '.wcs'))
if was_compressed and replace:
logger.debug(f'Compressing plate-solved {fname}')
fname = fpack(fname)
out_dict['solved_fits_file'] = fname
return out_dict
def get_wcsinfo(fits_fname, **kwargs):
"""Returns the WCS information for a FITS file.
Uses the `wcsinfo` astrometry.net utility script to get the WCS information
from a plate-solved file.
Args:
fits_fname ({str}): Name of a FITS file that contains a WCS.
**kwargs: Args that can be passed to wcsinfo.
Returns:
dict: Output as returned from `wcsinfo`.
Raises:
error.InvalidCommand: Raised if `wcsinfo` is not found (part of astrometry.net)
"""
assert os.path.exists(fits_fname), warn(f"No file exists at: {fits_fname}")
wcsinfo = shutil.which('wcsinfo')
if wcsinfo is None:
raise error.InvalidCommand('wcsinfo not found')
run_cmd = [wcsinfo, fits_fname]
if fits_fname.endswith('.fz'):
run_cmd.append('-e')
run_cmd.append('1')
proc = subprocess.Popen(run_cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, universal_newlines=True)
try:
output, errs = proc.communicate(timeout=5)
except subprocess.TimeoutExpired: # pragma: no cover
proc.kill()
output, errs = proc.communicate()
unit_lookup = {
'crpix0': u.pixel,
'crpix1': u.pixel,
'crval0': u.degree,
'crval1': u.degree,
'cd11': (u.deg / u.pixel),
'cd12': (u.deg / u.pixel),
'cd21': (u.deg / u.pixel),
'cd22': (u.deg / u.pixel),
'imagew': u.pixel,
'imageh': u.pixel,
'pixscale': (u.arcsec / u.pixel),
'orientation': u.degree,
'ra_center': u.degree,
'dec_center': u.degree,
'orientation_center': u.degree,
'ra_center_h': u.hourangle,
'ra_center_m': u.minute,
'ra_center_s': u.second,
'dec_center_d': u.degree,
'dec_center_m': u.minute,
'dec_center_s': u.second,
'fieldarea': (u.degree * u.degree),
'fieldw': u.degree,
'fieldh': u.degree,
'decmin': u.degree,
'decmax': u.degree,
'ramin': u.degree,
'ramax': u.degree,
'ra_min_merc': u.degree,
'ra_max_merc': u.degree,
'dec_min_merc': u.degree,
'dec_max_merc': u.degree,
'merc_diff': u.degree,
}
wcs_info = {}
for line in output.split('\n'):
try:
k, v = line.split(' ')
try:
v = float(v)
except Exception:
pass
wcs_info[k] = float(v) * unit_lookup.get(k, 1)
except ValueError:
pass
# print("Error on line: {}".format(line))
wcs_info['wcs_file'] = fits_fname
return wcs_info
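# A usage sketch; the filename is illustrative and must point at a plate-solved
# FITS file:
#
#     info = get_wcsinfo("solved_image.fits")
#     print(info["pixscale"])   # astropy Quantity in arcsec / pixel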
def fpack(fits_fname, unpack=False, overwrite=True):
"""Compress/Decompress a FITS file
Uses `fpack` (or `funpack` if `unpack=True`) to compress a FITS file
Args:
fits_fname ({str}): Name of a FITS file that contains a WCS.
unpack ({bool}, optional): file should be decompressed instead of compressed, default False.
Returns:
str: Filename of compressed/decompressed file.
"""
assert os.path.exists(fits_fname), warn(
"No file exists at: {}".format(fits_fname))
if unpack:
fpack = shutil.which('funpack')
run_cmd = [fpack, '-D', fits_fname]
out_file = fits_fname.replace('.fz', '')
else:
fpack = shutil.which('fpack')
run_cmd = [fpack, '-D', '-Y', fits_fname]
out_file = fits_fname.replace('.fits', '.fits.fz')
if os.path.exists(out_file):
if overwrite is False:
raise FileExistsError(f'Destination file already exists at location and overwrite=False')
else:
os.remove(out_file)
try:
assert fpack is not None
except AssertionError:
warn("fpack not found (try installing cfitsio). File has not been changed")
return fits_fname
logger.debug("fpack command: {}".format(run_cmd))
proc = subprocess.Popen(run_cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, universal_newlines=True)
try:
output, errs = proc.communicate(timeout=5)
except subprocess.TimeoutExpired:
proc.kill()
output, errs = proc.communicate()
return out_file
def funpack(*args, **kwargs):
"""Unpack a FITS file.
Note:
This is a thin-wrapper around the ~fpack function
with the `unpack=True` option specified. See ~fpack
documentation for details.
Args:
*args: Arguments passed to ~fpack.
**kwargs: Keyword arguments passed to ~fpack.
Returns:
str: Path to uncompressed FITS file.
"""
return fpack(*args, unpack=True, **kwargs)
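# A round-trip sketch (paths are illustrative): fpack() swaps ".fits" for
# ".fits.fz", and funpack() strips the ".fz" again.
#
#     packed = fpack("frame.fits")    # -> "frame.fits.fz"
#     original = funpack(packed)      # -> "frame.fits"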
def write_fits(data, header, filename, exposure_event=None, **kwargs):
"""Write FITS file to requested location.
>>> from panoptes.utils.images import fits as fits_utils
>>> data = np.random.normal(size=100)
>>> header = { 'FILE': 'delete_me', 'TEST': True }
>>> filename = str(getfixture('tmpdir').join('temp.fits'))
>>> fits_utils.write_fits(data, header, filename)
>>> assert os.path.exists(filename)
>>> fits_utils.getval(filename, 'FILE')
'delete_me'
>>> data2 = fits_utils.getdata(filename)
>>> assert np.array_equal(data, data2)
Args:
data (array_like): The data to be written.
header (dict): Dictionary of items to be saved in header.
filename (str): Path to filename for output.
exposure_event (None|`threading.Event`, optional): A `threading.Event` that
can be triggered when the image is written.
kwargs (dict): Options that are passed to the `astropy.io.fits.PrimaryHDU.writeto`
method.
"""
if not isinstance(header, fits.Header):
header = fits.Header(header)
hdu = fits.PrimaryHDU(data, header=header)
# Create directories if required.
if os.path.dirname(filename):
os.makedirs(os.path.dirname(filename), mode=0o775, exist_ok=True)
try:
hdu.writeto(filename, **kwargs)
except OSError as err:
logger.error(f'Error writing image to {filename}: {err!r}')
else:
logger.debug(f'Image written to {filename}')
finally:
if exposure_event:
exposure_event.set()
def update_observation_headers(file_path, info):
"""Update FITS headers with items from the Observation status.
>>> # Check the headers
>>> from panoptes.utils.images import fits as fits_utils
>>> fits_fn = getfixture('unsolved_fits_file')
>>> # Show original value
>>> fits_utils.getval(fits_fn, 'FIELD')
'KIC 8462852'
>>> info = {'field_name': 'Tabbys Star'}
u('\u6cb3\u5357\u7701\u5b89\u9633\u5e02')},
'861454354':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u4e1c\u8425\u5e02')},
'861384920':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u9e64\u58c1\u5e02')},
'861384921':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u9e64\u58c1\u5e02')},
'861384922':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u9e64\u58c1\u5e02')},
'861384923':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u9e64\u58c1\u5e02')},
'861384924':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5b89\u9633\u5e02')},
'861384925':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5b89\u9633\u5e02')},
'861384926':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5b89\u9633\u5e02')},
'861384927':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5b89\u9633\u5e02')},
'86138688':{'en': 'Wenzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u6e29\u5dde\u5e02')},
'86138689':{'en': 'Jinhua, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u91d1\u534e\u5e02')},
'861379078':{'en': 'Huizhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u60e0\u5dde\u5e02')},
'861379079':{'en': 'Huizhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u60e0\u5dde\u5e02')},
'86138684':{'en': 'Wenzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u6e29\u5dde\u5e02')},
'86138685':{'en': 'Wenzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u6e29\u5dde\u5e02')},
'86138686':{'en': 'Wenzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u6e29\u5dde\u5e02')},
'86138687':{'en': 'Wenzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u6e29\u5dde\u5e02')},
'86138680':{'en': 'Hangzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u676d\u5dde\u5e02')},
'86138681':{'en': 'Hangzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u676d\u5dde\u5e02')},
'861379070':{'en': 'Zhongshan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4e2d\u5c71\u5e02')},
'86138683':{'en': 'Wenzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u6e29\u5dde\u5e02')},
'861388788':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u695a\u96c4\u5f5d\u65cf\u81ea\u6cbb\u5dde')},
'861388789':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u695a\u96c4\u5f5d\u65cf\u81ea\u6cbb\u5dde')},
'861388780':{'en': 'Baoshan, Yunnan', 'zh': u('\u4e91\u5357\u7701\u4fdd\u5c71\u5e02')},
'861388781':{'en': 'Baoshan, Yunnan', 'zh': u('\u4e91\u5357\u7701\u4fdd\u5c71\u5e02')},
'861388782':{'en': 'Baoshan, Yunnan', 'zh': u('\u4e91\u5357\u7701\u4fdd\u5c71\u5e02')},
'861388783':{'en': 'Baoshan, Yunnan', 'zh': u('\u4e91\u5357\u7701\u4fdd\u5c71\u5e02')},
'861388784':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u6587\u5c71\u58ee\u65cf\u82d7\u65cf\u81ea\u6cbb\u5dde')},
'861388785':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u6587\u5c71\u58ee\u65cf\u82d7\u65cf\u81ea\u6cbb\u5dde')},
'861388786':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u5fb7\u5b8f\u50a3\u65cf\u666f\u9887\u65cf\u81ea\u6cbb\u5dde')},
'861388787':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u5fb7\u5b8f\u50a3\u65cf\u666f\u9887\u65cf\u81ea\u6cbb\u5dde')},
'861452500':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5357\u5e02')},
'861452501':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5357\u5e02')},
'861452502':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5357\u5e02')},
'861452503':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5357\u5e02')},
'861452504':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5357\u5e02')},
'861452505':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u65e5\u7167\u5e02')},
'861452506':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u65e5\u7167\u5e02')},
'861452507':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u5a01\u6d77\u5e02')},
'861452508':{'en': 'Weihai, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u5a01\u6d77\u5e02')},
'861452509':{'en': 'Weihai, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u5a01\u6d77\u5e02')},
'861453818':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u5357\u5145\u5e02')},
'861453815':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u7ef5\u9633\u5e02')},
'861453814':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u8d44\u9633\u5e02')},
'861453817':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u5357\u5145\u5e02')},
'861453816':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u7ef5\u9633\u5e02')},
'861453811':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u6210\u90fd\u5e02')},
'861453810':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u6210\u90fd\u5e02')},
'861453813':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u6210\u90fd\u5e02')},
'861453812':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u6210\u90fd\u5e02')},
'861379698':{'en': 'Daqing, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u5927\u5e86\u5e02')},
'861379699':{'en': 'Daqing, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u5927\u5e86\u5e02')},
'861379692':{'en': 'Jixi, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u9e21\u897f\u5e02')},
'861379693':{'en': 'Jixi, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u9e21\u897f\u5e02')},
'861379690':{'en': 'Shuangyashan, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u53cc\u9e2d\u5c71\u5e02')},
'861379691':{'en': 'Shuangyashan, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u53cc\u9e2d\u5c71\u5e02')},
'861379696':{'en': 'Heihe, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u9ed1\u6cb3\u5e02')},
'861379697':{'en': 'Suihua, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u7ee5\u5316\u5e02')},
'861379694':{'en': 'Hegang, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u9e64\u5c97\u5e02')},
'861379695':{'en': 'Heihe, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u9ed1\u6cb3\u5e02')},
'861381523':{'en': 'Nantong, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861381522':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861381521':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861381520':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861381527':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861381526':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861381525':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861381524':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861381529':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861381528':{'en': 'Suzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861384735':{'en': 'Wuhai, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u4e4c\u6d77\u5e02')},
'861384734':{'en': 'Wuhai, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u4e4c\u6d77\u5e02')},
'861452845':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u6b66\u5a01\u5e02')},
'861384737':{'en': 'Ordos, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u9102\u5c14\u591a\u65af\u5e02')},
'861457191':{'en': 'Taizhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u53f0\u5dde\u5e02')},
'861389933':{'en': 'Hami, Xinjiang', 'zh': u('\u65b0\u7586\u54c8\u5bc6\u5730\u533a')},
'861389934':{'en': 'Hami, Xinjiang', 'zh': u('\u65b0\u7586\u54c8\u5bc6\u5730\u533a')},
'861389935':{'en': '<NAME>', 'zh': u('\u65b0\u7586\u54c8\u5bc6\u5730\u533a')},
'861389936':{'en': '<NAME>', 'zh': u('\u65b0\u7586\u54c8\u5bc6\u5730\u533a')},
'861389937':{'en': 'Tacheng, Xinjiang', 'zh': u('\u65b0\u7586\u5854\u57ce\u5730\u533a')},
'861380234':{'en': 'Zhanjiang, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6e5b\u6c5f\u5e02')},
'861380235':{'en': 'Huizhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u60e0\u5dde\u5e02')},
'861380236':{'en': 'Meizhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6885\u5dde\u5e02')},
'861380237':{'en': 'Dongguan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4e1c\u839e\u5e02')},
'861380230':{'en': 'Chaozhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6f6e\u5dde\u5e02')},
'861380231':{'en': 'Jieyang, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u63ed\u9633\u5e02')},
'861378808':{'en': 'Hechi, Guangxi', 'zh': u('\u5e7f\u897f\u6cb3\u6c60\u5e02')},
'861378809':{'en': 'Qinzhou, Guangxi', 'zh': u('\u5e7f\u897f\u94a6\u5dde\u5e02')},
'861397368':{'en': 'Yiyang, Hunan', 'zh': u('\u6e56\u5357\u7701\u76ca\u9633\u5e02')},
'861397369':{'en': 'Yiyang, Hunan', 'zh': u('\u6e56\u5357\u7701\u76ca\u9633\u5e02')},
'861380232':{'en': 'Jieyang, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u63ed\u9633\u5e02')},
'861378800':{'en': 'Fangchenggang, Guangxi', 'zh': u('\u5e7f\u897f\u9632\u57ce\u6e2f\u5e02')},
'861378801':{'en': 'Nanning, Guangxi', 'zh': u('\u5e7f\u897f\u5357\u5b81\u5e02')},
'861378802':{'en': 'Liuzhou, Guangxi', 'zh': u('\u5e7f\u897f\u67f3\u5dde\u5e02')},
'861378803':{'en': 'Guilin, Guangxi', 'zh': u('\u5e7f\u897f\u6842\u6797\u5e02')},
'861378804':{'en': 'Wuzhou, Guangxi', 'zh': u('\u5e7f\u897f\u68a7\u5dde\u5e02')},
'861378805':{'en': 'Yulin, Guangxi', 'zh': u('\u5e7f\u897f\u7389\u6797\u5e02')},
'861378806':{'en': 'Baise, Guangxi', 'zh': u('\u5e7f\u897f\u767e\u8272\u5e02')},
'861378807':{'en': 'Qinzhou, Guangxi', 'zh': u('\u5e7f\u897f\u94a6\u5dde\u5e02')},
'861452348':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u6f2f\u6cb3\u5e02')},
'861452349':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u6f2f\u6cb3\u5e02')},
'861452342':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u6d1b\u9633\u5e02')},
'861452343':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u6d1b\u9633\u5e02')},
'861452340':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u6d1b\u9633\u5e02')},
'861452341':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u6d1b\u9633\u5e02')},
'861452346':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u7126\u4f5c\u5e02')},
'861452347':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u7126\u4f5c\u5e02')},
'861452344':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u7126\u4f5c\u5e02')},
'861452345':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u7126\u4f5c\u5e02')},
'861390464':{'en': '<NAME>', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u54c8\u5c14\u6ee8\u5e02')},
'861390465':{'en': '<NAME>', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u54c8\u5c14\u6ee8\u5e02')},
'861390466':{'en': '<NAME>', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u54c8\u5c14\u6ee8\u5e02')},
'861390467':{'en': 'Mudanjiang, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u7261\u4e39\u6c5f\u5e02')},
'861390460':{'en': '<NAME>', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u54c8\u5c14\u6ee8\u5e02')},
'861390461':{'en': '<NAME>', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u54c8\u5c14\u6ee8\u5e02')},
'861390462':{'en': 'Qiqihar, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u9f50\u9f50\u54c8\u5c14\u5e02')},
'861390463':{'en': '<NAME>', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u54c8\u5c14\u6ee8\u5e02')},
'861395638':{'en': 'Bengbu, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u868c\u57e0\u5e02')},
'861395639':{'en': 'Bengbu, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u868c\u57e0\u5e02')},
'861390468':{'en': 'Jiamusi, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u4f73\u6728\u65af\u5e02')},
'861390469':{'en': 'Daqing, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u5927\u5e86\u5e02')},
'86145822':{'en': 'Hefei, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5408\u80a5\u5e02')},
'86137995':{'en': 'Quanzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u6cc9\u5dde\u5e02')},
'86137996':{'en': 'Putian, Fujian', 'zh': u('\u798f\u5efa\u7701\u8386\u7530\u5e02')},
'86137993':{'en': 'Fuzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u798f\u5dde\u5e02')},
'861452844':{'en': 'Qingyang, Gansu', 'zh': u('\u7518\u8083\u7701\u5e86\u9633\u5e02')},
'861395622':{'en': 'MaAnshan, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u9a6c\u978d\u5c71\u5e02')},
'861452759':{'en': 'Shaoyang, Hunan', 'zh': u('\u6e56\u5357\u7701\u90b5\u9633\u5e02')},
'861452758':{'en': 'Loudi, Hunan', 'zh': u('\u6e56\u5357\u7701\u5a04\u5e95\u5e02')},
'861452755':{'en': 'Chenzhou, Hunan', 'zh': u('\u6e56\u5357\u7701\u90f4\u5dde\u5e02')},
'861452754':{'en': 'Hengyang, Hunan', 'zh': u('\u6e56\u5357\u7701\u8861\u9633\u5e02')},
'861452757':{'en': 'Yiyang, Hunan', 'zh': u('\u6e56\u5357\u7701\u76ca\u9633\u5e02')},
'861452756':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u5e38\u5fb7\u5e02')},
'861452751':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u957f\u6c99\u5e02')},
'861452750':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u957f\u6c99\u5e02')},
'861452753':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u682a\u6d32\u5e02')},
'861452752':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u957f\u6c99\u5e02')},
'861452101':{'en': '<NAME>', 'zh': u('\u6cb3\u5317\u7701\u90af\u90f8\u5e02')},
'861380498':{'en': '<NAME>', 'zh': u('\u8fbd\u5b81\u7701\u6c88\u9633\u5e02')},
'861380499':{'en': '<NAME>', 'zh': u('\u8fbd\u5b81\u7701\u6c88\u9633\u5e02')},
'861380494':{'en': 'Dalian, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u5927\u8fde\u5e02')},
'861380495':{'en': 'Dalian, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u5927\u8fde\u5e02')},
'861380496':{'en': 'Dalian, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u5927\u8fde\u5e02')},
'861380497':{'en': 'Shenyang, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u6c88\u9633\u5e02')},
'861380490':{'en': 'Shenyang, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u6c88\u9633\u5e02')},
'861380491':{'en': 'Anshan, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u978d\u5c71\u5e02')},
'861380492':{'en': 'Anshan, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u978d\u5c71\u5e02')},
'861380493':{'en': 'Fushun, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u629a\u987a\u5e02')},
'861399755':{'en': 'Jingzhou, Hubei', 'zh': u('\u6e56\u5317\u7701\u8346\u5dde\u5e02')},
'86139922':{'en': 'Yulin, Shaanxi', 'zh': u('\u9655\u897f\u7701\u6986\u6797\u5e02')},
'86139560':{'en': 'Hefei, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5408\u80a5\u5e02')},
'861399756':{'en': 'Jingzhou, Hubei', 'zh': u('\u6e56\u5317\u7701\u8346\u5dde\u5e02')},
'861399751':{'en': '<NAME>', 'zh': u('\u6e56\u5317\u7701\u54b8\u5b81\u5e02')},
'86139567':{'en': '<NAME>', 'zh': u('\u5b89\u5fbd\u7701\u961c\u9633\u5e02')},
'861399753':{'en': '<NAME>', 'zh': u('\u6e56\u5317\u7701\u54b8\u5b81\u5e02')},
'861399752':{'en': 'Xianning, Hubei', 'zh': u('\u6e56\u5317\u7701\u54b8\u5b81\u5e02')},
'861453451':{'en': 'Hefei, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5408\u80a5\u5e02')},
'86139569':{'en': 'Hefei, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5408\u80a5\u5e02')},
'861399759':{'en': 'Jingzhou, Hubei', 'zh': u('\u6e56\u5317\u7701\u8346\u5dde\u5e02')},
'86139920':{'en': 'Xianyang, Shaanxi', 'zh': u('\u9655\u897f\u7701\u54b8\u9633\u5e02')},
'861452109':{'en': 'Xingtai, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90a2\u53f0\u5e02')},
'86139927':{'en': 'Baoji, Shaanxi', 'zh': u('\u9655\u897f\u7701\u5b9d\u9e21\u5e02')},
'861387204':{'en': 'Huanggang, Hubei', 'zh': u('\u6e56\u5317\u7701\u9ec4\u5188\u5e02')},
'861387205':{'en': 'Huangshi, Hubei', 'zh': u('\u6e56\u5317\u7701\u9ec4\u77f3\u5e02')},
'861387206':{'en': 'Huangshi, Hubei', 'zh': u('\u6e56\u5317\u7701\u9ec4\u77f3\u5e02')},
'861387207':{'en': 'Huangshi, Hubei', 'zh': u('\u6e56\u5317\u7701\u9ec4\u77f3\u5e02')},
'861387200':{'en': 'Huanggang, Hubei', 'zh': u('\u6e56\u5317\u7701\u9ec4\u5188\u5e02')},
'861387201':{'en': 'Huanggang, Hubei', 'zh': u('\u6e56\u5317\u7701\u9ec4\u5188\u5e02')},
'861387202':{'en': 'Huanggang, Hubei', 'zh': u('\u6e56\u5317\u7701\u9ec4\u5188\u5e02')},
'861387203':{'en': 'Huanggang, Hubei', 'zh': u('\u6e56\u5317\u7701\u9ec4\u5188\u5e02')},
'86139925':{'en': '<NAME>', 'zh': u('\u9655\u897f\u7701\u5b89\u5eb7\u5e02')},
'861387208':{'en': 'Huangshi, Hubei', 'zh': u('\u6e56\u5317\u7701\u9ec4\u77f3\u5e02')},
'861387209':{'en': 'Huangshi, Hubei', 'zh': u('\u6e56\u5317\u7701\u9ec4\u77f3\u5e02')},
'861380270':{'en': 'Shenzhen, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6df1\u5733\u5e02')},
'861380271':{'en': 'Shantou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6c55\u5934\u5e02')},
'861380272':{'en': 'Guangzhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u5e7f\u5dde\u5e02')},
'861380273':{'en': 'Guangzhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u5e7f\u5dde\u5e02')},
'861380274':{'en': 'Guangzhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u5e7f\u5dde\u5e02')},
'861380275':{'en': 'Guangzhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u5e7f\u5dde\u5e02')},
'861380276':{'en': 'Guangzhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u5e7f\u5dde\u5e02')},
'861380277':{'en': 'Guangzhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u5e7f\u5dde\u5e02')},
'861380278':{'en': 'Guangzhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u5e7f\u5dde\u5e02')},
'861380279':{'en': 'Guangzhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u5e7f\u5dde\u5e02')},
'86139928':{'en': 'XiAn, Shaanxi', 'zh': u('\u9655\u897f\u7701\u897f\u5b89\u5e02')},
'86139929':{'en': 'Tongchuan, Shaanxi', 'zh': u('\u9655\u897f\u7701\u94dc\u5ddd\u5e02')},
'861453932':{'en': 'Foshan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4f5b\u5c71\u5e02')},
'861453933':{'en': 'Foshan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4f5b\u5c71\u5e02')},
'861453930':{'en': 'Shenzhen, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6df1\u5733\u5e02')},
'861453931':{'en': 'Shenzhen, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6df1\u5733\u5e02')},
'861380343':{'en': 'Taiyuan, Shanxi', 'zh': u('\u5c71\u897f\u7701\u592a\u539f\u5e02')},
'861453937':{'en': 'Zhanjiang, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6e5b\u6c5f\u5e02')},
'861453934':{'en': 'Foshan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4f5b\u5c71\u5e02')},
'861453935':{'en': 'Foshan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4f5b\u5c71\u5e02')},
'861453938':{'en': 'Zhanjiang, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6e5b\u6c5f\u5e02')},
'861453939':{'en': 'Zhanjiang, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6e5b\u6c5f\u5e02')},
'861399480':{'en': u('L\u00fcliang, Shanxi'), 'zh': u('\u5c71\u897f\u7701\u5415\u6881\u5e02')},
'861454582':{'en': 'Tianshui, Gansu', 'zh': u('\u7518\u8083\u7701\u5929\u6c34\u5e02')},
'861454583':{'en': 'Baiyin, Gansu', 'zh': u('\u7518\u8083\u7701\u767d\u94f6\u5e02')},
'861454580':{'en': 'Lanzhou, Gansu', 'zh': u('\u7518\u8083\u7701\u5170\u5dde\u5e02')},
'861454581':{'en': 'Lanzhou, Gansu', 'zh': u('\u7518\u8083\u7701\u5170\u5dde\u5e02')},
'861454586':{'en': 'Jiuquan, Gansu', 'zh': u('\u7518\u8083\u7701\u9152\u6cc9\u5e02')},
'861454587':{'en': 'Jiayuguan, Gansu', 'zh': u('\u7518\u8083\u7701\u5609\u5cea\u5173\u5e02')},
'861454584':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u5e86\u9633\u5e02')},
'861454585':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u91d1\u660c\u5e02')},
'861454588':{'en': 'Lanzhou, Gansu', 'zh': u('\u7518\u8083\u7701\u5170\u5dde\u5e02')},
'861454589':{'en': 'Lanzhou, Gansu', 'zh': u('\u7518\u8083\u7701\u5170\u5dde\u5e02')},
'861380344':{'en': 'Xinzhou, Shanxi', 'zh': u('\u5c71\u897f\u7701\u5ffb\u5dde\u5e02')},
'861379447':{'en': 'Shenzhen, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6df1\u5733\u5e02')},
'861379446':{'en': 'Shenzhen, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6df1\u5733\u5e02')},
'861379445':{'en': 'Shenzhen, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6df1\u5733\u5e02')},
'861379444':{'en': 'Guangzhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u5e7f\u5dde\u5e02')},
'861379443':{'en': 'Guangzhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u5e7f\u5dde\u5e02')},
'861379442':{'en': 'Guangzhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u5e7f\u5dde\u5e02')},
'861379441':{'en': 'Guangzhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u5e7f\u5dde\u5e02')},
'861379440':{'en': 'Guangzhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u5e7f\u5dde\u5e02')},
'86138749':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u957f\u6c99\u5e02')},
'86138748':{'en': 'Changsha, Hunan', 'zh': u('\u6e56\u5357\u7701\u957f\u6c99\u5e02')},
'861379449':{'en': 'Shenzhen, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6df1\u5733\u5e02')},
'861379448':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u6df1\u5733\u5e02')},
'861386157':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6dee\u5b89\u5e02')},
'861386156':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6dee\u5b89\u5e02')},
'861386155':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6dee\u5b89\u5e02')},
'861386154':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861386153':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861386152':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861386151':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861386150':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861386159':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6dee\u5b89\u5e02')},
'861386158':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6dee\u5b89\u5e02')},
'861379229':{'en': 'Binzhou, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6ee8\u5dde\u5e02')},
'861379228':{'en': 'Binzhou, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6ee8\u5dde\u5e02')},
'861379999':{'en': 'Fuzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u798f\u5dde\u5e02')},
'861379998':{'en': 'Fuzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u798f\u5dde\u5e02')},
'861379223':{'en': 'Dezhou, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u5fb7\u5dde\u5e02')},
'861379222':{'en': 'Dezhou, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u5fb7\u5dde\u5e02')},
'861379221':{'en': 'Dezhou, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u5fb7\u5dde\u5e02')},
'861379220':{'en': 'Dezhou, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u5fb7\u5dde\u5e02')},
'861379227':{'en': 'Binzhou, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6ee8\u5dde\u5e02')},
'861379226':{'en': 'Binzhou, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6ee8\u5dde\u5e02')},
'861379225':{'en': 'Binzhou, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6ee8\u5dde\u5e02')},
'861379224':{'en': 'Dezhou, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u5fb7\u5dde\u5e02')},
'861398909':{'en': 'Lhasa, Tibet', 'zh': u('\u897f\u85cf\u62c9\u8428\u5e02')},
'861398908':{'en': 'Lhasa, Tibet', 'zh': u('\u897f\u85cf\u62c9\u8428\u5e02')},
'861398907':{'en': 'Ngari, Tibet', 'zh': u('\u897f\u85cf\u963f\u91cc\u5730\u533a')},
'861398906':{'en': 'Nagqu, Tibet', 'zh': u('\u897f\u85cf\u90a3\u66f2\u5730\u533a')},
'861398905':{'en': 'Qamdo, Tibet', 'zh': u('\u897f\u85cf\u660c\u90fd\u5730\u533a')},
'861398904':{'en': 'Nyingchi, Tibet', 'zh': u('\u897f\u85cf\u6797\u829d\u5730\u533a')},
'861398903':{'en': 'Shannan, Tibet', 'zh': u('\u897f\u85cf\u5c71\u5357\u5730\u533a')},
'861398902':{'en': 'Xigaze, Tibet', 'zh': u('\u897f\u85cf\u65e5\u5580\u5219\u5730\u533a')},
'861398901':{'en': 'Lhasa, Tibet', 'zh': u('\u897f\u85cf\u62c9\u8428\u5e02')},
'861398900':{'en': 'Lhasa, Tibet', 'zh': u('\u897f\u85cf\u62c9\u8428\u5e02')},
'861397177':{'en': 'Huangshi, Hubei', 'zh': u('\u6e56\u5317\u7701\u9ec4\u77f3\u5e02')},
'861397176':{'en': 'Huangshi, Hubei', 'zh': u('\u6e56\u5317\u7701\u9ec4\u77f3\u5e02')},
'861397175':{'en': 'Huangshi, Hubei', 'zh': u('\u6e56\u5317\u7701\u9ec4\u77f3\u5e02')},
'861397174':{'en': 'Huanggang, Hubei', 'zh': u('\u6e56\u5317\u7701\u9ec4\u5188\u5e02')},
'861397173':{'en': 'Huanggang, Hubei', 'zh': u('\u6e56\u5317\u7701\u9ec4\u5188\u5e02')},
'861397172':{'en': 'Huanggang, Hubei', 'zh': u('\u6e56\u5317\u7701\u9ec4\u5188\u5e02')},
'861397171':{'en': 'Huanggang, Hubei', 'zh': u('\u6e56\u5317\u7701\u9ec4\u5188\u5e02')},
'861397170':{'en': 'Huanggang, Hubei', 'zh': u('\u6e56\u5317\u7701\u9ec4\u5188\u5e02')},
'861397179':{'en': 'Suizhou, Hubei', 'zh': u('\u6e56\u5317\u7701\u968f\u5dde\u5e02')},
'861397178':{'en': 'Huangshi, Hubei', 'zh': u('\u6e56\u5317\u7701\u9ec4\u77f3\u5e02')},
'861457055':{'en': '<NAME>', 'zh': u('\u65b0\u7586\u660c\u5409\u56de\u65cf\u81ea\u6cbb\u5dde')},
'861457054':{'en': 'Changji, Xinjiang', 'zh': u('\u65b0\u7586\u660c\u5409\u56de\u65cf\u81ea\u6cbb\u5dde')},
'861454297':{'en': '<NAME>', 'zh': u('\u8fbd\u5b81\u7701\u961c\u65b0\u5e02')},
'861457056':{'en': '<NAME>', 'zh': u('\u65b0\u7586\u4f0a\u7281\u54c8\u8428\u514b\u81ea\u6cbb\u5dde')},
'861457051':{'en': 'Urumchi, Xinjiang', 'zh': u('\u65b0\u7586\u4e4c\u9c81\u6728\u9f50\u5e02')},
'861457050':{'en': 'Urumchi, Xinjiang', 'zh': u('\u65b0\u7586\u4e4c\u9c81\u6728\u9f50\u5e02')},
'861457053':{'en': 'Kashi, Xinjiang', 'zh': u('\u65b0\u7586\u5580\u4ec0\u5730\u533a')},
'861457052':{'en': 'Kashi, Xinjiang', 'zh': u('\u65b0\u7586\u5580\u4ec0\u5730\u533a')},
'861457059':{'en': '<NAME>', 'zh': u('\u65b0\u7586\u4f0a\u7281\u54c8\u8428\u514b\u81ea\u6cbb\u5dde')},
'861457058':{'en': 'Kizilsu, Xinjiang', 'zh': u('\u65b0\u7586\u514b\u5b5c\u52d2\u82cf\u67ef\u5c14\u514b\u5b5c\u81ea\u6cbb\u5dde')},
'861395117':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')},
'861395116':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')},
'861390435':{'en': 'Siping, Jilin', 'zh': u('\u5409\u6797\u7701\u56db\u5e73\u5e02')},
'86138257':{'en': 'Dongguan, | |
x.__repr__() <==> repr(x)
"""
pass
def __rmul__(*args, **kwargs):
"""
x.__rmul__(n) <==> n*x
"""
pass
def __setitem__(*args, **kwargs):
"""
x.__setitem__(i, y) <==> x[i]=y
"""
pass
def __setslice__(*args, **kwargs):
"""
x.__setslice__(i, j, y) <==> x[i:j]=y
Use of negative indices is not supported.
"""
pass
def __str__(*args, **kwargs):
"""
x.__str__() <==> str(x)
"""
pass
def append(*args, **kwargs):
"""
Add a value to the end of the array.
"""
pass
def clear(*args, **kwargs):
"""
Remove all elements from the array.
"""
pass
def copy(*args, **kwargs):
"""
Replace the array contents with that of another or of a compatible Python sequence.
"""
pass
def insert(*args, **kwargs):
"""
Insert a new value into the array at the given index.
"""
pass
def remove(*args, **kwargs):
"""
Remove an element from the array.
"""
pass
def setLength(*args, **kwargs):
"""
Grow or shrink the array to contain a specific number of elements.
"""
pass
sizeIncrement = None
__new__ = None
class MFnBase(object):
"""
Base class for function sets.
"""
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def hasObj(*args, **kwargs):
"""
Returns True if the function set is compatible with the specified Maya object.
"""
pass
def object(*args, **kwargs):
"""
Returns a reference to the object to which the function set is currently attached, or MObject.kNullObj if none.
"""
pass
def setObject(*args, **kwargs):
"""
Attaches the function set to the specified Maya object.
"""
pass
def type(*args, **kwargs):
"""
Returns the type of the function set.
"""
pass
__new__ = None
class MAttributePattern(object):
"""
Manipulate attribute structure patterns.
"""
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __str__(*args, **kwargs):
"""
x.__str__() <==> str(x)
"""
pass
def addRootAttr(*args, **kwargs):
"""
Add the given root attribute to this pattern.
"""
pass
def name(*args, **kwargs):
"""
Return the name of the attribute pattern.
"""
pass
def removeRootAttr(*args, **kwargs):
"""
Remove the nth or passed-in root attribute from this pattern.
"""
pass
def rootAttr(*args, **kwargs):
"""
Return the nth root attribute in this pattern.
"""
pass
def rootAttrCount(*args, **kwargs):
"""
Return the number of root attributes in this pattern.
"""
pass
def attrPattern(*args, **kwargs):
"""
Return the specified pattern indexed from the global list.
"""
pass
def attrPatternCount(*args, **kwargs):
"""
Return the global number of patterns created.
"""
pass
def findPattern(*args, **kwargs):
"""
Return a pattern with the given name, None if not found.
"""
pass
__new__ = None
class MFloatVectorArray(object):
"""
Array of MFloatVector values.
"""
def __add__(*args, **kwargs):
"""
x.__add__(y) <==> x+y
"""
pass
def __contains__(*args, **kwargs):
"""
x.__contains__(y) <==> y in x
"""
pass
def __delitem__(*args, **kwargs):
"""
x.__delitem__(y) <==> del x[y]
"""
pass
def __delslice__(*args, **kwargs):
"""
x.__delslice__(i, j) <==> del x[i:j]
Use of negative indices is not supported.
"""
pass
def __getitem__(*args, **kwargs):
"""
x.__getitem__(y) <==> x[y]
"""
pass
def __getslice__(*args, **kwargs):
"""
x.__getslice__(i, j) <==> x[i:j]
Use of negative indices is not supported.
"""
pass
def __iadd__(*args, **kwargs):
"""
x.__iadd__(y) <==> x+=y
"""
pass
def __imul__(*args, **kwargs):
"""
x.__imul__(y) <==> x*=y
"""
pass
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __len__(*args, **kwargs):
"""
x.__len__() <==> len(x)
"""
pass
def __mul__(*args, **kwargs):
"""
x.__mul__(n) <==> x*n
"""
pass
def __repr__(*args, **kwargs):
"""
x.__repr__() <==> repr(x)
"""
pass
def __rmul__(*args, **kwargs):
"""
x.__rmul__(n) <==> n*x
"""
pass
def __setitem__(*args, **kwargs):
"""
x.__setitem__(i, y) <==> x[i]=y
"""
pass
def __setslice__(*args, **kwargs):
"""
x.__setslice__(i, j, y) <==> x[i:j]=y
Use of negative indices is not supported.
"""
pass
def __str__(*args, **kwargs):
"""
x.__str__() <==> str(x)
"""
pass
def append(*args, **kwargs):
"""
Add a value to the end of the array.
"""
pass
def clear(*args, **kwargs):
"""
Remove all elements from the array.
"""
pass
def copy(*args, **kwargs):
"""
Replace the array contents with that of another or of a compatible Python sequence.
"""
pass
def insert(*args, **kwargs):
"""
Insert a new value into the array at the given index.
"""
pass
def remove(*args, **kwargs):
"""
Remove an element from the array.
"""
pass
def setLength(*args, **kwargs):
"""
Grow or shrink the array to contain a specific number of elements.
"""
pass
sizeIncrement = None
__new__ = None
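# Hedged example (not part of the generated stubs): typical use of the array
# wrapper through maya.api.OpenMaya; it assumes a Maya session where that
# module is importable.
#
#     import maya.api.OpenMaya as om
#
#     normals = om.MFloatVectorArray()
#     normals.append(om.MFloatVector(0.0, 1.0, 0.0))
#     normals.setLength(4)              # grow the array to four elements
#     print(len(normals), normals[0])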
class MAngle(object):
"""
Manipulate angular data.
"""
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __repr__(*args, **kwargs):
"""
x.__repr__() <==> repr(x)
"""
pass
def __str__(*args, **kwargs):
"""
x.__str__() <==> str(x)
"""
pass
def asAngMinutes(*args, **kwargs):
"""
Returns the angular value, converted to minutes of arc.
"""
pass
def asAngSeconds(*args, **kwargs):
"""
Returns the angular value, converted to seconds of arc.
"""
pass
def asDegrees(*args, **kwargs):
"""
Returns the angular value, converted to degrees.
"""
pass
def asRadians(*args, **kwargs):
"""
Returns the angular value, converted to radians.
"""
pass
def asUnits(*args, **kwargs):
"""
Returns the angular value, converted to the specified units.
"""
pass
def internalToUI(*args, **kwargs):
"""
Converts a value from Maya's internal units to the units used in the UI.
"""
pass
def internalUnit(*args, **kwargs):
"""
Returns the angular unit used internally by Maya.
"""
pass
def setUIUnit(*args, **kwargs):
"""
Sets the angular unit used in Maya's UI.
"""
pass
def uiToInternal(*args, **kwargs):
"""
Converts a value from the units used in the UI to Maya's internal units.
"""
pass
def uiUnit(*args, **kwargs):
"""
Returns the units used to display angles in Maya's UI.
"""
pass
unit = None
value = None
__new__ = None
kAngMinutes = 3
kAngSeconds = 4
kDegrees = 2
kInvalid = 0
kLast = 5
kRadians = 1
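# Hedged example (not part of the generated stubs): converting between angular
# units with MAngle, again assuming maya.api.OpenMaya is importable.
#
#     import maya.api.OpenMaya as om
#
#     angle = om.MAngle(90.0, om.MAngle.kDegrees)
#     radians = angle.asRadians()               # roughly 1.5708
#     ui_value = angle.asUnits(om.MAngle.uiUnit())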
class MEulerRotation(object):
"""
X, Y and Z rotations, applied in a specified order.
"""
def __add__(*args, **kwargs):
"""
x.__add__(y) <==> x+y
"""
pass
def __delitem__(*args, **kwargs):
"""
x.__delitem__(y) <==> del x[y]
"""
pass
def __eq__(*args, **kwargs):
"""
x.__eq__(y) <==> x==y
"""
pass
def __ge__(*args, **kwargs):
"""
x.__ge__(y) <==> x>=y
"""
pass
def __getitem__(*args, **kwargs):
"""
x.__getitem__(y) <==> x[y]
"""
pass
def __gt__(*args, **kwargs):
"""
x.__gt__(y) <==> x>y
"""
pass
def __iadd__(*args, **kwargs):
"""
x.__iadd__(y) <==> x+=y
"""
pass
def __imul__(*args, **kwargs):
"""
x.__imul__(y) <==> x*=y
"""
pass
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __isub__(*args, **kwargs):
"""
x.__isub__(y) <==> x-=y
"""
pass
def __le__(*args, **kwargs):
"""
x.__le__(y) <==> x<=y
"""
pass
def __len__(*args, **kwargs):
"""
x.__len__() <==> len(x)
"""
pass
def | |
# Repository: gda-score/code
import json
import math
import pprint
import matplotlib.pyplot as plt
import numpy as np
from gdascore.gdaTools import getInterpolatedValue
from matplotlib.patches import Rectangle
pp = pprint.PrettyPrinter(indent=4)
# String constants for the JSON keys used below
doubleColumnScore = "doubleColumnScores"
singleColumnScore = "singleColumnScores"
# Only for accuracy
acc = "accuracy"
simplerelerrormatrix = "simpleRelativeErrorMetrics"
mse = "meanSquareError"
# Only for coverage
cov = "coverage"
covPerCol = "coveragePerCol"
# This function generates the score/column label string
# shown on the diagram.
# Tested with static values.
def score_column_method(column, method):
scorestring = "Scoring:%s,%s columns" % (method, column)
return scorestring
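# For example (a sketch with made-up arguments):
#     score_column_method(2, "singlingOut") == "Scoring:singlingOut,2 columns"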
# This function populates the utility measure parameters (accuracy and
# coverage) in the `util` dict.
# Tested by dynamically reading a JSON file from the desired location.
def readjsonfile(filelocation, util):
try:
# Check whether the desired file exists at the given location.
fileexist = open(filelocation, 'r')
util['accuracy'] = round(getaccuracyvalue(filelocation), 2)
util['coverage'] = round(getcoveragevalue(filelocation), 2)
fileexist.close()
except FileNotFoundError:
print("File is not present in the location.")
return False
except KeyboardInterrupt:
print("Program closed.")
return False
return True
def getaccuracyvalue(filelocation):
accuracy = None
# Read JSON data into the datastore variable
if filelocation:
with open(filelocation, 'r') as f:
datastore = json.load(f)
# Use the new datastore datastructure
# Utility data
utility_acc_dc = []
utility_acc_sc = []
for i in range(len(datastore[doubleColumnScore])):
if datastore[doubleColumnScore][i][acc] is not None:
utility_acc_dc.append(datastore[doubleColumnScore][i][acc][simplerelerrormatrix][mse])
for i in range(len(datastore[singleColumnScore])):
if datastore[singleColumnScore][i][acc] is not None:
utility_acc_sc.append(datastore[singleColumnScore][i][acc][simplerelerrormatrix][mse])
accuracy = ((np.mean(utility_acc_dc) + np.mean(utility_acc_sc)) / 2)
return accuracy
def getcoveragevalue(filelocation):
coverage = None
# Read JSON data into the datastore variable
if filelocation:
with open(filelocation, 'r') as f:
datastore = json.load(f)
# Use the new datastore datastructure
# Utility data coverage
utility_cov_dc = []
utility_cov_sc = []
for i in range(len(datastore[doubleColumnScore])):
if datastore[doubleColumnScore][i][cov] is not None:
if datastore[doubleColumnScore][i][cov][covPerCol] is not None:
utility_cov_dc.append(datastore[doubleColumnScore][i]["coverage"][covPerCol])
for i in range(len(datastore[singleColumnScore])):
if datastore[singleColumnScore][i][cov] is not None:
if datastore[singleColumnScore][i][cov][covPerCol] is not None:
utility_cov_sc.append(datastore[singleColumnScore][i][cov][covPerCol])
coverage = ((np.mean(utility_cov_dc) + np.mean(utility_cov_sc)) / 2)
return coverage
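# A minimal sketch of the JSON layout the two readers above expect (inferred
# from the keys used in this file, not an official schema):
#
#     {
#       "singleColumnScores": [
#         {"accuracy": {"simpleRelativeErrorMetrics": {"meanSquareError": 0.02}},
#          "coverage": {"coveragePerCol": 0.9}}
#       ],
#       "doubleColumnScores": [
#         {"accuracy": {"simpleRelativeErrorMetrics": {"meanSquareError": 0.05}},
#          "coverage": {"coveragePerCol": 0.8}}
#       ]
#     }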
# This function generates the gdaScore plot using matplotlib.
# Tested with both static and dynamic values.
def plotGdaScore(score, sc, util, fileName='', form=[], show=True):
""" Produces a GDA Score Diagram from GDA Score data.
`score` is the score data structure returned from
`gdaScores.getScore()` <br/>
`util` is the utility data structure (to be developed) <br\>
`fileName` is where the output file, if any, should be saved <br\>
`form` is a list of output types, and can be 'png', 'pdf', 'ps',
'eps', and 'svg' <br\>
Set `show` to True if you want the graph displayed
"""
# Tweak the shape by adjusting the following numbers
base = 1
gap = base / 4
high = base * 3
aspect = 5
maxY = 0.5
minY = -0.5
# add parameters for axis change
minXaxis = -0.3
minYaxis = 0.3
axisLength = 9.75
textPlotXvalue = -0.1
maintextValue = 0.05
maintextYValue = 0.67
midTextValue = 0.09
endTextValue = 0.2
horlineX = -0.3
horlineY = 24.5
verlineX = 10
verlineY = 10
verlineGap = 0.2
scoregap = 0.04
heightposth = 0.05
heightnegth = -0.05
# end of parameters
# Code Added By Anirban 20-10-2018
# Parameters for score and column
numofcolumn = sc['column']
methodname = sc['method']
xaxisscorecolumn = 4.3
yaxisscorecolumn = -0.65
horbarYaxis = 0.0
acc, cov, conf, prob, know, work, susc = range(7)  # bar indices; unpacking a set would not guarantee this order
labels = ['Acc', 'Cov', 'Conf', 'Prob', 'Know', 'Work', 'Susc']
if util:
doLabel = [1, 1, 1, 1, 1, 1, 1]
else:
doLabel = [0, 0, 1, 1, 1, 1, 1]
maxWork = 10000
centers = list(range(7))
widths = list(range(7))
# accuracy bar
left = gap
right = left + base
wid = right - left
mid = (wid / 2) + left
centers[acc] = mid
widths[acc] = wid
# coverage bar
left = right + gap
right = left + base
wid = right - left
mid = (wid / 2) + left
centers[cov] = mid
widths[cov] = wid
midVertical = right + gap
# confidence bar
left = right + (2 * gap)
right = left + base
wid = right - left
mid = (wid / 2) + left
centers[conf] = mid
widths[conf] = wid
# probability bar
left = right + gap
right = left + base
wid = right - left
mid = (wid / 2) + left
centers[prob] = mid
widths[prob] = wid
# knowledge bar
left = right + (2 * gap)
right = left + base
wid = right - left
mid = (wid / 2) + left
centers[know] = mid
widths[know] = wid
# work bar
left = right + gap
right = left + base
wid = right - left
mid = (wid / 2) + left
centers[work] = mid
widths[work] = wid
# susceptibility bar
left = right + gap
right = left + base
wid = right - left
mid = (wid / 2) + left
centers[susc] = mid
widths[susc] = wid
rightEdge = right + gap
# Changed the axis limits to generate the boundary box (Anirban, 30-09-2011)
plt.figure()
plt.axis([minXaxis, rightEdge + 2 * gap, minY - minYaxis, maxY + minYaxis])
plt.axis('off')
colors = ['tan', 'tan',
'cadetblue', 'cadetblue',
'grey', 'grey', 'grey']
s = score['score']['scores'][0]
# For work needed, the best case is 1 cell learned for 1 cell attacked,
# so this is given the value 1. We set the worst case arbitrarily at, say,
# 10000, which gets the value 0 on the graph. We plot on a log scale. So:
workBar = s['workNeeded']
if workBar != None:
if workBar < 1:
workBar = 1
if workBar > 10000:
workBar = 10000
workBar = math.log10(workBar)
maxWork = math.log10(maxWork)
workBar /= maxWork
else:
# This will cause the work bar to not appear
workBar = maxY
doLabel[work] = 0
# Code added by Anirban 25-10-2018
# Accuracy and coverage are now read dynamically from the JSON file
# No further change to the Python data structure so far
if util:
filevalues = readjsonfile(util['filelocation'], util)
else:
filevalues = None
# Do the calculation only if the file exists at the location and the initial read succeeded
utilityScore = [
(1, 1, 0), (1, .25, 0), (1, .1, 0), (1, .05, 0), (1, .01, 0), (1, 0, 0),
(.6, 1, 0), (.6, .25, .1), (.6, .1, .3), (.6, .05, .4), (.6, .01, .5), (.6, 0, .6),
(.4, 1, 0), (.4, .25, .2), (.4, .1, .4), (.4, .05, .6), (.4, .01, .7), (.4, 0, .8),
(.2, 1, 0), (.2, .25, .3), (.2, .1, 5), (.2, .05, .7), (.2, .01, .8), (.2, 0, .9),
(.1, 1, 0), (.1, .25, .4), (.1, .1, .7), (.1, .05, .8), (.1, .01, .9), (.1, 0, .95),
(0, 1, 0), (0, .25, .5), (0, .1, .75), (0, .05, .9), (0, .01, .95), (0, 0, 1)]
if filevalues:
score = getInterpolatedValue((1 - util['coverage']), util['accuracy'], utilityScore)
if util:
accuracy = util['accuracy']
coverage = util['coverage']
else:
# This basically results in no bars for utility
accuracy = maxY
coverage = maxY
if s['knowledgeNeeded'] != None:
knowledgeNeeded = s['knowledgeNeeded']
else:
# results in no bar for knowledgeNeeded
knowledgeNeeded = maxY
doLabel[know] = 0
# Accuracy bar scaled Issue10
accuracyHeight = None
if util['accuracy'] == 0:
accuracyHeight = 0.5
elif util['accuracy'] == 0.01:
accuracyHeight = 0.4
elif util['accuracy'] == 0.05:
accuracyHeight = 0.3
elif util['accuracy'] == 0.1:
accuracyHeight = 0.1
elif util['accuracy'] == 0.25:
accuracyHeight = 0.0
elif util['accuracy'] == 0.5:
accuracyHeight = -0.25
else:
accuracyHeight = -0.5
heights = [accuracyHeight,
# maxY - accuracy, Changes for Issue10
# maxY - coverage, Changes for Issue10
coverage - maxY,
maxY - s['confidenceImprovement'],
maxY - s['claimProbability'],
knowledgeNeeded - maxY,
workBar - maxY,
maxY - s['susceptibility']]
# Plot the bars
plt.bar(centers, heights, width=widths, color=colors)
# Plot the axes
plt.plot([0, axisLength], [0, 0], color='black', linewidth=4) # Changes done by Anirban 30-08-2018
plt.plot([midVertical, midVertical], [minY, maxY], color='black', linewidth=4)
# Plot the text
nudge = 0.03
plt.text(midVertical, maxY + nudge, 'good',
horizontalalignment='center', verticalalignment='center')
plt.text(midVertical, minY - nudge, 'bad',
horizontalalignment='center', verticalalignment='center')
# Plot the score above the edge of the bar. If the bar is not clearly visible,
# plot it just beside the bar instead.
# Changes done by Anirban 09-10-2018
# region Code added to plot the score as a number
| |
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import json
import os
import pathlib
from argparse import ArgumentParser
from datetime import datetime
import matplotlib.pyplot as plt
import pytorch_lightning as pl
import torch
import wandb
from fastmri.data.mri_data import fetch_dir
from fastmri.data.transforms import MiniCoilTransform
from fastmri.pl_modules import FastMriDataModule
from pytorch_lightning.callbacks import Callback
from pl_modules import AdaptiveVarNetModule, VarNetModule
from subsample import create_mask_for_mask_type
def count_parameters(model):
return sum(p.numel() for p in model.parameters()) if model is not None else 0
def count_trainable_parameters(model):
return (
sum(p.numel() for p in model.parameters() if p.requires_grad)
if model is not None
else 0
)
def count_untrainable_parameters(model):
return (
sum(p.numel() for p in model.parameters() if not p.requires_grad)
if model is not None
else 0
)
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ValueError("Boolean value expected.")
def str2none(v):
if v is None:
return v
if v.lower() == "none":
return None
else:
return v
def int2none(v):
if v is None:
return v
if v.lower() == "none":
return None
else:
return int(v)
def float2none(v):
if v is None:
return v
if v.lower() == "none":
return None
else:
return float(v)
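# Hedged usage sketch (not part of the original script): converters like these
# are typically passed as argparse `type=` callables so command-line strings
# become booleans or optional numbers. The flags below mirror arguments used
# elsewhere in this script but are shown here only for illustration.
#
#     parser = ArgumentParser()
#     parser.add_argument("--hard_dc", type=str2bool, default=True)
#     parser.add_argument("--num_sense_lines", type=int2none, default=None)
#     parser.add_argument("--cost_limit", type=float2none, default=None)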
def make_wandb_run_name(args):
name = ""
# Create base name
if args.learn_acquisition:
if args.loupe_mask:
name += "loupe-"
else:
name += "act-"
else:
name += "Nact-"
assert len(args.accelerations) == 1
name += str(args.accelerations[0])
name += "-cas"
name += str(args.num_cascades)
name += "-"
if args.learn_acquisition and not args.loupe_mask:
name += "p"
name += str(args.cascades_per_policy)
name += "-"
if args.chans != 18:
name += "ch"
name += str(args.chans)
name += "-"
if args.num_compressed_coils == 1:
name += "singlecoil-"
if args.sparse_dc_gradients:
name += "dcsparse-"
else:
name += "dcmultip-"
if args.learn_acquisition:
if args.use_softplus:
name += f"softplus{args.slope}b-"
else:
name += f"sigmoid{args.slope}s-"
if args.straight_through_slope != 10:
name += f"stslope{args.straight_through_slope}-"
if args.hard_dc:
if not args.dc_mode == "first":
name += f"hdc{args.dc_mode}-"
else:
name += "sdc-"
if args.st_clamp:
name += "stclamp-"
if not args.loupe_mask: # Policy runs
if args.policy_num_fc_layers != 3 and args.policy_fc_size != 256:
name += f"{args.policy_num_fc_layers}fc{args.policy_fc_size}-"
elif args.policy_num_fc_layers != 3:
name += f"{args.policy_num_fc_layers}fc-"
elif args.policy_fc_size != 256:
name += f"fc{args.policy_fc_size}-"
if args.policy_drop_prob != 0.0:
name += f"drop{args.policy_drop_prob}-"
if args.policy_activation != "leakyrelu":
name += "elu-"
else: # LOUPE runs
pass
else: # Non-active runs
if args.mask_type != "adaptive_equispaced_fraction":
name += f"{args.mask_type}-"
name += "seed"
name += str(args.seed)
if args.lr != 0.001:
name += "-lr{}".format(args.lr)
if args.sample_rate is None and args.volume_sample_rate is not None:
if args.volume_sample_rate != 1.0:
name += "-"
name += "vsr"
name += str(args.volume_sample_rate)
elif args.sample_rate is not None and args.volume_sample_rate is None:
if args.sample_rate != 1.0:
name += "-"
name += "sr"
name += str(args.sample_rate)
return name
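# Illustrative trace of the naming logic above (argument values assumed for the
# example, not taken from any real run): with learn_acquisition=False,
# accelerations=[8], num_cascades=12, chans=18, num_compressed_coils=4,
# sparse_dc_gradients=True, mask_type="adaptive_equispaced_fraction", seed=42,
# lr=0.001, and both sample rates left at None, the function returns
# "Nact-8-cas12-dcsparse-seed42".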
class WandbLoggerCallback(Callback):
def __init__(self, args):
super().__init__()
self.args = args
if args.resume_from_checkpoint:
# Get wandb id from file in checkpoint dir
# resume_from_checkpoint = default_root_dir / checkpoints / model.ckpt
# wandb_id is stored in default_root_dir / wandb_id.txt
with open(
pathlib.Path(args.resume_from_checkpoint).parent.parent
/ "wandb_id.txt",
"r",
) as f:
id = f.read()
with open(
pathlib.Path(args.resume_from_checkpoint).parent.parent
/ "wandb_dir.txt",
"r",
) as f:
dir = pathlib.Path(f.read())
else:
id = wandb.util.generate_id()
base_dir = pathlib.Path.cwd() / "wandb"
now = datetime.now()
if args.learn_acquisition:
if args.loupe_mask:
algo = "loupe"
else:
algo = "adaptive"
else:
algo = "non_adaptive"
dir = base_dir / now.strftime("%Y_%m_%d") / algo
dir.mkdir(parents=True, exist_ok=True)
wandb.init(
entity=self.args.wandb_entity,
project=self.args.project,
config=self.args,
resume="allow",
id=id,
dir=dir,
)
if not wandb.run.resumed:
# Extract run index from wandb name
wandb_index = wandb.run.name.split("-")[-1]
# Overwrite wandb run name
wandb_name = make_wandb_run_name(args)
wandb.run.name = wandb_name + "-" + wandb_index
# Save wandb info
with open(pathlib.Path(args.default_root_dir) / wandb.run.name, "w") as f:
f.write(wandb.run.id)
with open(pathlib.Path(args.default_root_dir) / wandb.run.id, "w") as f:
f.write(wandb.run.name)
with open(pathlib.Path(args.default_root_dir) / "wandb_id.txt", "w") as f:
f.write(wandb.run.id)
with open(pathlib.Path(args.default_root_dir) / "wandb_dir.txt", "w") as f:
f.write(str(dir))
def on_pretrain_routine_start(self, trainer, pl_module):
train_loader_len = len(trainer.datamodule.train_dataloader())
val_loader_len = len(trainer.datamodule.val_dataloader())
test_loader_len = len(trainer.datamodule.test_dataloader())
print(f"Train loader batches: {train_loader_len}")
print(f"Val loader batches: {val_loader_len}")
print(f"Test loader batches: {test_loader_len}")
wandb.log(
{
"epoch": -1,
"train_batches": train_loader_len,
"val_batches": val_loader_len,
"test_batches": test_loader_len,
}
)
def on_train_epoch_end(self, trainer, pl_module, outputs):
epoch = trainer.current_epoch
tot_ex = pl_module.TrainTotExamples.compute().item()
tot_slice_ex = pl_module.TrainTotSliceExamples.compute().item()
ssim = pl_module.TrainSSIM.compute().item() / tot_ex
psnr = pl_module.TrainPSNR.compute().item() / tot_ex
nmse = pl_module.TrainNMSE.compute().item() / tot_ex
train_loss = pl_module.TrainLoss.compute().item() / tot_slice_ex
wandb_dict = {
"epoch": epoch,
"train_loss": train_loss,
"train_ssim": ssim,
"train_psnr": psnr,
"train_nmse": nmse,
"train_tot_ex": tot_ex,
"train_tot_slice_ex": tot_slice_ex,
}
wandb.log(wandb_dict)
# For some reason tot_ex is not the correct number to divide by, due to some
# kind of weird issue with how we count it. Fortunately, we know the sum of
# val_marg_dist should be 1, so we can compute the correct normalisation
# number from that constraint. Probably this means that we're overcounting
# some examples relative to others for the entropy calculations?
# NOTE: This is not really the correct distribution, since the policy is a
# bunch of independent Bernoullis (+ rejection sampling), not a policy over
# a single acquisition.
# NOTE: These are not the entropies reported in the paper.
train_marg_dist = pl_module.TrainMargDist.compute()
norm_ex = train_marg_dist.sum()
train_marg_dist = train_marg_dist / norm_ex
if train_marg_dist.shape != torch.Size([1]): # Check that we didn't skip
W = len(train_marg_dist)
plt.imshow(
train_marg_dist.expand(W, W).cpu().numpy(),
cmap="gist_gray",
)
plt.colorbar()
wandb.log({"train_marg_dist": plt, "epoch": epoch})
plt.close()
train_marg_ent = torch.sum(
-1 * train_marg_dist * torch.log(train_marg_dist + 1e-8)
)
train_cond_ent = pl_module.TrainCondEnt.compute() / norm_ex
train_mut_inf = train_marg_ent - train_cond_ent
wandb.log(
{
"epoch": epoch,
"train_marg_ent": train_marg_ent,
"train_cond_ent": train_cond_ent,
"train_mut_inf": train_mut_inf,
}
)
def on_validation_epoch_end(self, trainer, pl_module):
epoch = trainer.current_epoch
# See MriModule.validation_epoch_end()
tot_ex = pl_module.TotExamples.compute().item()
tot_slice_ex = pl_module.TotSliceExamples.compute().item()
ssim = pl_module.SSIM.compute().item() / tot_ex
psnr = pl_module.PSNR.compute().item() / tot_ex
nmse = pl_module.NMSE.compute().item() / tot_ex
val_loss = pl_module.ValLoss.compute().item() / tot_slice_ex
wandb_dict = {
"epoch": epoch,
"val_loss": val_loss,
"val_ssim": ssim,
"val_psnr": psnr,
"val_nmse": nmse,
"val_tot_ex": tot_ex,
"val_tot_slice_ex": tot_slice_ex,
}
wandb.log(wandb_dict)
# For some reason tot_ex is not the correct number to divide by, due to some
# kind of weird issue with how we count it. Fortunately, we know the sum of
# val_marg_dist should be 1, so we can compute the correct normalisation
# number from that constraint. Probably this means that we're overcounting
# some examples relative to others for the entropy calculations?
# NOTE: This is not really the correct distribution, since the policy is a
# bunch of independent Bernoullis (+ rejection sampling), not a policy over
# a single acquisition.
# NOTE: These are not the entropies reported in the paper.
val_marg_dist = pl_module.ValMargDist.compute()
norm_ex = val_marg_dist.sum().item()
val_marg_dist = val_marg_dist / norm_ex
if val_marg_dist.shape != torch.Size([1]): # Check that we didn't skip
W = len(val_marg_dist)
plt.imshow(
val_marg_dist.expand(W, W).cpu().numpy(),
cmap="gist_gray",
)
plt.colorbar()
wandb.log({"val_marg_dist": plt, "epoch": epoch})
plt.close()
val_marg_ent = torch.sum(
-1 * val_marg_dist * torch.log(val_marg_dist + 1e-8)
)
val_cond_ent = pl_module.ValCondEnt.compute() / norm_ex
val_mut_inf = val_marg_ent - val_cond_ent
wandb.log(
{
"epoch": epoch,
"val_marg_ent": val_marg_ent,
"val_cond_ent": val_cond_ent,
"val_mut_inf": val_mut_inf,
}
)
def cli_main(args):
if args.num_sense_lines is not None:
assert (
args.num_sense_lines % 2 == 0
), "`num_sense_lines` must be even, not {}".format(args.num_sense_lines)
assert (
len(args.accelerations) == 1 and len(args.center_fractions) == 1
), "Cannot use multiple accelerations when `num_sense_lines` is set."
if args.seed is not None:
pl.seed_everything(args.seed)
# ------------
# data
# ------------
# this creates a k-space mask for transforming input data
mask = create_mask_for_mask_type(
args.mask_type,
args.center_fractions,
args.accelerations,
args.skip_low_freqs,
)
# use random masks for train transform, fixed masks for val transform
train_transform = MiniCoilTransform(
mask_func=mask,
use_seed=False, # Set this to True to get deterministic results for Equispaced and Random.
num_compressed_coils=args.num_compressed_coils,
crop_size=args.crop_size,
)
val_transform = MiniCoilTransform(
mask_func=mask,
num_compressed_coils=args.num_compressed_coils,
crop_size=args.crop_size,
)
if args.test_split in ("test", "challenge"):
mask = None
test_transform = MiniCoilTransform(
mask_func=mask,
num_compressed_coils=args.num_compressed_coils,
crop_size=args.crop_size,
)
# ptl data module - this handles data loaders
data_module = FastMriDataModule(
data_path=args.data_path,
challenge=args.challenge,
train_transform=train_transform,
val_transform=val_transform,
test_transform=test_transform,
test_split=args.test_split,
test_path=args.test_path,
sample_rate=args.sample_rate,
volume_sample_rate=args.volume_sample_rate,
batch_size=args.batch_size,
num_workers=args.num_workers,
distributed_sampler=(args.accelerator in ("ddp", "ddp_cpu")),
)
# ------------
# model
# ------------
if args.learn_acquisition:
model = AdaptiveVarNetModule(
num_cascades=args.num_cascades,
pools=args.pools,
chans=args.chans,
sens_pools=args.sens_pools,
sens_chans=args.sens_chans,
lr=args.lr,
lr_step_size=args.lr_step_size,
lr_gamma=args.lr_gamma,
weight_decay=args.weight_decay,
budget=args.budget,
cascades_per_policy=args.cascades_per_policy,
loupe_mask=args.loupe_mask,
use_softplus=args.use_softplus,
crop_size=args.crop_size,
num_actions=args.crop_size[1],
num_sense_lines=args.num_sense_lines,
hard_dc=args.hard_dc,
dc_mode=args.dc_mode,
slope=args.slope,
sparse_dc_gradients=args.sparse_dc_gradients,
straight_through_slope=args.straight_through_slope,
st_clamp=args.st_clamp,
policy_fc_size=args.policy_fc_size,
policy_drop_prob=args.policy_drop_prob,
policy_num_fc_layers=args.policy_num_fc_layers,
policy_activation=args.policy_activation,
| |
# File: src/python/dxpy/bindings/dxapplet.py
# Copyright (C) 2013-2016 DNAnexus, Inc.
#
# This file is part of dx-toolkit (DNAnexus platform client libraries).
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
DXApplet Handler
++++++++++++++++
Applets are data objects that store application logic, including
specifications for executing it, and (optionally) input and output
signatures. They can be run by calling the :func:`DXApplet.run` method.
"""
from __future__ import print_function, unicode_literals, division, absolute_import
import dxpy
from . import DXDataObject, DXJob
from ..utils import merge
from ..system_requirements import SystemRequirementsDict
from ..exceptions import DXError
from ..compat import basestring
class DXExecutable:
'''Methods in :class:`!DXExecutable` are used by
:class:`~dxpy.bindings.dxapp.DXApp`,
:class:`~dxpy.bindings.dxapplet.DXApplet`,
:class:`~dxpy.bindings.dxworkflow.DXWorkflow`, and
:class:`~dxpy.bindings.dxworkflow.DXGlobalWorkflow`
'''
def __init__(self, *args, **kwargs):
raise NotImplementedError("This class is a mix-in. Use DXApp or DXApplet instead.")
@staticmethod
def _get_run_input_common_fields(executable_input, **kwargs):
'''
Takes the same arguments as the run method. Creates an input hash for the /executable-xxxx/run method,
translating ONLY the fields that can be handled uniformly across all executables: project, folder, name, tags,
properties, details, depends_on, allow_ssh, debug, delay_workspace_destruction, ignore_reuse, and extra_args.
'''
project = kwargs.get('project') or dxpy.WORKSPACE_ID
run_input = {"input": executable_input}
for arg in ['folder', 'name', 'tags', 'properties', 'details']:
if kwargs.get(arg) is not None:
run_input[arg] = kwargs[arg]
if kwargs.get('instance_type') is not None or kwargs.get('cluster_spec') is not None:
instance_type_srd = SystemRequirementsDict.from_instance_type(kwargs.get('instance_type'))
cluster_spec_srd = SystemRequirementsDict(kwargs.get('cluster_spec'))
run_input["systemRequirements"] = (instance_type_srd + cluster_spec_srd).as_dict()
if kwargs.get('depends_on') is not None:
run_input["dependsOn"] = []
if isinstance(kwargs['depends_on'], list):
for item in kwargs['depends_on']:
if isinstance(item, DXJob) or isinstance(item, DXDataObject):
if item.get_id() is None:
raise DXError('A dxpy handler given in depends_on does not have an ID set')
run_input["dependsOn"].append(item.get_id())
elif isinstance(item, basestring):
run_input['dependsOn'].append(item)
else:
raise DXError('Expected elements of depends_on to only be either instances of DXJob or DXDataObject, or strings')
else:
raise DXError('Expected depends_on field to be a list')
if kwargs.get('delay_workspace_destruction') is not None:
run_input["delayWorkspaceDestruction"] = kwargs['delay_workspace_destruction']
if kwargs.get('allow_ssh') is not None:
run_input["allowSSH"] = kwargs['allow_ssh']
if kwargs.get('debug') is not None:
run_input["debug"] = kwargs['debug']
if kwargs.get('priority') is not None:
run_input["priority"] = kwargs['priority']
if kwargs.get('ignore_reuse') is not None:
run_input["ignoreReuse"] = kwargs['ignore_reuse']
if dxpy.JOB_ID is None:
run_input["project"] = project
if kwargs.get('extra_args') is not None:
merge(run_input, kwargs['extra_args'])
if kwargs.get('detach') is not None:
run_input["detach"] = kwargs['detach']
if kwargs.get('cost_limit') is not None:
run_input["costLimit"] = kwargs['cost_limit']
return run_input
@staticmethod
def _get_run_input_fields_for_applet(executable_input, **kwargs):
'''
Takes the same arguments as the run method. Creates an input
hash for the /applet-xxxx/run method.
'''
# Although it says "for_applet", this is factored out of
# DXApplet because apps currently use the same mechanism
for unsupported_arg in ['stage_instance_types', 'stage_folders', 'rerun_stages', 'ignore_reuse_stages']:
if kwargs.get(unsupported_arg):
raise DXError(unsupported_arg + ' is not supported for applets (only workflows)')
return DXExecutable._get_run_input_common_fields(executable_input, **kwargs)
def _run_impl(self, run_input, **kwargs):
"""
Runs the executable with the specified input and returns a
handler for the resulting execution object
(:class:`~dxpy.bindings.dxjob.DXJob` or
:class:`~dxpy.bindings.dxanalysis.DXAnalysis`).
Any kwargs are passed on to :func:`~dxpy.DXHTTPRequest`.
"""
raise NotImplementedError('_run_impl is not implemented')
def _get_run_input(self, executable_input, **kwargs):
"""
Takes the same arguments as the run method. Creates an input
hash for the /executable-xxxx/run method.
"""
raise NotImplementedError('_get_run_input is not implemented')
def _get_required_keys(self):
"""
Abstract method used in executable_unbuilder.dump_executable
"""
raise NotImplementedError('_get_required_keys is not implemented')
def _get_optional_keys(self):
"""
Abstract method used in executable_unbuilder.dump_executable
"""
raise NotImplementedError('_get_optional_keys is not implemented')
def _get_describe_output_keys(self):
"""
Abstract method used in executable_unbuilder.dump_executable
"""
raise NotImplementedError('_get_describe_output_keys is not implemented')
def _get_cleanup_keys(self):
"""
Abstract method used in executable_unbuilder.dump_executable
"""
raise NotImplementedError('_get_cleanup_keys is not implemented')
def run(self, executable_input, project=None, folder=None, name=None, tags=None, properties=None, details=None,
instance_type=None, stage_instance_types=None, stage_folders=None, rerun_stages=None, cluster_spec=None,
depends_on=None, allow_ssh=None, debug=None, delay_workspace_destruction=None, priority=None,
ignore_reuse=None, ignore_reuse_stages=None, detach=None, cost_limit=None, extra_args=None, **kwargs):
'''
:param executable_input: Hash of the executable's input arguments
:type executable_input: dict
:param project: Project ID of the project context
:type project: string
:param folder: Folder in which executable's outputs will be placed in *project*
:type folder: string
:param name: Name for the new job (default is "<name of the executable>")
:type name: string
:param tags: Tags to associate with the job
:type tags: list of strings
:param properties: Properties to associate with the job
:type properties: dict with string values
:param details: Details to set for the job
:type details: dict or list
:param instance_type: Instance type on which the jobs will be run, or a dict mapping function names to instance type requests
:type instance_type: string or dict
:param depends_on: List of data objects or jobs to wait that need to enter the "closed" or "done" states, respectively, before the new job will be run; each element in the list can either be a dxpy handler or a string ID
:type depends_on: list
:param allow_ssh: List of hostname or IP masks to allow SSH connections from
:type allow_ssh: list
:param debug: Configuration options for job debugging
:type debug: dict
:param delay_workspace_destruction: Whether to keep the job's temporary workspace around for debugging purposes for 3 days after it succeeds or fails
:type delay_workspace_destruction: boolean
:param priority: Priority level to request for all jobs created in the execution tree, "low", "normal", or "high"
:type priority: string
:param ignore_reuse: Disable job reuse for this execution
:type ignore_reuse: boolean
:param ignore_reuse_stages: Stages of a workflow (IDs, names, or indices) or "*" for which job reuse should be disabled
:type ignore_reuse_stages: list
:param detach: If provided, the job will not be started as a subjob when run from inside another job.
:type detach: boolean
:param cost_limit: Maximum cost of the job before termination.
:type cost_limit: float
:param extra_args: If provided, a hash of options that will be merged into the underlying JSON given for the API call
:type extra_args: dict
:returns: Object handler of the newly created job
:rtype: :class:`~dxpy.bindings.dxjob.DXJob`
Creates a new job that executes the function "main" of this executable with
the given input *executable_input*.
'''
# stage_instance_types, stage_folders, and rerun_stages are
# only supported for workflows, but we include them
# here. Applet-based executables should detect when they
# receive a truthy workflow-specific value and raise an error.
run_input = self._get_run_input(executable_input,
project=project,
folder=folder,
name=name,
tags=tags,
properties=properties,
details=details,
instance_type=instance_type,
stage_instance_types=stage_instance_types,
stage_folders=stage_folders,
rerun_stages=rerun_stages,
cluster_spec=cluster_spec,
depends_on=depends_on,
allow_ssh=allow_ssh,
ignore_reuse=ignore_reuse,
ignore_reuse_stages=ignore_reuse_stages,
debug=debug,
delay_workspace_destruction=delay_workspace_destruction,
priority=priority,
detach=detach,
cost_limit=cost_limit,
extra_args=extra_args)
return self._run_impl(run_input, **kwargs)
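# Illustrative usage sketch (added for reference; not part of dxpy itself). The
# IDs, the input field name, and the instance type below are placeholders, and a
# configured dxpy session/project context is assumed:
#
#     applet = dxpy.DXApplet("applet-xxxx")                        # hypothetical ID
#     job = applet.run({"reads": {"$dnanexus_link": "file-xxxx"}}, # hypothetical input
#                      folder="/results",
#                      name="demo run",
#                      instance_type="mem1_ssd1_v2_x4",            # hypothetical type
#                      detach=True)
#     print(job.get_id())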
############
# DXApplet #
############
_applet_required_keys = ['name', 'title', 'summary', 'types', 'tags',
'httpsApp', 'properties', 'dxapi', 'inputSpec', 'outputSpec',
'runSpec', 'access', 'details']
_applet_optional_keys = ['ignoreReuse']
_applet_describe_output_keys = ['properties', 'details']
_applet_cleanup_keys = ['name', 'title', 'summary', 'types', 'tags',
'properties', 'runSpec', 'access', 'details']
def _makeNonexistentAPIWrapper(method):
def nonexistentAPIWrapper(object_id, input_params=None, always_retry=None, **kwargs):
raise DXError("Wrapper for " + method + " does not exist")
return nonexistentAPIWrapper
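# Illustrative sketch (added; not part of the original module): the factory above
# returns a stand-in wrapper that always raises DXError, which is how applet
# routes without a real API endpoint (e.g. /applet-xxxx/addTypes) are represented.
def _example_nonexistent_wrapper():
    wrapper = _makeNonexistentAPIWrapper("/applet-xxxx/addTypes")
    try:
        wrapper("applet-xxxx", input_params={"types": ["demo"]})
    except DXError as exc:
        # -> "Wrapper for /applet-xxxx/addTypes does not exist"
        return str(exc)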
class DXApplet(DXDataObject, DXExecutable):
'''
Remote applet object handler.
.. py:attribute:: runSpec
The applet's run specification (a dict indicating, among other things, how the code of the
applet is to be interpreted). See `the API docs for Run Specification
<https://documentation.dnanexus.com/developer/api/running-analyses/io-and-run-specifications#run-specification>`_
for more information.
.. py:attribute:: dxapi
String containing the version of the DNAnexus API that the applet should run against.
.. py:attribute:: access
The applet's access requirements hash (a dict indicating any nonstandard permissions, such
as requiring access to the internet, that are needed by the applet). See `the API docs for
Access Requirements
<https://documentation.dnanexus.com/developer/api/running-analyses/io-and-run-specifications#access-requirements>`_
for more information.
.. py:attribute:: title
String containing the (human-readable) title of the app
.. py:attribute:: summary
String containing a short, one-line summary of the applet's purpose
.. py:attribute:: description
String of free-form text (`Markdown <http://daringfireball.net/projects/markdown/>`_ syntax
is supported) containing a description of the applet. The description is presented to users
to help them understand the purpose of the app and how to invoke it.
.. py:attribute:: developerNotes
String of free-form text (`Markdown <http://daringfireball.net/projects/markdown/>`_ syntax
is supported) containing information about the internals or implementation details of the
applet, suitable for developers or advanced users.
.. automethod:: _new
'''
_class = "applet"
_describe = staticmethod(dxpy.api.applet_describe)
_add_types = staticmethod(_makeNonexistentAPIWrapper("/applet-xxxx/addTypes"))
_remove_types = staticmethod(_makeNonexistentAPIWrapper("/applet-xxxx/removeTypes"))
detections = res_data['detections']
img_meta = res_data['img_meta']
is_pcl = 'input_style' in img_meta and img_meta['input_style'] == 'pcl'
if not is_pcl:
img = res_data['img']
p = self._eval_img_size_aug
if p>0:
img = np.pad(img, ((p, p), (p, p), (0, 0)), 'constant', constant_values=0)
else:
img_shape = res_data['img']
img_shape[:2] = img_shape[:2] * self._eval_img_scale_ratio + self._eval_img_size_aug * 2
img = np.zeros(img_shape, dtype=np.int8)
pass
filename = img_meta['filename']
scene_name = os.path.splitext(os.path.basename(filename))[0]
if 'Area_' in filename:
area_id = filename.split('Area_')[1][0]
scene_name = scene_name.split('-topview')[0]
scene_name = 'Area_' + area_id + '/' + scene_name
if self.scene_list is not None:
if scene_name not in self.scene_list:
continue
print(f'\n\n\n\n{i_img}th file: {filename}')
gt_lines = results_datas[i_img]['gt_bboxes'].copy()
gt_labels = results_datas[i_img]['gt_labels'].copy()
if gt_lines.ndim == 1:
gt_lines = gt_lines[None,:]
gt_lines[:,:4] = gt_lines[:,:4] * self._eval_img_scale_ratio + self._eval_img_size_aug
pass
if debug and 0:
print('gt')
_show_objs_ls_points_ls(img, [gt_lines], obj_rep=self.obj_rep)
pass
num_labels = len(detections)
eval_draws_ls = []
walls = None
time_post = 0
dets_1s = {}
gts_1s = {}
det_points_1s = {}
ious_1s = {}
labels_to_cats = {}
for label in range(1, num_labels+1):
cat = catid_2_cat[label]
if cat in ['ceiling', 'floor']:
continue
labels_to_cats[label] = cat
label_mask = (gt_labels == label).reshape(-1)
gt_lines_l = gt_lines[label_mask]
det_lines = detections[label-1][f'detection_{out_type}'].copy()
gts_1s[label] = gt_lines_l
if out_type == 'bInit_sRefine':
det_points = detections[label-1]['points_init']
elif out_type == 'bRefine_sAve':
det_points = detections[label-1]['points_refine']
det_lines[:,:2] = det_lines[:,:2] * self._eval_img_scale_ratio + self._eval_img_size_aug
det_lines[:,3] = det_lines[:,3] * self._eval_img_scale_ratio
if optimize_graph:
det_lines_merged, _, ids_merged, t = post_process_bboxes_1cls(det_lines,
self._score_threshold, label, cat, self._opt_graph_cor_dis_thr,
self.dim_parse.OBJ_REP, self._min_out_length, walls=walls)
time_post += t
else:
det_lines_merged, ids_merged = filter_low_score_det(det_lines, self._score_threshold)
if cat == 'wall':
walls = det_lines_merged
dets_1s[label] = det_lines_merged
det_points_1s[label] = det_points[ids_merged]
if debug and 1:
print('raw prediction')
_show_objs_ls_points_ls(img[:,:,0], [det_lines[:,:-1], gt_lines_l], obj_rep=self.obj_rep, obj_colors=['green','red'])
det_category_id = detections[label-1]['category_id']
if det_category_id != 1:
pass
#raise NotImplementedError
line_nums_gt_dt_tp, ious = self.eval_1img_1cls_by_iou(img, det_lines_merged, gt_lines_l, scene_name, cat, det_points)
all_line_nums_gt_dt_tp[label].append(line_nums_gt_dt_tp)
if ious.shape[0] > 0:
ious_of_dets = ious.max(1)
else:
ious_of_dets = ious
all_ious[label].append( ious_of_dets )
ious_1s[label] = ious
#eval_draws_ls.append(eval_draws)
if debug or 0:
print(f'optimize graph with self._opt_graph_cor_dis_thr= {self._opt_graph_cor_dis_thr}')
_show_objs_ls_points_ls(img[:,:,0], [det_lines[:,:5], gt_lines_l], obj_colors=['green','red'])
_show_objs_ls_points_ls(img[:,:,0], [det_lines_merged[:,:5], gt_lines_l], obj_colors=['green','red'])
pass
res_filename = os.path.join( self.eval_dir_all_cls, scene_name)
if i_img < MAX_Draw_Num:
draw_1_scene(img, gts_1s, dets_1s, ious_1s, labels_to_cats, self.obj_rep, self._iou_threshold, res_filename, det_points_1s)
pass
line_recall_precision_perimg = defaultdict(list)
line_recall_precision = {}
line_nums_sum = {}
ave_ious = {}
iou_thres = 0.3
for label in all_line_nums_gt_dt_tp:
cat = catid_2_cat[label]
line_nums = np.array(all_line_nums_gt_dt_tp[label])
line_recall_precision_perimg[cat].append( (line_nums[:,2] / line_nums[:,0], line_nums[:,2] / line_nums[:,1] ))
line = line_nums.sum(axis=0)
line_recall_precision[cat] = [line[2]/line[0], line[2]/line[1]]
line_nums_sum[cat] = line
# cal iou
ious_l = np.concatenate(all_ious[label])
ave_iou = ious_l.mean()
#ave_iou = ious_l[ious_l > iou_thres].mean()
ave_ious[cat] = ave_iou
eval_res_str = self.get_eval_res_str_iou(line_recall_precision, img_meta, line_nums_sum, ave_ious)
path = os.path.dirname(out_file)
#path = os.path.join(path, 'eval_res')
if not os.path.exists(path):
os.makedirs(path)
eval_path = os.path.join(path, 'eval_res.txt')
with open(eval_path, 'a') as f:
f.write(eval_res_str)
#print(eval_res_str)
#print(f'post time: {time_post}')
# save eval res
eval_res = dict(
line_recall_precision = line_recall_precision,
line_recall_precision_perimg = line_recall_precision_perimg,
all_ious = all_ious,
)
s = 'OptimizeGraph' if self.optimize_graph else 'NoOptmizeGraph'
eval_res_file = out_file.replace('.pickle', f'_EvalRes{s}.npy')
#np.save(eval_res_file, eval_res)
return eval_res_str
def eval_rooms_with_rel(self, dets_ls, gts_ls, cat_ls, det_wall_ids_per_room, gt_relations_room_wall):
from mmdet.core.bbox.geometry import dsiou_rotated_3d_bbox_np
show_gt_dt_compare = 0
show_in_relations = 0
show_fail_rooms = 0
show_per_room = 0
num_cats = len(cat_ls)
cats_to_label = {cat_ls[i]: i for i in range(num_cats)}
gt_rooms = gts_ls[cats_to_label['room']]
gt_walls = gts_ls[cats_to_label['wall']]
dt_rooms = dets_ls[cats_to_label['room']]
dt_walls = dets_ls[cats_to_label['wall']]
num_dt_w = dt_walls.shape[0]
#dt_rooms = dt_rooms0[dt_rooms0[:,-1] > self._score_threshold][:,:7]
#dt_walls = dt_walls0[dt_walls0[:,-1] > self._score_threshold][:,:7]
num_gt_r = gt_rooms.shape[0]
num_gt_w = gt_walls.shape[0]
num_dt_r = dt_rooms.shape[0]
assert gt_relations_room_wall.shape == ( num_gt_r, num_gt_w)
if show_in_relations:
det_relations = rel_ids_to_mask(det_wall_ids_per_room, num_dt_w)
show_connectivity( gt_walls, gt_rooms, gt_relations_room_wall, self.obj_rep)
show_connectivity( dt_walls[:,:7], dt_rooms[:,:7], det_relations, self.obj_rep)
ious = dsiou_rotated_3d_bbox_np( gt_rooms, dt_rooms[:,:7], iou_w=1, size_rate_thres=None )
dt_id_per_gt = ious.argmax(1)
iou_per_gt = ious.max(1)
gt_true_mask = iou_per_gt >= self._iou_threshold
gt_true_ids = np.where(gt_true_mask)[0]
dt_pos_ids = dt_id_per_gt[gt_true_mask]
num_tp = gt_true_ids.shape[0]
room_nums_gt_dt_tp = [num_gt_r, num_dt_r, num_tp]
# analyze the walls of each room
gt_wids_per_room = relation_mask_to_ids(gt_relations_room_wall)
if show_gt_dt_compare:
_show_objs_ls_points_ls( (512,512), [gt_walls, dt_walls[:,:7]],
self.obj_rep, obj_colors=['white', 'red', ], obj_thickness = [6,1] )
if show_fail_rooms:
gt_false_ids = np.where(gt_true_mask==False)[0]
fail_wids = [gt_wids_per_room[i] for i in gt_false_ids]
fail_wids = np.concatenate(fail_wids)
_show_objs_ls_points_ls( (512,512), [gt_walls, gt_walls[fail_wids], gt_rooms[gt_false_ids]],
self.obj_rep, obj_colors=['white', 'red', 'lime' ] )
_show_objs_ls_points_ls( (512,512), [dt_walls[:,:7], ],
self.obj_rep, obj_colors=['lime' ] )
pass
succ_room_ids = []
fail_room_ids = []
for i in range(num_tp):
gt_i = gt_true_ids[i]
dt_i = dt_id_per_gt[gt_i]
wids_gt_i = gt_wids_per_room[gt_i]
wids_dt_i = det_wall_ids_per_room[dt_i]
gtws_i = gt_walls[wids_gt_i]
dtws_i = dt_walls[wids_dt_i]
gtn = gtws_i.shape[0]
dtn = dtws_i.shape[0]
if gtn == dtn:
ious_i = dsiou_rotated_3d_bbox_np(gtws_i, dtws_i[:,:7], 0.7, size_rate_thres=0.3).max(0)
miou = ious_i.mean()
if miou > 0.7:
succ_room_ids.append( gt_i )
if show_per_room:
print(f'success wall rel')
else:
fail_room_ids.append( gt_i )
if show_per_room:
print(f'fail wall rel')
if show_per_room:
print(f'{i} ious: {ious_i}, {miou:.3f}')
#ni = gtws_i.shape[0]
#dtws_i = OBJ_REPS_PARSE.encode_obj(dtws_i[:,:7], self.obj_rep, 'RoLine2D_2p').reshape(1,ni, 2,1, 2)
#gtws_i = OBJ_REPS_PARSE.encode_obj(gtws_i, self.obj_rep, 'RoLine2D_2p').reshape(ni,1, 1,2, 2)
#dif_i = dtws_i - gtws_i
#dif_i = np.linalg.norm(dif_i, axis=-1)
#dif_i = dif_i.max(-1).min(-1)
else:
if show_per_room:
print(f'fail room')
print(f'{i} gtn:{gtn}, dtn:{dtn}, iou:{iou_per_gt[gt_i]:.3f}')
pass
if show_per_room:
#_show_objs_ls_points_ls( (512,512), [gt_rooms[gt_i,None], dt_rooms[dt_i,None][:,:7]], self.obj_rep, obj_colors=['red','lime'] )
_show_objs_ls_points_ls( (512,512), [gt_walls, gtws_i, dtws_i[:,:7]], self.obj_rep, obj_colors=['white', 'red','lime'], obj_thickness=[1,2,2] )
#show_1by1((512,512), gtws_i, self.obj_rep, gt_walls)
#show_1by1((512,512), dtws_i[:,:7], self.obj_rep, dt_walls[:,:7])
print(f'\n')
pass
num_rel_tp = len(succ_room_ids)
rooms_gt_dt_tp_rel = room_nums_gt_dt_tp + [num_rel_tp]
return rooms_gt_dt_tp_rel
def get_eval_res_str_iou(self, line_recall_precision, img_meta, line_nums_sum, ave_ious ):
rotate = False
eval_str = '\n\n--------------------------------------\n\n' + \
str(self) + f'num_img: {self.num_img}\n'
eval_str += f'optimize_graph: {self.optimize_graph}\n'
eval_str += f'IoU threshold: {self._iou_threshold}\n'
eval_str += 'Precision-Recall\n\n'
cats = line_recall_precision.keys()
eval_str += '| split |'
for cat in cats:
str_e = f'{cat} edge '
eval_str += f'{str_e:14}|'
eval_str += '\n|-|'
for cat in cats:
eval_str += '-|-|'
eval_str += '\n|pre-rec|'
for cat in cats:
line_rec, line_prec = line_recall_precision[cat]
line_str = f'{line_prec:.3} - {line_rec:.3}'
eval_str += f'{line_str:14}|'
pass
eval_str += '\n'
eval_str += '| iou |'
for cat in cats:
iou = ave_ious[cat]
iou_str = f'{iou:.3}'
s=''
eval_str += f'{iou_str:14}|'
pass
eval_str += '\n'
eval_str += '|gt num |'
for cat in cats:
line_num = line_nums_sum[cat][0]
eval_str += f'{line_num:14}|'
pass
eval_str += '\n'
return eval_str
def get_eval_res_str(self, corner_recall_precision, line_recall_precision, img_meta, line_nums_sum, cor_nums_sum, ave_ious, time_post):
rotate = False
eval_str = '\n\n--------------------------------------\n\n' + \
str(self) + f'num_img: {self.num_img}\n'
eval_str += f'optimize graph geometrically: {self.optimize_graph}\n'
eval_str += f'optimize graph semantically: {self.optimize_graph_by_relation}\n'
eval_str += f'optimize walls by rooms: {self._opti_room}\n'
eval_str += 'Precision-Recall\n\n'
cats = line_recall_precision.keys()
eval_str += '| split |'
for cat in cats:
str_c = f'{cat} corner'
str_e = f'{cat} edge '
eval_str += f'{str_c:14}|{str_e:14}|'
eval_str += '\n|-|'
for cat in cats:
eval_str += '-|-|'
eval_str += '\n|pre-rec|'
for cat in cats:
if cat in corner_recall_precision:
cor_rec, cor_prec = corner_recall_precision[cat]
cor_str = f'{cor_prec:.3} - {cor_rec:.3}'
else:
cor_str = ''
line_rec, line_prec = line_recall_precision[cat]
line_str = f'{line_prec:.3} - {line_rec:.3}'
eval_str += f'{cor_str:14}|{line_str:14}|'
pass
eval_str += '\n'
eval_str += '| iou |'
for cat in corner_recall_precision:
iou = ave_ious[cat]
iou_str = f'{iou:.3}'
s=''
eval_str += f'{s:14}|{iou_str:14}|'
pass
eval_str += '\n'
eval_str += '|gt num |'
for cat in cats:
if cat in cor_nums_sum:
cor_num = cor_nums_sum[cat][0]
line_num = line_nums_sum[cat][0]
else:
cor_num = 0
line_num = 0
eval_str += f'{cor_num:14}|{line_num:14}|'
pass
eval_str += '\n'
eval_str += f'post time: {time_post}'
eval_str += '\n'
return eval_str
def eval_1img_1cls_by_iou(self, img, det_lines, gt_lines, scene_name, cat, det_points):
from mmdet.core.bbox.geometry import dsiou_rotated_3d_bbox_np
num_det = det_lines.shape[0]
num_gt = gt_lines.shape[0]
if num_det == 0:
return [num_gt, num_det, 0], np.array([])
# [num_det, num_gt]
iou_matrix = dsiou_rotated_3d_bbox_np(det_lines[:,:-1], gt_lines, iou_w=1, size_rate_thres=0.07)
ious = iou_matrix.max(0)
mask = ious > self._iou_threshold
num_tp = sum(mask)
obj_nums_gt_dt_tp = [num_gt, num_det, num_tp]
return obj_nums_gt_dt_tp, iou_matrix
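# Worked illustration (added; hypothetical numbers) of how the triple above is
# formed. Given an IoU matrix of shape [num_det, num_gt]
#     [[0.80, 0.10, 0.00],
#      [0.05, 0.65, 0.20],
#      [0.00, 0.00, 0.40]]
# iou_matrix.max(0) yields the best detection IoU per ground truth,
# [0.80, 0.65, 0.40]; with an IoU threshold of 0.5 the mask is
# [True, True, False], so num_tp = 2 and the method returns [3, 3, 2].
# Recall (tp/gt) and precision (tp/dt) are derived later from the summed triples.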
def cal_iou_1img_1cls(self, img, det_lines, gt_lines, scene_name, det_cat, det_points):
from mmdet.core.bbox.geometry import dsiou_rotated_3d_bbox_np
if det_lines.shape[0] == 0:
return np.array([])
iou_matrix = dsiou_rotated_3d_bbox_np(det_lines[:,:7], gt_lines, iou_w=1, size_rate_thres=0.07)
ious = iou_matrix.max(1)
return ious
def eval_1img_1cls_by_corner(self, img, det_lines, gt_lines, scene_name, det_cat, det_points):
show_missed_gt = 0
show_all_matching = 0
num_gt = gt_lines.shape[0]
det_corners, cor_scores, det_cor_ids_per_line,_ = gen_corners_from_lines_np(det_lines[:,:self.obj_dim],\
None, self.obj_rep, self._opt_graph_cor_dis_thr//2)
gt_corners, _, gt_corIds_per_line,_ = gen_corners_from_lines_np(gt_lines, None, self.obj_rep, self._opt_graph_cor_dis_thr//2)
cor_nums_gt_dt_tp, cor_detIds_per_gt = self.eval_corners(gt_corners, det_corners)
# cal det_lineIds_per_cor: [num_det_corner]
det_lineIds_per_cor = get_lineIdsPerCor_from_corIdsPerLine(det_cor_ids_per_line, det_corners.shape[0])
# detCorIds_per_gtLine: [num_gt_line, 2]
# line_detIds_per_gt: [num_gt_line]
detCorIds_per_gtLine = cor_detIds_per_gt[ gt_corIds_per_line ]
line_detIds_per_gt = []
for i in range(detCorIds_per_gtLine.shape[0]):
a,b = detCorIds_per_gtLine[i]
det_lineIds = -1
if a>=0 and b>=0:
lineIds_a = det_lineIds_per_cor[a]
lineIds_b = det_lineIds_per_cor[b]
# check whether the two corners map to the same line
for ai in lineIds_a:
for bi in lineIds_b:
if ai == bi:
det_lineIds | |
from unittest import TestCase
from unittest import mock
import copy
from data import views, api_caller
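# Note (added for clarity): the stacked mock.patch.object decorators below are
# applied bottom-up, so each test method receives its mock arguments in reverse
# decorator order (load_committee_history first, load_committee_statement_of_organization
# last), matching the method signatures in this class.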
@mock.patch.object(
api_caller, "load_committee_statement_of_organization", return_value=None
)
@mock.patch.object(api_caller, "load_endpoint_results")
@mock.patch.object(views, "load_reports_and_totals")
@mock.patch.object(views, "load_cycle_data")
@mock.patch.object(views, "load_committee_history")
class TestCommittee(TestCase):
STOCK_COMMITTEE = {
"organization_type_full": None,
"affiliated_committee_name": "SOME CONNECTED COMMITTEE",
"zip": "37024",
"committee_type": "N",
"committee_type_full": "PAC - Nonqualified",
"committee_id": "C001",
"designation_full": "Joint fundraising committee",
"party_full": None,
"street_2": None,
"designation": "J",
"state_full": "Tennessee",
"party": None,
"street_1": "PO BOX 123",
"state": "TN",
"treasurer_name": "<NAME>",
"candidate_ids": [],
"organization_type": None,
"cycles": [2018],
"filing_frequency": "Q",
"cycle": 2018,
"city": "BRENTWOOD",
"name": "<NAME>",
"cycles_has_financial": [2018],
"last_cycle_has_financial": 2018,
"cycles_has_activity": [2018],
"last_cycle_has_activity": 2018,
"former_candidate_id": "P001",
"former_committee_name": "Friends of President",
"former_candidate_name": "<NAME>",
}
STOCK_REPORTS = [
{
"non_allocated_fed_election_activity_period": None,
"allocated_federal_election_levin_share_period": None,
"total_disbursements_period": 7400.0,
"coverage_end_date": "2018-09-30T00:00:00+00:00",
"document_description": "OCTOBER QUARTERLY 2018",
"refunded_political_party_committee_contributions_ytd": None,
"loans_made_period": None,
"net_operating_expenditures_period": None,
"committee_type": "N",
"committee_name": "<NAME>",
"amendment_indicator_full": "NEW",
"net_contributions_period": None,
"total_disbursements_ytd": None,
"fec_url": "http://docquery.fec.gov/dcdev/posted/1273093.fec",
"other_fed_receipts_period": None,
"shared_fed_activity_ytd": None,
"cash_on_hand_beginning_period": 0.0,
"total_operating_expenditures_ytd": None,
"non_allocated_fed_election_activity_ytd": None,
"debts_owed_to_committee": 0.0,
"pdf_url": " http://docquery.fec.gopdf042/201810159125475042/20181015912 5475042.pdf",
"political_party_committee_contributions_period": None,
"other_political_committee_contributions_period": None,
"fec_file_id": "FEC-1273093",
"beginning_image_number": "201810159125475042",
"cash_on_hand_beginning_calendar_ytd": None,
"coordinated_expenditures_by_party_committee_period": None,
"total_nonfed_transfers_period": None,
"loan_repayments_made_period": None,
"fed_candidate_contribution_refunds_period": None,
"individual_unitemized_contributions_period": None,
"fed_candidate_committee_contribution_refunds_ytd": None,
"total_fed_receipts_period": None,
"transfers_from_affiliated_party_period": None,
"total_contributions_ytd": None,
"refunded_political_party_committee_contributions_period": None,
"transfers_to_affiliated_committee_period": None,
"subtotal_summary_ytd": None,
"refunded_individual_contributions_period": None,
"transfers_from_nonfed_levin_ytd": None,
"other_political_committee_contributions_ytd": None,
"report_form": "Form 3",
"total_fed_operating_expenditures_period": None,
"total_individual_contributions_period": None,
"csv_url": "http://docquery.fec.gov/csv/093/1273093.csv",
"total_contribution_refunds_period": None,
"loans_made_ytd": None,
"loan_repayments_made_ytd": None,
"amendment_indicator": "N",
"total_fed_election_activity_period": None,
"transfers_from_nonfed_levin_period": None,
"total_contributions_period": None,
"offsets_to_operating_expenditures_period": None,
"total_fed_election_activity_ytd": None,
"report_year": 2018,
"offsets_to_operating_expenditures_ytd": None,
"other_fed_operating_expenditures_ytd": None,
"total_fed_disbursements_ytd": None,
"cash_on_hand_close_ytd": None,
"most_recent_file_number": 1273093.0,
"shared_fed_operating_expenditures_ytd": None,
"total_contribution_refunds_ytd": None,
"total_nonfed_transfers_ytd": None,
"all_loans_received_period": None,
"debts_owed_by_committee": 0.0,
"shared_fed_activity_period": None,
"net_contributions_ytd": None,
"transfers_from_affiliated_party_ytd": None,
"coverage_start_date": "2018-07-01T00:00:00+00:00",
"refunded_individual_contributions_ytd": None,
"loan_repayments_received_ytd": None,
"individual_unitemized_contributions_ytd": None,
"end_image_number": "201810159125475048",
"previous_file_number": 1273093.0,
"independent_expenditures_ytd": None,
"fed_candidate_committee_contributions_ytd": None,
"total_fed_receipts_ytd": None,
"means_filed": "e-file",
"committee_id": "C00687574",
"amendment_chain": [1273093.0],
"total_fed_disbursements_period": None,
"cycle": 2018,
"transfers_from_nonfed_account_ytd": None,
"shared_fed_operating_expenditures_period": None,
"shared_nonfed_operating_expenditures_period": None,
"receipt_date": "2018-10-15T00:00:00",
"refunded_other_political_committee_contributions_period": None,
"most_recent": True,
"html_url": "http://docquery.fec.gov/cgi-bin/forms/C00687574/127309",
"shared_fed_activity_nonfed_ytd": None,
"cash_on_hand_end_period": 0.0,
"report_type": "Q3",
"shared_nonfed_operating_expenditures_ytd": None,
"subtotal_summary_page_period": None,
"loan_repayments_received_period": None,
"political_party_committee_contributions_ytd": None,
"file_number": 1273093,
"total_receipts_period": 7400.0,
"other_fed_receipts_ytd": None,
"other_disbursements_ytd": None,
"calendar_ytd": None,
"independent_expenditures_period": None,
"individual_itemized_contributions_ytd": None,
"refunded_other_political_committee_contributions_ytd": None,
"individual_itemized_contributions_period": None,
"total_receipts_ytd": None,
"other_fed_operating_expenditures_period": None,
"transfers_to_affilitated_committees_ytd": None,
"report_type_full": "OCTOBER QUARTERLY",
"coordinated_expenditures_by_party_committee_ytd": None,
"total_individual_contributions_ytd": None,
"fed_candidate_committee_contributions_period": None,
"net_operating_expenditures_ytd": None,
"transfers_from_nonfed_account_period": None,
"total_fed_operating_expenditures_ytd": None,
"all_loans_received_ytd": None,
"total_operating_expenditures_period": None,
"other_disbursements_period": None,
"nonfed_share_allocated_disbursements_period": None,
"is_amended": False,
}
]
STOCK_TOTALS = []
def test_base_case(
self,
load_committee_history_mock,
load_cycle_data_mock,
load_reports_and_totals_mock,
load_endpoint_results_mock,
load_committee_statement_of_organization_mock,
):
cycle = 2018
test_committee = copy.deepcopy(self.STOCK_COMMITTEE)
load_committee_history_mock.return_value = (test_committee, [], cycle)
# cycle_out_of_range, last_cycle_has_financial, cycles
load_cycle_data_mock.return_value = (False, 2018, [2018])
load_reports_and_totals_mock.return_value = (
self.STOCK_REPORTS,
self.STOCK_TOTALS,
)
load_endpoint_results_mock.return_value = mock.MagicMock()
load_committee_statement_of_organization_mock.return_value = {
"receipt_date": "2019-11-30T00:00:00"
}
template_variables = views.get_committee("C001", 2018)
committee = template_variables.get("committee")
assert committee["name"] == test_committee["name"]
assert committee["committee_id"] == test_committee["committee_id"]
assert committee["committee_type_full"] == test_committee["committee_type_full"]
assert committee["committee_type"] == test_committee["committee_type"]
assert committee["designation_full"] == test_committee["designation_full"]
assert committee["street_1"] == test_committee["street_1"]
assert committee["street_2"] == test_committee["street_2"]
assert committee["city"] == test_committee["city"]
assert committee["state"] == test_committee["state"]
assert committee["zip"] == test_committee["zip"]
assert committee["treasurer_name"] == test_committee["treasurer_name"]
assert committee["party_full"] == test_committee["party_full"]
assert committee["former_candidate_id"] is not None
assert committee["former_candidate_id"] == test_committee["former_candidate_id"]
assert committee["former_committee_name"] == test_committee["former_committee_name"]
assert committee["former_candidate_name"] == test_committee["former_candidate_name"]
assert template_variables["context_vars"] == {
"cycle": 2018,
"timePeriod": "2017–2018",
"name": "<NAME>",
"cycleOutOfRange": False,
"lastCycleHasFinancial": 2018,
}
assert template_variables["parent"] == "data"
assert template_variables["totals"] == []
assert template_variables["candidates"] == []
assert template_variables["cycle"] == cycle
assert template_variables["cycles"] == test_committee["cycles_has_activity"]
assert template_variables["year"] == test_committee["cycle"]
assert template_variables["result_type"] == "committees"
assert template_variables["report_type"] == "pac-party"
assert template_variables["reports"] == self.STOCK_REPORTS
assert template_variables["statement_of_organization"] == {
"receipt_date": "11/30/2019"
}
def test_ie_summary(
self,
load_committee_history_mock,
load_cycle_data_mock,
load_reports_and_totals_mock,
load_endpoint_results_mock,
load_committee_statement_of_organization_mock,
):
cycle = 2018
test_committee = copy.deepcopy(self.STOCK_COMMITTEE)
test_committee["committee_type"] = "I"
load_committee_history_mock.return_value = (test_committee, [], cycle)
# cycle_out_of_range, last_cycle_has_financial, cycles
load_cycle_data_mock.return_value = (False, 2018, [2018])
load_reports_and_totals_mock.return_value = (
{"report_type_full": "YEAR-END", "report_type": "YE"},
{
"total_independent_contributions": 11000.0,
"total_independent_expenditures": 4262.0,
},
)
template_variables = views.get_committee("C001", 2018)
assert template_variables["ie_summary"] == [
(11000.0, {"label": "Contributions received", "level": "1"}),
(4262.0, {"label": "Independent expenditures", "level": "1"}),
]
def test_inaugural_summary(
self,
load_committee_history_mock,
load_cycle_data_mock,
load_reports_and_totals_mock,
load_endpoint_results_mock,
load_committee_statement_of_organization_mock,
):
cycle = 2018
test_committee = copy.deepcopy(self.STOCK_COMMITTEE)
test_committee["organization_type"] = "I"
load_committee_history_mock.return_value = (test_committee, [], cycle)
load_cycle_data_mock.return_value = (False, 2018, [2018])
load_reports_and_totals_mock.return_value = (
{"report_type_full": "POST INAUGURAL SUPPLEMENT", "report_type": "90S"},
{"receipts": 85530042.0, "contribution_refunds": 966240.0},
)
committee = views.get_committee("C001", 2018)
assert committee["inaugural_summary"] == [
(85530042.0, {"label": "Total Donations Accepted", "level": "1"}),
(966240.0, {"label": "Total Donations Refunded", "level": "1"}),
]
def test_host_f4_summary(
self,
load_committee_history_mock,
load_cycle_data_mock,
load_reports_and_totals_mock,
load_endpoint_results_mock,
load_committee_statement_of_organization_mock,
):
cycle = 2018
test_committee = copy.deepcopy(self.STOCK_COMMITTEE)
test_committee["organization_type"] = "H"
load_committee_history_mock.return_value = (test_committee, [], cycle)
load_cycle_data_mock.return_value = (False, 2018, [2018])
load_reports_and_totals_mock.return_value = (
{
"report_type_full": "POST INAUGURAL SUPPLEMENT",
"report_type": "90S",
"form_type": "F4",
},
{
"cash_on_hand_beginning_period": 503347.81,
"committee_name": "COMMITTEE FOR CHARLOTTE_CHARLOTTE DNC HOST COMMITTEE",
"other_disbursements": 0.0,
"last_beginning_image_number": "201610109032226424",
"itemized_refunds_relating_convention_exp": 0.0,
"committee_designation_full": "Unauthorized",
"refunds_relating_convention_exp": 0.0,
"cycle": 2016,
"committee_type_full": "Party - Nonqualified",
"individual_contributions": 0.0,
"unitemized_other_disb": 0.0,
"loans_and_loan_repayments_received": 0.0,
"last_report_year": 2016,
"itemized_other_disb": 0.0,
"coverage_start_date": "2016-07-01T00:00:00+00:00",
"itemized_other_income": 0.0,
"itemized_convention_exp": 4500.0,
"exp_subject_limits": 4500.0,
"other_refunds": 0.0,
"last_cash_on_hand_end_period": 498847.81,
"exp_prior_years_subject_limits": 0.0,
"all_loans_received": 0.0,
"last_report_type_full": "OCTOBER QUARTERLY",
"loans_made": 0.0,
"unitemized_other_income": 0.0,
"loans_and_loan_repayments_made": 0.0,
"receipts": 12345.0,
"committee_id": "C00493254",
"fed_disbursements": 0.0,
"committee_designation": "U",
"loan_repayments_received": 0.0,
"itemized_other_refunds": 0.0,
"unitemized_other_refunds": 0.0,
"unitemized_refunds_relating_convention_exp": 0.0,
"contributions": 0.0,
"transfers_from_affiliated_party": 0.0,
"coverage_end_date": "2016-09-30T00:00:00+00:00",
"convention_exp": 4500.0,
"individual_unitemized_contributions": 0.0,
"federal_funds": 12345.0,
"transfers_to_affiliated_committee": 0.0,
"other_fed_receipts": 0.0,
"party_full": "DEMOCRATIC PARTY",
"last_debts_owed_by_committee": 5000,
"loan_repayments_made": 0.0,
"unitemized_convention_exp": 0.0,
"committee_type": "X",
"disbursements": 4500.0,
"last_debts_owed_to_committee": 1000,
"total_exp_subject_limits": None,
},
)
template_variables = views.get_committee("C001", 2018)
assert template_variables["raising_summary"] == [
(
12345.0,
{"label": "Total receipts", "level": "1", "term": "total receipts"},
),
(12345.0, {"label": "Federal funds", "level": "2"}),
(
0.0,
{
"label": "Total Contributions to Defray Convention Expenses",
"level": "2",
},
),
(
0.0,
{
"label": "Itemized Contributions to Defray Convention Expenses",
"level": "3",
},
),
(
0.0,
{
"label": "Unitemized Contributions to Defray Convention Expenses",
"level": "3",
},
),
(0.0, {"label": "Transfers from affiliated committees", "level": "2"}),
(0.0, {"label": "Loans Received", "level": "3"}),
(0.0, {"label": "Loan Repayments Received", "level": "3"}),
(
0.0,
{"label": "Other Refunds, Rebates, Returns of Deposits", "level": "2"},
),
(
0.0,
{
"label": " Itemized Other Refunds, Rebates, Returns of Deposits",
"level": "3",
},
),
(
0.0,
{
"label": "Unitemized Other Refunds, Rebates, Returns of Deposits",
"level": "3",
},
),
(0.0, {"label": " Other Income", "level": "2"}),
(0.0, {"label": "Itemized Other Income", "level": "3"}),
(0.0, {"label": "Unitemized Other Income", "level": "3"}),
]
assert template_variables["spending_summary"] == [
(
4500.0,
{
"label": "Total disbursements",
"level": "1",
"term": "total disbursements",
},
),
(4500.0, {"label": "Convention Expenditures", "level": "2"}),
(4500.0, {"label": "Itemized Convention Expenditures", "level": "3"}),
(0.0, {"label": "Unitemized Convention Expenditures", "level": "3"}),
(0.0, {"label": "Transfers to Affiliated Committees", "level": "2"}),
(0.0, {"label": "Loans and Loan Repayments Made", "level": "2"}),
(0.0, {"label": "Loans Made", "level": "3"}),
(0.0, {"label": "Loan Repayments Made", "level": "3"}),
(0.0, {"label": "Other Disbursements", "level": "2"}),
(0.0, {"label": "Itemized Other Disbursements", "level": "3"}),
(0.0, {"label": "Unitemized Other Disbursements", "level": "3"}),
]
assert template_variables["cash_summary"] == [
(503347.81, {"label": "Beginning cash on hand", "level": "2"}),
(
498847.81,
{
"label": "Ending cash on hand",
"term": "ending cash on hand",
"level": "2",
},
),
(1000, {"label": "Debts/loans owed to committee", "level": "2"}),
(5000, {"label": "Debts/loans owed by committee", "level": "2"}),
]
def test_host_f3x_summary_returns_standard_values(
self,
load_committee_history_mock,
load_cycle_data_mock,
load_reports_and_totals_mock,
load_endpoint_results_mock,
load_committee_statement_of_organization_mock,
):
cycle = 2018
test_committee = copy.deepcopy(self.STOCK_COMMITTEE)
test_committee["organization_type"] = "H"
load_committee_history_mock.return_value = (test_committee, [], cycle)
load_cycle_data_mock.return_value = (False, 2018, [2018])
load_reports_and_totals_mock.return_value = (
self.STOCK_REPORTS,
self.STOCK_TOTALS,
)
template_variables = views.get_committee("C001", 2018)
committee = template_variables.get("committee")
assert committee["name"] == test_committee["name"]
assert committee["committee_id"] == test_committee["committee_id"]
assert committee["committee_type_full"] == test_committee["committee_type_full"]
assert committee["committee_type"] == test_committee["committee_type"]
assert committee["designation_full"] == test_committee["designation_full"]
assert committee["street_1"] == test_committee["street_1"]
assert committee["street_2"] == test_committee["street_2"]
assert committee["city"] == test_committee["city"]
assert committee["state"] == test_committee["state"]
assert committee["zip"] == test_committee["zip"]
assert committee["treasurer_name"] == test_committee["treasurer_name"]
assert committee["cycle"] == test_committee["cycle"]
assert committee["cycles"] == test_committee["cycles"]
assert committee["party_full"] == test_committee["party_full"]
assert template_variables["year"] == test_committee["cycle"]
assert template_variables["parent"] == "data"
assert template_variables["context_vars"] == {
"cycle": 2018,
"timePeriod": "2017–2018",
"name": "<NAME>",
"cycleOutOfRange": False,
"lastCycleHasFinancial": 2018,
}
assert template_variables["totals"] == []
assert template_variables["candidates"] == []
assert template_variables["result_type"] == "committees"
assert template_variables["report_type"] == "pac-party"
assert template_variables["reports"] == self.STOCK_REPORTS
def test_fallback_cycle(
self,
load_committee_history_mock,
load_cycle_data_mock,
load_reports_and_totals_mock,
load_endpoint_results_mock,
load_committee_statement_of_organization_mock,
):
cycle = 2020
test_committee = copy.deepcopy(self.STOCK_COMMITTEE)
load_committee_history_mock.return_value = (test_committee, [], cycle)
# cycle_out_of_range, last_cycle_has_financial, cycles
load_cycle_data_mock.return_value = (True, 2018, [2018])
load_reports_and_totals_mock.return_value = (
self.STOCK_REPORTS,
self.STOCK_TOTALS,
)
load_endpoint_results_mock.return_value = mock.MagicMock()
load_committee_statement_of_organization_mock.return_value = {
"receipt_date": "2019-11-30T00:00:00"
}
template_variables = views.get_committee("C001", | |
# coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A class to describe the shape and dtype of numpy arrays."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numbers
import numpy as np
import tensorflow as tf
nest = tf.contrib.framework.nest
def sample_bounded_spec(spec, rng):
"""Samples the given bounded spec.
Args:
spec: A BoundedSpec to sample.
rng: A numpy RandomState to use for the sampling.
Returns:
An np.array sample of the requested space.
"""
tf_dtype = tf.as_dtype(spec.dtype)
low = spec.minimum
high = spec.maximum
if tf_dtype.is_floating:
if spec.dtype == np.float64 and np.any(np.isinf(high - low)):
# The min-max interval cannot be represented by np.float64. This is a
# problem only for np.float64; np.float32 works as expected.
# Spec bounds are set to read-only, so we can't use augmented assignment.
low = low / 2 # pylint: disable=g-no-augmented-assignment
high = high / 2 # pylint: disable=g-no-augmented-assignment
return rng.uniform(
low,
high,
size=spec.shape,
).astype(spec.dtype)
else:
if spec.dtype == np.int64 and np.any(high - low < 0):
# The min-max interval cannot be represented by the tf_dtype. This is a
# problem only for int64.
low = low / 2 # pylint: disable=g-no-augmented-assignment
high = high / 2 # pylint: disable=g-no-augmented-assignment
if high < tf_dtype.max:
high = high + 1 # pylint: disable=g-no-augmented-assignment
elif spec.dtype != np.int64 and spec.dtype != np.uint64:
# We can still +1 the high if we cast it to the larger dtype.
high = high.astype(np.int64) + 1
return rng.randint(
low,
high,
size=spec.shape,
dtype=spec.dtype,
)
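# Minimal usage sketch (added; not part of the original module): sampling a
# bounded integer spec with a seeded RandomState. BoundedArraySpec is defined
# further down in this file; the concrete shape and bounds are illustrative only.
def _example_sample_bounded_spec():
    rng = np.random.RandomState(0)
    spec = BoundedArraySpec((2, 3), np.int32, minimum=0, maximum=9)
    sample = sample_bounded_spec(spec, rng)
    assert sample.shape == (2, 3) and sample.dtype == np.int32
    assert np.all((sample >= 0) & (sample <= 9))  # bounds are inclusive
    return sample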
def sample_spec_nest(structure, rng, outer_dims=()):
"""Samples the given nest of specs.
Args:
structure: An `ArraySpec`, or a nested dict, list or tuple of
`ArraySpec`s.
rng: A numpy RandomState to use for the sampling.
outer_dims: An optional list/tuple specifying outer dimensions to add to
the spec shape before sampling.
Returns:
A nest of sampled values following the ArraySpec definition.
"""
def sample_fn(spec):
spec = BoundedArraySpec.from_spec(spec)
spec = BoundedArraySpec(
tuple(outer_dims) + tuple(spec.shape), spec.dtype, spec.minimum,
spec.maximum, spec.name)
return sample_bounded_spec(spec, rng)
return nest.map_structure(sample_fn, structure)
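# Usage sketch (added for illustration): sampling a nest of bounded specs with a
# batch dimension prepended via outer_dims. The spec names, shapes and bounds
# below are made up; BoundedArraySpec is defined later in this file.
def _example_sample_spec_nest():
    rng = np.random.RandomState(0)
    structure = {
        'observation': BoundedArraySpec((4,), np.float32, minimum=-1.0, maximum=1.0),
        'action': BoundedArraySpec((), np.int32, minimum=0, maximum=3),
    }
    samples = sample_spec_nest(structure, rng, outer_dims=(2,))
    assert samples['observation'].shape == (2, 4)
    assert samples['action'].shape == (2,)
    return samples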
def check_arrays_nest(arrays, spec):
"""Check that the arrays conform to the spec.
Args:
arrays: A NumPy array, or a nested dict, list or tuple of arrays.
spec: An `ArraySpec`, or a nested dict, list or tuple of `ArraySpec`s.
Returns:
True if the arrays conforms to the spec, False otherwise.
"""
# Check that arrays and spec has the same structure.
try:
nest.assert_same_structure(arrays, spec)
except (TypeError, ValueError):
return False
def check_array(spec, array):
if not isinstance(spec, ArraySpec):
return False
return spec.check_array(array)
# Check all the elements in arrays match to their spec
checks = nest.map_structure(check_array, spec, arrays)
# Only return True if all the checks pass.
return all(nest.flatten(checks))
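# Sketch (added; illustrative only): validating a nest of arrays against a nest
# of specs. The second nest fails because its 'obs' array has the wrong shape.
def _example_check_arrays_nest():
    spec = {'obs': ArraySpec((2,), np.float32), 'reward': ArraySpec((), np.float64)}
    good = {'obs': np.zeros(2, dtype=np.float32), 'reward': 0.5}
    bad = {'obs': np.zeros(3, dtype=np.float32), 'reward': 0.5}
    assert check_arrays_nest(good, spec)
    assert not check_arrays_nest(bad, spec)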
def add_outer_dims_nest(structure, outer_dims):
def add_outer_dims(spec):
name = spec.name
shape = outer_dims + spec.shape
if hasattr(spec, 'minimum') and hasattr(spec, 'maximum'):
return BoundedArraySpec(shape, spec.dtype, spec.minimum,
spec.maximum, name)
return ArraySpec(shape, spec.dtype, name=name)
return nest.map_structure(add_outer_dims, structure)
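# Sketch (added for illustration): prepending a batch dimension to every spec in
# a nest; bounds are carried over for bounded specs. The shapes and bounds below
# are made up.
def _example_add_outer_dims_nest():
    structure = {
        'obs': ArraySpec((84, 84, 3), np.uint8),
        'action': BoundedArraySpec((), np.int32, minimum=0, maximum=5),
    }
    batched = add_outer_dims_nest(structure, (32,))
    assert batched['obs'].shape == (32, 84, 84, 3)
    assert batched['action'].shape == (32,)
    assert batched['action'].maximum == 5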
class ArraySpec(object):
"""Describes a numpy array or scalar shape and dtype.
An `ArraySpec` allows an API to describe the arrays that it accepts or
returns, before that array exists.
The equivalent version describing a `tf.Tensor` is `TensorSpec`.
"""
__slots__ = ('_shape', '_dtype', '_name')
def __init__(self, shape, dtype, name=None):
"""Initializes a new `ArraySpec`.
Args:
shape: An iterable specifying the array shape.
dtype: numpy dtype or string specifying the array dtype.
name: Optional string containing a semantic name for the corresponding
array. Defaults to `None`.
Raises:
TypeError: If the shape is not an iterable or if the `dtype` is an invalid
numpy dtype.
"""
self._shape = tuple(shape)
self._dtype = np.dtype(dtype)
self._name = name
@property
def shape(self):
"""Returns a `tuple` specifying the array shape."""
return self._shape
@property
def dtype(self):
"""Returns a numpy dtype specifying the array dtype."""
return self._dtype
@property
def name(self):
"""Returns the name of the ArraySpec."""
return self._name
def __repr__(self):
return 'ArraySpec(shape={}, dtype={}, name={})'.format(
self.shape, repr(self.dtype), repr(self.name))
def __eq__(self, other):
"""Checks if the shape and dtype of two specs are equal."""
if not isinstance(other, ArraySpec):
return False
return self.shape == other.shape and self.dtype == other.dtype
def __ne__(self, other):
return not self == other
def check_array(self, array):
"""Return whether the given NumPy array conforms to the spec.
Args:
array: A NumPy array or a scalar. Tuples and lists will not be converted
to a NumPy array automatically; they will cause this function to return
false, even if a conversion to a conforming array is trivial.
Returns:
True if the array conforms to the spec, False otherwise.
"""
if isinstance(array, np.ndarray):
return self.shape == array.shape and self.dtype == array.dtype
elif isinstance(array, numbers.Number):
return self.shape == tuple() and self.dtype == np.dtype(type(array))
else:
return False
@staticmethod
def from_array(array, name=None):
"""Construct a spec from the given array or number."""
if isinstance(array, np.ndarray):
return ArraySpec(array.shape, array.dtype, name)
elif isinstance(array, numbers.Number):
return ArraySpec(tuple(), type(array), name)
else:
raise ValueError('Array must be a np.ndarray or number. Got %r.' % array)
@staticmethod
def from_spec(spec):
"""Construct a spec from the given spec."""
return ArraySpec(spec.shape, spec.dtype, spec.name)
@classmethod
def is_bounded(cls):
del cls
return False
def is_discrete(self):
"""Whether spec is discrete."""
return np.issubdtype(self.dtype, np.integer)
def is_continuous(self):
"""Whether spec is continuous."""
return np.issubdtype(self.dtype, np.floating)
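# Quick illustration (added; not part of the original module) of ArraySpec
# checking semantics: NumPy arrays and plain Python scalars are validated against
# shape and dtype, while lists are rejected rather than converted.
def _example_array_spec_check():
    spec = ArraySpec((3,), np.int64, name='ids')
    assert spec.check_array(np.array([1, 2, 3], dtype=np.int64))
    assert not spec.check_array([1, 2, 3])                         # lists are not converted
    assert not spec.check_array(np.zeros((3,), dtype=np.float32))  # wrong dtype
    assert spec.is_discrete() and not spec.is_continuous()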
class BoundedArraySpec(ArraySpec):
"""An `ArraySpec` that specifies minimum and maximum values.
Example usage:
```python
# Specifying the same minimum and maximum for every element.
spec = BoundedArraySpec((3, 4), np.float64, minimum=0.0, maximum=1.0)
# Specifying a different minimum and maximum for each element.
spec = BoundedArraySpec(
(2,), np.float64, minimum=[0.1, 0.2], maximum=[0.9, 0.9])
# Specifying the same minimum and a different maximum for each element.
spec = BoundedArraySpec(
(3,), np.float64, minimum=-10.0, maximum=[4.0, 5.0, 3.0])
```
Bounds are meant to be inclusive. This is especially important for
integer types. The following spec will be satisfied by arrays
with values in the set {0, 1, 2}:
```python
spec = BoundedArraySpec((3, 4), np.int, minimum=0, maximum=2)
```
"""
__slots__ = ('_minimum', '_maximum')
def __init__(self, shape, dtype, minimum=None, maximum=None, name=None):
"""Initializes a new `BoundedArraySpec`.
Args:
shape: An iterable specifying the array shape.
dtype: numpy dtype or string specifying the array dtype.
minimum: Number or sequence specifying the minimum element bounds
(inclusive). Must be broadcastable to `shape`.
maximum: Number or sequence specifying the maximum element bounds
(inclusive). Must be broadcastable to `shape`.
name: Optional string containing a semantic name for the corresponding
array. Defaults to `None`.
Raises:
ValueError: If `minimum` or `maximum` are not broadcastable to `shape` or
if the limits are outside of the range of the specified dtype.
TypeError: If the shape is not an iterable or if the `dtype` is an invalid
numpy dtype.
"""
super(BoundedArraySpec, self).__init__(shape, dtype, name)
try:
np.broadcast_to(minimum, shape=shape)
except ValueError as numpy_exception:
raise ValueError('minimum is not compatible with shape. '
'Message: {!r}.'.format(numpy_exception))
try:
np.broadcast_to(maximum, shape=shape)
except ValueError as numpy_exception:
raise ValueError('maximum is not compatible with shape. '
'Message: {!r}.'.format(numpy_exception))
tf_dtype = tf.as_dtype(self._dtype)
low = tf_dtype.min
high = tf_dtype.max
if minimum is None:
minimum = low
if maximum is None:
maximum = high
self._minimum = np.array(minimum)
self._maximum = np.array(maximum)
if tf_dtype.is_floating:
# Replacing infinities with extreme finite float values.
self._minimum[self._minimum == -np.inf] = low
self._minimum[self._minimum == np.inf] = high
self._maximum[self._maximum == -np.inf] = low
self._maximum[self._maximum == np.inf] = high
if np.any(self._minimum > self._maximum):
raise ValueError(
'Spec bounds min has values greater than max: [{},{}]'.format(
self._minimum, self._maximum))
if (np.any(self._minimum < low) or np.any(self._minimum > high) or
np.any(self._maximum < low) or np.any(self._maximum > high)):
raise ValueError(
'Spec bounds [{},{}] not within the range [{}, {}] of the given '
'dtype ({})'.format(self._minimum, self._maximum, low, high,
self._dtype))
self._minimum = self._minimum.astype(self._dtype)
self._minimum.setflags(write=False)
self._maximum = self._maximum.astype(self._dtype)
self._maximum.setflags(write=False)
@classmethod
def from_spec(cls, spec, name=None):
if name is None:
name | |
<filename>api/twitter/flow/display.py
import logging
from math import ceil
from bottle import template, redirect, request, abort
from api.caching.temporal_analytics import getTemporalRange, getTemporalInfluenceCollection, getTimeIdFromTimestamp
from api.caching.tweet_user import readTweetsFromCache, readUsersFromCache, UserProjection, UserDataProjection
from api.config import Configuration
from api.core.threads import FollowerExtractorGateThread
from api.core.utility import parseInteger, parseString, getDateTime, getEpochMs, splitList
from api.geocode.geocode_cached import geocodeFromCacheById, geocodeSearch
from api.geocode.geocode_shared import GeocodeResultAbstract
from api.twitter.feed import User, Tweet
from api.twitter.flow.data_core import DataCollection
from api.twitter.flow.display_core import Display, AsyncTunnelProviderFile
from api.twitter.flow.display_oauth import OAuthSignIn
from api.web.config import WEBSITE_ROOT_HTTP
from api.web.twitter_instance import TwitterInstance
from api.web.web_core import redirect_problem, WebApplicationTwitter
__author__ = '<NAME>'
logger = logging.getLogger(__name__)
def onDisplayUsageFunc(display, keys):
instanceKey = keys.instance
assert isinstance(display, Display)
application = display.application
assert isinstance(application, WebApplicationTwitter)
instance = application.twitter_instances.getInstanceByInstanceKey(instanceKey)
# This prevents the instance from automatically being shut down.
if instance is not None:
instance.touch()
def getInstanceDescription(twitterInstance, includePrefix=True):
if includePrefix:
return 'Search stream %s' % twitterInstance.getShortDescription(False)
else:
return twitterInstance.getShortDescription(True)
def getHomeLink(projectName):
return Display.getLink(LandingPage.link_info.getPageLink(),projectName,target='_self')
def getInstanceLink(twitterInstance):
return LocationsMapPage.link_info.getPageLink(twitterInstance.instance_key)
class LocationsMapPage(Display):
link_info = Display.LinkInfo(lambda link: lambda instance: link % instance, '/instance/%s')
def __init__(self, application, mapWebSocketGroup, dataDownloadWebSocketManager, realTimePerformanceWebSocketGroup):
assert isinstance(application, WebApplicationTwitter)
Display.__init__(self,
application,
pageRoute=('/instance/<instance>', None),
webSocketManagers=[mapWebSocketGroup, dataDownloadWebSocketManager, realTimePerformanceWebSocketGroup],
onDisplayUsageFunc=onDisplayUsageFunc)
self.locations = set()
@property
def page_html_function(self):
def func(templateArguments, instance):
instance = self.application.twitter_instances.getInstanceByInstanceKey(instance)
if instance is None:
abort(404, "No active search stream found at this address")
assert isinstance(instance, TwitterInstance)
keywords = instance.twitter_thread.twitter_feed.keywords
geographicalSetupString = instance.geographic_setup_string
if keywords is None:
keywords = ''
keywordsDisplay = '[None]'
else:
keywords = ','.join(keywords)
keywordsDisplay = keywords
instanceDescription = getInstanceDescription(instance, False)
instanceDescriptionWithPrefix = getInstanceDescription(instance, True)
homeLink = getHomeLink(Configuration.PROJECT_NAME)
templateArguments.update({'instance_description' : instanceDescription,
'instance_description_with_prefix' : instanceDescriptionWithPrefix,
'home_link' : homeLink,
'instance_name': instance.instance_key,
'keywords': keywords,
'keywords_display' : keywordsDisplay,
'instance_map_data' : geographicalSetupString,
'post_address': WEBSITE_ROOT_HTTP + '/manage_instance', # for terminate instance button.
'login_address' : OAuthSignIn.link_info.getPageLink(),
'start_epoch' : instance.constructed_at,
'server_current_epoch' : getEpochMs()})
return template('locations-map.tpl', templateArguments)
return func
class UserInformationPage(Display):
link_info = Display.LinkInfo(lambda link: lambda instance, user: link % (instance, user), '/instance/%s/user/%d')
@staticmethod
def getPageLinkImage(instance, user, target):
assert isinstance(user, User)
imageHtml = Display.getImage(user.profile_image_url, user.name, None, className='twitter-profile-image')
return Display.getLink(UserInformationPage.link_info.getPageLink(instance,user.id), imageHtml, target=target)
def __init__(self, application, userInformationWebSocketGroup):
Display.__init__(self,
application,
pageRoute=('/instance/<instance>/user/<user:int>', None),
webSocketManagers=[userInformationWebSocketGroup],
onDisplayUsageFunc=onDisplayUsageFunc)
@property
def page_html_function(self):
def func(templateArguments, instance, user):
twitterInstance = self.application.twitter_instances.getInstanceByInstanceKey(instance)
if twitterInstance is None:
abort(404, "No active search stream found at this address")
instanceDescription = getInstanceDescription(twitterInstance, True)
instanceLink = getInstanceLink(twitterInstance)
homeLink = getHomeLink(Configuration.PROJECT_NAME)
templateArguments.update({'home_link' : homeLink,
'instance' : instance,
'instance_description' : instanceDescription,
'instance_link' : instanceLink,
'user_id' : user})
return template('user.tpl', templateArguments)
return func
class UserFollowerEnrichPage(Display):
link_info = Display.LinkInfo(lambda link: lambda instance, user: link % (instance, user), '/instance/%s/user_follower_enrich/%d')
def __init__(self, application, dataCollection, followerExtractorGateThread):
Display.__init__(self,
application,
pageRoute=('/instance/<instance>/user_follower_enrich/<user:int>', None),
webSocketManagers=None,
onDisplayUsageFunc=onDisplayUsageFunc)
assert isinstance(dataCollection, DataCollection)
assert isinstance(followerExtractorGateThread, FollowerExtractorGateThread)
self.data_collection = dataCollection
self.follower_extractor_gate_thread = followerExtractorGateThread
@property
def page_html_function(self):
def func(templateArguments, instance, user):
try:
twitterInstance = self.application.twitter_instances.getInstanceByInstanceKey(instance)
if twitterInstance is None:
abort(404, "No active search stream found at this address")
# We need name and followers count for the progress bar.
userObject = self.data_collection.getUser(instance, user, twitterInstance.twitter_thread.twitter_session, False, UserProjection.Geocode(True, UserDataProjection(['name','followers_count'])))
if not userObject:
abort(404, "No user found at this address")
assert isinstance(userObject, User)
if not self.follower_extractor_gate_thread.addUser(userObject, restrictInfluenceArea = False):
logger.warn('Did not process user %s/%d for enrichment - shouldProcessUser returned false' % (instance, user))
except KeyError:
logger.warn('Follower information enrich request received for user which does not exist: %s/%d' % (instance, user))
return redirect(UserInformationPage.link_info.getPageLink(instance, user))
return func
class TwitterCachePage(Display):
link_info = Display.LinkInfo(lambda link: lambda instance: link % instance, '/instance/%s/cached_tweets')
PAGE_SIZE_ID_NAME_DATA = 80
PAGE_SIZE_FULL_DATA = 10
def __init__(self, application):
Display.__init__(self,
application,
pageRoute=('/instance/<instance>/cached_tweets', None),
webSocketManagers=None,
onDisplayUsageFunc=onDisplayUsageFunc)
@property
def page_html_function(self):
def func(templateArguments, instance):
dataType = parseString(request.GET.type,['tweet','user'])
start_epoch = parseInteger(request.GET.start_epoch)
end_epoch = parseInteger(request.GET.end_epoch)
page_num = parseInteger(request.GET.page)
place_id = parseInteger(request.GET.place_id)
provider_id = parseInteger(request.GET.provider_id)
projection_type = parseString(request.GET.projection_type)
followee = parseInteger(request.GET.followee)
cache_id = GeocodeResultAbstract.buildCacheId(provider_id, place_id)
if dataType is None:
return redirect_problem('type is a required argument')
if page_num is None:
page_num = 0
data = []
if dataType == 'tweet':
tweets = readTweetsFromCache(None, instance, cache_id, start_epoch, end_epoch, page_num, TwitterCachePage.PAGE_SIZE_FULL_DATA)
if tweets is not None:
for tweet in tweets:
assert isinstance(tweet, Tweet)
userHtml = UserInformationPage.getPageLinkImage(instance, tweet.user, target='_self')
data.append([ tweet.created_at,
userHtml,
tweet.user.location_text,
tweet.text ])
elif dataType == 'user':
if len(projection_type) == 0:
projection = None
pageSize = TwitterCachePage.PAGE_SIZE_FULL_DATA
elif projection_type == 'name-only':
projection = UserProjection.IdNameImage()
pageSize = TwitterCachePage.PAGE_SIZE_ID_NAME_DATA
else:
return redirect_problem('Unsupported projection type: %s' % projection_type)
if followee is None:
return redirect_problem('Followee is required')
users = readUsersFromCache(None, instance, cache_id, start_epoch, end_epoch, page_num, pageSize, followee, userProjection=projection)
if users is not None:
for user in users:
assert isinstance(user, User)
data.append([user.id,
user.name,
user.profile_image_url,
UserInformationPage.link_info.getPageLink(instance, user.id)])
return {'json' : data}
return func
class InfluenceCachePage(Display):
link_info = Display.LinkInfo(lambda link: lambda instance: link % instance, '/instance/%s/influence')
def __init__(self, application):
Display.__init__(self,
application,
pageRoute=('/instance/<instance>/influence', None),
webSocketManagers=None,
onDisplayUsageFunc=onDisplayUsageFunc)
@property
def page_html_function(self):
def func(templateArguments, instance):
twitterInstance = self.application.twitter_instances.getInstanceByInstanceKey(instance)
if twitterInstance is None:
return dict()
baseEpoch = twitterInstance.constructed_at
start_epoch = parseInteger(request.GET.start_epoch, default=None)
end_epoch = parseInteger(request.GET.end_epoch, default=None)
source_place_id = parseInteger(request.GET.source_place_id)
source_provider_id = parseInteger(request.GET.source_provider_id)
if source_place_id is None:
logger.error('Invalid place ID specified while providing influence data: %s' % unicode(source_place_id))
return dict()
source_cache_id = GeocodeResultAbstract.buildCacheId(source_provider_id, source_place_id)
temporalCollection = getTemporalInfluenceCollection(instance)
if start_epoch is not None:
start_time_id = getTimeIdFromTimestamp(baseEpoch, Configuration.TEMPORAL_STEP, start_epoch)
else:
start_time_id = None
if end_epoch is not None:
end_time_id = getTimeIdFromTimestamp(baseEpoch, Configuration.TEMPORAL_STEP, end_epoch)
else:
end_time_id = None
timerMs = getEpochMs()
cacheData = getTemporalRange(temporalCollection, start_time_id, end_time_id, source_cache_id, preciseFromBack=True, preciseFromFront=True)
logger.info('Took %dms to read temporal range data' % (getEpochMs() - timerMs))
timerMs = getEpochMs()
geocodeByPlaceType = dict()
totalsByPlaceType = dict()
if cacheData is not None:
for providerId, providerIdData in cacheData.iteritems():
providerId = int(providerId)
for destination, count in providerIdData.iteritems():
split = destination.split('_')
placeType = int(split[0])
placeId = int(split[1])
record = [placeId,
providerId,
None,
None,
count,
None]
geocodeByPlaceType.setdefault(placeType,list()).append(record)
# Process only the records we are going to display.
for placeType, records in geocodeByPlaceType.iteritems():
aux = sorted(records, key=lambda x: x[4], reverse=True)
aux = aux[:Configuration.DISPLAY_MAX_NUM_INFLUENCE_RECORDS_PER_PLACE_TYPE]
geocodeByPlaceType[placeType] = aux
for record in aux:
cacheId = GeocodeResultAbstract.buildCacheId(record[1], record[0])
geocode = geocodeFromCacheById(cacheId)
record[2] = geocode.display_name
record[3] = geocode.coordinate
count = record[4]
record[5] = geocode.bounding_box
totalsByPlaceType[placeType] = totalsByPlaceType.get(placeType,0) + count
def getResultPart(placeType):
return {'geocode_list' : geocodeByPlaceType.get(placeType,list()), 'total' : totalsByPlaceType.get(placeType, 0)}
resultData = dict()
resultData['city'] = getResultPart(GeocodeResultAbstract.PlaceTypes.CITY)
resultData['country'] = getResultPart(GeocodeResultAbstract.PlaceTypes.COUNTRY)
resultData['continent'] = getResultPart(GeocodeResultAbstract.PlaceTypes.CONTINENT)
logger.info('Took %dms to build temporal range result data' % (getEpochMs() - timerMs))
return {'json' : resultData}
return func
class GeocodeCachePage(Display):
link_info = Display.LinkInfo(lambda link: lambda: link, '/geocode_search')
def __init__(self, application, providerId):
Display.__init__(self,
application,
pageRoute=('/geocode_search', None),
webSocketManagers=None)
self.provider_id = providerId
@property
def page_html_function(self):
def func(templateArguments):
placeName = parseString(request.GET.place_name)
data = list()
if placeName is None:
return {'json' : data}
locations = list()
maxLocations = 10
locations += GeocodeResultAbstract.searchGnsByName(placeName,3)
maxLocations -= len(locations)
if maxLocations > 0:
newLocations = geocodeSearch(self.provider_id,placeName,maxLocations)
maxLocations -= len(newLocations)
locations += newLocations
for location in locations:
assert isinstance(location,GeocodeResultAbstract)
data.append((location.cache_id,location.bounding_box,location.coordinate,location.display_name))
return {'json' : data}
return func
class LandingPage(Display):
link_info = Display.LinkInfo(lambda link: lambda: link, '/')
def __init__(self, application):
Display.__init__(self,
application,
pageRoute=('/', None),
webSocketManagers=None)
@staticmethod
def getLandingPageLink(pageNum):
return Display.addArgumentsToLink(LandingPage.link_info.getPageLink(),page=pageNum)
def getHumanTime(self, milliseconds):
seconds = milliseconds / 1000
if seconds < 60:
if seconds == 1:
return "1 second"
return "%d seconds" % seconds
minutes = seconds / 60
if minutes < 60:
if minutes == 1:
return "1 minute"
return "%d minutes" % minutes
hours = minutes / 60
if hours < 24:
if hours == 1:
return "1 hour"
return "%d hours" % hours
days = hours / 24
if days == 1:
return "1 day"
return "%d days" % days
@property
def page_html_function(self):
def func(templateArguments):
argList = list()
instanceList = self.application.twitter_instances.getInstanceList()
# todo remove this, this generates more instances for debugging.
# if len(instanceList) > 0:
# count = 0
# while count < 7:
# instanceList.append(instanceList[0])
# count += 1
# todo end of remove this.
numInstances = len(instanceList)
numInstancesPerPage = Configuration.NUM_LANDING_PAGE_INSTANCES_PER_PAGE
if numInstances > 3 or numInstances < 1:
numInstancesPerRow = Configuration.NUM_LANDING_PAGE_INSTANCES_PER_ROW
else:
numInstancesPerRow = numInstances
thumbnailSpan = 12 / numInstancesPerRow
pageNum = parseInteger(request.GET.page,0,numInstances,0)
startIndex = numInstancesPerPage * pageNum
endIndex = startIndex + numInstancesPerPage
numPages = int(ceil(float(numInstances) / float(numInstancesPerPage)))
for instance in instanceList[startIndex:endIndex]:
assert isinstance(instance, TwitterInstance)
argList.append((LocationsMapPage.link_info.getPageLink(instance.instance_key),
instance.getShortDescription(True),
instance.geographic_setup_string))
# Split into rows.
argList = splitList(argList, numInstancesPerRow)
templateArguments.update({'instances' : argList})
# Pagination
startSmallPageIndex = pageNum - 5
endSmallPageIndex = 0
if startSmallPageIndex < 0:
endSmallPageIndex -= startSmallPageIndex
startSmallPageIndex = 0
endSmallPageIndex += (pageNum + 5)
offEndBy = endSmallPageIndex - numPages
if offEndBy > 0:
startSmallPageIndex -= offEndBy
if startSmallPageIndex < 0:
startSmallPageIndex = 0
endSmallPageIndex = numPages
pagination = list()
for
from typing import Optional
import warnings
import pandas as pd
from macpie._config import get_option
from macpie import lltools, validatortools
def date_proximity(
left,
right,
id_on=None,
id_left_on=None,
id_right_on=None,
date_on=None,
date_left_on=None,
date_right_on=None,
get: str = "all",
when: str = "earlier_or_later",
days: int = 90,
left_link_id=None,
dropna: bool = False,
drop_duplicates: bool = False,
duplicates_indicator: bool = False,
merge="partial",
merge_suffixes=get_option("operators.binary.column_suffixes"),
prepend_levels=(None, None),
) -> pd.DataFrame:
"""Links data across two :class:`pandas.DataFrame` objects by date proximity.
Specifically, a "left" DataFrame contains a timepoint anchor, and a "right" DataFrame
is linked to the left by retrieving all rows that match on a specified id col, and
whose specified date fields are within a certain time range of each other.
:param left: the DataFrame containing the timepoint anchor
:param right: the DataFrame to link
:param id_on: primary column to join on. These must be found in both
DataFrames.
:param id_left_on: primary column to join on in the left DataFrame
:param id_right_on: primary column to join on in the right DataFrame
:param date_on: date columns to use for timepoint matching. These must
be found in both DataFrames, and the one on the left
will act as timepoint anchor.
:param date_left_on: date column in left DataFrame to act as timepoint anchor.
:param date_right_on: date column in the right DataFrame to compare with left's
timepoint anchor
:param get: which rows of the right DataFrame to link in reference to the
timepoint anchor:
``all``
keep all rows
``closest``
            get only the closest row of the right DataFrame that is within
            ``days`` days of the (left) timepoint anchor
:param when: which rows of the right DataFrame to link in temporal relation
to the timepoint anchor
``earlier``
get only rows that are earlier than the timepoint anchor
``later``
            get only rows that are later (more recent) than the timepoint anchor
``earlier_or_later``
get rows that are earlier or later than the timepoint anchor
:param days: the time range measured in days
:param left_link_id: the id column in the left DataFrame to act as the
primary key of that data. This helps to ensure there
are no duplicates in the left DataFrame (i.e. rows with
the same ``id_left_on`` and ``date_left_on``)
:param dropna: whether to exclude rows that did not find any match
:param merge: which columns to include in result
``partial``
include only columns from the right DataFrame
``full``
include all columns from both left and right DataFrames
    :param merge_suffixes: A length-2 sequence where the first element is the
                           suffix to add to the left DataFrame's columns, and the
                           second element is the suffix to add to the right DataFrame's columns.
:param prepend_levels: A length-2 sequence where each element is optionally a string
                           indicating a top-level index to add to column indexes in ``left``
and ``right`` respectively (thus creating a :class:`pandas.MultiIndex`
if needed). Pass a value of ``None`` instead of a string
to indicate that the column index in ``left`` or ``right`` should be
left as-is. At least one of the values must not be ``None``.
"""
op = _DateProximityOperation(
left,
right,
id_on=id_on,
id_left_on=id_left_on,
id_right_on=id_right_on,
date_on=date_on,
date_left_on=date_left_on,
date_right_on=date_right_on,
get=get,
when=when,
days=days,
left_link_id=left_link_id,
dropna=dropna,
drop_duplicates=drop_duplicates,
duplicates_indicator=duplicates_indicator,
merge=merge,
merge_suffixes=merge_suffixes,
prepend_levels=prepend_levels,
)
return op.get_result()
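# Illustrative usage sketch (not part of the original module). The DataFrame
# contents and the column names "pidn", "visit_date" and "test_date" are
# hypothetical; only the keyword arguments come from the docstring above.
def _date_proximity_example():  # pragma: no cover
    left = pd.DataFrame(
        {
            "pidn": [1, 1],
            "visit_date": pd.to_datetime(["2020-01-01", "2020-06-01"]),
        }
    )
    right = pd.DataFrame(
        {
            "pidn": [1, 1, 1],
            "test_date": pd.to_datetime(["2020-01-10", "2020-05-20", "2021-01-01"]),
        }
    )
    # For each left (anchor) row, keep only the closest right row within 90 days.
    return date_proximity(
        left,
        right,
        id_on="pidn",
        date_left_on="visit_date",
        date_right_on="test_date",
        get="closest",
        when="earlier_or_later",
        days=90,
    )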
class _DateProximityOperation:
def __init__(
self,
left: pd.DataFrame,
right: pd.DataFrame,
id_on=None,
id_left_on=None,
id_right_on=None,
date_on=None,
date_left_on=None,
date_right_on=None,
get: str = "all",
when: str = "earlier_or_later",
days: int = 90,
left_link_id=None,
dropna: bool = False,
drop_duplicates: bool = False,
duplicates_indicator: bool = False,
merge="partial",
merge_suffixes=get_option("operators.binary.column_suffixes"),
prepend_levels=(None, None),
):
self.left = left
self.right = right
self.id_on = lltools.maybe_make_list(id_on)
self.id_left_on = lltools.maybe_make_list(id_left_on)
self.id_right_on = lltools.maybe_make_list(id_right_on)
self.date_on = date_on
self.date_left_on = date_left_on
self.date_right_on = date_right_on
self.get = get
self.when = when
self.days = days
self.left_link_id = left_link_id
self.dropna = validatortools.validate_bool_kwarg(dropna, "dropna")
self.drop_duplicates = validatortools.validate_bool_kwarg(
drop_duplicates, "drop_duplicates"
)
self.duplicates_indicator = duplicates_indicator
self.duplicates_indicator_name: Optional[str]
if isinstance(self.duplicates_indicator, str):
self.duplicates_indicator_name = self.duplicates_indicator
elif isinstance(self.duplicates_indicator, bool):
self.duplicates_indicator_name = (
get_option("column.system.duplicates") if self.duplicates_indicator else None
)
else:
raise ValueError("indicator option can only accept boolean or string arguments")
self.merge = merge
self.merge_suffixes = merge_suffixes
self.prepend_levels = prepend_levels
self._left_suffix = get_option("operators.binary.column_suffixes")[0]
self._right_suffix = get_option("operators.binary.column_suffixes")[1]
self._diff_days_col = get_option("column.system.diff_days")
self._abs_diff_days_col = get_option("column.system.abs_diff_days")
self._merge_indicator_col = get_option("column.system.merge")
self._validate_specification()
def get_result(self):
result = self._get_all()
if self.get == "closest":
result = self._get_closest(result)
result = self._handle_dropna(result)
result = self._handle_duplicates(result)
result = self._handle_merge(result)
return result
def _get_all(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# Ignore the following warning caused when merging on dataframes
# with MultiIndex columns:
# PerformanceWarning: dropping on a non-lexsorted multi-index
# without a level parameter may impact performance.
# obj = obj._drop_axis(labels, axis, level=level, errors=errors)
everything = pd.merge(
self.link_table,
self.right,
how="left",
left_on=self.id_left_on,
right_on=self.id_right_on,
indicator=self._merge_indicator_col,
)
# fix the leveling of the merge indicator column created by pd.merge
if self._right_level:
everything.rename(
columns={
self._merge_indicator_col: self._right_level,
"": self._merge_indicator_col,
},
inplace=True,
)
# create a column 'diff_days' with date difference in days
everything = everything.mac.add_diff_days(
self.date_left_on, self.date_right_on, self._diff_days_col
)
# keep rows where the date differences within range
# create copy to avoid chained indexing and getting a SettingWithCopyWarning
all_candidates = everything.loc[abs(everything[self._diff_days_col]) <= self.days].copy()
if self.when == "earlier":
all_candidates = all_candidates.loc[all_candidates[self._diff_days_col] <= 0]
elif self.when == "later":
all_candidates = all_candidates.loc[all_candidates[self._diff_days_col] >= 0]
return all_candidates
def _get_closest(self, all_candidates):
# create a column containing the absolute value of diff_days
all_candidates.loc[:, self._abs_diff_days_col] = all_candidates[self._diff_days_col].abs()
all_candidates = all_candidates.sort_values(
by=self.link_table_cols + [self._abs_diff_days_col],
# by=['index', self._abs_diff_days_col],
inplace=False,
na_position="last",
)
groupby_cols = self.id_left_on + [self.date_left_on]
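        # Within each (id, anchor-date) group, keep every row whose absolute
        # day difference equals the group minimum; ties are all retained here
        # and only trimmed later by _handle_duplicates if requested.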
closest_candidates = all_candidates[
(
all_candidates[self._abs_diff_days_col]
== all_candidates.groupby(groupby_cols)[[self._abs_diff_days_col]].transform(
"min"
)[self._abs_diff_days_col]
)
]
return closest_candidates
def _handle_dropna(self, result):
if self.dropna is False:
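            # dropna=False: left-merge the matches back onto the anchor rows so
            # anchors with no match are kept (their right-hand columns as NaN).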
left_frame = self.left[self.link_table_cols] if self.merge == "partial" else self.left
result = pd.merge(
left_frame, result, how="left", on=self.link_table_cols, indicator=False
)
return result
def _handle_duplicates(self, result):
dups = result.duplicated(subset=self.link_table_cols, keep=False)
# handle duplicates
if dups.any():
if self.drop_duplicates:
result = result.drop_duplicates(
subset=self.link_table_cols, keep="last", ignore_index=True
)
elif self.duplicates_indicator:
result.mac.insert(self.duplicates_indicator_name, dups)
return result
def _handle_merge(self, result):
left_suffix = self.merge_suffixes[0]
right_suffix = self.merge_suffixes[1]
if self.merge == "partial":
result = result.mac.drop_suffix(self._right_suffix)
result = result.mac.replace_suffix(self._left_suffix, left_suffix)
else:
result = result.mac.replace_suffix(self._left_suffix, left_suffix)
result = result.mac.replace_suffix(self._right_suffix, right_suffix)
return result
def _validate_specification(self):
if self.id_on:
if self.id_left_on or self.id_right_on:
raise ValueError(
'Must pass argument "id_on" OR "id_left_on" '
'and "id_right_on", but not a combination of both.'
)
self.id_left_on = self.left.mac.get_col_names(self.id_on)
self.id_right_on = self.right.mac.get_col_names(self.id_on)
elif self.id_left_on and self.id_right_on:
if len(self.id_left_on) != len(self.id_right_on):
raise ValueError("len(id_right_on) must equal len(id_left_on)")
self.id_left_on = self.left.mac.get_col_names(self.id_left_on)
self.id_right_on = self.right.mac.get_col_names(self.id_right_on)
else:
raise ValueError(
'Must pass argument "id_on" OR "id_left_on" '
'and "id_right_on", but not a combination of both.'
)
if not self.date_on:
if not self.date_left_on or not self.date_right_on:
raise ValueError(
'Must pass argument "date_on" OR "date_left_on" '
'and "date_right_on", but not a combination of both.'
)
self.date_left_on = self.left.mac.get_col_name(self.date_left_on)
self.date_right_on = self.right.mac.get_col_name(self.date_right_on)
else:
if self.date_left_on or self.date_right_on:
raise ValueError(
'Must pass argument "date_on" OR "date_left_on" '
'and "date_right_on", but not a combination of both.'
)
self.date_left_on = self.date_right_on = self.left.mac.get_col_name(self.date_on)
self.date_left_on = self.left.mac.get_col_name(self.date_left_on)
self.date_right_on = self.right.mac.get_col_name(self.date_right_on)
if not self.left.mac.is_date_col(self.date_left_on):
self.left.mac.to_datetime(self.date_left_on)
# raise TypeError(
# f"'date_left_on' column of '{self.date_left_on}' is not a valid date column"
# )
if not self.right.mac.is_date_col(self.date_right_on):
self.right.mac.to_datetime(self.date_right_on)
# raise TypeError(
# f"'date_right_on' column of '{self.date_right_on}' is not a valid date column"
# )
if self.get not in ["all", "closest"]:
raise ValueError(f"invalid get option: {self.get}")
if self.when not in ["earlier", "later", "earlier_or_later"]:
raise ValueError(f"invalid when option: {self.when}")
if isinstance(self.days, int):
if self.days < 0:
raise ValueError("days option value cannot be negative")
else:
raise TypeError("days option needs to be an integer")
# check for duplicates
if not self.left_link_id:
has_dupes = self.left[self.id_left_on + [self.date_left_on]].duplicated().any()
if has_dupes:
raise ValueError(
f"Duplicate rows with the same '{self.id_left_on}' and '{self.date_left_on}' exist. Aborting."
)
else:
self.left_link_id = self.left.mac.get_col_name(self.left_link_id)
has_dupes = self.left[self.left_link_id].duplicated().any()
if has_dupes:
raise ValueError(
f"ID column '{self.left_link_id}' must be unique but is not. Aborting."
)
if self.merge not in ["partial", "full"]:
raise ValueError(f"invalid merge option: {self.merge}")
if not lltools.is_list_like(self.merge_suffixes):
raise ValueError(
"'merge_suffixes' needs to be a tuple or list of two strings (e.g. ('_x','_y'))"
)
elif len(self.merge_suffixes) != 2:
raise ValueError(
"'merge_suffixes' needs to be a tuple or list of two strings (e.g. ('_x','_y'))"
)
self._add_suffixes()
if not lltools.is_list_like(self.prepend_levels):
raise ValueError(
"'prepend_levels' needs to be a tuple or list of | |
"""**repolygon**: create designs by spatial *re*petition of *polygon*s.
Created by <NAME>, 2017; tidied & uploaded to GitHub 2019.
This module contains the data that defines the original example designs as
well as some replications of famous designs.
"""
# Colour scheme for designs without use of the advanced colouring module:
NO_COLOURING_DARK = "midnightblue"
NO_COLOURING_MID = "royalblue" # of lightness between _DARK and _LIGHT
NO_COLOURING_LIGHT = "cornflowerblue"
NO_COLOURING_TRANSPARENT = "none" # implies (by context) unfilled or no edges
""" Define colours for all example designs in a single dictionary.
Dictionary keys are the design identifiers, which are simply labels of
'repolygon_design_i' for integer 'i', and correspond to the designs
represented (with basic default colouring so as to distinguish the outlines)
in REPOLYGON_EXAMPLES.
Dictionary values are the design's defining colour set which is stored in a
dictionary structure where the keys are descriptors of each colour and the
values are the corresponding HTML colour codes specifying the exact colour.
"""
FULL_COLOUR_EXAMPLES_COLOURS = {
"repolygon_design_1": {
"BLACK": "#000000",
"WHITE": "#FFFFFF",
"DARK BLUE": "#2C254F",
"LIGHT BLUE": "#617E83",
"DARK BROWN": "#321D1C",
"LIGHT BROWN": "#55423F",
},
"repolygon_design_2": {
"OFF WHITE": "#DEF7FF",
"BLUE": "#8788B9",
"TEAL": "#232A31",
"MINT": "#C6D2B6",
"DARK BROWN": "#321E1E",
"MEDIUM BROWN": "#662B1E",
"SAND": "#AC9371",
},
"repolygon_design_3": {
"OFF BLACK": "#080303",
"GREY": "#2E2830",
"DARK PURPLE": "#2B1931",
"LIGHT PURPLE": "#604980",
"DARK RED": "#611E15",
"MEDIUM RED": "#8C2B23",
"LIGHT RED": "#B93B2F",
"DARK BROWN": "#30130D",
"MEDIUM BROWN": "#612E16",
"LIGHT BROWN": "#8B4B2C",
"DARK YELLOW": "#858310",
"MEDIUM YELLOW": "#E2CC20",
"LIGHT YELLOW": "#E3D372",
"DARK GREEN": "#232905",
"MEDIUM GREEN": "#4D4F0D",
"LIGHT GREEN": "#778811",
},
"repolygon_design_4": {
"DARK RED": "#6C0C02",
"MEDIUM RED": "#A71A03",
"ORANGE": "#D95505",
"SAND": "#EDB279",
"BLUE": "#3947FF",
},
"repolygon_design_5": {
"BLACK": "#000000",
"YELLOW": "#EFEF83",
"DARK BLUE": "#110E20",
"LIGHT BLUE": "#425182",
"DARK MAROON": "#1F040A",
"MEDIUM MAROON": "#3D0B16",
"LIGHT MAROON": "#771721",
},
"repolygon_design_6": {
"OFF BLACK": "#060606",
"DARK GREEN": "#176621",
"MEDIUM GREEN": "#77B06B",
"LIGHT GREEN": "#B8EFB1",
},
"repolygon_design_7": {
"OFF BLACK": "#071510",
"KHAKI": "#1E301A",
"BROWN": "#5C2616",
"SAND": "#B57F31",
"YELLOW": "#FBFC80",
},
"repolygon_design_8": {
"OFF BLACK": "#030306",
"WHITE": "#FFFFFF",
"GREY": "#332D3F",
"TEAL": "#002627",
"BLUE": "#00249A",
"PURPLE": "#37264B",
"PINK": "#982B47",
"RED": "#96231E",
"BROWN": "#672A1E",
},
"repolygon_design_9": {
"BLACK": "#000000",
"WHITE": "#FFFFFF",
"BLUE": "#083C6C",
"GREEN": "#00765E",
"DARK TEAL": "#002A3A",
"MEDIUM TEAL": "#00979C",
"LIGHT TEAL": "#00BAC6",
},
"repolygon_design_10": {
"BLACK": "#000000",
"OFF WHITE": "#FAE7B5",
"YELLOW": "#E3A857",
"ORANGE": "#AF4035",
"GREEN": "#1C352D",
},
"repolygon_design_11": {
"OFF BLACK": "#101D18",
"OFF WHITE": "#F0EAD6",
"MAROON": "#662628",
"GREEN": "#144C2E",
},
}
""" Define data for all minimal-tone example designs in a single dictionary.
Dictionary keys are the design identifiers, which are simply labels of
'repolygon_design_i' for integer 'i'. In general, the higher the label value
of 'i', the more complicated the design with respect to the styling.
Dictionary values are the design's defining data which is stored in a
two-tuple data structure, with the first element a list of tiled (identical)
polygon layers (here labelled 'j') to plot, each defined as another two-tuple:
(
(A_j, B_j, C_j, (Dx_j, Dy_j), (Ex_j, Ey_j)),
(F_j, G_j, H_j, I_j, J_j)
)
where the elements of the first tuple define the geometry of the specific
polygon tile layer, and are (all being compulsory to define):
* A_j :
number of sides of the polygon e.g. 4 produces a square, 6 a hexagon;
* B_j :
size of the polygon, scaled relative to 1 for a polygon drawn on the
unit circle (a circle of radius 1);
* C_j :
rotational factor x, which rotates the polygon by 180/x degrees
relative to the equivalent polygon with a horizontal top edge;
* Dx_j, Dy_j :
tile-to-tile distance (take any vertex as a reference) along the 'x'
(Dx_j) and 'y' (Dy_j) axes;
* Ex_j, Ey_j :
shift from 0 for the first polygon's root vertex 'x' (Ex_j) and 'y'
(Ey_j) coordinates (this shift is also applied to all other polygons).
and the elements of the second tuple define the styling of that same layer,
and are (all being optional, with defaults as given applied if undefined):
* F_j :
linewidth of the edges (default 1);
* G_j :
style of edges (default 'solid');
* H_j :
colour of the edges (default NO_COLOURING_LIGHT);
* I_j :
colour of any fill (default NO_COLOURING_TRANSPARENT i.e. no fill);
* J_j :
integer influencing the order in which the tile layer is plotted
relative to other layers (the matplotlib 'zorder'), where the higher
    the integer the further towards the top the layer appears (default 0).
and the second element of the overarching two-tuple defines properties of the
whole design and is defined as:
(
(Kx_min, Kx_max, Ky_min, Ky_max),
L
)
where the elements of the above tuple are (where L is optional):
* Kx_min, Kx_max :
the 'x' axes limits i.e. minimum (Kx_min) & maximum (Kx_max) points
which bound the full design i.e. plot;
* Ky_min, Ky_max :
the 'y' axes limits i.e. minimum (Ky_min) & maximum (Ky_max) points
which bound the full design i.e. plot;
* L :
the background colour of the full design i.e. plot facecolour
(default NO_COLOURING_DARK).
This gives an overall structure, indented consistently as follows for tuple
distinction and general clarity, for 'j = 1, 2, ..., n' tiled polygon layers:
(
[
((A_1, B_1, C_1, (Dx_1, Dy_1), (Ex_1, Ey_1)),
(F_1, G_1, H_1, I_1, J_1)),
((A_2, B_2, C_2, (Dx_2, Dy_2), (Ex_2, Ey_2)),
(F_2, G_2, H_2, I_2, J_2)),
...
...
...
((A_n, B_n, C_n, (Dx_n, Dy_n), (Ex_n, Ey_n)),
(F_n, G_n, H_n, I_n, J_n)),
],
((Kx_min, Kx_max, Ky_min, Ky_max), L)
)
"""
MINIMAL_TONE_EXAMPLES_SPEC = {
"repolygon_design_1": (
[
((4, 15.125, 1, (15, 48), (0, 0)), ()),
((6, 15.125, 1, (15, 48), (7.5, 24)), ()),
((12, 22, 12, (15, 48), (7.5, 24)), ()),
],
((40, 200, 40, 200),),
),
"repolygon_design_2": (
[
((6, 2, 1, (12.2, 7), (0, 0)), ()),
((6, 2, 1, (12.2, 7), (6.1, 3.5)), ()),
((12, 6, 1, (12.2, 7), (0, 0)), ()),
((12, 6, 1, (12.2, 7), (6.1, 3.5)), ()),
((50, 3.5, 6, (12.2, 7), (0, 0)), ()),
((50, 3.5, 6, (12.2, 7), (6.1, 3.5)), ()),
],
((6.1, 42.7, 6.1, 42.7),),
),
"repolygon_design_3": (
[
((4, 1, 1, (6, 6), (2, 2)), (0.75,)),
((6, 2, 1, (6, 6), (2, 2)), (0.75,)),
((6, 2.75, 1, (6, 6), (5, 5)), (0.75,)),
((8, 5, 1, (6, 6), (2, 2)), (0.75,)),
((12, 3.5, 1, (6, 6), (2, 2)), (0.75,)),
],
((2, 26, 2, 26),),
),
"repolygon_design_4": (
[
((16, 0.9, 1, (6, 6), (0, 0)), (1.5,)),
((16, 0.9, 1, (6, 6), (3, 3)), (1.5,)),
((16, 0.9, 8, (6, 6), (0, 0)), (1.5,)),
((16, 0.9, 8, (6, 6), (3, 3)), (1.5,)),
((16, 1.8, 1, (6, 6), (0, 0)), (3,)),
((16, 1.8, 1, (6, 6), (3, 3)), (3,)),
((16, 1.8, 8, (6, 6), (0, 0)), (3,)),
((16, 1.8, 8, (6, 6), (3, 3)), (3,)),
((16, 2.7, 1, (6, 6), (0, 0)), (4.5,)),
((16, 2.7, 1, (6, 6), (3, 3)), (5.5,)),
((16, 2.7, 8, (6, 6), (0, 0)), (4.5,)),
((16, 2.7, 8, (6, 6), (3, 3)), (4.5,)),
((16, 3.6, 1, (6, 6), (0, 0)), (6,)),
((16, 3.6, 1, (6, 6), (3, 3)), (6,)),
((16, 3.6, 8, (6, 6), (0, 0)), (6,)),
((16, 3.6, 8, (6, 6), (3, 3)), (6,)),
],
((6, 24, 6, 24),),
),
"repolygon_design_5": (
[
((8, 1.6, 8, (4, 4), (0, 0)), (3,)),
((8, 2.5, 1, (4, 4), (0, 0)), (2,)),
((8, 4, 1, (4, 4), (0, 0)), (2,)),
],
((0, 20, 0, 20),),
),
"repolygon_design_6": (
[
((4, 1.28, -2.58, (5.6, 5.6), (0, 0)), (2,)),
((8, 8.5, 4.25, (5.6, 5.6), (0, 0)), (6,)),
((8, 10, 4.25, (5.6, 5.6), (0, 0)), (2,)),
],
((11.2, 28, 11.2, 28),),
),
"repolygon_design_7": (
[
((12, 0.56, -2.8, (5.6, 5.6), (0, 0)), (7,)),
((12, 1.4, -2.8, (5.6, 5.6), (0, 0)), (6,)),
((12, 2.24, -2.8, (5.6, 5.6), (0, 0)), (5,)),
((12, 3.08, -2.8, (5.6, 5.6), (0, 0)), (4,)),
((12, 3.92, -2.8, (5.6, 5.6), (0, 0)), (3,)),
((12, 4.72, -2.8, (5.6, 5.6), (0, 0)), (2,)),
((12, 5.6, -2.8, (5.6, 5.6), (0, 0)), ()),
],
((2.8, 19.6, 2.8, 19.6),),
),
"repolygon_design_8": (
[
((4, 0.45, 1, (5.6, 5.6), (0, 2.8)), ()),
((4, 0.7, 1, (5.6, 5.6), (0, 2.8)), ()),
            ((6, 0.8, 6,
type may apply to a resource at any point in time.
:param str message: A Message containing details about this condition's last transition from one status to another, if any.
"""
pulumi.set(__self__, "last_transition_time", last_transition_time)
pulumi.set(__self__, "reason", reason)
pulumi.set(__self__, "status", status)
pulumi.set(__self__, "type", type)
if message is not None:
pulumi.set(__self__, "message", message)
@property
@pulumi.getter(name="lastTransitionTime")
def last_transition_time(self) -> str:
"""
LastTransitionTime is the last time this condition transitioned from one status to another.
"""
return pulumi.get(self, "last_transition_time")
@property
@pulumi.getter
def reason(self) -> str:
"""
A Reason for this condition's last transition from one status to another.
"""
return pulumi.get(self, "reason")
@property
@pulumi.getter
def status(self) -> str:
"""
Status of this condition; is it currently True, False, or Unknown?
"""
return pulumi.get(self, "status")
@property
@pulumi.getter
def type(self) -> str:
"""
Type of this condition. At most one of each condition type may apply to a resource at any point in time.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def message(self) -> Optional[str]:
"""
A Message containing details about this condition's last transition from one status to another, if any.
"""
return pulumi.get(self, "message")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class NoSQLInstanceSpec(dict):
"""
NoSQLInstanceSpec specifies the desired state of a NoSQLInstance.
"""
def __init__(__self__, *,
class_ref: Optional['outputs.NoSQLInstanceSpecClassRef'] = None,
class_selector: Optional['outputs.NoSQLInstanceSpecClassSelector'] = None,
resource_ref: Optional['outputs.NoSQLInstanceSpecResourceRef'] = None,
write_connection_secret_to_ref: Optional['outputs.NoSQLInstanceSpecWriteConnectionSecretToRef'] = None):
"""
NoSQLInstanceSpec specifies the desired state of a NoSQLInstance.
:param 'NoSQLInstanceSpecClassRefArgs' class_ref: A ClassReference specifies a resource class that will be used to dynamically provision a managed resource when the resource claim is created.
:param 'NoSQLInstanceSpecClassSelectorArgs' class_selector: A ClassSelector specifies labels that will be used to select a resource class for this claim. If multiple classes match the labels one will be chosen at random.
:param 'NoSQLInstanceSpecResourceRefArgs' resource_ref: A ResourceReference specifies an existing managed resource, in any namespace, to which this resource claim should attempt to bind. Omit the resource reference to enable dynamic provisioning using a resource class; the resource reference will be automatically populated by Crossplane.
:param 'NoSQLInstanceSpecWriteConnectionSecretToRefArgs' write_connection_secret_to_ref: WriteConnectionSecretToReference specifies the name of a Secret, in the same namespace as this resource claim, to which any connection details for this resource claim should be written. Connection details frequently include the endpoint, username, and password required to connect to the managed resource bound to this resource claim.
"""
if class_ref is not None:
pulumi.set(__self__, "class_ref", class_ref)
if class_selector is not None:
pulumi.set(__self__, "class_selector", class_selector)
if resource_ref is not None:
pulumi.set(__self__, "resource_ref", resource_ref)
if write_connection_secret_to_ref is not None:
pulumi.set(__self__, "write_connection_secret_to_ref", write_connection_secret_to_ref)
@property
@pulumi.getter(name="classRef")
def class_ref(self) -> Optional['outputs.NoSQLInstanceSpecClassRef']:
"""
A ClassReference specifies a resource class that will be used to dynamically provision a managed resource when the resource claim is created.
"""
return pulumi.get(self, "class_ref")
@property
@pulumi.getter(name="classSelector")
def class_selector(self) -> Optional['outputs.NoSQLInstanceSpecClassSelector']:
"""
A ClassSelector specifies labels that will be used to select a resource class for this claim. If multiple classes match the labels one will be chosen at random.
"""
return pulumi.get(self, "class_selector")
@property
@pulumi.getter(name="resourceRef")
def resource_ref(self) -> Optional['outputs.NoSQLInstanceSpecResourceRef']:
"""
A ResourceReference specifies an existing managed resource, in any namespace, to which this resource claim should attempt to bind. Omit the resource reference to enable dynamic provisioning using a resource class; the resource reference will be automatically populated by Crossplane.
"""
return pulumi.get(self, "resource_ref")
@property
@pulumi.getter(name="writeConnectionSecretToRef")
def write_connection_secret_to_ref(self) -> Optional['outputs.NoSQLInstanceSpecWriteConnectionSecretToRef']:
"""
WriteConnectionSecretToReference specifies the name of a Secret, in the same namespace as this resource claim, to which any connection details for this resource claim should be written. Connection details frequently include the endpoint, username, and password required to connect to the managed resource bound to this resource claim.
"""
return pulumi.get(self, "write_connection_secret_to_ref")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class NoSQLInstanceSpecClassRef(dict):
"""
A ClassReference specifies a resource class that will be used to dynamically provision a managed resource when the resource claim is created.
"""
def __init__(__self__, *,
api_version: Optional[str] = None,
field_path: Optional[str] = None,
kind: Optional[str] = None,
name: Optional[str] = None,
namespace: Optional[str] = None,
resource_version: Optional[str] = None,
uid: Optional[str] = None):
"""
A ClassReference specifies a resource class that will be used to dynamically provision a managed resource when the resource claim is created.
:param str api_version: API version of the referent.
:param str field_path: If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.
:param str kind: Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
:param str name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
:param str namespace: Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
:param str resource_version: Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
:param str uid: UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
"""
if api_version is not None:
pulumi.set(__self__, "api_version", api_version)
if field_path is not None:
pulumi.set(__self__, "field_path", field_path)
if kind is not None:
pulumi.set(__self__, "kind", kind)
if name is not None:
pulumi.set(__self__, "name", name)
if namespace is not None:
pulumi.set(__self__, "namespace", namespace)
if resource_version is not None:
pulumi.set(__self__, "resource_version", resource_version)
if uid is not None:
pulumi.set(__self__, "uid", uid)
@property
@pulumi.getter(name="apiVersion")
def api_version(self) -> Optional[str]:
"""
API version of the referent.
"""
return pulumi.get(self, "api_version")
@property
@pulumi.getter(name="fieldPath")
def field_path(self) -> Optional[str]:
"""
If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.
"""
return pulumi.get(self, "field_path")
@property
@pulumi.getter
def kind(self) -> Optional[str]:
"""
Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def namespace(self) -> Optional[str]:
"""
Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
"""
return pulumi.get(self, "namespace")
@property
@pulumi.getter(name="resourceVersion")
def resource_version(self) -> Optional[str]:
"""
Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
"""
return pulumi.get(self, "resource_version")
@property
@pulumi.getter
def uid(self) -> Optional[str]:
"""
UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
"""
return pulumi.get(self, "uid")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class NoSQLInstanceSpecClassSelector(dict):
"""
A ClassSelector specifies labels that will be used to select a resource class for this claim. If multiple classes match the labels one will be chosen at random.
"""
def __init__(__self__, *,
match_expressions: Optional[Sequence['outputs.NoSQLInstanceSpecClassSelectorMatchExpressions']] = None,
match_labels: Optional[Mapping[str, str]] = None):
"""
A ClassSelector specifies labels that will be used to select a resource class for this claim. If multiple classes match the labels one will be chosen at random.
:param Sequence['NoSQLInstanceSpecClassSelectorMatchExpressionsArgs'] match_expressions: matchExpressions is a list of label selector requirements. The requirements are ANDed.
:param Mapping[str, str] match_labels: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
"""
        if
.text could be 'sw-2 cut' or 'thr imp'
# .attrib could be type=cut st=sw base=-2 or type=fat base=1d+1
_st = ''
_type = ''
_base = ''
if damage_element.text is not None and len(damage_element.text) > 0:
# For strength-based damage (like a knife): sw-3 cut
match = re.match(
'(?P<st>[a-zA-Z]+)' +
'(?P<base>[+-]?[0-9]*) *' +
'(?P<type>[a-zA-Z]+).*',
damage_element.text)
if match is not None: # NOTE: sick stick has damage 1d+1 fat
_st = match.group('st')
_type = match.group('type')
_base = (0 if len(match.group('base')) == 0
else int(match.group('base')))
new_thing['damage'][_st] = {'plus': _base, 'type': _type}
else:
# For non-strength-based damage (like a sick stick): 1d1+1 fat
match = re.match(
'(?P<dice>[0-9]*)(?P<d>d?)' +
'(?P<sign>[+-]?)(?P<plus>[0-9]*) *' +
'(?P<type>[a-zA-Z]*)',
damage_element.text)
if match is not None:
_num_dice = (0 if (match.group('dice') is None or
len(match.group('dice')) == 0) else
int(match.group('dice')))
_plus = (0 if (match.group('plus') is None or
len(match.group('plus')) == 0) else
int(match.group('plus')))
if (match.group('sign') is not None and
len(match.group('sign')) > 0):
sign = 1 if match.group('sign') == '+' else -1
_plus *= sign
_type = ('pi'
if (match.group('type') is None or
len(match.group('type')) == 0) else
match.group('type'))
new_thing['damage']['dice'] = {
'num_dice': _num_dice,
'plus': _plus,
'type': _type}
elif 'st' in damage_element.attrib: # 1d+4 or -2 or 3d
_st = damage_element.attrib['st']
_base = (0 if 'base' not in damage_element.attrib else
int(damage_element.attrib['base']))
_type = ('pi' # random but after-armor damage is x1
if 'type' not in damage_element.attrib else
damage_element.attrib['type'])
new_thing['damage'][_st] = {'plus': _base, 'type': _type}
elif 'base' in damage_element.attrib: # 1d+4 or -2 or 3d
_type = ('pi' # random but after-armor damage is x1
if 'type' not in damage_element.attrib else
damage_element.attrib['type'])
match = re.match(
'(?P<dice>[0-9]*)(?P<d>d?)' +
'(?P<sign>[+-]?)(?P<plus>[0-9]*) *',
damage_element.attrib['base'])
if match is not None:
_num_dice = (0 if (match.group('dice') is None or
len(match.group('dice')) == 0) else
int(match.group('dice')))
_plus = (0 if (match.group('plus') is None or
len(match.group('plus')) == 0) else
int(match.group('plus')))
if (match.group('sign') is not None and
len(match.group('sign')) > 0):
sign = 1 if match.group('sign') == '+' else -1
_plus *= sign
new_thing['damage']['dice'] = {'num_dice': _num_dice,
'plus': _plus,
'type': _type}
self.__add_skill_to_weapon(new_thing, melee_weapon_element)
parry_element = melee_weapon_element.find('parry')
if (parry_element is not None and parry_element.text is not None
and len(parry_element.text) > 0):
parry = int(parry_element.text)
new_thing['parry'] = parry
blank_item = self.__ruleset.make_empty_melee_weapon()
for key, value in blank_item.iteritems():
if key not in new_thing:
new_thing[key] = value
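    # Examples of the damage strings parsed above (taken from the comments in
    # that method): strength-based text 'sw-2 cut' yields
    # {'sw': {'plus': -2, 'type': 'cut'}}, while non-strength text such as
    # '1d+1 fat' yields {'dice': {'num_dice': 1, 'plus': 1, 'type': 'fat'}}.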
def __get_ranged_weapon_damaage(
self,
new_thing, # dict for item
weapon_element # xml.etree ranged_weapon element
):
new_thing['type'].append('ranged weapon')
# Skill -- find the first one with skill modifier == 0
self.__add_skill_to_weapon(new_thing, weapon_element)
bulk_element = weapon_element.find('bulk')
if bulk_element is not None:
new_thing['bulk'] = int(bulk_element.text)
# accuracy x+y where |x| is the accuracy of the weapon and |y| is
# the accuracy of the built-in scope
accuracy_element = weapon_element.find('accuracy')
new_thing['acc'] = 0
if accuracy_element is not None:
values_string = accuracy_element.text.replace('+', ' ')
values = values_string.split()
for value in values:
new_thing['acc'] += int(value)
damage_element = weapon_element.find('damage')
if damage_element is not None:
new_thing['damage'] = {'dice': {}}
# ignoring armor_divisor
if 'type' in damage_element.attrib:
new_thing['damage']['dice']['type'] = (
damage_element.attrib['type'])
damage_text = None
if 'base' in damage_element.attrib: # 1d+4 or -2 or 3d
damage_text = damage_element.attrib['base']
elif damage_element.text is not None and len(damage_element.text) > 0:
damage_text = damage_element.text
if damage_text is not None:
new_thing['damage']['dice']['num_dice'] = 0
new_thing['damage']['dice']['plus'] = 0
new_thing['damage']['dice']['type'] = '* UNKNOWN *'
match = re.match(
'(?P<dice>[0-9]*)(?P<d>d?)' +
'(?P<sign>[+-]?)(?P<plus>[0-9]*).*',
damage_text)
if match:
if len(match.group('dice')) > 0:
if len(match.group('d')) == 0:
window_manager.error([
'Problem parsing base damage "%s" for %s' %
(damage_text, new_thing['name'])
])
else:
new_thing['damage']['dice']['num_dice'] = (
int(match.group('dice')))
if (len(match.group('sign')) == 0 or
match.group('sign') == '+'):
sign = 1
elif match.group('sign') == '-':
sign = -1
if len(match.group('plus')) > 0:
new_thing['damage']['dice']['plus'] = (
int(match.group('plus')) * sign)
else:
window_manager.error([
'Problem parsing base info "%s" for %s' %
(damage_text, new_thing['name'])
])
class ImportCharacter(object):
def __init__(self,
window_manager,
char_json, # dict for this char directly from the JSON
char_gcs # CharacterGcs object
):
self.__window_manager = window_manager
self.__char_json = char_json
self.__char_gcs = char_gcs
def import_data(self):
self.__import_attribs()
self.__import_advantages()
self.__import_skills()
self.__import_techniques()
self.__import_spells()
self.__import_equipment(squash=False)
def update_data(self):
changes = []
changes.extend(self.__import_attribs())
changes.extend(self.__import_advantages())
changes.extend(self.__import_skills())
changes.extend(self.__import_techniques())
changes.extend(self.__import_spells())
changes.extend(self.__import_equipment(squash=True))
if len(changes) == 0:
changes.append('Character up to date -- no changes')
return changes
@staticmethod
    def is_optional_element_equal(element, # string: comparing this
existing_item, # dict
new_item # dict
):
if element in existing_item:
if element not in new_item:
return False
if existing_item[element] != new_item[element]:
return False
elif element in new_item:
return False
return True
@staticmethod
def find_differences(existing_item, # dict:
new_item # dict:
):
'''
Returns list of differences, None if none were found.
'''
found_differences = False
differences = []
if existing_item['name'].lower() != new_item['name'].lower():
found_differences = True
differences.append('name')
return differences
if existing_item['type'] != new_item['type']:
found_differences = True
differences.append('type')
return differences
# We don't care if the counts, notes, or owners aren't the same
if 'melee weapon' in existing_item['type']:
if not ImportCharacter.is_optional_element_equal('parry',
existing_item,
new_item):
found_differences = True
differences.append('parry')
if ('skill' not in existing_item or 'skill' not in new_item or
existing_item['skill'] != new_item['skill']):
found_differences = True
differences.append('skill')
if 'ranged weapon' in existing_item['type']:
if not ImportCharacter.is_optional_element_equal('bulk',
existing_item,
new_item):
found_differences = True
differences.append('bulk')
if not ImportCharacter.is_optional_element_equal('acc',
existing_item,
new_item):
found_differences = True
differences.append('acc')
if not ImportCharacter.is_optional_element_equal('reload',
existing_item,
new_item):
found_differences = True
differences.append('reload')
if ('skill' not in existing_item or 'skill' not in new_item or
existing_item['skill'] != new_item['skill']):
found_differences = True
differences.append('skill')
if 'armor' in existing_item['type']:
if ('dr' not in existing_item or 'dr' not in new_item or
existing_item['dr'] != new_item['dr']):
found_differences = True
differences.append('dr')
if 'container' in existing_item['type']:
new_contents = copy.deepcopy(new_item['stuff'])
for thing in existing_item['stuff']:
found_match = False
for index, new_thing in enumerate(new_contents):
new_differences = ImportCharacter.find_differences(
thing, new_thing)
if new_differences is None:
new_contents.pop(index)
found_match = True
break
else:
differences.extend(new_differences)
if not found_match:
found_differences = True
if len(new_contents) > 0:
found_differences = True
return differences if found_differences else None
# Private and protected methods
def __copy_json_equipment_list(self,
equipment, # list of items
squash # Bool: do we flatten containers
):
'''
Makes a deep copy of a JSON equipment list.
This (optionally) flattens (i.e., squashes) nested containers so that
all of the items are on the same level. That allows one set of
containers in GCS and a different set of containers in the JSON.
This can copy a GCS list so long as it's been JSON-ized.
'''
new_list = []
for item in equipment:
if squash and ('container' in item['type']):
# Copy the contents
sub_list = self.__copy_json_equipment_list(item['stuff'],
squash)
new_list.extend(sub_list)
# I don't think I want to copy the container without its
# contents because the whole purpose of squashing is to
# allow differnet containers in the GCS than in the JSON.
#
# new_item = copy.deepcopy(item)
# new_item['stuff'] = []
# new_list.append(new_item)
else:
new_item = copy.deepcopy(item)
new_list.append(new_item)
return new_list
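    # Illustrative note (not from the original source): with squash=True a list
    # like [{'name': 'backpack', 'type': ['container'],
    #        'stuff': [{'name': 'rope', 'type': ['misc'], 'count': 1}]}]
    # comes back as [{'name': 'rope', 'type': ['misc'], 'count': 1}] -- the
    # container wrapper is dropped and only its contents are copied.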
def __import_attribs(self):
'''
Copies the attributes from GCS to the JSON. Puts the values in both
the current and permanent locations.
Cool for both import and update.
Returns changes (array of strings describing changes made).
'''
attrs_to_check = [ 'st' , 'dx' , 'iq' , 'ht', 'hp', 'fp', 'wi', 'per',
'basic-speed', 'basic-move' ]
changes = []
for attr_name in attrs_to_check:
attr_gcs = self.__char_gcs.char['permanent'][attr_name]
attr_json = self.__char_json['permanent'][attr_name]
if attr_gcs != attr_json:
changes.append('%s changed from %r to %r' %
(attr_name, attr_json, attr_gcs))
self.__char_json['permanent'][attr_name] = attr_gcs
self.__char_json['current'][attr_name] = attr_gcs
return changes
def __import_advantages(self):
return self.__import_heading('advantages', 'advantage')
def __import_skills(self):
return self.__import_heading('skills', 'skill')
def __import_techniques(self):
# TODO: there's probably a way to combine techniques and skills (since
# they're both lists as opposed to the dicts examined by
# |__import_heading|). The challenge is that skills and techniques look
# different under the hood so the 'do we copy' stuff needs to be
# custom.
changes = []
if 'techniques' not in self.__char_json:
self.__char_json['techniques'] = []
techniques_json = self.__char_json['techniques']
if 'techniques' in self.__char_gcs.char:
techniques_gcs = copy.deepcopy(self.__char_gcs.char['techniques'])
else:
techniques_gcs = []
if len(techniques_gcs) == 0 and len(techniques_json) == 0:
# Neither has |techniques| and that's OK.
return changes
for technique_json in techniques_json:
match_gcs = None
index_gcs = None
for index, technique_gcs in enumerate(techniques_gcs):
if (technique_gcs['name'] == technique_json['name'] and
technique_gcs['default'] == technique_json['default']):
match_gcs = technique_gcs
| |
self.assertEqual(1, out)
def test_get_key_not_in_cache(self):
lc = LIFOCache(1)
sentinel = object()
out = lc.get('key', sentinel)
self.assertEqual(sentinel, out)
def test_put_key_in_cache(self):
lc = LIFOCache(1)
lc.put('key', 1)
out = lc.get('key', object())
self.assertEqual(1, out)
self.assertEqual(1, lc.hits)
self.assertEqual(0, lc.misses)
def test_put_existing_key_in_cache(self):
lc = LIFOCache(1)
lc.put('key', 1)
lc.put('key', 2)
out = lc.get('key', object())
self.assertEqual(2, out)
self.assertEqual(1, lc.hits)
self.assertEqual(0, lc.misses)
def test_key_evicts_when_full(self):
sentinel = object()
lc = LIFOCache(2)
lc.put('key1', 1)
lc.put('key2', 2)
out1 = lc.get('key1', sentinel)
# key2 gets evicted
lc.put('key3', 3)
# key3 gets evicted
lc.put('key4', 4)
out2 = lc.get('key2', sentinel)
out3 = lc.get('key3', sentinel)
out4 = lc.get('key4', sentinel)
out5 = lc.get('key1', sentinel)
self.assertEqual(1, out1)
self.assertEqual(sentinel, out2)
self.assertEqual(sentinel, out3)
self.assertEqual(4, out4)
self.assertEqual(1, out5)
class TestLFUCache(unittest.TestCase):
def test_invalid_size(self):
with self.assertRaises(ValueError):
LFUCache(0)
def test_current_size_when_empty(self):
lc = LFUCache(1)
self.assertEqual(0, lc.current_size)
def test_current_size_with_items(self):
lc = LFUCache(2)
lc.put('key1', 1)
lc.put('key2', 2)
self.assertEqual(2, lc.current_size)
def test_current_size_with_full_cache(self):
lc = LFUCache(2)
lc.put('key1', 1)
lc.put('key2', 2)
self.assertEqual(2, lc.current_size)
def test_max_size(self):
lc = LFUCache(1)
self.assertEqual(1, lc.max_size)
def test_hits_none(self):
lc = LFUCache(1)
lc.get('key', object())
lc.get('key', object())
self.assertEqual(0, lc.hits)
def test_hits_some(self):
lc = LFUCache(2)
lc.put('key', object())
lc.get('key', object())
lc.get('key', object())
self.assertEqual(2, lc.hits)
def test_misses(self):
lc = LFUCache(1)
lc.get('key', object())
lc.get('key', object())
self.assertEqual(2, lc.misses)
def test_misses_none(self):
lc = LFUCache(2)
lc.put('key', object())
lc.get('key', object())
lc.get('key', object())
self.assertEqual(0, lc.misses)
def test_clear_with_empty_cache(self):
lc = LFUCache(1)
lc.clear()
self.assertEqual({}, lc._map)
self.assertEqual(0, len(lc._freq_list))
self.assertEqual(0, lc.hits)
self.assertEqual(0, lc.misses)
def test_clear_with_items(self):
lc = LFUCache(1)
lc.put('key1', 1)
lc.put('key2', 2)
lc.clear()
self.assertEqual({}, lc._map)
self.assertEqual(0, len(lc._freq_list))
self.assertEqual(0, lc.hits)
self.assertEqual(0, lc.misses)
def test_get_key_in_cache(self):
lc = LFUCache(1)
lc.put('key', 1)
out = lc.get('key', object())
self.assertEqual(1, out)
def test_get_key_not_in_cache(self):
lc = LFUCache(1)
sentinel = object()
out = lc.get('key', sentinel)
self.assertEqual(sentinel, out)
def test_put_key_in_cache(self):
lc = LFUCache(1)
lc.put('key', 1)
out = lc.get('key', object())
self.assertEqual(1, out)
self.assertEqual(1, lc.hits)
self.assertEqual(0, lc.misses)
def test_put_existing_key_in_cache(self):
lc = LFUCache(1)
lc.put('key', 1)
lc.put('key', 2)
out = lc.get('key', object())
self.assertEqual(2, out)
self.assertEqual(1, lc.hits)
self.assertEqual(0, lc.misses)
def test_key_evicts_when_full(self):
sentinel = object()
lc = LFUCache(1)
lc.put('key1', 1)
lc.put('key2', 2)
out1 = lc.get('key1', sentinel)
out2 = lc.get('key2', sentinel)
self.assertEqual(sentinel, out1)
self.assertEqual(2, out2)
def test_key_evicts_least_frequent(self):
sentinel = object()
lc = LFUCache(3)
lc.put('key1', 1)
lc.put('key2', 2)
lc.put('key3', 3)
out1 = lc.get('key2', sentinel)
out2 = lc.get('key1', sentinel)
out3 = lc.get('key2', sentinel)
# key3 should be evicted
lc.put('key4', 4)
out4 = lc.get('key1', sentinel)
# key4 should be evicted
lc.put('key5', 5)
out5 = lc.get('key2', sentinel)
out6 = lc.get('key3', sentinel)
out7 = lc.get('key4', sentinel)
out8 = lc.get('key5', sentinel)
out9 = lc.get('key5', sentinel)
out10 = lc.get('key2', sentinel)
out11 = lc.get('key5', sentinel)
self.assertEqual(2, out1)
self.assertEqual(1, out2)
self.assertEqual(2, out3)
self.assertEqual(1, out4)
self.assertEqual(2, out5)
self.assertEqual(sentinel, out6)
self.assertEqual(sentinel, out7)
self.assertEqual(5, out8)
self.assertEqual(5, out9)
self.assertEqual(2, out10)
self.assertEqual(5, out11)
def test_key_evicts_random_if_tie(self):
sentinel = object()
lc = LFUCache(2)
lc.put('key1', 1)
lc.put('key2', 2)
lc.put('key3', 3)
out1 = lc.get('key1', sentinel)
out2 = lc.get('key2', sentinel)
out3 = lc.get('key3', sentinel)
self.assertEqual(1, len({1, 2, 3}.difference({out1, out2, out3})))
class TestLRUCache(unittest.TestCase):
def test_invalid_size(self):
with self.assertRaises(ValueError):
LRUCache(0)
def test_current_size_when_empty(self):
lc = LRUCache(1)
self.assertEqual(0, lc.current_size)
def test_current_size_with_items(self):
lc = LRUCache(2)
lc.put('key1', 1)
lc.put('key2', 2)
self.assertEqual(2, lc.current_size)
def test_current_size_with_full_cache(self):
lc = LRUCache(2)
lc.put('key1', 1)
lc.put('key2', 2)
self.assertEqual(2, lc.current_size)
def test_max_size(self):
lc = LRUCache(1)
self.assertEqual(1, lc.max_size)
def test_hits_none(self):
lc = LRUCache(1)
lc.get('key', object())
lc.get('key', object())
self.assertEqual(0, lc.hits)
def test_hits_some(self):
lc = LRUCache(2)
lc.put('key', object())
lc.get('key', object())
lc.get('key', object())
self.assertEqual(2, lc.hits)
def test_misses(self):
lc = LRUCache(1)
lc.get('key', object())
lc.get('key', object())
self.assertEqual(2, lc.misses)
def test_misses_none(self):
lc = LRUCache(2)
lc.put('key', object())
lc.get('key', object())
lc.get('key', object())
self.assertEqual(0, lc.misses)
def test_clear_with_empty_cache(self):
lc = LRUCache(1)
lc.clear()
self.assertEqual({}, lc._map)
self.assertEqual(0, len(lc._queue))
self.assertEqual(0, lc.hits)
self.assertEqual(0, lc.misses)
def test_clear_with_items(self):
lc = LRUCache(1)
lc.put('key1', 1)
lc.put('key2', 2)
lc.clear()
self.assertEqual({}, lc._map)
self.assertEqual(0, len(lc._queue))
self.assertEqual(0, lc.hits)
self.assertEqual(0, lc.misses)
def test_get_key_in_cache(self):
lc = LRUCache(1)
lc.put('key', 1)
out = lc.get('key', object())
self.assertEqual(1, out)
def test_get_key_not_in_cache(self):
lc = LRUCache(1)
sentinel = object()
out = lc.get('key', sentinel)
self.assertEqual(sentinel, out)
def test_put_key_in_cache(self):
lc = LRUCache(1)
lc.put('key', 1)
out = lc.get('key', object())
self.assertEqual(1, out)
self.assertEqual(1, lc.hits)
self.assertEqual(0, lc.misses)
def test_put_existing_key_in_cache(self):
lc = LRUCache(1)
lc.put('key', 1)
lc.put('key', 2)
out = lc.get('key', object())
self.assertEqual(2, out)
self.assertEqual(1, lc.hits)
self.assertEqual(0, lc.misses)
def test_key_evicts_when_full(self):
sentinel = object()
lc = LRUCache(1)
lc.put('key1', 1)
lc.put('key2', 2)
out1 = lc.get('key1', sentinel)
out2 = lc.get('key2', sentinel)
self.assertEqual(sentinel, out1)
self.assertEqual(2, out2)
def test_key_evicts_least_recent(self):
sentinel = object()
lc = LRUCache(3)
lc.put('key1', 1)
lc.put('key2', 2)
lc.put('key3', 3)
out1 = lc.get('key2', sentinel)
out2 = lc.get('key1', sentinel)
out3 = lc.get('key2', sentinel)
# key3 should be evicted
lc.put('key4', 4)
out4 = lc.get('key1', sentinel)
# key2 should be evicted
lc.put('key5', 5)
out5 = lc.get('key2', sentinel)
out6 = lc.get('key3', sentinel)
out7 = lc.get('key4', sentinel)
out8 = lc.get('key5', sentinel)
self.assertEqual(2, out1)
self.assertEqual(1, out2)
self.assertEqual(2, out3)
self.assertEqual(1, out4)
self.assertEqual(sentinel, out5)
self.assertEqual(sentinel, out6)
self.assertEqual(4, out7)
self.assertEqual(5, out8)
class TestMFUCache(unittest.TestCase):
def test_invalid_size(self):
with self.assertRaises(ValueError):
MFUCache(0)
def test_current_size_when_empty(self):
mc = MFUCache(1)
self.assertEqual(0, mc.current_size)
def test_current_size_with_items(self):
mc = MFUCache(2)
mc.put('key1', 1)
mc.put('key2', 2)
self.assertEqual(2, mc.current_size)
def test_current_size_with_full_cache(self):
mc = MFUCache(2)
mc.put('key1', 1)
mc.put('key2', 2)
self.assertEqual(2, mc.current_size)
def test_max_size(self):
mc = MFUCache(1)
self.assertEqual(1, mc.max_size)
def test_hits_none(self):
mc = MFUCache(1)
mc.get('key', object())
mc.get('key', object())
self.assertEqual(0, mc.hits)
def test_hits_some(self):
mc = MFUCache(2)
mc.put('key', object())
mc.get('key', object())
mc.get('key', object())
self.assertEqual(2, mc.hits)
def test_misses(self):
mc = MFUCache(1)
mc.get('key', object())
mc.get('key', object())
self.assertEqual(2, mc.misses)
def test_misses_none(self):
mc = MFUCache(2)
mc.put('key', object())
mc.get('key', object())
mc.get('key', object())
self.assertEqual(0, mc.misses)
def test_clear_with_empty_cache(self):
mc = MFUCache(1)
mc.clear()
self.assertEqual({}, mc._map)
self.assertEqual(0, len(mc._freq_list))
self.assertEqual(0, mc.hits)
self.assertEqual(0, mc.misses)
def test_clear_with_items(self):
mc = MFUCache(1)
mc.put('key1', 1)
mc.put('key2', 2)
mc.clear()
self.assertEqual({}, mc._map)
self.assertEqual(0, len(mc._freq_list))
self.assertEqual(0, mc.hits)
self.assertEqual(0, mc.misses)
def test_get_key_in_cache(self):
mc = MFUCache(1)
mc.put('key', 1)
out = mc.get('key', object())
self.assertEqual(1, out)
def test_get_key_not_in_cache(self):
mc = MFUCache(1)
sentinel = object()
out = mc.get('key', sentinel)
self.assertEqual(sentinel, out)
def test_put_key_in_cache(self):
mc = MFUCache(1)
mc.put('key', 1)
out = mc.get('key', object())
self.assertEqual(1, out)
self.assertEqual(1, mc.hits)
self.assertEqual(0, mc.misses)
def test_put_existing_key_in_cache(self):
mc = MFUCache(1)
mc.put('key', 1)
mc.put('key', 2)
out = mc.get('key', object())
self.assertEqual(2, out)
self.assertEqual(1, mc.hits)
self.assertEqual(0, mc.misses)
def test_key_evicts_when_full(self):
sentinel = object()
mc = MFUCache(1)
mc.put('key1', 1)
mc.put('key2', 2)
out1 = mc.get('key1', sentinel)
out2 = mc.get('key2', sentinel)
self.assertEqual(sentinel, out1)
self.assertEqual(2, out2)
def test_key_evicts_most_frequent(self):
sentinel = object()
mc = MFUCache(2)
mc.put('key1', 1)
mc.put('key2', 2)
out1 = mc.get('key1', sentinel)
out2 = mc.get('key2', sentinel)
out3 = mc.get('key2', sentinel)
# key2 should be evicted
mc.put('key3', 3)
out4 = mc.get('key2', sentinel)
# key1 should be evicted
mc.put('key4', 4)
out5 = mc.get('key1', sentinel)
out6 = mc.get('key3', sentinel)
out7 = mc.get('key4', sentinel)
self.assertEqual(1, out1)
self.assertEqual(2, out2)
self.assertEqual(2, out3)
self.assertEqual(sentinel, out4)
self.assertEqual(sentinel, out5)
self.assertEqual(3, out6)
self.assertEqual(4, out7)
def test_key_evicts_random_if_tie(self):
sentinel = object()
mc = MFUCache(2)
mc.put('key1', 1)
mc.put('key2', 2)
mc.put('key3', 3)
out1 = mc.get('key1', sentinel)
out2 = mc.get('key2', sentinel)
out3 = mc.get('key3', sentinel)
self.assertEqual(1, len({1, 2, 3}.difference({out1, out2, out3})))
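# ---------------------------------------------------------------------------
# The TestMFUCache cases above exercise a most-frequently-used policy: when
# the cache is full, the key with the highest access count is evicted, with a
# random choice on ties. The real MFUCache (imported elsewhere in this module)
# apparently keeps a `_map` and a `_freq_list`; the sketch below is a hedged
# illustration using plain dicts and is not the class under test.
import random


class _MFUCacheSketch:
    """Minimal most-frequently-used cache matching the behaviour tested above."""

    def __init__(self, max_size):
        if max_size < 1:
            raise ValueError("max_size must be a positive integer")
        self.max_size = max_size
        self.hits = 0
        self.misses = 0
        self._map = {}   # key -> value
        self._freq = {}  # key -> number of successful get() calls

    @property
    def current_size(self):
        return len(self._map)

    def get(self, key, default=None):
        if key in self._map:
            self.hits += 1
            self._freq[key] += 1
            return self._map[key]
        self.misses += 1
        return default

    def put(self, key, value):
        if key not in self._map and len(self._map) >= self.max_size:
            top = max(self._freq.values())
            victim = random.choice([k for k, f in self._freq.items() if f == top])
            del self._map[victim]
            del self._freq[victim]
        self._map[key] = value
        self._freq.setdefault(key, 0)

    def clear(self):
        self._map.clear()
        self._freq.clear()
        self.hits = 0
        self.misses = 0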
class TestMQCache(unittest.TestCase):
def test_invalid_size(self):
with self.assertRaises(ValueError):
MQCache(0, 1, 1)
def test_invalid_buffer_size(self):
with self.assertRaises(ValueError):
MQCache(1, 0, 1)
def test_invalid_expire_time(self):
with self.assertRaises(ValueError):
MQCache(1, 1, 0)
def test_invalid_num_queues(self):
with self.assertRaises(ValueError):
MQCache(1, 1, 1, 0)
def test_current_size_when_empty(self):
mc = MQCache(1, 1, 2)
self.assertEqual(0, mc.current_size)
def test_current_size_with_items(self):
mc = MQCache(1, 1, 2)
mc.put('key1', 1)
mc.put('key2', 2)
self.assertEqual(2, mc.current_size)
def test_current_size_with_full_cache(self):
mc = MQCache(1, 1, 2)
mc.put('key1', 1)
mc.put('key2', 2)
mc.put('key3', 3)
self.assertEqual(2, mc.current_size)
def test_max_size(self):
mc = MQCache(4, 3, 2)
self.assertEqual(7, mc.max_size)
def test_hits_none(self):
mc = MQCache(1, 1, 1)
mc.get('key', object())
mc.get('key', object())
self.assertEqual(0, mc.hits)
def test_hits_some(self):
mc = MQCache(1, 1, 1)
mc.put('key', object())
mc.get('key', object())
mc.get('key', object())
self.assertEqual(2, mc.hits)
def test_misses(self):
mc = MQCache(1, 1, 1)
mc.get('key', object())
mc.get('key', object())
self.assertEqual(2, mc.misses)
def test_misses_none(self):
mc = MQCache(2, 1, 1)
mc.put('key', object())
mc.get('key', object())
mc.get('key', object())
self.assertEqual(0, mc.misses)
def test_clear_with_empty_cache(self):
mc = MQCache(1, 1, 1)
mc.clear()
self.assertEqual({}, mc._map)
self.assertEqual({}, mc._buffer_map)
for i in range(mc._num_queues):
self.assertEqual(0, len(mc._queues[i]))
self.assertEqual(0, mc.hits)
self.assertEqual(0, mc.misses)
def test_clear_with_items(self):
mc = MQCache(1, 1, 1)
mc.put('key1', 1)
mc.put('key2', 2)
mc.clear()
self.assertEqual({}, mc._map)
self.assertEqual({}, mc._buffer_map)
for i in range(mc._num_queues):
self.assertEqual(0, len(mc._queues[i]))
self.assertEqual(0, mc.hits)
self.assertEqual(0, mc.misses)
def test_get_key_in_cache(self):
mc = MQCache(1, 1, 1)
mc.put('key', 1)
out = mc.get('key', object())
self.assertEqual(1, out)
def test_get_key_not_in_cache(self):
mc = MQCache(1, 1, 1)
sentinel = object()
out = mc.get('key', sentinel)
self.assertEqual(sentinel, out)
def test_put_key_in_cache(self):
mc = MQCache(1, 1, 1)
mc.put('key', 1)
out = mc.get('key', object())
self.assertEqual(1, out)
self.assertEqual(1, mc.hits)
self.assertEqual(0, mc.misses)
def test_put_existing_key_in_cache(self):
mc = MQCache(1, 1, 1)
mc.put('key', 1)
mc.put('key', 2)
out = mc.get('key', object())
        self.assertEqual(2, out)
        self.assertEqual(1, mc.hits)
        self.assertEqual(0, mc.misses)
# Repository: xiongxianzhu/qingmi
# coding: utf-8
import random
from datetime import datetime
from qingmi.base import db, cache
from qingmi.utils import today
class Item(db.Document):
""" 选项 """
MENU_ICON = 'gear'
TYPE = db.choices(INT='整数', FLOAT='浮点数', STRING='字符串', BOOLEAN='布尔值')
name = db.StringField(max_length=40, verbose_name='名称')
key = db.StringField(max_length=40, verbose_name='键名')
data_type = db.StringField(
default=TYPE.INT,
choices=TYPE.CHOICES,
verbose_name='类型')
value = db.DynamicField(verbose_name='值')
created_at = db.DateTimeField(default=datetime.now, verbose_name='创建时间')
updated_at = db.DateTimeField(default=datetime.now, verbose_name='更新时间')
meta = dict(
indexes=[
'key',
'-created_at'
],
ordering=['-created_at'],
)
@staticmethod
# @cache.memoize(timeout=5)
def get(key, value=0, name=None):
""" 取值(整型/浮点型) """
item = Item.objects(key=key).first()
if item:
try:
if item.data_type == Item.TYPE.INT:
return int(item.value)
if item.data_type == Item.TYPE.FLOAT:
return float(item.value)
except ValueError as e:
return 0
data_type = Item.TYPE.FLOAT if isinstance(
value, float) else Item.TYPE.INT
Item(key=key, data_type=data_type, value=value, name=name).save()
return value
@staticmethod
def set(key, value=0, name=None):
""" 设置值(整型/浮点型) """
item = Item.objects(key=key).first()
if not item:
item = Item(key=key)
if name is not None:
item.name = name
item.data_type = Item.TYPE.FLOAT if isinstance(
value, float) else Item.TYPE.INT
item.value = value
item.updated_at = datetime.now()
item.save()
@staticmethod
def inc(key, start=0, value=1, name=None):
""" 递增,步长为num, 默认递增1; 不存在则创建 """
params = dict(inc__value=value, set__updated_at=datetime.now())
if name is not None:
params['set__name'] = name
item = Item.objects(key=key).modify(**params)
if not item:
params = dict(
key=key,
data_type=Item.TYPE.INT,
value=start + value)
if name is not None:
params['name'] = name
Item(**params).save()
return start + value
else:
return item.value + value
@staticmethod
# @cache.memoize(timeout=5)
def text(key, value='', name=None):
""" 取值(字符串) """
item = Item.objects(key=key).first()
if item:
return str(item.value)
Item(
key=key,
data_type=Item.TYPE.STRING,
value=value,
name=name).save()
return value
@staticmethod
def set_text(key, value='', name=None):
""" 设置值(字符串) """
item = Item.objects(key=key).first()
if not item:
item = Item(key=key)
if name is not None:
item.name = name
item.data_type = Item.TYPE.STRING
item.value = value
item.updated_at = datetime.now()
item.save()
@staticmethod
# @cache.memoize(timeout=5)
def bool(key, value=False, name=None):
""" 取值(布尔类型) """
item = Item.objects(key=key).first()
if item:
return True if item.value in ['true', 'True', True] else False
Item(
key=key,
data_type=Item.TYPE.BOOLEAN,
value=value,
name=name).save()
return value
@staticmethod
def set_bool(key, value=False, name=None):
""" 设置值(布尔类型) """
item = Item.objects(key=key).first()
if not item:
item = Item(key=key)
if name is not None:
item.name = name
item.data_type = Item.TYPE.BOOLEAN
item.value = value
item.updated_at = datetime.now()
item.save()
return True if value in ['true', 'True', True] else False
@staticmethod
def choice(key, value='', name=None, sep='|', coerce=str):
return coerce(random.choice(Item.text(key, value, name).split(sep)))
@staticmethod
def list(key, value='', name=None, sep='|', coerce=int):
return [coerce(x) for x in Item.text(key, value, name).split(sep)]
@staticmethod
def group(key, value='', name=None, sep='|', sub='-', coerce=int):
texts = Item.text(key, value, name).split(sep)
return [[coerce(y) for y in x.split(sub)] for x in texts]
@staticmethod
def hour(key, value='', name=None, sep='|', sub='-', default=None):
h = datetime.now().hour
for x in Item.group(key, value, name, sep, sub):
if x[0] <= h <= x[1]:
return x
return default
@staticmethod
def time(key, value='', name=None):
format_str = "%Y-%m-%d %H:%M:%S"
value = Item.text(key, datetime.now().strftime(format_str), name)
try:
value = datetime.strptime(value, format_str)
except BaseException:
pass
return value
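# ---------------------------------------------------------------------------
# Illustrative usage of the Item helpers above (not part of the original
# module). This is a hedged sketch: it assumes a MongoDB connection has been
# configured through qingmi.base.db, and every key/name below is a made-up
# example value. The function is never called at import time.
def _example_item_usage():
    # Numeric values: get() creates the item on first use, inc() uses a
    # server-side modify and returns the value after the increment.
    Item.set('signup_bonus', 10, name='signup bonus')
    bonus = Item.get('signup_bonus')                          # -> 10
    count = Item.inc('signup_count')                          # -> 1, then 2, 3, ...

    # Strings and booleans.
    Item.set_text('welcome_text', 'hello')
    greeting = Item.text('welcome_text')                      # -> 'hello'
    enabled = Item.bool('feature_enabled', True)              # -> True (created on first use)

    # Helpers built on top of text(): random choice, lists and hour ranges.
    weight = Item.choice('ab_weights', '1|2|3', coerce=int)   # one of 1, 2, 3
    levels = Item.list('levels', '10|20|30')                  # [10, 20, 30]
    windows = Item.group('service_hours', '9-12|14-18')       # [[9, 12], [14, 18]]
    current = Item.hour('service_hours', '9-12|14-18')        # window containing the current hour, or None
    return bonus, count, greeting, enabled, weight, levels, windows, current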
# class StatsLog(db.Document):
# """ 统计日志 """
# MENU_ICON = 'bar-chart'
# key = db.StringField(verbose_name='键名')
# uid = db.StringField(verbose_name='用户ID')
# xid = db.StringField(verbose_name='其他ID')
# label = db.StringField(verbose_name='标签')
# day = db.StringField(verbose_name='日期')
# hour = db.IntField(default=0, verbose_name='小时')
# value = db.IntField(default=0, verbose_name='结果')
# created_at = db.DateTimeField(default=datetime.now, verbose_name='创建时间')
# updated_at = db.DateTimeField(default=datetime.now, verbose_name='更新时间')
# meta = dict(
# indexes=[
# '-created_at',
# ('key', 'day', 'hour'),
# ('key', 'uid', 'xid', 'label', 'day', 'hour'),
# ],
# ordering=['-created_at'],
# )
# @staticmethod
# def get(key, uid='', xid='', label='', day=lambda: today(), hour=-1, value=0, save=True):
# """ 取值 """
# if callable(day):
# day = day()
# day = str(day)[:10]
# item = StatsLog.objects(key=key, uid=uid, xid=xid, label=label, day=day, hour=hour).first()
# if item:
# return item.value
# if save:
# StatsLog(key=key, uid=uid, xid=xid, label=label, day=day, hour=hour, value=value).save()
# return value
# return None
# @staticmethod
# def set(key, uid='', xid='', label='', day=lambda: today(), hour=-1, value=0, save=True):
# """ 设置值 """
# if callable(day):
# day = day()
# day = str(day)[:10]
# item = StatsLog.objects(key=key, uid=uid, xid=xid, label=label, day=day, hour=hour).modify(
# set__value=value,
# set__updated_at=datetime.now(),
# )
# if item:
# return value
# if save:
# StatsLog(key=key, uid=uid, xid=xid, label=label, day=day, hour=hour, value=value).save()
# return value
# return None
# @staticmethod
# def inc(key, uid='', xid='', label='', day=lambda: today(), hour=-1, start=0, value=1, save=True):
# """ 递增 """
# if callable(day):
# day = day()
# day = str(day)[:10]
# item = StatsLog.objects(key=key, uid=uid, xid=xid, label=label, day=day, hour=hour).modify(
# inc__value=value,
# set__updated_at=datetime.now()
# )
# if item:
# return item.value + value
# if save:
# StatsLog(key=key, uid=uid, xid=xid, label=label, day=day, hour=hour, value=start+value).save()
# return start+value
# return None
# @staticmethod
# def xget(key, uid='', xid='', label='', day='', hour=-1, value=0, save=True):
# """ 取值 """
# return StatsLog.get(key, uid, xid, label, day, hour, value, save)
# @staticmethod
# def xset(key, uid='', xid='', label='', day='', hour=-1, value=0, save=True):
# """ 设置值 """
# return StatsLog.set(key, uid, xid, label, day, hour, value, save)
# @staticmethod
# def xinc(key, uid='', xid='', label='', day='', hour=-1, start=0, value=1, save=True):
# """ 递增 """
# return StatsLog.inc(key, uid, xid, label, day, hour, start, value, save)
# @staticmethod
# def get_bool(key, uid='', xid='', label='', day=lambda: today(), hour=-1, value=False, save=True):
# """ 取布尔值 """
# value = StatsLog.get(key, uid, xid, label, day, hour, 1 if value else 0, save)
# if value is None:
# return None
# if value is 1:
# return True
# return False
# @staticmethod
# def set_bool(key, uid='', xid='', label='', day=lambda: today(), hour=-1, value=False, save=True):
# """ 设置布尔值 """
# value = StatsLog.set(key, uid, xid, label, day, hour, 1 if value else 0, save)
# if value is None:
# return None
# if value is 1:
# return True
# return False
# @staticmethod
# def xget_bool(key, uid='', xid='', label='', day='', hour=-1, value=False, save=True):
# """ 取布尔值 """
# return StatsLog.get_bool(key, uid, xid, label, day, hour, value, save)
# @staticmethod
# def xset_bool(key, uid='', xid='', label='', day='', hour=-1, value=False, save=True):
# """ 设置布尔值 """
# return StatsLog.set_bool(key, uid, xid, label, day, hour, value, save)
class StatsLog(db.Document):
""" 统计日志 """
MENU_ICON = 'bar-chart'
TYPE = db.choices(INT='整数', FLOAT='浮点数', STRING='字符串', BOOLEAN='布尔值')
name = db.StringField(max_length=40, verbose_name='名称')
key = db.StringField(max_length=128, verbose_name='键名')
uid = db.StringField(max_length=128, verbose_name='用户ID')
xid = db.StringField(max_length=128, verbose_name='其他ID')
data_type = db.StringField(
default=TYPE.INT,
choices=TYPE.CHOICES,
verbose_name='类型')
label = db.StringField(max_length=128, verbose_name='标签')
day = db.StringField(max_length=20, verbose_name='日期')
hour = db.IntField(default=0, verbose_name='小时')
value = db.DynamicField(verbose_name='值')
created_at = db.DateTimeField(default=datetime.now, verbose_name='创建时间')
updated_at = db.DateTimeField(default=datetime.now, verbose_name='更新时间')
meta = dict(
indexes=[
'key',
'-created_at',
'-updated_at',
('key', 'uid'),
('key', 'day', 'hour'),
('key', 'uid', 'xid', 'label', 'day', 'hour'),
],
ordering=['-created_at'],
)
@staticmethod
def get(key, uid='', xid='', label='', day=lambda: today(),
hour=-1, value=0, name=None, save=True):
""" 取值(整型/浮点型) """
if callable(day):
day = day()
day = str(day)[:10]
log = StatsLog.objects(
key=key,
uid=uid,
xid=xid,
label=label,
day=day,
hour=hour).first()
if log:
if name is not None:
log.name = name
log.save()
try:
if log.data_type == StatsLog.TYPE.INT:
return int(log.value)
if log.data_type == StatsLog.TYPE.FLOAT:
return float(log.value)
except ValueError as e:
return 0
if save:
data_type = StatsLog.TYPE.FLOAT if isinstance(
value, float) else StatsLog.TYPE.INT
StatsLog(
key=key,
data_type=data_type,
uid=uid,
xid=xid,
label=label,
day=day,
hour=hour,
value=value,
name=name).save()
return value
return None
@staticmethod
def set(key, uid='', xid='', label='', day=lambda: today(),
hour=-1, value=0, name=None, save=True):
""" 设置值(整型/浮点型) """
if callable(day):
day = day()
day = str(day)[:10]
data_type = StatsLog.TYPE.FLOAT if isinstance(
value, float) else StatsLog.TYPE.INT
params = dict(
set__value=value,
set__data_type=data_type,
set__updated_at=datetime.now())
if name is not None:
params['set__name'] = name
log = StatsLog.objects(key=key, uid=uid, xid=xid, label=label,
day=day, hour=hour).modify(**params)
if log:
return value
if save:
StatsLog(
key=key,
data_type=data_type,
uid=uid,
xid=xid,
label=label,
day=day,
hour=hour,
value=value,
name=name).save()
return value
return None
@staticmethod
def inc(key, uid='', xid='', label='', day=lambda: today(),
hour=-1, start=0, value=1, name=None, save=True):
""" 递增(整型/浮点型) """
if callable(day):
day = day()
day = str(day)[:10]
data_type = StatsLog.TYPE.FLOAT if isinstance(
value, float) else StatsLog.TYPE.INT
params = dict(
inc__value=value,
set__data_type=data_type,
set__updated_at=datetime.now())
if name is not None:
params['set__name'] = name
log = StatsLog.objects(key=key, uid=uid, xid=xid, label=label,
day=day, hour=hour).modify(**params)
if log:
return log.value + value
if save:
StatsLog(
key=key,
data_type=data_type,
uid=uid,
xid=xid,
label=label,
day=day,
hour=hour,
value=start + value,
name=name).save()
return start + value
return None
@staticmethod
def text(key, uid='', xid='', label='', day=lambda: today(),
hour=-1, value='', name=None, save=True):
""" 取值(字符串) """
if callable(day):
day = day()
day = str(day)[:10]
log = StatsLog.objects(key=key, uid=uid, xid=xid, label=label,
day=day, hour=hour).first()
if log:
if name is not None:
log.name = name
log.save()
return log.value
if save:
StatsLog(
key=key,
data_type=StatsLog.TYPE.STRING,
uid=uid,
xid=xid,
label=label,
day=day,
hour=hour,
value=value,
name=name).save()
return value
return None
@staticmethod
def set_text(key, uid='', xid='', label='', day=lambda: today(),
hour=-1, value='', name=None, save=True):
""" 设置值(字符串) """
if callable(day):
day = day()
day = str(day)[:10]
params = dict(set__value=value, set__updated_at=datetime.now())
if name is not None:
params['set__name'] = name
log = StatsLog.objects(key=key, uid=uid, xid=xid, label=label,
day=day, hour=hour).modify(**params)
if log:
return value
if save:
StatsLog(
key=key,
data_type=StatsLog.TYPE.STRING,
uid=uid,
                xid=xid,
                label=label,
                day=day,
                hour=hour,
                value=value,
                name=name).save()
            return value
        return None
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation
from matplotlib import cm
import torch
"""
Adapted from https://github.com/bamsumit/slayerPytorch and PySNN (https://github.com/BasBuller/PySNN)
"""
#########################################################
# Event base class
#########################################################
class Events:
r"""
This class provides a way to store, read, write and visualize spike Events.
Members:
* ``x`` (numpy ``int`` array): `x` index of spike Events.
* ``y`` (numpy ``int`` array): `y` index of spike Events (not used if the spatial dimension is 1).
* ``p`` (numpy ``int`` array): `polarity` or `channel` index of spike Events.
        * ``t`` (numpy ``double`` array): `timestamp` of spike Events. Time is assumed to be in ms.
Usage:
>>> TD = file_io.Events(x_events, y_events, p_events, t_events)
"""
def __init__(self, x_events, y_events, p_events, t_events):
if y_events is None:
self.dim = 1
else:
self.dim = 2
        self.x = (
            x_events if type(x_events) is np.ndarray else np.asarray(x_events)
        )  # x spatial dimension
        self.y = (
            y_events if type(y_events) is np.ndarray else np.asarray(y_events)
        )  # y spatial dimension
        self.p = (
            p_events if type(p_events) is np.ndarray else np.asarray(p_events)
        )  # spike polarity
        self.t = (
            t_events if type(t_events) is np.ndarray else np.asarray(t_events)
        )  # time stamp in ms
self.p -= self.p.min()
def to_spike_array(self, sampling_time=1, dim=None):
r"""
        Returns a numpy tensor that contains the spike Events sampled in bins of `sampling_time`.
        The array is of dimension (channels, height, time) or ``CHT`` for 1D data.
        The array is of dimension (channels, height, width, time) or ``CHWT`` for 2D data.
        Arguments:
            * ``sampling_time``: the width of time bin to use.
            * ``dim``: the dimension of the desired tensor. Assigns the dimension itself if not provided.
Usage:
>>> spike = TD.to_spike_array()
"""
if self.dim == 1:
if dim is None:
dim = (
np.round(max(self.p) + 1).astype(int),
np.round(max(self.x) + 1).astype(int),
np.round(max(self.t) / sampling_time + 1).astype(int),
)
frame = np.zeros((dim[0], 1, dim[1], dim[2]))
elif self.dim == 2:
if dim is None:
dim = (
np.round(max(self.p) + 1).astype(int),
np.round(max(self.y) + 1).astype(int),
np.round(max(self.x) + 1).astype(int),
np.round(max(self.t) / sampling_time + 1).astype(int),
)
frame = np.zeros((dim[0], dim[1], dim[2], dim[3]))
return self.to_spike_tensor(frame, sampling_time).reshape(dim)
def to_spike_tensor(self, empty_tensor, sampling_time=1):
r"""
        Returns a numpy tensor that contains the spike Events sampled in bins of `sampling_time`.
        The tensor is of dimension (channels, height, width, time) or ``CHWT``.
Arguments:
* ``empty_tensor`` (``numpy or torch tensor``): an empty tensor to hold spike data
* ``sampling_time``: the width of time bin to use.
Usage:
>>> spike = TD.to_spike_tensor( torch.zeros((2, 240, 180, 5000)) )
"""
if self.dim == 1:
x_events = np.round(self.x).astype(int)
p_events = np.round(self.p).astype(int)
t_events = np.round(self.t / sampling_time).astype(int)
valid_ind = np.argwhere(
(x_events < empty_tensor.shape[2])
& (p_events < empty_tensor.shape[0])
& (t_events < empty_tensor.shape[3])
)
empty_tensor[
p_events[valid_ind], 0, x_events[valid_ind], t_events[valid_ind]
] = (1 / sampling_time)
elif self.dim == 2:
x_events = np.round(self.x).astype(int)
y_events = np.round(self.y).astype(int)
p_events = np.round(self.p).astype(int)
t_events = np.round(self.t / sampling_time).astype(int)
valid_ind = np.argwhere(
(x_events < empty_tensor.shape[2])
& (y_events < empty_tensor.shape[1])
& (p_events < empty_tensor.shape[0])
& (t_events < empty_tensor.shape[3])
)
empty_tensor[
p_events[valid_ind],
y_events[valid_ind],
x_events[valid_ind],
t_events[valid_ind],
] = (1 / sampling_time)
return empty_tensor
#########################################################
# Conversion
#########################################################
def spike_array_to_events(spike_mat, sampling_time=1):
r"""
Returns TD Events from a numpy array (of dimension 3 or 4).
    The numpy array must be of dimension (channels, height, time) or ``CHT`` for 1D data.
    The numpy array must be of dimension (channels, height, width, time) or ``CHWT`` for 2D data.
Arguments:
* ``spike_mat``: numpy array with spike information.
* ``sampling_time``: time width of each time bin.
Usage:
>>> TD = file_io.spike_array_to_events(spike)
"""
if spike_mat.ndim == 3:
spike_events = np.argwhere(spike_mat > 0)
x_events = spike_events[:, 1]
y_events = None
p_events = spike_events[:, 0]
t_events = spike_events[:, 2]
elif spike_mat.ndim == 4:
spike_events = np.argwhere(spike_mat > 0)
x_events = spike_events[:, 2]
y_events = spike_events[:, 1]
p_events = spike_events[:, 0]
t_events = spike_events[:, 3]
else:
raise Exception(
"Expected numpy array of 3 or 4 dimension. It was {}".format(spike_mat.ndim)
)
return Events(x_events, y_events, p_events, t_events * sampling_time)
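# ---------------------------------------------------------------------------
# Illustrative round trip between Events and a dense spike array (not part of
# the original module; the event values are made up). With three 1D events the
# dense array has shape (polarity channels, neuron ids, time bins).
def _example_event_round_trip():
    td = Events(x_events=[0, 3, 3], y_events=None,
                p_events=[0, 1, 0], t_events=[0.0, 1.0, 2.5])
    spike = td.to_spike_array(sampling_time=1)        # here shape (2, 4, 4), non-zero entries are 1/sampling_time
    recovered = spike_array_to_events(spike, sampling_time=1)
    return td, spike, recovered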
#########################################################
# 1D reading and encoding
#########################################################
def read_1d_spikes(filename):
r"""
Reads one dimensional binary spike file and returns a TD Events.
The binary file is encoded as follows:
* Each spike Events is represented by a 40 bit number.
* First 16 bits (bits 39-24) represent the neuron_id.
* Bit 23 represents the sign of spike Events: 0=>OFF Events, 1=>ON Events.
* the last 23 bits (bits 22-0) represent the spike Events timestamp in microseconds.
Arguments:
* ``filename`` (``string``): path to the binary file.
Usage:
>>> TD = file_io.read_1d_spikes(file_path)
"""
with open(filename, "rb") as input_file:
input_byte_array = input_file.read()
input_as_int = np.asarray([x for x in input_byte_array])
x_events = (input_as_int[0::5] << 8) | input_as_int[1::5]
p_events = input_as_int[2::5] >> 7
t_events = (
(input_as_int[2::5] << 16) | (input_as_int[3::5] << 8) | (input_as_int[4::5])
) & 0x7FFFFF
return Events(
x_events, None, p_events, t_events / 1000
) # convert spike times to ms
def encode_1d_spikes(filename, TD):
r"""
Writes one dimensional binary spike file from a TD Events.
The binary file is encoded as follows:
* Each spike Events is represented by a 40 bit number.
* First 16 bits (bits 39-24) represent the neuron_id.
* Bit 23 represents the sign of spike Events: 0=>OFF Events, 1=>ON Events.
* the last 23 bits (bits 22-0) represent the spike Events timestamp in microseconds.
Arguments:
* ``filename`` (``string``): path to the binary file.
* ``TD`` (an ``file_io.Events``): TD Events.
Usage:
        >>> file_io.encode_1d_spikes(file_path, TD)
"""
    assert TD.dim == 1, f"Expected TD dimension to be 1. It was: {TD.dim}"
x_events = np.round(TD.x).astype(int)
p_events = np.round(TD.p).astype(int)
t_events = np.round(TD.t * 1000).astype(int) # encode spike time in us
output_byte_array = bytearray(len(t_events) * 5)
    output_byte_array[0::5] = np.uint8((x_events >> 8) & 0xFF).tobytes()
output_byte_array[1::5] = np.uint8((x_events & 0xFF)).tobytes()
output_byte_array[2::5] = np.uint8(
((t_events >> 16) & 0x7F) | (p_events.astype(int) << 7)
).tobytes()
output_byte_array[3::5] = np.uint8((t_events >> 8) & 0xFF).tobytes()
output_byte_array[4::5] = np.uint8(t_events & 0xFF).tobytes()
with open(filename, "wb") as output_file:
output_file.write(output_byte_array)
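# ---------------------------------------------------------------------------
# Worked example of the 40-bit record layout described in the docstrings of
# read_1d_spikes/encode_1d_spikes above (not part of the original module; the
# event values are made up). Packing and unpacking a single event by hand:
def _example_1d_bit_packing():
    neuron_id, polarity, t_us = 300, 1, 1234567       # timestamp in microseconds
    record = bytes([
        (neuron_id >> 8) & 0xFF,                      # bits 39-32: high byte of the neuron id
        neuron_id & 0xFF,                             # bits 31-24: low byte of the neuron id
        ((t_us >> 16) & 0x7F) | (polarity << 7),      # bit 23: polarity, bits 22-16: timestamp high bits
        (t_us >> 8) & 0xFF,                           # bits 15-8: timestamp middle byte
        t_us & 0xFF,                                  # bits 7-0: timestamp low byte
    ])
    # Unpacking mirrors read_1d_spikes above.
    x = (record[0] << 8) | record[1]
    p = record[2] >> 7
    t = ((record[2] << 16) | (record[3] << 8) | record[4]) & 0x7FFFFF
    assert (x, p, t) == (neuron_id, polarity, t_us)
    return x, p, t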
#########################################################
# 2D reading and encoding
#########################################################
def read_2d_spikes(filename):
r"""
Reads two dimensional binary spike file and returns a TD Events.
It is the same format used in neuromorphic datasets NMNIST & NCALTECH101.
The binary file is encoded as follows:
* Each spike Events is represented by a 40 bit number.
* First 8 bits (bits 39-32) represent the xID of the neuron.
* Next 8 bits (bits 31-24) represent the yID of the neuron.
* Bit 23 represents the sign of spike Events: 0=>OFF Events, 1=>ON Events.
* The last 23 bits (bits 22-0) represent the spike Events timestamp in microseconds.
Arguments:
* ``filename`` (``string``): path to the binary file.
Usage:
>>> TD = file_io.read_2d_spikes(file_path)
"""
with open(filename, "rb") as input_file:
input_byte_array = input_file.read()
input_as_int = np.asarray([x for x in input_byte_array])
x_events = input_as_int[0::5]
y_events = input_as_int[1::5]
p_events = input_as_int[2::5] >> 7
t_events = (
(input_as_int[2::5] << 16) | (input_as_int[3::5] << 8) | (input_as_int[4::5])
) & 0x7FFFFF
return Events(
x_events, y_events, p_events, t_events / 1000
) # convert spike times to ms
def encode_2d_spikes(filename, TD):
r"""
Writes two dimensional binary spike file from a TD Events.
It is the same format used in neuromorphic datasets NMNIST & NCALTECH101.
The binary file is encoded as follows:
* Each spike Events is represented by a 40 bit number.
* First 8 bits (bits 39-32) represent the xID of the neuron.
* Next 8 bits (bits 31-24) represent the yID of the neuron.
* Bit 23 represents the sign of spike Events: 0=>OFF Events, 1=>ON Events.
* The last 23 bits (bits 22-0) represent the spike Events timestamp in microseconds.
Arguments:
* ``filename`` (``string``): path to the binary file.
* ``TD`` (an ``file_io.Events``): TD Events.
Usage:
        >>> file_io.encode_2d_spikes(file_path, TD)
"""
    assert TD.dim == 2, f"Expected TD dimension to be 2. It was: {TD.dim}"
x_events = np.round(TD.x).astype(int)
y_events = np.round(TD.y).astype(int)
p_events = np.round(TD.p).astype(int)
t_events = np.round(TD.t * 1000).astype(int) # encode spike time in us
output_byte_array = bytearray(len(t_events) * 5)
output_byte_array[0::5] = np.uint8(x_events).tobytes()
output_byte_array[1::5] = np.uint8(y_events).tobytes()
output_byte_array[2::5] = np.uint8(
((t_events >> 16) & 0x7F) | (p_events.astype(int) << 7)
).tobytes()
output_byte_array[3::5] = np.uint8((t_events >> 8) & 0xFF).tobytes()
output_byte_array[4::5] = np.uint8(t_events & 0xFF).tobytes()
with open(filename, "wb") | |
"""
flask_clova
~~~~~~~~~~~
:copyright: (c) 2018 by <NAME>. <EMAIL>
:license: MIT, see LICENSE for more details.
    :note: The structure and ideas are adapted from flask-ask
:flask-ask github: https://github.com/johnwheeler/flask-ask
"""
import os
import yaml
import inspect
from functools import partial
from werkzeug.local import LocalProxy
from jinja2 import BaseLoader, ChoiceLoader, TemplateNotFound
from flask import make_response, current_app, json, request as flask_request, _app_ctx_stack
from . import verifier, logger
import collections
def find_clova():
"""
    Find our instance of Clova, navigating local proxies and possible blueprints.
Note: This only supports returning a reference to the first instance
of Clova found.
"""
if hasattr(current_app, 'clova'):
return getattr(current_app, 'clova')
else:
if hasattr(current_app, 'blueprints'):
blueprints = getattr(current_app, 'blueprints')
for blueprint_name in blueprints:
if hasattr(blueprints[blueprint_name], 'clova'):
return getattr(blueprints[blueprint_name], 'clova')
def dbgdump(obj, default=None, cls=None):
if current_app.config.get('CLOVA_PRETTY_DEBUG_LOGS', False):
indent = 2
else:
indent = None
msg = json.dumps(obj, indent=indent, default=default, cls=cls)
logger.debug(msg)
#Define global variables
request = LocalProxy(lambda: find_clova().request)
session = LocalProxy(lambda: find_clova().session)
version = LocalProxy(lambda: find_clova().version)
context = LocalProxy(lambda: find_clova().context)
convert_errors = LocalProxy(lambda: find_clova().convert_errors)
from . import models
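# ---------------------------------------------------------------------------
# Illustrative usage sketch for the Clova class defined below (not part of the
# original module). The route, intent name, slot mapping and plain-string
# return values are hypothetical placeholders; a real skill would return the
# response helpers (e.g. question()/statement()) referenced in the docstrings
# below. The function is never called at import time.
def _example_skill():
    from flask import Flask

    app = Flask(__name__)
    clova = Clova(app, route='/clova')  # CEK POST requests are forwarded to /clova

    @clova.launch
    def launched():
        return 'Welcome'  # placeholder response

    @clova.intent('WeatherIntent', mapping={'city': 'City'})
    def weather(city):
        return 'I predict great weather for {}'.format(city)  # placeholder response

    return app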
class Clova(object):
"""The Clova object provides the central interface for interacting with the Clova Extension Service.
Clova object maps CEK Requests to flask view functions and handles CEK sessions.
The constructor is passed a Flask App instance, and URL endpoint.
    The Flask instance allows the convenient API of endpoints and their view functions,
so that CEK requests may be mapped with syntax similar to a typical Flask server.
Route provides the entry point for the skill, and must be provided if an app is given.
Keyword Arguments:
app {Flask object} -- App instance - created with Flask(__name__) (default: {None})
route {str} -- entry point to which initial CEK Requests are forwarded (default: {None})
blueprint {Flask blueprint} -- Flask Blueprint instance to use instead of Flask App (default: {None})
stream_cache {Werkzeug BasicCache} -- BasicCache-like object for storing Audio stream data (default: {SimpleCache})
path {str} -- path to templates yaml file for VUI dialog (default: {'templates.yaml'})
"""
def __init__(self, app=None, route=None, blueprint=None, path='templates.yaml'):
self.app = app
self._route = route
self._intent_view_funcs = {}
self._intent_converts = {}
self._intent_defaults = {}
self._intent_mappings = {}
self._launch_view_func = None
self._session_ended_view_func = None
self._on_session_started_callback = None
self._default_intent_view_func = None
if app is not None:
self.init_app(app, path)
elif blueprint is not None:
self.init_blueprint(blueprint, path)
def init_app(self, app, path='templates.yaml'):
"""Initializes Clova app by setting configuration variables, loading templates, and maps Clova route to a flask view.
The Clova instance is given the following configuration variables by calling on Flask's configuration:
`CLOVA_APPLICATION_ID`:
Turn on application ID verification by setting this variable to an application ID or a
list of allowed application IDs. By default, application ID verification is disabled and a
warning is logged. This variable should be set in production to ensure
requests are being sent by the applications you specify.
Default: None
`CLOVA_VERIFY_REQUESTS`:
Enables or disables CEK request verification, which ensures requests sent to your skill
are from Naver's CEK service. This setting should not be disabled in production.
It is useful for mocking JSON requests in automated tests.
Default: True
`CLOVA_PRETTY_DEBUG_LOGS`:
Add tabs and linebreaks to the CEK request and response printed to the debug log.
This improves readability when printing to the console, but breaks formatting when logging to CloudWatch.
Default: False
"""
if self._route is None:
raise TypeError("route is a required argument when app is not None")
app.clova = self
app.add_url_rule(self._route, view_func=self._flask_view_func, methods=['POST'])
app.jinja_loader = ChoiceLoader([app.jinja_loader, YamlLoader(app, path)])
def init_blueprint(self, blueprint, path='templates.yaml'):
"""Initialize a Flask Blueprint, similar to init_app, but without the access
to the application config.
Keyword Arguments:
blueprint {Flask Blueprint} -- Flask Blueprint instance to initialize (Default: {None})
path {str} -- path to templates yaml file, relative to Blueprint (Default: {'templates.yaml'})
"""
if self._route is not None:
raise TypeError("route cannot be set when using blueprints!")
# we need to tuck our reference to this Clova instance into the blueprint object and find it later!
blueprint.clova = self
# BlueprintSetupState.add_url_rule gets called underneath the covers and
# concats the rule string, so we should set to an empty string to allow
# Blueprint('blueprint_api', __name__, url_prefix="/clova") to result in
# exposing the rule at "/clova" and not "/clova/".
blueprint.add_url_rule("", view_func=self._flask_view_func, methods=['POST'])
blueprint.jinja_loader = ChoiceLoader([YamlLoader(blueprint, path)])
@property
def clova_application_id(self):
return current_app.config.get('CLOVA_APPLICATION_ID', None)
@property
def clova_verify_requests(self):
return current_app.config.get('CLOVA_VERIFY_REQUESTS', True)
def on_session_started(self, f):
"""Decorator to call wrapped function upon starting a session.
@clova.on_session_started
def new_session():
log.info('new session started')
        Because both launch and intent requests may begin a session, this decorator is used to call
a function regardless of how the session began.
Arguments:
f {function} -- function to be called when session is started.
"""
self._on_session_started_callback = f
return f
def launch(self, f):
"""Decorator maps a view function as the endpoint for an CEK LaunchRequest and starts the skill.
@clova.launch
def launched():
return question('Welcome to Foo')
The wrapped function is registered as the launch view function and renders the response
for requests to the Launch URL.
A request to the launch URL is verified with the CEK server before the payload is
passed to the view function.
Arguments:
f {function} -- Launch view function
"""
self._launch_view_func = f
return f
def session_ended(self, f):
"""Decorator routes CEK SessionEndedRequest to the wrapped view function to end the skill.
@clova.session_ended
def session_ended():
return "{}", 200
The wrapped function is registered as the session_ended view function
and renders the response for requests to the end of the session.
Arguments:
f {function} -- session_ended view function
"""
self._session_ended_view_func = f
return f
def intent(self, intent_name, mapping=None, convert=None, default=None):
"""Decorator routes an CEK IntentRequest and provides the slot parameters to the wrapped function.
Functions decorated as an intent are registered as the view function for the Intent's URL,
and provide the backend responses to give your Skill its functionality.
@clova.intent('WeatherIntent', mapping={'city': 'City'})
def weather(city):
return statement('I predict great weather for {}'.format(city))
Arguments:
intent_name {str} -- Name of the intent request to be mapped to the decorated function
Keyword Arguments:
mapping {dict} -- Maps parameters to intent slots of a different name
default: {}
convert {dict} -- Converts slot values to data types before assignment to parameters
default: {}
            default {dict} -- Provides default values for Intent slots if CEK request
returns no corresponding slot, or a slot with an empty value
default: {}
"""
if mapping is None:
mapping = dict()
if convert is None:
convert = dict()
if default is None:
default = dict()
def decorator(f):
self._intent_view_funcs[intent_name] = f
self._intent_mappings[intent_name] = mapping
self._intent_converts[intent_name] = convert
self._intent_defaults[intent_name] = default
return f
return decorator
def default_intent(self, f):
"""Decorator routes any CEK IntentRequest that is not matched by any existing @clova.intent routing."""
self._default_intent_view_func = f
return f
@property
def request(self):
return getattr(_app_ctx_stack.top, '_clova_request', None)
@request.setter
def request(self, value):
_app_ctx_stack.top._clova_request = value
@property
def session(self):
return getattr(_app_ctx_stack.top, '_clova_session', models._Field())
@session.setter
def session(self, value):
_app_ctx_stack.top._clova_session = value
@property
def version(self):
return getattr(_app_ctx_stack.top, '_clova_version', None)
@version.setter
def version(self, value):
_app_ctx_stack.top._clova_version = value
@property
def context(self):
return getattr(_app_ctx_stack.top, '_clova_context', None)
@context.setter
def context(self, value):
_app_ctx_stack.top._clova_context = value
@property
def convert_errors(self):
return getattr(_app_ctx_stack.top, '_clova_convert_errors', None)
@convert_errors.setter
def convert_errors(self, value):
_app_ctx_stack.top._clova_convert_errors = value
def _get_user(self):
if self.context:
return self.context.get('System', {}).get('user', {}).get('userId')
return None
def _cek_request(self, verify=True):
raw_body = flask_request.data
cek_request_payload = json.loads(raw_body)
if verify:
# verify application id
if self.clova_application_id is not None:
application_id = cek_request_payload['context']['System']['application']['applicationId']
verifier.verify_application_id(application_id, self.clova_application_id)
try:
cek_request_payload['session']
except KeyError:
logger.debug("Session field is missing.\n"
"This message should not be appeared in produciton.")
cek_request_payload['session'] = {}
return cek_request_payload
def _flask_view_func(self, *args, **kwargs):
clova_payload = self._cek_request(verify=self.clova_verify_requests)
dbgdump(clova_payload)
request_body = models._Field(clova_payload)
self.request = request_body.request
self.version = request_body.version
self.context = getattr(request_body, 'context', models._Field())
self.session = getattr(request_body, 'session', models._Field())
if not self.session.sessionAttributes:
self.session.sessionAttributes = models._Field()
try:
if self.session.new and self._on_session_started_callback is not None:
self._on_session_started_callback()
except AttributeError:
pass
result = None
request_type = self.request.type
if request_type == 'LaunchRequest' and self._launch_view_func:
result = self._launch_view_func()
elif request_type == 'SessionEndedRequest':
if self._session_ended_view_func:
result = self._session_ended_view_func()
else:
logger.info("SessionEndedRequest Handler is not defined.")
result = "{}", 200
elif request_type == 'IntentRequest' and (self._intent_view_funcs or self._default_intent_view_func):
            result
"""
Django views for interacting with Part app
"""
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.files.base import ContentFile
from django.core.exceptions import ValidationError
from django.db import transaction
from django.db.utils import IntegrityError
from django.shortcuts import get_object_or_404
from django.shortcuts import HttpResponseRedirect
from django.utils.translation import gettext_lazy as _
from django.urls import reverse, reverse_lazy
from django.views.generic import DetailView, ListView, FormView, UpdateView
from django.forms.models import model_to_dict
from django.forms import HiddenInput, CheckboxInput
from django.conf import settings
from django.contrib import messages
from moneyed import CURRENCIES
from djmoney.contrib.exchange.models import convert_money
from PIL import Image
import requests
import os
import io
from rapidfuzz import fuzz
from decimal import Decimal, InvalidOperation
from .models import PartCategory, Part, PartRelated
from .models import PartParameterTemplate, PartParameter
from .models import PartCategoryParameterTemplate
from .models import BomItem
from .models import match_part_names
from .models import PartSellPriceBreak, PartInternalPriceBreak
from common.models import InvenTreeSetting
from company.models import SupplierPart
from common.files import FileManager
from common.views import FileManagementFormView, FileManagementAjaxView
from stock.models import StockLocation
import common.settings as inventree_settings
from . import forms as part_forms
from .bom import MakeBomTemplate, BomUploadManager, ExportBom, IsValidBOMFormat
from order.models import PurchaseOrderLineItem
from .admin import PartResource
from InvenTree.views import AjaxView, AjaxCreateView, AjaxUpdateView, AjaxDeleteView
from InvenTree.views import QRCodeView
from InvenTree.views import InvenTreeRoleMixin
from InvenTree.helpers import DownloadFile, str2bool
class PartIndex(InvenTreeRoleMixin, ListView):
""" View for displaying list of Part objects
"""
model = Part
template_name = 'part/category.html'
context_object_name = 'parts'
def get_queryset(self):
return Part.objects.all().select_related('category')
def get_context_data(self, **kwargs):
context = super(PartIndex, self).get_context_data(**kwargs).copy()
# View top-level categories
children = PartCategory.objects.filter(parent=None)
context['children'] = children
context['category_count'] = PartCategory.objects.count()
context['part_count'] = Part.objects.count()
return context
class PartRelatedCreate(AjaxCreateView):
""" View for creating a new PartRelated object
- The view only makes sense if a Part object is passed to it
"""
model = PartRelated
form_class = part_forms.CreatePartRelatedForm
ajax_form_title = _("Add Related Part")
ajax_template_name = "modal_form.html"
def get_initial(self):
""" Set parent part as part_1 field """
initials = {}
part_id = self.request.GET.get('part', None)
if part_id:
try:
initials['part_1'] = Part.objects.get(pk=part_id)
except (Part.DoesNotExist, ValueError):
pass
return initials
def get_form(self):
""" Create a form to upload a new PartRelated
- Hide the 'part_1' field (parent part)
- Display parts which are not yet related
"""
form = super(AjaxCreateView, self).get_form()
form.fields['part_1'].widget = HiddenInput()
try:
# Get parent part
parent_part = self.get_initial()['part_1']
# Get existing related parts
related_parts = [related_part[1].pk for related_part in parent_part.get_related_parts()]
# Build updated choice list excluding
# - parts already related to parent part
# - the parent part itself
updated_choices = []
for choice in form.fields["part_2"].choices:
if (choice[0] not in related_parts) and (choice[0] != parent_part.pk):
updated_choices.append(choice)
# Update choices for related part
form.fields['part_2'].choices = updated_choices
except KeyError:
pass
return form
class PartRelatedDelete(AjaxDeleteView):
""" View for deleting a PartRelated object """
model = PartRelated
ajax_form_title = _("Delete Related Part")
context_object_name = "related"
# Explicit role requirement
role_required = 'part.change'
class PartSetCategory(AjaxUpdateView):
""" View for settings the part category for multiple parts at once """
ajax_template_name = 'part/set_category.html'
ajax_form_title = _('Set Part Category')
form_class = part_forms.SetPartCategoryForm
role_required = 'part.change'
category = None
parts = []
def get(self, request, *args, **kwargs):
""" Respond to a GET request to this view """
self.request = request
if 'parts[]' in request.GET:
self.parts = Part.objects.filter(id__in=request.GET.getlist('parts[]'))
else:
self.parts = []
return self.renderJsonResponse(request, form=self.get_form(), context=self.get_context_data())
def post(self, request, *args, **kwargs):
""" Respond to a POST request to this view """
self.parts = []
for item in request.POST:
if item.startswith('part_id_'):
pk = item.replace('part_id_', '')
try:
part = Part.objects.get(pk=pk)
except (Part.DoesNotExist, ValueError):
continue
self.parts.append(part)
self.category = None
if 'part_category' in request.POST:
pk = request.POST['part_category']
try:
self.category = PartCategory.objects.get(pk=pk)
except (PartCategory.DoesNotExist, ValueError):
self.category = None
valid = self.category is not None
data = {
'form_valid': valid,
'success': _('Set category for {n} parts').format(n=len(self.parts))
}
if valid:
self.set_category()
return self.renderJsonResponse(request, data=data, form=self.get_form(), context=self.get_context_data())
@transaction.atomic
def set_category(self):
for part in self.parts:
part.set_category(self.category)
def get_context_data(self):
""" Return context data for rendering in the form """
ctx = {}
ctx['parts'] = self.parts
ctx['categories'] = PartCategory.objects.all()
ctx['category'] = self.category
return ctx
class MakePartVariant(AjaxCreateView):
""" View for creating a new variant based on an existing template Part
- Part <pk> is provided in the URL '/part/<pk>/make_variant/'
    - Automatically copy relevant data (BOM, etc.)
"""
model = Part
form_class = part_forms.EditPartForm
ajax_form_title = _('Create Variant')
ajax_template_name = 'part/variant_part.html'
def get_part_template(self):
return get_object_or_404(Part, id=self.kwargs['pk'])
def get_context_data(self):
return {
'part': self.get_part_template(),
}
def get_form(self):
form = super(AjaxCreateView, self).get_form()
# Hide some variant-related fields
# form.fields['variant_of'].widget = HiddenInput()
# Force display of the 'bom_copy' widget
form.fields['bom_copy'].widget = CheckboxInput()
# Force display of the 'parameters_copy' widget
form.fields['parameters_copy'].widget = CheckboxInput()
return form
def post(self, request, *args, **kwargs):
form = self.get_form()
context = self.get_context_data()
part_template = self.get_part_template()
valid = form.is_valid()
data = {
'form_valid': valid,
}
if valid:
# Create the new part variant
part = form.save(commit=False)
part.variant_of = part_template
part.is_template = False
part.save()
data['pk'] = part.pk
data['text'] = str(part)
data['url'] = part.get_absolute_url()
bom_copy = str2bool(request.POST.get('bom_copy', False))
parameters_copy = str2bool(request.POST.get('parameters_copy', False))
            # Copy relevant information from the template part
part.deep_copy(part_template, bom=bom_copy, parameters=parameters_copy)
return self.renderJsonResponse(request, form, data, context=context)
def get_initial(self):
part_template = self.get_part_template()
initials = model_to_dict(part_template)
initials['is_template'] = False
initials['variant_of'] = part_template
initials['bom_copy'] = InvenTreeSetting.get_setting('PART_COPY_BOM')
initials['parameters_copy'] = InvenTreeSetting.get_setting('PART_COPY_PARAMETERS')
return initials
class PartDuplicate(AjaxCreateView):
""" View for duplicating an existing Part object.
- Part <pk> is provided in the URL '/part/<pk>/copy/'
- Option for 'deep-copy' which will duplicate all BOM items (default = True)
"""
model = Part
form_class = part_forms.EditPartForm
ajax_form_title = _("Duplicate Part")
ajax_template_name = "part/copy_part.html"
def get_data(self):
return {
'success': _('Copied part')
}
def get_part_to_copy(self):
try:
return Part.objects.get(id=self.kwargs['pk'])
except (Part.DoesNotExist, ValueError):
return None
def get_context_data(self):
return {
'part': self.get_part_to_copy()
}
def get_form(self):
form = super(AjaxCreateView, self).get_form()
# Force display of the 'bom_copy' widget
form.fields['bom_copy'].widget = CheckboxInput()
# Force display of the 'parameters_copy' widget
form.fields['parameters_copy'].widget = CheckboxInput()
return form
def post(self, request, *args, **kwargs):
""" Capture the POST request for part duplication
- If the bom_copy object is set, copy all the BOM items too!
- If the parameters_copy object is set, copy all the parameters too!
"""
form = self.get_form()
context = self.get_context_data()
valid = form.is_valid()
name = request.POST.get('name', None)
if name:
matches = match_part_names(name)
if len(matches) > 0:
# Display the first five closest matches
context['matches'] = matches[:5]
# Enforce display of the checkbox
form.fields['confirm_creation'].widget = CheckboxInput()
# Check if the user has checked the 'confirm_creation' input
confirmed = str2bool(request.POST.get('confirm_creation', False))
if not confirmed:
msg = _('Possible matches exist - confirm creation of new part')
form.add_error('confirm_creation', msg)
form.pre_form_warning = msg
valid = False
data = {
'form_valid': valid
}
if valid:
# Create the new Part
part = form.save(commit=False)
part.creation_user = request.user
part.save()
data['pk'] = part.pk
data['text'] = str(part)
bom_copy = str2bool(request.POST.get('bom_copy', False))
parameters_copy = str2bool(request.POST.get('parameters_copy', False))
original = self.get_part_to_copy()
if original:
part.deep_copy(original, bom=bom_copy, parameters=parameters_copy)
try:
data['url'] = part.get_absolute_url()
except AttributeError:
pass
if valid:
pass
return self.renderJsonResponse(request, form, data, context=context)
def get_initial(self):
""" Get initial data based on the Part to be copied from.
"""
part = self.get_part_to_copy()
if part:
initials = model_to_dict(part)
else:
initials = super(AjaxCreateView, self).get_initial()
initials['bom_copy'] = str2bool(InvenTreeSetting.get_setting('PART_COPY_BOM', True))
initials['parameters_copy'] = str2bool(InvenTreeSetting.get_setting('PART_COPY_PARAMETERS', True))
return initials
class PartCreate(AjaxCreateView):
""" View for creating a new Part object.
Options for providing initial conditions:
- Provide a category object as initial data
"""
model = Part
form_class = part_forms.EditPartForm
ajax_form_title = _('Create New Part')
ajax_template_name = 'part/create_part.html'
def get_data(self):
return {
'success': _("Created new part"),
}
def get_category_id(self):
return self.request.GET.get('category', None)
def get_context_data(self, **kwargs):
""" Provide extra context information for the form to display:
- Add category information (if provided)
"""
context = super(PartCreate, self).get_context_data(**kwargs)
# Add category information to the page
cat_id = self.get_category_id()
if cat_id:
try:
context['category'] = PartCategory.objects.get(pk=cat_id)
except (PartCategory.DoesNotExist, ValueError):
pass
return context
def get_form(self):
""" Create Form for making new Part object.
Remove the 'default_supplier' field as there are not yet any matching SupplierPart objects
"""
form = super(AjaxCreateView, self).get_form()
# Hide the "default expiry" field if the feature is not enabled
if not inventree_settings.stock_expiry_enabled():
form.fields['default_expiry'].widget = HiddenInput()
# Hide the default_supplier field (there are no matching supplier parts yet!)
form.fields['default_supplier'].widget = HiddenInput()
# Display category templates widgets
form.fields['selected_category_templates'].widget = CheckboxInput()
form.fields['parent_category_templates'].widget = CheckboxInput()
return form
def post(self, request, *args, **kwargs):
form = self.get_form()
context = {}
valid = form.is_valid()
name = request.POST.get('name', None)
if name:
matches = match_part_names(name)
            if len(matches) > 0:
cc_1 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('./pug_rest.xsd', 128, 6))
counters.add(cc_1)
cc_2 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('./pug_rest.xsd', 129, 6))
counters.add(cc_2)
cc_3 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('./pug_rest.xsd', 130, 6))
counters.add(cc_3)
cc_4 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('./pug_rest.xsd', 131, 6))
counters.add(cc_4)
cc_5 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('./pug_rest.xsd', 132, 6))
counters.add(cc_5)
states = []
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(DateTime._UseForTag(pyxb.namespace.ExpandedName(None, 'Year')), pyxb.utils.utility.Location('./pug_rest.xsd', 127, 6))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_1, False))
symbol = pyxb.binding.content.ElementUse(DateTime._UseForTag(pyxb.namespace.ExpandedName(None, 'Month')), pyxb.utils.utility.Location('./pug_rest.xsd', 128, 6))
st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_1)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_2, False))
symbol = pyxb.binding.content.ElementUse(DateTime._UseForTag(pyxb.namespace.ExpandedName(None, 'Day')), pyxb.utils.utility.Location('./pug_rest.xsd', 129, 6))
st_2 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_2)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_3, False))
symbol = pyxb.binding.content.ElementUse(DateTime._UseForTag(pyxb.namespace.ExpandedName(None, 'Hour')), pyxb.utils.utility.Location('./pug_rest.xsd', 130, 6))
st_3 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_3)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_4, False))
symbol = pyxb.binding.content.ElementUse(DateTime._UseForTag(pyxb.namespace.ExpandedName(None, 'Minute')), pyxb.utils.utility.Location('./pug_rest.xsd', 131, 6))
st_4 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_4)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_5, False))
symbol = pyxb.binding.content.ElementUse(DateTime._UseForTag(pyxb.namespace.ExpandedName(None, 'Second')), pyxb.utils.utility.Location('./pug_rest.xsd', 132, 6))
st_5 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_5)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_0, False) ]))
st_0._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_1, True) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_1, False) ]))
st_1._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_2, True) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_2, False) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_2, False) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_2, False) ]))
st_2._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_3, True) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_3, False) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_3, False) ]))
st_3._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_4, True) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_4, False) ]))
st_4._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_5, True) ]))
st_5._set_transitionSet(transitions)
return fac.Automaton(states, counters, True, containing_state=None)
DateTime._Automaton = _BuildAutomaton_6()
CTD_ANON_6._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'CID'), pyxb.binding.datatypes.int, scope=CTD_ANON_6, location=pyxb.utils.utility.Location('./pug_rest.xsd', 139, 8)))
CTD_ANON_6._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'SID'), pyxb.binding.datatypes.int, scope=CTD_ANON_6, location=pyxb.utils.utility.Location('./pug_rest.xsd', 140, 8)))
CTD_ANON_6._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'AID'), pyxb.binding.datatypes.int, scope=CTD_ANON_6, location=pyxb.utils.utility.Location('./pug_rest.xsd', 141, 8)))
CTD_ANON_6._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'ListKey'), pyxb.binding.datatypes.string, scope=CTD_ANON_6, location=pyxb.utils.utility.Location('./pug_rest.xsd', 142, 8)))
CTD_ANON_6._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'Size'), pyxb.binding.datatypes.int, scope=CTD_ANON_6, location=pyxb.utils.utility.Location('./pug_rest.xsd', 143, 8)))
CTD_ANON_6._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'EntrezDB'), pyxb.binding.datatypes.string, scope=CTD_ANON_6, location=pyxb.utils.utility.Location('./pug_rest.xsd', 144, 8)))
CTD_ANON_6._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'EntrezWebEnv'), pyxb.binding.datatypes.string, scope=CTD_ANON_6, location=pyxb.utils.utility.Location('./pug_rest.xsd', 145, 8)))
CTD_ANON_6._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'EntrezQueryKey'), pyxb.binding.datatypes.int, scope=CTD_ANON_6, location=pyxb.utils.utility.Location('./pug_rest.xsd', 146, 8)))
CTD_ANON_6._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'EntrezURL'), pyxb.binding.datatypes.string, scope=CTD_ANON_6, location=pyxb.utils.utility.Location('./pug_rest.xsd', 147, 8)))
def _BuildAutomaton_7 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_7
del _BuildAutomaton_7
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('./pug_rest.xsd', 139, 8))
counters.add(cc_0)
cc_1 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('./pug_rest.xsd', 140, 8))
counters.add(cc_1)
cc_2 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('./pug_rest.xsd', 141, 8))
counters.add(cc_2)
cc_3 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('./pug_rest.xsd', 142, 8))
counters.add(cc_3)
cc_4 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('./pug_rest.xsd', 143, 8))
counters.add(cc_4)
cc_5 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('./pug_rest.xsd', 144, 8))
counters.add(cc_5)
cc_6 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('./pug_rest.xsd', 145, 8))
counters.add(cc_6)
cc_7 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('./pug_rest.xsd', 146, 8))
counters.add(cc_7)
cc_8 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('./pug_rest.xsd', 147, 8))
counters.add(cc_8)
states = []
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_6._UseForTag(pyxb.namespace.ExpandedName(None, 'CID')), pyxb.utils.utility.Location('./pug_rest.xsd', 139, 8))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_1, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_6._UseForTag(pyxb.namespace.ExpandedName(None, 'SID')), pyxb.utils.utility.Location('./pug_rest.xsd', 140, 8))
st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_1)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_2, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_6._UseForTag(pyxb.namespace.ExpandedName(None, 'AID')), pyxb.utils.utility.Location('./pug_rest.xsd', 141, 8))
st_2 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_2)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_3, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_6._UseForTag(pyxb.namespace.ExpandedName(None, 'ListKey')), pyxb.utils.utility.Location('./pug_rest.xsd', 142, 8))
st_3 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_3)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_4, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_6._UseForTag(pyxb.namespace.ExpandedName(None, 'Size')), pyxb.utils.utility.Location('./pug_rest.xsd', 143, 8))
st_4 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_4)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_5, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_6._UseForTag(pyxb.namespace.ExpandedName(None, 'EntrezDB')), pyxb.utils.utility.Location('./pug_rest.xsd', 144, 8))
st_5 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_5)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_6, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_6._UseForTag(pyxb.namespace.ExpandedName(None, 'EntrezWebEnv')), pyxb.utils.utility.Location('./pug_rest.xsd', 145, 8))
st_6 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_6)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_7, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_6._UseForTag(pyxb.namespace.ExpandedName(None, 'EntrezQueryKey')), pyxb.utils.utility.Location('./pug_rest.xsd', 146, 8))
st_7 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_7)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_8, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_6._UseForTag(pyxb.namespace.ExpandedName(None, 'EntrezURL')), pyxb.utils.utility.Location('./pug_rest.xsd', 147, 8))
st_8 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_8)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_6, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_7, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_8, [
fac.UpdateInstruction(cc_0, False) ]))
st_0._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_1, True) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_6, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_7, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_8, [
fac.UpdateInstruction(cc_1, False) ]))
st_1._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_2, True) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_2, False) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_2, False) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_2, False) ]))
transitions.append(fac.Transition(st_6, [
fac.UpdateInstruction(cc_2, False) ]))
transitions.append(fac.Transition(st_7, [
fac.UpdateInstruction(cc_2, False) ]))
transitions.append(fac.Transition(st_8, [
fac.UpdateInstruction(cc_2, False) ]))
st_2._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_3, True) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_3, False) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_3, False) ]))
transitions.append(fac.Transition(st_6, [
fac.UpdateInstruction(cc_3, False) ]))
transitions.append(fac.Transition(st_7, [
fac.UpdateInstruction(cc_3, False) ]))
transitions.append(fac.Transition(st_8, [
fac.UpdateInstruction(cc_3, False) ]))
st_3._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_4, True) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_4, False) ]))
transitions.append(fac.Transition(st_6, [
fac.UpdateInstruction(cc_4, False) ]))
transitions.append(fac.Transition(st_7, [
fac.UpdateInstruction(cc_4, False) ]))
transitions.append(fac.Transition(st_8, [
fac.UpdateInstruction(cc_4, False) ]))
st_4._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_5, True) ]))
transitions.append(fac.Transition(st_6, [
fac.UpdateInstruction(cc_5, False) ]))
transitions.append(fac.Transition(st_7, [
fac.UpdateInstruction(cc_5, False) ]))
transitions.append(fac.Transition(st_8, [
fac.UpdateInstruction(cc_5, False) ]))
st_5._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_6, [
fac.UpdateInstruction(cc_6, True) ]))
transitions.append(fac.Transition(st_7, [
fac.UpdateInstruction(cc_6, False) ]))
transitions.append(fac.Transition(st_8, [
fac.UpdateInstruction(cc_6, False) ]))
st_6._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_7, [
fac.UpdateInstruction(cc_7, True) ]))
transitions.append(fac.Transition(st_8, [
fac.UpdateInstruction(cc_7, False) ]))
st_7._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_8, [
fac.UpdateInstruction(cc_8, True) ]))
st_8._set_transitionSet(transitions)
return fac.Automaton(states, counters, True, containing_state=None)
CTD_ANON_6._Automaton = _BuildAutomaton_7()
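# Hedged usage sketch (not part of the generated bindings): a module generated by
# PyXB's pyxbgen from pug_rest.xsd is normally consumed through its CreateFromDocument
# helper, which parses an XML document into instances of these binding classes; the
# module name and file name below are illustrative.
#
#   import pug_rest_bindings
#   doc = pug_rest_bindings.CreateFromDocument(open("response.xml", "rb").read())
#   # elements declared above (e.g. ListKey, Size) become attributes of the binding
#   # object for the element typed as CTD_ANON_6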
CTD_ANON_7._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'Columns'), CTD_ANON_8, scope=CTD_ANON_7, location=pyxb.utils.utility.Location('./pug_rest.xsd', 155, 8)))
CTD_ANON_7._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'Row'), CTD_ANON_9, scope=CTD_ANON_7, location=pyxb.utils.utility.Location('./pug_rest.xsd', 162, 8)))
def _BuildAutomaton_8 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_8
del _BuildAutomaton_8
import pyxb.utils.fac as fac
counters = set()
states = []
final_update = None
symbol = pyxb.binding.content.ElementUse(CTD_ANON_7._UseForTag(pyxb.namespace.ExpandedName(None, 'Columns')), pyxb.utils.utility.Location('./pug_rest.xsd', 155, 8))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
final_update = set()
symbol = pyxb.binding.content.ElementUse(CTD_ANON_7._UseForTag(pyxb.namespace.ExpandedName(None, 'Row')), pyxb.utils.utility.Location('./pug_rest.xsd', 162, 8))
st_1 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_1)
transitions = []
transitions.append(fac.Transition(st_1, [
]))
st_0._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_1, [
]))
st_1._set_transitionSet(transitions)
return fac.Automaton(states, counters, False, containing_state=None)
CTD_ANON_7._Automaton = _BuildAutomaton_8()
CTD_ANON_8._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'Column'), pyxb.binding.datatypes.string, scope=CTD_ANON_8, location=pyxb.utils.utility.Location('./pug_rest.xsd', 158, 14)))
def _BuildAutomaton_9 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_9
del _BuildAutomaton_9
import pyxb.utils.fac as fac
counters = set()
states = []
final_update = set()
symbol = pyxb.binding.content.ElementUse(CTD_ANON_8._UseForTag(pyxb.namespace.ExpandedName(None, 'Column')), pyxb.utils.utility.Location('./pug_rest.xsd', 158, 14))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
transitions = []
transitions.append(fac.Transition(st_0, [
]))
st_0._set_transitionSet(transitions)
return fac.Automaton(states, counters, False, containing_state=None)
CTD_ANON_8._Automaton = _BuildAutomaton_9()
CTD_ANON_9._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'Cell'), pyxb.binding.datatypes.string, scope=CTD_ANON_9, location=pyxb.utils.utility.Location('./pug_rest.xsd', 165, 14)))
def _BuildAutomaton_10 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_10
del _BuildAutomaton_10
import pyxb.utils.fac as fac
counters = set()
states = []
final_update = set()
symbol = pyxb.binding.content.ElementUse(CTD_ANON_9._UseForTag(pyxb.namespace.ExpandedName(None, 'Cell')), pyxb.utils.utility.Location('./pug_rest.xsd', 165, 14))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
transitions = []
transitions.append(fac.Transition(st_0, [
]))
st_0._set_transitionSet(transitions)
return fac.Automaton(states, counters, False, containing_state=None)
CTD_ANON_9._Automaton = _BuildAutomaton_10()
CTD_ANON_10._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'NumberOfRows'), pyxb.binding.datatypes.int, scope=CTD_ANON_10, location=pyxb.utils.utility.Location('./pug_rest.xsd', 176, 8)))
CTD_ANON_10._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'ColumnValues'), CTD_ANON_11, scope=CTD_ANON_10, location=pyxb.utils.utility.Location('./pug_rest.xsd', 177, 8)))
def _BuildAutomaton_11 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_11
del _BuildAutomaton_11
import pyxb.utils.fac as fac
counters = set()
states = []
final_update = None
symbol = pyxb.binding.content.ElementUse(CTD_ANON_10._UseForTag(pyxb.namespace.ExpandedName(None, 'NumberOfRows')), pyxb.utils.utility.Location('./pug_rest.xsd', 176, 8))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
final_update = set()
symbol = pyxb.binding.content.ElementUse(CTD_ANON_10._UseForTag(pyxb.namespace.ExpandedName(None, 'ColumnValues')), pyxb.utils.utility.Location('./pug_rest.xsd', 177, 8))
st_1 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_1)
transitions = []
transitions.append(fac.Transition(st_1, [
]))
st_0._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_1, [
]))
st_1._set_transitionSet(transitions)
return fac.Automaton(states, counters, False, containing_state=None)
CTD_ANON_10._Automaton = _BuildAutomaton_11()
CTD_ANON_11._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'ColumnName'), pyxb.binding.datatypes.string, scope=CTD_ANON_11, location=pyxb.utils.utility.Location('./pug_rest.xsd', 180, 14)))
CTD_ANON_11._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'Item'), CTD_ANON_12, scope=CTD_ANON_11, location=pyxb.utils.utility.Location('./pug_rest.xsd', 181, 14)))
def _BuildAutomaton_12 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_12
del _BuildAutomaton_12
import pyxb.utils.fac as fac
counters = set()
states = []
final_update = None
symbol = pyxb.binding.content.ElementUse(CTD_ANON_11._UseForTag(pyxb.namespace.ExpandedName(None, 'ColumnName')), pyxb.utils.utility.Location('./pug_rest.xsd', 180, 14))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
final_update = set()
symbol = pyxb.binding.content.ElementUse(CTD_ANON_11._UseForTag(pyxb.namespace.ExpandedName(None, 'Item')), pyxb.utils.utility.Location('./pug_rest.xsd', 181, 14))
st_1 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_1)
| |
"""Example module in template package."""
import os
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_validate, GridSearchCV
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.linear_model import SGDClassifier
from sklearn import svm
from imblearn.over_sampling import SMOTE
from imblearn.under_sampling import RandomUnderSampler
from imblearn.pipeline import Pipeline
from geo import *
__all__ = ['Tool']
class Tool(object):
"""Class to interact with a postcode database file."""
def __init__(self, postcode_file='', sample_labels='',
household_file=''):
"""
Parameters
----------
postcode_file : str, optional
Filename of a .csv file containing geographic location
data for postcodes.
sample_labels : str, optional
Filename of a .csv file containing sample data on property
values and flood risk labels.
household_file : str, optional
Filename of a .csv file containing information on households
by postcode.
"""
if postcode_file == '':
postcode_file = os.sep.join((os.path.dirname(__file__),
'resources',
'postcodes_unlabelled.csv'))
if sample_labels == '':
sample_labels = os.sep.join((os.path.dirname(__file__),
'resources',
'postcodes_sampled.csv'))
if household_file == '':
household_file = os.sep.join((os.path.dirname(__file__),
'resources',
'households_per_sector.csv'))
self.label = pd.read_csv(sample_labels)
self.postcodes = pd.read_csv(postcode_file)
self.house_label = pd.read_csv(household_file)
def get_easting_northing(self, postcodes):
"""Get a frame of OS eastings and northings from a collection
of input postcodes.
Parameters
----------
postcodes: sequence of strs
Sequence of postcodes.
Returns
-------
pandas.DataFrame
DataFrame containing only OSGB36 easting and northing indexed
by the input postcodes. Invalid postcodes (i.e. not in the
input unlabelled postcodes file) return as NaN.
"""
if type(postcodes) is str:
postcodes = [postcodes]
postcode_df = self.postcodes
postcode_df = postcode_df.fillna(np.nan)  # keep missing values as NaN (was filling with the string literal 'np.nan')
postcode_df_index = postcode_df.set_index('postcode')
df = pd.DataFrame(columns=(['sector','easting', 'northing','localAuthority']))
for i in range(len(postcodes)):
if postcodes[i] in postcode_df['postcode'].tolist():
df.loc[postcodes[i]] = postcode_df_index.loc[postcodes[i]]
else:
df.loc[postcodes[i]] = np.NaN
del df['sector']
del df['localAuthority']
return df
def get_lat_long(self, postcodes):
"""Get a frame containing GPS latitude and longitude information for a
collection of postcodes.
Parameters
----------
postcodes: sequence of strs
Sequence of postcodes.
Returns
-------
pandas.DataFrame
DataFrame containing only WGS84 latitude and longitude pairs for
the input postcodes. Invalid postcodes (i.e. not in the
input unlabelled postcodes file) return as NaN.
"""
NE = self.get_easting_northing(postcodes)
east = NE['easting']
north = NE['northing']
lat_long = []
for i in range(len(NE)):
postcode = postcodes[i]
if np.isnan(east.iloc[i]):
lat = np.NaN
long = np.NaN
else:
a = get_gps_lat_long_from_easting_northing([east.iloc[i]], [north.iloc[i]], rads=False)
# keep decimal degrees; int() would truncate the coordinates
lat = float(a[0])
long = float(a[1])
lat_long.append([postcode, lat, long])
postcode_df = pd.DataFrame(lat_long, columns=('postcode','lat','lon'))
postcode_df = postcode_df.set_index('postcode')
return postcode_df
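# Hedged usage sketch for the two coordinate getters above; the expected columns come
# from their docstrings, while the postcode value itself is made up.
#
#   tool = Tool()
#   tool.get_easting_northing(['AB1 2CD'])   # DataFrame with 'easting'/'northing'
#   tool.get_lat_long(['AB1 2CD'])           # DataFrame with 'lat'/'lon'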
def get_easting_northing_sample(self, postcodes):
"""Get a frame of OS eastings and northings from a collection
of input postcodes.
Parameters
----------
postcodes: sequence of strs
Sequence of postcodes.
Returns
-------
pandas.DataFrame
DataFrame containing only OSGB36 easting and northing indexed
by the input postcodes. Invalid postcodes (i.e. not in the
input sampled postcodes file) return as NaN.
"""
if type(postcodes) is str:
postcodes = [postcodes]
postcode_df = self.label
postcode_df = postcode_df.fillna(np.nan)  # keep missing values as NaN (was filling with the string literal 'np.nan')
postcode_df_index = postcode_df.set_index('postcode')
df = pd.DataFrame(columns=(['sector','easting', 'northing','localAuthority','riskLabel','medianPrice']))
for i in range(len(postcodes)):
if postcodes[i] in postcode_df['postcode'].tolist():
df.loc[postcodes[i]] = postcode_df_index.loc[postcodes[i]]
else:
df.loc[postcodes[i]] = np.NaN
del df['sector']
del df['localAuthority']
del df['riskLabel']
del df['medianPrice']
return df
def get_lat_long_sample(self, postcodes):
"""Get a frame containing GPS latitude and longitude information for a
collection of postcodes.
Parameters
----------
postcodes: sequence of strs
Sequence of postcodes.
Returns
-------
pandas.DataFrame
DataFrame containing only WGS84 latitude and longitude pairs for
the input postcodes. Invalid postcodes (i.e. not in the
input sampled postcodes file) return as NaN.
"""
NE = self.get_easting_northing_sample(postcodes)
east = NE['easting']
north = NE['northing']
lat_long = []
for i in range(len(NE)):
postcode = postcodes[i]
if np.isnan(east.iloc[i]):
lat = np.NaN
long = np.NaN
else:
a = get_gps_lat_long_from_easting_northing([east.iloc[i]], [north.iloc[i]], rads=False)
# keep decimal degrees; int() would truncate the coordinates
lat = float(a[0])
long = float(a[1])
lat_long.append([postcode, lat, long])
postcode_df = pd.DataFrame(lat_long, columns=('postcode','lat','lon'))
postcode_df = postcode_df.set_index('postcode')
return postcode_df
@staticmethod
def get_flood_class_methods():
"""
Get a dictionary of available flood probability classification methods.
Returns
-------
dict
Dictionary mapping classification method names (which have
no innate meaning) onto an identifier to be passed to the
get_flood_class method.
"""
return {'random_forest': 0, "RF_balanced": 1, "SGD_Classifier": 2, "knn": 3, 'GBC': 4}
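# Hedged usage sketch: the identifiers returned above are meant to be passed as the
# `method` argument of get_flood_class (defined below); the postcode is made up.
#
#   tool = Tool()
#   methods = tool.get_flood_class_methods()
#   tool.get_flood_class(['AB1 2CD'], method=methods['knn'])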
def get_flood_class(self, postcodes, method=0):
"""
Generate series predicting flood probability classification
for a collection of postcodes.
Parameters
----------
postcodes : sequence of strs
Sequence of postcodes.
method : int (optional)
optionally specify (via a value in
self.get_flood_class_methods) the classification
method to be used.
Returns
-------
pandas.Series
Series of flood risk classification labels indexed by postcodes.
"""
# print("asdas", self.label)
X = self.label[["easting","northing"]]
y = self.label['riskLabel']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 42) # Holdout
northing_eastings = self.get_easting_northing(postcodes)
# print(northing_eastings, 'asdsa')
# northing_eastings = X.iloc[0:2]
# print(self.get_flood_class_methods(), 'asd')
if method == self.get_flood_class_methods()["random_forest"]:
model = RandomForestClassifier(criterion = 'gini', max_features = 'log2',
class_weight = {1: 10, 2: 10, 3: 10, 4: 1, 5: 15, 6: 15, 7: 10, 8: 150, 9: 300, 10: 300})
model.fit(X_train, y_train)
if method == self.get_flood_class_methods()["RF_balanced"]:
over = SMOTE(sampling_strategy='not majority', random_state=41)
under = RandomUnderSampler(sampling_strategy={1:500}, random_state=43)
steps = [('u', under)] #, ('o', over)
pipeline = Pipeline(steps=steps)
X_train, y_train = pipeline.fit_resample(X_train, y_train)
model = RandomForestClassifier(criterion = 'gini', max_features = 'log2',
class_weight = {1: 10, 2: 10, 3: 10, 4: 1, 5: 15, 6: 15, 7: 10, 8: 150, 9: 300, 10: 300})
model.fit(X_train, y_train)
if method == self.get_flood_class_methods()["SGD_Classifier"]:
model = SGDClassifier(loss='hinge', penalty='l1', alpha=1/20)
model.fit(X_train, y_train)
if method == self.get_flood_class_methods()["knn"]:
model = KNeighborsClassifier(n_neighbors=20)
model.fit(X_train, y_train)
if method == self.get_flood_class_methods()["GBC"]:
model = GradientBoostingClassifier(random_state=1)
model.fit(X_train, y_train)
y_new = model.predict(northing_eastings)
return pd.Series(data=y_new,
index=np.asarray(postcodes),
name='riskLabel')
@staticmethod
def get_house_price_methods():
"""
Get a dictionary of available house price regression methods.
Returns
-------
dict
Dictionary mapping regression method names (which have
no innate meaning) onto an identifier to be passed to the
get_median_house_price_estimate method.
"""
return {'all_england_median': 0, 'another_1': 1, 'Decision_tree_regressor': 2}
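# Hedged usage sketch, mirroring the flood-class methods above: pick a regression
# method by its identifier and pass it to get_median_house_price_estimate (defined
# below); the postcode is made up.
#
#   tool = Tool()
#   m = tool.get_house_price_methods()['Decision_tree_regressor']
#   tool.get_median_house_price_estimate(['AB1 2CD'], method=m)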
def get_median_house_price_estimate(self, postcodes, method=2):
"""
Generate series predicting median house price for a collection
of postcodes.
Parameters
----------
postcodes : sequence of strs
Sequence of postcodes.
method : int (optional)
optionally specify (via a value in
self.get_house_price_methods) the regression
method to be used.
Returns
-------
pandas.Series
Series of median house price estimates indexed by postcodes.
"""
df = self.label
df['outwardDistrict'] = df['postcode'].apply(lambda x: x.split(' ')[0])
df['sec_num']=df['sector'].apply(lambda x: x.split(' ')[1][0])
if method == 0:
return pd.Series(data=np.full(len(postcodes), 245000.0),
index=np.asarray(postcodes),
name='medianPrice')
elif method == 1: # another one
median_price = []
for code in postcodes:
if code in df['postcode'].values:
median_price.append(df[df['postcode']==code]['medianPrice'].values[0])
elif code.split(' ')[0]+' '+code.split(' ')[1][0] in df['sector'].values:
sec = code.split(' ')[0]+' '+code.split(' ')[1][0]
median_price.append(df[df['sector'] == sec]['medianPrice'].mean())
elif code.split(' ')[0] in df['outwardDistrict'].values:
district = df[df['outwardDistrict'] == code.split(' ')[0]]
X_test = code.split(' ')[1][0]
KNN_model = KNeighborsRegressor(n_neighbors=1,weights='distance', n_jobs=-1)
X = district[['sec_num']]
y = district['medianPrice']
KNN_model.fit(X,y)
y_pred = KNN_model.predict(pd.DataFrame([X_test], columns=['sec_num']))
median_price.append(y_pred[0])
else:
median_price.append(np.nan)
return pd.Series(data= median_price, index = postcodes, name='another_one')
elif method == 2: # Decision tree regressor
median_price = []
for code in postcodes:
if code in df['postcode'].values:
median_price.append(df[df['postcode']==code]['medianPrice'].values[0])
elif code.split(' ')[0]+' '+code.split(' ')[1][0] in df['sector'].values:
sec = code.split(' ')[0]+' '+code.split(' ')[1][0]
median_price.append(df[df['sector'] == sec]['medianPrice'].mean())
elif code.split(' ')[0] in df['outwardDistrict'].values:
district = df[df['outwardDistrict'] == code.split(' ')[0]]
X_test = code.split(' ')[1][0] # sector
dtree = DecisionTreeRegressor(max_depth=5, min_samples_leaf=0.13, random_state=3)
X = district[['sec_num']]
y = district['medianPrice']
dtree.fit(X, y)
y_pred = dtree.predict(pd.DataFrame([X_test], columns=['sec_num']))
median_price.append(y_pred[0])
else:
median_price.append(np.nan)
return pd.Series(data= median_price, index = postcodes, name='another_one')
else:
raise NotImplementedError
def get_total_value(self, locations):
"""
Return a series of estimates of the total property values
of a collection of postcode units or sectors.
Parameters
----------
locations : sequence of strs
Sequence of postcode units or sectors
Returns
-------
pandas.Series
Series of total property value estimates indexed by locations.
"""
total_price = np.zeros((len(locations), 1), dtype=float)
for i, string in enumerate(locations):
wanted_outwardDistrict = string.split(' ')[0]
wanted_sector = string.split(' ')[1]
if (len(wanted_outwardDistrict) == 3): # there are double spaces in household_per_sector.csv
sector_df = self.house_label[self.house_label['postcode sector'] == wanted_outwardDistrict + ' ' + wanted_sector[0]]
elif (len(wanted_outwardDistrict) == 4):
sector_df = self.house_label[self.house_label['postcode sector'] == wanted_outwardDistrict + ' ' + wanted_sector[0]]
else: # invalid district given
total_price[i] = np.nan  # assignment (was '==', a no-op comparison)
if len(string) < 7: # is a sector
#mean house price in that sector
mean_price = self.label[self.label['sector'] == wanted_outwardDistrict + ' | |
import logging
import os
import shutil
from pathlib import Path
from typing import Any, Dict, Optional, Union
import torch
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoTokenizer,
PretrainedConfig,
)
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward, default_cache_path
from transformers.generation_utils import GenerationMixin
from transformers.modeling_outputs import (
BaseModelOutput,
CausalLMOutputWithCrossAttentions,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from transformers.onnx import FeaturesManager, export
import onnxruntime as ort
from huggingface_hub import HfApi, hf_hub_download
from ..modeling_base import OptimizedModel
from .utils import ONNX_WEIGHTS_NAME, _is_gpu_available
logger = logging.getLogger(__name__)
_TOKENIZER_FOR_DOC = "AutoTokenizer"
ONNX_MODEL_START_DOCSTRING = r"""
This model inherits from [~`onnxruntime.modeling_ort.ORTModel`]. Check the superclass documentation for the generic methods the
library implements for all its models (such as downloading or saving)
Parameters:
config (`transformers.PretrainedConfig`): [PretrainedConfig](https://huggingface.co/docs/transformers/main_classes/configuration#transformers.PretrainedConfig) is the Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~onnxruntime.modeling_ort.ORTModel.from_pretrained`] method to load the model weights.
model (`onnxruntime.InferenceSession`): [onnxruntime.InferenceSession](https://onnxruntime.ai/docs/api/python/api_summary.html#inferencesession) is the main class used to run a model. Check out the [`~onnxruntime.modeling_ort.ORTModel.load_model`] method for more information.
"""
ONNX_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.Tensor` of shape `({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`](https://huggingface.co/docs/transformers/autoclass_tutorial#autotokenizer).
See [`PreTrainedTokenizer.encode`](https://huggingface.co/docs/transformers/main_classes/tokenizer#transformers.PreTrainedTokenizerBase.encode) and
[`PreTrainedTokenizer.__call__`](https://huggingface.co/docs/transformers/main_classes/tokenizer#transformers.PreTrainedTokenizerBase.__call__) for details.
[What are input IDs?](https://huggingface.co/docs/transformers/glossary#input-ids)
attention_mask (`torch.Tensor` of shape `({0})`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](https://huggingface.co/docs/transformers/glossary#attention-mask)
token_type_ids (`torch.Tensor` of shape `({0})`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`:
- 1 for tokens that are **sentence A**,
- 0 for tokens that are **sentence B**.
[What are token type IDs?](https://huggingface.co/docs/transformers/glossary#token-type-ids)
"""
@add_start_docstrings(
"""
Base ORTModel class for implementing models using ONNX Runtime. The ORTModel implements generic methods for interacting
with the Hugging Face Hub as well as for exporting vanilla transformers models to ONNX using the `transformers.onnx` toolchain.
It additionally implements generic methods for optimizing and quantizing ONNX models.
""",
)
class ORTModel(OptimizedModel):
base_model_prefix = "onnx_model"
auto_model_class = AutoModel
def __init__(self, model=None, config=None, **kwargs):
self.model = model
self.config = config
self.model_save_dir = kwargs.get("model_save_dir", None)
self.latest_model_name = kwargs.get("latest_model_name", "model.onnx")
# registers the ORTModelForXXX classes into the transformers AutoModel classes
# to avoid warnings when creating a pipeline https://github.com/huggingface/transformers/blob/cad61b68396a1a387287a8e2e2fef78a25b79383/src/transformers/pipelines/base.py#L863
AutoConfig.register(self.base_model_prefix, AutoConfig)
self.auto_model_class.register(AutoConfig, self.__class__)
def forward(self, *args, **kwargs):
raise NotImplementedError
@staticmethod
def load_model(path: Union[str, Path], provider=None):
"""
Loads an ONNX Runtime inference session with the given provider. Defaults to `CUDAExecutionProvider` if a GPU is available, else `CPUExecutionProvider`.
Arguments:
path (`str` or `Path`):
Path to the ONNX model file to load.
provider(`str`, *optional*):
Onnxruntime provider to use for loading the model, defaults to `CUDAExecutionProvider` if GPU is
available else `CPUExecutionProvider`
"""
if provider is None:
provider = "CUDAExecutionProvider" if _is_gpu_available() else "CPUExecutionProvider"
return ort.InferenceSession(path, providers=[provider])
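# Hedged usage sketch: load_model is a thin wrapper around onnxruntime.InferenceSession;
# the file path below is illustrative.
#
#   session = ORTModel.load_model("path/to/model.onnx")  # picks CUDA or CPU provider
#   session = ORTModel.load_model("path/to/model.onnx", provider="CPUExecutionProvider")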
def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
"""
Save a model and its configuration file to a directory, so that it can be re-loaded using the
[`~optimum.onnxruntime.modeling_ort.ORTModel.from_pretrained`] class method. It will always save the latest_model_name.
Arguments:
save_directory (`str` or `Path`):
Directory where to save the model file.
file_name(`str`, *optional*):
Overwrites the default model file name from `"model.onnx"` to `file_name`. This allows you to save the model with
a different name.
"""
model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
src_path = self.model_save_dir.joinpath(self.latest_model_name)
dst_path = Path(save_directory).joinpath(model_file_name)
shutil.copyfile(src_path, dst_path)
@classmethod
def _from_pretrained(
cls,
model_id: Union[str, Path],
use_auth_token: Optional[Union[bool, str, None]] = None,
revision: Optional[Union[str, None]] = None,
force_download: bool = False,
cache_dir: Optional[str] = None,
file_name: Optional[str] = None,
**kwargs,
):
"""
Load a model and its configuration file from a directory or the HF Hub.
Implements: https://github.com/huggingface/huggingface_hub/blob/e67de48368bc1843e40afc1cc9d236402b9609ee/src/huggingface_hub/hub_mixin.py#L73
Arguments:
model_id (`str` or `Path`):
Directory from which to load
use_auth_token (`str` or `bool`):
Authentication token needed to load models from a private repository
revision (`str`):
Revision is the specific model version to use. It can be a branch name, a tag name, or a commit id
cache_dir (`Union[str, Path]`, *optional*):
Path to a directory in which a downloaded pretrained model configuration should be cached if the
standard cache should not be used.
force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
cached versions if they exist.
file_name(`str`):
Overwrites the default model file name from `"model.onnx"` to `file_name`. This allows you to load different model files from the same
repository or directory.
kwargs (`Dict`, *optional*):
kwargs will be passed to the model during initialization
"""
config_dict = kwargs.pop("config", {})
model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(model_id):
config = PretrainedConfig.from_dict(config_dict)
model = ORTModel.load_model(os.path.join(model_id, model_file_name))
kwargs["model_save_dir"] = Path(model_id)
kwargs["latest_model_name"] = model_file_name
# load model from hub
else:
# download model
model_cache_path = hf_hub_download(
repo_id=model_id,
filename=model_file_name,
use_auth_token=use_auth_token,
revision=revision,
cache_dir=cache_dir,
force_download=force_download,
)
kwargs["model_save_dir"] = Path(model_cache_path).parent
kwargs["latest_model_name"] = Path(model_cache_path).name
model = ORTModel.load_model(model_cache_path)
config = PretrainedConfig.from_dict(config_dict)
return cls(model=model, config=config, **kwargs)
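# Hedged usage sketch: _from_pretrained is normally reached through the public
# from_pretrained/save_pretrained entry points assumed to be exposed by the
# OptimizedModel base class; the checkpoint name matches the one used in the
# documentation sample further below.
#
#   model = ORTModelForFeatureExtraction.from_pretrained("optimum/all-MiniLM-L6-v2")
#   model.save_pretrained("./local-onnx-model")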
@classmethod
def _from_transformers(
cls,
model_id: str,
save_dir: Union[str, Path] = default_cache_path,
use_auth_token: Optional[Union[bool, str, None]] = None,
revision: Optional[Union[str, None]] = None,
force_download: bool = False,
cache_dir: Optional[str] = None,
**kwargs,
):
"""
Converts a vanilla Transformers model into an optimized model using `transformers.onnx.export`.
Arguments:
model_id (`str` or `Path`):
Directory from which to load
save_dir (`str` or `Path`):
Directory where the onnx model should be saved, default to `transformers.file_utils.default_cache_path`, which is the cache dir for
transformers.
use_auth_token (`str` or `bool`):
Authentication token needed to load models from a private repository
revision (`str`):
Revision is the specific model version to use. It can be a branch name, a tag name, or a commit id
cache_dir (`Union[str, Path]`, *optional*):
Path to a directory in which a downloaded pretrained model configuration should be cached if the
standard cache should not be used.
force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
cached versions if they exist.
kwargs (`Dict`, *optional*):
kwargs will be passed to the model during initialization
"""
# create local save dir in cache dir
save_dir = Path(save_dir).joinpath(model_id)
save_dir.mkdir(parents=True, exist_ok=True)
kwargs["model_save_dir"] = save_dir
# reads pipeline task from ORTModelForXXX class if available else tries to extract from hub
if cls.pipeline_task is not None:
task = cls.pipeline_task
else:
task = HfApi().model_info(model_id, revision=revision).pipeline_tag
if task in ["sentiment-analysis", "text-classification", "zero-shot-classification"]:
task = "sequence-classification"
elif task in ["feature-extraction", "fill-mask"]:
task = "default"
# 2. convert to temp dir
# FIXME: transformers.onnx conversion doesn't support private models
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = FeaturesManager.get_model_from_feature(task, model_id)
_, model_onnx_config = FeaturesManager.check_supported_model_or_raise(model, feature=task)
onnx_config = model_onnx_config(model.config)
# export model
export(
preprocessor=tokenizer,
model=model,
config=onnx_config,
opset=onnx_config.default_onnx_opset,
output=save_dir.joinpath(ONNX_WEIGHTS_NAME),
)
kwargs["config"] = model.config.__dict__
# 3. load normal model
return cls._from_pretrained(save_dir.as_posix(), **kwargs)
FEAUTRE_EXTRACTION_SAMPLE = r"""
Example of feature extraction:
```python
>>> from transformers import {processor_class}
>>> from optimum.onnxruntime import {model_class}
>>> import torch
>>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> inputs = tokenizer("My name is Philipp and I live in Germany.", return_tensors="pt")
>>> outputs = model(**inputs)
>>> logits = outputs.logits
>>> list(logits.shape)
```
Example using `transformers.pipeline`:
```python
>>> from transformers import {processor_class}, pipeline
>>> from optimum.onnxruntime import {model_class}
>>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> onnx_extractor = pipeline("feature-extraction", model=model, tokenizer=tokenizer)
>>> text = "My name is Philipp and I live in Germany."
>>> pred = onnx_extractor(text)
```
"""
@add_start_docstrings(
"""
Onnx Model for feature-extraction tasks.
""",
ONNX_MODEL_START_DOCSTRING,
)
class ORTModelForFeatureExtraction(ORTModel):
"""
Feature Extraction model for ONNX.
"""
# used in from_transformers to export model to onnx
pipeline_task = "default"
auto_model_class = AutoModel
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# create {name:idx} dict for model outputs
self.model_outputs = {output_key.name: idx for idx, output_key in enumerate(self.model.get_outputs())}
@add_start_docstrings_to_model_forward(
ONNX_INPUTS_DOCSTRING.format("batch_size, sequence_length")
+ FEAUTRE_EXTRACTION_SAMPLE.format(
processor_class=_TOKENIZER_FOR_DOC,
model_class="ORTModelForFeatureExtraction",
checkpoint="optimum/all-MiniLM-L6-v2",
)
)
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
**kwargs,
):
# converts pytorch inputs into numpy inputs for onnx
onnx_inputs = {
"input_ids": input_ids.cpu().detach().numpy(),
"attention_mask": attention_mask.cpu().detach().numpy(),
}
if token_type_ids is not None:
onnx_inputs["token_type_ids"] = token_type_ids.cpu().detach().numpy()
self, func, name=None, multioutput="uniform_average", sp=1, square_root=False
):
self.square_root = square_root
super().__init__(func=func, name=name, multioutput=multioutput, sp=sp)
class _PercentageForecastingErrorMetric(
_PercentageErrorMixin, BaseForecastingErrorMetricFunc
):
def __init__(self, func, name=None, multioutput="uniform_average", symmetric=True):
self.symmetric = symmetric
super().__init__(func=func, name=name, multioutput=multioutput)
class _SquaredForecastingErrorMetric(
_SquaredErrorMixin, BaseForecastingErrorMetricFunc
):
def __init__(
self, func, name=None, multioutput="uniform_average", square_root=False
):
self.square_root = square_root
super().__init__(func=func, name=name, multioutput=multioutput)
class _SquaredPercentageForecastingErrorMetric(
_SquaredPercentageErrorMixin, BaseForecastingErrorMetricFunc
):
def __init__(
self,
func,
name=None,
multioutput="uniform_average",
square_root=False,
symmetric=True,
):
self.square_root = square_root
self.symmetric = symmetric
super().__init__(func=func, name=name, multioutput=multioutput)
class _AsymmetricForecastingErrorMetric(
_AsymmetricErrorMixin, BaseForecastingErrorMetricFunc
):
def __init__(
self,
func,
name=None,
multioutput="uniform_average",
asymmetric_threshold=0,
left_error_function="squared",
right_error_function="absolute",
left_error_penalty=1.0,
right_error_penalty=1.0,
):
self.asymmetric_threshold = asymmetric_threshold
self.left_error_function = left_error_function
self.right_error_function = right_error_function
self.left_error_penalty = left_error_penalty
self.right_error_penalty = right_error_penalty
super().__init__(func=func, name=name, multioutput=multioutput)
class _LinexForecastingErrorMetric(_LinexErrorMixin, BaseForecastingErrorMetricFunc):
def __init__(
self,
func,
name=None,
multioutput="uniform_average",
a=1.0,
b=1.0,
):
self.a = a
self.b = b
super().__init__(func=func, name=name, multioutput=multioutput)
class _RelativeLossForecastingErrorMetric(
_RelativeLossMixin, BaseForecastingErrorMetricFunc
):
_tags = {
"requires-y-train": False,
"requires-y-pred-benchmark": True,
"univariate-only": False,
}
def __init__(
self,
func,
name=None,
multioutput="uniform_average",
relative_loss_function=mean_absolute_error,
):
self.relative_loss_function = relative_loss_function
super().__init__(func=func, name=name, multioutput=multioutput)
def make_forecasting_scorer(
func, name=None, greater_is_better=False, multioutput="uniform_average"
):
"""Create a metric class from metric functions.
Parameters
----------
func
Function to convert to a forecasting scorer class.
Score function (or loss function) with signature ``func(y, y_pred, **kwargs)``.
name : str, default=None
Name to use for the forecasting scorer loss class.
greater_is_better : bool, default=False
If True then maximizing the metric is better.
If False then minimizing the metric is better.
multioutput : {'raw_values', 'uniform_average'} or array-like of shape \
(n_outputs,), default='uniform_average'
Defines how to aggregate metric for multivariate (multioutput) data.
If array-like, values used as weights to average the errors.
If 'raw_values', returns a full set of errors in case of multioutput input.
If 'uniform_average', errors of all outputs are averaged with uniform weight.
Returns
-------
scorer:
Metric class that can be used as forecasting scorer.
"""
if greater_is_better:
return _BaseForecastingScoreMetric(func, name=name, multioutput=multioutput)
else:
return _BaseForecastingErrorMetric(func, name=name, multioutput=multioutput)
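# Hedged usage sketch for make_forecasting_scorer: wrap a plain loss function with
# signature func(y_true, y_pred, **kwargs) into a scorer class; `my_loss` is made up.
#
#   import numpy as np
#   def my_loss(y_true, y_pred, **kwargs):
#       return float(np.mean(np.abs(np.asarray(y_true) - np.asarray(y_pred))))
#   my_scorer = make_forecasting_scorer(my_loss, name="MyLoss", greater_is_better=False)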
class MeanAbsoluteScaledError(_ScaledForecastingErrorMetric):
"""Mean absolute scaled error (MASE).
MASE output is non-negative floating point. The best value is 0.0.
Like other scaled performance metrics, this scale-free error metric can be
used to compare forecast methods on a single series and also to compare
forecast accuracy between series.
This metric is well suited to intermittent-demand series because it
will not give infinite or undefined values unless the training data
is a flat timeseries. In this case the function returns a large value
instead of inf.
Works with multioutput (multivariate) timeseries data
with homogeneous seasonal periodicity.
Parameters
----------
sp : int, default = 1
Seasonal periodicity of the data
multioutput : {'raw_values', 'uniform_average'} or array-like of shape \
(n_outputs,), default='uniform_average'
Defines how to aggregate metric for multivariate (multioutput) data.
If array-like, values used as weights to average the errors.
If 'raw_values', returns a full set of errors in case of multioutput input.
If 'uniform_average', errors of all outputs are averaged with uniform weight.
Attributes
----------
name : str
The name of the loss metric
greater_is_better : bool
Stores whether the metric is optimized by minimization or maximization.
If False, minimizing the metric is optimal.
If True, maximizing the metric is optimal.
sp : int
Stores seasonal periodicity of data.
multioutput : str
Stores how the metric should aggregate multioutput data.
See Also
--------
MedianAbsoluteScaledError
MeanSquaredScaledError
MedianSquaredScaledError
References
----------
<NAME> and <NAME>. (2006). "Another look at measures of
forecast accuracy", International Journal of Forecasting, Volume 22, Issue 4.
<NAME>. (2006). "Another look at forecast accuracy metrics
for intermittent demand", Foresight, Issue 4.
<NAME>., <NAME>. and <NAME>. (2020)
"The M4 Competition: 100,000 time series and 61 forecasting methods",
International Journal of Forecasting, Volume 3.
Examples
--------
>>> import numpy as np
>>> from sktime.performance_metrics.forecasting import MeanAbsoluteScaledError
>>> y_train = np.array([5, 0.5, 4, 6, 3, 5, 2])
>>> y_true = np.array([3, -0.5, 2, 7, 2])
>>> y_pred = np.array([2.5, 0.0, 2, 8, 1.25])
>>> mase = MeanAbsoluteScaledError()
>>> mase(y_true, y_pred, y_train=y_train)
0.18333333333333335
>>> y_train = np.array([[0.5, 1], [-1, 1], [7, -6]])
>>> y_true = np.array([[0.5, 1], [-1, 1], [7, -6]])
>>> y_pred = np.array([[0, 2], [-1, 2], [8, -5]])
>>> mase(y_true, y_pred, y_train=y_train)
0.18181818181818182
>>> mase = MeanAbsoluteScaledError(multioutput='raw_values')
>>> mase(y_true, y_pred, y_train=y_train)
array([0.10526316, 0.28571429])
>>> mase = MeanAbsoluteScaledError(multioutput=[0.3, 0.7])
>>> mase(y_true, y_pred, y_train=y_train)
0.21935483870967742
"""
def __init__(self, multioutput="uniform_average", sp=1):
name = "MeanAbsoluteScaledError"
func = mean_absolute_scaled_error
super().__init__(func=func, name=name, multioutput=multioutput, sp=sp)
class MedianAbsoluteScaledError(_ScaledForecastingErrorMetric):
"""Median absolute scaled error (MdASE).
MdASE output is non-negative floating point. The best value is 0.0.
Taking the median instead of the mean of the test and train absolute errors
makes this metric more robust to error outliers since the median tends
to be a more robust measure of central tendency in the presence of outliers.
Like MASE and other scaled performance metrics this scale-free metric can be
used to compare forecast methods on a single series or between series.
Also like MASE, this metric is well suited to intermittent-demand series
because it will not give infinite or undefined values unless the training
data is a flat timeseries. In this case the function returns a large value
instead of inf.
Works with multioutput (multivariate) timeseries data
with homogeneous seasonal periodicity.
Parameters
----------
sp : int, default = 1
Seasonal periodicity of data.
multioutput : {'raw_values', 'uniform_average'} or array-like of shape \
(n_outputs,), default='uniform_average'
Defines how to aggregate metric for multivariate (multioutput) data.
If array-like, values used as weights to average the errors.
If 'raw_values', returns a full set of errors in case of multioutput input.
If 'uniform_average', errors of all outputs are averaged with uniform weight.
Attributes
----------
name : str
The name of the loss metric
greater_is_better : bool
Stores whether the metric is optimized by minimization or maximization.
If False, minimizing the metric is optimal.
If True, maximizing the metric is optimal.
sp : int
Stores seasonal periodicity of data.
multioutput : str
Stores how the metric should aggregate multioutput data.
See Also
--------
MeanAbsoluteScaledError
MeanSquaredScaledError
MedianSquaredScaledError
References
----------
<NAME> and <NAME>. (2006). "Another look at measures of
forecast accuracy", International Journal of Forecasting, Volume 22, Issue 4.
<NAME>. (2006). "Another look at forecast accuracy metrics
for intermittent demand", Foresight, Issue 4.
<NAME>., <NAME>. and <NAME>. (2020)
"The M4 Competition: 100,000 time series and 61 forecasting methods",
International Journal of Forecasting, Volume 3.
Examples
--------
>>> import numpy as np
>>> from sktime.performance_metrics.forecasting import MedianAbsoluteScaledError
>>> y_train = np.array([5, 0.5, 4, 6, 3, 5, 2])
>>> y_true = np.array([3, -0.5, 2, 7])
>>> y_pred = np.array([2.5, 0.0, 2, 8])
>>> mdase = MedianAbsoluteScaledError()
>>> mdase(y_true, y_pred, y_train=y_train)
0.16666666666666666
>>> y_train = np.array([[0.5, 1], [-1, 1], [7, -6]])
>>> y_true = np.array([[0.5, 1], [-1, 1], [7, -6]])
>>> y_pred = np.array([[0, 2], [-1, 2], [8, -5]])
>>> mdase(y_true, y_pred, y_train=y_train)
0.18181818181818182
>>> mdase = MedianAbsoluteScaledError(multioutput='raw_values')
>>> mdase(y_true, y_pred, y_train=y_train)
array([0.10526316, 0.28571429])
>>> mdase = MedianAbsoluteScaledError(multioutput=[0.3, 0.7])
>>> mdase( y_true, y_pred, y_train=y_train)
0.21935483870967742
"""
def __init__(self, multioutput="uniform_average", sp=1):
name = "MedianAbsoluteScaledError"
func = median_absolute_scaled_error
super().__init__(func=func, name=name, multioutput=multioutput, sp=sp)
class MeanSquaredScaledError(_ScaledSquaredForecastingErrorMetric):
"""Mean squared scaled error (MSSE) or root mean squared scaled error (RMSSE).
Calculates MSSE when `square_root` is False and RMSSE when `square_root` is True.
Both MSSE and RMSSE outputs are non-negative floating
point. The best value is 0.0.
This is a squared variant of the MASE loss metric. Like MASE and other
scaled performance metrics this scale-free metric can be used to compare
forecast methods on a single series or between series.
This metric is also suited for intermittent-demand series because it
will not give infinite or undefined values unless the training data
is a flat timeseries. In this case the function returns a large value
instead of inf.
Works with multioutput (multivariate) timeseries data
with homogeneous seasonal periodicity.
Parameters
----------
sp : int, default = 1
Seasonal periodicity of data.
square_root : bool, default = False
Whether to take the square root of the metric
multioutput : {'raw_values', 'uniform_average'} or array-like of shape \
(n_outputs,), default='uniform_average'
Defines how to aggregate metric for multivariate (multioutput) | |
points for inequality data minimal bound
:param vIneqMin: (1-dimensional array or float or None) value
at conditioning points for inequality data minimal bound
(same type as xIneqMin)
:param xIneqMax: (1-dimensional array or float or None) coordinate of
conditioning points for inequality data maximal bound
:param vIneqMax: (1-dimensional array or float or None) value
at conditioning points for inequality data maximal bound
(same type as xIneqMax)
:param mask: (nd-array of ints, or None) if given, mask values
over the SG: 1 for simulated cell / 0 for not simulated
cell (number of entries should be equal to the number of
grid cells)
:param searchRadiusRelative:
(float) controlling how restrictive the search ellipsoid is (should be positive):
let r_i be the ranges of the covariance model along its main axes,
if x is a node to be simulated, a node y is taken into account iff it is
within the ellipsoid centered at x of half-axes searchRadiusRelative * r_i
Note:
- if a range is a variable parameter, its maximal value over the simulation grid
is considered
:param nneighborMax:(int) maximum number of nodes retrieved from the search ellipsoid,
set -1 for unlimited
:param searchNeighborhoodSortMode:
(int) indicating how to sort the search neighborhood nodes
(neighbors), they are sorted in increasing order according to:
- searchNeighborhoodSortMode = 0:
distance in the usual axes system
- searchNeighborhoodSortMode = 1:
distance in the axes system supporting the covariance model
and accounting for anisotropy given by the ranges
- searchNeighborhoodSortMode = 2:
minus the evaluation of the covariance model
Note:
- if the covariance model has any variable parameter (non-stationary),
then searchNeighborhoodSortMode = 2 is not allowed
- if the covariance model has any range or angle set as a variable parameter,
then searchNeighborhoodSortMode must be set to 0
- greatest possible value as default
:param nGibbsSamplerPath:
(int) number of Gibbs sampler paths to deal with inequality data
the conditioning locations with inequality data are first simulated
(with truncated gaussian distribution) sequentially; then, these
locations are re-simulated following a new path as many times as
desired; this parameter (nGibbsSamplerPath) is the total number
of path(s)
:param seed: (int or None) initial seed, if None an initial seed between
1 and 999999 is generated with numpy.random.randint
:param outputReportFile:
(string or None) name of the report file, if None: no report file
:param nthreads:
(int) number of thread(s) to use for "GeosClassicSim" program (C),
(if nthreads = -n <= 0, the maximal number of threads minus n is used,
but at least 1)
:param verbose:
(int) indicates what is displayed during the GeosClassicSim run:
- 0: minimal display
- 1: version and warning(s) encountered
- 2 (or >1): version, progress, and warning(s) encountered
:return geosclassic_output: (dict)
{'image':image,
'nwarning':nwarning,
'warnings':warnings}
image: (Img (class)) output image, with image.nv=nreal variables (each
variable is one realization)
(image is None if mpds_geosClassicOutput->outputImage is NULL)
nwarning:
(int) total number of warning(s) encountered
(same warnings can be counted several times)
warnings:
(list of strings) list of distinct warnings encountered
(can be empty)
"""
# --- Set grid geometry and varname
# Set grid geometry
nx, ny, nz = dimension, 1, 1
sx, sy, sz = spacing, 1.0, 1.0
ox, oy, oz = origin, 0.0, 0.0
nxy = nx * ny
nxyz = nxy * nz
# spatial dimension
space_dim = 1
# Set varname
varname = 'V0'
# --- Check and prepare parameters
# cov_model
if not isinstance(cov_model, gcm.CovModel1D):
print("ERROR (SIMUL_1D): 'cov_model' (first argument) is not valid")
return None
for el in cov_model.elem:
# weight
w = el[1]['w']
if np.size(w) != 1 and np.size(w) != nxyz:
print("ERROR (SIMUL_1D): 'cov_model': weight ('w') not compatible with simulation grid")
return None
# ranges
if 'r' in el[1].keys():
r = el[1]['r']
if np.size(r) != 1 and np.size(r) != nxyz:
print("ERROR (SIMUL_1D): 'cov_model': range ('r') not compatible with simulation grid")
return None
# additional parameter (s)
if 's' in el[1].keys():
s = el[1]['s']
if np.size(s) != 1 and np.size(s) != nxyz:
print("ERROR (SIMUL_1D): 'cov_model': parameter ('s') not compatible with simulation grid")
return None
# method
# computationMode=0: GEOS_CLASSIC_OK
# computationMode=1: GEOS_CLASSIC_SK
# computationMode=2: GEOS_CLASSIC_SIM_OK
# computationMode=3: GEOS_CLASSIC_SIM_SK
# if method not in ('simple_kriging', 'ordinary_kriging'):
# print("ERROR (SIMUL_1D): 'method' is not valid")
# return None
if method == 'simple_kriging':
computationMode = 3
elif method == 'ordinary_kriging':
computationMode = 2
else:
print("ERROR (SIMUL_1D): 'method' is not valid")
return None
# data points: x, v, xIneqMin, vIneqMin, xIneqMax, vIneqMax
dataPointSet = []
# data point set from x, v
if x is not None:
x = np.asarray(x, dtype='float').reshape(-1) # cast in 1-dimensional array if needed
v = np.asarray(v, dtype='float').reshape(-1) # cast in 1-dimensional array if needed
if len(v) != x.shape[0]:
print("ERROR (SIMUL_1D): length of 'v' is not valid")
return None
xc = x
yc = np.ones_like(xc) * oy + 0.5 * sy
zc = np.ones_like(xc) * oz + 0.5 * sz
dataPointSet.append(
PointSet(npt=v.shape[0], nv=4, val=np.array((xc, yc, zc, v)), varname=['X', 'Y', 'Z', varname])
)
# data point set from xIneqMin, vIneqMin
if xIneqMin is not None:
xIneqMin = np.asarray(xIneqMin, dtype='float').reshape(-1) # cast in 1-dimensional array if needed
vIneqMin = np.asarray(vIneqMin, dtype='float').reshape(-1) # cast in 1-dimensional array if needed
if len(vIneqMin) != xIneqMin.shape[0]:
print("ERROR (SIMUL_1D): length of 'vIneqMin' is not valid")
return None
xc = xIneqMin
yc = np.ones_like(xc) * oy + 0.5 * sy
zc = np.ones_like(xc) * oz + 0.5 * sz
dataPointSet.append(
PointSet(npt=vIneqMin.shape[0], nv=4, val=np.array((xc, yc, zc, vIneqMin)), varname=['X', 'Y', 'Z', '{}_min'.format(varname)])
)
# data point set from xIneqMax, vIneqMax
if xIneqMax is not None:
xIneqMax = np.asarray(xIneqMax, dtype='float').reshape(-1) # cast in 1-dimensional array if needed
vIneqMax = np.asarray(vIneqMax, dtype='float').reshape(-1) # cast in 1-dimensional array if needed
if len(vIneqMax) != xIneqMax.shape[0]:
print("ERROR (SIMUL_1D): length of 'vIneqMax' is not valid")
return None
xc = xIneqMax
yc = np.ones_like(xc) * oy + 0.5 * sy
zc = np.ones_like(xc) * oz + 0.5 * sz
dataPointSet.append(
PointSet(npt=vIneqMax.shape[0], nv=4, val=np.array((xc, yc, zc, vIneqMax)), varname=['X', 'Y', 'Z', '{}_max'.format(varname)])
)
# Check parameters - mask
if mask is not None:
try:
mask = np.asarray(mask).reshape(nz, ny, nx)
except:
print("ERROR (SIMUL_1D): 'mask' is not valid")
return None
# Check parameters - searchRadiusRelative
if searchRadiusRelative < geosclassic.MPDS_GEOSCLASSIC_SEARCHRADIUSRELATIVE_MIN:
print("ERROR (SIMUL_1D): 'searchRadiusRelative' too small (should be at least {})".format(geosclassic.MPDS_GEOSCLASSIC_SEARCHRADIUSRELATIVE_MIN))
return None
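# Worked example of the search-ellipsoid rule from the docstring (numbers are
# illustrative): with a 1D covariance model of range r = 10.0 and
# searchRadiusRelative = 1.2, a conditioning node y is considered a neighbor of a
# node x to be simulated only if |y - x| <= 1.2 * 10.0 = 12.0.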
# Check parameters - nneighborMax
if nneighborMax != -1 and nneighborMax <= 0:
print("ERROR (SIMUL_1D): 'nneighborMax' should be greater than 0 or equal to -1 (unlimited)")
return None
# Check parameters - searchNeighborhoodSortMode
if searchNeighborhoodSortMode is None:
# set greatest possible value
if cov_model.is_stationary():
searchNeighborhoodSortMode = 2
elif cov_model.is_orientation_stationary() and cov_model.is_range_stationary():
searchNeighborhoodSortMode = 1
else:
searchNeighborhoodSortMode = 0
else:
if searchNeighborhoodSortMode == 2:
if not cov_model.is_stationary():
print("ERROR (SIMUL_1D): 'searchNeighborhoodSortMode=2' not allowed with non-stationary covariance model")
return None
elif searchNeighborhoodSortMode == 1:
if not cov_model.is_orientation_stationary() or not cov_model.is_range_stationary():
print("ERROR (SIMUL_1D): 'searchNeighborhoodSortMode=1' not allowed with non-stationary range or non-stationary orientation in covariance model")
return None
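    # Summary inferred from the checks above: sort mode 2 assumes a fully stationary
    # covariance model, mode 1 assumes stationary range and orientation, and mode 0
    # makes no stationarity assumption.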
# Check parameters - mean
if mean is not None:
# if method == 'ordinary_kriging':
# print("ERROR (SIMUL_1D): specifying 'mean' not allowed with ordinary kriging")
# return None
mean = np.asarray(mean, dtype='float').reshape(-1) # cast in 1-dimensional array if needed
if mean.size not in (1, nxyz):
print("ERROR (SIMUL_1D): size of 'mean' is not valid")
return None
# Check parameters - var
if var is not None:
if method == 'ordinary_kriging':
print("ERROR (SIMUL_1D): specifying 'var' not allowed with ordinary kriging")
return None
var = np.asarray(var, dtype='float').reshape(-1) # cast in 1-dimensional array if needed
if var.size not in (1, nxyz):
print("ERROR (SIMUL_1D): size of 'var' is not valid")
return None
# Check parameters - nreal
nreal = int(nreal) # cast to int if needed
if nreal <= 0:
if verbose >= 1:
print('SIMUL_1D: nreal <= 0: nothing to do!')
return None
# --- Fill mpds_geosClassicInput structure (C)
mpds_geosClassicInput, flag = fill_mpds_geosClassicInput(
space_dim,
cov_model,
nx, ny, nz,
sx, sy, sz,
ox, oy, oz,
varname,
outputReportFile,
computationMode,
None,
dataPointSet,
mask,
import numpy as np
from scipy.stats import norm
import pandas
from tqdm import tqdm
import random
import torch
import statsmodels.api as sm
import statsmodels.formula.api as smf
# Load kcgof for testing conditional
import kcgof.util as util
import kcgof.cdensity as cden
import kcgof.cgoftest as cgof
import kcgof.kernel as ker
# Helper functions
e = lambda n=1: np.random.normal(size=(n,1)) # Gaussian noise
def cb(*args): return np.concatenate(args, axis=1) # Column bind
p = norm.pdf
class ShiftedTester():
"""
Shifted tester for resampling and applying test statistic to resample.
Inputs:
- weight: function, taking as input data, X, and returning an array of weights for each row
- T: function, taking as input data and returning a p-value
- rate: function, taking as input n, and returning the rate of resampling m
- replacement: boolean, indicating whether or not resampling is with replacement
- degenerate: string [raise, retry, ignore], specifying handling of degenerate resamples
"""
def __init__(self, weight, T, rate=lambda n: n**0.45, replacement=False,
degenerate="raise", reject_retries=100, verbose=False,
gibbs_steps=10, alternative_sampler=False):
self.weight, self.rate, self.T = weight, rate, T
self.replacement = replacement
self.degenerate = degenerate
self.reject_retries = reject_retries
self.gibbs_steps = gibbs_steps
self.verbose = verbose
self.alternative_sampler = alternative_sampler
# When degenerates are handled with retries, we count the retries, as to avoid infinite recursion
self.retries = 0
# Initiate latest_resample variable, which allows for optionally storing last resample
self.latest_resample = None
def resample(self, X, replacement=None, m=None, store_last=False):
"""
Resampling function that returns a weighted resample of X
"""
# Potentially overwrite default replacement
replacement = replacement if replacement is not None else self.replacement
# Compute sample and resample size
n = X.shape[0]
m = int(self.rate(n)) if m is None else m
# Draw weights
if callable(self.weight):
w = self.weight(X)
else:
w = self.weight
w /= w.sum()
# Resample with modified replacement scheme:
# Sample w replace, but reject if non-distinct
if replacement == "REPL-reject":
idx = np.random.choice(n, size=m, p=w, replace=True)
count = 0
while count < self.reject_retries and (len(np.unique(idx)) != len(idx)):
count += 1
idx = np.random.choice(n, size=m, p=w, replace=True)
if self.verbose:
print(f"Rejections: {count}")
if (len(np.unique(idx)) != len(idx)):
if self.alternative_sampler == "error":
raise ValueError("Unable to draw sample from REPL rejection sampler")
else:
return self.resample(X, replacement=self.alternative_sampler, m=m, store_last=store_last)
elif replacement == "NO-REPL-gibbs":
# Initialize space
space = np.arange(n)
            # Initialize Gibbs sampler in NO-REPL distribution and shuffle to mimic the target distribution
idx = np.random.choice(space, size=m, p=w, replace=False)
np.random.shuffle(idx)
# Loop, sampling from conditional
for _ in range(self.gibbs_steps):
for j, i in (tqdm(enumerate(idx)) if self.verbose else enumerate(idx)):
retain = np.delete(idx, j)
vacant = np.setdiff1d(space, retain)
idx[j] = np.random.choice(vacant, 1, p=w[vacant]/w[vacant].sum())
# #TODO: New sampler
# vacant = list(np.setdiff1d(space, idx))
# w_vacant = w[vacant].sum()
# for _ in range(self.gibbs_steps):
# for j, i in (tqdm(enumerate(idx), total=m) if self.verbose else enumerate(idx)):
# # Add popped index to vacant
# vacant.append(i)
# w_vacant += w[i]
# # Sample new index
# idx[j] = np.random.default_rng().choice(vacant, 1, p=w[vacant]/w_vacant)
# # Remove new index from vacant
# vacant.remove(idx[j])
# w_vacant -= w[idx[j]]
elif replacement == "NO-REPL-reject":
# Denominator for rejection sampler is smallest weights
m_smallest = np.cumsum(w[np.argsort(w)][:(m-1)])
# Sample from proposal, and compute bound p/Mq
count = 0
idx = np.random.choice(n, size=m, p=w, replace=False)
bound = np.prod(1 - np.cumsum(w[idx])[:-1])/np.prod(1 - m_smallest)
while ((np.random.uniform() > bound) and count < self.reject_retries):
count += 1
idx = np.random.choice(n, size=m, p=w, replace=False)
bound = np.prod(1 - np.cumsum(w[idx])[:-1])/np.prod(1 - m_smallest)
if count == self.reject_retries:
if self.alternative_sampler == "error":
raise ValueError("Unable to draw sample from NO-REPL rejection sampler")
else:
return self.resample(X, replacement=self.alternative_sampler, m=m, store_last=store_last)
# If nothing else, just sample with or without replacement
else:
idx = np.random.choice(n, size=m, p=w, replace=replacement)
if isinstance(X, pandas.core.frame.DataFrame):
out = X.loc[idx]
elif isinstance(X, np.ndarray):
out = X[idx]
else:
            raise TypeError("Incorrect data format provided. Please provide either a pandas DataFrame or a NumPy array")
# Handling the situation with only a single data point drawn
unique_draws = len(np.unique(idx))
if replacement and unique_draws == 1:
if self.degenerate == "raise": raise ValueError("Degenerate resample drawn!")
elif self.degenerate == "retry":
self.retries = self.retries + 1
if self.retries < 10:
return self.resample(X, replacement)
else:
self.retries = 0
raise ValueError("Degenerate resample drawn!")
elif self.degenerate == "ignore": return out
if store_last:
self.latest_resample = out
return out
def test(self, X, replacement=None, m=None, store_last=False):
# Resample data
X_m = self.resample(X, replacement, m=m, store_last=store_last)
# Apply test statistic
return self.T(X_m)
def kernel_conditional_validity(self, X, cond, j_x, j_y, return_p=False):
"""
Test that resampled data has the correct conditional
X:
Data (resampled) in numpy format
cond(x,y):
torch function taking as input torch arrays x, y and
outputting the log conditional density log p(y|x)
j_y, j_x:
Lists specifying which columns in X are respectively y and x.
E.g. j_x = [0, 1, 2], j_y = [3]
return_p:
If True, returns p-value, else 0-1 indicator of rejection
"""
# Convert input data to torch
x, y = torch.from_numpy(X[:,j_x]).float(), torch.from_numpy(X[:,j_y]).float()
dx, dy = len(j_x), len(j_y)
# Specify conditional model
cond_ = cden.from_log_den(dx, dy, cond)
# Choose kernel bandwidth
sigx = util.pt_meddistance(x, subsample=1000, seed=2)
sigy = util.pt_meddistance(y, subsample=1000, seed=3)
k = ker.PTKGauss(sigma2=sigx**2)
l = ker.PTKGauss(sigma2=sigy**2)
# Create kernel object
kcsdtest = cgof.KCSDTest(cond_, k, l, alpha=0.05, n_bootstrap=500)
# Compute output
result = kcsdtest.perform_test(x, y)
if return_p: return result['pvalue']
return 1*result['h0_rejected']
def gaussian_validity(self, X, cond, j_x, j_y, const=None, return_p=False):
"""
Test that resampled data has the correct conditional
X:
Data (resampled) in numpy format
cond:
List containing linear conditional mean of y|x.
E.g. if y = 2*x1 + 3*x2, should be cond=[2, 3]
j_y, j_x:
Lists specifying which columns in X are respectively y and x.
E.g. j_x = [0, 1, 2], j_y = [3]
const:
Intercept in target distribution. Default is None
return_p:
If True, returns p-value, else 0-1 indicator of rejection
"""
x, y = X[:,j_x], X[:,j_y]
if const=="fit":
tests = [f"(x{i+1} = {b})" for i, b in enumerate(cond)]
p_val = sm.OLS(y, sm.add_constant(x)).fit().f_test(tests).pvalue
elif const is not None:
tests = [f"(const = {const})"] + [f"(x{i+1} = {b})" for i, b in enumerate(cond)]
p_val = sm.OLS(y, sm.add_constant(x)).fit().f_test(tests).pvalue
else:
tests = [f"(x{i+1} = {b})" for i, b in enumerate(cond)]
p_val = sm.OLS(y, x).fit().f_test(tests).pvalue
if return_p: return p_val
return 1*(p_val < 0.05)
def binary_validity(self, X, cond, j_x, j_y, const=None, return_p=False):
"""
Test that resampled data has the correct conditional
X:
Data (resampled) in numpy format
cond:
Dictionary {tuple: float} containing conditional distribution of x|y
- Dictionary keys are all possible outcomes of x, e.g. x1=1, x2=0 -> (1, 0). Ordering of the tuple should be the same as of j_x
- Dictionary values specify the probability of y=1 given x.
E.g. if P(y=1|x1=0, x2=0) = 0.3, P(y=1|x1=0, x2=1) = 0.9, cond should be {(0, 0): 0.3, (0, 1): 0.9}
j_y, j_x:
Lists specifying which columns in X are respectively y and x.
E.g. j_x = [0, 1, 2], j_y = [3]
return_p:
If True, returns p-value, else 0-1 indicator of rejection
"""
# Convert X to data frame
        df = pandas.DataFrame(X[:,j_y + j_x], columns = ["x"+str(i) for i in j_y + j_x])
# Setup formula for statsmodels
formula = f"x{j_y[0]}~" + ":".join(f"C(x{i})" for i in j_x) + "-1"
# Convert the conditional distribution to log-odds
        log_odds = {k: np.log(p/(1-p)) for k, p in cond.items()}
# Create list of tests to conduct
tests = [":".join([f"C(x{idx})[{val}]" for idx, val in zip(j_x, outcome)]) + f"={p}" for outcome,p in log_odds.items()]
# Conduct F-test and get p-value
p_val = smf.logit(formula=formula, data=df).fit(disp=0).f_test(tests).pvalue
if return_p: return p_val
return 1*(p_val < 0.05)
def tune_m(self, X, cond, j_x, j_y, gaussian=False, binary=False, m_init = None,
m_factor=2, p_cutoff=0.1, repeats=100, const=None, replacement=None):
# Initialize parameters
n = X.shape[0]
m = int(np.sqrt(n)/2) if m_init is None else m_init
replacement = self.replacement if replacement is None else replacement
res = [1]
# print(f"n={n},m={m}")
# Loop over increasing m as long as level is below 10%
while (np.min(res) > p_cutoff) and (m < n):
# m = int(2*m)
res = []
for _ in tqdm(range(repeats)) if self.verbose else range(repeats):
if gaussian:
z = self.gaussian_validity(self.resample(X, m=int(min(m_factor*m, n)), replacement=replacement), cond=cond, j_x=j_x, j_y=j_y, const=const, return_p = True)
elif binary:
                    z = self.binary_validity(self.resample(X, m=int(min(m_factor*m, n)), replacement=replacement), cond=cond, j_x=j_x, j_y=j_y, return_p=True)
fixed ip on network %s', network['uuid'],
instance_uuid=instance_id)
self._allocate_fixed_ips(context, instance_id, host, [network])
return self.get_instance_nw_info(context, instance_id, rxtx_factor,
host)
# NOTE(russellb) This method can be removed in 2.0 of this API. It is
# deprecated in favor of the method in the base API.
def get_backdoor_port(self, context):
"""Return backdoor port for eventlet_backdoor."""
return self.backdoor_port
def remove_fixed_ip_from_instance(self, context, instance_id, host,
address, rxtx_factor=None):
"""Removes a fixed ip from an instance from specified network."""
LOG.debug('Remove fixed ip %s', address, instance_uuid=instance_id)
fixed_ips = objects.FixedIPList.get_by_instance_uuid(context,
instance_id)
for fixed_ip in fixed_ips:
if str(fixed_ip.address) == address:
self.deallocate_fixed_ip(context, address, host)
# NOTE(vish): this probably isn't a dhcp ip so just
# deallocate it now. In the extremely rare
# case that this is a race condition, we
# will just get a warn in lease or release.
if not fixed_ip.leased:
fixed_ip.disassociate()
return self.get_instance_nw_info(context, instance_id,
rxtx_factor, host)
raise exception.FixedIpNotFoundForSpecificInstance(
instance_uuid=instance_id, ip=address)
def _validate_instance_zone_for_dns_domain(self, context, instance):
if not self.instance_dns_domain:
return True
instance_domain = self.instance_dns_domain
domainref = objects.DNSDomain.get_by_domain(context, instance_domain)
if domainref is None:
LOG.warning(_LW('instance-dns-zone not found |%s|.'),
instance_domain, instance=instance)
return True
dns_zone = domainref.availability_zone
instance_zone = instance.get('availability_zone')
if dns_zone and (dns_zone != instance_zone):
LOG.warning(_LW('instance-dns-zone is |%(domain)s|, '
'which is in availability zone |%(zone)s|. '
'Instance is in zone |%(zone2)s|. '
'No DNS record will be created.'),
{'domain': instance_domain,
'zone': dns_zone,
'zone2': instance_zone},
instance=instance)
return False
else:
return True
def allocate_fixed_ip(self, context, instance_id, network, **kwargs):
"""Gets a fixed ip from the pool."""
# TODO(vish): when this is called by compute, we can associate compute
# with a network, or a cluster of computes with a network
# and use that network here with a method like
# network_get_by_compute_host
address = None
# NOTE(vish) This db query could be removed if we pass az and name
# (or the whole instance object).
instance = objects.Instance.get_by_uuid(context, instance_id)
LOG.debug('Allocate fixed ip on network %s', network['uuid'],
instance=instance)
# A list of cleanup functions to call on error
cleanup = []
# Check the quota; can't put this in the API because we get
# called into from other places
quotas = self.quotas_cls(context=context)
quota_project, quota_user = quotas_obj.ids_from_instance(context,
instance)
try:
quotas.reserve(fixed_ips=1, project_id=quota_project,
user_id=quota_user)
cleanup.append(functools.partial(quotas.rollback, context))
except exception.OverQuota as exc:
usages = exc.kwargs['usages']
used = (usages['fixed_ips']['in_use'] +
usages['fixed_ips']['reserved'])
LOG.warning(_LW("Quota exceeded for project %(pid)s, tried to "
"allocate fixed IP. %(used)s of %(allowed)s are "
"in use or are already reserved."),
{'pid': quota_project, 'used': used,
'allowed': exc.kwargs['quotas']['fixed_ips']},
instance_uuid=instance_id)
raise exception.FixedIpLimitExceeded()
try:
if network['cidr']:
# NOTE(mriedem): allocate the vif before associating the
# instance to reduce a race window where a previous instance
# was associated with the fixed IP and has released it, because
# release_fixed_ip will disassociate if allocated is False.
vif = objects.VirtualInterface.get_by_instance_and_network(
context, instance_id, network['id'])
if vif is None:
LOG.debug('vif for network %(network)s is used up, '
'trying to create new vif',
{'network': network['id']}, instance=instance)
vif = self._add_virtual_interface(context,
instance_id, network['id'])
address = kwargs.get('address', None)
if address:
LOG.debug('Associating instance with specified fixed IP '
'%(address)s in network %(network)s on subnet '
'%(cidr)s.' %
{'address': address, 'network': network['id'],
'cidr': network['cidr']},
instance=instance)
fip = objects.FixedIP.associate(
context, str(address), instance_id, network['id'],
vif_id=vif.id)
else:
LOG.debug('Associating instance with fixed IP from pool '
'in network %(network)s on subnet %(cidr)s.' %
{'network': network['id'],
'cidr': network['cidr']},
instance=instance)
fip = objects.FixedIP.associate_pool(
context.elevated(), network['id'], instance_id,
vif_id=vif.id)
LOG.debug('Associated instance with fixed IP: %s', fip,
instance=instance)
address = str(fip.address)
cleanup.append(functools.partial(fip.disassociate, context))
LOG.debug('Refreshing security group members for instance.',
instance=instance)
self._do_trigger_security_group_members_refresh_for_instance(
instance_id)
cleanup.append(functools.partial(
self._do_trigger_security_group_members_refresh_for_instance, # noqa
instance_id))
name = instance.display_name
if self._validate_instance_zone_for_dns_domain(context, instance):
self.instance_dns_manager.create_entry(
name, str(fip.address), "A", self.instance_dns_domain)
cleanup.append(functools.partial(
self.instance_dns_manager.delete_entry,
name, self.instance_dns_domain))
self.instance_dns_manager.create_entry(
instance_id, str(fip.address), "A",
self.instance_dns_domain)
cleanup.append(functools.partial(
self.instance_dns_manager.delete_entry,
instance_id, self.instance_dns_domain))
LOG.debug('Setting up network %(network)s on host %(host)s.' %
{'network': network['id'], 'host': self.host},
instance=instance)
self._setup_network_on_host(context, network)
cleanup.append(functools.partial(
self._teardown_network_on_host,
context, network))
quotas.commit()
if address is None:
# TODO(mriedem): should _setup_network_on_host return the addr?
LOG.debug('Fixed IP is setup on network %s but not returning '
'the specific IP from the base network manager.',
network['uuid'], instance=instance)
else:
LOG.debug('Allocated fixed ip %s on network %s', address,
network['uuid'], instance=instance)
return address
except Exception:
with excutils.save_and_reraise_exception():
for f in cleanup:
try:
f()
except Exception:
LOG.warning(_LW('Error cleaning up fixed ip '
'allocation. Manual cleanup may '
'be required.'), exc_info=True)
def deallocate_fixed_ip(self, context, address, host=None, teardown=True,
instance=None):
"""Returns a fixed ip to the pool."""
fixed_ip_ref = objects.FixedIP.get_by_address(
context, address, expected_attrs=['network'])
instance_uuid = fixed_ip_ref.instance_uuid
vif_id = fixed_ip_ref.virtual_interface_id
LOG.debug('Deallocate fixed ip %s', address,
instance_uuid=instance_uuid)
if not instance:
# NOTE(vish) This db query could be removed if we pass az and name
# (or the whole instance object).
# NOTE(danms) We can't use fixed_ip_ref.instance because
# instance may be deleted and the relationship
# doesn't extend to deleted instances
instance = objects.Instance.get_by_uuid(
context.elevated(read_deleted='yes'), instance_uuid)
quotas = self.quotas_cls(context=context)
quota_project, quota_user = quotas_obj.ids_from_instance(context,
instance)
try:
quotas.reserve(fixed_ips=-1, project_id=quota_project,
user_id=quota_user)
except Exception:
LOG.exception(_LE("Failed to update usages deallocating "
"fixed IP"))
try:
self._do_trigger_security_group_members_refresh_for_instance(
instance_uuid)
if self._validate_instance_zone_for_dns_domain(context, instance):
for n in self.instance_dns_manager.get_entries_by_address(
address, self.instance_dns_domain):
self.instance_dns_manager.delete_entry(n,
self.instance_dns_domain)
fixed_ip_ref.allocated = False
fixed_ip_ref.save()
if teardown:
network = fixed_ip_ref.network
if CONF.force_dhcp_release:
dev = self.driver.get_dev(network)
# NOTE(vish): The below errors should never happen, but
# there may be a race condition that is causing
# them per
# https://code.launchpad.net/bugs/968457,
# so we log a message to help track down
# the possible race.
if not vif_id:
LOG.info(_LI("Unable to release %s because vif "
"doesn't exist"), address)
return
vif = objects.VirtualInterface.get_by_id(context, vif_id)
if not vif:
LOG.info(_LI("Unable to release %s because vif "
"object doesn't exist"), address)
return
# NOTE(cfb): Call teardown before release_dhcp to ensure
# that the IP can't be re-leased after a release
# packet is sent.
self._teardown_network_on_host(context, network)
# NOTE(vish): This forces a packet so that the
# release_fixed_ip callback will
# get called by nova-dhcpbridge.
try:
self.driver.release_dhcp(dev, address, vif.address)
except exception.NetworkDhcpReleaseFailed:
LOG.error(_LE("Error releasing DHCP for IP %(address)s"
" with MAC %(mac_address)s"),
{'address': address,
'mac_address': vif.address},
instance=instance)
# NOTE(yufang521247): This is probably a failed dhcp fixed
# ip. DHCPRELEASE packet sent to dnsmasq would not trigger
# dhcp-bridge to run. Thus it is better to disassociate
# such fixed ip here.
fixed_ip_ref = objects.FixedIP.get_by_address(
context, address)
if (instance_uuid == fixed_ip_ref.instance_uuid and
not fixed_ip_ref.leased):
LOG.debug('Explicitly disassociating fixed IP %s from '
'instance.', address,
instance_uuid=instance_uuid)
fixed_ip_ref.disassociate()
else:
# We can't try to free the IP address so just call teardown
self._teardown_network_on_host(context, network)
except Exception:
with excutils.save_and_reraise_exception():
try:
quotas.rollback()
except Exception:
LOG.warning(_LW("Failed to rollback quota for "
"deallocate fixed ip: %s"), address,
instance=instance)
# Commit the reservations
quotas.commit()
def lease_fixed_ip(self, context, address):
"""Called by dhcp-bridge when ip is leased."""
LOG.debug('Leased IP |%s|', address, context=context)
fixed_ip = objects.FixedIP.get_by_address(context, address)
if fixed_ip.instance_uuid is None:
LOG.warning(_LW('IP %s leased that is not associated'), address,
context=context)
return
fixed_ip.leased = True
fixed_ip.save()
if not fixed_ip.allocated:
LOG.warning(_LW('IP |%s| leased that isn\'t allocated'), address,
context=context, instance_uuid=fixed_ip.instance_uuid)
def release_fixed_ip(self, context, address, mac=None):
"""Called by dhcp-bridge when ip is released."""
LOG.debug('Released IP |%s|', address, context=context)
fixed_ip = objects.FixedIP.get_by_address(context, address)
if fixed_ip.instance_uuid is None:
LOG.warning(_LW('IP %s released that is not associated'), address,
context=context)
return
if not fixed_ip.leased:
LOG.warning(_LW('IP %s released that was not leased'), address,
context=context, instance_uuid=fixed_ip.instance_uuid)
else:
fixed_ip.leased = False
fixed_ip.save()
if not fixed_ip.allocated:
# NOTE(mriedem): Sometimes allocate_fixed_ip will associate the
# fixed IP to a new instance while an old associated instance is
# being deallocated. So we check to see if the mac is for the VIF
# that is associated to the instance that is currently associated
# with the fixed IP because if it's not, we hit this race and
# should ignore the request so we don't disassociate the fixed IP
# from the wrong instance.
if mac:
LOG.debug('Checking to see if virtual interface with MAC '
'%(mac)s is still associated to instance.',
{'mac': mac}, instance_uuid=fixed_ip.instance_uuid)
vif = objects.VirtualInterface.get_by_address(context, mac)
if vif:
LOG.debug('Found VIF: %s', vif,
instance_uuid=fixed_ip.instance_uuid)
if vif.instance_uuid != fixed_ip.instance_uuid:
LOG.info(_LI("Ignoring request to release fixed IP "
"%(address)s with MAC %(mac)s since it "
"is now associated with a new instance "
"that is in the process of allocating "
"it's network."),
| |
    def f4_no_ir(x, y, z, s):
# Solve
#
# [ P A' G' ] [ ux ] [ bx ]
# [ A 0 0 ] [ uy ] = [ by ]
# [ G 0 -W'*W ] [ W^{-1}*uz ] [ bz - W'*(lmbda o\ bs) ]
#
# us = lmbda o\ bs - uz.
#
# On entry, x, y, z, s contains bx, by, bz, bs.
# On exit they contain x, y, z, s.
# s := lmbda o\ s
# = lmbda o\ bs
misc.sinv(s, lmbda, dims)
# z := z - W'*s
# = bz - W'*(lambda o\ bs)
ws3 = matrix(np.copy(s))
misc.scale(ws3, W, trans='T')
blas.axpy(ws3, z, alpha=-1.0)
# Solve for ux, uy, uz
f3(x, y, z)
# s := s - z
# = lambda o\ bs - uz.
blas.axpy(z, s, alpha=-1.0)
# f4(x, y, z, s) solves the same system as f4_no_ir, but applies
# iterative refinement.
if iters == 0:
if refinement:
wx, wy = matrix(q), matrix(b)
wz, ws = matrix(0.0, (cdim, 1)), matrix(0.0, (cdim, 1))
if refinement:
wx2, wy2 = matrix(q), matrix(b)
wz2, ws2 = matrix(0.0, (cdim, 1)), matrix(0.0, (cdim, 1))
def f4(x, y, z, s):
if refinement:
wx = matrix(np.copy(x))
wy = matrix(np.copy(y))
wz = matrix(np.copy(z))
ws = matrix(np.copy(s))
f4_no_ir(x, y, z, s)
for i in range(refinement):
wx2 = matrix(np.copy(wx))
wy2 = matrix(np.copy(wy))
wz2 = matrix(np.copy(wz))
ws2 = matrix(np.copy(ws))
res(x, y, z, s, wx2, wy2, wz2, ws2, W, lmbda)
f4_no_ir(wx2, wy2, wz2, ws2)
            x += wx2
y += wy2
blas.axpy(wz2, z)
blas.axpy(ws2, s)
mu = gap / (dims['l'] + len(dims['q']) + sum(dims['s']))
sigma, eta = 0.0, 0.0
for i in [0, 1]:
# Solve
#
# [ 0 ] [ P A' G' ] [ dx ]
# [ 0 ] + [ A 0 0 ] * [ dy ] = -(1 - eta) * r
# [ W'*ds ] [ G 0 0 ] [ W^{-1}*dz ]
#
# lmbda o (dz + ds) = -lmbda o lmbda + sigma*mu*e (i=0)
# lmbda o (dz + ds) = -lmbda o lmbda - dsa o dza
# + sigma*mu*e (i=1) where dsa, dza
# are the solution for i=0.
# ds = -lmbdasq + sigma * mu * e (if i is 0)
# = -lmbdasq - dsa o dza + sigma * mu * e (if i is 1),
# where ds, dz are solution for i is 0.
blas.scal(0.0, ds)
if i == 1:
blas.axpy(ws3, ds, alpha=-1.0)
blas.axpy(lmbdasq, ds, n=dims['l'] + sum(dims['q']),
alpha=-1.0)
ds[:dims['l']] += sigma * mu
ind = dims['l']
for m in dims['q']:
ds[ind] += sigma * mu
ind += m
ind2 = ind
for m in dims['s']:
blas.axpy(lmbdasq, ds, n=m, offsetx=ind2, offsety=ind, incy=m + 1, alpha=-1.0)
ds[ind: ind + m * m: m + 1] += sigma * mu
ind += m * m
ind2 += m
# (dx, dy, dz) := -(1 - eta) * (rx, ry, rz)
dx *= 0.0
dx += (-1.0 + eta) * rx
dy *= 0.0
dy += (-1.0 + eta) * ry
blas.scal(0.0, dz)
blas.axpy(rz, dz, alpha=-1.0 + eta)
try:
f4(dx, dy, dz, ds)
except ArithmeticError:
if iters == 0:
raise ValueError("Rank(A) < p or Rank([P; A; G]) < n")
else:
ind = dims['l'] + sum(dims['q'])
for m in dims['s']:
misc.symm(s, m, ind)
misc.symm(z, m, ind)
ind += m ** 2
ts = misc.max_step(s, dims)
tz = misc.max_step(z, dims)
if show_progress:
print("Terminated (singular KKT matrix).")
return {'x': x, 'y': y, 's': s, 'z': z,
'status': 'unknown', 'gap': gap,
'relative gap': relgap, 'primal objective': pcost,
'dual objective': dcost,
'primal infeasibility': pres,
'dual infeasibility': dres, 'primal slack': -ts,
'dual slack': -tz, 'iterations': iters}
dsdz = misc.sdot(ds, dz, dims)
# Save ds o dz for Mehrotra correction
if i == 0:
ws3 = matrix(np.copy(ds))
misc.sprod(ws3, dz, dims)
# Maximum steps to boundary.
#
# If i is 1, also compute eigenvalue decomposition of the
# 's' blocks in ds,dz. The eigenvectors Qs, Qz are stored in
# dsk, dzk. The eigenvalues are stored in sigs, sigz.
misc.scale2(lmbda, ds, dims)
misc.scale2(lmbda, dz, dims)
if i == 0:
ts = misc.max_step(ds, dims)
tz = misc.max_step(dz, dims)
else:
ts = misc.max_step(ds, dims, sigma=sigs)
tz = misc.max_step(dz, dims, sigma=sigz)
t = max([0.0, ts, tz])
if t == 0:
step = 1.0
else:
if i == 0:
step = min(1.0, 1.0 / t)
else:
step = min(1.0, STEP / t)
if i == 0:
sigma = min(1.0, max(0.0,
1.0 - step + dsdz / gap * step ** 2)) ** EXPON
eta = 0.0
x += step * dx
y += step * dy
# We will now replace the 'l' and 'q' blocks of ds and dz with
# the updated iterates in the current scaling.
# We also replace the 's' blocks of ds and dz with the factors
# Ls, Lz in a factorization Ls*Ls', Lz*Lz' of the updated variables
# in the current scaling.
# ds := e + step*ds for nonlinear, 'l' and 'q' blocks.
# dz := e + step*dz for nonlinear, 'l' and 'q' blocks.
blas.scal(step, ds, n=dims['l'] + sum(dims['q']))
blas.scal(step, dz, n=dims['l'] + sum(dims['q']))
ind = dims['l']
ds[:ind] += 1.0
dz[:ind] += 1.0
for m in dims['q']:
ds[ind] += 1.0
dz[ind] += 1.0
ind += m
# ds := H(lambda)^{-1/2} * ds and dz := H(lambda)^{-1/2} * dz.
#
# This replaced the 'l' and 'q' components of ds and dz with the
# updated iterates in the current scaling.
# The 's' components of ds and dz are replaced with
#
# diag(lmbda_k)^{1/2} * Qs * diag(lmbda_k)^{1/2}
# diag(lmbda_k)^{1/2} * Qz * diag(lmbda_k)^{1/2}
#
misc.scale2(lmbda, ds, dims, inverse='I')
misc.scale2(lmbda, dz, dims, inverse='I')
# sigs := ( e + step*sigs ) ./ lambda for 's' blocks.
        # sigz := ( e + step*sigz ) ./ lambda for 's' blocks.
blas.scal(step, sigs)
blas.scal(step, sigz)
sigs += 1.0
sigz += 1.0
blas.tbsv(lmbda, sigs, n=sum(dims['s']), k=0, ldA=1, offsetA=dims['l'] + sum(dims['q']))
blas.tbsv(lmbda, sigz, n=sum(dims['s']), k=0, ldA=1, offsetA=dims['l'] + sum(dims['q']))
# dsk := Ls = dsk * sqrt(sigs).
# dzk := Lz = dzk * sqrt(sigz).
ind2, ind3 = dims['l'] + sum(dims['q']), 0
for k in range(len(dims['s'])):
m = dims['s'][k]
for i in range(m):
blas.scal(math.sqrt(sigs[ind3 + i]), ds, offset=ind2 + m * i,
n=m)
blas.scal(math.sqrt(sigz[ind3 + i]), dz, offset=ind2 + m * i,
n=m)
ind2 += m * m
ind3 += m
# Update lambda and scaling.
misc.update_scaling(W, lmbda, ds, dz)
# Unscale s, z (unscaled variables are used only to compute
# feasibility residuals).
blas.copy(lmbda, s, n=dims['l'] + sum(dims['q']))
ind = dims['l'] + sum(dims['q'])
ind2 = ind
for m in dims['s']:
blas.scal(0.0, s, offset=ind2)
blas.copy(lmbda, s, offsetx=ind, offsety=ind2, n=m,
incy=m + 1)
ind += m
ind2 += m * m
misc.scale(s, W, trans='T')
blas.copy(lmbda, z, n=dims['l'] + sum(dims['q']))
ind = dims['l'] + sum(dims['q'])
ind2 = ind
for m in dims['s']:
blas.scal(0.0, z, offset=ind2)
blas.copy(lmbda, z, offsetx=ind, offsety=ind2, n=m,
incy=m + 1)
ind += m
ind2 += m * m
misc.scale(z, W, inverse='I')
gap = blas.dot(lmbda, lmbda)
if __name__ == '__main__':
from cvxopt.base import matrix
Q = 2 * matrix([[2, .5], [.5, 1]])
p = matrix([1.0, 1.0])
G = matrix([[-1.0, 0.0], [0.0, -1.0]])
h = matrix([0.0, 0.0])
A = matrix([1.0, 1.0], (1, 2))
b = matrix(1.0)
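    # This is the standard CVXOPT QP example:
    #   minimize    (1/2) x'Qx + p'x
    #   subject to  Gx <= h  (x >= 0)  and  Ax = b  (x1 + x2 = 1),
    # whose known optimum is x* = (0.25, 0.75); the asserts below check this.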
# print("Q")
# print(Q)
# print("p")
# print(p)
# print("G")
# print(G)
# print("h")
# print(h)
# print("A")
# print(A)
# print("b")
# print(b)
sol = qp(Q, p, G, h, A, b)
print(sol['x'])
    assert abs(sol['x'][0] - 0.25) < 0.01
    assert abs(sol['x'][1] - 0.75) < 0.01
P = matrix(np.diag([1.0, 0.0]))
q = matrix(np.array([3.0, 4.0]))
G = matrix(np.array([[-1.0, 0.0], [0, -1.0], [-1.0, -3.0], [2.0, 5.0], [3.0, 4.0]]))
h = matrix(np.array([0.0, 0.0, -15.0, 100.0, 80.0]))
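    # Second problem: minimize (1/2)*x1**2 + 3*x1 + 4*x2 subject to x >= 0,
    # x1 + 3*x2 >= 15, 2*x1 + 5*x2 <= 100, 3*x1 + 4*x2 <= 80. Solving by hand
    # (the constraint x1 + 3*x2 >= 15 is active at the optimum) gives x* = (0, 5),
    # which the asserts below check.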
sol = qp(P, q, G=G, h=h)
print(sol)
print(sol["x"])
    assert abs(sol['x'][0] - 0.00) < 0.01
    assert abs(sol['x'][1] - 5.00) < 0.01