from datetime import datetime, timezone
from typing import List, Optional
from pathlib import Path
import os
import configparser
import getpass
from sqlalchemy import create_engine, func, event, and_, or_, inspect
from sqlalchemy.orm import sessionmaker, aliased
from fire.api.model import (
RegisteringTidObjekt,
FikspunktregisterObjekt,
Sag,
Punkt,
PunktInformation,
PunktInformationType,
GeometriObjekt,
Konfiguration,
Koordinat,
Observation,
ObservationType,
Bbox,
Sagsevent,
SagseventInfo,
Sagsinfo,
Beregning,
Geometry,
EventType,
Srid,
)
class FireDb(object):
def __init__(self, connectionstring=None, debug=False):
"""
Parameters
----------
connectionstring : str
Connection string for the oracle database where the FIRE database resides.
Of the general form 'user:pass@host:port/dbname[?key=value&key=value...]'
debug: bool
if True, the SQLAlchemy Engine will log all statements as well as a repr() of their parameter lists to the
engine's logger, which defaults to sys.stdout
"""
self.dialect = "oracle+cx_oracle"
self.config = self._read_config()
if connectionstring:
self.connectionstring = connectionstring
else:
self.connectionstring = self._build_connection_string()
self.engine = create_engine(
f"{self.dialect}://{self.connectionstring}",
connect_args={"encoding": "UTF-8", "nencoding": "UTF-8"},
echo=debug,
)
self.sessionmaker = sessionmaker(bind=self.engine)
self.session = self.sessionmaker(autoflush=False)
@event.listens_for(self.sessionmaker, "before_flush")
def listener(thissession, flush_context, instances):
for obj in thissession.deleted:
if isinstance(obj, RegisteringTidObjekt):
obj._registreringtil = func.sysdate()
thissession.add(obj)
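# A minimal usage sketch (illustrative only, not part of the class): the
# connection string and point id below are made-up examples of the
# 'user:pass@host:port/dbname' form documented in __init__.
#
#   fire = FireDb("fire:secret@dbhost:1521/FIREDB", debug=True)
#   punkt = fire.hent_punkt("some-punkt-id")
#   sager = fire.hent_alle_sager()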
# region "Hent" methods
def hent_punkt(self, punktid: str) -> Punkt:
p = aliased(Punkt)
return (
self.session.query(p)
.filter(p.id == punktid, p._registreringtil == None)
.one()
)
def hent_geometri_objekt(self, punktid: str) -> GeometriObjekt:
go = aliased(GeometriObjekt)
return (
self.session.query(go)
.filter(go.punktid == punktid, go._registreringtil == None)
.one()
)
def hent_alle_punkter(self) -> List[Punkt]:
return self.session.query(Punkt).all()
def hent_sag(self, sagid: str) -> Sag:
return self.session.query(Sag).filter(Sag.id == sagid).one()
def hent_alle_sager(self) -> List[Sag]:
return self.session.query(Sag).all()
def soeg_geometriobjekt(self, bbox) -> List[GeometriObjekt]:
if not isinstance(bbox, Bbox):
bbox = Bbox(bbox)
return (
self.session.query(GeometriObjekt)
.filter(func.sdo_filter(GeometriObjekt.geometri, bbox) == "TRUE")
.all()
)
def hent_observationtype(self, name: str) -> ObservationType:
"""Gets ObservationType by its name.
Parameters
----------
name : str
Name (including namespace) of the observationtype.
Returns
-------
ObservationType:
The first ObservationType matching the specified name. None if not found.
"""
namefilter = name
return (
self.session.query(ObservationType)
.filter(ObservationType.name == namefilter)
.first()
)
def hent_observationtyper(
self, namespace: Optional[str] = None
) -> List[ObservationType]:
"""Gets all ObservationTyper optionally filtered by namespace.
"""
if not namespace:
return self.session.query(ObservationType).all()
like_filter = f"{namespace}:%"
return (
self.session.query(ObservationType)
.filter(ObservationType.name.ilike(like_filter))
.all()
)
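# Illustrative sketch of the name/namespace lookups above; the names are
# assumed examples of the '<namespace>:<name>' convention, not real rows:
#
#   fire.hent_observationtype("FO:totalstation")   # exact name match
#   fire.hent_observationtyper(namespace="FO")     # everything matching 'FO:%'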
def hent_observationer(self, objectids: List[int]) -> List[Observation]:
return (
self.session.query(Observation)
.filter(Observation.objectid.in_(objectids))
.all()
)
def hent_observationer_naer_opstillingspunkt(
self,
punkt: Punkt,
afstand: float,
tidfra: Optional[datetime] = None,
tidtil: Optional[datetime] = None,
) -> List[Observation]:
g1 = aliased(GeometriObjekt)
g2 = aliased(GeometriObjekt)
return (
self.session.query(Observation)
.join(g1, Observation.opstillingspunktid == g1.punktid)
.join(g2, g2.punktid == punkt.id)
.filter(
self._filter_observationer(
g1.geometri, g2.geometri, afstand, tidfra, tidtil
)
)
.all()
)
def hent_observationer_naer_geometri(
self,
geometri: Geometry,
afstand: float,
tidfra: Optional[datetime] = None,
tidtil: Optional[datetime] = None,
) -> List[Observation]:
"""
Parameters
----------
geometri
Either a WKT string or a Geometry instance which will be used as
filter to identify the set of spatial objects that are within some
specified distance of the given object.
"""
g = aliased(GeometriObjekt)
return (
self.session.query(Observation)
.join(
g,
or_(
g.punktid == Observation.opstillingspunktid,
g.punktid == Observation.sigtepunktid,
),
)
.filter(
self._filter_observationer(
g.geometri, geometri, afstand, tidfra, tidtil
)
)
.all()
)
def hent_srid(self, sridid: str):
"""Gets a Srid object by its id.
Parameters
----------
sridid : str
srid id string. For instance "EPSG:25832"
Returns
-------
Srid
Srid object with the specified id. None if not found.
"""
srid_filter = str(sridid).upper()
return self.session.query(Srid).filter(Srid.name == srid_filter).one()
def hent_srider(self, namespace: Optional[str] = None):
"""Gets Srid objects. Optionally filtering by srid namespace
Parameters
----------
namespace: str - optional
Return only Srids with the specified namespace. For instance "EPSG". If not specified all objects are returned.
Returns
-------
List of Srid
"""
if not namespace:
return self.session.query(Srid).all()
like_filter = f"{namespace}:%"
return self.session.query(Srid).filter(Srid.name.ilike(like_filter)).all()
def hent_punktinformationtype(self, infotype: str):
typefilter = infotype
return (
self.session.query(PunktInformationType)
.filter(PunktInformationType.name == typefilter)
.first()
)
def hent_punktinformationtyper(self, namespace: Optional[str] = None):
if not namespace:
return self.session.query(PunktInformationType).all()
like_filter = f"{namespace}:%"
return (
self.session.query(PunktInformationType)
.filter(PunktInformationType.name.ilike(like_filter))
.all()
)
# endregion
# region "Indset" methods
def indset_sag(self, sag: Sag):
if not self._is_new_object(sag):
raise Exception(f"Cannot re-add already persistent sag: {sag}")
if len(sag.sagsinfos) < 1:
raise Exception("At least one sagsinfo must be added to the sag")
if sag.sagsinfos[-1].aktiv != "true":
raise Exception("Last sagsinfo should have aktiv = 'true'")
self.session.add(sag)
self.session.commit()
def indset_sagsevent(self, sagsevent: Sagsevent):
if not self._is_new_object(sagsevent):
raise Exception(f"Cannot re-add already persistent sagsevent: {sagsevent}")
if len(sagsevent.sagseventinfos) < 1:
raise Exception("At least one sagseventinfo must be added to the sagsevent")
self.session.add(sagsevent)
self.session.commit()
def indset_punkt(self, sagsevent: Sagsevent, punkt: Punkt):
if not self._is_new_object(punkt):
raise Exception(f"Cannot re-add already persistent punkt: {punkt}")
if len(punkt.geometriobjekter) != 1:
raise Exception("A single geometriobjekt must be added to the punkt")
self._check_and_prepare_sagsevent(sagsevent, EventType.PUNKT_OPRETTET)
punkt.sagsevent = sagsevent
for geometriobjekt in punkt.geometriobjekter:
if not self._is_new_object(geometriobjekt):
raise Exception(f"Added punkt cannot refer to existing geometriobjekt")
geometriobjekt.sagsevent = sagsevent
for punktinformation in punkt.punktinformationer:
if not self._is_new_object(punktinformation):
raise Exception(
"Added punkt cannot refer to existing punktinformation"
)
punktinformation.sagsevent = sagsevent
self.session.add(punkt)
self.session.commit()
def indset_punktinformation(
self, sagsevent: Sagsevent, punktinformation: PunktInformation
):
if not self._is_new_object(punktinformation):
raise Exception(
f"Cannot re-add already persistant punktinformation: {punktinformation}"
)
self._check_and_prepare_sagsevent(sagsevent, EventType.PUNKTINFO_TILFOEJET)
punktinformation.sagsevent = sagsevent
self.session.add(punktinformation)
self.session.commit()
def indset_punktinformationtype(self, punktinfotype: PunktInformationType):
if not self._is_new_object(punktinfotype):
raise Exception(
f"Cannot re-add already persistant punktinformationtype: {punktinfotype}"
)
n = self.session.query(func.max(PunktInformationType.infotypeid)).one()[0]
if n is None:
n = 0
punktinfotype.infotypeid = n + 1
self.session.add(punktinfotype)
self.session.commit()
def indset_observation(self, sagsevent: Sagsevent, observation: Observation):
if not self._is_new_object(observation):
raise Exception(
f"Cannot re-add already persistent observation: {observation}"
)
self._check_and_prepare_sagsevent(sagsevent, EventType.OBSERVATION_INDSAT)
observation.sagsevent = sagsevent
self.session.add(observation)
self.session.commit()
def indset_observationtype(self, observationtype: ObservationType):
if not self._is_new_object(observationtype):
raise Exception(
f"Cannot re-add already persistent observationtype: {observationtype}"
)
n = self.session.query(func.max(ObservationType.observationstypeid)).one()[0]
if n is None:
n = 0
observationtype.observationstypeid = n + 1
self.session.add(observationtype)
self.session.commit()
def indset_beregning(self, sagsevent: Sagsevent, beregning: Beregning):
if not self._is_new_object(beregning):
raise Exception(f"Cannot re-add already persistent beregning: {beregning}")
self._check_and_prepare_sagsevent(sagsevent, EventType.KOORDINAT_BEREGNET)
beregning.sagsevent = sagsevent
for koordinat in beregning.koordinater:
if not self._is_new_object(koordinat):
raise Exception(
f"Added beregning cannot refer to existing koordinat: {koordinat}"
)
koordinat.sagsevent = sagsevent
self.session.add(beregning)
self.session.commit()
def indset_srid(self, srid: Srid):
if not self._is_new_object(srid):
raise Exception(f"Cannot re-add already persistent Srid: {srid}")
n = self.session.query(func.max(Srid.sridid)).one()[0]
if n is None:
n = 0
srid.sridid = n + 1
self.session.add(srid)
self.session.commit()
# endregion
# region "luk" methods
def luk_sag(self, sag: Sag):
"""Sætter en sags status til inaktiv"""
if not isinstance(sag, Sag):
raise TypeError("'sag' is not an instance of Sag")
current = sag.sagsinfos[-1]
new = Sagsinfo(
aktiv="false",
journalnummer=current.journalnummer,
behandler=current.behandler,
beskrivelse=current.beskrivelse,
sag=sag,
)
self.session.add(new)
self.session.commit()
def luk_punkt(self, punkt: Punkt, sagsevent: Sagsevent):
"""
Close a punkt.
Besides the punkt itself, the associated geometriobjekt, koordinater and
punktinformationer are closed as well. All closed objects are attached to
the same sagsevent of type EventType.PUNKT_NEDLAGT.
This is the ultimate purge. USE WITH CARE!
"""
if not isinstance(punkt, Punkt):
raise TypeError("'punkt' is not an instance of Punkt")
sagsevent.eventtype = EventType.PUNKT_NEDLAGT
self._luk_fikspunkregisterobjekt(punkt, sagsevent, commit=False)
self._luk_fikspunkregisterobjekt(
punkt.geometriobjekter[-1], sagsevent, commit=False
)
for koordinat in punkt.koordinater:
self._luk_fikspunkregisterobjekt(koordinat, sagsevent, commit=False)
for punktinfo in punkt.punktinformationer:
self._luk_fikspunkregisterobjekt(punktinfo, sagsevent, commit=False)
for observation in punkt.observationer_fra:
self._luk_fikspunkregisterobjekt(observation, sagsevent, commit=False)
for observation in punkt.observationer_til:
self._luk_fikspunkregisterobjekt(observation, sagsevent, commit=False)
self.session.commit()
def luk_koordinat(self, koordinat: Koordinat, sagsevent: Sagsevent):
"""
Close a koordinat.
If not already set, the sagsevent type is changed to EventType.KOORDINAT_NEDLAGT.
"""
if not isinstance(koordinat, Koordinat):
raise TypeError("'koordinat' is not an instance of Koordinat")
sagsevent.eventtype = EventType.KOORDINAT_NEDLAGT
self._luk_fikspunkregisterobjekt(koordinat, sagsevent)
def luk_observation(self, observation: Observation, sagsevent: Sagsevent):
"""
Close an observation.
If not already set, the sagsevent type is changed to EventType.OBSERVATION_NEDLAGT.
"""
if not isinstance(observation, Observation):
raise TypeError("'observation' is not an instance of Observation")
sagsevent.eventtype = EventType.OBSERVATION_NEDLAGT
self._luk_fikspunkregisterobjekt(observation, sagsevent)
def luk_punktinfo(self, punktinfo: PunktInformation, sagsevent: Sagsevent):
"""
Close a punktinformation.
If not already set, the sagsevent type is changed to EventType.PUNKTINFO_FJERNET.
"""
if not isinstance(punktinfo, PunktInformation):
raise TypeError("'punktinfo' is not an instance of PunktInformation")
sagsevent.eventtype = EventType.PUNKTINFO_FJERNET
self._luk_fikspunkregisterobjekt(punktinfo, sagsevent)
def luk_beregning(self, beregning: Beregning, sagsevent: Sagsevent):
"""
Close a beregning.
Closes all koordinater attached to the beregning.
If not already set, the sagsevent type is changed to EventType.KOORDINAT_NEDLAGT.
"""
if not isinstance(beregning, Beregning):
raise TypeError("'beregning' is not an instance of Beregning")
sagsevent.eventtype = EventType.KOORDINAT_NEDLAGT
for koordinat in beregning.koordinater:
self._luk_fikspunkregisterobjekt(koordinat, sagsevent, commit=False)
self._luk_fikspunkregisterobjekt(beregning, sagsevent, commit=False)
self.session.commit()
@property
def basedir_skitser(self):
"""Returner absolut del af sti til skitser."""
konf = self._hent_konfiguration()
return konf.dir_skitser
@property
def basedir_materiale(self):
"""Returner absolut del af sti til sagsmateriale."""
konf = self._hent_konfiguration()
return konf.dir_materiale
def _hent_konfiguration(self):
return (
self.session.query(Konfiguration)
.filter(Konfiguration.objectid == 1)
.first()
)
# region Private methods
def _luk_fikspunkregisterobjekt(
self, objekt: FikspunktregisterObjekt, sagsevent: Sagsevent,
#!/usr/bin/env python3
#
# SPDX-License-Identifier: MIT
#
# This file is formatted with Python Black
"""
A Parser helper function to convert a byte array to a Python object and the
other way around. The conversion is specified in a list of :class:`Spec`
instances, for example:
>>> data = bytes(range(16))
>>> spec = [
... Spec("B", "zero"),
... Spec("B", "first"),
... Spec("H", "second", endian="BE"),
... Spec("H", "third", endian="le"),
... Spec("BB", "tuples", repeat=5)
... ]
...
>>> result = Parser.to_object(data, spec)
>>> assert result.size == len(data)
>>> assert result.object.zero == 0
>>> assert result.object.first == 0x1
>>> assert result.object.second == 0x0203
>>> assert result.object.third == 0x0504 # little endian
>>> assert result.object.tuples == [(6, 7), (8, 9), (10, 11), (12, 13), (14, 15)]
And likewise, an object can be turned into a bytearray: ::
>>> new_data = Parser.from_object(result.object, spec)
>>> assert new_data == data
See the :class:`Spec` documentation for details on the format.
"""
import attr
import logging
import re
import struct
from ratbag.util import as_hex
from typing import Any, Callable, Dict, List, Optional, Type, Union
logger = logging.getLogger(__name__)
@attr.s
class Spec(object):
"""
The format specification for a single **logical** in a data set. This is
used in :meth:`Parser.to_object` or :meth:`Parser.from_object` to convert
from or to a byte stream. For example:
- ``Spec("B", "myattr")`` is a single byte from/to an object's ``myattr``
property
- ``Spec("BB", "point")`` is a tuple of two bytes from/to an object's ``myattr``
property
See :meth:`Parser.to_object` and :meth:`Parser.from_object` for details.
"""
@attr.s
class ConverterArg:
"""
The argument passed to :attr:`convert_to_data`
"""
bytes: bytes = attr.ib()
value: Any = attr.ib()
index: int = attr.ib()
format: str = attr.ib()
"""
The format, must be compatible to Python's ``struct`` format specifiers,
excluding the endian prefixes. If the format contains more than one
element, the respective object attribute is a tuple.
With the exception of fixed-length strings (``4s`` for a 4-byte string)
this format must not contain any repeat specifiers. Use the ``repeat``
attribute instead. IOW:
>>> Spec("3s", "string") # One 3-byte string
>>> Spec("s", "string", repeat=3) # Three 1-byte strings
>>> Spec("3H", "foo") # Not permitted
"""
name: str = attr.ib()
"""
The name to assign to the resulting object attribute.
"""
endian: str = attr.ib(default="BE", validator=attr.validators.in_(["BE", "le"]))
"""
Endianess of the field, one of ``"BE"`` or ``"le"``.
"""
repeat: int = attr.ib(default=1, validator=attr.validators.instance_of(int))
"""
The number of times this field repeats in struct. Where repeat is greater
than 1, the resulting attribute is a list with ``repeat`` elements (each
element may be tuple, see ``format``).
"""
greedy: bool = attr.ib(default=False)
"""
If true, ``repeat`` is ignored and the current field repeats until the
remainder of the available data. This takes the current format spec into
account. For example, a `HH` tuple has 4 bytes and will repeat 5 times in
a data size 20.
If the data size is not a multiple of the current format size, the
remainder is silently skipped:
>>> spec = Spec("H", "foo", greedy=True)
>>> data = Parser.to_object(bytes(5), spec)
>>> assert data.object.size == 4
"""
convert_from_data: Optional[Callable[[Any], Any]] = attr.ib(default=None)
"""
Conversion function for the data. An example for converting a sequence of
bytes to a string:
>>> spec = Spec("B", "foo", repeat=3, convert_from_data=lambda s: bytes(s).decode("utf-8"))
# Or alternatively use the string length format:
>>> spec = Spec("3s", "foo", convert_from_data=lambda s: s.decode("utf-8"))
>>> data = Parser.to_object("bar".encode("utf-8"), spec)
>>> assert data.object.foo == "bar"
Note that the conversion happens once all ``repeat`` have been completed,
i.e. the input value for ``repeat > 1`` is a list.
"""
convert_to_data: Optional[Callable[[ConverterArg], Any]] = attr.ib(default=None)
"""
Conversion function of this attribute to data. This function takes the
data bytes produced so far by :meth:`Parser.from_object` and the current
value and index (if applicable). It must return a value compatible to the
format specifier. Specifically:
- if ``format`` specifies more than one type, the return value must be a
tuple
- if ``repeat`` is greater than 1, the return value must be a list of
``repeat`` elements. Note that this function is called once per element
the list, with the data argument updated accordingly.
An example for producing a checksum with ``some_crc()``: ::
>>> specs = [] # other fields
>>> checksum_spec("H", "checksum", convert_to_data=lambda bs, v, idx: some_crc(bs))
>>> data = Parser.from_object(myobj, specs + checksum_spec)
>>> assert data[-2:] == some_crc(data[:-2])
"""
_size: int = attr.ib(init=False)
_count: int = attr.ib(init=False)
def __attrs_post_init__(self):
self._size = struct.calcsize(self.format)
invalid = re.findall(r"\d+[^s\d]+", self.format)
assert not invalid, f"Invalid use of repeat found in pattern(s): {invalid}"
# struct allows repeats which are useful for strings in particular.
# Where they're used, make the count a function of the struct format
# specifiers only, not the repeats, i.e. a format like "3s" is one
# string, not a tuple of two.
self._count = len(re.sub(r"[0-9]", "", self.format))
@repeat.validator
def _check_repeat(self, attribute, value):
if value <= 0:
raise ValueError("repeat must be greater than zero")
@attr.s
class Result(object):
"""
The return value from :meth:`Parser.to_object`
"""
object: Any = attr.ib()
"""
The object passed to :meth:`Parser.to_object` or otherwise an unspecified
instance with all attribute names as requested by the parser spec.
"""
size: int = attr.ib()
"""
The number of bytes used to create this object
"""
@attr.s
class Parser(object):
@classmethod
def to_object(
cls,
data: bytes,
specs: List[Spec],
obj: object = None,
result_class: Union[str, Type] = "_ResultObject",
) -> Result:
"""
Convert the given data into an object according to the specs. If
``obj`` is not ``None``, the attributes are set on that
object (resetting any attributes of the same name already set on the
object). Otherwise, a new generic object is created with all
attributes as specified in the parser specs.
The ``result_class`` specifies either the type of class to
instantiate, or the name of the created class for this object.
>>> specs = [Spec("B", "field")]
>>> r = Parser.to_object(bytes(16), specs, result_class = "Foo")
>>> print(type(r.object).__name__)
Foo
>>> class Bar:
... def __init__(self, field):
... pass
>>> r = Parser.to_object(bytes(16), specs, result_class = Bar)
>>> assert isinstance(r.object, Bar)
Where an existing type is used, that type must take all Spec fields as
keyword arguments in the constructor.
"""
# Only the last element can be greedy
assert all([spec.greedy is False for spec in list(reversed(specs))[1:]])
# This parser is quite noisy but if the input is a zero-byte array
# (used by some drivers to init an object with all spec fields) we
# disable the logger. This should be handled better (specifically: the
# driver shouldn't need to do this) but for now it'll do.
disable_logger = data == bytes(len(data))
if disable_logger:
logger.debug("Parsing zero byte array, detailed output is skipped")
# All parsing data is temporarily stored in this dictionary which is
# simply: { spec.name: parsed_value }
# Once we're done parsing we move all these to the object passed in
values: Dict[str, Any] = {}
offset = 0
for spec in specs:
endian = {"BE": ">", "le": "<"}[spec.endian]
if spec.greedy:
repeat = len(data[offset:]) // struct.calcsize(spec.format)
else:
repeat = spec.repeat
for idx in range(repeat):
try:
val = struct.unpack_from(endian + spec.format, data, offset=offset)
except struct.error as e:
logger.error(
f"Parser error while parsing spec {spec} at offset {offset}: {e}"
)
raise e
if spec.name == "_":
debugstr = "<pad bytes>"
elif spec.name == "?":
debugstr = "<unknown>"
else:
if spec._count == 1:
val = val[0]
if repeat > 1:
debugstr = f"self.{spec.name:24s} += {val}"
if idx == 0:
values[spec.name] = []
values[spec.name].append(val)
else:
debugstr = f"self.{spec.name:24s} = {val}"
values[spec.name] = val
if not disable_logger:
logger.debug(
f"offset {offset:02d}: {as_hex(data[offset:offset+spec._size]):5s} → {debugstr}"
)
offset += spec._size
if spec.convert_from_data is not None:
values[spec.name] = spec.convert_from_data(values[spec.name])
# if we don't have an object, construct an attr class with the spec
# names (skipping padding/unknown). This makes printing and inspecting
# results a lot saner.
if obj is None:
vals = {n.lstrip("_"): v for n, v in values.items()}
if isinstance(result_class, str):
c = attr.make_class(result_class, attrs=list(values.keys()))
# private fields in attr drop the leading underscore
# tests/google/appengine/api/memcache/memcache_unittest.py
#!/usr/bin/env python
#
# Copyright 2007 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit-test for google.appengine.api.memcache module.
This tests that google.appengine.api.memcache sets up request protos
correctly, and returns the correct results assuming faked response protos.
"""
import collections
import hashlib
import google
import mock
import six
from six.moves import range
import six.moves.cPickle
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import memcache
from google.appengine.api import namespace_manager
from google.appengine.api.memcache import memcache_service_pb2
from google.appengine.runtime import apiproxy_errors
from google.appengine.runtime.context import ctx_test_util
from google.protobuf import text_format
from absl.testing import absltest
if six.PY3:
long = int
MemcacheSetResponse = memcache_service_pb2.MemcacheSetResponse
MemcacheSetRequest = memcache_service_pb2.MemcacheSetRequest
MemcacheGetResponse = memcache_service_pb2.MemcacheGetResponse
MemcacheGetRequest = memcache_service_pb2.MemcacheGetRequest
MemcacheIncrementResponse = memcache_service_pb2.MemcacheIncrementResponse
MemcacheIncrementRequest = memcache_service_pb2.MemcacheIncrementRequest
MemcacheBatchIncrementResponse = memcache_service_pb2.MemcacheBatchIncrementResponse
MemcacheBatchIncrementRequest = memcache_service_pb2.MemcacheBatchIncrementRequest
MemcacheDeleteResponse = memcache_service_pb2.MemcacheDeleteResponse
MemcacheDeleteRequest = memcache_service_pb2.MemcacheDeleteRequest
MemcacheFlushResponse = memcache_service_pb2.MemcacheFlushResponse
MemcacheFlushRequest = memcache_service_pb2.MemcacheFlushRequest
MemcacheStatsRequest = memcache_service_pb2.MemcacheStatsRequest
MergedNamespaceStats = memcache_service_pb2.MergedNamespaceStats
MemcacheStatsResponse = memcache_service_pb2.MemcacheStatsResponse
ONE_MEGABYTE = 1024 * 1024
def MakeArbitraryGetRequest():
"""Makes an arbitrary MemcacheGetRequest.
The request is arbitrary in that every field is set to a valid value, but the
caller should not depend on anything other than the presence. Instead,
callers should set/clear what's important to their test case.
Returns:
An arbitrary MemcacheGetRequest.
"""
request = MemcacheGetRequest()
body = """
key: "test-key"
name_space: "unused-namespace"
for_cas: false
override <
app_id: "override-app"
>
"""
return text_format.Parse(body, request)
@ctx_test_util.isolated_context()
class MemcacheNamespaceTest(absltest.TestCase):
def testAddNamespacePart(self):
"""Test _add_namespace_part and related methods."""
request = MakeArbitraryGetRequest()
expected = MemcacheGetRequest()
expected.CopyFrom(request)
namespace_manager.set_namespace('a_namespace')
self.assertEqual('a_namespace', namespace_manager.get_namespace())
memcache._add_name_space(request)
expected.name_space = 'a_namespace'
self.assertEqual(expected, request)
namespace_manager.set_namespace('')
memcache._add_name_space(request)
expected.ClearField('name_space')
self.assertEqual(expected, request)
namespace_manager.set_namespace(None)
memcache._add_name_space(request)
self.assertEqual(expected, request)
namespace_manager.set_namespace('foo')
memcache._add_name_space(request)
expected.name_space = 'foo'
self.assertEqual(expected, request)
def testAddNamespacePartEmptyString(self):
"""Tests that namespace field is cleared when empty requested."""
request = MakeArbitraryGetRequest()
expected = MemcacheGetRequest()
expected.CopyFrom(request)
namespace_manager.set_namespace('not-this-one')
memcache._add_name_space(request, '')
expected.ClearField('name_space')
self.assertEqual(expected, request)
class MockUserRPC(object):
"""Minimally functional mock for UserRPC."""
_error = None
_hook = None
def __init__(self, *args):
pass
def check_success(self):
if self._error is not None:
raise self._error
def get_result(self):
if self._hook is None:
self.check_success()
return None
else:
return self._hook(self)
class MemcacheAppIdTest(absltest.TestCase):
"""Test that specifying app_ids and num_memcacheg_backends works correctly."""
def setUp(self):
self.app_id = 'app1'
def testAppIdSet(self):
"""Test undocumented fields are set when specified in the constructor."""
request = MakeArbitraryGetRequest()
request.ClearField('override')
expected = MemcacheGetRequest()
expected.CopyFrom(request)
client = memcache.Client(_app_id=self.app_id)
client._add_app_id(request)
expected.override.app_id = self.app_id
self.assertEqual(expected, request)
def testAppIdNotSetWhenNotSpecified(self):
"""Test that app_id not set in request."""
request = MakeArbitraryGetRequest()
request.ClearField('override')
expected = MemcacheGetRequest()
expected.CopyFrom(request)
client = memcache.Client()
client._add_app_id(request)
self.assertEqual(expected, request)
def testAddAppIdCalled(self):
"""Test that _add_app_id is called for every type of request."""
self.last_recorded_request = None
def RecordSetAppIdRequest(message):
"""We record the message passed in to Client._set_app_id.
The same message should then be sent to _make_async_call.
"""
self.last_recorded_request = message
def MockedAsyncCall(rpc, method, request, response, hook, user_data):
self.assertEqual(request, self.last_recorded_request)
if method == 'Delete':
response.delete_status.append(MemcacheDeleteResponse.DELETED)
if method == 'Set':
response.set_status.append(MemcacheSetResponse.STORED)
if method == 'BatchIncrement':
response.item.add()
rpc = MockUserRPC()
rpc.method = method
rpc.request = request
rpc.response = response
rpc._hook = hook
rpc.user_data = user_data
return rpc
client = memcache.Client(_app_id=self.app_id)
methods = [
(memcache.Client.get_stats, [client]),
(memcache.Client.flush_all, [client]),
(memcache.Client.get, [client, 'key']),
(memcache.Client.gets, [client, 'key']),
(memcache.Client.get_multi, [client, ['key']]),
(memcache.Client.delete, [client, 'key']),
(memcache.Client.delete_multi, [client, ['key']]),
(memcache.Client.set, [client, 'key', 'value']),
(memcache.Client.add, [client, 'key', 'value']),
(memcache.Client.replace, [client, 'key', 'value']),
(memcache.Client.cas, [client, 'key', 'value']),
(memcache.Client.set_multi, [client, {'key': 'value'}]),
(memcache.Client.add_multi, [client, {'key': 'value'}]),
(memcache.Client.replace_multi, [client, {'key': 'value'}]),
(memcache.Client.cas_multi, [client, {'key': 'value'}]),
(memcache.Client.incr, [client, 'key']),
(memcache.Client.decr, [client, 'key']),
(memcache.Client.offset_multi, [client, {'key': 1}])
]
for method, args in methods:
with mock.patch.object(client, '_add_app_id', RecordSetAppIdRequest):
with mock.patch.object(client, '_make_async_call', MockedAsyncCall):
method(*args)
class MemcacheTest(absltest.TestCase):
"""Unit test for getting keys from memcache."""
def CallMethod(self,
method,
args=None,
error=None,
proto_response=None,
client=None,
**kwargs):
"""Calls named method with provided args on new client object.
Args:
method: Method to call on memcache client object.
args: List of arguments to pass to method.
error: Error to raise in sync call, if true.
proto_response: The fake proto response to give to the sync call.
client: Optional pre-initialized Client instance.
**kwargs: Keyword arguments passed to the method.
Returns:
Tuple (service_method, proto_request, result) where:
service_method: The service method that was sync called.
proto_request: The request proto that was generated.
result: The result of the memcached method call, after it
assumes the result of proto_response came from
the backend.
"""
result_list = [None, None, None]
if args is None:
args = []
def FakeAsyncCall(rpc, method, request, response, hook=None,
user_data=None):
"""Does fake asynchronous API call."""
result_list[0] = method
result_list[1] = request
rpc = MockUserRPC()
rpc.method = method
rpc.request = request
if error is not None:
rpc._error = error
else:
response.CopyFrom(proto_response)
rpc.response = response
rpc._hook = hook
rpc.user_data = user_data
return rpc
if client is None:
client = memcache.Client()
self.client = client
client._make_async_call = FakeAsyncCall
memcache.setup_client(client)
if method in ('gets', 'cas', 'cas_multi') or method.endswith('_async'):
method = getattr(client, method)
else:
method = getattr(memcache, method)
result_list[2] = method(*args, **kwargs)
return tuple(result_list)
def testKeyStringHelper(self):
"""Tests the _key_string() helper function."""
self.assertEqual(b'foo', memcache._key_string(b'foo'))
self.assertEqual(b'pfx:foo', memcache._key_string(b'foo', b'pfx:'))
server_to_user_map = {}
self.assertEqual(
b'pfx:foo2',
memcache._key_string((1234, b'foo2'), b'pfx:', server_to_user_map))
self.assertEqual(
b'pfx:foo3', memcache._key_string(b'foo3', b'pfx:', server_to_user_map))
self.assertEqual({
b'pfx:foo3': b'foo3',
b'pfx:foo2': b'foo2'
}, server_to_user_map)
def testKeyStringHelperValidation(self):
"""Tests that the _key_string() helper validates keys."""
self.assertRaises(TypeError, memcache._key_string, 1234)
self.assertRaises(TypeError, memcache._key_string, 1234, key_prefix='stuff')
self.assertRaises(TypeError, memcache._key_string, 'key', key_prefix=1234)
def testKeyStringHelperUnicodeKey(self):
"""Tests that unicode keys will be encoded strings as server keys."""
server_to_user_dict = {}
memcache._key_string(b'\xc3\xa9', server_to_user_dict=server_to_user_dict)
self.assertEqual(b'\xc3\xa9', server_to_user_dict[b'\xc3\xa9'])
server_to_user_dict.clear()
memcache._key_string(u'\xe9', server_to_user_dict=server_to_user_dict)
self.assertEqual(u'\xe9', server_to_user_dict[b'\xc3\xa9'])
def testKeyStringHelperUnicodeKeyPrefix(self):
"""Tests when a key_prefix is a unicode string."""
server_to_user_dict = {}
memcache._key_string(
'asdf', key_prefix=u'\xe9:', server_to_user_dict=server_to_user_dict)
self.assertEqual('asdf', server_to_user_dict[b'\xc3\xa9:asdf'])
def testLargeKeyString(self):
"""Tests that keys > memcache.MAX_KEY_SIZE are allowed."""
server_to_user_dict = {}
large_key = b'a' * 500
self.assertGreater(len(large_key), memcache.MAX_KEY_SIZE)
memcache._key_string(large_key, server_to_user_dict=server_to_user_dict)
large_key_sha = hashlib.sha1(large_key).hexdigest()
if isinstance(large_key_sha, six.text_type):
large_key_sha = large_key_sha.encode('utf-8')
self.assertEqual(large_key, server_to_user_dict[large_key_sha])
def testEncodeValueHelperStringValue(self):
"""Tests that string values are passed through without modification."""
stored_value, flags = memcache._validate_encode_value(b'foobar', None)
self.assertEqual(b'foobar', stored_value)
self.assertEqual(0, flags)
def testEncodeValueHelperUnicodeValue(self):
"""Tests encoding for the server when the value is a unicode string."""
stored_value, flags = memcache._validate_encode_value(u'foobar\xe9', None)
self.assertEqual(b'foobar\xc3\xa9', stored_value)
self.assertEqual(memcache.TYPE_UNICODE, flags)
def testEncodeValueHelperIntValue(self):
"""Tests encoding for the server when the value is an int."""
stored_value, flags = memcache._validate_encode_value(42, None)
self.assertEqual(b'42', stored_value)
self.assertEqual(memcache.TYPE_INT, flags)
def testEncodeValueHelperLongValue(self):
"""Tests encoding for the server when the value is an int."""
type_long = memcache.TYPE_LONG
if six.PY3:
type_long = memcache.TYPE_INT
stored_value, flags = memcache._validate_encode_value(long(42), None)
self.assertEqual(b'42', stored_value)
self.assertEqual(type_long, flags)
def testEncodeValueHelperBoolValue(self):
"""Tests encoding for the server when the value is an bool."""
stored_value, flags = memcache._validate_encode_value(True, None)
self.assertEqual(b'1', stored_value)
self.assertEqual(memcache.TYPE_BOOL, flags)
stored_value, flags = memcache._validate_encode_value(False, None)
self.assertEqual(b'0', stored_value)
self.assertEqual(memcache.TYPE_BOOL, flags)
def testEncodeValueHelperPickledValue(self):
"""Tests encoding for the server when the value is a pickled object."""
my_value = {
'asdf': 1,
'foo': long(2),
'bar': 1.0,
}
stored_value, flags = memcache._validate_encode_value(
my_value, six.moves.cPickle.dumps)
self.assertEqual(my_value, six.moves.cPickle.loads(stored_value))
self.assertEqual(memcache.TYPE_PICKLED, flags)
def testDecodeValueHelperUnicodeValue(self):
"""Tests decoding the server value when it's a unicode string."""
value = memcache._decode_value(b'foobar\xc3\xa9', memcache.TYPE_UNICODE,
six.moves.cPickle.loads)
self.assertEqual(u'foobar\xe9', value)
def testDecodeValueHelperPickledValue(self):
"""Tests encoding the server value when it's a pickled object."""
my_value = {
'asdf': 1,
'foo': 2,
'bar': 1.0,
}
value = memcache._decode_value(
six.moves.cPickle.dumps(my_value), memcache.TYPE_PICKLED,
six.moves.cPickle.loads)
self.assertEqual(my_value, value)
def testDecodeValueHelperIntValue(self):
"""Tests encoding the server value when it's an int."""
my_value = 42
value = memcache._decode_value(b'42', memcache.TYPE_INT,
six.moves.cPickle.loads)
self.assertEqual(my_value, value)
self.assertIs(type(value), int)
def testDecodeValueHelperLongValue(self):
"""Tests encoding the server value when it's a long."""
my_value = long(42)
value = memcache._decode_value(b'42', memcache.TYPE_LONG,
six.moves.cPickle.loads)
self.assertEqual(my_value, value)
self.assertIs(type(value), long)
def testDecodeValueHelperBoolValue(self):
"""Tests encoding the server value when it's a bool."""
value = memcache._decode_value(b'1', memcache.TYPE_BOOL,
six.moves.cPickle.loads)
self.assertEqual(True, value)
self.assertIs(type(value), bool)
value = memcache._decode_value(b'0', memcache.TYPE_BOOL,
six.moves.cPickle.loads)
self.assertEqual(False, value)
self.assertIs(type(value), bool)
def testCreateRpc(self):
"""Tests create_rpc() function."""
with mock.patch.object(apiproxy_stub_map, 'CreateRPC'):
rpc = memcache.create_rpc()
self.assertIsInstance(rpc, apiproxy_stub_map.UserRPC)
self.assertEqual(rpc.deadline, None)
self.assertEqual(rpc.callback, None)
apiproxy_stub_map.CreateRPC.assert_called_once_with('memcache', mock.ANY)
def testCreateRpcArgs(self):
"""Tests create_rpc() function."""
with mock.patch.object(apiproxy_stub_map, 'CreateRPC'):
callback = lambda: None
rpc = memcache.create_rpc(deadline=2, callback=callback)
self.assertIsInstance(rpc, apiproxy_stub_map.UserRPC)
self.assertEqual(rpc.deadline, 2)
self.assertEqual(rpc.callback, callback)
apiproxy_stub_map.CreateRPC.assert_called_once_with('memcache', mock.ANY)
def testSetServers(self):
"""Tests no-op set_servers call."""
service_method, proto_request, return_value = self.CallMethod(
'set_servers', args=[['10.0.0.1:11211', '10.0.0.2:11212']])
self.assertEqual(None, proto_request)
self.assertEqual(None, return_value)
self.assertEqual(None, service_method)
def testDisconnectAll(self):
"""Tests no-op disconnect_all call."""
service_method, proto_request, return_value = self.CallMethod(
'disconnect_all')
self.assertEqual(None, proto_request)
self.assertEqual(None, return_value)
self.assertEqual(None, service_method)
def
activation
actv = activation == 'relu' and (lambda: LeakyReLU(0.0)) or activation == 'elu' and (lambda: ELU(1.0)) or None
# vertical 1
c1_1 = Conv2D(filters=filters // 16, kernel_size=(1, 1), padding='same',
activation=activation, kernel_initializer='he_normal')(inputs)
if version == 'b':
c1_2 = NConv2D(filters=filters // 8, kernel_size=(1, 5), padding='same',
activation=activation, kernel_initializer='he_normal')(c1_1)
c1 = Conv2D(filters=filters // 8, kernel_size=(5, 1), kernel_initializer='he_normal', padding='same')(c1_2)
else:
c1 = Conv2D(filters=filters // 8, kernel_size=(5, 5), kernel_initializer='he_normal', padding='same')(c1_1)
# vertical 2
c2_1 = Conv2D(filters=filters // 8 * 3, kernel_size=(1, 1), padding='same',
activation=activation, kernel_initializer='he_normal')(inputs)
if version == 'b':
c2_2 = NConv2D(filters=filters // 2, kernel_size=(1, 3), padding='same',
activation=activation, kernel_initializer='he_normal')(c2_1)
c2 = Conv2D(filters=filters // 2, kernel_size=(3, 1), kernel_initializer='he_normal', padding='same')(c2_2)
else:
c2 = Conv2D(filters=filters // 2, kernel_size=(3, 3), kernel_initializer='he_normal', padding='same')(c2_1)
# vertical 3
p3_1 = MaxPooling2D(pool_size=(3, 3), strides=(1, 1), padding='same')(inputs)
c3 = Conv2D(filters=filters // 8, kernel_size=(1, 1), padding='same', kernel_initializer='he_normal')(p3_1)
# vertical 4
c4 = Conv2D(filters=filters // 4, kernel_size=(1, 1), padding='same', kernel_initializer='he_normal')(inputs)
# concatenating verticals together, normalizing and applying activation
result = concatenate([c1, c2, c3, c4], axis=3)
result = BatchNormalization(axis=3)(result)
result = actv()(result)
return result
# ======================================================================================================================
# Combining blocks, allowing to use different blocks from before
# ======================================================================================================================
def pooling_block(inputs, filters, kernel_size=(3, 3), strides=(2, 2), padding='same', activation=None,
pool_size=(2, 2), trainable=True, pars={}, allowed_pars={}):
"""Function returning the output of one of the pooling blocks.
Avoids having to write different versions of the u-net depending on how the pooling operation is performed:
1) trainable (default): through NConv2D custom function, see its documentation
2) non-trainable (alternative): through MaxPooling operation
To get the expected behaviour when changing 'trainable' assert strides == pool_size
Parameters starting with p_ are only to be used for (trainable=False) MaxPooling2D
Parameters starting with c_ are only to be used for (trainable=True) NConv2D
:param inputs: 4D tensor (samples, rows, cols, channels)
:param filters: NConv2D argument, filters
:param kernel_size: NConv2D argument, kernel_size
:param strides: NConv2D argument, strides
:param padding: NConv2D/MaxPooling2D argument, padding
:param activation: NConv2D argument, activation
:param pool_size: MaxPooling2D argument, pool_size
:param trainable: boolean specifying the version of a pooling block with default behaviour
trainable=True: NConv2D(inputs._keras_shape[3], kernel_size=kernel_size, strides=strides, padding=padding)(
inputs)
trainable=False: MaxPooling2D(pool_size=pool_size)(inputs)
:param pars: dictionary of parameters passed to u-net, determines the version of the block
:param allowed_pars: dictionary of all allowed to be passed to u-net parameters
:return: 4D tensor (samples, rows, cols, channels) output of a pooling block
"""
# checking that the allowed trainable parameters did not change in ALLOWED_PARS
if allowed_pars != {}:
assert allowed_pars.get('pooling_block').get('trainable') == [True, False]
# keep trainable argument if need to use without PARS
assert trainable in [True, False]
# setting the version from pars
if pars.get('pooling_block', {}).get('trainable') is not None:
trainable = pars.get('pooling_block').get('trainable')
# returning block's output
if trainable:
return NConv2D(filters=filters, kernel_size=kernel_size, strides=strides,
padding=padding, activation=activation)(inputs)
else:
return MaxPooling2D(pool_size=pool_size, padding=padding)(inputs)
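# A minimal sketch of the two pooling_block variants, assuming this module's
# Keras imports and the custom NConv2D layer are available as in the full
# repository; shapes are illustrative only.
#
#   from keras.layers import Input
#   x = Input((80, 112, 32))
#   p_train = pooling_block(x, filters=32, activation='relu', trainable=True)   # strided NConv2D
#   p_fixed = pooling_block(x, filters=32, trainable=False)                     # MaxPooling2D
#   # both reduce (None, 80, 112, 32) -> (None, 40, 56, 32)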
def information_block(inputs, filters, kernel_size=(3, 3), padding='valid', activation=None,
block='inception', block_type='v2', version='b', pars={}, allowed_pars={}):
"""Function returning the output of one of the information blocks.
:param inputs: 4D tensor (samples, rows, cols, channels)
:param filters: Integer, the dimensionality of the output space (i.e. the number of output filters in the
convolution).
:param kernel_size: An integer or tuple/list of 2 integers, specifying the height and width of the 2D convolution
window. Can be a single integer to specify the same value for all spatial dimensions.
:param padding: one of 'valid' or 'same' (case-insensitive), 'valid' by default to have the same as Conv2D
:param activation: string, specifies activation function to use everywhere in the block
Next 3 parameters are there to be able to leave 'pars' and 'allowed_pars' empty
:param block: one of 'inception' or 'convolution' (case-sensitive)
:param block_type: if block == 'inception', one of 'v1', 'v2', 'et' (case-sensitive)
if block == 'convolution': one of 'simple', 'dilated' (case-sensitive)
:param version: version of a block to use
:param pars: dictionary of parameters passed to u-net, determines the version of the block
:param allowed_pars: dictionary of all allowed to be passed to u-net parameters
:return: 4D tensor (samples, rows, cols, channels) output of an information block
"""
# getting which block, block_type, version to use as the information block
if pars.get('information_block') is not None:
block = list(pars.get('information_block').keys())[0]
block_type = list(pars.get('information_block').get(block).keys())[0]
version = pars.get('information_block').get(block).get(block_type)
# inception block
if block == 'inception':
if block_type == 'v1':
return inception_block_v1(inputs=inputs, filters=filters, activation=activation,
version=version, pars=pars, allowed_pars=allowed_pars)
elif block_type == 'v2':
return inception_block_v2(inputs=inputs, filters=filters, activation=activation,
version=version, pars=pars, allowed_pars=allowed_pars)
else:
return inception_block_et(inputs=inputs, filters=filters, activation=activation,
version=version, pars=pars, allowed_pars=allowed_pars)
# convolution block
else:
if block_type == 'simple':
return convolution_block(inputs=inputs, filters=filters, kernel_size=kernel_size,
padding=padding, activation=activation,
version=version, pars=pars, allowed_pars=allowed_pars)
else:
return dilated_convolution_block(inputs=inputs, filters=filters,
kernel_size=kernel_size, padding=padding,
activation=activation, version=version,
pars=pars, allowed_pars=allowed_pars)
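# Illustrative sketch of how 'pars' selects an information block; the real
# PARS/ALLOWED_PARS dictionaries live in configuration.py (not shown here),
# so the dictionary below is only an assumed example of their shape.
#
#   pars_example = {'information_block': {'inception': {'v2': 'b'}}}
#   conv = information_block(x, 64, padding='same', activation='relu', pars=pars_example)
#   # equivalent to inception_block_v2(x, filters=64, activation='relu', version='b')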
def connection_block(inputs, filters, padding='valid', activation=None,
version='residual', pars={}, allowed_pars={}):
"""Function returning the output of one of the connection block.
:param inputs: 4D tensor (samples, rows, cols, channels)
:param filters: Integer, the dimensionality of the output space (i.e. the number of output filters in the
convolution).
:param padding: one of 'valid' or 'same' (case-insensitive), 'valid' by default to have the same as Conv2D
:param activation: string, one of 'elu' or 'relu' or None (case-sensitive),
specifies activation function to use everywhere in the block
Version parameter is there to be able to leave 'pars' and 'allowed_pars' empty
:param version: one of 'not_residual' or 'residual', version of a block to use
:param pars: dictionary of parameters passed to u-net, determines the version of the block
:param allowed_pars: dictionary of all allowed to be passed to u-net parameters
:return: 4D tensor (samples, rows, cols, channels) output of a connection block
"""
# checking that the allowed trainable parameters did not change in ALLOWED_PARS
if allowed_pars != {}:
assert allowed_pars.get('connection_block') == ['not_residual', 'residual']
# keep trainable argument if need to use without PARS
assert version in ['not_residual', 'residual']
# setting the version from pars
if pars.get('connection_block') is not None:
version = pars.get('connection_block')
if version == 'residual':
return rblock(inputs=inputs, filters=32, kernel_size=(1, 1), padding='same', activation=activation)
else:
return Conv2D(filters=filters, kernel_size=(2, 2), padding=padding, kernel_initializer='he_normal')(inputs)
########################################################################################################################
# ======================================================================================================================
# u_model
# ======================================================================================================================
########################################################################################################################
# needed for train
# standard-module imports
import numpy as np
from keras.layers import Input, concatenate, Conv2D, UpSampling2D, Dense
from keras.layers import Dropout, Flatten
from keras.models import Model
from keras.optimizers import Adam
from keras import backend as K
# # separate-module imports
# from metric import dice_coef, dice_coef_loss
# from u_model_blocks import pooling_block, connection_block, information_block
# from configuration import ALLOWED_PARS, PARS
IMG_ROWS, IMG_COLS = 80, 112
K.set_image_data_format('channels_last') # (number of images, rows per image, cols per image, channels)
# ======================================================================================================================
# U-net with Inception blocks, Normalised 2D Convolutions instead of Maxpooling
# ======================================================================================================================
def get_unet_customised(optimizer, pars=PARS, allowed_pars=ALLOWED_PARS):
"""
Creating and compiling the U-net
This version is fully customisable by choosing pars argument
:param optimizer: specifies the optimiser for the u-net, e.g. Adam, RMSProp, etc.
:param pars: optional, dictionary of parameters passed to customise the U-net
:param allowed_pars: optional, dictionary of parameters allowed to be passed to customise the U-net
:return: compiled u-net, Keras.Model object
"""
# string, activation function
activation = pars.get('activation')
# input
inputs = Input((IMG_ROWS, IMG_COLS, 1), name='main_input')
print('inputs:', inputs._keras_shape)
#
# down the U-net
#
conv1 = information_block(inputs, 32, padding='same', activation=activation, pars=pars, allowed_pars=allowed_pars)
print('conv1', conv1._keras_shape)
pool1 = pooling_block(inputs=conv1, filters=32, activation=activation, pars=pars, allowed_pars=allowed_pars)
print('pool1', pool1._keras_shape)
pool1 = Dropout(0.5)(pool1)
print('pool1', pool1._keras_shape)
conv2 = information_block(pool1, 64, padding='same', activation=activation, pars=pars, allowed_pars=allowed_pars)
print('conv2', conv2._keras_shape)
pool2 = pooling_block(inputs=conv2, filters=64, activation=activation, pars=pars, allowed_pars=allowed_pars)
print('pool2', pool2._keras_shape)
pool2 = Dropout(0.5)(pool2)
print('pool2', pool2._keras_shape)
conv3 = information_block(pool2, 128, padding='same', activation=activation, pars=pars, allowed_pars=allowed_pars)
print('conv3', conv3._keras_shape)
pool3 = pooling_block(inputs=conv3, filters=128, activation=activation, pars=pars, allowed_pars=allowed_pars)
print('pool3', pool3._keras_shape)
pool3 = Dropout(0.5)(pool3)
print('pool3', pool3._keras_shape)
conv4 = information_block(pool3, 256, padding='same', activation=activation, pars=pars, allowed_pars=allowed_pars)
print('conv4', conv4._keras_shape)
pool4 = pooling_block(inputs=conv4, filters=256, activation=activation, pars=pars, allowed_pars=allowed_pars)
print('pool4', pool4._keras_shape)
pool4 = Dropout(0.5)(pool4)
print('pool4', pool4._keras_shape)
#
# bottom level of the U-net
#
conv5 = information_block(pool4, 512, padding='same', activation=activation, pars=pars, allowed_pars=allowed_pars)
print('conv5', conv5._keras_shape)
conv5 = Dropout(0.5)(conv5)
print('conv5', conv5._keras_shape)
#
# auxiliary output for predicting probability of nerve presence
#
if pars['outputs'] == 2:
pre = Conv2D(1, kernel_size=(1, 1), kernel_initializer='he_normal', activation='sigmoid')(conv5)
pre = Flatten()(pre)
aux_out = Dense(1, activation='sigmoid', name='aux_output')(pre)
#
# up the U-net
#
after_conv4 = connection_block(conv4, 256, padding='same', activation=activation,
pars=pars, allowed_pars=allowed_pars)
print('after_conv4', after_conv4._keras_shape)
up6 = concatenate([UpSampling2D(size=(2, 2))(conv5), after_conv4], axis=3)
conv6 = information_block(up6, 256, padding='same', activation=activation, pars=pars, allowed_pars=allowed_pars)
print('conv6', conv6._keras_shape)
conv6
from flask import Flask, request, render_template, send_file, Response
import io
import base64
import csv
import json
import time
from collections import OrderedDict
import numpy
import pandas as pd
from numpy import genfromtxt
from flask import jsonify
from flask_cors import CORS
from LoadingNetwork import EchoWebSocket
import shutil
import gc
from tornado.wsgi import WSGIContainer
from tornado.web import Application, FallbackHandler
from tornado.websocket import WebSocketHandler
from tornado.ioloop import IOLoop
app = Flask('flasknado')
#app = Flask(__name__)
app.debug = True
CORS(app)
##initial network csv data############################
with open('NetworkWithDistance.txt') as f:
rawdata = f.readlines()
# remove whitespace characters like `\n` at the end of each line
rawdata = [x.strip() for x in rawdata]
my_data = genfromtxt('networkwithdist.csv', delimiter=',')
# my_data=numpy.delete(my_data,(0),axis=0)
header = ['id', 'id_to', 'lon', 'lat', 'basinid']
frame = pd.DataFrame(my_data, columns=header)
data = []
MY_GLOBAL = []
with open('tempcsv.csv') as f:
for line in f:
temp = line.strip().split(',')
data.append(temp)
#############################
data1 = []
with open('MyFile1.txt') as f:
r = 0
for line in f:
if(r > 0):
data2 = []
# print(line)
temp = line.split("\",")
data2.append(temp[0][1:])
temp1 = temp[1].split(",[")
data2.append(temp1[0])
data2.append(temp1[1][:-2])
data1.append(data2)
r += 1
header = ['celllist', 'cellid', 'cellto']
frame_celllist = pd.DataFrame(data1, columns=header)
frame_celllist = frame_celllist.drop_duplicates()
del data1[:]
##################
data_c = []
with open('powerplant_cell_loc.csv') as f:
r = 0
for line in f:
if(r > 0):
data_cc = line.split(",")
data_c.append(data_cc)
# print(line)
r += 1
header = ['cellid', 'loc']
frame_cell = pd.DataFrame(data_c, columns=header)
frame_cell = frame_cell.drop_duplicates()
del data_c[:]
########################################################
import os
import sys
from SimpleHTTPServer import SimpleHTTPRequestHandler
import BaseHTTPServer
# class MyHTTPRequestHandler(SimpleHTTPRequestHandler):
# def translate_path(self,path):
# path = SimpleHTTPRequestHandler.translate_path(self,path)
# if os.path.isdir(path):
# for base in "index", "default":
# for ext in ".html", ".htm", ".txt":
# index = path + "/" + base + ext
# if os.path.exists(index):
# return index
# return path
# def test(HandlerClass = MyHTTPRequestHandler,
# ServerClass = BaseHTTPServer.HTTPServer):
# BaseHTTPServer.test(HandlerClass, ServerClass)
##################traversal network upstream############
'''def find_upstream(value):
gc.collect()
ii=0
li = []
temp=[]
a=frame.ix[int(value)]
temp.append(a)
#print(MY_GLOBAL)
MY_GLOBAL[:]=[]
#x=data[int(value)]
#x=frame[frame['id']==a['id_to']]
#print x
i=0
z=0
zz=0
while zz<len(temp):
item=temp[zz]
zz+=1
##print(z,len(temp))
## item=temp.pop()
## print item
#x=frame[frame['id_to']==item['id']]
x=data[int(float(item['id']))]
#print x
i=1
while i<len(x) :
# d = OrderedDict()
# xx=x.loc[x.index[i]]
xx=frame.ix[int(float(x[i]))]
# d['type'] = 'Feature'
# d['geometry'] = {
# 'type': 'MultiLineString',
# 'coordinates': [[[float(xx['lon']),float(xx['lat'])],[float(item['lon']), float(item['lat'])]]]
# }
# d['properties'] = { "id":int(xx['id']),"id_to":int(xx['id_to']),"lon": float(xx['lon']),"lat": float(xx['lat'])
# }
# li.append(d)
i+=1
# ii+=1
##if ii%1000==0:
## print ii
temp.append(xx)
print(len(temp))
while z<len(temp):
item=temp[z]
z+=1
##print(z,len(temp))
## item=temp.pop()
## print item
#x=frame[frame['id_to']==item['id']]
x=data[int(float(item['id']))]
#print x
i=1
while i<len(x) :
d = OrderedDict()
#xx=x.loc[x.index[i]]
xx=frame.ix[int(float(x[i]))]
d['type'] = 'Feature'
d['geometry'] = {
'type': 'MultiLineString',
'coordinates': [[[float(xx['lon']),float(xx['lat'])],[float(item['lon']), float(item['lat'])]]]
}
d['properties'] = { "id":int(xx['id']),"id_to":int(xx['id_to']),"lon": float(xx['lon']),"lat": float(xx['lat'])
}
li.append(d)
d = OrderedDict()
#xx=x.loc[x.index[i]]
# xx=frame.ix[int(float(x[i]))]
i+=1
ii+=1
if ii%1000==0 or (ii+1)/len(temp)==1:
MY_GLOBAL.append((int)((ii+1)/(len(temp)* 1.0)*100))
## print(checkInt,ii,len(temp))
## print ii
# temp.append(xx)
#d = OrderedDict()
#d['type'] = 'FeatureCollection'
#d['features'] = li
#print li
print(ii)
return li,200'''
def find_upstream(value):
gc.collect()
ii = 0
li = []
temp = []
a = frame.ix[int(value)]
temp.append(int(value))
MY_GLOBAL[:] = []
i = 0
z = 0
zz = 0
jstring = ''
while z < len(temp):
item = frame.ix[temp[z]]
z += 1
x = data[int(float(item['id']))]
#print x
i = 1
while i < len(x):
xx = frame.ix[int(float(x[i]))]
jstring += '{"type": "Feature","geometry": { "type": "MultiLineString", "coordinates": [[[' + str(float(xx['lon'])) + ',' + str(float(xx['lat'])) + '],[' + str(float(item['lon'])) + ',' + str(
float(item['lat'])) + ']]]},"properties": {"id_to": ' + str(int(xx['id_to'])) + ',"id":' + str(int(xx['id'])) + ',"lat":' + str(float(xx['lat'])) + ',"lon": ' + str(float(xx['lon'])) + '}},'
ii += 1
temp.append(int(float(x[i])))
i += 1
if ii % 1000 == 0:
# print(ii)
MY_GLOBAL.append((int)((ii + 1) / (200000 * 1.0) * 100))
# print(checkInt,ii,len(temp))
## print ii
# temp.append(xx)
#d = OrderedDict()
#d['type'] = 'FeatureCollection'
#d['features'] = li
#print li
# print(jstring)
MY_GLOBAL.append(100)
return jstring[:-1], 200
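# Self-contained toy illustration (hypothetical data) of the breadth-first
# walk used by find_upstream above: each row of 'toy_data' mimics the global
# 'data' adjacency lists, where entry 0 is a cell id and the remaining
# entries are the ids that flow into it.
def _toy_upstream(start, toy_data):
    visited = [start]
    z = 0
    while z < len(visited):
        current = visited[z]
        z += 1
        # enqueue every cell draining into the current one
        visited.extend(int(x) for x in toy_data[current][1:])
    return visited
# Example: _toy_upstream(0, [[0, 1, 2], [1, 3], [2], [3]]) returns [0, 1, 2, 3]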
##################traversal network downstream############
def find_downstream(value, sourceid):
#print value,sourceid
ii = 0
li = []
temp = []
jstring = ''
# MY_GLOBAL[:]=[]
a = frame.ix[int(value)]
temp.append(a)
check = True
z = 0
while z < len(temp) and check:
item = temp[z]
z += 1
if(item['id_to'] == sourceid):
check = False
# break
## print item
# if(item['id']==sourceid):
# check=False
x = frame.ix[frame['id'] == item['id_to']]
#print x
i = 0
while i < len(x):
# d = OrderedDict()
xx = x.ix[x.index[i]]
jstring += '{"type": "Feature","geometry": { "type": "MultiLineString", "coordinates": [[[' + str(float(xx['lon'])) + ',' + str(float(xx['lat'])) + '],[' + str(float(item['lon'])) + ',' + str(
float(item['lat'])) + ']]]},"properties": {"id_to": ' + str(int(xx['id_to'])) + ',"id":' + str(int(xx['id'])) + ',"lat":' + str(float(xx['lat'])) + ',"lon": ' + str(float(xx['lon'])) + '}},'
# d['type'] = 'Feature'
# d['geometry'] = {
# 'type': 'MultiLineString',
# 'coordinates': [[[float(xx['lon']),float(xx['lat'])],[float(item['lon']), float(item['lat'])]]]
# }
# d['properties'] = { "id":int(xx['id']),"id_to":int(xx['id_to']),"lon": float(xx['lon']),"lat": float(xx['lat'])
# }
# li.append(d)
# d=OrderedDict()
i += 1
ii += 1
temp.append(xx)
# if(item['id']==sourceid):
# check=False
# MY_GLOBAL.append(100)
# d = OrderedDict()
# d['type'] = 'FeatureCollection'
# d['features'] = li
# print li
# if (check==False):
return jstring[:-1], 200
##################traversal network downstream############
def find_downstream1(value):
#print value,sourceid
ii = 0
li = []
temp = []
jstring = ''
# MY_GLOBAL[:]=[]
a = frame.ix[int(value)]
temp.append(a)
check = True
z = 0
while z < len(temp) and check:
item = temp[z]
z += 1
## print item
# if(item['id']==sourceid):
# check=False
x = frame.ix[frame['id'] == item['id_to']]
#print x
i = 0
while i < len(x):
# d = OrderedDict()
xx = x.ix[x.index[i]]
jstring += '{"type": "Feature","geometry": { "type": "MultiLineString", "coordinates": [[[' + str(float(xx['lon'])) + ',' + str(float(xx['lat'])) + '],[' + str(float(item['lon'])) + ',' + str(
float(item['lat'])) + ']]]},"properties": {"id_to": ' + str(int(xx['id_to'])) + ',"id":' + str(int(xx['id'])) + ',"lat":' + str(float(xx['lat'])) + ',"lon": ' + str(float(xx['lon'])) + '}},'
# d['type'] = 'Feature'
# d['geometry'] = {
# 'type': 'MultiLineString',
# 'coordinates': [[[float(xx['lon']),float(xx['lat'])],[float(item['lon']), float(item['lat'])]]]
# }
# d['properties'] = { "id":int(xx['id']),"id_to":int(xx['id_to']),"lon": float(xx['lon']),"lat": float(xx['lat'])
# }
# li.append(d)
# d=OrderedDict()
i += 1
ii += 1
temp.append(xx)
# if(item['id']==sourceid):
# check=False
# MY_GLOBAL.append(100)
# d = OrderedDict()
# d['type'] = 'FeatureCollection'
# d['features'] = li
# print li
# if (check==False):
return jstring[:-1], 200
#######################pp upstream#######################
def find_upstream_pp(cellid):
gc.collect()
# header=['celllist','cellid','cellto']
# header=['cellid','loc']
templi = frame_celllist[frame_celllist['cellid']
== cellid]['celllist'].tolist()
templist = templi[0][1:-1].split(",")
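    # `celllist` appears to be stored as one bracketed, comma-separated string
    # (e.g. "[12, 15, 20]"): the surrounding brackets are stripped above and the
    # ids split on ",". Each cell's frame_cell['loc'] value is an "lon_lat"-style
    # string that is split on "_" further down.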
z = 0
jstring = ''
while z < len(templist):
curid = templist[z].strip()
# print(curid,templist)
curidloc = frame_cell[frame_cell['cellid'] == curid]['loc'].tolist()
curidloc1 = curidloc[0].split("_")
# print(curidloc1[0],curidloc1[1][:-1],curidloc[0])
z += 1
temp = frame_celllist[frame_celllist['cellid']
== curid]['cellto'].tolist()
print(temp)
temp = temp[0].split(",")
if len(temp) == 1 and temp[0][:-1] == "none":
# print(temp[0])
continue
else:
zz = 0
while zz < len(temp):
# print(temp[zz],temp)
x = temp[zz]
zz += 1
if zz == len(temp):
nextloc = frame_cell[frame_cell['cellid']
== x[:-1]]['loc'].tolist()
else:
nextloc = frame_cell[frame_cell['cellid']
== x]['loc'].tolist()
nextloc1 = nextloc[0].split("_")
# print(nextloc1[0],nextloc1[1][:-1],nextloc1)
jstring += '{"type": "Feature","geometry": { "type": "MultiLineString", "coordinates": [[[' + str(curidloc1[0]) + ',' + str(curidloc1[1][:-1]) + '],[' + str(
nextloc1[0]) + ',' + str(nextloc1[1][:-1]) + ']]]},"properties": {"lat":' + str(curidloc1[1][:-1]) + ',"lon": ' + str(curidloc1[0]) + '}},'
# jstring+='{"type": "Feature","geometry": { "type": "MultiLineString", "coordinates": [[['+str(float(xx['lon']))+','+str(float(xx['lat']))+'],['+str(float(item['lon']))+','+str(float(item['lat']))+']]]},"properties": {"id_to": '+str(int(xx['id_to']))+',"id":'+str(int(xx['id']))+',"lat":'+str(float(xx['lat']))+',"lon": '+str(float(xx['lon']))+'}},';
return jstring[:-1], 200
#######################pp downstream#######################
def find_downstream_pp(cellid, dcellid):
gc.collect()
# header=['celllist','cellid','cellto']
# header=['cellid','loc']
print(cellid, dcellid)
templi = frame_celllist[frame_celllist['cellid']
== cellid]['celllist'].tolist()
templist = templi[0][1:-1].split(",")
z = len(templist) - 1
jstring = ''
while z > 0:
print(templist[z].strip())
curid = templist[z].strip()
if curid != str(dcellid):
z -= 1
else:
print(z)
break
while z > 0:
curid = templist[z].strip()
# print(curid,templist)
curidloc = frame_cell[frame_cell['cellid'] == curid]['loc'].tolist()
curidloc1 = curidloc[0].split("_")
# print(curidloc1[0],curidloc1[1][:-1],curidloc[0])
temp = frame_celllist[frame_celllist['cellid']
== templist[z].strip()]['cellto'].tolist()
z -= 1
print(temp)
temp = temp[0].split(",")
if len(temp) == 1 and temp[0][:-1] == "none":
# print(temp[0])
z -= 1
continue
else:
zz = 0
aaaa = 'false'
while zz < len(temp):
# print(temp[zz],temp)
x = temp[zz]
zz += 1
if zz == len(temp):
if x[:-1] == curid:
aaaa = 'true'
nextloc = frame_cell[frame_cell['cellid']
== x[:-1]]['loc'].tolist()
else:
if x == curid:
aaaa = 'true'
nextloc = frame_cell[frame_cell['cellid']
== x]['loc'].tolist()
if aaaa == 'true':
nextloc1 = nextloc[0].split("_")
# print(nextloc1[0],nextloc1[1][:-1],nextloc1)
jstring += '{"type": "Feature","geometry": { "type": "MultiLineString", "coordinates": [[[' + str(curidloc1[0]) + ',' + str(curidloc1[1][:-1]) + '],[' + str(
nextloc1[0]) + ',' + str(nextloc1[1][:-1]) + ']]]},"properties": {"lat":' + str(curidloc1[1][:-1]) + ',"lon": ' + str(curidloc1[0]) + '}},'
# jstring+='{"type": "Feature","geometry": { "type": "MultiLineString", "coordinates": [[['+str(float(xx['lon']))+','+str(float(xx['lat']))+'],['+str(float(item['lon']))+','+str(float(item['lat']))+']]]},"properties": {"id_to": '+str(int(xx['id_to']))+',"id":'+str(int(xx['id']))+',"lat":'+str(float(xx['lat']))+',"lon": '+str(float(xx['lon']))+'}},';
print(jstring)
if len(jstring) > 0:
return jstring[:-1], 200
else:
return jstring, 200
@app.route("/", methods=['GET', 'POST'])
def index():
print(request)
return render_template('test1.html')
@app.route("/api/", methods=['GET', 'POST'])
def update():
print(request.method)
if request.method == "POST":
source = request.form["source"]
"sollte es" sein. Und "$so$ ist $es$" hier.',
u'So "`sollte es"\' sein. Und \\@"`$so$ ist $es$\\@"\' hier.'],
[u'Bitte "ACRONYME" berücksichtigen.',
u'Bitte "`\\@\\acronym{ACRONYME}"\' berücksichtigen.'],
[u'"Altern der DNA"',
u'"`Altern der \\@\\acronym{DNA}"\''] ]
abbreviation = [ [u'Von 3760 v.Chr. bis 2012 n.Chr. und weiter',
u'Von 3760\,v.\\,Chr. bis 2012\,n.\\,Chr. und weiter'],
[u'Es ist z.B. so, s.o., s.u., etc., dass wir, d.h.',
u'Es ist z.\\,B. so, s.\\,o., s.\\,u., etc., dass wir, d.\\,h.'],
[u'aber u.a. auch o.ä. wie o.Ä.',
u'aber u.\\,a. auch o.\\,ä. wie o.\\,Ä.'],
[u'Keine erlaubet Abkuerzungen sind umgspr. und oBdA. im Exporter.',
u'Keine erlaubet Abkuerzungen sind umgspr. und oBdA. im Exporter.'],
# similar to above, but with spaces in input
[u'Von 3760 v. Chr. bis 2012 n. Chr. und weiter',
u'Von 3760\,v.\\,Chr. bis 2012\,n.\\,Chr. und weiter'],
[u'Es ist z. B. so, s. o., s. u., etc., dass wir,',
u'Es ist z.\\,B. so, s.\\,o., s.\\,u., etc., dass wir,'],
[u'd. h., der Exporter bzw. oder ca. oder so.',
u'd.\\,h., der Exporter bzw. oder ca. oder so.'],
[u'Aber u. a. auch o. ä. wie o. Ä.',
u'Aber u.\\,a. auch o.\\,ä. wie o.\\,Ä.']]
acronym = [ [u'Bitte ACRONYME wie EKGs anders setzen.',
u'Bitte \\@\\acronym{ACRONYME} wie \\@\\acronym{EKGs} anders setzen.'],
[u'Unterscheide T-shirt und DNA-Sequenz.',
u'Unterscheide T-shirt und \\@\\acronym{DNA}-Sequenz.'],
[u'Wenn 1D nicht reicht, nutze 2D oder 6D.',
u'Wenn 1\\@\\acronym{D} nicht reicht, nutze 2\\@\\acronym{D} oder\n6\\@\\acronym{D}.'],
[u'Wahlergebnis fuer die SPD: 9% (NRW).',
u'Wahlergebnis fuer die \\@\\acronym{SPD}: 9\\,\\% (\\@\\acronym{NRW}).'],
[u'FDP? CDU! CSU. ÖVP.',
u'\\@\\acronym{FDP}? \\@\\acronym{CDU}! \\@\\acronym{CSU}. \\@\\acronym{ÖVP}.'],
[u'Das ZNS.',
u'Das \\@\\acronym{ZNS}.'] ]
escaping = [ [u'Forbid \\mathbb and \\dangerous outside math.',
u'Forbid \\@\\forbidden\\mathbb and \\@\\forbidden\\dangerous outside math.'],
[u'Do not allow $a \\dangerous{b}$ commands!',
u'Do not allow $a \\@\\forbidden\\dangerous{b}$ commands!'],
[u'\\\\ok, $\\\\ok$',
u'\\\\ok, $\\\\ok$'],
[u'$\\\\\\bad$',
u'$\\\\\\@\\forbidden\\bad$'],
[u'Escaping in math like $\\evilmath$, but not $\\mathbb C$',
u'Escaping in math like $\\@\\forbidden\\evilmath$, but not $\\mathbb C$'],
[u'$\\circ$ $\\cap\\inf$ $\\times$',
u'$\\circ$ $\\cap\\inf$ $\\times$' ],
[u'$a &= b$',
u'$a \\@\\forbidden\\&= b$'],
[u'$$a &= b$$',
u'\\begin{equation*}\na \\@\\forbidden\\&= b\n\\end{equation*}'],
[u'Trailing \\',
u'Trailing \\@\\backslash'],
[u'$Trailing \\$',
u'$Trailing \\@\\backslash$'],
[u'f# ist eine Note',
u'f\\@\\# ist eine Note'],
[u'$a^b$ ist gut, aber a^b ist schlecht',
u'$a^b$ ist gut, aber a\\@\\caret{}b ist schlecht'],
[u'Heinemann&Co. ist vielleicht eine Firma',
u'Heinemann\\@\\&Co. ist vielleicht eine Firma'],
[u'10% sind ein Zehntel und mehr als 5 %.',
u'10\\,\\% sind ein Zehntel und mehr als 5\\@\\,\\%.'],
[u'Geschweifte Klammern { muessen } escaped werden.',
u'Geschweifte Klammern \\@\\{ muessen \\@\\} escaped werden.'],
[u'Tilde~ist unklar. $Auch~hier$.',
u'Tilde\\@~ist unklar. $Auch\\@~hier$.'] ]
mathSymbols = [ [u'$\\'+i+u'$', u'$\\'+i+u'$'] for i in allowedMathSymbolCommands ]
mathEnvironments = [ [u'b $$\\circ \\cap \\inf \\times$$ e',
u'b\n\\begin{equation*}\n\\circ \\cap \\inf \\times\n\\end{equation*}\n e'],
[u'b $$\n\\circ \\cap \\inf \\times\n$$ e',
u'b\n\\begin{equation*}\n\\circ \\cap \\inf \\times\n\\end{equation*}\n e'],
[u'b $$\n\\begin{equation}a + b = c\\end{equation}\n$$ e',
u'b\n\\begin{equation}\na + b = c\n\\end{equation}\n e'],
[u'b $$\n\\begin{equation*}a + b = c \\end{equation*}\n$$ e',
u'b\n\\begin{equation*}\na + b = c\n\\end{equation*}\n e'],
[u'b $$\n\\begin{align}\na + b &= c \\\\\na - b &= d\n\\end{align}\n$$ e',
u'b\n\\begin{align}\na + b &= c \\\\\na - b &= d\n\\end{align}\n e'],
[u'b $$\n\\begin{align*}\na + b &= c \\\\\na - b &= d\n\\end{align*}\n$$ e',
u'b\n\\begin{align*}\na + b &= c \\\\\na - b &= d\n\\end{align*}\n e'],
[u'b $$\n\\begin{align}\nabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz &= c\\\\\nabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz &= d \\end{align}\n$$ e',
u'b\n\\begin{align}\nabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz &= c\\\\\nabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz &= d\n\\end{align}\n e'],
[u'a $$\n\\begin{equation} b &= c \\end{equation}\n$$ d',
u'a\n\\begin{equation}\nb \\@\\forbidden\\&= c\n\\end{equation}\n d'],
[u'b $$\n\\begin{equation}a + b &= c\\end{equation}\n$$ e',
u'b\n\\begin{equation}\na + b \\@\\forbidden\\&= c\n\\end{equation}\n e'],
                        [u'b $$\n\\begin{align}a + b \\evilmath = c\\end{align}\n$$ e',
u'b\n\\begin{align}\na + b \\@\\forbidden\\evilmath = c\n\\end{align}\n e'],
[u'Bla $$\n\\begin{align}\na + b &= c\\\\\na - b &= d \\end{align}\n$$ Blub',
u'Bla\n\\begin{align}\na + b &= c\\\\\na - b &= d\n\\end{align}\n Blub'],
[u'Matrix $\\begin{pmatrix} a & b \\\\ c & d \\end{pmatrix}$.',
u'Matrix $\\@\\forbidden\\begin{pmatrix} a \\@\\forbidden\\& b \\\\ c\n\\@\\forbidden\\& d \\@\\forbidden\\end{pmatrix}$.'],
[u'Chemische Formel fuer $\\ch{H3O+}$ protoniertes Wasser.',
u'Chemische Formel fuer $\\ch{H3O+}$ protoniertes Wasser.'] ]
evilUTF8 = [ [u'Bla … blub bloink.',
u'Bla~\\@\\dots{} blub bloink.'],
[u'Bla – blub — bloink.',
u'Bla \\@-- blub \\@--- bloink.'],
[u'Bla „deutsch“ “american” ”unusual“.',
u'Bla \\@"`deutsch\\@"\' \\@"`american\\@"\' \\@"`unusual\\@"\'.'],
[u'Bla «französisch» oder « französisch ».',
u'Bla \\@"`französisch\\@"\' oder \\@\\@"` französisch \\@\\@"`.'],
[u'Bla „(deutsch,“ “(american,” ”(unusual,“.',
u'Bla \\@"`(deutsch,\\@"\' \\@"`(american,\\@"\' \\@"`(unusual,\\@"\'.'],
[u'„$einsam$ $lonely$” $quote$“ here.',
u'\\@"`$einsam$ $lonely$\\@"\' $quote$\\@"\' here.'],
[u'Bla »blub« bloink.',
u'Bla \\@"`blub\\@"\' bloink.'],
[u'\'Bla\' ‚blub‘ ‚bloink’ ›blub‹ ‹bloink›.',
u'\\@\'Bla\\@\' \\@\'blub\\@\' \\@\'bloink\\@\' \\@\'blub\\@\' \\@\'bloink\\@\'.'],
[u'„‚Nested quotes‘”.',
u'\\@\\@"`\\@\'Nested quotes\\@\'\\@\\@"`.'] ]
nonstandardSpace = [ [u'x x', # standard ASCII space
u'x x' ],
[u'x x', # non-breaking space U+00A0
u'x\@ x' ],
[u'x x', # en quad U+2000
u'x\@ x' ],
[u'x x', # em quad U+2001
u'x\@ x' ],
[u'x x', # en space U+2002
u'x\@ x' ],
[u'x x', # em space U+2003
u'x\@ x' ],
[u'x x', # 1/3 em space U+2004
u'x\@ x' ],
[u'x x', # 1/4 em space U+2005
u'x\@ x' ],
[u'x x', # 1/6 em space U+2006
u'x\@ x' ],
[u'x x', # figure space U+2007
u'x\@ x' ],
[u'x x', # punctuation space U+2008
u'x\@ x' ],
[u'x x', # thin space U+2009
u'x\@ x' ],
[u'x x', # hair space U+200A
u'x\@ x' ],
[u'xx', # zero width space U+200B
u'x\@ x' ],
[u'x x', # narrow no-break space U+202F
u'x\@ x' ],
[u'x x', # medium mathematical space (4/18 em) U+205F
u'x\@ x' ],
[u'xx', # zero-width non-breaking space U+FEFF
u'x\@ x' ] ]
pageReferences = [ [u'Auf S. 4 Abs. 3 in Art. 7 steht',
u'Auf \\@S.\\,4 \\@Abs.\\,3 in \\@Art.\\,7 steht'],
[u'Auf Seite 4 Absatz 3 in Artikel 7 steht',
u'Auf Seite~4 Absatz~3 in Artikel~7 steht'],
[u'Auf S.4-6 steht',
u'Auf \\@S.\\,4\\@--6 steht'],
[u'Auf S.4--6 steht',
u'Auf \\@S.\\,4--6 steht'],
[u'Auf S. 4f steht',
u'Auf \\@S.\\,4\\,f. steht'],
[u'S. 4 ff. besagt',
u'\\@S.\\,4\\,ff. besagt'],
[u'Es fehlen Angaben zu S. Abs. Art.',
u'Es fehlen Angaben zu \\@S. \\@Abs. \\@Art.'] ]
spacing = [ [u'A number range 6--9 is nice.',
u'A number range 6--9 is nice.'],
[u'6 -- 9 is as nice as 6-- 9, 6 --9 and 6 - 9 or 6- 9.',
u'6\\@--9 is as nice as 6\\@--9, 6\\@--9 and 6\\@--9 or 6\\@--9.'],
[u'Now we do - with all due respect --, an intersperse.',
u'Now we do \\@-- with all due respect \\@--, an intersperse.'],
[u'Followed by an afterthougt -- here it comes.',
u'Followed by an afterthougt \\@-- here it comes.'],
[u'Followed by an afterthougt---here it comes.',
u'Followed by an afterthougt\\@---here it comes.'],
[u'Here come some dots ...',
u'Here come some dots~\\@\\dots{}'],
[u'Here come some dots...',
u'Here come some dots\\@\\dots{}'],
[u'Dots in math $a_1,...,a_n$ should work without spacing.',
u'Dots in math $a_1,\\@\\dots{},a_n$ should work without spacing.'],
[u'And dots ... in … the middle.',
u'And dots~\\@\\dots{} in~\\@\\dots{} the middle.'],
[u'And dots...in the middle.',
u'And dots\\@\\dots{}in the middle.'],
[u'And dots [...] for missing text.',
u'And dots [\\@\\ZitatEllipse] for missing text.'] ]
lawReference = [ [u'In §§1ff. HGB steht',
u'In §§\\,1\\,ff. \\@\\acronym{HGB} steht'],
[u'In § 1 f. HGB steht',
u'In §\\,1\\,f. \\@\\acronym{HGB} steht'],
[u'In § 1 Abs. 1 HGB steht',
u'In §\\,1 \\@Abs.\\,1 \\@\\acronym{HGB} steht'],
[u'In § 1 Absatz 1 Satz 2 HGB steht',
u'In §\\,1 Absatz~1 Satz~2 \\@\\acronym{HGB} steht'],
[u'In §§ 10-15 HGB steht',
u'In §§\\,10\\@--15 \\@\\acronym{HGB} steht'],
[u'Ein verlorener § und noch ein §',
u'Ein verlorener \\@§ und noch ein \\@§'] ]
numbers = [ [u'We have 10000, 2000 and 3000000 and -40000 and -5000.',
u'We have 10\\,000, 2000 and 3\\,000\\,000 and \\@$-$40\\,000 and \\@$-$5000.'],
[u'We are in the 21. regiment and again in the 21.regiment.',
u'We are in the \\@21. regiment and again in the \\@21.regiment.'],
[u'bis zu 30 000 Einwohner',
u'bis zu 30 000 Einwohner'],
[u'Kennwort 0000 ist unsicher, 00000 auch, 0000000 nicht weniger',
u'Kennwort 0000 ist unsicher, 00\,000 auch, 0\,000\,000 nicht weniger'],
[u'some 5,000 races',
u'some 5,000 races'],
[u'pi ist 3,14159',
u'pi ist 3,14\,159'], # this is not really what we want, but too rare and too complex to find an automatic solution
[u'bla 2004-2006 blub',
u'bla 2004\@--2006 blub']
]
dates = [ [u'The date is 19.5.2012 or 19. 10. 95 for good.',
u'The date is \\@19.\\,5.\\,2012 or \\@19.\\,10.\\,95 for good.'] ]
units = [ [u'Units: 21kg, 4MW, 1mV, 13-14TeV, 5°C.',
                u'Units: 21\\,kg, 4\\,MW,
# -*- coding: utf-8 -*-
#
####################################################
#
# PRISM - Pipeline for animation and VFX projects
#
# www.prism-pipeline.com
#
# contact: <EMAIL>
#
####################################################
#
#
# Copyright (C) 2016-2020 <NAME>
#
# Licensed under GNU GPL-3.0-or-later
#
# This file is part of Prism.
#
# Prism is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Prism is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Prism. If not, see <https://www.gnu.org/licenses/>.
import os
try:
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *
except:
from PySide.QtCore import *
from PySide.QtGui import *
from PrismUtils.Decorators import err_catcher as err_catcher
class Prism_Fusion_Functions(object):
def __init__(self, core, plugin):
self.core = core
self.plugin = plugin
@err_catcher(name=__name__)
def instantStartup(self, origin):
# qapp = QApplication.instance()
with (
open(
os.path.join(
self.core.prismRoot,
"Plugins",
"Apps",
"Fusion",
"UserInterfaces",
"FusionStyleSheet",
"Fusion.qss",
),
"r",
)
) as ssFile:
ssheet = ssFile.read()
ssheet = ssheet.replace(
"qss:",
os.path.join(
self.core.prismRoot,
"Plugins",
"Apps",
"Fusion",
"UserInterfaces",
"FusionStyleSheet",
).replace("\\", "/")
+ "/",
)
# ssheet = ssheet.replace("#c8c8c8", "rgb(47, 48, 54)").replace("#727272", "rgb(40, 40, 46)").replace("#5e90fa", "rgb(70, 85, 132)").replace("#505050", "rgb(33, 33, 38)")
# ssheet = ssheet.replace("#a6a6a6", "rgb(37, 39, 42)").replace("#8a8a8a", "rgb(37, 39, 42)").replace("#b5b5b5", "rgb(47, 49, 52)").replace("#999999", "rgb(47, 49, 52)")
# ssheet = ssheet.replace("#9f9f9f", "rgb(31, 31, 31)").replace("#b2b2b2", "rgb(31, 31, 31)").replace("#aeaeae", "rgb(35, 35, 35)").replace("#c1c1c1", "rgb(35, 35, 35)")
# ssheet = ssheet.replace("#555555", "rgb(27, 29, 32)").replace("#717171", "rgb(27, 29, 32)").replace("#878787", "rgb(37, 39, 42)").replace("#7c7c7c", "rgb(37, 39, 42)")
# ssheet = ssheet.replace("#4c4c4c", "rgb(99, 101, 103)").replace("#5b5b5b", "rgb(99, 101, 103)").replace("#7aa3e5", "rgb(65, 76, 112)").replace("#5680c1", "rgb(65, 76, 112)")
# ssheet = ssheet.replace("#5a5a5a", "rgb(35, 35, 35)").replace("#535353", "rgb(35, 35, 41)").replace("#373737", "rgb(35, 35, 41)").replace("#858585", "rgb(31, 31, 31)").replace("#979797", "rgb(31, 31, 31)")
# ssheet = ssheet.replace("#4771b3", "rgb(70, 85, 132)").replace("#638dcf", "rgb(70, 85, 132)").replace("#626262", "rgb(45, 45, 51)").replace("#464646", "rgb(45, 45, 51)")
# ssheet = ssheet.replace("#7f7f7f", "rgb(60, 60, 66)").replace("#6c6c6c", "rgb(60, 60, 66)").replace("#565656", "rgb(35, 35, 41)").replace("#5d5d5d", "rgb(35, 35, 41)")
# ssheet = ssheet.replace("white", "rgb(200, 200, 200)")
if "parentWindows" in origin.prismArgs:
origin.messageParent.setStyleSheet(ssheet)
# origin.messageParent.resize(10,10)
# origin.messageParent.show()
origin.parentWindows = True
else:
qapp = QApplication.instance()
qapp.setStyleSheet(ssheet)
appIcon = QIcon(
os.path.join(
self.core.prismRoot, "Scripts", "UserInterfacesPrism", "p_tray.png"
)
)
qapp.setWindowIcon(appIcon)
self.isRendering = [False, ""]
return False
@err_catcher(name=__name__)
def startup(self, origin):
if not hasattr(self, "fusion"):
return False
origin.timer.stop()
return True
@err_catcher(name=__name__)
def onProjectChanged(self, origin):
pass
@err_catcher(name=__name__)
def sceneOpen(self, origin):
if hasattr(origin, "asThread") and origin.asThread.isRunning():
origin.startasThread()
@err_catcher(name=__name__)
def executeScript(self, origin, code, preventError=False):
if preventError:
try:
return eval(code)
except Exception as e:
msg = "\npython code:\n%s" % code
exec(
"raise type(e), type(e)(e.message + msg), sys.exc_info()[2]")
else:
return eval(code)
@err_catcher(name=__name__)
def getCurrentFileName(self, origin, path=True):
curComp = self.fusion.GetCurrentComp()
if curComp is None:
currentFileName = ""
else:
currentFileName = self.fusion.GetCurrentComp().GetAttrs()[
"COMPS_FileName"]
return currentFileName
@err_catcher(name=__name__)
def getSceneExtension(self, origin):
return self.sceneFormats[0]
@err_catcher(name=__name__)
def saveScene(self, origin, filepath, details={}):
try:
return self.fusion.GetCurrentComp().Save(filepath)
except:
return ""
@err_catcher(name=__name__)
def getImportPaths(self, origin):
return False
@err_catcher(name=__name__)
def getFrameRange(self, origin):
startframe = self.fusion.GetCurrentComp().GetAttrs()[
"COMPN_GlobalStart"]
endframe = self.fusion.GetCurrentComp().GetAttrs()["COMPN_GlobalEnd"]
return [startframe, endframe]
@err_catcher(name=__name__)
def setFrameRange(self, origin, startFrame, endFrame):
comp = self.fusion.GetCurrentComp()
comp.Lock()
comp.SetAttrs(
{
"COMPN_GlobalStart": startFrame,
"COMPN_RenderStart": startFrame,
"COMPN_GlobalEnd": endFrame,
"COMPN_RenderEnd": endFrame
}
)
comp.SetPrefs(
{
"Comp.Unsorted.GlobalStart": startFrame,
"Comp.Unsorted.GlobalEnd": endFrame,
}
)
comp.Unlock()
@err_catcher(name=__name__)
def getFPS(self, origin):
return self.fusion.GetCurrentComp().GetPrefs()["Comp"]["FrameFormat"]["Rate"]
@err_catcher(name=__name__)
def setFPS(self, origin, fps):
return self.fusion.GetCurrentComp().SetPrefs({"Comp.FrameFormat.Rate": fps})
@err_catcher(name=__name__)
def getResolution(self):
        width = self.fusion.GetCurrentComp().GetPrefs()[
            "Comp"]["FrameFormat"]["Width"]
        height = self.fusion.GetCurrentComp().GetPrefs()[
            "Comp"]["FrameFormat"]["Height"]
return [width, height]
@err_catcher(name=__name__)
def setResolution(self, width=None, height=None):
self.fusion.GetCurrentComp().SetPrefs(
{
"Comp.FrameFormat.Width": width,
"Comp.FrameFormat.Height": height,
}
)
@err_catcher(name=__name__)
def updateReadNodes(self):
updatedNodes = []
selNodes = self.fusion.GetCurrentComp().GetToolList(True, "Loader")
if len(selNodes) == 0:
selNodes = self.fusion.GetCurrentComp().GetToolList(False, "Loader")
if len(selNodes):
comp = self.fusion.GetCurrentComp()
comp.StartUndo("Updating loaders")
for k in selNodes:
i = selNodes[k]
curPath = comp.MapPath(i.GetAttrs()["TOOLST_Clip_Name"][1])
newPath = self.core.getLatestCompositingVersion(curPath)
if os.path.exists(os.path.dirname(newPath)) and not curPath.startswith(
os.path.dirname(newPath)
):
firstFrame = i.GetInput("GlobalIn")
lastFrame = i.GetInput("GlobalOut")
i.Clip = newPath
i.GlobalOut = lastFrame
i.GlobalIn = firstFrame
i.ClipTimeStart = 0
i.ClipTimeEnd = lastFrame - firstFrame
i.HoldLastFrame = 0
updatedNodes.append(i)
comp.EndUndo(True)
if len(updatedNodes) == 0:
QMessageBox.information(
self.core.messageParent, "Information", "No nodes were updated"
)
else:
mStr = "%s nodes were updated:\n\n" % len(updatedNodes)
for i in updatedNodes:
mStr += i.GetAttrs()["TOOLS_Name"] + "\n"
QMessageBox.information(
self.core.messageParent, "Information", mStr)
@err_catcher(name=__name__)
def getAppVersion(self, origin):
return self.fusion.Version
@err_catcher(name=__name__)
def onProjectBrowserStartup(self, origin):
origin.actionStateManager.setEnabled(False)
@err_catcher(name=__name__)
def openScene(self, origin, filepath, force=False):
if os.path.splitext(filepath)[1] not in self.sceneFormats:
return False
try:
self.fusion.LoadComp(filepath)
except:
pass
return True
@err_catcher(name=__name__)
def correctExt(self, origin, lfilepath):
return lfilepath
@err_catcher(name=__name__)
def setSaveColor(self, origin, btn):
btn.setPalette(origin.savedPalette)
@err_catcher(name=__name__)
def clearSaveColor(self, origin, btn):
btn.setPalette(origin.oldPalette)
@err_catcher(name=__name__)
def importImages(self, origin):
fString = "Please select an import option:"
msg = QMessageBox(
QMessageBox.NoIcon, "Fusion Import", fString, QMessageBox.Cancel
)
msg.addButton("Current pass", QMessageBox.YesRole)
msg.addButton("All passes", QMessageBox.YesRole)
# msg.addButton("Layout all passes", QMessageBox.YesRole)
self.core.parentWindow(msg)
action = msg.exec_()
if action == 0:
self.fusionImportSource(origin)
elif action == 1:
self.fusionImportPasses(origin)
else:
return
@err_catcher(name=__name__)
def fusionImportSource(self, origin):
self.fusion.GetCurrentComp().Lock()
sourceData = origin.compGetImportSource()
for i in sourceData:
filePath = i[0]
firstFrame = i[1]
lastFrame = i[2]
filePath = filePath.replace(
"#"*self.core.framePadding, "%04d".replace("4", str(self.core.framePadding)) % firstFrame)
tool = self.fusion.GetCurrentComp().AddTool("Loader", -32768, -32768)
tool.Clip = filePath
tool.GlobalOut = lastFrame
tool.GlobalIn = firstFrame
tool.ClipTimeStart = 0
tool.ClipTimeEnd = lastFrame - firstFrame
tool.HoldLastFrame = 0
self.fusion.GetCurrentComp().Unlock()
@err_catcher(name=__name__)
def fusionImportPasses(self, origin):
self.fusion.GetCurrentComp().Lock()
sourceData = origin.compGetImportPasses()
for i in sourceData:
filePath = i[0]
firstFrame = i[1]
lastFrame = i[2]
filePath = filePath.replace(
"#"*self.core.framePadding, "%04d".replace("4", str(self.core.framePadding)) % firstFrame)
self.fusion.GetCurrentComp().CurrentFrame.FlowView.Select()
tool = self.fusion.GetCurrentComp().AddTool("Loader", -32768, -32768)
tool.Clip = filePath
tool.GlobalOut = lastFrame
tool.GlobalIn = firstFrame
tool.ClipTimeStart = 0
tool.ClipTimeEnd = lastFrame - firstFrame
tool.HoldLastFrame = 0
self.fusion.GetCurrentComp().Unlock()
@err_catcher(name=__name__)
def setProject_loading(self, origin):
pass
@err_catcher(name=__name__)
def onPrismSettingsOpen(self, origin):
pass
@err_catcher(name=__name__)
def createProject_startup(self, origin):
pass
@err_catcher(name=__name__)
def editShot_startup(self, origin):
pass
@err_catcher(name=__name__)
def shotgunPublish_startup(self, origin):
pass
@err_catcher(name=__name__)
def getOutputPath(self, node, render=False):
self.isRendering = [False, ""]
if node is None:
msg = QMessageBox(
QMessageBox.Warning, "Prism Warning", "Please select one or more write nodes you wish to refresh"
)
self.core.parentWindow(msg)
if self.core.useOnTop:
msg.setWindowFlags(msg.windowFlags() ^ Qt.WindowStaysOnTopHint)
msg.exec_()
return ""
taskName = node.GetInput("PrismTaskControl")
origComment = node.GetInput("PrismCommentControl")
if origComment is None:
comment = ""
comment = self.core.validateStr(origComment)
if origComment != comment:
node.SetInput("PrismCommentControl", comment)
FormatID = node.GetInput("OutputFormat")
fileType = ""
if FormatID == "PIXFormat":
# Alias PIX
fileType = "pix"
elif FormatID == "IFFFormat":
# Amiga IFF
fileType = "iff"
elif FormatID == "CineonFormat":
# Kodak Cineon
fileType = "cin"
elif FormatID == "DPXFormat":
# DPX
fileType = "dpx"
elif FormatID == "FusePicFormat":
# Fuse Pic
fileType = "fusepic"
elif FormatID == "FlipbookFormat":
# Fusion Flipbooks
fileType = "fb"
elif FormatID == "RawFormat":
# Fusion RAW Image
fileType = "raw"
elif FormatID == "IFLFormat":
# Image File List (Text File)
fileType = "ifl"
elif FormatID == "IPLFormat":
# IPL
fileType = "ipl"
elif FormatID == "JpegFormat":
# JPEG
fileType = "jpg"
# fileType = 'jpeg'
elif FormatID == "Jpeg2000Format":
# JPEG2000
fileType = "jp2"
elif FormatID == "MXFFormat":
# MXF - Material Exchange Format
fileType = "mxf"
elif FormatID == "OpenEXRFormat":
# OpenEXR
fileType = "exr"
elif FormatID == "PandoraFormat":
# Pandora YUV
fileType = "piyuv10"
elif FormatID == "PNGFormat":
# PNG
fileType = "png"
elif FormatID == "VPBFormat":
# Quantel VPB
fileType = "vpb"
elif FormatID == "QuickTimeMovies":
# QuickTime Movie
fileType = "mov"
elif FormatID == "HDRFormat":
# Radiance
fileType = "hdr"
elif FormatID == "SixRNFormat":
# Rendition
fileType = "6RN"
elif FormatID == "SGIFormat":
# SGI
fileType = "sgi"
elif FormatID == "PICFormat":
# Softimage PIC
fileType = "si"
elif FormatID == "SUNFormat":
# SUN Raster
fileType = "RAS"
elif FormatID == "TargaFormat":
# Targa
fileType = "tga"
elif FormatID == "TiffFormat":
# TIFF
# fileType = 'tif'
fileType = "tiff"
elif FormatID == "rlaFormat":
# Wavefront RLA
fileType = "rla"
elif FormatID == "BMPFormat":
# Windows BMP
fileType = "bmp"
elif FormatID == "YUVFormat":
# YUV
fileType = "yuv"
else:
# EXR fallback format
fileType = "exr"
location = node.GetInput("Location")
useLastVersion = node.GetInput("RenderLastVersionControl")
if taskName is None or taskName == "":
msg = QMessageBox(
QMessageBox.Warning, "Prism Warning", "Please choose | |
import json
import pprint
"""
TITLE: imagine buy in bookshoop - interaktive fun with User :)
ISSUE : help you choose the right item, get to know the User's preferences, i.e. - the thematic category that interests him, the results improved for him, a detailed description of the selected item
assumptions:
no method has been developed to protect the program against entering incorrect answers by the User
established:
- that the categories will be written as displayed on the console with uppercase letters (no spaces, etc.)
- that the user will copy the entire title of the book as it is displayed on the console
logic
100. Ask the user what category of prince interests him(show him the sorted results)
101. Enter the selected category and ask if User wants to sort them by:
- increasing price,
- decreasing price,
- the highest number of stars,
- the lowest number of stars,
- availability,
and present the results
102.The user has chosen a given book - show him a short description and product description
logika - PL
100. spytaj Kupujacego jaka kategoria ksiazego go intresuje (pokaz mu posortowane wyniki)
101. wejdz do wybranej kategori i spytaj czy Kupujacy chce posortowac je po:
- cenie rosnacej,
- cenie malejacej,
- najwyzszej ilosci gwiazdek,
- najnizszej ilosci gwiazdek,
- dostepnosci,
i zaprezentuj wyniki do dalszego wyboru w postaci listy
102. user wybral dana ksiazke - pokaz mu do niej szczegolowy opis i opis produktu
"""
# open and read the content of files from part 01 this issue (scraping results)
f1 = open('resources/01_category_first_link.json')
scrap1 = json.load(f1)
f1.close()
f2 = open('resources/02_single_books.json')
scrap2 = json.load(f2)
f2.close()
f3 = open('resources/03_details_single_books.json')
scrap3 = json.load(f3)
f3.close()
class Game:
def __init__(self):
pass
# I am using a file called --> "01_category_first_link.json"
# important because each file has different keys to access the content of the dictionaries
def sorted_thematica_category(self,s1):
category_list = [letter['Book_Category'] for letter in s1]
sorted_category_list = sorted(category_list)
return sorted_category_list
# I am using a file called --> "02_single_books.json"
def show_all_books_ctagory(self, s2, choosen_category):
list_all_books_this_cat=[]
for el in s2:
if el['Book_Category'] == choosen_category:
list_all_books_this_cat.append(el['Book_Title'])
how_many_books = len(list_all_books_this_cat)
return how_many_books, list_all_books_this_cat
def printing_long_questions(self):
print('--------')
print('Please tell me how to sort the results for YOU. Write 1 or 2 or 3 or 4 or 5.')
print(' \t\t 1 - sort by price - DESC.')
print(' \t\t 2 - sort by price - ASC.')
print(' \t\t 3 - sort by popularity ranking - DESC.')
print(' \t\t 4 - sort by popularity ranking - ASC.')
print(' \t\t 5 - sort by Title alphabetically. ')
def user_choose_filter_method(self, nr, list_title):
if nr==1 or nr==2:
list_dict_title_and_price=self.generate_tab_title_price(scrap2, list_title)
if nr == 1:
result_method = self.sort_method_1(list_dict_title_and_price)
else:
#nr 2
result_method = self.sort_method_2(list_dict_title_and_price)
if nr == 3:
# create dict only with key like stars and title
list_dict_title_and_stars = self.generate_tab_title_stars(scrap2, list_title)
# sorted by stars
result_method = self.sort_method_3(list_dict_title_and_stars)
if nr == 4:
# create dict only with key like stars and title
list_dict_title_and_stars = self.generate_tab_title_stars(scrap2, list_title)
# sorted by stars
result_method = self.sort_method_4(list_dict_title_and_stars)
if nr == 5:
result_method = self.sort_method_5(list_title)
return result_method
# building a new DICTIONARY - cutting the content from existing DICTIONARIES
# idea from https://stackoverflow.com/questions/3420122/filter-dict-to-contain-only-certain-keys
def remove_key_from_existing_dict(self, existing_dict, *key_to_delete_from_existing_dict):
"""
input -{'Book_Price': 10.97, 'Book_Stars': 1, 'Book_Title': 'The Long Shadow', 'Book_total_category_amouth': 1}
key_to_delete_from_existing_dict='Book_Stars'
output--> {'Book_Price': 10.97,'Book_Title': 'The Long Shadow', , 'Book_total_category_amouth': 1}
"""
new_dict = dict((key, value) for key, value in existing_dict.items() if key not in key_to_delete_from_existing_dict)
return new_dict
def leave_only_selected_keys_in_existing_dict(self,existing_dict, *key_to_stay):
"""
input -{'Book_Price': 10.97, 'Book_Stars': 1, 'Book_Title': 'The Long Shadow', 'Book_total_category_amouth': 1}
key_to_stay='Book_Stars', 'Book_Title'
output--> {'Book_Stars': 1, 'Book_Title': 'The Long Shadow'}
"""
new_dict = dict((key, value) for key, value in existing_dict.items() if key in key_to_stay)
return new_dict
# building a new list of dictionaries - cutting the content from skraping 2 (list - dictionaries)
def generate_tab_title_price(self, scrap2, list_title):
# scrap2= big list dics
# i want filter and catch only interesting me title --list_title
# and return only key --'Book_Price', 'Book_Title'
list_dict_only_title_price=[]
for small_dict in scrap2:
for title in list_title:
if small_dict['Book_Title'] in title:
new_short_dict = self.leave_only_selected_keys_in_existing_dict(small_dict, 'Book_Price', 'Book_Title')
list_dict_only_title_price.append(new_short_dict)
return list_dict_only_title_price
def generate_tab_title_stars(self, scrap2, list_title):
# scrap2= big list dics
# i want filter and catch only interesting me title --list_title
# and return only key --'Book_Title', 'Book_Stars'
list_dict_only_title_stars = []
for small_dict in scrap2:
for title in list_title:
if small_dict['Book_Title'] in title:
new_short_dict = self.leave_only_selected_keys_in_existing_dict(
small_dict, 'Book_Title', 'Book_Stars')
list_dict_only_title_stars.append(new_short_dict)
return list_dict_only_title_stars
def sort_method_1(self,list_dict_title_and_price):
#Press 1 - sort by price descending (malejaco)
# return list with dict price and title
# inspiration - -> https: // stackoverflow.com/questions/1143671/how-to-sort-objects-by-multiple-keys-in-python
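        # Negating the price sorts prices in descending order while the title
        # (the second key) still breaks ties in ascending alphabetical order.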
sorted_by_price_DESC= sorted(list_dict_title_and_price, key=lambda d: (-d['Book_Price'], d['Book_Title']))
return sorted_by_price_DESC
def sort_method_2(self, list_dict_title_and_price):
# Press 2 - sorted by price in ascending order (rosnaco)
# return list with dict price and title
sorted_by_price_DESC = sorted(list_dict_title_and_price, key=lambda d: (-d['Book_Price'], d['Book_Title']))
sorted_by_price_ASC = sorted_by_price_DESC[::-1]
return sorted_by_price_ASC
def sort_method_3(self, list_dict_only_title_AND_stars):
sorted_by_stars_DESC = sorted(list_dict_only_title_AND_stars, key=lambda d: (-d['Book_Stars'], d['Book_Title']))
return sorted_by_stars_DESC
def sort_method_4(self, list_dict_only_title_AND_stars):
# catch list dict with stars and title and return sorted by stars
#Press 3 - sorted by popularity ranking - Max stars to min
sorted_by_stars_DESC = sorted(list_dict_only_title_AND_stars, key=lambda d: (-d['Book_Stars'], d['Book_Title']))
sorted_by_stars_ASC = sorted_by_stars_DESC[::-1]
return sorted_by_stars_ASC
def sort_method_5(self, list_title):
# Press 5 - sort by title alphabetically
"""
["It's Only the Himalayas", 'Full Moon over Noah’s Ark: An Odyssey to Mount Ararat and Beyond', 'See America: A Celebration of Our National Parks & Treasured Sites', 'Vagabonding: An Uncommon Guide to the Art of Long-Term World Travel', 'Under the Tuscan Sun',
'A Summer In Europe', 'The Great Railway Bazaar', 'A Year in Provence (Provence #1)', 'The Road to Little Dribbling: Adventures of an American in Britain (Notes From a Small Island #2)', 'Neither Here nor There: Travels in Europe', '1,000 Places to See Before You Die']
"""
        # we have the chosen category and the list of books - sort the titles alphabetically.
sorted_title = sorted(list_title)
return sorted_title
# choose inf detail from scrap 3
# I am using a file called --> "03_details_single_books.json"
def catch_index_if_have_title(self,title_choosen, scrap3):
        # input: scrap3 is a list of dicts; output: the integer index of the dict
        # whose 'title_book' equals title_choosen, used to link the title to the
        # rest of its details
counter_index_in_list_dicts = 0
for el in scrap3:
if el['title_book'] == title_choosen:
break
else:
counter_index_in_list_dicts += 1
return counter_index_in_list_dicts
def return_details(self,title_choosen, scrap3):
# i need index link with this title
index_list_with_dicts = self.catch_index_if_have_title(title_choosen, scrap3)
tab_details=[]
title_book = scrap3[index_list_with_dicts]["title_book"]
tab_details.append(title_book)
category = scrap3[index_list_with_dicts]["category"]
tab_details.append(category)
price = scrap3[index_list_with_dicts]["price"]
tab_details.append(price)
productDescription = scrap3[index_list_with_dicts]["productDescription"]
tab_details.append(productDescription)
how_many = scrap3[index_list_with_dicts]["in_stock_how_many_available"]
tab_details.append(how_many)
about = scrap3[index_list_with_dicts]['detals_link_to_book']
tab_details.append(about)
upc = scrap3[index_list_with_dicts]["productInformation_UPC"]
tab_details.append(upc)
return tab_details
def printing_final_result(self, tab_details):
title_book = tab_details[0]
category = tab_details[1]
price = tab_details[2]
productDescription = tab_details[3]
in_stock_how_many_available = tab_details[4]
detals_link_to_book = tab_details[5]
productInformation_UPC = tab_details[6]
print('\n\t The book has a title: {}.Category is {}'.format(title_book, category))
print('\n\t Book Price:', price)
print('\n\t Content Description:', productDescription)
print('\n\t We still have {} item/s in stock'.format(in_stock_how_many_available))
print('\n\t If you want to know more about the book, please open the link:', detals_link_to_book)
print('\n\t UPC number:', productInformation_UPC)
# logic for conversation with User through Terminal
def logic(self):
answer1_user_if_play = input("Do you want to buy some interesting book? :) . Choose (n/y) \n")
if answer1_user_if_play == 'y':
print('--------')
print("\t Lets game :) ..... \n\t Below thematical book's Category for Your choose. \n")
#step one - choose category
sorted_category = self.sorted_thematica_category(scrap1)
print(sorted_category)
print('--------')
customer_choose_category_book = input(
'\t Please choose one and copy Your choice here ...\n\t (EXAMPLE:... Academic)\n\t (EXAMPLE:... Add a comment)\n\t YOUR TURN - Chose one Category from list : ...')
"""
while customer_choose_category_book not in sorted_category_list:
print('Please once again choose category. This one not exist in own base and list at top')
"""
if customer_choose_category_book in sorted_category:
how_books, title_books_this_choosen_category = self.show_all_books_ctagory(scrap2, customer_choose_category_book)
                print('We have {} book title(s) in the shop for the category {}'.format(how_books, customer_choose_category_book))
print(title_books_this_choosen_category)
else:
                print('Please choose the category again. That one does not exist in our database (see the list above).')
            # step two - choose how user
used after
a database has been created."""
def __init__(self, conn):
self.conn = conn
def database_name(self):
"""Returns the name of the connected database."""
sql = ("SELECT catalog_name\n" +
"FROM information_schema.information_schema_catalog_name;")
with self.conn.cursor() as curs:
curs.execute(sql)
dbname = curs.fetchone()[0]
return dbname
def analyse(self):
"""Runs the ANALYSE command on the connected database."""
with self.conn.cursor() as curs:
curs.execute("ANALYSE;")
def __getattr__(self, attrname):
"""Delegate unknown attributes to the psycopg2 connection."""
return getattr(self.conn, attrname)
class MaintenanceWrapper(ConnectionWrapper):
"""Wrapper for a connection intented for maintenance commands."""
def exists(self, dbname):
"""Returns True if the named database exists."""
exists_sql = ("SELECT datname FROM pg_database\n" +
"WHERE datname = %(dbname)s;")
with self.conn.cursor() as curs:
curs.execute(exists_sql, {'dbname': dbname})
db_found = bool(curs.fetchone())
return db_found
def dblist(self):
"""Returns a list of the databases on the server."""
dblist_sql = "SELECT datname FROM pg_database;"
with self.conn.cursor() as curs:
curs.execute(dblist_sql)
result = [tup[0] for tup in curs.fetchall()]
return result
def drop(self, dbname):
"""Drops the named database."""
drop_sql = "DROP DATABASE %s;" % safe_name(dbname)
with self.conn.cursor() as curs:
curs.execute(drop_sql)
def create(self, dbname, template_db=TEMPLATE_DB):
"""Creates the named database."""
create_sql = ("CREATE DATABASE %s\n" % safe_name(dbname) +
"TEMPLATE %s;" % template_db)
with self.conn.cursor() as curs:
curs.execute(create_sql)
class BouncerWrapper(ConnectionWrapper):
"""Wrapper for a connection to the pgbouncer console pseudo-database.
Obviously these commands will not work if connected to an ordinary
database.
These commands will ignore errors since pgbouncer may
not know about the database the operations are being done on, but
the commands have to be run anyway in case it does."""
def pause(self, dbname):
"""Tells pgbouncer to pause the named database.
This should cause pgbouncer to disconnect from dbname, first
waiting for any queries to complete. This allows the database
to be dropped.
"""
pause_sql = "PAUSE %s;" % safe_name(dbname)
with self.conn.cursor() as cur:
try:
cur.execute(pause_sql)
except psycopg2.DatabaseError:
pass
def kill(self, dbname):
"""Tells pgbouncer to kill its connections to the named database.
This should cause pgbouncer to disconnect from dbname without waiting
for any queries to complete.
"""
kill_sql = "KILL %s;" % safe_name(dbname)
with self.conn.cursor() as cur:
try:
cur.execute(kill_sql)
except psycopg2.DatabaseError:
pass
def resume(self, dbname):
"""Tells pgbouncer to resume work on the named database.
If this is not called and the database was previously
paused then connection attempts will hang or give errors."""
resume_sql = "RESUME %s;" % safe_name(dbname)
with self.conn.cursor() as cur:
try:
cur.execute(resume_sql)
except psycopg2.DatabaseError:
pass
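# A minimal usage sketch (not part of this module): the typical sequence for
# recreating a test database that sits behind pgbouncer is pause -> drop ->
# create -> resume. The two connection arguments are placeholders for psycopg2
# connections to the pgbouncer console pseudo-database and to a maintenance
# database respectively.
def recreate_database(bouncer_conn, maintenance_conn, dbname):
    bouncer = BouncerWrapper(bouncer_conn)
    maintenance = MaintenanceWrapper(maintenance_conn)
    bouncer.pause(dbname)        # let running queries finish, then detach
    if maintenance.exists(dbname):
        maintenance.drop(dbname)
    maintenance.create(dbname)
    bouncer.resume(dbname)       # re-enable pooled connections to dbname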
#
# Utility functions
#
def random_name(basename=""):
"""Returns a database name with a 9 digit random number appended."""
random_str = (RANDOM_STR_FORMAT %
random.randint(RANDOM_STR_MIN, RANDOM_STR_MAX))
return basename + "_" + random_str
def safe_name(dbname):
"""Returns a database name with non letter, digit, _ characters removed."""
char_list = [c for c in dbname if c.isalnum() or c == '_']
return "".join(char_list)
def resources_directory(*names):
"""Returns the path to a test resources directory, creating it if needed.
The path of the directory is TEST_RESOURCES_ROOT/name1/name2/...
where name1, name2, ... are the names passed in as parameters.
"""
test_dir = os.path.join(TEST_RESOURCES_ROOT, *names)
if not os.path.isdir(test_dir):
# Allow group permissions on the directory we are about to create
old_umask = os.umask(0o007)
# Make the directories
os.makedirs(test_dir)
# Put back the old umask
os.umask(old_umask)
return test_dir
def version_or_user(version=None, user=None):
"""Returns the version or user for a test resources directory.
Returns the version string, unless version is 'user', in which case
the user string is returned instead. Defaults are described below.
version: The version of the datacube code. This is expected to be either
'develop', 'user', or a version number. If not given it is taken
from the DATACUBE_VERSION environment variable. If the DATACUBE_VERSION
variable is not defined it is taken to be 'user'.
user: The user name. This is used in place of version if version is 'user'.
If this is not defined it is taken from the USER environment variable.
"""
if not version:
# Using 'not version' rather than 'version is None' here because
# "" is NOT a valid version.
version = os.environ.get('DATACUBE_VERSION', 'user')
if version == 'user':
if not user:
# Using 'not user' rather than 'user is None' here because
# "" is NOT a valid user.
user = os.environ['USER']
return user
else:
return version
def input_directory(module, suite, version=None, user=None):
"""Returns a path to a test input directory, creating it if needed.
The path of the directory is
TEST_RESOURCES_ROOT/version/input/module/suite/. If the version is
'user' then the user argument takes the place of version in the path.
module: The name of the module being tested, eg 'dbcompare'.
suite: The name of the test suite of test class containting the test,
eg 'TestReporter'.
version: The version of the datacube code. This is expected to be either
'develop', 'user', or a version number. If not given it is taken
from the DATACUBE_VERSION environment variable. If the DATACUBE_VERSION
variable is not defined it is taken to be 'user'.
user: The user name. This is used in place of version if version is 'user'.
If this is not defined it is taken from the USER environment variable.
The 'input' directory is for input or setup files for tests. The
files are expected to be named after the test that uses them.
"""
version = version_or_user(version, user)
return resources_directory(version, 'input', module, suite)
def output_directory(module, suite, user=None):
"""Returns the path to a test output directory, creating it if needed.
The path of the directory is TEST_RESOUCES_ROOT/user/output/module/suite/.
If user is not given, the environment variable USER is used as the
name of the user.
module: the name of the module being tested, eg 'dbcompare'
suite: the name of the test suite or test class containting the test,
eg 'TestReporter'
The 'output' directory is for the output of the tests. The files are
expected to be named after the test that produces them.
"""
version = version_or_user(version='user', user=user)
return resources_directory(version, 'output', module, suite)
def expected_directory(module, suite, version=None, user=None):
"""Returns a path to a test expected directory, creating it if needed.
The path of the directory is
TEST_RESOURCES_ROOT/version/expected/module/suite/. If the version is
'user' then the user argument takes the place of version in the path.
module: The name of the module being tested, eg 'dbcompare'.
suite: The name of the test suite of test class containting the test,
eg 'TestReporter'.
version: The version of the datacube code. This is expected to be either
'develop', 'user', or a version number. If not given it is taken
from the DATACUBE_VERSION environment variable. If the DATACUBE_VERSION
variable is not defined it is taken to be 'user'.
user: The user name. This is used in place of version if version is 'user'.
If this is not defined it is taken from the USER environment variable.
The 'expected' directory is for the expected output of the tests. The
files are expected to be named after the test that produces them. These
files are used to automate the tests by comparing output produced against
expected output.
"""
version = version_or_user(version, user)
return resources_directory(version, 'expected', module, suite)
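# For example (illustrative values only): with TEST_RESOURCES_ROOT set to
# '/test_resources', USER set to 'alice' and DATACUBE_VERSION unset, the helpers
# above resolve to paths such as
#   input_directory('dbcompare', 'TestReporter')
#       -> /test_resources/alice/input/dbcompare/TestReporter
#   expected_directory('dbcompare', 'TestReporter', version='develop')
#       -> /test_resources/develop/expected/dbcompare/TestReporter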
def temp_directory(module, suite, test_dir, version=None, user=None):
"""Returns a path to a temp subdirectory, creating it if needed."""
version = version_or_user(version, user)
return resources_directory(version, test_dir, module, suite, 'temp')
def tile_root_directory(module, suite, test_dir, version=None, user=None):
"""Returns a path to a tile_root subdirectory, creating it if needed."""
version = version_or_user(version, user)
return resources_directory(version, test_dir, module, suite, 'tile_root')
def update_config_file(dbname, input_dir, output_dir, config_file_name,
output_file_name=None):
"""Creates a temporary agdc_default.config file by updating the database name.
This function returns the path to the updated config file.
dbname: the name of the database to connect to.
input_dir: the directory containing the config file template.
output_dir: the directory in which the updated config file will be written.
config_file_name: the name of the config file (template and updated).
output_file_name: the name of the updated config file - if this is not
specified, it is taken to be the same as the config_file_name.
"""
return update_config_file2({'dbname': dbname}, input_dir, output_dir,
config_file_name, output_file_name)
def update_config_file2(parameter_values_dict, input_dir, output_dir,
config_file_name, output_file_name=None):
"""Creates a temporary agdc_default.config file by updating those attributes
according to the dictionary parameter_values.
    This function
#!/usr/bin/env python
# coding: utf-8
# this code is a modification of:
# notes:
# todo : I canceled the weight randomization for the last layer + froze the weights for all of the layers (some weights were trained anyway).
# todo : maybe -- save the outputs to a file during the evaluate function
# **Outline of Steps**
# + Initialization
# + Download COCO detection data from http://cocodataset.org/#download
# + http://images.cocodataset.org/zips/train2014.zip <= train images
# + http://images.cocodataset.org/zips/val2014.zip <= validation images
# + http://images.cocodataset.org/annotations/annotations_trainval2014.zip <= train and validation annotations
# + Run this script to convert annotations in COCO format to VOC format
# + https://gist.github.com/chicham/6ed3842d0d2014987186#file-coco2pascal-py
# + Download pre-trained weights from https://pjreddie.com/darknet/yolo/
# + https://pjreddie.com/media/files/yolo.weights
# + Specify the directory of train annotations (train_annot_folder) and train images (train_image_folder)
# + Specify the directory of validation annotations (valid_annot_folder) and validation images (valid_image_folder)
# + Specify the path of pre-trained weights by setting variable *wt_path*
# + Construct equivalent network in Keras
# + Network arch from https://github.com/pjreddie/darknet/blob/master/cfg/yolo-voc.cfg
# + Load the pretrained weights
# + Perform training
# + Perform detection on an image with newly trained weights
# + Perform detection on a video with newly trained weights
# # Initialization
# In[51]:
#from IPython import get_ipython
from keras.models import Sequential, Model
from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda, \
UpSampling2D, TimeDistributed, LSTM
from keras.layers.advanced_activations import LeakyReLU
from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard
from keras.optimizers import SGD, Adam, RMSprop
from keras.layers.merge import concatenate
import matplotlib.pyplot as plt
import keras.backend as K
import tensorflow as tf
import imgaug as ia
from tqdm import tqdm
from imgaug import augmenters as iaa
import numpy as np
import pickle
import os, cv2
from preprocessing import parse_annotation, BatchGenerator, LSTMBatchGenerator
from utils import WeightReader, decode_netout, draw_boxes
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = ""
# get_ipython().run_line_magic('matplotlib', 'inline')
# In[52]:
SUP_NUM_IMAGES = 3
UNSUP_NUM_IMAGES = 3
EVAL_NUM_IMAGES = 3
LABELS = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
'hair drier', 'toothbrush']
IMAGE_H, IMAGE_W = 416, 416
GRID_H, GRID_W = 13, 13
BOX = 5
CLASS = len(LABELS)
CLASS_WEIGHTS = np.ones(CLASS, dtype='float32')
OBJ_THRESHOLD = 0.3 # 0.5
NMS_THRESHOLD = 0.3 # 0.45
ANCHORS = [0.57273, 0.677385, 1.87446, 2.06253, 3.33843, 5.47434, 7.88282, 3.52778, 9.77052, 9.16828]
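# The 5 anchors are (width, height) pairs measured in grid-cell units, one pair
# per predicted box at each grid location (BOX = 5, so 10 values in total).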
NO_OBJECT_SCALE = 1.0
OBJECT_SCALE = 5.0
COORD_SCALE = 1.0
CLASS_SCALE = 1.0
BATCH_SIZE = 16
WARM_UP_BATCHES = 0
TRUE_BOX_BUFFER = 50
MAX_BOX_PER_IMAGE = 10
# In[53]:
wt_path = 'yolov2.weights'
train_image_folder = './data/images/train2014/'
train_annot_folder = './data/train_converted/'
valid_image_folder = './data/images/val2014/'
valid_annot_folder = './data/val_converted/'
# # Construct the network
# the function to implement the orgnization layer (thanks to github.com/allanzelener/YAD2K)
def space_to_depth_x2(x):
return tf.space_to_depth(x, block_size=2)
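# With block_size=2 this turns a (batch, H, W, C) tensor into (batch, H/2, W/2, 4*C),
# which lets the higher-resolution skip connection be concatenated with the deeper
# 13x13 feature map further down in the network.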
import frontend
""" creates a new dir names coco_x with the results, weights, and all the relevant files"""
# TB_COUNT = len([d for d in os.listdir(os.path.expanduser('./results_lstm/')) if 'coco_' in d]) + 1
# PATH = os.path.expanduser('./results_lstm/') + 'coco_' + '_' + str(TB_COUNT)
# os.makedirs(PATH)
PATH = './lstm/'
print("=================== Directory " , PATH , " Created ")
# PATH = "./results/coco__25"
class ToharGenerator2(BatchGenerator):
def __getitem__(self, item):
# t= [x_batch,b_batch],y_batch
# [input,goutndtruth],desired network output]
t = super().__getitem__(item)
x_batch = t[0][0] #the input
GT = t[0][1]
y_batch = t[1]
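        # `predict` is assumed to be a helper defined elsewhere in the project
        # that runs the YOLO model above on the image batch and returns its
        # output grid, which then replaces the raw images as the LSTM input.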
new_x_batch = predict(model,x_batch) #instead of input img vector we want the YOLO's output vector
t[0][0]= new_x_batch
return [new_x_batch, GT], y_batch
input_image = Input(shape=(IMAGE_H, IMAGE_W, 3))
true_boxes = Input(shape=(1, 1, 1, TRUE_BOX_BUFFER, 4))
# Layer 1
x = Conv2D(32, (3, 3), strides=(1, 1), padding='same', name='conv_1', use_bias=False)(input_image)
x = BatchNormalization(name='norm_1')(x)
x = LeakyReLU(alpha=0.1)(x)
encoded = MaxPooling2D(pool_size=(2, 2))(x)
# Layer 2
x = Conv2D(64, (3, 3), strides=(1, 1), padding='same', name='conv_2', use_bias=False, trainable=False)(encoded)
x = BatchNormalization(name='norm_2', trainable=False)(x)
x = LeakyReLU(alpha=0.1)(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
# Layer 3
x = Conv2D(128, (3, 3), strides=(1, 1), padding='same', name='conv_3', use_bias=False, trainable=False)(x)
x = BatchNormalization(name='norm_3', trainable=False)(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 4
x = Conv2D(64, (1, 1), strides=(1, 1), padding='same', name='conv_4', use_bias=False, trainable=False)(x)
x = BatchNormalization(name='norm_4', trainable=False)(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 5
x = Conv2D(128, (3, 3), strides=(1, 1), padding='same', name='conv_5', use_bias=False, trainable=False)(x)
x = BatchNormalization(name='norm_5', trainable=False)(x)
x = LeakyReLU(alpha=0.1)(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
# Layer 6
x = Conv2D(256, (3, 3), strides=(1, 1), padding='same', name='conv_6', use_bias=False, trainable=False)(x)
x = BatchNormalization(name='norm_6', trainable=False)(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 7
x = Conv2D(128, (1, 1), strides=(1, 1), padding='same', name='conv_7', use_bias=False, trainable=False)(x)
x = BatchNormalization(name='norm_7', trainable=False)(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 8
x = Conv2D(256, (3, 3), strides=(1, 1), padding='same', name='conv_8', use_bias=False, trainable=False)(x)
x = BatchNormalization(name='norm_8', trainable=False)(x)
x = LeakyReLU(alpha=0.1)(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
# Layer 9
x = Conv2D(512, (3, 3), strides=(1, 1), padding='same', name='conv_9', use_bias=False, trainable=False)(x)
x = BatchNormalization(name='norm_9', trainable=False)(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 10
x = Conv2D(256, (1, 1), strides=(1, 1), padding='same', name='conv_10', use_bias=False, trainable=False)(x)
x = BatchNormalization(name='norm_10', trainable=False)(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 11
x = Conv2D(512, (3, 3), strides=(1, 1), padding='same', name='conv_11', use_bias=False, trainable=False)(x)
x = BatchNormalization(name='norm_11', trainable=False)(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 12
x = Conv2D(256, (1, 1), strides=(1, 1), padding='same', name='conv_12', use_bias=False, trainable=False)(x)
x = BatchNormalization(name='norm_12', trainable=False)(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 13
x = Conv2D(512, (3, 3), strides=(1, 1), padding='same', name='conv_13', use_bias=False, trainable=False)(x)
x = BatchNormalization(name='norm_13', trainable=False)(x)
x = LeakyReLU(alpha=0.1)(x)
skip_connection = x
x = MaxPooling2D(pool_size=(2, 2))(x)
# Layer 14
x = Conv2D(1024, (3, 3), strides=(1, 1), padding='same', name='conv_14', use_bias=False, trainable=False)(x)
x = BatchNormalization(name='norm_14', trainable=False)(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 15
x = Conv2D(512, (1, 1), strides=(1, 1), padding='same', name='conv_15', use_bias=False, trainable=False)(x)
x = BatchNormalization(name='norm_15', trainable=False)(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 16
x = Conv2D(1024, (3, 3), strides=(1, 1), padding='same', name='conv_16', use_bias=False, trainable=False)(x)
x = BatchNormalization(name='norm_16', trainable=False)(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 17
x = Conv2D(512, (1, 1), strides=(1, 1), padding='same', name='conv_17', use_bias=False, trainable=False)(x)
x = BatchNormalization(name='norm_17', trainable=False)(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 18
x = Conv2D(1024, (3, 3), strides=(1, 1), padding='same', name='conv_18', use_bias=False, trainable=False)(x)
x = BatchNormalization(name='norm_18', trainable=False)(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 19
x = Conv2D(1024, (3, 3), strides=(1, 1), padding='same', name='conv_19', use_bias=False, trainable=False)(x)
x = BatchNormalization(name='norm_19', trainable=False)(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 20
x = Conv2D(1024, (3, 3), strides=(1, 1), padding='same', name='conv_20', use_bias=False, trainable=False)(x)
x = BatchNormalization(name='norm_20', trainable=False)(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 21
skip_connection = Conv2D(64, (1, 1), strides=(1, 1), padding='same', name='conv_21', use_bias=False, trainable=False)(
skip_connection)
skip_connection = BatchNormalization(name='norm_21', trainable=False)(skip_connection)
skip_connection = LeakyReLU(alpha=0.1)(skip_connection)
skip_connection = Lambda(space_to_depth_x2)(skip_connection)
x = concatenate([skip_connection, x])
# Layer 22
x = Conv2D(1024, (3, 3), strides=(1, 1), padding='same', name='conv_22', use_bias=False, trainable=False)(x)
x = BatchNormalization(name='norm_22', trainable=False)(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 23
x = Conv2D(BOX * (4 + 1 + CLASS), (1, 1), strides=(1, 1), padding='same', name='conv_23')(x)
output = Reshape((GRID_H, GRID_W, BOX, 4 + 1 + CLASS))(x)
# small hack to allow true_boxes to be registered when Keras build the model
# for more information: https://github.com/fchollet/keras/issues/2790
output = Lambda(lambda args: args[0])([output, true_boxes])
model = Model([input_image, true_boxes], output)
# model.summary()
print("output=====")
print(output.shape)
'''build lstm model: '''
lstm_input = Input(shape=(GRID_H, GRID_W, BOX, 4 + 1 + CLASS))
input_dim = GRID_H * GRID_W * BOX * (4 + 1 + CLASS)
# input_dim=(GRID_H,GRID_W, BOX, 4 + 1 + CLASS, 1, 1, 1, TRUE_BOX_BUFFER, 4)
print(input_dim)
timesteps = EVAL_NUM_IMAGES
# lstm.add(units= Dense(input_shape=(GRID_H, GRID_W, BOX, 4 + 1 + CLASS)))
# l=Lambda(lambda x: K.batch_flatten(x))(lstm_input)
# l=LSTM(input_dim, batch_input_shape= (None, timesteps, input_dim), activation='sigmoid',recurrent_activation='hard_sigmoid',return_sequences=True)(l)
# # l = (Dense(output_dim=input_dim, activation="relu"))(lstm)
# #
# # # l = LSTM(input_dim)(l)
# # # # hidden_layer = Dense(output_dim=input_shape, activation="relu")(x)
# # # # outputs = Dense(output_dim=input_shape, activation="softmax")(hidden_layer)
# #
# loutput = Reshape((GRID_H, GRID_W, BOX, 4 + 1 + CLASS))(l)
# #
# # # small hack to allow true_boxes to be registered when Keras build the model
# # # for more information: https://github.com/fchollet/keras/issues/2790
# out = Lambda(lambda args: args[0])([loutput, true_boxes])
#
#
#
# lstm = Model([lstm_input, true_boxes], out)
# lstm.summary()
input_dim = GRID_H * GRID_W * BOX * (4 + 1 + CLASS)
#take 5 frames every time
frames = Input(shape=(5, IMAGE_H, IMAGE_W, 3))
x = TimeDistributed(model)(frames)
x = TimeDistributed(Flatten())(x)
# now timesteps = 5
x = LSTM(input_dim, name='lstm')(x)
out = Dense(input_dim, name='out')(x)
lstm = Model(inputs=frames, outputs=out)
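# Hedged sketch of how this sequence model could be compiled and trained
# (assumptions: a mean-squared-error objective on the flattened YOLO targets and
# a `seq_generator` yielding 5-frame clips; neither is defined in this script):
#   lstm.compile(optimizer='adam', loss='mse')
#   lstm.fit_generator(seq_generator, steps_per_epoch=100, epochs=1)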
exit()  # NOTE: execution stops here; the weight-loading code below is never reached in this script
# # Load pretrained weights
# **Load the weights originally provided by YOLO**
print("**Load the weights originally provided by YOLO**")
weight_reader = WeightReader(wt_path)
weight_reader.reset()  # resets the read offset; it does not delete the loaded weights
nb_conv = 23
for i in range(1, nb_conv + 1):
conv_layer = model.get_layer('conv_' + str(i))
if i < nb_conv:
norm_layer = model.get_layer('norm_' + str(i))
size = np.prod(norm_layer.get_weights()[0].shape)
beta = weight_reader.read_bytes(size)
gamma = weight_reader.read_bytes(size)
mean = weight_reader.read_bytes(size)
var = weight_reader.read_bytes(size)
        norm_layer.set_weights([gamma, beta, mean, var])
if len(conv_layer.get_weights()) > 1:
bias = weight_reader.read_bytes(np.prod(conv_layer.get_weights()[1].shape))
kernel = weight_reader.read_bytes(np.prod(conv_layer.get_weights()[0].shape))
kernel = kernel.reshape(list(reversed(conv_layer.get_weights()[0].shape)))
kernel = kernel.transpose([2, 3, 1, 0])
conv_layer.set_weights([kernel, bias])
else:
kernel = weight_reader.read_bytes(np.prod(conv_layer.get_weights()[0].shape))
kernel = kernel.reshape(list(reversed(conv_layer.get_weights()[0].shape)))
kernel = kernel.transpose([2, 3, 1, 0])
conv_layer.set_weights([kernel])
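# Quick sanity check (illustrative, not in the original): after loading the Darknet
# weights, the first conv layer should hold 3x3 kernels over 3 input channels.
#   print(model.get_layer('conv_1').get_weights()[0].shape)   # expected: (3, 3, 3, 32)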
# model_t = model #model that trained but not pre-trained
# model_un = model #model without training at all
# **Randomize weights of the last
hx_boundaries_insert = _ida_hexrays.hx_boundaries_insert
hx_boundaries_erase = _ida_hexrays.hx_boundaries_erase
hx_boundaries_clear = _ida_hexrays.hx_boundaries_clear
hx_boundaries_size = _ida_hexrays.hx_boundaries_size
hx_boundaries_free = _ida_hexrays.hx_boundaries_free
hx_boundaries_new = _ida_hexrays.hx_boundaries_new
hx_block_chains_begin = _ida_hexrays.hx_block_chains_begin
hx_block_chains_end = _ida_hexrays.hx_block_chains_end
hx_block_chains_next = _ida_hexrays.hx_block_chains_next
hx_block_chains_prev = _ida_hexrays.hx_block_chains_prev
hx_block_chains_get = _ida_hexrays.hx_block_chains_get
hx_block_chains_find = _ida_hexrays.hx_block_chains_find
hx_block_chains_insert = _ida_hexrays.hx_block_chains_insert
hx_block_chains_erase = _ida_hexrays.hx_block_chains_erase
hx_block_chains_clear = _ida_hexrays.hx_block_chains_clear
hx_block_chains_size = _ida_hexrays.hx_block_chains_size
hx_block_chains_free = _ida_hexrays.hx_block_chains_free
hx_block_chains_new = _ida_hexrays.hx_block_chains_new
hx_valrng_t_clear = _ida_hexrays.hx_valrng_t_clear
hx_valrng_t_copy = _ida_hexrays.hx_valrng_t_copy
hx_valrng_t_assign = _ida_hexrays.hx_valrng_t_assign
hx_valrng_t_compare = _ida_hexrays.hx_valrng_t_compare
hx_valrng_t_set_eq = _ida_hexrays.hx_valrng_t_set_eq
hx_valrng_t_set_cmp = _ida_hexrays.hx_valrng_t_set_cmp
hx_valrng_t_reduce_size = _ida_hexrays.hx_valrng_t_reduce_size
hx_valrng_t_intersect_with = _ida_hexrays.hx_valrng_t_intersect_with
hx_valrng_t_unite_with = _ida_hexrays.hx_valrng_t_unite_with
hx_valrng_t_inverse = _ida_hexrays.hx_valrng_t_inverse
hx_valrng_t_has = _ida_hexrays.hx_valrng_t_has
hx_valrng_t_print = _ida_hexrays.hx_valrng_t_print
hx_valrng_t_dstr = _ida_hexrays.hx_valrng_t_dstr
hx_valrng_t_cvt_to_single_value = _ida_hexrays.hx_valrng_t_cvt_to_single_value
hx_valrng_t_cvt_to_cmp = _ida_hexrays.hx_valrng_t_cvt_to_cmp
hx_get_merror_desc = _ida_hexrays.hx_get_merror_desc
hx_reg2mreg = _ida_hexrays.hx_reg2mreg
hx_mreg2reg = _ida_hexrays.hx_mreg2reg
hx_install_optinsn_handler = _ida_hexrays.hx_install_optinsn_handler
hx_remove_optinsn_handler = _ida_hexrays.hx_remove_optinsn_handler
hx_install_optblock_handler = _ida_hexrays.hx_install_optblock_handler
hx_remove_optblock_handler = _ida_hexrays.hx_remove_optblock_handler
hx_must_mcode_close_block = _ida_hexrays.hx_must_mcode_close_block
hx_is_mcode_propagatable = _ida_hexrays.hx_is_mcode_propagatable
hx_negate_mcode_relation = _ida_hexrays.hx_negate_mcode_relation
hx_swap_mcode_relation = _ida_hexrays.hx_swap_mcode_relation
hx_get_signed_mcode = _ida_hexrays.hx_get_signed_mcode
hx_get_unsigned_mcode = _ida_hexrays.hx_get_unsigned_mcode
hx_mcode_modifies_d = _ida_hexrays.hx_mcode_modifies_d
hx_operand_locator_t_compare = _ida_hexrays.hx_operand_locator_t_compare
hx_vd_printer_t_print = _ida_hexrays.hx_vd_printer_t_print
hx_file_printer_t_print = _ida_hexrays.hx_file_printer_t_print
hx_qstring_printer_t_print = _ida_hexrays.hx_qstring_printer_t_print
hx_dstr = _ida_hexrays.hx_dstr
hx_is_type_correct = _ida_hexrays.hx_is_type_correct
hx_is_small_udt = _ida_hexrays.hx_is_small_udt
hx_is_nonbool_type = _ida_hexrays.hx_is_nonbool_type
hx_is_bool_type = _ida_hexrays.hx_is_bool_type
hx_partial_type_num = _ida_hexrays.hx_partial_type_num
hx_get_float_type = _ida_hexrays.hx_get_float_type
hx_get_int_type_by_width_and_sign = _ida_hexrays.hx_get_int_type_by_width_and_sign
hx_get_unk_type = _ida_hexrays.hx_get_unk_type
hx_dummy_ptrtype = _ida_hexrays.hx_dummy_ptrtype
hx_get_member_type = _ida_hexrays.hx_get_member_type
hx_make_pointer = _ida_hexrays.hx_make_pointer
hx_create_typedef = _ida_hexrays.hx_create_typedef
hx_get_type = _ida_hexrays.hx_get_type
hx_set_type = _ida_hexrays.hx_set_type
hx_vdloc_t_dstr = _ida_hexrays.hx_vdloc_t_dstr
hx_vdloc_t_compare = _ida_hexrays.hx_vdloc_t_compare
hx_vdloc_t_is_aliasable = _ida_hexrays.hx_vdloc_t_is_aliasable
hx_print_vdloc = _ida_hexrays.hx_print_vdloc
hx_arglocs_overlap = _ida_hexrays.hx_arglocs_overlap
hx_lvar_locator_t_compare = _ida_hexrays.hx_lvar_locator_t_compare
hx_lvar_locator_t_dstr = _ida_hexrays.hx_lvar_locator_t_dstr
hx_lvar_t_dstr = _ida_hexrays.hx_lvar_t_dstr
hx_lvar_t_is_promoted_arg = _ida_hexrays.hx_lvar_t_is_promoted_arg
hx_lvar_t_accepts_type = _ida_hexrays.hx_lvar_t_accepts_type
hx_lvar_t_set_lvar_type = _ida_hexrays.hx_lvar_t_set_lvar_type
hx_lvar_t_set_width = _ida_hexrays.hx_lvar_t_set_width
hx_lvar_t_append_list = _ida_hexrays.hx_lvar_t_append_list
hx_lvars_t_find_stkvar = _ida_hexrays.hx_lvars_t_find_stkvar
hx_lvars_t_find = _ida_hexrays.hx_lvars_t_find
hx_lvars_t_find_lvar = _ida_hexrays.hx_lvars_t_find_lvar
hx_restore_user_lvar_settings = _ida_hexrays.hx_restore_user_lvar_settings
hx_save_user_lvar_settings = _ida_hexrays.hx_save_user_lvar_settings
hx_modify_user_lvars = _ida_hexrays.hx_modify_user_lvars
hx_restore_user_defined_calls = _ida_hexrays.hx_restore_user_defined_calls
hx_save_user_defined_calls = _ida_hexrays.hx_save_user_defined_calls
hx_parse_user_call = _ida_hexrays.hx_parse_user_call
hx_convert_to_user_call = _ida_hexrays.hx_convert_to_user_call
hx_install_microcode_filter = _ida_hexrays.hx_install_microcode_filter
hx_udc_filter_t_init = _ida_hexrays.hx_udc_filter_t_init
hx_udc_filter_t_apply = _ida_hexrays.hx_udc_filter_t_apply
hx_bitset_t_bitset_t = _ida_hexrays.hx_bitset_t_bitset_t
hx_bitset_t_copy = _ida_hexrays.hx_bitset_t_copy
hx_bitset_t_add = _ida_hexrays.hx_bitset_t_add
hx_bitset_t_add_ = _ida_hexrays.hx_bitset_t_add_
hx_bitset_t_add__ = _ida_hexrays.hx_bitset_t_add__
hx_bitset_t_sub = _ida_hexrays.hx_bitset_t_sub
hx_bitset_t_sub_ = _ida_hexrays.hx_bitset_t_sub_
hx_bitset_t_sub__ = _ida_hexrays.hx_bitset_t_sub__
hx_bitset_t_cut_at = _ida_hexrays.hx_bitset_t_cut_at
hx_bitset_t_shift_down = _ida_hexrays.hx_bitset_t_shift_down
hx_bitset_t_has = _ida_hexrays.hx_bitset_t_has
hx_bitset_t_has_all = _ida_hexrays.hx_bitset_t_has_all
hx_bitset_t_has_any = _ida_hexrays.hx_bitset_t_has_any
hx_bitset_t_dstr = _ida_hexrays.hx_bitset_t_dstr
hx_bitset_t_empty = _ida_hexrays.hx_bitset_t_empty
hx_bitset_t_count = _ida_hexrays.hx_bitset_t_count
hx_bitset_t_count_ = _ida_hexrays.hx_bitset_t_count_
hx_bitset_t_last = _ida_hexrays.hx_bitset_t_last
hx_bitset_t_fill_with_ones = _ida_hexrays.hx_bitset_t_fill_with_ones
hx_bitset_t_has_common = _ida_hexrays.hx_bitset_t_has_common
hx_bitset_t_intersect = _ida_hexrays.hx_bitset_t_intersect
hx_bitset_t_is_subset_of = _ida_hexrays.hx_bitset_t_is_subset_of
hx_bitset_t_compare = _ida_hexrays.hx_bitset_t_compare
hx_bitset_t_goup = _ida_hexrays.hx_bitset_t_goup
hx_ivl_t_dstr = _ida_hexrays.hx_ivl_t_dstr
hx_ivl_t_compare = _ida_hexrays.hx_ivl_t_compare
hx_ivlset_t_add = _ida_hexrays.hx_ivlset_t_add
hx_ivlset_t_add_ = _ida_hexrays.hx_ivlset_t_add_
hx_ivlset_t_addmasked = _ida_hexrays.hx_ivlset_t_addmasked
hx_ivlset_t_sub = _ida_hexrays.hx_ivlset_t_sub
hx_ivlset_t_sub_ = _ida_hexrays.hx_ivlset_t_sub_
hx_ivlset_t_has_common = _ida_hexrays.hx_ivlset_t_has_common
hx_ivlset_t_print = _ida_hexrays.hx_ivlset_t_print
hx_ivlset_t_dstr = _ida_hexrays.hx_ivlset_t_dstr
hx_ivlset_t_count = _ida_hexrays.hx_ivlset_t_count
hx_ivlset_t_has_common_ = _ida_hexrays.hx_ivlset_t_has_common_
hx_ivlset_t_contains = _ida_hexrays.hx_ivlset_t_contains
hx_ivlset_t_includes = _ida_hexrays.hx_ivlset_t_includes
hx_ivlset_t_intersect = _ida_hexrays.hx_ivlset_t_intersect
hx_ivlset_t_compare = _ida_hexrays.hx_ivlset_t_compare
hx_get_mreg_name = _ida_hexrays.hx_get_mreg_name
hx_rlist_t_print = _ida_hexrays.hx_rlist_t_print
hx_rlist_t_dstr = _ida_hexrays.hx_rlist_t_dstr
hx_mlist_t_addmem = _ida_hexrays.hx_mlist_t_addmem
hx_mlist_t_print = _ida_hexrays.hx_mlist_t_print
hx_mlist_t_dstr = _ida_hexrays.hx_mlist_t_dstr
hx_mlist_t_compare = _ida_hexrays.hx_mlist_t_compare
hx_lvar_ref_t_compare = _ida_hexrays.hx_lvar_ref_t_compare
hx_lvar_ref_t_var = _ida_hexrays.hx_lvar_ref_t_var
hx_stkvar_ref_t_compare = _ida_hexrays.hx_stkvar_ref_t_compare
hx_stkvar_ref_t_get_stkvar = _ida_hexrays.hx_stkvar_ref_t_get_stkvar
hx_fnumber_t_print = _ida_hexrays.hx_fnumber_t_print
hx_fnumber_t_dstr = _ida_hexrays.hx_fnumber_t_dstr
hx_mop_t_copy = _ida_hexrays.hx_mop_t_copy
hx_mop_t_assign = _ida_hexrays.hx_mop_t_assign
hx_mop_t_swap = _ida_hexrays.hx_mop_t_swap
hx_mop_t_erase = _ida_hexrays.hx_mop_t_erase
hx_mop_t_print = _ida_hexrays.hx_mop_t_print
hx_mop_t_dstr = _ida_hexrays.hx_mop_t_dstr
hx_mop_t_create_from_mlist = _ida_hexrays.hx_mop_t_create_from_mlist
hx_mop_t_create_from_ivlset = _ida_hexrays.hx_mop_t_create_from_ivlset
hx_mop_t_create_from_vdloc = _ida_hexrays.hx_mop_t_create_from_vdloc
hx_mop_t_create_from_scattered_vdloc = _ida_hexrays.hx_mop_t_create_from_scattered_vdloc
hx_mop_t_create_from_insn = _ida_hexrays.hx_mop_t_create_from_insn
hx_mop_t_make_number = _ida_hexrays.hx_mop_t_make_number
hx_mop_t_make_fpnum = _ida_hexrays.hx_mop_t_make_fpnum
hx_mop_t_make_reg_pair = _ida_hexrays.hx_mop_t_make_reg_pair
hx_mop_t_make_helper = _ida_hexrays.hx_mop_t_make_helper
hx_mop_t_is_bit_reg = _ida_hexrays.hx_mop_t_is_bit_reg
hx_mop_t_may_use_aliased_memory = _ida_hexrays.hx_mop_t_may_use_aliased_memory
hx_mop_t_is01 = _ida_hexrays.hx_mop_t_is01
hx_mop_t_is_sign_extended_from = _ida_hexrays.hx_mop_t_is_sign_extended_from
hx_mop_t_is_zero_extended_from = _ida_hexrays.hx_mop_t_is_zero_extended_from
hx_mop_t_equal_mops = _ida_hexrays.hx_mop_t_equal_mops
hx_mop_t_lexcompare = _ida_hexrays.hx_mop_t_lexcompare
hx_mop_t_for_all_ops = _ida_hexrays.hx_mop_t_for_all_ops
hx_mop_t_for_all_scattered_submops = _ida_hexrays.hx_mop_t_for_all_scattered_submops
hx_mop_t_is_constant = _ida_hexrays.hx_mop_t_is_constant
hx_mop_t_get_stkoff = _ida_hexrays.hx_mop_t_get_stkoff
hx_mop_t_make_low_half = _ida_hexrays.hx_mop_t_make_low_half
hx_mop_t_make_high_half = _ida_hexrays.hx_mop_t_make_high_half
hx_mop_t_make_first_half = _ida_hexrays.hx_mop_t_make_first_half
hx_mop_t_make_second_half = _ida_hexrays.hx_mop_t_make_second_half
hx_mop_t_shift_mop = _ida_hexrays.hx_mop_t_shift_mop
hx_mop_t_change_size = _ida_hexrays.hx_mop_t_change_size
hx_mop_t_preserve_side_effects = _ida_hexrays.hx_mop_t_preserve_side_effects
hx_mop_t_apply_ld_mcode = _ida_hexrays.hx_mop_t_apply_ld_mcode
hx_mcallarg_t_print = _ida_hexrays.hx_mcallarg_t_print
hx_mcallarg_t_dstr = _ida_hexrays.hx_mcallarg_t_dstr
hx_mcallarg_t_set_regarg = _ida_hexrays.hx_mcallarg_t_set_regarg
hx_mcallinfo_t_lexcompare = _ida_hexrays.hx_mcallinfo_t_lexcompare
hx_mcallinfo_t_set_type = _ida_hexrays.hx_mcallinfo_t_set_type
hx_mcallinfo_t_get_type = _ida_hexrays.hx_mcallinfo_t_get_type
hx_mcallinfo_t_print = _ida_hexrays.hx_mcallinfo_t_print
hx_mcallinfo_t_dstr = _ida_hexrays.hx_mcallinfo_t_dstr
hx_mcases_t_compare = _ida_hexrays.hx_mcases_t_compare
hx_mcases_t_print = _ida_hexrays.hx_mcases_t_print
hx_mcases_t_dstr = _ida_hexrays.hx_mcases_t_dstr
hx_vivl_t_extend_to_cover = _ida_hexrays.hx_vivl_t_extend_to_cover
hx_vivl_t_intersect = _ida_hexrays.hx_vivl_t_intersect
hx_vivl_t_print = _ida_hexrays.hx_vivl_t_print
hx_vivl_t_dstr = _ida_hexrays.hx_vivl_t_dstr
hx_chain_t_print = _ida_hexrays.hx_chain_t_print
hx_chain_t_dstr = _ida_hexrays.hx_chain_t_dstr
hx_chain_t_append_list = _ida_hexrays.hx_chain_t_append_list
hx_block_chains_t_get_chain = _ida_hexrays.hx_block_chains_t_get_chain
hx_block_chains_t_print = _ida_hexrays.hx_block_chains_t_print
hx_block_chains_t_dstr = _ida_hexrays.hx_block_chains_t_dstr
hx_graph_chains_t_for_all_chains = _ida_hexrays.hx_graph_chains_t_for_all_chains
hx_graph_chains_t_release = _ida_hexrays.hx_graph_chains_t_release
hx_minsn_t_init = _ida_hexrays.hx_minsn_t_init
hx_minsn_t_copy = _ida_hexrays.hx_minsn_t_copy
hx_minsn_t_swap = _ida_hexrays.hx_minsn_t_swap
hx_minsn_t_print = _ida_hexrays.hx_minsn_t_print
hx_minsn_t_dstr = _ida_hexrays.hx_minsn_t_dstr
hx_minsn_t_setaddr = _ida_hexrays.hx_minsn_t_setaddr
hx_minsn_t_optimize_subtree = _ida_hexrays.hx_minsn_t_optimize_subtree
hx_minsn_t_for_all_ops = _ida_hexrays.hx_minsn_t_for_all_ops
hx_minsn_t_for_all_insns = _ida_hexrays.hx_minsn_t_for_all_insns
hx_minsn_t__make_nop = _ida_hexrays.hx_minsn_t__make_nop
hx_minsn_t_equal_insns = _ida_hexrays.hx_minsn_t_equal_insns
hx_minsn_t_lexcompare = _ida_hexrays.hx_minsn_t_lexcompare
hx_minsn_t_is_noret_call = _ida_hexrays.hx_minsn_t_is_noret_call
hx_minsn_t_is_helper = _ida_hexrays.hx_minsn_t_is_helper
hx_minsn_t_find_call = _ida_hexrays.hx_minsn_t_find_call
hx_minsn_t_has_side_effects = _ida_hexrays.hx_minsn_t_has_side_effects
hx_minsn_t_find_opcode = _ida_hexrays.hx_minsn_t_find_opcode
hx_minsn_t_find_ins_op = _ida_hexrays.hx_minsn_t_find_ins_op
hx_minsn_t_find_num_op = _ida_hexrays.hx_minsn_t_find_num_op
hx_minsn_t_modifes_d = _ida_hexrays.hx_minsn_t_modifes_d
hx_minsn_t_is_between = _ida_hexrays.hx_minsn_t_is_between
hx_minsn_t_may_use_aliased_memory = _ida_hexrays.hx_minsn_t_may_use_aliased_memory
hx_getf_reginsn = _ida_hexrays.hx_getf_reginsn
hx_getb_reginsn = _ida_hexrays.hx_getb_reginsn
hx_mblock_t_init = _ida_hexrays.hx_mblock_t_init
hx_mblock_t_print = _ida_hexrays.hx_mblock_t_print
hx_mblock_t_dump = _ida_hexrays.hx_mblock_t_dump
hx_mblock_t_vdump_block = _ida_hexrays.hx_mblock_t_vdump_block
hx_mblock_t_insert_into_block = _ida_hexrays.hx_mblock_t_insert_into_block
hx_mblock_t_remove_from_block = _ida_hexrays.hx_mblock_t_remove_from_block
hx_mblock_t_for_all_insns = _ida_hexrays.hx_mblock_t_for_all_insns
hx_mblock_t_for_all_ops = _ida_hexrays.hx_mblock_t_for_all_ops
hx_mblock_t_for_all_uses = _ida_hexrays.hx_mblock_t_for_all_uses
hx_mblock_t_optimize_insn = _ida_hexrays.hx_mblock_t_optimize_insn
hx_mblock_t_optimize_block = _ida_hexrays.hx_mblock_t_optimize_block
hx_mblock_t_build_lists = _ida_hexrays.hx_mblock_t_build_lists
hx_mblock_t_append_use_list = _ida_hexrays.hx_mblock_t_append_use_list
hx_mblock_t_append_def_list = _ida_hexrays.hx_mblock_t_append_def_list
hx_mblock_t_build_use_list = _ida_hexrays.hx_mblock_t_build_use_list
hx_mblock_t_build_def_list = _ida_hexrays.hx_mblock_t_build_def_list
hx_mblock_t_find_first_use = _ida_hexrays.hx_mblock_t_find_first_use
hx_mblock_t_find_redefinition = _ida_hexrays.hx_mblock_t_find_redefinition
hx_mblock_t_is_rhs_redefined = _ida_hexrays.hx_mblock_t_is_rhs_redefined
hx_mblock_t_find_access = _ida_hexrays.hx_mblock_t_find_access
hx_mblock_t_get_valranges = _ida_hexrays.hx_mblock_t_get_valranges
hx_mbl_array_t_idaloc2vd = _ida_hexrays.hx_mbl_array_t_idaloc2vd
hx_mbl_array_t_vd2idaloc = _ida_hexrays.hx_mbl_array_t_vd2idaloc
hx_mbl_array_t_term = _ida_hexrays.hx_mbl_array_t_term
hx_mbl_array_t_optimize_local = _ida_hexrays.hx_mbl_array_t_optimize_local
hx_mbl_array_t_build_graph = _ida_hexrays.hx_mbl_array_t_build_graph
hx_mbl_array_t_get_graph = _ida_hexrays.hx_mbl_array_t_get_graph
hx_mbl_array_t_analyze_calls = _ida_hexrays.hx_mbl_array_t_analyze_calls
hx_mbl_array_t_optimize_global = _ida_hexrays.hx_mbl_array_t_optimize_global
hx_mbl_array_t_alloc_lvars = _ida_hexrays.hx_mbl_array_t_alloc_lvars
hx_mbl_array_t_dump = _ida_hexrays.hx_mbl_array_t_dump
hx_mbl_array_t_vdump_mba = _ida_hexrays.hx_mbl_array_t_vdump_mba
hx_mbl_array_t_print = _ida_hexrays.hx_mbl_array_t_print
hx_mbl_array_t_verify = _ida_hexrays.hx_mbl_array_t_verify
hx_mbl_array_t_mark_chains_dirty = _ida_hexrays.hx_mbl_array_t_mark_chains_dirty
hx_mbl_array_t_insert_block = _ida_hexrays.hx_mbl_array_t_insert_block
hx_mbl_array_t_remove_block = _ida_hexrays.hx_mbl_array_t_remove_block
hx_mbl_array_t_remove_empty_blocks = _ida_hexrays.hx_mbl_array_t_remove_empty_blocks
hx_mbl_array_t_combine_blocks = _ida_hexrays.hx_mbl_array_t_combine_blocks
hx_mbl_array_t_for_all_ops = _ida_hexrays.hx_mbl_array_t_for_all_ops
hx_mbl_array_t_for_all_insns = _ida_hexrays.hx_mbl_array_t_for_all_insns
hx_mbl_array_t_for_all_topinsns = _ida_hexrays.hx_mbl_array_t_for_all_topinsns
hx_mbl_array_t_find_mop = _ida_hexrays.hx_mbl_array_t_find_mop
hx_mbl_array_t_arg = _ida_hexrays.hx_mbl_array_t_arg
hx_mbl_array_t_serialize = _ida_hexrays.hx_mbl_array_t_serialize
hx_mbl_array_t_deserialize = _ida_hexrays.hx_mbl_array_t_deserialize
hx_mbl_graph_t_is_accessed_globally = _ida_hexrays.hx_mbl_graph_t_is_accessed_globally
hx_mbl_graph_t_get_ud = _ida_hexrays.hx_mbl_graph_t_get_ud
hx_mbl_graph_t_get_du = _ida_hexrays.hx_mbl_graph_t_get_du
hx_codegen_t_emit = _ida_hexrays.hx_codegen_t_emit
hx_codegen_t_emit_ = _ida_hexrays.hx_codegen_t_emit_
hx_is_kreg = _ida_hexrays.hx_is_kreg
hx_get_temp_regs = _ida_hexrays.hx_get_temp_regs
hx_get_hexrays_version = _ida_hexrays.hx_get_hexrays_version
hx_open_pseudocode = _ida_hexrays.hx_open_pseudocode
hx_close_pseudocode = _ida_hexrays.hx_close_pseudocode
hx_get_widget_vdui = _ida_hexrays.hx_get_widget_vdui
hx_decompile_many = _ida_hexrays.hx_decompile_many
hx_hexrays_failure_t_desc = _ida_hexrays.hx_hexrays_failure_t_desc
hx_send_database = _ida_hexrays.hx_send_database
hx_gco_info_t_append_to_list = _ida_hexrays.hx_gco_info_t_append_to_list
hx_get_current_operand = _ida_hexrays.hx_get_current_operand
hx_remitem = _ida_hexrays.hx_remitem
hx_negated_relation = _ida_hexrays.hx_negated_relation
hx_swapped_relation = _ida_hexrays.hx_swapped_relation
hx_get_op_signness = _ida_hexrays.hx_get_op_signness
hx_asgop = _ida_hexrays.hx_asgop
hx_asgop_revert = _ida_hexrays.hx_asgop_revert
hx_cnumber_t_print = _ida_hexrays.hx_cnumber_t_print
hx_cnumber_t_value = _ida_hexrays.hx_cnumber_t_value
hx_cnumber_t_assign = _ida_hexrays.hx_cnumber_t_assign
hx_cnumber_t_compare = _ida_hexrays.hx_cnumber_t_compare
hx_var_ref_t_compare = _ida_hexrays.hx_var_ref_t_compare
hx_ctree_visitor_t_apply_to = _ida_hexrays.hx_ctree_visitor_t_apply_to
hx_ctree_visitor_t_apply_to_exprs = _ida_hexrays.hx_ctree_visitor_t_apply_to_exprs
hx_ctree_parentee_t_recalc_parent_types = _ida_hexrays.hx_ctree_parentee_t_recalc_parent_types
hx_cfunc_parentee_t_calc_rvalue_type = _ida_hexrays.hx_cfunc_parentee_t_calc_rvalue_type
hx_citem_locator_t_compare = _ida_hexrays.hx_citem_locator_t_compare
hx_citem_t_contains_expr = _ida_hexrays.hx_citem_t_contains_expr
hx_citem_t_contains_label = _ida_hexrays.hx_citem_t_contains_label
hx_citem_t_find_parent_of = _ida_hexrays.hx_citem_t_find_parent_of
hx_citem_t_find_closest_addr = _ida_hexrays.hx_citem_t_find_closest_addr
hx_cexpr_t_assign = _ida_hexrays.hx_cexpr_t_assign
hx_cexpr_t_compare = _ida_hexrays.hx_cexpr_t_compare
hx_cexpr_t_replace_by = _ida_hexrays.hx_cexpr_t_replace_by
hx_cexpr_t_cleanup = _ida_hexrays.hx_cexpr_t_cleanup
hx_cexpr_t_put_number = _ida_hexrays.hx_cexpr_t_put_number
hx_cexpr_t_print1 = _ida_hexrays.hx_cexpr_t_print1
hx_cexpr_t_calc_type = _ida_hexrays.hx_cexpr_t_calc_type
hx_cexpr_t_equal_effect = _ida_hexrays.hx_cexpr_t_equal_effect
hx_cexpr_t_is_child_of = _ida_hexrays.hx_cexpr_t_is_child_of
hx_cexpr_t_contains_operator = _ida_hexrays.hx_cexpr_t_contains_operator
hx_cexpr_t_get_high_nbit_bound = _ida_hexrays.hx_cexpr_t_get_high_nbit_bound
hx_cexpr_t_get_low_nbit_bound = _ida_hexrays.hx_cexpr_t_get_low_nbit_bound
hx_cexpr_t_requires_lvalue = _ida_hexrays.hx_cexpr_t_requires_lvalue
hx_cexpr_t_has_side_effects = _ida_hexrays.hx_cexpr_t_has_side_effects
hx_cif_t_assign = _ida_hexrays.hx_cif_t_assign
hx_cif_t_compare = _ida_hexrays.hx_cif_t_compare
hx_cloop_t_assign = _ida_hexrays.hx_cloop_t_assign
hx_cfor_t_compare = _ida_hexrays.hx_cfor_t_compare
hx_cwhile_t_compare = _ida_hexrays.hx_cwhile_t_compare
hx_cdo_t_compare = _ida_hexrays.hx_cdo_t_compare
hx_creturn_t_compare = _ida_hexrays.hx_creturn_t_compare
hx_cgoto_t_compare = _ida_hexrays.hx_cgoto_t_compare
hx_casm_t_compare = _ida_hexrays.hx_casm_t_compare
hx_cinsn_t_assign = _ida_hexrays.hx_cinsn_t_assign
hx_cinsn_t_compare = _ida_hexrays.hx_cinsn_t_compare
hx_cinsn_t_replace_by = _ida_hexrays.hx_cinsn_t_replace_by
hx_cinsn_t_cleanup = _ida_hexrays.hx_cinsn_t_cleanup
hx_cinsn_t_new_insn = _ida_hexrays.hx_cinsn_t_new_insn
hx_cinsn_t_create_if = _ida_hexrays.hx_cinsn_t_create_if
hx_cinsn_t_print = _ida_hexrays.hx_cinsn_t_print
hx_cinsn_t_print1 = _ida_hexrays.hx_cinsn_t_print1
hx_cinsn_t_is_ordinary_flow = _ida_hexrays.hx_cinsn_t_is_ordinary_flow
hx_cinsn_t_contains_insn = _ida_hexrays.hx_cinsn_t_contains_insn
hx_cinsn_t_collect_free_breaks = _ida_hexrays.hx_cinsn_t_collect_free_breaks
hx_cinsn_t_collect_free_continues = _ida_hexrays.hx_cinsn_t_collect_free_continues
hx_cblock_t_compare = _ida_hexrays.hx_cblock_t_compare
hx_carglist_t_compare = _ida_hexrays.hx_carglist_t_compare
hx_ccase_t_compare = _ida_hexrays.hx_ccase_t_compare
hx_ccases_t_compare = _ida_hexrays.hx_ccases_t_compare
hx_cswitch_t_compare = _ida_hexrays.hx_cswitch_t_compare
hx_ctree_item_t_get_memptr = _ida_hexrays.hx_ctree_item_t_get_memptr
hx_ctree_item_t_get_lvar = _ida_hexrays.hx_ctree_item_t_get_lvar
hx_ctree_item_t_get_ea = _ida_hexrays.hx_ctree_item_t_get_ea
hx_ctree_item_t_get_label_num = _ida_hexrays.hx_ctree_item_t_get_label_num
hx_lnot = _ida_hexrays.hx_lnot
hx_new_block = _ida_hexrays.hx_new_block
hx_vcreate_helper = _ida_hexrays.hx_vcreate_helper
hx_vcall_helper = _ida_hexrays.hx_vcall_helper
hx_make_num = _ida_hexrays.hx_make_num
hx_make_ref = _ida_hexrays.hx_make_ref
hx_dereference = _ida_hexrays.hx_dereference
hx_save_user_labels = _ida_hexrays.hx_save_user_labels
hx_save_user_cmts = _ida_hexrays.hx_save_user_cmts
hx_save_user_numforms = _ida_hexrays.hx_save_user_numforms
hx_save_user_iflags = _ida_hexrays.hx_save_user_iflags
hx_save_user_unions = _ida_hexrays.hx_save_user_unions
hx_restore_user_labels = _ida_hexrays.hx_restore_user_labels
hx_restore_user_cmts = _ida_hexrays.hx_restore_user_cmts
hx_restore_user_numforms = _ida_hexrays.hx_restore_user_numforms
hx_restore_user_iflags = _ida_hexrays.hx_restore_user_iflags
hx_restore_user_unions = _ida_hexrays.hx_restore_user_unions
hx_cfunc_t_build_c_tree = _ida_hexrays.hx_cfunc_t_build_c_tree
hx_cfunc_t_verify = _ida_hexrays.hx_cfunc_t_verify
hx_cfunc_t_print_dcl = _ida_hexrays.hx_cfunc_t_print_dcl
hx_cfunc_t_print_func = _ida_hexrays.hx_cfunc_t_print_func
hx_cfunc_t_get_func_type = _ida_hexrays.hx_cfunc_t_get_func_type
hx_cfunc_t_get_lvars = _ida_hexrays.hx_cfunc_t_get_lvars
hx_cfunc_t_get_stkoff_delta = _ida_hexrays.hx_cfunc_t_get_stkoff_delta
hx_cfunc_t_find_label = _ida_hexrays.hx_cfunc_t_find_label
hx_cfunc_t_remove_unused_labels = _ida_hexrays.hx_cfunc_t_remove_unused_labels
hx_cfunc_t_get_user_cmt = _ida_hexrays.hx_cfunc_t_get_user_cmt
hx_cfunc_t_set_user_cmt = _ida_hexrays.hx_cfunc_t_set_user_cmt
hx_cfunc_t_get_user_iflags = _ida_hexrays.hx_cfunc_t_get_user_iflags
hx_cfunc_t_set_user_iflags = _ida_hexrays.hx_cfunc_t_set_user_iflags
hx_cfunc_t_has_orphan_cmts = _ida_hexrays.hx_cfunc_t_has_orphan_cmts
hx_cfunc_t_del_orphan_cmts = _ida_hexrays.hx_cfunc_t_del_orphan_cmts
hx_cfunc_t_get_user_union_selection = _ida_hexrays.hx_cfunc_t_get_user_union_selection
hx_cfunc_t_set_user_union_selection = _ida_hexrays.hx_cfunc_t_set_user_union_selection
hx_cfunc_t_get_line_item = _ida_hexrays.hx_cfunc_t_get_line_item
hx_cfunc_t_get_warnings = _ida_hexrays.hx_cfunc_t_get_warnings
hx_cfunc_t_get_eamap = _ida_hexrays.hx_cfunc_t_get_eamap
hx_cfunc_t_get_boundaries = _ida_hexrays.hx_cfunc_t_get_boundaries
hx_cfunc_t_get_pseudocode = _ida_hexrays.hx_cfunc_t_get_pseudocode
hx_cfunc_t_gather_derefs = _ida_hexrays.hx_cfunc_t_gather_derefs
hx_cfunc_t_find_item_coords = _ida_hexrays.hx_cfunc_t_find_item_coords
hx_cfunc_t_cleanup = _ida_hexrays.hx_cfunc_t_cleanup
hx_decompile = _ida_hexrays.hx_decompile
hx_gen_microcode = _ida_hexrays.hx_gen_microcode
hx_mark_cfunc_dirty = _ida_hexrays.hx_mark_cfunc_dirty
hx_clear_cached_cfuncs = _ida_hexrays.hx_clear_cached_cfuncs
hx_has_cached_cfunc = _ida_hexrays.hx_has_cached_cfunc
hx_get_ctype_name = _ida_hexrays.hx_get_ctype_name
hx_create_field_name = _ida_hexrays.hx_create_field_name
hx_install_hexrays_callback = _ida_hexrays.hx_install_hexrays_callback
hx_remove_hexrays_callback = _ida_hexrays.hx_remove_hexrays_callback
hx_vdui_t_set_locked = _ida_hexrays.hx_vdui_t_set_locked
hx_vdui_t_refresh_view = _ida_hexrays.hx_vdui_t_refresh_view
hx_vdui_t_refresh_ctext = _ida_hexrays.hx_vdui_t_refresh_ctext
hx_vdui_t_switch_to = _ida_hexrays.hx_vdui_t_switch_to
hx_vdui_t_get_number = _ida_hexrays.hx_vdui_t_get_number
hx_vdui_t_get_current_label = _ida_hexrays.hx_vdui_t_get_current_label
hx_vdui_t_clear = _ida_hexrays.hx_vdui_t_clear
hx_vdui_t_refresh_cpos = _ida_hexrays.hx_vdui_t_refresh_cpos
hx_vdui_t_get_current_item = _ida_hexrays.hx_vdui_t_get_current_item
hx_vdui_t_ui_rename_lvar = _ida_hexrays.hx_vdui_t_ui_rename_lvar
hx_vdui_t_rename_lvar = _ida_hexrays.hx_vdui_t_rename_lvar
hx_vdui_t_ui_set_call_type = _ida_hexrays.hx_vdui_t_ui_set_call_type
hx_vdui_t_ui_set_lvar_type = _ida_hexrays.hx_vdui_t_ui_set_lvar_type
hx_vdui_t_set_lvar_type = _ida_hexrays.hx_vdui_t_set_lvar_type
hx_vdui_t_ui_edit_lvar_cmt = _ida_hexrays.hx_vdui_t_ui_edit_lvar_cmt
hx_vdui_t_set_lvar_cmt = _ida_hexrays.hx_vdui_t_set_lvar_cmt
hx_vdui_t_ui_map_lvar = _ida_hexrays.hx_vdui_t_ui_map_lvar
hx_vdui_t_ui_unmap_lvar = _ida_hexrays.hx_vdui_t_ui_unmap_lvar
hx_vdui_t_map_lvar = _ida_hexrays.hx_vdui_t_map_lvar
hx_vdui_t_set_strmem_type = _ida_hexrays.hx_vdui_t_set_strmem_type
hx_vdui_t_rename_strmem = _ida_hexrays.hx_vdui_t_rename_strmem
hx_vdui_t_set_global_type = _ida_hexrays.hx_vdui_t_set_global_type
hx_vdui_t_rename_global = _ida_hexrays.hx_vdui_t_rename_global
hx_vdui_t_rename_label = _ida_hexrays.hx_vdui_t_rename_label
hx_vdui_t_jump_enter = _ida_hexrays.hx_vdui_t_jump_enter
hx_vdui_t_ctree_to_disasm = _ida_hexrays.hx_vdui_t_ctree_to_disasm
hx_vdui_t_calc_cmt_type = _ida_hexrays.hx_vdui_t_calc_cmt_type
hx_vdui_t_edit_cmt = _ida_hexrays.hx_vdui_t_edit_cmt
hx_vdui_t_edit_func_cmt = _ida_hexrays.hx_vdui_t_edit_func_cmt
hx_vdui_t_del_orphan_cmts = _ida_hexrays.hx_vdui_t_del_orphan_cmts
hx_vdui_t_set_num_radix = _ida_hexrays.hx_vdui_t_set_num_radix
hx_vdui_t_set_num_enum = _ida_hexrays.hx_vdui_t_set_num_enum
hx_vdui_t_set_num_stroff = _ida_hexrays.hx_vdui_t_set_num_stroff
hx_vdui_t_invert_sign = _ida_hexrays.hx_vdui_t_invert_sign
hx_vdui_t_invert_bits = _ida_hexrays.hx_vdui_t_invert_bits
hx_vdui_t_collapse_item = _ida_hexrays.hx_vdui_t_collapse_item
hx_vdui_t_collapse_lvars = _ida_hexrays.hx_vdui_t_collapse_lvars
hx_vdui_t_split_item = _ida_hexrays.hx_vdui_t_split_item
hx_hexrays_alloc = _ida_hexrays.hx_hexrays_alloc
hx_hexrays_free = _ida_hexrays.hx_hexrays_free
hx_vdui_t_set_noptr_lvar = _ida_hexrays.hx_vdui_t_set_noptr_lvar
hx_select_udt_by_offset = _ida_hexrays.hx_select_udt_by_offset
hx_mblock_t_get_valranges_ = _ida_hexrays.hx_mblock_t_get_valranges_
hx_cfunc_t_refresh_func_ctext = _ida_hexrays.hx_cfunc_t_refresh_func_ctext
hx_checkout_hexrays_license = _ida_hexrays.hx_checkout_hexrays_license
hx_mbl_array_t_copy_block = _ida_hexrays.hx_mbl_array_t_copy_block
hx_mblock_t_optimize_useless_jump = _ida_hexrays.hx_mblock_t_optimize_useless_jump
hx_mblock_t_get_reginsn_qty = _ida_hexrays.hx_mblock_t_get_reginsn_qty
class user_numforms_iterator_t(object):
"""
Proxy of C++ user_numforms_iterator_t class
"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
x = _swig_property(_ida_hexrays.user_numforms_iterator_t_x_get, _ida_hexrays.user_numforms_iterator_t_x_set)
def __eq__(self, *args):
"""
__eq__(self, p) -> bool
"""
return _ida_hexrays.user_numforms_iterator_t___eq__(self, *args)
def __ne__(self, *args):
"""
__ne__(self, p) -> bool
"""
return _ida_hexrays.user_numforms_iterator_t___ne__(self, *args)
def __init__(self, *args):
"""
__init__(self) -> user_numforms_iterator_t
"""
this = _ida_hexrays.new_user_numforms_iterator_t(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _ida_hexrays.delete_user_numforms_iterator_t
__del__ = lambda self : None;
user_numforms_iterator_t_swigregister = _ida_hexrays.user_numforms_iterator_t_swigregister
user_numforms_iterator_t_swigregister(user_numforms_iterator_t)
def user_numforms_begin(*args):
"""
user_numforms_begin(map) -> user_numforms_iterator_t
Get iterator pointing to the beginning of user_numforms_t.
@param map (C++: const user_numforms_t *)
"""
return _ida_hexrays.user_numforms_begin(*args)
def user_numforms_end(*args):
"""
user_numforms_end(map) -> user_numforms_iterator_t
Get iterator pointing to the end of user_numforms_t.
@param map (C++: const user_numforms_t *)
"""
return _ida_hexrays.user_numforms_end(*args)
def user_numforms_next(*args):
"""
user_numforms_next(p) -> user_numforms_iterator_t
Move to the next element.
@param p (C++: user_numforms_iterator_t)
"""
return _ida_hexrays.user_numforms_next(*args)
def user_numforms_prev(*args):
"""
user_numforms_prev(p) -> user_numforms_iterator_t
Move to the previous element.
@param p (C++: user_numforms_iterator_t)
"""
return _ida_hexrays.user_numforms_prev(*args)
def user_numforms_first(*args):
"""
user_numforms_first(p) -> operand_locator_t
Get reference to the current map key.
@param p (C++: user_numforms_iterator_t)
"""
return _ida_hexrays.user_numforms_first(*args)
def user_numforms_second(*args):
"""
user_numforms_second(p) -> number_format_t
Get reference to the current map value.
@param p (C++: user_numforms_iterator_t)
"""
return _ida_hexrays.user_numforms_second(*args)
def user_numforms_find(*args):
"""
user_numforms_find(map, key) -> user_numforms_iterator_t
Find the specified key in user_numforms_t.
@param map (C++: const user_numforms_t *)
@param key (C++: const operand_locator_t &)
"""
return _ida_hexrays.user_numforms_find(*args)
def user_numforms_insert(*args):
"""
user_numforms_insert(map, key, val) -> user_numforms_iterator_t
Insert new ( 'operand_locator_t' , 'number_format_t' ) pair into
user_numforms_t.
@param map (C++: user_numforms_t *)
@param key (C++: const operand_locator_t &)
@param val (C++: const number_format_t &)
"""
return _ida_hexrays.user_numforms_insert(*args)
def user_numforms_erase(*args):
"""
user_numforms_erase(map, p)
Erase current element from user_numforms_t.
@param map (C++: user_numforms_t *)
@param p (C++: user_numforms_iterator_t)
"""
return _ida_hexrays.user_numforms_erase(*args)
def user_numforms_clear(*args):
"""
user_numforms_clear(map)
Clear user_numforms_t.
@param map (C++: user_numforms_t *)
"""
return _ida_hexrays.user_numforms_clear(*args)
def user_numforms_size(*args):
"""
user_numforms_size(map) -> size_t
Get size of user_numforms_t.
@param map (C++: user_numforms_t *)
"""
return _ida_hexrays.user_numforms_size(*args)
def user_numforms_free(*args):
"""
user_numforms_free(map)
Delete user_numforms_t instance.
@param map (C++: user_numforms_t *)
"""
return _ida_hexrays.user_numforms_free(*args)
def user_numforms_new(*args):
"""
user_numforms_new() -> user_numforms_t
Create a new user_numforms_t instance.
"""
return _ida_hexrays.user_numforms_new(*args)
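# Hedged usage sketch (added note, not part of the generated bindings): iterating the
# saved number formats of a function; assumes `ea` is the start address of a function
# that has user-defined number formats.
#   numforms = restore_user_numforms(ea)
#   if numforms is not None:
#       it = user_numforms_begin(numforms)
#       while it != user_numforms_end(numforms):
#           ol = user_numforms_first(it)    # operand_locator_t key
#           nf = user_numforms_second(it)   # number_format_t value
#           print("%x operand %d" % (ol.ea, ol.opnum))
#           it = user_numforms_next(it)
#       user_numforms_free(numforms)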
class lvar_mapping_iterator_t(object):
"""
Proxy of C++ lvar_mapping_iterator_t class
"""
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
"""
========================================================================
Slicing_test.py
========================================================================
Author : <NAME>
Date : Aug 23, 2018
"""
from pymtl3.datatypes import Bits1, Bits2, Bits4, Bits14, Bits16, Bits24, Bits32
from pymtl3.dsl.ComponentLevel3 import ComponentLevel3, connect
from pymtl3.dsl.Connectable import Wire
from pymtl3.dsl.errors import MultiWriterError, NoWriterError
from .sim_utils import simple_sim_pass
def _test_model( cls ):
A = cls()
A.elaborate()
simple_sim_pass( A, 0x123 )
for i in range(10):
A.tick()
# write two disjoint slices
def test_write_two_disjoint_slices():
class Top( ComponentLevel3 ):
def construct( s ):
s.A = Wire( Bits32 )
@s.update
def up_wr_0_16():
s.A[0:16] = Bits16( 0xff )
@s.update
def up_wr_16_30():
s.A[16:30] = Bits14( 0xff )
@s.update
def up_rd_12_30():
assert s.A[12:30] == 0xff0
_test_model( Top )
# write two disjoint slices, but one slice is not read at all
def test_write_two_disjoint_slices_no_reader():
class Top( ComponentLevel3 ):
def construct( s ):
s.A = Wire( Bits32 )
@s.update
def up_wr_0_16():
s.A[0:16] = Bits16( 0xff )
@s.update
def up_wr_16_30():
s.A[16:30] = Bits14( 0xff )
@s.update
def up_rd_17_30():
assert s.A[16:30] == 0xff
m = Top()
m.elaborate()
simple_sim_pass( m, 0x123 )
# assert len(m._all_constraints) == 1
# x, y = list(m._all_constraints)[0]
# assert m._all_id_upblk[x].__name__ == "up_wr_16_30" and \
# m._all_id_upblk[y].__name__ == "up_rd_17_30" # only one constraint
# write two overlapping slices
def test_write_two_overlapping_slices():
class Top( ComponentLevel3 ):
def construct( s ):
s.A = Wire( Bits32 )
@s.update
def up_wr_0_24():
s.A[0:24] = Bits24( 0xff )
@s.update
def up_wr_8_32():
s.A[8:32] = Bits24( 0xff )
@s.update
def up_rd_A():
x = s.A
try:
_test_model( Top )
except MultiWriterError as e:
print("{} is thrown\n{}".format( e.__class__.__name__, e ))
return
raise Exception("Should've thrown MultiWriterError.")
# write two slices and a single bit
def test_write_two_slices_and_bit():
class Top( ComponentLevel3 ):
def construct( s ):
s.A = Wire( Bits32 )
@s.update
def up_wr_0_16():
s.A[0:16] = Bits16( 0xff )
@s.update
def up_wr_16_30():
s.A[16:30] = Bits14( 0xff )
@s.update
def up_wr_30_31():
s.A[30] = Bits1( 1 )
@s.update
def up_rd_A():
print(s.A[0:17])
m = Top()
m.elaborate()
simple_sim_pass( m, 0x123 )
# assert len(m._all_constraints) == 2
# _, x = list(m._all_constraints)[0]
# _, y = list(m._all_constraints)[1]
# two constraints are: up_wr_0_16 < up_rd_A and up_wr_16_30 < up_rd_A
# assert m._all_id_upblk[x].__name__ == "up_rd_A" and \
# m._all_id_upblk[y].__name__ == "up_rd_A"
# write a slice and a single bit that overlap
def test_write_slices_and_bit_overlapped():
class Top( ComponentLevel3 ):
def construct( s ):
s.A = Wire( Bits32 )
@s.update
def up_wr_0_16():
s.A[0:16] = Bits16( 0xff )
@s.update
def up_wr_15():
s.A[15] = Bits1( 1 )
@s.update
def up_rd_A():
print(s.A[0:17])
try:
_test_model( Top )
except MultiWriterError as e:
print("{} is thrown\n{}".format( e.__class__.__name__, e ))
return
raise Exception("Should've thrown MultiWriterError.")
# write a slice and there are two readers
def test_multiple_readers():
class Top( ComponentLevel3 ):
def construct( s ):
s.A = Wire( Bits32 )
@s.update
def up_wr_8_24():
s.A[8:24] = Bits16( 0x1234 )
@s.update
def up_rd_0_12():
assert s.A[0:12] == 0x400
@s.update
def up_rd_bunch():
assert s.A[23] == 0
assert s.A[22] == 0
assert s.A[21] == 0
assert s.A[20] == 1
assert s.A[19] == 0
assert s.A[18] == 0
assert s.A[17] == 1
assert s.A[16] == 0
_test_model( Top )
# 1. WR A[s], RD A (A[s] (=) A, SAME AS data struct)
# WR A[s], WR A (detect 2-writer conflict, SAME AS data struct)
# WR A[s], RD A[t] (A[s] (=) A[t] if s intersects t)
# WR A[s], WR A[t] (detect 2-writer conflict if s intersects t)
# 2. WR A , RD A[s] (A[s] (=) A, SAME AS data struct)
# 3. WR A[s], A |=y, RD y (mark A as writer in net {A,y}, SAME AS data struct)
# WR A[s], A |=y, WR y (detect 2-writer conflict, SAME AS data struct)
# WR A[s], A[t]|=y, RD y (mark A[t] as writer in net {A[t],y} if s intersects t)
# WR A[s], A[t]|=y, WR y (detect 2-writer conflict if s intersects t)
# 4. WR A , A[s]|=y, RD y (mark A[s] as writer in net {A[s],y}, SAME AS data struct)
# WR A , A[s]|=y, WR y (detect 2-writer conflict, SAME AS data struct)
# 5. WR x, x|=A[s], RD A (A[s] (=) A, SAME AS data struct)
# WR x, x|=A[s], RD A[t] (A[s] (=) A[t] if s intersects t)
# 6. WR x, x|=A , RD A[s] (A[s] (=) A, SAME AS data struct)
# 7. WR x, x|=A[s], A |=y, RD y (mark A as writer and implicit constraint)
# WR x, x|=A[s], A |=y, WR y (detect 2-writer conflict)
# WR x, x|=A[s], A[t]|=y, RD y (mark A[t] as writer and implicit constraint if s intersects t)
# WR x, x|=A[s], A[t]|=y, WR y (detect 2-writer conflict if s intersects t)
# 8. WR x, x|=A , A[s]|=y, RD y (mark A[s] as writer in net {A[s],y}, SAME AS data struct)
# --------------------------------------------------------------------------
# RD A[s]
# - WR A (A[s] (=) A, SAME AS data struct)
# - WR A[t] (A[s] (=) A[t] if s intersects t)
# - A |=x, WR x (A[s] (=) A, SAME AS data struct)
# - A[t]|=x, WR x (A[s] (=) A[t] if s intersects t)
# WR A[s]
# - RD A (A[s] (=) A, SAME AS data struct)
# - WR A (detect 2-writer conflict, SAME AS data struct)
# - WR A[t] (detect 2-writer conflict if s intersects t)
# - A |=x (mark A as writer in net {A,x}, SAME AS data struct)
# - A |=x, WR x (detect 2-writer conflict, SAME AS data struct)
# - A[t]|=x (mark A[t] as writer in net {A[t],x} if s intersects t)
# - A[t]|=x, WR x (detect 2-writer conflict if s intersects t)
# A[s]|=x
# - WR A (mark A[s] as writer in net {A[s],x}, SAME AS data struct)
# - A|=y, WR y (mark A[s] as writer in net {A[s],x}, SAME AS data struct)
# - A[t]|=y, WR y (mark A[s] as writer in net {A[s],x}, if s intersects t)
# A[s]|=x, WR x
# - RD A (A[s] (=) A, SAME AS data struct)
# - WR A (detect 2-writer conflict, SAME AS data struct)
# - A |=y (mark A as writer in net {A,y} SAME AS data struct)
# - A |=y, WR y (detect 2-writer conflict, SAME AS data struct)
# - A[t]|=y, WR y (detect 2-writer conflict if s intersects t)
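# Informal reading of the notation above (added note): "WR A[s]" means an update
# block writes slice s of signal A, "RD A" means a block reads A, "A|=x" means A
# and x are connected in a net, and "(=)" marks the implicit scheduling constraint
# the framework must add between the writing and reading blocks.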
# RD A[s] - WR A
def test_rd_As_wr_A_impl():
class Top( ComponentLevel3 ):
def construct( s ):
s.A = Wire( Bits32 )
@s.update
def up_wr_A():
s.A = Bits32( 123 )
@s.update
def up_rd_As():
assert s.A[0:16] == 123
_test_model( Top )
# RD A[s] - WR A[t], intersect
def test_rd_As_wr_At_impl_intersect():
class Top( ComponentLevel3 ):
def construct( s ):
s.A = Wire( Bits32 )
@s.update
def up_wr_At():
s.A[8:24] = Bits16( 0xff )
@s.update
def up_rd_As():
assert s.A[0:16] == 0xff00
_test_model( Top )
# RD A[s] - WR A[t], not intersect
def test_rd_As_wr_At_impl_disjoint():
class Top( ComponentLevel3 ):
def construct( s ):
s.A = Wire( Bits32 )
@s.update
def up_wr_At():
s.A[16:32] = Bits16( 0xff )
@s.update
def up_rd_As():
assert s.A[0:16] == 0
m = Top()
m.elaborate()
simple_sim_pass( m, 0x123 )
# assert len(m._all_constraints) == 0 # no constraint at all!
# WR A[s] - WR A
def test_wr_As_wr_A_conflict():
class Top( ComponentLevel3 ):
def construct( s ):
s.A = Wire( Bits32 )
@s.update
def up_wr_As():
s.A[1:3] = Bits2( 2 )
@s.update
def up_wr_A():
s.A = Bits32( 123 )
try:
_test_model( Top )
except MultiWriterError as e:
print("{} is thrown\n{}".format( e.__class__.__name__, e ))
return
raise Exception("Should've thrown MultiWriterError.")
# WR A[s] - WR A[t], intersect
def test_wr_As_wr_At_intersect():
class Top( ComponentLevel3 ):
def construct( s ):
s.A = Wire( Bits32 )
@s.update
def up_wr_As():
s.A[1:3] = Bits2( 2 )
@s.update
def up_wr_At():
s.A[2:4] = Bits2( 2 )
@s.update
def up_rd_A():
z = s.A
try:
_test_model( Top )
except MultiWriterError as e:
print("{} is thrown\n{}".format( e.__class__.__name__, e ))
return
raise Exception("Should've thrown MultiWriterError.")
# WR A[s] - WR A[t], not intersect
def test_wr_As_wr_At_disjoint():
class Top( ComponentLevel3 ):
def construct( s ):
s.A = Wire( Bits32 )
@s.update
def up_wr_As():
s.A[1:3] = Bits2( 2 )
@s.update
def up_wr_At():
s.A[5:7] = Bits2( 2 )
@s.update
def up_rd_A():
z = s.A
_test_model( Top )
# WR A[s] - RD A
def test_wr_As_rd_A_impl():
class Top( ComponentLevel3 ):
def construct( s ):
s.A = Wire( Bits32 )
@s.update
def up_wr_As():
s.A[1:3] = Bits2( 2 )
@s.update
def up_rd_A():
z = s.A
  _test_model( Top )
# coding: utf-8
"""*****************************************************************************
* Copyright (C) 2018 Microchip Technology Inc. and its subsidiaries.
*
* Subject to your compliance with these terms, you may use Microchip software
* and any derivatives exclusively with Microchip products. It is your
* responsibility to comply with third party license terms applicable to your
* use of third party software (including open source software) that may
* accompany Microchip software.
*
* THIS SOFTWARE IS SUPPLIED BY MICROCHIP "AS IS". NO WARRANTIES, WHETHER
* EXPRESS, IMPLIED OR STATUTORY, APPLY TO THIS SOFTWARE, INCLUDING ANY IMPLIED
* WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY, AND FITNESS FOR A
* PARTICULAR PURPOSE.
*
* IN NO EVENT WILL MICROCHIP BE LIABLE FOR ANY INDIRECT, SPECIAL, PUNITIVE,
* INCIDENTAL OR CONSEQUENTIAL LOSS, DAMAGE, COST OR EXPENSE OF ANY KIND
* WHATSOEVER RELATED TO THE SOFTWARE, HOWEVER CAUSED, EVEN IF MICROCHIP HAS
* BEEN ADVISED OF THE POSSIBILITY OR THE DAMAGES ARE FORESEEABLE. TO THE
* FULLEST EXTENT ALLOWED BY LAW, MICROCHIP'S TOTAL LIABILITY ON ALL CLAIMS IN
* ANY WAY RELATED TO THIS SOFTWARE WILL NOT EXCEED THE AMOUNT OF FEES, IF ANY,
* THAT YOU HAVE PAID DIRECTLY TO MICROCHIP FOR THIS SOFTWARE.
*****************************************************************************"""
###################################################################################################
########################### Global variables #################################
###################################################################################################
global afecInstanceName
afecSym_SEQ1R_USCH = []
afecCHMenu = []
afecSym_CH_CHER = []
afecSym_CH_NAME = []
afecSym_CH_PositiveInput = []
afecSym_CH_NegativeInput = []
afecSym_CH_SHMR_DUAL = []
afecSym_CH_DUAL_CHANNEL = []
afecSym_CH_CGR_GAIN = []
afecSym_CH_COCR_AOFF = []
afecSym_CH_IER_EOC = []
###################################################################################################
########################### Callback Functions #################################
###################################################################################################
def afecClockControl(symbol, event):
clockSet = False
Database.clearSymbolValue("core", afecInstanceName.getValue()+"_CLOCK_ENABLE")
for channelID in range(0, 12):
if (afecSym_CH_CHER[channelID].getValue() == True):
clockSet = True
if(clockSet == True):
Database.setSymbolValue("core", afecInstanceName.getValue()+"_CLOCK_ENABLE", True, 2)
else:
Database.setSymbolValue("core", afecInstanceName.getValue()+"_CLOCK_ENABLE", False, 2)
def afecinterruptControl(symbol, event):
nvicSet = False
interruptVector = afecInstanceName.getValue()+"_INTERRUPT_ENABLE"
interruptHandler = afecInstanceName.getValue()+"_INTERRUPT_HANDLER"
interruptHandlerLock = afecInstanceName.getValue()+"_INTERRUPT_HANDLER_LOCK"
Database.clearSymbolValue("core", interruptVector)
Database.clearSymbolValue("core", interruptHandler)
Database.clearSymbolValue("core", interruptHandlerLock)
for channelID in range(0, 12):
if (afecSym_CH_IER_EOC[channelID].getValue() == True):
nvicSet = True
if(nvicSet == True):
Database.setSymbolValue("core", interruptVector, True, 2)
Database.setSymbolValue("core", interruptHandler, afecInstanceName.getValue()+"_InterruptHandler", 2)
Database.setSymbolValue("core", interruptHandlerLock, True, 2)
else:
Database.setSymbolValue("core", interruptVector, False, 2)
Database.setSymbolValue("core", interruptHandler, afecInstanceName.getValue()+"_Handler", 2)
Database.setSymbolValue("core", interruptHandlerLock, False, 2)
def dependencyClockStatus(symbol, event):
clockSet = False
clock = bool(Database.getSymbolValue("core", afecInstanceName.getValue()+"_CLOCK_ENABLE"))
for channelID in range(0, 12):
if (afecSym_CH_CHER[channelID].getValue() == True):
clockSet = True
if(clockSet == True and clock == False):
symbol.setVisible(True)
else:
symbol.setVisible(False)
def dependencyIntStatus(symbol, event):
nvicSet = False
interruptVectorUpdate = afecInstanceName.getValue()+"_INTERRUPT_ENABLE_UPDATE"
nvic = bool(Database.getSymbolValue("core", interruptVectorUpdate))
for channelID in range(0, 12):
if (afecSym_CH_IER_EOC[channelID].getValue() == True):
nvicSet = True
if(nvicSet == True and nvic == True):
symbol.setVisible(True)
else:
symbol.setVisible(False)
def afecGetMasterClock():
main_clk_freq = int(Database.getSymbolValue("core", afecInstanceName.getValue() + "_CLOCK_FREQUENCY"))
return main_clk_freq
def afecPrescalWarning(symbol, event):
clock = afecGetMasterClock()
prescaler = afecSym_MR_PRESCAL.getValue() + 1
afecFreq = clock / prescaler
if (afecFreq < 4000000):
symbol.setLabel("AFEC Frequency < 4,000,000 Hz. Decrease prescaler value. ")
symbol.setVisible(True)
elif (afecFreq > 40000000):
symbol.setLabel("AFEC Frequency > 40,000,000 Hz. Increase prescaler value. ")
symbol.setVisible(True)
else:
symbol.setVisible(False)
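# Worked example (illustrative, assuming a 150 MHz master clock): the AFEC clock is
# clock / (PRESCAL + 1) and must stay within 4..40 MHz, so PRESCAL + 1 must lie
# between 150e6/40e6 = 3.75 and 150e6/4e6 = 37.5, i.e. PRESCAL roughly in 3..36.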
def afecFreqCalc(symbol, event):
clock = afecGetMasterClock()
prescaler = afecSym_MR_PRESCAL.getValue() + 1
afecFreq = clock / prescaler
symbol.clearValue()
symbol.setValue(afecFreq, 2)
def afecCalcConversionTime(afecSym_CONV_TIME, event):
clock = afecGetMasterClock()
if (clock == 0):
Log.writeErrorMessage("Master clock frequency is zero")
clock = 1
prescaler = afecSym_MR_PRESCAL.getValue() + 1
result_resolution = afecSym_EMR_RES_VALUE.getSelectedKey()
multiplier = 1
if (result_resolution == "NO_AVERAGE"):
multiplier = 1
if (result_resolution == "OSR4"):
multiplier = 4
if (result_resolution == "OSR16"):
multiplier = 16
if (result_resolution == "OSR64"):
multiplier = 64
if (result_resolution == "OSR256"):
multiplier = 256
conv_time = (prescaler * 23.0 * 1000000.0 * multiplier) / clock
afecSym_CONV_TIME.setLabel("**** Conversion Time is "+str(conv_time)+" us ****")
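# Worked example (illustrative): with a 150 MHz master clock, PRESCAL = 14
# (prescaler = 15) and resolution NO_AVERAGE (multiplier = 1), the conversion time
# is (15 * 23 * 1e6 * 1) / 150e6 = 2.3 us per sample.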
def afecUserSeqVisible(afecSym_SEQ1R_USCHLocal, event):
for channelID in range(0, 12):
if (event["value"] == True):
afecSym_SEQ1R_USCH[channelID].setVisible(True)
else:
afecSym_SEQ1R_USCH[channelID].setVisible(False)
def afecCHNameVisible(symbol, event):
id = []
#split as per "_" to get the channel number
id = symbol.getID().split("_")
channelID = int(id[1])
if (event["value"] == True):
afecSym_CH_NAME[channelID].setVisible(True)
else:
afecSym_CH_NAME[channelID].setVisible(False)
def afecCHPosInpVisible(symbol, event):
id = []
#split as per "_" to get the channel number
id = symbol.getID().split("_")
channelID = int(id[1])
if (event["value"] == True):
afecSym_CH_PositiveInput[channelID].setVisible(True)
else:
afecSym_CH_PositiveInput[channelID].setVisible(False)
def afecCHNegInpVisible(symbol, event):
id = []
#split as per "_" to get the channel number
id = symbol.getID().split("_")
channelID = int(id[1])
if (event["value"] == True):
afecSym_CH_NegativeInput[channelID].setVisible(True)
else:
afecSym_CH_NegativeInput[channelID].setVisible(False)
def afecCHDualVisible(symbol, event):
id = []
#split as per "_" to get the channel number
id = symbol.getID().split("_")
channelID = int(id[1])
if (event["value"] == True):
afecSym_CH_SHMR_DUAL[channelID].setVisible(True)
else:
afecSym_CH_SHMR_DUAL[channelID].setVisible(False)
def afecCHDualCommentVisible(symbol, event):
id = []
#split as per "_" to get the channel number
id = symbol.getID().split("_")
channelID = int(id[1])
if (event["value"] == True):
afecSym_CH_DUAL_CHANNEL[channelID].setVisible(True)
else:
afecSym_CH_DUAL_CHANNEL[channelID].setVisible(False)
def afecCHGainVisible(symbol, event):
id = []
#split as per "_" to get the channel number
id = symbol.getID().split("_")
channelID = int(id[1])
if (event["value"] == True):
afecSym_CH_CGR_GAIN[channelID].setVisible(True)
else:
afecSym_CH_CGR_GAIN[channelID].setVisible(False)
def afecCHOffsetVisible(symbol, event):
id = []
#split as per "_" to get the channel number
id = symbol.getID().split("_")
channelID = int(id[1])
if (event["value"] == True):
afecSym_CH_COCR_AOFF[channelID].setVisible(True)
else:
afecSym_CH_COCR_AOFF[channelID].setVisible(False)
def afecCHInterruptVisible(symbol, event):
id = []
#split as per "_" to get the channel number
id = symbol.getID().split("_")
channelID = int(id[1])
if (event["value"] == True):
afecSym_CH_IER_EOC[channelID].setVisible(True)
else:
afecSym_CH_IER_EOC[channelID].setVisible(False)
def afecCHEnable(symbol, event):
id = []
#split as per "_" to get the channel number
id = symbol.getID().split("_")
channelID = int(id[1])
#for dual mode, enable corresponding channel pair
if(event["id"].find("_SHMR_DUAL") > 0):
if(event["value"] == True):
afecSym_CH_CHER[channelID].clearValue()
afecSym_CH_CHER[channelID].setValue(True, 1)
else:
afecSym_CH_CHER[channelID].clearValue()
afecSym_CH_CHER[channelID].setValue(False, 1)
#for diff mode, hide next odd channel
if(event["id"].find("_NEG_INP") > 0):
if(event["value"] == "AN"+str(channelID)):
afecCHMenu[channelID].setVisible(False)
else:
afecCHMenu[channelID].setVisible(True)
def afecTriggerVisible(symbol, event):
symObj = event["symbol"]
if(symObj.getSelectedKey() == "HW_TRIGGER"):
symbol.setVisible(True)
else:
symbol.setVisible(False)
###################################################################################################
########################### Dependency #################################
###################################################################################################
global lastAdcModuleU, lastAdcChU
lastAdcChU = 0
global lastAdcModuleV, lastAdcChV
lastAdcChV = 6
global lastAdcModulePot, lastADCChPot
lastADCChPot = 10
global lastAdcModuleVdc, lastADCChVdc
lastADCChVdc = 7
lastAdcModuleU = lastAdcModuleV = lastAdcModulePot = lastAdcModuleVdc = 0
def onAttachmentConnected(source, target):
localComponent = source["component"]
remoteComponent = target["component"]
remoteID = remoteComponent.getID()
connectID = source["id"]
targetID = target["id"]
def onAttachmentDisconnected(source, target):
localComponent = source["component"]
remoteComponent = target["component"]
remoteID = remoteComponent.getID()
connectID = source["id"]
targetID = target["id"]
resetChannelsForPMSMFOC()
# Disable ADC channels and interrupt
def resetChannelsForPMSMFOC():
global lastAdcModuleU,lastAdcChU
global lastAdcModuleV, lastAdcChV
global lastAdcModulePot, lastADCChPot
global lastAdcModuleVdc, lastADCChVdc
component = str(afecInstanceName.getValue()).lower()
instanceNum = int(filter(str.isdigit,str(afecInstanceName.getValue())))
if (int(lastAdcModuleU) == instanceNum):
Database.setSymbolValue(component, "AFEC_"+str(lastAdcChU)+"_CHER", False)
Database.setSymbolValue(component, "AFEC"+str((lastAdcChU))+"_IER_EOC", False)
if (int(lastAdcModuleV) == instanceNum):
Database.setSymbolValue(component, "AFEC_"+str(lastAdcChV)+"_CHER", False)
if (int(lastAdcModulePot) == instanceNum):
Database.setSymbolValue(component, "AFEC_"+str(lastADCChPot)+"_CHER", False)
if (int(lastAdcModuleVdc) == instanceNum):
Database.setSymbolValue(component, "AFEC_"+str(lastADCChVdc)+"_CHER", False)
Database.setSymbolValue(component, "AFEC_CONV_MODE", 0)
def handleMessage(messageID, args):
dict = {}
if (messageID == "PMSM_FOC_ADC_CH_CONF"):
component = str(afecInstanceName.getValue()).lower()
instanceNum = int(filter(str.isdigit,str(afecInstanceName.getValue())))
dict['ADC_MAX_CH'] = Database.getSymbolValue(component, "AFEC_NUM_CHANNELS")
dict['ADC_MAX_MODULES'] = Database.getSymbolValue(component, "AFEC_NUM_MODULES")
#Change ADC channels if they are changed in the PMSM_FOC
resetChannelsForPMSMFOC()
AdcConfigForPMSMFOC(component, instanceNum, args)
return dict
# ADC configurations needed for PMSM_FOC component
def AdcConfigForPMSMFOC(component, instanceNum, args):
global afecSym_MR_TRGSEL_VALUE
global lastAdcModuleU, lastAdcChU
global lastAdcModuleV, lastAdcChV
global lastAdcModulePot, lastADCChPot
global lastAdcModuleVdc, lastADCChVdc
lastAdcModuleU = phUModule = args['PHASE_U']
lastAdcChU = phUCh = args['PHASE_U_CH']
lastAdcModuleV = phVModule = args['PHASE_V']
lastAdcChV = phVCh = args['PHASE_V_CH']
lastAdcModuleVdc = phDCBusModule = args['VDC']
lastADCChVdc = phDCBusCh = args['VDC_CH']
lastAdcModulePot = phPotModule = args['POT']
lastADCChPot = phPotCh = args['POT_CH']
resolution = args['RESOLUTION']
trigger = args['TRIGGER']
# Find the key index of the trigger as a PWM channel as per TRIGGER argument
count = afecSym_MR_TRGSEL_VALUE.getKeyCount()
triggerIndex = 0
for i in range(0,count):
if ("PWM"+str(trigger) in afecSym_MR_TRGSEL_VALUE.getKeyDescription(i) ):
triggerIndex = i
break
#find the key index of the RESOLUTION
count = afecSym_EMR_RES_VALUE.getKeyCount()
resIndex = 0
for i in range(0,count):
if (str(resolution) in afecSym_EMR_RES_VALUE.getKeyDescription(i) ):
resIndex = i
break
# Enable ADC modules, Ph U interrupt
if (int(phUModule) == instanceNum):
Database.setSymbolValue(component, "AFEC_"+str(phUCh)+"_CHER", True)
Database.setSymbolValue(component, "AFEC_"+str((phUCh))+"_IER_EOC", True)
#dual sample and hold for ph U
if (phUCh < (len(channel)/2)):
Database.setSymbolValue(component, "AFEC_"+str(phUCh)+"_SHMR_DUAL", True)
Database.setSymbolValue(component, "ADC_CH_PHASE_U", "AFEC_CH"+str(phUCh))
if (int(phVModule) == instanceNum):
Database.setSymbolValue(component, "AFEC_"+str(phVCh)+"_CHER", True)
Database.setSymbolValue(component, "ADC_CH_PHASE_V", "AFEC_CH"+str(phVCh))
if (int(phPotModule) == instanceNum):
Database.setSymbolValue(component, "AFEC_"+str(phPotCh)+"_CHER", True)
Database.setSymbolValue(component, "ADC_CH_POT", "AFEC_CH"+str(phPotCh))
if (int(phDCBusModule) == instanceNum):
Database.setSymbolValue(component, "AFEC_"+str(phDCBusCh)+"_CHER", True)
Database.setSymbolValue(component, "ADC_CH_VDC_BUS", "AFEC_CH"+str(phDCBusCh))
if (int(phUModule) == instanceNum):
Database.setSymbolValue(component, "AFEC_CONV_MODE", 2) #HW trigger
Database.setSymbolValue(component, "AFEC_MR_TRGSEL_VALUE", triggerIndex) #trigger as PWM Phase U
Database.setSymbolValue(component, "AFEC_EMR_RES_VALUE", resIndex) #resolution
###################################################################################################
########################### Component #################################
###################################################################################################
def instantiateComponent(afecComponent):
global afecInstanceName
global channel
afecInstanceName = afecComponent.createStringSymbol("AFEC_INSTANCE_NAME", None)
afecInstanceName.setVisible(False)
afecInstanceName.setDefaultValue(afecComponent.getID().upper())
Log.writeInfoMessage("Running " + afecInstanceName.getValue())
#------------------------- ATDF Read -------------------------------------
packageName = str(Database.getSymbolValue("core", "COMPONENT_PACKAGE"))
availablePins = [] # array to save available pins
channel = ["False", "False", "False", "False", "False", "False", "False", "False", "False", "False", "False", "False"] #array to save available channels
afecChannelsValues = [] #array used for combo symbol
afecChannelsValues.append("NONE")
pinout = "LQFP144"
val = ATDF.getNode("/avr-tools-device-file/variants")
children = val.getChildren()
for index in range(0, len(children)):
if packageName in children[index].getAttribute("package"):
pinout = children[index].getAttribute("pinout")
children = []
val = ATDF.getNode("/avr-tools-device-file/pinouts/pinout@[name=\""+str(pinout)+"\"]")
children = val.getChildren()
for pad in range(0, len(children)):
availablePins.append(children[pad].getAttribute("pad"))
afec_signals | |
"""Utility functions to support DU437 activities.
Authors
-------
<NAME>
"""
import copy
import os
from astropy.table import Table
from astropy.time import Time
import numpy as np
import pylab as pl
from .. import gaia_astrometry, pystrometry
try:
from helpers.table_helpers import plot_columns_simple
except ImportError:
print('helpers.table_helpers not available')
def apply_elimination_cuts(table, selection_cuts, parameter_mapping):
"""Eliminate rows in astropy table based on input parameters.
Parameters
----------
table
selection_cuts
parameter_mapping
Returns
-------
Examples
--------
selection_cuts = OrderedDict({'period_1': {'operator': '<', 'threshold': 1000.},
'period_2': {'operator': '>', 'threshold': 50.},
'm2sini': {'operator': '>', 'threshold': 10.},
})
parameter_mapping = {'period': 'PER',
'ecc': 'ECC',
'm2sini': 'MSINI',
'omega': 'OM',
'plx': 'PAR',
}
"""
string_repr = ''
for field, parameters in selection_cuts.items():
if parameters['operator'] == '>':
remove_index = np.where(table[parameter_mapping[field]] > parameters['threshold'])[0]
elif parameters['operator'] == '<':
remove_index = np.where(table[parameter_mapping[field]] < parameters['threshold'])[0]
table.remove_rows(remove_index)
string_repr += '{:>10} {} {:>6}\n'.format(field, parameters['operator'],
parameters['threshold'])
return table, string_repr
def apply_selection_cuts(table, selection_cuts, parameter_mapping):
"""
Parameters
----------
table
selection_cuts
parameter_mapping
Returns
-------
"""
string_repr = ''
for field, parameters in selection_cuts.items():
field = field.split('_')[0]
if parameters['operator'] == '>':
remove_index = np.where(table[parameter_mapping[field]] < parameters['threshold'])[0]
elif parameters['operator'] == '<':
remove_index = np.where(table[parameter_mapping[field]] > parameters['threshold'])[0]
table.remove_rows(remove_index)
string_repr += '{:>10} {} {:>6}\n'.format(field, parameters['operator'],
parameters['threshold'])
return table, string_repr
def period_phase_error(period_day_fit, period_day_truth, time_span_day):
"""Return the period phase error as defined in BH-011."""
return np.abs((period_day_fit - period_day_truth)/period_day_truth * time_span_day/period_day_truth)
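# Worked example (hypothetical numbers): for period_day_fit=365., period_day_truth=360. and
# time_span_day=1000., the phase error is |(365-360)/360 * 1000/360| ~ 0.039, i.e. well below
# the default recovery threshold of 0.2 used in make_comparison_figures below.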
def make_comparison_figures(table, parameter_mapping, mapping_dr3id_to_starname,
highlight_index=None, description_str='',
save_plot=True, plot_dir=os.getcwd(), time_span_day=1000.,
period_phase_error_threshold=0.2):
"""
Parameters
----------
table
parameter_mapping
highlight_index
description_str
save_plot
plot_dir
Returns
-------
"""
# also save table with discrepancies
discrepancy_table = Table()
discrepancy_table['sourceId'] = table['sourceId']
discrepancy_table['Name'] = table['Name']
discrepancy_table['Name_dedreq'] = table['Name_dedreq-695']
discrepancy_table['m2_mjup'] = table['{}_m2_mjup'.format('p1')]
for miks_name, mapped_name in parameter_mapping.items():
if miks_name not in 'plx'.split():
miks_field = 'p1_{}'.format(miks_name)
else:
miks_field = '{}'.format(miks_name)
if miks_field not in table.colnames:
continue
pl.figure(figsize=(8, 8), facecolor='w', edgecolor='k')
pl.plot(table[mapped_name], table[miks_field], 'bo')
discrepancy_table[miks_field] = table[miks_field]
discrepancy_table[mapped_name] = table[mapped_name]
discrepancy_table['{}_discrepancy'.format(miks_field)] = table[mapped_name] - table[miks_field]
# discrepancy in percent
# discrepancy_table['{}_discr_rel'.format(miks_field)] = 100.*np.abs(table[mapped_name] - table[miks_field])/np.abs(table[miks_field])
discrepancy_table['{}_discr_rel'.format(miks_field)] = 100.*np.abs(table[mapped_name] - table[miks_field])/np.abs(table[mapped_name])
if highlight_index is not None:
pl.plot(table[mapped_name][highlight_index],
table[miks_field][highlight_index], 'ro', ms=15, mfc='none')
pl.axis('equal')
xymax = np.max(np.array([pl.xlim()[1], pl.ylim()[1]]))
pl.plot([0, xymax], [0, xymax], 'k--')
pl.xlabel('{} ({})'.format(mapped_name, table.meta['comparison_to']))
pl.ylabel('{} (DU437)'.format(miks_field))
pl.title('{} sources from {}'.format(len(table), table.meta['comparison_to']))
pl.text(0.01, 0.99, description_str, horizontalalignment='left', verticalalignment='top',
transform=pl.gca().transAxes)
pl.show()
if save_plot:
figure_name = os.path.join(plot_dir, '{}_comparison_to_{}.pdf'.format(miks_field, table.meta['comparison_to']))
pl.savefig(figure_name, transparent=True, bbox_inches='tight', pad_inches=0)
# period phase error:
miks_name = 'period_day'
miks_field = 'p1_{}'.format(miks_name)
mapped_name = parameter_mapping[miks_name]
period_day_fit = table[miks_field]
period_day_truth = table[mapped_name]
discrepancy_table['period_phase_error'] = period_phase_error(period_day_fit, period_day_truth, time_span_day)
n_period_recovered = len(np.where(np.abs(discrepancy_table['period_phase_error'])<period_phase_error_threshold)[0])
pl.figure(figsize=(8, 4), facecolor='w', edgecolor='k')
pl.plot(period_day_truth, discrepancy_table['period_phase_error'], 'k.')
pl.ylim((-1,1))
pl.fill_between(pl.xlim(), period_phase_error_threshold, y2=-period_phase_error_threshold, color='g', alpha=0.5)
pl.xlabel('Truth period (day)')
pl.ylabel('Period phase error')
description_str_2 = '{}/{} = {:2.1f}% within +/- {:2.1f}\n'.format(n_period_recovered, len(discrepancy_table), n_period_recovered/len(discrepancy_table)*100, period_phase_error_threshold)+description_str
pl.text(0.01, 0.99, description_str_2, horizontalalignment='left', verticalalignment='top',
transform=pl.gca().transAxes)
pl.show()
if save_plot:
figure_name = os.path.join(plot_dir, 'period_phase_error_{}.pdf'.format(table.meta['comparison_to']))
pl.savefig(figure_name, transparent=True, bbox_inches='tight', pad_inches=0)
# pl.close('all')
threshold = {'delta_chi2': {'value': 1000, 'operator': '>'},
'f_test_probability': {'value': 1e-100, 'operator': '<'}
}
for miks_field in ['meritFunction', 'chi2WithPlanet', 'chi2SingleStar', 'delta_chi2', 'f_test_probability', 'p1_estSNratio', 'p1_period_snr']:
pl.figure(figsize=(8, 4), facecolor='w', edgecolor='k')
index = np.where(discrepancy_table['period_phase_error'] < 100)[0]
pl.loglog(discrepancy_table['period_phase_error'][index], table[miks_field][index], 'bo', alpha=0.7)
# pl.ylim((-1,1))
# pl.fill_between(pl.xlim(), period_phase_error_threshold, y2=-period_phase_error_threshold, color='g', alpha=0.5)
pl.xlabel('Period phase error')
pl.ylabel(miks_field)
n_passed_threshold = None
if miks_field in ['delta_chi2', 'f_test_probability']:
value = threshold[miks_field]['value']
operator = threshold[miks_field]['operator']
if operator == '>':
n_passed_threshold = len(np.where(table[miks_field] > value)[0])
pl.fill_between(pl.xlim(), value, y2=pl.ylim()[1], color='g', alpha=0.5)
elif operator == '<':
n_passed_threshold = len(np.where(table[miks_field] < value)[0])
pl.fill_between(pl.xlim(), value, y2=pl.ylim()[0], color='g', alpha=0.5)
pl.title('{} of {} systems shown. {} pass threshold'.format(len(index), len(table), n_passed_threshold))
pl.text(0.01, 0.99, description_str, horizontalalignment='left', verticalalignment='top',
transform=pl.gca().transAxes)
pl.show()
if save_plot:
figure_name = os.path.join(plot_dir, 'period_phase_error_vs_{}_{}.pdf'.format(miks_field, table.meta['comparison_to']))
pl.savefig(figure_name, transparent=True, bbox_inches='tight', pad_inches=0)
if 1:
formats = {}
for key in discrepancy_table.colnames:#['angular_distance', 'phot_g_mean_mag', 'parallax', 'pmra', 'pmdec']:
if 'discr' in key:
formats[key] = '%2.1f'
elif 'm2' in key:
formats[key] = '%2.1f'
# else:
# formats[key] = '%2.3f'
discrepancy_table_file = os.path.join(plot_dir, 'comparison_to_{}.csv'.format(table.meta['comparison_to']))
if 'p1_period_discr_rel' in discrepancy_table.colnames:
discrepancy_table.sort('p1_period_discr_rel')
discrepancy_table.write(discrepancy_table_file, format='ascii.fixed_width', delimiter=',',
bookend=False, overwrite=True, formats=formats)
try:
pl.figure(figsize=(8, 8), facecolor='w', edgecolor='k')
# pl.plot(table['p1_a1'], np.abs(table['p1_period'] - table[parameter_mapping['period']]), 'bo')
# pl.xlabel('Fitted semimajor axis (mas)')
pl.plot(table['a1_mas_minimum'], np.abs(table['p1_period'] - table[parameter_mapping['period']]), 'bo')
pl.xlabel('Expected semimajor axis (mas)')
pl.ylabel('Period error (day)')
pl.show()
if save_plot:
figure_name = os.path.join(plot_dir, 'period_error_vs_a1.pdf')
pl.savefig(figure_name, transparent=True, bbox_inches='tight', pad_inches=0)
except KeyError:
pass
return discrepancy_table
def get_gaia_iad(source_id, t_ref_jd, epoch_data_dir, verbose=False):
"""Return Gaia Epoch Astrometry Data.
Parameters
----------
source_id
t_ref_jd
epoch_data_dir
verbose
Returns
-------
"""
t_ref_mjd = Time(t_ref_jd, format='jd').mjd
iad = gaia_astrometry.GaiaIad(source_id, epoch_data_dir)
iad.load_data()
iad_mjd = Time(iad.epoch_data[iad.time_column] * 365.25 + t_ref_jd, format='jd').mjd
iad.epoch_data['MJD'] = iad_mjd
iad.epoch_data_for_prototype = Table()
iad.epoch_data_for_prototype['t-t_ref'] = iad.epoch_data[iad.time_column]
for key in ['spsi_obs', 'cpsi_obs', 'ppfact_obs', 'da_mas_obs', 'errda_mas_obs', 'transitId',
'direction_AL0_AC1', 'OB']:
iad.epoch_data_for_prototype[key] = iad.epoch_data[key]
if key in ['spsi_obs', 'cpsi_obs']:
iad.epoch_data_for_prototype['t{}'.format(key)] = iad.epoch_data_for_prototype[
't-t_ref'] * \
iad.epoch_data_for_prototype[key]
iad.epoch_data = copy.deepcopy(iad.epoch_data_for_prototype)
iad.time_column = 't-t_ref'
iad.epoch_data['MJD'] = iad_mjd
iad.t_ref_mjd = t_ref_mjd
iad.scan_angle_definition = 'gaia'
if verbose:
iad.epoch_data.pprint()
return iad
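# Illustrative call (source id, reference epoch and directory are hypothetical):
# iad = get_gaia_iad(1234567890123456789, t_ref_jd=2457206.375, epoch_data_dir='/data/gaia_iad', verbose=True)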
def make_orbit_system(selected_systems, index, scan_angle_definition, t_ref_mjd,
m1_MS=1., degenerate_orbit=False,
verbose=False):
"""Return an OrbitSystem for the specified input table row.
Parameters
----------
selected_systems
index
scan_angle_definition
t_ref_mjd
m1_MS
degenerate_orbit
verbose
Returns
-------
"""
alpha_mas = selected_systems['p1_a1_mas'][index]
absolute_parallax_mas = selected_systems['plx_mas'][index]
a_m = pystrometry.convert_from_angular_to_linear(alpha_mas, absolute_parallax_mas)
P_day = selected_systems['p1_period_day'][index]
m2_kg = pystrometry.pjGet_m2(m1_MS*pystrometry.MS_kg, a_m, P_day)
m2_MJ = m2_kg/pystrometry.MJ_kg
attribute_dict = {
'offset_alphastar_mas': selected_systems['alphaStarOffset_mas'][index],
'offset_delta_mas': selected_systems['deltaOffset_mas'][index],
# 'RA_deg': 0.,
# 'DE_deg': 0.,
'RA_deg': selected_systems['alpha0_deg'][index],
'DE_deg': selected_systems['delta0_deg'][index],
# 'plx_mas': selected_systems['plx'][index],
'absolute_plx_mas': selected_systems['plx_mas'][index],
'muRA_mas': selected_systems['muAlphaStar_masPyr'][index],
'muDE_mas': selected_systems['muDelta_masPyr'][index],
'P_day': selected_systems['p1_period_day'][index],
'ecc': selected_systems['p1_ecc'][index],
'omega_deg': selected_systems['p1_omega_deg'][index],
'OMEGA_deg': selected_systems['p1_OMEGA_deg'][index],
'i_deg': selected_systems['p1_incl_deg'][index],
'a_mas': selected_systems['p1_a1_mas'][index],
'Tp_day': t_ref_mjd + selected_systems['p1_Tp_day-T0'][index],
'm1_MS': m1_MS,
'm2_MJ': m2_MJ,
'Tref_MJD': t_ref_mjd,
'scan_angle_definition': scan_angle_definition,
}
if degenerate_orbit:
attribute_dict['omega_deg'] += 180.
attribute_dict['OMEGA_deg'] += 180.
orbit = pystrometry.OrbitSystem(attribute_dict=attribute_dict)
if verbose:
print(orbit)
return orbit
def make_astrometric_orbit_plotter(selected_systems, index, epoch_data_dir, degenerate_orbit=False,
verbose=False, m1_MS=1.):
"""Return AstrometricOrbitPlotter object
Parameters
----------
selected_systems
index
epoch_data_dir
degenerate_orbit
verbose
m1_MS
Returns
-------
"""
source_id = selected_systems['sourceId'][index]
t_ref_jd = selected_systems['T0_JD'][index]
iad = get_gaia_iad(source_id, t_ref_jd, epoch_data_dir, verbose=verbose)
orbit = make_orbit_system(selected_systems, index, iad.scan_angle_definition, iad.t_ref_mjd, m1_MS=m1_MS,
degenerate_orbit=degenerate_orbit, verbose=verbose)
# set coeffMatrix in orbit object
ppm_signal_mas = orbit.ppm(iad.epoch_data['MJD'], psi_deg=np.rad2deg(
np.arctan2(iad.epoch_data['spsi_obs'], iad.epoch_data['cpsi_obs'])),
offsetRA_mas=selected_systems['alphaStarOffset_mas'][index],
offsetDE_mas=selected_systems['deltaOffset_mas'][index],
externalParallaxFactors=iad.epoch_data['ppfact_obs'], verbose=True)
# 1/0
plot_dict = {}
plot_dict['model_parameters'] = {0: orbit.attribute_dict}
plot_dict['linear_coefficients'] = {'matrix': orbit.coeffMatrix} # dict ('matrix', 'table')
plot_dict['data_type'] = '1d'
if hasattr(iad, 'xi'):
plot_dict['data_type'] = 'gaia_2d'
else:
plot_dict['data_type'] = '1d'
plot_dict['scan_angle_definition'] = iad.scan_angle_definition
for key in iad.epoch_data.colnames:
if '_obs' in key:
new_key = key.replace('_obs', '')
if new_key == 'errda_mas':
new_key = 'sigma_da_mas'
iad.epoch_data[new_key] = iad.epoch_data[key]
plot_dict['data'] = iad
if verbose:
iad.epoch_data.pprint()
axp = pystrometry.AstrometricOrbitPlotter(plot_dict)
# keep a reference to the orbit system so downstream helpers (e.g. the RV plot) can reuse it
axp.orbit_system = orbit
# axp.print_residual_statistics()
return axp
def make_orbit_figures(selected_systems, index, epoch_data_dir, mapping_dr3id_to_starname=None,
plot_dir=os.path.expanduser('~'),
m1_MS=1., rv=None, show_plot=True, degenerate_orbit=False, verbose=False):
axp = make_astrometric_orbit_plotter(selected_systems, index, epoch_data_dir,
degenerate_orbit=degenerate_orbit, verbose=verbose, m1_MS=m1_MS)
iad = axp.data
source_id = selected_systems['sourceId'][index]
n_curve = 1500
timestamps_curve_2d = np.linspace(np.min(iad.epoch_data['MJD']), np.max(iad.epoch_data['MJD']), n_curve)
axp.t_curve_MJD = timestamps_curve_2d
if 'phot_g_mean_mag' in selected_systems.colnames:
mag_str = ' $G$={:2.1f}'.format(selected_systems['phot_g_mean_mag'][index])
else:
mag_str = ''
if mapping_dr3id_to_starname is not None:
axp.title = 'Gaia DR3 {} ({}{})'.format(source_id, mapping_dr3id_to_starname[source_id], mag_str)
name_seed = 'DR3_{}_{}'.format(source_id, mapping_dr3id_to_starname[source_id])
else:
name_seed = 'DR3_{}'.format(source_id)
argument_dict = {'plot_dir': plot_dir, 'ppm_panel': True, 'frame_residual_panel': True,
'orbit_only_panel': False, 'ppm_description': 'default', 'epoch_omc_description': 'default',
'orbit_description': 'default', 'arrow_offset_x': +100, 'arrow_offset_y': +100,
'name_seed': name_seed, 'scan_angle_definition': iad.scan_angle_definition}
argument_dict['save_plot'] = True
argument_dict['omc_panel'] = True
argument_dict['orbit_only_panel'] = False
# argument_dict['make_condensed_summary_figure'] = True
# argument_dict['make_xy_residual_figure'] = True
argument_dict['make_condensed_summary_figure'] = False
argument_dict['make_xy_residual_figure'] = False
argument_dict['make_1d_overview_figure'] = True
argument_dict['excess_noise'] = selected_systems['excessNoise'][index]
argument_dict['merit_function'] = selected_systems['meritFunction'][index]
if show_plot:
axp.plot(argument_dict=argument_dict)
if rv is not None:
from ..pystrometry import plot_rv_data
orbit = axp.orbit_system
my_orbit = copy.deepcopy(orbit)
# my_orbit.m2_MJ = orbit.m2_MJ/10.
plot_rv_data(rv, orbit_system=my_orbit, n_orbit=np.ceil(np.ptp(rv['MJD'])/orbit.P_day)+1)
pl.show()
return axp
def make_orbit_figure(selected_systems, index, epoch_data_dir, mapping_dr3id_to_starname=None,
plot_dir=os.path.expanduser('~'),
m1_MS=1., rv=None, show_plot=True, degenerate_orbit=False, epoch_data_suffix=None):
source_id = selected_systems['sourceId'][index]
t_ref_jd = selected_systems['T0_JD'][index]
t_ref_mjd = Time(t_ref_jd, format='jd').mjd
if epoch_data_suffix is None:
iad = gaia_astrometry.GaiaIad(source_id, epoch_data_dir)
else:
iad = gaia_astrometry.GaiaIad(source_id, epoch_data_dir, epoch_data_suffix=epoch_data_suffix)
iad.load_data(filter_on_frame_uncertainty=True)
# pl.close('all')
# pl.figure()
# pl.hist(iad.epoch_data['errda_mas_obs'])
# # pl.show()
# pl.savefig('test.png')
iad_mjd = Time(iad.epoch_data[iad.time_column]*365.25+t_ref_jd, format='jd').mjd
iad.epoch_data['MJD'] = iad_mjd
iad.epoch_data_for_prototype = Table()
iad.epoch_data_for_prototype['t-t_ref'] = iad.epoch_data[iad.time_column]
for key in ['spsi_obs', 'cpsi_obs', 'ppfact_obs', 'da_mas_obs', 'errda_mas_obs', 'transitId',
'direction_AL0_AC1', 'OB']:
iad.epoch_data_for_prototype[key] = iad.epoch_data[key]
if key in ['spsi_obs', 'cpsi_obs']:
iad.epoch_data_for_prototype['t{}'.format(key)] = iad.epoch_data_for_prototype['t-t_ref'] \
* iad.epoch_data_for_prototype[key]
iad.epoch_data = copy.deepcopy(iad.epoch_data_for_prototype)
iad.time_column = 't-t_ref'
iad.epoch_data['MJD'] = iad_mjd
iad.t_ref_mjd = t_ref_mjd
scan_angle_definition = 'gaia'
# loop over every companion in system
from collections import OrderedDict
model_parameters = OrderedDict()
orbit_description = OrderedDict()
# for planet_index in np.arange(1, selected_systems['Nplanets'][index]+1):
for planet_index in np.arange(selected_systems['Nplanets'][index]):
planet_number = planet_index + 1
alpha_mas = selected_systems['p{}_a1_mas'.format(planet_number)][index]
absolute_parallax_mas = selected_systems['plx_mas'][index]
a_m = pystrometry.convert_from_angular_to_linear(alpha_mas, absolute_parallax_mas)
P_day = selected_systems['p{}_period_day'.format(planet_number)][index]
m2_kg = pystrometry.pjGet_m2(m1_MS*pystrometry.MS_kg, a_m, P_day)
m2_MJ = m2_kg/pystrometry.MJ_kg
attribute_dict = {
'offset_alphastar_mas': selected_systems['alphaStarOffset_mas'][index],
'offset_delta_mas': selected_systems['deltaOffset_mas'][index],
# 'RA_deg': 0.,
# 'DE_deg': 0.,
'RA_deg': selected_systems['alpha0_deg'][index],
'DE_deg': selected_systems['delta0_deg'][index],
# 'plx_mas': selected_systems['plx'][index],
'absolute_plx_mas': selected_systems['plx_mas'][index],
'muRA_mas': selected_systems['muAlphaStar_masPyr'][index],
'muDE_mas': | |
# -*- coding: utf-8 -*-
"""
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = '<NAME>'
__date__ = '2021-11-04'
__copyright__ = '(C) 2021, <NAME>'
from qgis.core import *
from qgis.gui import *
from lftools.geocapt.topogeo import str2HTML
from lftools.geocapt.imgs import Imgs
from qgis.core import *
from qgis.gui import *
import numpy as np
from numpy.linalg import norm, det, inv, solve
# Translation
LOC = QgsApplication.locale()[:2]
def tr(*string):
# Translate to Portuguese: arg[0] - English (original), arg[1] - Portuguese
if LOC == 'pt':
if len(string) == 2:
return string[1]
else:
return string[0]
else:
return string[0]
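# Illustrative usage: tr('Layer', 'Camada') returns 'Camada' when the QGIS locale starts with 'pt',
# and 'Layer' otherwise; with a single argument the English string is always returned.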
# Validation of the homologous points (georeferencing vectors)
def ValidacaoVetores(vetores, metodo):
# number of features required per mathematical model
cont = vetores.featureCount()
sinal = True
if metodo == 0:
if cont < 1:
raise QgsProcessingException(tr('It takes 1 or more vectors to perform this transformation!', 'É necessario 1 ou mais vetores para realizar essa transformação!'))
elif metodo == 1:
if cont < 2:
raise QgsProcessingException(tr('It takes 2 or more vectors to perform this transformation!', 'É necessario 2 ou mais vetores para realizar essa transformação!'))
elif metodo == 2:
if cont < 3:
raise QgsProcessingException(tr('It takes 3 or more vectors to perform this transformation!', 'É necessario 3 ou mais vetores para realizar essa transformação!'))
# each feature (vector) must have exactly 2 distinct vertices
for feat in vetores.getFeatures():
geom = feat.geometry()
coord = geom.asPolyline()
if len(coord) != 2:
raise QgsProcessingException(tr('The vector lines must be created with exactly two points!', 'As linhas de vetores devem ter exatamente 2 vértices!'))
return sinal
# Coordinate transformation of geometries using a transformation function
def transformGeom2D(geom, CoordTransf):
if geom.type() == 0: #Point
if geom.isMultipart():
pnts = geom.asMultiPoint()
newPnts = []
for pnt in pnts:
x, y = CoordTransf(pnt)
newPnts += [QgsPointXY(x,y)]
newGeom = QgsGeometry.fromMultiPointXY(newPnts)
return newGeom
else:
pnt = geom.asPoint()
x, y = CoordTransf(pnt)
newPnt = QgsPointXY(x,y)
newGeom = QgsGeometry.fromPointXY(newPnt)
return newGeom
elif geom.type() == 1: #Line
if geom.isMultipart():
linhas = geom.asMultiPolyline()
newLines = []
for linha in linhas:
newLine =[]
for pnt in linha:
x, y = CoordTransf(pnt)
newLine += [QgsPointXY(x,y)]
newLines += [newLine]
newGeom = QgsGeometry.fromMultiPolylineXY(newLines)
return newGeom
else:
linha = geom.asPolyline()
newLine =[]
for pnt in linha:
x, y = CoordTransf(pnt)
newLine += [QgsPointXY(x,y)]
newGeom = QgsGeometry.fromPolylineXY(newLine)
return newGeom
elif geom.type() == 2: #Polygon
if geom.isMultipart():
poligonos = geom.asMultiPolygon()
newPolygons = []
for pol in poligonos:
newPol = []
for anel in pol:
newAnel = []
for pnt in anel:
x, y = CoordTransf(pnt)
newAnel += [QgsPointXY(x,y)]
newPol += [newAnel]
newPolygons += [newPol]
newGeom = QgsGeometry.fromMultiPolygonXY(newPolygons)
return newGeom
else:
pol = geom.asPolygon()
newPol = []
for anel in pol:
newAnel = []
for pnt in anel:
x, y = CoordTransf(pnt)
newAnel += [QgsPointXY(x,y)]
newPol += [newAnel]
newGeom = QgsGeometry.fromPolygonXY(newPol)
return newGeom
else:
return None
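# Illustrative usage (assuming CoordTransf was obtained from Ajust2D below):
# new_geom = transformGeom2D(feature.geometry(), CoordTransf)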
# 2D adjustment
def Ajust2D(vetores, metodo):
# Methods:
# 0 - translation, 1 - Helmert 2D, 2 - affine
# number of homologous points
n_pnts_homo = vetores.featureCount()
# minimum number of homologous points per method
if metodo == 0: # 0 - translation
min_pnts_homo = n_pnts_homo == 1
elif metodo == 1: # 1 - Helmert 2D
min_pnts_homo = n_pnts_homo == 2
elif metodo == 2: # 2 - affine
min_pnts_homo = n_pnts_homo == 3
A = [] # design matrix
L = [] # final coordinates
Lo = [] # initial coordinates
for feat in vetores.getFeatures():
geom = feat.geometry()
coord = geom.asPolyline()
xa = coord[0].x()
ya = coord[0].y()
xb = coord[1].x()
yb = coord[1].y()
if metodo == 0:
A += [[1, 0], [0, 1]]
elif metodo == 1:
A += [[xa, -ya, 1, 0], [ya, xa, 0, 1]]
elif metodo == 2:
A += [[xa, ya, 1, 0, 0, 0], [0, 0, 0, xa, ya, 1]]
L +=[[xb], [yb]]
Lo +=[[xa], [ya]]
A = np.matrix(A)
L = np.matrix(L)
Lo = np.matrix(Lo)
msg_erro = tr('Georeferencing vectors should not be aligned!', 'Os vetores de georreferenciamento não podem ter a mesma direção (alinhados)!')
if metodo == 0:
if min_pnts_homo:
X = L - Lo
else:
M = A.T*A
if det(M):
X = solve(M, A.T*(L - Lo))
else:
raise QgsProcessingException(msg_erro)
else:
if min_pnts_homo:
if det(A):
X = solve(A, L)
else:
raise QgsProcessingException(msg_erro)
else: # adjustment (least squares)
M = A.T*A
if det(M):
X = solve(M, A.T*L)
else:
raise QgsProcessingException(msg_erro)
# Parameters and transformation function
if metodo == 0:
a = X[0,0]
b = X[1,0]
def CoordTransf(pnt, a = a, b = b): # Translation
X, Y = pnt.x(), pnt.y()
Xt = X + a
Yt = Y + b
return (Xt, Yt)
def CoordInvTransf(pnt, a = a, b = b): # Translation (inverse)
X, Y = pnt.x(), pnt.y()
Xit = X - a
Yit = Y - b
return (Xit, Yit)
elif metodo == 1:
a = X[0,0]
b = X[1,0]
c = X[2,0]
d = X[3,0]
def CoordTransf(pnt, a = a, b = b, c = c, d = d): # Conformal transformation - Helmert 2D
'''
Xt = X*a - Y*b + c
Yt = X*b + Y*a + d
a = S*cos(alfa)
b = S*sin(alfa)
'''
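# Note (derived from the relations above): the scale factor is S = sqrt(a**2 + b**2) and the
# rotation angle is alfa = atan2(b, a); with a = 1 and b = 0 the mapping reduces to the pure
# translation (c, d).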
X, Y = pnt.x(), pnt.y()
Xt = X*a - Y*b + c
Yt = X*b + Y*a + d
return (Xt, Yt)
def CoordInvTransf(pnt, a = a, b = b, c = c, d = d): # Helmert 2D transformation (inverse)
X, Y = pnt.x(), pnt.y()
A = np.matrix([[a,-b],[b,a]])
B = np.matrix([[X-c],[Y-d]])
sol = solve(A,B)
Xit = sol[0,0]
Yit = sol[1,0]
return (Xit, Yit)
elif metodo == 2:
a = X[0,0]
b = X[1,0]
c = X[2,0]
d = X[3,0]
e = X[4,0]
f = X[5,0]
def CoordTransf(pnt, a = a, b = b, c = c, d = d, e = e, f = f): # Affine transformation
X, Y = pnt.x(), pnt.y()
Xt = X*a + Y*b + c
Yt = X*d + Y*e + f
return (Xt, Yt)
def CoordInvTransf(pnt, a = a, b = b, c = c, d = d, e = e, f = f): # Affine transformation (inverse)
X, Y = pnt.x(), pnt.y()
A = np.matrix([[a,b],[d,e]])
B = np.matrix([[X-c],[Y-f]])
sol = solve(A,B)
Xit = sol[0,0]
Yit = sol[1,0]
return (Xit, Yit)
# Residual computation
transf = []
for feat in vetores.getFeatures():
geom = feat.geometry()
coord = geom.asPolyline()
Xt, Yt = CoordTransf(coord[0])
transf += [[Xt],[Yt]]
X, Y = coord[-1].x(), coord[-1].y()
Vx = X - Xt
Vy = Y - Yt
# Variance-covariance matrices (VCM) of the parameters and of the adjusted coordinates
n = np.shape(A)[0] # number of observations
u = np.shape(A)[1] # number of parameters
if not min_pnts_homo:
# Residuals
V = L - np.matrix(transf)
# a posteriori variance factor
sigma2 = V.T*V/(n-u)
# precision of the adjusted points
# VCM of Xa (parameters)
SigmaXa = sigma2[0,0]*inv(A.T*A)
SigmaXa = np.matrix(SigmaXa).astype(float)
# VCM of La (adjusted observations)
SigmaLa = A*SigmaXa*A.T
SigmaLa = np.matrix(SigmaLa).astype(float)
# RMSE
RMSE = np.sqrt((V.T*V)[0,0]/n_pnts_homo)
else:
sigma2 = 0
RMSE = 0
# Lists of adjusted coordinates, precisions and residuals
COORD = []
PREC = []
DELTA = []
for index, feat in enumerate(vetores.getFeatures()):
X = transf[2*index][0]
Y = transf[2*index+1][0]
COORD += [QgsPointXY(X, Y)]
if not min_pnts_homo:
s_X = float(np.sqrt(SigmaLa[2*index,2*index]))
s_Y = float(np.sqrt(SigmaLa[2*index+1,2*index+1]))
PREC += [(s_X, s_Y)]
d_X = float(V[2*index][0])
d_Y = float(V[2*index+1][0])
DELTA += [(d_X, d_Y)]
else:
PREC += [(0, 0)]
DELTA += [(0, 0)]
if metodo == 0:
formula = '''<p class="MsoNormal" style="text-align: center;"
align="center"><i><span style="">'''+ tr('Translation',str2HTML('Translação')) + '''</span></i></p>
<p class="MsoNormal" style="text-align: center;"
align="center"><i><span style=""></span></i></p>
<p class="MsoNormal" style="text-align: center;"
align="center"><i><span style="">X
= </span></i><i><span style="">x
</span></i><i><span style="">+</span></i><i><span
style=""> a</span></i><i><span style="">
+</span></i><i><span style=""> Vx<o:p></o:p></span></i></p>
<div style="text-align: center;"><i><span style="">Y =
</span></i><i><span style="">y
</span></i><i><span style="">+</span></i><i><span
style=""> b</span></i><i><span style="">
+</span></i><i><span style=""> </span></i><i><span
style=""></span></i><i><span style="">Vy</span></i></div>
'''
elif metodo == 1:
formula = '''<p class="MsoNormal" style="text-align: center;"
align="center"><i><span style="">'''+ tr('Helmert 2D (Conformal)',str2HTML('Helmert 2D (Conforme)')) + '''</span></i></p>
<p class="MsoNormal" style="text-align: center;"
align="center"><i><span style="">X = </span></i><i><span
style="">ax
</span></i><i><span style="">-</span></i><i><span
style=""> by </span></i><i><span
style=""></span></i><i><span style="">+
c +</span></i><i><span style=""> | |
#!/usr/bin/env python
"""SeqAn documentation raw object representation.
This is the direct representation as it can be determined from the embedded
Doxygen-style comments without the interpretation of commands within clauses
and cross-linking.
"""
import textwrap
import dox_tokens
import raw_doc
class DoxFormatter(object):
"""Formatter for printing correctly indented and wrapped in Doxygen style.
"""
def __init__(self, width=77):
self.width = width
def formatCommand(self, name, text, leading=None):
"""RawReturn string with a formatted command.
The general format is "@$name $leading $text" where the text is wrapped
to the end of leading.
"""
if leading:
res = ['@', name, ' ', leading, ' ']
else:
res = ['@', name, ' ']
l = len(''.join(res))
indent = ' ' * l
wrapped_text = textwrap.wrap(text, self.width - l)
if wrapped_text:
res.append(wrapped_text[0])
for x in wrapped_text[1:]:
res += ['\n', indent, x]
return ''.join(res) + '\n'
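# Illustrative behaviour (names and text are hypothetical): formatCommand('brief', 'A long
# description that needs wrapping', 'MyEntry') produces a first line starting with
# '@brief MyEntry ' and wraps the remaining words onto new lines indented to that column.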
def formatParagraph(self, text):
"""Format paragraph."""
return '\n'.join(textwrap.wrap(text, self.width)) + '\n'
class RawText(object):
"""List of token with easy concatenation into a string.
This type is used for collecting lists of tokens.
@ivar tokens: The list of token objects.
"""
def __init__(self, tokens=[]):
self.tokens = list(tokens)
def append(self, token):
"""Append the token to the list of tokens.
@param token: The lexer.Token object to add.
@return: Nothing
"""
self.tokens.append(token)
@property
def empty(self):
"""RawReturns whether the token set is empty.
@return: Whether or not the token list is empty.
"""
return not bool(self.tokens)
@property
def text(self):
"""RawReturns the concatenated tokens' text.
@return: The concatenated tokens' text.
"""
return ''.join([x.val for x in self.tokens])
def __eq__(self, other):
if not hasattr(other, 'tokens'):
return False
return self.tokens == other.tokens
class RawDoc(object):
"""The documentation consists of a number of documentation objects.
@ivar entries List of RawEntry objects.
"""
def __init__(self):
self.entries = []
def merge(self, other_doc):
for e in other_doc.entries:
self.addEntry(e)
def addEntry(self, entry):
self.entries.append(entry)
def getFormatted(self, width=77):
"""Get formatted and normalized in dox format."""
formatter = DoxFormatter(width)
res = []
first = True
for entry in self.entries:
res.append(entry.getFormatted(formatter))
first = False
return '\n\n'.join(res)
class RawEntry(object):
"""One top-level entry of the documentation.
@ivar first_token The first token for this entry.
@ivar name The identifier of the entry.
@ivar title The title of the entry.
@ivar brief A string object with a brief summary of the entry.
@ivar body A RawBody object with the entry's documentation.
@ivar sees A list of RawSee objects.
@ivar command The name of the command starting the entry type.
"""
def __init__(self, first_token, briefs=[], command='<entry>'):
self.first_token = first_token
self.name = RawText()
self.title = RawText()
self.briefs = list(briefs)
self.body = RawBody()
self.sees = []
self.command = command
def addBrief(self, b):
self.briefs.append(b)
def addSee(self, see):
while see.text.tokens and see.text.tokens[-1].type in dox_tokens.WHITESPACE:
see.text.tokens.pop()
self.sees.append(see)
@classmethod
def entryTypes(cls):
"""RawReturns iterable with all entry types."""
res = ('concept', 'class', 'function', 'metafunction', 'page', 'enum', 'var',
'tag', 'defgroup', 'macro', 'enum_value')
return res
def addParagraph(self, p):
self.body.addParagraph(p)
def getFormatted(self, formatter):
"""Get formatted and normalized in dox format."""
res = []
if self.title.text:
res.append(formatter.formatCommand(self.command, self.title.text,
self.name.text))
else:
res.append(formatter.formatCommand(self.command, self.name.text))
if self.briefs:
res.append('\n')
for x in self.briefs:
res.append(x.getFormatted(formatter))
if not self.body.empty:
res += ['\n', self.body.getFormatted(formatter)]
if self.sees:
res.append('\n')
for x in self.sees:
res.append(x.getFormatted(formatter))
res.append('\n')
return ''.join(res)
class RawCodeEntry(RawEntry):
"""RawDoc for one code entry concept having a signature.
@ivar signatures A list of RawSignature objects.
"""
def __init__(self, first_token, briefs=[], command='<code entry>'):
RawEntry.__init__(self, first_token, briefs=briefs, command=command)
self.signatures = []
self.headerfiles = []
self.deprecation_msgs = []
self.notes = []
self.warnings = []
self.akas = []
self.internals = []
def addSignature(self, s):
self.signatures.append(s)
def addHeaderfile(self, h):
self.headerfiles.append(h)
def addDeprecationMsg(self, d):
self.deprecation_msgs.append(d)
def addNote(self, n):
self.notes.append(n)
def addWarning(self, w):
self.warnings.append(w)
def addAka(self, a):
self.akas.append(a)
def addInternal(self, i):
self.internals.append(i)
def getType(self):
return 'code'
def __str__(self):
res = RawEntry.__str__(self)
return res + '\n' + '\n'.join([' @signature %s' % x for x in self.signatures])
def getFormatted(self, formatter):
res = []
if self.title.text:
res.append(formatter.formatCommand(self.command, self.title.text,
self.name.text))
else:
res.append(formatter.formatCommand(self.command, self.name.text))
if self.headerfiles:
res.append('\n')
if self.headerfiles:
for x in self.headerfiles:
res.append(x.getFormatted(formatter))
if self.briefs:
res.append('\n')
for x in self.briefs:
res.append(x.getFormatted(formatter))
if self.deprecation_msgs or self.warnings or self.notes:
res.append('\n')
for x in self.deprecation_msgs:
res.append(x.getFormatted(formatter))
for x in self.warnings:
res.append(x.getFormatted(formatter))
for x in self.notes:
res.append(x.getFormatted(formatter))
if self.signatures:
res.append('\n')
for x in self.signatures:
res.append(x.getFormatted(formatter))
if not self.body.empty:
res.append('\n')
res += self.body.getFormatted(formatter)
if self.sees:
res.append('\n')
for x in self.sees:
res.append(x.getFormatted(formatter))
res.append('\n')
return ''.join(res)
class RawVariable(RawCodeEntry):
"""RawDoc for one variable constant.
@ivar type: The type of the variable as a RawText or None.
"""
def __init__(self, first_token, briefs=[]):
RawCodeEntry.__init__(self, first_token, briefs=briefs, command='var')
self.type = None
def getType(self):
if '::' in self.name.text:
return 'member_variable'
else:
return 'variable'
def getFormatted(self, formatter):
res = []
if self.type:
res.append(formatter.formatCommand(self.command, self.name.text + ';', self.type.text))
else:
res.append(formatter.formatCommand(self.command, self.name.text))
if self.headerfiles:
res.append('\n')
if self.headerfiles:
for x in self.headerfiles:
res.append(x.getFormatted(formatter))
if self.briefs:
res.append('\n')
for x in self.briefs:
res.append(x.getFormatted(formatter))
if self.deprecation_msgs or self.warnings or self.notes:
res.append('\n')
for x in self.deprecation_msgs:
res.append(x.getFormatted(formatter))
for x in self.warnings:
res.append(x.getFormatted(formatter))
for x in self.notes:
res.append(x.getFormatted(formatter))
if self.signatures:
res.append('\n')
for x in self.signatures:
res.append(x.getFormatted(formatter))
if not self.body.empty:
res.append('\n')
res += self.body.getFormatted(formatter)
if self.sees:
res.append('\n')
for x in self.sees:
res.append(x.getFormatted(formatter))
res.append('\n')
return ''.join(res)
class RawEnumValue(RawVariable):
"""RawDoc for one enum value.
@ivar type: The type of the variable as a RawText or None.
"""
def __init__(self, first_token, briefs=[]):
RawCodeEntry.__init__(self, first_token, briefs=briefs, command='val')
self.type = None
def getType(self):
return 'enum_value'
class RawTag(RawCodeEntry):
"""RawDoc for one tag."""
def __init__(self, first_token, briefs=[]):
RawCodeEntry.__init__(self, first_token, briefs=briefs, command='tag')
self.tparams = []
def addTParam(self, p):
self.tparams.append(p)
def getType(self):
if '#' in self.name.text:
return 'grouped_tag'
else:
return 'tag'
def getFormatted(self, formatter):
res = []
if self.title.text:
res.append(formatter.formatCommand(self.command, self.title.text,
self.name.text))
else:
res.append(formatter.formatCommand(self.command, self.name.text))
if self.headerfiles:
res.append('\n')
if self.headerfiles:
for x in self.headerfiles:
res.append(x.getFormatted(formatter))
if self.briefs:
res.append('\n')
for x in self.briefs:
res.append(x.getFormatted(formatter))
if self.deprecation_msgs or self.warnings or self.notes:
res.append('\n')
for x in self.deprecation_msgs:
res.append(x.getFormatted(formatter))
for x in self.warnings:
res.append(x.getFormatted(formatter))
for x in self.notes:
res.append(x.getFormatted(formatter))
if self.signatures:
res.append('\n')
for x in self.signatures:
res.append(x.getFormatted(formatter))
for x in self.tparams:
res.append(x.getFormatted(formatter))
if not self.body.empty:
res.append('\n')
res += self.body.getFormatted(formatter)
if self.sees:
res.append('\n')
for x in self.sees:
res.append(x.getFormatted(formatter))
res.append('\n')
return ''.join(res)
class RawConcept(RawCodeEntry):
"""RawDoc for one concept.
"""
def __init__(self, first_token, briefs=[]):
RawCodeEntry.__init__(self, first_token, briefs=briefs, command='concept')
self.extends = []
def addExtends(self, c):
self.extends.append(c)
def getType(self):
return 'concept'
def getFormatted(self, formatter):
res = []
if self.title.text:
res.append(formatter.formatCommand(self.command, self.title.text,
self.name.text))
else:
res.append(formatter.formatCommand(self.command, self.name.text))
if self.headerfiles:
res.append('\n')
if self.headerfiles:
for x in self.headerfiles:
res.append(x.getFormatted(formatter))
if self.extends:
res.append('\n')
for x in self.extends:
res.append(x.getFormatted(formatter))
if self.briefs:
res.append('\n')
for x in self.briefs:
res.append(x.getFormatted(formatter))
if self.deprecation_msgs or self.warnings or self.notes:
res.append('\n')
for x in self.deprecation_msgs:
res.append(x.getFormatted(formatter))
for x in self.warnings:
res.append(x.getFormatted(formatter))
for x in self.notes:
res.append(x.getFormatted(formatter))
if self.signatures:
res.append('\n')
for x in self.signatures:
res.append(x.getFormatted(formatter))
if not self.body.empty:
res.append('\n')
res += self.body.getFormatted(formatter)
if self.sees:
res.append('\n')
for x in self.sees:
res.append(x.getFormatted(formatter))
res.append('\n')
return ''.join(res)
class RawEnum(RawCodeEntry):
"""RawDoc for one enum."""
def __init__(self, first_token, briefs=[]):
RawCodeEntry.__init__(self, first_token, briefs=briefs, command='enum')
def getType(self):
return 'enum'
class RawTypedef(RawCodeEntry):
"""RawDoc for one typedef."""
def __init__(self, first_token, briefs=[]):
RawCodeEntry.__init__(self, first_token, briefs=briefs, command='typedef')
def getType(self):
if '#' in self.name.text:
return 'grouped_typedef'
elif '::' in self.name.text:
return 'member_typedef'
else:
return 'global_typedef'
class RawAdaption(RawCodeEntry):
"""RawDoc for one adaption."""
def __init__(self, first_token, briefs=[]):
RawCodeEntry.__init__(self, first_token, briefs=briefs, command='adaption')
def getType(self):
return 'adaption'
class RawClass(RawCodeEntry):
"""RawDoc for one class.
@ivar tparams List of RawParameter objects.
"""
def __init__(self, first_token, briefs=[]):
RawCodeEntry.__init__(self, first_token, briefs=briefs, command='class')
self.extends = []
self.implements = []
self.tparams = []
def addTParam(self, p):
self.tparams.append(p)
def addExtends(self, p):
self.extends.append(p)
def addImplements(self, p):
self.implements.append(p)
def getType(self):
return 'class'
def getFormatted(self, formatter):
res = []
if self.title.text:
res.append(formatter.formatCommand(self.command, self.title.text,
self.name.text))
else:
res.append(formatter.formatCommand(self.command, self.name.text))
if self.implements:
res.append('\n')
for x in self.implements:
res.append(x.getFormatted(formatter))
if self.extends:
res.append('\n')
for x in self.extends:
res.append(x.getFormatted(formatter))
if self.headerfiles:
res.append('\n')
if self.headerfiles:
for x in self.headerfiles:
res.append(x.getFormatted(formatter))
if self.briefs:
res.append('\n')
for x in self.briefs:
res.append(x.getFormatted(formatter))
if self.deprecation_msgs or self.warnings or self.notes:
res.append('\n')
for x in self.deprecation_msgs:
res.append(x.getFormatted(formatter))
for x in self.warnings:
res.append(x.getFormatted(formatter))
for x in self.notes:
res.append(x.getFormatted(formatter))
if self.signatures:
res.append('\n')
for x in self.signatures:
res.append(x.getFormatted(formatter))
if self.tparams:
res.append('\n')
for x in self.tparams:
res.append(x.getFormatted(formatter))
if not self.body.empty:
res.append('\n')
res += self.body.getFormatted(formatter)
if self.sees:
res.append('\n')
for x in | |
denied.'), 'error')
return redirect(return_url)
if export_type == 'csv' or export_type == 'tsv':
return self._export_csv(return_url, export_type)
else:
return self._export_tablib(export_type, return_url)
def _export_csv(self, return_url, export_type):
"""Export a CSV or tsv of records as a stream."""
delimiter = ","
if export_type == 'tsv':
delimiter = "\t"
# Grab parameters from URL
view_args = self._get_list_extra_args()
# Map column index to column name
sort_column = self._get_column_by_idx(view_args.sort)
if sort_column is not None:
sort_column = sort_column[0]
# Get count and data
count, query = self.get_record_list(
0,
sort_column,
view_args.sort_desc,
view_args.search,
view_args.filters,
page_size=self.export_max_rows,
execute=False)
# https://docs.djangoproject.com/en/1.8/howto/outputting-csv/
class Echo(object):
"""An object that implements just the write method of the file-like interface."""
def write(self, value):
"""Write the value by returning it, instead of storing in a buffer."""
return value
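# Echo.write() simply hands each formatted row back to the generator below, so rows are
# streamed to the client as they are produced instead of being buffered in memory.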
writer = csv.writer(Echo(), delimiter=delimiter)
def generate():
# Append the column titles at the beginning
titles = [csv_encode(c[1]) for c in self._export_columns]
yield writer.writerow(titles)
for row in query:
vals = [csv_encode(self.get_export_value(row, c[0]))
for c in self._export_columns]
yield writer.writerow(vals)
filename = self.get_export_name(export_type=export_type)
disposition = 'attachment;filename=%s' % (secure_filename(filename), )
return Response(
stream_with_context(generate()),
headers={'Content-Disposition': disposition},
mimetype='text/' + export_type)
class FundingRecordAdmin(CompositeRecordModelView):
"""Funding record model view."""
column_exclude_list = ("task", "translated_title_language_code", "short_description", "disambiguation_source")
can_create = True
column_searchable_list = ("title",)
list_template = "funding_record_list.html"
column_export_list = (
"funding_id",
"local_identifier",
"put_code",
"title",
"translated_title",
"translated_title_language_code",
"type",
"organization_defined_type",
"short_description",
"amount",
"url",
"currency",
"start_date",
"end_date",
"org_name",
"city",
"region",
"country",
"disambiguated_id",
"disambiguation_source",
"visibility",
"orcid",
"email",
"first_name",
"last_name",
"external_id_type",
"external_id_url",
"external_id_relationship",
"status",
"is_active")
def get_record_list(self, page, sort_column, sort_desc, search, filters, execute=True, page_size=None):
"""Return records and realated to the record data."""
count, query = self.get_list(
0,
sort_column,
sort_desc,
search,
filters,
page_size=page_size,
execute=False)
ext_ids = [r.id for r in
ExternalId.select(models.fn.min(ExternalId.id).alias("id")).join(FundingRecord).where(
FundingRecord.task == self.current_task_id).group_by(FundingRecord.id).objects()]
return count, query.select(
self.model,
FundingInvitee.email,
FundingInvitee.orcid,
FundingInvitee.identifier.alias("local_identifier"),
FundingInvitee.first_name,
FundingInvitee.last_name,
FundingInvitee.put_code,
FundingInvitee.visibility,
ExternalId.type.alias("external_id_type"),
ExternalId.value.alias("funding_id"),
ExternalId.url.alias("external_id_url"),
ExternalId.relationship.alias("external_id_relationship"),
).join(
ExternalId,
JOIN.LEFT_OUTER,
on=(ExternalId.record_id == self.model.id)).where(ExternalId.id << ext_ids).join(
FundingInvitee,
JOIN.LEFT_OUTER,
on=(FundingInvitee.record_id == self.model.id)).objects()
class WorkRecordAdmin(CompositeRecordModelView):
"""Work record model view."""
column_exclude_list = ("task", "translated_title_language_code", "short_description", "citation_value")
can_create = True
column_searchable_list = ("title",)
list_template = "work_record_list.html"
form_overrides = dict(publication_date=PartialDateField)
column_export_list = [
"work_id",
"put_code",
"title",
"subtitle",
"translated_title",
"translated_title_language_code",
"journal_title",
"short_description",
"citation_type",
"citation_value",
"type",
"publication_date",
"url",
"language_code",
"country",
"visibility",
"orcid",
"email",
"local_identifier",
"first_name",
"last_name",
"external_id_type",
"external_id_url",
"external_id_relationship",
"status",
"is_active"
]
def get_record_list(self, page, sort_column, sort_desc, search, filters, execute=True, page_size=None):
"""Return records and realated to the record data."""
count, query = self.get_list(
0,
sort_column,
sort_desc,
search,
filters,
page_size=page_size,
execute=False)
ext_ids = [r.id for r in
WorkExternalId.select(models.fn.min(WorkExternalId.id).alias("id")).join(WorkRecord).where(
WorkRecord.task == self.current_task_id).group_by(WorkRecord.id).objects()]
return count, query.select(
self.model,
WorkInvitee.email,
WorkInvitee.orcid,
WorkInvitee.identifier.alias("local_identifier"),
WorkInvitee.first_name,
WorkInvitee.last_name,
WorkInvitee.put_code,
WorkInvitee.visibility,
WorkExternalId.type.alias("external_id_type"),
WorkExternalId.value.alias("work_id"),
WorkExternalId.url.alias("external_id_url"),
WorkExternalId.relationship.alias("external_id_relationship"),
).join(
WorkExternalId,
JOIN.LEFT_OUTER,
on=(WorkExternalId.record_id == self.model.id)).where(WorkExternalId.id << ext_ids).join(
WorkInvitee,
JOIN.LEFT_OUTER,
on=(WorkInvitee.record_id == self.model.id)).objects()
class PeerReviewRecordAdmin(CompositeRecordModelView):
"""Peer Review record model view."""
column_exclude_list = (
"task", "subject_external_id_type", "external_id_type", "convening_org_disambiguation_source")
can_create = True
column_searchable_list = ("review_group_id", )
list_template = "peer_review_record_list.html"
form_overrides = dict(review_completion_date=PartialDateField)
form_rules = [
rules.FieldSet([
"review_group_id", "reviewer_role", "review_url", "review_type",
"review_completion_date"
], "Review Group"),
rules.FieldSet([
"subject_external_id_type", "subject_external_id_value", "subject_external_id_url",
"subject_external_id_relationship", "subject_container_name", "subject_type",
"subject_name_title", "subject_name_subtitle",
"subject_name_translated_title_lang_code", "subject_name_translated_title",
"subject_url"
], "Subject"),
rules.FieldSet([
"convening_org_name", "convening_org_city", "convening_org_region",
"convening_org_country", "convening_org_disambiguated_identifier",
"convening_org_disambiguation_source"
], "Convening Organisation"),
"is_active",
]
column_export_list = [
"review_group_id",
"reviewer_role",
"review_url",
"review_type",
"review_completion_date",
"subject_external_id_type",
"subject_external_id_value",
"subject_external_id_url",
"subject_external_id_relationship",
"subject_container_name",
"subject_type",
"subject_name_title",
"subject_name_subtitle",
"subject_name_translated_title_lang_code",
"subject_name_translated_title",
"subject_url",
"convening_org_name",
"convening_org_city",
"convening_org_region",
"convening_org_country",
"convening_org_disambiguated_identifier",
"convening_org_disambiguation_source",
"email",
"orcid",
"local_identifier",
"first_name",
"last_name",
"put_code",
"visibility",
"external_id_type",
"peer_review_id",
"external_id_url",
"external_id_relationship",
"is_active"
]
def get_record_list(self,
page,
sort_column,
sort_desc,
search,
filters,
execute=True,
page_size=None):
"""Return records and realated to the record data."""
count, query = self.get_list(
0, sort_column, sort_desc, search, filters, page_size=page_size, execute=False)
ext_ids = [r.id for r in
PeerReviewExternalId.select(models.fn.min(PeerReviewExternalId.id).alias("id")).join(
PeerReviewRecord).where(
PeerReviewRecord.task == self.current_task_id).group_by(PeerReviewRecord.id).objects()]
return count, query.select(
self.model,
PeerReviewInvitee.email,
PeerReviewInvitee.orcid,
PeerReviewInvitee.identifier.alias("local_identifier"),
PeerReviewInvitee.first_name,
PeerReviewInvitee.last_name,
PeerReviewInvitee.put_code,
PeerReviewInvitee.visibility,
PeerReviewExternalId.type.alias("external_id_type"),
PeerReviewExternalId.value.alias("peer_review_id"),
PeerReviewExternalId.url.alias("external_id_url"),
PeerReviewExternalId.relationship.alias("external_id_relationship"),
).join(
PeerReviewExternalId,
JOIN.LEFT_OUTER,
on=(PeerReviewExternalId.record_id == self.model.id)).where(PeerReviewExternalId.id << ext_ids).join(
PeerReviewInvitee,
JOIN.LEFT_OUTER,
on=(PeerReviewInvitee.record_id == self.model.id)).objects()
class AffiliationRecordAdmin(CompositeRecordModelView):
"""Affiliation record model view."""
can_create = True
column_exclude_list = (
"task",
"organisation",
)
column_searchable_list = (
"first_name",
"last_name",
"email",
"role",
"department",
"region",
"status",
)
column_export_list = [
"is_active",
"put_code",
"local_id",
"processed_at",
"status",
"first_name",
"last_name",
"email",
"visibility",
"orcid",
"organisation",
"affiliation_type",
"role",
"department",
"start_date",
"end_date",
"city",
"region",
"country",
"disambiguated_id",
"disambiguation_source",
"delete_record",
"visibility",
"url",
"display_index",
"external_id_type",
"external_id_value",
"external_id_url",
"external_id_relationship",
]
form_widget_args = {"task": {"readonly": True}}
def validate_form(self, form):
"""Validate the input."""
if request.method == "POST" and hasattr(form, "orcid") and hasattr(
form, "email") and hasattr(form, "put_code"):
if not (form.orcid.data or form.email.data or form.put_code.data):
flash(
"Either <b>email</b>, <b>ORCID iD</b>, or <b>put-code</b> should be provided.",
"danger")
return False
return super().validate_form(form)
@expose("/export/<export_type>/")
def export(self, export_type):
"""Check the export type whether it is csv, tsv or other format."""
if export_type not in ["json", "yaml", "yml"]:
return super().export(export_type)
return_url = get_redirect_target() or self.get_url(".index_view")
task_id = self.current_task_id
if not task_id:
flash("Missing task ID.", "danger")
return redirect(return_url)
if not self.can_export or (export_type not in self.export_types):
flash("Permission denied.", "danger")
return redirect(return_url)
data = Task.get(int(task_id)).to_dict()
if export_type == "json":
resp = jsonify(data)
else:
resp = yamlfy(data)
resp.headers[
"Content-Disposition"] = f"attachment;filename={secure_filename(self.get_export_name(export_type))}"
return resp
def get_record_list(self,
page,
sort_column,
sort_desc,
search,
filters,
execute=True,
page_size=None):
"""Return records and realated to the record data."""
count, query = self.get_list(
0, sort_column, sort_desc, search, filters, page_size=page_size, execute=False)
ext_ids = [r.id for r in
AffiliationExternalId.select(models.fn.min(AffiliationExternalId.id).alias("id")).join(
AffiliationRecord).where(
AffiliationRecord.task == self.current_task_id).group_by(AffiliationRecord.id).objects()]
return count, query.select(
self.model,
AffiliationExternalId.type.alias("external_id_type"),
AffiliationExternalId.value.alias("external_id_value"),
AffiliationExternalId.url.alias("external_id_url"),
AffiliationExternalId.relationship.alias("external_id_relationship"),
).join(
AffiliationExternalId,
JOIN.LEFT_OUTER,
on=((AffiliationExternalId.record_id == self.model.id) & (AffiliationExternalId.id << ext_ids))).objects()
class ProfilePropertyRecordAdmin(RecordModelView):
"""Researcher Url, Other Name, and Keyword record model view."""
can_create = True
form_widget_args = {"task": {"readonly": True}}
def __init__(self, model_class, *args, **kwargs):
"""Set up model specific attributes."""
self.column_searchable_list = [
f for f in ["content", "name", "value", "first_name", "last_name", "email"]
if f in model_class._meta.fields
]
super().__init__(model_class, *args, **kwargs)
def validate_form(self, form):
"""Validate the input."""
if request.method == "POST" and hasattr(form, "orcid") and hasattr(
form, "email") and hasattr(form, "put_code"):
if not (form.orcid.data or form.email.data or form.put_code.data):
flash(
"Either <b>email</b>, <b>ORCID iD</b>, or <b>put-code</b> should be provided.",
"danger")
return False
return super().validate_form(form)
@expose("/export/<export_type>/")
def export(self, export_type):
"""Check the export type whether it is csv, tsv or other format."""
if export_type not in ["json", "yaml", "yml"]:
return super().export(export_type)
return_url = get_redirect_target() or self.get_url(".index_view")
task_id = self.current_task_id
if not task_id:
flash("Missing task ID.", "danger")
return redirect(return_url)
if not self.can_export or (export_type not in self.export_types):
flash("Permission denied.", "danger")
return redirect(return_url)
data = Task.get(int(task_id)).to_dict()
if export_type == "json":
resp = jsonify(data)
else:
resp = yamlfy(data)
resp.headers[
"Content-Disposition"] = f"attachment;filename={secure_filename(self.get_export_name(export_type))}"
return resp
class ViewMembersAdmin(AppModelView):
"""Organisation member model (User beloging to the current org.admin oganisation) view."""
roles_required = Role.SUPERUSER | Role.ADMIN
list_template = "viewMembers.html"
edit_template = "admin/member_edit.html"
form_columns = ["name", "orcid", "email", "eppn"]
form_widget_args = {c: {"readonly": True} for c in form_columns if c != "email"}
column_list = ["email", "orcid", "created_at", "updated_at", "orcid_updated_at"]
column_formatters_export = dict(orcid=lambda v, c, m, p: m.orcid)
column_exclude_list = None
column_searchable_list = ["email", "orcid", "name", "first_name", "last_name"]
column_export_list = ("email", "eppn", "orcid")
model = User
can_edit = True
can_create = False
can_delete = True
can_view_details = False
can_export = True
column_filters = (
filters.DateBetweenFilter(column=User.created_at, name="Registration Date"),
filters.DateBetweenFilter(column=User.updated_at, name="Update Date"),
filters.DateBetweenFilter(column=User.orcid_updated_at, name="ORCID Update Date"),
)
column_labels = {"created_at": "Registered At"}
def get_query(self):
"""Get quiery for the user belonging to the organistation of the current user."""
return current_user.organisation.users
def _order_by(self, query, joins, order):
"""Add ID for determenistic order of rows if sorting is by NULLable field."""
query, joins = super()._order_by(query, joins, order)
# add ID only if all fields are NULLable (exclude ones given by str):
if all(not isinstance(f, str) and f.null for (f, _) in order):
query = query.order_by(*query._order_by,
self.model.id.desc() if order[0][1] else self.model.id)
return query, joins
def get_one(self, rec_id):
"""Limit access only to the userers belonging to the current organisation."""
try:
user = User.get(id=rec_id)
if not user.organisations.where(UserOrg.org == current_user.organisation).exists():
flash("Access Denied!", "danger")
abort(403)
return user
except User.DoesNotExist:
flash(f"The user with given ID: {rec_id} doesn't exist or it was deleted.", "danger")
abort(404)
def delete_model(self, model):
"""Delete a row and revoke all access tokens issues for the organisation."""
org = current_user.organisation
token_revoke_url = app.config["ORCID_BASE_URL"] + "oauth/revoke"
if UserOrg.select().where(UserOrg.user_id == model.id, UserOrg.org_id == org.id,
UserOrg.is_admin).exists():
flash(
f"Failed to delete record for {model}, As User appears to be one of the admins. "
f"Please contact {MAIL_SUPPORT_ADDRESS} for support", "danger")
return False
for token in OrcidToken.select().where(OrcidToken.org == org, OrcidToken.user == model):
try:
resp | |
"""
This is a module holding the rule network class
"""
import logging
import matplotlib.pyplot as plt
import numpy as np
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils import shuffle
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils.validation import check_X_y, check_array, check_is_fitted, \
check_random_state
class DeepRuleNetworkClassifier(BaseEstimator, ClassifierMixin):
""" A classifier which uses a network structure to learn rule sets.
Parameters
----------
init_method : {'probabilistic'},
default='probabilistic'
The method used to initialize the rules. Must be 'probabilistic' to
initialize each attribute of a rule with a fixed probability. Further
initialization methods are planned.
hidden_layer_sizes : list[int], default=[10]
The number of nodes per layer in the network. Does not include
size for the input and output layer which will be set automatically.
first_layer_conjunctive : bool, default=True
The type of the first layer in the network. Must be True to emulate a
conjunctive ('and') behavior or False for a disjunctive ('or') behavior.
avg_rule_length : int, default=3
The average number of conditions in an initial rule. Each attribute
is set to a random value and added to the rule with a probability of
avg_rule_length/|A|, with |A| being the number of attributes. Only has an effect if
init_method='probabilistic'.
init_prob : float, default=0.2
The probability of each initial weight in the network to be True (apart
from first layer). Only has an effect if init_method='probabilistic'.
n_epochs : int, default=5
The number of repetitions the data is passed through the network. If
None, the training will be stopped if the accuracy did not increase
in the last epoch.
batch_size : int, default=50
The number of samples per mini-batch. If None, all samples will be
processed in a single batch.
max_flips : int, default=None
The maximum number of flips per mini-batch. If None, the network will
be (over)fitted as close as possible to the current mini-batch.
optimize_last_layer_separately : bool, default=False
If True, the last layer will be optimized in a separate step after
all other layers.
pos_class_method : {'least-frequent', 'most-frequent'},
default='least-frequent'
The class chosen to be converted to True and to be the head of the
generated rules.
plot_accuracies : bool, default=False
If True, after fit method the accuracy development will be plotted.
random_state : int, default=None
A random number generator instance to define the state of the
random permutations generator.
Attributes
----------
n_layers : int
The number of layers in the network. Includes input and output layer.
last_layer_conjunctive : bool
The type of the last layer in the network. Will be set automatically
depending on 'first_layer_conjunctive' and n_layers since conjunctive and
disjunctive layers alternate.
X_ : ndarray, shape (n_samples, n_features)
The categorical, one-hot-encoded features of the training samples
passed during :meth:`fit`.
y_ : ndarray, shape (n_samples,)
The labels passed during :meth:`fit`.
n_attributes_ : int
The number of attributes passed during :meth:`fit`.
attributes_ : list[str] of shape (n_attributes,)
String names for (non-boolean) attributes passed during :meth:`fit`.
attribute_lengths_ : list[int], shape (n_attributes,)
The number of unique values per attribute, passed during :meth:`fit`.
attribute_lengths_cumsum_ : list[int], shape (n_attributes,)
The cumulative sum of attribute_lengths_, used as indexes for X_.
n_features_ : int
The number of features seen at :meth:`fit`.
features_ : list[str], shape (n_features,)
String names for (boolean) features passed during :meth:`fit`.
n_classes_ : int
The number of classes seen at :meth:`_preprocess_classes`.
classes_ : list[str], shape (n_classes,)
String names for the classes seen at :meth:`_preprocess_classes`.
output_feature_ : str
String name for output feature.
coefs_ : list[ndarray], shape (n_layers - 1,)
The ith element in the list represents the weight matrix
corresponding to layer i, i.e. coefs_[i][j][k] represents if node j
in layer i passes its output to node k in layer i+1.
n_batches_ : int
The number of batches used during :meth:`fit`.
batch_accuracies_ : ndarray, shape (n_batches + 2,)
The accuracies on the mini-batch after optimization on it. The first
element is the accuracy on the training set after initialization and
the last one the accuracy on the training set after optimization.
train_accuracies_ : ndarray, shape (n_batches + 2,)
The accuracies on the training set after optimization on a
mini-batch. The first element is the accuracy on the training set
after initialization and the last one the accuracy on the training
set after optimization.
random_state_ : int
A random number generator instance to define the state of the
random permutations generator.
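Examples
--------
A minimal usage sketch (illustrative only; the toy feature matrix and the
hyperparameter values below are assumptions made for the example, not part
of the original module):
>>> import numpy as np
>>> X = np.array([[0, 1, 0], [1, 0, 1], [0, 0, 1], [1, 1, 0]])
>>> y = np.array([0, 1, 1, 0])
>>> clf = DeepRuleNetworkClassifier(hidden_layer_sizes=[4], batch_size=2,
...                                 n_epochs=2, random_state=0)
>>> clf.fit(X, y)  # doctest: +SKIP
>>> clf.predict(X)  # doctest: +SKIP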
"""
def __init__(self, init_method='probabilistic', hidden_layer_sizes=None,
first_layer_conjunctive=True, avg_rule_length=3,
init_prob=0.2, n_epochs=5, batch_size=50, max_flips=None,
optimize_last_layer_separately=False,
pos_class_method='least-frequent',
plot_accuracies=False, random_state=None):
if hidden_layer_sizes is None:
hidden_layer_sizes = [10]
self._class_logger = logging.getLogger(__name__).getChild(
self.__class__.__name__)
self.init_method = init_method
self.hidden_layer_sizes = hidden_layer_sizes
self.n_layers = len(hidden_layer_sizes) + 2
self.first_layer_conjunctive = first_layer_conjunctive
if self.n_layers % 2:
self.last_layer_conjunctive = not first_layer_conjunctive
else:
self.last_layer_conjunctive = first_layer_conjunctive
self.avg_rule_length = avg_rule_length
self.init_prob = init_prob
self.n_epochs = n_epochs
self.batch_size = batch_size
self.max_flips = max_flips
self.optimize_last_layer_separately = optimize_last_layer_separately
self.pos_class_method = pos_class_method
self.plot_accuracies = plot_accuracies
self.random_state = random_state
def fit(self, X, y, attributes=None, attribute_lengths=None,
features=None, target='class'):
""" The fitting function creates binary layers adjusted to the
size of the input (n_features). It learns a model by flipping
the boolean values to create suitable rules.
Parameters
----------
X : array-like, shape (n_samples, n_attributes)
The training input samples.
y : array-like, shape (n_samples,)
The target values. An array of int.
attributes : list[str], default=None
The names of each attribute (in order of the original features in
X).
attribute_lengths : list[int], default=None
The cardinality of the attributes.
features : list[str], default=None
The names of each column after in order of the features in X.
target : str, default='class'
The name of the target.
Returns
-------
self : DeepRuleNetworkClassifier
Returns fitted classifier.
"""
# Check that X and y have correct shape
X, y = check_X_y(X, y)
# Create dummy names/lengths for attributes and features if None were
# given
if attributes is None:
if attribute_lengths is None:
attributes = ['x%d' % i for i in range(X.shape[1])]
else:
attributes = ['x%d' % i for i in range(len(attribute_lengths))]
if attribute_lengths is None:
attribute_lengths = [1] * len(attributes)
if features is None:
features = attributes
# Check additional fit parameters
if len(attributes) != len(attribute_lengths):
raise ValueError('%s attributes, but %s attribute lengths are given'
% (len(attributes), len(attribute_lengths)))
if len(features) != sum(attribute_lengths):
raise ValueError('%s features given, but attribute lengths sum up '
'to %s' % (len(features), sum(attribute_lengths)))
# Preprocess classes
pos_class = self._preprocess_classes(y)
self.output_feature_ = target + '=' + pos_class
# Initialize attributes
self.X_ = X
self.n_attributes_ = len(attributes)
self.attributes_ = attributes
self.attribute_lengths_ = attribute_lengths
self.attribute_lengths_cumsum_ = np.cumsum(self.attribute_lengths_)
self.n_features_ = len(features)
self.features_ = features
self.n_outputs_ = 1
# Calculate number of batches
batch_size = (X.shape[0] if self.batch_size is None else
self.batch_size)
if batch_size > X.shape[0]:
batch_size = X.shape[0]
self._class_logger.warning('Batch size was higher than the number of '
'training samples. Using a single batch of '
'size %s for training.', batch_size)
self.n_batches_ = X.shape[0] // batch_size
# Initialize rule network layers
self._init_layers()
# Initialize arrays for storing accuracies
new_accuracy = accuracy_score(y, self.predict(X))
self.batch_accuracies_ = np.empty(shape=(2,))
self.batch_accuracies_[0] = new_accuracy
self.train_accuracies_ = np.empty(shape=(2,))
self.train_accuracies_[0] = new_accuracy
best_accuracy = new_accuracy
best_coefs = self.coefs_
self._class_logger.info('Training network...')
n_epochs = (np.iinfo(np.int32).max if self.n_epochs is None else
self.n_epochs)
for epoch in range(n_epochs):
improved = False
# expand arrays to store more accuracies
self.batch_accuracies_ = \
np.concatenate((self.batch_accuracies_, np.empty(shape=(
self.n_batches_ + self.optimize_last_layer_separately,))))
self.train_accuracies_ = \
np.concatenate((self.train_accuracies_, np.empty(shape=(
self.n_batches_ + self.optimize_last_layer_separately,))))
# number of iterations done so far
it = (self.n_batches_ + self.optimize_last_layer_separately) * epoch
self._class_logger.debug('Processing epoch %s of %s...', epoch +
1, self.n_epochs)
for batch in range(self.n_batches_):
X, y = shuffle(X, y, random_state=self.random_state + batch)
self._class_logger.debug('Processing mini-batch %s of %s...',
batch + 1, self.n_batches_)
X_mini = X[batch * batch_size:(batch + 1) * batch_size]
y_mini = y[batch * batch_size:(batch + 1) * batch_size]
self.batch_accuracies_[it + batch + 1] = \
self._optimize_coefs(X_mini, y_mini)
new_accuracy = accuracy_score(y, self.predict(X))
if new_accuracy > best_accuracy:
best_accuracy = new_accuracy
best_coefs = self.coefs_
improved = True
self.train_accuracies_[it + batch + 1] = new_accuracy
# optimize the last layer
if self.optimize_last_layer_separately:
new_accuracy = self._optimize_last_layer(X, y)
self.batch_accuracies_[it + self.n_batches_ + 1] = new_accuracy
self.train_accuracies_[it + self.n_batches_ | |
# application/pages/volume_profile_analysis/volume_profile_analysis.py
"""This example app demonstrates how to use Panel and the HoloViews ecosystem to
analyze Volume Profiles as described in the [How to Analyze Volume Profiles With Python Blog Post]\
(https://medium.com/swlh/how-to-analyze-volume-profiles-with-python-3166bb10ff24)
by [<NAME>](https://www.linkedin.com/in/minhnguyen001/)
"""
import pathlib
from datetime import timedelta
import holoviews as hv
import numpy as np
import pandas as pd
import panel as pn
import param
import yfinance as yf
from bokeh.models import HoverTool
from bokeh.models.formatters import NumeralTickFormatter
from diskcache import FanoutCache
from panel.io.loading import start_loading_spinner, stop_loading_spinner
from panel.template import FastListTemplate
from scipy import signal, stats
from application.config import site
hv.extension("bokeh")
cache = FanoutCache(".cache")
APPLICATION = site.create_application(
url="volume-profile-analysis",
name="Volume Profile Analysis",
author="<NAME>",
introduction="""An example of Volume Profile Analysis of time series from commodity, currency,
debt and equity markets.""",
description=__doc__,
thumbnail_url="volume-profile-analysis.png",
code_url="volume_profile_analysis/volume_profile_analysis.py",
mp4_url="volume-profile-analysis.mp4",
tags=["Panel", "HoloViz", "Volume Profiles", "Finance", "Quant", "Signal Processing"],
)
ROOT = pathlib.Path(__file__).parent
DATA_PATH = ROOT / "bs_btcusd_ohlcv_1h_2020.csv.gz"
DATA_URL = (
"https://cdn.shopify.com/s/files/1/1365/1139/files/bs_btcusd_ohlcv_1h_2020.csv.gz?v=1585597359"
)
ACCENT_COLOR = "#C01754"
# Source: https://mycolor.space/?hex=%23C01754&sub=1
COLOR_PALETTE = [ACCENT_COLOR, "#007813", "#0061E5"]
GENERIC_GRADIENT_PALETTE = [ACCENT_COLOR, "#A22E74", "#784184", "#4E4A81", "#314C70", "#2F4858"]
RED = "#c01754"
GREEN = "#007400"
GRAY = "#b0ab99"
CACHE_EXPIRY = 60 * 60 * 24 # seconds, i.e. one Day
# region DATA
@cache.memoize(name="shared", expire=CACHE_EXPIRY)
def _extract_raw_data(ticker="ORSTED.CO", period="6mo", interval="1d"):
extractor = yf.Ticker(ticker)
return extractor.history(period=period, interval=interval).reset_index()
def _transform_data(raw_data: pd.DataFrame):
data = (
raw_data[["Date", "Open", "High", "Low", "Close", "Volume"]]
.copy(deep=True)
.rename(
columns={
"Date": "time",
"Open": "open",
"High": "high",
"Low": "low",
"Close": "close",
"Volume": "volume",
}
)
)
return data
# endregion DATA
# region UTILS
def set_toolbar_none(plot, _):
"""Removes the bokeh toolbar"""
bokeh_plot = plot.state
bokeh_plot.toolbar.logo = None
bokeh_plot.toolbar_location = None
# endregion UTILS
# region CANDLESTICK
def create_candle_stick(data: pd.DataFrame) -> hv.Layout:
"""Creates a candle stick plot
Args:
data (pd.DataFrame): A dataframe with columns time, open, high, low, close and volume
Returns:
hv.Layout: A candle stick plot
"""
data = data.copy(deep=True)
t_delta = timedelta(hours=1)
data["time_start"] = data.time - 9 * t_delta # rectangles start
data["time_end"] = data.time + 9 * t_delta # rectangles end
data["positive"] = ((data.close - data.open) > 0).astype(int)
tooltips = [
("Time", "@{time}{%F}"),
("Open", "@open"),
("High", "@high"),
("Low", "@{price}"),
("Close", "@close"),
("Volume", "@volume{0,0}"),
]
hover = HoverTool(tooltips=tooltips, formatters={"@{time}": "datetime"})
candlestick = hv.Segments(data, kdims=["time", "low", "time", "high"]) * hv.Rectangles(
data,
kdims=["time_start", "open", "time_end", "close"],
vdims=["positive", "high", "low", "time", "volume"],
)
candlestick = candlestick.redim(low="price")
candlestick.opts(
hv.opts.Rectangles(
color="positive",
cmap=[RED, GREEN],
responsive=True,
tools=["box_zoom", "pan", "wheel_zoom", "save", "reset", hover],
default_tools=[],
active_tools=["box_zoom"],
),
hv.opts.Segments(
color=GRAY,
height=400,
responsive=True,
tools=["box_zoom", "pan", "reset"],
default_tools=[],
active_tools=["box_zoom"],
),
)
return candlestick
def _create_time_and_volume_histogram(data, time="time", volume="volume", color=GRAY, alpha=0.5):
formatter = NumeralTickFormatter(format="0,0")
hist_data = zip(data[time], data[volume])
tooltips = [
("Time", "@{time}{%F}"),
("Volume", "@volume{0,0}"),
]
hover = HoverTool(tooltips=tooltips, formatters={"@{time}": "datetime"})
return (
hv.Histogram(hist_data)
.redim(Frequency="volume", x="time")
.opts(
yformatter=formatter,
color=color,
alpha=alpha,
tools=["xbox_zoom", "reset", hover],
default_tools=[],
hooks=[set_toolbar_none],
yticks=4,
)
)
def _create_price_and_volume_histogram( # pylint: disable=too-many-arguments
data, price="close", volume="volume", bins=150, color=GRAY, alpha=0.5
):
formatter = NumeralTickFormatter(format="0,0")
hist = np.histogram(data[price], bins=bins, weights=data[volume])
hist_data = zip(hist[1], hist[0])
tooltips = [
("Price", "@{price}"),
("Volume", "@volume{0,0}"),
]
hover = HoverTool(tooltips=tooltips)
return (
hv.Histogram(hist_data)
.redim(Frequency="volume", x="price")
.opts(
xformatter=formatter,
color=color,
alpha=alpha,
tools=["ybox_zoom", "reset", hover],
default_tools=[],
invert_axes=True,
hooks=[set_toolbar_none],
xticks=2,
)
)
def create_candle_stick_with_histograms(data: pd.DataFrame) -> pn.GridSpec:
"""Returns a candle stick plot with volume distributions on the sides
Args:
data (pd.DataFrame): A dataframe with columns time, open, high, low, close and volume
Returns:
pn.GridSpec: A GridSpec containing the plots
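Example (an illustrative sketch, assuming the data helpers above and a
running Panel/Bokeh environment; the ticker is a placeholder):
>>> raw = _extract_raw_data(ticker="MSFT", period="6mo", interval="1d")
>>> grid = create_candle_stick_with_histograms(_transform_data(raw))
>>> grid.servable()  # doctest: +SKIP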
"""
gridspec = pn.GridSpec(sizing_mode="stretch_both", min_height=600, margin=0)
if data is not None:
volume_plot = _create_time_and_volume_histogram(data).opts(responsive=True)
candle_stick_plot = create_candle_stick(data).opts(responsive=True)
pav_plot = _create_price_and_volume_histogram(data).opts(responsive=True)
gridspec[0:2, 0:8] = volume_plot
gridspec[2:10, 0:8] = candle_stick_plot
gridspec[2:10, 8:10] = pav_plot
return gridspec
# endregion CANDLESTICK
# def _create_volume_distribution(data, volume="volume", bins=50, color=ACCENT_COLOR, alpha=0.5):
# formatter = NumeralTickFormatter(format="0,0")
# hist = np.histogram(data[volume], bins=bins)
# hist_data = zip(hist[1], hist[0])
# tooltips = [
# ("Frequency", "@{Frequency}"),
# ("Volume", "@volume{0,0}"),
# ]
# hover = HoverTool(tooltips=tooltips)
# return (
# hv.Histogram(hist_data)
# .redim(x="volume")
# .opts(
# xformatter=formatter,
# color=color,
# alpha=alpha,
# tools=[hover],
# default_tools=[],
# # invert_axes=True,
# # hooks=[set_toolbar_none],
# # xticks=2,
# )
# )
# region DISTRIBUTION
# region SIGNAL ANALYSIS
def _calculate_vol_distribution_analysis( # pylint: disable=too-many-arguments
data, price="close", volume="volume", bins=150
):
hist = np.histogram(data[price], bins=bins, weights=data[volume], density=True)
return list(zip(hist[1], hist[0]))
def _create_normalized_price_and_volume_histogram(hist_data, color=ACCENT_COLOR, alpha=0.5):
formatter = NumeralTickFormatter(format="0,0")
tooltips = [
("Price", "@{Price}{0,0.000}"),
("Volume Density", "@{VolumeDensity}{0,0.000}"),
]
hover = HoverTool(tooltips=tooltips)
return (
hv.Histogram(hist_data)
.redim(Frequency="VolumeDensity", x="Price")
.opts(
xformatter=formatter,
color=color,
alpha=alpha,
tools=["xbox_zoom", "reset", hover, "save"],
default_tools=[],
ylabel="Volume Density",
# invert_axes=True,
# hooks=[set_toolbar_none],
# xticks=2,
)
)
def _kde_analysis(
data,
price="close",
volume="volume",
kde_factor=0.05,
num_samples=500,
):
kde = stats.gaussian_kde(data[price], weights=data[volume], bw_method=kde_factor)
xrange = np.linspace(data[price].min(), data[price].max(), num_samples)
ticks_per_sample = (xrange.max() - xrange.min()) / num_samples
kdy = kde(xrange)
return kde, xrange, ticks_per_sample, kdy
def _signal_analysis(xrange, kdy, min_prom_factor=0.3):
width_range = 1
min_prom = kdy.max() * min_prom_factor * 0.3
peaks, peak_props = signal.find_peaks(kdy, prominence=min_prom, width=width_range)
pkx = xrange[peaks]
pky = kdy[peaks]
return peaks, pkx, pky, peak_props
def _create_kde(
xrange,
kdy,
color=ACCENT_COLOR,
):
tooltips = [
("Price", "@{Price}{0,0.000}"),
("Volume Density", "@{VolumeDensity}{0,0.000}"),
]
hover = HoverTool(tooltips=tooltips)
# ticks_per_sample = (xr.max() - xr.min()) / num_samples
return (
hv.Curve(data={"x": xrange, "y": kdy})
.opts(color=color, tools=[hover], default_tools=[], ylabel="Volume Density")
.redim(y="VolumeDensity", x="Price")
)
def _create_kde_peaks(
pkx,
pky,
color=GREEN,
):
return (
hv.Scatter(data={"x": pkx, "y": pky})
.opts(color=color, size=8, default_tools=[])
.redim(y="Volume Density", x="Price")
)
def _create_promince_plot(pkx, pky, peak_props, color=GREEN):
line_x = pkx
line_y0 = pky
line_y1 = pky - peak_props["prominences"]
lines = []
for xxx, yy0, yy1 in zip(line_x, line_y0, line_y1):
data = {
"x": [xxx, xxx],
"y": [yy0, yy1],
}
plot = hv.Curve(data).opts(color=color, line_width=4, default_tools=[])
lines.append(plot)
return hv.Overlay(lines)
def _create_width_plot(xrange, ticks_per_sample, peak_props, color=GREEN):
left_ips = peak_props["left_ips"]
right_ips = peak_props["right_ips"]
width_x0 = xrange.min() + (left_ips * ticks_per_sample)
width_x1 = xrange.min() + (right_ips * ticks_per_sample)
width_y = peak_props["width_heights"]
lines = []
for xx0, xx1, yyy in zip(width_x0, width_x1, width_y):
data = {
"x": [xx0, xx1],
"y": [yyy, yyy],
}
plot = hv.Curve(data).opts(color=color, line_width=4, default_tools=[])
lines.append(plot)
return hv.Overlay(lines)
def _create_signal_analysis_plot( # pylint: disable=too-many-arguments
hist_data,
xrange,
ticks_per_sample,
kdy,
pkx,
pky,
peak_props,
add_peak_and_prominence=True,
):
plots = [
_create_normalized_price_and_volume_histogram(hist_data).opts(responsive=True),
_create_kde(xrange, kdy).opts(responsive=True),
_create_kde_peaks(pkx, pky).opts(responsive=True),
]
if add_peak_and_prominence:
plots.append(_create_promince_plot(pkx, pky, peak_props).opts(responsive=True))
plots.append(_create_width_plot(xrange, ticks_per_sample, peak_props).opts(responsive=True))
return hv.Overlay(plots)
# endregion SIGNAL ANALYSIS
# endregion DISTRIBUTION
# region SECTIONS
class BaseSection(param.Parameterized):
"""Abstract Class for Section of App"""
data = param.DataFrame()
loading = param.Boolean(default=False)
view = param.ClassSelector(class_=pn.Column)
def __init__(self, **params):
super().__init__(**params)
self._init_view()
self._update_view()
self.param.watch(self._update_view, "data")
def _init_view(self):
raise NotImplementedError()
def _update_view(self, *events):
raise NotImplementedError()
@pn.depends("loading", watch=True)
def _update_loading(self):
if self.loading:
start_loading_spinner(self.view)
else:
stop_loading_spinner(self.view)
class LoadDataSection(BaseSection):
"""Section describing the loading of data"""
ticker = param.ObjectSelector("ORSTED.CO", objects=["MSFT", "ORSTED.CO"])
period = param.Integer(default=6, bounds=(1, 12), step=1, label="Period (months)")
interval = param.ObjectSelector(default="1d", objects=["1d"], label="Interval", constant=True)
def _init_view(self):
info_head = """We can use the [`yfinance`](https://pypi.org/project/yfinance/) package to
load the data and [DiskCache](http://www.grantjenks.com/docs/diskcache/tutorial.html) to cache the
data for one day."""
if self.data is None:
self._load_data()
self._dataframe_panel = pn.pane.DataFrame(
self.data.sort_values(by="time", ascending=False).head(), index=False
)
self._total_rows_panel = pn.pane.Markdown(f"Total rows: {len(self.data)}")
self.view = pn.Column(
pn.pane.Markdown("## Data Load"),
pn.pane.Markdown(info_head),
pn.Param(self, parameters=["ticker", "period"], show_name=False),
self._dataframe_panel,
self._total_rows_panel,
)
@pn.depends("ticker", "period", "interval", watch=True)
def _load_data(self):
self.loading = True
if self.period > 1:
self.param.interval.constant = True
self.interval = "1d"
else:
self.param.interval.constant = False
raw_data = _extract_raw_data(
ticker=self.ticker, period=f"{self.period}mo", interval=self.interval
)
data = _transform_data(raw_data)
self.data = data
self.loading = False
def _update_view(self, *events):
if self.data is not None:
self._dataframe_panel.object = self.data.sort_values("time").head()
self._total_rows_panel.object = f"Total rows: {len(self.data)}"
class CandleStickSection(BaseSection):
"""Section with CandleStick analysis tool"""
def _init_view(self):
info = """## Candle Stick Plot
We can use a [*candlestick chart*](https://en.wikipedia.org/wiki/Candlestick_chart) to visualize the
Open, High, Low, Close price data and histograms to visualize the Volume.
Technically we use HoloViews [`Segments`]\
(https://holoviews.org/reference/elements/bokeh/Segments.html), [`Rectangles`]\
(https://holoviews.org/reference/elements/bokeh/Rectangles.html) and [`Histogram`]\
(https://holoviews.org/reference/elements/bokeh/Histogram.html) to create the plots and Panel
[`GridSpec`](https://panel.holoviz.org/reference/layouts/GridSpec.html) to lay them
out.
"""
self.view = pn.Column(
pn.pane.Markdown(info),
pn.GridSpec(sizing_mode="stretch_both", margin=0),
sizing_mode="stretch_both",
)
def _update_view(self, *events):
if self.data is not None:
self.view[1] = create_candle_stick_with_histograms(self.data)
class SignalAnalysisSection(BaseSection):
"""Section with Signal Processing analysis tool"""
bins = param.Integer(default=50, bounds=(10, 200), step=10, label="Bins")
kde_factor = param.Number(default=0.1, bounds=(0.01, 0.5), step=0.01, label="KDE Factor")
min_prom_factor = param.Number(
default=0.3, bounds=(0.01, 1.0), step=0.01, label="Prominence Factor"
)
add_peak_and_prominence = param.Boolean(default=False, label="Show Peak Prominence and Width")
def _init_view(self):
# pylint: disable=line-too-long
info_distribution = """## Signal Processing
We can use [Signal Processing](https://en.wikipedia.org/wiki/Signal_processing) to better
understand the relation between Price and Volume.
Technically we use [`np.histogram`]\
(https://numpy.org/doc/stable/reference/generated/numpy.histogram.html) and [`holoviews.Histogram`]\
(https://holoviews.org/reference/elements/bokeh/Histogram.html) to create a *normed histogram*.
We use [`scipy.stats.gaussian_kde`]\
(https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.gaussian_kde.html]) to find an
estimate of the [*probability density function*]\
(https://en.wikipedia.org/wiki/Probability_density_function)
and [`holoviews.Curve`](https://holoviews.org/reference/elements/bokeh/Curve.html) to visualize it.
We use [`scipy.signal.find_peaks`]\
(https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.find_peaks.html?highlight=find_peaks#scipy.signal.find_peaks)
to find the *peaks* and their *prominence* and *width*. We use [`holoviews.Scatter`]\
(https://holoviews.org/reference/elements/matplotlib/Scatter.html) and [`holoviews.Point`]\
(https://holoviews.org/reference/elements/matplotlib/Points.html) to visualize it."""
self._plot_panel = pn.pane.HoloViews(sizing_mode="stretch_both")
self.view = pn.Column(
pn.pane.Markdown(info_distribution),
pn.Param(
self,
parameters=["bins", "kde_factor", "min_prom_factor", "add_peak_and_prominence"],
show_name=False,
),
self._plot_panel,
sizing_mode="stretch_both",
)
@pn.depends("kde_factor", "min_prom_factor", "bins", "add_peak_and_prominence", watch=True)
def _update_view2(self, *_):
self._update_view()
def _update_view(self, *_):
hist_data = _calculate_vol_distribution_analysis(self.data, bins=self.bins)
_, xrange, ticks_per_sample, kdy = _kde_analysis(self.data, kde_factor=self.kde_factor)
_, pkx, pky, peak_props = _signal_analysis(xrange, kdy, self.min_prom_factor)
plot = (
_create_signal_analysis_plot(
| |
# robot/Link.py
"""
Link object.
@author: <NAME>
@copyright: <NAME>
"""
from numpy import *
from .utility import *
from .transform import *
import copy
import numpy as np
import math
import time
class Link:
"""
LINK create a new LINK object
A LINK object holds all information related to a robot link such as
kinematics of the joint
- alpha; the link twist angle
- an; the link length
- theta; the link rotation angle
- dn; the link offset
- sigma; 0 for a revolute joint, non-zero for prismatic
rigid-body inertial parameters
- I; 3x3 inertia matrix about link COG
- m; link mass
- r; link COG wrt link coordinate frame 3x1
motor and transmission parameters
- B; link viscous friction (motor referred)
- Tc; link Coulomb friction 1 element if symmetric, else 2
- G; gear ratio
- Jm; inertia (motor referred)
and miscellaneous
- qlim; joint limit matrix [lower upper] 2 x 1
- offset; joint coordinate offset
Handling the different kinematic conventions is now hidden within the LINK
object.
Conceivably all sorts of stuff could live in the LINK object such as
graphical models of links and so on.
@see: L{Robot}
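Example (an illustrative sketch; the numeric D&H values are placeholders,
not taken from any particular robot):
>>> from math import pi
>>> L = Link(alpha=pi/2, a=0.1, d=0.2, sigma=0)   # revolute joint
>>> print(L)                                      # summary of the kinematic parameters
>>> T = L.tr(0.5, deep_copy=True)                 # 4x4 link transform at q = 0.5 rad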
"""
LINK_DH = 1
LINK_MDH = 2
def __init__(self, theta=0, d=0, a=0, alpha=0, sigma=0, offset=0, convention=LINK_DH):
"""
L = LINK([theta d a alpha])
L = LINK([theta d a alpha sigma])
L = LINK([theta d a alpha sigma offset])
L = LINK([theta d a alpha], CONVENTION)
L = LINK([theta d a alpha sigma], CONVENTION)
L = LINK([theta d a alpha sigma offset], CONVENTION)
If sigma or offset are not provided they default to zero. Offset is a
constant amount added to the joint angle variable before forward kinematics
and is useful if you want the robot to adopt a 'sensible' pose for zero
joint angle configuration.
The optional CONVENTION argument is 'standard' for standard D&H parameters
or 'modified' for modified D&H parameters. If not specified the default
'standard'.
"""
self.theta = theta
self.d = d
self.a = a
self.alpha = alpha
self.sigma = sigma
self.convention = convention
self.offset = offset
# we know nothing about the dynamics
self.m = None
self.r = None
self.v = None
self.I = None
self.Jm = None
self.G = None
self.B = None
self.Tc = None
self.qlim = None
self._cached_tr = np.mat(np.zeros([4,4]))
self._cached_tr[-1,-1] = 1
self.prev_q = -10000
return None
def __repr__(self):
if self.convention == Link.LINK_DH:
conv = 'std'
else:
conv = 'mod'
if self.sigma == 0:
jtype = 'R'
else:
jtype = 'P'
if self.d is None:
return "theta=%f, a=%f, alpha=%f jtype: (%c) conv: (%s)" % (self.theta,
self.a, self.alpha, jtype, conv)
elif self.theta is None:
return "d=%f, a=%f, alpha=%f, jtype: (%c) conv: (%s)" % (self.d,
self.a, self.alpha, jtype, conv)
else:
return "theta=%f, d=%f, a=%f, alpha=%f jtype: (%c) conv: (%s)" % (self.theta,
self.d, self.a, self.alpha, jtype, conv)
# invoked at print
def __str__(self):
if self.convention == Link.LINK_DH:
conv = 'std'
else:
conv = 'mod'
if self.sigma == 0:
jtype = 'R'
else:
jtype = 'P'
if self.d is None:
return "theta = %f\ta = %f\talpha = %f\t--\tjtype: %c\tconv: (%s)" % (
self.theta, self.a, self.alpha, jtype, conv)
elif self.theta is None:
return "d = %f\ta = %f\talpha = %f\t--\tjtype: %c\tconv: (%s)" % (
self.d, self.a, self.alpha, jtype, conv)
else:
return "theta = %f\td=%f\ta = %f\talpha = %f\tjtype: %c\tconv: (%s)" % (
self.theta, self.d, self.a, self.alpha, jtype, conv)
def display(self):
print(self);
print()
if self.m is not None:
print("m:", self.m)
if self.r is not None:
print("r:", self.r)
if self.I is not None:
print("I:\n", self.I)
if self.Jm is not None:
print("Jm:", self.Jm)
if self.B is not None:
print("B:", self.B)
if self.Tc is not None:
print("Tc:", self.Tc)
if self.G is not None:
print("G:", self.G)
if self.qlim is not None:
print("qlim:\n", self.qlim)
def copy(self):
"""
Return copy of this Link
"""
return copy.copy(self);
def friction(self, qd):
"""
Compute friction torque for joint rate C{qd}.
Depending on fields in the Link object viscous and/or Coulomb friction
are computed.
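The torque model assumed here (a reading of the code below, not an
authoritative statement of sign conventions) is a viscous term M{B*qd}
plus a direction-dependent Coulomb term selected from C{Tc} according
to the sign of C{qd}.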
@type qd: number
@param qd: joint rate
@rtype: number
@return: joint friction torque
"""
tau = 0.0
if isinstance(qd, (ndarray, matrix)):
qd = qd.flatten().T
if self.B is None:
self.B = 0
tau = self.B * qd
if self.Tc is None:
self.Tc = mat([0,0])
tau = tau + (qd > 0) * self.Tc[0,0] + (qd < 0) * self.Tc[0,1]
return tau
def nofriction(self, all=False):
"""
Return a copy of the Link object with friction parameters set to zero.
@type all: boolean
@param all: if True then also zero viscous friction
@rtype: Link
@return: Copy of original Link object with zero friction
@see: L{robot.nofriction}
"""
l2 = self.copy()
l2.Tc = array([0, 0])
if all:
l2.B = 0
return l2;
# methods to set kinematic or dynamic parameters
fields = ["theta", "d", "a", "alpha", "sigma", "offset", "m", "Jm", "G", "B", "convention"];
def __setattr__(self, name, value):
"""
Set attributes of the Link object
- theta; scalar
- d; scalar
- a; scalar
- alpha; scalar
- sigma; scalar
- offset; scalar
- m; scalar
- Jm; scalar
- G; scalar
- B; scalar
- r; 3-vector
- I; 3x3 matrix, 3-vector or 6-vector
- Tc; scalar or 2-vector
- qlim; 2-vector
Inertia, I, can be specified as:
- 3x3 inertia tensor
- 3-vector, the diagonal of the inertia tensor
- 6-vector, the unique elements of the inertia tensor [Ixx Iyy Izz Ixy Iyz Ixz]
Coulomb friction, Tc, can be specified as:
- scalar, for the symmetric case when Tc- = Tc+
- 2-vector, the asymmetric case [Tc- Tc+]
Joint angle limits, qlim, is a 2-vector giving the lower and upper limits
of motion.
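A short illustrative example (assuming C{link} is an existing Link
instance; the numeric values are placeholders):
>>> link.I = [0.1, 0.2, 0.3]      # diagonal of the inertia tensor
>>> link.Tc = [0.04, 0.06]        # asymmetric Coulomb friction [Tc- Tc+]
>>> link.qlim = [-3.14, 3.14]     # joint limits [lower, upper]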
"""
if value is None:
self.__dict__[name] = value
return
if name == 'alpha':
self.__dict__['alpha'] = value
self.__dict__['sa'] = np.sin(value)
self.__dict__['ca'] = np.cos(value)
elif name in self.fields:
# scalar parameter
if isinstance(value, (ndarray,matrix)) and value.shape != (1,1):
raise ValueError("Scalar required")
if not isinstance(value, (int,float,int32,float64)):
raise ValueError("scalar required")
self.__dict__[name] = value
elif name == "r":
r = arg2array(value);
if len(r) != 3:
raise ValueError("matrix required")
self.__dict__[name] = mat(r)
elif name == "I":
if isinstance(value, matrix) and value.shape == (3,3):
self.__dict__[name] = value;
else:
v = arg2array(value);
if len(v) == 3:
self.__dict__[name] = mat(diag(v))
elif len(v) == 6:
self.__dict__[name] = mat([
[v[0],v[3],v[5]],
[v[3],v[1],v[4]],
[v[5],v[4],v[2]]])
else:
raise ValueError("matrix required");
elif name == "Tc":
v = arg2array(value)
if len(v) == 1:
self.__dict__[name] = mat([-v[0], v[0]])
elif len(v) == 2:
self.__dict__[name] = mat(v)
else:
raise ValueError("scalar or 2-vector required")
elif name == "qlim":
v = arg2array(value);
if len(v) == 2:
self.__dict__[name] = mat(v);
else:
raise ValueError("2-vector required")
elif name == '_cached_tr':
self.__dict__['_cached_tr'] = value
elif name == 'prev_q':
self.__dict__['prev_q'] = value
else:
raise NameError("Unknown attribute <%s> of link" % name)
# LINK.islimit(q) return if limit is exceeded: -1, 0, +1
def islimit(self,q):
"""
Check if joint limits exceeded. Returns
- -1 if C{q} is less than the lower limit
- 0 if C{q} is within the limits
- +1 if C{q} is greater than the high limit
@type q: number
@param q: Joint coordinate
@rtype: -1, 0, +1
@return: joint limit status
"""
if self.qlim is None:
return 0
return (q > self.qlim[0,1]) - (q < self.qlim[0,0])
def tr(self, q, deep_copy=False):
"""
Compute the transformation matrix for this link. This is a function
of kinematic parameters, the kinematic model (DH or MDH) and the joint
coordinate C{q}.
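For the standard D&H convention used below this is the usual composition
M{A(q) = Rz(theta) Tz(d) Tx(a) Rx(alpha)}, which expands to the 4x4
matrix built explicitly in the code.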
@type q: number
@param q: joint coordinate
@rtype: homogeneous transformation
@return: Link transform M{A(q)}
"""
if hasattr(self, 'offset'):
q += self.offset
an = self.a
dn = self.d
theta = self.theta
if self.sigma == 0:
theta = q # revolute
else:
dn = q # prismatic
sa = self.sa #sin(self.alpha)
ca = self.ca #cos(self.alpha)
st = math.sin(theta)
ct = math.cos(theta)
if self.convention == Link.LINK_DH:
# standard
if deep_copy:
t = np.mat([[ct, -st*ca, st*sa, an*ct],
[st, ct*ca, -ct*sa, an*st],
[0, sa, ca, dn],
[0, 0, 0, 1]]);
return t
elif q == self.prev_q:
| |
from __future__ import print_function
from math import pi
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
from torch.autograd import Variable
import numpy as np
import torch.nn.functional as F
import sys
import softpool as sp
sys.path.append("../expansion_penalty/")
import expansion_penalty_module as expansion
sys.path.append("../MDS/")
import MDS_module
import grnet
def feature_transform_regularizer(trans):
# Orthogonality regularizer: penalizes the mean of ||I - A A^T|| over the
# batch, encouraging each learned feature transform A to stay close to orthogonal.
d = trans.size()[1]
batchsize = trans.size()[0]
I = torch.eye(d)[None, :, :]
if trans.is_cuda:
I = I.cuda()
loss = torch.mean(
torch.norm(torch.bmm(trans, trans.transpose(2, 1)) - I, dim=(1, 2)))
return loss
class Periodics(nn.Module):
def __init__(self, dim_input=2, dim_output=512, is_first=True):
super(Periodics, self).__init__()
self.dim_input = dim_input
self.dim_output = dim_output
self.is_first = is_first
self.with_frequency = True
self.with_phase = True
# Omega determines the upper frequencies
self.omega_0 = 30
if self.with_frequency:
if self.with_phase:
self.Li = nn.Conv1d(
self.dim_input, self.dim_output, 1,
bias=self.with_phase).cuda()
else:
self.Li = nn.Conv1d(
self.dim_input,
self.dim_output // 2,
1,
bias=self.with_phase).cuda()
# nn.init.normal_(B.weight, std=10.0)
with torch.no_grad():
if self.is_first:
self.Li.weight.uniform_(-1 / self.dim_input,
1 / self.dim_input)
else:
self.Li.weight.uniform_(
-np.sqrt(6 / self.dim_input) / self.omega_0,
np.sqrt(6 / self.dim_input) / self.omega_0)
else:
self.Li = nn.Conv1d(self.dim_input, self.dim_output, 1).cuda()
self.BN = nn.BatchNorm1d(self.dim_output).cuda()
def filter(self, x):
filters = torch.cat([
torch.ones(1, self.dim_output // 128),
torch.zeros(1, self.dim_output // 128 * 63)
], 1).cuda()
filters = torch.unsqueeze(filters, 2)
return filters
def forward(self, x):
# here are some options to check how to form the fourier feature
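# A sketch of the intended mapping (an interpretation of the code, not a
# statement from the original authors): with learned frequencies W in the
# 1x1 conv weights, the input is lifted to sin(omega_0 * (Wx + b)) when a
# phase/bias is used, or to the concatenation
# [sin(omega_0 * Wx), cos(omega_0 * Wx)] when it is not; otherwise a plain
# ReLU(BN(Wx)) branch is used.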
if self.with_frequency:
if self.with_phase:
sinside = torch.sin(self.Li(x) * self.omega_0)
return sinside
else:
"""
here filter could be applied
"""
sinside = torch.sin(self.Li(x) * self.omega_0)
cosside = torch.cos(self.Li(x) * self.omega_0)
return torch.cat([sinside, cosside], 1)
else:
return F.relu(self.BN(self.Li(x)))
class STN3d(nn.Module):
def __init__(self, dim_pn=1024):
super(STN3d, self).__init__()
self.dim_pn = dim_pn
self.conv1 = torch.nn.Conv1d(3, 64, 1)
self.conv2 = torch.nn.Conv1d(64, 128, 1)
self.conv3 = torch.nn.Conv1d(128, self.dim_pn, 1)
self.fc1 = nn.Linear(self.dim_pn, 512)
self.fc2 = nn.Linear(512, 256)
self.fc3 = nn.Linear(256, 9)
self.relu = nn.ReLU()
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(128)
self.bn3 = nn.BatchNorm1d(1024)
self.bn4 = nn.BatchNorm1d(512)
self.bn5 = nn.BatchNorm1d(256)
def forward(self, x):
batchsize = x.size()[0]
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
x, _ = torch.max(x, 2)
x = x.view(-1, self.dim_pn)
x = F.relu(self.bn4(self.fc1(x)))
x = F.relu(self.bn5(self.fc2(x)))
x = self.fc3(x)
iden = Variable(
torch.from_numpy(
np.array([1, 0, 0, 0, 1, 0, 0, 0,
1]).astype(np.float32))).view(1, 9).repeat(
batchsize, 1)
if x.is_cuda:
iden = iden.cuda()
x = x + iden
x = x.view(-1, 3, 3)
return x
class STNkd(nn.Module):
def __init__(self, k=3 + 16):
super(STNkd, self).__init__()
self.conv1 = torch.nn.Conv1d(k, 64, 1)
self.conv2 = torch.nn.Conv1d(64, 128, 1)
self.conv3 = torch.nn.Conv1d(128, 1024, 1)
self.fc1 = nn.Linear(1024, 512)
self.fc2 = nn.Linear(512, 256)
self.fc3 = nn.Linear(256, k * k)
self.relu = nn.ReLU()
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(128)
self.bn3 = nn.BatchNorm1d(1024)
self.bn4 = nn.BatchNorm1d(512)
self.bn5 = nn.BatchNorm1d(256)
self.k = k
def forward(self, x):
batchsize = x.size()[0]
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
x = torch.max(x, 2, keepdim=True)[0]
x = x.view(-1, 1024)
x = F.relu(self.bn4(self.fc1(x)))
x = F.relu(self.bn5(self.fc2(x)))
x = self.fc3(x)
iden = Variable(
torch.from_numpy(np.eye(self.k).flatten().astype(
np.float32))).view(1, self.k * self.k).repeat(batchsize, 1)
if x.is_cuda:
iden = iden.cuda()
x = x + iden
x = x.view(-1, self.k, self.k)
return x
class PointNetFeat(nn.Module):
def __init__(self, num_points=8192, dim_pn=1024):
super(PointNetFeat, self).__init__()
self.dim_pn = dim_pn
self.conv1 = torch.nn.Conv1d(3, 64, 1)
self.conv2 = torch.nn.Conv1d(64, 128, 1)
self.conv3 = torch.nn.Conv1d(128, dim_pn, 1)
self.bn1 = torch.nn.BatchNorm1d(64)
self.bn2 = torch.nn.BatchNorm1d(128)
self.bn3 = torch.nn.BatchNorm1d(dim_pn)
self.fourier_map1 = Periodics(dim_input=3, dim_output=32)
self.fourier_map2 = Periodics(
dim_input=32, dim_output=128, is_first=False)
self.fourier_map3 = Periodics(
dim_input=128, dim_output=128, is_first=False)
self.num_points = num_points
def forward(self, inputs):
"""
x = self.fourier_map1(inputs)
x = self.fourier_map2(x)
x = self.fourier_map3(x)
"""
x = F.relu(self.bn1(self.conv1(inputs)))
x = F.relu(self.bn2(self.conv2(x)))
x = self.bn3(self.conv3(x))
x, _ = torch.max(x, 2)
x = x.view(-1, self.dim_pn)
return x
class SoftPoolFeat(nn.Module):
def __init__(self, num_points=8192, regions=16, sp_points=2048,
sp_ratio=8):
super(SoftPoolFeat, self).__init__()
self.conv1 = torch.nn.Conv1d(3, 64, 1)
self.conv2 = torch.nn.Conv1d(64, 128, 1)
self.conv3 = torch.nn.Conv1d(128, 256, 1)
self.bn1 = torch.nn.BatchNorm1d(64)
self.bn2 = torch.nn.BatchNorm1d(128)
self.bn3 = torch.nn.BatchNorm1d(256)
self.fourier_map1 = Periodics(dim_input=3, dim_output=32)
self.fourier_map2 = Periodics(
dim_input=32, dim_output=128, is_first=False)
self.fourier_map3 = Periodics(
dim_input=128, dim_output=128, is_first=False)
self.stn = STNkd(k=regions + 3)
self.num_points = num_points
self.regions = regions
self.sp_points = sp_points // sp_ratio
self.softpool = sp.SoftPool(self.regions, cabins=8, sp_ratio=sp_ratio)
def mlp(self, inputs):
"""
x = self.fourier_map1(inputs)
x = self.fourier_map2(x)
x = self.fourier_map3(x)
"""
x = F.relu(self.bn1(self.conv1(inputs)))
x = F.relu(self.bn2(self.conv2(x)))
x = self.bn3(self.conv3(x))
return x
def forward(self, x, x_seg=None):
part = x
x = self.mlp(x)
sp_cube, sp_idx, cabins, id_activa = self.softpool(x)
# transform
id_activa = torch.nn.functional.one_hot(
id_activa.to(torch.int64), self.regions).transpose(1, 2)
if x_seg is None:
point_wi_seg = torch.cat((id_activa.float(), part), 1)
else:
point_wi_seg = torch.cat((x_seg.float(), part), 1)
trans = self.stn(point_wi_seg)
"""
point_wi_seg = point_wi_seg.transpose(2, 1)
point_wi_seg = torch.bmm(point_wi_seg, trans)
point_wi_seg = point_wi_seg.transpose(2, 1)
"""
point_wi_seg = point_wi_seg.unsqueeze(2).repeat(1, 1, self.regions, 1)
point_wi_seg = torch.gather(point_wi_seg, dim=3, index=sp_idx.long())
feature = torch.cat((sp_cube, point_wi_seg), 1).contiguous()
feature = feature.view(feature.shape[0], feature.shape[1], 1,
self.regions * self.sp_points)
sp_cube = sp_cube.view(sp_cube.shape[0], sp_cube.shape[1], 1,
self.regions * self.sp_points)
sp_idx = sp_idx.view(sp_idx.shape[0], sp_idx.shape[1], 1,
self.regions * self.sp_points)
# return feature, cabins, sp_idx, trans
return sp_cube, cabins, sp_idx, trans
class PointGenCon(nn.Module):
def __init__(self, bottleneck_size=8192):
self.bottleneck_size = bottleneck_size
super(PointGenCon, self).__init__()
self.conv1 = torch.nn.Conv1d(self.bottleneck_size,
self.bottleneck_size, 1)
self.conv2 = torch.nn.Conv1d(self.bottleneck_size,
self.bottleneck_size // 2, 1)
self.conv3 = torch.nn.Conv1d(self.bottleneck_size // 2,
self.bottleneck_size // 4, 1)
self.conv4 = torch.nn.Conv1d(self.bottleneck_size // 4, 3, 1)
self.th = nn.Tanh()
self.bn1 = torch.nn.BatchNorm1d(self.bottleneck_size)
self.bn2 = torch.nn.BatchNorm1d(self.bottleneck_size // 2)
self.bn3 = torch.nn.BatchNorm1d(self.bottleneck_size // 4)
def forward(self, x):
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
# x = self.th(self.conv4(x))
x = self.conv4(x)
return x
class PointGenCon2D(nn.Module):
def __init__(self, bottleneck_size=8192):
self.bottleneck_size = bottleneck_size
super(PointGenCon2D, self).__init__()
self.conv1 = torch.nn.Conv2d(
self.bottleneck_size,
self.bottleneck_size,
kernel_size=(1, 1),
stride=(1, 1),
padding=(0, 0),
padding_mode='same')
self.conv2 = torch.nn.Conv2d(
self.bottleneck_size,
self.bottleneck_size // 2,
kernel_size=(1, 1),
stride=(1, 1),
padding=(0, 0),
padding_mode='same')
self.conv3 = torch.nn.Conv2d(
self.bottleneck_size // 2,
self.bottleneck_size // 4,
kernel_size=(1, 1),
stride=(1, 1),
padding=(0, 0),
padding_mode='same')
self.conv4 = torch.nn.Conv2d(
self.bottleneck_size // 4,
3,
kernel_size=(8, 1),
stride=(1, 1),
padding=(0, 0),
padding_mode='same')
self.th = nn.Tanh()
self.bn1 = torch.nn.BatchNorm2d(self.bottleneck_size)
self.bn2 = torch.nn.BatchNorm2d(self.bottleneck_size // 2)
self.bn3 = torch.nn.BatchNorm2d(self.bottleneck_size // 4)
def forward(self, x):
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
# x = self.th(self.conv4(x))
x = self.conv4(x)
return x
class PointNetRes(nn.Module):
def __init__(self):
super(PointNetRes, self).__init__()
self.conv1 = torch.nn.Conv1d(
4, 64, kernel_size=5, padding=2, padding_mode='replicate')
self.conv2 = torch.nn.Conv1d(
64, 128, kernel_size=5, padding=2, padding_mode='replicate')
self.conv3 = torch.nn.Conv1d(
128, 1024, kernel_size=5, padding=2, padding_mode='replicate')
self.conv4 = torch.nn.Conv1d(
1088, 512, kernel_size=5, padding=2, padding_mode='replicate')
self.conv5 = torch.nn.Conv1d(
512, 256, kernel_size=5, padding=2, padding_mode='replicate')
self.conv6 = torch.nn.Conv1d(
256, 128, kernel_size=5, padding=2, padding_mode='replicate')
self.conv7 = torch.nn.Conv1d(
128, 3, kernel_size=5, padding=2, padding_mode='replicate')
self.bn1 = torch.nn.BatchNorm1d(64)
self.bn2 = torch.nn.BatchNorm1d(128)
self.bn3 = torch.nn.BatchNorm1d(1024)
self.bn4 = torch.nn.BatchNorm1d(512)
self.bn5 = torch.nn.BatchNorm1d(256)
self.bn6 = torch.nn.BatchNorm1d(128)
self.bn7 = torch.nn.BatchNorm1d(3)
self.th = nn.Tanh()
def forward(self, x):
npoints = x.size()[2]
x = F.relu(self.bn1(self.conv1(x)))
pointfeat = x
x = F.relu(self.bn2(self.conv2(x)))
x = self.bn3(self.conv3(x))
x, _ = torch.max(x, 2)
x = x.view(-1, 1024)
x = x.view(-1, 1024, 1).repeat(1, 1, npoints)
x = torch.cat([x, pointfeat], 1)
x = F.relu(self.bn4(self.conv4(x)))
x = F.relu(self.bn5(self.conv5(x)))
x = F.relu(self.bn6(self.conv6(x)))
x = self.th(self.conv7(x))
return x
class Network(nn.Module):
def __init__(self,
num_points=8192,
n_regions=16,
dim_pn=256,
sp_points=1024):
super(Network, self).__init__()
self.num_points = num_points
self.dim_pn = dim_pn
self.n_regions = n_regions
self.sp_points = sp_points
self.sp_ratio = n_regions
self.pn_enc = nn.Sequential(
PointNetFeat(num_points, 1024), nn.Linear(1024, dim_pn),
nn.BatchNorm1d(dim_pn), nn.ReLU())
self.softpool_enc = SoftPoolFeat(
num_points,
regions=self.n_regions,
sp_points=2048,
sp_ratio=self.sp_ratio)
# Firstly we do not merge information among regions
# We merge regional informations in latent space
self.reg_conv1 = nn.Sequential(
nn.Conv2d(
1 * dim_pn,
dim_pn,
kernel_size=(1, 3),
stride=(1, 2),
padding=(0, 1),
padding_mode='same'), nn.Tanh())
self.reg_conv2 = nn.Sequential(
nn.Conv2d(
dim_pn,
2 * dim_pn,
kernel_size=(1, 3),
stride=(1, 2),
padding=(0, 1),
padding_mode='same'), nn.Tanh())
self.reg_conv3 = nn.Sequential(
nn.Conv2d(
2 * dim_pn,
2 * dim_pn,
kernel_size=(1, 3),
stride=(1, 2),
padding=(0, 1),
padding_mode='same'), nn.Tanh())
# input for embedding has 32 points now, then in total it is regions x 32 points
# down-sampled by 2*2*2=8
ebd_pnt_reg = self.num_points // (self.sp_ratio * 8)
if self.n_regions == 1:
ebd_pnt_out = 256
elif self.n_regions > 1:
ebd_pnt_out = 512
self.embedding = nn.Sequential(
nn.MaxPool2d(
kernel_size=(1, ebd_pnt_reg), stride=(1, ebd_pnt_reg)),
nn.MaxPool2d(
kernel_size=(1, self.n_regions), stride=(1, self.n_regions)),
nn.ConvTranspose2d(
2 * dim_pn,
2 * dim_pn,
kernel_size=(1, ebd_pnt_out),
stride=(1, ebd_pnt_out),
padding=(0, 0)))
"""
self.embedding = nn.Sequential(
nn.MaxPool2d(
kernel_size=(1, ebd_pnt_reg), stride=(1, ebd_pnt_reg)),
nn.Conv2d(
2 * dim_pn,
2 * dim_pn,
kernel_size=(1, self.n_regions)),
nn.ConvTranspose2d(
2 * dim_pn,
2 * dim_pn,
kernel_size=(1, 4),
stride=(1, 4),
padding=(0, 0)),
nn.UpsamplingBilinear2d(scale_factor=(1, 4)),
nn.Conv2d(
2 * dim_pn,
2 * dim_pn,
kernel_size=(1, 5),
| |
import sqlite3
from sqlite3 import Error
class StoringData:
__instance = None
@staticmethod
def getInstance():
# Static access method.
if StoringData.__instance is None:
StoringData()
return StoringData.__instance
def __init__(self): # init method or constructor
# Virtually private constructor.
if StoringData.__instance is not None:
raise Exception("This class is a singleton!")
else:
StoringData.__instance = self
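# Example usage (an illustrative sketch; the database file name and the
# field values below are placeholders, not part of this module):
#   store = StoringData.getInstance()
#   conn = store.create_connection("imdb.sqlite")
#   row_id = store.create_imdb(conn, ("tt0000001", "Some Title", "2020",
#                                     "PG", "90 min", "Drama", 7.5, 1000,
#                                     "Some Director", "Some Actors"))
#   store.close_connection(conn)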
# create a database connection to a SQLite database
def create_connection(self, db_file):
conn = None
try:
# create a database connection
conn = sqlite3.connect(db_file)
self.__check_to_create_table(conn, db_file)
except Error as e:
print("Failed to create connection ", e)
return conn
# close current a database connection to a SQLite database
def close_connection(self, conn):
if (conn):
conn.close()
# check if not exist to create a table IMDb
# :param conn: current connection to the SQLite database
# :param db_file: database file path
def __check_to_create_table(self, conn, db_file):
sql_create_imdb_table = """CREATE TABLE IF NOT EXISTS IMDb (
Id INTEGER PRIMARY KEY,
Key TEXT,
Title TEXT,
Release TEXT,
Audience_Rating TEXT,
Runtime TEXT,
Genre TEXT,
Imdb_Rating DECIMAL(1,1),
Votes INTEGER,
Director TEXT,
Actors TEXT,
Desc TEXT,
Created_On TEXT,
Modified_On TEXT
);"""
try:
cur = conn.cursor()
#get the count of tables with the name
cur.execute(''' SELECT count(name) FROM sqlite_master WHERE type='table' AND name='IMDb' ''')
#if the count is 1, then table exists
if cur.fetchone()[0]==0 :
# create IMDb table
cur.execute(sql_create_imdb_table)
except Error as e:
if conn:
conn.rollback()
print("Error create the database connection", e)
finally:
cur.close()
# If you want to pass arguments to the INSERT statement, you use the question mark (?) as the placeholder for each argument.
# Create a new IMDb item
# :param conn: current connection to the SQLite database
# :param imdb: IMDb item (Key, Title, Release, Audience_Rating, Runtime, Genre, Imdb_Rating, Votes, Director, Actors, Desc, Created_On, Modified_On)
# :return: lastrowid (the AUTO_INCREMENT value for the new row)
def create_imdb(self, conn, imdb):
lastRowID = 0
try:
sql_insert_query = ''' INSERT INTO IMDb
(
Key, Title, Release, Audience_Rating, Runtime, Genre, Imdb_Rating, Votes, Director, Actors
)
VALUES
(
?, ?, ?, ?, ?, ?, ?, ?, ?, ?
);
'''
# create a Cursor object by calling the cursor method of the Connection object.
cur = conn.cursor()
cur.execute(sql_insert_query, imdb)
conn.commit()
lastRowID = cur.lastrowid
except Error as e:
if conn:
conn.rollback()
print("Error insert data from sqlite table", e)
finally:
cur.close()
return lastRowID
# If you want to pass arguments to the INSERT statement, you use the question mark (?) as the placeholder for each argument.
# Create a new multi IMDb item
# :param conn: current connection to the SQLite database
# :param imdbs: multi IMDb item (Key, Title, Release, Audience_Rating, Runtime, Genre, Imdb_Rating, Votes, Director, Actors, Desc, Created_On, Modified_On)
# :return: result (row count after inserted)
def create_multi_imdb(self, conn, imdbs):
result = 0
try:
# , Desc, Created_On, Modified_On
sql_insert_query = ''' INSERT INTO IMDb
(
Key, Title, Release, Audience_Rating, Runtime, Genre, Imdb_Rating, Votes, Director, Actors, Desc
)
VALUES
(
?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?
);
'''
# create a Cursor object by calling the cursor method of the Connection object.
cur = conn.cursor()
cur.executemany(sql_insert_query, imdbs)
conn.commit()
result = cur.rowcount
except Error as e: # the changes are rolled back and an error message is printed to the terminal.
if conn:
conn.rollback()
print("Error insert data from sqlite table", e)
finally:
cur.close()
return result
# Read an IMDb item by key
# :param conn: current connection to the SQLite database
# :param key: Key of IMDb
# :return: IMDb item(Id, Key, Title, Release, Audience_Rating, Runtime, Genre, Imdb_Rating, Votes, Director, Actors, Desc, Created_On, Modified_On)
def read_imdb(self, conn, key):
imdb_item = None
try:
sql_select_query = ''' SELECT Id, Key, Title, Release, Audience_Rating, Runtime, Genre, Imdb_Rating, Votes, Director, Actors
FROM IMDb
WHERE Key = ?; '''
# create a Cursor object by calling the cursor method of the Connection object.
cur = conn.cursor()
cur.execute(sql_select_query, [key])
imdb_item = cur.fetchone()
except Error as e:
if conn:
conn.rollback()
print("Error reading data from sqlite table", e)
finally:
cur.close()
return imdb_item
# If you want to pass arguments to the UPDATE statement, you use the question mark (?) as the placeholder for each argument.
# Update an existing IMDb item in the IMDb table
# :param conn: current connection to the SQLite database
# :param imdb: IMDb item(Id, Key, Title, Release, Audience_Rating, Runtime, Genre, Imdb_Rating, Votes, Director, Actors, Desc, Created_On, Modified_On)
# :return: lastrowid (imdb id updated)
def update_imdb(self, conn, imdb):
lastRowID = 0
try:
# ,
# Desc = ?,
# Modified_On = ?
sql_update_query = ''' UPDATE IMDb
SET Title = ?,
Release = ?,
Audience_Rating = ?,
Runtime = ?,
Genre = ?,
Imdb_Rating = ?,
Votes = ?,
Director = ?,
Actors = ?
WHERE Key = ?; '''
# create a Cursor object by calling the cursor method of the Connection object.
cur = conn.cursor()
cur.execute(sql_update_query, (imdb[1], imdb[2], imdb[3], imdb[4], imdb[5], imdb[6], imdb[7], imdb[8], imdb[9], imdb[0],))
conn.commit()
lastRowID = cur.lastrowid
except Error as e:
if conn:
conn.rollback()
print("Error update data from sqlite table", e)
finally:
cur.close()
return lastRowID
# If you want to pass arguments to the UPDATE statement, you use the question mark (?) as the placeholder for each argument.
# Update the Modified_On value of an existing IMDb row
# :param conn: current connection
# :param modifiedOn: Modified_On of IMDb table
# :param key: Key of the IMDb row to update
# :return: lastrowid (imdb id updated)
def update_imdb_modifiedOn(self, conn, modifiedOn, key):
lastRowID = 0
try:
sql_update_query = ''' UPDATE IMDb
SET Modified_On = ?
WHERE Key = ?; '''
# create a Cursor object by calling the cursor method of the Connection object.
cur = conn.cursor()
cur.execute(sql_update_query, (modifiedOn, key, ))
conn.commit()
lastRowID = cur.lastrowid
except Error as e:
if conn:
conn.rollback()
print("Error update data from sqlite table", e)
finally:
cur.close()
return lastRowID
# If you want to pass arguments to the DELETE statement, you use the question mark (?) as the placeholder for each argument.
# Delete an IMDb item by key
# :param conn: Connection to the SQLite database
# :param key: Key of the IMDb item
# :return: result (row count after deletion)
def delete_imdb_by_Id(self, conn, key):
result = 0
try:
sql_delete_query = 'DELETE FROM IMDb WHERE Key = ?'
# create a Cursor object by calling the cursor method of the Connection object.
cur = conn.cursor()
cur.execute(sql_delete_query, (key,))
conn.commit()
result = cur.rowcount
except Error as e:
if conn:
conn.rollback()
print("Error delete item from sqlite table", e)
finally:
cur.close()
return result
# Delete all rows in the IMDb table
# :param conn: Connection to the SQLite database
# :return: result (row count after deleted)
def delete_all_imdb(self, conn):
result = 0
try:
sql_delete_query = 'DELETE FROM IMDb'
# create a Cursor object by calling the cursor method of the Connection object.
cur = conn.cursor()
cur.execute(sql_delete_query)
conn.commit()
result = cur.rowcount
except Error as e:
if conn:
conn.rollback()
print("Error delete all item from sqlite table", e)
finally:
cur.close()
return result
# Delete all rows with an empty Key from the IMDb table
# :param conn: Connection to the SQLite database
# :return: result (row count after deletion)
def delete_all_empty_imdb_key(self, conn):
result = 0
try:
sql_delete_query = "DELETE FROM IMDb WHERE Key = '' "
# create a Cursor object by calling the cursor method of the Connection object.
cur = conn.cursor()
cur.execute(sql_delete_query)
conn.commit()
result = cur.rowcount
except Error as e:
if conn:
conn.rollback()
print("Error delete all empty item from sqlite table", e)
finally:
cur.close()
return result
# VACUUM the imdb_sqlite database. Reasons to run it:
# ---First, the database file size otherwise remains unchanged after deletes; VACUUM reclaims the free space
# ---Second, it defragments a database that has a high number of inserts, updates, and deletes
# ---Third, frequent updates can decrease the number of rows stored in a single page, increasing the number of pages needed to hold a table; VACUUM repacks them
# :param conn: Connection to the SQLite database
def vacuum_imdb_sqlite(self, | |
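# Usage sketch (assumes `db` is an instance of this helper class and `conn` is an open
# sqlite3 connection; VACUUM rewrites the whole file, so it can be slow on large databases):
#   db.vacuum_imdb_sqlite(conn)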
import os
import sys
import re
import ast
from heapq import nlargest
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from matplotlib.patches import Rectangle, Circle, PathPatch, Path
import numpy as np
import scipy.interpolate
import tkinter as tk
import cv2
import traceback
from itertools import combinations
from collections import namedtuple
from natsort import natsorted
# from MyWidgets import Slider, Button, MyRadioButtons
from skimage.measure import label, regionprops
from functools import partial
import skimage.filters
import skimage.measure
import skimage.morphology
import skimage.exposure
import skimage.draw
import skimage.registration
import skimage.color
import skimage.segmentation
from matplotlib.backends.backend_tkagg import (
FigureCanvasTkAgg, NavigationToolbar2Tk
)
import seaborn as sns
import pandas as pd
import math
import time
import pyqtgraph as pg
from pyqtgraph.Qt import QtGui
from PyQt5 import QtCore
from PyQt5.QtGui import QIcon, QFontMetrics, QKeySequence, QFont
from PyQt5.QtCore import Qt, QSize, QEvent, pyqtSignal, QEventLoop, QTimer
from PyQt5.QtWidgets import (
QAction, QApplication, QMainWindow, QMenu, QLabel, QToolBar,
QScrollBar, QWidget, QVBoxLayout, QLineEdit, QPushButton,
QHBoxLayout, QDialog, QFormLayout, QListWidget, QAbstractItemView,
QButtonGroup, QCheckBox, QSizePolicy, QComboBox, QSlider, QGridLayout,
QSpinBox, QToolButton, QTableView, QTextBrowser, QDoubleSpinBox,
QScrollArea, QFrame, QProgressBar, QGroupBox, QRadioButton,
QDockWidget, QMessageBox, QStyle, QPlainTextEdit, QSpacerItem
)
from . import myutils, load, prompts, widgets, core, measurements, html_utils
from . import is_mac, is_win, is_linux
from . import qrc_resources
pg.setConfigOption('imageAxisOrder', 'row-major') # best performance
font = QtGui.QFont()
font.setPixelSize(13)
class baseDialog(QDialog):
def __init__(self, parent=None):
super().__init__(parent)
def exec_(self):
self.show(block=True)
def show(self, block=False):
self.setWindowFlags(Qt.Dialog | Qt.WindowStaysOnTopHint)
super().show()
if block:
self.loop = QEventLoop()
self.loop.exec_()
def closeEvent(self, event):
if hasattr(self, 'loop'):
self.loop.exit()
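# Usage sketch (assumes a running QApplication): the dialog blocks until it is closed,
# mirroring QDialog.exec_(), while staying on top of other windows:
#   dlg = baseDialog()
#   dlg.exec_()  # returns only after closeEvent exits the internal QEventLoop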
class installJavaDialog(widgets.myMessageBox):
def __init__(self, parent=None):
super().__init__(parent)
self.setWindowTitle('Install Java')
self.setIcon('SP_MessageBoxWarning')
txt_macOS = ("""
<p style="font-size:13px">
Your system doesn't have the <code>Java Development Kit</code>
installed<br> and/or a C++ compiler, which is required for the installation of
<code>javabridge</code>.<br><br>
<b>Cell-ACDC is now going to install Java for you</b>.<br><br>
<i><b>NOTE: After clicking on "Install", follow the instructions<br>
on the terminal</b>. You will be asked to confirm steps and insert<br>
your password to allow the installation.</i><br><br>
If you prefer to do it manually, cancel the process<br>
and follow the instructions below.
</p>
""")
txt_windows = ("""
<p style="font-size:13px">
Unfortunately, installing pre-compiled version of
<code>javabridge</code> <b>failed</b>.<br><br>
Cell-ACDC is going to <b>try to compile it now</b>.<br><br>
However, <b>before proceeding</b>, you need to install
<code>Java Development Kit</code><br> and a <b>C++ compiler</b>.<br><br>
<b>See instructions below on how to install it.</b>
</p>
""")
if not is_win:
self.instructionsButton = self.addButton('Show instructions...')
self.instructionsButton.setCheckable(True)
self.instructionsButton.disconnect()
self.instructionsButton.clicked.connect(self.showInstructions)
installButton = self.addButton('Install')
installButton.disconnect()
installButton.clicked.connect(self.installJava)
txt = txt_macOS
else:
okButton = self.addButton('Ok')
txt = txt_windows
self.cancelButton = self.addButton('Cancel')
label = self.addText(txt)
label.setWordWrap(False)
self.resizeCount = 0
def addInstructionsWindows(self):
self.scrollArea = QScrollArea()
_container = QWidget()
_layout = QVBoxLayout()
for t, text in enumerate(myutils.install_javabridge_instructions_text()):
label = QLabel()
label.setText(text)
if (t == 1 or t == 2):
label.setOpenExternalLinks(True)
label.setTextInteractionFlags(Qt.TextBrowserInteraction)
code_layout = QHBoxLayout()
code_layout.addWidget(label)
copyButton = QToolButton()
copyButton.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
copyButton.setIcon(QIcon(':edit-copy.svg'))
copyButton.setText('Copy link')
if t==1:
copyButton.textToCopy = myutils.jdk_windows_url()
code_layout.addWidget(copyButton, alignment=Qt.AlignLeft)
else:
copyButton.textToCopy = myutils.cpp_windows_url()
screenshotButton = QToolButton()
screenshotButton.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
screenshotButton.setIcon(QIcon(':cog.svg'))
screenshotButton.setText('See screenshot')
code_layout.addWidget(screenshotButton, alignment=Qt.AlignLeft)
code_layout.addWidget(copyButton, alignment=Qt.AlignLeft)
screenshotButton.clicked.connect(self.viewScreenshot)
copyButton.clicked.connect(self.copyToClipboard)
code_layout.setStretch(0, 2)
code_layout.setStretch(1, 0)
_layout.addLayout(code_layout)
else:
_layout.addWidget(label)
_container.setLayout(_layout)
self.scrollArea.setWidget(_container)
self.currentRow += 1
self.layout.addWidget(
self.scrollArea, self.currentRow, 1, alignment=Qt.AlignTop
)
# Stretch last row
self.currentRow += 1
self.layout.setRowStretch(self.currentRow, 1)
def viewScreenshot(self, checked=False):
self.screenShotWin = widgets.view_visualcpp_screenshot()
self.screenShotWin.show()
def addInstructionsMacOS(self):
self.scrollArea = QScrollArea()
_container = QWidget()
_layout = QVBoxLayout()
for t, text in enumerate(myutils.install_javabridge_instructions_text()):
label = QLabel()
label.setText(text)
# label.setWordWrap(True)
if (t == 1 or t == 2):
label.setWordWrap(True)
code_layout = QHBoxLayout()
code_layout.addWidget(label)
copyButton = QToolButton()
copyButton.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
copyButton.setIcon(QIcon(':edit-copy.svg'))
copyButton.setText('Copy')
if t==1:
copyButton.textToCopy = myutils._install_homebrew_command()
else:
copyButton.textToCopy = myutils._brew_install_java_command()
copyButton.clicked.connect(self.copyToClipboard)
code_layout.addWidget(copyButton, alignment=Qt.AlignLeft)
# code_layout.addStretch(1)
code_layout.setStretch(0, 2)
code_layout.setStretch(1, 0)
_layout.addLayout(code_layout)
else:
_layout.addWidget(label)
_container.setLayout(_layout)
self.scrollArea.setWidget(_container)
self.currentRow += 1
self.layout.addWidget(
self.scrollArea, self.currentRow, 1, alignment=Qt.AlignTop
)
# Stretch last row
self.currentRow += 1
self.layout.setRowStretch(self.currentRow, 1)
self.scrollArea.hide()
def addInstructionsLinux(self):
self.scrollArea = QScrollArea()
_container = QWidget()
_layout = QVBoxLayout()
for t, text in enumerate(myutils.install_javabridge_instructions_text()):
label = QLabel()
label.setText(text)
# label.setWordWrap(True)
if (t == 1 or t == 2 or t==3):
label.setWordWrap(True)
code_layout = QHBoxLayout()
code_layout.addWidget(label)
copyButton = QToolButton()
copyButton.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
copyButton.setIcon(QIcon(':edit-copy.svg'))
copyButton.setText('Copy')
if t==1:
copyButton.textToCopy = myutils._apt_update_command()
elif t==2:
copyButton.textToCopy = myutils._apt_install_java_command()
elif t==3:
copyButton.textToCopy = myutils._apt_gcc_command()
copyButton.clicked.connect(self.copyToClipboard)
code_layout.addWidget(copyButton, alignment=Qt.AlignLeft)
# code_layout.addStretch(1)
code_layout.setStretch(0, 2)
code_layout.setStretch(1, 0)
_layout.addLayout(code_layout)
else:
_layout.addWidget(label)
_container.setLayout(_layout)
self.scrollArea.setWidget(_container)
self.currentRow += 1
self.layout.addWidget(
self.scrollArea, self.currentRow, 1, alignment=Qt.AlignTop
)
# Stretch last row
self.currentRow += 1
self.layout.setRowStretch(self.currentRow, 1)
self.scrollArea.hide()
def copyToClipboard(self):
cb = QApplication.clipboard()
cb.clear(mode=cb.Clipboard)
cb.setText(self.sender().textToCopy, mode=cb.Clipboard)
print('Command copied!')
def showInstructions(self, checked):
if checked:
self.instructionsButton.setText('Hide instructions')
self.origHeight = self.height()
self.resize(self.width(), self.height()+300)
self.scrollArea.show()
else:
self.instructionsButton.setText('Show instructions...')
self.scrollArea.hide()
func = partial(self.resize, self.width(), self.origHeight)
QTimer.singleShot(50, func)
def installJava(self):
import subprocess
try:
if is_mac:
try:
subprocess.check_call(['brew', 'update'])
except Exception as e:
subprocess.run(
myutils._install_homebrew_command(),
check=True, text=True, shell=True
)
subprocess.run(
myutils._brew_install_java_command(),
check=True, text=True, shell=True
)
elif is_linux:
subprocess.run(
myutils._apt_gcc_command(),
check=True, text=True, shell=True
)
subprocess.run(
myutils._apt_update_command(),
check=True, text=True, shell=True
)
subprocess.run(
myutils._apt_install_java_command(),
check=True, text=True, shell=True
)
self.close()
except Exception as e:
print('=======================')
traceback.print_exc()
print('=======================')
msg = QMessageBox()
err_msg = ("""
<p style="font-size:13px">
Automatic installation of Java failed.<br><br>
Please, try manually by following the instructions provided
with the "Show instructions..." button. Thanks
</p>
""")
msg.critical(
self, 'Java installation failed', err_msg, msg.Ok
)
def show(self, block=False):
super().show(block=False)
print(is_linux)
if is_win:
self.addInstructionsWindows()
elif is_mac:
self.addInstructionsMacOS()
elif is_linux:
self.addInstructionsLinux()
self.move(self.pos().x(), 20)
if is_win:
self.resize(self.width(), self.height()+200)
if block:
self._block()
def exec_(self):
self.show(block=True)
class customAnnotationDialog(QDialog):
sigDeleteSelecAnnot = pyqtSignal(object)
def __init__(self, savedCustomAnnot, parent=None, state=None):
self.cancel = True
self.loop = None
self.clickedButton = None
self.savedCustomAnnot = savedCustomAnnot
self.internalNames = measurements.get_all_acdc_df_colnames()
super().__init__(parent)
self.setWindowTitle('Custom annotation')
self.setWindowFlags(Qt.Window | Qt.WindowStaysOnTopHint)
layout = widgets.myFormLayout()
row = 0
typeCombobox = QComboBox()
typeCombobox.addItems([
'Single time-point',
'Multiple time-points',
'Multiple values class'
])
if state is not None:
typeCombobox.setCurrentText(state['type'])
self.typeCombobox = typeCombobox
body_txt = ("""
<b>Single time-point</b> annotation: use this to annotate
an event that happens on a <b>single frame in time</b>
(e.g. cell division).
<br><br>
<b>Multiple time-points</b> annotation: use this to annotate
an event that has a <b>duration</b>, i.e., a start frame and a stop
frame (e.g. cell cycle phase).<br><br>
<b>Multiple values class</b> annotation: use this to annotate a class
that has <b>multiple values</b>. An example could be a cell cycle stage
that can have different values, such as 2-cells division
or 4-cells division.
""")
typeInfoTxt = (f'{html_utils.paragraph(body_txt)}')
self.typeWidget = widgets.formWidget(
typeCombobox, addInfoButton=True, labelTextLeft='Type: ',
parent=self, infoTxt=typeInfoTxt
)
layout.addFormWidget(self.typeWidget, row=row)
typeCombobox.currentTextChanged.connect(self.warnType)
row += 1
nameInfoTxt = ("""
<b>Name of the column</b> that will be saved in the <code>acdc_output.csv</code>
file.<br><br>
Valid characters are letters and numbers separated by underscore
or dash only.<br><br>
Additionally, some names are <b>reserved</b> because they are used
by Cell-ACDC for standard measurements.<br><br>
Internally reserved names:
""")
self.nameInfoTxt = (f'{html_utils.paragraph(nameInfoTxt)}')
self.nameWidget = widgets.formWidget(
widgets.alphaNumericLineEdit(), addInfoButton=True,
labelTextLeft='Name: ', parent=self, infoTxt=self.nameInfoTxt
)
self.nameWidget.infoButton.disconnect()
self.nameWidget.infoButton.clicked.connect(self.showNameInfo)
if state is not None:
self.nameWidget.widget.setText(state['name'])
self.nameWidget.widget.textChanged.connect(self.checkName)
layout.addFormWidget(self.nameWidget, row=row)
row += 1
self.nameInfoLabel = QLabel()
layout.addWidget(
self.nameInfoLabel, row, 0, 1, 2, alignment=Qt.AlignCenter
)
row += 1
spacing = QSpacerItem(10, 10)
layout.addItem(spacing, row, 0)
row += 1
symbolInfoTxt = ("""
<b>Symbol</b> that will be drawn on the annotated cell at
the requested time frame.
""")
symbolInfoTxt = (f'{html_utils.paragraph(symbolInfoTxt)}')
self.symbolWidget = widgets.formWidget(
widgets.pgScatterSymbolsCombobox(), addInfoButton=True,
labelTextLeft='Symbol: ', parent=self, infoTxt=symbolInfoTxt
)
if state is not None:
self.symbolWidget.widget.setCurrentText(state['symbol'])
layout.addFormWidget(self.symbolWidget, row=row)
row += 1
shortcutInfoTxt = ("""
<b>Shortcut</b> that you can use to <b>activate/deactivate</b> annotation
of this event.<br><br> Leave empty if you don't need a shortcut.
""")
shortcutInfoTxt = (f'{html_utils.paragraph(shortcutInfoTxt)}')
self.shortcutWidget = widgets.formWidget(
widgets.shortCutLineEdit(), addInfoButton=True,
labelTextLeft='Shortcut: ', parent=self, infoTxt=shortcutInfoTxt
)
if state is not None:
self.shortcutWidget.widget.setText(state['shortcut'])
layout.addFormWidget(self.shortcutWidget, row=row)
row += 1
descInfoTxt = ("""
<b>Description</b> will be used as the <b>tool tip</b> that will be
displayed when you hover with the mouse cursor on the toolbar button
specific for this annotation
""")
descInfoTxt = (f'{html_utils.paragraph(descInfoTxt)}')
self.descWidget = widgets.formWidget(
QPlainTextEdit(), addInfoButton=True,
labelTextLeft='Description: ', parent=self, infoTxt=descInfoTxt
)
if state is not None:
self.descWidget.widget.setPlainText(state['description'])
layout.addFormWidget(self.descWidget, row=row)
row += 1
optionsGroupBox = QGroupBox('Additional options')
optionsLayout = QGridLayout()
toggle = widgets.Toggle()
toggle.setChecked(True)
self.keepActiveToggle = toggle
toggleLabel = QLabel('Keep tool active after using it: ')
colorButtonLabel = QLabel('Symbol color: ')
self.hideAnnotTooggle = widgets.Toggle()
self.hideAnnotTooggle.setChecked(True)
hideAnnotTooggleLabel = QLabel(
'Hide annotation when button is not active: '
)
self.colorButton = pg.ColorButton(color=(255, 0, 0))
self.colorButton.clicked.disconnect()
self.colorButton.clicked.connect(self.selectColor)
optionsLayout.setColumnStretch(0, 1)
optRow = 0
optionsLayout.addWidget(toggleLabel, optRow, 1)
optionsLayout.addWidget(toggle, optRow, 2)
optRow += 1
optionsLayout.addWidget(hideAnnotTooggleLabel, optRow, 1)
optionsLayout.addWidget(self.hideAnnotTooggle, optRow, 2)
optionsLayout.setColumnStretch(3, 1)
optRow += 1
optionsLayout.addWidget(colorButtonLabel, optRow, 1)
optionsLayout.addWidget(self.colorButton, optRow, 2)
optionsGroupBox.setLayout(optionsLayout)
layout.addWidget(optionsGroupBox, row, 1, alignment=Qt.AlignCenter)
optionsInfoButton = QPushButton(self)
optionsInfoButton.setCursor(Qt.WhatsThisCursor)
optionsInfoButton.setIcon(QIcon(":info.svg"))
optionsInfoButton.clicked.connect(self.showOptionsInfo)
layout.addWidget(optionsInfoButton, row, 3, alignment=Qt.AlignRight)
row += 1
layout.addItem(QSpacerItem(5, | |
def execute(self, context):
ConvertedObj = Ue4SubObj_set("SK_Socket")
if len(ConvertedObj) > 0 :
self.report({'INFO'}, str(len(ConvertedObj)) + " object(s) of the selection have been converted to UE4 Socket." )
else :
self.report({'WARNING'}, "Please select two objects. (Active object is the owner of the socket)")
return {'FINISHED'}
def draw(self, context):
addon_prefs = bpy.context.preferences.addons["blender-for-unrealengine"].preferences
def ActiveModeIs(targetMode): #Return True if the active object's mode matches targetMode
obj = bpy.context.active_object
if obj is not None:
if obj.mode == targetMode:
return True
return False
def ActiveTypeIs(targetType): #Return True if the active object's type matches targetType
obj = bpy.context.active_object
if obj is not None:
if obj.type == targetType:
return True
return False
def FoundTypeInSelect(targetType): #Return True if a specific type is found
for obj in bpy.context.selected_objects:
if obj != bpy.context.active_object:
if obj.type == targetType:
return True
return False
layout = self.layout
layout.label(text="Convert selected object to Unreal collision or socket", icon='PHYSICS')
layout.label(text="Select your collider shape(s) or Empty(s) then the owner object.")
convertButtons = layout.row().split(factor = 0.80 )
convertStaticCollisionButtons = convertButtons.column()
convertStaticCollisionButtons.enabled = ActiveModeIs("OBJECT") and ActiveTypeIs("MESH") and FoundTypeInSelect("MESH")
convertStaticCollisionButtons.operator("object.converttoboxcollision", icon='MESH_CUBE')
convertStaticCollisionButtons.operator("object.converttoconvexcollision", icon='MESH_ICOSPHERE')
convertStaticCollisionButtons.operator("object.converttocapsulecollision", icon='MESH_CAPSULE')
convertStaticCollisionButtons.operator("object.converttospherecollision", icon='MESH_UVSPHERE')
convertButtons = self.layout.row().split(factor = 0.80 )
convertStaticSocketButtons = convertButtons.column()
convertStaticSocketButtons.enabled = ActiveModeIs("OBJECT") and ActiveTypeIs("MESH") and FoundTypeInSelect("EMPTY")
convertStaticSocketButtons.operator("object.converttostaticsocket", icon='OUTLINER_DATA_EMPTY')
if addon_prefs.UseGeneratedScripts == True:
layout.label(text="Select the Empty(s) then the owner bone in PoseMode.")
convertButtons = self.layout.row().split(factor = 0.80 )
convertSkeletalSocketButtons = convertButtons.column()
convertSkeletalSocketButtons.enabled = ActiveModeIs("POSE") and ActiveTypeIs("ARMATURE") and FoundTypeInSelect("EMPTY")
convertSkeletalSocketButtons.operator("object.converttoskeletalsocket", icon='OUTLINER_DATA_EMPTY')
class BFU_PT_Nomenclature(bpy.types.Panel):
#Is FPS Export panel
bl_idname = "BFU_PT_Nomenclature"
bl_label = "Nomenclature"
bl_space_type = "VIEW_3D"
bl_region_type = "UI"
bl_category = "Unreal Engine 4"
bl_parent_id = "BFU_PT_BlenderForUnreal"
class BFU_MT_NomenclaturePresets(bpy.types.Menu):
bl_label = 'Nomenclature Presets'
preset_subdir = 'blender-for-unrealengine/nomenclature-presets'
preset_operator = 'script.execute_preset'
draw = bpy.types.Menu.draw_preset
from bl_operators.presets import AddPresetBase
class BFU_OT_AddNomenclaturePreset(AddPresetBase, Operator):
bl_idname = 'object.add_nomenclature_preset'
bl_label = 'Add or remove a preset for Nomenclature'
bl_description = 'Add or remove a preset for Nomenclature'
preset_menu = 'BFU_MT_NomenclaturePresets'
# Common variable used for all preset values
preset_defines = [
'obj = bpy.context.object',
'scene = bpy.context.scene'
]
# Properties to store in the preset
preset_values = [
'scene.static_prefix_export_name',
'scene.skeletal_prefix_export_name',
'scene.alembic_prefix_export_name',
'scene.anim_prefix_export_name',
'scene.pose_prefix_export_name',
'scene.camera_prefix_export_name',
'scene.anim_subfolder_name',
'scene.export_static_file_path',
'scene.export_skeletal_file_path',
'scene.export_alembic_file_path',
'scene.export_camera_file_path',
'scene.export_other_file_path',
'scene.file_export_log_name',
'scene.file_import_asset_script_name',
'scene.file_import_sequencer_script_name',
]
# Directory to store the presets
preset_subdir = 'blender-for-unrealengine/nomenclature-presets'
#Prefix
bpy.types.Scene.static_prefix_export_name = bpy.props.StringProperty(
name = "StaticMesh Prefix",
description = "Prefix of staticMesh",
maxlen = 32,
default = "SM_")
bpy.types.Scene.skeletal_prefix_export_name = bpy.props.StringProperty(
name = "SkeletalMesh Prefix ",
description = "Prefix of SkeletalMesh",
maxlen = 32,
default = "SK_")
bpy.types.Scene.alembic_prefix_export_name = bpy.props.StringProperty(
name = "Alembic Prefix ",
description = "Prefix of Alembic (SkeletalMesh in unreal)",
maxlen = 32,
default = "SK_")
bpy.types.Scene.anim_prefix_export_name = bpy.props.StringProperty(
name = "AnimationSequence Prefix",
description = "Prefix of AnimationSequence",
maxlen = 32,
default = "Anim_")
bpy.types.Scene.pose_prefix_export_name = bpy.props.StringProperty(
name = "AnimationSequence(Pose) Prefix",
description = "Prefix of AnimationSequence with only one frame",
maxlen = 32,
default = "Pose_")
bpy.types.Scene.camera_prefix_export_name = bpy.props.StringProperty(
name = "Camera anim Prefix",
description = "Prefix of camera animations",
maxlen = 32,
default = "Cam_")
#Sub folder
bpy.types.Scene.anim_subfolder_name = bpy.props.StringProperty(
name = "Animations sub folder name",
description = "name of sub folder for animations",
maxlen = 32,
default = "Anim")
#File path
bpy.types.Scene.export_static_file_path = bpy.props.StringProperty(
name = "StaticMesh export file path",
description = "Choose a directory to export StaticMesh(s)",
maxlen = 512,
default = os.path.join("//","ExportedFbx","StaticMesh"),
subtype = 'DIR_PATH')
bpy.types.Scene.export_skeletal_file_path = bpy.props.StringProperty(
name = "SkeletalMesh export file path",
description = "Choose a directory to export SkeletalMesh(s)",
maxlen = 512,
default = os.path.join("//","ExportedFbx","SkeletalMesh"),
subtype = 'DIR_PATH')
bpy.types.Scene.export_alembic_file_path = bpy.props.StringProperty(
name = "Alembic export file path",
description = "Choose a directory to export Alembic(s)",
maxlen = 512,
default = os.path.join("//","ExportedFbx","Alembic"),
subtype = 'DIR_PATH')
bpy.types.Scene.export_camera_file_path = bpy.props.StringProperty(
name = "Camera export file path",
description = "Choose a directory to export Camera(s)",
maxlen = 512,
default = os.path.join("//","ExportedFbx","Sequencer"),
subtype = 'DIR_PATH')
bpy.types.Scene.export_other_file_path = bpy.props.StringProperty(
name = "Other export file path",
description = "Choose a directory to export text file and other",
maxlen = 512,
default = os.path.join("//","ExportedFbx"),
subtype = 'DIR_PATH')
#File name
bpy.types.Scene.file_export_log_name = bpy.props.StringProperty(
name = "Export log name",
description = "Export log name",
maxlen = 64,
default = "ExportLog.txt")
bpy.types.Scene.file_import_asset_script_name = bpy.props.StringProperty(
name = "Import asset script name",
description = "Import asset script name",
maxlen = 64,
default = "ImportAssetScript.py")
bpy.types.Scene.file_import_sequencer_script_name = bpy.props.StringProperty(
name = "Import sequencer script Name",
description = "Import sequencer script name",
maxlen = 64,
default = "ImportSequencerScript.py")
def draw(self, context):
scn = context.scene
addon_prefs = bpy.context.preferences.addons["blender-for-unrealengine"].preferences
#Presets
row = self.layout.row(align=True)
row.menu('BFU_MT_NomenclaturePresets', text='Nomenclature Presets')
row.operator('object.add_nomenclature_preset', text='', icon='ADD')
row.operator('object.add_nomenclature_preset', text='', icon='REMOVE').remove_active = True
#Prefix
propsPrefix = self.layout.row()
propsPrefix = propsPrefix.column()
propsPrefix.prop(scn, 'static_prefix_export_name', icon='OBJECT_DATA')
propsPrefix.prop(scn, 'skeletal_prefix_export_name', icon='OBJECT_DATA')
propsPrefix.prop(scn, 'alembic_prefix_export_name', icon='OBJECT_DATA')
propsPrefix.prop(scn, 'anim_prefix_export_name', icon='OBJECT_DATA')
propsPrefix.prop(scn, 'pose_prefix_export_name', icon='OBJECT_DATA')
propsPrefix.prop(scn, 'camera_prefix_export_name', icon='OBJECT_DATA')
#Sub folder
propsSub = self.layout.row()
propsSub = propsSub.column()
propsSub.prop(scn, 'anim_subfolder_name', icon='FILE_FOLDER')
#File path
filePath = self.layout.row()
filePath = filePath.column()
filePath.prop(scn, 'export_static_file_path')
filePath.prop(scn, 'export_skeletal_file_path')
filePath.prop(scn, 'export_alembic_file_path')
filePath.prop(scn, 'export_camera_file_path')
filePath.prop(scn, 'export_other_file_path')
#File name
fileName = self.layout.row()
fileName = fileName.column()
fileName.prop(scn, 'file_export_log_name', icon='FILE')
if addon_prefs.UseGeneratedScripts == True:
fileName.prop(scn, 'file_import_asset_script_name', icon='FILE')
fileName.prop(scn, 'file_import_sequencer_script_name', icon='FILE')
class BFU_PT_ImportScript(bpy.types.Panel):
#Is Import script panel
bl_idname = "BFU_PT_ImportScript"
bl_label = "Import Script"
bl_space_type = "VIEW_3D"
bl_region_type = "UI"
bl_category = "Unreal Engine 4"
bl_parent_id = "BFU_PT_BlenderForUnreal"
bpy.types.Scene.unreal_import_location = bpy.props.StringProperty(
name = "Unreal import location",
description = "Unreal assets import location in /Game/",
maxlen = 512,
default = 'ImportedFbx')
bpy.types.Scene.unreal_levelsequence_import_location = bpy.props.StringProperty(
name = "Unreal sequencer import location",
description = "Unreal sequencer import location in /Game/",
maxlen = 512,
default = r'ImportedFbx/Sequencer')
bpy.types.Scene.unreal_levelsequence_name = bpy.props.StringProperty(
name = "Unreal sequencer name",
description = "Unreal sequencer name",
maxlen = 512,
default = 'MySequence')
def draw(self, context):
scn = context.scene
addon_prefs = bpy.context.preferences.addons["blender-for-unrealengine"].preferences
#Sub folder
if addon_prefs.UseGeneratedScripts == True:
propsSub = self.layout.row()
propsSub = propsSub.column()
propsSub.prop(scn, 'unreal_import_location', icon='FILE_FOLDER')
propsSub.prop(scn, 'unreal_levelsequence_import_location', icon='FILE_FOLDER')
propsSub.prop(scn, 'unreal_levelsequence_name', icon='FILE')
else:
self.layout.label(text='(Generated scripts are deactivated.)')
class BFU_OT_UnrealExportedAsset(bpy.types.PropertyGroup):
#[AssetName , AssetType , ExportPath, ExportTime]
assetName: StringProperty(default="None")
assetType: StringProperty(default="None") #return from GetAssetType()
exportPath: StringProperty(default="None")
exportTime: FloatProperty(default=0)
object: PointerProperty(type=bpy.types.Object)
class BFU_OT_UnrealPotentialError(bpy.types.PropertyGroup):
type: IntProperty(default=0) #0:Info, 1:Warning, 2:Error
object: PointerProperty(type=bpy.types.Object)
vertexErrorType: StringProperty(default="None") #0:Info, 1:Warning, 2:Error
itemName: StringProperty(default="None")
text: StringProperty(default="Unknown")
correctRef: StringProperty(default="None")
correctlabel: StringProperty(default="Fix it !")
correctDesc: StringProperty(default="Correct target error")
class BFU_PT_Export(bpy.types.Panel):
#Is Export panel
bl_idname = "BFU_PT_Export"
bl_label = "Export"
bl_space_type = "VIEW_3D"
bl_region_type = "UI"
bl_category = "Unreal Engine 4"
bl_parent_id = "BFU_PT_BlenderForUnreal"
class BFU_OT_ShowAssetToExport(Operator):
bl_label = "Show asset(s)"
bl_idname = "object.showasset"
bl_description = "Click to show assets that are to be exported."
def execute(self, context):
obj = context.object
assets = GetFinalAssetToExport()
popup_title = "Assets list"
if len(assets) > 0:
popup_title = str(len(assets))+' asset(s) will be exported.'
else:
popup_title = 'No exportable assets were found.'
def draw(self, context):
col = self.layout.column()
for asset in assets:
row = col.row()
if asset.obj is not None:
if asset.action is not None:
if (type(asset.action) is bpy.types.Action): #Action name
action = asset.action.name
elif (type(asset.action) is bpy.types.AnimData): #Nonlinear name
action = asset.obj.NLAAnimName
else:
action = "..."
row.label(text="- ["+asset.obj.name+"] --> "+action+" ("+asset.type+")")
else:
row.label(text="- "+asset.obj.name+" ("+asset.type+")")
else:
row.label(text="- ("+asset.type+")")
bpy.context.window_manager.popup_menu(draw, title=popup_title, icon='PACKAGE')
return {'FINISHED'}
class BFU_OT_CheckPotentialErrorPopup(Operator):
bl_label = "Check potential errors"
bl_idname = "object.checkpotentialerror"
bl_description = "Check potential errors"
correctedProperty = 0
class BFU_OT_FixitTarget(Operator):
bl_label = "Fix it !"
bl_idname = "object.fixit_objet"
bl_description = "Correct target error"
errorIndex : bpy.props.IntProperty(default=-1)
def execute(self, context):
result = TryToCorrectPotentialError(self.errorIndex)
self.report({'INFO'}, result)
return {'FINISHED'}
class BFU_OT_SelectObjetButton(Operator):
bl_label = "Select"
bl_idname = "object.select_error_objet"
bl_description = "Select target objet."
errorIndex : bpy.props.IntProperty(default=-1)
def execute(self, context):
result = SelectPotentialErrorObject(self.errorIndex)
return {'FINISHED'}
class BFU_OT_SelectVertexButton(Operator):
bl_label = "Select(Vertex)"
bl_idname = "object.select_error_vertex"
bl_description = "Select target vertex."
errorIndex : bpy.props.IntProperty(default=-1)
def execute(self, context):
result = SelectPotentialErrorVertex(self.errorIndex)
return {'FINISHED'}
def execute(self, context):
self.correctedProperty = CorrectBadProperty()
UpdateNameHierarchy()
UpdateUnrealPotentialError()
return {'FINISHED'}
def invoke(self, context, event):
self.correctedProperty = CorrectBadProperty()
UpdateNameHierarchy()
UpdateUnrealPotentialError()
wm = context.window_manager
return wm.invoke_popup(self, width = 1020)
def check(self, context):
return True
def draw(self, context):
layout = self.layout
if len(bpy.context.scene.potentialErrorList) > 0 :
popup_title = str(len(bpy.context.scene.potentialErrorList))+" potential error(s) found!"
else:
popup_title = "No potential error to correct!"
if self.correctedProperty > 0 :
CheckInfo = str(self.correctedProperty) + " properties corrected."
else:
CheckInfo = "no properties to correct."
layout.label(text=popup_title)
layout.label(text="Hierarchy names updated and " + CheckInfo)
layout.separator()
row = layout.row()
col = row.column()
for x in range(len(bpy.context.scene.potentialErrorList)):
error = bpy.context.scene.potentialErrorList[x]
myLine = col.box().split(factor = 0.85 )
#----
if error.type == 0:
msgType = 'INFO'
msgIcon = 'INFO'
elif error.type == 1:
msgType = 'WARNING'
msgIcon = 'ERROR'
elif error.type == 2:
msgType = 'ERROR'
msgIcon = 'CANCEL'
#----
errorFullMsg = msgType+": "+error.text
TextLine = myLine.column()
splitedText = errorFullMsg.split("\n")
for text, Line in enumerate(splitedText):
if (text<1):
TextLine.label(text=Line, icon=msgIcon)
else:
TextLine.label(text=Line)
ButtonLine = myLine.column()
if (error.correctRef != "None"):
props = ButtonLine.operator("object.fixit_objet", text=error.correctlabel)
props.errorIndex = x
if (error.object is not None):
props = ButtonLine.operator("object.select_error_objet")
props.errorIndex = x
if (error.vertexErrorType != "None"):
props = ButtonLine.operator("object.select_error_vertex")
props.errorIndex = x
class BFU_OT_ExportForUnrealEngineButton(Operator):
bl_label = "Export for UnrealEngine 4"
bl_idname = "object.exportforunreal"
bl_description = "Export all assets of this scene."
def execute(self, context):
scene = bpy.context.scene
def GetIfOneTypeCheck():
if (scene.static_export
or scene.skeletal_export
or scene.anin_export
or scene.alembic_export
or scene.camera_export):
return True
else:
return False
if GetIfOneTypeCheck():
#Primary check if file is saved to avoid windows PermissionError
if bpy.data.is_saved:
scene.UnrealExportedAssetsList.clear()
start_time = time.process_time()
UpdateNameHierarchy()
bfu_ExportAsset.ExportForUnrealEngine()
bfu_WriteText.WriteAllTextFiles()
if len(scene.UnrealExportedAssetsList) > 0:
self.report({'INFO'}, "Export of "+str(len(scene.UnrealExportedAssetsList))+
" asset(s) has been finalized in "+str(time.process_time()-start_time)+" sec. Look in console for more info.")
print("========================= Exported asset(s) =========================")
print("")
for line in bfu_WriteText.WriteExportLog().splitlines():
print(line)
print("")
print("========================= ... =========================")
else:
self.report({'WARNING'}, "Not found assets with \"Export and child\" properties.")
else:
self.report({'WARNING'}, "Please save this blend file before export")
else:
self.report({'WARNING'}, "No asset type is checked.")
return {'FINISHED'}
#Categories :
bpy.types.Scene.static_export = bpy.props.BoolProperty(
name = "StaticMesh(s)",
description = "Check mark to export StaticMesh(es)",
default = True
)
bpy.types.Scene.skeletal_export = bpy.props.BoolProperty(
name = "SkeletalMesh(s)",
description = "Check mark to export SkeletalMesh(es)",
default = True
)
bpy.types.Scene.anin_export = bpy.props.BoolProperty(
name = "Animation(s)",
description = "Check mark to export Animation(s)",
default = True
)
bpy.types.Scene.alembic_export = bpy.props.BoolProperty(
name = "Alembic animation(s)",
description = "Check mark to export Alembic animation(s)",
default = False
)
bpy.types.Scene.camera_export = bpy.props.BoolProperty(
name = "Camera(s)",
description = "Check mark to export Camera(s)",
default = False
)
#Additional file
bpy.types.Scene.text_ExportLog = bpy.props.BoolProperty(
name = "Export Log",
description = "Check mark to write export log file",
default = True
)
bpy.types.Scene.text_ImportAssetScript = bpy.props.BoolProperty(
name = "Import assets script",
description = "Check mark to write import asset script file",
default = True
)
bpy.types.Scene.text_ImportSequenceScript = bpy.props.BoolProperty(
name = "Import sequence script",
description = "Check mark to write import sequencer script file",
default = True
)
bpy.types.Scene.text_AdditionalData = bpy.props.BoolProperty(
name = "Additional data",
description = "Check mark to write additional data like parameter or anim tracks",
default = True
)
#exportProperty
bpy.types.Scene.export_ExportOnlySelected = bpy.props.BoolProperty(
name = "Export only select",
description = "Check mark to export only selected export group. (export_recursive objects and auto childs) " ,
default = False
)
def draw(self, context):
scn = context.scene
addon_prefs = bpy.context.preferences.addons["blender-for-unrealengine"].preferences
#Categories :
layout = self.layout
row = layout.row()
col = row.column()
#Assets
AssetsCol = row.column()
AssetsCol.label(text="Asset types to export", icon='PACKAGE')
AssetsCol.prop(scn, 'static_export')
AssetsCol.prop(scn, 'skeletal_export')
AssetsCol.prop(scn, 'anin_export')
AssetsCol.prop(scn, 'alembic_export')
AssetsCol.prop(scn, 'camera_export')
layout.separator()
#Additional file
FileCol = row.column()
FileCol.label(text="Additional file", icon='PACKAGE')
FileCol.prop(scn, 'text_ExportLog')
FileCol.prop(scn, 'text_ImportAssetScript')
FileCol.prop(scn, 'text_ImportSequenceScript')
if addon_prefs.UseGeneratedScripts == True:
FileCol.prop(scn, 'text_AdditionalData')
#Feedback info :
AssetNum = len(GetFinalAssetToExport())
AssetInfo = layout.row().box().split(factor | |
w1 = {linear_regression.coef_[0]:.2f}, "
f"best intercept: w0 = {linear_regression.intercept_:.2f}"
)
# %% [markdown]
# It is important to note that the model learnt will not be able to handle
# the non-linear relationship between `x` and `y` since linear models assume
# the relationship between `x` and `y` to be linear. To obtain a better model,
# we have 3 main solutions:
#
# 1. choose a model that natively can deal with non-linearity,
# 2. "augment" features by including expert knowledge which can be used by
# the model, or
# 2. use a "kernel" to have a locally-based decision function instead of a
# global linear decision function.
#
# Let's illustrate quickly the first point by using a decision tree regressor
# which can natively handle non-linearity.
# %%
from sklearn.tree import DecisionTreeRegressor
tree = DecisionTreeRegressor(max_depth=3).fit(X, y)
y_pred = tree.predict(grid.reshape(-1, 1))
plt.plot(grid, y_pred, linewidth=3)
plt.scatter(x, y, color="k", s=9)
plt.xlabel("x", size=26)
plt.ylabel("y", size=26)
mse = mean_squared_error(y, tree.predict(X))
print(f"Lowest mean squared error = {mse:.2f}")
# %% [markdown]
# In this case, the model can handle non-linearity. Instead of having a model
# which can natively deal with non-linearity, we could also modify our data: we
# could create new features, derived from the original features, using some
# expert knowledge. For instance, here we know that we have a cubic and squared
# relationship between `x` and `y` (because we generated the data). Indeed,
# we could create two new features (`x^2` and `x^3`) using this information.
# %%
X = np.vstack([x, x ** 2, x ** 3]).T
linear_regression.fit(X, y)
grid_augmented = np.vstack([grid, grid ** 2, grid ** 3]).T
y_pred = linear_regression.predict(grid_augmented)
plt.plot(grid, y_pred, linewidth=3)
plt.scatter(x, y, color="k", s=9)
plt.xlabel("x", size=26)
plt.ylabel("y", size=26)
mse = mean_squared_error(y, linear_regression.predict(X))
print(f"Lowest mean squared error = {mse:.2f}")
# %% [markdown]
# We can see that even with a linear model, we can overcome the linearity
# limitation of the model by adding the non-linear component into the design of
# additional
# features. Here, we created new feature by knowing the way the target was
# generated. In practice, this is usually not the case. Instead, one is usually
# creating interaction between features (e.g. $x_1 * x_2$) with different orders
# (e.g. $x_1, x_1^2, x_1^3$), at the risk of
# creating a model with too much expressivity and which might overfit. In
# scikit-learn, the `PolynomialFeatures` is a transformer to create such
# feature interactions which we could have used instead of manually creating
# new features.
#
#
# To demonstrate `PolynomialFeatures`, we are going to use a scikit-learn
# pipeline which will first create the new features and then fit the model.
# We come back to scikit-learn pipelines and discuss them in more detail later.
# %%
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures
X = x.reshape(-1, 1)
model = make_pipeline(
PolynomialFeatures(degree=3), LinearRegression()
)
model.fit(X, y)
y_pred = model.predict(grid.reshape(-1, 1))
plt.plot(grid, y_pred, linewidth=3)
plt.scatter(x, y, color="k", s=9)
plt.xlabel("x", size=26)
plt.ylabel("y", size=26)
mse = mean_squared_error(y, model.predict(X))
print(f"Lowest mean squared error = {mse:.2f}")
# %% [markdown]
# Thus, we saw that `PolynomialFeatures` is actually doing the same
# operation that we did manually above.
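# %% [markdown]
# As a quick illustration of which features are generated: with two input features
# `x1` and `x2` and `degree=2`, `PolynomialFeatures` produces the columns
# `1, x1, x2, x1^2, x1*x2, x2^2`.
# %%
import numpy as np
from sklearn.preprocessing import PolynomialFeatures
toy = np.array([[2.0, 3.0]])
print(PolynomialFeatures(degree=2).fit_transform(toy))  # [[1. 2. 3. 4. 6. 9.]]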
# %% [markdown]
# **FIXME: it might be too complex to be introduced here but it seems good in
# the flow. However, we move away from linear models.**
#
# The last possibility to make a linear model more expressive is to use a
# "kernel". Instead of learning a weight per feature as we previously
# emphasized, a weight will be assigned per sample instead. However, not all
# samples will be used. This is the basis of the support vector machine
# algorithm.
# %%
from sklearn.svm import SVR
svr = SVR(kernel="linear").fit(X, y)
y_pred = svr.predict(grid.reshape(-1, 1))
plt.plot(grid, y_pred, linewidth=3)
plt.scatter(x, y, color="k", s=9)
plt.xlabel("x", size=26)
plt.ylabel("y", size=26)
mse = mean_squared_error(y, svr.predict(X))
print(f"Lowest mean squared error = {mse:.2f}")
# %% [markdown]
# The algorithm can be modified such that it can use a non-linear kernel. Then,
# it will compute interactions between samples using this non-linear
# kernel.
# %%
svr = SVR(kernel="poly", degree=3).fit(X, y)
y_pred = svr.predict(grid.reshape(-1, 1))
plt.plot(grid, y_pred, linewidth=3)
plt.scatter(x, y, color="k", s=9)
plt.xlabel("x", size=26)
plt.ylabel("y", size=26)
mse = mean_squared_error(y, svr.predict(X))
print(f"Lowest mean squared error = {mse:.2f}")
# %% [markdown]
# Therefore, kernel can make a model more expressive.
# %% [markdown]
# ### Linear regression in higher dimension
# In the previous example, we only used a single feature. But we have
# already shown that we could make the model more expressive
# by deriving new features from the original feature.
#
# Indeed, we could also use additional features (not related to the
# original feature) and these could help us to predict the target.
#
# We will load a dataset about house prices in California.
# The dataset consists of 8 features regarding the demography and geography of
# districts in California and the aim is to predict the median house price of
# each district. We will use all 8 features to predict the target, median
# house price.
# %%
from sklearn.datasets import fetch_california_housing
X, y = fetch_california_housing(as_frame=True, return_X_y=True)
X.head()
# %% [markdown]
# We will compare the score of `LinearRegression` and `Ridge` (which is a
# regularized version of linear regression).
#
# The scorer we will use to evaluate our model is the mean squared error, as in
# the previous example. The lower the score, the better.
# %% [markdown]
# Here, we will divide our data into a training set, a validation set and a
# testing set.
# The validation set will be used to evaluate selection of the
# hyper-parameters, while the testing set should only be used to calculate the
# score of our final model.
# %%
from sklearn.model_selection import train_test_split
X_train_valid, X_test, y_train_valid, y_test = train_test_split(
X, y, random_state=1
)
X_train, X_valid, y_train, y_valid = train_test_split(
X_train_valid, y_train_valid, random_state=1
)
# %% [markdown]
# Note that in the first example, we did not care about scaling our data in
# order to keep the original units and have better intuition. However, it is
# good practice to scale the data such that each feature has a similar standard
# deviation. It will be even more important if the solver used by the model
# is a gradient-descent-based solver.
# %%
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X_train_scaled = scaler.fit(X_train).transform(X_train)
X_valid_scaled = scaler.transform(X_valid)
# %% [markdown]
# Scikit-learn provides several tools to preprocess the data. The
# `StandardScaler` transforms the data such that each feature will have a mean
# of zero and a standard deviation of 1.
#
# This scikit-learn estimator is known as a transformer: it computes some
# statistics (i.e. the mean and the standard deviation) and stores them as
# attributes (`scaler.mean_`, `scaler.scale_`)
# when calling `fit`. Using these statistics, it
# transforms the data when `transform` is called. Therefore, it is important to
# note that `fit` should only be called on the training data, similar to
# classifiers and regressors.
# %%
print('mean of each feature on the training set:', scaler.mean_)
print('standard deviation of each feature on the training set:', scaler.scale_)
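# %% [markdown]
# The transformation itself is just a shift and a rescale: each feature has its training
# mean subtracted and is then divided by its training standard deviation. A quick sanity
# check makes this explicit:
# %%
import numpy as np
print(np.allclose(X_train_scaled, (X_train - scaler.mean_) / scaler.scale_))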
# %% [markdown]
# In the example above, `X_train_scaled` is the data scaled, using the
# mean and standard deviation of each feature, computed using the training
# data `X_train`.
# %%
linear_regression = LinearRegression()
linear_regression.fit(X_train_scaled, y_train)
y_pred = linear_regression.predict(X_valid_scaled)
print(
f"Mean squared error on the validation set: "
f"{mean_squared_error(y_valid, y_pred):.4f}"
)
# %% [markdown]
# Instead of calling the transformer to transform the data and then calling
# the regressor, scikit-learn provides a `Pipeline`, which 'chains' the
# transformer and regressor together. The pipeline allows you to use a
# sequence of transformer(s) followed by a regressor or a classifier, in one
# call. (i.e. fitting the pipeline will fit both the transformer(s) and the regressor.
# Then predicting from the pipeline will first transform the data through the transformer(s)
# then predict with the regressor from the transformed data)
# This pipeline exposes the same API as the regressor and classifier
# and will manage the calls to `fit` and `transform` for you, avoiding any
# problems with data leakage (when knowledge of the test data was
# inadvertently included in training a model, as when fitting a transformer
# on the test data).
#
# We already presented `Pipeline` in the second notebook and we will use it
# here to combine both the scaling and the linear regression.
#
# We can create a `Pipeline` by using `make_pipeline` and giving as
# arguments the transformation(s) to be performed (in order) and the regressor
# model.
#
# So the two cells above can be reduced to this new one:
# %%
from sklearn.pipeline import make_pipeline
linear_regression = make_pipeline(StandardScaler(), LinearRegression())
linear_regression.fit(X_train, y_train)
y_pred_valid = linear_regression.predict(X_valid)
linear_regression_score = mean_squared_error(y_valid, y_pred_valid)
y_pred_test = linear_regression.predict(X_test)
print(
f"Mean squared error on the validation set: "
f"{mean_squared_error(y_valid, y_pred_valid):.4f}"
)
print(
f"Mean squared error on the test set: "
f"{mean_squared_error(y_test, y_pred_test):.4f}"
)
import numpy as np
import cv2
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions.categorical import Categorical
import torch_ac
class BasicFCModel(nn.Module):
"""
Fully connected Actor-Critic model. Fixed architecture that is smaller and quicker to train.
Designed for default MiniGrid observation space and simplified action space (n=3).
"""
def __init__(self, obs_space, action_space):
"""
Initialize the model.
:param obs_space: (gym.Spaces) Observation space of the environment being used for training. Used to determine
the size of the input layer.
:param action_space: (gym.Spaces) Action space of the environment being used for training. Used to determine
the size of the actor's output later.
"""
super().__init__()
self.recurrent = False # required for using torch_ac package
self.preprocess_obss = None # Default torch_ac pre-processing works for this model
# Define state embedding
self.state_emb = nn.Sequential(
nn.Linear(np.prod(obs_space.shape), 100),
nn.ReLU(),
nn.Linear(100, 64),
nn.ReLU()
)
self.state_embedding_size = 64
# Define actor's model
self.actor = nn.Sequential(
nn.Linear(self.state_embedding_size, 32),
nn.ReLU(),
nn.Linear(32, action_space.n)
)
# Define critic's model
self.critic = nn.Sequential(
nn.Linear(self.state_embedding_size, 32),
nn.ReLU(),
nn.Linear(32, 1)
)
def forward(self, obs):
x = obs.transpose(1, 3).transpose(2, 3).reshape(obs.size()[0], -1)
x = self.state_emb(x.float())
x = x.reshape(x.shape[0], -1)
x_act = self.actor(x)
dist = Categorical(logits=F.log_softmax(x_act, dim=1))
x_crit = self.critic(x)
value = x_crit.squeeze(1)
return dist, value
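# Minimal usage sketch (hypothetical spaces: MiniGrid's default partial observation is a
# 7x7x3 image and the simplified action space has 3 actions; adapt to your own env):
#   import gym.spaces
#   obs_space = gym.spaces.Box(low=0, high=255, shape=(7, 7, 3), dtype=np.uint8)
#   act_space = gym.spaces.Discrete(3)
#   model = BasicFCModel(obs_space, act_space)
#   dist, value = model(torch.zeros(8, 7, 7, 3))  # batch of 8 dummy observations
#   action = dist.sample()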
class SimplifiedRLStarter(nn.Module):
"""
Modified actor-critic model from https://github.com/lcswillems/rl-starter-files/blob/master/model.py.
Simplified to be easier to understand and used for early testing.
Designed for default MiniGrid observation space and simplified action space (n=3).
"""
def __init__(self, obs_space, action_space, grayscale=False):
"""
Initialize the model.
:param obs_space: (gym.Spaces) Observation space of the environment being used for training. Used to determine
the size of the embedding layer.
:param action_space: (gym.Spaces) Action space of the environment being used for training. Used to determine
the size of the actor's output later.
:param grayscale: (bool) Merge the three state-space arrays into one using an RGB to grayscale conversion, and
set the CNN to expect 1 channel instead of 3. NOT RECOMMENDED. Shrinks the observation space, which may
speed up training, but is likely unnecessary and may have unintended consequences.
"""
super().__init__()
self.recurrent = False # required for using torch_ac package
self.grayscale = grayscale
num_channels = 1 if grayscale else 3
# Define image embedding
self.image_conv = nn.Sequential(
nn.Conv2d(num_channels, 16, (2, 2)),
nn.ReLU(),
nn.MaxPool2d((2, 2)),
nn.Conv2d(16, 32, (2, 2)),
nn.ReLU(),
nn.Conv2d(32, 64, (2, 2)),
nn.ReLU()
)
n = obs_space.shape[0]
m = obs_space.shape[1]
self.embedding_size = ((n - 1) // 2 - 2) * ((m - 1) // 2 - 2) * 64
# Define actor's model
self.actor = nn.Sequential(
nn.Linear(self.embedding_size, 64),
nn.Tanh(),
nn.Linear(64, 64),
nn.Tanh(),
nn.Linear(64, action_space.n)
)
# Define critic's model
self.critic = nn.Sequential(
nn.Linear(self.embedding_size, 64),
nn.Tanh(),
nn.Linear(64, 64),
nn.Tanh(),
nn.Linear(64, 1)
)
# Initialize parameters correctly
self.apply(self.init_params)
def preprocess_obss(self, obss, device=None):
if self.grayscale: # simplify state space using grayscale conversion (even though it isn't an RGB image)
if not (type(obss) is list or type(obss) is tuple):
obss = [obss]
new_obss = []
for i in range(len(obss)):
new_obss.append(cv2.cvtColor(obss[i], cv2.COLOR_RGB2GRAY))
return torch.tensor(new_obss, device=device).unsqueeze(-1)
else:
# default torch_ac preprocess_obss call
return torch.tensor(obss, device=device)
# Function from https://github.com/ikostrikov/pytorch-a2c-ppo-acktr/blob/master/model.py
@staticmethod
def init_params(m):
classname = m.__class__.__name__
if classname.find("Linear") != -1:
m.weight.data.normal_(0, 1)
m.weight.data *= 1 / torch.sqrt(m.weight.data.pow(2).sum(1, keepdim=True))
if m.bias is not None:
m.bias.data.fill_(0)
def forward(self, obs):
x = obs.transpose(1, 3).transpose(2, 3)
x = self.image_conv(x.float())
x = x.reshape(x.shape[0], -1)
embedding = x
x = self.actor(embedding)
dist = Categorical(logits=F.log_softmax(x, dim=1))
x = self.critic(embedding)
value = x.squeeze(1)
return dist, value
class ModdedRLStarter(nn.Module, torch_ac.RecurrentACModel):
"""
Modified actor-critic model from https://github.com/lcswillems/rl-starter-files/blob/master/model.py.
Designed for default MiniGrid observation space and simplified action space (n=3).
"""
def __init__(self, obs_space, action_space, use_memory=True, layer_width=64):
"""
Initialize the model.
:param obs_space: (gym.Spaces) Observation space of the environment being used for training. Used to determine
the size of the embedding layer.
:param action_space: (gym.Spaces) Action space of the environment being used for training. Used to determine
the size of the actor's output later.
:param use_memory: (bool) Use the LSTM capability to add memory to the embedding. Required to be True if
recurrence is set to > 1 in torch_ac's PPO algorithm (via TorchACOptConfig). Mostly untested.
:param layer_width: (int) Number of nodes to put in each hidden layer used for the actor and critic.
"""
super().__init__()
# Since recurrence is optional for this model, we need to check and set this here.
if not use_memory:
self.recurrent = False
self.layer_width = layer_width
self.preprocess_obss = None # Use Default torch_ac pre-processing for this model
self.use_memory = use_memory
# Define image embedding
self.image_conv = nn.Sequential(
nn.Conv2d(3, 16, (2, 2)),
nn.ReLU(),
nn.MaxPool2d((2, 2)),
nn.Conv2d(16, 32, (2, 2)),
nn.ReLU(),
nn.Conv2d(32, 64, (2, 2)),
nn.ReLU()
)
n = obs_space.shape[0]
m = obs_space.shape[1]
self.image_embedding_size = ((n - 1) // 2 - 2) * ((m - 1) // 2 - 2) * 64
if self.use_memory:
self.memory_rnn = nn.LSTMCell(self.image_embedding_size, self.semi_memory_size)
self.embedding_size = self.semi_memory_size
# Define actor's model
self.actor = nn.Sequential(
nn.Linear(self.embedding_size, self.layer_width),
nn.Tanh(),
nn.Linear(self.layer_width, self.layer_width),
nn.Tanh(),
nn.Linear(self.layer_width, action_space.n)
)
# Define critic's model
self.critic = nn.Sequential(
nn.Linear(self.embedding_size, self.layer_width),
nn.Tanh(),
nn.Linear(self.layer_width, self.layer_width),
nn.Tanh(),
nn.Linear(self.layer_width, 1)
)
# Initialize parameters correctly
self.apply(self.init_params)
# Function from https://github.com/ikostrikov/pytorch-a2c-ppo-acktr/blob/master/model.py
@staticmethod
def init_params(m):
classname = m.__class__.__name__
if classname.find("Linear") != -1:
m.weight.data.normal_(0, 1)
m.weight.data *= 1 / torch.sqrt(m.weight.data.pow(2).sum(1, keepdim=True))
if m.bias is not None:
m.bias.data.fill_(0)
@property
def memory_size(self):
return 2 * self.semi_memory_size
@property
def semi_memory_size(self):
return self.image_embedding_size
def forward(self, obs, memory):
x = obs.transpose(1, 3).transpose(2, 3)
x = self.image_conv(x.float())
x = x.reshape(x.shape[0], -1)
if self.use_memory:
hidden = (memory[:, :self.semi_memory_size], memory[:, self.semi_memory_size:])
hidden = self.memory_rnn(x, hidden)
embedding = hidden[0]
memory = torch.cat(hidden, dim=1)
else:
embedding = x
x = self.actor(embedding)
dist = Categorical(logits=F.log_softmax(x, dim=1))
x = self.critic(embedding)
value = x.squeeze(1)
return dist, value, memory
class ImageACModel(nn.Module):
"""
Simple CNN Actor-Critic model designed for MiniGrid with torch_ac. Contains pre-processing function that converts
the minigrid RGB observation to a 48x48 grayscale or RGB image.
Designed for RGB/Grayscale MiniGrid observation space and simplified action space (n=3).
"""
def __init__(self, obs_space, action_space, grayscale=False):
"""
Initialize the model.
:param obs_space: (gym.Spaces) Observation space of the environment being used for training. Technically unused
for this model, but stored both for consistency between models and to be used for later reference if needed.
:param action_space: (gym.Spaces) Action space of the environment being used for training. Used to determine
the size of the actor's output later.
:param grayscale: (bool) Convert RGB image to grayscale. Reduces the number of input channels to the first
convolution from 3 to 1.
"""
super().__init__()
self.recurrent = False # required for using torch_ac package
# technically don't need to be stored, but may be useful later.
self.obs_space = obs_space
self.action_space = action_space
self.image_size = 48 # this is the size of image this CNN was designed for
self.grayscale = grayscale
num_channels = 1 if grayscale else 3
# Define image embedding
self.image_conv = nn.Sequential(
nn.Conv2d(num_channels, 8, (3, 3), stride=3),
nn.ReLU(),
nn.Conv2d(8, 16, (4, 4), stride=2),
nn.ReLU(),
nn.Conv2d(16, 32, (3, 3), stride=2),
nn.ReLU()
)
self.image_embedding_size = 3 * 3 * 32
# Define actor's model
self.actor = nn.Sequential(
nn.Linear(self.image_embedding_size, 144),
nn.ReLU(),
nn.Linear(144, action_space.n)
)
# Define critic's model
self.critic = nn.Sequential(
nn.Linear(self.image_embedding_size, 144),
nn.ReLU(),
nn.Linear(144, 1)
)
def preprocess_obss(self, obss, device=None):
if not (type(obss) is list or type(obss) is tuple):
obss = [obss]
new_obss = []
for i in range(len(obss)):
if self.grayscale:
img = cv2.resize(cv2.cvtColor(obss[i], cv2.COLOR_RGB2GRAY), (self.image_size, self.image_size))
else:
img = cv2.resize(obss[i], (self.image_size, self.image_size))
new_obss.append(img)
if self.grayscale:
return torch.tensor(new_obss, device=device).unsqueeze(-1)
else:
return torch.tensor(new_obss, device=device)
def forward(self, obs):
x = obs.transpose(1, 3).transpose(2, 3)
x = self.image_conv(x.float())
x = x.reshape(x.shape[0], -1)
x_act = self.actor(x)
dist = Categorical(logits=F.log_softmax(x_act, dim=1))
x_crit = self.critic(x)
value = x_crit.squeeze(1)
return dist, value
class GRUActorCriticModel(nn.Module, torch_ac.RecurrentACModel):
"""
Modified actor-critic model from https://github.com/lcswillems/rl-starter-files/blob/master/model.py, using a GRU
in the embedding layer. Note that this model should have the 'recurrence' argument set to 1 in the TorchACOptimizer.
Designed for default MiniGrid observation space and simplified action space (n=3).
"""
def __init__(self, obs_space,
action_space,
rnn1_hidden_shape=64,
rnn1_n_layers=2,
rnn2_hidden_shape=64,
rnn2_n_layers=2,
fc_layer_width=64):
super().__init__()
self.preprocess_obss = None # Use Default torch_ac pre-processing for this model
self.layer_width = fc_layer_width
")
if dl == 'y' or dl == 'Y':
os.system("curl -L http://lowelab.ucsc.edu/software/tRNAscan-SE.tar.gz -o trnascan.tar")
os.system("tar xvf trnascan.tar")
os.system("mv tRNAscan-SE-1.3.1 ./Utilities/cpp/%s%s-%s%strnascan"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.chdir("./Utilities/cpp/%s%s-%s%strnascan"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
updateMakeFileForDarwin("Makefile", addedCFlags, addedLDFlags)
os.system("make")
os.chdir("%s"%(METAMOS_ROOT))
os.system("rm -rf trnascan.tar")
# now workflow specific tools
if "optional" in enabledWorkflows or manual:
if not os.path.exists("./Utilities/cpp/%s-%s/metaphylerClassify"%(OSTYPE, MACHINETYPE)) or not os.path.exists("./Utilities/perl/metaphyler/markers/markers.protein") or not os.path.exists("./Utilities/perl/metaphyler/markers/markers.dna"):
if "metaphyler" in packagesToInstall:
dl = 'y'
else:
print "Metaphyler (latest version) not found, optional for Annotate, download now?"
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
os.system("curl -L http://metaphyler.cbcb.umd.edu/MetaPhylerV1.25.tar.gz -o metaphyler.tar.gz")
os.system("tar -C ./Utilities/perl/ -xvf metaphyler.tar.gz")
os.system("mv ./Utilities/perl/MetaPhylerV1.25 ./Utilities/perl/metaphyler")
os.system("mv ./Utilities/perl/metaphyler/installMetaphyler.pl ./Utilities/perl/metaphyler/installMetaphylerFORMATDB.pl");
os.system("cat ./Utilities/perl/metaphyler/installMetaphylerFORMATDB.pl |sed 's/formatdb/\.\/Utilities\/cpp\/%s-%s\/formatdb/g' > ./Utilities/perl/metaphyler/installMetaphyler.pl"%(OSTYPE, MACHINETYPE));
os.system("perl ./Utilities/perl/metaphyler/installMetaphyler.pl")
os.system("cp ./Utilities/perl/metaphyler/metaphylerClassify ./Utilities/cpp/%s-%s/metaphylerClassify"%(OSTYPE, MACHINETYPE))
if not os.path.exists("./Utilities/models") or not os.path.exists("./Utilities/DB/blast_data"):
if "fcp" in packagesToInstall:
dl = 'y'
else:
print "Genome models not found, optional for FCP/NB, download now?"
dl = raw_input("Enter Y/N: ")
if (dl == 'y' or dl == 'Y') and not nodbs:
archive = "fcp_models.tar.gz"
os.system("curl -L ftp://ftp.cbcb.umd.edu/pub/data/metamos/%s -o %s" %(archive, archive))
os.system("rm -rf ./Utilities/DB/blast_data")
os.system("rm -rf ./Utilities/models")
os.system("tar -C ./Utilities/ -xvf %s" % archive)
os.system("rm %s"%archive)
if not os.path.exists("./phylosift") or not os.path.exists("./phylosift/legacy/version.pm") or not os.path.exists("./phylosift/lib/Params"):
if "phylosift" in packagesToInstall:
dl = 'y'
else:
print "PhyloSift binaries not found, optional for Annotate step, download now?"
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
if not os.path.exists("./phylosift"):
#phylosift OSX binaries included inside Linux X86_64 tarball..
os.system("curl -L http://edhar.genomecenter.ucdavis.edu/~koadman/phylosift/devel/phylosift_20130829.tar.bz2 -o ./phylosift.tar.bz2")
os.system("tar -xvjf phylosift.tar.bz2")
os.system("rm -rf phylosift.tar.bz2")
os.system("mv phylosift_20130829 phylosift")
if not os.path.exists("./phylosift/legacy/version.pm"):
#phylosift needs version but doesn't include it
os.system("curl -L http://www.cpan.org/authors/id/J/JP/JPEACOCK/version-0.9903.tar.gz -o version.tar.gz")
os.system("tar xvzf version.tar.gz")
os.chdir("./version-0.9903/")
os.system("perl Makefile.PL")
os.system("make")
os.system("cp -r blib/lib/* ../phylosift/legacy")
os.chdir(METAMOS_ROOT)
os.system("rm -rf version.tar.gz")
os.system("rm -rf version-0.9903")
if not os.path.exists("./phylosift/lib/Params"):
os.system("curl -L ftp://ftp.cbcb.umd.edu/pub/data/metamos/params-validate.tar.gz -o ./params-validate.tar.gz")
os.system("tar xvzf params-validate.tar.gz")
os.system("rm -rf params-validate.tar.gz")
# download markers dbs
if not os.path.exists("./phylosift/share"):
markerUrl = utils.getCommandOutput("cat phylosift/phylosiftrc |grep marker_base |awk '{print $NF}' |sed s/\;//g", False)
ncbiUrl = utils.getCommandOutput("cat phylosift/phylosiftrc |grep ncbi_url |awk '{print $NF}' |sed s/\;//g", False)
os.system("mkdir -p ./phylosift/share/phylosift")
os.chdir("./phylosift/share/phylosift")
os.system("curl -L %s/markers.tgz -o marker.tgz"%(markerUrl))
os.system("tar xvzf marker.tgz")
os.system("rm marker.tgz")
os.system("curl -L %s -o ncbi.tgz"%(ncbiUrl))
os.system("tar xvzf ncbi.tgz")
os.system("rm ncbi.tgz")
os.chdir(METAMOS_ROOT)
# check the number of files the DB currently is and see if we have the expected number
dbResult = ""
if not nodbs:
dbResult = utils.getCommandOutput("perl ./Utilities/perl/update_blastdb.pl refseq_protein --numpartitions", False)
if not nodbs and dbResult == "":
print "Error: could not connect to NCBI, will not be installing refseq protein DB"
elif not nodbs:
(dbName, numPartitions) = dbResult.split("\t", 1)
print "Checking whether %s is complete. Expecting %d partitions.\n"%(dbName, int(numPartitions))
numPartitions = int(numPartitions) - 1
if not os.path.exists("./Utilities/DB/refseq_protein.pal") or not os.path.exists("./Utilities/DB/refseq_protein.%02d.psq"%(int(numPartitions))) or not os.path.exists("./Utilities/DB/allprots.faa"):
if "phmmer" in packagesToInstall:
dl = 'y'
else:
print "refseq protein DB not found or incomplete, needed for Annotate step, download now?"
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
print "Download and install refseq protein DB.."
os.system("perl ./Utilities/perl/update_blastdb.pl refseq_protein")
os.system("mv refseq_protein.*.tar.gz ./Utilities/DB/")
fileList = glob.glob("./Utilities/DB/refseq_protein.*.tar.gz")
for file in fileList:
os.system("tar -C ./Utilities/DB/ -xvf %s"%(file))
print " running fastacmd (might take a few min)..."
os.system(".%sUtilities%scpp%s%s-%s%sfastacmd -d ./Utilities/DB/refseq_protein -p T -a T -D 1 -o ./Utilities/DB/allprots.faa"%(os.sep, os.sep, os.sep, OSTYPE, MACHINETYPE, os.sep))
# sra toolkit
if not os.path.exists("./Utilities/cpp%s%s-%s%ssra"%(os.sep, OSTYPE, MACHINETYPE, os.sep)):
sra = utils.getFromPath("srapath", "SRA PATH", False)
if sra == "":
if "sra" in packagesToInstall:
dl = 'y'
else:
print "SRA binaries not found, optional for initPipeline step, download now?"
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
if OSTYPE == 'Linux' and MACHINETYPE == "x86_64":
os.system("curl -L http://ftp-trace.ncbi.nlm.nih.gov/sra/sdk/2.3.3-3/sratoolkit.2.3.3-3-centos_linux64.tar.gz -o sra.tar.gz")
elif OSTYPE == "Darwin" and MACHINETYPE == "x86_64":
os.system("curl -L http://ftp-trace.ncbi.nlm.nih.gov/sra/sdk/2.3.3-3/sratoolkit.2.3.3-3-mac64.tar.gz -o sra.tar.gz")
os.system("tar xvzf sra.tar.gz")
os.system("mv sratoolkit.2.3.3-3-* ./Utilities/cpp%s%s-%s%ssra"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.system("rm -rf sra.tar.gz")
if "isolate" in enabledWorkflows or "imetamos" in enabledWorkflows or manual:
# check for cmake
if not os.path.exists("./Utilities/cpp%s%s-%s%scmake"%(os.sep, OSTYPE, MACHINETYPE, os.sep)):
cmake = utils.getFromPath("cmake", "CMAKE", False)
if cmake == "":
if "cmake" in packagesToInstall:
dl = 'y'
else:
print "cmake binaries not found, optional for initPipeline step, download now?"
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
os.system("curl -L http://www.cmake.org/files/v2.8/cmake-2.8.12.tar.gz -o cmake.tar.gz")
os.system("tar xvzf cmake.tar.gz")
os.system("mv cmake-2.8.12 ./Utilities/cpp%s%s-%s%scmake"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.chdir("./Utilities/cpp%s%s-%s%scmake"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.system("./bootstrap --prefix=`pwd`/build;make;make install")
os.chdir("%s"%(METAMOS_ROOT))
os.system("rm cmake.tar.gz")
if os.path.exists("./Utilities/cpp%s%s-%s%scmake"%(os.sep, OSTYPE, MACHINETYPE, os.sep)):
pathUpdate = "%s/Utilities/cpp%s%s-%s%scmake/build/bin"%(METAMOS_ROOT, os.sep, OSTYPE, MACHINETYPE, os.sep)
if "PATH" in os.environ:
pathUpdate = "%s%s%s"%(os.environ["PATH"], os.pathsep, pathUpdate)
os.environ["PATH"]=pathUpdate
os.chdir("%s"%(METAMOS_ROOT))
if not os.path.exists("./CA"):
if "ca" in packagesToInstall:
dl = 'y'
else:
print "Celera Assembler binaries not found, optional for Assemble step, download now?"
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
os.system("curl -L https://downloads.sourceforge.net/project/wgs-assembler/wgs-assembler/wgs-8.1/wgs-8.1.tar.bz2 -o wgs-8.1.tar.bz2")
os.system("tar xvjf wgs-8.1.tar.bz2")
os.system("rm -rf wgs-8.1.tar.bz2")
os.system("mv wgs-8.1 CA")
# patch CA to support PacBio sequences and non-apple compilers on OSX
if not ALLOW_FAST:
os.system("cd CA/kmer/ && cp configure.sh configure.original")
os.system("cd CA/kmer/ && cat configure.original |sed s/\-fast//g > configure.sh")
os.system("cd CA/src/ && cp c_make.as c_make.original")
os.system("cd CA/src/ && cat c_make.original |sed s/\-fast//g > c_make.as")
if not HAVE_GCC42:
os.system("cd CA/src/ && cp c_make.as c_make.original")
os.system("cd CA/src/ && cat c_make.original |sed s/\-4.2//g > c_make.as")
if GCC_VERSION >= 4.7:
os.system("cd CA/src/ && cp c_make.as c_make.original")
os.system("cd CA/src/ && cat c_make.original |sed s/\-rdynamic//g > c_make.as")
updateMakeFileForDarwin("CA/kmer/Makefile", addedCFlags, addedLDFlags)
updateMakeFileForDarwin("CA/samtools/Makefile", addedCFlags, addedLDFlags)
updateMakeFileForDarwin("CA/src/c_make.as", addedCFlags, addedLDFlags)
os.system("cd CA/samtools && make")
os.system("cd CA/kmer && ./configure.sh && gmake install")
os.system("cd CA/src && gmake")
if not os.path.exists("./Utilities/cpp%s%s-%s%sRay"%(os.sep, OSTYPE, MACHINETYPE, os.sep)):
if "ray" in packagesToInstall:
dl = 'y'
else:
print "Ray binaries not found, optional for Assemble step, download now?"
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
# check for mpi which is required
command="mpicxx"
mpi=utils.getFromPath(command, "MPI", False)
if not os.path.exists("%s%s%s"%(mpi, os.sep, command)):
command="openmpicxx"
mpi=utils.getFromPath(command, "MPI", False)
if not os.path.exists("%s%s%s"%(mpi, os.sep, command)):
mpi = command = ""
print "Error: cannot find MPI, required to build Ray. Please add it to your path."
if command != "":
os.system("curl -L http://downloads.sourceforge.net/project/denovoassembler/Ray-v2.2.0.tar.bz2 -o Ray-v2.2.0.tar.bz2")
os.system("tar xvjf Ray-v2.2.0.tar.bz2")
os.system("mv Ray-v2.2.0 ./Utilities/cpp/%s%s-%s%sRay"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.chdir("./Utilities/cpp/%s%s-%s%sRay"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.system("make PREFIX=bin MPICXX=%s%s%s MAXKMERLENGTH=128 MPI_IO=y DEBUG=n ASSERT=n EXTRA=\" -march=native\""%(mpi, os.sep, command))
os.system("make install")
os.chdir("%s"%(METAMOS_ROOT))
os.system("rm -rf Ray-v2.2.0.tar.bz2")
if not os.path.exists("./Utilities/cpp%s%s-%s%skmergenie"%(os.sep, OSTYPE, MACHINETYPE, os.sep)):
kmerGenie = utils.getFromPath("kmergenie", "Kmer Genie", False)
if kmerGenie == "":
if "kmergenie" in packagesToInstall:
dl = 'y'
else:
print "Kmer Genie was not found, optional for Assemble step, download now?"
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
os.system("curl -L ftp://ftp.cbcb.umd.edu/pub/data/metamos/kmergenie-1.5692.tar.gz -o kmer.tar.gz")
os.system("tar xvzf kmer.tar.gz")
os.system("mv kmergenie-1.5692 ./Utilities/cpp%s%s-%s%skmergenie"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.chdir("./Utilities/cpp%s%s-%s%skmergenie"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
updateMakeFileForDarwin("makefile", addedCFlags, addedLDFlags)
os.system("make k=300")
os.chdir("%s"%(METAMOS_ROOT))
os.system("rm -rf kmer.tar.gz")
if not os.path.exists("./Utilities/cpp%s%s-%s%sspades"%(os.sep, OSTYPE, MACHINETYPE, os.sep)):
spades = utils.getFromPath("spades.py", "SPAdes", False)
if spades == "":
if "spades" in packagesToInstall:
dl = 'y'
else:
print "SPAdes was not found, optional for Assemble step, download now?"
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
if OSTYPE == "Darwin":
if GCC_VERSION < 4.7:
print "Error: SPAdes requires gcc at least version 4.7, found version %s. Please update and try again"%(GCC_VERSION)
else:
os.system("curl -L http://spades.bioinf.spbau.ru/release3.0.0/SPAdes-3.0.0.tar.gz -o spades.tar.gz")
os.system("tar xvzf spades.tar.gz")
os.system("mv SPAdes-3.0.0 ./Utilities/cpp%s%s-%s%sspades"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.chdir("./Utilities/cpp%s%s-%s%sspades"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.system("export CC=`which gcc` && bash spades_compile.sh")
os.chdir("%s"%(METAMOS_ROOT))
else:
os.system("curl -L http://spades.bioinf.spbau.ru/release3.0.0/SPAdes-3.0.0-Linux.tar.gz -o spades.tar.gz")
os.system("tar xvzf spades.tar.gz")
os.system("mv SPAdes-3.0.0-Linux ./Utilities/cpp%s%s-%s%sspades"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.system("rm -rf spades.tar.gz")
if not os.path.exists("./Utilities/cpp%s%s-%s%sprokka"%(os.sep, OSTYPE, MACHINETYPE, os.sep)):
prokaBin = utils.getFromPath("prokka", "Prokka", False)
dl = 'n'
if prokaBin == "":
if "prokka" in packagesToInstall:
dl = 'y'
else:
print "Prokka binaries not found, optional for Assemble step, download now?"
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
signalp = utils.getFromPath("signalp", "SignalP", False)
if signalp == "":
print "Warning: SignalP is not installed and is required for Prokka's gram option. Please download it and add it to your path."
os.system("curl -L http://www.vicbioinformatics.com/prokka-1.11.tar.gz -o prokka-1.11.tar.gz")
os.system("tar xvzf prokka-1.11.tar.gz")
os.system("mv prokka-1.11 ./Utilities/cpp%s%s-%s%sprokka"%(os.sep, | |
ylim_2 = self.imgStdSize[1]//2 + width//2
#print(xlim_1, xlim_2, ylim_1, ylim_2)
#print("Image shape:", image.shape)
background[xlim_1 : xlim_2, ylim_1: ylim_2] = image
return {'props': blobProperties, 'image': background}
def getFileNumber(self, filename):
return int(filename.split('.')[0].split('/')[-1])
# From the net you get scores as output, you will have to construct appropriate links
# here before
def constructLinks(self, prevfilenames, currfilenames, prevBlobNumbers, currBlobNumbers, netScores):
self.fileNamesProcessed = []
self.links = []
self.scoresArray = np.zeros(shape=(len(prevfilenames), 5))
for i in range(len(prevfilenames)):
self.scoresArray[i, 0] = self.getFileNumber(prevfilenames[i])
self.scoresArray[i, 1] = self.getFileNumber(currfilenames[i])
self.scoresArray[i, 2] = prevBlobNumbers[i]
self.scoresArray[i, 3] = currBlobNumbers[i]
self.scoresArray[i, 4] = netScores[i][0]
for fileIndex in range(len(self.filenames) - 1):
alreadyLinked = [] # trying to avoid linking things already linked
for blob1index in range(self.nBlobsPerImage[fileIndex]):
scores = -np.zeros((self.nBlobsPerImage[fileIndex + 1]))
for blob2index in range(self.nBlobsPerImage[fileIndex + 1]):
scores[blob2index] = self.getScore(fileIndex, fileIndex + 1, blob1index, blob2index)
#indicesAfterCutoff = np.where(scores < self.threshold)[0]
sorted_scores = np.sort(scores)
#if len(indicesAfterCutoff) == 1 and (indicesAfterCutoff[0] not in alreadLinkedInNext):
# self.links.append([fileIndex, fileIndex+1, blob1index, indicesAfterCutoff[0]])
# alreadLinkedInNext.append(indicesAfterCutoff[0])
#elif len(indicesAfterCutoff) == 2:
# check if both pass the difference threshold
#daughter1Index = indicesAfterCutoff[0]
#daughter2Index = indicesAfterCutoff[1]
#if (abs(scores[daughter1Index] - scores[daughter2Index]) <= self.differenceThreshold):
# # link both if area constraints are met
# if ((self.properties[fileIndex+1][daughter1Index]['area'] < self.divisionAreaRatio * self.properties[fileIndex][blob1index]['area'])
# and (self.properties[fileIndex+1][daughter2Index]['area'] < self.divisionAreaRatio * self.properties[fileIndex][blob1index]['area'])):
# if (daughter1Index not in alreadLinkedInNext):
# self.links.append([fileIndex, fileIndex+1, blob1index, daughter1Index])
# alreadLinkedInNext.append(daughter1Index)
# if (daughter2Index not in alreadLinkedInNext):
# self.links.append([fileIndex, fileIndex + 1, blob1index, daughter2Index])
# alreadLinkedInNext.append(daughter2Index)
# else:
# linkedblobIndex = daughter1Index
# if daughter1Index not in alreadLinkedInNext:
# self.links.append([fileIndex, fileIndex+1, blob1index, linkedblobIndex])
# alreadLinkedInNext.append(linkedblobIndex)
# elif (linkedblobIndex in alreadLinkedInNext):
# linkedblobIndex = daughter2Index
# self.links.append([fileIndex, fileIndex+1, blob1index, linkedblobIndex])
# alreadLinkedInNext.append(linkedblobIndex)
if len(sorted_scores) == 1:
# check if it is below the threshold and, if so, add it to the links,
# and also that the centroid of blob2 is not past the cutoff
if(sorted_scores <= self.threshold and
self.properties[fileIndex+1][blob2index]['centroid'][0] < self.lastBlobCentroidCutoff):
self.links.append([fileIndex, fileIndex+1, blob1index, blob2index])
alreadyLinked.append(blob2index)
elif len(sorted_scores) >= 2:
# check the difference between the first two elements and whether they pass the threshold;
# find the indices of these two elements as they give the blob indices
if((sorted_scores[0] <= self.threshold) and (sorted_scores[1] <= self.threshold)):
# both are less than threshold, check the difference and add links
# They are daughters, find the blob indices in the original array scores and
# add them to the links
daughterIndex1 = np.where(scores == sorted_scores[0])[0][0]
daughterIndex2 = np.where(scores == sorted_scores[1])[0][0]
if(abs(sorted_scores[0] - sorted_scores[1]) <= self.differenceThreshold):
# check the area constraints and whether the daughters are already linked
if((self.properties[fileIndex+1][daughterIndex1]['area'] < self.divisionAreaRatio * self.properties[fileIndex][blob1index]['area'])
and (self.properties[fileIndex+1][daughterIndex2]['area'] < self.divisionAreaRatio * self.properties[fileIndex][blob1index]['area'])):
if (daughterIndex1 not in alreadyLinked):
self.links.append([fileIndex, fileIndex + 1, blob1index, daughterIndex1])
alreadyLinked.append(daughterIndex1)
if (daughterIndex2 not in alreadyLinked):
self.links.append([fileIndex, fileIndex + 1, blob1index, daughterIndex2])
alreadyLinked.append(daughterIndex2)
else:
linkedblobIndex = daughterIndex1
if(linkedblobIndex not in alreadyLinked):
self.links.append([fileIndex, fileIndex+1, blob1index, linkedblobIndex])
alreadyLinked.append(linkedblobIndex)
elif(linkedblobIndex in alreadyLinked):
linkedblobIndex = daughterIndex2
self.links.append([fileIndex , fileIndex+1, blob1index, linkedblobIndex])
alreadyLinked.append(linkedblobIndex)
else:
# They don't meet the difference threshold,
# so only link the closest blob if it is not linked already; if linked, go
# to the next blob
linkedblobIndex = daughterIndex1
if (linkedblobIndex not in alreadyLinked):
self.links.append([fileIndex, fileIndex +1, blob1index, linkedblobIndex])
alreadyLinked.append(linkedblobIndex)
elif (linkedblobIndex in alreadyLinked):
linkedblobIndex = daughterIndex2
self.links.append([fileIndex, fileIndex + 1, blob1index, linkedblobIndex])
alreadyLinked.append(linkedblobIndex)
# only one score is less than threshold
elif (sorted_scores[0] <= self.threshold and sorted_scores[1] > self.threshold):
# Only the lowest score is below the threshold
linkedblobIndex = np.where(scores == sorted_scores[0])[0][0]
if (linkedblobIndex not in alreadyLinked):
if(self.properties[fileIndex+1][linkedblobIndex]['centroid'][0] < self.lastBlobCentroidCutoff):
self.links.append([fileIndex, fileIndex+1, blob1index, linkedblobIndex])
alreadyLinked.append(linkedblobIndex)
self.fileNamesProcessed.append(self.dirName + str(fileIndex) + self.fileformat)
# Look inside the score structure and return the float value of the
# score for the given currentFileIndex, nextFileIndex, blob1index, blob2index
def getScore(self, currentFileIndex, nextFileIndex, blob1index, blob2index):
#linksArray = np.asarray(self.scoresStructure)
indices = np.where((self.scoresArray[:, :4] == [currentFileIndex, nextFileIndex, blob1index, blob2index]).all(axis = 1))[0]
if len(indices) == 0:
return float("inf")  # no match: return a large value so this pair can never be linked
else:
return float(self.scoresArray[indices][0][4])
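# For reference, each row of self.scoresArray filled in constructLinks has the layout
# [prev_file_number, curr_file_number, prev_blob_index, curr_blob_index, net_score];
# e.g. a candidate link between blob 0 of frame 3 and blob 1 of frame 4 scored 0.12
# would be stored as [3., 4., 0., 1., 0.12].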
# convert the links made from scores, to convert to track structures
def convertLinksToTracks(self):
self.tracks = []
# Here you will have to do the clean up as well
# In this function you will have to construct the lineages of one cell and then
# add it to the list of tracks
# TODO: Here is where you construct tracks
# TODO: Needs a bit more sophistication depending on how good
# the track links are
# This is a function that constructs tracks from the links which
# is a array of list: [file1, file2, blob1, blob2].
# Iterate till you go nowhere or have two blobs connected to one blob
#
#self.tracks = [] # a list of dictionaries, each track is a dictionary
# keys are frame numbers and values are indices of the blobs
# create local copies so as not to change the class's internal ones
linksArray = np.asarray(self.links)
while linksArray.size != 0:
#for row in range(linksArray.shape[0]):
# start with one element and end when the track ends
# or when there are two blobs connecting the same blob
first_row = linksArray[0] # start linking from here
one_track = oneCellLineage(self.dirName, exptTime= self.exptTime)
one_track.add(first_row[0], first_row[2]) # add the first blob
# Now loop till there is nothing matching
# How many blobs is this blob connected to : this blob means the first row
while True:
current_blob_index = np.where((linksArray[:, 0::2] == [first_row[0], first_row[2]]).all(axis=1))[0]
#print("Current_blob_index: ", current_blob_index)
connections_from_current_blob = np.where((linksArray[:, 0::2] == [first_row[0], first_row[2]]).all(axis=1))[0]
if(len(connections_from_current_blob) == 2):
# Time to split the track and not add the track, find out which are daughters and keep track
#print("Blob has daughter and split: ", first_row, connections_from_current_blob)
daughterIndex1 = connections_from_current_blob[0]
daughterIndex2 = connections_from_current_blob[1]
one_track.setDaughter(linksArray[daughterIndex1][1], linksArray[daughterIndex1][3])
#print(linksArray[daughterIndex1][1], linksArray[daughterIndex1][3])
#print(linksArray[daughterIndex2][1], linksArray[daughterIndex2][3])
one_track.setDaughter(linksArray[daughterIndex2][1], linksArray[daughterIndex2][3])
linksArray = np.delete(linksArray, (current_blob_index), axis = 0)
break
elif(len(connections_from_current_blob) == 1):
area_ratio = self.properties[first_row[1]][int(first_row[3])]['area']/self.properties[first_row[0]][int(first_row[2])]['area']
#print(area_ratio)
# link only if the area ratio falls in certain range
if area_ratio > 0.7 and area_ratio < (1/0.7):
one_track.add(first_row[1], first_row[3])
linksArray = np.delete(linksArray, (current_blob_index), axis = 0)
next_blob_index = np.where((linksArray[:,0::2] == [first_row[1], first_row[3]]).all(axis=1))[0] # Grab the index of the next blob from the array
if(next_blob_index.size != 0):
first_row = linksArray[next_blob_index[0]]
# one daughter case
elif area_ratio < 0.7 and area_ratio > 0.35:
daughterIndex1 = connections_from_current_blob[0]
one_track.setDaughter(linksArray[daughterIndex1][1], linksArray[daughterIndex1][3])
linksArray = np.delete(linksArray, (current_blob_index), axis = 0)
break
# don't link anything if the area ratio is off
else:
linksArray = np.delete(linksArray, (current_blob_index), axis = 0)
break
elif(len(connections_from_current_blob) == 0):
break
#print(one_track)
self.tracks.append(one_track)
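# For reference, each entry of self.links consumed above has the form
# [frame_index, frame_index + 1, blob_index, blob_index_in_next_frame]; chains of such
# entries are stitched into one oneCellLineage, and a blob with two outgoing links is
# treated as a division (daughter indices recorded, track closed).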
def labelTracksWithFluorChannels(self, maxIterations=10, printFluorLabels=False):
if self.fishdata == None:
print("Fish linking failed")
return None
#print(f"Labeling some of the {len(self.tracks)}")
# label all tracks that end in the last frame first
lastFileName = self.fileNamesProcessed[-1]
for cellLineage in self.tracks:
# check if the track ends in the lastFrame
if lastFileName in cellLineage.trackDictionary:
# label the track with the correct fluor channels
centroid = cellLineage.props[lastFileName]['centroid']
cellLineage.fluorChannels = self.getFluorChannels(centroid)
#print(cellLineage.fluorChannels)
# Loop over and set the internal daughter indices of each track in the set of tracks
self.setDaughterIndices()
#print("Set internal daughter indices")
while(maxIterations > 0):
for _, oneLineage in enumerate(self.tracks, 0):
indices = oneLineage._indexToDaughters
#print(f"{indices} ----> {oneLineage.fluorChannels}")
if oneLineage.fluorChannels == None and len(indices) > 0:
# set it to one of the daughters
for index in indices:
oneLineage.fluorChannels = self.tracks[index].fluorChannels
if oneLineage.fluorChannels != None:
break
maxIterations -= 1
# Loop over and check if each track's species matches its daughters'
# If not, label it as a conflict
for _, oneLineage in enumerate(self.tracks, 0):
daughterIndices = oneLineage._indexToDaughters
for index in daughterIndices:
if oneLineage.fluorChannels != self.tracks[index].fluorChannels:
oneLineage.fluorChannels = ['Conflict']
break
if printFluorLabels == True:
for oneLineage in self.tracks:
print(oneLineage.fluorChannels)
# Loop over the set of tracks and set the internal indices of the lineages in the overall
# tracks list
def setDaughterIndices(self):
for index, oneLineage in enumerate(self.tracks, 0):
oneLineage._indexToDaughters = []
if oneLineage.numDaughters() > 0:
| |
import torch.nn as nn
import torch
import os
from function import adaptive_instance_normalization as adain
from function import calc_mean_std
# from torch.utils.serialization import load_lua
import numpy as np
def load_param_from_t7(model, in_layer_index, out_layer):
out_layer.weight = torch.nn.Parameter(model.get(in_layer_index).weight.float())
out_layer.bias = torch.nn.Parameter(model.get(in_layer_index).bias.float())
load_param = load_param_from_t7
class Encoder5(nn.Module):
def __init__(self, model=None, fixed=False):
super(Encoder5, self).__init__()
self.fixed = fixed
self.conv0 = nn.Conv2d( 3, 3,1,1,0)
self.conv0.weight = nn.Parameter(torch.from_numpy(np.array(
[[[[0]],[[0]],[[255]]],
[[[0]],[[255]],[[0]]],
[[[255]],[[0]],[[0]]]])).float())
self.conv0.bias = nn.Parameter(torch.from_numpy(np.array(
[-103.939,-116.779,-123.68])).float())
self.conv11 = nn.Conv2d( 3, 64,3,1,0) # conv1_1
self.conv12 = nn.Conv2d( 64, 64,3,1,0) # conv1_2
self.conv21 = nn.Conv2d( 64,128,3,1,0) # conv2_1
self.conv22 = nn.Conv2d(128,128,3,1,0) # conv2_2
self.conv31 = nn.Conv2d(128,256,3,1,0) # conv3_1
self.conv32 = nn.Conv2d(256,256,3,1,0) # conv3_2
self.conv33 = nn.Conv2d(256,256,3,1,0) # conv3_3
self.conv34 = nn.Conv2d(256,256,3,1,0) # conv3_4
self.conv41 = nn.Conv2d(256,512,3,1,0) # conv4_1
self.conv42 = nn.Conv2d(512,512,3,1,0) # conv4_2
self.conv43 = nn.Conv2d(512,512,3,1,0) # conv4_3
self.conv44 = nn.Conv2d(512,512,3,1,0) # conv4_4
self.conv51 = nn.Conv2d(512,512,3,1,0) # conv5_1
self.relu = nn.ReLU(inplace=True)
self.pool = nn.MaxPool2d(kernel_size=2,stride=2,return_indices=False)
self.pad = nn.ReflectionPad2d((1,1,1,1))
if model:
assert(os.path.splitext(model)[1] in {".t7", ".pth"})
if model.endswith(".t7"):
t7_model = load_lua(model)
load_param(t7_model, 0, self.conv0)
load_param(t7_model, 2, self.conv11)
load_param(t7_model, 5, self.conv12)
load_param(t7_model, 9, self.conv21)
load_param(t7_model, 12, self.conv22)
load_param(t7_model, 16, self.conv31)
load_param(t7_model, 19, self.conv32)
load_param(t7_model, 22, self.conv33)
load_param(t7_model, 25, self.conv34)
load_param(t7_model, 29, self.conv41)
load_param(t7_model, 32, self.conv42)
load_param(t7_model, 35, self.conv43)
load_param(t7_model, 38, self.conv44)
load_param(t7_model, 42, self.conv51)
else:
self.load_state_dict(torch.load(model, map_location=lambda storage, location: storage))
if fixed:
for param in self.parameters():
param.requires_grad = False
def forward(self, input):
y = self.conv0(input)
y = self.relu(self.conv11(self.pad(y)))
y = self.relu(self.conv12(self.pad(y)))
y = self.pool(y)
y = self.relu(self.conv21(self.pad(y)))
y = self.relu(self.conv22(self.pad(y)))
y = self.pool(y)
y = self.relu(self.conv31(self.pad(y)))
y = self.relu(self.conv32(self.pad(y)))
y = self.relu(self.conv33(self.pad(y)))
y = self.relu(self.conv34(self.pad(y)))
y = self.pool(y)
y = self.relu(self.conv41(self.pad(y)))
y = self.relu(self.conv42(self.pad(y)))
y = self.relu(self.conv43(self.pad(y)))
y = self.relu(self.conv44(self.pad(y)))
y = self.pool(y)
y = self.relu(self.conv51(self.pad(y)))
return y
class Decoder5(nn.Module):
def __init__(self, model=None, fixed=False):
super(Decoder5, self).__init__()
self.fixed = fixed
self.conv51 = nn.Conv2d(512,512,3,1,0)
self.conv44 = nn.Conv2d(512,512,3,1,0)
self.conv43 = nn.Conv2d(512,512,3,1,0)
self.conv42 = nn.Conv2d(512,512,3,1,0)
self.conv41 = nn.Conv2d(512,256,3,1,0)
self.conv34 = nn.Conv2d(256,256,3,1,0)
self.conv33 = nn.Conv2d(256,256,3,1,0)
self.conv32 = nn.Conv2d(256,256,3,1,0)
self.conv31 = nn.Conv2d(256,128,3,1,0)
self.conv22 = nn.Conv2d(128,128,3,1,0)
self.conv21 = nn.Conv2d(128, 64,3,1,0)
self.conv12 = nn.Conv2d( 64, 64,3,1,0)
self.conv11 = nn.Conv2d( 64, 3,3,1,0)
self.relu = nn.ReLU(inplace=True)
self.unpool = nn.UpsamplingNearest2d(scale_factor=2)
self.pad = nn.ReflectionPad2d((1,1,1,1))
if model:
assert(os.path.splitext(model)[1] in {".t7", ".pth"})
if model.endswith(".t7"):
t7_model = load_lua(model)
load_param(t7_model, 1, self.conv51)
load_param(t7_model, 5, self.conv44)
load_param(t7_model, 8, self.conv43)
load_param(t7_model, 11, self.conv42)
load_param(t7_model, 14, self.conv41)
load_param(t7_model, 18, self.conv34)
load_param(t7_model, 21, self.conv33)
load_param(t7_model, 24, self.conv32)
load_param(t7_model, 27, self.conv31)
load_param(t7_model, 31, self.conv22)
load_param(t7_model, 34, self.conv21)
load_param(t7_model, 38, self.conv12)
load_param(t7_model, 41, self.conv11)
print("Given torch model, saving pytorch model")
torch.save(self.state_dict(), os.path.splitext(model)[0] + ".pth")
print("Saving done")
else:
self.load_state_dict(torch.load(model, map_location=lambda storage, location: storage))
if fixed:
for param in self.parameters():
param.requires_grad = False
def forward(self, input):
y = self.relu(self.conv51(self.pad(input)))
y = self.unpool(y)
y = self.relu(self.conv44(self.pad(y)))
y = self.relu(self.conv43(self.pad(y)))
y = self.relu(self.conv42(self.pad(y)))
y = self.relu(self.conv41(self.pad(y)))
y = self.unpool(y)
y = self.relu(self.conv34(self.pad(y)))
y = self.relu(self.conv33(self.pad(y)))
y = self.relu(self.conv32(self.pad(y)))
y = self.relu(self.conv31(self.pad(y)))
y = self.unpool(y)
y = self.relu(self.conv22(self.pad(y)))
y = self.relu(self.conv21(self.pad(y)))
y = self.unpool(y)
y = self.relu(self.conv12(self.pad(y)))
y = self.relu(self.conv11(self.pad(y)))
return y
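# Minimal stylization sketch built on the Encoder5/Decoder5 pair above and the imported
# adain helper (hypothetical usage; weight paths are assumptions, and with None the
# modules keep their random initialization).
def _example_adain_stylize(content, style, encoder_weights=None, decoder_weights=None):
    enc = Encoder5(encoder_weights, fixed=True)
    dec = Decoder5(decoder_weights)
    with torch.no_grad():
        f_c = enc(content)   # content features at relu5_1
        f_s = enc(style)     # style features at relu5_1
        t = adain(f_c, f_s)  # align channel-wise mean/std of content features to style
        return dec(t)        # decode the re-normalized features back to image space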
class Encoder4_2(nn.Module):
def __init__(self, model=None, fixed=False):
super(Encoder4_2, self).__init__()
self.fixed = fixed
self.vgg = nn.Sequential(
nn.Conv2d(3, 3, (1, 1)),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(3, 64, (3, 3)),
nn.ReLU(), # relu1-1 # 3
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(64, 64, (3, 3)),
nn.ReLU(), # relu1-2 # 6
nn.MaxPool2d((2, 2), (2, 2), (0, 0), ceil_mode=True),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(64, 128, (3, 3)),
nn.ReLU(), # relu2-1 # 10
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(128, 128, (3, 3)),
nn.ReLU(), # relu2-2 # 13
nn.MaxPool2d((2, 2), (2, 2), (0, 0), ceil_mode=True),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(128, 256, (3, 3)),
nn.ReLU(), # relu3-1 # 17
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(256, 256, (3, 3)),
nn.ReLU(), # relu3-2 # 20
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(256, 256, (3, 3)),
nn.ReLU(), # relu3-3 # 23
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(256, 256, (3, 3)),
nn.ReLU(), # relu3-4 # 26
nn.MaxPool2d((2, 2), (2, 2), (0, 0), ceil_mode=True),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(256, 512, (3, 3)),
nn.ReLU(), # relu4-1 # 30
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(512, 512, (3, 3)),
nn.ReLU(), # relu4-2 # 33
)
if model:
assert(os.path.splitext(model)[1] in {".t7", ".pth"})
if model.endswith(".t7"):
t7_model = load_lua(model)
load_param(t7_model, 0, self.vgg[0])
load_param(t7_model, 2, self.vgg[2])
load_param(t7_model, 5, self.vgg[5])
load_param(t7_model, 9, self.vgg[9])
load_param(t7_model, 12, self.vgg[12])
load_param(t7_model, 16, self.vgg[16])
load_param(t7_model, 19, self.vgg[19])
load_param(t7_model, 22, self.vgg[22])
load_param(t7_model, 25, self.vgg[25])
load_param(t7_model, 29, self.vgg[29])
load_param(t7_model, 32, self.vgg[32])
else: # pth model
net = torch.load(model)
odict_keys = list(net.keys())
cnt = 0; i = 0
for m in self.vgg.children():
if isinstance(m, nn.Conv2d):
print("layer %s is loaded with trained params" % i)
m.weight.data.copy_(net[odict_keys[cnt]]); cnt += 1
m.bias.data.copy_(net[odict_keys[cnt]]); cnt += 1
i += 1
def forward(self, x):
return self.vgg(x)
def forward_branch(self, x):
# chain the slices so each stage receives the previous stage's features
y_relu1_1 = self.vgg[  : 4](x)
y_relu2_1 = self.vgg[ 4:11](y_relu1_1)
y_relu3_1 = self.vgg[11:18](y_relu2_1)
y_relu4_1 = self.vgg[18:31](y_relu3_1)
return y_relu1_1, y_relu2_1, y_relu3_1, y_relu4_1
class Decoder4_2(nn.Module):
def __init__(self, model=None, fixed=False):
super(Decoder4_2, self).__init__()
self.fixed = fixed
self.conv42 = nn.Conv2d(512,512,3,1,0)
self.conv41 = nn.Conv2d(512,256,3,1,0)
self.conv34 = nn.Conv2d(256,256,3,1,0)
self.conv33 = nn.Conv2d(256,256,3,1,0)
self.conv32 = nn.Conv2d(256,256,3,1,0)
self.conv31 = nn.Conv2d(256,128,3,1,0)
self.conv22 = nn.Conv2d(128,128,3,1,0)
self.conv21 = nn.Conv2d(128, 64,3,1,0)
self.conv12 = nn.Conv2d( 64, 64,3,1,0)
self.conv11 = nn.Conv2d( 64, 3,3,1,0)
self.relu = nn.ReLU(inplace=True)
self.unpool = nn.UpsamplingNearest2d(scale_factor=2)
self.pad = nn.ReflectionPad2d((1,1,1,1))
if model:
assert(os.path.splitext(model)[1] in {".t7", ".pth"})
if model.endswith(".t7"):
t7_model = load_lua(model)
load_param(t7_model, 1, self.conv51)
load_param(t7_model, 5, self.conv44)
load_param(t7_model, 8, self.conv43)
load_param(t7_model, 11, self.conv42)
load_param(t7_model, 14, self.conv41)
load_param(t7_model, 18, self.conv34)
load_param(t7_model, 21, self.conv33)
load_param(t7_model, 24, self.conv32)
load_param(t7_model, 27, self.conv31)
load_param(t7_model, 31, self.conv22)
load_param(t7_model, 34, self.conv21)
load_param(t7_model, 38, self.conv12)
load_param(t7_model, 41, self.conv11)
else:
self.load_state_dict(torch.load(model, map_location=lambda storage, location: storage))
if fixed:
for param in self.parameters():
param.requires_grad = False
def forward(self, y):
y = self.relu(self.conv42(self.pad(y)))
y = self.relu(self.conv41(self.pad(y)))
y = self.unpool(y)
y = self.relu(self.conv34(self.pad(y)))
y = self.relu(self.conv33(self.pad(y)))
y = self.relu(self.conv32(self.pad(y)))
y = self.relu(self.conv31(self.pad(y)))
y = self.unpool(y)
y = self.relu(self.conv22(self.pad(y)))
y = self.relu(self.conv21(self.pad(y)))
y = self.unpool(y)
y = self.relu(self.conv12(self.pad(y)))
y = self.relu(self.conv11(self.pad(y)))
return y
class SmallEncoder4_2(nn.Module):
def __init__(self, model=None):
super(SmallEncoder4_2, self).__init__()
self.vgg = nn.Sequential(
nn.Conv2d(3, 3, (1, 1)),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(3, 16, (3, 3)),
nn.ReLU(), # relu1-1 # 3
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(16, 16, (3, 3)),
nn.ReLU(), # relu1-2 # 6
nn.MaxPool2d((2, 2), (2, 2), (0, 0), ceil_mode=True),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(16, 32, (3, 3)),
nn.ReLU(), # relu2-1 # 10
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(32, 32, (3, 3)),
nn.ReLU(), # relu2-2 # 13
nn.MaxPool2d((2, 2), (2, 2), (0, 0), ceil_mode=True),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(32, 64, (3, 3)),
nn.ReLU(), # relu3-1 # 17
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(64, 64, (3, 3)),
nn.ReLU(), # relu3-2 # 20
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(64, 64, (3, 3)),
nn.ReLU(), # relu3-3 # 23
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(64, 64, (3, 3)),
nn.ReLU(), # relu3-4 # 26
nn.MaxPool2d((2, 2), (2, 2), (0, 0), ceil_mode=True),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(64, 128, (3, 3)),
nn.ReLU(), # relu4-1 # 30
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(128, 512, (3, 3)),
nn.ReLU(), # relu4-2 # 33
)
if model:
self.load_state_dict(torch.load(model, map_location=lambda storage, location: storage))
def forward(self, x):
return self.vgg(x)
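# Quick shape check (hypothetical helper): SmallEncoder4_2 mirrors the layer layout of
# Encoder4_2 with slimmer intermediate channels but the same 512-channel relu4_2 output,
# so both produce identically shaped features for the same input.
def _example_compare_relu4_2_shapes():
    x = torch.zeros(1, 3, 256, 256)
    big, small = Encoder4_2(), SmallEncoder4_2()
    with torch.no_grad():
        return big(x).shape, small(x).shape  # both torch.Size([1, 512, 32, 32])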
class Encoder4(nn.Module):
def __init__(self, model=None, fixed=False):
super(Encoder4, self).__init__()
self.fixed = fixed
self.vgg = nn.Sequential(
nn.Conv2d(3, 3, (1, 1)),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(3, 64, (3, 3)),
nn.ReLU(), # relu1-1
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(64, 64, (3, 3)),
nn.ReLU(), # relu1-2
nn.MaxPool2d((2, 2), (2, 2), (0, 0), ceil_mode=True),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(64, 128, (3, 3)),
nn.ReLU(), # relu2-1
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(128, 128, (3, 3)),
nn.ReLU(), # relu2-2
nn.MaxPool2d((2, 2), (2, 2), (0, 0), ceil_mode=True),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(128, 256, (3, 3)),
nn.ReLU(), # relu3-1
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(256, 256, (3, 3)),
nn.ReLU(), # relu3-2
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(256, 256, (3, 3)),
nn.ReLU(), # relu3-3
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(256, 256, (3, 3)),
nn.ReLU(), # relu3-4
nn.MaxPool2d((2, 2), (2, 2), (0, 0), ceil_mode=True),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(256, 512, (3, 3)),
nn.ReLU(), # relu4-1, this is the last layer used
)
if model:
assert(os.path.splitext(model)[1] in {".t7", ".pth"})
if model.endswith(".t7"):
t7_model = load_lua(model)
load_param(t7_model, 0, self.vgg[0])
load_param(t7_model, 2, self.vgg[2])
load_param(t7_model, 5, self.vgg[5])
load_param(t7_model, 9, self.vgg[9])
load_param(t7_model, 12, self.vgg[12])
load_param(t7_model, 16, self.vgg[16])
load_param(t7_model, 19, self.vgg[19])
load_param(t7_model, 22, self.vgg[22])
load_param(t7_model, 25, self.vgg[25])
#!/usr/bin/env python
import os
import sys
import argparse
import subprocess # nosec
import re
import fnmatch
import json
import urllib
from urllib.parse import urlencode
from urllib.request import Request, urlopen
env = os.environ
class BColors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--include", nargs='+',
help="Files to include, can cointain unix-style wildcard. (default *.xml)",
default=["*.xml", "*.json", "*.trx", "*.tap"])
parser.add_argument("-x", "--exclude", nargs='+', help="Files to exclude, can cointain unix-style wildcard.",
default=[])
parser.add_argument("-l", "--file_list", nargs='+',
help="Explicit file list, if given include and exclude are ignored.", default=None)
parser.add_argument("-t", "--token",
help="Token to authenticate (not needed for public projects on appveyor, travis and circle-ci")
parser.add_argument("-n", "--name",
help="Custom defined name of the upload when commiting several builds with the same ci system")
parser.add_argument("-f", "--framework",
choices=["boost", "junit", "testng", "xunit", "cmocka", "unity", "criterion", "bandit",
"catch", "cpputest", "cute", "cxxtest", "gtest", "qtest", "go", "testunit", "rspec",
"minitest",
"nunit", "mstest", "xunitnet", "phpunit", "pytest", "pyunit", "mocha", "ava", "tap",
"tape", "qunit", "doctest", "nunit"],
help="The used unit test framework - if not provided the script will try to determine it")
parser.add_argument("-r", "--root_dir",
help="The root directory of the git-project, to be used for aligning paths properly. "
"Default is the git-root.")
parser.add_argument("-s", "--ci_system", help="Set the CI System manually. Should not be needed")
parser.add_argument("-b", "--build_id",
help="The identifer The Identifer for the build. When used on a CI system this will be "
"automatically generated.")
parser.add_argument("-a", "--sha", help="Specify the commit sha - normally determined by invoking git")
parser.add_argument("-c", "--check_run", help="The check-run id used by github, used to update reports.")
parser.add_argument("-d", "--id_file", help="The file to hold the check id given by github.",
default=".report-ci-id.json")
parser.add_argument("-D", "--define", help="Define a preprocessor token for the name lookup.", nargs='+')
parser.add_argument("-p", "--preset", help="Select a definition & include preset from .report-ci.yaml.")
parser.add_argument("-m", "--merge", help="Merge similar annotations from different check-runs.")
args = parser.parse_args()
if not args.check_run:
try:
args.check_run = json.loads(open(args.id_file, "r").read())["github"]
except Exception as e:
print(e)
root_dir = None
branch = None
service = None
pr = None
commit = None
build = None
build_url = None
search_in = None
slug = None
run_name = args.name
build_id = None
account_name = None
os_name = None
if env.get("GITHUB_ACTIONS") == "true":
print(BColors.HEADER + " Github actions CI detected." + BColors.ENDC)
# https://help.github.com/en/actions/configuring-and-managing-workflows/using-environment-variables
service = "github-actions"
build_id = env.get("GITHUB_RUN_ID")
commit = env.get("GITHUB_SHA")
slug = env.get("GITHUB_REPOSITORY")
account_name = env.get("GITHUB_ACTOR")
root_dir = env.get("GITHUB_WORKSPACE")
branch = env.get("GITHUB_REF")
if branch:
branch = os.path.basename(branch)
else:
print(BColors.HEADER + " No CI detected." + BColors.ENDC)
if args.root_dir:
root_dir = args.root_dir
if args.sha:
commit = args.sha
if not commit:
commit = subprocess.check_output(["/bin/git", "rev-parse", "HEAD"]).decode().replace('\n', '') # nosec
print(BColors.OKBLUE + " Commit hash: " + commit + BColors.ENDC)
if not root_dir:
root_dir = subprocess.check_output(["/bin/git", "rev-parse", "--show-toplevel"]).decode().replace('\n', '') # nosec
print(BColors.OKBLUE + " Root dir: " + root_dir + BColors.ENDC)
owner, repo = None, None
if slug:
try:
(owner, repo) = slug.split('/')
except Exception as e:
print(BColors.WARNING + "Invalid Slug: '{0}'".format(slug) + BColors.ENDC)
if not owner or not repo:
remote_v = subprocess.check_output(["/bin/git", "remote", "-v"]).decode() # nosec
match = re.search(r"(?:https://|ssh://git@)github.com/([-_A-Za-z0-9]+)/((?:(?!\.git(?:\s|$))[-._A-Za-z0-9])+)",
remote_v)
if match:
owner = match.group(1)
repo = match.group(2)
else:
match = re.search(r"git@github\.com:([-_A-Za-z0-9]+)/((?:(?!\.git(?:\s|$))[-._A-Za-z0-9])+)", remote_v)
owner = match.group(1)
repo = match.group(2)
print(BColors.OKBLUE + " Project: " + owner + '/' + repo + BColors.ENDC)
def match_file(file_abs):
match = False
file = os.path.relpath(file_abs)
for inc in args.include:
if fnmatch.fnmatch(file, inc) or fnmatch.fnmatch(file_abs, inc):
match = True
break
for exc in args.exclude:
if fnmatch.fnmatch(file, exc) or fnmatch.fnmatch(file_abs, exc):
match = False
break
return match
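# Example (illustrative, assuming the script runs from the repository root): with the
# default include patterns ["*.xml", "*.json", "*.trx", "*.tap"],
# match_file("/repo/build/test-results.xml") returns True, while adding
# --exclude "build/*" makes the same file fail the exclude check and return False.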
boost_test = []
junit_test = []
xunit_test = []
testng_test = []
criterion_test = []
complete_content = []
file_list = []
bandit = []
catch_test = []
cxxtest = []
qtest = []
go_test = []
testunit = []
rspec = []
phpunit = 0
pytest = 0
mocha = []
tap_test = []
ava = 0
mstest = []
xunitnet = []
nunit = []
doctest = []
if not args.file_list:
for wk in os.walk(root_dir):
(path, subfolders, files) = wk
if fnmatch.fnmatch(path, "*/.git*"):
continue
for file in files:
abs_file = os.path.join(path, file)
file_list.append(abs_file)
else:
for file in args.file_list:
abs_ = os.path.abspath(file)
if not os.path.isfile(abs_):
print(BColors.FAIL + "Could not find file '" + file + "'" + BColors.ENDC)
exit(1)
else:
file_list.append(abs_)
for abs_file in file_list:
if match_file(abs_file):
content = None
ext = os.path.splitext(abs_file)[1].lower()
binary_content = open(abs_file, "rb").read()
try:
content = binary_content.decode('ascii')
except UnicodeDecodeError:
try:
content = binary_content.decode('utf-8').encode("ascii", "ignore").decode('ascii')
except UnicodeDecodeError:
try:
content = binary_content.decode('utf-16').encode("ascii", "ignore").decode('ascii')
except UnicodeDecodeError:
print(
BColors.FAIL + "Can't figure out encoding of file " + abs_file + ", ignoring it" + BColors.ENDC)
continue
complete_content.append(content)
if ext == ".xml":
if re.match(r"(<\?[^?]*\?>\s*)?<(?:TestResult|TestLog)>\s*<TestSuite", content):
print(" Found " + abs_file + " looks like boost.test")
boost_test.append(content)
continue
if re.match(r"(<\?[^?]*\?>\s*)?<TestCase", content) and (
content.find("<QtVersion>") != -1 or content.find("<qtversion>") != -1):
print(" Found " + abs_file + ", looks like qtest")
qtest.append(content)
continue
if re.match(
r'(<\?[^?]*\?>\s*)?<!-- Tests compiled with Criterion v[0-9.]+ -->\s*<testsuites name="Criterion Tests"',
content):
print(" Found " + abs_file + ", looks like criterion")
criterion_test.append(content)
continue
if re.match(r"(<\?[^?]*\?>\s*)?(<testsuites>\s*)?<testsuite[^>]", content): # xUnit thingy
if content.find('"java.version"') != -1 and (
content.find('org.junit') != -1 or content.find('org/junit') != -1 or
content.find('org\\junit') != -1):
print(" Found " + abs_file + ", looks like JUnit")
junit_test.append(content)
elif content.find('"java.version"') != -1 and (
content.find('org.testng') != -1 or content.find('org/testng') != -1 or
content.find('org\\testng') != -1):
print(" Found " + abs_file + ", looks like TestNG")
testng_test.append(content)
elif content.find('"java.version"') == -1 and content.find('<testsuite name="bandit" tests="') != -1:
print(" Found " + abs_file + ", looks like Bandit")
bandit.append(content)
elif content.find('.php') != -1:
print(" Found " + abs_file + ", looks like PHPUnit")
phpunit += 1
xunit_test.append(content)
elif content.find('.py') != -1:
print(" Found " + abs_file + ", looks like PyTest")
pytest += 1
xunit_test.append(content)
else:
print(" Found " + abs_file + ", looks like some xUnit")
xunit_test.append(content)
continue
if re.match(r'(<\?[^?]*\?>\s*)?<Catch\s+name=', content):
print(" Found " + abs_file + ", looks like catch")
catch_test.append(content)
continue
if re.match(r'(<\?[^?]*\?>\s*)?<stream>\s*<ready-test-suite>', content):
print(" Found " + abs_file + ", looks like TestUnit")
testunit.append(content)
continue
if re.match(
r'(<\?[^?]*\?>\s*)?(<!--This file represents the results of running a test suite-->)?<test-results\s+name',
content) or \
re.match(r'(<\?[^?]*\?>\s*)?<test-run id="2"', content):
print(" Found " + abs_file + ", looks like NUnit")
nunit.append(content)
continue
if re.match(r'(<\?[^?]*\?>)?\s*<assemblies', content):
print(" Found " + abs_file + ", looks like xUnit.net")
xunitnet.append(content)
continue
if re.match(r'(<\?[^?]*\?>)?\s*<doctest', content):
print(" Found " + abs_file + ", looks like doctest")
doctest.append(content)
continue
elif ext == ".json" and re.match(r"\s*({|\[)", content): # Might be JSON, let's see if it fits go
try:
lines = content.splitlines()
json_lines = [json.loads(ln) for ln in lines]
if all(val in json_lines[0] for val in ["Time", "Action", "Package"]): # assumption
print("Found " + abs_file + ", looks like GoTest")
go_test = go_test + [json.loads(ln) for ln in lines]
continue
except Exception as e:
print(e)
try:
data = json.loads(content)
if "version" in data and "examples" in data and "summary" in data and "summary_line" in data:
print("Found " + abs_file + ", looks like RSpec")
rspec.append(data)
continue
if "stats" in data and "tests" in data and "pending" in data and "passes" in data and "failures" in data:
print("Found " + abs_file + ", looks like Mocha")
mocha.append(data)
continue
except Exception as e:
print(e)
# data = loadJson(content)
elif ext == ".trx" and re.match(r"(<\?[^?]*\?>\s*)?<TestRun", content):
print("Found " + abs_file + ", looks like MsTest")
mstest.append((abs_file, content))
elif ext == ".tap" and re.match(r"TAP version \d+", content): # is Test anything protocol
if re.match(r"ava[\\\/]cli.js", content):
print("Found " + abs_file + ", looks like AVA")
ava += 1
else:
print("Found " + abs_file + ", looks like TAP")
tap_test.append(content)
upload_content = ""
content_type = ""
if not args.framework:
# check for different test frameworks
if len(testng_test) > 0:
framework = "testng"
print(BColors.HEADER + "TestNG detected" + BColors.ENDC)
elif len(junit_test) > 0:
framework = "junit"
print(BColors.HEADER + "JUnit detected" + BColors.ENDC)
elif len(bandit) > 0:
framework = "bandit"
print(BColors.HEADER + "Bandit detected" + BColors.ENDC)
elif phpunit > 0:
framework = "phpunit"
print(BColors.HEADER + "PHPUnit detected" + BColors.ENDC)
elif pytest > 0:
framework = "pytest"
print(BColors.HEADER + "PyTest detected" + BColors.ENDC)
elif len(xunit_test) > 0:
framework = "xunit"
print(BColors.HEADER + "Unspecified xUnit detected" + BColors.ENDC)
elif len(boost_test) > 0:
framework = "boost"
print(BColors.HEADER + "Boost.test detected" + BColors.ENDC)
elif len(criterion_test) > 0:
framework = "criterion"
print(BColors.HEADER + "Criterion detected" + BColors.ENDC)
elif len(catch_test) > 0:
framework = "catch"
print(BColors.HEADER + "Catch detected" + BColors.ENDC)
elif len(cxxtest) > | |
options.
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidArgument`
If the given spec and/or options contain errors.
"""
task_id = self._invoke('check$task',
{
'spec': spec,
'options': options,
})
task_svc = Tasks(self._config)
task_instance = Task(task_id, task_svc, type.VoidType())
return task_instance
def start_task(self,
spec,
):
"""
Deploys the appliance for the given specification. The result of this
operation can be queried by calling the cis/tasks/{task-id} with the
task-id in the response of this call.
:type spec: :class:`Install.Spec`
:param spec: The specification of the deployment.
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidArgument`
If the given specification contains errors.
"""
task_id = self._invoke('start$task',
{
'spec': spec,
})
task_svc = Tasks(self._config)
task_instance = Task(task_id, task_svc, type.VoidType())
return task_instance
class Log(VapiInterface):
"""
The service that provides logs associated with a task of a given task ID.
"""
_VAPI_SERVICE_ID = 'com.vmware.vcenter.lcm.log'
"""
Identifier of the service in canonical form.
"""
def __init__(self, config):
"""
:type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
:param config: Configuration to be used for creating the stub.
"""
VapiInterface.__init__(self, config, _LogStub)
def get(self,
task_id,
):
"""
Retrieves the zipped files that contains operation log, serialized task
flow, record of all configuration, and a current status of the
operation.
:type task_id: :class:`str`
:param task_id: The :class:`vmodl.lang_client.ID` of the operation; it must exist on
the server. If for any reason the server reboots during an
operation, all :class:`vmodl.lang_client.ID`s previously stored are
lost.
The parameter must be an identifier for the resource type:
``com.vmware.cis.task``.
:rtype: :class:`str`
:return: A zipped file that contains the files mentioned above.
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidArgument`
If the given ID does not exist on the server. Possible causes include an
incorrect task ID, or the log files having been purged by the system or
manually.
"""
return self._invoke('get',
{
'task_id': task_id,
})
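# Hypothetical usage sketch for the Log service above (the stub configuration is assumed
# to come from the vSphere Automation SDK session helpers elsewhere): download the zipped
# log bundle of a finished LCM task to a local file.
def _example_download_lcm_log(stub_config, task_id, out_path):
    log_svc = Log(stub_config)
    bundle = log_svc.get(task_id)  # zipped file content as returned by the service
    mode = "wb" if isinstance(bundle, bytes) else "w"
    with open(out_path, mode) as fh:
        fh.write(bundle)
    return out_path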
class Migrate(VapiInterface):
"""
The service to migrate a windows VC to Embedded VCSA, PSC, and Management
VCSA.
"""
_VAPI_SERVICE_ID = 'com.vmware.vcenter.lcm.migrate'
"""
Identifier of the service in canonical form.
"""
def __init__(self, config):
"""
:type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
:param config: Configuration to be used for creating the stub.
"""
VapiInterface.__init__(self, config, _MigrateStub)
class MigrateDestinationAppliance(VapiStruct):
"""
Spec to describe the new appliance.
.. tip::
The arguments are used to initialize data attributes with the same
names.
"""
_validator_list = [
UnionValidator(
'appliance_type',
{
'VCSA_EMBEDDED' : [('appliance_size', False), ('appliance_disk_size', False), ('vcsa_embedded', True)],
'VCSA_EXTERNAL' : [('appliance_size', False), ('appliance_disk_size', False)],
'PSC' : [('psc', True)],
'VMC' : [],
}
),
]
def __init__(self,
appliance_name=None,
appliance_type=None,
appliance_size=None,
appliance_disk_size=None,
root_password=None,
thin_disk_mode=None,
ova_location=None,
ova_location_ssl_verify=None,
ova_location_ssl_thumbprint=None,
ovftool_location=None,
ovftool_location_ssl_verify=None,
ovftool_location_ssl_thumbprint=None,
active_directory_domain=None,
active_directory_username=None,
active_directory_password=None,
services=None,
temporary_network=None,
history=None,
ovftool_arguments=None,
vcsa_embedded=None,
psc=None,
):
"""
:type appliance_name: :class:`str`
:param appliance_name: The name of the appliance to deploy.
:type appliance_type: :class:`ApplianceType` or ``None``
:param appliance_type: The type of appliance to deploy.
If None, defaults to VCSA_EMBEDDED
:type appliance_size: :class:`ApplianceSize` or ``None``
:param appliance_size: A size descriptor based on the number of virtual machines which
will be managed by the new vCenter appliance.
If None, defaults to SMALL
:type appliance_disk_size: :class:`StorageSize` or ``None``
:param appliance_disk_size: The disk size of the new vCenter appliance.
If None, defaults to REGULAR
:type root_password: :class:`str`
:param root_password: Password must conform to the following requirements: 1. At least 8
characters. 2. No more than 20 characters. 3. At least 1 uppercase
character. 4. At least 1 lowercase character. 5. At least 1 number.
6. At least 1 special character (e.g., '!', '(', '\\\\@', etc.). 7.
Only visible A-Z, a-z, 0-9 and punctuation (spaces are not allowed)
:type thin_disk_mode: :class:`bool`
:param thin_disk_mode: Whether to deploy the appliance with thin mode virtual disks.
:type ova_location: :class:`str`
:param ova_location: The location of the OVA file.
:type ova_location_ssl_verify: :class:`bool` or ``None``
:param ova_location_ssl_verify: A flag to indicate whether the ssl verification is required.
If ``ovaLocationSslThumbprint`` is provided, this field can be
omitted. If None, defaults to True
:type ova_location_ssl_thumbprint: :class:`str` or ``None``
:param ova_location_ssl_thumbprint: SSL thumbprint of ssl verification. If provided, ssl_verify can be
omitted or set to true. If omitted, ssl_verify must be false. If
omitted and ssl_verify is true, an error will occur.
If ova_location_ssl_verify is False, this field can be omitted
:type ovftool_location: :class:`str`
:param ovftool_location: The location of the OVF Tool.
:type ovftool_location_ssl_verify: :class:`bool` or ``None``
:param ovftool_location_ssl_verify: Flag to indicate whether or not to verify the SSL thumbprint of OVF
Tool location.
If None, defaults to True.
:type ovftool_location_ssl_thumbprint: :class:`str` or ``None``
:param ovftool_location_ssl_thumbprint: SSL thumbprint of OVF Tool location to be verified.
When ovftoolLocationSslVerify is set to False, this field can be
omitted.
:type active_directory_domain: :class:`str` or ``None``
:param active_directory_domain: The name of the Active Directory domain to which the source Windows
installation is joined. If the source Windows installation is not
joined to an Active Directory domain, omit this parameter.
Not required when active directory is not applicable
:type active_directory_username: :class:`str` or ``None``
:param active_directory_username: Administrator user name of the Active Directory domain to which the
source Windows installation is joined. The format can be either
'username' or 'username\\\\@domain'
Not required when active directory is not applicable
:type active_directory_password: :class:`str` or ``None``
:param active_directory_password: Password for the active directory user.
Not required when active directory is not applicable
:type services: :class:`UpgradeDestinationApplianceService`
:param services: Spec to configure vCenter server services.
:type temporary_network: :class:`TemporaryNetwork`
:param temporary_network: The network settings of the appliance to be deployed.
:type history: :class:`History` or ``None``
:param history: History data to be included in the upgrade and migrate
Default value will be applied when absent
:type ovftool_arguments: (:class:`dict` of :class:`str` and :class:`str`) or ``None``
:param ovftool_arguments: The OVF Tool arguments to be included.
Not required when no OVF Tool argument to pass through
:type vcsa_embedded: :class:`CeipOnlySso`
:param vcsa_embedded: Spec used to configure an embedded vCenter Server. This field
describes how the embedded vCenter Server appliance should be
configured.
This attribute is optional and it is only relevant when the value
of ``applianceType`` is :attr:`ApplianceType.VCSA_EMBEDDED`.
:type psc: :class:`CeipOnlySso`
:param psc: Spec used to configure a Platform Services Controller. This section
describes how the Platform Services Controller appliance should be
configured. If unset, either ``vcsaEmbedded`` or ``#vcsaExternal``
must be provided.
This attribute is optional and it is only relevant when the value
of ``applianceType`` is :attr:`ApplianceType.PSC`.
"""
self.appliance_name = appliance_name
self.appliance_type = appliance_type
self.appliance_size = appliance_size
self.appliance_disk_size = appliance_disk_size
self.root_password = root_password
self.thin_disk_mode = thin_disk_mode
self.ova_location = ova_location
self.ova_location_ssl_verify = ova_location_ssl_verify
self.ova_location_ssl_thumbprint = ova_location_ssl_thumbprint
self.ovftool_location = ovftool_location
self.ovftool_location_ssl_verify = ovftool_location_ssl_verify
self.ovftool_location_ssl_thumbprint = ovftool_location_ssl_thumbprint
self.active_directory_domain = active_directory_domain
self.active_directory_username = active_directory_username
self.active_directory_password = active_directory_password
self.services = services
self.temporary_network = temporary_network
self.history = history
self.ovftool_arguments = ovftool_arguments
self.vcsa_embedded = vcsa_embedded
self.psc = psc
VapiStruct.__init__(self)
MigrateDestinationAppliance._set_binding_type(type.StructType(
'com.vmware.vcenter.lcm.migrate.migrate_destination_appliance', {
'appliance_name': type.StringType(),
'appliance_type': type.OptionalType(type.ReferenceType(__name__, 'ApplianceType')),
'appliance_size': type.OptionalType(type.ReferenceType(__name__, 'ApplianceSize')),
'appliance_disk_size': type.OptionalType(type.ReferenceType(__name__, 'StorageSize')),
'root_password': type.SecretType(),
'thin_disk_mode': type.BooleanType(),
'ova_location': type.StringType(),
'ova_location_ssl_verify': type.OptionalType(type.BooleanType()),
'ova_location_ssl_thumbprint': type.OptionalType(type.StringType()),
'ovftool_location': type.StringType(),
'ovftool_location_ssl_verify': type.OptionalType(type.BooleanType()),
'ovftool_location_ssl_thumbprint': type.OptionalType(type.StringType()),
'active_directory_domain': type.OptionalType(type.StringType()),
'active_directory_username': type.OptionalType(type.StringType()),
'active_directory_password': type.OptionalType(type.SecretType()),
'services': type.ReferenceType(__name__, 'UpgradeDestinationApplianceService'),
'temporary_network': type.ReferenceType(__name__, 'TemporaryNetwork'),
'history': type.OptionalType(type.ReferenceType(__name__, 'History')),
'ovftool_arguments': type.OptionalType(type.MapType(type.StringType(), type.StringType())),
'vcsa_embedded': type.OptionalType(type.ReferenceType(__name__, 'CeipOnlySso')),
'psc': type.OptionalType(type.ReferenceType(__name__, 'CeipOnlySso')),
},
MigrateDestinationAppliance,
False,
None))
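# Illustrative sketch only (not part of the generated SDK bindings): building a
# minimal MigrateDestinationAppliance with just the required fields. All values
# below are hypothetical placeholders; my_services_spec and my_temp_network are
# assumed to be previously constructed UpgradeDestinationApplianceService and
# TemporaryNetwork objects.
#
#   appliance = MigrateDestinationAppliance(
#       appliance_name='new-vcsa',
#       root_password='********',
#       thin_disk_mode=True,
#       ova_location='https://example.com/vcsa.ova',
#       ovftool_location='https://example.com/ovftool/',
#       services=my_services_spec,
#       temporary_network=my_temp_network)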
class Spec(VapiStruct):
"""
Spec to describe the configuration parameters that are required for
migrating a Windows vCenter Server.
.. tip::
The arguments are used to initialize data attributes with the same
names.
"""
def __init__(self,
destination_location=None,
destination_appliance=None,
source_vc_windows=None,
existing_migration_assistant=None,
start_migration_assistant=None,
source_vum_location=None,
source_vum=None,
):
"""
:type destination_location: :class:`DestinationLocation`
:param destination_location: This subsection describes the ESX or VC on which to deploy the
appliance.
:type destination_appliance: :class:`Migrate.MigrateDestinationAppliance`
:param destination_appliance: Spec to describe the new appliance.
:type source_vc_windows: :class:`SourceVcWindows`
:param source_vc_windows: Spec to describe the existing Windows vCenter server to migrate.
:type existing_migration_assistant: :class:`ExistingMigrationAssistant` or ``None``
:param existing_migration_assistant: Spec to describe the attributes of a running Migration Assistant on
the Windows vCenter server.
Only applicable when migration assistant is already running on the
source appliance
:type start_migration_assistant: :class:`MigrationAssistant` or ``None``
:param start_migration_assistant: Spec to automate the invocation of Migration Assistant. Automatic
invocation works only if the source Windows installation is running
as a virtual machine.
Only applicable when migration assistant is not running on the
source | |
"""defines various GUI unit tests"""
import os
import shutil
import unittest
import numpy as np
try:
import matplotlib.pyplot as plt
IS_MATPLOTLIB = True
except ImportError:
IS_MATPLOTLIB = False
import pyNastran
from pyNastran.gui.utils.load_results import (
load_csv, load_deflection_csv, load_user_geom, create_res_obj)
from pyNastran.gui.utils.version import check_for_newer_version
from pyNastran.gui.utils.utils import find_next_value_in_sorted_list
from pyNastran.gui.menus.legend.write_gif import (
setup_animation, make_two_sided, make_symmetric, write_gif, IS_IMAGEIO)
from pyNastran.gui.menus.results_sidebar_utils import get_cases_from_tree, build_pruned_tree
PKG_PATH = pyNastran.__path__[0]
MODEL_PATH = os.path.join(PKG_PATH, '..', 'models')
from pyNastran.gui.gui_objects.gui_result import GuiResult
class GuiUtils(unittest.TestCase):
def test_gui_result(self):
"""tests GuiResult"""
subcase_id = 1
header = 'cat'
title = 'dog'
location = 'node'
scalar = np.ones(10)
x = GuiResult(subcase_id, header, title, location, scalar,
mask_value=None, nlabels=None, labelsize=None, ncolors=None,
colormap='jet', data_map=None, data_format=None, uname='GuiResult')
y = x + x
#print(y.scalar)
y = x + 3
#print(y.scalar)
y = x - 3
#print(y.scalar)
y = x * 3
y = x / 3
y = 2 * x
y = 2 - x
y = 2 + x
#print(y.scalar)
y = (-x - 1) ** 3
#print(y.scalar)
y = (+x + 1) ** 3
#print(y.scalar)
scalar = np.ones(8) * 2.
x2 = GuiResult(subcase_id, header, title, location, scalar,
mask_value=None, nlabels=None, labelsize=None, ncolors=None,
colormap='jet', data_map=None, data_format=None, uname='GuiResult')
y2 = 3. / x2
#print(x2.scalar)
#print(y2.scalar)
x2 + y2
x2 / y2
x2 * y2
x2 ** y2
x2 % 3
x2 % y2
def test_check_version_fake(self):
"""
Tests ``check_for_newer_version``
we're faking the version for the purpose of the test
"""
# no dev versions
version_current_test = '1.0.0'
version_latest_test = '1.1.0'
version_latest, version_current, is_newer = check_for_newer_version(
version_current=version_current_test,
version_latest=version_latest_test,
quiet=True)
#print(version_latest, version_current, is_newer)
assert version_current == version_current_test
assert version_latest == version_latest_test
assert is_newer is True, (version_latest, version_current, is_newer)
# ------------------------
# a bigger number version takes priority
version_current_test = '1.4.0+dev.5378fd363'
version_latest_test = '1.0.0'
(version_latest, version_current, is_newer) = check_for_newer_version(
version_current=version_current_test,
version_latest=version_latest_test,
quiet=True)
assert version_current == version_current_test
assert version_latest == version_latest_test
assert is_newer is False, (version_latest, version_current, is_newer)
# ------------------------
# a dev version is newer than a non-dev version
version_current_test = '1.4.0+dev.5378fd363'
version_latest_test = '1.4.0'
(version_latest, version_current, is_newer) = check_for_newer_version(
version_current=version_current_test,
version_latest=version_latest_test,
quiet=True)
assert version_current == version_current_test
assert version_latest == version_latest_test
assert is_newer is True, (version_latest, version_current, is_newer)
def test_check_version_actual(self):
"""tests ``check_for_newer_version`` with actual data"""
version_latest, version_current, is_newer = check_for_newer_version(
'1.0.0', quiet=True)
assert is_newer is True, (version_latest, version_current, is_newer)
# current/dev release version -> False
(version_latest, version_current, is_newer) = check_for_newer_version(
version_current=None,
version_latest=None,
quiet=True)
assert is_newer is False, (version_latest, version_current, is_newer)
def test_gui_csv_01(self):
"""tests solid_bending.txt"""
csv_filename = os.path.join(MODEL_PATH, 'solid_bending', 'solid_bending.txt')
with self.assertRaises(ValueError):
load_deflection_csv(csv_filename) # bad shape
load_csv(csv_filename)
def test_gui_csv_02(self):
"""tests solid_bending_multi_node.txt"""
csv_filename = os.path.join(MODEL_PATH, 'solid_bending', 'solid_bending_multi_node.txt')
with self.assertRaises(ValueError):
load_deflection_csv(csv_filename) # bad shape
load_csv(csv_filename)
def test_gui_csv_03a(self):
"""tests solid_bending_multi_node.csv with deflection loader"""
csv_filename = os.path.join(MODEL_PATH, 'solid_bending', 'solid_bending_multi_node.csv')
with self.assertRaises(ValueError):
load_deflection_csv(csv_filename) # bad shape
def test_gui_csv_03b(self):
"""tests solid_bending_multi_node.csv"""
csv_filename = os.path.join(MODEL_PATH, 'solid_bending', 'solid_bending_multi_node.csv')
load_csv(csv_filename)
def test_gui_deflection_csv_01a(self):
"""tests solid_bending_multi_node.csv with deflection loader"""
csv_filename = os.path.join(MODEL_PATH, 'solid_bending', 'solid_bending_multi_node.csv')
with self.assertRaises(ValueError):
load_deflection_csv(csv_filename) # bad shape
def test_gui_deflection_csv_01b(self):
"""tests solid_bending_multi_deflection_node.txt with deflection loader"""
csv_filename = os.path.join(MODEL_PATH, 'solid_bending', 'solid_bending_multi_deflection_node.txt')
A, nids_index, fmt_dict, headers = load_deflection_csv(csv_filename)
result_type = 'node'
header0 = headers[0]
unused_result0 = A[header0]
#nrows = result0.shape[0]
#assert nrows == self.nnodes, 'nrows=%s nnodes=%s' % (nrows, self.nnodes)
header = header0
islot = 0
with self.assertRaises(RuntimeError):
create_res_obj(islot, headers, header, A, fmt_dict, result_type,
dim_max=1.0, xyz_cid0=None,
is_deflection=False, is_force=False)
create_res_obj(islot, headers, header, A, fmt_dict, result_type,
dim_max=1.0, xyz_cid0=None,
is_deflection=True, is_force=False)
def test_gui_custom_geom_01(self):
"""tests custom_geom.csv"""
csv_filename = os.path.join(MODEL_PATH, 'custom_geom.csv')
load_user_geom(csv_filename)
def test_animation_scale_0(self):
"""0 to scale"""
scale = 2.0
out = setup_animation(
scale, istep=None,
animate_scale=True, animate_phase=False, animate_time=False,
icase_disp=42,
icase_start=None, icase_end=None, icase_delta=None,
time=1.0, animation_profile='0 to scale',
fps=5)
phases, icases_fringe, icases_disp, icases_vector, isteps, scales, analysis_time, onesided, endpoint = out
assert len(np.unique(icases_disp)) == 1
assert len(np.unique(phases)) == 1
assert np.allclose(analysis_time, 1.0), analysis_time
assert np.allclose(scales.min(), 0.), scales
assert np.allclose(scales.max(), scale), scales
expected_scales = [0.0, 0.25*scale, 0.5*scale, 0.75*scale, scale]
assert_array(scales, expected_scales, 'scales')
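# Note: with animation_profile='0 to scale', time=1.0 s and fps=5, setup_animation
# produces 5 evenly spaced frames (endpoint included), which is exactly the
# expected_scales sequence asserted above.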
def test_animation_scale_1a(self):
"""0 to scale to 0"""
scale = 2.0
out = setup_animation(
scale, istep=None,
animate_scale=True, animate_phase=False, animate_time=False,
icase_disp=42,
icase_start=None, icase_end=None, icase_delta=None,
time=1.0, animation_profile='0 to scale to 0',
fps=11)
phases, icases_fringe, icases_disp, icases_vector, isteps, scales, analysis_time, onesided, endpoint = out
assert len(np.unique(icases_disp)) == 1
assert len(np.unique(phases)) == 1
assert np.allclose(analysis_time, 0.5), analysis_time
assert np.allclose(scales.min(), 0.), scales
assert np.allclose(scales.max(), scale), scales
expected_scales = [0., 0.5*scale, scale]
assert_array(scales, expected_scales, 'scales')
def test_animation_scale_1b(self):
"""0 to scale to 0"""
scale = 1.0
out = setup_animation(
scale, istep=None,
animate_scale=True, animate_phase=False, animate_time=False,
icase_disp=42,
icase_start=None, icase_end=None, icase_delta=None,
time=1.0, animation_profile='0 to scale to 0',
fps=10)
phases, icases_fringe, icases_disp, icases_vector, isteps, scales, analysis_time, onesided, endpoint = out
assert len(np.unique(icases_disp)) == 1
assert len(np.unique(phases)) == 1
assert np.allclose(analysis_time, 0.5), analysis_time
assert np.allclose(scales.min(), 0.), scales
assert np.allclose(scales.max(), scale), scales
expected_scales = [0., 0.5, 1.]
assert_array(scales, expected_scales, 'scales')
def test_animation_scale_2(self):
"""-scale to scale"""
scale = 2.0
out = setup_animation(
scale, istep=None,
animate_scale=True, animate_phase=False, animate_time=False,
icase_disp=42,
icase_start=None, icase_end=None, icase_delta=None,
time=1.0, animation_profile='-scale to scale',
fps=5)
phases, icases_fringe, icases_disp, icases_vector, isteps, scales, analysis_time, onesided, endpoint = out
assert len(np.unique(icases_disp)) == 1
assert len(np.unique(phases)) == 1
assert np.allclose(analysis_time, 1.0), analysis_time
assert np.allclose(scales.min(), -scale), scales
assert np.allclose(scales.max(), scale), scales
expected_scales = [-scale, -0.5*scale, 0., 0.5*scale, scale]
assert_array(scales, expected_scales, 'scales')
def test_animation_scale_3a(self):
"""-scale to scale to -scale"""
scale = 2.0
out = setup_animation(
scale, istep=None,
animate_scale=True, animate_phase=False, animate_time=False,
icase_disp=42,
icase_start=None, icase_end=None, icase_delta=None,
time=1.0, animation_profile='-scale to scale to -scale',
fps=11)
phases, icases_fringe, icases_disp, icases_vector, isteps, scales, analysis_time, onesided, endpoint = out
assert len(np.unique(icases_disp)) == 1
assert len(np.unique(phases)) == 1
assert np.allclose(analysis_time, 0.5), analysis_time
assert np.allclose(scales.min(), -scale), scales
assert np.allclose(scales.max(), scale), scales
expected_scales = [-scale, 0., scale]
assert_array(scales, expected_scales, 'scales')
def test_animation_scale_3b(self):
"""-scale to scale to -scale"""
scale = 2.0
out = setup_animation(
scale, istep=None,
animate_scale=True, animate_phase=False, animate_time=False,
icase_disp=42,
icase_start=None, icase_end=None, icase_delta=None,
time=1.0, animation_profile='-scale to scale to -scale',
fps=10)
phases, icases_fringe, icases_disp, icases_vector, isteps, scales, analysis_time, onesided, endpoint = out
assert len(np.unique(icases_disp)) == 1
assert len(np.unique(phases)) == 1
assert np.allclose(analysis_time, 0.5), analysis_time
assert np.allclose(scales.min(), -scale), scales
assert np.allclose(scales.max(), scale), scales
assert len(scales) == 3, scales
expected_scales = [-scale, 0., scale]
assert_array(scales, expected_scales, 'scales')
def test_animation_scale_3c(self):
"""-scale to scale to -scale"""
scale = 2.0
out = setup_animation(
scale, istep=None,
animate_scale=True, animate_phase=False, animate_time=False,
icase_disp=42,
icase_start=None, icase_end=None, icase_delta=None,
time=1.0, animation_profile='-scale to scale to -scale',
fps=1)
phases, icases_fringe, icases_disp, icases_vector, isteps, scales, analysis_time, onesided, endpoint = out
assert len(np.unique(icases_disp)) == 1
assert len(np.unique(phases)) == 1
expected_scales = [-scale, scale]
assert_array(scales, expected_scales, 'scales')
assert np.allclose(analysis_time, 0.5), analysis_time
assert np.allclose(scales.min(), -scale), scales
assert np.allclose(scales.max(), scale), scales
assert len(scales) == 2, scales
def test_animation_scale_4a(self):
"""0 to scale to -scale to 0"""
scale_atols = [(1., 0.00000001), (10.0, 0.00000001)]
for scale, atol in scale_atols:
out = setup_animation(
scale, istep=None,
animate_scale=True, animate_phase=False, animate_time=False,
icase_disp=42,
icase_start=None, icase_end=None, icase_delta=None,
time=1.0, animation_profile='0 to scale to -scale to 0',
fps=5, )
phases, icases_fringe, icases_disp, icases_vector, isteps, scales, analysis_time, onesided, endpoint = out
assert len(np.unique(icases_disp)) == 1
assert len(np.unique(phases)) == 1
assert np.allclose(analysis_time, 1.0), analysis_time
expected_scales = [0., scale, 0., -scale]
assert_array(scales, expected_scales, 'scales')
msg = ''
if not np.allclose(scales.min(), -scale, atol=atol): # pragma: no cover
msg += 'scales=%s min=%s expected=%s\n' % (scales, scales.min(), -scale)
if not np.allclose(scales.max(), scale, atol=atol): # pragma: no cover
msg += 'scales=%s max=%s expected=%s' % (scales, scales.max(), scale)
if msg: # pragma: no cover
raise ValueError(msg)
def test_animation_scale_4b(self):
"""0 to scale to -scale to 0"""
scale, atol = (1., 0.00000001)
out = setup_animation(
scale, istep=None,
animate_scale=True, animate_phase=False, animate_time=False,
icase_disp=42,
icase_start=None, icase_end=None, icase_delta=None,
time=1.0, animation_profile='0 to scale to -scale to 0',
fps=7, animate_in_gui=True)
phases, icases_fringe, icases_disp, icases_vector, isteps, scales, analysis_time, onesided, endpoint = out
assert len(np.unique(icases_disp)) == 1
assert len(np.unique(phases)) == 1
assert np.allclose(analysis_time, 1.0), analysis_time
expected_scales = [0., 0.5*scale, scale, 0.5*scale,
0., -0.5*scale, -scale, -0.5*scale]
assert_array(scales, expected_scales, 'scales')
scales, phases, icases_fringe, icases_disp, icases_vector, isteps = make_two_sided(
scales, phases, icases_fringe, icases_disp, icases_vector, isteps, onesided)
expected_scales = [0., 0.5*scale, scale, 0.5*scale, 0., -0.5*scale, -scale, -0.5*scale,
0., 0.5*scale, scale, 0.5*scale, 0., -0.5*scale, -scale, -0.5*scale, ]
assert_array(scales, expected_scales, 'scales')
msg = ''
if not np.allclose(scales.min(), -scale, atol=atol): # pragma: no cover
msg += 'scales=%s min=%s expected=%s\n' % (scales, scales.min(), -scale)
if not np.allclose(scales.max(), scale, atol=atol): # pragma: no cover
msg += 'scales=%s max=%s expected=%s' % (scales, scales.max(), scale)
if msg: # pragma: no cover
| |
字幕&加油添醋 by TY\'s Allen | 感謝heylisa00cavey1001同學熱情提供梗及翻譯',
'uploader': '孫ᄋᄅ',
'title': '[A-made] 變態妍字幕版 太妍 我就是這樣的人',
},
},
{
'url': 'https://vid.plus/FlRa-iH7PGw',
'only_matching': True,
},
{
'url': 'https://zwearz.com/watch/9lWxNJF-ufM/electra-woman-dyna-girl-official-trailer-grace-helbig.html',
'only_matching': True,
},
{
# Title with JS-like syntax "};" (see https://github.com/ytdl-org/youtube-dl/issues/7468)
# Also tests cut-off URL expansion in video description (see
# https://github.com/ytdl-org/youtube-dl/issues/1892,
# https://github.com/ytdl-org/youtube-dl/issues/8164)
'url': 'https://www.youtube.com/watch?v=lsguqyKfVQg',
'info_dict': {
'id': 'lsguqyKfVQg',
'ext': 'mp4',
'title': '{dark walk}; Loki/AC/Dishonored; collab w/Elflover21',
'description': 'md5:8085699c11dc3f597ce0410b0dcbb34a',
'duration': 133,
'upload_date': '20151119',
'uploader_id': 'IronSoulElf',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/IronSoulElf',
'uploader': 'IronSoulElf',
'creator': '<NAME>, <NAME> and <NAME>',
'track': 'Dark Walk - Position Music',
'artist': '<NAME>, <NAME> and <NAME>',
'album': 'Position Music - Production Music Vol. 143 - Dark Walk',
},
'params': {
'skip_download': True,
},
},
{
# Tags with '};' (see https://github.com/ytdl-org/youtube-dl/issues/7468)
'url': 'https://www.youtube.com/watch?v=Ms7iBXnlUO8',
'only_matching': True,
},
{
# Video licensed under Creative Commons
'url': 'https://www.youtube.com/watch?v=M4gD1WSo5mA',
'info_dict': {
'id': 'M4gD1WSo5mA',
'ext': 'mp4',
'title': 'md5:e41008789470fc2533a3252216f1c1d1',
'description': 'md5:a677553cf0840649b731a3024aeff4cc',
'duration': 721,
'upload_date': '20150127',
'uploader_id': 'BerkmanCenter',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/BerkmanCenter',
'uploader': 'The Berkman Klein Center for Internet & Society',
'license': 'Creative Commons Attribution license (reuse allowed)',
},
'params': {
'skip_download': True,
},
},
{
# Channel-like uploader_url
'url': 'https://www.youtube.com/watch?v=eQcmzGIKrzg',
'info_dict': {
'id': 'eQcmzGIKrzg',
'ext': 'mp4',
'title': 'Democratic Socialism and Foreign Policy | Bernie Sanders',
'description': 'md5:dda0d780d5a6e120758d1711d062a867',
'duration': 4060,
'upload_date': '20151119',
'uploader': '<NAME>',
'uploader_id': 'UCH1dpzjCEiGAt8CXkryhkZg',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCH1dpzjCEiGAt8CXkryhkZg',
'license': 'Creative Commons Attribution license (reuse allowed)',
},
'params': {
'skip_download': True,
},
},
{
'url': 'https://www.youtube.com/watch?feature=player_embedded&amp;v=V36LpHqtcDY',
'only_matching': True,
},
{
# YouTube Red paid video (https://github.com/ytdl-org/youtube-dl/issues/10059)
'url': 'https://www.youtube.com/watch?v=i1Ko8UG-Tdo',
'only_matching': True,
},
{
# YouTube Red video with episode data
'url': 'https://www.youtube.com/watch?v=iqKdEhx-dD4',
'info_dict': {
'id': 'iqKdEhx-dD4',
'ext': 'mp4',
'title': 'Isolation - Mind Field (Ep 1)',
'description': 'md5:46a29be4ceffa65b92d277b93f463c0f',
'duration': 2085,
'upload_date': '20170118',
'uploader': 'Vsauce',
'uploader_id': 'Vsauce',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/Vsauce',
'series': 'Mind Field',
'season_number': 1,
'episode_number': 1,
},
'params': {
'skip_download': True,
},
'expected_warnings': [
'Skipping DASH manifest',
],
},
{
'url': 'https://www.youtube.com/watch?v=MuAGGZNfUkU&list=RDMM',
'only_matching': True,
},
{
'url': 'https://invidio.us/watch?v=BaW_jenozKc',
'only_matching': True,
},
{
# Youtube Music Auto-generated description
'url': 'https://music.youtube.com/watch?v=MgNrAu2pzNs',
'info_dict': {
'id': 'MgNrAu2pzNs',
'ext': 'mp4',
'title': '<NAME>',
'description': 'md5:7ae382a65843d6df2685993e90a8628f',
'upload_date': '20190312',
'uploader': 'Stephen - Topic',
'uploader_id': 'UC-pWHpBjdGG69N9mM2auIAA',
'artist': 'Stephen',
'track': 'Voyeur Girl',
'album': 'it\'s too much love to know my dear',
'release_date': '20190313',
'release_year': 2019,
},
'params': {
'skip_download': True,
},
},
{
# Youtube Music Auto-generated description
# Retrieve 'artist' field from 'Artist:' in video description
# when it is present on youtube music video
'url': 'https://www.youtube.com/watch?v=k0jLE7tTwjY',
'info_dict': {
'id': 'k0jLE7tTwjY',
'ext': 'mp4',
'title': 'Latch Feat. <NAME>',
'description': 'md5:3cb1e8101a7c85fcba9b4fb41b951335',
'upload_date': '20150110',
'uploader': 'Various Artists - Topic',
'uploader_id': 'UCNkEcmYdjrH4RqtNgh7BZ9w',
'artist': 'Disclosure',
'track': 'Latch Feat. <NAME>',
'album': 'Latch Featuring Sam Smith',
'release_date': '20121008',
'release_year': 2012,
},
'params': {
'skip_download': True,
},
},
{
# Youtube Music Auto-generated description
# handle multiple artists on youtube music video
'url': 'https://www.youtube.com/watch?v=74qn0eJSjpA',
'info_dict': {
'id': '74qn0eJSjpA',
'ext': 'mp4',
'title': 'Eastside',
'description': 'md5:290516bb73dcbfab0dcc4efe6c3de5f2',
'upload_date': '20180710',
'uploader': '<NAME> - Topic',
'uploader_id': 'UCzqz_ksRu_WkIzmivMdIS7A',
'artist': '<NAME>, <NAME>',
'track': 'Eastside',
'album': 'Eastside',
'release_date': '20180713',
'release_year': 2018,
},
'params': {
'skip_download': True,
},
},
{
# Youtube Music Auto-generated description
# handle youtube music video with release_year and no release_date
'url': 'https://www.youtube.com/watch?v=-hcAI0g-f5M',
'info_dict': {
'id': '-hcAI0g-f5M',
'ext': 'mp4',
'title': 'Put It On Me',
'description': 'md5:f6422397c07c4c907c6638e1fee380a5',
'upload_date': '20180426',
'uploader': '<NAME> - Topic',
'uploader_id': 'UCnEkIGqtGcQMLk73Kp-Q5LQ',
'artist': '<NAME>',
'track': 'Put It On Me',
'album': 'The Hearse',
'release_date': None,
'release_year': 2018,
},
'params': {
'skip_download': True,
},
},
{
'url': 'https://www.youtubekids.com/watch?v=BnC-cpUCdns',
'only_matching': True,
},
{
# empty description results in an empty string
'url': 'https://www.youtube.com/watch?v=x41yOUIvK2k',
'info_dict': {
'id': 'x41yOUIvK2k',
'ext': 'mp4',
'title': 'IMG 3456',
'description': '',
'upload_date': '20170613',
'uploader_id': 'ElevageOrVert',
'uploader': 'ElevageOrVert',
},
'params': {
'skip_download': True,
},
},
{
'url': 'https://youtube.com/shorts/7awd-y_DTQY',
'only_matching': True,
},
{
'url': 'https://www.youtube.com/video/2NDLF-k2PwA',
'only_matching': True,
}
]
_VALID_SIG_VALUE_RE = r'^AO[a-zA-Z0-9_-]+=*$'
def __init__(self, *args, **kwargs):
super(YoutubeIE, self).__init__(*args, **kwargs)
self._player_cache = {}
def report_video_info_webpage_download(self, video_id):
"""Report attempt to download video info webpage."""
self.to_screen('%s: Downloading video info webpage' % video_id)
def report_information_extraction(self, video_id):
"""Report attempt to extract video information."""
self.to_screen('%s: Extracting video information' % video_id)
def report_unavailable_format(self, video_id, format):
"""Report extracted video URL."""
self.to_screen('%s: Format %s not available' % (video_id, format))
def _signature_cache_id(self, example_sig):
""" Return a string representation of a signature """
return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))
@classmethod
def _extract_player_info(cls, player_url):
for player_re in cls._PLAYER_INFO_RE:
id_m = re.search(player_re, player_url)
if id_m:
break
else:
raise ExtractorError('Cannot identify player %r' % player_url)
return id_m.group('id')
def _extract_signature_function(self, video_id, player_url):
player_id = self._extract_player_info(player_url)
# Read from filesystem cache
cache_spec = self._downloader.cache.load('youtube-sigfuncs', player_id)
if cache_spec is not None:
return cache_spec
if not player_url.startswith('http'):
player_url = 'https://www.youtube.com' + player_url
download_note = (
'Downloading player %s' % player_url
if self._downloader.params.get('verbose') else
'Downloading js player %s' % player_id
)
code = self._download_webpage(
player_url, video_id,
note=download_note,
errnote='Download of js player %s failed' % player_url)
res = self._parse_sig_js(code)
self._downloader.cache.store('youtube-sigfuncs', player_id, res)
return res
def _parse_sig_js(self, js_player):
shit_parser = re.search(r'[a-z]\=a\.split\((?:""|\'\')\);(([a-zA-Z_][a-zA-Z\d_]+).*);return a\.join', js_player)
if not shit_parser:
raise ExtractorError('Signature decryption code not found')
func, obfuscated_name = shit_parser.group(1, 2)
obfuscated_func = re.search(r'%s\s*=\s*{([\s\w(){}[\].,:;=%s"\']*?})};' % (re.escape(obfuscated_name), '%'),
js_player)
if not obfuscated_func:
raise ExtractorError('Signature deobfuscation functions not found')
obfuscated_stack = obfuscated_func.group(1)
obf_map = {}
for obffun in re.finditer(r'(?P<kp>["\'`]?)([a-zA-Z_][a-zA-Z\d_]+)(?P=kp):function\(a(?:,b)?\){(.*?)}', obfuscated_stack):
obfname, obfval = obffun.group(2, 3)
if 'splice' in obfval:
obf_map[obfname] = 'splice'
elif 'reverse' in obfval:
obf_map[obfname] = 'reverse'
elif 'var' in obfval and 'length' in obfval:
obf_map[obfname] = 'mess'
else:
self.to_screen(obfval)
raise ExtractorError('Unknown obfuscation function type: %s.%s' % (obfuscated_name, obfname))
decryptor_stack = []
for instruction in re.finditer(r'%s(?:\.|\[["\'`])([a-zA-Z_][a-zA-Z\d_]+)(?:["\'`]\])?\(a,(\d+)\);?' % re.escape(obfuscated_name),
func):
obf_name, obf_arg = instruction.group(1, 2)
inst = obf_map.get(obf_name)
if self._downloader.params.get('verbose', True):
self.to_screen('sig %s %s %s' % (obf_name, inst, obf_arg))
if inst:
decryptor_stack.append((inst, int(obf_arg) if inst != 'reverse' else None))
else:
raise ExtractorError('Unknown obfuscation function: %s.%s' % (obfuscated_name, obf_name))
return decryptor_stack
def _do_decrypt_signature(self, sig, stack):
a = list(sig)
for fun in stack:
if fun[0] == 'splice':
a = a[fun[1]:]
elif fun[0] == 'reverse':
a.reverse()
elif fun[0] == 'mess':
a = self.mess(a, fun[1])
else:
raise ExtractorError('Unknown stack action: %s' % (fun[0]))
return ''.join(a)
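# Worked example (hypothetical stack): applying
#   [('reverse', None), ('splice', 2), ('mess', 1)]
# to the signature 'abcdef' proceeds as
#   'abcdef' -> reverse -> 'fedcba' -> splice(2) -> 'dcba' -> mess(1) -> 'cdba'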
def _print_sig_code(self, func, example_sig):
def gen_sig_code(idxs):
def _genslice(start, end, step):
starts = '' if start == 0 else str(start)
ends = (':%d' % (end + step)) if end + step >= 0 else ':'
steps = '' if step == 1 else (':%d' % step)
return 's[%s%s%s]' % (starts, ends, steps)
step = None
# Quelch pyflakes warnings - start will be set when step is set
start = '(Never used)'
for i, prev in zip(idxs[1:], idxs[:-1]):
if step is not None:
if i - prev == step:
continue
yield _genslice(start, prev, step)
step = None
continue
if i - prev in [-1, 1]:
step = i - prev
start = prev
continue
else:
yield 's[%d]' % prev
if step is None:
yield 's[%d]' % i
else:
yield _genslice(start, i, step)
test_string = ''.join(map(compat_chr, range(len(example_sig))))
cache_res = func(test_string)
cache_spec = [ord(c) for c in cache_res]
expr_code = ' + '.join(gen_sig_code(cache_spec))
signature_id_tuple = '(%s)' % (
', '.join(compat_str(len(p)) for p in example_sig.split('.')))
code = ('if tuple(len(p) for p in s.split(\'.\')) == %s:\n'
' return %s\n') % (signature_id_tuple, expr_code)
self.to_screen('Extracted signature function:\n' + code)
@staticmethod
def mess(a, b):
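# In-place swap of a[0] with a[b % len(a)] -- the "swap" transform used by the
# signature scrambler.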
c = a[0]
a[0] = a[b % len(a)]
a[b % len(a)] = c
return a
def _full_signature_handling(self, sig, player_url, video_id):
if _decrypt_signature_protected:
signature = _decrypt_signature_protected(sig)
if re.match(self._VALID_SIG_VALUE_RE, signature):
return signature
if self._downloader.params.get('verbose'):
self.to_screen("Built-in signature decryption failed, trying dynamic")
sig_decrypt_stack = self._extract_signature_function(video_id, player_url)
return self._do_decrypt_signature(sig, sig_decrypt_stack)
def _generate_prerelease_file(self):
# It's Monday, so I'm in a bad mood, but at least my sailor uniform is super cute!
video_id = 'ieQ1rAIjzXc'
self._set_consent()
webpage = self._download_webpage('https://www.youtube.com/watch?v=%s' % video_id, video_id)
player_url = self._search_regex(r'"jsUrl":"(/s/player/.*?/player_ias.vflset/.*?/base.js)', webpage, 'player url')
sig_decrypt_stack = self._extract_signature_function(video_id, player_url)
func = re.sub(r'(?m)^ ', '', getsource(self.mess).replace('@staticmethod', ''))
func += '\n\ndef _decrypt_signature_protected(sig):\n'
stack = ['a = list(sig)']
for fun in sig_decrypt_stack:
if fun[0] == 'splice':
stack.append(f'a = a[{fun[1]}:]')
elif fun[0] == 'reverse':
stack.append('a.reverse()')
elif fun[0] == 'mess':
stack.append(f'a = mess(a, {fun[1]})')
else:
raise ExtractorError('Unknown stack action: %s' % (fun[0]))
stack.append("return ''.join(a)")
return func + '\n'.join(map(lambda x: ' ' * 4 + x, stack)) | |
def library_config_path(self):
"""
The library Config path.
:return: The library config directory
:rtype: str
"""
if self._options is not None:
return self._options.config_path
else:
return None
@property
def library_user_path(self):
"""
The library User path.
:return: The user directory to store user configuration
:rtype: str
"""
if self._options is not None:
return self._options.user_path
else:
return None
@property
def device(self):
"""
The device path.
:return: The device (ie /dev/zwave)
:rtype: str
"""
if self._options is not None:
return self._options.device
else:
return None
@property
def options(self):
"""
The starting options of the manager.
:return: The options used to start the manager
:rtype: ZWaveOption
"""
return self._options
@property
def stats(self):
"""
Retrieve statistics from driver.
Statistics:
* s_SOFCnt : Number of SOF bytes received
* s_ACKWaiting : Number of unsolicited messages while waiting for an ACK
* s_readAborts : Number of times read were aborted due to timeouts
* s_badChecksum : Number of bad checksums
* s_readCnt : Number of messages successfully read
* s_writeCnt : Number of messages successfully sent
* s_CANCnt : Number of CAN bytes received
* s_NAKCnt : Number of NAK bytes received
* s_ACKCnt : Number of ACK bytes received
* s_OOFCnt : Number of bytes out of framing
* s_dropped : Number of messages dropped & not delivered
* s_retries : Number of messages retransmitted
* s_controllerReadCnt : Number of controller messages read
* s_controllerWriteCnt : Number of controller messages sent
:return: Statistics of the controller
:rtype: dict()
"""
return self._network.manager.getDriverStatistics(self.home_id)
def get_stats_label(self, stat):
"""
Retrieve label of the statistic from driver.
:param stat: The code of the stat label to retrieve.
:type stat:
:return: The label or the stat.
:rtype: str
"""
#print "stat = %s" % stat
return PyStatDriver[stat]
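# Hypothetical usage sketch (key names assumed from the statistics listed in the
# ``stats`` docstring above):
#   stats = controller.stats
#   print(controller.get_stats_label('s_readCnt'), stats['s_readCnt'])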
def do_poll_statistics(self):
"""
Timer based polling system for statistics
"""
self._timer_statistics = None
stats = self.stats
dispatcher.send(self.SIGNAL_CONTROLLER_STATS, \
**{'controller':self, 'stats':stats})
self._timer_statistics = threading.Timer(self._interval_statistics, self.do_poll_statistics)
self._timer_statistics.start()
@property
def poll_stats(self):
"""
The interval for polling statistics
:return: The interval in seconds
:rtype: float
"""
return self._interval_statistics
@poll_stats.setter
def poll_stats(self, value):
"""
The interval for polling statistics
:return: The interval in seconds
:rtype: ZWaveNode
:param value: The interval in seconds
:type value: float
"""
if value != self._interval_statistics:
if self._timer_statistics is not None:
self._timer_statistics.cancel()
if value != 0:
self._interval_statistics = value
self._timer_statistics = threading.Timer(self._interval_statistics, self.do_poll_statistics)
self._timer_statistics.start()
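# Hypothetical usage sketch:
#   controller.poll_stats = 30.0  # emit SIGNAL_CONTROLLER_STATS every 30 s
#   controller.poll_stats = 0     # cancel the running statistics timer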
@property
def capabilities(self):
"""
The capabilities of the controller.
:return: The capabilities of the controller
:rtype: set
"""
caps = set()
if self.is_primary_controller:
caps.add('primaryController')
if self.is_static_update_controller:
caps.add('staticUpdateController')
if self.is_bridge_controller:
caps.add('bridgeController')
return caps
@property
def is_primary_controller(self):
"""
Is this node a primary controller of the network.
:rtype: bool
"""
return self._network.manager.isPrimaryController(self.home_id)
@property
def is_static_update_controller(self):
"""
Is this controller a static update controller (SUC).
:rtype: bool
"""
return self._network.manager.isStaticUpdateController(self.home_id)
@property
def is_bridge_controller(self):
"""
Is this controller using the bridge controller library.
:rtype: bool
"""
return self._network.manager.isBridgeController(self.home_id)
@property
def send_queue_count(self):
"""
Get count of messages in the outgoing send queue.
:return: The count of messages in the outgoing send queue.
:rtype: int
"""
if self.home_id is not None:
return self._network.manager.getSendQueueCount(self.home_id)
return -1
def hard_reset(self):
"""
Hard Reset a PC Z-Wave Controller.
Resets a controller and erases its network configuration settings.
The controller becomes a primary controller ready to add devices to a new network.
This command fires a lot of louie signals.
Louie clients must disconnect from node and value signals
.. code-block:: python
dispatcher.send(self._network.SIGNAL_NETWORK_RESETTED, **{'network': self._network})
"""
self._network.state = self._network.STATE_RESETTED
dispatcher.send(self._network.SIGNAL_NETWORK_RESETTED, \
**{'network':self._network})
self._network.manager.resetController(self._network.home_id)
try:
self.network.network_event.wait(5.0)
except AssertionError:
#For gevent AssertionError: Impossible to call blocking function in the event loop callback
pass
def soft_reset(self):
"""
Soft Reset a PC Z-Wave Controller.
Resets a controller without erasing its network configuration settings.
"""
self._network.manager.softResetController(self.home_id)
def create_new_primary(self):
'''Create a new primary controller when old primary fails. Requires SUC.
This command creates a new Primary Controller when the Old Primary has Failed. Requires a SUC on the network to function.
Results of the CreateNewPrimary Command will be sent as a Notification with the Notification type
Notification::Type_ControllerCommand
:return: True if the request was sent successfully.
:rtype: bool
'''
if self._lock_controller():
logger.debug(u'Send controller command : %s', 'create_new_primary')
return self._network.manager.createNewPrimary(self.home_id)
else:
logger.warning(u"Can't lock controller for command : %s", 'create_new_primary')
return False
def transfer_primary_role(self):
'''
Add a new controller to the network and make it the primary.
The existing primary will become a secondary controller.
Results of the TransferPrimaryRole Command will be sent as a Notification with the Notification type
Notification::Type_ControllerCommand
:return: True if the request was sent successfully.
:rtype: bool
'''
if self._lock_controller():
logger.debug(u'Send controller command : %s', 'transfer_primary_role')
return self._network.manager.transferPrimaryRole(self.home_id)
else:
logger.warning(u"Can't lock controller for command : %s", 'create_new_primary')
return False
def receive_configuration(self):
'''Receive network configuration information from primary controller. Requires secondary.
This command prepares the controller to receive Network Configuration from a Secondary Controller.
Results of the ReceiveConfiguration Command will be sent as a Notification with the Notification type
Notification::Type_ControllerCommand
:return: True if the request was sent successfully.
:rtype: bool
'''
if self._lock_controller():
logger.debug(u'Send controller command : %s', 'receive_configuration')
return self._network.manager.receiveConfiguration(self.home_id)
else:
logger.warning(u"Can't lock controller for command : %s", 'receive_configuration')
return False
def add_node(self, doSecurity=False):
'''Start the Inclusion Process to add a Node to the Network.
The Status of the Node Inclusion is communicated via Notifications. Specifically, you should
monitor ControllerCommand Notifications.
Results of the AddNode Command will be sent as a Notification with the Notification type
Notification::Type_ControllerCommand
:param doSecurity: Whether to initialize the Network Key on the device if it supports the Security CC
:type doSecurity: bool
:return: True if the request was sent successfully.
:rtype: bool
'''
if self._lock_controller():
logger.debug(u'Send controller command : %s, : secure : %s', 'add_node', doSecurity)
return self._network.manager.addNode(self.home_id, doSecurity)
else:
logger.warning(u"Can't lock controller for command : %s", 'add_node')
return False
def remove_node(self):
'''Remove a Device from the Z-Wave Network
The Status of the Node Removal is communicated via Notifications. Specifically, you should
monitor ControllerCommand Notifications.
Results of the RemoveNode Command will be sent as a Notification with the Notification type
Notification::Type_ControllerCommand
:return: True if the request was sent successfully.
:rtype: bool
'''
if self._lock_controller():
logger.debug(u'Send controller command : %s', 'remove_node')
return self._network.manager.removeNode(self.home_id)
else:
logger.warning(u"Can't lock controller for command : %s", 'remove_node')
return False
def remove_failed_node(self, nodeid):
'''Remove a Failed Device from the Z-Wave Network
This Command will remove a failed node from the network. The Node should be on the Controller's Failed
Node List, otherwise this command will fail. You can use the HasNodeFailed function below to test if the Controller
believes the Node has Failed.
The Status of the Node Removal is communicated via Notifications. Specifically, you should
monitor ControllerCommand Notifications.
Results of the RemoveFailedNode Command will be sent as a Notification with the Notification type
Notification::Type_ControllerCommand
:param nodeId: The ID of the node to query.
:type nodeId: int
:return: True if the request was sent successfully.
:rtype: bool
'''
if self._lock_controller():
logger.debug(u'Send controller command : %s, : node : %s', 'remove_failed_node', nodeid)
return self._network.manager.removeFailedNode(self.home_id, nodeid)
else:
logger.warning(u"Can't lock controller for command : %s", 'remove_failed_node')
return False
def has_node_failed(self, nodeid):
'''Check if the Controller Believes a Node has Failed.
This is different from the IsNodeFailed call in that we test the Controller's Failed Node List, whereas IsNodeFailed is testing
our list of Failed Nodes, which might be different.
The Results will be communicated via Notifications. Specifically, you should monitor the ControllerCommand notifications
:param nodeId: The ID of the node to query.
:type nodeId: int
:return: True if the request was sent successfully.
:rtype: bool
'''
if self._lock_controller():
logger.debug(u'Send controller command : %s, : node : %s', 'has_node_failed', nodeid)
return self._network.manager.hasNodeFailed(self.home_id, nodeid)
else:
logger.warning(u"Can't lock controller for command : %s", 'has_node_failed')
return False
def request_node_neighbor_update(self, nodeid):
'''Ask a Node to update its Neighbor Tables
This command will ask a | |
#
#
# A class to parse HDF5 based NeuroML files.
# Calls the appropriate methods in DefaultNetworkHandler when cell locations,
# network connections are found. The DefaultNetworkHandler can either print
# information, or if it's a class overriding DefaultNetworkHandler can create
# the appropriate network in a simulator dependent fashion
#
#
# Author: <NAME>
#
# This file has been developed as part of the neuroConstruct project
# This work has been funded by the Medical Research Council & Wellcome Trust
#
#
import logging
import sys
import inspect
import tables # pytables for HDF5 support
import numpy as np
import neuroml
from neuroml.hdf5 import get_str_attribute_group
from neuroml.hdf5.NetworkContainer import NetworkContainer
from neuroml.hdf5.NetworkContainer import PopulationContainer
from neuroml.hdf5.NetworkContainer import InstanceList
from neuroml.hdf5.NetworkContainer import ProjectionContainer
from neuroml.hdf5.NetworkContainer import ElectricalProjectionContainer
from neuroml.hdf5.NetworkContainer import ConnectionList
from neuroml.hdf5.NetworkContainer import InputListContainer
from neuroml.hdf5.NetworkContainer import InputsList
from neuroml.loaders import read_neuroml2_string
from neuroml.utils import add_all_to_document
import os.path
class NeuroMLHdf5Parser:
log = logging.getLogger("NeuroMLHdf5Parser")
currPopulation = ""
currentComponent = ""
totalInstances = 0
currentProjectionId = ""
currentProjectionType = ""
currentProjectionPrePop = ""
currentProjectionPostPop = ""
currentSynapse = ""
currentPreSynapse = ""
currInputList = ""
nml_doc_extra_elements = None
optimized = False
def __init__(self, netHandler, optimized=False):
self.netHandler = netHandler
self.optimized = optimized
if not self.optimized:
# For continued use with old API
if not hasattr(self.netHandler, "handle_network") or hasattr(
self.netHandler, "handleNetwork"
):
self.netHandler.handle_network = self.netHandler.handleNetwork
if not hasattr(self.netHandler, "handle_document_start") or hasattr(
self.netHandler, "handleDocumentStart"
):
self.netHandler.handle_document_start = (
self.netHandler.handleDocumentStart
)
if not hasattr(self.netHandler, "handle_population") or hasattr(
self.netHandler, "handlePopulation"
):
self.netHandler.handle_population = self.netHandler.handlePopulation
if not hasattr(self.netHandler, "handle_location") or hasattr(
self.netHandler, "handleLocation"
):
self.netHandler.handle_location = self.netHandler.handleLocation
if not hasattr(self.netHandler, "handle_projection") or hasattr(
self.netHandler, "handleProjection"
):
self.netHandler.handle_projection = self.netHandler.handleProjection
if not hasattr(self.netHandler, "finalise_projection") or hasattr(
self.netHandler, "finaliseProjection"
):
self.netHandler.finalise_projection = self.netHandler.finaliseProjection
if not hasattr(self.netHandler, "handle_connection") or hasattr(
self.netHandler, "handleConnection"
):
self.netHandler.handle_connection = self.netHandler.handleConnection
if not hasattr(self.netHandler, "handle_input_list") or hasattr(
self.netHandler, "handleInputList"
):
self.netHandler.handle_input_list = self.netHandler.handleInputList
if not hasattr(self.netHandler, "handle_single_input") or hasattr(
self.netHandler, "handleSingleInput"
):
self.netHandler.handle_single_input = self.netHandler.handleSingleInput
if not hasattr(self.netHandler, "finalise_input_source") or hasattr(
self.netHandler, "finaliseInputSource"
):
self.netHandler.finalise_input_source = (
self.netHandler.finaliseInputSource
)
def parse(self, filename):
h5file = tables.open_file(filename, mode="r")
self.log.info(
"Opened HDF5 file: %s; id=%s"
% (h5file.filename, h5file.root.neuroml._v_attrs.id)
)
if hasattr(h5file.root.neuroml._v_attrs, "neuroml_top_level"):
nml = get_str_attribute_group(h5file.root.neuroml, "neuroml_top_level")
try:
self.nml_doc_extra_elements = read_neuroml2_string(
nml,
include_includes=True,
verbose=False,
base_path=os.path.dirname(os.path.abspath(filename)),
)
except Exception as e:
print("Unable to read XML string extracted from HDF5 file!")
print(e)
print(nml)
raise e
self.log.info(
"Extracted NeuroML2 elements from extra string found in HDF5 file"
)
self.parse_group(h5file.root.neuroml)
h5file.close()
def get_nml_doc(self):
if not self.optimized:
nml2_doc = nmlHandler.get_nml_doc()
if self.nml_doc_extra_elements:
add_all_to_document(self.nml_doc_extra_elements, nml2_doc)
return nml2_doc
else:
nml_doc = neuroml.NeuroMLDocument(id=self.doc_id, notes=self.doc_notes)
if self.nml_doc_extra_elements:
add_all_to_document(self.nml_doc_extra_elements, nml_doc)
nml_doc.networks.append(self.optimizedNetwork)
return nml_doc
def _is_dataset(self, node):
return node._c_classid == "ARRAY" or node._c_classid == "CARRAY"
def parse_group(self, g):
self.log.debug("Parsing group: " + str(g))
self.start_group(g)
# Note this ensures groups are parsed before datasets. Important for synapse props
# Ensure populations parsed first!
for node in g:
if node._c_classid == "GROUP" and node._v_name.count("population_") >= 1:
self.log.debug("Sub node: " + str(node) + ", class: " + node._c_classid)
self.parse_group(node)
# Non populations!
for node in g:
if node._c_classid == "GROUP" and node._v_name.count("population_") == 0:
self.log.debug(
"Sub node (ng): " + str(node) + ", class: " + node._c_classid
)
self.parse_group(node)
for node in g:
self.log.debug("Sub node: " + str(node) + ", class: " + node._c_classid)
if self._is_dataset(node):
self.parse_dataset(node)
self.end_group(g)
def _extract_named_indices(self, d):
named_indices = {}
for attrName in d.attrs._v_attrnames:
if "column_" in attrName:
val = d.attrs.__getattr__(attrName)
if isinstance(val, str):
name = val
else:
name = val[0]
index = int(attrName[len("column_") :])
named_indices[name] = index
return named_indices
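# Example: a dataset carrying attributes column_0 = 'id', column_1 = 'x', ...
# yields {'id': 0, 'x': 1, ...}; attribute values may be plain strings or
# one-element arrays, hence the isinstance() check above.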
def parse_dataset(self, d):
self.log.debug("Parsing dataset/array: " + str(d))
if self.currPopulation != "":
self.log.debug("Using data for population: " + self.currPopulation)
self.log.debug(
"Size is: "
+ str(d.shape[0])
+ " rows of: "
+ str(d.shape[1])
+ " entries"
)
if not self.optimized:
indexId = -1
indexX = -1
indexY = -1
indexZ = -1
for attrName in d.attrs._v_attrnames:
val = d.attrs.__getattr__(attrName)
if val == "id" or val[0] == "id":
indexId = int(attrName[len("column_") :])
if val == "x" or val[0] == "x":
indexX = int(attrName[len("column_") :])
if val == "y" or val[0] == "y":
indexY = int(attrName[len("column_") :])
if val == "z" or val[0] == "z":
indexZ = int(attrName[len("column_") :])
if indexId < 0:
if len(d[0]) == 4:
indexId = 0
if indexX < 0:
if len(d[0]) == 3:
indexX = 0
if len(d[0]) == 4:
indexX = 1
if indexY < 0:
if len(d[0]) == 3:
indexY = 1
if len(d[0]) == 4:
indexY = 2
if indexZ < 0:
if len(d[0]) == 3:
indexZ = 2
if len(d[0]) == 4:
indexZ = 3
for i in range(0, d.shape[0]):
self.netHandler.handle_location(
int(d[i, indexId]) if indexId >= 0 else i,
self.currPopulation,
self.currentComponent,
float(d[i, indexX]),
float(d[i, indexY]),
float(d[i, indexZ]),
)
else:
# TODO: a better way to convert???
a = np.array(d)
self.currOptPopulation.instances = InstanceList(
array=a, indices=self._extract_named_indices(d)
)
elif self.currentProjectionId != "":
self.log.debug("Using data for proj: " + self.currentProjectionId)
self.log.debug(
"Size is: "
+ str(d.shape[0])
+ " rows of: "
+ str(d.shape[1])
+ " entries"
)
if not self.optimized:
indexId = -1
indexPreCellId = -1
indexPreSegId = -1
indexPreFractAlong = -1
indexPostCellId = -1
indexPostSegId = -1
indexPostFractAlong = -1
indexWeight = -1
indexDelay = -1
id = -1
preCellId = -1
preSegId = 0
preFractAlong = 0.5
postCellId = -1
postSegId = 0
postFractAlong = 0.5
type = "projection"
extraParamIndices = {}
for attrName in d.attrs._v_attrnames:
val = d.attrs.__getattr__(attrName)
if not isinstance(val, str):
val = val.decode()
# self.log.debug("Val of attribute: "+ attrName + " is "+ str(val))
if val == "id" or val[0] == "id":
indexId = int(attrName[len("column_") :])
elif val == "pre_cell_id" or val[0] == "pre_cell_id":
indexPreCellId = int(attrName[len("column_") :])
elif val == "pre_segment_id" or val[0] == "pre_segment_id":
indexPreSegId = int(attrName[len("column_") :])
elif val == "pre_fraction_along" or val[0] == "pre_fraction_along":
indexPreFractAlong = int(attrName[len("column_") :])
elif val == "post_cell_id" or val[0] == "post_cell_id":
indexPostCellId = int(attrName[len("column_") :])
elif val == "post_segment_id" or val[0] == "post_segment_id":
indexPostSegId = int(attrName[len("column_") :])
elif (
val == "post_fraction_along" or val[0] == "post_fraction_along"
):
indexPostFractAlong = int(attrName[len("column_") :])
elif val == "weight" or val[0] == "weight":
indexWeight = int(attrName[len("column_") :])
elif val == "delay" or val[0] == "delay":
indexDelay = int(attrName[len("column_") :])
if self.nml_doc_extra_elements:
synapse_obj = self.nml_doc_extra_elements.get_by_id(
self.currentSynapse
)
pre_synapse_obj = (
self.nml_doc_extra_elements.get_by_id(self.currentPreSynapse)
if len(self.currentPreSynapse) > 0
else None
)
else:
synapse_obj = None
pre_synapse_obj = None
self.netHandler.handle_projection(
self.currentProjectionId,
self.currentProjectionPrePop,
self.currentProjectionPostPop,
self.currentSynapse,
hasWeights=indexWeight > 0,
hasDelays=indexDelay > 0,
type=self.currentProjectionType,
synapse_obj=synapse_obj,
pre_synapse_obj=pre_synapse_obj,
)
self.log.debug(
"Cols: Id: %d precell: %d, postcell: %d, pre fract: %d, post fract: %d"
% (
indexId,
indexPreCellId,
indexPostCellId,
indexPreFractAlong,
indexPostFractAlong,
)
)
self.log.debug("Extra cols: " + str(extraParamIndices))
for i in range(0, d.shape[0]):
row = d[i, :]
id = int(row[indexId]) if indexId > 0 else i
preCellId = int(row[indexPreCellId])
if indexPreSegId >= 0:
preSegId = int(row[indexPreSegId])
if indexPreFractAlong >= 0:
preFractAlong = row[indexPreFractAlong]
postCellId = int(row[indexPostCellId])
if indexPostSegId >= 0:
postSegId = int(row[indexPostSegId])
if indexPostFractAlong >= 0:
postFractAlong = row[indexPostFractAlong]
if indexWeight >= 0:
weight = row[indexWeight]
else:
weight = 1
if indexDelay >= 0:
delay = row[indexDelay]
else:
delay = 0
# self.log.debug("Connection %d from %f to %f" % (id, preCellId, postCellId))
self.netHandler.handle_connection(
self.currentProjectionId,
id,
self.currentProjectionPrePop,
self.currentProjectionPostPop,
self.currentSynapse,
preCellId,
postCellId,
preSegId,
preFractAlong,
postSegId,
postFractAlong,
delay=delay,
weight=weight,
)
else:
# TODO: a better way to convert???
a = np.array(d)
self.currOptProjection.connections = ConnectionList(
array=a, indices=self._extract_named_indices(d)
)
if self.currInputList != "":
self.log.debug("Using data for input list: " + self.currInputList)
self.log.debug(
"Size is: "
+ str(d.shape[0])
+ " rows of: "
+ str(d.shape[1])
+ " entries"
)
if not self.optimized:
indexId = -1
indexTargetCellId = -1
indexSegId = -1
indexFractAlong = -1
indexWeight = -1
segId = 0
fractAlong = 0.5
weight = 1
for attrName in d.attrs._v_attrnames:
val = d.attrs.__getattr__(attrName)
if not isinstance(val, str):
val = val.decode()
self.log.debug("Val of attribute: " + attrName + " is " + str(val))
if val == "id" or val[0] == "id":
indexId = int(attrName[len("column_") :])
elif val == "target_cell_id" or val[0] == "target_cell_id":
indexTargetCellId = int(attrName[len("column_") :])
elif val == "segment_id" or val[0] == "segment_id":
indexSegId = int(attrName[len("column_") :])
elif val == "fraction_along" or val[0] == | |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A helper class for inferring Distribution shape."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import util as distribution_util
class _DistributionShape(object):
"""Manage and manipulate `Distribution` shape.
#### Terminology
Recall that a `Tensor` has:
- `shape`: size of `Tensor` dimensions,
- `ndims`: size of `shape`; number of `Tensor` dimensions,
- `dims`: indexes into `shape`; useful for transpose, reduce.
`Tensor`s sampled from a `Distribution` can be partitioned by `sample_dims`,
`batch_dims`, and `event_dims`. To understand the semantics of these
dimensions, consider when two of the three are fixed and the remaining
is varied:
- `sample_dims`: indexes independent draws from identical
parameterizations of the `Distribution`.
- `batch_dims`: indexes independent draws from non-identical
parameterizations of the `Distribution`.
- `event_dims`: indexes event coordinates from one sample.
The `sample`, `batch`, and `event` dimensions constitute the entirety of a
`Distribution` `Tensor`'s shape.
The dimensions are always in `sample`, `batch`, `event` order.
#### Purpose
This class partitions `Tensor` notions of `shape`, `ndims`, and `dims` into
`Distribution` notions of `sample,` `batch,` and `event` dimensions. That
is, it computes any of:
```
sample_shape batch_shape event_shape
sample_dims batch_dims event_dims
sample_ndims batch_ndims event_ndims
```
for a given `Tensor`, e.g., the result of
`Distribution.sample(sample_shape=...)`.
For a given `Tensor`, this class computes the above table using minimal
information: `batch_ndims` and `event_ndims`.
#### Examples
We show examples of distribution shape semantics.
- Sample dimensions:
Computing summary statistics, i.e., the average is a reduction over sample
dimensions.
```python
sample_dims = [0]
tf.reduce_mean(Normal(loc=1.3, scale=1.).sample_n(1000),
axis=sample_dims) # ~= 1.3
```
- Batch dimensions:
Monte Carlo estimation of a marginal probability:
Average over batch dimensions where batch dimensions are associated with
random draws from a prior.
E.g., suppose we want to find the Monte Carlo estimate of the marginal
distribution of a `Normal` with a random `Laplace` location:
```
P(X=x) = integral P(X=x|y) P(Y=y) dy
~= 1/n sum_{i=1}^n P(X=x|y_i), y_i ~iid Laplace(0,1)
= tf.reduce_mean(Normal(loc=Laplace(0., 1.).sample_n(n=1000),
scale=tf.ones(1000)).prob(x),
axis=batch_dims)
```
The `Laplace` distribution generates a `Tensor` of shape `[1000]`. When
fed to a `Normal`, this is interpreted as 1000 different locations, i.e.,
1000 non-identical Normals. Therefore a single call to `prob(x)` yields
1000 probabilities, one for every location. The average over this batch
yields the marginal.
- Event dimensions:
Computing the determinant of the Jacobian of a function of a random
variable involves a reduction over event dimensions.
E.g., Jacobian of the transform `Y = g(X) = exp(X)`:
```python
tf.div(1., tf.reduce_prod(x, event_dims))
```
We show examples using this class.
Write `S, B, E` for `sample_shape`, `batch_shape`, and `event_shape`.
```python
# 150 iid samples from one multivariate Normal with two degrees of freedom.
mu = [0., 0]
sigma = [[1., 0],
[0, 1]]
mvn = MultivariateNormal(mu, sigma)
rand_mvn = mvn.sample(sample_shape=[3, 50])
shaper = DistributionShape(batch_ndims=0, event_ndims=1)
S, B, E = shaper.get_shape(rand_mvn)
# S = [3, 50]
# B = []
# E = [2]
# 12 iid samples from one Wishart with 2x2 events.
sigma = [[1., 0],
[2, 1]]
wishart = Wishart(df=5, scale=sigma)
rand_wishart = wishart.sample(sample_shape=[3, 4])
shaper = DistributionShape(batch_ndims=0, event_ndims=2)
S, B, E = shaper.get_shape(rand_wishart)
# S = [3, 4]
# B = []
# E = [2, 2]
# 100 iid samples from two, non-identical trivariate Normal distributions.
mu = ... # shape(2, 3)
sigma = ... # shape(2, 3, 3)
X = MultivariateNormal(mu, sigma).sample(shape=[4, 25])
# S = [4, 25]
# B = [2]
# E = [3]
```
#### Argument Validation
When `validate_args=False`, checks that cannot be done during
graph construction are performed at graph execution. This may result in a
performance degradation because data must be switched from GPU to CPU.
For example, when `validate_args=False` and `event_ndims` is a
non-constant `Tensor`, it is checked to be a non-negative integer at graph
execution. (Same for `batch_ndims`). Constant `Tensor`s and non-`Tensor`
arguments are always checked for correctness since this can be done for
"free," i.e., during graph construction.
"""
def __init__(self,
batch_ndims=None,
event_ndims=None,
validate_args=False,
name="DistributionShape"):
"""Construct `DistributionShape` with fixed `batch_ndims`, `event_ndims`.
`batch_ndims` and `event_ndims` are fixed throughout the lifetime of a
`Distribution`. They may only be known at graph execution.
If both `batch_ndims` and `event_ndims` are python scalars (rather than
either being a `Tensor`), functions in this class automatically perform
sanity checks during graph construction.
Args:
batch_ndims: `Tensor`. Number of `dims` (`rank`) of the batch portion of
indexes of a `Tensor`. A "batch" is a non-identical distribution, i.e.,
Normal with different parameters.
event_ndims: `Tensor`. Number of `dims` (`rank`) of the event portion of
indexes of a `Tensor`. An "event" is what is sampled from a
distribution, i.e., a trivariate Normal has an event shape of [3] and a
4 dimensional Wishart has an event shape of [4, 4].
validate_args: Python `bool`, default `False`. When `True`,
non-`tf.constant` `Tensor` arguments are checked for correctness.
(`tf.constant` arguments are always checked.)
name: Python `str`. The name prepended to Ops created by this class.
Raises:
ValueError: if either `batch_ndims` or `event_ndims` is `None`,
negative, or not `int32`.
"""
if batch_ndims is None: raise ValueError("batch_ndims cannot be None")
if event_ndims is None: raise ValueError("event_ndims cannot be None")
self._batch_ndims = batch_ndims
self._event_ndims = event_ndims
self._validate_args = validate_args
with ops.name_scope(name):
self._name = name
with ops.name_scope("init"):
self._batch_ndims = self._assert_non_negative_int32_scalar(
ops.convert_to_tensor(
batch_ndims, name="batch_ndims"))
self._batch_ndims_static, self._batch_ndims_is_0 = (
self._introspect_ndims(self._batch_ndims))
self._event_ndims = self._assert_non_negative_int32_scalar(
ops.convert_to_tensor(
event_ndims, name="event_ndims"))
self._event_ndims_static, self._event_ndims_is_0 = (
self._introspect_ndims(self._event_ndims))
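# A minimal usage sketch (an illustrative assumption, not from the original
# source): feeding event_ndims as a placeholder defers the non-negativity
# check to graph execution, and only when validate_args=True.
#   event_ndims = tf.placeholder(dtype=tf.int32, shape=[])
#   shaper = _DistributionShape(batch_ndims=1, event_ndims=event_ndims,
#                               validate_args=True)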
@property
def name(self):
"""Name given to ops created by this class."""
return self._name
@property
def batch_ndims(self):
"""Returns number of dimensions corresponding to non-identical draws."""
return self._batch_ndims
@property
def event_ndims(self):
"""Returns number of dimensions needed to index a sample's coordinates."""
return self._event_ndims
@property
def validate_args(self):
"""Returns True if graph-runtime `Tensor` checks are enabled."""
return self._validate_args
def get_ndims(self, x, name="get_ndims"):
"""Get `Tensor` number of dimensions (rank).
Args:
x: `Tensor`.
name: Python `str`. The name to give this op.
Returns:
ndims: Scalar number of dimensions associated with a `Tensor`.
"""
with self._name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
ndims = x.get_shape().ndims
if ndims is None:
return array_ops.rank(x, name="ndims")
return ops.convert_to_tensor(ndims, dtype=dtypes.int32, name="ndims")
def get_sample_ndims(self, x, name="get_sample_ndims"):
"""Returns number of dimensions corresponding to iid draws ("sample").
Args:
x: `Tensor`.
name: Python `str`. The name to give this op.
Returns:
sample_ndims: `Tensor` (0D, `int32`).
Raises:
ValueError: if `sample_ndims` is calculated to be negative.
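Example (a minimal sketch, assuming a statically known rank-4 input):
```python
x = tf.zeros([7, 6, 5, 4])
# 4 total dims - 1 batch dim - 1 event dim = 2 sample dims.
_DistributionShape(batch_ndims=1, event_ndims=1).get_sample_ndims(x)  # ==> 2
```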
"""
with self._name_scope(name, values=[x]):
ndims = self.get_ndims(x, name=name)
if self._is_all_constant_helper(ndims, self.batch_ndims,
self.event_ndims):
ndims = tensor_util.constant_value(ndims)
sample_ndims = (ndims - self._batch_ndims_static -
self._event_ndims_static)
if sample_ndims < 0:
raise ValueError(
"expected batch_ndims(%d) + event_ndims(%d) <= ndims(%d)" %
(self._batch_ndims_static, self._event_ndims_static, ndims))
return ops.convert_to_tensor(sample_ndims, name="sample_ndims")
else:
with ops.name_scope(name="sample_ndims"):
sample_ndims = ndims - self.batch_ndims - self.event_ndims
if self.validate_args:
sample_ndims = control_flow_ops.with_dependencies(
[check_ops.assert_non_negative(sample_ndims)], sample_ndims)
return sample_ndims
def get_dims(self, x, name="get_dims"):
"""Returns dimensions indexing `sample_shape`, `batch_shape`, `event_shape`.
Example:
```python
x = ... # Tensor with shape [4, 3, 2, 1]
sample_dims, batch_dims, event_dims = _DistributionShape(
batch_ndims=2, event_ndims=1).get_dims(x)
# sample_dims == [0]
# batch_dims == [1, 2]
# event_dims == [3]
# Note that these are not the shape parts, but rather indexes into shape.
```
Args:
x: `Tensor`.
name: Python `str`. The name to give this op.
Returns:
sample_dims: `Tensor` (1D, `int32`).
batch_dims: `Tensor` (1D, `int32`).
event_dims: `Tensor` (1D, `int32`).
"""
with self._name_scope(name, values=[x]):
def make_dims(start_sum, size, name):
"""Closure to make dims range."""
start_sum = start_sum if start_sum else [
array_ops.zeros([], dtype=dtypes.int32, | |
# Copyright (c) 2014 Dell Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from cinder import context
from cinder import exception
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_volume
from cinder.tests.unit import test
from cinder.volume.drivers.dell_emc.sc import storagecenter_api
from cinder.volume.drivers.dell_emc.sc import storagecenter_fc
# We patch these here as they are used by every test to keep
# from trying to contact a Dell Storage Center.
@mock.patch.object(storagecenter_api.HttpClient,
'__init__',
return_value=None)
@mock.patch.object(storagecenter_api.SCApi,
'open_connection')
@mock.patch.object(storagecenter_api.SCApi,
'close_connection')
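# Note on argument order: mock.patch decorators inject their mocks bottom-up,
# and each test method's own decorators are applied before these class-level
# ones, so every test receives its method-level mocks first, followed by
# mock_close_connection, mock_open_connection and mock_init.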
class DellSCSanFCDriverTestCase(test.TestCase):
VOLUME = {u'instanceId': u'64702.4829',
u'scSerialNumber': 64702,
u'replicationSource': False,
u'liveVolume': False,
u'vpdId': 4831,
u'objectType': u'ScVolume',
u'index': 4829,
u'volumeFolderPath': u'dopnstktst/',
u'hostCacheEnabled': False,
u'usedByLegacyFluidFsNasVolume': False,
u'inRecycleBin': False,
u'volumeFolderIndex': 17,
u'instanceName': u'5729f1db-4c45-416c-bc15-c8ea13a4465d',
u'statusMessage': u'',
u'status': u'Down',
u'storageType': {u'instanceId': u'64702.1',
u'instanceName': u'Assigned - Redundant - 2 MB',
u'objectType': u'ScStorageType'},
u'cmmDestination': False,
u'replicationDestination': False,
u'volumeFolder': {u'instanceId': u'64702.17',
u'instanceName': u'opnstktst',
u'objectType': u'ScVolumeFolder'},
u'deviceId': u'6000d31000fcbe0000000000000012df',
u'active': False,
u'portableVolumeDestination': False,
u'deleteAllowed': True,
u'name': u'5729f1db-4c45-416c-bc15-c8ea13a4465d',
u'scName': u'Storage Center 64702',
u'secureDataUsed': False,
u'serialNumber': u'0000fcbe-000012df',
u'replayAllowed': False,
u'flashOptimized': False,
u'configuredSize': u'1.073741824E9 Bytes',
u'mapped': False,
u'cmmSource': False}
SCSERVER = {u'scName': u'Storage Center 64702',
u'volumeCount': 0,
u'removeHbasAllowed': True,
u'legacyFluidFs': False,
u'serverFolderIndex': 4,
u'alertOnConnectivity': True,
u'objectType': u'ScPhysicalServer',
u'instanceName': u'Server_21000024ff30441d',
u'instanceId': u'64702.47',
u'serverFolderPath': u'opnstktst/',
u'portType': [u'FibreChannel'],
u'type': u'Physical',
u'statusMessage': u'Only 5 of 6 expected paths are up',
u'status': u'Degraded',
u'scSerialNumber': 64702,
u'serverFolder': {u'instanceId': u'64702.4',
u'instanceName': u'opnstktst',
u'objectType': u'ScServerFolder'},
u'parentIndex': 0,
u'connectivity': u'Partial',
u'hostCacheIndex': 0,
u'deleteAllowed': True,
u'pathCount': 5,
u'name': u'Server_21000024ff30441d',
u'hbaPresent': True,
u'hbaCount': 2,
u'notes': u'Created by Dell EMC Cinder Driver',
u'mapped': False,
u'operatingSystem': {u'instanceId': u'64702.38',
u'instanceName': u'Red Hat Linux 6.x',
u'objectType': u'ScServerOperatingSystem'}
}
MAPPING = {u'instanceId': u'64702.2183',
u'scName': u'Storage Center 64702',
u'scSerialNumber': 64702,
u'controller': {u'instanceId': u'64702.64702',
u'instanceName': u'SN 64702',
u'objectType': u'ScController'},
u'lunUsed': [1],
u'server': {u'instanceId': u'64702.47',
u'instanceName': u'Server_21000024ff30441d',
u'objectType': u'ScPhysicalServer'},
u'volume': {u'instanceId': u'64702.4829',
u'instanceName':
u'5729f1db-4c45-416c-bc15-c8ea13a4465d',
u'objectType': u'ScVolume'},
u'connectivity': u'Up',
u'readOnly': False,
u'objectType': u'ScMappingProfile',
u'hostCache': False,
u'mappedVia': u'Server',
u'mapCount': 2,
u'instanceName': u'4829-47',
u'lunRequested': u'N/A'
}
def setUp(self):
super(DellSCSanFCDriverTestCase, self).setUp()
# configuration is a mock and starts out as a blank slate; mocks created
# in setUp are not automatically configured, so we set the driver config
# values we need here.
self.configuration = mock.Mock()
self.configuration.san_is_local = False
self.configuration.san_ip = "192.168.0.1"
self.configuration.san_login = "admin"
self.configuration.san_password = "<PASSWORD>"
self.configuration.dell_sc_ssn = 64702
self.configuration.dell_sc_server_folder = 'opnstktst'
self.configuration.dell_sc_volume_folder = 'opnstktst'
self.configuration.dell_sc_api_port = 3033
self.configuration.excluded_domain_ip = None
self.configuration.excluded_domain_ips = []
self.configuration.included_domain_ips = []
self._context = context.get_admin_context()
self.driver = storagecenter_fc.SCFCDriver(
configuration=self.configuration)
self.driver.do_setup(None)
self.driver._stats = {'QoS_support': False,
'volume_backend_name': 'dell-1',
'free_capacity_gb': 12123,
'driver_version': '1.0.1',
'total_capacity_gb': 12388,
'reserved_percentage': 0,
'vendor_name': 'Dell',
'storage_protocol': 'FC'}
# Start with none. Add in the specific tests later.
# Mock tests bozo this.
self.driver.backends = None
self.driver.replication_enabled = False
self.volid = '5729f1db-4c45-416c-bc15-c8ea13a4465d'
self.volume_name = "volume" + self.volid
self.connector = {'ip': '192.168.0.77',
'host': 'cinderfc-vm',
'wwnns': ['20000024ff30441c', '20000024ff30441d'],
'initiator': 'iqn.1993-08.org.debian:01:e1b1312f9e1',
'wwpns': ['21000024ff30441c', '21000024ff30441d']}
@mock.patch.object(storagecenter_api.SCApi,
'find_server',
return_value=None)
@mock.patch.object(storagecenter_api.SCApi,
'create_server',
return_value=SCSERVER)
@mock.patch.object(storagecenter_api.SCApi,
'find_volume',
return_value=VOLUME)
@mock.patch.object(storagecenter_api.SCApi,
'get_volume',
return_value=VOLUME)
@mock.patch.object(storagecenter_api.SCApi,
'map_volume',
return_value=MAPPING)
@mock.patch.object(storagecenter_api.SCApi,
'find_wwns',
return_value=(1,
[u'5000D31000FCBE3D',
u'5000D31000FCBE35'],
{u'21000024FF30441C':
[u'5000D31000FCBE35'],
u'21000024FF30441D':
[u'5000D31000FCBE3D']}))
def test_initialize_connection(self,
mock_find_wwns,
mock_map_volume,
mock_get_volume,
mock_find_volume,
mock_create_server,
mock_find_server,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': fake.VOLUME_ID}
connector = self.connector
res = self.driver.initialize_connection(volume, connector)
expected = {'data':
{'discard': True,
'initiator_target_map':
{u'21000024FF30441C': [u'5000D31000FCBE35'],
u'21000024FF30441D': [u'5000D31000FCBE3D']},
'target_discovered': True,
'target_lun': 1,
'target_wwn':
[u'5000D31000FCBE3D', u'5000D31000FCBE35']},
'driver_volume_type': 'fibre_channel'}
self.assertEqual(expected, res, 'Unexpected return data')
# verify find_volume and get_volume were each called exactly once with the expected arguments
mock_find_volume.assert_called_once_with(fake.VOLUME_ID, None, False)
mock_get_volume.assert_called_once_with(self.VOLUME[u'instanceId'])
@mock.patch.object(storagecenter_api.SCApi,
'find_server',
return_value=SCSERVER)
@mock.patch.object(storagecenter_api.SCApi,
'find_volume',
return_value=VOLUME)
@mock.patch.object(storagecenter_api.SCApi,
'get_volume',
return_value=VOLUME)
@mock.patch.object(storagecenter_api.SCApi,
'map_volume',
return_value=MAPPING)
@mock.patch.object(storagecenter_fc.SCFCDriver,
'_is_live_vol')
@mock.patch.object(storagecenter_api.SCApi,
'find_wwns')
@mock.patch.object(storagecenter_fc.SCFCDriver,
'initialize_secondary')
@mock.patch.object(storagecenter_api.SCApi,
'get_live_volume')
def test_initialize_connection_live_vol(self,
mock_get_live_volume,
mock_initialize_secondary,
mock_find_wwns,
mock_is_live_volume,
mock_map_volume,
mock_get_volume,
mock_find_volume,
mock_find_server,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': fake.VOLUME_ID}
connector = self.connector
sclivevol = {'instanceId': '101.101',
'secondaryVolume': {'instanceId': '102.101',
'instanceName': fake.VOLUME_ID},
'secondaryScSerialNumber': 102,
'secondaryRole': 'Secondary'}
mock_is_live_volume.return_value = True
mock_find_wwns.return_value = (
1, [u'5000D31000FCBE3D', u'5000D31000FCBE35'],
{u'21000024FF30441C': [u'5000D31000FCBE35'],
u'21000024FF30441D': [u'5000D31000FCBE3D']})
mock_initialize_secondary.return_value = (
1, [u'5000D31000FCBE3E', u'5000D31000FCBE36'],
{u'21000024FF30441E': [u'5000D31000FCBE36'],
u'21000024FF30441F': [u'5000D31000FCBE3E']})
mock_get_live_volume.return_value = sclivevol
res = self.driver.initialize_connection(volume, connector)
expected = {'data':
{'discard': True,
'initiator_target_map':
{u'21000024FF30441C': [u'5000D31000FCBE35'],
u'21000024FF30441D': [u'5000D31000FCBE3D'],
u'21000024FF30441E': [u'5000D31000FCBE36'],
u'21000024FF30441F': [u'5000D31000FCBE3E']},
'target_discovered': True,
'target_lun': 1,
'target_wwn': [u'5000D31000FCBE3D', u'5000D31000FCBE35',
u'5000D31000FCBE3E', u'5000D31000FCBE36']},
'driver_volume_type': 'fibre_channel'}
self.assertEqual(expected, res, 'Unexpected return data')
# verify find_volume and get_volume were each called exactly once with the expected arguments
mock_find_volume.assert_called_once_with(fake.VOLUME_ID, None, True)
mock_get_volume.assert_called_once_with(self.VOLUME[u'instanceId'])
@mock.patch.object(storagecenter_api.SCApi,
'find_server',
return_value=SCSERVER)
@mock.patch.object(storagecenter_api.SCApi,
'find_volume')
@mock.patch.object(storagecenter_api.SCApi,
'get_volume')
@mock.patch.object(storagecenter_api.SCApi,
'map_volume',
return_value=MAPPING)
@mock.patch.object(storagecenter_fc.SCFCDriver,
'_is_live_vol')
@mock.patch.object(storagecenter_api.SCApi,
'find_wwns')
@mock.patch.object(storagecenter_fc.SCFCDriver,
'initialize_secondary')
@mock.patch.object(storagecenter_api.SCApi,
'get_live_volume')
def test_initialize_connection_live_vol_afo(self,
mock_get_live_volume,
mock_initialize_secondary,
mock_find_wwns,
mock_is_live_volume,
mock_map_volume,
mock_get_volume,
mock_find_volume,
mock_find_server,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': fake.VOLUME_ID, 'provider_id': '101.101'}
scvol = {'instanceId': '102.101'}
mock_find_volume.return_value = scvol
mock_get_volume.return_value = scvol
connector = self.connector
sclivevol = {'instanceId': '101.10001',
'primaryVolume': {'instanceId': '102.101',
'instanceName': fake.VOLUME_ID},
'primaryScSerialNumber': 102,
'secondaryVolume': {'instanceId': '101.101',
'instanceName': fake.VOLUME_ID},
'secondaryScSerialNumber': 101,
'secondaryRole': 'Activated'}
mock_is_live_volume.return_value = True
mock_find_wwns.return_value = (
1, [u'5000D31000FCBE3D', u'5000D31000FCBE35'],
{u'21000024FF30441C': [u'5000D31000FCBE35'],
u'21000024FF30441D': [u'5000D31000FCBE3D']})
mock_get_live_volume.return_value = sclivevol
res = self.driver.initialize_connection(volume, connector)
expected = {'data':
{'discard': True,
'initiator_target_map':
{u'21000024FF30441C': [u'5000D31000FCBE35'],
u'21000024FF30441D': [u'5000D31000FCBE3D']},
'target_discovered': True,
'target_lun': 1,
'target_wwn': [u'5000D31000FCBE3D', u'5000D31000FCBE35']},
'driver_volume_type': 'fibre_channel'}
self.assertEqual(expected, res, 'Unexpected return data')
# verify the secondary path was not initialized and that find_volume/get_volume were each called exactly once with the expected arguments
self.assertFalse(mock_initialize_secondary.called)
mock_find_volume.assert_called_once_with(
fake.VOLUME_ID, '101.101', True)
mock_get_volume.assert_called_once_with('102.101')
@mock.patch.object(storagecenter_api.SCApi,
'find_server',
return_value=SCSERVER)
@mock.patch.object(storagecenter_api.SCApi,
'find_volume',
return_value=VOLUME)
@mock.patch.object(storagecenter_api.SCApi,
'get_volume',
return_value=VOLUME)
@mock.patch.object(storagecenter_api.SCApi,
'map_volume',
return_value=MAPPING)
@mock.patch.object(storagecenter_api.SCApi,
'find_wwns',
return_value=(None, [], {}))
def test_initialize_connection_no_wwns(self,
mock_find_wwns,
mock_map_volume,
mock_get_volume,
mock_find_volume,
mock_find_server,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': fake.VOLUME_ID}
connector = self.connector
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.initialize_connection,
volume,
connector)
@mock.patch.object(storagecenter_api.SCApi,
'find_server',
return_value=None)
@mock.patch.object(storagecenter_api.SCApi,
'create_server',
return_value=None)
@mock.patch.object(storagecenter_api.SCApi,
'find_volume',
return_value=VOLUME)
@mock.patch.object(storagecenter_api.SCApi,
'map_volume',
return_value=MAPPING)
@mock.patch.object(storagecenter_api.SCApi,
'find_wwns',
return_value=(None, [], {}))
def test_initialize_connection_no_server(self,
mock_find_wwns,
mock_map_volume,
mock_find_volume,
mock_create_server,
mock_find_server,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': fake.VOLUME_ID}
connector = self.connector
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.initialize_connection,
volume,
connector)
@mock.patch.object(storagecenter_api.SCApi,
'find_server',
return_value=SCSERVER)
@mock.patch.object(storagecenter_api.SCApi,
'find_volume',
return_value=None)
@mock.patch.object(storagecenter_api.SCApi,
'map_volume',
return_value=MAPPING)
@mock.patch.object(storagecenter_api.SCApi,
'find_wwns',
return_value=(None, [], {}))
def test_initialize_connection_vol_not_found(self,
mock_find_wwns,
mock_map_volume,
mock_find_volume,
mock_find_server,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': fake.VOLUME_ID}
connector = self.connector
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.initialize_connection,
volume,
connector)
@mock.patch.object(storagecenter_api.SCApi,
'find_server',
return_value=SCSERVER)
@mock.patch.object(storagecenter_api.SCApi,
'find_volume',
return_value=VOLUME)
@mock.patch.object(storagecenter_api.SCApi,
'map_volume',
return_value=None)
@mock.patch.object(storagecenter_api.SCApi,
'find_wwns',
return_value=(None, [], {}))
def test_initialize_connection_map_vol_fail(self,
mock_find_wwns,
mock_map_volume,
mock_find_volume,
mock_find_server,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where map_volume returns None (no mappings)
volume = {'id': fake.VOLUME_ID}
connector = self.connector
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.initialize_connection,
volume,
connector)
def test_initialize_secondary(self,
mock_close_connection,
mock_open_connection,
mock_init):
sclivevol = {'instanceId': '101.101',
'secondaryVolume': {'instanceId': '102.101',
'instanceName': fake.VOLUME_ID},
'secondaryScSerialNumber': 102}
mock_api = mock.MagicMock()
mock_api.find_server = mock.MagicMock(return_value=self.SCSERVER)
mock_api.map_secondary_volume = mock.MagicMock(
return_value=self.VOLUME)
find_wwns_ret = (1, [u'5000D31000FCBE3D', u'5000D31000FCBE35'],
{u'21000024FF30441C': [u'5000D31000FCBE35'],
u'21000024FF30441D': [u'5000D31000FCBE3D']})
mock_api.find_wwns = mock.MagicMock(return_value=find_wwns_ret)
mock_api.get_volume = mock.MagicMock(return_value=self.VOLUME)
ret = self.driver.initialize_secondary(mock_api, sclivevol,
['wwn1', 'wwn2'])
self.assertEqual(find_wwns_ret, ret)
def test_initialize_secondary_create_server(self,
mock_close_connection,
mock_open_connection,
mock_init):
sclivevol = {'instanceId': '101.101',
'secondaryVolume': {'instanceId': '102.101',
'instanceName': fake.VOLUME_ID},
'secondaryScSerialNumber': 102}
mock_api = mock.MagicMock()
mock_api.find_server = mock.MagicMock(return_value=None)
mock_api.create_server = mock.MagicMock(return_value=self.SCSERVER)
mock_api.map_secondary_volume = mock.MagicMock(
return_value=self.VOLUME)
find_wwns_ret = (1, [u'5000D31000FCBE3D', u'5000D31000FCBE35'],
{u'21000024FF30441C': [u'5000D31000FCBE35'],
u'21000024FF30441D': [u'5000D31000FCBE3D']})
mock_api.find_wwns = mock.MagicMock(return_value=find_wwns_ret)
mock_api.get_volume = mock.MagicMock(return_value=self.VOLUME)
ret = self.driver.initialize_secondary(mock_api, sclivevol,
['wwn1', 'wwn2'])
self.assertEqual(find_wwns_ret, ret)
def test_initialize_secondary_no_server(self,
mock_close_connection,
mock_open_connection,
mock_init):
sclivevol = {'instanceId': '101.101',
'secondaryVolume': {'instanceId': '102.101',
'instanceName': fake.VOLUME_ID},
'secondaryScSerialNumber': 102}
mock_api = mock.MagicMock()
mock_api.find_server = mock.MagicMock(return_value=None)
mock_api.create_server = mock.MagicMock(return_value=None)
ret = self.driver.initialize_secondary(mock_api, sclivevol,
['wwn1', 'wwn2'])
expected = (None, [], {})
self.assertEqual(expected, ret)
def test_initialize_secondary_map_fail(self,
mock_close_connection,
mock_open_connection,
mock_init):
sclivevol = {'instanceId': '101.101',
'secondaryVolume': {'instanceId': '102.101',
'instanceName': fake.VOLUME_ID},
'secondaryScSerialNumber': 102}
mock_api = mock.MagicMock()
mock_api.find_server = mock.MagicMock(return_value=self.SCSERVER)
mock_api.map_secondary_volume = mock.MagicMock(return_value=None)
ret = self.driver.initialize_secondary(mock_api, sclivevol,
['wwn1', 'wwn2'])
expected = (None, [], {})
self.assertEqual(expected, ret)
def test_initialize_secondary_vol_not_found(self,
mock_close_connection,
mock_open_connection,
mock_init):
sclivevol = {'instanceId': '101.101',
'secondaryVolume': {'instanceId': '102.101',
'instanceName': fake.VOLUME_ID},
'secondaryScSerialNumber': 102}
mock_api = mock.MagicMock()
mock_api.find_server = mock.MagicMock(return_value=self.SCSERVER)
mock_api.map_secondary_volume = mock.MagicMock(
return_value=self.VOLUME)
mock_api.get_volume = mock.MagicMock(return_value=None)
ret = self.driver.initialize_secondary(mock_api, sclivevol,
['wwn1', 'wwn2'])
expected = (None, [], {})
self.assertEqual(expected, ret)
@mock.patch.object(storagecenter_api.SCApi,
'find_volume')
@mock.patch.object(storagecenter_api.SCApi,
'unmap_all')
@mock.patch.object(storagecenter_fc.SCFCDriver,
'_is_live_vol')
def test_force_detach(self, mock_is_live_vol, mock_unmap_all,
mock_find_volume, mock_close_connection,
mock_open_connection, mock_init):
mock_is_live_vol.return_value = False
scvol = {'instandId': '12345.1'}
mock_find_volume.return_value = scvol
mock_unmap_all.return_value = True
volume = {'id': fake.VOLUME_ID}
res = self.driver.force_detach(volume)
mock_unmap_all.assert_called_once_with(scvol)
expected = {'driver_volume_type': 'fibre_channel',
'data': {}}
self.assertEqual(expected, res)
mock_unmap_all.assert_called_once_with(scvol)
@mock.patch.object(storagecenter_api.SCApi,
'find_volume')
@mock.patch.object(storagecenter_api.SCApi,
'unmap_all')
@mock.patch.object(storagecenter_fc.SCFCDriver,
'_is_live_vol')
def test_force_detach_fail(self, mock_is_live_vol, mock_unmap_all,
mock_find_volume, mock_close_connection,
mock_open_connection, | |
# histogram title
hist_title = stats_font.render('GUESS DISTRIBUTION', True, white, background)
statsRectHist = hist_title.get_rect()
statsRectHist.center = (width // 2, height - 150)
# histogram labels and locations
hist_font = pygame.font.Font('freesansbold.ttf', 11)
# labels
hist_label1 = hist_font.render(str(1), True, white, background)
hist_label2 = hist_font.render(str(2), True, white, background)
hist_label3 = hist_font.render(str(3), True, white, background)
hist_label4 = hist_font.render(str(4), True, white, background)
hist_label5 = hist_font.render(str(5), True, white, background)
hist_label6 = hist_font.render(str(6), True, white, background)
# locations
hist_label1Rect = hist_label1.get_rect()
hist_label2Rect = hist_label2.get_rect()
hist_label3Rect = hist_label3.get_rect()
hist_label4Rect = hist_label4.get_rect()
hist_label5Rect = hist_label5.get_rect()
hist_label6Rect = hist_label6.get_rect()
hist_labels = [hist_label1, hist_label2, hist_label3, hist_label4, hist_label5, hist_label6]
hist_Rects = [hist_label1Rect, hist_label2Rect, hist_label3Rect, hist_label4Rect, hist_label5Rect, hist_label6Rect]
# set positions for bars
for i in range(len(hist_Rects)):
hist_Rects[i].center = (65, (height - 130) + (20 * i))
# get score distribution, note the max
dist = get_result_distribution()
max_ = max(dist.values())
for i in dist:
if dist[i] == max_:
green_hist = int(i)
# histogram bars
rect1 = [75, height - 138, (240 * dist['1']/int(max_)) + 10, 15]
rect2 = [75, height - 118, (240 * dist['2']/int(max_)) + 10, 15]
rect3 = [75, height - 98, (240 * dist['3']/int(max_)) + 10, 15]
rect4 = [75, height - 78, (240 * dist['4']/int(max_)) + 10, 15]
rect5 = [75, height - 58, (240 * dist['5']/int(max_)) + 10, 15]
rect6 = [75, height - 38, (240 * dist['6']/int(max_)) + 10, 15]
rect_dict = [rect1, rect2, rect3, rect4, rect5, rect6]
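# Note: each bar starts at x=75 and is scaled so the most frequent guess count
# spans roughly 240 px; the +10 keeps zero-count bars visible as a small stub.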
# exit button
exit_img = pygame.image.load('exit.png').convert_alpha()
exit_button = button.Button(353, 8, exit_img, 0.5)
running = True
while running:
screen.fill(background)
# show stats and titles
screen2.blit(stats_title, statsRect1)
screen2.blit(stats_played, statsRect2)
screen2.blit(games_played, games_playedRect)
screen2.blit(stats_wins, statsRect3)
screen2.blit(percent, percentRect)
screen2.blit(stats_current, statsRect4)
screen2.blit(stats_current2, statsRect5)
screen2.blit(current_streak, current_streakRect)
screen2.blit(stats_max, statsRect6)
screen2.blit(stats_max2, statsRect7)
screen2.blit(max_streak, max_streakRect)
screen2.blit(stats_time, statsRect8)
screen2.blit(stats_time2, statsRect9)
screen2.blit(fastest, fastestRect)
screen2.blit(hist_title, statsRectHist)
# show histogram
for i in range(6):
screen2.blit(hist_labels[i], hist_Rects[i])
for i in range(1,7):
if green_hist == i:
pygame.draw.rect(screen, box_green, rect_dict[i-1])
else: pygame.draw.rect(screen, dark_gray, rect_dict[i-1])
pygame.display.flip()
#pointer
pygame.mouse.set_visible(True)
# exit game loop
pressed = 0
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
elif event.type == pygame.MOUSEBUTTONUP:
pressed = 1
# if the exit button is pressed (currently it also triggers on hover, which needs fixing), go back to the game screen (this might reset progress and still needs testing)
if exit_button.draw(screen2,pressed):
pygame.mouse.set_visible(True)
game()
pygame.display.flip()
pygame.quit()
# settings button
def settings():
'''
Initializes the settings screen when the settings button is pressed.
'''
width = 500
height = 400
screen4 = pygame.display.set_mode([width, height])
pygame.display.set_caption('SETTINGS')
# settings title
settings_font = pygame.font.Font('freesansbold.ttf', 20)
settings_title = settings_font.render('SETTINGS', True, white, background)
settingsRect1 = settings_title.get_rect()
settingsRect1.center = (width // 2, height - 340)
# change music volume, change song
small_font = pygame.font.Font('freesansbold.ttf', 16)
settings_music = small_font.render('Sound', True, blue, background)
settingsRect2 = settings_music.get_rect()
settingsRect2.center = (width // 2, height - 300)
# music on button
music_on = tiny_font.render('Music On', True, white)
on_button = button.Button(width - 307, height - 270, music_on, 1.0)
# music off/mute button
music_off = tiny_font.render('Music Off', True, white)
off_button = button.Button(width - 222, height - 270, music_off, 1.0)
# skip song button
skip_song = tiny_font.render('Skip Song', True, white)
skip_button = button.Button(width - 137, height - 270, skip_song, 1.0)
# change colors from black to white (dark mode default)
settings_color = small_font.render('Display', True, red, background)
settingsRect3 = settings_color.get_rect()
settingsRect3.center = (width // 2, height - 220)
# dark mode button
dark_font = tiny_font.render('Dark Mode', True, white)
dark_button = button.Button(width // 2 - 14, height - 190, dark_font, 1.0)
# light mode button
light_mode = tiny_font.render('Light Mode', True, white)
light_button = button.Button(width // 2 + 71, height - 190, light_mode, 1.0)
# link to feedback form (https://forms.gle/5gXtiFWCRdHt44ac8)
settings_feedback = small_font.render('Feedback', True, title_green, background)
settingsRect4 = settings_feedback.get_rect()
settingsRect4.center = (width // 2, height - 145)
# feedback button
feedback = tiny_font.render('Click here to share your questions, comments, or concerns!', True, white)
feedback_button = button.Button(width // 2 + 175, height - 115, feedback, 1.0)
# message to the inspiration that started all of this!
settings_message = tiny_font.render('We love you, Vishnu! <3 Aubrey, Annaka, & Uno', True, title_yellow, background)
settingsRect5 = settings_message.get_rect()
settingsRect5.center = (width // 2, height - 70)
# exit button
exit_img = pygame.image.load('exit.png').convert_alpha()
exit_button = button.Button(470, 10, exit_img, 0.5)
running = True
while running:
timer.tick(fps)
screen.fill(background)
screen4.blit(settings_title, settingsRect1)
screen4.blit(settings_music, settingsRect2)
screen4.blit(settings_color, settingsRect3)
screen4.blit(settings_feedback, settingsRect4)
screen4.blit(settings_message, settingsRect5)
# exit game loop
pressed = 0
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
elif event.type == pygame.MOUSEBUTTONUP:
pressed = 1
# if exit button is pressed
if exit_button.draw(screen4, pressed):
pygame.mouse.set_visible(True)
game()
if on_button.draw(screen4, pressed):
pygame.mixer.music.unpause()
if off_button.draw(screen4, pressed):
pygame.mixer.music.pause()
if skip_button.draw(screen4, pressed):
display_functions.change_song()
if dark_button.draw(screen4, pressed):
display_functions.dark_mode()
if light_button.draw(screen4, pressed):
display_functions.light_mode()
if feedback_button.draw(screen4, pressed):
webbrowser.open(r"https://forms.gle/5gXtiFWCRdHt44ac8")
#pointer
pygame.mouse.set_visible(False)
pointerImg_rect.topleft = pygame.mouse.get_pos()
screen.blit(pointerImg, pointerImg_rect)
pygame.display.flip()
pygame.quit()
# game loop
def game():
'''
Main game function. Called at the end of the file and when the user exits the stats or settings screen.
'''
global guess
global turn
global result
# redefine screen dimensions if user was to go to stats button (which resets the width/height dimensions)
width = 450
height = 600
screen = pygame.display.set_mode([width, height])
pygame.display.set_caption('VORDLE')
screen_fill()
#initializing start time and font
time_font = pygame.font.Font('freesansbold.ttf', 16)
timer.tick(fps)
start_time = pygame.time.get_ticks()
# dummy variables
correct = False
was_wrong = False
# start loop
running = True
while running:
timer.tick(fps)
# prevent entering the word check loop until enter is pressed
enter = False
# game events
pressed = 0
for event in pygame.event.get():
# quit
if event.type == pygame.QUIT:
running = False
# buttons
elif event.type == pygame.MOUSEBUTTONUP:
pressed = 1
# typing
if event.type == pygame.KEYDOWN:
if was_wrong == True:
screen_fill()
was_wrong = False
# add to guess
if event.key != pygame.K_BACKSPACE and event.key != pygame.K_RETURN:
if len(guess) < 5:
guess += event.unicode
# backspace
if event.key == pygame.K_BACKSPACE:
guess = guess[:-1]
change_box_color(background, (turn, len(guess)))
draw_boxes_row(range(turn, turn+1))
# press enter to submit guess
if event.key == pygame.K_RETURN and len(guess) == 5:
enter = True
# display guess after each letter is typed
print_guess(guess, turn)
# check validity of guess after enter is pressed
while enter:
# if guess is a 5-letter english word
if is_valid(guess.lower(), all_words):
guess_list.append(guess)
# check validity of each letter
result = check_word(guess, word)
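# Note (inferred from the key-color updates below): result is assumed to be a
# per-letter list where 0 = letter absent (gray), 1 = wrong position (yellow)
# and 2 = correct position (green).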
result_list.append(result)
# change main box colors to display result
show_result(result, turn, guess)
print_guess(guess, turn)
# check for repeated letters and update the result list so each key is changed to its highest result color
check = checkDouble(guess, result)
check.get_count_dict()
check.get_repeated()
check.update_res_list()
check.update_result()
result = check.result
# change key colors after updating result list
for i in range(len(result)):
if result[i] == 0:
change_key_color(dark_gray, guess[i])
if result[i] == 1:
change_key_color(box_yellow, guess[i])
if result[i] == 2:
change_key_color(box_green, guess[i])
# if guess is correct
if guess.lower() == word:
# update score list and other stats
seconds = math.floor(((pygame.time.get_ticks() - start_time)/1000)%60)
minutes = math.floor((pygame.time.get_ticks() - start_time)/60000)
score = turn + 1
save_results(score, str(minutes)+"m:"+str(seconds)+"s")
enter = False
correct = True
# congratulations display
while correct:
# redefine screen
timer.tick(fps)
screen.fill(background)
# show text
screen.blit(title1, titleRect1)
screen.blit(title2, titleRect2)
screen.blit(title3, titleRect3)
screen.blit(title4, titleRect4)
screen.blit(title5, titleRect5)
screen.blit(title6, titleRect6)
# exit game loop
pressed = 0
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
elif event.type == pygame.MOUSEBUTTONUP:
pressed = 1
# if stats button is pressed
if stats_button.draw(screen, pressed):
stats()
# if settings button is pressed
if settings_button.draw(screen, pressed):
settings()
# draw game board
draw_boxes()
draw_keys1()
draw_keys2()
draw_keys3()
# print good job message
draw_badge_with_word(screen, "Good Job!", correct_font, 175, 75, (137,75), off_white, background)
# vishy confetti
for i in Vishies:
i[1] += 6
vishnu_confetti = vishnu_img.get_rect()
vishnu_confetti.center = i
vishnu_confetti.size = (25,25)
screen.blit(vishnu_img, vishnu_confetti)
if i[1] > 580:
i[1] = random.randrange(-50, -5)
i[0] = random.randrange(width)
timer.tick(600)
pygame.display.flip()
# if guess is wrong
else:
# reset guess and adjust game variables
guess = ''
turn += 1
enter = False
# if guess isn't valid
else:
# display message
draw_badge_with_word(screen, "Not | |
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 23 18:14:28 2020
@author: zhang
"""
import random
import requests
import json
import time
import re
import os
import traceback
from collections import OrderedDict
from requests.adapters import HTTPAdapter
from tqdm import tqdm
from threading import Thread,Semaphore
import sys
import gc
headers_raw="""Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9
Accept-Encoding: gzip, deflate, br
Accept-Language: zh-CN,zh;q=0.9,ja;q=0.8
Cache-Control: max-age=0
Connection: keep-alive
Cookie: _uuid=7C2EDDAA-B843-15E6-EFEE-DD0EA2F301B023443infoc; buvid3=E1815B06-1828-481F-BC84-498ABFD4F5EA155828infoc; LIVE_BUVID=AUTO4315670402248481; sid=lp8rpg6n; CURRENT_FNVAL=16; rpdid=|(J|)JkR|YR|0J'ulY~|R|k~u; UM_distinctid=16ce24c5cdd32-05def6882cc6ef-396a4605-1fa400-16ce24c5cde221; im_notify_type_1836737=0; stardustvideo=1; laboratory=1-1; INTVER=-1; pgv_pvi=9402613760; CURRENT_QUALITY=116; LIVE_PLAYER_TYPE=1; DedeUserID=1836737; DedeUserID__ckMd5=326caeb00bc9daa3; SESSDATA=68b4dc5a%2C1582691072%2C26581e11; bili_jct=c56310cc6de31f6e8728de07648983ec; flash_player_gray=false; html5_player_gray=false; bp_t_offset_1836737=359095747404222299; _dfcaptcha=d766283d73a7c658c29253faa4ab9077
Host: api.bilibili.com
Sec-Fetch-Dest: document
Sec-Fetch-Mode: navigate
Sec-Fetch-Site: none
Sec-Fetch-User: ?1
Upgrade-Insecure-Requests: 1
User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.116 Safari/537.36"""
iplist = []
def getip():
r = requests.get("http://127.0.0.1:5010/get_all/")
data = r.json()
for i in data:
iplist.append(i.get("proxy"))
getip()
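# Note (assumption): this expects a local proxy-pool style service listening on
# 127.0.0.1:5010 whose /get_all/ endpoint returns a JSON list of objects with a
# "proxy" field, e.g. [{"proxy": "1.2.3.4:8080"}, ...].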
class Bili():
def __init__(self,uid,since):
self.name = ''
self.uid = uid
self.data = ''
if since:
self.since = since
else:
self.since = '1900-01-01 00:00:01'
self.jpg=0
self.png=0
self.gif=0
self.mp4=0
self.bl_id_list = []
self.dt=[]
self.download_urls=[]
def get_stream(self,i):
url,file_path,name = i[0],i[1],i[2]
proxies={}
ip=''
p=0
downloaded = 0
dheaders = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.116 Safari/537.36'}
pro=0
while 1:
if pro:
#proxies = {'http':'192.168.127.12:3654','https':'192.168.127.12:3654'}
ip = iplist[random.randint(0,len(iplist) - 1)]
proxies = {'http':ip,'https':ip}
if not os.path.exists(file_path):
try:
s = requests.Session()
s.mount(url, HTTPAdapter(max_retries=2))
downloaded = s.get(url,timeout=(5,8),proxies=proxies,headers=dheaders)
if downloaded.status_code ==404:
break
if downloaded.status_code ==200:
with open(file_path,'wb') as f:
f.write(downloaded.content)
else:
raise Exception('wrong_code')
except Exception as e:
if 'wrong_code' in str(e):
print(e,url)
p = 1
pro = 1
del proxies,ip
time.sleep(1)
else:
pass#print(e)
continue
with open(f'{apath}/not_downloaded.txt','a') as f:
if name[0] == name[1]:
url = name[0]+':'+url+' '+file_path+'\n'
else:
url = name[0]+'_'+name[1]+':'+url+' '+file_path+'\n'
f.write(url)
traceback.print_exc()
if p:
print(file_path,'download finished')
if downloaded:
downloaded.close()
del proxies,ip,downloaded
break
def get_url(self,next_offset):
params = {
'host_uid':self.uid,
'offset_dynamic_id':next_offset,
'need_top':0
}
url= 'https://api.vc.bilibili.com/dynamic_svr/v1/dynamic_svr/space_history'
r = requests.get(url,params=params)
result = r.json()
r.close()
return result
def get_user_info(self,user_profile):
user_info = {}
try:
try:
user_info['uname'] = self.rename(user_profile['info']['uname'])
except:
user_info['uname'] = ''
user_info['uid'] = str(user_profile['info']['uid'])
try:
user_info['face'] = user_profile['info']['face']
except:
user_info['face'] = ''
user_info['sign'] = user_profile['sign']
user_info['official'] = user_profile['card']['official_verify']['desc']
#self.user = user_info
return user_info
except:
traceback.print_exc()
return 'error'
def print_user_info(self):
result_headers = ('Username: '+self.user['uname']+
'\nUser id: '+self.uid+
'\nSignature: '+self.user['sign']+
'\nOfficial verification: '+self.user['official']
)
result= ''.join(result_headers)
print(result+'\n\n'+'-'*30)
def get_pic_urls(self,info):
temp = []
if isinstance(info,list):
for i in info:
temp.append(i['img_src'])
else:
temp.append(info)
return temp
def parse_daily(self,item,user=None):
bl = OrderedDict()
if user:
bl['uid'] = user['uid']
bl['uname'] = self.rename(user['uname'])
else:
bl['uid'] = self.uid
bl['uname'] = self.name
if 'rp_id' in item:
print('text-only post')
bl['description'] = item['content']
bl['pictures']= []
bl['video'] = ''
bl['upload_time'] = self.str_to_time(item['timestamp'])
return bl
bl['id'] = item['id']
bl['description'] = item['description']
try:
bl['category'] = item['category']
except:
pass
if 'pictures' in item:
bl['pictures'] = self.get_pic_urls(item['pictures'])
else:
bl['pictures']= []
if 'video_playurl' in item:
bl['video'] = item['video_playurl']
else:
bl['video'] = ''
bl['upload_time'] = self.str_to_time(item['upload_time'])
return bl
'''
except Exception as e:
print('parse_daily Error:',e)
traceback.print_exc()
print(item)
sys.exit()
'''
def parse_tg(self,card,user=None):
bl = OrderedDict()
if user:
bl['uid'] = str(user['uid'])
bl['uname'] = self.rename(user['uname'])
else:
bl['uid'] = self.uid
bl['uname'] = self.name
try:
bl['aid'] = str(card['aid'])
except:
print(card)
time.sleep(10)
try:
bl['cid'] = str(card['cid'])
except:
bl['cid'] ='0'
bl['description'] = card['dynamic']
bl['title'] = card['title']
bl['desc'] = card['desc']
bl['jump_url'] = card['jump_url']
bl['pictures']=self.get_pic_urls(card['pic'])
bl['video'] = ''
bl['owner'] = card['owner']
bl['video']=''
bl['upload_time'] = self.str_to_time(card['pubdate'])
return bl
def str_to_time(self,text):
if ':' in str(text):
result = time.strptime(text,"%Y-%m-%d %H:%M:%S")
else:
result = time.localtime(text)
return result
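# Example sketch of the two accepted inputs:
#   str_to_time('2020-02-23 18:14:28')  # formatted string -> time.struct_time
#   str_to_time(1582452868)             # unix timestamp   -> time.struct_time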
def print_dt(self,bl):
result = self.write_str(bl)
print(result)
def get_one_page(self,page):
#global still_d
try:
cards = self.data['cards']
except:
print(self.name,'no more posts')
return
has_more = self.data['has_more']
for i in cards:
card = json.loads(i['card'])
if 'aid' in card:
display = i['display']
bl = self.parse_tg(card)
bl['dynamic_id'] = i['desc']['dynamic_id']
bl['usr_action'] = display['usr_action_txt']
elif 'origin' in card:
if 'episode_id' in card['origin']:
print('bangumi (anime) post, skipping')
continue
user = self.get_user_info(card['origin_user'])
if user == 'error':
if 'summary' in card or not card:
continue
else:
print(card)
sys.exit()
if str(user['uid']) == self.uid:
continue
bl = OrderedDict()
item = card['item']
bl['description']=item['content']
origin=json.loads(card['origin'])
if 'item' in origin:
oitem = origin['item']
oitem['user'] = user
bl['origin']=self.parse_daily(oitem,user)
elif 'aid' in origin:
bl['origin'] = self.parse_tg(origin,user)
else:
continue
bl['origin']['dynamic_id'] = i['desc']['orig_dy_id']
bl['dynamic_id'] = i['desc']['dynamic_id']
bl['upload_time'] = self.str_to_time(card['item']['timestamp'])
bl['pictures'] = ''
bl['video'] = ''
elif 'item' in card:
item = card['item']
try:
bl = self.parse_daily(item)
except Exception as e:
print("item Error:",e)
traceback.print_exc()
print(card)
sys.exit()
bl['dynamic_id'] = i['desc']['dynamic_id']
elif 'sketch' in card:
continue
elif 'playCnt' in card:
continue
elif 'roomid' in card:
continue
elif not 'category' in card:
print(card)
exit(1)
elif '小说' in card['category']['name'] or 'summary' in card:
continue
else:
print(card)
exit(1)
if bl['dynamic_id'] in self.bl_id_list:
continue
try:
publish_time = bl['upload_time']
except:
print(card)
sys.exit()
since_date = self.str_to_time(self.since)
if publish_time < since_date:
print(f"Reached the cutoff date; fetched page {page} of {self.name}'s posts")
#if not write_it:
return 0
#else:
# self.write_data()
# self.dt=[]
# self.download_urls=[]
# still_d=0
self.print_dt(bl)
print('*'*20)
self.dt.append(bl)
self.bl_id_list.append(bl['dynamic_id'])
print(f"Fetched page {page} of {self.name}'s posts")
return has_more
def rename(self,oname):
rstr = r"[\/\\\:\*\?\"\<\>\|\- \n]"
name = re.sub(rstr,"_",oname)
return name
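# Example sketch: rename("foo/bar: baz") -> "foo_bar__baz"
# (slashes, backslashes, colons, asterisks, spaces, hyphens, newlines and other
# characters that are unsafe in filenames are replaced with "_").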
def write_data(self):
self.path = apath#f'{apath}/'+self.name
if is_try:
self.path = 'C:/Users/zhang/Desktop'
self.filepath = self.path+f'/{self.name}.txt'
if not os.path.exists(self.filepath):
self.write_txt(0)
else:
self.write_txt(1)
self.download_files()
def get_download_urls(self,i,zf = 0):
try:
if i['pictures']:
for pic in i['pictures']:
if not self.jpg and 'jpg' in pic:
self.jpg=1
if not self.png and 'png' in pic:
self.png=1
if not self.gif and 'gif' in pic:
self.gif=1
if zf:
filename = f"{self.name}-{time.strftime('%Y%m%d_%H%M%S',i['upload_time'])}-{i['uname']}-{pic.split('/')[-1]}"
else:
filename = f"{i['uname']}-{time.strftime('%Y%m%d_%H%M%S',i['upload_time'])}-{pic.split('/')[-1]}"
path = os.path.join(opath,self.name)
if not os.path.exists(path):
os.makedirs(path)
filepath = os.path.join(path,filename)
self.download_urls.append([pic,filepath,[self.name,i['uname']]])
if i['video']:
if not self.mp4 and 'mp4' in i['video']:
self.mp4=1
if zf:
filename = f"{self.name}-{time.strftime('%Y%m%d_%H%M%S',i['upload_time'])}-{i['uname']}-{i['video'].split('/')[-1].split('?')[0]}"
else:
filename = f"{i['uname']}-{time.strftime('%Y%m%d_%H%M%S',i['upload_time'])}-{i['video'].split('/')[-1].split('?')[0]}"
path = os.path.join(opath,self.name)
if not os.path.exists(path):
os.makedirs(path)
filepath = os.path.join(path,filename)
self.download_urls.append([i['video'],filepath,[self.name,i['uname']]])
if 'origin' in i:
self.get_download_urls(i['origin'],1)
except Exception as e:
print('get download url Error:',e)
traceback.print_exc()
print(i)
sys.exit()
def write_str(self,i):
if 'origin' in i:
first = 'Reposted post\n'
else:
first = ''
temp_str=(first+i['description']+
'\nPublished: '+time.strftime("%Y-%m-%d %H:%M:%S",i['upload_time'])
)
result = ''.join(temp_str)
if first:
try:
result+='\nOriginal user: '+i['origin']['uname']+'\n'+'Original id: '+i['origin']['uid']+'\n'+self.write_str(i['origin'])
except:
print(i)
sys.exit()
else:
if 'aid' in i:
temp_str=('\nUploaded a video'+
'\naid:'+i['aid']+
'\ncid:'+i['cid']+
'\nTitle: '+i['title']+
'\nDescription: '+i['desc']
)
result+=''.join(temp_str)
return result
def write_txt(self,mode):
temp_result= []
if not mode:
result_headers = ('Username: '+self.user['uname']+
'\nUser id: '+self.uid+
'\nSignature: '+self.user['sign']+
'\nOfficial verification: '+self.user['official']+
'\nPosts:\n'+'-'*20+'\n'
)
temp_result.append(result_headers)
for i in self.dt:
aresult=self.write_str(i)+'\n\n'
temp_result.append(aresult)
self.get_download_urls(i)
result = ''.join(temp_result)
with open(self.filepath,'a',encoding = 'utf-8') as f:
f.write(result)
print(f"{len(self.dt)} posts saved to {self.filepath}")
def download_files(self):
temp = []
count = 0
for i in tqdm(self.download_urls,desc="Downloading files",ncols=50):
self.get_stream(i)
count+=1
if count %15 ==0:
time.sleep(random.randint(1,2))
'''
for i in self.download_urls:
t = Down_thread(i)
t.start()
temp.append(t)
for j in tqdm(temp,desc="下载文件",ncols=50):
if j.is_alive():
j.join()
count+=1
temp.remove(j)
del j
if count % 50 ==0:
time.sleep(random.choice([1,2,3]))
'''
def get_pages(self):
result = self.get_url(0)
self.data= result['data']
if not 'cards' in self.data:
print(f'{self.uid} has no posts')
return 'no'
cardnum=0
card = self.data['cards'][cardnum]
while 1:
try:
user_profile = card['desc']['user_profile']
self.user=self.get_user_info(user_profile)
break
except Exception as e:
print(e)
cardnum+=1
card = self.data['cards'][cardnum]
time.sleep(1)
try:
self.name = self.rename(self.user['uname'])
except:
print(self.user)
exit(1)
self.print_user_info()
page_num=1
while 1:
hasmore = self.get_one_page(page_num)
if hasmore:
next_offset = self.data['next_offset']
result = self.get_url(next_offset)
self.data= result['data']
else:
break
if page_num %10==0:
self.write_data()
self.dt= []
self.download_urls = []
page_num+=1
self.write_data()
def update_user(self,temp_time):
with open('bilidt.txt') as f:
lines = f.read().splitlines()
has_uid=0
for i, line in enumerate(lines):
temp = line.split(' ')
if self.uid == temp[0]:
has_uid=1
if len(temp) < 3:
temp.append(self.name)
temp.append(temp_time)
else:
temp[1] = self.name
temp[2] = temp_time
lines[i] = ' '.join(temp)
if not has_uid:
temp = f'{self.uid} {self.name} {temp_time}'
lines.append(temp)
with open('bilidt.txt','w') as f:
f.write('\n'.join(lines))
def start(self):
temp_time = time.strftime('%Y-%m-%d %H:%M:%S',time.localtime())
result = self.get_pages()
if result == 'no':
return
self.update_user(temp_time)
path = os.path.join(opath,self.name)
'''if self.jpg:
os.system(f"cd {path};mv *jpg ..")
if self.png:
os.system(f"cd {path};mv *png ..")
if self.gif:
os.system(f"cd {path};mv *gif ..")
if self.mp4:
os.system(f"cd {path};mv *mp4 ..")'''
class Down_thread(Thread):
def __init__(self,i):
Thread.__init__(self)
self.i=i
def run(self,pro=0):
with thread_max_num:
url,file_path,name = self.i[0],self.i[1],self.i[2]
proxies={}
ip=''
p=0
downloaded = 0
dheaders = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.116 Safari/537.36'}
while 1:
if pro:
#proxies = {'http':'192.168.127.12:3654','https':'192.168.127.12:3654'}
ip = iplist[random.randint(0,len(iplist) - 1)]
proxies = {'http':ip,'https':ip}
if not os.path.exists(file_path):
try:
s = requests.Session()
s.mount(url, HTTPAdapter(max_retries=2))
downloaded = s.get(url,timeout=(5,8),proxies=proxies,headers=dheaders)
if downloaded.status_code ==404:
break
if downloaded.status_code ==200:
with open(file_path,'wb') as f:
f.write(downloaded.content)
else:
raise Exception('wrong_code')
except Exception as e:
if 'wrong_code' in str(e):
print(e,url)
p = 1
pro = 1
del proxies,ip
time.sleep(1)
else:
pass#print(e)
continue
with open(f'{apath}/not_downloaded.txt','a') as f:
if name[0] == name[1]:
url = name[0]+':'+url+' '+file_path+'\n'
else:
url = name[0]+'_'+name[1]+':'+url+' '+file_path+'\n'
f.write(url)
traceback.print_exc()
if p:
print(file_path,'download finished')
if downloaded:
downloaded.close()
del proxies,ip,downloaded
break
def get_headers(header_raw):
return dict(line.split(": ", 1) for line in header_raw.split("\n"))
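# Example sketch: build a header dict from the raw block at the top of this
# file and reuse it for authenticated requests:
#   headers = get_headers(headers_raw)
#   r = requests.get(url, headers=headers)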
def getuids():
f = open('/root/u/checked_fmid.txt')
| |
# check if there is a frame folder loaded first
if not self.frames_folder:
tkMessageBox.showinfo(title = "Warning", message = "Load video frames directory first")
return
if self.label_number == 0:
tkMessageBox.showinfo(title = "Warning", message = "Can not extract while label is 0")
return
# check if there is annotated model for the current video
model_annot_name = os.path.join(self.annotation_folder, self.video_name + ".model")
if not os.path.exists(model_annot_name):
tkMessageBox.showinfo(title = "Warning", message = "Annotated model doesn't exist for this video")
else:
annotated_frames_rectangle_pairs = self.load_annotations_from_file(model_annot_name)
num_of_annotations = sum([int(i !=0) for i in annotated_frames_rectangle_pairs])
print num_of_annotations, "annotations"
index_annotation = 0
image_number = 0
while index_annotation < num_of_annotations:
if annotated_frames_rectangle_pairs[image_number] != 0:
self.sliding_window ((self.rectangle_size[1],self.rectangle_size[0]), image_number, 0)
index_annotation += 1
image_number += 1
# check verbose
print "patches were saved congrats to you!!!"
def extract_patches_tf(self):
# check if there is a frame folder loaded first
if not self.frames_folder:
tkMessageBox.showinfo(title = "Warning", message = "Load video frames directory first")
return
if self.label_number == 0:
tkMessageBox.showinfo(title = "Warning", message = "Can not segment while label is 0")
return
overlap = tkSimpleDialog.askinteger("Overlap","Choose an overlap",parent = self.canvas)
if overlap == None:
tkMessageBox.showinfo(title = "Error", message = "Overlap was not entered")
return
if overlap < 0:
tkMessageBox.showinfo(title = "Error", message = "Overlap must be a positive number or 0")
return
self.image_segmentation((self.rectangle_size[1],self.rectangle_size[0]), overlap)
def save(self):
# check if there is a frame folder loaded first
if not self.frames_folder:
tkMessageBox.showinfo(title = "Warning", message = "Load video frames directory first")
return
# don't save if there is not at least one annotated frame
number_of_annotaded_frames = sum([int(i !=0) for i in self.rectangle_frame_pairs])
if number_of_annotaded_frames == 0:
tkMessageBox.showinfo(title = "Warning", message = "0 annotations!\nModel not saved!")
return
# check if there is already a model
result = "yes"
model_annot_name = os.path.join(self.annotation_folder, self.video_name + ".model")
if os.path.exists(model_annot_name):
result = tkMessageBox.askquestion("Overwrite", "Are you sure?", icon = "warning")
if result == "yes":
f = file(model_annot_name, 'wb')
cPickle.dump(self.rectangle_frame_pairs, f, protocol = cPickle.HIGHEST_PROTOCOL)
f.close()
tkMessageBox.showinfo(title = "Info", message = "Annotation model saved")
else:
tkMessageBox.showinfo(title = "Info", message = "Annotation model not saved")
def load(self):
#check if there is a model for the current video frames
model_annot_name = os.path.join(self.annotation_folder, self.video_name + ".model")
if not os.path.exists(model_annot_name):
tkMessageBox.showinfo(title = "Info", message = "No existing annotation model")
return
# get the rectangle coordinates for each frame of the loaded model
previous_frames = self.load_annotations_from_file(model_annot_name)
self.rectangle_frame_pairs = [0]*len(previous_frames)
self.rectangle_frame_pairs[0:len(previous_frames)] = previous_frames
w = 0
h = 0
# get number of annotated frames
number_of_annotaded_frames = sum([int(i !=0) for i in self.rectangle_frame_pairs])
# check if the current frame is an annotated one
if (self.rectangle_frame_pairs[self.img_num] != 0):
label_index = self.get_label_index_in_list()
self.update_image_annotated_with_label(label_index)
self.frame_annot_label.winfo_children()[0].config(text="Annotated frames: {0:0{width}}/{1}".format(number_of_annotaded_frames, len(self.rectangle_frame_pairs), width=3))
tkMessageBox.showinfo(title = "Info", message = "Annotation model loaded")
def next_video(self, forward=True):
save_annot = "no"
save_annot = tkMessageBox.askquestion("End of video frames", "Save annotations?", icon = "warning")
if save_annot == "yes":
self.save()
else:
tkMessageBox.showinfo(title = "Info", message = "Annotation model not saved")
#delete rectangle
for i in range(0, self.num_labels):
self.canvas.delete(self.polygon_id[i])
if forward is True:
self.video_index+=1
if self.video_index == self.total_num_of_videos:
self.video_index = 0
else:
self.video_index-=1
if self.video_index == -1:
self.video_index = self.total_num_of_videos-1
self.img_num = 0
self.load_frames(self.list_of_videos[self.video_index])
self.change_image()
# check if label 0 is chosen
if self.label_number == 0:
self.all_annotations_mode()
# check if this image is annotated
elif (self.rectangle_frame_pairs[self.img_num] != 0):
label_index = self.get_label_index_in_list()
self.update_image_annotated_with_label(label_index)
self.show_masks()
def rightKey(self, event):
self.img_num +=1
if self.img_num > self.video_num_of_frames-1:
save_annot = "no"
save_annot = tkMessageBox.askquestion("End of video frames", "Save annotations?", icon = "warning")
if save_annot == "yes":
self.save()
else:
tkMessageBox.showinfo(title = "Info", message = "Annotation model not saved")
#delete rectangle
for i in range(0, self.num_labels):
self.canvas.delete(self.polygon_id[i])
self.video_index+=1
if self.video_index == self.total_num_of_videos:
self.video_index = 0
self.img_num = 0
self.load_frames(self.list_of_videos[self.video_index])
self.change_image()
# check if label 0 is chosen
if self.label_number == 0:
self.all_annotations_mode()
# check if this image is annotated
elif (self.rectangle_frame_pairs[self.img_num] != 0):
label_index = self.get_label_index_in_list()
self.update_image_annotated_with_label(label_index)
self.show_masks()
def leftKey(self, event):
self.img_num -=1
if self.img_num < 0:
save_annot = "no"
save_annot = tkMessageBox.askquestion("End of video frames", "Save annotations?", icon = "warning")
if save_annot == "yes":
self.save()
else:
tkMessageBox.showinfo(title = "Info", message = "Annotation model not saved")
# delete rectangle
for i in range(0, self.num_labels):
self.canvas.delete(self.polygon_id[i])
self.video_index-=1
if self.video_index == -1:
self.video_index = self.total_num_of_videos-1
self.img_num = self.list_number_of_frames[self.video_index] - 1
self.load_frames(self.list_of_videos[self.video_index])
self.change_image()
# check if label 0 is chosen
if self.label_number == 0:
self.all_annotations_mode()
# check if this image is annotated
elif (self.rectangle_frame_pairs[self.img_num] != 0):
label_index = self.get_label_index_in_list()
self.update_image_annotated_with_label(label_index)
self.show_masks()
def get_label_index_in_list(self):
# number of previous labels for this image
size_labels = len(self.rectangle_frame_pairs[self.img_num])
label_exists = False
# check if there was a previous annotation for the given label number
for i in range(0, size_labels):
if self.label_number == self.rectangle_frame_pairs[self.img_num][i][-1]:
label_exists = True
return i
return -1
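# Note (inferred from returnKey/backspaceKey below): rectangle_frame_pairs is
# indexed by frame number; each entry is either 0 (not annotated) or a list of
# [left_x, top_y, right_x, bottom_y, label_number] rectangles, one per label.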
def returnKey(self, event):
# don't annotate if you are in the mask only mode
if self.show_mask_flag.get() == 1:
tkMessageBox.showinfo(title="Info",message="Can not annotate in this mode")
return
# check if the label is not 0
if self.label_number == 0:
tkMessageBox.showinfo(title="Warning",message="Can not annotate while label is 0")
return
img_width = self.curr_photoimage.width()
img_height = self.curr_photoimage.height()
#save rectangle position
coords_relative = self.get_coord_rectangle()
# add the label number at the end of the coords
coords_relative.append(self.label_number)
#check if there was bad annotations
# rectangle is defined by left top corner and right bottom corner
left_upper_x = coords_relative[0]
left_upper_y = coords_relative[1]
right_bottom_x = coords_relative[2]
right_bottom_y = coords_relative[3]
# check if the rectangle is outside the image borders
if left_upper_x < 0 or left_upper_y <0 or right_bottom_x >= img_width or right_bottom_y >= img_height:
tkMessageBox.showinfo(title = "Info", message = "Bad annotation, try again")
return
# if first time to annotate this frame
        if self.rectangle_frame_pairs[self.img_num] == 0:
self.rectangle_frame_pairs[self.img_num] = []
self.rectangle_frame_pairs[self.img_num].append(coords_relative)
# TODO: Could save the seg_mask here
# else check for previous annotations
else:
label_index = self.get_label_index_in_list()
if label_index != -1:
self.rectangle_frame_pairs[self.img_num][label_index] = coords_relative
else:
self.rectangle_frame_pairs[self.img_num].append(coords_relative)
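        # dlib's correlation tracker is used to follow the annotated object:
        # start_track() initialises it on the first annotated frame, update()
        # refines it on later frames, and get_position() returns the tracked
        # rectangle used to move the overlay below.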
if (self.flag == 0):
            # check the coordinates once more
self.tracker.start_track(self.curr_image_raw, dlib.rectangle(coords_relative[0],coords_relative[1],coords_relative[2],coords_relative[3]))
#self.tracker.start_track(self.images_raw[0], dlib.rectangle(170, 200, 240, 240))
self.flag = 1
else:
#update filter
self.tracker.update(self.curr_image_raw, dlib.rectangle(coords_relative[0],coords_relative[1],coords_relative[2],coords_relative[3]))
# update rectangle (overlay it)
rel_position = self.tracker.get_position()
curr_position = self.get_coord_rectangle()
self.canvas.move(self.polygon_id[0], -curr_position[0]+rel_position.left(), -curr_position[1]+rel_position.top())
self.img_num += 1
# check if this is the last frame
if self.img_num >= self.video_num_of_frames:
# delete polygon
self.canvas.delete(self.polygon_id[0])
save_annot = "no"
save_annot = tkMessageBox.askquestion("End of video frames", "Save annotations?", icon = "warning")
if save_annot == "yes":
self.save()
else:
tkMessageBox.showinfo(title = "Info", message = "Annotation model not saved")
self.img_num = 0
self.video_index+=1
# check if the current video is the last video
if self.video_index == self.total_num_of_videos:
self.video_index = 0
# load frames of the next video
self.load_frames(self.list_of_videos[self.video_index])
self.change_image()
# update according to the label number selected
        if self.rectangle_frame_pairs[self.img_num] != 0:
label_index = self.get_label_index_in_list()
self.update_image_annotated_with_label(label_index)
def backspaceKey(self, event):
# don't delete if you are in the mask only mode
if self.show_mask_flag.get() == 1:
tkMessageBox.showinfo(title="Info",message="Can not delete in this mode")
return
# delete all annotations for all the labels if label is 0
if self.label_number == 0:
self.rectangle_frame_pairs[self.img_num] = 0
for i in range(0, self.num_labels):
self.canvas.delete(self.polygon_id[i])
else:
label_index = self.get_label_index_in_list()
if label_index != -1:
del self.rectangle_frame_pairs[self.img_num][label_index]
if len(self.rectangle_frame_pairs[self.img_num]) == 0:
self.rectangle_frame_pairs[self.img_num] = 0
self.canvas.itemconfig(self.polygon_id[0], outline = "blue")
        # get number of annotated frames
        number_of_annotated_frames = sum([int(i != 0) for i in self.rectangle_frame_pairs])
        self.frame_annot_label.winfo_children()[0].config(text="Annotated frames: {0:0{width}}/{1}".format(number_of_annotated_frames, len(self.rectangle_frame_pairs), width=3))
def OnTokenButtonPress(self, event):
        '''Begin drag of an object'''
# record the item and its location
self._drag_data["item"] = self.canvas.find_closest(event.x, event.y)[0]
self._drag_data["x"] = event.x
self._drag_data["y"] = event.y
def OnTokenButtonRelease(self, event):
'''End drag of an object'''
# reset the drag information
self._drag_data["item"] = None
self._drag_data["x"] = 0
self._drag_data["y"] = 0
def OnTokenMotion(self, event):
'''Handle dragging of an object'''
# compute how much this object has moved
delta_x = event.x - self._drag_data["x"]
delta_y = event.y - self._drag_data["y"]
# move the object the appropriate amount
self.canvas.move(self._drag_data["item"], delta_x, delta_y)
# record the new position
self._drag_data["x"] = event.x
self._drag_data["y"] = event.y
def OnPickColorCoord(self, event):
'''Pick the background color coords'''
#check if clicked on the canvas
relative_x = event.x - self.img_start_x
relative_y = event.y - self.img_start_y
img_width = self.curr_photoimage.width()
img_height = self.curr_photoimage.height()
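        # read the RGB pixel under the click; the raw image is a numpy array
        # indexed as [row, column], hence (relative_y, relative_x)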
self.bgcolor_rgb = self.curr_image_raw[relative_y, relative_x].tolist()
bgcolor_hex = '#%02x%02x%02x' % tuple(self.bgcolor_rgb)
self.bgcolor_canvas.delete("all")
        self.bgcolor_canvas.create_rectangle(0, 0, 40, 20, fill=bgcolor_hex)
# NiclasEriksen/rpg_procgen
import math
import random
from functions import *
from spanning_tree import get_connections
import gridmancer
import numpy as np
from collections import Counter
import jsonpickle # For saving the dungeon
class DungeonGenerator:
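    # Overview of the generation pipeline (see generate()): rooms are placed by
    # rejection sampling, connected with L-shaped corridors along a spanning tree,
    # walls are derived from the remaining grid cells and reduced to a minimal set
    # of rectangles, and rooms are decorated with pillars, enemy spawns and
    # collidable objects.
    #
    # Minimal usage sketch (assumes a standard logging.Logger instance):
    #   gen = DungeonGenerator(logging.getLogger("dungeon"),
    #                          config={"dungeon_size": (80, 50)})
    #   gen.generate()
    #   tiles = gen.get_tilemap()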
def __init__(self, logger, config=None):
self.logger = logger
self.config = dict(
roomcount_min=8,
roomcount_max=16,
room_min_size=5,
room_max_size=30,
dungeon_size=(60, 40),
treasure_chance=30,
enemy_chance=70,
corridor_min_width=1,
corridor_max_width=2,
corridor_wide_chance=30,
attempts_max=200,
)
if config:
for k, v in config.items():
if k in self.config:
self.config[k] = v
else:
self.logger.error(
"No such dungeon config item: {0}".format(k)
)
self.rooms = []
self.corridors = []
self.grid = []
self.walls = []
self.wall_rects = []
self.startroom = None
self.enemy_rooms = []
self.collidable = []
self.pillars = []
self.collidable_objects = []
def set_config(self, setting, value):
if setting in self.config:
self.config[setting] = value
else:
print("No such config item.")
    def check_config(self):
        if self.config["room_max_size"] > self.config["dungeon_size"][0] - 2:
            return False
        elif self.config["room_max_size"] > self.config["dungeon_size"][1] - 2:
            return False
        return True
def get_all_rects(self):
rects = []
for r in self.rooms:
for x in range(r.x1, r.x2):
for y in range(r.y1, r.y2):
rects.append((x, y))
for c in self.corridors:
for x in range(c.x1, c.x2 + 1):
for y in range(c.y1, c.y2 + 1):
if not (x, y) in rects:
rects.append((x, y))
for p in self.pillars:
rects.remove(p)
return rects
def get_tilemap(self):
img_grid = {}
floor = self.get_all_rects()
walkable = []
for f in floor:
walkable.append((f[0] * 32, f[1] * 32))
for tile in walkable:
# if (tile[0], tile[1] - 32) not in walkable:
# img_grid[tile] = "floor_bottom"
# else:
img_grid[tile] = "floor"
for p in self.pillars:
img_grid[(p[0] * 32, p[1] * 32)] = "pillar"
for o in self.collidable_objects:
img_grid[(o[0] * 32, o[1] * 32)] = "col_obj"
walls = []
for w in self.walls:
walls.append(w.p1)
for tile in walls:
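            # classify each wall tile by which of its four neighbours are also walls,
            # so the matching sprite can be chosen (end caps, corners, edges, or a
            # fully surrounded plain wall)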
if (
(tile[0], tile[1] - 32) not in walls and
(tile[0], tile[1] + 32) not in walls and
(tile[0] - 32, tile[1]) not in walls and
(tile[0] + 32, tile[1]) not in walls
):
img_grid[tile] = "wall_s"
elif (
(tile[0], tile[1] - 32) not in walls and
(tile[0], tile[1] + 32) not in walls and
(tile[0] - 32, tile[1]) not in walls
):
img_grid[tile] = "wall_s_l"
elif (
(tile[0], tile[1] - 32) not in walls and
(tile[0], tile[1] + 32) not in walls and
(tile[0] + 32, tile[1]) not in walls
):
img_grid[tile] = "wall_s_r"
elif (
(tile[0], tile[1] - 32) not in walls and
(tile[0] + 32, tile[1]) not in walls and
(tile[0] - 32, tile[1]) not in walls
):
img_grid[tile] = "wall_s_b"
elif (
(tile[0], tile[1] + 32) not in walls and
(tile[0] + 32, tile[1]) not in walls and
(tile[0] - 32, tile[1]) not in walls
):
img_grid[tile] = "wall_s_t"
elif (
(tile[0] - 32, tile[1]) not in walls and
(tile[0] + 32, tile[1]) not in walls
):
img_grid[tile] = "wall_s_v"
elif (
(tile[0], tile[1] - 32) not in walls and
(tile[0], tile[1] + 32) not in walls
):
img_grid[tile] = "wall_s_h"
elif (
(tile[0], tile[1] + 32) not in walls and
(tile[0] - 32, tile[1]) not in walls
):
img_grid[tile] = "wall_topleft"
elif (
(tile[0], tile[1] + 32) not in walls and
(tile[0] + 32, tile[1]) not in walls
):
img_grid[tile] = "wall_topright"
elif (
(tile[0], tile[1] - 32) not in walls and
(tile[0] - 32, tile[1]) not in walls
):
img_grid[tile] = "wall_bottomleft"
elif (
(tile[0], tile[1] - 32) not in walls and
(tile[0] + 32, tile[1]) not in walls
):
img_grid[tile] = "wall_bottomright"
elif (tile[0], tile[1] - 32) not in walls:
img_grid[tile] = "wall_bottom"
elif (tile[0], tile[1] + 32) not in walls:
img_grid[tile] = "wall_top"
elif (tile[0] - 32, tile[1]) not in walls:
img_grid[tile] = "wall_left"
elif (tile[0] + 32, tile[1]) not in walls:
img_grid[tile] = "wall_right"
else:
img_grid[tile] = "wall"
return img_grid
def generate(self):
self.flush()
self.logger.info(
"Generating dungeon of size {0} by {1} squares...".format(
self.config["dungeon_size"][0], self.config["dungeon_size"][1]
)
)
self.place_rooms()
# if len(self.rooms) < self.config["roomcount_min"]:
# self.logger.warning("Could not generate enough rooms, retrying.")
# self.generate()
# oldroom = None
# for r in self.rooms:
# if oldroom:
# self.connect_rooms(r, oldroom)
# oldroom = r
self.connect_rooms()
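        # the start room is the one with the fewest corridor connections; the end
        # room is the room farthest away from it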
l = [item for sublist in self.connections for item in sublist]
sl = sorted(Counter(l).items(), key=lambda x: x[::-1])
self.startroom = self.rooms[sl[0][0]]
p1 = self.startroom.center
longest = 0
endroom = None
for r in self.rooms:
l = get_dist(*p1, *r.center)
if l > longest:
endroom = r
longest = l
self.endroom = endroom
self.define_rooms()
self.grid = self.get_all_rects()
self.generate_walls()
self.generate_wall_grid()
self.logger.info(
"{0} rooms added.".format(
len(self.rooms)
)
)
def define_rooms(self):
available_rooms = self.rooms.copy()
# self.startroom = self.rooms[random.randrange(0, len(self.rooms))]
available_rooms.remove(self.startroom)
available_rooms.remove(self.endroom)
# poi.append(POI(*startroom.center))
while len(available_rooms):
r = available_rooms[random.randrange(0, len(available_rooms))]
if random.randint(0, 101) <= self.config["treasure_chance"]:
for p in self.generate_pillars(r):
self.pillars.append(p)
self.collidable.append(
Collidable(*p)
)
else:
# newpoi = POI(*r.center)
self.enemy_rooms.append(r)
# poi.append(newpoi)
available_rooms.remove(r)
for er in self.enemy_rooms:
er.set_spawn_locations()
for r in self.rooms:
for o in self.generate_collidable_objects(r):
self.collidable_objects.append(o)
self.collidable.append(
Collidable(*o)
)
def generate_collidable_objects(self, room):
w, h = room.w, room.h
x, y = room.x1, room.y1
o = []
for gx in range(x + 1, x + w - 2):
for gy in range(y + 1, y + h - 2):
if (
not (gx, gy) in self.pillars and
not (gx, gy) in room.spawn_locations and
not (gx, gy) == self.startroom.center
):
if not random.randint(0, 60):
o.append((gx, gy))
return o
def generate_walls(self):
self.walls = []
walls = []
for x in range(0, self.config["dungeon_size"][0]):
for y in range(0, self.config["dungeon_size"][1]):
walls.append((x, y))
for free in self.grid:
walls.remove(free)
for p in self.pillars:
walls.remove(p)
for w in walls:
self.walls.append(Wall(w[0], w[1]))
def generate_wall_grid(self):
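        # collapse individual wall cells into the fewest axis-aligned rectangles
        # (via gridmancer.grid_reduce); the result is stored in self.wall_rects as
        # [(x1, y1), (x2, y2)] corner pairs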
cols, rows = self.config["dungeon_size"]
# print(cols, rows)
# print(self.walls)
array = [[0 for x in range(cols)] for x in range(rows)]
# print(len(array), len(array[0]))
for w in self.walls:
x, y = w.gx, w.gy
# print(x, y)
array[y][x] = -1
# print(array)
array, rect_count = gridmancer.grid_reduce(grid=array)
# self.logger.info("Reduced walls to minimum amount of rectangles.")
# print("--------")
# print(array)
minimal_grid = np.array(array)
rects = []
for i in range(rect_count):
rects.append(np.asarray(np.where(minimal_grid == i + 1)).T.tolist())
# self.logger.info("Creating rectangles of new array.")
# print(rect_count, len(rects))
final_sets = []
for r in rects:
final_sets.append(
[
(r[0][1], r[0][0]),
(r[-1][1], r[-1][0])
]
)
self.wall_rects = final_sets
# print(minimal_grid)
# return minimal_grid
def generate_pillars(self, room):
w, h = room.w, room.h
x, y = room.x1, room.y1
p = []
if w >= 7 and h >= 9 and not h % 3:
y_count = h // 3
for yi in range(0, y_count):
p.append(
(x + 1, y + yi * 3 + 1)
)
p.append(
(x + w - 2, y + yi * 3 + 1)
)
else:
self.logger.debug("Room not suitable for pillars.")
return p
def connect_rooms(self):
roomcenters = []
for r in self.rooms:
roomcenters.append(r.center)
connections = get_connections(roomcenters)
self.connections = connections
for c in connections:
self.connect_two_rooms(self.rooms[c[0]], self.rooms[c[1]])
def connect_two_rooms(self, r1, r2):
x1, y1 = r1.center
x2, y2 = r2.center
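        # corridors are L-shaped: one horizontal and one vertical segment joining
        # the two room centres at a shared corner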
# 50/50 chance of starting horizontal or vertical
if random.randint(0, 1):
c1p1 = (x1, y1)
c1p2 = (x2, y1)
c2p1 = (x2, y1)
c2p2 = (x2, y2)
else:
c1p1 = (x1, y1)
c1p2 = (x1, y2)
c2p1 = (x1, y2)
c2p2 = (x2, y2)
w = self.config["corridor_min_width"]
if not random.randint(0, 2):
w = self.config["corridor_max_width"]
c1 = Corridor(c1p1, c1p2, w=w)
c2 = Corridor(c2p1, c2p2, w=w)
self.corridors.append(c1)
self.corridors.append(c2)
def place_rooms(self):
min_roomsize = self.config["room_min_size"]
max_roomsize = self.config["room_max_size"]
map_width, map_height = self.config["dungeon_size"]
i = 0
attempts = 0
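        # rejection sampling: propose random rooms and discard any that intersect an
        # existing room; stop after roomcount_max rooms or attempts_max consecutive
        # failed placements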
while i < self.config["roomcount_max"]:
if attempts > self.config["attempts_max"]:
self.logger.warning("Failed to add more rooms")
break
w = random.randrange(min_roomsize, max_roomsize + 1)
h = random.randrange(min_roomsize, max_roomsize + 1)
x = random.randrange(0 + 1, map_width - w - 1)
y = random.randrange(0 + 1, map_height - h - 1)
newroom = EnemyRoom(x, y, w, h)
valid = True
for r in self.rooms:
if newroom.check_intersect(r):
valid = False
attempts += 1
break
if valid:
attempts = 0
self.rooms.append(newroom)
i += 1
def flush(self):
self.rooms = []
self.corridors = []
def save(self, target):
dungeonstate = dict()
dungeonstate["rooms"] = self.rooms
dungeonstate["walls"] = self.walls
dungeonstate["wall_rects"] = self.wall_rects
dungeonstate["corridors"] = self.corridors
dungeonstate["startroom"] = self.startroom
dungeonstate["grid"] = self.grid
dungeonstate["enemy_rooms"] = self.enemy_rooms
dungeonstate["collidable"] = | |
# import dash IO and graph objects
from dash.dependencies import Input, Output
# Plotly graph objects to render graph plots
import plotly.graph_objects as go
import plotly.express as px
from plotly.subplots import make_subplots
# Import dash html, bootstrap components, and tables for datatables
from dash import html
import dash_bootstrap_components as dbc
from dash import dcc
# Import app
from app import app
# Import custom data.py
import data
# Import data from data.py file
sales_dt_df = data.sales_dt_df
sales_df = data.sales_df
ads_df = data.ads_df
sales_customer_aggs = data.sales_customer_aggs
ads_customer_aggs = data.ads_customer_aggs
from datetime import datetime, timedelta
prime_day_start = '2020-10-13'
prime_day_end = '2020-10-14'
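# Each KPI figure below pairs a two-bar comparison (Prime Day vs. Non-Prime Day)
# with an indicator showing the Prime Day value and its delta against the
# Non-Prime Day baseline.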
@app.callback(
[
Output("sales-chart", "figure"),
Output("orders-chart", "figure"),
Output("quantity-chart", "figure"),
# Output("sales-dt-chart", "figure"),
Output("med-sales-dt-chart", "figure"),
],
[
Input("marketplace-dropdown", "value"),
],
)
def update_sales_stats(marketplace):
mask = (
(sales_df['Marketplace'] == marketplace)
)
filtered_data = sales_df.loc[mask, :]
mask = (
(sales_dt_df['Marketplace'] == marketplace)
)
filtered_dt_data = sales_dt_df.loc[mask, :]
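    # sales_df holds per-period aggregates (Prime Day vs. Non-Prime Day), while
    # sales_dt_df holds the daily time series used for the line chart further down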
var = 'Median 2-Day Sales'
chart1 = make_subplots(
rows=1, cols=2,
column_widths= [0.7, 0.3],
specs= [[{'type': 'bar'}, {'type': 'indicator'}]],
# subplot_titles=("Plot 1", "Plot 2"),
)
chart1.add_trace(
go.Bar(
x= filtered_data['Period'],
y= filtered_data[var],
marker= dict(color= ['#242C40', '#00AAE2']),
),
row=1, col=1,
)
chart1.add_trace(
go.Indicator(
mode = 'number+delta',
value = filtered_data[filtered_data['Period'] == 'Prime Day'][var].values[0],
number = {'prefix': '$'},
delta = {'position': 'top',
'reference': filtered_data[filtered_data['Period'] == 'Non-Prime Day'][var].values[0],
},
domain = {'x': [0, 1], 'y': [0, 1]},
title= 'Prime Day',
),
row=1, col=2,
)
chart1.update_layout(
title={'text': var},
template= 'none',
hovermode= 'x',
yaxis_tickprefix = '$',
margin= dict(
l= 80,
r= 80,
b= 50,
t= 80,
),
)
var = 'Median 2-Day Orders'
chart2 = make_subplots(
rows=1, cols=2,
column_widths= [0.7, 0.3],
specs= [[{'type': 'bar'}, {'type': 'indicator'}]],
# subplot_titles=("Plot 1", "Plot 2"),
)
chart2.add_trace(
go.Bar(
x= filtered_data['Period'],
y= filtered_data[var],
marker= dict(color= ['#242C40', '#00AAE2']),
),
row=1, col=1,
)
chart2.add_trace(
go.Indicator(
mode = 'number+delta',
value = filtered_data[filtered_data['Period'] == 'Prime Day'][var].values[0],
delta = {'position': 'top',
'reference': filtered_data[filtered_data['Period'] == 'Non-Prime Day'][var].values[0],
},
domain = {'x': [0, 1], 'y': [0, 1]},
title= 'Prime Day',
),
row=1, col=2,
)
chart2.update_layout(
title={'text': var},
template= 'none',
hovermode= 'x',
margin= dict(
l= 80,
r= 80,
b= 50,
t= 80,
),
)
var = 'Median 2-Day Quantity'
chart3 = make_subplots(
rows=1, cols=2,
column_widths= [0.7, 0.3],
specs= [[{'type': 'bar'}, {'type': 'indicator'}]],
# subplot_titles=("Plot 1", "Plot 2"),
)
chart3.add_trace(
go.Bar(
x= filtered_data['Period'],
y= filtered_data[var],
marker= dict(color= ['#242C40', '#00AAE2']),
),
row=1, col=1,
)
chart3.add_trace(
go.Indicator(
mode = 'number+delta',
value = filtered_data[filtered_data['Period'] == 'Prime Day'][var].values[0],
delta = {'position': 'top',
'reference': filtered_data[filtered_data['Period'] == 'Non-Prime Day'][var].values[0],
},
domain = {'x': [0, 1], 'y': [0, 1]},
title= 'Prime Day',
),
row=1, col=2,
)
chart3.update_layout(
title={'text': var},
template= 'none',
hovermode= 'x',
margin= dict(
l= 80,
r= 80,
b= 50,
t= 80,
),
)
# var = 'Total Sales'
# chart4 = px.line(
# filtered_dt_data,
# x= 'Date', y= var,
# title= '''{} Market {}'''.format(marketplace, var),
# template= 'none',
# markers= True,
# # hover_data= {variable: '{}'.format(':$.2f' if variable in dollar_cols else ':.2f')},
# )
# chart4.update_traces(hovertemplate= None)
# chart4.update_layout(
# hovermode= 'x',
# yaxis_tickprefix = '$',
# # yaxis_tickformat = '.2f',
# shapes=[
# dict(
# type= 'rect',
# xref= 'x',
# yref= 'y',
# x0= datetime.strptime(prime_day_start, '%Y-%m-%d') - timedelta(0.2),
# y0= '0',
# x1= datetime.strptime(prime_day_end, '%Y-%m-%d') + timedelta(0.2),
# y1= filtered_dt_data[var].max() + filtered_dt_data[var].max() * .05,
# # fillcolor= 'lightgray',
# fillcolor= '#00AAE2',
# opacity= 0.2,
# line_width= 0,
# layer= 'below',
# ),
# ],
# )
# chart4.add_annotation(
# x= datetime.strptime(prime_day_start, '%Y-%m-%d') + timedelta(0.5),
# y= filtered_dt_data[var].max() + filtered_dt_data[var].max() * .1,
# text= '<b>Prime Day</b>',
# showarrow= False,
# font= {'family': 'Franklin Gothic'},
# )
var = 'Median Sales'
chart5 = px.line(
filtered_dt_data,
x= 'Date', y= var,
title= '''{} Market {}'''.format(marketplace, var),
template= 'none',
markers= True,
# hover_data= {variable: '{}'.format(':$.2f' if variable in dollar_cols else ':.2f')},
)
chart5.update_traces(hovertemplate= None)
chart5.update_layout(
hovermode= 'x',
yaxis_tickprefix = '$',
# yaxis_tickformat = '.2f',
shapes=[
dict(
type= 'rect',
xref= 'x',
yref= 'y',
x0= datetime.strptime(prime_day_start, '%Y-%m-%d') - timedelta(0.2),
y0= '0',
x1= datetime.strptime(prime_day_end, '%Y-%m-%d') + timedelta(0.2),
y1= filtered_dt_data[var].max() + filtered_dt_data[var].max() * .05,
# fillcolor= 'lightgray',
fillcolor= '#00AAE2',
opacity= 0.2,
line_width= 0,
layer= 'below',
),
],
)
chart5.add_annotation(
x= datetime.strptime(prime_day_start, '%Y-%m-%d') + timedelta(0.5),
y= filtered_dt_data[var].max() + filtered_dt_data[var].max() * .1,
text= '<b>Prime Day</b>',
showarrow= False,
font= {'family': 'Franklin Gothic'},
)
return chart1, chart2, chart3, chart5
@app.callback(
[
Output("ad-revenue-chart", "figure"),
Output("ad-spending-chart", "figure"),
Output("roas-chart", "figure"),
Output("acos-chart", "figure"),
Output("ctr-chart", "figure"),
Output("cpc-chart", "figure"),
],
[
Input("marketplace-dropdown", "value"),
Input("sponsored-type-dropdown", "value"),
],
)
def update_ad_stats(marketplace, stype):
mask = (
(ads_df['Marketplace'] == marketplace)
& (ads_df['Sponsored Type'] == stype)
)
filtered_data = ads_df.loc[mask, :]
var = 'Median 2-Day Ad Revenue'
chart1 = make_subplots(
rows=1, cols=2,
column_widths= [0.7, 0.3],
specs= [[{'type': 'bar'}, {'type': 'indicator'}]],
# subplot_titles=("Plot 1", "Plot 2"),
)
chart1.add_trace(
go.Bar(
x= filtered_data['Period'],
y= filtered_data[var],
marker= dict(color= ['#242C40', '#00AAE2']),
),
row=1, col=1,
)
chart1.add_trace(
go.Indicator(
mode = 'number+delta',
value = filtered_data[filtered_data['Period'] == 'Prime Day'][var].values[0],
number = {'prefix': '$'},
delta = {'position': 'top',
'reference': filtered_data[filtered_data['Period'] == 'Non-Prime Day'][var].values[0],
},
domain = {'x': [0, 1], 'y': [0, 1]},
title= 'Prime Day',
),
row=1, col=2,
)
chart1.update_layout(
title={'text': var},
template= 'none',
hovermode= 'x',
yaxis_tickprefix = '$',
margin= dict(
l= 80,
r= 80,
b= 50,
t= 80,
),
)
var = 'Median 2-Day Ad Spending'
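    # for cost metrics (ad spending, ACoS, CPC) the indicator's delta colours are
    # inverted: a decrease shows green and an increase shows red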
chart2 = make_subplots(
rows=1, cols=2,
column_widths= [0.7, 0.3],
specs= [[{'type': 'bar'}, {'type': 'indicator'}]],
# subplot_titles=("Plot 1", "Plot 2"),
)
chart2.add_trace(
go.Bar(
x= filtered_data['Period'],
y= filtered_data[var],
marker= dict(color= ['#242C40', '#00AAE2']),
),
row=1, col=1,
)
chart2.add_trace(
go.Indicator(
mode = 'number+delta',
value = filtered_data[filtered_data['Period'] == 'Prime Day'][var].values[0],
number = {'prefix': '$'},
delta = {'position': 'top',
'reference': filtered_data[filtered_data['Period'] == 'Non-Prime Day'][var].values[0],
'decreasing': {'color': '#3D9970'},
'increasing': {'color': '#FF4136'},
},
domain = {'x': [0, 1], 'y': [0, 1]},
title= 'Prime Day',
),
row=1, col=2,
)
chart2.update_layout(
title={'text': var},
template= 'none',
hovermode= 'x',
yaxis_tickprefix = '$',
margin= dict(
l= 80,
r= 80,
b= 50,
t= 80,
),
)
var = 'Median ROAS'
chart3 = make_subplots(
rows=1, cols=2,
column_widths= [0.7, 0.3],
specs= [[{'type': 'bar'}, {'type': 'indicator'}]],
)
chart3.add_trace(
go.Bar(
x= filtered_data['Period'],
y= filtered_data[var],
marker= dict(color= ['#242C40', '#00AAE2']),
),
row=1, col=1,
)
chart3.add_trace(
go.Indicator(
mode = 'number+delta',
value = filtered_data[filtered_data['Period'] == 'Prime Day'][var].values[0],
number = {'suffix': '%'},
delta = {'position': 'top',
'reference': filtered_data[filtered_data['Period'] == 'Non-Prime Day'][var].values[0],
},
domain = {'x': [0, 1], 'y': [0, 1]},
title= 'Prime Day',
),
row=1, col=2,
)
chart3.update_layout(
title={'text': var},
template= 'none',
hovermode= 'x',
yaxis_ticksuffix = '%',
margin= dict(
l= 80,
r= 80,
b= 50,
t= 80,
),
)
var = 'Median ACoS'
chart4 = make_subplots(
rows=1, cols=2,
column_widths= [0.7, 0.3],
specs= [[{'type': 'bar'}, {'type': 'indicator'}]],
)
chart4.add_trace(
go.Bar(
x= filtered_data['Period'],
y= filtered_data[var],
marker= dict(color= ['#242C40', '#00AAE2']),
),
row=1, col=1,
)
chart4.add_trace(
go.Indicator(
mode = 'number+delta',
value = filtered_data[filtered_data['Period'] == 'Prime Day'][var].values[0],
number = {'suffix': '%'},
delta = {'position': 'top',
'reference': filtered_data[filtered_data['Period'] == 'Non-Prime Day'][var].values[0],
'decreasing': {'color': '#3D9970'},
'increasing': {'color': '#FF4136'},
},
domain = {'x': [0, 1], 'y': [0, 1]},
title= 'Prime Day',
),
row=1, col=2,
)
chart4.update_layout(
title={'text': var},
template= 'none',
hovermode= 'x',
yaxis_ticksuffix = '%',
margin= dict(
l= 80,
r= 80,
b= 50,
t= 80,
),
)
var = 'Median CTR'
chart5 = make_subplots(
rows=1, cols=2,
column_widths= [0.7, 0.3],
specs= [[{'type': 'bar'}, {'type': 'indicator'}]],
)
chart5.add_trace(
go.Bar(
x= filtered_data['Period'],
y= filtered_data[var],
marker= dict(color= ['#242C40', '#00AAE2']),
),
row=1, col=1,
)
chart5.add_trace(
go.Indicator(
mode = 'number+delta',
value = filtered_data[filtered_data['Period'] == 'Prime Day'][var].values[0],
number = {'suffix': '%'},
delta = {'position': 'top',
'reference': filtered_data[filtered_data['Period'] == 'Non-Prime Day'][var].values[0],
},
domain = {'x': [0, 1], 'y': [0, 1]},
title= 'Prime Day',
),
row=1, col=2,
)
chart5.update_layout(
title={'text': var},
template= 'none',
hovermode= 'x',
yaxis_ticksuffix = '%',
margin= dict(
l= 80,
r= 80,
b= 50,
t= 80,
),
)
var = 'Median CPC'
chart6 = make_subplots(
rows=1, cols=2,
column_widths= [0.7, 0.3],
specs= [[{'type': 'bar'}, {'type': 'indicator'}]],
# subplot_titles=("Plot 1", "Plot 2"),
)
chart6.add_trace(
go.Bar(
x= filtered_data['Period'],
y= filtered_data[var],
marker= dict(color= ['#242C40', '#00AAE2']),
),
row=1, col=1,
)
chart6.add_trace(
go.Indicator(
mode = 'number+delta',
value = filtered_data[filtered_data['Period'] == 'Prime Day'][var].values[0],
number = {'prefix': '$'},
delta = {'position': 'top',
'reference': filtered_data[filtered_data['Period'] == 'Non-Prime Day'][var].values[0],
'decreasing': {'color': '#3D9970'},
'increasing': {'color': '#FF4136'},
},
domain = {'x': [0, 1], 'y': [0, 1]},
title= 'Prime Day',
),
row=1, col=2,
)
chart6.update_layout(
title={'text': var},
template= 'none',
        hovermode= 'x',
        yaxis_tickprefix = '$',
        margin= dict(
            l= 80,
            r= 80,
            b= 50,
            t= 80,
        ),
    )

    return chart1, chart2, chart3, chart4, chart5, chart6
# Copyright (c) 2020 - 2021 Open Risk (https://www.openriskmanagement.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from django.db import models
from django.urls import reverse
from npl_portfolio.property_collateral_choices import *
from npl_portfolio.loan import Loan
class PropertyCollateral(models.Model):
"""
The PropertyCollateral model object holds Property Collateral data conforming to the EBA NPL Template specification
`EBA Templates <https://www.openriskmanual.org/wiki/EBA_NPL_Property_Collateral_Table>`_
"""
#
# IDENTIFICATION FIELDS
#
protection_identifier = models.TextField(blank=True, null=True,
help_text='Institutions internal identifier for the Property Collateral.<a class ="risk_manual_url" href="https://www.openriskmanual.org/wiki/EBA_NPL.Property Collateral.Protection_identifier" >Documentation</a>')
#
# FOREIGN KEYS
#
loan_identifier = models.ForeignKey(Loan, on_delete=models.CASCADE, null=True, blank=True)
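    # links each property collateral record to the loan it secures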
#
# DATA PROPERTIES
#
address_of_property = models.TextField(blank=True, null=True,
help_text='Street address where the Property is located at, including flat / house number or name. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki/EBA_NPL.Property Collateral.Address_of_Property">Documentation</a>')
amount_of_vat_payable = models.FloatField(blank=True, null=True,
help_text='Amount of VAT payable on the disposal of the Unit. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki/EBA_NPL.11.05.Amount_of_VAT_Payable">Documentation</a>')
area_type_of_property = models.IntegerField(blank=True, null=True, choices=AREA_TYPE_OF_PROPERTY_CHOICES,
help_text='Area type where the Property is located at , i.e. City centre, Suburban and Rural. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki/EBA_NPL.Property Collateral.Area_Type_of_Property">Documentation</a>')
building_area_m2 = models.FloatField(blank=True, null=True,
help_text='Building area (square metres) of the Unit. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki/EBA_NPL.Property Collateral.Building_Area_M2">Documentation</a>')
building_area_m2_lettable = models.FloatField(blank=True, null=True,
help_text='Building area (square metres) of the Unit that is lettable. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki/EBA_NPL.Property Collateral.Building_Area_M2_Lettable">Documentation</a>')
building_area_m2_occupied = models.FloatField(blank=True, null=True,
help_text='Building area (square metres) of the Unit that has been occupied by landlord / tenant. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki/EBA_NPL.Property Collateral.Building_Area_M2_Occupied">Documentation</a>')
city_of_property = models.TextField(blank=True, null=True,
help_text='City where the Property is located at. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki/EBA_NPL.Property Collateral.City_of_Property">Documentation</a>')
completion_of_property = models.BooleanField(blank=True, null=True,
help_text='Indicator as to whether the construction of the Unit is complete. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki/EBA_NPL.Property Collateral.Completion_of_Property">Documentation</a>')
condition_of_property = models.IntegerField(blank=True, null=True, choices=CONDITION_OF_PROPERTY_CHOICES,
help_text='Quality classification of the property, e.g. Excellent, Good, Fair, Poor. and include explanation of the category, and please provide the internal methodology used to decide the categories as a part of the transaction documents. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki/EBA_NPL.Property Collateral.Condition_of_Property">Documentation</a>')
currency_of_property = models.TextField(blank=True, null=True,
help_text='Currency that the valuation and cash flows related to the Unit are expressed in. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki/EBA_NPL.Property Collateral.Currency_of_Property">Documentation</a>')
current_annual_passing_rent = models.FloatField(blank=True, null=True,
help_text='Current annual passing rent charged to the Tenants of the Unit as at latest valuation date. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki/EBA_NPL.Property Collateral.Current_Annual_Passing_Rent">Documentation</a>')
current_net_operating_income = models.FloatField(blank=True, null=True,
help_text='Current annual net operating income generated by the Unit as at the latest valuation date. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki/EBA_NPL.Property Collateral.Current_Net_Operating_Income">Documentation</a>')
current_opex_and_overheads = models.FloatField(blank=True, null=True,
help_text='Current annual operational expenses and overheads of the Unit as at latest valuation date. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki/EBA_NPL.Property Collateral.Current_Opex_And_Overheads">Documentation</a>')
date_of_initial_valuation = models.DateField(blank=True, null=True,
help_text='Date that the initial valuation was assessed. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki/EBA_NPL.Property Collateral.Date_of_Initial_Valuation">Documentation</a>')
date_of_latest_valuation = models.DateField(blank=True, null=True,
help_text='Date that the latest valuation took place. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki/EBA_NPL.Property Collateral.Date_of_Latest_Valuation">Documentation</a>')
enforcement_description = models.TextField(blank=True, null=True,
help_text='Comments/Description of the stage of Enforcement that the Property Collateral is in as at cut-off date. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki/EBA_NPL.Property Collateral.Enforcement_Description">Documentation</a>')
enforcement_status = models.BooleanField(blank=True, null=True,
help_text='Indicator as to whether the property collateral has entered into the enforcement process as at cut-off date. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki/EBA_NPL.Property Collateral.Enforcement_Status">Documentation</a>')
enforcement_status_third_parties = models.BooleanField(blank=True, null=True,
help_text='Indicator as to whether any other secured creditors have taken steps to enforce security over the asset? (Y/N). <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki/EBA_NPL.Property Collateral.Enforcement_Status_Third_Parties">Documentation</a>')
estimated_annual_void_cost = models.FloatField(blank=True, null=True,
help_text='Additional costs to "Current Opex And Overheads" when the Units are vacant. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki/EBA_NPL.Property Collateral.Estimated_Annual_Void_Cost">Documentation</a>')
estimated_rental_void = models.FloatField(blank=True, null=True,
help_text='Estimated number of months the property is expected to be void. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki/EBA_NPL.Property Collateral.Estimated_Rental_Void">Documentation</a>')
geographic_region_classification = models.IntegerField(blank=True, null=True,
choices=GEOGRAPHIC_REGION_CLASSIFICATION_CHOICES,
help_text='NUTS3 classification used for the field "Geographic Region of Property", i.e. NUTS3 2013 (1), NUTS3 2010 (2), NUTS3 2006 (3), NUTS3 2003 (4), Other (5). <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki/EBA_NPL.Property Collateral.Geographic_Region_Classification">Documentation</a>')
geographic_region_of_property = models.TextField(blank=True, null=True,
help_text='Province / Region where the Property is located at. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki/EBA_NPL.11.01.Property Collateral.Geographic_Region_of_Property">Documentation</a>')
initial_estimated_rental_value = models.FloatField(blank=True, null=True,
help_text='Estimated annual gross rental value of the Unit assessed at loan origination. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki/EBA_NPL.Property Collateral.Initial_Estimated_Rental_Value">Documentation</a>')
initial_valuation_amount = models.FloatField(blank=True, null=True,
help_text='Value of the Unit assessed at loan origination. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki/EBA_NPL.11.03.Initial_Valuation_Amount">Documentation</a>')
internal_or_external_initial_valuation = models.IntegerField(blank=True, null=True,
choices=INTERNAL_or_EXTERNAL_INITIAL_VALUATION_CHOICES,
help_text='Indicator as to whether the initial valuation was outsource, or done internally. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki/EBA_NPL.Property Collateral.Internal_or_External_Initial_Valuation">Documentation</a>')
internal_or_external_latest_valuation = models.IntegerField(blank=True, null=True,
choices=INTERNAL_or_EXTERNAL_LATEST_VALUATION_CHOICES,
help_text='Indicator as to whether the latest valuation was performed internally or by an external appraiser. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki/EBA_NPL.Property Collateral.Internal_or_External_Latest_Valuation">Documentation</a>')
land_area_m2 = models.FloatField(blank=True, null=True,
help_text='Land area (square metres) of the Property. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki/EBA_NPL.Property Collateral.Land_Area_M2">Documentation</a>')
latest_estimated_rental_value = models.FloatField(blank=True, null=True,
help_text='Estimated annual gross rental value of the Unit when last assessed. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki/EBA_NPL.Property Collateral.Latest_Estimated_Rental_Value">Documentation</a>')
latest_valuation_amount = models.FloatField(blank=True, null=True,
help_text='Value of the Unit when last assessed. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki/EBA_NPL.Property Collateral.Latest_Valuation_Amount">Documentation</a>')
legal_owner_of_the_property = models.TextField(blank=True, null=True,
help_text='Legal owner of the Property Collateral. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki/EBA_NPL.Property Collateral.Legal_Owner_of_the_Property">Documentation</a>')
number_of_bedrooms = models.FloatField(blank=True, null=True,
help_text='Number of bedrooms that the Unit has. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki/EBA_NPL.Property Collateral.Number_of_Bedrooms">Documentation</a>')
number_of_car_parking_spaces = models.FloatField(blank=True, null=True,
help_text='Number of car parking spaces relating to the Unit. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki/EBA_NPL.Property Collateral.Number_of_Car_Parking_Spaces">Documentation</a>')
number_of_lettable_units = models.FloatField(blank=True, null=True,
help_text='Number of lettable units that the Property has. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki/EBA_NPL.Property Collateral.Number_of_Lettable_Units">Documentation</a>')
number_of_rooms = models.FloatField(blank=True, null=True,
help_text='Number of rooms that the Unit has excluding kitchen and bathroom(s). <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki/EBA_NPL.11.02.Number_of_Rooms">Documentation</a>')
number_of_units_occupied = models.FloatField(blank=True, null=True,
help_text='Number of occupied lettable units that the Property has. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki/EBA_NPL.Property Collateral.Number_of_Units_Occupied">Documentation</a>')
number_of_units_vacant = models.FloatField(blank=True, null=True,
help_text='Number of vacant lettable units that the Property has. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki/EBA_NPL.Property Collateral.Number_of_Units_Vacant">Documentation</a>')
party_liable_for_vat = models.IntegerField(blank=True, null=True, choices=PARTY_LIABLE_FOR_VAT_CHOICES,
help_text='Party who is liable to pay the VAT on the disposal of the Unit i.e. the Institution or the buyer(s). <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki/EBA_NPL.Property Collateral.Party_Liable_for_VAT">Documentation</a>')
percentage_complete = models.FloatField(blank=True, null=True,
help_text='The percentage of development completed since construction started (applicable to Units in development). <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki/EBA_NPL.Property Collateral.Percentage_complete">Documentation</a>')
planned_capex_next_12m = models.FloatField(blank=True, null=True,
help_text='Current planned CAPEX for the next 12 months. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki/EBA_NPL.Property Collateral.Planned_Capex_next_12m">Documentation</a>')
property_country = models.TextField(blank=True, null=True,
help_text='Country of residence where the Property is located at. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki/EBA_NPL.Property Collateral.Property_Country">Documentation</a>')
property_postcode = models.TextField(blank=True, null=True,
help_text='Postcode where the Property is located at. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki/EBA_NPL.Property Collateral.Property_Postcode">Documentation</a>')
provider_of_energy_performance_certificate = models.TextField(blank=True, null=True,
help_text='Name of the provider of the energy performance certificate. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki/EBA_NPL.Property Collateral.Provider_of_Energy_Performance_Certificate">Documentation</a>')
provider_of_initial_valuation = models.TextField(blank=True, null=True,
help_text='Name of the external appraiser or managing / estate agent is when "Full Appraisal" or "Managing / Estate Agent" is selected in field "Type of Initial Valuation". If the valuation was done internally, please select "Internal". <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki/EBA_NPL.Property Collateral.Provider_of_Initial_Valuation">Documentation</a>')
provider_of_latest_valuation = models.TextField(blank=True, null=True,
help_text='Name of the external appraiser or managing / estate agent when "Full Appraisal" or "Managing / Estate Agent" is selected in field "Type of Latest Valuation". If the valuation was done internally, please select "Internal". <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki/EBA_NPL.11.04.Provider_of_Latest_Valuation">Documentation</a>')
purpose_of_property = models.IntegerField(blank=True, null=True, choices=PURPOSE_OF_PROPERTY_CHOICES,
help_text='Purpose of the Property, e.g. Investment property, owner occupied, Business Use, etc.. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki/EBA_NPL.Property Collateral.Purpose_of_Property">Documentation</a>')
register_of_deeds_number = models.TextField(blank=True, null=True,
help_text='Registration number of the Property. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki/EBA_NPL.Property Collateral.Register_of_Deeds_Number">Documentation</a>')
remaining_term_of_leasehold = models.FloatField(blank=True, null=True,
help_text='Remaining term of the leasehold when "Leasehold" is selected in field "Tenure". <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki/EBA_NPL.Property Collateral.Remaining_Term_of_Leasehold">Documentation</a>')
sector_of_property = models.IntegerField(blank=True, null=True, choices=SECTOR_OF_PROPERTY_CHOICES,
help_text='Sector which the property is used for, e.g. commercial real estate, residential real estate, etc.. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki/EBA_NPL.Property Collateral.Sector_of_Property">Documentation</a>')
tenure = models.IntegerField(blank=True, null=True, choices=TENURE_CHOICES,
help_text='Conditions that the Property is held or occupied, e.g. freehold and leasehold. <a class="risk_manual_url" href="https://www.openriskmanual.org/wiki/EBA_NPL.Property Collateral.Tenure">Documentation</a>')
type_of_initial_valuation = models.IntegerField(blank=True, null=True, choices=TYPE_OF_INITIAL_VALUATION_CHOICES,
                                                    help_text='Type of the initial valuation for the Unit.')
## Pixel size of the images (in Ang.)
#""" This makes the X-axis in the resolution plots have units of 1/Angstrom
#"""
#ResolSam=9.6
#
## {expert} Display resolution?
#DisplayResolution=False
#
##-----------------------------------------------------------------------------
## {section} Low-pass filtering
##-----------------------------------------------------------------------------
## Low-pass filter the reference?
#DoLowPassFilter=True
#
## Use estimated resolution for low-pass filtering?
#"""If set to true, the volume will be filtered at a frecuency equal to
# the resolution computed with a FSC=0.5 threshold, possibly
# plus a constant provided by the user in the next input box.
#
# If set to false, then the filtration will be made at the constant
# value provided by the user in the next box (in digital frequency,
# i.e. pixel-1: minimum 0, maximum 0.5)
#"""
#UseFscForFilter=True
#
## Constant to by add to the estimated resolution
#""" The meaning of this field depends on the previous flag.
# If set to true, then the volume will be filtered at a frequency equal to
# the resolution computed with resolution_fsc (FSC=0.5) plus the value
# provided in this field
# If set to false, the volume will be filtered at the resolution
# provided in this field
# This value is in digital frequency, or pixel^-1: minimum 0, maximum 0.5
#
# You can specify this option for each iteration.
# This can be done with a sequence of numbers (for instance, ".15 .15 .1 .1"
# specifies 4 iterations, the first two setting the constant to 0.15
# and the last two to 0.1). An alternative compact notation
# is "2x.15 2x0.1", i.e.,
# two iterations with value 0.15 and two with value 0.1.
# Note: if there are fewer values than iterations, the last value is reused
# Note: if there are more values than iterations, the extra values are ignored
#"""
#ConstantToAddToFiltration='0.1'
#
##------------------------------------------------------------------------------------------------
## {section} Parallelization issues
##------------------------------------------------------------------------------------------------
## Number of (shared-memory) threads?
#""" This option provides shared-memory parallelization on multi-core machines.
# It does not require any additional software, other than xmipp
#"""
#NumberOfThreads=1
#
## distributed-memory parallelization (MPI)?
#""" This option provides distributed-memory parallelization on multi-node machines.
# It requires the installation of some MPI flavour, possibly together with a queueing system
#"""
#DoParallel=False
#
## Number of MPI processes to use:
#NumberOfMpiProcesses=5
#
## Minimum size of jobs per MPI process. Set to 1 for large images (e.g. 500x500) and to 10 for small images (e.g. 100x100)
#MpiJobSize='10'
#
## MPI system Flavour
#""" Depending on your queuing system and your mpi implementation, different mpirun-like commands have to be given.
# Ask the person who installed your xmipp version, which option to use.
# Or read: http://xmipp.cnb.csic.es/twiki/bin/view/Xmipp/ParallelPage. The following values are available:
#"""
#SystemFlavour=''
#
##------------------------------------------------------------------------------------------------
## {expert} Analysis of results
#""" This script serves only for GUI-assisted visualization of the results
#"""
#AnalysisScript='visualize_projmatch.py'
#SelFileName='partlist.doc'
#DocFileName=''
#ReferenceFileName='volume'
#WorkingDir='ProjMatch/run4'
#DoDeleteWorkingDir=True
#NumberofIterations=10
#ContinueAtIteration=1
#CleanUpFiles=False
#ProjectDir='/ami/data00/appion/11jul06a/testing-xmipp-commands'
#LogDir='Logs'
#ModelNumbers=2
#DoCtfCorrection=False
#CTFDatName='all_images.ctfdat'
#DoAutoCtfGroup=True
#CtfGroupMaxDiff=0.5
#CtfGroupMaxResol=15
#SplitDefocusDocFile=''
#PaddingFactor=1.
#WienerConstant=-1
#DataArePhaseFlipped=True
#ReferenceIsCtfCorrected=True
#DoMask=False
#DoSphericalMask=False
#MaskRadius=32
#MaskFileName='mask.vol'
#DoProjectionMatching=True
#DisplayProjectionMatching=False
#InnerRadius=0
#OuterRadius=64
#AvailableMemory=2
#AngSamplingRateDeg='4x30 2x5 2x3 2x2'
#MaxChangeInAngles='4x1000 2x20 2x9 2x6'
#PerturbProjectionDirections=False
#MaxChangeOffset='1000 '
#Search5DShift='4x5 0'
#Search5DStep='2'
#DoRetricSearchbyTiltAngle=False
#Tilt0=40
#TiltF=90
#SymmetryGroup='c6'
#SymmetryGroupNeighbourhood=''
#OnlyWinner=False
#MinimumCrossCorrelation='-1'
#DiscardPercentage='10'
#ProjMatchingExtra=''
#DoAlign2D='0'
#Align2DIterNr=4
#Align2dMaxChangeOffset='2x1000 2x10'
#Align2dMaxChangeRot='2x1000 2x20'
#DoReconstruction=True
#DisplayReconstruction=False
#ReconstructionMethod='fourier'
#ARTLambda='0.2'
#ARTReconstructionExtraCommand='-k 0.5 -n 10 '
#FourierMaxFrequencyOfInterest='0.25'
#WBPReconstructionExtraCommand=' '
#FourierReconstructionExtraCommand=' '
#DoComputeResolution=True
#DoSplitReferenceImages=True
#ResolSam=9.6
#DisplayResolution=False
#DoLowPassFilter=True
#UseFscForFilter=True
#ConstantToAddToFiltration='0.1'
#NumberOfThreads=1
#DoParallel=False
#NumberOfMpiProcesses=5
#MpiJobSize='10'
#SystemFlavour=''
#AnalysisScript='visualize_projmatch.py'
#NumberofIterations=5
#ContinueAtIteration=1
#ModelNumbers=2
#DoMask=False
#DoSphericalMask=True
#MaskRadius=30
#ReferenceFileName='volume'
#MaskFileName='mask.vol'
#AngSamplingRateDeg=20
#DoRetricSearchbyTiltAngle=False
#Tilt0='-15'
#TiltF='15'
#MaxChangeOffset='1000'
#MaxChangeInAngles='4x1000 2x20 2x9 2x6'
#MinimumCrossCorrelation='-1'
#DiscardPercentage=10
#InnerRadius=0
#OuterRadius=64
#Search5DShift='4x5 0'
#Search5DStep=2
#AvailableMemory=2
#ResolSam='5.5'
#SelFileName='partlist.doc'
#DocFileName=''
#DataArePhaseFlipped=True
#ReferenceIsCtfCorrected=True
#WorkingDir='MultiModel/run2'
#ProjectDir='/ami/data00/appion/11jul06a/xmipp-multimodel'
#LogDir='Logs'
#DoParallel=True
#NumberOfMpiProcesses=5
#SystemFlavour=''
#MpiJobSize=10
#NumberOfThreads=1
#SymmetryGroup='c6'
#SymmetryGroupNeighbourhood=''
#OnlyWinner=True
#DoLowPassFilter=True
#UseFscForFilter=True
#ConstantToAddToFiltration='0.1'
#
#DoDeleteWorkingDir=True
#CleanUpFiles=False
#DoProjectionMatching=True
#DisplayProjectionMatching=False
#PerturbProjectionDirections=False
#ProjMatchingExtra=''
#DoAlign2D=False
#Align2DIterNr=4
#Align2dMaxChangeOffset='2x1000 2x10'
#Align2dMaxChangeRot='2x1000 2x20'
#DisplayReconstruction=False
#DisplayResolution=False
#DoReconstruction=True
#ReconstructionMethod='fourier'
#ARTLambda='0.2'
#ARTReconstructionExtraCommand='-k 0.5 -n 10 '
#WBPReconstructionExtraCommand=''
#FourierReconstructionExtraCommand=''
#FourierMaxFrequencyOfInterest='0.25'
#DoComputeResolution=True
#DoSplitReferenceImages=True
#DoCtfCorrection=False
#CTFDatName=''
#WienerConstant=-1
#DoAutoCtfGroup=True
#CtfGroupMaxDiff=0.5
#CtfGroupMaxResol=15
#SplitDefocusDocFile=''
#PaddingFactor=1
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# {end-of-header} USUALLY YOU DO NOT NEED TO MODIFY ANYTHING BELOW THIS LINE ...
#-----------------------------------------------------------------------------
#Do not change these variables
ReferenceVolumeName='reference_volume.vol'
LibraryDir = "ReferenceLibrary"
ProjectLibraryRootName= LibraryDir + "/ref"
ProjMatchDir = "ProjMatchClasses"
ProjMatchName = 'proj_match'
ProjMatchRootName= ProjMatchDir + "/" + ProjMatchName
ForReconstructionSel="reconstruction.sel"
ForReconstructionDoc="reconstruction.doc"
MultiAlign2dSel="multi_align2d.sel"
DocFileWithOriginalAngles='original_angles.doc'
docfile_with_current_angles='current_angles.doc'
FilteredReconstruction="filtered_reconstruction"
ReconstructedVolume="reconstruction"
OutputFsc="resolution.fsc"
CtfGroupDirectory="CtfGroups"
CtfGroupRootName="ctf"
CtfGroupSubsetFileName="ctf_groups_subset_docfiles.sel"
class projection_matching_class:
#init variables
def __init__(self,
_NumberofIterations,
_ContinueAtIteration,
_CleanUpFiles,
_DoMask,
_ModelNumbers,
_DoSphericalMask,
_MaskRadius,
_ReferenceFileName,
_MaskFileName,
_DoProjectionMatching,
_DisplayProjectionMatching,
_AngSamplingRateDeg,
_PerturbProjectionDirections,
_DoRetricSearchbyTiltAngle,
_Tilt0,
_TiltF,
_ProjMatchingExtra,
_MaxChangeOffset,
_MaxChangeInAngles,
_MinimumCrossCorrelation,
_DiscardPercentage,
_DoAlign2D,
_InnerRadius,
_OuterRadius,
_Search5DShift,
_Search5DStep,
_AvailableMemory,
_Align2DIterNr,
_Align2dMaxChangeOffset,
_Align2dMaxChangeRot,
_DisplayReconstruction,
_DisplayResolution,
_DoReconstruction,
_ReconstructionMethod,
_ARTLambda,
_ARTReconstructionExtraCommand,
_WBPReconstructionExtraCommand,
_FourierReconstructionExtraCommand,
_FourierMaxFrequencyOfInterest,
_DoComputeResolution,
_DoSplitReferenceImages,
_ResolSam,
_SelFileName,
_DocFileName,
_DoCtfCorrection,
_CTFDatName,
_WienerConstant,
_DoAutoCtfGroup,
_CtfGroupMaxDiff,
_CtfGroupMaxResol,
_SplitDefocusDocFile,
_PaddingFactor,
_DataArePhaseFlipped,
_ReferenceIsCtfCorrected,
_WorkingDir,
_ProjectDir,
_LogDir,
_DoParallel,
_MyNumberOfMpiProcesses,
_MySystemFlavour,
_MyMpiJobSize,
_MyNumberOfThreads,
_SymmetryGroup,
_SymmetryGroupNeighbourhood,
_OnlyWinner,
_DoLowPassFilter,
_UseFscForFilter,
_ConstantToAddToFiltration
):
# Import libraries and add Xmipp libs to default search path
import os,sys,shutil
scriptdir=os.path.split(os.path.dirname(os.popen('which xmipp_protocols','r').read()))[0]+'/protocols'
sys.path.append(scriptdir)
import arg,log,logging,selfile
import launch_job
import glob
import subprocess
self._CleanUpFiles=_CleanUpFiles
self._WorkingDir=os.getcwd()+'/'+_WorkingDir
self._SelFileName=_SelFileName
self._ModelNumbers=_ModelNumbers
print self._SelFileName
selfile_without_ext=(os.path.splitext(str(os.path.basename(self._SelFileName))))[0]
self._ReferenceFileName=os.path.abspath(_ReferenceFileName)
self._MaskFileName=os.path.abspath(_MaskFileName)
self._DoMask=_DoMask
self._DoSphericalMask=_DoSphericalMask
self._MaskRadius=_MaskRadius
self._DoProjectionMatching=_DoProjectionMatching
self._DisplayProjectionMatching=_DisplayProjectionMatching
self._DoRetricSearchbyTiltAngle=_DoRetricSearchbyTiltAngle
self._PerturbProjectionDirections=_PerturbProjectionDirections
self._Tilt0=_Tilt0
self._TiltF=_TiltF
self._ProjMatchingExtra=_ProjMatchingExtra
self._ProjectDir=_ProjectDir
self._InnerRadius=_InnerRadius
self._AvailableMemory=_AvailableMemory
self._Align2DIterNr=_Align2DIterNr
self._DisplayReconstruction=_DisplayReconstruction
self._DisplayResolution=_DisplayResolution
self._DoReconstruction=_DoReconstruction
self._DoComputeResolution=_DoComputeResolution
self._DoSplitReferenceImages=_DoSplitReferenceImages
self._ResolSam=_ResolSam
self._DoCtfCorrection=_DoCtfCorrection
self._WienerConstant=_WienerConstant
self._DoAutoCtfGroup=_DoAutoCtfGroup
self._CtfGroupMaxDiff=_CtfGroupMaxDiff
self._CtfGroupMaxResol=_CtfGroupMaxResol
self._SplitDefocusDocFile =''
if(len(_SplitDefocusDocFile) > 1):
self._SplitDefocusDocFile=os.path.abspath(_SplitDefocusDocFile)
self._DocFileName =''
if(len(_DocFileName) > 1):
self._DocFileName=os.path.abspath(_DocFileName)
        self._PaddingFactor=_PaddingFactor
self._DataArePhaseFlipped=_DataArePhaseFlipped
self._DoParallel=_DoParallel
self._MyNumberOfMpiProcesses=_MyNumberOfMpiProcesses
self._SymmetryGroup=_SymmetryGroup
self._SymmetryGroupNeighbourhood=_SymmetryGroupNeighbourhood
self._OnlyWinner=_OnlyWinner
self._ARTReconstructionExtraCommand=_ARTReconstructionExtraCommand
self._WBPReconstructionExtraCommand=_WBPReconstructionExtraCommand
self._FourierReconstructionExtraCommand=_FourierReconstructionExtraCommand
self._DoLowPassFilter=_DoLowPassFilter
self._UseFscForFilter=_UseFscForFilter
# if we are not starting at the first iteration
# globalFourierMaxFrequencyOfInterest must be computed
        # until I fix this properly, let us set it at max_frequency,
if _ContinueAtIteration==1:
globalFourierMaxFrequencyOfInterest=float(_FourierMaxFrequencyOfInterest)
else:
globalFourierMaxFrequencyOfInterest=0.5
self._MySystemFlavour=_MySystemFlavour
self._MyMpiJobSize =_MyMpiJobSize
self._MyNumberOfThreads =_MyNumberOfThreads
self._user_suplied_ReferenceVolume=self._ReferenceFileName
# Set up logging
self._mylog=log.init_log_system(_ProjectDir,
_LogDir,
sys.argv[0],
_WorkingDir)
# Uncomment next line to get Debug level logging
#self._mylog.setLevel(logging.DEBUG)
self._mylog.debug("Debug level logging enabled")
#input files should exists
# check_file_exists(_ReferenceFileName,self._mylog)
_NumberofIterations +=1;
if _ContinueAtIteration!=1 and DoDeleteWorkingDir==True:
print "You can not delete the working directory"
print " and start at iteration", _ContinueAtIteration
exit(1)
if (DoDeleteWorkingDir):
delete_working_directory(self._mylog,self._WorkingDir)
else:
self._mylog.info("Skipped DoDeleteWorkingDir")
if ReconstructionMethod!='fourier' and not _DoSplitReferenceImages:
print "DoSplitReferenceImages must be set to True if"
print " reconstruction method is not fourier"
exit(1)
create_working_directory(self._mylog,self._WorkingDir)
log.make_backup_of_script_file(sys.argv[0],self._WorkingDir)
# Create a selfile with absolute pathname in the WorkingDir
mysel=selfile.selfile()
mysel.read(_SelFileName)
newsel=mysel.make_abspath()
self._SelFileName=os.path.abspath(self._WorkingDir + '/' + _SelFileName)
newsel.write(self._SelFileName)
# For ctf groups, also create a CTFdat file with absolute pathname in the WorkingDir
if (self._DoCtfCorrection):
import ctfdat
myctfdat=ctfdat.ctfdat()
myctfdat.read(_CTFDatName)
newctfdat=myctfdat.make_abspath()
self._CTFDatName=os.path.abspath(self._WorkingDir + '/' + _CTFDatName)
newctfdat.write(self._CTFDatName)
# Set self._OuterRadius
if (_OuterRadius < 0):
xdim,ydim=newsel.imgSize()
self._OuterRadius = (xdim/2) - 1
comment = " Outer radius set to: " + str(self._OuterRadius)
print '* ' + comment
self._mylog.info(comment)
else:
self._OuterRadius=_OuterRadius
# Create a docfile with the current angles in the WorkingDir
if (self._DocFileName==''):
params=' -i ' + self._SelFileName + \
' -o ' + self._WorkingDir + '/' + \
DocFileWithOriginalAngles
launch_job.launch_job("xmipp_header_extract",
params,
self._mylog,
False,1,1,'')
else:
command = "copy" , self._DocFileName , self._WorkingDir + '/' + DocFileWithOriginalAngles
self._mylog.info(command)
shutil.copy(self._DocFileName, self._WorkingDir + '/' + DocFileWithOriginalAngles)
# Change to working dir
os.chdir(self._WorkingDir)
self._SelFileName=self._WorkingDir+'/'+\
str(os.path.basename(self._SelFileName))
# Make CTF groups
if (self._DoCtfCorrection):
self._NumberOfCtfGroups=execute_ctf_groups(self._mylog,
self._SelFileName,
self._CTFDatName,
self._PaddingFactor,
self._DataArePhaseFlipped,
self._WienerConstant,
self._DoAutoCtfGroup,
self._CtfGroupMaxDiff,
self._CtfGroupMaxResol,
self._SplitDefocusDocFile)
else:
self._NumberOfCtfGroups=1
       ##
       ## LOOP
       ##
       # output of the reconstruction cycle;
       # the first value is given by the user
       # these names are the input of the mask program;
       # in general, they are the output of the reconstruction plus filtering
self._ReconstructedVolume=[]
fill_name_vector("",
self._ReconstructedVolume,
_NumberofIterations,
ReconstructedVolume)
self._ReconstructedandfilteredVolume=[]
fill_name_vector(self._user_suplied_ReferenceVolume,
self._ReconstructedandfilteredVolume,
_NumberofIterations,
FilteredReconstruction)
# Optimal angles from previous iteration or user-provided at the beginning
self._DocFileInputAngles=[]
fill_name_vector('../'+DocFileWithOriginalAngles,
self._DocFileInputAngles,
_NumberofIterations+1,
docfile_with_current_angles)
       # The reconstructed and filtered volume of iteration n-1 is, after masking, called the reference volume
self._ReferenceVolume=[]
fill_name_vector("",
self._ReferenceVolume,
_NumberofIterations,
ReferenceVolumeName)
for _iteration_number in range(_ContinueAtIteration, _NumberofIterations):
debug_string = "ITERATION: " + str(_iteration_number)
print "*", debug_string
self._mylog.info(debug_string)
# Never allow DoAlign2D and DoCtfCorrection together
if (int(arg.getComponentFromVector(_DoAlign2D,_iteration_number))==1 and
self._DoCtfCorrection):
error_message="You cannot realign classes AND perform CTF-correction. Switch either of them off!"
self._mylog.error(error_message)
print error_message
exit(1)
# Create working dir for this iteration and go there
Iteration_Working_Directory=self._WorkingDir+'/Iter_'+\
str(_iteration_number)
create_working_directory(self._mylog,Iteration_Working_Directory)
os.chdir(Iteration_Working_Directory)
# Mask reference volume
execute_mask(self._DoMask,
self._mylog,
self._ProjectDir,
self._ReconstructedandfilteredVolume[_iteration_number],#in
self._MaskFileName,
self._DoSphericalMask,
self._MaskRadius,
_iteration_number,
self._ReferenceVolume[_iteration_number])#out
if (_DoProjectionMatching):
# Parameters for projection matching
self._AngSamplingRateDeg=arg.getComponentFromVector(_AngSamplingRateDeg,\
_iteration_number-1)
self._MaxChangeOffset=arg.getComponentFromVector(_MaxChangeOffset,\
_iteration_number-1)
self._MaxChangeInAngles=arg.getComponentFromVector(_MaxChangeInAngles,\
_iteration_number-1)
self._Search5DShift=arg.getComponentFromVector(_Search5DShift,\
_iteration_number-1)
self._Search5DStep=arg.getComponentFromVector(_Search5DStep,\
_iteration_number-1)
self._MinimumCrossCorrelation=arg.getComponentFromVector(_MinimumCrossCorrelation,\
_iteration_number-1)
self._DiscardPercentage=arg.getComponentFromVector(_DiscardPercentage,\
_iteration_number-1)
self._DoAlign2D=arg.getComponentFromVector(_DoAlign2D,\
_iteration_number-1)
self._Align2dMaxChangeOffset=arg.getComponentFromVector(_Align2dMaxChangeOffset,\
_iteration_number-1)
self._Align2dMaxChangeRot=arg.getComponentFromVector(_Align2dMaxChangeRot,\
_iteration_number-1)
# Initial reference is CTF-amplitude corrected?
if ( (_iteration_number == 1) and (_ReferenceIsCtfCorrected==False) ):
self._ReferenceIsCtfCorrected=False
else:
self._ReferenceIsCtfCorrected=True
execute_projection_matching(self._mylog,
self._ProjectDir,
self._ReferenceVolume[_iteration_number],
self._MaskFileName,
self._ModelNumbers,
self._DocFileInputAngles[_iteration_number],
self._DocFileInputAngles[_iteration_number+1],
self._DoCtfCorrection,
self._NumberOfCtfGroups,
self._WienerConstant,
self._PaddingFactor,
self._ReferenceIsCtfCorrected,
self._AngSamplingRateDeg,
self._PerturbProjectionDirections,
self._DoRetricSearchbyTiltAngle,
self._Tilt0,
self._TiltF,
self._InnerRadius,
self._OuterRadius,
self._Search5DShift,
self._Search5DStep,
self._MaxChangeOffset,
self._MaxChangeInAngles,
self._ProjMatchingExtra,
self._MinimumCrossCorrelation,
self._DiscardPercentage,
self._DisplayProjectionMatching,
self._DoParallel,
self._MyNumberOfMpiProcesses,
self._MyNumberOfThreads,
self._MySystemFlavour,
self._MyMpiJobSize,
self._WorkingDir,
self._SymmetryGroup,
self._SymmetryGroupNeighbourhood,
self._OnlyWinner,
self._AvailableMemory,
self._DoComputeResolution,
self._DoSplitReferenceImages,
self._DoAlign2D,
self._Align2DIterNr,
self._Align2dMaxChangeOffset,
self._Align2dMaxChangeRot,
_iteration_number)
else:
self._mylog.info("Skipped ProjectionMatching")
# Make a new selfile excluding the images that were possibly discarded by the user
command='cat ' + MultiAlign2dSel + \
' | grep -v ' + ProjectLibraryRootName + \
' | grep -v ref.xmp ' + \
' | grep -v \ -1 >' + ForReconstructionSel
self._mylog.info(command)
os.system(command)
self._ReconstructionMethod=arg.getComponentFromVector(_ReconstructionMethod,\
_iteration_number-1)
self._ARTLambda=arg.getComponentFromVector(_ARTLambda,\
_iteration_number-1)
if (_DoReconstruction):
execute_reconstruction(self._mylog,
self._ARTReconstructionExtraCommand,
self._WBPReconstructionExtraCommand,
self._FourierReconstructionExtraCommand,
_iteration_number,
self._DisplayReconstruction,
self._DoParallel,
self._MyNumberOfMpiProcesses,
self._MyNumberOfThreads,
self._MySystemFlavour,
self._MyMpiJobSize,
self._ReconstructionMethod,
globalFourierMaxFrequencyOfInterest,
self._ARTLambda,
self._SymmetryGroup,
self._ReconstructedVolume[_iteration_number],
self._DoComputeResolution,
self._DoSplitReferenceImages,
self._PaddingFactor
)
else:
self._mylog.info("Skipped Reconstruction")
if (_DoComputeResolution):
filter_frequence=execute_resolution(self._mylog,
self._ARTReconstructionExtraCommand,
self._WBPReconstructionExtraCommand,
self._FourierReconstructionExtraCommand,
self._ReconstructionMethod,
globalFourierMaxFrequencyOfInterest,
_iteration_number,
self._DisplayReconstruction,
self._ResolSam,
self._DoParallel,
self._MyNumberOfMpiProcesses,
self._MyNumberOfThreads,
self._MySystemFlavour,
self._MyMpiJobSize,
self._SymmetryGroup,
self._DisplayResolution,
self._ReconstructedVolume[_iteration_number],
self._ARTLambda,
self._OuterRadius,
self._DoSplitReferenceImages,
self._PaddingFactor
)
else:
filter_frequence=0
self._mylog.info("Skipped Resolution calculation")
| |
from threading import Thread
import pandas
import matplotlib.pyplot as plt
import shutil
import os
import subprocess
import time
from django.template import loader
import signal
from django.http import HttpResponse
from django.shortcuts import render, redirect
import re
threads=[]
stop=False
proc1 = None
live_flag=0
live_n=None
live_p=None
live_number_of_lines=None
live_pattern=None
req = None
n_global=None
p_global=None
running_status=0
def IndexView(request):
global stop
global threads
global live_flag
global live_n
global live_p
global live_number_of_lines
global live_pattern
global req
global n_global
global proc1
global p_global
global running_status
variable=0
variable2=0
data=""
live_flag=0
if live_flag ==1:
if ("Live_Stop" in request.POST):
live_flag = 0
template = loader.get_template('index.html')
context = {'variable': variable, 'data': data, 'variable2': variable2,
}
return HttpResponse(template.render(context, request))
data2=[]
list=[]
time.sleep(1)
live(live_n, live_p, live_number_of_lines, "a")
print("<NAME>")
variable = 0
variable2 = 1
for i in range(live_n):
f = open("/anode%d" % (i + 1), "r")
list.append(i)
data = f.read()
data2.append((data, i))
print(data2)
print(len(data2))
template = loader.get_template('index.html')
context = {'variable': variable, 'data': data, 'data2': data2, 'variable2': variable2, 'list': list,
}
return HttpResponse(template.render(context, request))
else:
if request.method == "POST":
print(request)
if request.POST["value_n"] :
value_n = request.POST["value_n"]
if value_n != "":
value_n = int(value_n)
n_global=value_n
print(value_n)
if request.POST["value_p"]:
value_p = request.POST["value_p"]
if value_p != "":
value_p = int(value_p)
p_global=value_p
print(value_p)
if ("Start" in request.POST) :
process = Thread(target=run_script, args=[value_n, value_p])
process.start()
threads.append(process)
print(threads)
running_status=1
if ("Stop" in request.POST) and (live_flag != 1):
while(os.path.isdir('log_simulator')):
os.system('rm -rf log_simulator')
print("####################STOPPED#######################")
stop = False
for process in threads:
process.join()
running_status=0
if ("Print" in request.POST):
n_val = int(request.POST["n"])
p_val = int(request.POST["p"])
pattern = request.POST["pattern"]
number_lines = int(request.POST["number_of_lines"])
headTen(n_val, p_val, number_lines, pattern, "/home/harsh/PycharmProjects/CloudInit/log.txt")
f = open("log.txt", "r")
data = f.read()
variable = 1
data2 = []
list = []
template = loader.get_template('index.html')
context = {'variable': variable, 'data': data, 'data2': data2, 'variable2': variable2, 'list': list, 'running_status':running_status,
}
return HttpResponse(template.render(context, request))
else:
template = loader.get_template('index.html')
context = {'variable': variable, 'data': data, 'variable2': variable2, 'running_status':running_status,
}
return HttpResponse(template.render(context, request))
def LiveView(request):
global live_flag
global live_n
global live_p
global live_number_of_lines
global live_pattern
global n_global
variable2=0
if live_flag ==1:
if ("Live_Stop" in request.POST):
live_flag = 0
template = loader.get_template('live.html')
context = {'variable2': variable2,
}
return HttpResponse(template.render(context, request))
data2=[]
list=[]
time.sleep(1)
live(live_n, live_p, live_number_of_lines, "a")
variable2 = 1
for i in range(live_n):
df = pandas.read_csv("anode%d.csv"%(i+1), sep=',')
data = df.to_html()
list.append(i)
data2.append((data, i))
template = loader.get_template('live.html')
context = {'data2': data2, 'variable2': variable2, 'list': list,
}
return HttpResponse(template.render(context, request))
else:
if request.method == "POST":
data2 = []
list = []
if ("Print_live" in request.POST):
live_n = n_global
live_p = int(request.POST["p_live"])
live_number_of_lines = int(request.POST["live_number_of_lines"])
live_flag = 1
if live_flag == 1:
time.sleep(1)
live(live_n, live_p, live_number_of_lines, "a")
variable = 0
variable2 = 1
for i in range(live_n):
df = pandas.read_csv("anode%d.csv" % (i + 1), sep=',')
data = df.to_html()
list.append(i)
data2.append((data, i))
template = loader.get_template('live.html')
context = { 'data2': data2, 'variable2': variable2, 'list': list,
}
return HttpResponse(template.render(context, request))
template = loader.get_template('live.html')
context = { 'data2': data2, 'variable2': variable2, 'list': list,
}
return HttpResponse(template.render(context, request))
else:
template = loader.get_template('live.html')
context = {'variable2': variable2,
}
return HttpResponse(template.render(context, request))
def TimeView(request):
variable3 = 0
data_time = ""
global n_global
print(request.POST)
if (request.method == "POST"):
n_time = int(request.POST['n_time'])
p_time = int(request.POST['p_time'])
start = request.POST['date_start']
end = request.POST['date_end']
live(n_global,p_time,1000,"a")
TimeData(n_time, p_time, start, end)
df = pandas.read_csv("time.csv", sep=',')
data_time = df.to_html()
variable3 = 1
template = loader.get_template('time.html')
context = {'variable3': variable3, 'data_time': data_time,
}
return HttpResponse(template.render(context, request))
else:
template = loader.get_template('time.html')
context = {'variable3': variable3, 'data_time': data_time,
}
return HttpResponse(template.render(context, request))
def TimelineView(request):
global n_global
global p_global
variable=0
data_timeline =[]
if(request.method == 'POST'):
number_of_lines = int(request.POST['number_of_lines'])
timeline(n_global,p_global,number_of_lines)
df = pandas.read_csv("timeline.csv", sep=',')
data_timeline = df.to_html()
variable=1
print(data_timeline)
template = loader.get_template('timeline.html')
context = {'variable':variable, 'data_timeline':data_timeline}
return HttpResponse(template.render(context,request))
else:
variable=0
data_timeline=[]
template = loader.get_template('timeline.html')
context = {'variable':variable, 'data_timeline':data_timeline}
return HttpResponse(template.render(context,request))
def GraphView(request):
variable=0
global n_global
if (request.method == "POST"):
variable=1
n_graph = n_global
p_graph = int(request.POST['p_graph'])
num_graph = int(request.POST['num_graph'])
search = request.POST['search']
process_counts(n_graph, p_graph, num_graph, search)
list=[]
data=[]
for i in range(n_graph):
list.append((i,i+1))
data.append(("plotNode_%d.png"%(i+1),"plotNode_pie_%d.png"%(i+1),i))
print(data)
print(list)
template = loader.get_template('graph.html')
context = {'variable': variable,
'list':list,
'data':data
}
return HttpResponse(template.render(context, request))
else:
template = loader.get_template('graph.html')
context = {'variable': variable,
}
return HttpResponse(template.render(context, request))
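# Note (added for clarity): timeline(), process_counts() and live() below rely on a
# tail() helper that is defined elsewhere in this module and is assumed to return the
# last `num` lines of a log file. A minimal sketch of that assumed behaviour, kept as
# a comment so it does not shadow the real implementation:
#     def tail(filename, num):
#         with open(filename) as f:
#             return f.readlines()[-num:]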
def timeline(n,p,num):
    file_path2="log_simulator/"
    # write the CSV header once up front; re-opening "timeline.csv" with mode "w"
    # inside the loops would truncate the file on every node/process pair, leaving
    # only the data of the last pair
    FR=open("timeline.csv","w")
    FR.write("Date,Time,Node,Process,Tag,File,Exception"+"\n")
    FR.close()
    i=1
    while i<=n:
        j=1
        while j<=p:
            filename=file_path2+'HackNode'+str(i)+"/Process"+str(j)+".log"
            lines="".join(tail(filename,num)).split('\n')
            FR=open("timeline.csv","a+")
            for line in lines :
                l=line.split()
                # if line.find(keyword):
                #     print(line)
                if (re.match("^\d",line)!=None):
                    # print(l)
                    FR.write(l[0] + " , "+str(l[1])+ " , " + str(i)+ " , " +str(j) + " , "+l[2]+" , "+l[3]+" , "+" ".join(l[4:])+"\n")
                    print(str(l[0]) + "| "+str(l[1])+ "| NODE :" + str(i)+ "| PROCESS :" +str(j) + "| MATCH LINE:"+str(" ".join(l[2:]))+"\n")
            FR.close()
            j+=1
        i+=1
def process_counts(n, process, num, extra):
i = 1
extra = extra.split(',')
file_path1='log_simulator/'
while i <= n:
filename = file_path1 + 'HackNode' + str(i) + "/Process" + str(process) + ".log"
count_info = 0
count_dbg = 0
count_error = 0
string = "".join(tail(filename, num))
extra_y = [0] * len(extra)
for line in string.split('\n'):
print(line)
if line.find('DBG') >= 0:
count_dbg += 1
elif line.find('ERROR') >= 0:
count_error += 1
elif line.find('INFO') >= 0:
count_info += 1
for j in range(0, len(extra)):
if re.search(extra[j], line, re.I):
extra_y[j] += 1
x = ["INFO", "DBG", "ERROR"] + extra
y = [count_info, count_dbg, count_error] + extra_y
barplot(x, y, i, num)
i += 1
def barplot(x, y, i, num):
# my_color=tuple([round(i/(len(x)+1),1) for i in range(1,len(x)+1)])
# print(my_color)
plt.bar(x, y)
plt.xlabel('Category')
plt.ylabel('Number of occurrence in last ' + str(num) + ' logs in node ' + str(i))
plt.savefig('media/plotNode_' + str(i))
plt.close()
plt.pie(y, labels=x)
plt.savefig('media/plotNode_pie_' + str(i))
plt.close()
def run_script(n,p):
global proc1
proc1 = subprocess.Popen("python2 log_simulator.zip -n %d -p %d" %(n,p), shell=True)
def headTen(node, process, num, pattern, outputfilename):
    # write the first `num` log lines containing `pattern` to outputfilename
    filename = 'log_simulator/HackNode' + str(node) + "/Process" + str(process) + ".log"
    FO = open(filename, 'r')
    FR = open(outputfilename, 'w')
    count = 0
    while count < num:
        loglines = FO.readline()
        if not loglines:
            # reached end of file before finding `num` matching lines
            break
        if loglines.find(pattern) >= 0:
            FR.write(loglines)
            count += 1
    FO.close()
    FR.close()
# def live(n,process,num,pattern,outputfilename):
# file_path1 = '/home/harsh/PycharmProjects/CloudInit/log_simulator/'
# delay=0
# time.sleep(delay)
# i=1
# while i <= n:
# filename=file_path1+'HackNode'+str(i)+"/Process"+str(process)+".log"
# FR=open(outputfilename+"node"+str(i),'w')
# to_print = "".join(tail(filename,num))
# if re.search(pattern, to_print, re.I):
# FR.write(to_print)
# print(to_print)
# FR.close()
# i+=1
# def live(n, process, num, outputfilename):
# file_path1 = '/home/harsh/PycharmProjects/CloudInit/log_simulator/'
# delay = 0.1
# time.sleep(delay)
# i = 1
# while i <= n:
# filename = file_path1 + 'HackNode' + str(i) + "/Process" + str(process) + ".log"
# FR = open(outputfilename + "node" + str(i)+".csv", 'w')
# FR.write("Date,Timestamp,Tags,File,Exception" + "\n")
# FR.close()
# FR = open(outputfilename + "node" + str(i)+".csv", 'a+')
# to_print = "".join(tail(filename, num))
# to_print = to_print.split("\n")
# count = 0
# flag = 0
# for x in to_print:
# count += 1
# if ((re.match("^\d", x) == None)):
# if (x.split(" ")[0] == "Traceback"):
# flag = 4
#
# print(x)
# flag -= 1
# else:
# if (count > num):
# continue
# t = x.split(" ")
# a = " ".join(t[4:])
# b = ",".join(t[0:4])
# toprint = b + "," + a
# if (count != num):
# FR.write(toprint + "\n")
# # print(toprint)
# else:
# FR.write(toprint)
# # print(toprint)
#
# # to_print[5]= " ".join(to_print[5:])
# # to_print = ",".join(to_print[0:5])
# # print(to_print)
# print("\n")
# # if re.search(pattern, to_print, re.I):
# # FR.write(to_print)
# # print(to_print)
# FR.close()
# # with open(outputfilename+"node"+str(i),'r') as infile, open(outputfilename+"node_"+str(i), 'a+') as outfile:
# # for line in infile:
# # outfile.write(" ".join(line.split()).replace(' ', ','))
# # outfile.write(",")
# i += 1
def live(n, process, num, outputfilename):
file_path1 = 'log_simulator/'
delay = 0.01
time.sleep(delay)
i = 1
while i <= n:
filename = file_path1 + 'HackNode' + str(i) + "/Process" + str(process) + ".log"
FR = open(outputfilename + "node" + str(i) + '.csv', 'w')
FR.write("Date,Timestamp,Tags,File,Exception" + "\n")
FR.close()
FR = open(outputfilename + "node" + str(i) + '.csv', 'a+')
to_print = "".join(tail(filename, num))
to_print = to_print.split("\n")
count = 0
for x in to_print:
count += 1
if ((re.match("^\d", x) == None)):
if (re.match("^\s", x) != None):
y = x.split(",")
print(" - , - ," + y[0] + "," + y[1] + ", Traceback" + y[2])
FR.write(" - , - ," + y[0] + "," + y[1] + ", Traceback" + y[2] +"\n")
# print("-,-,"+x)
elif (x.split(" ")[0] == "Traceback"):
continue
else:
y = x.split(":")
if (len(y) > 1):
FR.write(" - , - , " + y[0] + " , - ," + y[1] + "\n")
print(" - , - , " + y[0] + " , - ," + y[1])
else:
if (count > | |
TexMobject("\\over \\,")
frac_line.stretch_to_fit_width(numerator.get_width())
frac_line.next_to(numerator, DOWN)
denominator = TextMobject("(Num. samples)")
denominator.next_to(frac_line, DOWN)
self.play(ShowCreation(v_lines, run_time = 3))
self.wait()
self.play(
ReplacementTransform(
v_lines.copy(),
summed_v_lines,
run_time = 3,
submobject_mode = "lagged_start"
),
Write(
plusses,
rate_func = squish_rate_func(smooth, 0.3, 1)
)
)
self.play(Write(frac_line, run_time = 1))
self.play(Write(denominator))
self.wait()
self.plusses = plusses
self.average = VGroup(numerator, frac_line, denominator)
self.v_lines = v_lines
###
def get_sample_lines(self, dx, color = YELLOW, stroke_width = 2):
return VGroup(*[
self.get_vertical_line_to_graph(
x, self.graph,
color = color,
stroke_width = stroke_width,
)
for x in np.arange(
self.bounds[0]+dx,
self.bounds[1],
dx
)
])
class FiniteSampleWithMoreSamplePoints(FiniteSample):
CONFIG = {
"dx" : 0.05
}
class FeelsRelatedToAnIntegral(TeacherStudentsScene):
def construct(self):
self.student_says(
"Seems integral-ish...",
target_mode = "maybe"
)
self.play(self.teacher.change_mode, "happy")
self.wait(2)
class IntegralOfSine(FiniteSample):
CONFIG = {
"thin_dx" : 0.01,
"rect_opacity" : 0.75,
}
def construct(self):
self.force_skipping()
FiniteSample.construct(self)
self.remove(self.y_axis_label_mob)
self.remove(*self.x_axis_labels[::2])
self.revert_to_original_skipping_status()
self.put_average_in_corner()
self.write_integral()
self.show_riemann_rectangles()
self.let_dx_approach_zero()
self.bring_back_average()
self.distribute_dx()
self.let_dx_approach_zero(restore = False)
self.write_area_over_width()
self.show_moving_v_line()
def put_average_in_corner(self):
self.average.save_state()
self.plusses.set_stroke(width = 0.5)
self.play(
self.average.scale, 0.75,
self.average.to_corner, DOWN+RIGHT,
)
def write_integral(self):
integral = TexMobject("\\int_0^\\pi", "\\sin(x)", "\\,dx")
integral.move_to(self.graph_portion_between_bounds)
integral.to_edge(UP)
self.play(Write(integral))
self.wait(2)
self.integral = integral
def show_riemann_rectangles(self):
kwargs = {
"dx" : self.dx,
"x_min" : self.bounds[0],
"x_max" : self.bounds[1],
"fill_opacity" : self.rect_opacity,
}
rects = self.get_riemann_rectangles(self.graph, **kwargs)
rects.set_stroke(YELLOW, width = 1)
flat_rects = self.get_riemann_rectangles(
self.get_graph(lambda x : 0),
**kwargs
)
thin_kwargs = dict(kwargs)
thin_kwargs["dx"] = self.thin_dx
thin_kwargs["stroke_width"] = 0
self.thin_rects = self.get_riemann_rectangles(
self.graph,
**thin_kwargs
)
start_index = 20
end_index = start_index + 5
low_opacity = 0.5
high_opacity = 1
start_rect = rects[start_index]
side_brace = Brace(start_rect, LEFT, buff = SMALL_BUFF)
bottom_brace = Brace(start_rect, DOWN, buff = SMALL_BUFF)
sin_x = TexMobject("\\sin(x)")
sin_x.next_to(side_brace, LEFT, SMALL_BUFF)
dx = bottom_brace.get_text("$dx$", buff = SMALL_BUFF)
self.transform_between_riemann_rects(
flat_rects, rects,
replace_mobject_with_target_in_scene = True,
)
self.remove(self.v_lines)
self.wait()
rects.save_state()
self.play(*it.chain(
[
ApplyMethod(
rect.set_style_data, BLACK, 1,
None, #Fill color
high_opacity if rect is start_rect else low_opacity
)
for rect in rects
],
list(map(GrowFromCenter, [side_brace, bottom_brace])),
list(map(Write, [sin_x, dx])),
))
self.wait()
for i in range(start_index+1, end_index):
self.play(
rects[i-1].set_fill, None, low_opacity,
rects[i].set_fill, None, high_opacity,
side_brace.set_height, rects[i].get_height(),
side_brace.next_to, rects[i], LEFT, SMALL_BUFF,
bottom_brace.next_to, rects[i], DOWN, SMALL_BUFF,
MaintainPositionRelativeTo(sin_x, side_brace),
MaintainPositionRelativeTo(dx, bottom_brace),
)
self.wait()
self.play(
rects.restore,
*list(map(FadeOut, [sin_x, dx, side_brace, bottom_brace]))
)
self.rects = rects
self.dx_brace = bottom_brace
self.dx_label = dx
def let_dx_approach_zero(self, restore = True):
start_state = self.rects.copy()
self.transform_between_riemann_rects(
self.rects, self.thin_rects,
run_time = 3
)
self.wait(2)
if restore:
self.transform_between_riemann_rects(
self.rects, start_state.copy(),
run_time = 2,
)
self.remove(self.rects)
self.rects = start_state
self.rects.set_fill(opacity = 1)
self.play(
self.rects.set_fill, None,
self.rect_opacity,
)
self.wait()
def bring_back_average(self):
num_samples = self.average[-1]
example_dx = TexMobject("0.1")
example_dx.move_to(self.dx_label)
input_range = Line(*[
self.coords_to_point(bound, 0)
for bound in self.bounds
])
input_range.set_color(RED)
#Bring back average
self.play(
self.average.restore,
self.average.center,
self.average.to_edge, UP,
self.integral.to_edge, DOWN,
run_time = 2
)
self.wait()
self.play(
Write(self.dx_brace),
Write(self.dx_label),
)
self.wait()
self.play(
FadeOut(self.dx_label),
FadeIn(example_dx)
)
self.play(Indicate(example_dx))
self.wait()
self.play(ShowCreation(input_range))
self.play(FadeOut(input_range))
self.wait()
#Ask how many there are
num_samples_copy = num_samples.copy()
v_lines = self.v_lines
self.play(*[
ApplyFunction(
lambda l : l.shift(0.5*UP).set_color(GREEN),
line,
rate_func = squish_rate_func(
there_and_back, a, a+0.3
),
run_time = 3,
)
for line, a in zip(
self.v_lines,
np.linspace(0, 0.7, len(self.v_lines))
)
] + [
num_samples_copy.set_color, GREEN
])
self.play(FadeOut(v_lines))
self.wait()
#Count number of samples
num_samples_copy.generate_target()
num_samples_copy.target.shift(DOWN + 0.5*LEFT)
rhs = TexMobject("\\approx", "\\pi", "/", "0.1")
rhs.next_to(num_samples_copy.target, RIGHT)
self.play(
MoveToTarget(num_samples_copy),
Write(rhs.get_part_by_tex("approx")),
)
self.play(ShowCreation(input_range))
self.play(ReplacementTransform(
self.x_axis_labels[1].copy(),
rhs.get_part_by_tex("pi")
))
self.play(FadeOut(input_range))
self.play(
ReplacementTransform(
example_dx.copy(),
rhs.get_part_by_tex("0.1")
),
Write(rhs.get_part_by_tex("/"))
)
self.wait(2)
#Substitute number of samples
self.play(ReplacementTransform(
example_dx, self.dx_label
))
dx = rhs.get_part_by_tex("0.1")
self.play(Transform(
dx, TexMobject("dx").move_to(dx)
))
self.wait(2)
approx = rhs.get_part_by_tex("approx")
rhs.remove(approx)
self.play(
FadeOut(num_samples),
FadeOut(num_samples_copy),
FadeOut(approx),
rhs.next_to, self.average[1], DOWN
)
self.wait()
self.pi_over_dx = rhs
def distribute_dx(self):
numerator, frac_line, denominator = self.average
pi, over, dx = self.pi_over_dx
integral = self.integral
dx.generate_target()
lp, rp = parens = TexMobject("()")
parens.set_height(numerator.get_height())
lp.next_to(numerator, LEFT)
rp.next_to(numerator, RIGHT)
dx.target.next_to(rp, RIGHT)
self.play(
MoveToTarget(dx, path_arc = np.pi/2),
Write(parens),
frac_line.stretch_to_fit_width,
parens.get_width() + dx.get_width(),
frac_line.shift, dx.get_width()*RIGHT/2,
FadeOut(over)
)
self.wait(2)
average = VGroup(parens, numerator, dx, frac_line, pi)
integral.generate_target()
over_pi = TexMobject("\\frac{\\phantom{\\int \\sin(x)\\dx}}{\\pi}")
integral.target.set_width(over_pi.get_width())
integral.target.next_to(over_pi, UP)
integral_over_pi = VGroup(integral.target, over_pi)
integral_over_pi.to_corner(UP+RIGHT)
arrow = Arrow(LEFT, RIGHT)
arrow.next_to(integral.target, LEFT)
self.play(
average.scale, 0.9,
average.next_to, arrow, LEFT,
average.shift_onto_screen,
ShowCreation(arrow),
Write(over_pi),
MoveToTarget(integral, run_time = 2)
)
self.wait(2)
self.play(*list(map(FadeOut, [self.dx_label, self.dx_brace])))
self.integral_over_pi = VGroup(integral, over_pi)
self.average = average
self.average_arrow = arrow
def write_area_over_width(self):
self.play(
self.integral_over_pi.shift, 2*LEFT,
*list(map(FadeOut, [self.average, self.average_arrow]))
)
average_height = TextMobject("Average height = ")
area_over_width = TexMobject(
"{\\text{Area}", "\\over\\,", "\\text{Width}}", "="
)
area_over_width.get_part_by_tex("Area").set_color_by_gradient(
BLUE, GREEN
)
area_over_width.next_to(self.integral_over_pi[1][0], LEFT)
average_height.next_to(area_over_width, LEFT)
self.play(*list(map(FadeIn, [average_height, area_over_width])))
self.wait()
def show_moving_v_line(self):
mean = np.mean(self.bounds)
v_line = self.get_vertical_line_to_graph(
mean, self.graph,
line_class = DashedLine,
color = WHITE,
)
self.play(ShowCreation(v_line))
for count in range(2):
for x in self.bounds + [mean]:
self.play(self.get_v_line_change_anim(
v_line, self.graph, x,
run_time = 3
))
class Approx31(Scene):
def construct(self):
tex = TexMobject("\\approx 31")
tex.set_width(FRAME_WIDTH - LARGE_BUFF)
tex.to_edge(LEFT)
self.play(Write(tex))
self.wait(3)
class LetsSolveThis(TeacherStudentsScene):
def construct(self):
expression = TexMobject(
"{\\int_0^\\pi ", " \\sin(x)", "\\,dx \\over \\pi}"
)
expression.to_corner(UP+LEFT)
question = TextMobject(
"What's the antiderivative \\\\ of",
"$\\sin(x)$",
"?"
)
for tex_mob in expression, question:
tex_mob.set_color_by_tex("sin", BLUE)
self.add(expression)
self.teacher_says("Let's compute it.")
self.wait()
self.student_thinks(question)
self.wait(2)
class Antiderivative(AverageOfSineStart):
CONFIG = {
"antideriv_color" : GREEN,
"deriv_color" : BLUE,
"riemann_rect_dx" : 0.01,
"y_axis_label" : "",
"graph_origin" : 4*LEFT + DOWN,
}
def construct(self):
self.setup_axes()
self.add_x_axis_labels()
self.negate_derivative_of_cosine()
self.walk_through_slopes()
self.apply_ftoc()
self.show_difference_in_antiderivative()
self.comment_on_area()
self.divide_by_pi()
self.set_color_antiderivative_fraction()
self.show_slope()
self.bring_back_derivative()
self.show_tangent_slope()
def add_x_axis_labels(self):
AverageOfSineStart.add_x_axis_labels(self)
self.remove(*self.x_axis_labels[::2])
def negate_derivative_of_cosine(self):
cos, neg_cos, sin, neg_sin = graphs = [
self.get_graph(func)
for func in [
np.cos,
lambda x : -np.cos(x),
np.sin,
lambda x : -np.sin(x),
]
]
VGroup(cos, neg_cos).set_color(self.antideriv_color)
VGroup(sin, neg_sin).set_color(self.deriv_color)
labels = ["\\cos(x)", "-\\cos(x)", "\\sin(x)", "-\\sin(x)"]
x_vals = [2*np.pi, 2*np.pi, 5*np.pi/2, 5*np.pi/2]
vects = [UP, DOWN, UP, DOWN]
for graph, label, x_val, vect in zip(graphs, labels, x_vals, vects):
graph.label = self.get_graph_label(
graph, label,
x_val = x_val,
direction = vect,
buff = SMALL_BUFF
)
derivs = []
for F, f in ("\\cos", "-\\sin"), ("-\\cos", "\\sin"):
deriv = TexMobject(
"{d(", F, ")", "\\over\\,", "dx}", "(x)",
"=", f, "(x)"
)
deriv.set_color_by_tex(F, self.antideriv_color)
deriv.set_color_by_tex(f, self.deriv_color)
deriv.to_edge(UP)
derivs.append(deriv)
cos_deriv, neg_cos_deriv = derivs
self.add(cos_deriv)
for graph in cos, neg_sin:
self.play(
ShowCreation(graph, rate_func = smooth),
Write(
graph.label,
rate_func = squish_rate_func(smooth, 0.3, 1)
),
run_time = 2
)
self.wait()
self.wait()
self.play(*[
ReplacementTransform(*pair)
for pair in [
(derivs),
(cos, neg_cos),
(cos.label, neg_cos.label),
(neg_sin, sin),
(neg_sin.label, sin.label),
]
])
self.wait(2)
self.neg_cos = neg_cos
self.sin = sin
self.deriv = neg_cos_deriv
def walk_through_slopes(self):
neg_cos = self.neg_cos
sin = self.sin
faders = sin, sin.label
for mob in faders:
mob.save_state()
sin_copy = self.get_graph(
np.sin,
x_min = 0,
x_max = 2*np.pi,
color = BLUE,
)
v_line = self.get_vertical_line_to_graph(
0, neg_cos,
line_class = DashedLine,
color = WHITE
)
ss_group = self.get_secant_slope_group(
0, neg_cos,
dx = 0.001,
secant_line_color = YELLOW
)
def quad_smooth(t):
return 0.25*(np.floor(4*t) + smooth((4*t) % 1))
self.play(*[
ApplyMethod(m.fade, 0.6)
for m in faders
])
self.wait()
self.play(*list(map(ShowCreation, ss_group)), run_time = 2)
kwargs = {
"run_time" : 20,
"rate_func" : quad_smooth,
}
v_line_anim = self.get_v_line_change_anim(
v_line, sin_copy, 2*np.pi,
**kwargs
)
self.animate_secant_slope_group_change(
ss_group,
target_x = 2*np.pi,
added_anims = [
ShowCreation(sin_copy, **kwargs),
v_line_anim
],
**kwargs
)
self.play(
*list(map(FadeOut, [ss_group, v_line, sin_copy]))
)
self.wait()
self.ss_group = ss_group
def apply_ftoc(self):
deriv = self.deriv
integral = TexMobject(
"\\int", "^\\pi", "_0", "\\sin(x)", "\\,dx"
)
rhs = TexMobject(
"=", "\\big(", "-\\cos", "(", "\\pi", ")", "\\big)",
"-", "\\big(", "-\\cos", "(", "0", ")", "\\big)",
)
rhs.next_to(integral, RIGHT)
equation = VGroup(integral, rhs)
equation.to_corner(UP+RIGHT, buff = MED_SMALL_BUFF)
(start_pi, end_pi), (start_zero, end_zero) = start_end_pairs = [
[
m.get_part_by_tex(tex)
for m in (integral, rhs)
]
for tex in ("\\pi", "0")
]
for tex_mob in integral, rhs:
tex_mob.set_color_by_tex("sin", self.deriv_color)
tex_mob.set_color_by_tex("cos", self.antideriv_color)
tex_mob.set_color_by_tex("0", YELLOW)
tex_mob.set_color_by_tex("\\pi", YELLOW)
self.play(
Write(integral),
self.deriv.scale, 0.5,
self.deriv.center,
self.deriv.to_edge, LEFT, MED_SMALL_BUFF,
self.deriv.shift, UP,
)
self.wait()
self.play(FadeIn(
VGroup(*[part for part in rhs if part not in [end_pi, end_zero]]),
submobject_mode = "lagged_start",
run_time = 2,
))
self.wait()
for start, end in start_end_pairs:
self.play(ReplacementTransform(
start.copy(), end,
path_arc = np.pi/6,
run_time = 2
))
self.wait()
self.integral = integral
self.rhs = rhs
def show_difference_in_antiderivative(self):
pi_point, zero_point = points = [
self.input_to_graph_point(x, self.neg_cos)
for x in reversed(self.bounds)
]
interim_point = pi_point[0]*RIGHT + zero_point[1]*UP
pi_dot, zero_dot = dots = [
Dot(point, color = YELLOW)
for point in points
]
v_line = DashedLine(pi_point, interim_point)
h_line = DashedLine(interim_point, zero_point)
v_line_brace = Brace(v_line, RIGHT)
two_height_label = v_line_brace.get_text(
"$2$", buff = SMALL_BUFF
)
two_height_label.add_background_rectangle()
pi | |
function parameter or return value."
"\nFunction annotations are usually used for type hints: for example, this function is \n"
"expected to take two int arguments and is also expected to have an int return value:"
"\ndef sum_two_numbers(a: int, b: int) -> int:\n return a + b\nFunction annotation \n"
"syntax is explained in section Function definitions.\nSee variable annotation and \n"
"PEP 484, which describe this functionality.",
"__future__": "A pseudo-module which programmers can use to enable new language features which are not \n"
"compatible with the current interpreter.\nBy importing the __future__ module and evaluating \n"
"its variables, you can see when a new feature was first added to the language and when it \n"
"becomes the default:\n>>>\n>>> import __future__\n>>> __future__.division"
"\n_Feature((2, 2, 0, 'alpha', 2), (3, 0, 0, 'alpha', 0), 8192)",
"garbage collection": "The process of freeing memory when it is not used anymore. Python performs garbage \n"
"collection via reference counting and a cyclic garbage collector that is able to \n"
"detect and break reference cycles. The garbage collector can be controlled \n"
"using the gc module.",
"generator": "A function which returns a generator iterator. It looks like a normal function except that it \n"
"contains yield expressions for producing a series of values usable in a for-loop or that can \n"
"be retrieved one at a time with the next() function."
"\nUsually refers to a generator function, but may refer to a generator iterator in some contexts. \n"
"In cases where the intended meaning isn\'t clear, using the full terms avoids ambiguity.",
"generator iterator": "An object created by a generator function.\nEach yield temporarily suspends processing, \n"
"remembering the location execution state (including local variables and pending \n"
"try-statements). When the generator iterator resumes, it picks up where it left off \n"
"(in contrast to functions which start fresh on every invocation).",
"generator expression": "An expression that returns an iterator. It looks like a normal expression followed \n"
"by a for clause defining a loop variable, range, and an optional if clause. The \n"
"combined expression generates values for an enclosing function:\n>>>"
"\n>>> sum(i*i for i in range(10)) # sum of squares 0, 1, 4, ... 81\n285",
"generic function": "A function composed of multiple functions implementing the same operation for different \n"
"types. Which implementation should be used during a call is determined by the \n"
"dispatch algorithm.\nSee also the single dispatch glossary entry, the functools.\n"
"singledispatch() decorator, and PEP 443.",
"GIL": "See global interpreter lock.",
"global interpreter lock": "The mechanism used by the CPython interpreter to assure that only one thread \n"
"executes Python bytecode at a time. This simplifies the CPython implementation \n"
"by making the object model (including critical built-in types such as dict) \n"
"implicitly safe against concurrent access. Locking the entire interpreter \n"
"makes it easier for the interpreter to be multi-threaded, at the expense of \n"
"much of the parallelism afforded by multi-processor machines."
"\nHowever, some extension modules, either standard or third-party, are designed \n"
"so as to release the GIL when doing computationally-intensive tasks such as \n"
"compression or hashing. Also, the GIL is always released when doing I/O."
"\nPast efforts to create a \"free-threaded\" interpreter (one which locks shared \n"
"data at a much finer granularity) have not been successful because performance \n"
"suffered in the common single-processor case. It is believed that overcoming \n"
"this performance issue would make the implementation much more complicated and \n"
"therefore costlier to maintain.",
"hash-based pyc": "A bytecode cache file that uses the hash rather than the last-modified time of the \n"
"corresponding source file to determine its validity. See Cached bytecode invalidation.",
"hashable": "An object is hashable if it has a hash value which never changes during its lifetime (it needs \n"
"a __hash__() method), and can be compared to other objects (it needs an __eq__() method). \n"
"Hashable objects which compare equal must have the same hash value.\nHashability makes an \n"
"object usable as a dictionary key and a set member, because these data structures use the \n"
"hash value internally.\nMost of Python\'s immutable built-in objects are hashable; mutable \n"
"containers (such as lists or dictionaries) are not; immutable containers (such as tuples and \n"
"frozensets) are only hashable if their elements are hashable. Objects which are instances of \n"
"user-defined classes are hashable by default. They all compare unequal (except with themselves), \n"
"and their hash value is derived from their id().",
"IDLE": "An Integrated Development Environment for Python. IDLE is a basic editor and interpreter environment \n"
"which ships with the standard distribution of Python.",
"immutable": "An object with a fixed value. Immutable objects include numbers, strings and tuples. Such an \n"
"object cannot be altered. A new object has to be created if a different value has to be stored. \n"
"They play an important role in places where a constant hash value is needed, for example as a key \n"
"in a dictionary.",
"import path": "A list of locations (or path entries) that are searched by the path based finder for modules \n"
"to import. During import, this list of locations usually comes from sys.path, but for \n"
"subpackages it may also come from the parent package\'s __path__ attribute.",
"importing": "The process by which Python code in one module is made available to Python code in another module.",
"importer": "An object that both finds and loads a module; both a finder and loader object.",
"interactive": "Python has an interactive interpreter which means you can enter statements and expressions \n"
"at the interpreter prompt, immediately execute them and see their results. Just launch python \n"
"with no arguments (possibly by selecting it from your computer\'s main menu). It is a very \n"
"powerful way to test out new ideas or inspect modules and packages (remember help(x)).",
"interpreted": "Python is an interpreted language, as opposed to a compiled one, though the distinction can \n"
"be blurry because of the presence of the bytecode compiler. This means that source files can \n"
"be run directly without explicitly creating an executable which is then run. Interpreted \n"
"languages typically have a shorter development/debug cycle than compiled ones, though their \n"
"programs generally also run more slowly. See also interactive.",
"interpreter shutdown": "When asked to shut down, the Python interpreter enters a special phase where it \n"
"gradually releases all allocated resources, such as modules and various critical \n"
"internal structures. It also makes several calls to the garbage collector. This \n"
"can trigger the execution of code in user-defined destructors or weakref callbacks. \n"
"Code executed during the shutdown phase can encounter various exceptions as the \n"
"resources it relies on may not function anymore (common examples are library modules \n"
"or the warnings machinery).\nThe main reason for interpreter shutdown is that \n"
"the __main__ module or the script being run has finished executing.",
"iterable": "An object capable of returning its members one at a time. Examples of iterables include all \n"
"sequence types (such as list, str, and tuple) and some non-sequence types like dict, file objects, \n"
"and objects of any classes you define with an __iter__() method or with a __getitem__() method \n"
"that implements Sequence semantics.\nIterables can be used in a for loop and in many other places \n"
"where a sequence is needed (zip(), map(), ...). When an iterable object is passed as an argument to \n"
"the built-in function iter(), it returns an iterator for the object. This iterator is good for one \n"
"pass over the set of values. When using iterables, it is usually not necessary to call iter() or \n"
"deal with iterator objects yourself. The for statement does that automatically for you, creating a \n"
"temporary unnamed variable to hold the iterator for the | |
copy.deepcopy(subnet)
subnet_request["subnet"]["dns_nameservers"] = dns_nameservers
subnet_request["subnet"]["gateway_ip"] = gateway_ip
res = self.plugin.create_subnet(self.context, subnet_request)
self.assertEqual(subnet_create.call_count, 1)
self.assertEqual(dns_create.call_count, 0)
self.assertEqual(route_create.call_count, 0)
for key in subnet["subnet"].keys():
if key == "gateway_ip":
self.assertEqual(res[key], "172.16.0.1")
elif key == "host_routes":
self.assertEqual(len(res[key]), 0)
else:
self.assertEqual(res[key], subnet["subnet"][key])
def test_create_subnet_dns_nameservers(self):
routes = [dict(cidr="0.0.0.0/0", gateway="0.0.0.0")]
dns_ns = ["192.168.127.12", "192.168.3.11"]
subnet = dict(
subnet=dict(network_id=1,
tenant_id=self.context.tenant_id, ip_version=4,
cidr="172.16.0.0/24", gateway_ip="0.0.0.0",
dns_nameservers=dns_ns, enable_dhcp=None))
with self._stubs(
subnet=subnet["subnet"],
routes=routes,
dns=dns_ns
) as (subnet_create, dns_create, route_create):
res = self.plugin.create_subnet(self.context,
copy.deepcopy(subnet))
self.assertEqual(subnet_create.call_count, 1)
self.assertEqual(dns_create.call_count, 2)
self.assertEqual(route_create.call_count, 1)
for key in subnet["subnet"].keys():
if key == "host_routes":
self.assertEqual(len(res[key]), 0)
else:
self.assertEqual(res[key], subnet["subnet"][key])
def test_create_subnet_routes(self):
routes = [dict(cidr="1.1.1.1/8", gateway="172.16.0.4"),
dict(cidr="0.0.0.0/0", gateway="0.0.0.0")]
subnet = dict(
subnet=dict(network_id=1,
tenant_id=self.context.tenant_id, ip_version=4,
cidr="172.16.0.0/24", gateway_ip="0.0.0.0",
dns_nameservers=neutron_attrs.ATTR_NOT_SPECIFIED,
host_routes=[{"destination": "1.1.1.1/8",
"nexthop": "172.16.0.4"}],
allocation_pools=[{"start": "172.16.0.5",
"end": "172.16.0.254"}],
enable_dhcp=None))
with self._stubs(
subnet=subnet["subnet"],
routes=routes
) as (subnet_create, dns_create, route_create):
dns_nameservers = subnet["subnet"].pop("dns_nameservers")
subnet_request = copy.deepcopy(subnet)
subnet_request["subnet"]["dns_nameservers"] = dns_nameservers
res = self.plugin.create_subnet(self.context, subnet_request)
self.assertEqual(subnet_create.call_count, 1)
self.assertEqual(dns_create.call_count, 0)
self.assertEqual(route_create.call_count, 2)
for key in subnet["subnet"].keys():
if key == "host_routes":
res_tuples = [(r["destination"], r["nexthop"])
for r in res[key]]
self.assertIn(("1.1.1.1/8", "172.16.0.4"), res_tuples)
self.assertEqual(1, len(res_tuples))
else:
self.assertEqual(res[key], subnet["subnet"][key])
def test_create_subnet_default_route(self):
routes = [dict(cidr="0.0.0.0/0", gateway="172.16.0.4")]
subnet = dict(
subnet=dict(network_id=1,
tenant_id=self.context.tenant_id, ip_version=4,
cidr="172.16.0.0/24",
gateway_ip=neutron_attrs.ATTR_NOT_SPECIFIED,
dns_nameservers=neutron_attrs.ATTR_NOT_SPECIFIED,
host_routes=[{"destination": "0.0.0.0/0",
"nexthop": "172.16.0.4"}],
allocation_pools=[{"start": "172.16.0.5",
"end": "172.16.0.254"}],
enable_dhcp=None))
with self._stubs(
subnet=subnet["subnet"],
routes=routes
) as (subnet_create, dns_create, route_create):
dns_nameservers = subnet["subnet"].pop("dns_nameservers")
gateway_ip = subnet["subnet"].pop("gateway_ip")
subnet_request = copy.deepcopy(subnet)
subnet_request["subnet"]["dns_nameservers"] = dns_nameservers
subnet_request["subnet"]["gateway_ip"] = gateway_ip
res = self.plugin.create_subnet(self.context, subnet_request)
self.assertEqual(subnet_create.call_count, 1)
self.assertEqual(dns_create.call_count, 0)
self.assertEqual(route_create.call_count, 1)
for key in subnet["subnet"].keys():
if key == "gateway_ip":
self.assertEqual(res[key], "172.16.0.4")
elif key == "host_routes":
self.assertEqual(len(res[key]), 0)
else:
self.assertEqual(res[key], subnet["subnet"][key])
def test_create_subnet_two_default_routes_fails(self):
routes = [dict(cidr="0.0.0.0/0", gateway="172.16.0.4"),
dict(cidr="0.0.0.0/0", gateway="172.16.0.4")]
subnet = dict(
subnet=dict(network_id=1,
tenant_id=self.context.tenant_id, ip_version=4,
cidr="172.16.0.0/24",
gateway_ip=neutron_attrs.ATTR_NOT_SPECIFIED,
dns_nameservers=neutron_attrs.ATTR_NOT_SPECIFIED,
host_routes=[
{"destination": "0.0.0.0/0",
"nexthop": "172.16.0.4"},
{"destination": "0.0.0.0/0",
"nexthop": "172.16.0.4"}],
allocation_pools=[{"start": "172.16.0.5",
"end": "172.16.0.254"}],
enable_dhcp=None))
with self._stubs(
subnet=subnet["subnet"],
routes=routes
) as (subnet_create, dns_create, route_create):
dns_nameservers = subnet["subnet"].pop("dns_nameservers")
gateway_ip = subnet["subnet"].pop("gateway_ip")
subnet_request = copy.deepcopy(subnet)
subnet_request["subnet"]["dns_nameservers"] = dns_nameservers
subnet_request["subnet"]["gateway_ip"] = gateway_ip
with self.assertRaises(q_exc.DuplicateRouteConflict):
self.plugin.create_subnet(self.context, subnet_request)
def test_create_subnet_default_route_gateway_ip(self):
"""Host_routes precedence
If default route (host_routes) and gateway_ip are both provided,
then host_route takes precedence.
"""
routes = [dict(cidr="0.0.0.0/0", gateway="172.16.0.4")]
subnet = dict(
subnet=dict(network_id=1,
tenant_id=self.context.tenant_id, ip_version=4,
cidr="172.16.0.0/24",
gateway_ip="172.16.0.3",
dns_nameservers=neutron_attrs.ATTR_NOT_SPECIFIED,
host_routes=[{"destination": "0.0.0.0/0",
"nexthop": "172.16.0.4"}],
allocation_pools=[{"start": "172.16.0.5",
"end": "172.16.0.254"}],
enable_dhcp=None))
with self._stubs(
subnet=subnet["subnet"],
routes=routes
) as (subnet_create, dns_create, route_create):
dns_nameservers = subnet["subnet"].pop("dns_nameservers")
subnet_request = copy.deepcopy(subnet)
subnet_request["subnet"]["dns_nameservers"] = dns_nameservers
res = self.plugin.create_subnet(self.context, subnet_request)
self.assertEqual(subnet_create.call_count, 1)
self.assertEqual(dns_create.call_count, 0)
self.assertEqual(route_create.call_count, 1)
self.assertEqual(res["gateway_ip"], "172.16.0.4")
for key in subnet["subnet"].keys():
if key == "gateway_ip":
self.assertEqual(res[key], "172.16.0.4")
elif key == "host_routes":
self.assertEqual(len(res[key]), 0)
else:
self.assertEqual(res[key], subnet["subnet"][key])
def test_create_subnet_null_gateway_no_routes(self):
"""A subnet with a NULL gateway IP shouldn't create routes."""
routes = [dict(cidr="0.0.0.0/0", gateway="172.16.0.4")]
subnet = dict(
subnet=dict(network_id=1,
tenant_id=self.context.tenant_id, ip_version=4,
cidr="172.16.0.0/24",
gateway_ip=None,
dns_nameservers=neutron_attrs.ATTR_NOT_SPECIFIED,
enable_dhcp=None))
with self._stubs(
subnet=subnet["subnet"],
routes=routes
) as (subnet_create, dns_create, route_create):
dns_nameservers = subnet["subnet"].pop("dns_nameservers")
subnet_request = copy.deepcopy(subnet)
subnet_request["subnet"]["dns_nameservers"] = dns_nameservers
res = self.plugin.create_subnet(self.context, subnet_request)
self.assertEqual(subnet_create.call_count, 1)
self.assertEqual(dns_create.call_count, 0)
self.assertEqual(route_create.call_count, 0)
for key in subnet["subnet"].keys():
if key == "gateway_ip":
self.assertIsNone(res[key])
else:
self.assertEqual(res[key], subnet["subnet"][key])
def test_create_subnet_routes_quota_pass(self):
routes = (("0.0.0.0/0", "127.0.0.1"),
("1.0.0.0/0", "127.0.0.1"),
("2.0.0.0/0", "127.0.0.1"))
host_routes = [{"destination": x, "nexthop": y} for x, y in routes]
stub_routes = [{"cidr": x, "gateway": y} for x, y in routes]
subnet = {"subnet":
{"cidr": "192.168.127.12/24", "created_at": datetime.now(),
"host_routes": host_routes, "id": 1, "ip_version": 4,
"network_id": 1, "tenant_id": self.context.tenant_id}}
with self._stubs(subnet=subnet.get("subnet"), routes=stub_routes):
self.plugin.create_subnet(self.context, subnet)
def test_create_subnet_routes_quota_fail(self):
routes = (("0.0.0.0/0", "127.0.0.1"),
("1.0.0.0/0", "127.0.0.1"),
("2.0.0.0/0", "127.0.0.1"),
("3.0.0.0/0", "127.0.0.1"))
host_routes = [{"destination": x, "nexthop": y} for x, y in routes]
stub_routes = [{"cidr": x, "gateway": y} for x, y in routes]
subnet = {"subnet":
{"cidr": "192.168.127.12/24", "created_at": datetime.now(),
"host_routes": host_routes, "id": 1, "ip_version": 4,
"network_id": 1, "tenant_id": self.context.tenant_id}}
with self._stubs(subnet=subnet.get("subnet"), routes=stub_routes):
with self.assertRaises(exceptions.OverQuota):
self.plugin.create_subnet(self.context, subnet)
def test_create_subnet_dns_quota_pass(self):
nameservers = ["172.16.17.32", "172.16.31.10"]
subnet = {"subnet":
{"cidr": "192.168.127.12/24", "created_at": datetime.now(),
"dns_nameservers": nameservers, "id": 1, "ip_version": 4,
"network_id": 1, "tenant_id": self.context.tenant_id}}
with self._stubs(subnet=subnet.get("subnet"), dns=nameservers):
self.plugin.create_subnet(self.context, subnet)
def test_create_subnet_dns_quota_fail(self):
nameservers = ["172.16.17.32", "172.16.31.10", "192.168.3.11"]
subnet = {"subnet":
{"cidr": "192.168.127.12/24", "created_at": datetime.now(),
"dns_nameservers": nameservers, "id": 1, "ip_version": 4,
"network_id": 1, "tenant_id": self.context.tenant_id}}
with self._stubs(subnet=subnet.get("subnet"), dns=nameservers):
with self.assertRaises(exceptions.OverQuota):
self.plugin.create_subnet(self.context, subnet)
class TestQuarkAllocationPoolCache(test_quark_plugin.TestQuarkPlugin):
@contextlib.contextmanager
def _stubs(self, has_subnet=True, host_routes=None, new_routes=None,
find_routes=True, new_dns_servers=None, new_ip_policy=None,
ip_version=4):
if host_routes is None:
host_routes = []
if new_routes:
new_routes = [models.Route(cidr=r["destination"],
gateway=r["nexthop"],
subnet_id=1)
for r in new_routes]
if new_dns_servers:
new_dns_servers = [models.DNSNameserver(
ip=ip,
subnet_id=1) for ip in new_dns_servers]
if new_ip_policy:
exc = [models.IPPolicyCIDR(cidr=excluded_cidr)
for excluded_cidr in new_ip_policy]
new_ip_policy = models.IPPolicy(exclude=exc)
if ip_version == 4:
cidr = "172.16.0.0/24"
else:
cidr = "2607:fc00:db20:35b:7399::5/64"
subnet_mod = None
if has_subnet:
subnet = dict(
id=0,
network_id=1,
tenant_id=self.context.tenant_id,
ip_version=ip_version,
cidr=cidr,
host_routes=host_routes,
dns_nameservers=["192.168.127.12", "192.168.3.11"],
enable_dhcp=None,
_allocation_pool_cache=None)
dns_ips = subnet.pop("dns_nameservers", [])
host_routes = subnet.pop("host_routes", [])
exclude = [models.IPPolicyCIDR(cidr="172.16.0.0/32"),
models.IPPolicyCIDR(cidr="172.16.0.255/32")]
subnet_mod = models.Subnet(
ip_policy=models.IPPolicy(exclude=exclude),
network=models.Network(id=1)
)
subnet_mod.update(subnet)
subnet_mod["dns_nameservers"] = [models.DNSNameserver(ip=ip)
for ip in dns_ips]
subnet_mod["routes"] = [models.Route(cidr=r["destination"],
gateway=r["nexthop"],
subnet_id=subnet_mod["id"])
for r in host_routes]
with contextlib.nested(
mock.patch("quark.db.api.subnet_find"),
mock.patch("quark.db.api.subnet_update"),
mock.patch("quark.db.api.dns_create"),
mock.patch("quark.db.api.route_find"),
mock.patch("quark.db.api.route_update"),
mock.patch("quark.db.api.route_create"),
) as (subnet_find, subnet_update,
dns_create, route_find, route_update, route_create):
subnet_find.return_value = subnet_mod
if has_subnet:
route_find.return_value = (subnet_mod["routes"][0] if
subnet_mod["routes"] and
find_routes else None)
new_subnet_mod = models.Subnet()
new_subnet_mod.update(subnet_mod)
new_subnet_mod.update(dict(id=1))
if new_routes:
new_subnet_mod["routes"] = new_routes
if new_dns_servers:
new_subnet_mod["dns_nameservers"] = new_dns_servers
if new_ip_policy:
new_subnet_mod["ip_policy"] = new_ip_policy
subnet_update.return_value = new_subnet_mod
yield subnet_mod
@mock.patch("quark.db.api.subnet_update_set_alloc_pool_cache")
def test_update_subnet_allocation_pools_invalidate_cache(self, set_cache):
og = cfg.CONF.QUARK.allow_allocation_pool_update
cfg.CONF.set_override('allow_allocation_pool_update', True, 'QUARK')
with self._stubs() as subnet_found:
pools = [dict(start="172.16.0.1", end="172.16.0.12")]
s = dict(subnet=dict(allocation_pools=pools))
self.plugin.update_subnet(self.context, 1, s)
self.assertEqual(set_cache.call_count, 1)
set_cache.assert_called_with(self.context, subnet_found)
cfg.CONF.set_override('allow_allocation_pool_update', og, 'QUARK')
@mock.patch("quark.db.api.subnet_update_set_alloc_pool_cache")
def test_get_subnet_set_alloc_cache_if_cache_is_none(self, set_cache):
with self._stubs() as subnet_found:
self.plugin.get_subnet(self.context, 1)
self.assertEqual(set_cache.call_count, 1)
set_cache.assert_called_with(self.context, subnet_found,
[dict(start="172.16.0.1",
end="172.16.0.254")])
class TestQuarkUpdateSubnet(test_quark_plugin.TestQuarkPlugin):
DEFAULT_ROUTE = [dict(destination="0.0.0.0/0",
nexthop="172.16.0.1")]
@contextlib.contextmanager
def _stubs(self, has_subnet=True, host_routes=None, new_routes=None,
find_routes=True, new_dns_servers=None, new_ip_policy=None,
ip_version=4):
if host_routes is None:
host_routes = []
if new_routes:
new_routes = [models.Route(cidr=r["destination"],
gateway=r["nexthop"],
subnet_id=1)
for r in new_routes]
if new_dns_servers:
new_dns_servers = [models.DNSNameserver(
ip=ip,
subnet_id=1) for ip in new_dns_servers]
if new_ip_policy:
exc = [models.IPPolicyCIDR(cidr=excluded_cidr)
for excluded_cidr in new_ip_policy]
new_ip_policy = models.IPPolicy(exclude=exc)
if ip_version == 4:
cidr = "172.16.0.0/24"
else:
cidr = "fdf8:f53e:61e4::18/64"
subnet_mod = None
if has_subnet:
subnet = dict(
id=0,
network_id=1,
tenant_id=self.context.tenant_id,
ip_version=ip_version,
cidr=cidr,
host_routes=host_routes,
dns_nameservers=["192.168.127.12", "192.168.3.11"],
enable_dhcp=None)
dns_ips = subnet.pop("dns_nameservers", [])
host_routes = subnet.pop("host_routes", [])
exclude = [models.IPPolicyCIDR(cidr="172.16.0.0/32"),
models.IPPolicyCIDR(cidr="172.16.0.255/32")]
subnet_mod = models.Subnet(
ip_policy=models.IPPolicy(exclude=exclude),
network=models.Network(id=1)
)
subnet_mod.update(subnet)
subnet_mod["dns_nameservers"] = [models.DNSNameserver(ip=ip)
for ip in dns_ips]
subnet_mod["routes"] = [models.Route(cidr=r["destination"],
gateway=r["nexthop"],
subnet_id=subnet_mod["id"])
for r in host_routes]
with contextlib.nested(
mock.patch("quark.db.api.subnet_find"),
mock.patch("quark.db.api.subnet_update"),
mock.patch("quark.db.api.dns_create"),
mock.patch("quark.db.api.route_find"),
mock.patch("quark.db.api.route_update"),
mock.patch("quark.db.api.route_create"),
) as (subnet_find, subnet_update,
dns_create,
route_find, route_update, route_create):
subnet_find.return_value = subnet_mod
if has_subnet:
route_find.return_value = (subnet_mod["routes"][0] if
subnet_mod["routes"] and
find_routes else None)
new_subnet_mod = models.Subnet()
new_subnet_mod.update(subnet_mod)
new_subnet_mod.update(dict(id=1))
if new_routes:
new_subnet_mod["routes"] = new_routes
if new_dns_servers:
new_subnet_mod["dns_nameservers"] = new_dns_servers
if new_ip_policy:
new_subnet_mod["ip_policy"] = new_ip_policy
subnet_update.return_value = new_subnet_mod
yield dns_create, route_update, route_create
def test_update_subnet_not_found(self):
with self._stubs(has_subnet=False):
with self.assertRaises(exceptions.SubnetNotFound):
self.plugin.update_subnet(self.context, 1, {})
def test_update_subnet_dns_nameservers(self):
new_dns_servers = ["192.168.3.11"]
with self._stubs(
host_routes=self.DEFAULT_ROUTE,
new_dns_servers=new_dns_servers
) as (dns_create, route_update, route_create):
req = dict(subnet=dict(dns_nameservers=new_dns_servers))
res = self.plugin.update_subnet(self.context,
1,
req)
self.assertEqual(dns_create.call_count, 1)
self.assertEqual(route_create.call_count, 0)
self.assertEqual(res["dns_nameservers"], new_dns_servers)
def test_update_subnet_routes(self):
new_routes = [dict(destination="10.0.0.0/24",
nexthop="1.1.1.1")]
with self._stubs(
host_routes=self.DEFAULT_ROUTE,
new_routes=new_routes
) as (dns_create, route_update, route_create):
req = dict(subnet=dict(
host_routes=new_routes))
res = self.plugin.update_subnet(self.context, 1, req)
self.assertEqual(dns_create.call_count, 0)
self.assertEqual(route_create.call_count, 1)
self.assertEqual(len(res["host_routes"]), 1)
self.assertEqual(res["host_routes"][0]["destination"],
"10.0.0.0/24")
self.assertEqual(res["host_routes"][0]["nexthop"],
"1.1.1.1")
self.assertIsNone(res["gateway_ip"])
def test_update_subnet_gateway_ip_with_default_route_in_db(self):
with self._stubs(
host_routes=self.DEFAULT_ROUTE,
new_routes=[dict(destination="0.0.0.0/0", nexthop="192.168.127.12")]
) as (dns_create, route_update, route_create):
req = dict(subnet=dict(gateway_ip="192.168.127.12"))
res = self.plugin.update_subnet(self.context, 1, req)
self.assertEqual(dns_create.call_count, 0)
self.assertEqual(route_create.call_count, 0)
self.assertEqual(route_update.call_count, 1)
self.assertEqual(len(res["host_routes"]), 0)
self.assertEqual(res["gateway_ip"], "192.168.127.12")
def test_update_subnet_gateway_ip_with_non_default_route_in_db(self):
with self._stubs(
host_routes=[dict(destination="1.1.1.1/8", nexthop="9.9.9.9")],
find_routes=False,
new_routes=[dict(destination="1.1.1.1/8", nexthop="9.9.9.9"),
dict(destination="0.0.0.0/0", nexthop="192.168.127.12")]
) as (dns_create, route_update, route_create):
req = dict(subnet=dict(gateway_ip="192.168.127.12"))
res = self.plugin.update_subnet(self.context, 1, req)
self.assertEqual(dns_create.call_count, 0)
self.assertEqual(route_create.call_count, 1)
self.assertEqual(res["gateway_ip"], "192.168.127.12")
self.assertEqual(len(res["host_routes"]), 1)
res_tuples = [(r["destination"], r["nexthop"])
for r in res["host_routes"]]
self.assertIn(("1.1.1.1/8", "9.9.9.9"), res_tuples)
def test_update_subnet_gateway_ip_without_default_route_in_db(self):
with self._stubs(
host_routes=None,
new_routes=[dict(destination="0.0.0.0/0", nexthop="192.168.127.12")]
) as (dns_create, route_update, route_create):
req = dict(subnet=dict(gateway_ip="192.168.127.12"))
res = self.plugin.update_subnet(self.context, 1, req)
self.assertEqual(dns_create.call_count, 0)
self.assertEqual(route_create.call_count, 1)
self.assertEqual(len(res["host_routes"]), 0)
self.assertEqual(res["gateway_ip"], "192.168.127.12")
def test_update_subnet_gateway_ip_with_default_route_in_args(self):
new_routes = [dict(destination="0.0.0.0/0",
nexthop="192.168.127.12")]
with self._stubs(
host_routes=self.DEFAULT_ROUTE,
new_routes=new_routes
) as (dns_create, route_update, route_create):
req = dict(subnet=dict(
host_routes=new_routes,
gateway_ip="192.168.127.12"))
res = self.plugin.update_subnet(self.context, 1, req)
self.assertEqual(dns_create.call_count, 0)
self.assertEqual(route_create.call_count, 1)
self.assertEqual(len(res["host_routes"]), 0)
self.assertEqual(res["gateway_ip"], "192.168.127.12")
def test_update_subnet_allocation_pools_invalid_outside(self):
og = cfg.CONF.QUARK.allow_allocation_pool_update
cfg.CONF.set_override('allow_allocation_pool_update', True, 'QUARK')
pools = [dict(start="172.16.1.10", end="172.16.1.20")]
s = dict(subnet=dict(allocation_pools=pools))
with self._stubs() as (dns_create, route_update, route_create):
with self.assertRaises(exceptions.OutOfBoundsAllocationPool):
self.plugin.update_subnet(self.context, 1, s)
cfg.CONF.set_override('allow_allocation_pool_update', og, 'QUARK')
def test_update_subnet_allocation_pools_zero(self):
with self._stubs() as (dns_create, route_update, route_create):
resp = self.plugin.update_subnet(self.context, 1,
dict(subnet=dict()))
self.assertEqual(resp["allocation_pools"],
[dict(start="172.16.0.1", end="172.16.0.254")])
def test_update_subnet_allocation_pools_one(self):
og = cfg.CONF.QUARK.allow_allocation_pool_update
        cfg.CONF.set_override('allow_allocation_pool_update', True, 'QUARK')
on raster: " + self.raster_info_lf)
try:
max_lf = float(max(self.lifespans))
self.logger.info(" * max. lifespan: " + str(max_lf))
except:
max_lf = 50.0
self.logger.info(
" * using default max. lifespan (error in input.inp definitions): " + str(max_lf))
temp_ras_base = Con((IsNull(self.raster_dict_lf[self.raster_info_lf]) == 1),
(IsNull(self.raster_dict_lf[self.raster_info_lf]) * 0),
Float(self.raster_dict_lf[self.raster_info_lf]))
temp_ras_Fr = Con((IsNull(self.ras_Fr) == 1), (IsNull(self.ras_Fr) * max_lf), Float(self.ras_Fr))
ras_Fr_new = Con((temp_ras_Fr < temp_ras_base), temp_ras_Fr, Float(self.raster_dict_lf[self.raster_info_lf]))
self.ras_Fr = ras_Fr_new
# update lf dictionary
self.raster_info_lf = "ras_Fr"
self.raster_dict_lf.update({self.raster_info_lf: self.ras_Fr})
except:
pass
else:
self.logger.info(" * Nothing to do (no Rasters provided).")
@fGl.err_info
@fGl.spatial_license
def analyse_h(self, threshold_h):
# analysis of flow depth as a limiting parameter for feature survival as a function of a threshold value
# convert threshold value units
threshold_h = threshold_h / self.ft2m
self.set_extent()
self.logger.info(" >>> Analyzing hyd (h only).")
h = FlowDepth(self.condition)
if any(str(e).__len__() > 0 for e in h.rasters):
self.ras_dth = self.compare_raster_set(h.rasters, threshold_h)
try:
self.ras_dth.extent # crashes if CellStatistics failed
try:
max_lf = float(max(self.lifespans))
self.logger.info(" * max. lifespan: " + str(max_lf))
except:
max_lf = 50.0
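                # Substitute the maximum lifespan wherever the depth-limited lifespan raster is noData.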
ras_h_new = Con((IsNull(self.ras_dth) == 1), (IsNull(self.ras_dth) * max_lf), Float(self.ras_dth))
self.ras_dth = ras_h_new
self.raster_info_lf = "ras_dth"
self.raster_dict_lf.update({self.raster_info_lf: self.ras_dth})
except:
pass
else:
self.logger.info(" * Nothing to do (no Rasters provided).")
@fGl.err_info
@fGl.spatial_license
def analyse_mobile_grains(self, threshold_taux):
# surface grain mobility frequency
self.set_extent()
self.logger.info(" >>> Analyzing Dcr (mobile grains) with n = " + str(self.n) + " " + self.n_label)
h = FlowDepth(self.condition)
u = FlowVelocity(self.condition)
Dmean = GrainSizes(self.condition) # in ft or m
Dcr_raster_list = []
for i in range(0, h.raster_names.__len__()):
if (str(u.rasters[i]).__len__() > 1) and (str(h.rasters[i]).__len__() > 1):
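                # Critical (just-mobile) grain size: D_cr = (n*u)^2 / ((s-1) * taux_cr * h^(1/3)), divided by the safety factor sf.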
__ras__ = (Square(u.rasters[i] * Float(self.n)) / (Float(self.s - 1) *
threshold_taux * Power(h.rasters[i], (1 / 3)))) / Float(self.sf)
Dcr_raster_list.append(__ras__)
else:
try:
self.logger.info(" * empty Raster operation for {0}-years lifespan".format(str(self.lifespans[i])))
except:
self.logger.info(" * empty Raster operation (missing lifespan definitions?!)")
if any(str(e).__len__() > 0 for e in Dcr_raster_list) and (str(Dmean.raster).__len__() > 0):
self.ras_Dcr = self.compare_raster_set(Dcr_raster_list, Dmean.raster)
try:
self.ras_Dcr.extent # crashes if CellStatistics failed
if not(self.threshold_freq == 0.0):
temp_ras = Con((self.ras_Dcr > self.threshold_freq), self.ras_Dcr)
self.ras_Dcr = temp_ras
if self.verify_raster_info():
self.logger.info(" * based on raster: " + self.raster_info_lf)
temp_ras_Dcr = Con((IsNull(self.ras_Dcr) == 1), (IsNull(self.ras_Dcr) * 0), self.ras_Dcr)
temp_ras_base = Con((IsNull(self.raster_dict_lf[self.raster_info_lf]) == 1),
(IsNull(self.raster_dict_lf[self.raster_info_lf]) * 0),
self.raster_dict_lf[self.raster_info_lf])
ras_Dcr_new = Con(((temp_ras_Dcr < temp_ras_base) & (temp_ras_Dcr > 0)),
self.ras_Dcr, self.raster_dict_lf[self.raster_info_lf])
self.ras_Dcr = ras_Dcr_new
self.raster_info_lf = "ras_Dcr"
self.raster_dict_lf.update({self.raster_info_lf: self.ras_Dcr})
except:
pass
else:
self.logger.info(" * Nothing to do (no Rasters provided).")
@fGl.err_info
@fGl.spatial_license
def analyse_mu(self, mu_bad, mu_good, method):
# morphological unit delineation
self.set_extent()
self.logger.info(" >>> Analyzing morphologic units.")
mu = MU(self.condition)
if str(mu.raster).__len__() > 1:
if method == 0:
self.logger.info(" MU: using exclusive method.")
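                # Exclusive method: flag pixels lying in any undesired unit, then invert (bad units -> 0, all others -> 1).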
try:
temp_dict = {}
for morph_unit in mu_bad:
temp_dict.update({morph_unit: Con((mu.raster == mu.mu_dict[morph_unit]), 1.0, 0)})
self.ras_mu = CellStatistics(fGl.dict_values2list(temp_dict.values()), "SUM", "DATA")
temp_ras = Con((self.ras_mu >= 1), 0, 1.0)
self.ras_mu = temp_ras
except:
self.logger.info("ERROR: Could not assign MU raster.")
if method == 1:
self.logger.info(" MU: using inclusive method.")
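                # Inclusive method: keep only pixels lying in at least one desired unit (1), zero elsewhere.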
try:
temp_dict = {}
for morph_unit in mu_good:
temp_dict.update({morph_unit: Con((mu.raster == mu.mu_dict[morph_unit]), 1.0, 0)})
self.ras_mu = CellStatistics(fGl.dict_values2list(temp_dict.values()), "SUM", "DATA")
temp_ras = Con((self.ras_mu >= 1), 1.0, 0)
self.ras_mu = temp_ras
except:
self.logger.info("ERROR: Could not assign MU raster.")
try:
self.logger.info(" -- mu_good: " + str(mu_good))
except:
self.logger.info(" -- bad mu_good list assignment.")
try:
self.logger.info(" -- method: " + str(method))
except:
self.logger.info(" -- bad method assignment.")
if not (method < 0):
# if no MU delineation applies: method == -1
if self.verify_raster_info():
self.logger.info(" * based on raster: " + self.raster_info_lf)
# make temp_ras without noData pixels for both ras_mu and ras_dict
temp_ras_mu = Con((IsNull(self.ras_mu) == 1), (IsNull(self.ras_mu) * 0), self.ras_mu)
temp_ras_di = Con((IsNull(self.raster_dict_lf[self.raster_info_lf]) == 1),
(IsNull(self.raster_dict_lf[self.raster_info_lf]) * 0),
self.raster_dict_lf[self.raster_info_lf])
# compare temp_ras with raster_dict but use self.ras_... values if condition is True
ras_mu_new = Con(((temp_ras_mu == 1.0) & (temp_ras_di > 0)), temp_ras_di)
self.ras_mu = ras_mu_new
self.raster_info_lf = "ras_mu"
self.raster_dict_lf.update({"ras_mu": self.ras_mu})
else:
self.logger.info(" --> skipped (threshold workbook has no valid entries for mu)")
else:
self.logger.info(" * Nothing to do (no Rasters provided).")
@fGl.err_info
@fGl.spatial_license
def analyse_scour(self, threshold_scour):
# analysis of scour rate as limiting parameter for feature survival as a function of a threshold value
# convert threshold value units
threshold_scour = threshold_scour / self.ft2m
self.set_extent()
self.logger.info(" >>> Analyzing tcd (scour only) ...")
dod = DoD(self.condition)
if str(dod.raster_scour).__len__() > 1:
if not(self.raster_dict_lf.items().__len__() > 0):
# routine to override noData pixels if required.
temp_scour = Con((IsNull(dod.raster_scour) == 1), (IsNull(dod.raster_scour) * 0), dod.raster_scour)
dod.raster_scour = temp_scour
if not self.inverse_tcd:
self.ras_tcd = Con((dod.raster_scour >= threshold_scour), 1.0, 0)
else:
self.ras_tcd = Con((dod.raster_scour < threshold_scour), 1.0)
if self.verify_raster_info():
self.logger.info(" * based on raster: " + self.raster_info_lf)
# make temp_ras without noData pixels
if not self.inverse_tcd:
try:
max_lf = float(max(self.lifespans))
self.logger.info(" * max. lifespan: " + str(max_lf))
except:
max_lf = 50.0
self.logger.info(
" * using default max. lifespan (error in input.inp definitions): " + str(
max_lf))
temp_ras = Con((IsNull(self.ras_tcd) == 1), (IsNull(self.ras_tcd) * max_lf), Float(self.ras_tcd))
# compare temp_ras with raster_dict but use self.ras_... values if condition is True
ras_tcd_new = Con((temp_ras == 1.0), self.ras_tcd, self.raster_dict_lf[self.raster_info_lf])
else:
ras_tcd_new = Con(((self.ras_tcd == 1.0) &
(self.raster_dict_lf[self.raster_info_lf] > self.threshold_freq)),
self.raster_dict_lf[self.raster_info_lf])
self.ras_tcd = ras_tcd_new
self.raster_info_lf = "ras_tcd"
self.raster_dict_lf.update({self.raster_info_lf: self.ras_tcd})
else:
self.logger.info(" >>> No DoD scour raster provided. Omitting analysis.")
@fGl.err_info
@fGl.spatial_license
def analyse_taux(self, threshold_taux):
self.set_extent()
self.logger.info(" >>> Analyzing taux.")
# Copy last RasterDataset to Output/Rasters folder
h = FlowDepth(self.condition) # thresholds are
u = FlowVelocity(self.condition)
grains = GrainSizes(self.condition)
if str(grains.raster).__len__() > 1:
tx_raster_list = []
for i in range(0, h.raster_names.__len__()):
if (str(u.rasters[i]).__len__() > 1) and (str(h.rasters[i]).__len__() > 1):
_q_ = fGl.read_Q_str(h.raster_names[i], prefix='h')
_name__ = 'tb' + fGl.write_Q_str(_q_) + '.tif'
name__ = 'ts' + fGl.write_Q_str(_q_) + '.tif'
# __ras__ = (self.rho_w * Square(u.rasters[i] / (5.75 * Log10(12.2 * h.rasters[i] /
# (2 * 2.2 * grains.raster))))) / (self.rho_w * self.g * (self.s - 1) * grains.raster)
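                    # Dimensionless bed shear stress: tau* = [u / (5.75 * log10(12.2*h / (2*2.2*D)))]^2 / (g * (s-1) * D); rho_w cancels.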
_ras__ = Square(u.rasters[i] / (5.75 * Log10(12.2 * h.rasters[i] / (2 * 2.2 * grains.raster))))
arcpy.CopyRaster_management(_ras__, self.output_ts + _name__)
__ras__ = (self.rho_w * _ras__) / (self.rho_w * self.g * (self.s - 1) * grains.raster)
arcpy.CopyRaster_management(__ras__, self.output_ts + name__)
tx_raster_list.append(__ras__)
else:
self.logger.info(" * empty Raster operation for {0} and {1}".format(str(u.rasters[i]), str(h.rasters[i])))
if any(str(e).__len__() > 0 for e in tx_raster_list):
self.ras_taux = self.compare_raster_set(tx_raster_list, threshold_taux)
try:
self.ras_taux.extent # crashes if CellStatistics failed
if self.verify_raster_info():
self.logger.info(" * based on raster: " + self.raster_info_lf)
try:
max_lf = float(max(self.lifespans))
self.logger.info(" * max. lifespan: " + str(max_lf))
except:
max_lf = 50.0
self.logger.info(
" * using default max. lifespan (error in input.inp definitions): " + str(max_lf))
# make temp_ras without noData pixels
temp_ras_base = Con((IsNull(self.raster_dict_lf[self.raster_info_lf]) == 1),
(IsNull(self.raster_dict_lf[self.raster_info_lf]) * 0),
Float(self.raster_dict_lf[self.raster_info_lf]))
temp_ras_tx = Con((IsNull(self.ras_taux) == 1), (IsNull(self.ras_taux) * max_lf), Float(self.ras_taux))
# compare temp_ras with raster_dict but use self.ras_... values if condition is True
ras_taux_new = Con((temp_ras_tx < temp_ras_base),
self.ras_taux, Float(self.raster_dict_lf[self.raster_info_lf]))
self.ras_taux = ras_taux_new
self.raster_info_lf = "ras_taux"
self.raster_dict_lf.update({"ras_taux": self.ras_taux})
except:
pass
else:
self.logger.info(" * Nothing to do (no Rasters provided).")
else:
self.logger.info(" * Nothing to do (no Rasters provided).")
@fGl.err_info
@fGl.spatial_license
def analyse_tcd(self, threshold_fill, threshold_scour):
# analysis of fill and scour rates as limiting parameters for feature survival as a function of thresholds
        # convert threshold value units
threshold_fill = threshold_fill / self.ft2m
threshold_scour = threshold_scour / self.ft2m
self.set_extent()
self.logger.info(" >>> Analyzing tcd (fill and scour).")
dod = DoD(self.condition)
if (str(dod.raster_fill).__len__() > 1) or (str(dod.raster_scour).__len__() > 1):
if not(self.raster_dict_lf.items().__len__() > 0):
# routine to override noData pixels -- applies when raster_dict_lf is still empty
temp_fill = Con((IsNull(dod.raster_fill) == 1), (IsNull(dod.raster_fill) * 0), dod.raster_fill)
dod.raster_fill = temp_fill
temp_scour = Con((IsNull(dod.raster_scour) == 1), (IsNull(dod.raster_scour) * 0), dod.raster_scour)
dod.raster_scour = temp_scour
if not self.inverse_tcd:
self.ras_tcd = Con(((dod.raster_fill >= threshold_fill) | (dod.raster_scour >= threshold_scour)), Float(1.0), Float(0.0))
else:
self.ras_tcd = Con(((dod.raster_fill < threshold_fill) | (dod.raster_scour < threshold_scour)), Float(1.0), Float(0.0))
if self.verify_raster_info():
self.logger.info(" * based on raster: " + self.raster_info_lf)
# make temp_ras without noData pixels
try:
max_lf = float(max(self.lifespans))
self.logger.info(" * max. lifespan: " + str(max_lf))
except:
max_lf = 50.0
self.logger.info(
" * using default max. lifespan (error in input.inp definitions): " + str(max_lf))
temp_ras = Con((IsNull(self.ras_tcd) == 1), (IsNull(self.ras_tcd) * max_lf), Float(self.ras_tcd))
# compare temp_ras with raster_dict | |
# hoa_chargen.py
#
# Heroes of Aegypt! Character Generator
#
###########################################
"""
HOA Chargen 0.0.1 Beta
-----------------------------------------------------------------------
This program generates characters for the Expedition to Ancient Aegypt! RPG.
"""
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
import time
from mainwindow_001b import Ui_MainWindow
from aboutdialog_001b import Ui_aboutDialog
from alertdialog_001b import Ui_alertDialog
from savedialog_001b import Ui_saveDialog
import sys
import os
import logging
import json
from fpdf import FPDF
__author__ = '<NAME> <<EMAIL>>\n<EMAIL>'
__app__ = 'HOA CharGen 0.0.1 (Beta)'
__version__ = '0.0.1b'
__expired_tag__ = False
class aboutDialog(QDialog, Ui_aboutDialog):
def __init__(self):
'''
Open the About dialog window
'''
super().__init__()
log.info('PyQt5 aboutDialog initializing...')
self.setWindowFlags(Qt.Drawer | Qt.WindowStaysOnTopHint)
self.setupUi(self)
self.aboutOKButton.clicked.connect(self.acceptOKButtonClicked)
log.info('PyQt5 aboutDialog initialized.')
def acceptOKButtonClicked(self):
'''
Close the About dialog window
'''
log.info('PyQt5 aboutDialog closing...')
self.close()
class alertDialog(QDialog, Ui_alertDialog):
def __init__(self):
'''
Open the Alert dialog window
'''
super().__init__()
log.info('PyQt5 alertDialog initializing...')
self.setWindowFlags(Qt.Drawer | Qt.WindowStaysOnTopHint)
self.setupUi(self)
self.aboutOKButton.clicked.connect(self.acceptOKButtonClicked)
log.info('PyQt5 alertDialog initialized.')
def acceptOKButtonClicked(self):
'''
Close the Alert dialog window
'''
log.info('PyQt5 alertDialog closing...')
self.close()
class saveDialog(QDialog, Ui_saveDialog):
def __init__(self):
'''
Open the Save dialog window
'''
super().__init__()
log.info('PyQt5 saveDialog initializing...')
self.setWindowFlags(Qt.Drawer | Qt.WindowStaysOnTopHint)
self.setupUi(self)
self.saveOKButton.clicked.connect(self.acceptOKButtonClicked)
self.saveDisplay.setText('Character saved.')
log.info('PyQt5 saveDialog initialized.')
def acceptOKButtonClicked(self):
'''
Close the Save dialog window
'''
log.info('PyQt5 saveDialog closing...')
self.close()
class MainWindow(QMainWindow, Ui_MainWindow):
def __init__(self):
'''
Display the main app window.
Connect all the buttons to their functions.
Initialize their value ranges.
'''
super().__init__()
log.info('PyQt5 MainWindow initializing...')
self.setupUi(self)
self.actionAbout_HOA_CharGen.triggered.connect(self.actionAbout_triggered)
self.actionQuitProg.triggered.connect(self.actionQuitProg_triggered)
self.bodyScore.valueChanged.connect(self.bodyScore_valueChanged)
self.clearButton.clicked.connect(self.clearButton_clicked)
self.actionClear.triggered.connect(self.clearButton_clicked)
self.loadButton.clicked.connect(self.loadButton_clicked)
self.actionLoad.triggered.connect(self.loadButton_clicked)
self.saveButton.clicked.connect(self.saveButton_clicked)
self.actionSave.triggered.connect(self.saveButton_clicked)
self.printButton.clicked.connect(self.printButton_clicked)
self.actionPrint.triggered.connect(self.printButton_clicked)
self.actionVisit_Blog.triggered.connect(self.Visit_Blog)
self.actionFeedback.triggered.connect(self.Feedback)
self.actionOverview.triggered.connect(self.Overview_menu)
self.mindScore.valueChanged.connect(self.mindScore_valueChanged)
self.spiritScore.valueChanged.connect(self.spiritScore_valueChanged)
self.agilitySkill.setDisabled(True)
self.beautySkill.setDisabled(True)
self.strengthSkill.setDisabled(True)
self.knowledgeSkill.setDisabled(True)
self.perceptionSkill.setDisabled(True)
self.technologySkill.setDisabled(True)
self.charismaSkill.setDisabled(True)
self.empathySkill.setDisabled(True)
self.focusSkill.setDisabled(True)
self.boxingSkill.setDisabled(True)
self.meleeSkill.setDisabled(True)
self.rangedSkill.setDisabled(True)
self.artSkill.setDisabled(True)
self.languagesSkill.setDisabled(True)
self.scienceSkill.setDisabled(True)
self.blessSkill.setDisabled(True)
self.exorcismSkill.setDisabled(True)
self.healingSkill.setDisabled(True)
self.demonologySkill.setDisabled(True)
self.metamorphosisSkill.setDisabled(True)
self.necromancySkill.setDisabled(True)
self.clairvoyanceSkill.setDisabled(True)
self.psychokinesisSkill.setDisabled(True)
self.telepathySkill.setDisabled(True)
self.saveButton.setDisabled(True)
self.actionSave.setDisabled(True)
self.printButton.setDisabled(True)
self.actionPrint.setDisabled(True)
self.charnameEdit.setDisabled(True)
self.ageEdit.setDisabled(True)
self.genderEdit.setDisabled(True)
self.rankBox.setDisabled(True)
self.deptBox.setDisabled(True)
self.levelBox.setDisabled(True)
self.xpEdit.setDisabled(True)
self.agilitySkill.valueChanged.connect(self.agilitySkill_valueChanged)
self.beautySkill.valueChanged.connect(self.beautySkill_valueChanged)
self.strengthSkill.valueChanged.connect(self.strengthSkill_valueChanged)
self.knowledgeSkill.valueChanged.connect(self.knowledgeSkill_valueChanged)
self.perceptionSkill.valueChanged.connect(self.perceptionSkill_valueChanged)
self.technologySkill.valueChanged.connect(self.technologySkill_valueChanged)
self.charismaSkill.valueChanged.connect(self.charismaSkill_valueChanged)
self.empathySkill.valueChanged.connect(self.empathySkill_valueChanged)
self.focusSkill.valueChanged.connect(self.focusSkill_valueChanged)
self.boxingSkill.valueChanged.connect(self.boxingSkill_valueChanged)
self.meleeSkill.valueChanged.connect(self.meleeSkill_valueChanged)
self.rangedSkill.valueChanged.connect(self.rangedSkill_valueChanged)
self.artSkill.valueChanged.connect(self.artSkill_valueChanged)
self.languagesSkill.valueChanged.connect(self.languagesSkill_valueChanged)
self.scienceSkill.valueChanged.connect(self.scienceSkill_valueChanged)
self.blessSkill.valueChanged.connect(self.blessSkill_valueChanged)
self.exorcismSkill.valueChanged.connect(self.exorcismSkill_valueChanged)
self.healingSkill.valueChanged.connect(self.healingSkill_valueChanged)
self.demonologySkill.valueChanged.connect(self.demonologySkill_valueChanged)
self.metamorphosisSkill.valueChanged.connect(self.metamorphosisSkill_valueChanged)
self.necromancySkill.valueChanged.connect(self.necromancySkill_valueChanged)
self.clairvoyanceSkill.valueChanged.connect(self.clairvoyanceSkill_valueChanged)
self.psychokinesisSkill.valueChanged.connect(self.psychokinesisSkill_valueChanged)
self.telepathySkill.valueChanged.connect(self.telepathySkill_valueChanged)
self.charnameEdit.setText('Sample Char')
self.languageDisplay.setText('')
self.rewardDisplay.setText('None')
self.armorDisplay.setPlainText('None')
self.weaponDisplay.setPlainText('None')
self.starting_items = 'Sandals'
self.itemsDisplay.setPlainText(self.starting_items)
self.specialDisplay.setPlainText('None')
self.traitsDisplay.setPlainText('')
self.backstoryDisplay.setPlainText('')
self.notesDisplay.setPlainText('')
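        # Parallel lookup lists: the index picked in rankBox/deptBox selects the matching starting language, skill group and items.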
self.rank_choice = ['Choose', 'Aegypt', 'Nubia', 'Babylon', 'Troy', 'Greece', 'Atlantis']
self.rank_language = ['', 'Egyptian', 'Egyptian', 'Babylonian', 'Greek', 'Greek', 'Minoan']
for i in self.rank_choice:
self.rankBox.addItem(i)
self.rankBox.setCurrentIndex(0)
self.rankBox.currentIndexChanged.connect(self.rankBox_changed)
self.dept_choice = ['Choose', 'Worker', 'Architect', 'Royalty', 'Warrior', 'Philosopher', 'Magi', 'Witch', 'Astrologer']
for i in self.dept_choice:
self.deptBox.addItem(i)
self.deptBox.setCurrentIndex(0)
self.deptBox.currentIndexChanged.connect(self.deptBox_changed)
self.dept_skill = ['', 'Body', 'Mind', 'Spirit', 'Combat', 'Strange', 'Divine', 'Occult', 'Psionic']
self.dept_item = ['', 'Tunic, Waterskin', 'Tunic, Tool Kit', 'Robes, Sword', 'Tunic, Spear', 'Toga, Scroll', 'Robes, Staff', 'Tunic, Dagger', 'Robes, Incense']
self.rank_not_chosen = True
self.department_not_chosen = True
self.char_level = 1
self.levelBox.addItem('1')
self.levelBox.addItem('2')
self.levelBox.addItem('3')
self.levelBox.addItem('4')
self.levelBox.addItem('5')
self.levelBox.setCurrentIndex(0)
self.levelBox.currentIndexChanged.connect(self.levelBox_changed)
self.char_xp = 0
self.game_name = 'EXPEDITION to ANCIENT AEGYPT!'
self.char_folder = 'Heroes of Aegypt Characters'
self.file_extension = '.tps'
self.file_format = 1.3
# Set the About menu item
self.popAboutDialog = aboutDialog()
# Set the Alert menu item
self.popAlertDialog=alertDialog()
# Set the Save menu item
self.popSaveDialog=saveDialog()
log.info('PyQt5 MainWindow initialized.')
if __expired_tag__ is True:
'''
Beta for this app has expired!
'''
log.warning(__app__ + ' expiration detected...')
self.alert_window()
'''
display alert message and disable all the things
'''
self.clearButton.setDisabled(True)
self.actionClear.setDisabled(True)
self.saveButton.setDisabled(True)
self.actionSave.setDisabled(True)
self.loadButton.setDisabled(True)
self.actionLoad.setDisabled(True)
self.printButton.setDisabled(True)
self.actionPrint.setDisabled(True)
self.actionVisit_Blog.setDisabled(True)
self.actionFeedback.setDisabled(True)
self.actionOverview.setDisabled(True)
self.actionAbout_HOA_CharGen.setDisabled(True)
self.bodyScore.setDisabled(True)
self.mindScore.setDisabled(True)
self.spiritScore.setDisabled(True)
self.additional1Display.setDisabled(True)
self.agilitySkill.setDisabled(True)
self.beautySkill.setDisabled(True)
self.strengthSkill.setDisabled(True)
self.knowledgeSkill.setDisabled(True)
self.perceptionSkill.setDisabled(True)
self.technologySkill.setDisabled(True)
self.charismaSkill.setDisabled(True)
self.empathySkill.setDisabled(True)
self.focusSkill.setDisabled(True)
self.boxingSkill.setDisabled(True)
self.meleeSkill.setDisabled(True)
self.rangedSkill.setDisabled(True)
self.artSkill.setDisabled(True)
self.languagesSkill.setDisabled(True)
self.scienceSkill.setDisabled(True)
self.blessSkill.setDisabled(True)
self.exorcismSkill.setDisabled(True)
self.healingSkill.setDisabled(True)
self.demonologySkill.setDisabled(True)
self.metamorphosisSkill.setDisabled(True)
self.necromancySkill.setDisabled(True)
self.clairvoyanceSkill.setDisabled(True)
self.psychokinesisSkill.setDisabled(True)
self.telepathySkill.setDisabled(True)
self.additional2Display.setDisabled(True)
self.charnameEdit.setDisabled(True)
self.ageEdit.setDisabled(True)
self.genderEdit.setDisabled(True)
self.rankBox.setDisabled(True)
self.deptBox.setDisabled(True)
self.levelBox.setDisabled(True)
self.xpEdit.setDisabled(True)
self.armorDisplay.setDisabled(True)
self.weaponDisplay.setDisabled(True)
self.itemsDisplay.setDisabled(True)
self.specialDisplay.setDisabled(True)
self.traitsDisplay.setDisabled(True)
self.backstoryDisplay.setDisabled(True)
self.notesDisplay.setDisabled(True)
else:
'''
Create .tpsrpg folder and tps.ini file the first time this program is run.
Also, create the save folder for this program to save its .tps files in.
'''
self.temp_dir = os.path.expanduser('~')
os.chdir(self.temp_dir)
if not os.path.exists('.tpsrpg'):
os.mkdir('.tpsrpg')
            os.chdir(os.path.join(self.temp_dir, '.tpsrpg'))
if not os.path.exists(self.char_folder):
os.mkdir(self.char_folder)
log.info(self.char_folder + ' folder created')
if not os.path.exists('tps.ini'):
with open('tps.ini', 'w') as f:
f.write('[CharGen Folders]\n')
f.write(self.char_folder + '\n')
log.info('tps.ini created and initialized')
else:
self.contains_foldername = False
with open('tps.ini', 'r') as f:
if self.char_folder in f.read():
self.contains_foldername = True
if not self.contains_foldername:
with open('tps.ini', 'a') as f:
f.write(self.char_folder + '\n')
log.info(self.char_folder + ' added to TPS folder list')
# Initialize Attribute Scores
self.body = 0
self.mind = 1
self.spirit = 2
self.attribute_name = ['BODY', 'MIND', 'SPIRIT']
self.attribute_score = [1, 1, 1]
# Initialize Status Levels
self.health = 0
self.sanity = 1
self.morale = 2
self.status_name = ['HEALTH', 'SANITY', 'MORALE']
self.status_level = [2, 2, 2]
self.bodyScore.setValue(self.attribute_score[self.body])
self.mindScore.setValue(self.attribute_score[self.mind])
self.spiritScore.setValue(self.attribute_score[self.spirit])
self.tempbodyScore = self.bodyScore.value()
self.tempmindScore = self.mindScore.value()
self.tempspiritScore = self.spiritScore.value()
self.additional_attribute_points = 3
self.additional1Display.setText(str(self.additional_attribute_points))
self.healthDisplay.setText(str(self.status_level[self.health] + self.attribute_score[self.body]))
self.sanityDisplay.setText(str(self.status_level[self.sanity] + self.attribute_score[self.mind]))
self.moraleDisplay.setText(str(self.status_level[self.morale] + self.attribute_score[self.spirit]))
# Initialize Skill Levels
self.agilitySkill.setValue(0)
self.beautySkill.setValue(0)
self.strengthSkill.setValue(0)
self.knowledgeSkill.setValue(0)
self.perceptionSkill.setValue(0)
self.technologySkill.setValue(0)
self.charismaSkill.setValue(0)
self.empathySkill.setValue(0)
self.focusSkill.setValue(0)
self.boxingSkill.setValue(0)
self.meleeSkill.setValue(0)
self.rangedSkill.setValue(0)
self.artSkill.setValue(0)
self.languagesSkill.setValue(0)
self.scienceSkill.setValue(0)
self.blessSkill.setValue(0)
self.exorcismSkill.setValue(0)
self.healingSkill.setValue(0)
self.demonologySkill.setValue(0)
self.metamorphosisSkill.setValue(0)
self.necromancySkill.setValue(0)
self.clairvoyanceSkill.setValue(0)
self.psychokinesisSkill.setValue(0)
self.telepathySkill.setValue(0)
self.tempagilitySkill = self.agilitySkill.value()
self.tempbeautySkill = self.beautySkill.value()
self.tempstrengthSkill = self.strengthSkill.value()
self.tempknowledgeSkill = self.knowledgeSkill.value()
self.tempperceptionSkill = self.perceptionSkill.value()
self.temptechnologySkill = self.technologySkill.value()
self.tempcharismaSkill = self.charismaSkill.value()
self.tempempathySkill = self.empathySkill.value()
self.tempfocusSkill = self.focusSkill.value()
self.tempboxingSkill = self.boxingSkill.value()
self.tempmeleeSkill = self.meleeSkill.value()
self.temprangedSkill = self.rangedSkill.value()
self.tempartSkill = self.artSkill.value()
self.templanguagesSkill = self.languagesSkill.value()
self.tempscienceSkill = self.scienceSkill.value()
self.tempblessSkill = self.blessSkill.value()
self.tempexorcismSkill = self.exorcismSkill.value()
self.temphealingSkill = self.healingSkill.value()
self.tempdemonologySkill = self.demonologySkill.value()
self.tempmetamorphosisSkill = self.metamorphosisSkill.value()
self.tempnecromancySkill = self.necromancySkill.value()
self.tempclairvoyanceSkill = self.clairvoyanceSkill.value()
self.temppsychokinesisSkill = self.psychokinesisSkill.value()
self.temptelepathySkill = self.telepathySkill.value()
self.additional_skill_points = 12
self.additional2Display.setText(str(self.additional_skill_points))
# Initialize Movement and Range
self.encumbranceDisplay.setText(str(1 + self.bodyScore.value() + self.strengthSkill.value()) + ' items')
self.movementDisplay.setText(str(1 + self.bodyScore.value() + self.agilitySkill.value()) + ' spaces')
self.rangeDisplay.setText(str(1 + self.bodyScore.value() + self.strengthSkill.value()) + ' miles')
def clearButton_clicked(self):
'''
Clear all the fields
'''
log.info('Clear all fields')
self.status_level = [2, 2, 2]
self.bodyScore.setValue(self.attribute_score[self.body])
self.mindScore.setValue(self.attribute_score[self.mind])
self.spiritScore.setValue(self.attribute_score[self.spirit])
self.tempbodyScore = self.bodyScore.value()
self.tempmindScore = self.mindScore.value()
self.tempspiritScore = self.spiritScore.value()
self.additional_attribute_points = 3
self.additional1Display.setText(str(self.additional_attribute_points))
self.healthDisplay.setText(str(self.status_level[self.health] + self.attribute_score[self.body]))
self.sanityDisplay.setText(str(self.status_level[self.sanity] + self.attribute_score[self.mind]))
self.moraleDisplay.setText(str(self.status_level[self.morale] + self.attribute_score[self.spirit]))
self.agilitySkill.setValue(0)
self.beautySkill.setValue(0)
self.strengthSkill.setValue(0)
self.knowledgeSkill.setValue(0)
self.perceptionSkill.setValue(0)
self.technologySkill.setValue(0)
self.charismaSkill.setValue(0)
self.empathySkill.setValue(0)
self.focusSkill.setValue(0)
self.boxingSkill.setValue(0)
self.meleeSkill.setValue(0)
self.rangedSkill.setValue(0)
self.artSkill.setValue(0)
self.languagesSkill.setValue(0)
self.scienceSkill.setValue(0)
self.blessSkill.setValue(0)
self.exorcismSkill.setValue(0)
self.healingSkill.setValue(0)
self.demonologySkill.setValue(0)
self.metamorphosisSkill.setValue(0)
self.necromancySkill.setValue(0)
self.clairvoyanceSkill.setValue(0)
self.psychokinesisSkill.setValue(0)
self.telepathySkill.setValue(0)
self.tempagilitySkill = self.agilitySkill.value()
self.tempbeautySkill = self.beautySkill.value()
self.tempstrengthSkill = self.strengthSkill.value()
self.tempknowledgeSkill = self.knowledgeSkill.value()
self.tempperceptionSkill = self.perceptionSkill.value()
self.temptechnologySkill = self.technologySkill.value()
self.tempcharismaSkill = self.charismaSkill.value()
self.tempempathySkill = self.empathySkill.value()
self.tempfocusSkill = self.focusSkill.value()
self.tempboxingSkill = self.boxingSkill.value()
self.tempmeleeSkill = self.meleeSkill.value()
self.temprangedSkill = self.rangedSkill.value()
self.tempartSkill = self.artSkill.value()
self.templanguagesSkill = self.languagesSkill.value()
self.tempscienceSkill = self.scienceSkill.value()
self.tempblessSkill = self.blessSkill.value()
self.tempexorcismSkill = self.exorcismSkill.value()
self.temphealingSkill = self.healingSkill.value()
self.tempdemonologySkill = self.demonologySkill.value()
self.tempmetamorphosisSkill = self.metamorphosisSkill.value()
self.tempnecromancySkill = self.necromancySkill.value()
self.tempclairvoyanceSkill = self.clairvoyanceSkill.value()
self.temppsychokinesisSkill = self.psychokinesisSkill.value()
self.temptelepathySkill = self.telepathySkill.value()
self.rankBox.setCurrentIndex(0)
self.deptBox.setCurrentIndex(0)
self.rank_not_chosen = True
self.department_not_chosen = True
self.levelBox.setCurrentIndex(0)
self.agilitySkill.setDisabled(True)
self.beautySkill.setDisabled(True)
self.strengthSkill.setDisabled(True)
self.knowledgeSkill.setDisabled(True)
self.perceptionSkill.setDisabled(True)
self.technologySkill.setDisabled(True)
self.charismaSkill.setDisabled(True)
self.empathySkill.setDisabled(True)
self.focusSkill.setDisabled(True)
self.boxingSkill.setDisabled(True)
self.meleeSkill.setDisabled(True)
self.rangedSkill.setDisabled(True)
self.artSkill.setDisabled(True)
self.languagesSkill.setDisabled(True)
self.scienceSkill.setDisabled(True)
self.blessSkill.setDisabled(True)
self.exorcismSkill.setDisabled(True)
self.healingSkill.setDisabled(True)
self.demonologySkill.setDisabled(True)
self.metamorphosisSkill.setDisabled(True)
self.necromancySkill.setDisabled(True)
self.clairvoyanceSkill.setDisabled(True)
self.psychokinesisSkill.setDisabled(True)
self.telepathySkill.setDisabled(True)
self.additional_skill_points = 12
self.additional2Display.setText(str(self.additional_skill_points))
self.rankBox.setDisabled(True)
self.deptBox.setDisabled(True)
self.levelBox.setDisabled(True)
self.charnameEdit.setText('')
self.charnameEdit.setDisabled(True)
self.ageEdit.setText('')
self.ageEdit.setDisabled(True)
self.genderEdit.setText('')
self.genderEdit.setDisabled(True)
self.languageDisplay.setText('')
self.rewardDisplay.setText('None')
self.healthStatus.setText('')
self.sanityStatus.setText('')
self.moraleStatus.setText('')
self.bodyScore.setDisabled(False)
self.mindScore.setDisabled(False)
self.spiritScore.setDisabled(False)
self.armorDisplay.setPlainText('None')
self.weaponDisplay.setPlainText('None')
self.itemsDisplay.setPlainText(self.starting_items)
self.specialDisplay.setPlainText('None')
self.traitsDisplay.setPlainText('')
self.backstoryDisplay.setPlainText('')
self.notesDisplay.setPlainText('')
self.char_level = 1
self.char_xp = 0
def bodyScore_valueChanged(self):
'''
A Body Score was entered.
        Add/subtract from additional Attribute points.
'''
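        # Derived stats: encumbrance and range scale with Body + Strength, movement with Body + Agility.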
self.encumbranceDisplay.setText(str(1 + self.bodyScore.value() + self.strengthSkill.value()) + ' items')
self.movementDisplay.setText(str(1 + self.bodyScore.value() + self.agilitySkill.value()) + ' spaces')
self.rangeDisplay.setText(str(1 + self.bodyScore.value() + self.strengthSkill.value()) + ' miles')
self.additional_attribute_points += self.tempbodyScore - self.bodyScore.value()
if self.additional_attribute_points >= 0:
self.additional1Display.setText(str(self.additional_attribute_points))
else:
self.additional1Display.setText('<span style=" color:#ff0000;">' + str(self.additional_attribute_points) + '</span>')
self.tempbodyScore = self.bodyScore.value()
self.healthDisplay.setText(str(self.status_level[self.health] + self.bodyScore.value()))
if self.additional_attribute_points == 0:
self.agilitySkill.setDisabled(False)
self.beautySkill.setDisabled(False)
self.strengthSkill.setDisabled(False)
self.knowledgeSkill.setDisabled(False)
self.perceptionSkill.setDisabled(False)
self.technologySkill.setDisabled(False)
self.charismaSkill.setDisabled(False)
self.empathySkill.setDisabled(False)
self.focusSkill.setDisabled(False)
self.boxingSkill.setDisabled(False)
self.meleeSkill.setDisabled(False)
self.rangedSkill.setDisabled(False)
self.artSkill.setDisabled(False)
self.languagesSkill.setDisabled(False)
self.scienceSkill.setDisabled(False)
# self.clairvoyanceSkill.setDisabled(False)
# self.psychokinesisSkill.setDisabled(False)
# self.telepathySkill.setDisabled(False)
else:
self.agilitySkill.setDisabled(True)
self.beautySkill.setDisabled(True)
self.strengthSkill.setDisabled(True)
self.knowledgeSkill.setDisabled(True)
self.perceptionSkill.setDisabled(True)
self.technologySkill.setDisabled(True)
self.charismaSkill.setDisabled(True)
self.empathySkill.setDisabled(True)
self.focusSkill.setDisabled(True)
self.boxingSkill.setDisabled(True)
self.meleeSkill.setDisabled(True)
self.rangedSkill.setDisabled(True)
self.artSkill.setDisabled(True)
self.languagesSkill.setDisabled(True)
self.scienceSkill.setDisabled(True)
# self.clairvoyanceSkill.setDisabled(True)
# self.psychokinesisSkill.setDisabled(True)
# self.telepathySkill.setDisabled(True)
def mindScore_valueChanged(self):
'''
A Mind Score was entered.
        Add/subtract from additional Attribute points.
'''
self.additional_attribute_points += self.tempmindScore - self.mindScore.value()
if self.additional_attribute_points >= 0:
self.additional1Display.setText(str(self.additional_attribute_points))
else:
self.additional1Display.setText('<span style=" color:#ff0000;">' + str(self.additional_attribute_points) + '</span>')
self.tempmindScore = self.mindScore.value()
self.sanityDisplay.setText(str(self.status_level[self.sanity] + self.mindScore.value()))
if self.additional_attribute_points == 0:
self.agilitySkill.setDisabled(False)
self.beautySkill.setDisabled(False)
self.strengthSkill.setDisabled(False)
self.knowledgeSkill.setDisabled(False)
self.perceptionSkill.setDisabled(False)
self.technologySkill.setDisabled(False)
self.charismaSkill.setDisabled(False)
self.empathySkill.setDisabled(False)
self.focusSkill.setDisabled(False)
self.boxingSkill.setDisabled(False)
self.meleeSkill.setDisabled(False)
self.rangedSkill.setDisabled(False)
self.artSkill.setDisabled(False)
self.languagesSkill.setDisabled(False)
self.scienceSkill.setDisabled(False)
# self.clairvoyanceSkill.setDisabled(False)
# self.psychokinesisSkill.setDisabled(False)
# self.telepathySkill.setDisabled(False)
else:
self.agilitySkill.setDisabled(True)
self.beautySkill.setDisabled(True)
self.strengthSkill.setDisabled(True)
self.knowledgeSkill.setDisabled(True)
self.perceptionSkill.setDisabled(True)
self.technologySkill.setDisabled(True)
self.charismaSkill.setDisabled(True)
self.empathySkill.setDisabled(True)
self.focusSkill.setDisabled(True)
self.boxingSkill.setDisabled(True)
self.meleeSkill.setDisabled(True)
self.rangedSkill.setDisabled(True)
self.artSkill.setDisabled(True)
self.languagesSkill.setDisabled(True)
self.scienceSkill.setDisabled(True)
# self.clairvoyanceSkill.setDisabled(True)
# self.psychokinesisSkill.setDisabled(True)
# self.telepathySkill.setDisabled(True)
def spiritScore_valueChanged(self):
'''
A Spirit Score was entered.
        Add/subtract from additional Attribute points.
'''
self.additional_attribute_points += self.tempspiritScore - self.spiritScore.value()
if self.additional_attribute_points >= 0:
self.additional1Display.setText(str(self.additional_attribute_points))
else:
self.additional1Display.setText('<span style=" color:#ff0000;">' + str(self.additional_attribute_points) + '</span>')
self.tempspiritScore = self.spiritScore.value()
self.moraleDisplay.setText(str(self.status_level[self.morale] + self.spiritScore.value()))
if self.additional_attribute_points == 0:
self.agilitySkill.setDisabled(False)
self.beautySkill.setDisabled(False)
self.strengthSkill.setDisabled(False)
self.knowledgeSkill.setDisabled(False)
self.perceptionSkill.setDisabled(False)
self.technologySkill.setDisabled(False)
self.charismaSkill.setDisabled(False)
self.empathySkill.setDisabled(False)
self.focusSkill.setDisabled(False)
self.boxingSkill.setDisabled(False)
self.meleeSkill.setDisabled(False)
self.rangedSkill.setDisabled(False)
self.artSkill.setDisabled(False)
self.languagesSkill.setDisabled(False)
self.scienceSkill.setDisabled(False)
# self.clairvoyanceSkill.setDisabled(False)
# self.psychokinesisSkill.setDisabled(False)
# self.telepathySkill.setDisabled(False)
else:
self.agilitySkill.setDisabled(True)
| |
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
from torchvision.ops import RoIAlign, RoIPool, nms
import numpy as np
from utils.config import cfg
from nets.generate_anchors import generate_anchors_pre
from nets.proposal_layer import proposal_layer
from nets.anchor_target_layer import anchor_target_layer
from nets.proposal_target_layer import proposal_target_layer
from nets.bbox_transform import bbox_transform_inv, clip_boxes, bbox_overlaps
from models.pytorch_i3d import InceptionI3d
from models.i3dpt import I3D
class Transformer(nn.Module):
def __init__(self, in_channels):
super(Transformer, self).__init__()
self.dropout_rate = 0.3
self.hidden_size = 2048
self.in_channels = in_channels
self.dropout1 = nn.Dropout(self.dropout_rate)
self.layer_norm1 = nn.LayerNorm(in_channels)
self.dropout2 = nn.Dropout(self.dropout_rate)
self.layer_norm2 = nn.LayerNorm(in_channels)
self.fc1 = nn.Linear(in_channels, self.hidden_size)
self.dropout3 = nn.Dropout(self.dropout_rate)
self.fc2 = nn.Linear(self.hidden_size, in_channels)
self.init_weights()
def forward(self, roi_pool, video_feature):
query = roi_pool.contiguous().view(-1, self.in_channels)
key = video_feature.view(self.in_channels, -1)
value = video_feature.view(-1, self.in_channels)
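        # Scaled dot-product attention: RoI features are the queries, the flattened video feature map supplies keys and values.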
out = torch.matmul(query, key) / float(pow(self.in_channels, 0.5))
attn = F.softmax(out, dim=-1)
out = torch.matmul(attn, value)
out = self.layer_norm1(query + self.dropout1(out))
out = self.fc2(F.relu(self.fc1(out)))
out = self.layer_norm2(out + self.dropout2(out))
return out.unsqueeze(dim=2).unsqueeze(dim=3)
def init_weights(self):
def normal_init(m, mean, stddev, truncated=False):
"""
            weight initializer: truncated normal and random normal.
"""
# x is a parameter
if truncated:
m.weight.data.normal_().fmod_(2).mul_(stddev).add_(mean) # not a perfect approximation
else:
m.weight.data.normal_(mean, stddev)
m.bias.data.zero_()
normal_init(self.fc1, 0, 0.01, cfg.TRAIN.TRUNCATED)
normal_init(self.fc2, 0, 0.01, cfg.TRAIN.TRUNCATED)
class TransformerBlock(nn.Module):
def __init__(self, in_channels):
super(TransformerBlock, self).__init__()
self.hidden_size = 128
self.transformer1 = Transformer(self.hidden_size // 2)
self.transformer2 = Transformer(self.hidden_size // 2)
self.linear_projection_3d = nn.Conv3d(in_channels, self.hidden_size, kernel_size=(1, 1, 1))
def forward(self, roi_pool, video_feature):
projection_video_feature = self.linear_projection_3d(F.relu(video_feature))
roi_pool1, roi_pool2 = torch.chunk(roi_pool, chunks=2, dim=1)
video_feature1, video_feature2 = torch.chunk(projection_video_feature, chunks=2, dim=1)
out1 = self.transformer1(roi_pool1, video_feature1)
out2 = self.transformer2(roi_pool2, video_feature2)
out = torch.cat((out1, out2), dim=1)
return out
class Network(nn.Module):
def __init__(self):
super(Network, self).__init__()
self._num_classes = 81
self._device = 'cuda'
self._layers = {}
self._predictions = {}
self._proposal_targets = {}
self._anchor_targets = {}
self._losses = {}
self._check = False
self._feat_stride = [
16,
]
self._feat_compress = [
1. / float(self._feat_stride[0]),
]
# self._net_conv_channels = 1024
self._net_conv_channels = 832
self._linear_projection_channels = 128
def _init_head_tail(self, pretrained='Kinetics'):
if pretrained == 'ImageNet':
self.i3d = InceptionI3d()
elif pretrained == 'Kinetics':
self.i3d = I3D(num_classes=400, modality='rgb')
for param in self.i3d.parameters():
param.requires_grad = True
for m in self.i3d.modules():
if isinstance(m, nn.BatchNorm3d):
m.weight.requires_grad = False
m.bias.requires_grad = False
def _image_to_head(self, pretrained='Kinetics'):
if pretrained == 'ImageNet':
net_conv = self.i3d.extract_features(self._image)
elif pretrained == 'Kinetics':
net_conv = self.i3d(self._image)
return net_conv[:, :, 7, :, :], net_conv
def load_pretrained_cnn(self, pretrained='Kinetics'):
if pretrained == 'ImageNet':
self.i3d.load_state_dict(torch.load('./weight/rgb_imagenet.pt'))
elif pretrained == 'Kinetics':
self.i3d.load_state_dict(torch.load('./weight/model_rgb.pth'))
def create_architecture(self, num_classes, tag=None, anchor_scales=(8, 16, 32), anchor_ratios=(0.5, 1, 2), pretrained='Kinetics'):
self._tag = tag
self._num_classes = num_classes
self._anchor_scales = anchor_scales
self._num_scales = len(anchor_scales)
self._anchor_ratios = anchor_ratios
self._num_ratios = len(anchor_ratios)
self._num_anchors = self._num_scales * self._num_ratios
        assert tag is not None
# Initialize layers
self._init_modules(pretrained)
def _init_modules(self, pretrained='Kinetics'):
self._init_head_tail(pretrained)
# rpn
self.rpn_net = nn.Conv2d(cfg.RPN_CHANNELS, 512, kernel_size=3, padding=1)
# self.linear_projection_3d = nn.Conv3d(cfg.RPN_CHANNELS, 128, kernel_size=(1, 1, 1))
self.linear_projection_2d = nn.Conv2d(cfg.RPN_CHANNELS, 128, kernel_size=1)
self.up_scale_2d = nn.Conv2d(128, cfg.RPN_CHANNELS, kernel_size=1)
self.rpn_cls_score_net = nn.Conv2d(512, self._num_anchors * 2, kernel_size=1)
self.rpn_bbox_pred_net = nn.Conv2d(512, self._num_anchors * 4, kernel_size=1)
self.cls_score_net = nn.Linear(self._net_conv_channels, self._num_classes)
self.bbox_pred_net = nn.Linear(self._net_conv_channels, self._num_classes * 4)
self.transformer_block1 = TransformerBlock(self._net_conv_channels)
self.transformer_block2 = TransformerBlock(self._net_conv_channels)
self.transformer_block3 = TransformerBlock(self._net_conv_channels)
self.init_weights()
def init_weights(self):
def normal_init(m, mean, stddev, truncated=False):
if truncated:
m.weight.data.normal_().fmod_(2).mul_(stddev).add_(mean) # not a perfect approximation
else:
m.weight.data.normal_(mean, stddev)
m.bias.data.zero_()
normal_init(self.rpn_net, 0, 0.01, cfg.TRAIN.TRUNCATED)
normal_init(self.rpn_cls_score_net, 0, 0.01, cfg.TRAIN.TRUNCATED)
normal_init(self.rpn_bbox_pred_net, 0, 0.01, cfg.TRAIN.TRUNCATED)
normal_init(self.cls_score_net, 0, 0.01, cfg.TRAIN.TRUNCATED)
normal_init(self.bbox_pred_net, 0, 0.001, cfg.TRAIN.TRUNCATED)
def _anchor_component(self, height, width):
anchors, anchor_length = generate_anchors_pre(height, width, self._feat_stride, self._anchor_scales, self._anchor_ratios)
self._anchors = torch.from_numpy(anchors).to(self._device)
self._anchor_length = anchor_length
def _proposal_layer(self, rpn_cls_prob, rpn_bbox_pred):
rois, rpn_scores = proposal_layer(rpn_cls_prob, rpn_bbox_pred, self._im_info, self._mode, self._feat_stride, self._anchors, self._num_anchors)
return rois, rpn_scores
def _anchor_target_layer(self, rpn_cls_score):
rpn_labels, rpn_bbox_targets, rpn_bbox_inside_weights, rpn_bbox_outside_weights = \
anchor_target_layer(
rpn_cls_score.data,
self._gt_boxes.data.cpu().numpy(),
self._im_info,
self._feat_stride,
self._anchors.data.cpu().numpy(),
self._num_anchors,
)
rpn_labels = torch.from_numpy(rpn_labels).float().to(self._device) #.set_shape([1, 1, None, None])
rpn_bbox_targets = torch.from_numpy(rpn_bbox_targets).float().to(self._device) #.set_shape([1, None, None, self._num_anchors * 4])
rpn_bbox_inside_weights = torch.from_numpy(rpn_bbox_inside_weights).float().to(self._device) #.set_shape([1, None, None, self._num_anchors * 4])
rpn_bbox_outside_weights = torch.from_numpy(rpn_bbox_outside_weights).float().to(self._device) #.set_shape([1, None, None, self._num_anchors * 4])
rpn_labels = rpn_labels.long()
self._anchor_targets['rpn_labels'] = rpn_labels
self._anchor_targets['rpn_bbox_targets'] = rpn_bbox_targets
self._anchor_targets['rpn_bbox_inside_weights'] = rpn_bbox_inside_weights
self._anchor_targets['rpn_bbox_outside_weights'] = rpn_bbox_outside_weights
return rpn_labels
def _proposal_target_layer(self, rois, roi_scores):
rois, roi_scores, labels, bbox_targets, bbox_inside_weights, bbox_outside_weights, cls_inside_weights = proposal_target_layer(rois, roi_scores, self._gt_boxes, self._gt_classes, self._num_classes)
if rois is None:
self._check = True
if not self._check:
self._proposal_targets['rois'] = rois
# self._proposal_targets['labels'] = labels.long()
self._proposal_targets['labels'] = labels
self._proposal_targets['bbox_targets'] = bbox_targets
self._proposal_targets['bbox_inside_weights'] = bbox_inside_weights
self._proposal_targets['bbox_outside_weights'] = bbox_outside_weights
self._proposal_targets['cls_inside_weights'] = cls_inside_weights
return rois, roi_scores
return None, None
def _region_proposal(self, net_conv):
rpn = F.relu(self.rpn_net(net_conv))
rpn_cls_score = self.rpn_cls_score_net(rpn) # batch * (num_anchors * 2) * h * w
# change it so that the score has 2 as its channel size
rpn_cls_score_reshape = rpn_cls_score.view(1, 2, -1, rpn_cls_score.size()[-1]) # batch * 2 * (num_anchors*h) * w
rpn_cls_prob_reshape = F.softmax(rpn_cls_score_reshape, dim=1)
# Move channel to the last dimension, to fit the input of python functions
rpn_cls_prob = rpn_cls_prob_reshape.view_as(rpn_cls_score).permute(0, 2, 3, 1) # batch * h * w * (num_anchors * 2)
rpn_cls_score = rpn_cls_score.permute(0, 2, 3, 1) # batch * h * w * (num_anchors * 2)
rpn_cls_score_reshape = rpn_cls_score_reshape.permute(0, 2, 3, 1).contiguous() # batch * (num_anchors*h) * w * 2
rpn_cls_pred = torch.max(rpn_cls_score_reshape.view(-1, 2), 1)[1]
rpn_bbox_pred = self.rpn_bbox_pred_net(rpn)
rpn_bbox_pred = rpn_bbox_pred.permute(0, 2, 3, 1).contiguous() # batch * h * w * (num_anchors*4)
if self._mode == 'TRAIN':
rois, roi_scores = self._proposal_layer(rpn_cls_prob, rpn_bbox_pred) # rois, roi_scores are variable
rpn_labels = self._anchor_target_layer(rpn_cls_score)
rois, _ = self._proposal_target_layer(rois, roi_scores)
if self._check:
return None
else:
if cfg.TEST.MODE == 'nms':
rois, _ = self._proposal_layer(rpn_cls_prob, rpn_bbox_pred)
elif cfg.TEST.MODE == 'top':
rois, _ = self._proposal_top_layer(rpn_cls_prob, rpn_bbox_pred)
else:
raise NotImplementedError
self._predictions["rpn_cls_score"] = rpn_cls_score
self._predictions["rpn_cls_score_reshape"] = rpn_cls_score_reshape
self._predictions["rpn_cls_prob"] = rpn_cls_prob
self._predictions["rpn_cls_pred"] = rpn_cls_pred
self._predictions["rpn_bbox_pred"] = rpn_bbox_pred
self._predictions["rois"] = rois
return rois
def _roi_pool_layer(self, bottom, rois):
return RoIPool((cfg.POOLING_SIZE, cfg.POOLING_SIZE), 1.0 / 16.0)(bottom, rois)
def _roi_align_layer(self, bottom, rois):
return RoIAlign((cfg.POOLING_SIZE, cfg.POOLING_SIZE), 1.0 / 16.0, 0)(bottom, rois)
def _query_preprocessing(self, query):
return F.avg_pool2d(query, (7, 7))
def _predict(self, pretrained='Kinetics'):
torch.backends.cudnn.benchmark = False
net_conv, video_feature = self._image_to_head(pretrained)
# region proposal network, get rois
self._anchor_component(net_conv.size(2), net_conv.size(3))
rois = self._region_proposal(net_conv)
if not self._check:
if cfg.POOLING_MODE == 'align':
pool5 = self._roi_align_layer(net_conv, rois)
else:
pool5 = self._roi_pool_layer(net_conv, rois)
if self._mode == 'TRAIN':
torch.backends.cudnn.benchmark = True
avg_pool = F.avg_pool2d(pool5, (2, 2))
avg_pool = self._query_preprocessing(avg_pool)
down_sample_conv2d = self.linear_projection_2d(avg_pool)
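            # Three stacked transformer blocks let each RoI feature attend to the full spatio-temporal video feature volume.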
transformer_out1 = self.transformer_block1(down_sample_conv2d, video_feature)
transformer_out2 = self.transformer_block2(transformer_out1, video_feature)
transformer_out3 = self.transformer_block3(transformer_out2, video_feature)
upscale_out = F.relu(self.up_scale_2d(transformer_out3))
transformer_out3 = upscale_out.contiguous().view(-1, self._net_conv_channels)
cls_prob, bbox_pred = self._region_classification(transformer_out3)
return rois, cls_prob, bbox_pred
return None, None, None
def _region_classification(self, fc7):
cls_score = self.cls_score_net(fc7)
# cls_pred = torch.max(cls_score, 1)[1]
cls_prob = torch.sigmoid(cls_score)
bbox_pred = self.bbox_pred_net(fc7)
self._predictions["cls_score"] = cls_score
# self._predictions["cls_pred"] = cls_pred
self._predictions["cls_prob"] = cls_prob
self._predictions["bbox_pred"] = bbox_pred
return cls_prob, bbox_pred
def forward(self, image, im_info, boxes=None, gt_classes=None, mode='TRAIN'):
self._image = image.to(self._device)
self._im_info = im_info
self._gt_boxes = boxes.to(self._device) if boxes is not None else None
self._gt_classes = gt_classes.to(self._device) if gt_classes is not None else None
self._mode = mode
rois, cls_prob, bbox_pred = self._predict()
if not self._check:
if mode == 'TEST':
stds = bbox_pred.data.new(cfg.TRAIN.BBOX_NORMALIZE_STDS).repeat(self._num_classes).unsqueeze(0).expand_as(bbox_pred)
means = bbox_pred.data.new(cfg.TRAIN.BBOX_NORMALIZE_MEANS).repeat(self._num_classes).unsqueeze(0).expand_as(bbox_pred)
self._predictions["bbox_pred"] = bbox_pred.mul(stds).add(means)
else:
self._add_losses() # compute losses
return rois
def train_step(self, blobs, train_op):
self._check = False
rois = self.forward(blobs['data'], blobs['im_info'], blobs['boxes'], blobs['gt_classes'])
if not self._check:
rpn_loss_cls, rpn_loss_box, cross_entropy, loss_box, loss = self._losses["rpn_cross_entropy"].item(), \
self._losses['rpn_loss_box'].item(), \
self._losses['cross_entropy'].item(), \
self._losses['loss_box'].item(), \
self._losses['total_loss'].item()
# rpn_loss_cls, rpn_loss_box, loss = self._losses["rpn_cross_entropy"].item(), \
# self._losses['rpn_loss_box'].item(), \
# self._losses['total_loss'].item()
# utils.timer.timer.tic('backward')
train_op.zero_grad()
self._losses['total_loss'].backward()
nn.utils.clip_grad_norm_(self.parameters(), max_norm=35, norm_type=2)
# utils.timer.timer.toc('backward')
train_op.step()
self.delete_intermediate_states()
return rpn_loss_cls, rpn_loss_box, cross_entropy, loss_box, loss
# return rpn_loss_cls, rpn_loss_box, 0, 0, loss, rois
return None, None, None, None, None
def _add_losses(self, sigma_rpn=3.0):
# RPN, class loss
rpn_cls_score = self._predictions['rpn_cls_score_reshape'].view(-1, 2)
rpn_label = self._anchor_targets['rpn_labels'].view(-1)
rpn_select = (rpn_label.data != -1).nonzero().view(-1)
rpn_cls_score = rpn_cls_score.index_select(0, rpn_select).contiguous().view(-1, 2)
rpn_label = rpn_label.index_select(0, rpn_select).contiguous().view(-1)
rpn_cross_entropy = F.cross_entropy(rpn_cls_score, rpn_label)
# RPN, bbox loss
rpn_bbox_pred = self._predictions['rpn_bbox_pred']
rpn_bbox_targets = self._anchor_targets['rpn_bbox_targets']
rpn_bbox_inside_weights = self._anchor_targets['rpn_bbox_inside_weights']
rpn_bbox_outside_weights = self._anchor_targets['rpn_bbox_outside_weights']
rpn_loss_box = self._smooth_l1_loss(
rpn_bbox_pred,
rpn_bbox_targets,
rpn_bbox_inside_weights,
rpn_bbox_outside_weights,
sigma=sigma_rpn,
dim=[1, 2, 3]
)
cls_score = self._predictions["cls_score"]
# label = self._proposal_targets["labels"].view(-1)
label = self._proposal_targets["labels"]
# cross_entropy = F.cross_entropy(cls_score.view(-1, self._num_classes), label)
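        # Multi-label classification: per-class sigmoid BCE weighted by cls_inside_weights, averaged over the positively weighted entries.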
cls_score = cls_score.view(-1, self._num_classes)
cls_inside_weights = self._proposal_targets['cls_inside_weights']
avg_factor = max(torch.sum(cls_inside_weights > 0).float().item(), 1.)
cross_entropy = F.binary_cross_entropy_with_logits(cls_score, label.float(), weight=cls_inside_weights.float(), reduction='sum') / avg_factor
# RCNN, bbox loss
bbox_pred = self._predictions['bbox_pred']
bbox_targets = self._proposal_targets['bbox_targets']
bbox_inside_weights = self._proposal_targets['bbox_inside_weights']
bbox_outside_weights = self._proposal_targets['bbox_outside_weights']
# print(bbox_pred.size(), bbox_targets.size(), bbox_inside_weights.size(), bbox_outside_weights.size())
loss_box = self._smooth_l1_loss(bbox_pred, bbox_targets, bbox_inside_weights, bbox_outside_weights)
self._losses['cross_entropy'] = cross_entropy
# self._losses['cross_entropy'] = 0
        self._losses['loss_box'] = loss_box
# external/metadata/factory.py
# -*- coding: iso-8859-1 -*-
# -----------------------------------------------------------------------------
# factory.py
# -----------------------------------------------------------------------------
# $Id$
#
# -----------------------------------------------------------------------------
# kaa-Metadata - Media Metadata for Python
# Copyright (C) 2003-2006 <NAME>, <NAME>
#
# First Edition: <NAME> <<EMAIL>>
# Maintainer: <NAME> <<EMAIL>>
#
# Please see the file AUTHORS for a complete list of authors.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MER-
# CHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# -----------------------------------------------------------------------------
__all__ = [ 'Factory', 'register', 'gettype', 'parse' ]
# python imports
import stat
import io
import os
import sys
import struct
import urllib.parse
import urllib.request, urllib.parse, urllib.error
import logging
# kaa.metadata imports
from . import core
# get logging object
log = logging.getLogger('metadata')
# factory object
_factory = None
# some timing debug
TIME_DEBUG = False
R_MIMETYPE = 0
R_EXTENSION = 1
R_CLASS = 2
def register(mimetype, extensions, c, magic=None):
"""
Register a parser to the factory.
"""
return Factory().register(mimetype, extensions, c, magic)
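# Illustrative registration call (hypothetical values). Judging from get_class() in
# _Factory below, the third argument is the dotted module name, relative to this
# package, whose Parser class is imported lazily; the optional magic bytes let the
# factory try this parser first for files beginning with them.
#
# register('video/mpeg', ['mpg', 'mpeg'], 'video.mpeg', magic=b'\x00\x00\x01\xba')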
def gettype(mimetype, extensions):
"""
Return parser for mimetype / extensions
"""
return Factory().get(mimetype,extensions)
def parse(filename, force=True):
"""
parse a file
"""
result = Factory().create(filename, force)
if result:
result._finalize()
return result
class NullParser(object):
def __init__(self, file):
raise core.ParseError
class File(io.FileIO):
def read(self, bytes=-1):
"""
If the size argument is negative or omitted, read until EOF is
reached. If more than 5MB is requested, an IOError is
raised. This should not mappen for kaa.metadata parsers.
"""
if bytes > 5000000 or (bytes < 0 and os.stat(self.name)[stat.ST_SIZE] - self.tell() > 1000000):
# reading more than 1MB looks like a bug
raise IOError('trying to read %s bytes' % bytes)
return super().read(bytes)
class _Factory:
"""
Abstract Factory for the creation of Media instances. The different
Methods create Media objects by parsing the given medium.
"""
def __init__(self):
self.extmap = {}
self.mimemap = {}
self.classmap = {}
self.magicmap = {}
self.types = []
self.device_types = []
self.directory_types = []
self.stream_types = []
def get_class(self, name):
if name not in self.classmap:
# Import the parser class for the given name.
try:
scope = {}
exec('from .%s import Parser' % name, globals(), scope)
self.classmap[name] = scope['Parser']
except BaseException:
# Something failed while trying to import this parser. Rather
# than bail out altogether, just log the error and fall back to NullParser.
log.exception('Error importing parser %s' % name)
self.classmap[name] = NullParser
return self.classmap[name]
def get_scheme_from_info(self, info):
if info.__class__.__name__ == 'DVDInfo':
return 'dvd'
else:
return 'file'
def create_from_file(self, file, force=True):
"""
create based on the file stream 'file'
"""
# Check extension as a hint
e = os.path.splitext(file.name)[1].lower()
parser = None
if e and e.startswith('.') and e[1:] in self.extmap:
log.debug("trying ext %s on file %s", e[1:], file.name)
parsers = self.extmap[e[1:]]
for info in parsers:
file.seek(0,0)
try:
parser = self.get_class(info[R_CLASS])
return parser(file)
except core.ParseError:
pass
# Try to find a parser based on the first bytes of the
# file (magic header). If a magic header is found but the
# parser failed, no other parser will be tried to speed
# up parsing of a bunch of files. So magic information should
# only be set if the parser is very sure
file.seek(0,0)
magic = file.read(10)
for length, magicmap in list(self.magicmap.items()):
if magic[:length] in magicmap:
for p in magicmap[magic[:length]]:
log.info('Trying %s by magic header', p[R_CLASS])
file.seek(0,0)
try:
parser = self.get_class(p[R_CLASS])
return parser(file)
except core.ParseError:
pass
log.info('Magic header found but parser failed')
return None
if not force:
log.info('No Type found by Extension (%s). Giving up.' % e)
return None
log.info('No Type found by Extension (%s). Trying all parsers.' % e)
for e in self.types:
if self.get_class(e[R_CLASS]) == parser:
# We already tried this parser, don't bother again.
continue
log.debug('trying %s' % e[R_MIMETYPE])
file.seek(0,0)
try:
return self.get_class(e[R_CLASS])(file)
except core.ParseError:
pass
return None
def create_from_url(self, url, force=True):
"""
Create information for urls. This includes file:// and cd://
"""
split = urllib.parse.urlsplit(url)
scheme = split[0]
if scheme == 'file':
(scheme, location, path, query, fragment) = split
return self.create_from_filename(location+path, force)
elif scheme == 'cdda':
r = self.create_from_filename(split[4], force)
if r:
r._set_url(url)
return r
elif scheme == 'http' and False:
# This code is deactivated right now. Parsing video data over
# http is way too slow right now. We need a better way to handle
# this before activating it again.
# We will need some more sophisticated and generic construction
# method for this. Perhaps move file.open stuff into __init__
# instead of doing it here...
for e in self.stream_types:
log.debug('Trying %s' % e[R_MIMETYPE])
try:
return self.get_class(e[R_CLASS])(url)
except core.ParseError:
pass
elif scheme == 'dvd':
path = split[2]
if not path.replace('/', ''):
return self.create_from_device('/dev/dvd')
return self.create_from_filename(split[2])
else:
(scheme, location, path, query, fragment) = split
try:
uhandle = urllib.request.urlopen(url)
except IOError:
# Unsupported URL scheme
return
mime = uhandle.info().get_content_type()
log.debug("Trying %s" % mime)
if mime in self.mimemap:
try:
return self.get_class(self.mimemap[mime][R_CLASS])(uhandle)
except core.ParseError:
pass
# XXX Todo: Try other types
def create_from_filename(self, filename, force=True):
"""
Create information for the given filename
"""
if os.path.isdir(filename):
return None
if os.path.isfile(filename):
try:
f = File(filename,'rb')
except (IOError, OSError) as e:
log.info('error reading %s: %s' % (filename, e))
return None
result = self.create_from_file(f, force)
# create a hash for the file based on hashes from
# http://trac.opensubtitles.org/projects/opensubtitles/wiki/HashSourceCodes
qwsize = struct.calcsize('q')
filehash = filesize = os.path.getsize(filename)
for fpos in (0, max(0, filesize - 65536)):
f.seek(fpos)
# Read up to 64k, but skip the last few bytes if we can't get a
# full 64-bit value.
buf = f.read(65536)
for qw in struct.unpack('%dq' % (len(buf) // qwsize), buf[:len(buf) & ~7]):
filehash = (filehash + qw) & 0xFFFFFFFFFFFFFFFF
filehash = "%016x" % filehash
f.close()
if result:
result._set_url('%s://%s' % (self.get_scheme_from_info(result), os.path.abspath(filename)))
result.hash = filehash
return result
return None
def create_from_device(self,devicename):
"""
Create information from the device. Currently only rom drives
are supported.
"""
for e in self.device_types:
log.debug('Trying %s' % e[R_MIMETYPE])
try:
t = self.get_class(e[R_CLASS])(devicename)
t._set_url('%s://%s' % (self.get_scheme_from_info(t), os.path.abspath(devicename)))
return t
except core.ParseError:
pass
return None
def create_from_directory(self, dirname):
"""
Create information from the directory.
"""
for e in self.directory_types:
log.debug('Trying %s' % e[R_MIMETYPE])
try:
return self.get_class(e[R_CLASS])(dirname)
except core.ParseError:
pass
return None
def create(self, name, force=True):
"""
Global 'create' function. This function calls the different
'create_from_'-functions.
"""
try:
if hasattr(name, 'seek'):
# a file-like object
return self.create_from_file(name, force)
if name.find('://') > 0:
return self.create_from_url(name)
if not os.path.exists(name):
return None
if (sys.platform.startswith('freebsd') and \
stat.S_ISCHR(os.stat(name)[stat.ST_MODE])) \
or stat.S_ISBLK(os.stat(name)[stat.ST_MODE]):
return self.create_from_device(name)
if os.path.isdir(name):
return self.create_from_directory(name)
return self.create_from_filename(name, force)
except Exception:
log.exception('kaa.metadata.create error')
log.warning('Please report this bug to the Freevo mailing list')
return None
def register(self, mimetype, extensions, c, magic=None):
"""
register the parser to kaa.metadata
"""
log.debug('%s registered' % mimetype)
tuple = (mimetype, extensions, c)
if extensions == core.EXTENSION_DEVICE:
self.device_types.append(tuple)
elif extensions == core.EXTENSION_DIRECTORY:
self.directory_types.append(tuple)
elif extensions == core.EXTENSION_STREAM:
self.stream_types.append(tuple)
else:
self.types.append(tuple)
for e in (x.lower() for x in extensions):
if e not in self.extmap:
self.extmap[e] = []
self.extmap[e].append(tuple)
self.mimemap[mimetype] = tuple
# add to magic header list
if magic is not None:
if not len(magic) in self.magicmap:
self.magicmap[len(magic)] = {}
if not magic in self.magicmap[len(magic)]:
self.magicmap[len(magic)][magic] = []
self.magicmap[len(magic)][magic].append(tuple)
def get(self, mimetype, extensions):
"""
return the object for mimetype/extensions or None
"""
if extensions == core.EXTENSION_DEVICE:
l = self.device_types
elif extensions == core.EXTENSION_DIRECTORY:
l = self.directory_types
elif extensions == core.EXTENSION_STREAM:
l = self.stream_types
else:
l = self.types
for info in l:
if info[R_MIMETYPE] == mimetype and info[R_EXTENSION] == extensions:
return self.get_class(info[R_CLASS])
return None
class Singleton(object):
"""
Create Singleton object from classref on demand.
"""
class MemberFunction(object):
def __init__(self, singleton, name):
self._singleton = singleton
self._name = name
def __call__(self, *args, **kwargs):
return getattr(self._singleton(), self._name)(*args, **kwargs)
def __init__(self, classref):
self._singleton = None
# Licensed under a 3-clause BSD style license - see LICENSE
'''
This module provides the framework for grouping sources from two photometric catalogues into
distinct "islands" of sources, along with calculating whether they are within overlap for
various photometric integral purposes.
'''
import sys
import os
import numpy as np
from .misc_functions import (load_small_ref_auf_grid, hav_dist_constant_lat,
map_large_index_to_small_index, _load_rectangular_slice,
_create_rectangular_slice_arrays)
from .group_sources_fortran import group_sources_fortran as gsf
from .make_set_list import set_list
__all__ = ['make_island_groupings']
def make_island_groupings(joint_folder_path, a_cat_folder_path, b_cat_folder_path,
a_auf_folder_path, b_auf_folder_path, a_auf_pointings, b_auf_pointings,
a_filt_names, b_filt_names, a_title, b_title, r, dr, rho, drho, j1s,
max_sep, ax_lims, int_fracs, mem_chunk_num, include_phot_like,
use_phot_priors):
'''
Function to handle the creation of "islands" of astrometrically coeval
sources, and identify which overlap to some probability based on their
combined AUFs.
Parameters
----------
joint_folder_path : string
Folder on disk containing the files related to the cross-match between
the two catalogues.
a_cat_folder_path : string
Folder on disk where catalogue "a" files have been stored.
b_cat_folder_path : string
Folder on disk where catalogue "b" files are saved.
a_auf_folder_path : string
Folder on disk where perturbation AUF component files for catalogue "a"
are located.
b_auf_folder_path : string
Folder on disk where perturbation AUF component files for catalogue "b"
are located.
a_auf_pointings : 2-D numpy.ndarray
Array containing the listings of longitude, latitude pointings at which
the perturbation AUF components were computed for catalogue "a".
b_auf_pointings : 2-D numpy.ndarray
Array containing the listings of longitude, latitude pointings at which
the perturbation AUF components were computed for catalogue "b".
a_filt_names : list of string
List of ordered names for filters used in catalogue "a" cross-match.
b_filt_names : list of string
List of filters in catalogue "b" matching.
a_title : string
Name used to describe catalogue "a" in the cross-match.
b_title : string
Catalogue "b" description, for identifying its given folder.
r : numpy.ndarray
Array of real-space distances, in arcseconds, used in the evaluation of
convolved AUF integrals; represent bin edges.
dr : numpy.ndarray
Widths of real-space bins in ``r``. Will have shape one shorter than ``r``,
due to ``r`` requiring an additional right-hand bin edge.
rho : numpy.ndarray
Fourier-space array, used in handling the Hankel transformation for
convolution of AUFs. As with ``r``, represents bin edges.
drho : numpy.ndarray
Array representing the bin widths of ``rho``. As with ``dr``, is one
shorter than ``rho`` due to its additional bin edge.
j1s : 2-D numpy.ndarray
Array holding the evaluations of the Bessel Function of First kind of
First Order, evaluated at all ``r`` and ``rho`` bin-middle combination.
max_sep : float
The maximum allowed sky separation between two sources in opposing
catalogues for consideration as potential counterparts.
ax_lims : list of floats, or numpy.ndarray
The four limits of the cross-match between catalogues "a" and "b",
as lower and upper longitudinal coordinate, lower and upper latitudinal
coordinates respectively.
int_fracs : list of floats, or numpy.ndarray
List of integral limits used in evaluating probability of match based on
separation distance.
mem_chunk_num : integer
Number of sub-arrays to break larger array computations into for memory
limiting purposes.
include_phot_like : boolean
Flag indicating whether to perform additional computations required for
the future calculation of photometric likelihoods.
use_phot_priors : boolean
Flag indicating whether to calculate additional parameters needed to
calculate photometric-information dependent priors for cross-matching.
'''
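# Note: ``j1s`` is expected to be precomputed by the caller. One plausible construction
# (an assumption, not taken from this module) uses scipy, evaluating the first-order
# Bessel function at every r/rho bin-middle pair for the Hankel transforms below:
#   j1s = scipy.special.j1(2 * np.pi * (r[:-1] + dr/2).reshape(-1, 1) *
#                          (rho[:-1] + drho/2).reshape(1, -1))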
# Convert from arcseconds to degrees internally.
max_sep = np.copy(max_sep) / 3600
print("Creating catalogue islands and overlaps...")
sys.stdout.flush()
print("Calculating maximum overlap...")
sys.stdout.flush()
# The initial step to create island groupings is to find the largest number
# of overlaps for a single source, to minimise the size of the array of
# overlap indices. To do so, we load small-ish chunks of the sky, with
# padding in one catalogue to ensure all pairings can be found, and total
# the number of overlaps for each object across all sky slices.
ax1_loops = np.linspace(ax_lims[0], ax_lims[1], 11)
# Force the sub-division of the sky area in question to be 100 chunks, or
# roughly one square degree chunks, whichever is larger in area.
if ax1_loops[1] - ax1_loops[0] < 1:
ax1_loops = np.linspace(ax_lims[0], ax_lims[1], int(np.ceil(ax_lims[1] - ax_lims[0]) + 1))
ax2_loops = np.linspace(ax_lims[2], ax_lims[3], 11)
if ax2_loops[1] - ax2_loops[0] < 1:
ax2_loops = np.linspace(ax_lims[2], ax_lims[3], int(np.ceil(ax_lims[3] - ax_lims[2]) + 1))
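# Worked example (hypothetical limits): for ax_lims = [130.0, 138.0, -2.0, 2.0] the
# default 11-point linspace would give 0.8 deg longitude chunks, so the branch above
# rebuilds ax1_loops with int(np.ceil(138.0 - 130.0) + 1) == 9 edges (eight ~1 deg
# slices); the 4 deg latitude span is likewise rebuilt to 5 edges, i.e. four chunks.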
# Load the astrometry of each catalogue for slicing.
a_full = np.load('{}/con_cat_astro.npy'.format(a_cat_folder_path), mmap_mode='r')
b_full = np.load('{}/con_cat_astro.npy'.format(b_cat_folder_path), mmap_mode='r')
# Generate the necessary memmap sky slice arrays now.
_create_rectangular_slice_arrays(joint_folder_path, 'a', len(a_full))
memmap_slice_arrays_a = []
for n in ['1', '2', '3', '4', 'combined']:
memmap_slice_arrays_a.append(np.lib.format.open_memmap(
'{}/{}_temporary_sky_slice_{}.npy'.format(joint_folder_path, 'a', n), mode='r+',
dtype=bool, shape=(len(a_full),)))
_create_rectangular_slice_arrays(joint_folder_path, 'b', len(b_full))
memmap_slice_arrays_b = []
for n in ['1', '2', '3', '4', 'combined']:
memmap_slice_arrays_b.append(np.lib.format.open_memmap(
'{}/{}_temporary_sky_slice_{}.npy'.format(joint_folder_path, 'b', n), mode='r+',
dtype=bool, shape=(len(b_full),)))
asize = np.lib.format.open_memmap('{}/group/asize.npy'.format(joint_folder_path), mode='w+',
dtype=int, shape=(len(a_full),))
asize[:] = 0
bsize = np.lib.format.open_memmap('{}/group/bsize.npy'.format(joint_folder_path), mode='w+',
dtype=int, shape=(len(b_full),))
bsize[:] = 0
for ax1_start, ax1_end in zip(ax1_loops[:-1], ax1_loops[1:]):
for ax2_start, ax2_end in zip(ax2_loops[:-1], ax2_loops[1:]):
ax_cutout = [ax1_start, ax1_end, ax2_start, ax2_end]
a, afouriergrid, amodrefindsmall, a_cut = _load_fourier_grid_cutouts(
a_full, ax_cutout, joint_folder_path, a_cat_folder_path, a_auf_folder_path, 0, 'a',
memmap_slice_arrays_a)
b, bfouriergrid, bmodrefindsmall, b_cut = _load_fourier_grid_cutouts(
b_full, ax_cutout, joint_folder_path, b_cat_folder_path, b_auf_folder_path,
max_sep, 'b', memmap_slice_arrays_b)
if len(a) > 0 and len(b) > 0:
overlapa, overlapb = gsf.get_max_overlap(
a[:, 0], a[:, 1], b[:, 0], b[:, 1], max_sep, a[:, 2], b[:, 2],
r[:-1]+dr/2, rho[:-1], drho, j1s, afouriergrid, bfouriergrid, amodrefindsmall,
bmodrefindsmall, int_fracs[2])
asize[a_cut] = asize[a_cut] + overlapa
bsize[b_cut] = bsize[b_cut] + overlapb
amaxsize = int(np.amax(asize))
bmaxsize = int(np.amax(bsize))
del (overlapa, overlapb, a, b, a_cut, b_cut, amodrefindsmall, bmodrefindsmall,
afouriergrid, bfouriergrid)
print("Truncating star overlaps by AUF integral...")
sys.stdout.flush()
ainds = np.lib.format.open_memmap('{}/group/ainds.npy'.format(joint_folder_path), mode='w+',
dtype=int, shape=(amaxsize, len(a_full)), fortran_order=True)
binds = np.lib.format.open_memmap('{}/group/binds.npy'.format(joint_folder_path), mode='w+',
dtype=int, shape=(bmaxsize, len(b_full)), fortran_order=True)
ainds[:, :] = -1
binds[:, :] = -1
asize[:] = 0
bsize[:] = 0
for ax1_start, ax1_end in zip(ax1_loops[:-1], ax1_loops[1:]):
for ax2_start, ax2_end in zip(ax2_loops[:-1], ax2_loops[1:]):
ax_cutout = [ax1_start, ax1_end, ax2_start, ax2_end]
a, afouriergrid, amodrefindsmall, a_cut = _load_fourier_grid_cutouts(
a_full, ax_cutout, joint_folder_path, a_cat_folder_path, a_auf_folder_path, 0, 'a',
memmap_slice_arrays_a)
b, bfouriergrid, bmodrefindsmall, b_cut = _load_fourier_grid_cutouts(
b_full, ax_cutout, joint_folder_path, b_cat_folder_path, b_auf_folder_path,
max_sep, 'b', memmap_slice_arrays_b)
if len(a) > 0 and len(b) > 0:
indicesa, indicesb, overlapa, overlapb = gsf.get_overlap_indices(
a[:, 0], a[:, 1], b[:, 0], b[:, 1], max_sep, amaxsize, bmaxsize, a[:, 2],
b[:, 2], r[:-1]+dr/2, rho[:-1], drho, j1s, afouriergrid, bfouriergrid,
amodrefindsmall, bmodrefindsmall, int_fracs[2])
a_cut2 = np.arange(0, len(a_full))[a_cut]
b_cut2 = np.arange(0, len(b_full))[b_cut]
for j in range(0, len(a_cut2)):
ainds[asize[a_cut2[j]]:asize[a_cut2[j]]+overlapa[j], a_cut2[j]] = b_cut2[
indicesa[:overlapa[j], j] - 1]
for j in range(0, len(b_cut2)):
binds[bsize[b_cut2[j]]:bsize[b_cut2[j]]+overlapb[j], b_cut2[j]] = a_cut2[
indicesb[:overlapb[j], j] - 1]
asize[a_cut] = asize[a_cut] + overlapa
bsize[b_cut] = bsize[b_cut] + overlapb
del (a_cut, a_cut2, b_cut, b_cut2, indicesa, indicesb, overlapa, overlapb, a, b,
amodrefindsmall, bmodrefindsmall, afouriergrid, bfouriergrid)
# Delete sky slices used to make fourier cutouts.
os.system('rm {}/*temporary_sky_slice*.npy'.format(joint_folder_path))
print("Cleaning overlaps...")
sys.stdout.flush()
ainds, asize = _clean_overlaps(ainds, asize, joint_folder_path, 'ainds')
binds, bsize = _clean_overlaps(binds, bsize, joint_folder_path, 'binds')
if include_phot_like or use_phot_priors:
ablen = np.lib.format.open_memmap('{}/group/ablen.npy'.format(joint_folder_path),
mode='w+', dtype=float, shape=(len(a_full),))
aflen = np.lib.format.open_memmap('{}/group/aflen.npy'.format(joint_folder_path),
mode='w+', dtype=float, shape=(len(a_full),))
bblen = np.lib.format.open_memmap('{}/group/bblen.npy'.format(joint_folder_path),
mode='w+', dtype=float, shape=(len(b_full),))
bflen = np.lib.format.open_memmap('{}/group/bflen.npy'.format(joint_folder_path),
mode='w+', dtype=float, shape=(len(b_full),))
for cnum in range(0, mem_chunk_num):
lowind = np.floor(len(a_full)*cnum/mem_chunk_num).astype(int)
highind = np.floor(len(a_full)*(cnum+1)/mem_chunk_num).astype(int)
a = a_full[lowind:highind, 2]
a_inds_small = ainds[:, lowind:highind]
a_size_small = asize[lowind:highind]
a_inds_small = np.asfortranarray(a_inds_small[:np.amax(a_size_small), :])
a_inds_map, a_inds_unique = map_large_index_to_small_index(
a_inds_small, len(b_full), '{}/group'.format(joint_folder_path))
b = b_full[a_inds_unique, 2]
modrefind = np.load('{}/modelrefinds.npy'.format(a_auf_folder_path),
mmap_mode='r')[:, lowind:highind]
[a_fouriergrid], a_modrefindsmall = load_small_ref_auf_grid(
modrefind, a_auf_folder_path, ['fourier'])
modrefind = np.load('{}/modelrefinds.npy'.format(b_auf_folder_path),
mmap_mode='r')[:, a_inds_unique]
[b_fouriergrid], b_modrefindsmall = load_small_ref_auf_grid(
modrefind, b_auf_folder_path, ['fourier'])
del modrefind
a_int_lens = gsf.get_integral_length(
a, b, r[:-1]+dr/2, rho[:-1], drho, j1s, a_fouriergrid, b_fouriergrid,
a_modrefindsmall, b_modrefindsmall, a_inds_map, a_size_small, int_fracs[0:2])
ablen[lowind:highind] = a_int_lens[:, 0]
aflen[lowind:highind] = a_int_lens[:, 1]
for cnum in range(0, mem_chunk_num):
lowind = np.floor(len(b_full)*cnum/mem_chunk_num).astype(int)
highind = np.floor(len(b_full)*(cnum+1)/mem_chunk_num).astype(int)
b = b_full[lowind:highind, 2]
b_inds_small = binds[:, lowind:highind]
b_size_small = bsize[lowind:highind]
b_inds_small = np.asfortranarray(b_inds_small[:np.amax(b_size_small), :])
b_inds_map, b_inds_unique = map_large_index_to_small_index(
b_inds_small, len(a_full), '{}/group'.format(joint_folder_path))
a = a_full[b_inds_unique, 2]
modrefind = np.load('{}/modelrefinds.npy'.format(b_auf_folder_path),
mmap_mode='r')[:, lowind:highind]
[b_fouriergrid], b_modrefindsmall = load_small_ref_auf_grid(
modrefind, b_auf_folder_path, ['fourier'])
modrefind = np.load('{}/modelrefinds.npy'.format(a_auf_folder_path),
mmap_mode='r')[:, b_inds_unique]
[a_fouriergrid], a_modrefindsmall = load_small_ref_auf_grid(
modrefind, a_auf_folder_path, ['fourier'])
del modrefind
b_int_lens = gsf.get_integral_length(
b, a, r[:-1]+dr/2, rho[:-1], drho, j1s, b_fouriergrid, a_fouriergrid,
b_modrefindsmall, a_modrefindsmall, b_inds_map, b_size_small, int_fracs[0:2])
bblen[lowind:highind] = b_int_lens[:, 0]
bflen[lowind:highind] = b_int_lens[:, 1]
print("Maximum overlaps | |
#!/usr/bin/python
# Source repository: LaudateCorpus1/oci-ansible-collection
# Copyright (c) 2020, 2022 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_network_load_balancer
short_description: Manage a NetworkLoadBalancer resource in Oracle Cloud Infrastructure
description:
- This module allows the user to create, update and delete a NetworkLoadBalancer resource in Oracle Cloud Infrastructure
- For I(state=present), creates a network load balancer.
- "This resource has the following action operations in the M(oracle.oci.oci_network_load_balancer_actions) module: change_compartment."
version_added: "2.9.0"
author: Oracle (@oracle)
options:
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the compartment containing the network load balancer.
- Required for create using I(state=present).
- Required for update when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is set.
- Required for delete when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is set.
type: str
display_name:
description:
- Network load balancer identifier, which can be renamed.
- Required for create using I(state=present).
- Required for update, delete when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is set.
- This parameter is updatable when C(OCI_USE_NAME_AS_IDENTIFIER) is not set.
type: str
aliases: ["name"]
is_preserve_source_destination:
description:
- This parameter can be enabled only if backends are compute OCIDs. When enabled, the skipSourceDestinationCheck parameter is automatically
enabled on the load balancer VNIC, and packets are sent to the backend with the entire IP header intact.
- This parameter is updatable.
type: bool
reserved_ips:
description:
- An array of reserved Ips.
type: list
elements: dict
suboptions:
id:
description:
- OCID of the reserved public IP address created with the virtual cloud network.
- Reserved public IP addresses are IP addresses that are registered using the virtual cloud network API.
- Create a reserved public IP address. When you create the network load balancer, enter the OCID of the reserved public IP address in the
reservedIp field to attach the IP address to the network load balancer. This task configures the network load balancer to listen to
traffic on this IP address.
- Reserved public IP addresses are not deleted when the network load balancer is deleted. The IP addresses become unattached from the
network load balancer.
- "Example: \\"ocid1.publicip.oc1.phx.unique_ID\\""
type: str
is_private:
description:
- Whether the network load balancer has a virtual cloud network-local (private) IP address.
- "If \\"true\\", then the service assigns a private IP address to the network load balancer."
- "If \\"false\\", then the service assigns a public IP address to the network load balancer."
- A public network load balancer is accessible from the internet, depending on the
L(security list rules,https://docs.cloud.oracle.com/Content/network/Concepts/securitylists.htm) for your virtual cloud network. For more
information about public and
private network load balancers,
see L(How Network Load Balancing Works,https://docs.cloud.oracle.com/Content/Balance/Concepts/balanceoverview.htm#how-network-load-balancing-
works).
This value is true by default.
- "Example: `true`"
type: bool
subnet_id:
description:
- The subnet in which the network load balancer is spawned L(OCIDs,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm).
- Required for create using I(state=present).
type: str
nlb_ip_version:
description:
- IP version associated with the NLB.
- This parameter is updatable.
type: str
choices:
- "IPV4"
- "IPV4_AND_IPV6"
freeform_tags:
description:
- "Simple key-value pair that is applied without any predefined name, type, or scope. Exists for cross-compatibility only.
Example: `{\\"bar-key\\": \\"value\\"}`"
- This parameter is updatable.
type: dict
defined_tags:
description:
- "Defined tags for this resource. Each key is predefined and scoped to a namespace.
Example: `{\\"foo-namespace\\": {\\"bar-key\\": \\"value\\"}}`"
- This parameter is updatable.
type: dict
network_load_balancer_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the network load balancer to update.
- Required for update using I(state=present) when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is not set.
- Required for delete using I(state=absent) when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is not set.
type: str
aliases: ["id"]
state:
description:
- The state of the NetworkLoadBalancer.
- Use I(state=present) to create or update a NetworkLoadBalancer.
- Use I(state=absent) to delete a NetworkLoadBalancer.
type: str
required: false
default: 'present'
choices: ["present", "absent"]
extends_documentation_fragment: [ oracle.oci.oracle, oracle.oci.oracle_creatable_resource, oracle.oci.oracle_wait_options ]
"""
EXAMPLES = """
- name: Create network_load_balancer
oci_network_load_balancer:
# required
compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
display_name: display_name_example
subnet_id: "ocid1.subnet.oc1..xxxxxxEXAMPLExxxxxx"
# optional
is_preserve_source_destination: true
reserved_ips:
- # optional
id: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
is_private: true
nlb_ip_version: IPV4
freeform_tags: {'Department': 'Finance'}
defined_tags: {'Operations': {'CostCenter': 'US'}}
- name: Update network_load_balancer
oci_network_load_balancer:
# required
network_load_balancer_id: "ocid1.networkloadbalancer.oc1..xxxxxxEXAMPLExxxxxx"
# optional
display_name: display_name_example
is_preserve_source_destination: true
nlb_ip_version: IPV4
freeform_tags: {'Department': 'Finance'}
defined_tags: {'Operations': {'CostCenter': 'US'}}
- name: Update network_load_balancer using name (when environment variable OCI_USE_NAME_AS_IDENTIFIER is set)
oci_network_load_balancer:
# required
compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
display_name: display_name_example
# optional
is_preserve_source_destination: true
nlb_ip_version: IPV4
freeform_tags: {'Department': 'Finance'}
defined_tags: {'Operations': {'CostCenter': 'US'}}
- name: Delete network_load_balancer
oci_network_load_balancer:
# required
network_load_balancer_id: "ocid1.networkloadbalancer.oc1..xxxxxxEXAMPLExxxxxx"
state: absent
- name: Delete network_load_balancer using name (when environment variable OCI_USE_NAME_AS_IDENTIFIER is set)
oci_network_load_balancer:
# required
compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
display_name: display_name_example
state: absent
"""
RETURN = """
network_load_balancer:
description:
- Details of the NetworkLoadBalancer resource acted upon by the current operation
returned: on success
type: complex
contains:
id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the network load balancer.
returned: on success
type: str
sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the compartment containing the network load balancer.
returned: on success
type: str
sample: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
display_name:
description:
- A user-friendly name, which does not have to be unique, and can be changed.
- "Example: `example_load_balancer`"
returned: on success
type: str
sample: display_name_example
lifecycle_state:
description:
- The current state of the network load balancer.
returned: on success
type: str
sample: CREATING
lifecycle_details:
description:
- A message describing the current state in more detail.
For example, can be used to provide actionable information for a resource in Failed state.
returned: on success
type: str
sample: lifecycle_details_example
nlb_ip_version:
description:
- IP version associated with the NLB.
returned: on success
type: str
sample: IPV4
time_created:
description:
- The date and time the network load balancer was created, in the format defined by RFC3339.
- "Example: `2020-05-01T21:10:29.600Z`"
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
time_updated:
description:
- The time the network load balancer was updated. An RFC3339 formatted date-time string.
- "Example: `2020-05-01T22:10:29.600Z`"
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
ip_addresses:
description:
- An array of IP addresses.
returned: on success
type: complex
contains:
ip_address:
description:
- An IP address.
- "Example: `192.168.0.3`"
returned: on success
type: str
sample: ip_address_example
is_public:
description:
- Whether the IP address is public or private.
- "If \\"true\\", then the IP address is public and accessible from the internet."
- "If \\"false\\", then the IP address is private and accessible only from within the associated virtual cloud network."
returned: on success
type: bool
sample: true
ip_version:
description:
- IP version associated with this IP address.
returned: on success
type: str
sample: IPV4
reserved_ip:
description:
- ""
returned: on success
type: complex
contains:
id:
description:
- OCID of the reserved public IP address created with the virtual cloud network.
- Reserved public IP addresses are IP addresses that are registered using the virtual cloud network API.
- Create a reserved public IP address. When you create the network load balancer, enter the OCID of the reserved public IP
address in the
reservedIp field to attach the IP address to the network load balancer. This task configures the network load balancer to
listen to traffic on this IP address.
- Reserved public IP addresses are not deleted when the network load balancer is deleted. The IP addresses become unattached
from the network load balancer.
- "Example: \\"ocid1.publicip.oc1.phx.unique_ID\\""
returned: on success
type: str
sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
is_private:
description:
- Whether the network load balancer has a virtual cloud network-local (private) IP address.
- "If \\"true\\", then the service assigns a private IP address to the network load balancer."
- "If \\"false\\", then the service assigns a public IP address to the network load balancer."
- A public network load balancer is accessible from the internet, depending on the
L(security list rules,https://docs.cloud.oracle.com/Content/network/Concepts/securitylists.htm) for your virtual cloud network.
Worker",
"Composer Or Musician": "Singer",
"Computer Programming": "Technologist",
"Computer Support Technician": "Technologist",
"Counseling": "Counselor",
"Counselor": "Counselor",
"Dentist": "Health Worker",
"Designer": "Artist",
"Detectives": "Detective",
"Doctor": "Health Worker",
"Electrician": "Mechanic",
"Engineer": "Factory Worker",
"Engineering": "Factory Worker",
"Entrepreneur": "Office Worker",
"Fashion Designer": "Artist",
"Firefighter": "Firefighter",
"Forensic Science": "Scientist",
"Forest Ranger": "Police Officer",
"Graphic Designer": "Office Worker",
"Human Resources Manager": "Office Worker",
"Human Resources Specialist": "Office Worker",
"Inventor": "Scientist",
"Journalist": "Detective",
"Judge": "Judge",
"Law Enforcement": "Police Officer",
"Lawyer": "Office Worker",
"Librarian": "Teacher",
"Manager": "Office Worker",
"Marketer": "Office Worker",
"Mathematician": "Teacher",
"Mechanics": "Mechanic",
"Military": "Pilot",
"Musician": "Singer",
"Naturalist": "Farmer",
"Nurse": "Health Worker",
"Nursing": "Health Worker",
"Nutritionist": "Health Worker",
"Office Manager": "Office Worker",
"Paralegal": "Office Worker",
"Paramedic": "Health Worker",
"Pediatrician": "Health Worker",
"Photographer": "Detective",
"Physical Therapist": "Health Worker",
"Physician": "Health Worker",
"Pilot": "Pilot",
"Police Officer": "Police Officer",
"Police Officers": "Police Officer",
"Politician": "Office Worker",
"Psychiatrist": "Health Worker",
"Psychologist": "Health Worker",
"Receptionist": "Technologist",
"Religious Worker": "Pilot",
"Sales Agent": "Office Worker",
"Sales Representative": "Office Worker",
"School Administrator": "Teacher",
"Scientist": "Scientist",
"Social Work": "Teacher",
"Social Worker": "Teacher",
"Software Developer": "Technologist",
"Software Engineer": "Technologist",
"Teacher": "Teacher",
"Teaching": "Teacher",
"Tv Anchor/Reporter": "Office Worker",
"University Professor": "Teacher",
"Veterinarian": "Health Worker",
"Video Game Designer": "Technologist",
"Writer": "Technologist"
}
personality_types = {
"INTJ": {"Type": ["Introversion", "Intuition", "Thinking", "Judging"], "Name": "The Architect",
"AltName": "The Architect", "Class": "fas fa-shapes", "BGColor": "#606060",
"FTColor": "wheat",
"Description": "Imaginative and strategic thinkers, with a plan for everything.",
"Dominant": "Introverted Intuition", "Auxiliary": "Extraverted Thinking",
"Tertiary": "Introverted Feeling", "Inferior": "Extraverted Sensing",
"KeyCharacteristics": ["Intjs Tend To Be Introverted And Prefer To Work Alone.",
"Intjs Look At The Big Picture And Like To Focus On Abstract Information Rather Than Concrete Details.",
"Intjs Place Greater Emphasis On Logic And Objective Information Rather Than Subjective Emotions.",
"Intjs Like Their World To Feel Controlled And Ordered So They Prefer To Make Plans Well In Advance."],
"Strengths": ["Enjoys Theoretical And Abstract Concepts", "High Expectations",
"Good At Listening", "Takes Criticism Well",
"Self-Confident And Hard-Working"],
"Weaknesses": ["Can Be Overly Analytical And Judgmental", "Very Perfectionistic",
"Dislikes Talking About Emotions",
"Sometimes Seems Callous Or Insensitive"],
"KnownCelbrities": ["<NAME>, U.S. President", "C.S. Lewis, Author",
"<NAME>, Actor & Politician",
"Gandalf, The Lord Of The Rings", "Lance Armstrong, Cyclist"],
"Careers": ["Scientist", "Mathematician", "Engineer", "Dentist", "Doctor", "Teacher",
"Judge", "Lawyer"], "ID": "INTJ"},
"INTP": {"Type": ["Introversion", "Intuition", "Thinking", "Perceiving"],
"Name": "The Thinker", "AltName": "The Thinker", "Class": "fas fa-brain",
"BGColor": "#e52a47", "FTColor": "wheat",
"Description": "Innovative inventors with an unquenchable thirst for knowledge.",
"Dominant": "Introverted Thinking", "Auxiliary": "Extraverted Intuition",
"Tertiary": "Introverted Sensing", "Inferior": "Extraverted Feeling",
"KeyCharacteristics": [
"Intps Are Quiet, Reserved, And Thoughtful. As Introverts, They Prefer To Socialize With A Small Group Of Close Friends With Whom They Share Common Interests And Connections.",
"They Enjoy Thinking About Theoretical Concepts And Tend To Value Intellect Over Emotion. Intps Are Logical And Base Decisions On Objective Information Rather Than Subjective Feelings.",
"When Analyzing Data And Making Decisions, They Are Highly Logical And Objective.",
"Tends To Be Flexible And Good At Thinking Outside Of The Box.",
"People With This Personality Type Think About The Big Picture Rather Than Focusing On Every Tiny Detail.",
"Intps Like To Keep Their Options Open And Feel Limited By Structure And Planning."],
"Strengths": ["Logical And Objective", "Abstract Thinker", "Independent",
"Loyal And Affectionate With Loved Ones"],
"Weaknesses": ["Difficult To Get To Know", "Can Be Insensitive",
"Prone To Self-Doubt", "Struggles To Follow Rules",
"Has Trouble Expressing Feelings"],
"KnownCelbrities": ["<NAME>, Scientist",
"<NAME>, U.S. President",
"<NAME>, Psychoanalyst", "<NAME>",
"<NAME>, The Big Bang Theory"],
"Careers": ["Scientist", "Mathematician", "Engineer", "Dentist", "Doctor", "Teacher",
"Judge", "Lawyer"], "ID": "INTP"},
"ENTJ": {"Type": ["Extroversion", "Intuition", "Thinking", "Judging"], "Name": "<NAME>",
"AltName": "<NAME>", "Class": "fas fa-chess-king", "BGColor": "#99892d",
"FTColor": "wheat",
"Description": "Bold, imaginative and strong-willed leaders, always finding a way or making one.",
"Dominant": "Extraverted Thinking", "Auxiliary": "Introverted Intuition",
"Tertiary": "Extraverted Sensing", "Inferior": "Introverted Feeling",
"KeyCharacteristics": [
"People With This Personality Type Enjoy Spending Time With Other People. They Have Strong Verbal Skills And Interacting With Others Helps Them Feel Energized.",
"Entj Types Prefer To Think About The Future Rather Than Focus On The Here-And-Now. They Usually Find Abstract And Theoretical Information More Interesting Than Concrete Details.",
"When Making Decisions, Entjs Place A Greater Emphasis On Objective And Logical Information. Personal Feeling And The Emotions Of Others Tend Not To Factor Much Into Their Choices.2Introduction To Type In College. Cpp; 1993.\\Ufeff",
"Entjs Are Planners. Making Decisions And Having A Schedule Or Course Of Action Planned Out Gives Them A Sense Of Predictability And Control.",
"They Are Highly Rational, Good At Spotting Problems, And Excel At Taking Charge. These Tendencies Make Them Natural Leaders Who Are Focused On Efficiently Solving Problems.",
"One Myth About Entjs Is That They Are Cold And Ruthless. While They Are Not Necessarily Good With Emotions, This Does Not Mean That They Are Intentionally Cruel. They Are Prone To Hiding Their Own Emotions And Sentimentality, Viewing It As A Weakness That Should Not Be Made Known To Others.",
"This Is An Entj Preferred Functioned And Is Expressed Through The Way They Make Decisions And Judgments.",
"Entjs Have A Tendency To Speak First Without Listening, Making Snap Judgments Before Really Taking In All The Information Pertaining To A Situation.",
"While They Tend To Make Snap Judgments, They Are Also Very Rational And Objective. They Are Focused On Imposing Order And Standards On The World Around Them. Setting Measurable Goals Is Important.",
"People With This Personality Type Are Future-Focused And Always Consider The Possibilities When Approaching A Decision.",
"Entjs Are Forward-Thinking And Are Not Afraid Of Change. They Trust Their Instincts, Although They May Have A Tendency To Regret Jumping To Conclusions So Quickly.",
"This Cognitive Function Gives Entjs An Appetite For Adventure. They Enjoy Novel Experiences And May Sometimes Engage In Thrill-Seeking Behaviors.",
"Because Their Outward Sensory Focus, They Also Have An Appreciation For Beautiful Things In Life. They Often Enjoy Surrounding Themselves With Things That They Find Attractive Or Interesting.",
"Introverted Feeling Is Centered On Internal Feelings And Values. Emotions Can Be\\U00A0Difficult Area For Entjs, And They Often Lack An Understanding Of How This Part Of Their Personality Contributes To Their Decision-Making Process.",
"When This Aspect Of Personality Is Weak, Entjs May Find Themselves Feeling Uncomfortable Or Awkward In Settings Where Some Type Of Emotional Response Is Called For."],
"Strengths": ["Strong Leadership Skills", "Self-Assured", "Well-Organized",
"Good At Making Decisions", "Assertive And Outspoken",
"Strong Communication Skills"],
"Weaknesses": ["Impatient", "Stubborn", "Insensitive", "Aggressive", "Intolerant"],
"KnownCelbrities": ["<NAME>, U.S. President",
"<NAME>, Microsoft Founder", "<NAME>, Football Coach",
"<NAME>, Astronomer", "<NAME>, Superman Character"],
"Careers": ["Human Resources Manager", "Company Ceo Or Manager", "Lawyer",
"Scientist", "Software Developer", "Business Analyst", "Entrepreneur",
"University Professor"], "ID": "ENTJ"},
"ENTP": {"Type": ["Extroversion", "Intuition", "Thinking", "Perceiving"], "Name": "Debater",
"AltName": "The Debater", "Class": "fas fa-bullhorn", "BGColor": "#213164",
"FTColor": "wheat",
"Description": "Smart and curious thinkers who cannot resist an intellectual challenge.",
"Dominant": "Extraverted Intuition", "Auxiliary": "Introverted Thinking",
"Tertiary": "Extraverted Feeling", "Inferior": "Introverted Sensing",
"KeyCharacteristics": [
"Entps Enjoy Interacting With A Wide Variety Of People. They Are Great Conversationalists And Love To Engage Other People In Debates.",
"They Are More Focused On The Future Rather Than On Immediate Details. They May Start Projects And Never Finish Them Because They Are So Focused On The Big Picture Rather Than The Present Needs.",
"Entps Enjoy Being Around Other People, Particularly If They Are Able To Engage In A Conversation Or Debate About Something In Which They Are Interested. They Are Usually Fairly Laid-Back And Easy To Get Along With. However, They Can Sometimes Get So Wrapped Up In Their Ideas Or Plans That They Lose Sight Of Their Close Relationships.",
"They Tend To Reserve Judgment. Instead Of Making A Decision Or Committing To A Course Of Action, They Would Prefer To Wait And See What Happens.",
"Entps Are Immensely Curious And Focused On Understanding The World Around Them. They Are Constantly Absorbing New Information And Ideas And Quickly | |
# Author: SiliconSloth 18/1/2018
from threading import Thread, Timer
import numpy as np
import cPickle, serial, os, time, subprocess
import cv2
# PyCharm seems to need this to work properly.
# try:
# from cv2 import cv2
# except:
# pass
# This script is responsible for actually driving the robot automatically. It uses the reduced video and feature files
# produced by Reducer.py for guidance and outputs debug video and log files which can be viewed with Viewer.py.
videoPath = "/home/someone/RobleyVision/Recordings/" # Where video files will be saved to.
# beepFile = "/home/someone/RobleyVision/Beep.wav" # File to play when beeping on Linux. (optional)
prizmPort = "/dev/ttyUSB0" # Check for the value of this in the Arduino IDE.
cameraIndex = 2 # 0,1,2 etc. determines which camera to use.
# Files to load training video and features from.
reducedVideoFile = "TrainingReduced.avi"
reducedFeatureFile = "ReducedFeatures.pkl"
# Determines how quickly the robot moves forwards/backwards and rotates. Bigger is faster.
forwardCoeff, rotationCoeff = 0.5, 0.1
# This is the minimum motor power required to make the robot move at all, which is
# added onto the powers calculated while driving to overcome friction.
basePower = 10
# Function to set the left and right motor powers on the robot.
def setPowers(power1, power2):
# print(power1, power2)
# See Trainer.py for more details on the format used to send powers to the Prizm controller.
prizm.write(serial.to_bytes([int(power1)+100, int(power2)+100]))
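# Illustrative sketch only (assumption): the actual power calculation appears further
# down this script, so the mapping below is just one plausible way the coefficients
# above could turn the median optical flow into motor powers. The helper name is
# hypothetical and it is not called anywhere in this file.
def powersFromFlowSketch(flow):
    forward = forwardCoeff * flow[1]     # vertical flow -> forward/backward speed
    rotation = rotationCoeff * flow[0]   # horizontal flow -> turning speed
    left, right = forward + rotation, forward - rotation
    # Add the friction-overcoming base power in the direction each wheel is moving.
    left += basePower * np.sign(left)
    right += basePower * np.sign(right)
    return left, right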
# Function to find matching key points between two images.
def match(keypoints1, descriptors1, keypoints2, descriptors2):
try:
matches = bf.match(descriptors1, descriptors2)
goodMatches = []
for i,m in enumerate(matches):
# Get the coordinates of the two key points in this match.
point1, point2 = keypoints1[m.queryIdx].pt, keypoints2[m.trainIdx].pt
# Realistically we know that key points will not have moved very far across the image.
# If a matching is found between two points that are very far away from each other it is probably an
# incorrect matching, and should be excluded from the results.
if abs(point1[1] - point2[1]) < 100 and abs(point1[0] - point2[0]) < 100:
goodMatches.append(m)
return goodMatches
except:
# It is possible for bf.match() to throw an exception if it failed to find a match due to an insufficient number of key points.
# If this happens, return an empty list of matches.
return []
# Function to calculate the median optical flow between two images.
def calculateFlow(keypoints1, descriptors1, keypoints2, descriptors2):
# Find matches between the key points of the two images.
matches = match(keypoints1, descriptors1, keypoints2, descriptors2)
# If the matching failed and returned an empty list the optical flow cannot be found.
# If the matching contains less than 20 matches it is too poor to be reliable so don't use it.
if not matches or len(matches) < 20:
return None
# Create a 2D array in which each row contains the x and y components of one optical flow vector.
# Do this by subtracting the coordinates of the first key point of each match from the second key point's coordinates.
flowVectors = np.array([(keypoints2[m.trainIdx].pt[0] - keypoints1[m.queryIdx].pt[0],
keypoints2[m.trainIdx].pt[1] - keypoints1[m.queryIdx].pt[1]) for m in matches])
# Find the medians of all the x and y components.
return np.median(flowVectors, 0)
# Called if the camera takes too long to respond when the next frame is requested.
def cameraTimeoutFunc():
global timedOut
print("Timeout!")
timedOut = True
# Stop the robot until the camera responds, so that the robot doesn't drive off course without being able to see what it is doing.
setPowers(0,0)
# This loop is constantly running in the background, ready to make a beeping sound if the robot is obstructed or drives off course.
# def beepLoop():
# while True:
# if fails >= 10: # If matching fails 10 times or more in a row the robot is probably obstructed or has driven wildly off course.
# # On Linux, call an external program for beeping.
# subprocess.call(["paplay", beepFile])
# # On Windows, use winsound.
# # winsound.Beep(500,600)
# time.sleep(0.4) # Add a delay between beeps.
# else:
# time.sleep(0.01) # Keep waiting until beeping is needed.
# Connect to the Prizm controller.
prizm = serial.Serial(prizmPort)
# Initialize the camera and load the target video.
capture = cv2.VideoCapture(cameraIndex)
print("Camera ready...")
width, height = int(capture.get(3)), int(capture.get(4))
targetCapture = cv2.VideoCapture(videoPath+reducedVideoFile)
# Load the target key points and descriptors from the feature file.
with open(videoPath+reducedFeatureFile, "rb") as file:
targetFeatures = cPickle.load(file)
# Convert the key point tuples back to OpenCV KeyPoints.
for keypoints, descriptors in targetFeatures:
for i in range(len(keypoints)):
point = keypoints[i]
keypoints[i] = cv2.KeyPoint(point[0][0], point[0][1], point[1], point[2], point[3], point[4], point[5])
# The video files produced have numbered names, e.g. Debug17.avi.
# Find the highest number out of the existing files so we can make the number of
# the new file one greater than that.
lastNo = 0
for file in os.listdir(videoPath):
if file.startswith("Debug") and file.endswith(".avi"):
try:
lastNo = max(lastNo, int(file.replace("Debug", "").replace(".avi", "")))
except:
pass
# The debug video shows the current camera frame and target frame side by side, so requires double width.
writer = cv2.VideoWriter(videoPath+"Debug"+str(lastNo+1)+".avi", cv2.VideoWriter_fourcc(*"MJPG"), 20, (width*2, height), True)
# This list will contain tuples containing the values of various variables at each iteration of the main loop.
# You can change which variables are logged by modifying the code that adds values to the list.
debugLog = []
# Create the key point detector, descriptor extractor and matcher.
fast = cv2.FastFeatureDetector_create(threshold=16, nonmaxSuppression=True)
brief = cv2.xfeatures2d.BriefDescriptorExtractor_create(bytes=16, use_orientation=False)
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
# The index of the current target video frame, starts at 0.
featureInd = 0
# The frame that the robot is currently trying to match with, initialized as the first video frame.
targetFrame = targetCapture.read()[1]
# The number of successive times that the software has failed to find a matching between the current camera frame
# and the target frame. If there are 10 successive failures that probably means that the robot has gone off course
# or had its view obstructed so should stop moving and start beeping.
fails = 0
# Wait for the user to press enter before the robot starts moving.
try:
input("Press enter to start...")
except:
pass
# This function runs on a background thread, and sets stop to True when the user presses enter again.
# This allows the robot to be stopped mid-drive if things are going wrong.
def interruptFunc():
global stop
try:
input("Press enter to stop...\n")
except:
pass
stop = True
stop = False # Stops driving and terminates the script when set to True.
# Call interruptFunc() on a background thread, so that the script will not hang until enter is pressed.
interruptThread = Thread(target=interruptFunc)
interruptThread.daemon = True
interruptThread.start()
# Start a background thread to handle beeping when matching fails too many times in a row.
# beepThread = Thread(target=beepLoop)
# beepThread.daemon = True
# beepThread.start()
flow = None
while capture.isOpened() and not stop: # Stop driving and terminate if stop is set to True or the camera closes unexpectedly.
# Sometimes the camera can lag a bit (especially if RightLight is enabled) so the robot will stop moving if retrieving the next
# frame from the camera takes too long, so that it doesn't drive off course while it can't see where it is going.
timedOut = False
# cameraTimeoutFunc() will stop the robot after 0.1 seconds, unless cancel() is called before then.
cameraTimeoutThread = Timer(0.1, cameraTimeoutFunc)
cameraTimeoutThread.daemon = True
cameraTimeoutThread.start()
camTime = time.time()
frame = capture.read()[1]
cameraTimeoutThread.cancel() # Cancel the timeout if the frame is grabbed before the time is up.
camTime = time.time() - camTime # Record the time taken for debugging purposes.
# Detect key points and their descriptors in the camera frame.
camKeypoints = fast.detect(frame, None)
camKeypoints, camDescriptors = brief.compute(frame, camKeypoints)
targetKeypoints, targetDescriptors = targetFeatures[featureInd]
# Calculate the median optical flow between the camera frame and the target frame. Will be None if matching failed.
flow = calculateFlow(camKeypoints, camDescriptors, targetKeypoints, targetDescriptors)
# If the flow vector has small x and y components the robot is close enough to the location that the target frame was
# taken at to move onto the next frame.
if flow is not None and abs(flow[0]) < 20 and abs(flow[1]) < 10:
featureInd += 1
if featureInd == len(targetFeatures):
print("Arrived!")
break # Exit the driving loop.
else:
# Move onto the next frame.
targetFrame = targetCapture.read()[1]
# Add a | |
y_))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
train_op = optimizer.minimize(loss, name='train')
init = tf.global_variables_initializer()
# Creating a tf.train.Saver adds operations to the graph to save and
# restore variables from checkpoints.
saver_def = tf.train.Saver().as_saver_def()
### The graph_def saver does not work here ##########################
# with open('graph.pb', 'wb') as f:
# f.write(tf.get_default_graph().as_graph_def())
saver = tf.train.Saver()
# Training
# saver.save(sess, your_path + "/checkpoint_name.ckpt")
# TensorFlow session
sess = tf.Session()
sess.run(init)
# saver.save(sess, "-"+ NOMBRE_CHECKPOINTS + "-.ckpt") "checkpoint_actualizado.ckpt"
#saver.save(sess, "checkpoint_actualizado_"+modelctrstr3+".ckpt")
# saver.save(sess, "checkpoint_actualizado_"+modelctrstr+".ckpt")
saver.save(sess, "checkpoint_actualizado_"+"global"+".ckpt")
print("se han creado fichero con nombre ... "+ modelctrstr4)
#
app.config["FICHERO_CHECK"] = ""
# zipf = zipfile.ZipFile(NOMBRE_CHECKPOINTS + '.ckpt.meta.zip','w'. zipfile.ZIP_DEFLATED)
# zipObj = ZipFile("ficherotestnoviembre.zip", 'w')
# zipObj.write(NOMBRE_CHECKPOINTS + ".ckpt.meta")
# zipObj.close()
## return send_from_directory(app.config["FICHERO_CHECK"], filename='ficherotestnoviembre.zip', as_attachment=True)
zipObj = ZipFile('test.zip', 'w')
zipObj.write("checkpoint")
# zipObj.write("-"+ NOMBRE_CHECKPOINTS + "-.ckpt"+'.index')
# zipObj.write("-" + NOMBRE_CHECKPOINTS + "-.ckpt" + '.data-00000-of-00001')
# zipObj.write("-" + NOMBRE_CHECKPOINTS + "-.ckpt.meta") # no se si igual .meta
# zipObj.write("checkpoint_actualizado_"+modelctrstr3+".ckpt" + '.index')
# zipObj.write("checkpoint_actualizado_"+modelctrstr3+".ckpt" + '.data-00000-of-00001')
# zipObj.write("checkpoint_actualizado_"+modelctrstr+".ckpt.meta") # no se si igual .meta
# zipObj.write("fichero_datos"+modelctrstr3)
zipObj.write("checkpoint_actualizado_"+"global"+".ckpt" + '.index') #si
zipObj.write("checkpoint_actualizado_"+"global"+".ckpt" + '.data-00000-of-00001')
zipObj.write("fichero_datos"+"global")
zipObj.close()
return send_from_directory(app.config["FICHERO_CHECK"], filename='test.zip', as_attachment=True)
# return flask.send_file('test.zip', mimetype = 'zip',attachment_filename= 'test.zip', as_attachment= True)
else: # update the model
return
##
@app.route("/descargar_graph", methods = ['GET']) #FUNCIONA: DESCARGAS EL FICHERO
def descargar_graph():
if flask.request.method == "GET":
print("checking whether the model is updated")
# will need to check a variable or something here
if modelUpdated: # send the checkpoints file
global numero
numero = numero + 1
global modelctr
modelctr = modelctr + 1
global NOMBRE_CHECKPOINTS
NOMBRE_CHECKPOINTS = "checkpoints_name_" + str(modelctr)
global NOMBRE_ZIP
NOMBRE_ZIP = "Zip_Name_" + str(modelctr)
x = tf.placeholder(tf.float32, name='input')
y_ = tf.placeholder(tf.float32, name='target')
W = tf.Variable(5., name='W')
b = tf.Variable(3., name='b')
y = tf.add(tf.multiply(x, W), b)
y = tf.identity(y, name='output')
loss = tf.reduce_mean(tf.square(y - y_))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
train_op = optimizer.minimize(loss, name='train')
init = tf.global_variables_initializer()
# Creating a tf.train.Saver adds operations to the graph to save and
# restore variables from checkpoints.
saver_def = tf.train.Saver().as_saver_def()
### The graph_def saver does not work here ##########################
with open('graph.pb', 'wb') as f:
f.write(tf.get_default_graph().as_graph_def().SerializeToString())
app.config["FICHERO_CHECK"] = ""
return send_from_directory(app.config["FICHERO_CHECK"], filename='graph.pb', as_attachment=True)
else: # update the model
return
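# Illustrative counterpart (assumption; the client that consumes graph.pb is not shown
# in this file): the exported GraphDef can be loaded back into a TF 1.x graph as
# sketched below. The helper name is hypothetical.
def load_exported_graph_sketch(path="graph.pb"):
    graph_def = tf.GraphDef()
    with open(path, "rb") as f:
        graph_def.ParseFromString(f.read())
    graph = tf.Graph()
    with graph.as_default():
        # Recreates the saved nodes ('input', 'target', 'output', 'train', ...).
        tf.import_graph_def(graph_def, name="")
    return graph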
@app.route("/descargar_graph_dense", methods = ['GET']) #FUNCIONA: DESCARGAS EL FICHERO
def descargar_graph_dense():
if flask.request.method == "GET":
print("checking whether the model is updated")
# will need to check a variable or something here
if modelUpdated: # send the checkpoints file
global numero
numero = numero + 1
global modelctr
modelctr = modelctr + 1
global NOMBRE_CHECKPOINTS
NOMBRE_CHECKPOINTS = "checkpoints_name_" + str(modelctr)
global NOMBRE_ZIP
NOMBRE_ZIP = "Zip_Name_" + str(modelctr)
model = tf.keras.models.Sequential([tf.keras.layers.Dense(1, input_shape=(1,)),
tf.keras.layers.Dense(25, activation=tf.keras.activations.relu),
tf.keras.layers.Dense(1, activation=tf.keras.activations.relu)])
model.compile(optimizer=tf.keras.optimizers.Adam(), loss=tf.keras.losses.mean_squared_error)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
init = tf.global_variables_initializer()
saver_def = tf.train.Saver().as_saver_def()
### The graph_def saver does not work here ##########################
with open('graph.pb', 'wb') as f:
f.write(tf.get_default_graph().as_graph_def().SerializeToString())
app.config["FICHERO_CHECK"] = ""
return send_from_directory(app.config["FICHERO_CHECK"], filename='graph.pb', as_attachment=True)
else: # update the model
return
"""
zipObj = ZipFile(NOMBRE_ZIP + "_PRUEBA_" + str(modelctr) + '.zip', 'w')
zipObj.write('checkpoint')
zipObj.write(NOMBRE_CHECKPOINTS + ".ckpt.meta")
zipObj.close()
return send_from_directory(app.config["FICHERO_CHECK"], filename= NOMBRE_ZIP + "_PRUEBA_" + str(modelctr) + '.zip', as_attachment=True)
"""
@app.route("/descargar_checkpoint", methods = ['GET']) #FUNCIONA: DESCARGAS EL FICHERO ****si
def descargar_checkpoint():
if flask.request.method == "GET":
print("mirando si esta updated")
#habra que mirar una variable o algo
if modelUpdated: # mando el fichero checkpoints
app.config["FICHERO_TEXTO1"] = ""
x = tf.placeholder(tf.float32, name='input')
y_ = tf.placeholder(tf.float32, name='target')
W = tf.Variable(5., name='W')
b = tf.Variable(3., name='b')
y = x * W + b
y = tf.identity(y, name='output')
loss = tf.reduce_mean(tf.square(y - y_))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
train_op = optimizer.minimize(loss, name='train')
init = tf.global_variables_initializer()
# Creating a tf.train.Saver adds operations to the graph to save and
# restore variables from checkpoints.
saver_def = tf.train.Saver().as_saver_def()
### THE GRAPH-DEF SAVER DOES NOT WORK ##########################
# with open('graph.pb', 'wb') as f:
# f.write(tf.get_default_graph().as_graph_def())
saver = tf.train.Saver()
# Training
# saver.save(sess, your_path + "/checkpoint_name.ckpt")
# TensorFlow session
sess = tf.Session()
sess.run(init)
saver.save(sess, "checkpoint_name.ckpt")
# might need to package it as a zip before sending
return send_from_directory(app.config["FICHERO_TEXTO1"], filename= "checkpoint_name.ckpt.meta", as_attachment=True)
else: # update the model
return
@app.route("/descargar4", methods=['GET']) # NOT WORKING
def descargar4():
if flask.request.method == "GET":
print("mirando si esta updated")
# habra que mirar una variable o algo
if modelUpdated: # mando el fichero checkpoints
storage_client = storage.Client.from_service_account_json("tftalejandroaguilera-8712e9c215d2.json")
BUCKET_NAME = 'bucket-alejandro-aguilera' # Nombre del bucket que he creado en el google-cloud
bucket = storage_client.get_bucket(BUCKET_NAME)
blob = bucket.blob("checkpoint")
blob.download_to_filename("checkpoint1")
print('Blob {} downloaded to {}.'.format(
"checkpoint",
"checkpoint1"))
app.config["FICHERO_CHECK"] = ""
return send_from_directory(app.config["FICHERO_CHECK"], filename="checkpoint1", as_attachment= True)
else: # update the model
return
## the federated part starts here
##The following function is responsible for receiving the weights and temporarily storing them for averaging:
@app.route("/upload", methods = ['POST']) # FIX THE NAMES
def upload():
global numero
numero = numero + 1
global modelctr
modelctr = modelctr + 1
global NOMBRE_CHECKPOINTS
NOMBRE_CHECKPOINTS = "checkpoints_name_" + str(modelctr)
global NOMBRE_ZIP
NOMBRE_ZIP = "Zip_Name_" + str(modelctr)
if flask.request.method == "POST":
global modelUpdated  # without this declaration the assignment below would only set a local variable
modelUpdated = False # the model is no longer up to date: the weights must be extracted and evaluated, the model retrained, and a new checkpoints file produced
print("Uploading File")
if flask.request.files["file"]: # take the contents of "file" and store them in weights
weights = flask.request.files["file"].read()
weights_stream = io.BytesIO(weights)
#bucket = storage.bucket()
storage_client = storage.Client.from_service_account_json("tftalejandroaguilera-8712e9c215d2.json")
BUCKET_NAME = 'bucket-alejandro-aguilera' # name of the bucket created in Google Cloud
bucket = storage_client.get_bucket(BUCKET_NAME)
#Uploading Files to Firebase
print("Saving at Server")
with open("delta.bin", "wb") as f: #se crea un fichero delta.bin con los weigths descargados que llegan desde la app
f.write(weights_stream.read())
print("Starting upload to Firebase") ####*************esto es para usbirlo a la firebase queno es mio. O uso una o lo creo en una direccion local
with open("delta.bin", "rb") as upload:
byte_w = upload.read()
#Preprocessing data before upload. File to be sent to Firebase is named "Weights.bin"
with open("Weights.bin", "wb") as f: #nombre del fichero que se envia desde la app
pickle.dump(weights, f)
with open("Weights.bin", "rb") as f: #manda a google-cloud el fichero Weights.bin
blob = bucket.blob('weight__'+ str(modelctr))
blob.upload_from_file(f)
print("File Successfully Uploaded to Firebase")
return "File Uploaded\n"
else:
print("File not found")
@app.route("/upload_A", methods = ['POST']) #PONER BIEN LOS NOMBRES
def upload_A():
global numero
numero = numero + 1
global modelctr
modelctr = modelctr + 1
global NOMBRE_CHECKPOINTS
NOMBRE_CHECKPOINTS = "checkpoints_name_" + str(modelctr)
global NOMBRE_ZIP
NOMBRE_ZIP = "Zip_Name_" + str(modelctr)
global modelctrstr
modelctrstr = str(modelctr)
global NOMBRE_CHECKPOINTS1_2
NOMBRE_CHECKPOINTS1_2 = "checkpoints_name_" + str(modelctr -1)
global NOMBRE_CHECKPOINTS1_3
NOMBRE_CHECKPOINTS1_3 = "checkpoints_name_" + str(modelctr -2)
if flask.request.method == "POST":
global modelUpdated  # without this declaration the assignment below would only set a local variable
modelUpdated = False # the model is no longer up to date: the weights must be extracted and evaluated, the model retrained, and a new checkpoints file produced
print("Uploading File")
# take the contents of "file" and store them in weights
uploaded_file = request.files['file']
if uploaded_file.filename != '':
uploaded_file.save(uploaded_file.filename)
storage_client = storage.Client.from_service_account_json("tftalejandroaguilera-8712e9c215d2.json")
BUCKET_NAME = 'bucket-alejandro-aguilera' # name of the bucket created in Google Cloud
bucket = storage_client.get_bucket(BUCKET_NAME)
#Uploading Files to Firebase
print("Saving at Server")
with open(uploaded_file.filename, "rb") as f: #manda a google-cloud el fichero Weights.bin
# blob = bucket.blob(uploaded_file.filename + str(modelctr))
blob = bucket.blob(NOMBRE_CHECKPOINTS+'_fichero_pesos_')
blob.upload_from_file(f)
print("File Successfully Uploaded to Firebase")
print("subido el fichero "+NOMBRE_CHECKPOINTS+'_fichero_pesos_')
global nombre_fichero_descarga
nombre_fichero_descarga = NOMBRE_CHECKPOINTS+'_fichero_pesos_'
print("fichero con nombre " + nombre_fichero_descarga)
global nombre_fichero_descarga1
nombre_fichero_descarga1 = NOMBRE_CHECKPOINTS1_2+'_fichero_pesos_'
print("fichero con nombre " + nombre_fichero_descarga1)
global nombre_fichero_descarga2
nombre_fichero_descarga2 = NOMBRE_CHECKPOINTS1_3+'_fichero_pesos_'
print("fichero con nombre " + nombre_fichero_descarga2)
return "File Uploaded\n"
else:
print("File not found")
@app.route("/upload_B", methods = ['POST']) #PONER BIEN LOS NOMBRES
def upload_B():
global numero2
numero2 = numero2 + 1
global modelctr2
modelctr2 = modelctr2 + 1
global NOMBRE_CHECKPOINTS2
NOMBRE_CHECKPOINTS2 = "checkpoints_name_" + str(modelctr2)
global NOMBRE_ZIP2
NOMBRE_ZIP2 = "Zip_Name2_" + str(modelctr2)
global modelctrstr2
modelctrstr2 = str(modelctr2)
global NOMBRE_CHECKPOINTS2_2
NOMBRE_CHECKPOINTS2_2 = "checkpoints_name_" + str(modelctr2 -1)
global NOMBRE_CHECKPOINTS2_3
NOMBRE_CHECKPOINTS2_3 = "checkpoints_name_" + str(modelctr2 -2)
if flask.request.method == "POST":
global modelUpdated  # without this declaration the assignment below would only set a local variable
modelUpdated = False # the model is no longer up to date: the weights must be extracted and evaluated, the model retrained, and a new checkpoints file produced
print("Uploading File")
# take the contents of "file" and store them in weights
uploaded_file = request.files['file']
if uploaded_file.filename != '':
uploaded_file.save(uploaded_file.filename)
storage_client = storage.Client.from_service_account_json("tftalejandroaguilera-8712e9c215d2.json")
BUCKET_NAME = 'bucket-alejandro-aguilera' # name of the bucket created in Google Cloud
bucket = storage_client.get_bucket(BUCKET_NAME)
#Uploading Files to Firebase
print("Saving at Server")
with open(uploaded_file.filename, "rb") as f: #manda a google-cloud | |
E501
raise ApiValueError("Missing the required parameter `function_type` when calling `archive_by_function_type`") # noqa: E501
# verify the required parameter 'app_id' is set
if self.api_client.client_side_validation and ("app_id" not in local_var_params or local_var_params["app_id"] is None): # noqa: E501 # noqa: E501
raise ApiValueError("Missing the required parameter `app_id` when calling `archive_by_function_type`") # noqa: E501
collection_formats = {}
path_params = {}
if "definition_id" in local_var_params:
path_params["definitionId"] = local_var_params["definition_id"] # noqa: E501
if "function_type" in local_var_params:
path_params["functionType"] = local_var_params["function_type"] # noqa: E501
if "app_id" in local_var_params:
path_params["appId"] = local_var_params["app_id"] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(["*/*"]) # noqa: E501
# Authentication setting
auth_settings = ["developer_hapikey"] # noqa: E501
return self.api_client.call_api(
"/automation/v4/actions/{appId}/{definitionId}/functions/{functionType}",
"DELETE",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get("_return_http_data_only"), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
def create_or_replace(self, definition_id, function_type, function_id, app_id, body, **kwargs): # noqa: E501
"""Create or replace a custom action function # noqa: E501
Creates or replaces a function for a custom workflow action. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_or_replace(definition_id, function_type, function_id, app_id, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str definition_id: The ID of the custom workflow action. (required)
:param str function_type: The type of function. This determines when the function will be called. (required)
:param str function_id: The ID qualifier for the function. This is used to specify which input field a function is associated with for `PRE_FETCH_OPTIONS` and `POST_FETCH_OPTIONS` function types. (required)
:param int app_id: (required)
:param str body: The function source code. Must be valid JavaScript code. (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: ActionFunctionIdentifier
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.create_or_replace_with_http_info(definition_id, function_type, function_id, app_id, body, **kwargs) # noqa: E501
def create_or_replace_with_http_info(self, definition_id, function_type, function_id, app_id, body, **kwargs): # noqa: E501
"""Create or replace a custom action function # noqa: E501
Creates or replaces a function for a custom workflow action. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_or_replace_with_http_info(definition_id, function_type, function_id, app_id, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str definition_id: The ID of the custom workflow action. (required)
:param str function_type: The type of function. This determines when the function will be called. (required)
:param str function_id: The ID qualifier for the function. This is used to specify which input field a function is associated with for `PRE_FETCH_OPTIONS` and `POST_FETCH_OPTIONS` function types. (required)
:param int app_id: (required)
:param str body: The function source code. Must be valid JavaScript code. (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(ActionFunctionIdentifier, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["definition_id", "function_type", "function_id", "app_id", "body"]
all_params.extend(["async_req", "_return_http_data_only", "_preload_content", "_request_timeout"])
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError("Got an unexpected keyword argument '%s'" " to method create_or_replace" % key)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'definition_id' is set
if self.api_client.client_side_validation and ("definition_id" not in local_var_params or local_var_params["definition_id"] is None): # noqa: E501 # noqa: E501
raise ApiValueError("Missing the required parameter `definition_id` when calling `create_or_replace`") # noqa: E501
# verify the required parameter 'function_type' is set
if self.api_client.client_side_validation and ("function_type" not in local_var_params or local_var_params["function_type"] is None): # noqa: E501 # noqa: E501
raise ApiValueError("Missing the required parameter `function_type` when calling `create_or_replace`") # noqa: E501
# verify the required parameter 'function_id' is set
if self.api_client.client_side_validation and ("function_id" not in local_var_params or local_var_params["function_id"] is None): # noqa: E501 # noqa: E501
raise ApiValueError("Missing the required parameter `function_id` when calling `create_or_replace`") # noqa: E501
# verify the required parameter 'app_id' is set
if self.api_client.client_side_validation and ("app_id" not in local_var_params or local_var_params["app_id"] is None): # noqa: E501 # noqa: E501
raise ApiValueError("Missing the required parameter `app_id` when calling `create_or_replace`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ("body" not in local_var_params or local_var_params["body"] is None): # noqa: E501 # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `create_or_replace`") # noqa: E501
collection_formats = {}
path_params = {}
if "definition_id" in local_var_params:
path_params["definitionId"] = local_var_params["definition_id"] # noqa: E501
if "function_type" in local_var_params:
path_params["functionType"] = local_var_params["function_type"] # noqa: E501
if "function_id" in local_var_params:
path_params["functionId"] = local_var_params["function_id"] # noqa: E501
if "app_id" in local_var_params:
path_params["appId"] = local_var_params["app_id"] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if "body" in local_var_params:
body_params = local_var_params["body"]
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(["application/json", "*/*"]) # noqa: E501
# HTTP header `Content-Type`
header_params["Content-Type"] = self.api_client.select_header_content_type(["text/plain"]) # noqa: E501 # noqa: E501
# Authentication setting
auth_settings = ["developer_hapikey"] # noqa: E501
return self.api_client.call_api(
"/automation/v4/actions/{appId}/{definitionId}/functions/{functionType}/{functionId}",
"PUT",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="ActionFunctionIdentifier", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get("_return_http_data_only"), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
def create_or_replace_by_function_type(self, definition_id, function_type, app_id, body, **kwargs): # noqa: E501
"""Create or replace a custom action function # noqa: E501
Creates or replaces a function for a custom workflow action. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_or_replace_by_function_type(definition_id, function_type, app_id, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str definition_id: The ID of the custom workflow action. (required)
:param str function_type: The type of function. This determines when the function will be called. (required)
:param int app_id: (required)
:param str body: The function source code. Must be valid JavaScript code. (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: ActionFunctionIdentifier
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.create_or_replace_by_function_type_with_http_info(definition_id, function_type, app_id, body, **kwargs) # noqa: E501
def create_or_replace_by_function_type_with_http_info(self, definition_id, function_type, app_id, body, **kwargs): # noqa: E501
"""Create or replace a custom action function # noqa: E501
Creates or replaces a function for a custom workflow action. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_or_replace_by_function_type_with_http_info(definition_id, function_type, app_id, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str definition_id: The ID of the custom workflow action. (required)
:param str function_type: The type of function. This determines when the function will be called. (required)
:param int app_id: (required)
:param str body: The function source code. Must be valid JavaScript code. (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(ActionFunctionIdentifier, status_code(int), headers(HTTPHeaderDict))
from application.notification import IObserver, NotificationCenter
from application.python import Null
from application.python.types import Singleton
from datetime import timedelta
from sipsimple.account import Account, AccountManager
from sipsimple.application import SIPApplication
from sipsimple.audio import WavePlayer
from sipsimple.configuration.settings import SIPSimpleSettings
from sipsimple.core import SIPURI, ToHeader
from sipsimple.lookup import DNSLookup
from sipsimple.session import Session
from sipsimple.streams import AudioStream
from sipsimple.threading import run_in_twisted_thread
from threading import RLock
from twisted.internet import reactor
from twisted.internet.task import LoopingCall
from zope.interface import implements
from op2d.configuration.datatypes import DefaultPath
from op2d.resources import Resources
__all__ = ['SessionManager']
# TODO: refactor the sessions model, borrow the one from Blink Qt
class IncomingRequest(object):
def __init__(self):
self.session = None
self.streams = None
self.new_session = False
self.accepted_streams = None
self.reject_mode = None
self._done = False
def initialize(self, session, streams, new_session=True):
self.session = session
self.streams = streams
self.new_session = new_session
notification_center = NotificationCenter()
notification_center.post_notification('IncomingRequestReceived', sender=self)
@property
def proposed_streams(self):
return [stream.type for stream in self.streams]
@property
def ringtone(self):
if 'ringtone' not in self.__dict__:
if 'audio' in self.proposed_streams:
sound_file = self.session.account.sounds.inbound_ringtone
if sound_file is not None and sound_file.path is DefaultPath:
settings = SIPSimpleSettings()
sound_file = settings.sounds.inbound_ringtone
ringtone = WavePlayer(SIPApplication.alert_audio_mixer, sound_file.path, volume=sound_file.volume, loop_count=0, pause_time=2.7) if sound_file is not None else Null
ringtone.bridge = SIPApplication.alert_audio_bridge
else:
ringtone = WavePlayer(SIPApplication.alert_audio_mixer, Resources.get('sounds/beeping_ringtone.wav'), volume=70, loop_count=0, pause_time=5)
ringtone.bridge = SIPApplication.alert_audio_bridge
self.__dict__['ringtone'] = ringtone
return self.__dict__['ringtone']
def accept(self):
if self._done:
return
self.accepted_streams = [next(stream for stream in self.streams if stream.type=='audio')]
notification_center = NotificationCenter()
notification_center.post_notification('IncomingRequestAccepted', sender=self)
self._done = True
def reject(self):
if self._done:
return
self.reject_mode = 'reject'
notification_center = NotificationCenter()
notification_center.post_notification('IncomingRequestRejected', sender=self)
self._done = True
def busy(self):
if self._done:
return
self.reject_mode = 'busy'
notification_center = NotificationCenter()
notification_center.post_notification('IncomingRequestRejected', sender=self)
self._done = True
def cancel(self):
if self._done:
return
notification_center = NotificationCenter()
notification_center.post_notification('IncomingRequestCancelled', sender=self)
self._done = True
class SessionItem(object):
implements(IObserver)
def __init__(self):
self.name = None
self.uri = None
self.session = None
self.streams = {}
self.initialized = False
self.timer = LoopingCall(self._timer_fired)
self.outbound_ringtone = Null
self.offer_in_progress = False
self.local_hold = False
self.remote_hold = False
self._active = False
self._codec_info = u''
self._tls = False
self._srtp = False
self._duration = timedelta(0)
self._latency = 0
self._packet_loss = 0
self.status = None
self.notification_center = NotificationCenter()
def init_incoming(self, session, streams):
self.name = session.remote_identity.display_name
self.uri = session.remote_identity.uri
self.session = session
for stream in streams:
self._set_stream(stream.type, stream)
self.initialized = True
self.notification_center.add_observer(self, sender=self.session)
self.notification_center.post_notification('SessionItemNewIncoming', sender=self)
def init_outgoing(self, name, uri, streams, account):
self.name = name
self.uri = uri
self.session = Session(account)
for stream in streams:
self._set_stream(stream.type, stream)
self.initialized = True
self.notification_center.add_observer(self, sender=self.session)
self.notification_center.post_notification('SessionItemNewOutgoing', sender=self)
def _set_stream(self, stream_type, stream):
old_stream = self.streams.get(stream_type, None)
self.streams[stream_type] = stream
if old_stream is not None:
self.notification_center.remove_observer(self, sender=old_stream)
if stream_type == 'audio':
self.hold_tone = Null
if stream is not None:
self.notification_center.add_observer(self, sender=stream)
if stream_type == 'audio':
self.hold_tone = WavePlayer(stream.bridge.mixer, Resources.get('sounds/hold_tone.wav'), loop_count=0, pause_time=45, volume=30)
stream.bridge.add(self.hold_tone)
@property
def audio_stream(self):
return self.streams.get('audio', None)
@property
def codec_info(self):
return self._codec_info
@property
def tls(self):
return self._tls
@property
def srtp(self):
return self._srtp
@property
def duration(self):
return self._duration
@property
def latency(self):
return self._latency
@property
def packet_loss(self):
return self._packet_loss
def _get_status(self):
return self.__dict__.get('status', None)
def _set_status(self, value):
old_value = self.__dict__.get('status', None)
if old_value == value:
return
self.__dict__['status'] = value
self.notification_center.post_notification('SessionItemDidChange', sender=self)
status = property(_get_status, _set_status)
@property
def pending_removal(self):
return not bool(self.streams.values())
def _get_active(self):
return self._active
def _set_active(self, value):
value = bool(value)
if self._active == value:
return
self._active = value
if self.audio_stream:
self.audio_stream.device.output_muted = not value
if value:
self.unhold()
self.notification_center.post_notification('SessionItemDidActivate', sender=self)
else:
self.hold()
self.notification_center.post_notification('SessionItemDidDeactivate', sender=self)
active = property(_get_active, _set_active)
del _get_active, _set_active
def connect(self):
self.offer_in_progress = True
account = self.session.account
settings = SIPSimpleSettings()
if isinstance(account, Account):
if account.sip.outbound_proxy is not None:
proxy = account.sip.outbound_proxy
uri = SIPURI(host=proxy.host, port=proxy.port, parameters={'transport': proxy.transport})
elif account.sip.always_use_my_proxy:
uri = SIPURI(host=account.id.domain)
else:
uri = self.uri
else:
uri = self.uri
self.status = u'Looking up destination'
lookup = DNSLookup()
self.notification_center.add_observer(self, sender=lookup)
lookup.lookup_sip_proxy(uri, settings.sip.transport_list)
def hold(self):
if not self.pending_removal and not self.local_hold:
self.local_hold = True
self.session.hold()
self.hold_tone.start()
def unhold(self):
if not self.pending_removal and self.local_hold:
self.local_hold = False
self.session.unhold()
def send_dtmf(self, digit):
if self.audio_stream is not None:
try:
self.audio_stream.send_dtmf(digit)
except RuntimeError:
pass
else:
digit_map = {'*': 'star'}
filename = 'sounds/dtmf_%s_tone.wav' % digit_map.get(digit, digit)
player = WavePlayer(SIPApplication.voice_audio_bridge.mixer, Resources.get(filename))
self.notification_center.add_observer(self, sender=player)
if self.session.account.rtp.inband_dtmf:
self.audio_stream.bridge.add(player)
SIPApplication.voice_audio_bridge.add(player)
player.start()
def end(self):
if self.session.state is None:
self.status = u'Call canceled'
self._cleanup()
else:
self.session.end()
def _cleanup(self):
if self.timer.running:
self.timer.stop()
self.notification_center.remove_observer(self, sender=self.session)
for k in self.streams.keys():
self._set_stream(k, None)
player = WavePlayer(SIPApplication.voice_audio_bridge.mixer, Resources.get('sounds/hangup_tone.wav'), volume=60)
self.notification_center.add_observer(self, sender=player)
SIPApplication.voice_audio_bridge.add(player)
player.start()
self.notification_center.post_notification('SessionItemDidEnd', sender=self)
def _reset_status(self):
if self.pending_removal or self.offer_in_progress:
return
if self.local_hold:
self.status = u'On hold'
elif self.remote_hold:
self.status = u'Hold by remote'
else:
self.status = None
def _set_codec_info(self):
codecs = []
if self.audio_stream is not None:
desc = 'HD Audio' if self.audio_stream.sample_rate/1000 >= 16 else 'Audio'
codecs.append('[%s] %s %dkHz' % (desc, self.audio_stream.codec, self.audio_stream.sample_rate/1000))
self._codec_info = ', '.join(codecs)
def _timer_fired(self):
if self.audio_stream is not None:
stats = self.audio_stream.statistics
if stats is not None:
self._latency = stats['rtt']['avg'] / 1000
self._packet_loss = int(stats['rx']['packets_lost']*100.0/stats['rx']['packets']) if stats['rx']['packets'] else 0
self._duration += timedelta(seconds=1)
self.notification_center.post_notification('SessionItemDidChange', sender=self)
@run_in_twisted_thread
def handle_notification(self, notification):
handler = getattr(self, '_NH_%s' % notification.name, Null)
handler(notification)
def _NH_AudioStreamICENegotiationStateDidChange(self, notification):
if notification.data.state == 'GATHERING':
self.status = u'Gathering ICE candidates'
elif notification.data.state == 'NEGOTIATING':
self.status = u'Negotiating ICE'
elif notification.data.state == 'RUNNING':
self.status = u'Connecting...'
elif notification.data.state == 'FAILED':
self.status = u'ICE failed'
def _NH_AudioStreamGotDTMF(self, notification):
digit_map = {'*': 'star'}
filename = 'sounds/dtmf_%s_tone.wav' % digit_map.get(notification.data.digit, notification.data.digit)
player = WavePlayer(SIPApplication.voice_audio_bridge.mixer, Resources.get(filename))
notification.center.add_observer(self, sender=player)
SIPApplication.voice_audio_bridge.add(player)
player.start()
def _NH_DNSLookupDidSucceed(self, notification):
settings = SIPSimpleSettings()
notification.center.remove_observer(self, sender=notification.sender)
if self.pending_removal:
return
if self.audio_stream:
outbound_ringtone = settings.sounds.outbound_ringtone
if outbound_ringtone:
self.outbound_ringtone = WavePlayer(self.audio_stream.mixer, outbound_ringtone.path, outbound_ringtone.volume, loop_count=0, pause_time=5)
self.audio_stream.bridge.add(self.outbound_ringtone)
routes = notification.data.result
self._tls = routes[0].transport=='tls' if routes else False
self.status = u'Connecting...'
self.session.connect(ToHeader(self.uri), routes, self.streams.values())
def _NH_DNSLookupDidFail(self, notification):
notification.center.remove_observer(self, sender=notification.sender)
if self.pending_removal:
return
self.status = u'Destination not found'
self._cleanup()
def _NH_SIPSessionGotRingIndication(self, notification):
self.status = u'Ringing...'
self.outbound_ringtone.start()
def _NH_SIPSessionWillStart(self, notification):
self.outbound_ringtone.stop()
def _NH_SIPSessionDidStart(self, notification):
if self.audio_stream not in notification.data.streams:
self._set_stream('audio', None)
if not self.local_hold:
self.offer_in_progress = False
if not self.pending_removal:
self.timer.start(1)
self._set_codec_info()
self.status = u'Connected'
self._srtp = self.audio_stream is not None and self.audio_stream.srtp_active
self._tls = self.session.transport == 'tls'
reactor.callLater(1, self._reset_status)
else:
self.status = u'Ending...'
self._cleanup()
def _NH_SIPSessionDidFail(self, notification):
self.offer_in_progress = False
if notification.data.failure_reason == 'user request':
if notification.data.code == 487:
reason = u'Call canceled'
else:
reason = unicode(notification.data.reason)
else:
reason = notification.data.failure_reason
self.status = reason
self.outbound_ringtone.stop()
self._cleanup()
def _NH_SIPSessionDidEnd(self, notification):
self.offer_in_progress = False
self.status = u'Call ended' if notification.data.originator=='local' else u'Call ended by remote'
self._cleanup()
def _NH_SIPSessionDidChangeHoldState(self, notification):
if notification.data.originator == 'remote':
self.remote_hold = notification.data.on_hold
if self.local_hold:
if not self.offer_in_progress:
self.status = u'On hold'
elif self.remote_hold:
if not self.offer_in_progress:
self.status = u'Hold by remote'
self.hold_tone.start()
else:
self.status = None
self.hold_tone.stop()
self.offer_in_progress = False
def _NH_WavePlayerDidFail(self, notification):
notification.center.remove_observer(self, sender=notification.sender)
def _NH_WavePlayerDidEnd(self, notification):
notification.center.remove_observer(self, sender=notification.sender)
class SessionManager(object):
__metaclass__ = Singleton
implements(IObserver)
def __init__(self):
self._lock = RLock()
self._active_session = None
self._incoming_proposals = []
self._sessions = []
self.current_ringtone = Null
self.last_dialed_uri = None
@classmethod
def create_uri(cls, account, address):
if not address.startswith(('sip:', 'sips:')):
address = 'sip:' + address
username, separator, domain = address.partition('@')
if not domain and isinstance(account, Account):
domain = account.id.domain
elif '.' not in domain and isinstance(account, Account):
domain += '.' + account.id.domain
elif not domain:
raise ValueError('SIP address without domain')
address = username + '@' + domain
return SIPURI.parse(str(address))
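# --- Illustrative only (standalone helper, not a SessionManager method): a self-contained
# --- sketch of the address normalisation performed by create_uri() above, using a plain
# --- domain string instead of a sipsimple Account. The domain "example.org" is a
# --- hypothetical placeholder and the ValueError branch for accounts without a domain is
# --- omitted for brevity.
def _example_normalize_address(address, account_domain="example.org"):
    if not address.startswith(('sip:', 'sips:')):
        address = 'sip:' + address              # make sure the scheme is present
    username, separator, domain = address.partition('@')
    if not domain:
        domain = account_domain                 # bare user name: use the account domain
    elif '.' not in domain:
        domain += '.' + account_domain          # unqualified host: append the account domain
    return username + '@' + domain
# _example_normalize_address('alice')    -> 'sip:alice@example.org'
# _example_normalize_address('bob@host') -> 'sip:bob@host.example.org'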
def start(self):
notification_center = NotificationCenter()
notification_center.add_observer(self, name='SIPSessionNewIncoming')
notification_center.add_observer(self, name='SIPSessionNewProposal')
notification_center.add_observer(self, name='SIPSessionDidFail')
notification_center.add_observer(self, name='SIPSessionDidRenegotiateStreams')
def stop(self):
notification_center = NotificationCenter()
notification_center.remove_observer(self, name='SIPSessionNewIncoming')
notification_center.remove_observer(self, name='SIPSessionNewProposal')
notification_center.remove_observer(self, name='SIPSessionDidFail')
notification_center.remove_observer(self, name='SIPSessionDidRenegotiateStreams')
def _get_active_session(self):
return self._active_session
def _set_active_session(self, value):
old_active_session = self._active_session
if old_active_session == value:
return
if old_active_session is not None:
old_active_session.active = False
if value is not None:
value.active = True
self._active_session = value
active_session = property(_get_active_session, _set_active_session)
del _get_active_session, _set_active_session
@property
def sessions(self):
return [session for session in self._sessions if not session.pending_removal]
def start_call(self, name, address, account=None):
account_manager = AccountManager()
account = account or account_manager.default_account
if account is None or not account.enabled:
raise ValueError('Invalid account')
try:
remote_uri = self.create_uri(account, address)
except Exception, e:
raise ValueError('Invalid URI: %s' % e)
else:
self.last_dialed_uri = remote_uri
session_item = SessionItem()
self._sessions.append(session_item)
notification_center = NotificationCenter()
notification_center.add_observer(self, sender=session_item)
self.active_session = session_item
session_item.init_outgoing(name, remote_uri, [AudioStream()], account)
session_item.connect()
def update_ringtone(self):
if not self._incoming_proposals:
self.current_ringtone = Null
elif self.sessions:
self.current_ringtone = self.beeping_ringtone
else:
self.current_ringtone = self._incoming_proposals[0].ringtone
@property
def beeping_ringtone(self):
if 'beeping_ringtone' not in self.__dict__:
ringtone = | |
# Repository: oponcea/ceph-uprev-stx-config
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# All Rights Reserved.
#
""" System Inventory Kubernetes Application Operator."""
import docker
import grp
import os
import pwd
import re
import shutil
import subprocess
import threading
import time
import yaml
from collections import namedtuple
from eventlet import greenpool
from eventlet import greenthread
from eventlet import queue
from eventlet import Timeout
from oslo_config import cfg
from oslo_log import log as logging
from sysinv.common import constants
from sysinv.common import exception
from sysinv.common import kubernetes
from sysinv.common import utils as cutils
from sysinv.helm import common
from sysinv.helm import helm
# Log and config
LOG = logging.getLogger(__name__)
kube_app_opts = [
cfg.StrOpt('armada_image_tag',
default=('quay.io/airshipit/armada:'
'f807c3a1ec727c883c772ffc618f084d960ed5c9'),
help='Docker image tag of Armada.'),
]
CONF = cfg.CONF
CONF.register_opts(kube_app_opts)
# Constants
APPLY_SEARCH_PATTERN = 'Processing Chart,'
ARMADA_CONTAINER_NAME = 'armada_service'
ARMADA_MANIFEST_APPLY_SUCCESS_MSG = 'Done applying manifest'
CONTAINER_ABNORMAL_EXIT_CODE = 137
DELETE_SEARCH_PATTERN = 'Deleting release'
INSTALLATION_TIMEOUT = 3600
MAX_DOWNLOAD_THREAD = 20
TARFILE_DOWNLOAD_CONNECTION_TIMEOUT = 60
TARFILE_TRANSFER_CHUNK_SIZE = 1024 * 512
# Helper functions
def generate_armada_manifest_filename(app_name, manifest_filename):
return os.path.join('/manifests', app_name + '-' + manifest_filename)
def generate_armada_manifest_filename_abs(app_name, manifest_filename):
return os.path.join(constants.APP_SYNCED_DATA_PATH,
app_name + '-' + manifest_filename)
def generate_manifest_filename_abs(app_name, manifest_filename):
return os.path.join(constants.APP_INSTALL_PATH,
app_name, manifest_filename)
def generate_images_filename_abs(app_name):
return os.path.join(constants.APP_SYNCED_DATA_PATH,
app_name + '-images.yaml')
def create_app_path(path):
uid = pwd.getpwnam(constants.SYSINV_USERNAME).pw_uid
gid = os.getgid()
if not os.path.exists(constants.APP_INSTALL_PATH):
os.makedirs(constants.APP_INSTALL_PATH)
os.chown(constants.APP_INSTALL_PATH, uid, gid)
os.makedirs(path)
os.chown(path, uid, gid)
def get_app_install_root_path_ownership():
uid = os.stat(constants.APP_INSTALL_ROOT_PATH).st_uid
gid = os.stat(constants.APP_INSTALL_ROOT_PATH).st_gid
return (uid, gid)
Chart = namedtuple('Chart', 'name namespace')
class AppOperator(object):
"""Class to encapsulate Kubernetes App operations for System Inventory"""
def __init__(self, dbapi):
self._dbapi = dbapi
self._docker = DockerHelper()
self._helm = helm.HelmOperator(self._dbapi)
self._kube = kubernetes.KubeOperator(self._dbapi)
self._lock = threading.Lock()
def _cleanup(self, app):
"""" Remove application directories and override files """
try:
if (app.status != constants.APP_UPLOAD_FAILURE and
os.path.exists(os.path.join(app.path, 'metadata.yaml'))):
self._process_node_labels(app, op=constants.LABEL_REMOVE_OP)
if app.system_app and app.status != constants.APP_UPLOAD_FAILURE:
self._remove_chart_overrides(app.armada_mfile_abs)
if os.path.exists(app.armada_mfile_abs):
os.unlink(app.armada_mfile_abs)
if os.path.exists(app.imgfile_abs):
os.unlink(app.imgfile_abs)
if os.path.exists(app.path):
shutil.rmtree(app.path)
except OSError as e:
LOG.error(e)
def _update_app_status(self, app, new_status=None, new_progress=None):
""" Persist new app status """
if new_status is None:
new_status = app.status
elif (new_status in [constants.APP_UPLOAD_SUCCESS,
constants.APP_APPLY_SUCCESS]):
new_progress = constants.APP_PROGRESS_COMPLETED
with self._lock:
app.update_status(new_status, new_progress)
def _abort_operation(self, app, operation,
progress=constants.APP_PROGRESS_ABORTED):
if (app.status == constants.APP_UPLOAD_IN_PROGRESS):
self._update_app_status(app, constants.APP_UPLOAD_FAILURE,
progress)
elif (app.status == constants.APP_APPLY_IN_PROGRESS):
self._update_app_status(app, constants.APP_APPLY_FAILURE,
progress)
elif (app.status == constants.APP_REMOVE_IN_PROGRESS):
self._update_app_status(app, constants.APP_REMOVE_FAILURE,
progress)
LOG.error("Application %s aborted!." % operation)
def _download_tarfile(self, app):
from six.moves.urllib.request import urlopen
from six.moves.urllib.error import HTTPError
from six.moves.urllib.error import URLError
from socket import timeout as socket_timeout
from six.moves.urllib.parse import urlparse
def _handle_download_failure(reason):
raise exception.KubeAppUploadFailure(
name=app.name,
reason=reason)
try:
remote_file = urlopen(
app.tarfile, timeout=TARFILE_DOWNLOAD_CONNECTION_TIMEOUT)
try:
remote_filename = remote_file.info()['Content-Disposition']
except KeyError:
remote_filename = os.path.basename(
urlparse(remote_file.url).path)
filename_avail = False if (remote_filename is None or
remote_filename == '') else True
if filename_avail:
if (not remote_filename.endswith('.tgz') and
not remote_filename.endswith('.tar.gz')):
reason = app.tarfile + ' has unrecognizable tar file ' + \
'extension. Supported extensions are: .tgz and .tar.gz.'
_handle_download_failure(reason)
return None
filename = '/tmp/' + remote_filename
else:
filename = '/tmp/' + app.name + '.tgz'
with open(filename, 'wb') as dest:
shutil.copyfileobj(remote_file, dest, TARFILE_TRANSFER_CHUNK_SIZE)
return filename
except HTTPError as err:
LOG.error(err)
reason = 'failed to download tarfile ' + app.tarfile + \
', error code = ' + str(err.code)
_handle_download_failure(reason)
except URLError as err:
LOG.error(err)
reason = app.tarfile + ' is unreachable.'
_handle_download_failure(reason)
except shutil.Error as err:
LOG.error(err)
err_file = os.path.basename(filename) if filename_avail else app.tarfile
reason = 'failed to process tarfile ' + err_file
_handle_download_failure(reason)
except socket_timeout as e:
LOG.error(e)
reason = 'failed to download tarfile ' + app.tarfile + \
', connection timed out.'
_handle_download_failure(reason)
def _extract_tarfile(self, app):
def _handle_extract_failure(
reason='failed to extract tarfile content.'):
raise exception.KubeAppUploadFailure(
name=app.name,
reason=reason)
def _find_manifest_file(app_path):
mfiles = cutils.find_manifest_file(app_path)
if mfiles is None:
_handle_extract_failure('manifest file is corrupted.')
if mfiles:
if len(mfiles) == 1:
return mfiles[0]
else:
_handle_extract_failure(
'tarfile contains more than one manifest file.')
else:
_handle_extract_failure('manifest file is missing.')
orig_uid, orig_gid = get_app_install_root_path_ownership()
try:
# One time set up of Armada manifest path for the system
if not os.path.isdir(constants.APP_SYNCED_DATA_PATH):
os.makedirs(constants.APP_SYNCED_DATA_PATH)
if not os.path.isdir(app.path):
create_app_path(app.path)
# Temporarily change /scratch group ownership to wrs_protected
os.chown(constants.APP_INSTALL_ROOT_PATH, orig_uid,
grp.getgrnam(constants.SYSINV_WRS_GRPNAME).gr_gid)
# Extract the tarfile as sysinv user
if not cutils.extract_tarfile(app.path, app.tarfile, demote_user=True):
_handle_extract_failure()
if app.downloaded_tarfile:
if not cutils.verify_checksum(app.path):
_handle_extract_failure('checksum validation failed.')
mname, mfile = _find_manifest_file(app.path)
# Save the official manifest file info. They will be persisted
# in the next status update
app.regenerate_manifest_filename(mname, os.path.basename(mfile))
if os.path.isdir(app.charts_dir):
if len(os.listdir(app.charts_dir)) == 0:
_handle_extract_failure('tarfile contains no Helm charts.')
tar_filelist = cutils.get_files_matching(app.charts_dir,
'.tgz')
if not tar_filelist:
reason = 'tarfile contains no Helm charts of expected ' + \
'file extension (.tgz).'
_handle_extract_failure(reason)
for p, f in tar_filelist:
if not cutils.extract_tarfile(
p, os.path.join(p, f), demote_user=True):
_handle_extract_failure()
except OSError as e:
LOG.error(e)
_handle_extract_failure()
finally:
os.chown(constants.APP_INSTALL_ROOT_PATH, orig_uid, orig_gid)
def _get_image_tags_by_path(self, path):
""" Mine the image tags from values.yaml files in the chart directory,
intended for custom apps.
TODO(awang): Support custom apps to pull images from local registry
"""
image_tags = []
ids = []
for r, f in cutils.get_files_matching(path, 'values.yaml'):
with open(os.path.join(r, f), 'r') as file:
try:
y = yaml.load(file)
ids = y["images"]["tags"].values()
except (TypeError, KeyError):
pass
image_tags.extend(ids)
return list(set(image_tags))
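# --- Illustrative only (standalone helper, not an AppOperator method): the values.yaml
# --- layout that _get_image_tags_by_path() above expects, and a minimal sketch of the same
# --- extraction applied to an in-memory dict. The image names below are hypothetical.
#
#   images:
#     tags:
#       db: docker.io/mariadb:10.2.13
#       exporter: prom/mysqld-exporter:v0.10.0
#
def _example_extract_tags(values_doc):
    try:
        return list(set(values_doc["images"]["tags"].values()))
    except (TypeError, KeyError):
        return []
# _example_extract_tags({"images": {"tags": {"db": "docker.io/mariadb:10.2.13"}}})
# -> ['docker.io/mariadb:10.2.13']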
def _get_image_tags_by_charts(self, app_path, charts):
""" Mine the image tags from the chart paths. Add the converted
image tags to the overrides if the image tags from the chart
paths do not exist. Intended for system app.
The image tag conversion (local docker registry address prepended):
${LOCAL_DOCKER_REGISTRY_IP}:${REGISTRY_PORT}/<image-name>
(e.g. 192.168.204.2:9001/docker.io/mariadb:10.2.13)
"""
local_docker_registry_ip = self._dbapi.address_get_by_name(
cutils.format_address_name(constants.CONTROLLER_HOSTNAME,
constants.NETWORK_TYPE_MGMT)
).address
image_tags = []
for chart in charts:
images_charts = {}
images_overrides = {}
overrides = chart.namespace + '-' + chart.name + '.yaml'
overrides_file = os.path.join(common.HELM_OVERRIDES_PATH,
overrides)
chart_name = os.path.join(app_path, chart.name)
chart_path = os.path.join(chart_name, 'values.yaml')
# Get the image tags from the chart path
if os.path.exists(chart_path):
with open(chart_path, 'r') as file:
try:
doc = yaml.load(file)
images_charts = doc["images"]["tags"]
except (TypeError, KeyError):
pass
# Get the image tags from the overrides file
if os.path.exists(overrides_file):
with open(overrides_file, 'r') as file:
try:
y = yaml.load(file)
images_overrides = y["data"]["values"]["images"]["tags"]
except (TypeError, KeyError):
LOG.info("Overrides file %s has no img tags" %
overrides_file)
pass
# Add the converted image tags to the overrides if the images from
# the chart path do not exist in the overrides
tags_updated = False
for key, image_tag in images_charts.items():
if (key not in images_overrides and
not image_tag.startswith(local_docker_registry_ip)):
images_overrides.update(
{key: '{}:{}/{}'.format(local_docker_registry_ip, common.REGISTRY_PORT, image_tag)})
tags_updated = True
if tags_updated:
with open(overrides_file, 'w') as file:
try:
if "images" not in y["data"]["values"]:
file.seek(0)
file.truncate()
y["data"]["values"]["images"] = {"tags": images_overrides}
else:
y["data"]["values"]["images"]["tags"] = images_overrides
yaml.safe_dump(y, file, explicit_start=True,
default_flow_style=False)
LOG.info("Overrides file %s updated with new image tags" %
overrides_file)
except (TypeError, KeyError):
LOG.error("Overrides file %s fails to update" %
overrides_file)
if images_overrides:
image_tags.extend(images_overrides.values())
return list(set(image_tags))
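# --- Illustrative only (standalone helper, not an AppOperator method): a minimal sketch of
# --- the registry-prefix conversion described in the docstring above, applied to plain
# --- dicts. The registry address and port are hypothetical placeholders rather than values
# --- read from the running system.
def _example_prepend_registry(images_charts, images_overrides,
                              registry_ip="192.168.204.2", registry_port=9001):
    for key, image_tag in images_charts.items():
        if key not in images_overrides and not image_tag.startswith(registry_ip):
            # prepend <registry_ip>:<port>/ in the same way _get_image_tags_by_charts() does
            images_overrides[key] = '{}:{}/{}'.format(registry_ip, registry_port, image_tag)
    return images_overrides
# _example_prepend_registry({'db': 'docker.io/mariadb:10.2.13'}, {})
# -> {'db': '192.168.204.2:9001/docker.io/mariadb:10.2.13'}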
def _register_embedded_images(self, app):
"""
TODO(tngo): When we're ready to support air-gap scenario and private
images, the following need to be done:
a. load the embedded images
b. tag and push them to the docker registery on the controller
c. find image tag IDs in each chart and replace their values with
new tags. Alternatively, document the image tagging convention
${MGMT_FLOATING_IP}:${REGISTRY_PORT}/<image-name>
(e.g. 192.168.204.2:9001/prom/mysqld-exporter)
to be referenced in the application Helm charts.
"""
raise exception.KubeAppApplyFailure(
name=app.name,
reason="embedded images are not yet supported.")
def _save_images_list(self, app):
# Extract the list of images from the charts and overrides where
# applicable. Save the list to the same location as the armada manifest
# so it can be sync'ed.
if app.system_app:
LOG.info("Generating application overrides...")
self._helm.generate_helm_application_overrides(
app.name, cnamespace=None, armada_format=True, combined=True)
app.charts = self._get_list_of_charts(app.armada_mfile_abs)
# Get the list of images from the updated images overrides
images_to_download = self._get_image_tags_by_charts(
app.charts_dir, app.charts)
else:
# For custom apps, mine image tags from application path
images_to_download = self._get_image_tags_by_path(app.path)
if not images_to_download:
# TODO(tngo): We may want to support the deployment of apps that
# set up resources only in the future. In which case, generate
# an info log and let it advance to the next step.
raise exception.KubeAppUploadFailure(
name=app.name,
reason="charts specify no docker images.")
with open(app.imgfile_abs, 'wb') as f:
yaml.safe_dump(images_to_download, f, explicit_start=True,
default_flow_style=False)
def _retrieve_images_list(self, app_images_file):
with open(app_images_file, 'rb') as f:
images_list = yaml.load(f)
return images_list
def _download_images(self, app):
if os.path.isdir(app.images_dir):
return self._register_embedded_images(app)
if app.system_app:
# Some images could have been overwritten via user overrides
# between upload and apply, or between applies. Refresh the
# saved images list.
saved_images_list = self._retrieve_images_list(app.imgfile_abs)
combined_images_list = list(saved_images_list)
combined_images_list.extend(
self._get_image_tags_by_charts(app.charts_dir, app.charts))
images_to_download = list(set(combined_images_list))
if saved_images_list != images_to_download:
with | |
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(
hours, minutes)
except AttributeError:
pass
return _svalue
@classmethod
def gds_parse_date(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
dt = datetime_.datetime.strptime(input_data, '%Y-%m-%d')
dt = dt.replace(tzinfo=tz)
return dt.date()
def gds_validate_time(self, input_data, node=None, input_name=''):
return input_data
def gds_format_time(self, input_data, input_name=''):
if input_data.microsecond == 0:
_svalue = '%02d:%02d:%02d' % (
input_data.hour,
input_data.minute,
input_data.second,
)
else:
_svalue = '%02d:%02d:%02d.%s' % (
input_data.hour,
input_data.minute,
input_data.second,
('%f' % (float(input_data.microsecond) / 1000000))[2:],
)
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
return _svalue
def gds_validate_simple_patterns(self, patterns, target):
# patterns is a list of lists of strings/regular expressions.
# The target value must fully match at least one pattern from each
# inner list in order for the test to succeed.
found1 = True
for patterns1 in patterns:
found2 = False
for patterns2 in patterns1:
mo = re_.search(patterns2, target)
if mo is not None and len(mo.group(0)) == len(target):
found2 = True
break
if not found2:
found1 = False
break
return found1
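# --- Illustrative only: a worked example of how gds_validate_simple_patterns() evaluates
# --- its arguments. The target must fully match at least one pattern from *each* inner
# --- list. The patterns below are hypothetical, not taken from any schema.
#
#   patterns = [[r'[A-Z]{2}\d+', r'XX\d+'], [r'.{4,}']]
#   gds_validate_simple_patterns(patterns, 'AB1234') -> True
#       ('AB1234' fully matches r'[A-Z]{2}\d+' in the first list and r'.{4,}' in the second)
#   gds_validate_simple_patterns(patterns, 'AB1')    -> False
#       (the second list requires at least four characters, so the test fails)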
@classmethod
def gds_parse_time(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
if len(input_data.split('.')) > 1:
dt = datetime_.datetime.strptime(input_data, '%H:%M:%S.%f')
else:
dt = datetime_.datetime.strptime(input_data, '%H:%M:%S')
dt = dt.replace(tzinfo=tz)
return dt.time()
def gds_check_cardinality_(
self, value, input_name,
min_occurs=0, max_occurs=1, required=None):
if value is None:
length = 0
elif isinstance(value, list):
length = len(value)
else:
length = 1
if required is not None :
if required and length < 1:
self.gds_collector_.add_message(
"Required value {}{} is missing".format(
input_name, self.gds_get_node_lineno_()))
if length < min_occurs:
self.gds_collector_.add_message(
"Number of values for {}{} is below "
"the minimum allowed, "
"expected at least {}, found {}".format(
input_name, self.gds_get_node_lineno_(),
min_occurs, length))
elif length > max_occurs:
self.gds_collector_.add_message(
"Number of values for {}{} is above "
"the maximum allowed, "
"expected at most {}, found {}".format(
input_name, self.gds_get_node_lineno_(),
max_occurs, length))
def gds_validate_builtin_ST_(
self, validator, value, input_name,
min_occurs=None, max_occurs=None, required=None):
if value is not None:
try:
validator(value, input_name=input_name)
except GDSParseError as parse_error:
self.gds_collector_.add_message(str(parse_error))
def gds_validate_defined_ST_(
self, validator, value, input_name,
min_occurs=None, max_occurs=None, required=None):
if value is not None:
try:
validator(value)
except GDSParseError as parse_error:
self.gds_collector_.add_message(str(parse_error))
def gds_str_lower(self, instring):
return instring.lower()
def get_path_(self, node):
path_list = []
self.get_path_list_(node, path_list)
path_list.reverse()
path = '/'.join(path_list)
return path
Tag_strip_pattern_ = re_.compile(r'\{.*\}')
def get_path_list_(self, node, path_list):
if node is None:
return
tag = GeneratedsSuper.Tag_strip_pattern_.sub('', node.tag)
if tag:
path_list.append(tag)
self.get_path_list_(node.getparent(), path_list)
def get_class_obj_(self, node, default_class=None):
class_obj1 = default_class
if 'xsi' in node.nsmap:
classname = node.get('{%s}type' % node.nsmap['xsi'])
if classname is not None:
names = classname.split(':')
if len(names) == 2:
classname = names[1]
class_obj2 = globals().get(classname)
if class_obj2 is not None:
class_obj1 = class_obj2
return class_obj1
def gds_build_any(self, node, type_name=None):
# provide default value in case option --disable-xml is used.
content = ""
content = etree_.tostring(node, encoding="unicode")
return content
@classmethod
def gds_reverse_node_mapping(cls, mapping):
return dict(((v, k) for k, v in mapping.items()))
@staticmethod
def gds_encode(instring):
if sys.version_info.major == 2:
if ExternalEncoding:
encoding = ExternalEncoding
else:
encoding = 'utf-8'
return instring.encode(encoding)
else:
return instring
@staticmethod
def convert_unicode(instring):
if isinstance(instring, str):
result = quote_xml(instring)
elif sys.version_info.major == 2 and isinstance(instring, unicode):
result = quote_xml(instring).encode('utf8')
else:
result = GeneratedsSuper.gds_encode(str(instring))
return result
def __eq__(self, other):
def excl_select_objs_(obj):
return (obj[0] != 'parent_object_' and
obj[0] != 'gds_collector_')
if type(self) != type(other):
return False
return all(x == y for x, y in zip_longest(
filter(excl_select_objs_, self.__dict__.items()),
filter(excl_select_objs_, other.__dict__.items())))
def __ne__(self, other):
return not self.__eq__(other)
# Django ETL transform hooks.
def gds_djo_etl_transform(self):
pass
def gds_djo_etl_transform_db_obj(self, dbobj):
pass
# SQLAlchemy ETL transform hooks.
def gds_sqa_etl_transform(self):
return 0, None
def gds_sqa_etl_transform_db_obj(self, dbobj):
pass
def gds_get_node_lineno_(self):
if (hasattr(self, "gds_elementtree_node_") and
self.gds_elementtree_node_ is not None):
return ' near line {}'.format(
self.gds_elementtree_node_.sourceline)
else:
return ""
def getSubclassFromModule_(module, class_):
'''Get the subclass of a class from a specific module.'''
name = class_.__name__ + 'Sub'
if hasattr(module, name):
return getattr(module, name)
else:
return None
#
# If you have installed IPython you can uncomment and use the following.
# IPython is available from http://ipython.scipy.org/.
#
## from IPython.Shell import IPShellEmbed
## args = ''
## ipshell = IPShellEmbed(args,
## banner = 'Dropping into IPython',
## exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
#
# Globals
#
ExternalEncoding = ''
# Set this to false in order to deactivate during export, the use of
# name space prefixes captured from the input document.
UseCapturedNS_ = True
CapturedNsmap_ = {}
Tag_pattern_ = re_.compile(r'({.*})?(.*)')
String_cleanup_pat_ = re_.compile(r"[\n\r\s]+")
Namespace_extract_pat_ = re_.compile(r'{(.*)}(.*)')
CDATA_pattern_ = re_.compile(r"<!\[CDATA\[.*?\]\]>", re_.DOTALL)
# Change this to redirect the generated superclass module to use a
# specific subclass module.
CurrentSubclassModule_ = None
#
# Support/utility functions.
#
def showIndent(outfile, level, pretty_print=True):
if pretty_print:
for idx in range(level):
outfile.write(' ')
def quote_xml(inStr):
"Escape markup chars, but do not modify CDATA sections."
if not inStr:
return ''
s1 = (isinstance(inStr, BaseStrType_) and inStr or '%s' % inStr)
s2 = ''
pos = 0
matchobjects = CDATA_pattern_.finditer(s1)
for mo in matchobjects:
s3 = s1[pos:mo.start()]
s2 += quote_xml_aux(s3)
s2 += s1[mo.start():mo.end()]
pos = mo.end()
s3 = s1[pos:]
s2 += quote_xml_aux(s3)
return s2
def quote_xml_aux(inStr):
s1 = inStr.replace('&', '&amp;')
s1 = s1.replace('<', '&lt;')
s1 = s1.replace('>', '&gt;')
return s1
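# --- Illustrative only (not part of the original module): a quick sanity check of the
# --- expected behaviour of quote_xml() above. Markup characters are escaped while CDATA
# --- sections pass through untouched.
def _example_quote_xml_behaviour():
    assert quote_xml('a < b & c') == 'a &lt; b &amp; c'
    assert quote_xml('x<![CDATA[<&>]]>y<z') == 'x<![CDATA[<&>]]>y&lt;z'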
def quote_attrib(inStr):
s1 = (isinstance(inStr, BaseStrType_) and inStr or '%s' % inStr)
s1 = s1.replace('&', '&amp;')
s1 = s1.replace('<', '&lt;')
s1 = s1.replace('>', '&gt;')
if '"' in s1:
if "'" in s1:
s1 = '"%s"' % s1.replace('"', """)
else:
s1 = "'%s'" % s1
else:
s1 = '"%s"' % s1
return s1
def quote_python(inStr):
s1 = inStr
if s1.find("'") == -1:
if s1.find('\n') == -1:
return "'%s'" % s1
else:
return "'''%s'''" % s1
else:
if s1.find('"') != -1:
s1 = s1.replace('"', '\\"')
if s1.find('\n') == -1:
return '"%s"' % s1
else:
return '"""%s"""' % s1
def get_all_text_(node):
if node.text is not None:
text = node.text
else:
text = ''
for child in node:
if child.tail is not None:
text += child.tail
return text
def find_attr_value_(attr_name, node):
attrs = node.attrib
attr_parts = attr_name.split(':')
value = None
if len(attr_parts) == 1:
value = attrs.get(attr_name)
elif len(attr_parts) == 2:
prefix, name = attr_parts
namespace = node.nsmap.get(prefix)
if namespace is not None:
value = attrs.get('{%s}%s' % (namespace, name, ))
return value
def encode_str_2_3(instr):
return instr
class GDSParseError(Exception):
pass
def raise_parse_error(node, msg):
if node is not None:
msg = '%s (element %s/line %d)' % (msg, node.tag, node.sourceline, )
raise GDSParseError(msg)
class MixedContainer:
# Constants for category:
CategoryNone = 0
CategoryText = 1
CategorySimple = 2
CategoryComplex = 3
# Constants for content_type:
TypeNone = 0
TypeText = 1
TypeString = 2
TypeInteger = 3
TypeFloat = 4
TypeDecimal = 5
TypeDouble = 6
TypeBoolean = 7
TypeBase64 = 8
def __init__(self, category, content_type, name, value):
self.category = category
self.content_type = content_type
self.name = name
self.value = value
def getCategory(self):
return self.category
def getContenttype(self, content_type):
return self.content_type
def getValue(self):
return self.value
def getName(self):
return self.name
def export(self, outfile, level, name, namespace,
pretty_print=True):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
outfile.write(self.value)
elif self.category == MixedContainer.CategorySimple:
self.exportSimple(outfile, level, name)
else: # category == MixedContainer.CategoryComplex
self.value.export(
outfile, level, namespace, name_=name,
pretty_print=pretty_print)
def exportSimple(self, outfile, level, name):
if self.content_type == | |
is systematically present in the list of
# ordering fields so we can guarantee a deterministic order across all
# database backends.
pk_name = self.lookup_opts.pk.name
if not (set(ordering) & {'pk', '-pk', pk_name, '-' + pk_name}):
# The two sets do not intersect, meaning the pk isn't present. So
# we add it.
ordering.append('-pk')
return ordering
def get_ordering_field_columns(self):
"""
Returns an OrderedDict of ordering field column numbers and asc/desc
"""
# We must cope with more than one column having the same underlying sort
# field, so we base things on column numbers.
ordering = self._get_default_ordering()
ordering_fields = OrderedDict()
if ORDER_VAR not in self.params:
# for ordering specified on ModelAdmin or model Meta, we don't know
# the right column numbers absolutely, because there might be more
# than one column associated with that ordering, so we guess.
for field in ordering:
if field.startswith('-'):
field = field[1:]
order_type = 'desc'
else:
order_type = 'asc'
for index, attr in enumerate(self._viewset.list_display):
if self.get_ordering_field(attr) == field:
ordering_fields[index] = order_type
break
else:
for p in self.params[ORDER_VAR].split('.'):
_, pfx, idx = p.rpartition('-')
try:
idx = int(idx)
except ValueError:
continue # skip it
ordering_fields[idx] = 'desc' if pfx == '-' else 'asc'
return ordering_fields
def get_query_string(self, new_params=None, remove=None):
if new_params is None:
new_params = {}
if remove is None:
remove = []
p = self.params.copy()
for r in remove:
for k in list(p):
if k.startswith(r):
del p[k]
for k, v in new_params.items():
if v is None:
if k in p:
del p[k]
else:
p[k] = v
return '?%s' % urlencode(sorted(p.items()))
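    # Example (illustrative values): with self.params == {'p': '2', 'q': 'abc'},
    # get_query_string({'o': '1'}, remove=['p']) returns '?o=1&q=abc'.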
def post(self, request, *args, **kwargs):
action = request.POST.get('action', None)
pk = request.POST.get('item', None)
try:
if action and pk:
obj = self.model.objects.get(pk=pk)
result = self._viewset.invoke_action(
self.request, int(action), obj)
return JsonResponse({
'result': result[0],
'message': result[1]
})
except (ValueError, IndexError, ObjectDoesNotExist):
pass
return JsonResponse({
'result': False,
'message': "Invalid operation"
})
class TemplateNameMixin(object):
"""
    Mixin that adds the ViewSet attribute named by the view's
    ``popupcrud_template_name`` attribute to the list of templates looked up
    for rendering the view.

    If the incoming request is an AJAX request, each template filename is
    replaced with its '_inner' counterpart so that site-wide embellishments are
    removed when the view content is rendered inside a modal popup. This assumes
    the '_inner.html' template is written as a bare template that does not
    derive from the site's common base template.
"""
def get_template_names(self):
templates = super(TemplateNameMixin, self).get_template_names()
# if the viewset customized listview template, make sure that is
# looked for first by putting its name in the front of the list
template_attr_name = getattr(self, "popupcrud_template_name", None)
if hasattr(self._viewset, template_attr_name):
templates.insert(0, getattr(self._viewset, template_attr_name))
# make the default template of lower priority than the one
# determined by default -- <model>_list.html
templates.append(getattr(self, template_attr_name))
if self.request.is_ajax():
# If this is an AJAX request, replace all the template names with
# their <template_name>_inner.html counterparts.
            # These 'inner' templates are expected to be bare-bones templates,
# sans the base template's site-common embellishments.
for index, template in enumerate(templates):
parts = template.split('.')
templates[index] = "{0}_inner.{1}".format(parts[0], parts[1])
return templates
class CreateView(AttributeThunk, TemplateNameMixin, AjaxObjectFormMixin,
PermissionRequiredMixin, generic.CreateView):
popupcrud_template_name = "form_template"
form_template = "popupcrud/form.html"
def get_context_data(self, **kwargs):
kwargs['pagetitle'] = self._viewset.get_page_title('create')
#ugettext("New {0}").format(self._viewset.model._meta.verbose_name)
kwargs['form_url'] = self._viewset.get_new_url()
# formset = self._viewset.get_formset()
# if formset:
# kwargs['formset'] = formset
return super(CreateView, self).get_context_data(**kwargs)
class DetailView(AttributeThunk, TemplateNameMixin, PermissionRequiredMixin,
generic.DetailView):
popupcrud_template_name = "detail_template"
detail_template = "popupcrud/detail.html"
def get_context_data(self, **kwargs):
kwargs['pagetitle'] = self._viewset.get_page_title('detail', self.object)
#six.text_type(self.object)
# _("{0} - {1}").format(
# self._viewset.model._meta.verbose_name,
# six.text_type(self.object))
return super(DetailView, self).get_context_data(**kwargs)
class UpdateView(AttributeThunk, TemplateNameMixin, AjaxObjectFormMixin,
PermissionRequiredMixin, generic.UpdateView):
popupcrud_template_name = "form_template"
form_template = "popupcrud/form.html"
def get_context_data(self, **kwargs):
kwargs['pagetitle'] = self._viewset.get_page_title('update', obj=self.object)
#ugettext("Edit {0}").format(self._viewset.model._meta.verbose_name)
kwargs['form_url'] = self._viewset.get_edit_url(self.object)
return super(UpdateView, self).get_context_data(**kwargs)
class DeleteView(AttributeThunk, PermissionRequiredMixin, generic.DeleteView):
template_name = "popupcrud/confirm_delete.html"
def get_context_data(self, **kwargs):
kwargs['pagetitle'] = self._viewset.get_page_title('delete', obj=self.object)
#ugettext("Delete {0}").format(self._viewset.model._meta.verbose_name)
kwargs['model_options'] = self._viewset.model._meta
return super(DeleteView, self).get_context_data(**kwargs)
def handle_no_permission(self):
"""
Slightly different form of handling no_permission from Create/Update
views. Delete ajax request expects a JSON response to its AJAX request
and therefore we render the 403 template and return the rendered context
as error message text.
"""
if self.request.is_ajax():
temp = loader.get_template("popupcrud/403.html")
return JsonResponse({
'result': False,
'message': temp.render({}, self.request)
})
return super(DeleteView, self).handle_no_permission()
def delete(self, request, *args, **kwargs):
""" Override to return JSON success response for AJAX requests """
retval = super(DeleteView, self).delete(request, *args, **kwargs)
if self.request.is_ajax():
return JsonResponse({
'result': True,
'message': ugettext("{0} {1} deleted").format(
self.model._meta.verbose_name,
str(self.object))
})
messages.info(self.request, ugettext("{0} {1} deleted").format(
self._viewset.model._meta.verbose_name,
str(self.object)))
return retval
class PopupCrudViewSet(object):
"""
This is the base class from which you derive a class in your project
for each model that you need to build CRUD views for.
"""
_urls = None # urls cache, so that we don't build it for every request
#: The model to build CRUD views for. This is a required attribute.
model = None
#: URL to the create view for creating a new object. This is a required
#: attribute.
new_url = None
#: Lists the fields to be displayed in the list view columns. This attribute
    #: is modelled after ModelAdmin.list_display and supports model methods as
    #: well as ViewSet methods, much like ModelAdmin. This is a required attribute.
#:
#: So you have four possible values that can be used in list_display:
#:
#: - A field of the model
#: - A callable that accepts one parameter for the model instance.
#: - A string representing an attribute on ViewSet class.
#: - A string representing an attribute on the model
#:
#: See ModelAdmin.list_display `documentation
#: <https://docs.djangoproject.com/en/1.11/ref/contrib/admin/#django.contrib.admin.ModelAdmin.list_display>`_
#: for examples.
#:
#: A note about ``list_display`` fields with respect to how it differs from
#: ``ModelAdmin``'s ``list_display``.
#:
#: In ``ModelAdmin``, if a field specified in ``list_display`` is not
#: a database field, it can be set as a sortable field by setting
#: the method's ``admin_order_field`` attribute to the relevant database
#: field that can be used as the sort field. In ``PopupCrudViewSet``, this
    #: attribute is named ``order_field``.
list_display = ()
#: A list of names of fields. This is interpreted the same as the Meta.fields
#: attribute of ModelForm. This is a required attribute.
fields = ()
#: The form class to instantiate for Create and Update views. This is optional
#: and if not specified a ModelForm using the values of fields attribute will
#: be instantiated. An optional attribute, if specified, overrides fields
#: attribute value.
form_class = None
#: The url where the list view is rooted. This will be used as the success_url
#: attribute value for the individual CRUD views. This is a required attribute.
list_url = None
#: Number of entries per page in list view. Defaults to 10. Setting this
#: to None will disable pagination. This is an optional attribute.
paginate_by = POPUPCRUD['paginate_by'] #10 # turn on pagination by default
#: List of permission names for the list view. Permission names are of the
#: same format as what is specified in ``permission_required()`` decorator.
#: Defaults to no permissions, meaning no permission is required.
#:
    #: Deprecated. Use :ref:`permissions_required <permissions_required>` dictionary instead.
list_permission_required = ()
#: List of permission names for the create view.
#: Defaults to no permissions, meaning no permission is required.
#:
    #: Deprecated. Use :ref:`permissions_required <permissions_required>` dictionary instead.
create_permission_required = ()
#: List of permission names for the detail view.
#: Defaults to no permissions, meaning no permission is required.
#:
    #: Deprecated. Use :ref:`permissions_required <permissions_required>` dictionary instead.
detail_permission_required = ()
#: List of permission names for the update view.
#: Defaults to no permissions, meaning no permission is required.
#:
    #: Deprecated. Use :ref:`permissions_required <permissions_required>` dictionary instead.
update_permission_required = ()
#: List of permission names for the delete view.
#: Defaults to no permissions, meaning no permission is required.
#:
    #: Deprecated. Use :ref:`permissions_required <permissions_required>` dictionary instead.
delete_permission_required = ()
#: .. _permissions_required:
#:
#: Permissions table for the various CRUD views. Use this | |
"""
-----------------------------------------------
EE2703: Applied Programming Lab (Jan-May 2020)
Assignment: Final Exam
Name: <NAME>
Roll no.: EE18B122
-----------------------------------------------
"""
# imports
import sys
from scipy.linalg import lstsq
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
matplotlib.rcParams['axes.formatter.useoffset'] = False
# Define constants in SI units, wherever applicable
Lx = 0.1 # width of tank
Ly = 0.2 # height of tank
Eo = 8.85e-12 # permittivity of free space
Er = 2 # dielectric constant of water
def findExpFit(errors, iterations, printFit=False):
'''
Find LSTSQ Fit (exponential) for
x = iteration, y = error
    y = A * e^(B*x)
(or)
log(y) = log(A) + Bx
Input
-----
errors: list/numpy 1d array
error vector
iterations: list/numpy 1d array
iteration vector
Output
------
fit: numpy 1d array
coefficients A, B
estimate: numpy 1d array
estimated y values
'''
# get number of x-values
nRows = len(errors)
# initialise coeffMatrix and constMatrix
coeffMatrix = np.zeros((nRows,2), dtype=float)
constMatrix = np.zeros_like(errors)
# coeffMatrix = [1, iterations]
coeffMatrix[:,0] = 1
coeffMatrix[:,1] = iterations
# constMatrix = log(errors)
constMatrix = np.log(errors)
# fit
fit = lstsq(coeffMatrix, constMatrix)[0]
# debug statements
if printFit==True:
print("LSTSQ Fit parameters")
print("--------------------")
print("logA =", fit[0])
print("B =", fit[1])
estimate = coeffMatrix@fit
return fit, estimate
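# Usage sketch (illustrative values only): for a geometrically decaying error
# series such as errors = [1.0e-2, 3.7e-3, 1.4e-3, 5.0e-4] over iterations
# [0, 1, 2, 3], findExpFit returns fit = [logA, B] with logA close to -4.6 and
# B close to -1.0, and estimate holds the fitted log(error) values
# (coeffMatrix @ fit).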
def solve(M, N, step, k, accuracy, No, plotAll=False):
'''
Function to solve Laplace's Equation
in the tank.
Assumes that top of tank is at 1V.
Input
-----
M: int
number of nodes along X-axis, including
boundary nodes
N: int
number of nodes along Y-axis, including
boundary nodes
step: float
distance between nodes (assumed same for
X- and Y- axes)
k: int
index corresponding to height h
accuracy: float
desired accuracy
No: int
maximum number of iterations
plotAll: bool
switch to plot data
True - plot data
False - no plotting
Output
------
phi: 2d numpy array (MxN)
array of solved potentials
N: int
number of iterations carried out
err: 1d numpy array
error vector
'''
# initialise potentials to 0 everywhere, except at top plate
# potential at top = 1V
phi = np.zeros((N, M), dtype=float)
phi[-1, :] = 1.0
# create meshgrid for plotting potential distribution and for later
# calculation of Electric field
x = np.linspace(0, Lx, M, dtype=float)
y = np.linspace(0, Ly, N, dtype=float)
X, Y = np.meshgrid(x, y)
if plotAll:
plotContour(X, Y, phi, figTitle='Initial potential distribution')
iteration=[] # iteration number
error=[] # error vector
# iteratively calculate potentials
for i in range(No):
# create copy of potentials
oldPhi = phi.copy()
# updating the potentials
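        # Interior nodes take the average of their four neighbours (the discrete
        # form of Laplace's equation); the row at index k (the fluid surface)
        # instead enforces continuity of normal D across the dielectric
        # interface: phi_k = (Er*phi_below + phi_above) / (1 + Er).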
phi[1:-1, 1:-1] = 0.25*(phi[1:-1, 0:-2]+phi[1:-1, 2:]+phi[0:-2, 1:-1]+phi[2:, 1:-1])
phi[k, 1:-1] = (Er*oldPhi[k-1, 1:-1] + oldPhi[k+1, 1:-1])*1.0/(1+Er)
# Applying Boundary Conditions
phi[0, :] = 0.0 # bottom edge
phi[:, -1] = 0.0 # right edge
phi[:, 0] = 0.0 # left edge
phi[-1, :] = 1.0 # top edge
# calculating error
currError = np.abs(phi-oldPhi).max()
error.append(currError)
iteration.append(i)
# stop if accuracy reached
if currError <= accuracy:
break
if plotAll:
plotContour(X, Y, phi, figTitle='Potential distribution after updating')
# find LSTSQ Estimate for exponential region (>5000 iterations)
fit, estimate = findExpFit(error[5000:], iteration[5000:], printFit=True)
# extrapolate the estimated error function till iteration 0
estimate = np.e**(fit[0]+np.multiply(fit[1], iteration))
plotSemilogy([iteration, iteration], [error, estimate], multiplePlots=True, labels=["Actual error", "Fitted error (iteration >= 5000)"], figTitle='Error vs. iteration', xLabel=r"iterations $\to$", yLabel=r'error $\to$')
# calculate E
Ex, Ey = findEField(phi, step, M, N, plotAll)
checkContinuity(Ex, Ey, k, M, plotAll)
# calculate charge densities
sigma = findSigma(Ex, Ey, k)
# calculate charges Qtop and Qfluid
Q = findCharges(sigma, k, step)
# calculate angles with normal
angleBelow = findAngles(Ex[k-1, :], Ey[k-1, :])
angleAbove = findAngles(Ex[k, :], Ey[k, :])
if plotAll:
x = np.linspace(0, Lx, M-1, dtype=float)
sineAnglesBelow = np.sin(angleBelow)
sineAnglesAbove = np.sin(angleAbove)
tanAnglesBelow = np.tan(angleBelow)
tanAnglesAbove = np.tan(angleAbove)
plot(x, np.divide(sineAnglesBelow, sineAnglesAbove), r"Ratio of sine of angle with normal above and below", yLabel=r"$\frac{sin\,\theta_a}{sin\,\theta_b}$")
plot(x, np.divide(tanAnglesBelow, tanAnglesAbove), r"Ratio of tangent of angle with normal above and below", yLabel=r"$\frac{tan\,\theta_a}{tan\,\theta_b}$")
return phi, Q, iteration[-1], error
def findEField(phi, step, M, N, plotAll):
'''
Calculates the x- and y- components of E-field at
each point.
Input
-----
phi: 2d numpy array
potential array
step: float
distance between 2 points on the grid
M: int
nodes along x-axis
N: int
nodes along y-axis
plotAll: bool
switch to plot data
True - plot data
False - no plotting
Output
------
Ex: 2d numpy array
X-components of E field
Ey: 2d numpy array
Y-components of E-field
'''
# Ex calculation
# * * * row i
# - - --> center of mesh cells
# * * * row i+1
#
negativeGradientX = (phi[:, :-1] - phi[:, 1:])*(1.0/step)
Ex = (negativeGradientX[:-1, :] + negativeGradientX[1:, :])*0.5
# Ey calculation
# * *
# - --> center of mesh cells
# * *
# - --> center of mesh cells
# * *
# col i col i+1
#
negativeGradientY = (phi[:-1, :] - phi[1:, :])*(1.0/step)
Ey = (negativeGradientY[:, :-1] + negativeGradientY[:, 1:])*0.5
# plot
if plotAll:
x = np.linspace(0, Lx, M-1, dtype=float)
y = np.linspace(0, Ly, N-1, dtype=float)
X, Y = np.meshgrid(x, y)
plotQuiver(X, Y, Ex, Ey, r"Vector Plot of $\vec{E}$", blockFig=False)
return Ex, Ey
def findSigma(Ex, Ey, k):
'''
Find the charge density (linear) on
each side of the tank
Input
-----
Ex: 2d numpy array
X-component of Electric field at all
points inside the tank
Ey: 2d numpy array
Y-component of Electric field at all
points inside the tank
k: int
index corresponding to boundary
Output
------
sigma: list
[top, right, bottom, left] plate charge
densities
'''
# finding sigma on top plate
# NOTE: -ve sign due to outward normal
# for conductor, which is along
# -y direction
sigmaTop = -Ey[-1, :]*Eo
# finding sigma on bottom plate
sigmaBottom = Ey[0, :]*Eo*Er
# finding sigma on left plate
# NOTE: for nodes below boundary,
# permittivity is Eo*Er
sigmaLeft = Ex[:, 0]*Eo
sigmaLeft[:k] = Ex[:k, 0]*Eo*Er
# finding sigma on right plate
# NOTE: -ve sign due to outward
# normal in -x direction
# NOTE: for nodes below boundary,
# permittivity is Eo*Er
sigmaRight = -Ex[:, -1]*Eo
sigmaRight[:k] = -Ex[:k, -1]*Eo*Er
sigma = [sigmaTop, sigmaRight, sigmaBottom, sigmaLeft]
return sigma
def findCharges(sigma, k, step):
'''
Find the charges Qtop and Qfluid
Input
-----
sigma: list of 1d numpy arrays
charge densities (linear) on all surfaces
Refer to findSigma() for order of surfaces
k: int
index corresponding to boundary
step: float
distance between 2 adjacent nodes
Output
------
Q: list
[Qtop, Qfluid] charges
'''
# top plate charge
QTop = np.sum(sigma[0]*step)
# bottom surface charge
QBottom = np.sum(sigma[2]*step)
# left plate (submerged in dielectric) charge
QLeftFluid = np.sum(sigma[3][:k]*step)
# right plate (submerged in dielectric) charge
QRightFluid = np.sum(sigma[1][:k]*step)
# total charge in surface submerged in fluid
QFluid = QBottom+QLeftFluid+QRightFluid
Q = [QTop, QFluid]
return Q
def findAngles(Ex, Ey):
'''
Find the angle b/w y-axis and E-field at all
points on the grid
Input
-----
Ex: 2d numpy array
X-component of E-field
Ey: 2d numpy array
Y-component of E-field
Output
------
angle: 2d numpy array
angle b/w E-field and y-axis at all points
on the grid
'''
# angle = atan(Ex/Ey)
## NOTE: angle is calculated wrt y-axis
angles = np.arctan2(Ex, Ey)
return angles
def checkContinuity(Ex, Ey, k, M, plotAll):
'''
Function to verify continuity of Dn and
Et across interface
Input
-----
Ex: 2d numpy array
X-component of E-field
Ey: 2d numpy array
Y-component of E-field
k: int
index corresponding to height of fluid
M: int
number of nodes across x-axis
plotAll: bool
switch to plot data
True - plot data
False - no plotting
'''
if plotAll:
x = np.linspace(0, Lx, M-1)
# checking Dn continuity
plot([x, x], [Ey[k-1, :]*Er, Ey[k, :]], multiplePlots=True, labels=["Below boundary", "Above boundary"], yLabel=r"$D_{normal}$", figTitle=r"Continuity of $D_{normal}$ across boundary")
# checking Et continuity
plot([x, x], [Ex[k-1, :], Ex[k, :]], multiplePlots=True, labels=["Below boundary", "Above boundary"], yLabel=r"$E_{tangential}$", figTitle=r"Continuity of $E_{tangential}$ across boundary")
'''
Helper functions
plotSemilogy() - for semilogy plots
plotQuiver() - for quiver plots
plotContour() - for contour plots
plot() - for linear-scale plots
'''
figNum=0 # figure number
plotsDir | |
import os
import time
import tkinter
import sqlite3
import json
import smtplib
import locale
import requests
import threading
import html2text as h2t
from tkinter import *
from tkinter.ttk import *
from tkinter import filedialog
from tkinter import messagebox
from email import encoders
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
version = "v1.4" # DO NOT CHANGE
print('Starting Lite Mails {} \n'.format(version))
file = None
toopen = None
# Window initialization
window = Tk()
window.title('Lite Mails {}'.format(version))  # Window title
window.geometry('460x425')  # Window size
window.resizable(False, False)  # Disable resizing
window.style = Style()
window.style.theme_use('vista')
langsel = IntVar()
datesel = IntVar()
timesel = IntVar()
destination = StringVar()
subject = StringVar()
email = StringVar()
password = StringVar()
# Database initialization
db = sqlite3.connect("config.db")
c = db.cursor()
# Table account
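# Each column is probed with a SELECT; on failure we try an UPDATE, then an
# INSERT, and as a last resort the table is created from scratch.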
try:
c.execute("SELECT email FROM account")
print('Row "email" and table "account" loaded!')
except:
try:
c.execute("UPDATE account SET email = ? WHERE id = ? ", (None, 0))
db.commit()
print('Row "email" and table "account" loaded!')
except:
try:
c.execute("INSERT INTO account(email) VALUES(?)", (None))
db.commit()
print('Row "email" created and table "account" loaded!')
except:
c.execute("CREATE TABLE account(email TEXT, password TEXT, id INTEGER)")
db.commit()
c.execute("INSERT INTO account(email, password, id) VALUES(?,?,?)", (None, None, 0))
db.commit()
print('\nTable "account" created!')
print('Row "email" and table "account" loaded!')
try:
c.execute("SELECT password FROM account")
print('Row "password" and table "account" loaded!')
except:
try:
c.execute("UPDATE account SET password = ? WHERE id = ? ", (None, 0))
db.commit()
print('Row "password" and table "account" loaded!')
except:
try:
c.execute("INSERT INTO account(password) VALUES(?)", (None))
db.commit()
print('Row "password" created and table "account" loaded!')
except:
c.execute("CREATE TABLE account(email TEXT, password TEXT, id INTEGER)")
db.commit()
c.execute("INSERT INTO account(email, password, id) VALUES(?,?,?)", (None, None, 0))
db.commit()
print('\nTable "account" created!')
print('Row "password" and table "account" loaded!')
try:
c.execute("SELECT id FROM account")
print('Row "id" and table "account" loaded!')
except:
try:
c.execute("UPDATE account SET id = ? WHERE id = ? ", (None, 0))
db.commit()
print('Row "id" and table "account" loaded!')
except:
try:
c.execute("INSERT INTO account(id) VALUES(?)", (None))
db.commit()
print('Row "id" created and table "account" loaded!')
except:
c.execute("CREATE TABLE account(email TEXT, password TEXT, id INTEGER)")
db.commit()
c.execute("INSERT INTO account(email, password, id) VALUES(?,?,?)", (None, None, 0))
db.commit()
print('\nTable "account" created!')
print('Row "id" and table "account" loaded!')
# Table settings
try:
c.execute("SELECT language FROM settings")
print('Row "language" and table "settings" loaded!')
except:
try:
c.execute("UPDATE account SET language = ? WHERE id = ? ", (str(locale.getdefaultlocale()), 0))
db.commit()
print('Row "language" and table "settings" loaded!')
except:
try:
c.execute("INSERT INTO settings(language) VALUES(?)", (str(locale.getdefaultlocale())))
db.commit()
print('Row "language" and table "settings" loaded!')
except:
c.execute("CREATE TABLE settings(language TEXT, date_format INTEGER, time_format INTEGER, id INTEGER)")
db.commit()
c.execute("INSERT INTO settings(language, date_format, time_format, id) VALUES(?,?,?,?)", (str(locale.getdefaultlocale()), 1, 1, 0))
db.commit()
print('\nTable "settings" created!')
print('Row "language" and table "settings" loaded!')
try:
c.execute("SELECT date_format FROM settings")
print('Row "date_format" and table "settings" loaded!')
except:
try:
c.execute("UPDATE account SET date_format = ? WHERE id = ? ", (1, 0))
db.commit()
print('Row "date_format" and table "settings" loaded!')
except:
try:
c.execute("INSERT INTO settings(date_format) VALUES(?)", (1))
db.commit()
print('Row "date_format" and table "settings" loaded!')
except:
c.execute("CREATE TABLE settings(language TEXT, date_format INTEGER, time_format INTEGER, id INTEGER)")
db.commit()
c.execute("INSERT INTO settings(language, date_format, time_format, id) VALUES(?,?,?,?)", (str(locale.getdefaultlocale()), 1, 1, 0))
db.commit()
print('\nTable "settings" created!')
print('Row "date_format" and table "settings" loaded!')
try:
c.execute("SELECT time_format FROM settings")
print('Row "time_format" and table "settings" loaded!')
except:
try:
c.execute("UPDATE account SET time_format = ? WHERE id = ? ", (1, 0))
db.commit()
print('Row "time_format" and table "settings" loaded!')
except:
try:
c.execute("INSERT INTO settings(time_format) VALUES(?)", (1))
db.commit()
print('Row "time_format" and table "settings" loaded!')
except:
c.execute("CREATE TABLE settings(language TEXT, date_format INTEGER, time_format INTEGER, id INTEGER)")
db.commit()
c.execute("INSERT INTO settings(language, date_format, time_format, id) VALUES(?,?,?,?)", (str(locale.getdefaultlocale()), 1, 1, 0))
db.commit()
print('\nTable "settings" created!')
print('Row "time_format" and table "settings" loaded!')
try:
c.execute("SELECT id FROM settings")
print('Row "id" and table "settings" loaded!')
except:
try:
c.execute("UPDATE account SET id = ? WHERE id = ? ", (0, 0))
db.commit()
print('Row "id" and table "settings" loaded!')
except:
try:
c.execute("INSERT INTO settings(id) VALUES(?)", (0))
db.commit()
print('Row "id" and table "settings" loaded!')
except:
c.execute("CREATE TABLE settings(language TEXT, date_format INTEGER, time_format INTEGER, id INTEGER)")
db.commit()
c.execute("INSERT INTO settings(language, date_format, time_format, id) VALUES(?,?,?,?)", (str(locale.getdefaultlocale()), 1, 1, 0))
db.commit()
print('\nTable "settings" created!')
print('Row "id" and table "settings" loaded!')
c.execute("SELECT email, password FROM account")
credentials = list(c.fetchall())
c.execute("SELECT language FROM settings")
language = list(c.fetchall())
c.execute("SELECT date_format, time_format FROM settings")
datetime_format = list(c.fetchall())
if not os.path.isfile('version.txt'):
print('\nCreated version file.')
with open('version.txt', 'w') as f:
f.write(version)
f.close()
if not os.path.isdir("emails"):
os.makedirs("emails")
if 'en' in language[0][0]:
with open("languages/en-EN.json", "r") as read_file:
string = json.load(read_file)
langsel.set(1)
elif 'it' in language[0][0]:
with open("languages/it-IT.json", "r") as read_file:
string = json.load(read_file)
langsel.set(2)
else:
with open("languages/en-EN.json", "r") as read_file:
string = json.load(read_file)
langsel.set(1)
datesel.set(datetime_format[0][0])
timesel.set(datetime_format[0][1])
class message_handler:  # Message handling
def auth_error_type2():
messagebox.showerror(string['error'], string['auth-error-type2'])
def auth_error_type1():
messagebox.showerror(string['error'], string['auth-error-type1'])
def mail_sent():
messagebox.showinfo(string['info'], string['mail-sent'])
def compile_error():
messagebox.showerror(string['error'], string['send-error'])
def apply_language():
messagebox.showinfo(string['info'], string['apply-language'])
def no_conn():
messagebox.showerror(string['error'], string['no-connection'])
def save_email():  # Save email
if not os.path.isdir("emails"):
os.makedirs("emails")
tosave = filedialog.asksaveasfile(defaultextension="*.litemail", initialdir="emails", title=string['save-email'], filetypes=[('E-Mail', "*.litemail")])
if tosave is None:
return
template = ("""{0}
{1}
{2}
-""").format(destination.get(), subject.get(), msg_input.get('1.0', 'end-1c'))
tosave.write(str(template))
tosave.close()
print('Email saved!')
to_save = str(tosave.name)
f_ = os.path.basename(to_save)
fn = list(f_.split('.'))
window.title('Lite Mails {0} - {1}'.format(version, fn[0]))
def open_email():  # Open emails
global toopen
toopen = filedialog.askopenfilename(initialdir="emails", title=string['open-email'], filetypes=[("E-Mail", "*.litemail")])
if toopen == '':
return
with open(toopen, 'r') as openedfile:
def clear():
dest_input.delete(0, 'end')
sub_input.delete(0, 'end')
msg_input.delete('1.0', 'end')
dest_input.insert(0, openedfile.readline().strip())
sub_input.insert(0, openedfile.readline(62).strip())
lines = openedfile.readlines()
msg_input.insert('1.0', (''.join(lines[0:-1])).strip())
fn = list(toopen.split('.'))
window.title('Lite Mails {0} - {1}'.format(version, os.path.basename(fn[0])))
if msg_input.get('1.0', 'end-1c') or destination.get() or subject.get():
quitquestion = messagebox.askyesnocancel(string['open-email'], string['quit-message'])
if quitquestion is True:
save_email()
clear()
elif quitquestion is False:
clear()
elif quitquestion is None:
pass
elif msg_input.get('1.0', 'end-1c') and destination.get() and subject.get() in open(toopen, 'r').read():
clear()
else:
clear()
def close_program():  # Function to close the program
if toopen:
if msg_input.get('1.0', 'end-1c') and destination.get() and subject.get() in open(toopen, 'r').read():
window.destroy()
os._exit(0)
else:
quitquestion = messagebox.askyesnocancel(string['quit'], string['quit-message'])
if quitquestion is True:
save_email()
elif quitquestion is False:
window.destroy()
os._exit(0)
elif quitquestion is None:
pass
elif msg_input.get('1.0', 'end-1c') or destination.get() or subject.get():
quitquestion = messagebox.askyesnocancel(string['quit'], string['quit-message'])
if quitquestion is True:
save_email()
elif quitquestion is False:
window.destroy()
os._exit(0)
elif quitquestion is None:
pass
else:
window.destroy()
os._exit(0)
def account():  # Account settings
c.execute("SELECT email, password FROM account")
credentials = list(c.fetchall())
    accountwin = Toplevel(window)  # Create a new window
    accountwin.title(string['account-settings'])  # Window title
    accountwin.geometry('450x155')  # Window size
    accountwin.resizable(False, False)  # Disable resizing
    accountwin.iconbitmap('litemails.ico')
    # Window elements
user_label = Label(accountwin, text=string['email'], font=('Segoe UI', 13)).grid(row=0, pady=15, padx=5, sticky='w')
user_input = Entry(accountwin, textvariable=email, font=('Segoe UI', 10), width=45)
user_input.grid(row=0, column=1, pady=15, padx=5, sticky='w')
psw_label = Label(accountwin, text=string['password'], font=('Segoe UI', 13)).grid(row=1, pady=15, padx=5, sticky='w')
psw_input = Entry(accountwin, textvariable=password, font=('Segoe UI', 10), width=45, show='*')
psw_input.grid(row=1, column=1, pady=15, padx=5, sticky='w')
try:
user_input.delete(0, 'end')
psw_input.delete(0, 'end')
user_input.insert(0, credentials[0][0])
psw_input.insert(0, credentials[0][1])
except tkinter.TclError:
pass
def close_and_save():
print('Saving account data...')
c.execute("UPDATE account SET email = ? WHERE id = ? ", (email.get(), 0))
db.commit()
c.execute("UPDATE account SET password = ? WHERE id = ? ", (password.get(), 0))
db.commit()
accountwin.destroy()
ok_button = Button(accountwin, text=string['done'], width=10, command=lambda: close_and_save())
ok_button.grid(row=2, column=1, padx=25, sticky='se')
def language(lang):  # Language handling
global settings
c.execute("SELECT language FROM settings")
language = list(c.fetchall())
c.execute("UPDATE settings SET language = ? WHERE id = ? ", (lang, 0))
db.commit()
user_choice = messagebox.askokcancel(string['info'], string['apply-language'])
if user_choice:
window.destroy()
os._exit(0)
def check_for_updates(fromwhat=None):  # Update handling
try:
global r
r = requests.get('http://alex3025.github.io/litemails.html')
version_to_install = h2t.html2text(r.text).strip()
except:
version_to_install = None
pass
class RunUpdaterScript(threading.Thread):
def __init__(self):
Thread.__init__(self)
self.start()
window.destroy()
self._stop_event = threading.Event()
def stop(self):
self._stop_event.set()
def run(self):
os.chdir('..')
os.system('python Updater.py')
def start_updating():
db.commit()
db.close()
thread = RunUpdaterScript()
thread.stop()
os._exit(0)
if version_to_install:
if version < version_to_install:
uf = messagebox.askyesno(string['info'], string['update-found'])
if uf:
if toopen:
if msg_input.get('1.0', 'end-1c') and destination.get() and subject.get() in open(toopen, 'r').read():
start_updating()
else:
quitquestion = messagebox.askyesnocancel(string['quit'], string['quit-message'])
if quitquestion is True:
save_email()
elif quitquestion is False:
start_updating()
elif quitquestion is None:
pass
elif msg_input.get('1.0', 'end-1c') or destination.get() or subject.get():
quitquestion = messagebox.askyesnocancel(string['quit'], string['quit-message'])
if quitquestion is True:
save_email()
elif quitquestion is False:
start_updating()
elif quitquestion is None:
pass
else:
start_updating()
elif fromwhat == 'menu':
messagebox.showinfo(string['info'], string['no-update'])
elif fromwhat == 'menu':
message_handler.no_conn()
else:
print('No updates found!')
def add_attachment():  # Function to add an attachment
global file
file = filedialog.askopenfilename(title=string['add-attachment'])
if file:
send_button.configure(text=string['send-with-attachment'])
remove_attch_button.configure(state='active')
else:
send_button.configure(text=string['send'])
remove_attch_button.configure(state='disabled')
def remove_attch():  # Remove the attachment
global file
if file:
send_button.configure(text=string['send'])
remove_attch_button.configure(state='disabled')
file = None
def add_date_time(date_or_time, format_=None):  # Add the current date/time to the email
global datetime_format
c.execute("SELECT date_format, time_format FROM settings")
datetime_format = list(c.fetchall())
if format_:
if format_ == string['date-format-type1']:
c.execute("UPDATE settings SET date_format = ? WHERE id = ? ", (1, 0))
db.commit()
elif format_ == string['date-format-type2']:
c.execute("UPDATE settings SET date_format = ? WHERE id = ? ", (2, 0))
db.commit()
elif format_ == string['time-format-type1']:
c.execute("UPDATE settings SET time_format = ? WHERE id = ? ", (1, 0))
db.commit()
elif format_ == string['time-format-type2']:
c.execute("UPDATE settings SET time_format = ? WHERE id = ? ", (2, 0))
db.commit()
else:
c.execute("SELECT date_format, time_format FROM settings")
datetime_format = list(c.fetchall())
if date_or_time:
if date_or_time == 'date':
if datetime_format[0][0] == 1:
msg_input.insert('insert', time.strftime("%d/%m/%Y"))
elif datetime_format[0][0] == 2:
msg_input.insert('insert', time.strftime("%d-%m-%Y"))
if date_or_time == 'time':
if datetime_format[0][1] == 1:
msg_input.insert('insert', time.strftime("%H:%M:%S"))
elif datetime_format[0][1] == 2:
msg_input.insert('insert', time.strftime("%H:%M"))
c.execute("SELECT date_format, time_format FROM settings")
datetime_format = list(c.fetchall())
def new_mail():
def clear_for_new_mail():
toopen = None
tosave = None
file = None
dest_input.delete(0, 'end')
sub_input.delete(0, 'end')
msg_input.delete('1.0', 'end')
window.title('Lite Mails {}'.format(version))
if toopen:
if msg_input.get('1.0', 'end-1c') and destination.get() and subject.get() in open(toopen, 'r').read():
clear_for_new_mail()
else:
quitquestion = messagebox.askyesnocancel(string['quit'], string['quit-message'])
if quitquestion is True:
save_email()
elif quitquestion is False:
clear_for_new_mail()
elif quitquestion is None:
pass
elif msg_input.get('1.0', 'end-1c') or destination.get() or subject.get():
quitquestion = messagebox.askyesnocancel(string['quit'], string['quit-message'])
if quitquestion is True:
save_email()
elif quitquestion is False:
clear_for_new_mail()
elif quitquestion is None:
pass
else:
clear_for_new_mail()
def send_email():  # Function to send the email
c.execute("SELECT email, password FROM account")
credentials = list(c.fetchall())
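    # 'r' is the module-level requests.Response set by check_for_updates();
    # it is reused here as a crude connectivity check before sending.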
if r:
try:
msg = MIMEMultipart()
msg['From'] = str(credentials[0][0])
msg['To'] = str(destination.get())
msg['Subject'] = str(subject.get())
msg.attach(MIMEText(msg_input.get('1.0', 'end-1c'), 'plain'))
if file:
attachment = open(file, "rb")
part = MIMEBase('application', 'octet-stream')
part.set_payload((attachment).read())
encoders.encode_base64(part)
part.add_header('Content-Disposition', "attachment; filename= %s" % os.path.basename(file))
msg.attach(part)
server = smtplib.SMTP('smtp.gmail.com', 587)
server.starttls()
server.login(str(credentials[0][0]), str(credentials[0][1]))
text = msg.as_string()
server.sendmail(str(credentials[0][0]), str(destination.get()), text)
server.quit()
print('Mail sent.')
message_handler.mail_sent()
except smtplib.SMTPAuthenticationError:
            if not email.get() or not password.get():
message_handler.auth_error_type2()
else:
message_handler.auth_error_type1()
except smtplib.SMTPRecipientsRefused:
message_handler.compile_error()
else:
message_handler.no_conn()
# Widgets
main_frame = Frame(window)
main_frame.grid(row=0, column=0, pady=15, sticky='wn')
dest_label = Label(main_frame, text=string['to'], font=('Segoe UI', 13)).grid(row=0, padx=5, sticky='w')
dest_input = Entry(main_frame, textvariable=destination, font=('Segoe UI', 10), width=45)
dest_input.grid(row=0, column=1, padx=5, sticky='w')
sub_label = Label(main_frame, text=string['subject'], font=('Segoe UI', 13)).grid(row=1, pady=5, padx=5, sticky='w')
sub_input = Entry(main_frame, textvariable=subject, font=('Segoe UI', 10), width=45)
sub_input.grid(row=1, column=1, pady=5, padx=5, sticky='w')
msg_label = Label(main_frame, text=string['message'], font=('Segoe UI', 13)).grid(row=2, pady=15, padx=5, sticky='wn')
msg_input = Text(main_frame, font=('Segoe UI', 10), width=45, height=15)
msg_input.grid(row=2, column=1, pady=20, padx=5, sticky='w')
scroll = Scrollbar(main_frame, command=msg_input.yview, orient='vertical')
scroll.config(command=msg_input.yview)
msg_input.configure(yscrollcommand=scroll.set)
scroll.grid(row=2, column=2, ipady=105, sticky='e')
send_button = Button(main_frame, text=string['send'], width=20, command=lambda: send_email())
send_button.grid(row=3, column=1, padx=25, sticky='se')
remove_attch_button = Button(main_frame, text=string['remove-attachment'], state='disabled', width=20, command=lambda: remove_attch())
remove_attch_button.grid(row=3, column=1, padx=25, sticky='sw')
# Menu bars
menu_bar = Menu(window)
# Mail menu
menu_mail = Menu(menu_bar, tearoff=0)
menu_bar.add_cascade(label=string['mail'], menu=menu_mail)
menu_mail.add_command(label=string['new-mail'], command=lambda: new_mail())
menu_mail.add_command(label=string['save-email'], command=lambda: save_email())
menu_mail.add_command(label=string['open-email'], command=lambda: open_email())
# Datetime format menu
menu_datetime_format = Menu(menu_bar, tearoff=0)
menu_datetime_format.add_radiobutton(label=string['date'].title() + ': ' + string['date-format-type1'], command=lambda: add_date_time(None, string['date-format-type1']), variable=datesel, | |
= mutant_to_letter_pos_idx_focus_list[mutant]
mut_aa = mutant[-1]
focus_seq_copy[idx_focus] = mut_aa
return focus_seq_copy
print("Getting mutated sequences")
# First sequence is the wt sequence
mutant_sequences = [list(self.focus_seq_trimmed)[:]]
valid_mutants = ['wt']
# df_input.apply(lambda row: get_mutated_seq(row['mutant']), axis=1).to_list()
for mutants in df['mutant'].to_list():
assert isinstance(mutants, str), "mutants is not a string: " + str(mutants)
mutant_list = mutants.split(":")
valid_mutant = True
# TODO handle silent mutations, don't raise errors for them
# if any of the mutants in this list aren"t in the focus sequence,
# I cannot make a prediction
for mutant in mutant_list:
if mutant not in mutant_to_letter_pos_idx_focus_list:
valid_mutant = False
print("Invalid mutant:", mutant)
print("mutant_list", mutant_list)
break
# If it is a valid mutant, add it to my list to make predictions
if valid_mutant:
focus_seq_copy = list(self.focus_seq_trimmed)[:]
for mutant in mutant_list:
wt_aa, pos, idx_focus = mutant_to_letter_pos_idx_focus_list[mutant]
mut_aa = mutant[-1]
focus_seq_copy[idx_focus] = mut_aa
mutant_sequences.append("".join(focus_seq_copy))
valid_mutants.append(mutants)
print("Number of valid mutant sequences:", len(mutant_sequences))
if len(mutant_sequences) == 0:
raise ValueError("No valid mutant sequences found")
print("Making one-hot matrix")
# Then make the one hot sequence
mutant_sequences_one_hot = np.zeros((len(mutant_sequences), len(self.focus_cols), len(self.alphabet)))
for i, sequence in enumerate(mutant_sequences):
if not tqdm_available and i % 10 == 0:
print(i)
for j, letter in enumerate(sequence):
k = self.aa_dict[letter]
mutant_sequences_one_hot[i, j, k] = 1.0
prediction_matrix = np.zeros((mutant_sequences_one_hot.shape[0], N_pred_iterations))
batch_order = np.arange(mutant_sequences_one_hot.shape[0])
print("Getting ELBOs over ", N_pred_iterations, " iterations and ", minibatch_size, " size minibatches")
print("Prediction matrix size:", prediction_matrix.shape)
# Why not batch along the iterations direction? if we have mutants < minibatch_size this doesn't help.
# Although if iterations is too big, it may not fit in a minibatch.
# And if mutants is too big, it may also not fit.
for i in tqdm(range(N_pred_iterations)):
if not tqdm_available and i % 10 == 0:
print(i)
np.random.shuffle(batch_order)
for j in range(0, mutant_sequences_one_hot.shape[0], minibatch_size):
batch_index = batch_order[j:j + minibatch_size]
batch_preds, _, _ = model.all_likelihood_components(mutant_sequences_one_hot[batch_index])
for k, idx_batch in enumerate(batch_index.tolist()):
prediction_matrix[idx_batch][i] = batch_preds[k]
# Then take the mean of all my elbo samples
mean_elbos = np.mean(prediction_matrix, axis=1).flatten().tolist()
# Remove the wild type sequence
wt_elbo = mean_elbos.pop(0)
valid_mutants.pop(0)
delta_elbos = np.asarray(mean_elbos) - wt_elbo
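        # The shift in mean ELBO relative to wild type is the mutation-effect
        # score: more negative values indicate variants the model considers
        # less likely than the wild-type sequence.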
assert len(delta_elbos) == len(valid_mutants), "delta_elbos and valid_mutants should be the same length" + str(
len(delta_elbos)) + " " + str(len(valid_mutants))
elbos = pd.DataFrame({'DeepSequence': delta_elbos, 'mutant': valid_mutants})
print("elbos:\n", len(elbos), elbos.head(30))
df = df.merge(elbos, how='inner', on='mutant')
print("merged:\n", len(df), df.head())
print("Saving to file")
output_filename = output_filename_prefix + "_samples-" + str(N_pred_iterations) + "_elbo_predictions.csv"
if random_seed is not None:
output_filename = output_filename_prefix + "_samples-" + str(N_pred_iterations) + "_seed-" + str(random_seed) + "_elbo_predictions.csv"
df.to_csv(output_filename, index=False)
print("Written to file", output_filename)
def custom_sequences(self, input_filename, model, N_pred_iterations=10, \
minibatch_size=2000, filename_prefix="", offset=0):
""" Predict the delta elbo for a custom mutation filename
"""
# Get the start and end index from the sequence name
start_idx, end_idx = self.focus_seq_name.split("/")[-1].split("-")
start_idx = int(start_idx)
wt_pos_focus_idx_tuple_list = []
focus_seq_index = 0
focus_seq_list = []
mutant_to_letter_pos_idx_focus_list = {}
self.mutant_sequences = ["".join(self.focus_seq_trimmed)]
self.mutant_sequences_descriptor = ["wt"]
# run through the input file
if not os.path.isfile(input_filename):
input_filename = os.path.join(self.working_dir, input_filename)
assert os.path.isfile(input_filename), "File not found: "+input_filename
INPUT = open(input_filename, "r")
header = ''
new_line = ''
aa_s = set('ACDEFGHIKLNMPQRSTVWY-')
print("focus_seq", self.focus_seq)
print("focus_seq_trimmed", self.focus_seq_trimmed)
for i,line in enumerate(INPUT):
line = line.rstrip()
# if encountering header, after first, add prev entry to list
if line[0] == '>':
new_line = ''.join([aa for ix, aa in enumerate(new_line) if (ix in self.focus_cols)])
if len(new_line) == len(self.focus_seq_trimmed) and set(new_line).issubset(aa_s):
self.mutant_sequences.append(new_line) #hack?
self.mutant_sequences_descriptor.append(header)
else:
print(len(new_line))
print(set(new_line) - aa_s)
header = line[1:]
new_line = ''
else:
new_line = new_line + line
INPUT.close()
# add final entry
new_line = ''.join([aa for aa in new_line if (aa in aa_s)])
if len(new_line) == len(self.focus_seq_trimmed):
self.mutant_sequences.append(new_line)
self.mutant_sequences_descriptor.append(header)
# Then make the one hot sequence
self.mutant_sequences_one_hot = np.zeros(\
(len(self.mutant_sequences),len(self.focus_cols),len(self.alphabet)))
for i,sequence in enumerate(self.mutant_sequences):
for j,letter in enumerate(sequence):
if letter in self.aa_dict:
k = self.aa_dict[letter]
self.mutant_sequences_one_hot[i,j,k] = 1.0
self.prediction_matrix = np.zeros((self.mutant_sequences_one_hot.shape[0],N_pred_iterations))
batch_order = np.arange(self.mutant_sequences_one_hot.shape[0])
for i in range(N_pred_iterations):
np.random.shuffle(batch_order)
for j in range(0,self.mutant_sequences_one_hot.shape[0],minibatch_size):
batch_index = batch_order[j:j+minibatch_size]
batch_preds, _, _ = model.all_likelihood_components(self.mutant_sequences_one_hot[batch_index])
for k,idx_batch in enumerate(batch_index.tolist()):
self.prediction_matrix[idx_batch][i]= batch_preds[k]
# Then take the mean of all my elbo samples
self.mean_elbos = np.mean(self.prediction_matrix, axis=1).flatten().tolist()
self.wt_elbo = self.mean_elbos.pop(0)
self.mutant_sequences_descriptor.pop(0)
self.delta_elbos = np.asarray(self.mean_elbos) - self.wt_elbo
if filename_prefix == "":
return self.mutant_sequences_descriptor, self.delta_elbos
else:
OUTPUT = open(filename_prefix, "w")
for i,descriptor in enumerate(self.mutant_sequences_descriptor):
OUTPUT.write(descriptor+","+str(self.delta_elbos[i])+"\n")
OUTPUT.close()
def get_pattern_activations(self, model, update_num, filename_prefix="",
verbose=False, minibatch_size=2000):
activations_filename = self.working_dir + "/embeddings/" + filename_prefix + "_pattern_activations.csv"
OUTPUT = open(activations_filename, "w")
batch_order = np.arange(len(self.x_train_name_list))
for i in range(0, len(self.x_train_name_list), minibatch_size):
batch_index = batch_order[i:i + minibatch_size]
one_hot_seqs = self.x_train[batch_index]
batch_activation = model.get_pattern_activations(one_hot_seqs)
for j, idx in enumerate(batch_index.tolist()):
sample_activation = [str(val) for val in batch_activation[j].tolist()]
sample_name = self.x_train_name_list[idx]
out_line = [str(update_num), sample_name] + sample_activation
if verbose:
print("\t".join(out_line))
OUTPUT.write(",".join(out_line) + "\n")
OUTPUT.close()
def get_embeddings(self, model, update_num, filename_prefix="",
verbose=False, minibatch_size=2000):
""" Save the latent variables from all the sequences in the alignment """
embedding_filename = self.working_dir + "/embeddings/" + filename_prefix + "_seq_embeddings.csv"
# Append embeddings to file if it has already been created
# This is useful if you want to see the embeddings evolve over time
if os.path.isfile(embedding_filename):
OUTPUT = open(embedding_filename, "a")
else:
OUTPUT = open(embedding_filename, "w")
mu_header_list = ["mu_" + str(i + 1) for i in range(model.n_latent)]
log_sigma_header_list = ["log_sigma_" + str(i + 1) for i in range(model.n_latent)]
header_list = mu_header_list + log_sigma_header_list
OUTPUT.write("update_num,name," + ",".join(header_list) + "\n")
batch_order = np.arange(len(self.x_train_name_list))
for i in range(0, len(self.x_train_name_list), minibatch_size):
batch_index = batch_order[i:i + minibatch_size]
one_hot_seqs = self.x_train[batch_index]
batch_mu, batch_log_sigma = model.recognize(one_hot_seqs)
for j, idx in enumerate(batch_index.tolist()):
sample_mu = [str(val) for val in batch_mu[j].tolist()]
sample_log_sigma = [str(val) for val in batch_log_sigma[j].tolist()]
sample_name = self.x_train_name_list[idx]
out_line = [str(update_num), sample_name] + sample_mu + sample_log_sigma
if verbose:
print("\t".join(out_line))
OUTPUT.write(",".join(out_line) + "\n")
OUTPUT.close()
def get_elbo_samples(self, model, N_pred_iterations=100, minibatch_size=2000):
self.prediction_matrix = np.zeros((self.one_hot_mut_array_with_wt.shape[0], N_pred_iterations))
batch_order = np.arange(self.one_hot_mut_array_with_wt.shape[0])
for i in range(N_pred_iterations):
np.random.shuffle(batch_order)
for j in range(0, self.one_hot_mut_array_with_wt.shape[0], minibatch_size):
batch_index = batch_order[j:j + minibatch_size]
batch_preds, _, _ = model.all_likelihood_components(self.one_hot_mut_array_with_wt[batch_index])
for k, idx_batch in enumerate(batch_index.tolist()):
self.prediction_matrix[idx_batch][i] = batch_preds[k]
def gen_job_string(data_params, model_params):
"""
Generates a unique job string given data and model parameters.
This is used later as an identifier for the
saved model weights and figures
Parameters
------------
data_params: dictionary of parameters for the data class
model_params: dictionary of parameters for the model class
Returns
------------
job string denoting parameters of run
"""
written_out_vals = ["n_latent"]
layer_num_list = ["zero", "one", "two", "three", "four"]
encoder_architecture = []
decoder_architecture = []
for layer_num in layer_num_list:
if "encode_dim_" + layer_num in model_params:
encoder_architecture.append(model_params["encode_dim_" + layer_num])
if "decode_dim_" + layer_num in model_params:
decoder_architecture.append(model_params["decode_dim_" + layer_num])
written_out_vals += ["encode_dim_" + layer_num, "decode_dim_" + layer_num]
n_latent = model_params["n_latent"]
encoder_architecture_str = "-".join([str(size) for size in encoder_architecture])
decoder_architecture_str = "-".join([str(size) for size in decoder_architecture])
# job_str = "vae_output_encoder-"+encoder_architecture_str+"_Nlatent-"+str(n_latent)\
# +"_decoder-"+decoder_architecture_str
# JF: Modified job string as name was so long it was causing an error
job_str = ""
job_id_list = []
for data_id, data_val in sorted(data_params.items()):
if data_id not in written_out_vals:
if str(type(data_val)) == "<type 'list'>":
job_id_list.append(data_id + "-" + "-".join([str(val) for val in data_val]))
# LvN: Skipped '/' character because it causes errors
elif isinstance(data_val, str) and "/" in data_val:
pass
else:
job_id_list.append(data_id + "-" + str(data_val))
# for model_id,model_val in sorted(model_params.items()):
# if model_id not in written_out_vals:
# if str(type(model_val)) == "<type 'list'>":
# job_id_list.append(model_id+"-"+"-".join([str(val) for val in model_val]))
# else:
# job_id_list.append(model_id+"-"+str(model_val))
# return job_str+"_"+"_".join(job_id_list)
# JF: Modified job string as name was so long it was causing an error
return "_".join(job_id_list)
# Copied from Javier's /n/groups/marks/users/javier/ESM-1b/protein_transformer/utils/mutation_scoring.py
def DMS_file_cleanup(DMS_data, target_seq, alphabet, start_idx=1, end_idx=None, DMS_mutant_column='mutant',
DMS_phenotype_name='score', DMS_directionality=1, keep_singles_only=False):
import pandas as pd
end_idx = start_idx + len(target_seq) - 1 if end_idx is None else end_idx
DMS_data['mutant'] = DMS_data[DMS_mutant_column]
num_starting_mutants = len(DMS_data)
DMS_data = DMS_data[DMS_data['mutant'].notnull()].copy() # This should be filtering NaNs
# Make sure we're filtering nans
DMS_data.dropna(subset=['mutant'], inplace=True)
# Different way of filtering nans
DMS_data = DMS_data[DMS_data['mutant'].apply(lambda x: isinstance(x, str))]
DMS_data = DMS_data[DMS_data['mutant'].apply(lambda x: all([len(y) >= 3 for y in x.split(":")]))].copy() # filter first set of degenerate mutants
def is_int(s):
try:
int(s)
return True
except ValueError:
return False
DMS_data=DMS_data[DMS_data['mutant'].apply(lambda x: all([(y[0] in | |
"""Association testing"""
from datetime import datetime
from io import BytesIO
import logging
import os
from pathlib import Path
import queue
import socket
import sys
import time
import threading
import pytest
from pydicom import dcmread
from pydicom.dataset import Dataset, FileMetaDataset
from pydicom.uid import (
UID,
ImplicitVRLittleEndian,
ExplicitVRLittleEndian,
JPEGBaseline,
JPEG2000,
JPEG2000Lossless,
DeflatedExplicitVRLittleEndian,
ExplicitVRBigEndian
)
from pynetdicom import (
AE, VerificationPresentationContexts, build_context, evt, _config,
debug_logger, build_role
)
from pynetdicom.association import Association
from pynetdicom.dimse_primitives import C_STORE, C_FIND, C_GET, C_MOVE
from pynetdicom.dsutils import encode, decode
from pynetdicom.events import Event
from pynetdicom._globals import MODE_REQUESTOR, MODE_ACCEPTOR
from pynetdicom.pdu_primitives import (
UserIdentityNegotiation, SOPClassExtendedNegotiation,
SOPClassCommonExtendedNegotiation, SCP_SCU_RoleSelectionNegotiation,
AsynchronousOperationsWindowNegotiation, A_ASSOCIATE
)
from pynetdicom.sop_class import (
VerificationSOPClass,
CTImageStorage, MRImageStorage, RTImageStorage,
PatientRootQueryRetrieveInformationModelFind,
PatientRootQueryRetrieveInformationModelGet,
PatientRootQueryRetrieveInformationModelMove,
PatientStudyOnlyQueryRetrieveInformationModelMove,
StudyRootQueryRetrieveInformationModelMove,
SecondaryCaptureImageStorage,
UnifiedProcedureStepPullSOPClass,
UnifiedProcedureStepPushSOPClass,
UnifiedProcedureStepWatchSOPClass
)
#debug_logger()
TEST_DS_DIR = os.path.join(os.path.dirname(__file__), 'dicom_files')
BIG_DATASET = dcmread(os.path.join(TEST_DS_DIR, 'RTImageStorage.dcm')) # 2.1 M
DATASET_PATH = os.path.join(TEST_DS_DIR, 'CTImageStorage.dcm')
BAD_DATASET_PATH = os.path.join(TEST_DS_DIR, 'CTImageStorage_bad_meta.dcm')
DATASET = dcmread(DATASET_PATH)
# JPEG2000Lossless
COMP_DATASET = dcmread(
os.path.join(TEST_DS_DIR, 'MRImageStorage_JPG2000_Lossless.dcm')
)
# DeflatedExplicitVRLittleEndian
DEFL_DATASET = dcmread(
os.path.join(TEST_DS_DIR, 'SCImageStorage_Deflated.dcm')
)
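# Minimal stand-in for the DIMSE service provider: it records the status and
# response passed to send_msg() instead of putting anything on the wire, and
# get_msg() always returns an empty (None, None) pair.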
class DummyDIMSE(object):
def __init__(self):
self.status = None
self.msg_queue = queue.Queue()
def send_msg(self, rsp, context_id):
self.status = rsp.Status
self.rsp = rsp
def get_msg(self, block=False):
return None, None
class TestAssociation(object):
"""Run tests on Associtation."""
def setup(self):
"""This function runs prior to all test methods"""
self.ae = None
def teardown(self):
"""This function runs after all test methods"""
if self.ae:
self.ae.shutdown()
def test_bad_connection(self):
"""Test connect to non-AE"""
# sometimes causes hangs in Travis
ae = AE()
ae.add_requested_context(VerificationSOPClass)
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
assoc = ae.associate('localhost', 22)
assert not assoc.is_established
def test_connection_refused(self):
"""Test connection refused"""
ae = AE()
ae.add_requested_context(VerificationSOPClass)
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
assoc = ae.associate('localhost', 11120)
assert not assoc.is_established
def test_req_no_presentation_context(self):
"""Test rejection due to no acceptable presentation contexts"""
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.add_supported_context(VerificationSOPClass)
scp = ae.start_server(('', 11112), block=False)
ae.add_requested_context(CTImageStorage)
assoc = ae.associate('localhost', 11112)
assert not assoc.is_established
assert assoc.is_aborted
scp.shutdown()
def test_peer_releases_assoc(self):
"""Test peer releases association"""
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.add_supported_context(VerificationSOPClass)
scp = ae.start_server(('', 11112), block=False)
ae.add_requested_context(VerificationSOPClass)
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
scp.active_associations[0].release()
assert assoc.is_released
assert not assoc.is_established
scp.shutdown()
def test_peer_aborts_assoc(self):
"""Test peer aborts association."""
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.add_supported_context(VerificationSOPClass)
scp = ae.start_server(('', 11112), block=False)
ae.add_requested_context(VerificationSOPClass)
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
scp.active_associations[0].abort()
assert assoc.is_aborted
assert not assoc.is_established
scp.shutdown()
def test_peer_rejects_assoc(self):
"""Test peer rejects assoc"""
self.ae = ae = AE()
ae.require_calling_aet = [b'HAHA NOPE']
ae.add_supported_context(VerificationSOPClass)
scp = ae.start_server(('', 11112), block=False)
ae.add_requested_context(VerificationSOPClass)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112)
time.sleep(0.1)
assert assoc.is_rejected
assert not assoc.is_established
scp.shutdown()
def test_assoc_release(self):
"""Test Association release"""
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.add_supported_context(VerificationSOPClass)
scp = ae.start_server(('', 11112), block=False)
# Simple release
ae.add_requested_context(VerificationSOPClass)
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
assoc.release()
assert assoc.is_released
assert not assoc.is_established
# Simple release, then release again
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
assoc.release()
assert assoc.is_released
assert not assoc.is_established
assert assoc.is_released
assoc.release()
assert assoc.is_released
# Simple release, then abort
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
assoc.release()
assert assoc.is_released
assert assoc.is_released
assert not assoc.is_established
assoc.abort()
assert not assoc.is_aborted
scp.shutdown()
def test_assoc_abort(self):
"""Test Association abort"""
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.add_supported_context(VerificationSOPClass)
scp = ae.start_server(('', 11112), block=False)
# Simple abort
ae.add_requested_context(VerificationSOPClass)
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
assoc.abort()
assert not assoc.is_established
assert assoc.is_aborted
# Simple abort, then release
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
assoc.abort()
assert not assoc.is_established
assert assoc.is_aborted
assoc.release()
assert assoc.is_aborted
assert not assoc.is_released
# Simple abort, then abort again
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
assoc.abort()
assert assoc.is_aborted
assert not assoc.is_established
assoc.abort()
scp.shutdown()
def test_scp_removed_ui(self):
"""Test SCP removes UI negotiation"""
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.add_supported_context(VerificationSOPClass)
scp = ae.start_server(('', 11112), block=False)
ui = UserIdentityNegotiation()
ui.user_identity_type = 0x01
ui.primary_field = b'pynetdicom'
ae.add_requested_context(VerificationSOPClass)
assoc = ae.associate('localhost', 11112, ext_neg=[ui])
assert assoc.is_established
assoc.release()
assert assoc.is_released
scp.shutdown()
def test_scp_removed_ext_neg(self):
"""Test SCP removes ex negotiation"""
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.add_supported_context(VerificationSOPClass)
scp = ae.start_server(('', 11112), block=False)
ext = SOPClassExtendedNegotiation()
ext.sop_class_uid = '1.1.1.1'
ext.service_class_application_information = b'\x01\x02'
ae.add_requested_context(VerificationSOPClass)
assoc = ae.associate('localhost', 11112, ext_neg=[ext])
assert assoc.is_established
assoc.release()
assert assoc.is_released
scp.shutdown()
def test_scp_removed_com_ext_neg(self):
"""Test SCP removes common ext negotiation"""
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.add_supported_context(VerificationSOPClass)
scp = ae.start_server(('', 11112), block=False)
ext = SOPClassCommonExtendedNegotiation()
ext.related_general_sop_class_identification = ['1.2.1']
ext.sop_class_uid = '1.1.1.1'
ext.service_class_uid = '1.1.3'
ae.add_requested_context(VerificationSOPClass)
assoc = ae.associate('localhost', 11112, ext_neg=[ext])
assert assoc.is_established
assoc.release()
assert assoc.is_released
scp.shutdown()
def test_scp_assoc_limit(self):
"""Test SCP limits associations"""
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.maximum_associations = 1
ae.add_supported_context(VerificationSOPClass)
scp = ae.start_server(('', 11112), block=False)
ae = AE()
ae.add_requested_context(VerificationSOPClass)
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
assoc_2 = ae.associate('localhost', 11112)
assert not assoc_2.is_established
assoc.release()
assert assoc.is_released
scp.shutdown()
def test_require_called_aet(self):
"""SCP requires matching called AET"""
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.add_supported_context(VerificationSOPClass)
ae.require_called_aet = True
scp = ae.start_server(('', 11112), block=False)
ae.add_requested_context(VerificationSOPClass)
assoc = ae.associate('localhost', 11112)
assert not assoc.is_established
assert assoc.is_rejected
scp.shutdown()
def test_require_calling_aet(self):
"""SCP requires matching called AET"""
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.add_supported_context(VerificationSOPClass)
ae.require_calling_aet = [b'TESTSCP']
scp = ae.start_server(('', 11112), block=False)
ae.add_requested_context(VerificationSOPClass)
assoc = ae.associate('localhost', 11112)
assert not assoc.is_established
assert assoc.is_rejected
scp.shutdown()
def test_dimse_timeout(self):
"""Test that the DIMSE timeout works"""
def handle(event):
time.sleep(0.2)
return 0x0000
self.ae = ae = AE()
ae.add_supported_context(VerificationSOPClass)
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.dimse_timeout = 0.1
scp = ae.start_server(
('', 11112), block=False, evt_handlers=[(evt.EVT_C_ECHO, handle)]
)
ae.add_requested_context(VerificationSOPClass)
assoc = ae.associate('localhost', 11112)
assert assoc.dimse_timeout == 0.1
assert assoc.dimse.dimse_timeout == 0.1
assert assoc.is_established
assoc.send_c_echo()
assoc.release()
assert not assoc.is_released
assert assoc.is_aborted
scp.shutdown()
def test_multiple_association_release_cycles(self):
"""Test repeatedly associating and releasing"""
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.add_supported_context(VerificationSOPClass)
scp = ae.start_server(('', 11112), block=False)
ae.add_requested_context(VerificationSOPClass)
for ii in range(10):
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
assert not assoc.is_released
assoc.send_c_echo()
assoc.release()
assert assoc.is_released
assert not assoc.is_established
scp.shutdown()
def test_local(self):
"""Test Association.local."""
ae = AE()
assoc = Association(ae, 'requestor')
assoc.requestor.ae_title = ae.ae_title
assert assoc.local['ae_title'] == b'PYNETDICOM '
assoc = Association(ae, 'acceptor')
assoc.acceptor.ae_title = ae.ae_title
assert assoc.local['ae_title'] == b'PYNETDICOM '
def test_remote(self):
"""Test Association.local."""
ae = AE()
assoc = Association(ae, 'requestor')
assert assoc.remote['ae_title'] == b''
assoc = Association(ae, 'acceptor')
assert assoc.remote['ae_title'] == b''
def test_mode_raises(self):
"""Test exception is raised if invalid mode."""
msg = (
r"Invalid association `mode` value, must be either 'requestor' or "
"'acceptor'"
)
with pytest.raises(ValueError, match=msg):
assoc = Association(None, 'nope')
def test_setting_socket_override_raises(self):
"""Test that set_socket raises exception if socket set."""
ae = AE()
assoc = Association(ae, MODE_REQUESTOR)
assoc.dul.socket = 'abc'
msg = r"The Association already has a socket set."
with pytest.raises(RuntimeError, match=msg):
assoc.set_socket('cba')
assert assoc.dul.socket == 'abc'
def test_invalid_context(self, caplog):
"""Test receiving an message with invalid context ID"""
with caplog.at_level(logging.INFO, logger='pynetdicom'):
ae = AE()
ae.add_requested_context(VerificationSOPClass)
ae.add_requested_context(CTImageStorage)
ae.add_supported_context(VerificationSOPClass)
scp = ae.start_server(('', 11112), block=False)
assoc = ae.associate('localhost', 11112)
assoc.dimse_timeout = 0.1
assert assoc.is_established
assoc._accepted_cx[3] = assoc._rejected_cx[0]
assoc._accepted_cx[3].result = 0x00
assoc._accepted_cx[3]._as_scu = True
assoc._accepted_cx[3]._as_scp = True
ds = Dataset()
ds.SOPClassUID = CTImageStorage
ds.SOPInstanceUID = '1.2.3.4'
ds.file_meta = FileMetaDataset()
ds.file_meta.TransferSyntaxUID = ImplicitVRLittleEndian
result = assoc.send_c_store(ds)
time.sleep(0.1)
assert assoc.is_aborted
assert (
'Received DIMSE message with invalid or rejected context ID'
) in caplog.text
scp.shutdown()
def test_get_events(self):
"""Test Association.get_events()."""
ae = AE()
ae.add_requested_context(VerificationSOPClass)
assoc = ae.associate('localhost', 11112)
assert evt.EVT_C_STORE in assoc.get_events()
assert evt.EVT_USER_ID in assoc.get_events()
def test_requested_handler_abort(self):
"""Test the EVT_REQUESTED handler sending abort."""
def handle_req(event):
event.assoc.acse.send_abort(0x00)
time.sleep(0.1)
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.add_supported_context(VerificationSOPClass)
hh = [(evt.EVT_REQUESTED, handle_req)]
scp = ae.start_server(('', 11112), block=False, evt_handlers=hh)
ae.add_requested_context(VerificationSOPClass)
assoc = ae.associate('localhost', 11112)
assert not assoc.is_established
assert assoc.is_aborted
scp.shutdown()
def test_requested_handler_reject(self):
"""Test the EVT_REQUESTED handler sending reject."""
def handle_req(event):
event.assoc.acse.send_reject(0x02, 0x01, 0x01)
# Give the requestor time to process the message before killing
# the connection
time.sleep(0.1)
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.add_supported_context(VerificationSOPClass)
hh = [(evt.EVT_REQUESTED, handle_req)]
scp = ae.start_server(('', 11112), block=False, evt_handlers=hh)
ae.add_requested_context(VerificationSOPClass)
assoc = | |
"""
mappings: extensible, flexible, lightweight dict-like classes
<NAME> <<EMAIL>>
Copyright 2021, <NAME>
License: Apache-2.0
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Contents:
Dictionary (Bunch, MutableMapping): bunches's drop-in replacement for a
python dict with some added functionality.
Catalog (Dictionary): wildcard-accepting dict which is primarily intended
for storing different options and strategies. It also returns lists of
matches if a list of keys is provided.
Library (MutableMapping): a chained mapping used to register subclasses
and instances.
ToDo:
"""
from __future__ import annotations
from collections.abc import (
Hashable, Iterator, Mapping, MutableMapping, Sequence)
import copy
import dataclasses
import inspect
from typing import Any, Optional, Type, Union
from ..observe import traits
from ..repair import convert
from . import bunches
_ALL_KEYS: list[Any] = ['all', 'All', ['all'], ['All']]
_DEFAULT_KEYS: list[Any] = [
'default', 'defaults', 'Default', 'Defaults', ['default'], ['defaults'],
['Default'], ['Defaults']]
_NONE_KEYS: list[Any] = ['none', 'None', ['none'], ['None']]
@dataclasses.dataclass # type: ignore
class Dictionary(bunches.Bunch, MutableMapping): # type: ignore
"""Basic bunches dict replacement.
A Dictionary differs from an ordinary python dict in ways inherited from
Bunch by requiring 'add' and 'subset' methods, storing data in 'contents',
and allowing the '+' operator to join Dictionary instances with other
mappings, including Dictionary instances.
# In addition, it differs in 1 other significant way:
# 1) When returning 'keys', 'values' and 'items', this class returns them
# as tuples instead of KeysView, ValuesView, and ItemsView.
Args:
contents (MutableMapping[Hashable, Any]): stored dictionary. Defaults
to an empty dict.
default_factory (Optional[Any]): default value to return or default
function to call when the 'get' method is used. Defaults to None.
"""
contents: MutableMapping[Hashable, Any] = dataclasses.field(
default_factory = dict)
default_factory: Optional[Any] = None
""" Public Methods """
def add(self, item: Mapping[Hashable, Any], **kwargs: Any) -> None:
"""Adds 'item' to the 'contents' attribute.
Args:
item (Mapping[Hashable, Any]): items to add to 'contents' attribute.
kwargs: creates a consistent interface even when subclasses have
additional parameters.
"""
self.contents.update(item, **kwargs)
return
@classmethod
def fromkeys(
cls,
keys: Sequence[Hashable],
value: Any,
**kwargs: Any) -> Dictionary:
"""Emulates the 'fromkeys' class method from a python dict.
Args:
keys (Sequence[Hashable]): items to be keys in a new Dictionary.
value (Any): the value to use for all values in a new Dictionary.
Returns:
Dictionary: formed from 'keys' and 'value'.
"""
return cls(contents = dict.fromkeys(keys, value), **kwargs)
def get(self, key: Hashable, default: Optional[Any] = None) -> Any: # type: ignore
"""Returns value in 'contents' or default options.
Args:
key (Hashable): key for value in 'contents'.
default (Optional[Any]): default value to return if 'key' is not
found in 'contents'.
Raises:
KeyError: if 'key' is not in the Dictionary and 'default' and the
'default_factory' attribute are both None.
Returns:
Any: value matching key in 'contents' or 'default_factory' value.
"""
try:
return self[key]
except (KeyError, TypeError):
if default is None:
if self.default_factory is None:
raise KeyError(f'{key} is not in the Dictionary')
else:
try:
return self.default_factory()
except TypeError:
return self.default_factory
else:
return default
def items(self) -> tuple[tuple[Hashable, Any], ...]: # type: ignore
"""Emulates python dict 'items' method.
Returns:
tuple[tuple[Hashable, Any], ...]: a tuple equivalent to dict.items().
"""
return tuple(zip(self.keys(), self.values()))
def keys(self) -> tuple[Hashable, ...]: # type: ignore
"""Returns 'contents' keys as a tuple.
Returns:
tuple[Hashable, ...]: a tuple equivalent to dict.keys().
"""
return tuple(self.contents.keys())
def setdefault(self, value: Any) -> None: # type: ignore
"""sets default value to return when 'get' method is used.
Args:
value (Any): default value to return when 'get' is called and the
'default' parameter to 'get' is None.
"""
self.default_factory = value
return
def subset(
self,
include: Optional[Union[Hashable, Sequence[Hashable]]] = None,
exclude: Optional[Union[Hashable, Sequence[Hashable]]] = None) -> (
Dictionary):
"""Returns a new instance with a subset of 'contents'.
This method applies 'include' before 'exclude' if both are passed. If
'include' is None, all existing keys will be added before 'exclude' is
applied.
Args:
include (Optional[Union[Hashable, Sequence[Hashable]]]): key(s) to
include in the new Dictionary instance.
exclude (Optional[Union[Hashable, Sequence[Hashable]]]): key(s) to
exclude in the new Dictionary instance.
Raises:
ValueError: if 'include' and 'exclude' are both None.
Returns:
Dictionary: with only keys from 'include' and no keys in 'exclude'.
"""
if include is None and exclude is None:
raise ValueError('include or exclude must not be None')
else:
if include is None:
contents = self.contents
else:
include = list(convert.iterify(item = include))
contents = {k: self.contents[k] for k in include}
if exclude is not None:
exclude = list(convert.iterify(item = exclude))
contents = {
k: v for k, v in contents.items()
if k not in exclude}
new_dictionary = copy.deepcopy(self)
new_dictionary.contents = contents
return new_dictionary
def values(self) -> tuple[Any, ...]: # type: ignore
"""Returns 'contents' values as a tuple.
Returns:
tuple[Any, ...]: a tuple equivalent to dict.values().
"""
return tuple(self.contents.values())
""" Dunder Methods """
def __getitem__(self, key: Hashable) -> Any:
"""Returns value for 'key' in 'contents'.
Args:
key (Hashable): key in 'contents' for which a value is sought.
Returns:
Any: value stored in 'contents'.
"""
return self.contents[key]
def __setitem__(self, key: Hashable, value: Any) -> None:
"""sets 'key' in 'contents' to 'value'.
Args:
key (Hashable): key to set in 'contents'.
value (Any): value to be paired with 'key' in 'contents'.
"""
self.contents[key] = value
return
def __delitem__(self, key: Hashable) -> None:
"""Deletes 'key' in 'contents'.
Args:
key (Hashable): key in 'contents' to delete the key/value pair.
"""
del self.contents[key]
return
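# Illustrative usage sketch (not part of the library; assumes Dictionary behaves as
# documented above -- tuple-returning keys()/items()/values() and a default_factory
# fallback for get()):
#
#     d = Dictionary(contents={'a': 1, 'b': 2}, default_factory=0)
#     d.add({'c': 3})              # update 'contents' with another mapping
#     d.get('missing')             # -> 0, via default_factory instead of KeyError
#     d.subset(include=['a', 'c']) # -> new Dictionary with only 'a' and 'c'
#     d.keys()                     # -> ('a', 'b', 'c'), a tuple rather than a KeysView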
@dataclasses.dataclass # type: ignore
class Catalog(Dictionary):
"""Wildcard and list-accepting dictionary.
A Catalog inherits the differences between a Dictionary and an ordinary
python dict.
A Catalog differs from a Dictionary in 5 significant ways:
1) It recognizes an 'all' key which will return a list of all values
stored in a Catalog instance.
2) It recognizes a 'default' key which will return all values matching
keys listed in the 'default' attribute. 'default' can also be set
using the 'catalog['default'] = new_default' assignment. If
'default' is not passed when the instance is initialized, the
initial value of 'default' is 'all'.
3) It recognizes a 'none' key which will return an empty list.
4) It supports a list of keys being accessed with the matching values
returned. For example, 'catalog[['first_key', 'second_key']]' will
return the values for those keys in a list ['first_value',
'second_value'].
5) If a single key is sought, a Catalog can either return the stored
value or a stored value in a list (if 'always_return_list' is
True). The latter option is available to make iteration easier
when the iterator assumes a single type will be returned.
Args:
contents (Mapping[Hashable, Any]]): stored dictionary. Defaults to an
empty dict.
default_factory (Any): default value to return when the 'get' method is
used.
default (Sequence[Any]]): a list of keys in 'contents' which will be
used to return items when 'default' is sought. If not passed,
'default' will be set to all keys.
always_return_list (bool): whether to return a list even when the key
passed is not a list or special access key (True) or to return a
list only when a list or special access key is used (False).
Defaults to False.
"""
contents: Mapping[Hashable, Any] = dataclasses.field(
default_factory = dict)
default_factory: Optional[Any] = None
default: Optional[Any] = 'all'
always_return_list: bool = False
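# Illustrative access sketch (not part of the library; SVM, Tree, and Forest are
# hypothetical stored values, and the behavior follows the class docstring above):
#
#     options = Catalog(contents={'svm': SVM, 'tree': Tree, 'forest': Forest})
#     options['all']              # -> list of every stored value
#     options[['svm', 'tree']]    # -> [SVM, Tree], values for the listed keys
#     options['default'] = ['svm', 'forest']
#     options['default']          # -> [SVM, Forest]
#     options['none']             # -> []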
""" Dunder Methods """
def __getitem__(
self,
key: Union[Hashable, Sequence[Hashable]]) | |
"""
In-place file processing
The ``in_place`` module provides an ``InPlace`` class for reading & writing a
file "in-place": data that you write ends up at the same filepath that you read
from, and ``in_place`` takes care of all the necessary mucking about with
temporary files for you.
Visit <https://github.com/jwodder/inplace> for more information.
"""
__version__ = "0.6.0.dev1"
__author__ = "<NAME>"
__author_email__ = "<EMAIL>"
__license__ = "MIT"
__url__ = "https://github.com/jwodder/inplace"
import os
import os.path
import platform
import shutil
import sys
import tempfile
from warnings import warn
if (
platform.system() == "Windows"
and platform.python_implementation() != "PyPy"
and sys.version_info[:2] < (3, 8)
):
from jaraco.windows.filesystem import get_final_path as realpath
else:
from os.path import realpath
__all__ = ["InPlace", "InPlaceBytes", "InPlaceText"]
class InPlace:
"""
A class for reading from & writing to a file "in-place" (with data that you
write ending up at the same filepath that you read from) that takes care of
all the necessary mucking about with temporary files.
:param name: The path to the file to open & edit in-place (resolved
relative to the current directory at the time of the instance's
creation)
:type name: path-like
:param string mode: Whether to operate on the file in binary or text mode.
If ``mode`` is ``'b'``, the file will be opened in binary mode, and
data will be read & written as `bytes` objects. If ``mode`` is ``'t'``
or unset, the file will be opened in text mode, and data will be read &
written as `str` objects.
:param backup: The path at which to save the file's original contents once
editing has finished (resolved relative to the current directory at the
time of the instance's creation); if `None` (the default), no backup is
saved
:type backup: path-like
:param backup_ext: A string to append to ``name`` to get the path at which
to save the file's original contents. Cannot be empty. ``backup`` and
``backup_ext`` are mutually exclusive.
:type backup_ext: path-like
:param bool delay_open: If `True`, the newly-constructed instance will not
be open, and the user must either explicitly call the :meth:`open()`
method or use the instance as a context manager in order to open it.
If `False` (the default), the instance will be automatically opened as
soon as it is constructed.
:param bool move_first: If `True`, the original (input) file will be moved
to a temporary location before opening, and the output file will be
created in its place. If `False` (the default), the output file will
be created at a temporary location, and neither file will be moved or
deleted until :meth:`close()` is called.
:param kwargs: Additional keyword arguments to pass to `open()`
"""
UNOPENED = 0
OPEN = 1
CLOSED = 2
def __init__(
self,
name,
mode=None,
backup=None,
backup_ext=None,
delay_open=False,
move_first=False,
**kwargs,
):
cwd = os.getcwd()
#: The path to the file to edit in-place
self.name = os.fsdecode(name)
#: Whether to operate on the file in binary or text mode
self.mode = mode
#: The absolute path of the file to edit in-place
self.filepath = os.path.join(cwd, self.name)
#: ``filepath`` with symbolic links resolved. This is set just before
#: opening the file.
self.realpath = None
if backup is not None:
if backup_ext is not None:
raise ValueError("backup and backup_ext are mutually exclusive")
#: The absolute path of the backup file (if any) that the original
#: contents of ``realpath`` will be moved to after editing
self.backuppath = os.path.join(cwd, os.fsdecode(backup))
elif backup_ext is not None:
if not backup_ext:
raise ValueError("backup_ext cannot be empty")
self.backuppath = self.filepath + os.fsdecode(backup_ext)
else:
self.backuppath = None
#: Whether to move the input file before opening and create the output
#: file in its place instead of moving the files after closing
self.move_first = move_first
#: Additional arguments to pass to `open`
self.kwargs = kwargs
#: The input filehandle from which data is read; only non-`None` while
#: the instance is open
self.input = None
#: The output filehandle to which data is written; only non-`None`
#: while the instance is open
self.output = None
#: The absolute path to the temporary file; only non-`None` while the
#: instance is open
self._tmppath = None
#: Are we not open yet, open, or closed?
self._state = self.UNOPENED
if not delay_open:
self.open()
def __enter__(self):
if self._state < self.OPEN:
self.open()
return self
def __exit__(self, exc_type, _exc_value, _traceback):
if self._state == self.OPEN:
if exc_type is not None:
self.rollback()
else:
self.close()
return False
def _mktemp(self, filepath):
"""
Create an empty temporary file in the same directory as ``filepath``
and return the path to the new file
"""
fd, tmppath = tempfile.mkstemp(
dir=os.path.dirname(filepath),
prefix="._in_place-",
)
os.close(fd)
return tmppath
def open(self):
"""
Open the file :attr:`name` for reading and open a temporary file for
writing. If :attr:`move_first` is `True`, :attr:`name` will be moved
to a temporary location before opening.
If ``delay_open=True`` was passed to the instance's constructor, this
method must be called (either explicitly or else implicitly by using
the instance as a context manager) before the instance can be used for
reading or writing. If ``delay_open`` was `False` (the default), this
method is called automatically by the constructor, and the user should
not call it again.
:raises ValueError: if called more than once on the same instance
"""
if self._state < self.OPEN:
self._state = self.OPEN
self.realpath = realpath(self.filepath)
try:
if self.move_first:
if self.backuppath is not None:
self._tmppath = self._mktemp(self.backuppath)
else:
self._tmppath = self._mktemp(self.realpath)
try:
os.replace(self.realpath, self._tmppath)
except OSError:
try_unlink(self._tmppath)
self._tmppath = None
raise
self.output = self.open_write(self.realpath)
copystats(self._tmppath, self.realpath)
input_path = self._tmppath
else:
self._tmppath = self._mktemp(self.realpath)
self.output = self.open_write(self._tmppath)
copystats(self.realpath, self._tmppath)
input_path = self.realpath
self.input = self.open_read(input_path)
except Exception:
self.rollback()
raise
else:
raise ValueError("open() called twice on same filehandle")
def open_read(self, path):
"""
Open the file at ``path`` for reading and return a file-like object.
Use :attr:`mode` to determine whether to open in binary or text mode.
"""
if not self.mode or self.mode == "t":
return open(path, "r", **self.kwargs)
elif self.mode == "b":
return open(path, "rb", **self.kwargs)
else:
raise ValueError(f"{self.mode!r}: invalid mode")
def open_write(self, path):
"""
Open the file at ``path`` for writing and return a file-like object.
Use :attr:`mode` to determine whether to open in binary or text mode.
"""
if not self.mode or self.mode == "t":
return open(path, "w", **self.kwargs)
elif self.mode == "b":
return open(path, "wb", **self.kwargs)
else:
raise ValueError(f"{self.mode!r}: invalid mode")
def _close(self):
"""
Close filehandles (if they aren't closed already) and set them to
`None`
"""
if self.input is not None:
self.input.close()
self.input = None
if self.output is not None:
self.output.close()
self.output = None
def close(self):
"""
Close filehandles and move affected files to their final destinations.
If called after the filehandle has already been closed (with either
this method or :meth:`rollback`), :meth:`close` does nothing.
:return: `None`
:raises ValueError: if called before opening the filehandle
"""
if self._state == self.UNOPENED:
raise ValueError("Cannot close unopened file")
elif self._state == self.OPEN:
self._state = self.CLOSED
self._close()
try:
if self.move_first:
if self.backuppath is not None:
try:
os.replace(self._tmppath, self.backuppath)
except IOError:
os.replace(self._tmppath, self.realpath)
self._tmppath = None
raise
else:
if self.backuppath is not None:
os.replace(self.realpath, self.backuppath)
os.replace(self._tmppath, self.realpath)
finally:
if self._tmppath is not None:
try_unlink(self._tmppath)
self._tmppath = None
# elif self._state == self.CLOSED: pass
def rollback(self):
"""
Close filehandles and remove/rename temporary files so that things look
like they did before the `InPlace` instance was opened
:return: `None`
:raises ValueError: if called while the `InPlace` instance is not open
"""
if self._state == self.UNOPENED:
raise ValueError("Cannot close unopened file")
elif self._state == self.OPEN:
self._state = self.CLOSED
self._close()
if self._tmppath is not None: # In case of error while opening
if self.move_first:
os.replace(self._tmppath, self.realpath)
else:
try_unlink(self._tmppath)
self._tmppath = None
else:
assert self._state == self.CLOSED
raise ValueError("Cannot rollback closed file")
@property
def closed(self):
"""
`True` iff the filehandle is not currently open. Note that, if the
filehandle was initialized with ``delay_open=True``, `closed` will be
`True` until :meth:`open()` is called.
"""
return self._state != self.OPEN
def read(self, size=-1):
if self._state != self.OPEN:
raise ValueError("Filehandle is not currently open")
return self.input.read(size)
def readline(self, size=-1):
| |
file containing Semi-Empirical building inventory data in an HDFContainer. (described in __init__).
:param collapse_file:
HDF5 file containing Semi-Empirical collapse rate data in an HDFContainer. (described in __init__).
:param casualty_file:
HDF5 file containing Semi-Empirical casualty rate data in an HDFContainer. (described in __init__).
:param workforce_file:
HDF5 file containing Semi-Empirical workforce data in an HDFContainer. (described in __init__).
:param growth_file:
Excel spreadsheet containing population growth rate data (described in PopulationGrowth.fromUNSpreadsheet()).
:returns:
SemiEmpiricalFatality object.
"""
# turn the inventory,collapse, and casualty spreadsheets into Panels...
inventory = HDFContainer.load(inventory_file)
collapse = HDFContainer.load(collapse_file)
casualty = HDFContainer.load(casualty_file)
workforce = HDFContainer.load(workforce_file)
# extract the one dataframe from the Panel
workforce = workforce.getDataFrame('Workforce')
workforce = workforce.set_index('CountryCode')
# load population growth rates into a PopulationGrowth object (built-in defaults are used here)...
popgrowth = PopulationGrowth.fromDefault()
return cls(inventory, collapse, casualty, workforce, popgrowth)
def setGlobalFiles(self, popfile, popyear, urbanfile, isofile):
"""Set the global data files (population,urban/rural, country code) for use of model with ShakeMaps.
:param popfile:
File name of population grid.
:param popyear:
Year population data was collected.
:param urbanfile:
File name of urban/rural grid (rural cells indicated with a 1, urban cells with a 2).
:param isofile:
File name of numeric ISO country code grid.
:returns:
None
"""
self._popfile = popfile
self._popyear = popyear
self._urbanfile = urbanfile
self._isofile = isofile
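# Illustrative end-to-end sketch (not part of the class; all file paths below are
# hypothetical). The loss calculation combines the HDF inventory/collapse/casualty/
# workforce containers with global population, urban/rural, and country-code grids:
#
#     semi = SemiEmpiricalFatality.fromFiles(
#         'inventory.hdf', 'collapse.hdf', 'casualty.hdf', 'workforce.hdf', 'growth.xlsx')
#     semi.setGlobalFiles('population.flt', 2016, 'urban_rural.bil', 'isogrid.bil')
#     fatalities, res_fatal, nonres_fatal = semi.getLosses('grid.xml')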
def getBuildingDesc(self, btype, desctype='short'):
"""Get a building description given a short building type code.
:param btype:
Short building type code ('A' (adobe), 'C' (reinforced concrete), etc.)
:param desctype:
A string, one of:
- 'short': Very short descriptions ('adobe block')
- 'operational': Short description, intended for use in automatically generated sentences about building types.
- 'long': Most verbose description ('Adobe block (unbaked dried mud block) walls')
:returns:
Either a short, operational, or long description of building types.
"""
bsheet = self._inventory.getDataFrame('BuildingTypes')
bsheet = bsheet.set_index('Code')
row = bsheet.loc[btype]
if desctype == 'short':
return row['ShortDescription']
elif desctype == 'operational':
return row['OperationalDescription']
else:
return row['LongDescription']
return None
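# Illustrative calls (a sketch; the exact strings returned depend on the loaded
# inventory data, the examples echo the descriptions in the docstring above):
#
#     semi.getBuildingDesc('A')                   # short: e.g. 'adobe block'
#     semi.getBuildingDesc('A', desctype='long')  # e.g. 'Adobe block (unbaked dried mud block) walls'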
def getWorkforce(self, ccode):
"""Get the workforce data corresponding to a given country code.
:param ccode:
Two letter ISO country code.
:returns:
Pandas series containing Workforce data for given country
(WorkForceTotal,WorkForceAgriculture,WorkForceIndustrial,WorkForceServices)
"""
try:
wforce = self._workforce.loc[ccode]
except Exception:
wforce = None
return wforce
def getCollapse(self, ccode, mmi, inventory):
"""Return the collapse rates for a given country,intensity, and inventory.
:param ccode:
Two letter ISO country code.
:param mmi:
MMI value (one of 6.0,6.5,7.0,7.5,8.0,8.5,9.0)
:param inventory:
Pandas Series containing an inventory for the given country.
:returns:
Pandas Series object containing the collapse rates for given building types, ccode, and MMI.
"""
collapse_frame = self._collapse.getDataFrame(ccode)
collapse_frame = collapse_frame.set_index('BuildingCode')
try:
idx = inventory.index.drop('Unnamed: 0')
collapse_frame = collapse_frame.loc[idx]
except Exception:
collapse_dict = inventory.to_dict()
collapse = pd.Series(collapse_dict)
for key, value in collapse_dict.items():
collapse[key] = np.nan
return collapse
mmicol = 'MMI_%s' % str(mmi)
collapse = collapse_frame[mmicol]
return collapse
def getFatalityRates(self, ccode, timeofday, inventory):
"""Return fatality rates for a given country, time of day, and inventory.
:param ccode:
Two-letter ISO country code.
:param timeofday:
One of 'day','transit', or 'night'.
:param inventory:
Pandas Series containing an inventory for the given country.
:returns:
Pandas Series object containing fatality rates for given country, time of day, and inventory.
"""
fatalframe = self._casualty.getDataFrame(ccode)
fatalframe = fatalframe.set_index('BuildingCode')
timecol = TIMES[timeofday]
if 'Unnamed: 0' in inventory.index:
idx = inventory.index.drop('Unnamed: 0')
else:
idx = inventory.index
fatrates = fatalframe.loc[idx][timecol]
return fatrates
def getInventories(self, ccode, density):
"""Return two pandas Series objects corresponding to the urban or rural inventory for given country.
:param ccode:
Two-letter ISO country code.
:param density:
One of semimodel.URBAN (2) or semimodel.RURAL (1).
:returns:
Two Pandas Series: 1) Residential Inventory and 2) Non-Residential Inventory.
"""
if density == URBAN:
resinv = self._inventory.getDataFrame('UrbanResidential')
nresinv = self._inventory.getDataFrame('UrbanNonResidential')
else:
resinv = self._inventory.getDataFrame('RuralResidential')
nresinv = self._inventory.getDataFrame('RuralNonResidential')
resinv = resinv.set_index('CountryCode')
nresinv = nresinv.set_index('CountryCode')
# we may be missing inventory for certain countries (Bonaire?). Return empty series.
if ccode not in resinv.index or ccode not in nresinv.index:
return (pd.Series(), pd.Series())
# pandas series of residential inventory
resrow = resinv.loc[ccode]
resrow = resrow.drop('CountryName')
# pandas series of non-residential inventory
nresrow = nresinv.loc[ccode]
nresrow = nresrow.drop('CountryName')
# now trim down the series to only include finite and non-zero values
resrow = resrow[resrow.notnull()]
resrow = resrow[resrow > 0]
nresrow = nresrow[nresrow.notnull()]
nresrow = nresrow[nresrow > 0]
return (resrow, nresrow)
def getLosses(self, shakefile):
"""Calculate number of fatalities using semi-empirical approach.
:param shakefile:
Path to a ShakeMap grid.xml file.
:returns:
Tuple of:
1) Total number of fatalities
2) Dictionary of residential fatalities per building type, per country.
3) Dictionary of non-residential fatalities per building type, per country.
"""
# get shakemap geodict
shakedict = ShakeGrid.getFileGeoDict(shakefile, adjust='res')
# get population geodict
popdict = get_file_geodict(self._popfile)
# get country code geodict
isodict = get_file_geodict(self._isofile)
# get urban grid geodict
urbdict = get_file_geodict(self._urbanfile)
# load all of the grids we need
if popdict == shakedict == isodict == urbdict:
# special case, probably for testing...
shakegrid = ShakeGrid.load(shakefile, adjust='res')
popgrid = read(self._popfile)
isogrid = read(self._isofile)
urbgrid = read(self._urbanfile)
else:
sampledict = popdict.getBoundsWithin(shakedict)
shakegrid = ShakeGrid.load(shakefile,
samplegeodict=sampledict,
resample=True,
method='linear',
adjust='res')
popgrid = read(self._popfile,
samplegeodict=sampledict,
resample=False)
isogrid = read(self._isofile,
samplegeodict=sampledict,
resample=True,
method='nearest',
doPadding=True,
padValue=0)
urbgrid = read(self._urbanfile,
samplegeodict=sampledict,
resample=True,
method='nearest',
doPadding=True,
padValue=RURAL)
# determine the local apparent time of day (based on longitude)
edict = shakegrid.getEventDict()
etime = edict['event_timestamp']
elon = edict['lon']
time_of_day, event_year, event_hour = get_time_of_day(etime, elon)
# round off our MMI data to nearest 0.5 (5.5 should stay 5.5, 5.4
# should become 5.5, 5.24 should become 5.0, etc.)
# TODO: Someday, make this more general to include perhaps grids of all IMT values, or
# at least the ones we have collapse data for.
mmidata = np.round(shakegrid.getLayer('mmi').getData() / 0.5) * 0.5
# get arrays from our other grids
popdata = popgrid.getData()
isodata = isogrid.getData()
urbdata = urbgrid.getData()
# modify the population values for growth rate by country
ucodes = np.unique(isodata[~np.isnan(isodata)])
for ccode in ucodes:
cidx = (isodata == ccode)
popdata[cidx] = self._popgrowth.adjustPopulation(
popdata[cidx], ccode, self._popyear, event_year)
# create a dictionary containing indoor populations by building type (in cells where MMI >= 6)
#popbystruct = get_indoor_pop(mmidata,popdata,urbdata,isodata,time_of_day)
# find all mmi values greater than 9, set them to 9
mmidata[mmidata > 9.0] = 9.0
# dictionary containers for sums of fatalities (res/nonres) by building type
res_fatal_by_ccode = {}
nonres_fatal_by_ccode = {}
# fatality sum
ntotal = 0
# loop over countries
ucodes = np.unique(isodata[~np.isnan(isodata)])
for ucode in ucodes:
if ucode == 0:
continue
res_fatal_by_btype = {}
nonres_fatal_by_btype = {}
cdict = self._country.getCountry(int(ucode))
ccode = cdict['ISO2']
# get the workforce Series data for the current country
wforce = self.getWorkforce(ccode)
if wforce is None:
logging.info('No workforce data for %s. Skipping.' %
(cdict['Name']))
continue
# loop over MMI values 6-9
for mmi in np.arange(6, 9.5, 0.5):
c1 = (mmidata == mmi)
c2 = (isodata == ucode)
if ucode > 900 and ucode != CALIFORNIA_US_CCODE:
ucode = US_CCODE
for dclass in [URBAN, RURAL]:
c3 = (urbdata == dclass)
# get the population data in those cells at MMI, in country, and density class
# I think I want an AND condition here
popcells = popdata[c1 & c2 & c3]
# get the population distribution across residential, non-residential, and outdoor.
res, nonres, outside = pop_dist(
popcells, wforce, time_of_day, dclass)
# get the inventory for urban residential
resrow, nresrow = self.getInventories(ccode, dclass)
# TODO - figure out why this is happening, make the following lines
# not necessary
if 'Unnamed: 0' in resrow:
resrow = resrow.drop('Unnamed: 0')
if 'Unnamed: 0' in nresrow:
nresrow = nresrow.drop('Unnamed: 0')
# now multiply the residential/non-residential population through the inventory data
numres = len(resrow)
numnonres = len(nresrow)
resmat = np.reshape(
resrow.values, (numres, 1)).astype(np.float32)
nresmat = np.reshape(
nresrow.values, (numnonres, 1)).astype(np.float32)
popres = np.tile(res, (numres, 1))
popnonres = np.tile(nonres, (numnonres, 1))
popresbuilding = (popres * resmat)
popnonresbuilding = (popnonres * nresmat)
# now we have the residential and non-residental population
# distributed through the building types for each cell that matches
# MMI,country, and density criteria.
# popresbuilding rows | |
import json
import logging
import pytz
from typing import List
from datetime import datetime
from enum import Enum
from fastapi import BackgroundTasks
from sqlalchemy.orm import Session
from dispatch.database import SessionLocal
from dispatch.decorators import background_task
from dispatch.messaging import INCIDENT_WORKFLOW_CREATED_NOTIFICATION
from dispatch.event import service as event_service
from dispatch.incident import flows as incident_flows
from dispatch.incident import service as incident_service
from dispatch.incident.enums import IncidentStatus, IncidentSlackViewBlockId, NewIncidentSubmission
from dispatch.incident.models import Incident
from dispatch.incident_priority import service as incident_priority_service
from dispatch.incident_type import service as incident_type_service
from dispatch.participant import service as participant_service
from dispatch.participant.models import Participant, ParticipantUpdate
from dispatch.plugin import service as plugin_service
from dispatch.workflow import service as workflow_service
from dispatch.workflow.flows import send_workflow_notification
from dispatch.workflow.models import Workflow, WorkflowInstanceCreate
from dispatch.plugins.dispatch_slack import service as dispatch_slack_service
from .messaging import create_incident_reported_confirmation_message
from .service import get_user_profile_by_email, get_user_email
slack_client = dispatch_slack_service.create_slack_client()
log = logging.getLogger(__name__)
class UpdateParticipantBlockFields(str, Enum):
reason_added = "reason_added_field"
participant = "selected_participant_field"
class UpdateParticipantCallbacks(str, Enum):
submit_form = "update_participant_submit_form"
update_view = "update_participant_update_view"
class UpdateNotificationsGroupBlockFields(str, Enum):
update_members = "update_members_field"
class UpdateNotificationsGroupCallbacks(str, Enum):
submit_form = "update_notifications_group_submit_form"
class AddTimelineEventBlockFields(str, Enum):
date = "date_field"
hour = "hour_field"
minute = "minute_field"
timezone = "timezone_field"
description = "description_field"
class AddTimelineEventCallbacks(str, Enum):
submit_form = "add_timeline_event_submit_form"
class RunWorkflowBlockFields(str, Enum):
workflow_select = "run_workflow_select"
run_reason = "run_workflow_run_reason"
param = "run_workflow_param"
class RunWorkflowCallbacks(str, Enum):
submit_form = "run_workflow_submit_form"
update_view = "run_workflow_update_view"
def handle_modal_action(action: dict, background_tasks: BackgroundTasks):
"""Handles all modal actions."""
view_data = action["view"]
view_data["private_metadata"] = json.loads(view_data["private_metadata"])
action_id = view_data["callback_id"]
for f in action_functions(action_id):
background_tasks.add_task(f, action)
def action_functions(action_id: str):
"""Determines which function needs to be run."""
action_mappings = {
AddTimelineEventCallbacks.submit_form: [add_timeline_event_from_submitted_form],
NewIncidentSubmission.form_slack_view: [report_incident_from_submitted_form],
UpdateParticipantCallbacks.submit_form: [update_participant_from_submitted_form],
UpdateParticipantCallbacks.update_view: [update_update_participant_modal],
UpdateNotificationsGroupCallbacks.submit_form: [
update_notifications_group_from_submitted_form
],
RunWorkflowCallbacks.update_view: [update_workflow_modal],
RunWorkflowCallbacks.submit_form: [run_workflow_submitted_form],
}
# this allows for unique action blocks e.g. invite-user or invite-user-1, etc
for key in action_mappings.keys():
if key in action_id:
return action_mappings[key]
return []
def parse_submitted_form(view_data: dict):
"""Parse the submitted data and return important / required fields for Dispatch to create an incident."""
parsed_data = {}
state_elem = view_data.get("state")
state_values = state_elem.get("values")
for state in state_values:
state_key_value_pair = state_values[state]
for elem_key in state_key_value_pair:
elem_key_value_pair = state_values[state][elem_key]
if elem_key_value_pair.get("selected_option") and elem_key_value_pair.get(
"selected_option"
).get("value"):
parsed_data[state] = {
"name": elem_key_value_pair.get("selected_option").get("text").get("text"),
"value": elem_key_value_pair.get("selected_option").get("value"),
}
elif elem_key_value_pair.get("selected_date"):
parsed_data[state] = elem_key_value_pair.get("selected_date")
else:
parsed_data[state] = elem_key_value_pair.get("value")
return parsed_data
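# Illustrative sketch of what parse_submitted_form() produces (the block and action
# IDs here are hypothetical; the structure mirrors Slack's view submission payload):
#
#     view_data["state"]["values"] == {
#         "title_field": {"title_action": {"value": "Suspicious login activity"}},
#         "type_field": {"type_action": {"selected_option": {
#             "text": {"text": "Phishing"}, "value": "Phishing"}}},
#     }
#
#     parse_submitted_form(view_data) ==
#         {"title_field": "Suspicious login activity",
#          "type_field": {"name": "Phishing", "value": "Phishing"}}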
@background_task
def report_incident_from_submitted_form(action: dict, db_session: Session = None):
submitted_form = action.get("view")
parsed_form_data = parse_submitted_form(submitted_form)
requested_form_title = parsed_form_data.get(IncidentSlackViewBlockId.title)
requested_form_description = parsed_form_data.get(IncidentSlackViewBlockId.description)
requested_form_incident_type = parsed_form_data.get(IncidentSlackViewBlockId.type)
requested_form_incident_priority = parsed_form_data.get(IncidentSlackViewBlockId.priority)
# Send a confirmation to the user
blocks = create_incident_reported_confirmation_message(
title=requested_form_title,
incident_type=requested_form_incident_type.get("value"),
incident_priority=requested_form_incident_priority.get("value"),
)
user_id = action["user"]["id"]
channel_id = submitted_form.get("private_metadata")["channel_id"]
dispatch_slack_service.send_ephemeral_message(
client=slack_client,
conversation_id=channel_id,
user_id=user_id,
text="",
blocks=blocks,
)
# Create the incident
user_email = action["user"]["email"]
incident = incident_service.create(
db_session=db_session,
title=requested_form_title,
status=IncidentStatus.active,
description=requested_form_description,
incident_type=requested_form_incident_type,
incident_priority=requested_form_incident_priority,
reporter_email=user_email,
tags=[], # The modal does not currently support tags
)
incident_flows.incident_create_flow(incident_id=incident.id)
def create_block_option_from_template(text: str, value: str):
"""Helper function which generates the option block for modals / views"""
return {"text": {"type": "plain_text", "text": str(text), "emoji": True}, "value": str(value)}
def build_report_incident_blocks(channel_id: str, db_session: Session):
"""Builds all blocks required for the reporting incident modal."""
incident_type_options = []
for incident_type in incident_type_service.get_all(db_session=db_session):
incident_type_options.append(
create_block_option_from_template(text=incident_type.name, value=incident_type.name)
)
incident_priority_options = []
for incident_priority in incident_priority_service.get_all(db_session=db_session):
incident_priority_options.append(
create_block_option_from_template(
text=incident_priority.name, value=incident_priority.name
)
)
modal_template = {
"type": "modal",
"title": {"type": "plain_text", "text": "Security Incident Report"},
"blocks": [
{
"type": "context",
"elements": [
{
"type": "mrkdwn",
"text": "If you suspect a security incident and require help from security, "
"please fill out the following to the best of your abilities.",
}
],
},
{
"block_id": IncidentSlackViewBlockId.title,
"type": "input",
"label": {"type": "plain_text", "text": "Title"},
"element": {
"type": "plain_text_input",
"placeholder": {
"type": "plain_text",
"text": "A brief explanatory title. You can change this later.",
},
},
},
{
"block_id": IncidentSlackViewBlockId.description,
"type": "input",
"label": {"type": "plain_text", "text": "Description"},
"element": {
"type": "plain_text_input",
"placeholder": {
"type": "plain_text",
"text": "A summary of what you know so far. It's all right if this is incomplete.",
},
"multiline": True,
},
},
{
"block_id": IncidentSlackViewBlockId.type,
"type": "input",
"label": {"type": "plain_text", "text": "Type"},
"element": {
"type": "static_select",
"placeholder": {"type": "plain_text", "text": "Select Incident Type"},
"options": incident_type_options,
},
},
{
"block_id": IncidentSlackViewBlockId.priority,
"type": "input",
"label": {"type": "plain_text", "text": "Priority", "emoji": True},
"element": {
"type": "static_select",
"placeholder": {"type": "plain_text", "text": "Select Incident Priority"},
"options": incident_priority_options,
},
},
],
"close": {"type": "plain_text", "text": "Cancel"},
"submit": {"type": "plain_text", "text": "Submit"},
"callback_id": NewIncidentSubmission.form_slack_view,
"private_metadata": json.dumps({"channel_id": str(channel_id)}),
}
return modal_template
@background_task
def create_report_incident_modal(incident_id: int, command: dict = None, db_session=None):
"""Creates a modal for reporting an incident."""
channel_id = command.get("channel_id")
trigger_id = command.get("trigger_id")
modal_create_template = build_report_incident_blocks(
channel_id=channel_id, db_session=db_session
)
dispatch_slack_service.open_modal_with_user(
client=slack_client, trigger_id=trigger_id, modal=modal_create_template
)
def build_incident_participants_select_block(incident: Incident, participant: Participant = None):
"""Builds a static select with all current participants."""
selected_option = None
participant_options = []
for p in incident.participants:
current_option = {
"text": {"type": "plain_text", "text": p.individual.name},
"value": str(p.id),
}
participant_options.append(current_option)
if participant:
if p.id == participant.id:
selected_option = current_option
if participant:
select_block = {
"block_id": UpdateParticipantBlockFields.participant,
"type": "input",
"element": {
"type": "static_select",
"placeholder": {"type": "plain_text", "text": "Select Participant"},
"options": participant_options,
"initial_option": selected_option,
"action_id": UpdateParticipantBlockFields.participant,
},
"label": {"type": "plain_text", "text": "Participant"},
}
else:
select_block = {
"block_id": UpdateParticipantBlockFields.participant,
"type": "actions",
"elements": [
{
"type": "static_select",
"placeholder": {"type": "plain_text", "text": "Select Participant"},
"options": participant_options,
}
],
}
return select_block
def build_update_participant_blocks(incident: Incident, participant: Participant = None):
"""Builds all blocks required for updating the participant modal."""
modal_template = {
"type": "modal",
"title": {"type": "plain_text", "text": "Edit Participant"},
"blocks": [
{
"type": "context",
"elements": [
{
"type": "mrkdwn",
"text": "Use this form to edit why a particpant was added to this incident.",
}
],
},
],
"close": {"type": "plain_text", "text": "Cancel"},
"submit": {"type": "plain_text", "text": "Submit"},
"callback_id": UpdateParticipantCallbacks.update_view,
"private_metadata": json.dumps({"incident_id": str(incident.id)}),
}
select_block = build_incident_participants_select_block(
incident=incident, participant=participant
)
modal_template["blocks"].append(select_block)
# we need to show the reason if we're updating
if participant:
modal_template["blocks"].append(
{
"block_id": UpdateParticipantBlockFields.reason_added,
"type": "input",
"element": {
"type": "plain_text_input",
"multiline": True,
"initial_value": participant.added_reason or "",
"action_id": UpdateParticipantBlockFields.reason_added,
},
"label": {"type": "plain_text", "text": "Reason Added"},
}
)
modal_template["callback_id"] = UpdateParticipantCallbacks.submit_form
return modal_template
@background_task
def update_participant_from_submitted_form(action: dict, db_session=None):
"""Saves form data."""
submitted_form = action.get("view")
parsed_form_data = parse_submitted_form(submitted_form)
added_reason = parsed_form_data.get(UpdateParticipantBlockFields.reason_added)
participant_id = int(parsed_form_data.get(UpdateParticipantBlockFields.participant)["value"])
selected_participant = participant_service.get(
db_session=db_session, participant_id=participant_id
)
participant_service.update(
db_session=db_session,
participant=selected_participant,
participant_in=ParticipantUpdate(added_reason=added_reason),
)
@background_task
def update_update_participant_modal(action: dict, db_session=None):
"""Pushes an updated view to the update participant modal."""
trigger_id = action["trigger_id"]
incident_id = action["view"]["private_metadata"]["incident_id"]
participant_id = action["actions"][0]["selected_option"]["value"]
selected_participant = participant_service.get(
db_session=db_session, participant_id=participant_id
)
incident = incident_service.get(db_session=db_session, incident_id=incident_id)
modal_update_template = build_update_participant_blocks(
incident=incident, participant=selected_participant
)
dispatch_slack_service.update_modal_with_user(
client=slack_client,
trigger_id=trigger_id,
view_id=action["view"]["id"],
modal=modal_update_template,
)
@background_task
def create_update_participant_modal(incident_id: int, command: dict, db_session=None):
"""Creates a modal for updating a participant."""
trigger_id = command["trigger_id"]
incident = incident_service.get(db_session=db_session, incident_id=incident_id)
modal_create_template = build_update_participant_blocks(incident=incident)
dispatch_slack_service.open_modal_with_user(
client=slack_client, trigger_id=trigger_id, modal=modal_create_template
)
def build_update_notifications_group_blocks(incident: Incident, db_session: SessionLocal):
"""Builds all blocks required to update the membership of the notifications group."""
modal_template = {
"type": "modal",
"title": {"type": "plain_text", "text": "Update Group Membership"},
"blocks": [
{
"type": "context",
"elements": [
{
"type": "plain_text",
"text": "Use this form to update the membership of the notifications group.",
}
],
},
],
"close": {"type": "plain_text", "text": "Cancel"},
"submit": {"type": "plain_text", "text": "Update"},
"callback_id": UpdateNotificationsGroupCallbacks.submit_form,
"private_metadata": json.dumps({"incident_id": str(incident.id)}),
}
group_plugin = plugin_service.get_active(db_session=db_session, plugin_type="participant-group")
members = group_plugin.instance.list(incident.notifications_group.email)
members_block = {
"type": "input",
"block_id": UpdateNotificationsGroupBlockFields.update_members,
"label": {"type": "plain_text", "text": "Members"},
"element": {
"type": "plain_text_input",
"action_id": UpdateNotificationsGroupBlockFields.update_members,
"multiline": True,
"initial_value": (", ").join(members),
},
}
modal_template["blocks"].append(members_block)
modal_template["blocks"].append(
{
"type": "context",
"elements": [{"type": "plain_text", "text": "Separate email addresses with commas."}],
},
)
return modal_template
@background_task
def create_update_notifications_group_modal(incident_id: int, command: dict, db_session=None):
"""Creates a modal for editing members of the notifications group."""
trigger_id = command["trigger_id"]
incident = incident_service.get(db_session=db_session, incident_id=incident_id)
modal_create_template = build_update_notifications_group_blocks(
incident=incident, db_session=db_session
)
dispatch_slack_service.open_modal_with_user(
client=slack_client, trigger_id=trigger_id, modal=modal_create_template
)
@background_task
def update_notifications_group_from_submitted_form(action: dict, db_session=None):
"""Updates notifications group based on submitted form data."""
submitted_form = action.get("view")
parsed_form_data = parse_submitted_form(submitted_form)
current_members = (
submitted_form["blocks"][1]["element"]["initial_value"].replace(" ", "").split(",")
)
updated_members = (
parsed_form_data.get(UpdateNotificationsGroupBlockFields.update_members)
.replace(" ", "")
.split(",")
)
members_added = list(set(updated_members) - set(current_members))
members_removed = list(set(current_members) - set(updated_members))
incident_id = action["view"]["private_metadata"]["incident_id"]
incident = incident_service.get(db_session=db_session, incident_id=incident_id)
group_plugin = plugin_service.get_active(db_session=db_session, plugin_type="participant-group")
group_plugin.instance.add(incident.notifications_group.email, members_added)
group_plugin.instance.remove(incident.notifications_group.email, members_removed)
def build_add_timeline_event_blocks(incident: Incident):
"""Builds all blocks required to add an event to the incident timeline."""
modal_template = {
"type": "modal",
"title": {"type": "plain_text", "text": "Add Timeline Event"},
"blocks": [
{
"type": "context",
"elements": [
{
"type": "plain_text",
"text": "Use this form to add an event to the incident timeline.",
}
],
},
],
"close": {"type": "plain_text", "text": "Cancel"},
"submit": {"type": "plain_text", "text": "Add"},
"callback_id": AddTimelineEventCallbacks.submit_form,
"private_metadata": json.dumps({"incident_id": str(incident.id)}),
}
date_picker_block = {
"type": | |
np.conj(Ylms[pair1]) * Ylms[pair2]
term2b += F[det]*F[det]*Ylms[pair1]*Ylms[pair2]*crossTermsV[(pair1,pair2)] #((-1)**pair1[0])*crossTerms[det][((pair1[0],-pair1[1]),pair2)]
term2a = -np.real(term2a) / 4. /(distMpc/distMpcRef)**2
term2b = -term2b/4./(distMpc/distMpcRef)**2 # coefficient of exp(-4ipsi)
term1 = 0.
for det in detList:
for pair in rholmsDictionary[det]:
term1+= np.conj(F[det]*Ylms[pair])*rholmsDictionary[det][pair]( float(tshift[det]) )
term1 = term1 / (distMpc/distMpcRef) # coefficient of exp(-2ipsi)
# if the coefficients of the exponential are too large, do the integral by hand, in the gaussian limit? NOT IMPLEMENTED YET
if False: # term2a+np.abs(term2b)+np.abs(term1)>100:
return term2a+ np.log(special.iv(0,np.abs(term1))) # an approximation, ignoring term2b entirely!
else:
# marginalize over phase. Ideally done analytically. Only works if the terms are not too large -- otherwise overflow can occur.
# Should probably implement a special solution if overflow occurs
def fnIntegrand(x):
return np.exp( term2a+ np.real(term2b*np.exp(-4.j*x)+ term1*np.exp(+2.j*x)))/np.pi # remember how the two terms enter -- note signs!
LmargPsi = integrate.quad(fnIntegrand,0,np.pi,limit=100,epsrel=1e-4)[0]
return np.log(LmargPsi)
def SingleDetectorLogLikelihood(rholm_vals, crossTerms,crossTermsV, Ylms, F, dist):
"""
Compute the value of the log-likelihood at a single detector from
several intermediate pieces of data.
Inputs:
- rholm_vals: A dictionary of values of inner product between data
and h_lm modes, < h_lm(t*) | d >, at a single time of interest t*
- crossTerms: A dictionary of inner products between h_lm modes:
< h_lm | h_l'm' >
- Ylms: Dictionary of values of -2-spin-weighted spherical harmonic modes
for a certain inclination and ref. phase, Y_lm(incl, - phiref)
- F: Complex-valued antenna pattern depending on sky location and
polarization angle, F = F_+ + i F_x
- dist: The distance from the source to detector in meters
Outputs: The value of ln L for a single detector given the inputs.
"""
global distMpcRef
distMpc = dist/(lsu.lsu_PC*1e6)
invDistMpc = distMpcRef/distMpc
Fstar = np.conj(F)
# Eq. 35 of Richard's notes
term1 = 0.
# for mode in rholm_vals:
for mode, Ylm in Ylms.items():
term1 += Fstar * np.conj( Ylms[mode]) * rholm_vals[mode]
term1 = np.real(term1) *invDistMpc
# Eq. 26 of Richard's notes
term2 = 0.
for pair1 in rholm_vals:
for pair2 in rholm_vals:
term2 += F * np.conj(F) * ( crossTerms[(pair1,pair2)])* np.conj(Ylms[pair1]) * Ylms[pair2] \
+ F*F*Ylms[pair1]*Ylms[pair2]*crossTermsV[pair1,pair2] #((-1)**pair1[0])* crossTerms[((pair1[0],-pair1[1]),pair2)]
# + F*F*Ylms[pair1]*Ylms[pair2]*((-1)**pair1[0])* crossTerms[((pair1[0],-pair1[1]),pair2)]
term2 = -np.real(term2) / 4. /(distMpc/distMpcRef)**2
return term1 + term2
def ComputeModeIPTimeSeries(hlms, data, psd, fmin, fMax, fNyq,
N_shift, N_window, analyticPSD_Q=False,
inv_spec_trunc_Q=False, T_spec=0.):
r"""
Compute the complex-valued overlap between
each member of a SphHarmFrequencySeries 'hlms'
and the interferometer data COMPLEX16FrequencySeries 'data',
weighted by the power spectral density REAL8FrequencySeries 'psd'.
The integrand is non-zero in the range: [-fNyq, -fmin] union [fmin, fNyq].
This integrand is then inverse-FFT'd to get the inner product
at a discrete series of time shifts.
Returns a SphHarmTimeSeries object containing the complex inner product
for discrete values of the reference time tref. The epoch of the
SphHarmTimeSeries object is set to account for the transformation
"""
rholms = {}
assert data.deltaF == hlms[list(hlms.keys())[0]].deltaF
assert data.data.length == hlms[list(hlms.keys())[0]].data.length
deltaT = data.data.length/(2*fNyq)
# Create an instance of class to compute inner product time series
IP = lsu.ComplexOverlap(fmin, fMax, fNyq, data.deltaF, psd,
analyticPSD_Q, inv_spec_trunc_Q, T_spec, full_output=True)
# Loop over modes and compute the overlap time series
for pair in hlms.keys():
rho, rhoTS, rhoIdx, rhoPhase = IP.ip(hlms[pair], data)
rhoTS.epoch = data.epoch - hlms[pair].epoch
# rholms[pair] = lal.CutCOMPLEX16TimeSeries(rhoTS, N_shift, N_window) # Warning: code currently fails w/o this cut.
tmp= lsu.DataRollBins(rhoTS, N_shift) # restore functionality for bidirectional shifts: waveform need not start at t=0
rholms[pair] =lal.CutCOMPLEX16TimeSeries(rhoTS, 0, N_window)
return rholms
def InterpolateRholm(rholm, t,verbose=False):
h_re = np.real(rholm.data.data)
h_im = np.imag(rholm.data.data)
if verbose:
print("Interpolation length check ", len(t), len(h_re))
# spline interpolate the real and imaginary parts of the time series
h_real = interpolate.InterpolatedUnivariateSpline(t, h_re[:len(t)], k=3,ext='zeros')
h_imag = interpolate.InterpolatedUnivariateSpline(t, h_im[:len(t)], k=3,ext='zeros')
return lambda ti: h_real(ti) + 1j*h_imag(ti)
# Little faster
#def anon_intp(ti):
#idx = np.searchsorted(t, ti)
#return rholm.data.data[idx]
#return anon_intp
#from pygsl import spline
#spl_re = spline.cspline(len(t))
#spl_im = spline.cspline(len(t))
#spl_re.init(t, np.real(rholm.data.data))
#spl_im.init(t, np.imag(rholm.data.data))
#@profile
#def anon_intp(ti):
#re = spl_re.eval_e_vector(ti)
#return re + 1j*im
#return anon_intp
# Doesn't work, hits recursion depth
#from scipy.signal import cspline1d, cspline1d_eval
#re_coef = cspline1d(np.real(rholm.data.data))
#im_coef = cspline1d(np.imag(rholm.data.data))
#dx, x0 = rholm.deltaT, float(rholm.epoch)
#return lambda ti: cspline1d_eval(re_coef, ti) + 1j*cspline1d_eval(im_coef, ti)
def InterpolateRholms(rholms, t,verbose=False):
"""
Return a dictionary keyed on mode index tuples, (l,m)
where each value is an interpolating function of the overlap against data
as a function of time shift:
rholm_intp(t) = < h_lm(t) | d >
'rholms' is a dictionary keyed on (l,m) containing discrete time series of
< h_lm(t_i) | d >
't' is an array of the discrete times:
[t_0, t_1, ..., t_N]
"""
rholm_intp = {}
for mode in rholms.keys():
rholm = rholms[mode]
# The mode is identically zero, don't bother with it
if sum(abs(rholm.data.data)) == 0.0:
continue
rholm_intp[ mode ] = InterpolateRholm(rholm, t,verbose)
return rholm_intp
def ComputeModeCrossTermIP(hlmsA, hlmsB, psd, fmin, fMax, fNyq, deltaF,
analyticPSD_Q=False, inv_spec_trunc_Q=False, T_spec=0., verbose=True,prefix="U"):
"""
Compute the 'cross terms' between waveform modes, i.e.
< h_lm | h_l'm' >.
The inner product is weighted by power spectral density 'psd' and
integrated over the interval [-fNyq, -fmin] union [fmin, fNyq]
Returns a dictionary of inner product values keyed by tuples of mode indices
i.e. ((l,m),(l',m'))
"""
# Create an instance of class to compute inner product
IP = lsu.ComplexIP(fmin, fMax, fNyq, deltaF, psd, analyticPSD_Q,
inv_spec_trunc_Q, T_spec)
crossTerms = {}
for mode1 in hlmsA.keys():
for mode2 in hlmsB.keys():
crossTerms[ (mode1,mode2) ] = IP.ip(hlmsA[mode1], hlmsB[mode2])
if verbose:
print(" : ", prefix, " populated ", (mode1, mode2), " = ",\
crossTerms[(mode1,mode2) ])
return crossTerms
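# Illustrative sketch (assumes a 'crossTerms' dictionary produced by ComputeModeCrossTermIP
# and that numpy is imported as 'np', as elsewhere in this module): the diagonal entries
# <h_lm | h_lm> are the (real, positive) norms of the individual modes.
def _example_mode_norms(crossTerms):
    return {pair[0]: np.real(val) for pair, val in crossTerms.items() if pair[0] == pair[1]}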
def ComplexAntennaFactor(det, RA, DEC, psi, tref):
"""
Function to compute the complex-valued antenna pattern function:
F+ + i Fx
'det' is a detector prefix string (e.g. 'H1')
'RA' and 'DEC' are right ascension and declination (in radians)
'psi' is the polarization angle
'tref' is the reference GPS time
"""
detector = lalsim.DetectorPrefixToLALDetector(det)
Fp, Fc = lal.ComputeDetAMResponse(detector.response, RA, DEC, psi, lal.GreenwichMeanSiderealTime(tref))
return Fp + 1j * Fc
def ComputeYlms(Lmax, theta, phi, selected_modes=None):
"""
Return a dictionary keyed by tuples
(l,m)
that contains the values of all
-2Y_lm(theta,phi)
with
l <= Lmax
-l <= m <= l
"""
Ylms = {}
for l in range(2,Lmax+1):
for m in range(-l,l+1):
if selected_modes is not None and (l,m) not in selected_modes:
continue
Ylms[ (l,m) ] = lal.SpinWeightedSphericalHarmonic(theta, phi,-2, l, m)
return Ylms
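# Illustrative usage sketch (not part of the original module): restrict the harmonic sum
# to the dominant quadrupole modes.  The sign convention Y_lm(incl, -phiref) follows the
# SingleDetectorLogLikelihood docstring above.
def _example_dominant_mode_ylms(incl, phiref):
    # Only (2,2) and (2,-2) are evaluated; all other (l,m) pairs are skipped.
    return ComputeYlms(2, incl, -phiref, selected_modes=[(2, 2), (2, -2)])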
def ComputeArrivalTimeAtDetector(det, RA, DEC, tref):
"""
Function to compute the time of arrival at a detector
from the time of arrival at the geocenter.
'det' is a detector prefix string (e.g. 'H1')
'RA' and 'DEC' are right ascension and declination (in radians)
'tref' is the reference time at the geocenter. It can be either a float (in which case the return is a float) or a GPSTime object (in which case it returns a GPSTime)
"""
detector = lalsim.DetectorPrefixToLALDetector(det)
# if tref is a float or a GPSTime object,
# it should be automagically converted in the appropriate way
return tref + lal.TimeDelayFromEarthCenter(detector.location, RA, DEC, tref)
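# Illustrative sketch (the detector list is an assumption, not from the original module):
# build a per-detector dictionary of arrival times from a common geocenter time.
def _example_detector_arrival_times(RA, DEC, tref_geocenter, detectors=("H1", "L1", "V1")):
    return {det: ComputeArrivalTimeAtDetector(det, RA, DEC, tref_geocenter) for det in detectors}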
def ComputeArrivalTimeAtDetectorWithoutShift(det, RA, DEC, tref):
"""
Function to compute the time of arrival at a detector
from the time of arrival at the geocenter.
'det' is a detector prefix string (e.g. 'H1')
'RA' and 'DEC' are right ascension and declination (in radians)
'tref' is the reference time at the geocenter. It can be either a float (in which case the return is a float) or a GPSTime object (in which case it returns a GPSTime)
"""
detector = lalsim.DetectorPrefixToLALDetector(det)
print(detector, detector.location)
# if tref is a float or a GPSTime object,
# it should be automagically converted in the appropriate way
return lal.TimeDelayFromEarthCenter(detector.location, RA, DEC, tref)
# Create complex FD data that does not assume Hermitianity - i.e.
# contains positive and negative freq. content
# TIMING INFO:
# - epoch set so the merger event occurs at total time P.tref
def non_herm_hoff(P):
hp, hc = lalsim.SimInspiralChooseTDWaveform(P.phiref, P.deltaT, P.m1, P.m2,
P.s1x, P.s1y, P.s1z, P.s2x, P.s2y, P.s2z, P.fmin, P.fref, P.dist,
P.incl, P.lambda1, P.lambda2, P.waveFlags, P.nonGRparams,
P.ampO, P.phaseO, P.approx)
hp.epoch = hp.epoch + P.tref
hc.epoch = hc.epoch + P.tref
hoft = lalsim.SimDetectorStrainREAL8TimeSeries(hp, hc,
from . import ClientImageHandling
from . import ClientParsing
from . import ClientPaths
from . import ClientRendering
from . import ClientSearch
from . import ClientServices
from . import ClientThreading
from . import HydrusConstants as HC
from . import HydrusExceptions
from . import HydrusFileHandling
from . import HydrusImageHandling
from . import HydrusPaths
from . import HydrusSerialisable
from . import HydrusThreading
import json
import os
import random
import threading
import time
import wx
from . import HydrusData
from . import ClientData
from . import ClientConstants as CC
from . import HydrusGlobals as HG
import collections
from . import HydrusTags
import traceback
import weakref
# The important thing here, and the reason why it is recursive, is that we want to preserve the parent-grandparent interleaving
def BuildServiceKeysToChildrenToParents( service_keys_to_simple_children_to_parents ):
def AddParents( simple_children_to_parents, children_to_parents, child, parents ):
for parent in parents:
if parent not in children_to_parents[ child ]:
children_to_parents[ child ].append( parent )
if parent in simple_children_to_parents:
grandparents = simple_children_to_parents[ parent ]
AddParents( simple_children_to_parents, children_to_parents, child, grandparents )
service_keys_to_children_to_parents = collections.defaultdict( HydrusData.default_dict_list )
for ( service_key, simple_children_to_parents ) in list(service_keys_to_simple_children_to_parents.items()):
children_to_parents = service_keys_to_children_to_parents[ service_key ]
for ( child, parents ) in list(simple_children_to_parents.items()):
AddParents( simple_children_to_parents, children_to_parents, child, parents )
return service_keys_to_children_to_parents
def BuildServiceKeysToSimpleChildrenToParents( service_keys_to_pairs_flat ):
service_keys_to_simple_children_to_parents = collections.defaultdict( HydrusData.default_dict_set )
for ( service_key, pairs ) in list(service_keys_to_pairs_flat.items()):
service_keys_to_simple_children_to_parents[ service_key ] = BuildSimpleChildrenToParents( pairs )
return service_keys_to_simple_children_to_parents
def BuildSimpleChildrenToParents( pairs ):
simple_children_to_parents = HydrusData.default_dict_set()
for ( child, parent ) in pairs:
if child == parent:
continue
if LoopInSimpleChildrenToParents( simple_children_to_parents, child, parent ): continue
simple_children_to_parents[ child ].add( parent )
return simple_children_to_parents
def CollapseTagSiblingPairs( groups_of_pairs ):
# This now takes 'groups' of pairs in descending order of precedence
# This allows us to mandate that local tags take precedence
# a pair is invalid if:
# it causes a loop (a->b, b->c, c->a)
# there is already a relationship for the 'bad' sibling (a->b, a->c)
valid_chains = {}
for pairs in groups_of_pairs:
pairs = list( pairs )
pairs.sort()
for ( bad, good ) in pairs:
if bad == good:
# a->a is a loop!
continue
if bad not in valid_chains:
we_have_a_loop = False
current_best = good
while current_best in valid_chains:
current_best = valid_chains[ current_best ]
if current_best == bad:
we_have_a_loop = True
break
if not we_have_a_loop:
valid_chains[ bad ] = good
# now we collapse the chains, turning:
# a->b, b->c ... e->f
# into
# a->f, b->f ... e->f
siblings = {}
for ( bad, good ) in list(valid_chains.items()):
# given a->b, want to find f
if good in siblings:
# f already calculated and added
best = siblings[ good ]
else:
# we don't know f for this chain, so let's figure it out
current_best = good
while current_best in valid_chains:
current_best = valid_chains[ current_best ] # pursue endpoint f
best = current_best
# add a->f
siblings[ bad ] = best
return siblings
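# Illustrative example (not part of the original module): with the single pair group
# [ ('a','b'), ('b','c') ], CollapseTagSiblingPairs returns { 'a': 'c', 'b': 'c' },
# i.e. every 'bad' tag points directly at the endpoint of its chain.
def _example_collapse_chain():
    return CollapseTagSiblingPairs( [ [ ( 'a', 'b' ), ( 'b', 'c' ) ] ] )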
def LoopInSimpleChildrenToParents( simple_children_to_parents, child, parent ):
potential_loop_paths = { parent }
while len( potential_loop_paths.intersection( list(simple_children_to_parents.keys()) ) ) > 0:
new_potential_loop_paths = set()
for potential_loop_path in potential_loop_paths.intersection( list(simple_children_to_parents.keys()) ):
new_potential_loop_paths.update( simple_children_to_parents[ potential_loop_path ] )
potential_loop_paths = new_potential_loop_paths
if child in potential_loop_paths:
return True
return False
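# Illustrative example (not part of the original module): given existing relationships
# a->b and b->c, proposing c->a would close a loop, which the helper above detects.
def _example_loop_detection():
    existing = HydrusData.default_dict_set()
    existing[ 'a' ].add( 'b' )
    existing[ 'b' ].add( 'c' )
    return LoopInSimpleChildrenToParents( existing, 'c', 'a' ) # True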
class BitmapManager( object ):
MAX_MEMORY_ALLOWANCE = 512 * 1024 * 1024
def __init__( self, controller ):
self._controller = controller
self._unusued_bitmaps = collections.defaultdict( list )
self._destroyee_bitmaps = []
self._total_unused_memory_size = 0
self._media_background_bmp_path = None
self._media_background_bmp = None
self._awaiting_destruction = False
HG.client_controller.sub( self, 'MaintainMemory', 'memory_maintenance_pulse' )
def _AdjustTotalMemory( self, direction, key ):
( width, height, depth ) = key
amount = width * height * depth / 8
self._total_unused_memory_size += direction * amount
def _ClearDestroyees( self ):
def action_destroyee( item ):
( destroy_timestamp, bitmap ) = item
if HydrusData.TimeHasPassedPrecise( destroy_timestamp ) and bitmap:
bitmap.Destroy()
return False
else:
return True
try:
self._destroyee_bitmaps = list( filter( action_destroyee, self._destroyee_bitmaps ) )
finally:
self._awaiting_destruction = False
if len( self._destroyee_bitmaps ) > 0:
self._ScheduleDestruction()
def _ScheduleDestruction( self ):
if not self._awaiting_destruction:
self._controller.CallLaterWXSafe( self._controller, 1.0, self._ClearDestroyees )
self._awaiting_destruction = True
def ReleaseBitmap( self, bitmap ):
( width, height ) = bitmap.GetSize()
depth = bitmap.GetDepth()
key = ( width, height, depth )
if key in self._unusued_bitmaps and len( self._unusued_bitmaps[ key ] ) > 10:
self._destroyee_bitmaps.append( ( HydrusData.GetNowPrecise() + 0.5, bitmap ) )
self._ScheduleDestruction()
else:
self._unusued_bitmaps[ key ].append( bitmap )
self._AdjustTotalMemory( 1, key )
if self._total_unused_memory_size > self.MAX_MEMORY_ALLOWANCE:
self._controller.CallLaterWXSafe( self._controller, 1.0, self.MaintainMemory )
def GetBitmap( self, width, height, depth = 24 ):
if width < 0:
width = 20
if height < 0:
height = 20
key = ( width, height, depth )
if key in self._unusued_bitmaps:
bitmaps = self._unusued_bitmaps[ key ]
if len( bitmaps ) > 0:
bitmap = bitmaps.pop()
self._AdjustTotalMemory( -1, key )
return bitmap
else:
del self._unusued_bitmaps[ key ]
bitmap = wx.Bitmap( width, height, depth )
return bitmap
def GetBitmapFromBuffer( self, width, height, depth, data ):
bitmap = self.GetBitmap( width, height, depth = depth )
if depth == 24:
bitmap.CopyFromBuffer( data, format = wx.BitmapBufferFormat_RGB )
elif depth == 32:
bitmap.CopyFromBuffer( data, format = wx.BitmapBufferFormat_RGBA )
return bitmap
def GetMediaBackgroundBitmap( self ):
bmp_path = self._controller.new_options.GetNoneableString( 'media_background_bmp_path' )
if bmp_path != self._media_background_bmp_path:
self._media_background_bmp_path = bmp_path
if self._media_background_bmp is not None:
self.ReleaseBitmap( self._media_background_bmp )
try:
bmp = wx.Bitmap( self._media_background_bmp_path )
self._media_background_bmp = bmp
except Exception as e:
self._media_background_bmp = None
HydrusData.ShowText( 'Loading a bmp caused an error!' )
HydrusData.ShowException( e )
return None
return self._media_background_bmp
def MaintainMemory( self ):
destroy_time = HydrusData.GetNowPrecise() + 0.5
for bitmaps in self._unusued_bitmaps.values():
self._destroyee_bitmaps.extend( ( ( destroy_time, bitmap ) for bitmap in bitmaps ) )
self._unusued_bitmaps = collections.defaultdict( list )
self._total_unused_memory_size = 0
self._ScheduleDestruction()
class ClientFilesManager( object ):
def __init__( self, controller ):
self._controller = controller
self._rwlock = ClientThreading.FileRWLock()
self._prefixes_to_locations = {}
self._bad_error_occurred = False
self._missing_locations = set()
self._Reinit()
def _AddFile( self, hash, mime, source_path ):
dest_path = self._GenerateExpectedFilePath( hash, mime )
if HG.file_report_mode:
HydrusData.ShowText( 'Adding file from path: ' + str( ( source_path, dest_path ) ) )
successful = HydrusPaths.MirrorFile( source_path, dest_path )
if not successful:
raise Exception( 'There was a problem copying the file from ' + source_path + ' to ' + dest_path + '!' )
def _AddThumbnailFromBytes( self, hash, thumbnail_bytes ):
dest_path = self._GenerateExpectedThumbnailPath( hash )
if HG.file_report_mode:
HydrusData.ShowText( 'Adding thumbnail: ' + str( ( len( thumbnail_bytes ), dest_path ) ) )
try:
HydrusPaths.MakeFileWritable( dest_path )
with open( dest_path, 'wb' ) as f:
f.write( thumbnail_bytes )
except Exception as e:
raise HydrusExceptions.FileMissingException( 'The thumbnail for file "{}" failed to write to path "{}". This event suggests that hydrus does not have permission to write to its thumbnail folder. Please check everything is ok.'.format( hash.hex(), dest_path ) )
self._controller.pub( 'clear_thumbnails', { hash } )
self._controller.pub( 'new_thumbnails', { hash } )
log1mX - self.Z)
return ret
def CDF(self,x):
if math.isnan(x):
return float("nan")
elif (x <= 0.0):
return 0.0
elif (x >= 1.0):
return 1.0
else:
return incompleteBetaFunction(x,self.alpha,self.beta)
def QF(self,p):
if math.isnan(p):
return float("nan")
elif (p > 1.0) or (p < 0.0):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, self.pos)
elif (p == 1.0):
return 1.0
elif (p == 0.0):
return 0.0
else:
return inverseIncompleteBetaFunction(p,self.alpha,self.beta)
################### Cauchy
class CauchyDistribution(object):
def __init__(self, location, scale, name, errcodeBase, pos):
self.name = name
self.errcodeBase = errcodeBase
self.pos = pos
self.loc = location
self.s = scale
if self.s <= 0.0:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, self.pos)
def PDF(self, x):
term1 = 1.0/(math.pi*self.s)
term2 = 1.0/(1 + pow((x - self.loc)/self.s,2))
return term1 * term2
def CDF(self, x):
return 0.5 + math.atan2(x-self.loc, self.s)*(1.0/math.pi)
def QF(self, p):
if (p < 0.0) or (p > 1.0):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, self.pos)
elif (p == 1.0):
return float("inf")
elif (p == 0.0):
return float("-inf")
else:
return self.loc + self.s*math.tan(math.pi*(p - 0.5))
################### F
# from: http://www.stat.tamu.edu/~jnewton/604/chap3.pdf
class FDistribution(object):
def __init__(self, upperDOF, lowerDOF, name, errcodeBase, pos):
self.name = name
self.errcodeBase = errcodeBase
self.pos = pos
self.maxIter = 1000
self.epsilon = 1e-8
self.d1 = float(upperDOF)
self.d2 = float(lowerDOF)
if (self.d1 <= 0.0) or (self.d2 <= 0.0):
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, self.pos)
def PDF(self,x):
if (x < 0.0):
return 0.0
elif (x == 0) and (self.d1 < 2.0):
return float("inf")
elif (x == 0) and (self.d1 == 2.0):
return 1.0
else:
num_arg1 = pow(self.d1/self.d2, self.d1/2.0)
num_arg2 = pow(x, (self.d1/2.0)-1.0)
den_arg1 = math.exp(logBetaFunction(self.d1/2.0, self.d2/2.0))
den_arg2 = pow((1.0 + (self.d1*x)/self.d2), (self.d1 + self.d2)/2.0)
return (num_arg1*num_arg2)/(den_arg1*den_arg2)
def CDF(self,x):
if math.isnan(x):
return float("nan")
elif x <= 0.0:
return 0.0
else:
arg1 = (self.d1*x)/(self.d1*x + self.d2)
arg2 = self.d1/2.0
arg3 = self.d2/2.0
return incompleteBetaFunction(arg1, arg2, arg3)
def QF(self, p):
if (p < 0.0) or (p > 1.0):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, self.pos)
elif (p == 1.0):
return float("inf")
elif (p == 0.0):
return 0.0
else:
low = 0.0
high = 1.0
while self.CDF(high) < p:
high *= 2.0
diff = None
while diff is None or abs(diff) > self.epsilon:
mid = (low + high) / 2.0
diff = self.CDF(mid) - p
if diff > 0:
high = mid
else:
low = mid
return mid
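# Illustrative sketch (an assumption, not part of the original module): the quantile
# functions of this and the following distributions invert the CDF by bracketing and
# bisection.  This generic helper assumes a distribution object exposing a CDF method
# with non-negative support.
def _example_quantile_by_bisection(dist, p, epsilon=1e-8):
    low, high = 0.0, 1.0
    # Grow 'high' until the CDF passes p, then bisect the bracket down to 'epsilon'.
    while dist.CDF(high) < p:
        high *= 2.0
    mid = (low + high) / 2.0
    while abs(dist.CDF(mid) - p) > epsilon:
        if dist.CDF(mid) > p:
            high = mid
        else:
            low = mid
        mid = (low + high) / 2.0
    return mid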
################### Lognormal
class LognormalDistribution(object):
def __init__(self, meanlog, sdlog, name, errcodeBase, pos):
self.name = name
self.errcodeBase = errcodeBase
self.pos = pos
self.mu = meanlog
self.sigma = sdlog
self.epsilon = 1e-8
self.maxIter = 100
if self.sigma <= 0.0:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, self.pos)
def PDF(self, x):
if x <= 0.0:
return 0.0
else:
term1 = 1.0/(x*self.sigma*math.sqrt(2.0*math.pi))
term2 = pow(math.log(x) - self.mu, 2.0)/(2.0*pow(self.sigma, 2.0))
return term1 * math.exp(-term2)
def CDF(self, x):
if x <= 0.0:
return 0.0
else:
return GaussianDistribution(0.0, 1.0, self.name, self.errcodeBase, self.pos).CDF((math.log(x) - self.mu)/self.sigma)
def QF(self, p):
if math.isnan(p):
return float("nan")
if (p < 0.0) or (p > 1.0):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, self.pos)
elif (p == 0.0):
return 0.0
elif (p == 1.0):
return float("inf")
else:
low = 0.0
high = 1.0
while self.CDF(high) < p:
high *= 2.0
diff = None
while diff is None or abs(diff) > self.epsilon:
mid = (low + high) / 2.0
diff = self.CDF(mid) - p
if diff > 0:
high = mid
else:
low = mid
return mid
# # Using Newton-Raphson algorithm
# if p <= .001:
# self.epsilon = 1e-5
# p1 = p
# if (p1 > 0.8) and (p1 < 0.9):
# p2 = .5
# else:
# p2 = 0.85
# counter = 0
# while (abs(p1 - p2) > self.epsilon) and (counter < self.maxIter):
# q2 = (self.CDF(p2) - p)
# p1 = p2
# p2 = p1 - (q2/self.PDF(p1))
# counter += 1
# return p2
################### Student's T
class TDistribution(object):
def __init__(self, DOF, name, errcodeBase, pos):
self.name = name
self.errcodeBase = errcodeBase
self.pos = pos
self.df = float(DOF)
self.epsilon = 1e-8
self.maxIter = 800
if self.df <= 0.0:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, self.pos)
def PDF(self, x):
term1 = 1.0/(math.sqrt(self.df) * math.exp(logBetaFunction(0.5, self.df/2.0)))
term2 = pow(1.0 + (x*x/self.df), -(self.df + 1.0)/2.0)
return term1 * term2
def CDF(self, x):
if math.isnan(x):
return float("nan")
arg1 = self.df/(self.df + x*x)
arg2 = self.df/2.0
arg3 = 0.5
if (x > 0):
return 1.0 - 0.5*incompleteBetaFunction(arg1, arg2, arg3)
elif (x == 0.0):
return 0.5
else:
return 0.5*incompleteBetaFunction(arg1, arg2, arg3)
def QF(self, p):
if (p < 0.0) or (p > 1.0):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, self.pos)
elif (p == 1.0):
return float("inf")
elif (p == 0.0):
return float("-inf")
else:
low = -1.0
high = 1.0
while self.CDF(low) > p:
low *= 2.0
while self.CDF(high) < p:
high *= 2.0
diff = None
while diff is None or abs(diff) > self.epsilon:
mid = (low + high) / 2.0
diff = self.CDF(mid) - p
if diff > 0:
high = mid
else:
low = mid
return mid
# # Using Newton-Raphson algorithm
# if p <= .001:
# self.epsilon = 1e-5
# p1 = p
# if (p1 > 0.8) and (p1 < 0.9):
# p2 = .5
# else:
# p2 = 0.85
# counter = 0
# while (abs(p1 - p2) > self.epsilon) or (counter < self.maxIter):
# q2 = (self.CDF(p2) - p)
# p1 = p2
# p2 = p1 - (q2/self.PDF(p1))
# counter += 1
# return p2
################### Binomial
class BinomialDistribution(object):
def __init__(self, size, p_success, name, errcodeBase, pos):
self.name = name
self.errcodeBase = errcodeBase
self.pos = pos
self.prob = p_success
self.n = float(size)
if self.n < 0.0:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, self.pos)
elif (self.prob < 0.0) or (self.prob > 1.0):
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, self.pos)
def PDF(self, x):
if x < 0.0:
return 0.0
else:
if x == 0:
nchoosek = 1.0
elif (x < 0) or (x > self.n):
nchoosek = 0.0
else:
nchoosek = nChooseK(self.n, x)
return nchoosek * pow(self.prob, x) * pow(1.0 - self.prob, self.n - x)
def CDF(self, x):
if math.isnan(x):
return float("nan")
elif x < 0.0:
return 0.0
else:
if (self.n - x <= 0.0) or (self.prob == 0.0):
return 1.0
else:
x = math.floor(x)
return incompleteBetaFunction(1.0 - self.prob, self.n - x, 1.0 + x)
def QF(self, p):
if (p < 0.0) or (p > 1.0):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, self.pos)
elif math.isnan(p):
return float("nan")
elif (p == 1.0):
return self.n
elif (p == 0.0):
return 0.0
elif (p > 0.0) and (p < 1.0):
# step through CDFs until we find the right one
x = 0
p0 = 0.0
while (p0 < p):
p0 = p0 + self.PDF(x)
x += 1
return x - 1
else:
return 0.0
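# Illustrative check (a sketch; assumes the module's nChooseK helper used by PDF above is
# available): the discrete quantile function returns the smallest x whose cumulative
# probability reaches p, e.g. the median of Binomial(10, 0.5) is 5.
def _example_binomial_median():
    return BinomialDistribution(10, 0.5, "example", 0, None).QF(0.5) # == 5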
################### Uniform
class UniformDistribution(object):
def __init__(self, minimum, maximum, name, errcodeBase, pos):
self.name = name
self.errcodeBase = errcodeBase
self.pos = pos
self.mi = minimum
self.ma = maximum
if self.mi >= self.ma:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, self.pos)
def PDF(self, x):
if (x < self.mi) or (x > self.ma):
return 0.0
elif (math.isnan(x)):
return float("nan")
else:
return 1.0/(self.ma - self.mi)
def CDF(self, x):
if (x < self.mi):
return 0.0
elif (x > self.ma):
return 1.0
else:
return (x - self.mi)/(self.ma - self.mi)
def QF(self, p):
if (p > 1.0) or (p < 0.0):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, self.pos)
elif (p > 0.0) or (p < 1.0):
return self.mi + p*(self.ma - self.mi)
elif (math.isnan(p)):
return float("nan")
else:
return 0.0
################### Geometric
class GeometricDistribution(object):
def __init__(self, p_success, name, errcodeBase, pos):
self.name = name
self.errcodeBase = errcodeBase
self.pos = pos
self.prob = p_success
if (self.prob < 0.0) or (self.prob > 1.0):
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, self.pos)
def PDF(self, x):
if (x < 0.0):
return 0.0
else:
return self.prob*pow(1.0 - self.prob, x)
def CDF(self, x):
if (x < 0.0):
return 0.0
else:
x = math.floor(x)
return 1.0 - pow(1.0 - self.prob, x + 1.0)
- reprojection
# reproject longitude to the landsat projection and save as tiff file
lon_rep, ulx_dem, lry_dem, lrx_dem, uly_dem, epsg_to = SEBAL.reproject_dataset(lon_fileName, pixel_spacing, UTM_Zone=UTM_Zone)
# Get the reprojected longitude data
lon_proy = lon_rep.GetRasterBand(1).ReadAsArray(0, 0, ncol, nrow)
lon_fileName = os.path.join(temp_folder_PreSEBAL,'lon_resh.tif')
SEBAL.save_GeoTiff_proy(dest, lon_proy, lon_fileName, shape, nband=1)
# Calculate slope and aspect from the reprojected DEM
deg2rad,rad2deg,slope,aspect=SEBAL.Calc_Gradient(data_DEM, pixel_spacing)
if Determine_transmissivity == 1:
# calculate the cos zenith angle
Ra_mountain_24, Ra_inst, cos_zn_resh, dr, phi, delta = SEBAL.Calc_Ra_Mountain(lon,DOY,hour,minutes,lon_proy,lat_proy,slope,aspect)
cos_zn_fileName = os.path.join(temp_folder_PreSEBAL,'cos_zn.tif')
SEBAL.save_GeoTiff_proy(dest, cos_zn_resh, cos_zn_fileName, shape, nband=1)
# Save the Ra
Ra_inst_fileName = os.path.join(temp_folder_PreSEBAL,'Ra_inst.tif')
SEBAL.save_GeoTiff_proy(dest, Ra_inst, Ra_inst_fileName, shape, nband=1)
Ra_mountain_24_fileName = os.path.join(temp_folder_PreSEBAL,'Ra_mountain_24.tif')
SEBAL.save_GeoTiff_proy(dest, Ra_mountain_24, Ra_mountain_24_fileName, shape, nband=1)
#################### Calculate Transmissivity ##########################################
# Open the General_Input sheet
ws = wb['Meteo_Input']
# Extract the method radiation value
Value_Method_Radiation_inst = '%s' %str(ws['L%d' % number].value)
# Values to check if data is created
Check_Trans_inst = 0
Check_Trans_24 = 0
''' This is now turned off, so you need to fill in the instantanious transmissivity or Radiation
# Extract the data to the method of radiation
if int(Value_Method_Radiation_inst) == 2:
Field_Radiation_inst = '%s' %str(ws['N%d' % number].value)
if Field_Radiation_inst == 'None':
# Instantanious Transmissivity files must be created
Check_Trans_inst = 1
# Calculate Transmissivity
quarters_hours = np.ceil(minutes/30.) * 30
hours_GMT = hour - delta_GTM
if quarters_hours >= 60:
hours_GMT += 1
quarters_hours = 0
# Define the instantanious LANDSAF file
name_Landsaf_inst = 'HDF5_LSASAF_MSG_DSSF_MSG-Disk_%d%02d%02d%02d%02d.tif' %(year, month,day, hours_GMT, quarters_hours)
file_Landsaf_inst = os.path.join(DSSF_Folder,name_Landsaf_inst)
# Reproject the Ra_inst data to match the LANDSAF data
Ra_inst_3Km_dest, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(Ra_inst_fileName, file_Landsaf_inst, method = 1)
Ra_inst_3Km = Ra_inst_3Km_dest.GetRasterBand(1).ReadAsArray()
Ra_inst_3Km[Ra_inst_3Km==0] = np.nan
# Open the Rs LANDSAF data
dest_Rs_inst_3Km = gdal.Open(file_Landsaf_inst)
Rs_inst_3Km = dest_Rs_inst_3Km.GetRasterBand(1).ReadAsArray()
Rs_inst_3Km = np.float_(Rs_inst_3Km)/10
Rs_inst_3Km[Rs_inst_3Km<0]=np.nan
# Get shape LANDSAF data
shape_trans=[dest_Rs_inst_3Km.RasterXSize , dest_Rs_inst_3Km.RasterYSize ]
# Calculate Transmissivity 3Km
Transmissivity_3Km = Rs_inst_3Km/Ra_inst_3Km
Transmissivity_3Km_fileName = os.path.join(output_folder_temp,'Transmissivity_3Km.tif')
SEBAL.save_GeoTiff_proy(Ra_inst_3Km_dest, Transmissivity_3Km, Transmissivity_3Km_fileName, shape_trans, nband=1)
# Reproject Transmissivity to match DEM (now this is done by using the nearest neighbour method)
Transmissivity_inst_dest, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(Transmissivity_3Km_fileName, cos_zn_fileName, method = 3)
Transmissivity_inst = Transmissivity_inst_dest.GetRasterBand(1).ReadAsArray()
Transmissivity_inst[Transmissivity_inst>0.98] = 0.98
Transmissivity_inst_fileName = os.path.join(TRANS_outfolder,'Transmissivity_inst_%s.tif' %Var_name)
SEBAL.save_GeoTiff_proy(Transmissivity_inst_dest, Transmissivity_inst, Transmissivity_inst_fileName, shape, nband=1)
'''
# Extract the method radiation value
Value_Method_Radiation_24 = '%s' %str(ws['I%d' % number].value)
# Extract the data to the method of radiation
if int(Value_Method_Radiation_24) == 2:
Field_Radiation_24 = '%s' %str(ws['K%d' % number].value)
if Field_Radiation_24 == 'None':
# Daily Transmissivity files must be created
Check_Trans_24 = 1
# Create times that are needed to calculate daily Rs (LANDSAF)
Starttime_GMT = datetime.strptime(Startdate,'%Y-%m-%d') + timedelta(hours=-delta_GTM)
Endtime_GMT = Starttime_GMT + timedelta(days=1)
Times = pd.date_range(Starttime_GMT, Endtime_GMT,freq = '30min')
for Time in Times[:-1]:
year_LANDSAF = Time.year
month_LANDSAF = Time.month
day_LANDSAF = Time.day
hour_LANDSAF = Time.hour
min_LANDSAF = Time.minute
# Define the instantaneous LANDSAF file
#re = glob.glob('')
name_Landsaf_inst = 'HDF5_LSASAF_MSG_DSSF_MSG-Disk_%d%02d%02d%02d%02d.tif' %(year_LANDSAF, month_LANDSAF,day_LANDSAF, hour_LANDSAF, min_LANDSAF)
file_Landsaf_inst = os.path.join(DSSF_Folder,name_Landsaf_inst)
# Open the Rs LANDSAF data
dest_Rs_inst_3Km = gdal.Open(file_Landsaf_inst)
Rs_one_3Km = dest_Rs_inst_3Km.GetRasterBand(1).ReadAsArray()
Rs_one_3Km = np.float_(Rs_one_3Km)/10
Rs_one_3Km[Rs_one_3Km < 0]=np.nan
if Time == Times[0]:
Rs_24_3Km_tot = Rs_one_3Km
else:
Rs_24_3Km_tot += Rs_one_3Km
Rs_24_3Km = Rs_24_3Km_tot / len(Times[:-1])
# Reproject the Ra_inst data to match the LANDSAF data
Ra_24_3Km_dest, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(Ra_mountain_24_fileName, file_Landsaf_inst, method = 3)
Ra_24_3Km = Ra_24_3Km_dest.GetRasterBand(1).ReadAsArray()
Ra_24_3Km[Ra_24_3Km==0] = np.nan
# Do gapfilling
Ra_24_3Km = gap_filling(Ra_24_3Km,np.nan)
# Get shape LANDSAF data
shape_trans=[dest_Rs_inst_3Km.RasterXSize , dest_Rs_inst_3Km.RasterYSize ]
# Calculate Transmissivity 3Km
Transmissivity_24_3Km = Rs_24_3Km/Ra_24_3Km
Transmissivity_24_3Km_fileName = os.path.join(temp_folder_PreSEBAL,'Transmissivity_24_3Km.tif')
SEBAL.save_GeoTiff_proy(Ra_24_3Km_dest, Transmissivity_24_3Km, Transmissivity_24_3Km_fileName, shape_trans, nband=1)
# Reproject Transmissivity to match DEM (now this is done by using the nearest neighbour method)
Transmissivity_24_dest, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(Transmissivity_24_3Km_fileName, lon_fileName, method = 3)
Transmissivity_24 = Transmissivity_24_dest.GetRasterBand(1).ReadAsArray()
Transmissivity_24[Transmissivity_24>0.98] = 0.98
Transmissivity_24_fileName = os.path.join(TRANS_outfolder,'Transmissivity_24_%s.tif' %Var_name)
SEBAL.save_GeoTiff_proy(Transmissivity_24_dest, Transmissivity_24, Transmissivity_24_fileName, shape, nband=1)
#################### Calculate NDVI for LANDSAT ##########################################
if Image_Type == 1:
# Define bands used for each Landsat number
if Landsat_nr == 5 or Landsat_nr == 7:
Bands = np.array([1, 2, 3, 4, 5, 7, 6])
elif Landsat_nr == 8:
Bands = np.array([2, 3, 4, 5, 6, 7, 10, 11])
else:
print('Landsat image not supported, use Landsat 5, 7 or 8')
# Open MTL landsat and get the correction parameters
Landsat_meta_fileName = os.path.join(input_folder, '%s_MTL.txt' % Name_Landsat_Image)
Lmin, Lmax, k1_c, k2_c = SEBAL.info_band_metadata(Landsat_meta_fileName, Bands)
# Mean solar exo-atmospheric irradiance for each band (W/m2/microm)
# for the different Landsat images (L5, L7, or L8)
ESUN_L5 = np.array([1983, 1796, 1536, 1031, 220, 83.44])
ESUN_L7 = np.array([1997, 1812, 1533, 1039, 230.8, 84.9])
ESUN_L8 = np.array([1973.28, 1842.68, 1565.17, 963.69, 245, 82.106])
# Open one band - To get the metadata of the landsat images only once (to get the extend)
src_FileName = os.path.join(input_folder, '%s_B2.TIF' % Name_Landsat_Image) # before 10!
ls,band_data,ulx,uly,lrx,lry,x_size_ls,y_size_ls = SEBAL.Get_Extend_Landsat(src_FileName)
# Crop the Landsat images to the DEM extent -
dst_FileName = os.path.join(temp_folder_PreSEBAL,'cropped_LS_b2.tif') # Before 10 !!
# Clip the landsat image to match the DEM map
lsc, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(src_FileName, lon_fileName)
data_LS = lsc.GetRasterBand(1).ReadAsArray()
SEBAL.save_GeoTiff_proy(dest, data_LS, dst_FileName, shape, nband=1)
# Get the extent of the remaining landsat file after clipping based on the DEM file
lsc,band_data,ulx,uly,lrx,lry,x_size_lsc,y_size_lsc = SEBAL.Get_Extend_Landsat(dst_FileName)
# Create the corrected signals of Landsat in 1 array
Reflect = SEBAL.Landsat_Reflect(Bands,input_folder,Name_Landsat_Image,output_folder,shape,Lmax,Lmin,ESUN_L5,ESUN_L7,ESUN_L8,cos_zn_resh,dr,Landsat_nr, cos_zn_fileName)
# Calculate temporal water mask
water_mask_temp=SEBAL.Water_Mask(shape,Reflect)
# Calculate NDVI
NDVI = SEBAL.Calc_NDVI(Reflect)
# Calculate albedo
albedo = SEBAL.Calc_albedo(Reflect)
# Save NDVI
NDVI_FileName = os.path.join(NDVI_outfolder,'NDVI_LS_%s.tif'%Var_name)
SEBAL.save_GeoTiff_proy(dest, NDVI, NDVI_FileName, shape, nband=1)
# Save albedo
albedo_FileName = os.path.join(Albedo_outfolder,'Albedo_LS_%s.tif'%Var_name)
SEBAL.save_GeoTiff_proy(dest, albedo, albedo_FileName, shape, nband=1)
################### Extract Meteo data for Landsat days from SEBAL Excel ##################
# Open the Meteo_Input sheet
ws = wb['Meteo_Input']
# ---------------------------- Instantaneous Air Temperature ------------
# Open meteo data, first try to open as value, otherwise as string (path)
try:
Temp_inst = float(ws['B%d' %number].value) # Instantaneous Air Temperature (°C)
# if the data is not a value, then open as a string
except:
Temp_inst_name = '%s' %str(ws['B%d' %number].value)
Temp_inst_fileName = os.path.join(output_folder, 'Temp', 'Temp_inst_input.tif')
Temp_inst = SEBAL.Reshape_Reproject_Input_data(Temp_inst_name, Temp_inst_fileName, lon_fileName)
try:
RH_inst = float(ws['D%d' %number].value) # Instantaneous Relative humidity (%)
# if the data is not a value, then open as a string
except:
RH_inst_name = '%s' %str(ws['D%d' %number].value)
RH_inst_fileName = os.path.join(output_folder, 'Temp', 'RH_inst_input.tif')
RH_inst = SEBAL.Reshape_Reproject_Input_data(RH_inst_name, RH_inst_fileName, lon_fileName)
esat_inst = 0.6108 * np.exp(17.27 * Temp_inst / (Temp_inst + 237.3))
eact_inst = RH_inst * esat_inst / 100
#################### Calculate NDVI for VIIRS-PROBAV ##########################################
if Image_Type == 2:
if Name_PROBAV_Image == 'None':
offset_all = [-1, 1, -2, 2, -3, 3,-4, 4,-5 ,5 ,-6 , 6, -7, 7, -8, 8]
found_Name_PROBAV_Image = 0
for offset in offset_all:
if found_Name_PROBAV_Image == 1:
continue
else:
try:
Name_PROBAV_Image = SEBAL_RUNS[number + offset]['PROBA_V_name']
if not Name_PROBAV_Image == 'None':
found_Name_PROBAV_Image = 1
except:
pass
# Get the day and time from the PROBA-V
Band_PROBAVhdf_fileName = os.path.join(input_folder, '%s.HDF5' % (Name_PROBAV_Image))
g=gdal.Open(Band_PROBAVhdf_fileName, gdal.GA_ReadOnly)
Meta_data = g.GetMetadata()
Date_PROBAV = str(Meta_data['LEVEL3_RADIOMETRY_BLUE_OBSERVATION_START_DATE'])
year = int(Date_PROBAV.split("-")[0])
month = int(Date_PROBAV.split("-")[1])
day = int(Date_PROBAV.split("-")[2])
Var_name_2 = '%d%02d%02d' %(year, month, day)
# Define the output name
NDVI_FileName = os.path.join(NDVI_outfolder,'NDVI_PROBAV_%s.tif' %Var_name_2)
Albedo_FileName = os.path.join(Albedo_outfolder, 'Albedo_PROBAV_%s.tif' %Var_name_2)
water_mask_temp_FileName = os.path.join(WaterMask_outfolder, 'Water_Mask_PROBAV_%s.tif' %Var_name_2)
else:
NDVI_FileName = os.path.join(NDVI_outfolder,'NDVI_PROBAV_%s.tif' %Var_name)
Albedo_FileName = os.path.join(Albedo_outfolder, 'Albedo_PROBAV_%s.tif' %Var_name)
water_mask_temp_FileName = os.path.join(WaterMask_outfolder, 'Water_Mask_PROBAV_%s.tif' %Var_name)
# vegetation maps that will be generated
if not os.path.exists(NDVI_FileName):
# Define the bands that will be used
bands=['SM', 'B1', 'B2', 'B3', 'B4'] #'SM', 'BLUE', 'RED', 'NIR', 'SWIR'
# Set the index number at 0
index=0
# create a zero array with the shape of the reprojected DEM file
| |
# dist/weewx-4.4.0/bin/weeimport/weeimport.py
#
# Copyright (c) 2009-2019 <NAME> <<EMAIL>> and
# <NAME>
#
# See the file LICENSE.txt for your full rights.
#
"""Module providing the base classes and API for importing observational data
into WeeWX.
"""
from __future__ import with_statement
from __future__ import print_function
from __future__ import absolute_import
# Python imports
import datetime
import logging
import numbers
import re
import sys
import time
from datetime import datetime as dt
import six
from six.moves import input
# WeeWX imports
import weecfg
import weecfg.database
import weewx
import weewx.qc
import weewx.wxservices
from weewx.manager import open_manager_with_config
from weewx.units import unit_constants, unit_nicknames, convertStd, to_std_system, ValueTuple
from weeutil.weeutil import timestamp_to_string, option_as_list, to_int, tobool, get_object, max_with_none
log = logging.getLogger(__name__)
# List of sources we support
SUPPORTED_SOURCES = ['CSV', 'WU', 'Cumulus', 'WD', 'WeatherCat']
# Minimum requirements in any explicit or implicit WeeWX field-to-import field
# map
MINIMUM_MAP = {'dateTime': {'units': 'unix_epoch'},
'usUnits': {'units': None},
'interval': {'units': 'minute'}}
# ============================================================================
# Error Classes
# ============================================================================
class WeeImportOptionError(Exception):
"""Base class of exceptions thrown when encountering an error with a
command line option.
"""
class WeeImportMapError(Exception):
"""Base class of exceptions thrown when encountering an error with an
external source-to-WeeWX field map.
"""
class WeeImportIOError(Exception):
"""Base class of exceptions thrown when encountering an I/O error with an
external source.
"""
class WeeImportFieldError(Exception):
"""Base class of exceptions thrown when encountering an error with a field
from an external source.
"""
class WeeImportDecodeError(Exception):
"""Base class of exceptions thrown when encountering a decode error with an
external source.
"""
# ============================================================================
# class Source
# ============================================================================
class Source(object):
""" Abstract base class used for interacting with an external data source
to import records into the WeeWX archive.
__init__() must define the following properties:
dry_run - Is this a dry run (ie do not save imported records
to archive). [True|False].
calc_missing - Calculate any missing derived observations.
[True|False].
ignore_invalid_data - Ignore any invalid data found in a source field.
[True|False].
tranche - Number of records to be written to archive in a
single transaction. Integer.
interval - Method of determining interval value if interval
field not included in data source.
['config'|'derive'|x] where x is an integer.
Child classes are used to interact with a specific source (eg CSV file,
WU). Any such child classes must define a getRawData() method which:
- gets the raw observation data and returns an iterable yielding data
dicts whose fields can be mapped to a WeeWX archive field
- defines an import data field-to-WeeWX archive field map (self.map)
self.raw_datetime_format - Format of date time data field from which
observation timestamp is to be derived. A
string in Python datetime string format such
as '%Y-%m-%d %H:%M:%S'. If the date time
data field cannot be interpreted as a string
wee_import attempts to interpret the field
as a unix timestamp. If the field is not a
valid unix timestamp an error is raised.
"""
# reg expression to match any HTML tag of the form <...>
_tags = re.compile(r'\<.*\>')
def __init__(self, config_dict, import_config_dict, options):
"""A generic initialisation.
Set some realistic default values for options read from the import
config file. Obtain objects to handle missing derived obs (if required)
and QC on imported data. Parse any --date command line option so we
know what records to import.
"""
# save our WeeWX config dict
self.config_dict = config_dict
# get our import config dict settings
# interval, default to 'derive'
self.interval = import_config_dict.get('interval', 'derive')
# do we ignore invalid data, default to True
self.ignore_invalid_data = tobool(import_config_dict.get('ignore_invalid_data',
True))
# tranche, default to 250
self.tranche = to_int(import_config_dict.get('tranche', 250))
# apply QC, default to True
self.apply_qc = tobool(import_config_dict.get('qc', True))
# calc-missing, default to True
self.calc_missing = tobool(import_config_dict.get('calc_missing', True))
# Some sources include UV index and solar radiation values even if no
# sensor was present. The WeeWX convention is to store the None value
# when a sensor or observation does not exist. Record whether UV and/or
# solar radiation sensor was present.
# UV, default to True
self.UV_sensor = tobool(import_config_dict.get('UV_sensor', True))
# solar, default to True
self.solar_sensor = tobool(import_config_dict.get('solar_sensor', True))
# initialise ignore extreme > 255.0 values for temperature and
# humidity fields for WD imports
self.ignore_extr_th = False
self.db_binding_wx = get_binding(config_dict)
self.dbm = open_manager_with_config(config_dict, self.db_binding_wx,
initialize=True,
default_binding_dict={'table_name': 'archive',
'manager': 'weewx.wxmanager.DaySummaryManager',
'schema': 'schemas.wview_extended.schema'})
# get the unit system used in our db
if self.dbm.std_unit_system is None:
# we have a fresh archive (ie no records) so cannot deduce
# the unit system in use, so go to our config_dict
self.archive_unit_sys = unit_constants[self.config_dict['StdConvert'].get('target_unit',
'US')]
else:
# get our unit system from the archive db
self.archive_unit_sys = self.dbm.std_unit_system
# get ourselves a QC object to do QC on imported records
self.import_QC = weewx.qc.QC(config_dict, parent='weeimport')
# Process our command line options
self.dry_run = options.dry_run
self.verbose = options.verbose
self.no_prompt = options.no_prompt
self.suppress = options.suppress
# By processing any --date, --from and --to options we need to derive
# self.first_ts and self.last_ts; the earliest and latest (inclusive)
# timestamps of data to be imported. If we have no --date, --from or
# --to then set both to None (we then get the default action for each
# import type).
# First we see if we have a valid --date, if not then we look for
# --from and --to.
if options.date or options.date == "":
# there is a --date but is it valid
try:
_first_dt = dt.strptime(options.date, "%Y-%m-%d")
except ValueError:
# Could not convert --date. If we have a --date it must be
# valid otherwise we can't continue so raise it.
_msg = "Invalid --date option specified."
raise WeeImportOptionError(_msg)
else:
# we have a valid date so do some date arithmetic
_last_dt = _first_dt + datetime.timedelta(days=1)
self.first_ts = time.mktime(_first_dt.timetuple())
self.last_ts = time.mktime(_last_dt.timetuple())
elif options.date_from or options.date_to or options.date_from == '' or options.date_to == '':
# There is a --from and/or a --to, but do we have both and are
# they valid.
# try --from first
try:
if 'T' in options.date_from:
_from_dt = dt.strptime(options.date_from, "%Y-%m-%dT%H:%M")
else:
_from_dt = dt.strptime(options.date_from, "%Y-%m-%d")
_from_ts = time.mktime(_from_dt.timetuple())
except TypeError:
# --from not specified we can't continue so raise it
_msg = "Missing --from option. Both --from and --to must be specified."
raise WeeImportOptionError(_msg)
except ValueError:
# could not convert --from, we can't continue so raise it
_msg = "Invalid --from option."
raise WeeImportOptionError(_msg)
# try --to
try:
if 'T' in options.date_to:
_to_dt = dt.strptime(options.date_to, "%Y-%m-%dT%H:%M")
else:
_to_dt = dt.strptime(options.date_to, "%Y-%m-%d")
# since it is just a date we want the end of the day
_to_dt += datetime.timedelta(days=1)
_to_ts = time.mktime(_to_dt.timetuple())
except TypeError:
# --to not specified , we can't continue so raise it
_msg = "Missing --to option. Both --from and --to must be specified."
raise WeeImportOptionError(_msg)
except ValueError:
# could not convert --to, we can't continue so raise it
_msg = "Invalid --to option."
raise WeeImportOptionError(_msg)
# If we made it here we have a _from_ts and _to_ts. Do a simple
# error check first.
if _from_ts > _to_ts:
# from is later than to, raise it
_msg = "--from value is later than --to value."
raise WeeImportOptionError(_msg)
self.first_ts = _from_ts
self.last_ts = _to_ts
else:
# no --date or --from/--to so we take the default, set all to None
self.first_ts = None
self.last_ts = None
# initialise a few properties we will need during the import
# answer flags
self.ans = None
self.interval_ans = None
# properties to help with processing multi-period imports
self.period_no = None
# total records processed
self.total_rec_proc = 0
# total unique records identified
self.total_unique_rec = 0
# total duplicate records identified
self.total_duplicate_rec = 0
# time we started to first save
self.t1 = None
# time taken to process
self.tdiff = None
# earliest timestamp imported
self.earliest_ts = None
# latest timestamp imported
self.latest_ts = None
# initialise two sets to hold timestamps of records for which we
# encountered duplicates
# duplicates seen over all periods
self.duplicates = set()
# duplicates seen over the current period
self.period_duplicates = set()
@staticmethod
def sourceFactory(options, args):
"""Factory to produce a Source object.
Returns an appropriate object depending on the source type. Raises a
weewx.UnsupportedFeature error if an object could not be created.
import os
import json
import boto3
import time
import requests
import pyotp
import logging
import random
from datetime import datetime
from requests.exceptions import HTTPError
from requests.exceptions import ConnectionError
from ask_sdk_core.skill_builder import SkillBuilder
from ask_sdk_core.dispatch_components import AbstractRequestHandler
from ask_sdk_core.dispatch_components import AbstractExceptionHandler
from ask_sdk_core.utils import is_request_type, is_intent_name
from ask_sdk_core.handler_input import HandlerInput
from ask_sdk_model.ui import StandardCard
from ask_sdk_model.ui import SimpleCard
from ask_sdk_model.interfaces.display import (
ImageInstance, Image, RenderTemplateDirective, ListTemplate1,
BackButtonBehavior, ListItem, BodyTemplate2, BodyTemplate1)
sb = SkillBuilder()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
authorized_users = {}
authorized_users[482] = "UU7SVR3XXVDSF7WQ"
authorized_users[738] = "U6VYRDPQZ5B3V2MW"
authorized_users[351] = "6GQFRWG6VRQ7OSKX"
class PipelineExecutionSummary:
def __init__(self):
self.pipelineExecutionId = None
self.status = None
self.startTime = None
self.lastUpdateTime = None
self.sourceRevisions = []
self.trigger = None
class SourceRevision:
def __init__(self):
self.actionName = None
self.revisionId = None
self.revisionSummary = None
self.revisionUrl = None
class PipelineTrigger:
def __init__(self):
self.triggerType = None
self.triggerDetail = None
class User:
def __init__(self):
self.userId = None
self.name = None
self.secret_key = None
# Debug helper function to write object into Json and handle datetimes
def serialize(obj):
if isinstance(obj, datetime):
return str(obj)
return obj.__dict__
# Helper function to print json in an easy to read format
def print_json(json_string):
print(json.dumps(json_string,sort_keys=True,indent = 4,default=serialize))
class LaunchRequestHandler(AbstractRequestHandler):
"""Handler for Skill Launch."""
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return is_request_type("LaunchRequest")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> Response
introduction_speech = [
"Hey, I'm <NAME>. What can I do for you?",
"Ops Buddy here, What do you need?",
"Hey there, what can I help you with?",
"Hey you're friendly neighborhood Ops Buddy here? What can I do for you today?",
"Hi I am ops buddy, you're personal ops assistant. How can I help you?",
"Yeah yeah, I'm ops buddy, blah blah blah, what do you want?"
]
card_text = "Available Operations\n- Site Health Check \n- Last Deployment Info \n- Deploy to Production"
# Randomly pick an introduction from the list of possibilities; this helps make the app less repetitive and more engaging
speech_selection = random.randint(0, len(introduction_speech) - 1)
speech_text = introduction_speech[speech_selection]
# Build repsonse to send back to Alexa
handler_input.response_builder.speak(speech_text).set_card(StandardCard("Ops Buddy",card_text)).set_should_end_session(False)
return handler_input.response_builder.response
class DeployProdIntentHandler(AbstractRequestHandler):
"""Handler for DeployProd Intent."""
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return is_intent_name("DeployProdIntent")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> Response
# Extract User Input
slots = handler_input.request_envelope.request.intent.slots
user_id = int(slots.get("user_id").value)
passcode = slots.get("passcode").value
# Print info to logs for debugging purposes
print("Deploy Production")
print(f"UserID: {user_id}")
print(f"Passcode: {passcode}")
# Get the pipeline name from the environment variables
pipeline_name = os.getenv("pipeline_name")
# Trigger the Deployment if auth passes
pipeline_execution_id = self.trigger_deployment(pipeline_name,user_id,passcode)
# Generate Response for Deployment Status
if pipeline_execution_id:
speech_text = "<speak>Deployment Approved. Triggering the deployment now.<break time=\"1s\"/>Is there anything else I can do for you?</speak>"
card_text = f"Deployment Approved\nPipeline Execution ID: {pipeline_execution_id}"
handler_input.response_builder.speak(speech_text).set_card(StandardCard("Production Deployment Status",card_text)).set_should_end_session(False)
print("Deployment Approved")
else:
speech_text = "<speak>Invalid Authorization. Deployment Canceled.<break time=\"1s\"/>Is there anything else I can do for you?</speak>"
card_text = f"Deployment Canceled\nAuthorization Failure"
handler_input.response_builder.speak(speech_text).set_card(SimpleCard("Production Deployment Status", card_text)).set_should_end_session(False)
print("Deployment Canceled")
# Return the response
return handler_input.response_builder.response
def trigger_deployment(self,pipeline_name,user_id,passcode):
pipeline_execution_id = None
if pipeline_name is None:
print("No Pipeline Name was provided")
return None
# Determine if a deployment has been authorized
deployment_authorized = self.verify_passcode(user_id,passcode)
# If the user is authorized to deploy, deploy site
if deployment_authorized:
print(f"User {user_id} has been authenticated and triggered a deployment")
# Generate Codepipeline client
client = boto3.client('codepipeline')
# Trigger Deployment
response = client.start_pipeline_execution(
name=pipeline_name
)
# Get the pipeline execution id
pipeline_execution_id = response.get("pipelineExecutionId")
else:
print(f"User {user_id} attempted to trigger a deployment but failed authentication")
# If a deployment has been triggered, return the execution id otherwise return None
return pipeline_execution_id
# Verify a OTP for a specific user
def verify_passcode(self,userid,passcode):
# Get Secret Key by UserId
secret_key = authorized_users.get(userid)
# Ensure a secret key was retrieved
if secret_key == None:
print("The provided user id was not found. Authorization failed.")
return False
# Create TOTP Object
totp = pyotp.TOTP(secret_key)
# Verify TOTP Code
return totp.verify(passcode)
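# Illustrative sketch (for local testing only; not part of the original skill): generate
# the current TOTP value for a user so the spoken passcode flow can be exercised end to end.
def _example_current_passcode(userid):
    secret_key = authorized_users.get(userid)
    return pyotp.TOTP(secret_key).now() if secret_key else None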
class HealthCheckIntentHandler(AbstractRequestHandler):
"""Handler for HealthCheck Intent."""
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return is_intent_name("HealthCheckIntent")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> Response
print("Executing Healthcheck Intent")
# Get website url from environment variables
website_url = os.getenv("website_url")
# Call the website to determine the HTTP status code it returns
site_healthy,status_code = self.get_website_status(website_url)
# Print Info to logging for debugging
print(f"Website Url: {website_url}")
print(f"Site Health Status : {site_healthy}")
print(f"Site Health Status Code: {status_code}")
# Depending on the health status of the site return the proper message to the user
if site_healthy:
reprompt = "What else can I do for you?"
card_text = speech_text = f"The site is healthy and returned an HTTP status code of {str(status_code)}."
speech_text = f"<speak>The site is healthy and returned an HTTP status code of {str(status_code)}. <break time=\"1s\"/>Is there anything else I can do for you?</speak>"
handler_input.response_builder.speak(speech_text).ask(reprompt).set_card(SimpleCard("Health Check", card_text)).set_should_end_session(False)
elif not site_healthy and status_code:
reprompt = "What else can I do for you?"
card_text =f"The site is unhealthy and returned an HTTP status code of {str(status_code)}."
speech_text =f"<speak>The site is unhealthy and returned an HTTP status code of {str(status_code)}.<break time=\"1s\"/>Is there anything else I can do for you?</speak>"
handler_input.response_builder.speak(speech_text).ask(reprompt).set_card(SimpleCard("Health Check", card_text)).set_should_end_session(False)
else:
reprompt = "What else can I do for you?"
card_text ="There was an issue when I attempted to reach the site"
speech_text ="<speak>There was an issue when I attempted to reach the site.<break time=\"1s\"/>Is there anything else I can do for you?</speak>"
handler_input.response_builder.speak(speech_text).ask(reprompt).set_card(SimpleCard("Health Check", card_text)).set_should_end_session(False)
return handler_input.response_builder.response
# Get website health check
def get_website_status(self,url):
status = None
status_code = None
try:
response = requests.get(url)
if response.status_code == 200:
status = True
status_code = response.status_code
else:
status = False
status_code = response.status_code
except:
status = False
return status,status_code
class LastDeploymentInfoIntent(AbstractRequestHandler):
"""Handler for retriving info about the last deployment """
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return is_intent_name("LastDeploymentInfoIntent")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> Response
print("Executing Last Deployment Info Intent")
# Get pipeline name from environment variables
pipeline_name = os.getenv("pipeline_name")
print(f"Pipeline Name: {pipeline_name}")
pipeline_executions = self.get_pipeline_executions(pipeline_name)
        # Grab the latest execution; the response from AWS appears to be sorted from newest to oldest
        latest_pipeline_execution = pipeline_executions[0]
        # Get the deployment times and format them the way Alexa will interpret them
        start_time = latest_pipeline_execution.startTime.strftime("%I:%M%p")
        start_date = latest_pipeline_execution.startTime.strftime("%Y%m%d")
        finish_time = latest_pipeline_execution.lastUpdateTime.strftime("%I:%M%p")
        finish_date = latest_pipeline_execution.lastUpdateTime.strftime("%Y%m%d")
        start_datetime = latest_pipeline_execution.startTime.strftime("%m/%d/%Y %I:%M %p")
        finish_datetime = latest_pipeline_execution.lastUpdateTime.strftime("%m/%d/%Y %I:%M %p")
        # Create speech and card text
        reprompt = "What else can I do for you?"
        speech_text = f"<speak>The last deployment {latest_pipeline_execution.status}. It was started at {start_time} UTC on <say-as interpret-as=\"date\">{start_date}</say-as> and was last updated at {finish_time} UTC on <say-as interpret-as=\"date\">{finish_date}</say-as>.<break time=\"1s\"/>Is there anything else I can do for you?</speak>"
        card_text = f"Deployment ID: {latest_pipeline_execution.pipelineExecutionId}\nStatus: {latest_pipeline_execution.status}\nStart Time: {start_datetime} UTC\nLast Update: {finish_datetime} UTC"
        # Build response to send back to Alexa
handler_input.response_builder.speak(speech_text).ask(reprompt).set_card(StandardCard("Last Deployment Info", card_text)).set_should_end_session(False)
return handler_input.response_builder.response
# Get the pipeline executions
def get_pipeline_executions(self,pipeline_name):
pipeline_execution_summaries = []
client = boto3.client('codepipeline')
response = client.list_pipeline_executions(
pipelineName=pipeline_name,
maxResults=1,
)
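        # The CodePipeline response is expected to look roughly like:
        #   {"pipelineExecutionSummaries": [{"pipelineExecutionId": "...", "status": "Succeeded",
        #     "startTime": datetime, "lastUpdateTime": datetime, "sourceRevisions": [...],
        #     "trigger": {"triggerType": "...", "triggerDetail": "..."}}]}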
for item in response.get("pipelineExecutionSummaries"):
pipeline_execution_summaries.append(self.parse_pipeline_execution(item))
return pipeline_execution_summaries
# Parse pipeline info
def parse_pipeline_execution(self,response_item):
# Parse Pipeline Execution Summary Info
pes = PipelineExecutionSummary()
pes.pipelineExecutionId = response_item.get("pipelineExecutionId")
pes.status = response_item.get("status")
pes.startTime = response_item.get("startTime")
pes.lastUpdateTime = response_item.get("lastUpdateTime")
# Parse Source Revisions
        for sr in response_item.get("sourceRevisions", []):
temp = SourceRevision()
temp.actionName = sr.get("actionName")
temp.revisionId = sr.get("revisionId")
temp.revisionSummary = sr.get("revisionSummary")
temp.revisionUrl = sr.get("revisionUrl")
# Add to the summary list
pes.sourceRevisions.append(temp)
        # Parse trigger info; the API nests these fields under the "trigger" key
        trigger_item = response_item.get("trigger", {}) or {}
        trigger = PipelineTrigger()
        trigger.triggerType = trigger_item.get("triggerType")
        trigger.triggerDetail = trigger_item.get("triggerDetail")
        # Add trigger info to the summary object
pes.trigger = trigger
return pes
class HelpIntentHandler(AbstractRequestHandler):
"""Handler for Help Intent."""
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return is_intent_name("AMAZON.HelpIntent")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> Response
reprompt = "What else can I do for you?"
card_text = "Available Operations\n- Site Health Check \n- Last Deployment Info \n- Deploy to Production"
speech_text = "<speak>You can ask me to check the health of your site, get information about the last deployment or even deploy to production.<break time=\"1s\"/>What can I do for you?</speak>"
handler_input.response_builder.speak(speech_text).ask(reprompt).set_card(SimpleCard("What can I do?", card_text)).set_should_end_session(False)
return handler_input.response_builder.response
class CancelOrStopIntentHandler(AbstractRequestHandler):
"""Single handler for Cancel and Stop Intent."""
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return (is_intent_name("AMAZON.CancelIntent")(handler_input) or
is_intent_name("AMAZON.StopIntent")(handler_input) or
| |
is None:
self.name = f"js-{self.filename.replace('_', '-')}"
class _WebEngineScripts(QObject):
def __init__(self, tab, parent=None):
super().__init__(parent)
self._tab = tab
self._widget = cast(QWidget, None)
self._greasemonkey = greasemonkey.gm_manager
def connect_signals(self):
"""Connect signals to our private slots."""
config.instance.changed.connect(self._on_config_changed)
self._tab.search.cleared.connect(functools.partial(
self._update_stylesheet, searching=False))
self._tab.search.finished.connect(self._update_stylesheet)
@pyqtSlot(str)
def _on_config_changed(self, option):
if option in ['scrolling.bar', 'content.user_stylesheets']:
self._init_stylesheet()
self._update_stylesheet()
@pyqtSlot(bool)
def _update_stylesheet(self, searching=False):
"""Update the custom stylesheet in existing tabs."""
css = shared.get_user_stylesheet(searching=searching)
code = javascript.assemble('stylesheet', 'set_css', css)
self._tab.run_js_async(code)
def _inject_js(self, name, js_code, *,
world=QWebEngineScript.ApplicationWorld,
injection_point=QWebEngineScript.DocumentCreation,
subframes=False):
"""Inject the given script to run early on a page load."""
script = QWebEngineScript()
script.setInjectionPoint(injection_point)
script.setSourceCode(js_code)
script.setWorldId(world)
script.setRunsOnSubFrames(subframes)
script.setName(f'_qute_{name}')
self._widget.page().scripts().insert(script)
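        # Scripts in the page's QWebEngineScriptCollection are re-injected on
        # every load in this tab until they are explicitly removed again
        # (e.g. via _remove_js below).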
def _remove_js(self, name):
"""Remove an early QWebEngineScript."""
scripts = self._widget.page().scripts()
script = scripts.findScript(f'_qute_{name}')
if not script.isNull():
scripts.remove(script)
def init(self):
"""Initialize global qutebrowser JavaScript."""
js_code = javascript.wrap_global(
'scripts',
resources.read_file('javascript/scroll.js'),
resources.read_file('javascript/webelem.js'),
resources.read_file('javascript/caret.js'),
)
# FIXME:qtwebengine what about subframes=True?
self._inject_js('js', js_code, subframes=True)
self._init_stylesheet()
self._greasemonkey.scripts_reloaded.connect(
self._inject_all_greasemonkey_scripts)
self._inject_all_greasemonkey_scripts()
self._inject_site_specific_quirks()
def _init_stylesheet(self):
"""Initialize custom stylesheets.
Partially inspired by QupZilla:
https://github.com/QupZilla/qupzilla/blob/v2.0/src/lib/app/mainapplication.cpp#L1063-L1101
"""
self._remove_js('stylesheet')
css = shared.get_user_stylesheet()
js_code = javascript.wrap_global(
'stylesheet',
resources.read_file('javascript/stylesheet.js'),
javascript.assemble('stylesheet', 'set_css', css),
)
self._inject_js('stylesheet', js_code, subframes=True)
@pyqtSlot()
def _inject_all_greasemonkey_scripts(self):
scripts = self._greasemonkey.all_scripts()
self._inject_greasemonkey_scripts(scripts)
def _remove_all_greasemonkey_scripts(self):
page_scripts = self._widget.page().scripts()
for script in page_scripts.toList():
if script.name().startswith("GM-"):
log.greasemonkey.debug('Removing script: {}'
.format(script.name()))
removed = page_scripts.remove(script)
assert removed, script.name()
def _inject_greasemonkey_scripts(self, scripts):
"""Register user JavaScript files with the current tab.
Args:
scripts: A list of GreasemonkeyScripts.
"""
if sip.isdeleted(self._widget):
return
# Since we are inserting scripts into a per-tab collection,
# rather than just injecting scripts on page load, we need to
        # make sure we replace existing scripts, not just add new ones,
        # while taking care not to remove any other scripts that might
# have been added elsewhere, like the one for stylesheets.
page_scripts = self._widget.page().scripts()
self._remove_all_greasemonkey_scripts()
for script in scripts:
new_script = QWebEngineScript()
try:
world = int(script.jsworld)
if not 0 <= world <= qtutils.MAX_WORLD_ID:
log.greasemonkey.error(
f"script {script.name} has invalid value for '@qute-js-world'"
f": {script.jsworld}, should be between 0 and "
f"{qtutils.MAX_WORLD_ID}")
continue
except ValueError:
try:
world = _JS_WORLD_MAP[usertypes.JsWorld[script.jsworld.lower()]]
except KeyError:
log.greasemonkey.error(
f"script {script.name} has invalid value for '@qute-js-world'"
f": {script.jsworld}")
continue
new_script.setWorldId(world)
# Corresponds to "@run-at document-end" which is the default according to
# https://wiki.greasespot.net/Metadata_Block#.40run-at - however,
# QtWebEngine uses QWebEngineScript.Deferred (@run-at document-idle) as
# default.
#
# NOTE that this needs to be done before setSourceCode, so that
# QtWebEngine's parsing of GreaseMonkey tags will override it if there is a
# @run-at comment.
new_script.setInjectionPoint(QWebEngineScript.DocumentReady)
new_script.setSourceCode(script.code())
new_script.setName(f"GM-{script.name}")
new_script.setRunsOnSubFrames(script.runs_on_sub_frames)
if script.needs_document_end_workaround():
log.greasemonkey.debug(
f"Forcing @run-at document-end for {script.name}")
new_script.setInjectionPoint(QWebEngineScript.DocumentReady)
log.greasemonkey.debug(f'adding script: {new_script.name()}')
page_scripts.insert(new_script)
def _inject_site_specific_quirks(self):
"""Add site-specific quirk scripts."""
if not config.val.content.site_specific_quirks.enabled:
return
versions = version.qtwebengine_versions()
quirks = [
_Quirk(
'whatsapp_web',
injection_point=QWebEngineScript.DocumentReady,
world=QWebEngineScript.ApplicationWorld,
),
_Quirk('discord'),
_Quirk(
'googledocs',
# will be an UA quirk once we set the JS UA as well
name='ua-googledocs',
),
_Quirk(
'string_replaceall',
predicate=versions.webengine < utils.VersionNumber(5, 15, 3),
),
_Quirk(
'globalthis',
predicate=versions.webengine < utils.VersionNumber(5, 13),
),
_Quirk(
'object_fromentries',
predicate=versions.webengine < utils.VersionNumber(5, 13),
)
]
for quirk in quirks:
if not quirk.predicate:
continue
src = resources.read_file(f'javascript/quirks/{quirk.filename}.user.js')
if quirk.name not in config.val.content.site_specific_quirks.skip:
self._inject_js(
f'quirk_{quirk.filename}',
src,
world=quirk.world,
injection_point=quirk.injection_point,
)
class WebEngineTabPrivate(browsertab.AbstractTabPrivate):
"""QtWebEngine-related methods which aren't part of the public API."""
def networkaccessmanager(self):
return None
def user_agent(self):
return None
def clear_ssl_errors(self):
raise browsertab.UnsupportedOperationError
def event_target(self):
return self._widget.render_widget()
def shutdown(self):
self._tab.shutting_down.emit()
self._tab.action.exit_fullscreen()
self._widget.shutdown()
def run_js_sync(self, code):
raise browsertab.UnsupportedOperationError
def _init_inspector(self, splitter, win_id, parent=None):
return webengineinspector.WebEngineInspector(splitter, win_id, parent)
class WebEngineTab(browsertab.AbstractTab):
"""A QtWebEngine tab in the browser.
Signals:
abort_questions: Emitted when a new load started or we're shutting
down.
"""
abort_questions = pyqtSignal()
def __init__(self, *, win_id, mode_manager, private, parent=None):
super().__init__(win_id=win_id,
mode_manager=mode_manager,
private=private,
parent=parent)
widget = webview.WebEngineView(tabdata=self.data, win_id=win_id,
private=private)
self.history = WebEngineHistory(tab=self)
self.scroller = WebEngineScroller(tab=self, parent=self)
self.caret = WebEngineCaret(mode_manager=mode_manager,
tab=self, parent=self)
self.zoom = WebEngineZoom(tab=self, parent=self)
self.search = WebEngineSearch(tab=self, parent=self)
self.printing = WebEnginePrinting(tab=self)
self.elements = WebEngineElements(tab=self)
self.action = WebEngineAction(tab=self)
self.audio = WebEngineAudio(tab=self, parent=self)
self.private_api = WebEngineTabPrivate(mode_manager=mode_manager,
tab=self)
self._permissions = _WebEnginePermissions(tab=self, parent=self)
self._scripts = _WebEngineScripts(tab=self, parent=self)
# We're assigning settings in _set_widget
self.settings = webenginesettings.WebEngineSettings(settings=None)
self._set_widget(widget)
self._connect_signals()
self.backend = usertypes.Backend.QtWebEngine
self._child_event_filter = None
self._saved_zoom = None
self._scripts.init()
def _set_widget(self, widget):
# pylint: disable=protected-access
super()._set_widget(widget)
self._permissions._widget = widget
self._scripts._widget = widget
def _install_event_filter(self):
fp = self._widget.focusProxy()
if fp is not None:
fp.installEventFilter(self._tab_event_filter)
self._child_event_filter = eventfilter.ChildEventFilter(
eventfilter=self._tab_event_filter,
widget=self._widget,
parent=self)
self._widget.installEventFilter(self._child_event_filter)
@pyqtSlot()
def _restore_zoom(self):
if sip.isdeleted(self._widget):
# https://github.com/qutebrowser/qutebrowser/issues/3498
return
if self._saved_zoom is None:
return
self.zoom.set_factor(self._saved_zoom)
self._saved_zoom = None
def load_url(self, url):
"""Load the given URL in this tab.
Arguments:
url: The QUrl to load.
"""
if sip.isdeleted(self._widget):
# https://github.com/qutebrowser/qutebrowser/issues/3896
return
self._saved_zoom = self.zoom.factor()
self._load_url_prepare(url)
self._widget.load(url)
def url(self, *, requested=False):
page = self._widget.page()
if requested:
return page.requestedUrl()
else:
return page.url()
def dump_async(self, callback, *, plain=False):
if plain:
self._widget.page().toPlainText(callback)
else:
self._widget.page().toHtml(callback)
def run_js_async(self, code, callback=None, *, world=None):
world_id_type = Union[QWebEngineScript.ScriptWorldId, int]
if world is None:
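            # qutebrowser runs its internal scripts in the isolated
            # ApplicationWorld so they don't interfere with the page's own JS.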
world_id: world_id_type = QWebEngineScript.ApplicationWorld
elif isinstance(world, int):
world_id = world
if not 0 <= world_id <= qtutils.MAX_WORLD_ID:
raise browsertab.WebTabError(
"World ID should be between 0 and {}"
.format(qtutils.MAX_WORLD_ID))
else:
world_id = _JS_WORLD_MAP[world]
if callback is None:
self._widget.page().runJavaScript(code, world_id)
else:
self._widget.page().runJavaScript(code, world_id, callback)
def reload(self, *, force=False):
if force:
action = QWebEnginePage.ReloadAndBypassCache
else:
action = QWebEnginePage.Reload
self._widget.triggerPageAction(action)
def stop(self):
self._widget.stop()
def title(self):
return self._widget.title()
def renderer_process_pid(self) -> Optional[int]:
page = self._widget.page()
try:
return page.renderProcessPid()
except AttributeError:
# Added in Qt 5.15
return None
def icon(self):
return self._widget.icon()
def set_html(self, html, base_url=QUrl()):
# FIXME:qtwebengine
# check this and raise an exception if too big:
# Warning: The content will be percent encoded before being sent to the
# renderer via IPC. This may increase its size. The maximum size of the
# percent encoded content is 2 megabytes minus 30 bytes.
self._widget.setHtml(html, base_url)
def _show_error_page(self, url, error):
"""Show an error page in the tab."""
log.misc.debug("Showing error page for {}".format(error))
url_string = url.toDisplayString()
error_page = jinja.render(
'error.html',
title="Error loading page: {}".format(url_string),
url=url_string, error=error)
self.set_html(error_page)
@pyqtSlot()
def _on_history_trigger(self):
try:
self._widget.page()
except RuntimeError:
# Looks like this slot can be triggered on destroyed tabs:
# https://crashes.qutebrowser.org/view/3abffbed (Qt 5.9.1)
# wrapped C/C++ object of type WebEngineView has been deleted
log.misc.debug("Ignoring history trigger for destroyed tab")
return
url = self.url()
requested_url = self.url(requested=True)
# Don't save the title if it's generated from the URL
title = self.title()
title_url = QUrl(url)
title_url.setScheme('')
title_url_str = title_url.toDisplayString(
QUrl.RemoveScheme) # type: ignore[arg-type]
if title == title_url_str.strip('/'):
title = ""
# Don't add history entry if the URL is invalid anyways
if not url.isValid():
log.misc.debug("Ignoring invalid URL being added to history")
return
self.history_item_triggered.emit(url, requested_url, title)
@pyqtSlot(QUrl, 'QAuthenticator*', 'QString')
def _on_proxy_authentication_required(self, url, authenticator,
proxy_host):
"""Called when a proxy needs authentication."""
msg = "<b>{}</b> requires a username and password.".format(
html_utils.escape(proxy_host))
urlstr = url.toString(QUrl.RemovePassword | QUrl.FullyEncoded)
answer = message.ask(
title="Proxy authentication required", text=msg,
mode=usertypes.PromptMode.user_pwd,
abort_on=[self.abort_questions], url=urlstr)
if answer is None:
sip.assign(authenticator, QAuthenticator())
return
authenticator.setUser(answer.user)
authenticator.setPassword(answer.password)
@pyqtSlot(QUrl, 'QAuthenticator*')
def _on_authentication_required(self, url, authenticator):
log.network.debug("Authentication requested for {}, netrc_used {}"
.format(url.toDisplayString(), self.data.netrc_used))
netrc_success = False
if not self.data.netrc_used:
self.data.netrc_used = True
netrc_success = shared.netrc_authentication(url, authenticator)
if not netrc_success:
log.network.debug("Asking for credentials")
answer = shared.authentication_required(
url, authenticator, abort_on=[self.abort_questions])
if not netrc_success and answer is None:
log.network.debug("Aborting auth")
sip.assign(authenticator, QAuthenticator())
@pyqtSlot()
def _on_load_started(self):
"""Clear search when a new load is started if needed."""
# WORKAROUND for
# https://bugreports.qt.io/browse/QTBUG-61506
# (seems to be back in later Qt versions as well)
self.search.clear()
super()._on_load_started()
self.data.netrc_used = False
@pyqtSlot('qint64')
def _on_renderer_process_pid_changed(self, pid):
log.webview.debug("Renderer process PID for tab {}: {}"
.format(self.tab_id, pid))
@pyqtSlot(QWebEnginePage.RenderProcessTerminationStatus, int)
def _on_render_process_terminated(self, status, exitcode):
"""Show an error when the renderer process terminated."""
if (status == QWebEnginePage.AbnormalTerminationStatus and
exitcode == 256):
# WORKAROUND for https://bugreports.qt.io/browse/QTBUG-58697
status = QWebEnginePage.CrashedTerminationStatus
status_map = {
QWebEnginePage.NormalTerminationStatus:
browsertab.TerminationStatus.normal,
QWebEnginePage.AbnormalTerminationStatus:
browsertab.TerminationStatus.abnormal,
QWebEnginePage.CrashedTerminationStatus:
browsertab.TerminationStatus.crashed,
QWebEnginePage.KilledTerminationStatus:
browsertab.TerminationStatus.killed,
-1:
browsertab.TerminationStatus.unknown,
}
self.renderer_process_terminated.emit(status_map[status], exitcode)
def _error_page_workaround(self, js_enabled, html):
"""Check if we're displaying a Chromium error page.
This gets called if we got a loadFinished(False), so we can display at
least some error page in situations where Chromium's can't be
displayed.
WORKAROUND for https://bugreports.qt.io/browse/QTBUG-66643
WORKAROUND for https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=882805
"""
match = re.search(r'"errorCode":"([^"]*)"', html)
if match is None:
return
error = match.group(1)
log.webview.error("Load error: {}".format(error))
missing_jst = 'jstProcess(' in html and 'jstProcess=' not in html
if js_enabled | |
262.4, 265.5, 289.1],
[289.5, 264.5, 275.2, 256.3, 259.8, 290.0, 260.7, 272.7],
[281.6, 278.4, 266.6, 264.4, 264.6, 264.2, 281.8, 271.2],
[285.1, 282.4, 289.6, 262.2, 285.0, 273.2, 257.9, 280.2],
[290.3, 284.4, 299.6, 266.6, 261.0, 273.5, 274.5, 284.6],
],
[
[270.8, 282.5, 290.8, 266.6, 285.0, 275.8, 290.5, 268.7],
[281.1, 283.5, 279.0, 272.2, 276.8, 280.3, 272.9, 275.6],
[283.8, 269.0, 276.2, 265.1, 283.9, 285.1, 280.4, 273.9],
[271.5, 280.2, 280.5, 278.4, 265.4, 271.7, 287.2, 261.4],
[290.3, 251.7, 269.1, 279.9, 281.1, 270.9, 259.6, 284.7],
],
[
[279.0, 264.6, 274.8, 282.1, 271.7, 254.4, 268.8, 271.1],
[293.9, 283.5, 265.1, 263.8, 278.4, 263.5, 270.8, 270.8],
[266.6, 257.7, 277.7, 275.2, 257.4, 269.6, 289.5, 269.2],
[274.4, 287.4, 277.3, 257.5, 269.0, 271.2, 272.6, 272.8],
[272.5, 271.5, 260.6, 274.3, 274.7, 262.7, 260.6, 253.6],
],
[
[278.7, 267.4, 279.0, 271.9, 269.8, 260.8, 284.9, 282.4],
[288.6, 262.9, 260.4, 272.2, 271.1, 280.6, 273.7, 282.8],
[272.1, 264.7, 284.6, 299.6, 258.7, 265.3, 269.5, 276.7],
[286.5, 271.9, 282.3, 266.2, 277.7, 260.4, 267.9, 287.9],
[269.8, 255.4, 276.4, 281.8, 266.6, 275.7, 288.3, 265.8],
],
[
[261.3, 245.6, 265.9, 267.4, 266.7, 276.5, 272.7, 256.9],
[264.1, 285.6, 278.5, 269.2, 268.6, 259.6, 253.3, 260.1],
[272.9, 266.8, 278.3, 280.0, 283.0, 281.2, 276.7, 275.0],
[273.1, 261.5, 276.6, 272.7, 280.9, 287.7, 273.2, 274.7],
[285.0, 271.5, 271.9, 264.1, 278.7, 273.1, 271.5, 255.9],
],
[
[264.6, 288.9, 278.1, 253.0, 281.4, 294.3, 252.1, 260.7],
[273.0, 275.1, 283.2, 256.1, 284.4, 283.8, 274.2, 288.1],
[260.1, 269.9, 277.9, 281.7, 282.4, 280.8, 278.3, 278.7],
[275.0, 274.1, 281.0, 269.8, 276.6, 276.2, 263.7, 264.0],
[280.4, 280.4, 257.8, 249.8, 275.1, 265.2, 261.9, 285.8],
],
[
[269.3, 274.1, 277.9, 265.4, 272.4, 274.9, 272.8, 270.7],
[276.4, 280.4, 294.0, 260.7, 281.6, 271.0, 283.6, 277.4],
[278.9, 257.9, 268.4, 279.0, 278.0, 276.4, 260.1, 260.9],
[282.2, 272.1, 249.6, 289.8, 269.7, 280.0, 280.9, 266.1],
[251.9, 269.4, 270.0, 278.7, 265.4, 271.9, 282.9, 256.7],
],
[
[258.5, 291.3, 274.2, 273.1, 276.9, 280.7, 275.0, 259.9],
[262.6, 266.9, 261.4, 274.7, 267.8, 296.9, 271.9, 261.0],
[266.9, 273.3, 274.6, 274.2, 264.5, 271.5, 288.2, 289.1],
[270.4, 288.9, 276.0, 268.6, 277.1, 277.8, 277.2, 284.2],
[279.6, 265.8, 280.9, 295.2, 255.6, 269.8, 265.8, 259.3],
],
[
[261.9, 275.9, 262.0, 273.1, 268.1, 277.6, 265.8, 285.0],
[260.2, 280.5, 262.2, 263.6, 264.0, 275.7, 262.7, 286.0],
[250.8, 284.8, 260.6, 272.9, 290.0, 264.2, 266.3, 264.6],
[278.6, 292.3, 272.7, 284.3, 285.9, 278.1, 273.1, 272.0],
[248.7, 268.6, 280.3, 274.9, 272.8, 298.1, 272.7, 281.5],
],
[
[282.3, 279.1, 265.4, 269.3, 258.5, 264.1, 272.3, 279.0],
[268.5, 274.1, 265.4, 256.7, 279.8, 275.6, 270.7, 285.5],
[269.1, 297.5, 283.9, 244.9, 258.4, 272.7, 265.2, 265.2],
[289.8, 281.7, 278.2, 299.5, 281.1, 270.7, 269.4, 275.4],
[273.0, 287.2, 272.5, 274.0, 287.7, 275.6, 278.4, 266.4],
],
[
[287.8, 302.7, 261.9, 270.8, 285.9, 285.5, 262.4, 274.5],
[281.9, 273.0, 268.4, 265.0, 279.0, 258.6, 266.1, 280.5],
[274.5, 277.5, 283.3, 266.4, 287.0, 270.0, 265.0, 269.0],
[287.4, 257.5, 269.6, 275.0, 278.6, 287.2, 279.4, 282.2],
[261.1, 265.7, 281.6, 271.7, 278.0, 272.4, 278.9, 264.4],
],
[
[280.2, 269.2, 247.1, 286.4, 273.2, 270.5, 284.3, 264.1],
[280.4, 264.9, 274.1, 275.1, 273.8, 286.6, 286.8, 276.2],
[264.1, 274.5, 276.3, 263.8, 277.7, 265.6, 269.1, 271.3],
[269.9, 287.5, 283.1, 267.8, 272.5, 272.0, 268.1, 291.4],
[267.6, 290.6, 277.0, 283.0, 285.9, 261.4, 274.7, 275.9],
],
[
[295.2, 298.9, 273.2, 274.7, 268.2, 274.9, 273.0, 277.7],
[261.1, 283.1, 261.0, 295.8, 284.1, 276.2, 280.9, 281.2],
[265.5, 277.7, 270.3, 260.5, 252.6, 273.5, 271.3, 278.4],
[286.9, 266.0, 277.0, 277.8, 280.6, 260.7, 277.7, 272.2],
[272.3, 291.7, 260.2, 272.0, 286.3, 276.4, 285.2, 260.8],
],
[
[285.7, 275.2, 279.7, 266.4, 269.5, 273.6, 272.7, 274.0],
[276.0, 265.2, 278.8, 271.5, 288.0, 273.8, 269.9, 254.7],
[262.7, 258.9, 279.5, 265.9, 276.1, 283.3, 286.1, 286.4],
[268.8, 272.1, 281.3, 269.7, 255.5, 273.0, 273.2, 275.1],
[255.8, 282.3, 262.0, 276.3, 289.3, 270.5, 265.6, 267.7],
],
[
[293.2, 280.3, 295.2, 273.1, 263.9, 266.4, 278.2, 279.8],
[283.8, 280.8, 280.5, 263.9, 279.7, 269.4, 246.4, 263.9],
[271.1, 257.8, 266.7, 263.8, 264.6, 256.0, 273.8, 298.3],
[278.6, 282.3, 267.9, 265.2, 277.1, 273.2, 283.6, 277.0],
[290.1, 275.6, 265.0, 267.6, 265.7, 263.7, 277.6, 290.6],
],
[
[275.3, 294.0, 267.9, 268.1, 268.5, 286.1, 289.5, 261.5],
[263.4, 276.0, 257.0, 289.8, 280.5, 262.4, 279.0, 272.5],
[280.0, 272.6, 279.6, 258.1, 271.4, 271.1, 290.0, 241.2],
[268.4, 290.5, 281.8, 276.8, 277.6, 282.8, 274.1, 267.6],
[275.6, 272.5, 260.1, 261.6, 264.9, 266.8, 277.0, 256.7],
],
[
[263.9, 274.4, 279.7, 260.2, 271.7, 270.0, 272.9, 271.1],
[281.6, 293.4, 286.1, 277.4, 275.0, 286.5, 279.0, 267.1],
[282.3, 272.2, 283.8, 277.5, 292.2, 287.3, 275.5, 274.4],
[278.2, 267.3, 276.3, 268.8, 264.1, 257.0, 278.8, 282.5],
[294.8, 280.0, 276.5, 266.6, 278.3, 256.7, 264.4, 291.7],
],
[
[274.2, 259.3, 267.9, 268.5, 279.0, 264.4, 263.2, 269.1],
[280.5, 272.5, 264.1, 290.7, 288.6, 263.6, 279.9, 278.5],
[276.4, 277.5, 275.6, 283.6, 288.1, 270.3, 286.0, 281.5],
[255.3, 259.7, 261.7, 290.6, 295.0, 280.1, 254.9, 262.2],
[255.4, 257.5, 273.1, 257.7, 258.4, 294.7, 279.3, 282.0],
],
[
[270.4, 273.0, 256.5, 259.9, 268.2, 258.4, 275.5, 294.0],
[280.1, 263.8, 258.3, 274.2, 273.0, 259.8, 253.7, 267.3],
[278.0, 260.8, 263.1, 269.8, 291.2, 279.7, 261.4, 288.6],
[272.1, 292.0, 287.7, 272.4, 273.5, 275.2, 270.0, 268.1],
[286.6, 289.0, 268.9, 277.8, 276.3, 278.3, 262.5, 280.1],
],
[
[275.3, 283.6, 274.4, 272.1, 272.1, 271.1, 273.2, 288.4],
[287.5, 258.1, 286.5, 277.6, 279.5, 288.8, 261.1, 281.0],
[272.5, 264.2, 258.5, 260.8, 267.7, 263.1, 277.2, 280.8],
[265.9, 280.6, 273.1, 244.5, 266.9, 270.7, 277.0, 266.7],
[262.6, 271.8, 270.4, 264.7, 277.9, 267.0, 281.2, 270.1],
],
[
[269.0, 265.1, 271.9, 260.6, 266.8, 255.9, 289.6, 257.0],
[263.2, 271.7, 279.0, 296.9, 271.8, 280.4, 289.9, 277.7],
[251.9, 272.5, 265.7, 272.6, 260.0, 289.5, 262.7, 267.6],
[272.1, 261.7, 265.0, 272.5, 260.6, 283.2, 273.3, 274.1],
[264.0, 266.1, 278.9, 261.3, 266.3, 277.6, 281.9, 284.8],
],
[
[258.6, 265.0, 272.3, 275.7, 279.0, 265.1, 270.8, 258.5],
[266.0, 267.3, 272.9, 274.0, 267.3, 260.6, 280.1, 268.4],
[271.9, 263.6, 266.4, 270.7, 249.5, 286.0, 283.6, 279.2],
[286.9, 288.0, 271.0, 272.5, 271.0, 268.6, 274.6, 267.2],
[269.9, 285.8, 287.2, 277.3, 263.6, 273.8, 281.6, 264.7],
],
[
[269.3, 262.2, 271.6, 265.8, 277.2, 276.9, 273.2, 255.7],
[257.7, 266.9, 269.7, 255.2, 265.2, 301.2, 284.5, 284.7],
[292.6, 268.3, 267.8, 283.6, 262.1, 276.8, 257.8, 271.6],
[259.7, 289.9, 268.4, 277.1, 281.3, 280.5, 265.2, 266.4],
[261.3, 270.0, 266.3, 271.8, 266.7, 254.9, 281.9, 268.6],
],
[
[284.2, 272.6, 278.2, 288.0, 277.0, 261.2, 263.4, 277.3],
[292.5, 270.3, 273.6, 280.3, 261.1, 275.7, 287.1, 278.1],
[295.6, 289.6, 259.1, 266.5, 272.6, 263.2, 272.3, 273.0],
[277.6, 265.0, 267.4, 286.5, 276.2, 276.7, 284.1, 272.1],
[268.4, 273.3, 279.4, 271.9, 261.0, 258.6, 254.6, 269.2],
],
[
[283.8, 265.7, 276.6, 273.9, 268.4, 273.4, 253.4, 271.6],
[276.3, 267.8, 261.1, 267.5, 264.4, 272.4, 291.2, 278.9],
[264.5, 288.0, 272.0, 275.1, 272.2, 275.9, 273.7, 276.4],
[261.7, 252.8, 263.5, 279.2, 285.4, 278.1, 257.6, 264.5],
[267.9, 271.1, 273.4, 276.0, 270.4, 280.8, 272.3, 271.2],
],
[
[272.1, 283.6, 274.5, 271.8, 260.5, 254.9, 280.2, 257.2],
[274.4, 273.8, 263.3, 272.1, 268.6, 279.6, 268.9, 255.7],
[288.6, 288.7, 260.7, 260.6, 273.0, 270.3, 260.6, 281.4],
[256.1, 279.3, 273.7, 250.0, 264.3, 279.6, 277.0, 282.7],
[278.4, 265.3, 272.3, 274.0, 270.8, 272.3, 275.2, 285.1],
],
[
[279.1, 268.2, 268.7, 275.2, 280.5, 274.3, 285.5, 249.8],
[281.2, 267.2, 269.4, 254.8, 284.3, 265.8, 275.3, 260.3],
[282.4, 281.2, 274.6, 277.3, 272.2, 256.8, 273.4, 284.1],
[281.1, 288.6, 269.8, 281.0, 264.7, 258.8, 281.7, 255.5],
[278.4, 280.5, 273.5, 272.0, 279.4, 278.1, 295.4, 246.6],
],
[
[270.0, 259.1, 266.0, 288.3, 268.3, 258.3, 270.1, 286.4],
[268.0, 266.1, 276.0, 253.1, 272.1, 271.2, 270.4, 270.7],
[279.8, 282.8, 264.2, 257.1, 292.4, 276.8, 270.5, 256.9],
[281.7, 280.6, 276.0, 266.0, 278.0, 278.5, 282.8, 268.0],
[275.3, 262.2, 251.9, 270.9, 273.2, 287.4, 285.4, 263.5],
],
[
[282.1, 283.6, 269.3, 276.5, 274.8, 271.5, 276.9, 274.5],
[270.7, 274.9, 286.1, 285.4, 277.8, 269.6, 269.9, 276.3],
[273.1, 278.9, 264.2, 277.1, 257.9, 271.7, 278.9, 262.2],
[292.8, 262.7, 276.5, 274.8, 266.3, 278.1, 277.9, 267.2],
[284.3, 276.9, 282.4, 292.9, 269.5, 269.0, 270.2, 284.6],
],
[
[273.7, 285.3, 272.8, 255.1, 275.0, 269.7, 255.4, 277.4],
[278.6, 272.9, 273.1, 299.3, 271.3, 274.8, 262.7, 272.2],
[272.7, 278.3, 260.2, 264.1, 288.9, 283.2, 259.9, 271.9],
[278.5, 270.0, 265.9, 276.4, 270.0, 255.6, 263.7, 260.0],
[273.4, 266.5, 267.4, 286.9, 268.0, 269.7, 275.1, 269.9],
],
[
[250.5, 267.5, 277.8, 287.8, 276.0, 272.6, 274.8, 292.1],
[263.8, 276.1, 265.4, 266.5, 262.2, 257.9, 275.3, 267.0],
[276.2, 277.0, 276.8, 295.3, 285.2, 257.5, 259.0, 287.8],
[291.8, | |
Element) -> None:
self.body.append(r'\sphinxoptional{')
def depart_desc_optional(self, node: Element) -> None:
self.body.append('}')
def visit_desc_annotation(self, node: Element) -> None:
self.body.append(r'\sphinxbfcode{\sphinxupquote{')
def depart_desc_annotation(self, node: Element) -> None:
self.body.append('}}')
def visit_desc_content(self, node: Element) -> None:
if node.children and not isinstance(node.children[0], nodes.paragraph):
# avoid empty desc environment which causes a formatting bug
self.body.append('~')
def depart_desc_content(self, node: Element) -> None:
pass
def visit_seealso(self, node: Element) -> None:
self.body.append('\n\n\\sphinxstrong{%s:}\n\\nopagebreak\n\n'
% admonitionlabels['seealso'])
def depart_seealso(self, node: Element) -> None:
self.body.append("\n\n")
def visit_rubric(self, node: Element) -> None:
if len(node) == 1 and node.astext() in ('Footnotes', _('Footnotes')):
raise nodes.SkipNode
self.body.append('\\subsubsection*{')
self.context.append('}\n')
self.in_title = 1
def depart_rubric(self, node: Element) -> None:
self.in_title = 0
self.body.append(self.context.pop())
def visit_footnote(self, node: Element) -> None:
self.in_footnote += 1
label = cast(nodes.label, node[0])
if self.in_parsed_literal:
self.body.append('\\begin{footnote}[%s]' % label.astext())
else:
self.body.append('%%\n\\begin{footnote}[%s]' % label.astext())
self.body.append('\\sphinxAtStartFootnote\n')
def depart_footnote(self, node: Element) -> None:
if self.in_parsed_literal:
self.body.append('\\end{footnote}')
else:
self.body.append('%\n\\end{footnote}')
self.in_footnote -= 1
def visit_label(self, node: Element) -> None:
raise nodes.SkipNode
def visit_tabular_col_spec(self, node: Element) -> None:
self.next_table_colspec = node['spec']
raise nodes.SkipNode
def visit_table(self, node: Element) -> None:
if len(self.tables) == 1:
if self.table.get_table_type() == 'longtable':
raise UnsupportedError(
'%s:%s: longtable does not support nesting a table.' %
(self.curfilestack[-1], node.line or ''))
else:
# change type of parent table to tabular
# see https://groups.google.com/d/msg/sphinx-users/7m3NeOBixeo/9LKP2B4WBQAJ
self.table.has_problematic = True
elif len(self.tables) > 2:
raise UnsupportedError(
'%s:%s: deeply nested tables are not implemented.' %
(self.curfilestack[-1], node.line or ''))
self.tables.append(Table(node))
if self.next_table_colspec:
self.table.colspec = '{%s}\n' % self.next_table_colspec
if 'colwidths-given' in node.get('classes', []):
logger.info(__('both tabularcolumns and :widths: option are given. '
':widths: is ignored.'), location=node)
self.next_table_colspec = None
def depart_table(self, node: Element) -> None:
labels = self.hypertarget_to(node)
table_type = self.table.get_table_type()
table = self.render(table_type + '.tex_t',
dict(table=self.table, labels=labels))
self.body.append("\n\n")
self.body.append(table)
self.body.append("\n")
self.tables.pop()
def visit_colspec(self, node: Element) -> None:
self.table.colcount += 1
if 'colwidth' in node:
self.table.colwidths.append(node['colwidth'])
if 'stub' in node:
self.table.stubs.append(self.table.colcount - 1)
def depart_colspec(self, node: Element) -> None:
pass
def visit_tgroup(self, node: Element) -> None:
pass
def depart_tgroup(self, node: Element) -> None:
pass
def visit_thead(self, node: Element) -> None:
# Redirect head output until header is finished.
self.pushbody(self.table.header)
def depart_thead(self, node: Element) -> None:
self.popbody()
def visit_tbody(self, node: Element) -> None:
# Redirect body output until table is finished.
self.pushbody(self.table.body)
def depart_tbody(self, node: Element) -> None:
self.popbody()
def visit_row(self, node: Element) -> None:
self.table.col = 0
# fill columns if the row starts with the bottom of multirow cell
while True:
cell = self.table.cell(self.table.row, self.table.col)
if cell is None: # not a bottom of multirow cell
break
else: # a bottom of multirow cell
self.table.col += cell.width
if cell.col:
self.body.append('&')
if cell.width == 1:
# insert suitable strut for equalizing row heights in given multirow
self.body.append('\\sphinxtablestrut{%d}' % cell.cell_id)
else: # use \multicolumn for wide multirow cell
self.body.append('\\multicolumn{%d}{|l|}'
'{\\sphinxtablestrut{%d}}' %
(cell.width, cell.cell_id))
def depart_row(self, node: Element) -> None:
self.body.append('\\\\\n')
cells = [self.table.cell(self.table.row, i) for i in range(self.table.colcount)]
underlined = [cell.row + cell.height == self.table.row + 1 for cell in cells]
if all(underlined):
self.body.append('\\hline')
else:
i = 0
underlined.extend([False]) # sentinel
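            # emit one \cline{i-j} per contiguous run of cells whose
            # (multi)row ends on this table row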
while i < len(underlined):
if underlined[i] is True:
j = underlined[i:].index(False)
self.body.append('\\cline{%d-%d}' % (i + 1, i + j))
i += j
i += 1
self.table.row += 1
def visit_entry(self, node: Element) -> None:
if self.table.col > 0:
self.body.append('&')
self.table.add_cell(node.get('morerows', 0) + 1, node.get('morecols', 0) + 1)
cell = self.table.cell()
context = ''
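        # "context" accumulates the closing LaTeX that depart_entry() must emit
        # for this cell (multicolumn/multirow/varwidth terminators).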
if cell.width > 1:
if self.config.latex_use_latex_multicolumn:
if self.table.col == 0:
self.body.append('\\multicolumn{%d}{|l|}{%%\n' % cell.width)
else:
self.body.append('\\multicolumn{%d}{l|}{%%\n' % cell.width)
context = '}%\n'
else:
self.body.append('\\sphinxstartmulticolumn{%d}%%\n' % cell.width)
context = '\\sphinxstopmulticolumn\n'
if cell.height > 1:
# \sphinxmultirow 2nd arg "cell_id" will serve as id for LaTeX macros as well
self.body.append('\\sphinxmultirow{%d}{%d}{%%\n' % (cell.height, cell.cell_id))
context = '}%\n' + context
if cell.width > 1 or cell.height > 1:
self.body.append('\\begin{varwidth}[t]{\\sphinxcolwidth{%d}{%d}}\n'
% (cell.width, self.table.colcount))
context = ('\\par\n\\vskip-\\baselineskip'
'\\vbox{\\hbox{\\strut}}\\end{varwidth}%\n') + context
self.needs_linetrimming = 1
if len(node.traverse(nodes.paragraph)) >= 2:
self.table.has_oldproblematic = True
if isinstance(node.parent.parent, nodes.thead) or (cell.col in self.table.stubs):
if len(node) == 1 and isinstance(node[0], nodes.paragraph) and node.astext() == '':
pass
else:
self.body.append('\\sphinxstyletheadfamily ')
if self.needs_linetrimming:
self.pushbody([])
self.context.append(context)
def depart_entry(self, node: Element) -> None:
if self.needs_linetrimming:
self.needs_linetrimming = 0
body = self.popbody()
# Remove empty lines from top of merged cell
while body and body[0] == "\n":
body.pop(0)
self.body.extend(body)
self.body.append(self.context.pop())
cell = self.table.cell()
self.table.col += cell.width
# fill columns if next ones are a bottom of wide-multirow cell
while True:
nextcell = self.table.cell()
if nextcell is None: # not a bottom of multirow cell
break
else: # a bottom part of multirow cell
self.table.col += nextcell.width
self.body.append('&')
if nextcell.width == 1:
# insert suitable strut for equalizing row heights in multirow
# they also serve to clear colour panels which would hide the text
self.body.append('\\sphinxtablestrut{%d}' % nextcell.cell_id)
else:
# use \multicolumn for wide multirow cell
self.body.append('\\multicolumn{%d}{l|}'
'{\\sphinxtablestrut{%d}}' %
(nextcell.width, nextcell.cell_id))
def visit_acks(self, node: Element) -> None:
# this is a list in the source, but should be rendered as a
# comma-separated list here
bullet_list = cast(nodes.bullet_list, node[0])
list_items = cast(Iterable[nodes.list_item], bullet_list)
self.body.append('\n\n')
self.body.append(', '.join(n.astext() for n in list_items) + '.')
self.body.append('\n\n')
raise nodes.SkipNode
def visit_bullet_list(self, node: Element) -> None:
if not self.compact_list:
self.body.append('\\begin{itemize}\n')
if self.table:
self.table.has_problematic = True
def depart_bullet_list(self, node: Element) -> None:
if not self.compact_list:
self.body.append('\\end{itemize}\n')
def visit_enumerated_list(self, node: Element) -> None:
def get_enumtype(node: Element) -> str:
enumtype = node.get('enumtype', 'arabic')
if 'alpha' in enumtype and 26 < node.get('start', 0) + len(node):
# fallback to arabic if alphabet counter overflows
enumtype = 'arabic'
return enumtype
def get_nested_level(node: Element) -> int:
if node is None:
return 0
elif isinstance(node, nodes.enumerated_list):
return get_nested_level(node.parent) + 1
else:
return get_nested_level(node.parent)
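        # LaTeX numbers nested enumerations with the counters enumi, enumii,
        # enumiii, ..., so the nesting depth is converted to a roman numeral here.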
enum = "enum%s" % toRoman(get_nested_level(node)).lower()
enumnext = "enum%s" % toRoman(get_nested_level(node) + 1).lower()
style = ENUMERATE_LIST_STYLE.get(get_enumtype(node))
prefix = node.get('prefix', '')
suffix = node.get('suffix', '.')
self.body.append('\\begin{enumerate}\n')
self.body.append('\\sphinxsetlistlabels{%s}{%s}{%s}{%s}{%s}%%\n' %
(style, enum, enumnext, prefix, suffix))
if 'start' in node:
self.body.append('\\setcounter{%s}{%d}\n' % (enum, node['start'] - 1))
if self.table:
self.table.has_problematic = True
def depart_enumerated_list(self, node: Element) -> None:
self.body.append('\\end{enumerate}\n')
def visit_list_item(self, node: Element) -> None:
# Append "{}" in case the next character is "[", which would break
# LaTeX's list environment (no numbering and the "[" is not printed).
self.body.append(r'\item {} ')
def depart_list_item(self, node: Element) -> None:
self.body.append('\n')
def visit_definition_list(self, node: Element) -> None:
self.body.append('\\begin{description}\n')
if self.table:
self.table.has_problematic = True
def depart_definition_list(self, node: Element) -> None:
self.body.append('\\end{description}\n')
def visit_definition_list_item(self, node: Element) -> None:
pass
def depart_definition_list_item(self, node: Element) -> None:
pass
def visit_term(self, node: Element) -> None:
self.in_term += 1
ctx = ''
if node.get('ids'):
ctx = '\\phantomsection'
for node_id in node['ids']:
ctx += self.hypertarget(node_id, anchor=False)
ctx += '}] \\leavevmode'
self.body.append('\\item[{')
self.context.append(ctx)
def depart_term(self, node: Element) -> None:
self.body.append(self.context.pop())
self.in_term -= 1
def visit_classifier(self, node: Element) -> None:
self.body.append('{[}')
def depart_classifier(self, node: Element) -> None:
self.body.append('{]}')
def visit_definition(self, node: Element) -> None:
pass
def depart_definition(self, node: Element) -> None:
self.body.append('\n')
def visit_field_list(self, node: Element) -> None:
self.body.append('\\begin{quote}\\begin{description}\n')
if self.table:
self.table.has_problematic = True
def depart_field_list(self, node: Element) -> None:
self.body.append('\\end{description}\\end{quote}\n')
def visit_field(self, node: Element) -> None:
pass
def depart_field(self, node: Element) -> None:
pass
visit_field_name = visit_term
depart_field_name = depart_term
visit_field_body = visit_definition
depart_field_body = depart_definition
def visit_paragraph(self, node: Element) -> None:
index = node.parent.index(node)
if (index > 0 and isinstance(node.parent, nodes.compound) and
not isinstance(node.parent[index - 1], nodes.paragraph) and
not isinstance(node.parent[index - 1], nodes.compound)):
# insert blank line, if the paragraph follows a non-paragraph node in a compound
self.body.append('\\noindent\n')
elif index == 1 and isinstance(node.parent, (nodes.footnote, footnotetext)):
# don't insert blank line, if the paragraph is second child of a footnote
# (first one is label node)
pass
else:
self.body.append('\n')
def depart_paragraph(self, node: Element) -> None:
self.body.append('\n')
def visit_centered(self, node: Element) -> None:
self.body.append('\n\\begin{center}')
if self.table:
self.table.has_problematic = True
def depart_centered(self, node: Element) -> None:
self.body.append('\n\\end{center}')
def visit_hlist(self, node: Element) -> None:
# for now, we don't support a more compact list format
# don't add individual itemize environments, but one for all columns
self.compact_list += 1
self.body.append('\\begin{itemize}\\setlength{\\itemsep}{0pt}'
'\\setlength{\\parskip}{0pt}\n')
if self.table:
self.table.has_problematic = True
def depart_hlist(self, node: Element) -> None:
self.compact_list -= 1
| |
wx.stc.STC_ADA_STRING
wxSTC_ADA_STRINGEOL = wx.stc.STC_ADA_STRINGEOL
wxSTC_ADA_LABEL = wx.stc.STC_ADA_LABEL
wxSTC_ADA_COMMENTLINE = wx.stc.STC_ADA_COMMENTLINE
wxSTC_ADA_ILLEGAL = wx.stc.STC_ADA_ILLEGAL
wxSTC_BAAN_DEFAULT = wx.stc.STC_BAAN_DEFAULT
wxSTC_BAAN_COMMENT = wx.stc.STC_BAAN_COMMENT
wxSTC_BAAN_COMMENTDOC = wx.stc.STC_BAAN_COMMENTDOC
wxSTC_BAAN_NUMBER = wx.stc.STC_BAAN_NUMBER
wxSTC_BAAN_WORD = wx.stc.STC_BAAN_WORD
wxSTC_BAAN_STRING = wx.stc.STC_BAAN_STRING
wxSTC_BAAN_PREPROCESSOR = wx.stc.STC_BAAN_PREPROCESSOR
wxSTC_BAAN_OPERATOR = wx.stc.STC_BAAN_OPERATOR
wxSTC_BAAN_IDENTIFIER = wx.stc.STC_BAAN_IDENTIFIER
wxSTC_BAAN_STRINGEOL = wx.stc.STC_BAAN_STRINGEOL
wxSTC_BAAN_WORD2 = wx.stc.STC_BAAN_WORD2
wxSTC_LISP_DEFAULT = wx.stc.STC_LISP_DEFAULT
wxSTC_LISP_COMMENT = wx.stc.STC_LISP_COMMENT
wxSTC_LISP_NUMBER = wx.stc.STC_LISP_NUMBER
wxSTC_LISP_KEYWORD = wx.stc.STC_LISP_KEYWORD
wxSTC_LISP_KEYWORD_KW = wx.stc.STC_LISP_KEYWORD_KW
wxSTC_LISP_SYMBOL = wx.stc.STC_LISP_SYMBOL
wxSTC_LISP_STRING = wx.stc.STC_LISP_STRING
wxSTC_LISP_STRINGEOL = wx.stc.STC_LISP_STRINGEOL
wxSTC_LISP_IDENTIFIER = wx.stc.STC_LISP_IDENTIFIER
wxSTC_LISP_OPERATOR = wx.stc.STC_LISP_OPERATOR
wxSTC_LISP_SPECIAL = wx.stc.STC_LISP_SPECIAL
wxSTC_LISP_MULTI_COMMENT = wx.stc.STC_LISP_MULTI_COMMENT
wxSTC_EIFFEL_DEFAULT = wx.stc.STC_EIFFEL_DEFAULT
wxSTC_EIFFEL_COMMENTLINE = wx.stc.STC_EIFFEL_COMMENTLINE
wxSTC_EIFFEL_NUMBER = wx.stc.STC_EIFFEL_NUMBER
wxSTC_EIFFEL_WORD = wx.stc.STC_EIFFEL_WORD
wxSTC_EIFFEL_STRING = wx.stc.STC_EIFFEL_STRING
wxSTC_EIFFEL_CHARACTER = wx.stc.STC_EIFFEL_CHARACTER
wxSTC_EIFFEL_OPERATOR = wx.stc.STC_EIFFEL_OPERATOR
wxSTC_EIFFEL_IDENTIFIER = wx.stc.STC_EIFFEL_IDENTIFIER
wxSTC_EIFFEL_STRINGEOL = wx.stc.STC_EIFFEL_STRINGEOL
wxSTC_NNCRONTAB_DEFAULT = wx.stc.STC_NNCRONTAB_DEFAULT
wxSTC_NNCRONTAB_COMMENT = wx.stc.STC_NNCRONTAB_COMMENT
wxSTC_NNCRONTAB_TASK = wx.stc.STC_NNCRONTAB_TASK
wxSTC_NNCRONTAB_SECTION = wx.stc.STC_NNCRONTAB_SECTION
wxSTC_NNCRONTAB_KEYWORD = wx.stc.STC_NNCRONTAB_KEYWORD
wxSTC_NNCRONTAB_MODIFIER = wx.stc.STC_NNCRONTAB_MODIFIER
wxSTC_NNCRONTAB_ASTERISK = wx.stc.STC_NNCRONTAB_ASTERISK
wxSTC_NNCRONTAB_NUMBER = wx.stc.STC_NNCRONTAB_NUMBER
wxSTC_NNCRONTAB_STRING = wx.stc.STC_NNCRONTAB_STRING
wxSTC_NNCRONTAB_ENVIRONMENT = wx.stc.STC_NNCRONTAB_ENVIRONMENT
wxSTC_NNCRONTAB_IDENTIFIER = wx.stc.STC_NNCRONTAB_IDENTIFIER
wxSTC_FORTH_DEFAULT = wx.stc.STC_FORTH_DEFAULT
wxSTC_FORTH_COMMENT = wx.stc.STC_FORTH_COMMENT
wxSTC_FORTH_COMMENT_ML = wx.stc.STC_FORTH_COMMENT_ML
wxSTC_FORTH_IDENTIFIER = wx.stc.STC_FORTH_IDENTIFIER
wxSTC_FORTH_CONTROL = wx.stc.STC_FORTH_CONTROL
wxSTC_FORTH_KEYWORD = wx.stc.STC_FORTH_KEYWORD
wxSTC_FORTH_DEFWORD = wx.stc.STC_FORTH_DEFWORD
wxSTC_FORTH_PREWORD1 = wx.stc.STC_FORTH_PREWORD1
wxSTC_FORTH_PREWORD2 = wx.stc.STC_FORTH_PREWORD2
wxSTC_FORTH_NUMBER = wx.stc.STC_FORTH_NUMBER
wxSTC_FORTH_STRING = wx.stc.STC_FORTH_STRING
wxSTC_FORTH_LOCALE = wx.stc.STC_FORTH_LOCALE
wxSTC_MATLAB_DEFAULT = wx.stc.STC_MATLAB_DEFAULT
wxSTC_MATLAB_COMMENT = wx.stc.STC_MATLAB_COMMENT
wxSTC_MATLAB_COMMAND = wx.stc.STC_MATLAB_COMMAND
wxSTC_MATLAB_NUMBER = wx.stc.STC_MATLAB_NUMBER
wxSTC_MATLAB_KEYWORD = wx.stc.STC_MATLAB_KEYWORD
wxSTC_MATLAB_STRING = wx.stc.STC_MATLAB_STRING
wxSTC_MATLAB_OPERATOR = wx.stc.STC_MATLAB_OPERATOR
wxSTC_MATLAB_IDENTIFIER = wx.stc.STC_MATLAB_IDENTIFIER
wxSTC_MATLAB_DOUBLEQUOTESTRING = wx.stc.STC_MATLAB_DOUBLEQUOTESTRING
wxSTC_SCRIPTOL_DEFAULT = wx.stc.STC_SCRIPTOL_DEFAULT
wxSTC_SCRIPTOL_WHITE = wx.stc.STC_SCRIPTOL_WHITE
wxSTC_SCRIPTOL_COMMENTLINE = wx.stc.STC_SCRIPTOL_COMMENTLINE
wxSTC_SCRIPTOL_PERSISTENT = wx.stc.STC_SCRIPTOL_PERSISTENT
wxSTC_SCRIPTOL_CSTYLE = wx.stc.STC_SCRIPTOL_CSTYLE
wxSTC_SCRIPTOL_COMMENTBLOCK = wx.stc.STC_SCRIPTOL_COMMENTBLOCK
wxSTC_SCRIPTOL_NUMBER = wx.stc.STC_SCRIPTOL_NUMBER
wxSTC_SCRIPTOL_STRING = wx.stc.STC_SCRIPTOL_STRING
wxSTC_SCRIPTOL_CHARACTER = wx.stc.STC_SCRIPTOL_CHARACTER
wxSTC_SCRIPTOL_STRINGEOL = wx.stc.STC_SCRIPTOL_STRINGEOL
wxSTC_SCRIPTOL_KEYWORD = wx.stc.STC_SCRIPTOL_KEYWORD
wxSTC_SCRIPTOL_OPERATOR = wx.stc.STC_SCRIPTOL_OPERATOR
wxSTC_SCRIPTOL_IDENTIFIER = wx.stc.STC_SCRIPTOL_IDENTIFIER
wxSTC_SCRIPTOL_TRIPLE = wx.stc.STC_SCRIPTOL_TRIPLE
wxSTC_SCRIPTOL_CLASSNAME = wx.stc.STC_SCRIPTOL_CLASSNAME
wxSTC_SCRIPTOL_PREPROCESSOR = wx.stc.STC_SCRIPTOL_PREPROCESSOR
wxSTC_ASM_DEFAULT = wx.stc.STC_ASM_DEFAULT
wxSTC_ASM_COMMENT = wx.stc.STC_ASM_COMMENT
wxSTC_ASM_NUMBER = wx.stc.STC_ASM_NUMBER
wxSTC_ASM_STRING = wx.stc.STC_ASM_STRING
wxSTC_ASM_OPERATOR = wx.stc.STC_ASM_OPERATOR
wxSTC_ASM_IDENTIFIER = wx.stc.STC_ASM_IDENTIFIER
wxSTC_ASM_CPUINSTRUCTION = wx.stc.STC_ASM_CPUINSTRUCTION
wxSTC_ASM_MATHINSTRUCTION = wx.stc.STC_ASM_MATHINSTRUCTION
wxSTC_ASM_REGISTER = wx.stc.STC_ASM_REGISTER
wxSTC_ASM_DIRECTIVE = wx.stc.STC_ASM_DIRECTIVE
wxSTC_ASM_DIRECTIVEOPERAND = wx.stc.STC_ASM_DIRECTIVEOPERAND
wxSTC_ASM_COMMENTBLOCK = wx.stc.STC_ASM_COMMENTBLOCK
wxSTC_ASM_CHARACTER = wx.stc.STC_ASM_CHARACTER
wxSTC_ASM_STRINGEOL = wx.stc.STC_ASM_STRINGEOL
wxSTC_ASM_EXTINSTRUCTION = wx.stc.STC_ASM_EXTINSTRUCTION
wxSTC_F_DEFAULT = wx.stc.STC_F_DEFAULT
wxSTC_F_COMMENT = wx.stc.STC_F_COMMENT
wxSTC_F_NUMBER = wx.stc.STC_F_NUMBER
wxSTC_F_STRING1 = wx.stc.STC_F_STRING1
wxSTC_F_STRING2 = wx.stc.STC_F_STRING2
wxSTC_F_STRINGEOL = wx.stc.STC_F_STRINGEOL
wxSTC_F_OPERATOR = wx.stc.STC_F_OPERATOR
wxSTC_F_IDENTIFIER = wx.stc.STC_F_IDENTIFIER
wxSTC_F_WORD = wx.stc.STC_F_WORD
wxSTC_F_WORD2 = wx.stc.STC_F_WORD2
wxSTC_F_WORD3 = wx.stc.STC_F_WORD3
wxSTC_F_PREPROCESSOR = wx.stc.STC_F_PREPROCESSOR
wxSTC_F_OPERATOR2 = wx.stc.STC_F_OPERATOR2
wxSTC_F_LABEL = wx.stc.STC_F_LABEL
wxSTC_F_CONTINUATION = wx.stc.STC_F_CONTINUATION
wxSTC_CSS_DEFAULT = wx.stc.STC_CSS_DEFAULT
wxSTC_CSS_TAG = wx.stc.STC_CSS_TAG
wxSTC_CSS_CLASS = wx.stc.STC_CSS_CLASS
wxSTC_CSS_PSEUDOCLASS = wx.stc.STC_CSS_PSEUDOCLASS
wxSTC_CSS_UNKNOWN_PSEUDOCLASS = wx.stc.STC_CSS_UNKNOWN_PSEUDOCLASS
wxSTC_CSS_OPERATOR = wx.stc.STC_CSS_OPERATOR
wxSTC_CSS_IDENTIFIER = wx.stc.STC_CSS_IDENTIFIER
wxSTC_CSS_UNKNOWN_IDENTIFIER = wx.stc.STC_CSS_UNKNOWN_IDENTIFIER
wxSTC_CSS_VALUE = wx.stc.STC_CSS_VALUE
wxSTC_CSS_COMMENT = wx.stc.STC_CSS_COMMENT
wxSTC_CSS_ID = wx.stc.STC_CSS_ID
wxSTC_CSS_IMPORTANT = wx.stc.STC_CSS_IMPORTANT
wxSTC_CSS_DIRECTIVE = wx.stc.STC_CSS_DIRECTIVE
wxSTC_CSS_DOUBLESTRING = wx.stc.STC_CSS_DOUBLESTRING
wxSTC_CSS_SINGLESTRING = wx.stc.STC_CSS_SINGLESTRING
wxSTC_CSS_IDENTIFIER2 = wx.stc.STC_CSS_IDENTIFIER2
wxSTC_CSS_ATTRIBUTE = wx.stc.STC_CSS_ATTRIBUTE
wxSTC_POV_DEFAULT = wx.stc.STC_POV_DEFAULT
wxSTC_POV_COMMENT = wx.stc.STC_POV_COMMENT
wxSTC_POV_COMMENTLINE = wx.stc.STC_POV_COMMENTLINE
wxSTC_POV_NUMBER = wx.stc.STC_POV_NUMBER
wxSTC_POV_OPERATOR = wx.stc.STC_POV_OPERATOR
wxSTC_POV_IDENTIFIER = wx.stc.STC_POV_IDENTIFIER
wxSTC_POV_STRING = wx.stc.STC_POV_STRING
wxSTC_POV_STRINGEOL = wx.stc.STC_POV_STRINGEOL
wxSTC_POV_DIRECTIVE = wx.stc.STC_POV_DIRECTIVE
wxSTC_POV_BADDIRECTIVE = wx.stc.STC_POV_BADDIRECTIVE
wxSTC_POV_WORD2 = wx.stc.STC_POV_WORD2
wxSTC_POV_WORD3 = wx.stc.STC_POV_WORD3
wxSTC_POV_WORD4 = wx.stc.STC_POV_WORD4
wxSTC_POV_WORD5 = wx.stc.STC_POV_WORD5
wxSTC_POV_WORD6 = wx.stc.STC_POV_WORD6
wxSTC_POV_WORD7 = wx.stc.STC_POV_WORD7
wxSTC_POV_WORD8 = wx.stc.STC_POV_WORD8
wxSTC_LOUT_DEFAULT = wx.stc.STC_LOUT_DEFAULT
wxSTC_LOUT_COMMENT = wx.stc.STC_LOUT_COMMENT
wxSTC_LOUT_NUMBER = wx.stc.STC_LOUT_NUMBER
wxSTC_LOUT_WORD = wx.stc.STC_LOUT_WORD
wxSTC_LOUT_WORD2 = wx.stc.STC_LOUT_WORD2
wxSTC_LOUT_WORD3 = wx.stc.STC_LOUT_WORD3
wxSTC_LOUT_WORD4 = wx.stc.STC_LOUT_WORD4
wxSTC_LOUT_STRING = wx.stc.STC_LOUT_STRING
wxSTC_LOUT_OPERATOR = wx.stc.STC_LOUT_OPERATOR
wxSTC_LOUT_IDENTIFIER = wx.stc.STC_LOUT_IDENTIFIER
wxSTC_LOUT_STRINGEOL = wx.stc.STC_LOUT_STRINGEOL
wxSTC_ESCRIPT_DEFAULT = wx.stc.STC_ESCRIPT_DEFAULT
wxSTC_ESCRIPT_COMMENT = wx.stc.STC_ESCRIPT_COMMENT
wxSTC_ESCRIPT_COMMENTLINE = wx.stc.STC_ESCRIPT_COMMENTLINE
wxSTC_ESCRIPT_COMMENTDOC = wx.stc.STC_ESCRIPT_COMMENTDOC
wxSTC_ESCRIPT_NUMBER = wx.stc.STC_ESCRIPT_NUMBER
wxSTC_ESCRIPT_WORD = wx.stc.STC_ESCRIPT_WORD
wxSTC_ESCRIPT_STRING = wx.stc.STC_ESCRIPT_STRING
wxSTC_ESCRIPT_OPERATOR = wx.stc.STC_ESCRIPT_OPERATOR
wxSTC_ESCRIPT_IDENTIFIER = wx.stc.STC_ESCRIPT_IDENTIFIER
wxSTC_ESCRIPT_BRACE = wx.stc.STC_ESCRIPT_BRACE
wxSTC_ESCRIPT_WORD2 = wx.stc.STC_ESCRIPT_WORD2
wxSTC_ESCRIPT_WORD3 = wx.stc.STC_ESCRIPT_WORD3
wxSTC_PS_DEFAULT = wx.stc.STC_PS_DEFAULT
wxSTC_PS_COMMENT = wx.stc.STC_PS_COMMENT
wxSTC_PS_DSC_COMMENT = wx.stc.STC_PS_DSC_COMMENT
wxSTC_PS_DSC_VALUE = wx.stc.STC_PS_DSC_VALUE
wxSTC_PS_NUMBER = wx.stc.STC_PS_NUMBER
wxSTC_PS_NAME = wx.stc.STC_PS_NAME
wxSTC_PS_KEYWORD = wx.stc.STC_PS_KEYWORD
wxSTC_PS_LITERAL = wx.stc.STC_PS_LITERAL
wxSTC_PS_IMMEVAL = wx.stc.STC_PS_IMMEVAL
wxSTC_PS_PAREN_ARRAY = wx.stc.STC_PS_PAREN_ARRAY
wxSTC_PS_PAREN_DICT = wx.stc.STC_PS_PAREN_DICT
wxSTC_PS_PAREN_PROC = wx.stc.STC_PS_PAREN_PROC
wxSTC_PS_TEXT = wx.stc.STC_PS_TEXT
wxSTC_PS_HEXSTRING = wx.stc.STC_PS_HEXSTRING
wxSTC_PS_BASE85STRING = wx.stc.STC_PS_BASE85STRING
wxSTC_PS_BADSTRINGCHAR = wx.stc.STC_PS_BADSTRINGCHAR
wxSTC_NSIS_DEFAULT = wx.stc.STC_NSIS_DEFAULT
wxSTC_NSIS_COMMENT = wx.stc.STC_NSIS_COMMENT
wxSTC_NSIS_STRINGDQ = wx.stc.STC_NSIS_STRINGDQ
wxSTC_NSIS_STRINGLQ = wx.stc.STC_NSIS_STRINGLQ
wxSTC_NSIS_STRINGRQ = wx.stc.STC_NSIS_STRINGRQ
wxSTC_NSIS_FUNCTION = wx.stc.STC_NSIS_FUNCTION
wxSTC_NSIS_VARIABLE = wx.stc.STC_NSIS_VARIABLE
wxSTC_NSIS_LABEL = wx.stc.STC_NSIS_LABEL
wxSTC_NSIS_USERDEFINED = wx.stc.STC_NSIS_USERDEFINED
wxSTC_NSIS_SECTIONDEF = wx.stc.STC_NSIS_SECTIONDEF
wxSTC_NSIS_SUBSECTIONDEF = wx.stc.STC_NSIS_SUBSECTIONDEF
wxSTC_NSIS_IFDEFINEDEF = wx.stc.STC_NSIS_IFDEFINEDEF
wxSTC_NSIS_MACRODEF = wx.stc.STC_NSIS_MACRODEF
wxSTC_NSIS_STRINGVAR = wx.stc.STC_NSIS_STRINGVAR
wxSTC_NSIS_NUMBER = wx.stc.STC_NSIS_NUMBER
wxSTC_NSIS_SECTIONGROUP = wx.stc.STC_NSIS_SECTIONGROUP
wxSTC_NSIS_PAGEEX = wx.stc.STC_NSIS_PAGEEX
wxSTC_NSIS_FUNCTIONDEF = wx.stc.STC_NSIS_FUNCTIONDEF
wxSTC_NSIS_COMMENTBOX = wx.stc.STC_NSIS_COMMENTBOX
wxSTC_MMIXAL_LEADWS = wx.stc.STC_MMIXAL_LEADWS
wxSTC_MMIXAL_COMMENT = wx.stc.STC_MMIXAL_COMMENT
wxSTC_MMIXAL_LABEL = wx.stc.STC_MMIXAL_LABEL
wxSTC_MMIXAL_OPCODE = wx.stc.STC_MMIXAL_OPCODE
wxSTC_MMIXAL_OPCODE_PRE = wx.stc.STC_MMIXAL_OPCODE_PRE
wxSTC_MMIXAL_OPCODE_VALID = wx.stc.STC_MMIXAL_OPCODE_VALID
wxSTC_MMIXAL_OPCODE_UNKNOWN = wx.stc.STC_MMIXAL_OPCODE_UNKNOWN
wxSTC_MMIXAL_OPCODE_POST = wx.stc.STC_MMIXAL_OPCODE_POST
wxSTC_MMIXAL_OPERANDS = wx.stc.STC_MMIXAL_OPERANDS
wxSTC_MMIXAL_NUMBER = wx.stc.STC_MMIXAL_NUMBER
wxSTC_MMIXAL_REF = wx.stc.STC_MMIXAL_REF
wxSTC_MMIXAL_CHAR = wx.stc.STC_MMIXAL_CHAR
wxSTC_MMIXAL_STRING = wx.stc.STC_MMIXAL_STRING
wxSTC_MMIXAL_REGISTER = wx.stc.STC_MMIXAL_REGISTER
wxSTC_MMIXAL_HEX = wx.stc.STC_MMIXAL_HEX
wxSTC_MMIXAL_OPERATOR = wx.stc.STC_MMIXAL_OPERATOR
wxSTC_MMIXAL_SYMBOL = wx.stc.STC_MMIXAL_SYMBOL
wxSTC_MMIXAL_INCLUDE = wx.stc.STC_MMIXAL_INCLUDE
wxSTC_CLW_DEFAULT = wx.stc.STC_CLW_DEFAULT
wxSTC_CLW_LABEL = wx.stc.STC_CLW_LABEL
wxSTC_CLW_COMMENT = wx.stc.STC_CLW_COMMENT
wxSTC_CLW_STRING = wx.stc.STC_CLW_STRING
wxSTC_CLW_USER_IDENTIFIER = wx.stc.STC_CLW_USER_IDENTIFIER
wxSTC_CLW_INTEGER_CONSTANT = wx.stc.STC_CLW_INTEGER_CONSTANT
wxSTC_CLW_REAL_CONSTANT = wx.stc.STC_CLW_REAL_CONSTANT
wxSTC_CLW_PICTURE_STRING = wx.stc.STC_CLW_PICTURE_STRING
wxSTC_CLW_KEYWORD = wx.stc.STC_CLW_KEYWORD
wxSTC_CLW_COMPILER_DIRECTIVE = wx.stc.STC_CLW_COMPILER_DIRECTIVE
wxSTC_CLW_RUNTIME_EXPRESSIONS = wx.stc.STC_CLW_RUNTIME_EXPRESSIONS
wxSTC_CLW_BUILTIN_PROCEDURES_FUNCTION = wx.stc.STC_CLW_BUILTIN_PROCEDURES_FUNCTION
wxSTC_CLW_STRUCTURE_DATA_TYPE = wx.stc.STC_CLW_STRUCTURE_DATA_TYPE
wxSTC_CLW_ATTRIBUTE = wx.stc.STC_CLW_ATTRIBUTE
wxSTC_CLW_STANDARD_EQUATE = wx.stc.STC_CLW_STANDARD_EQUATE
wxSTC_CLW_ERROR = wx.stc.STC_CLW_ERROR
wxSTC_CLW_DEPRECATED = wx.stc.STC_CLW_DEPRECATED
wxSTC_LOT_DEFAULT = wx.stc.STC_LOT_DEFAULT
wxSTC_LOT_HEADER = wx.stc.STC_LOT_HEADER
wxSTC_LOT_BREAK = wx.stc.STC_LOT_BREAK
wxSTC_LOT_SET = wx.stc.STC_LOT_SET
wxSTC_LOT_PASS = wx.stc.STC_LOT_PASS
wxSTC_LOT_FAIL = wx.stc.STC_LOT_FAIL
wxSTC_LOT_ABORT = wx.stc.STC_LOT_ABORT
wxSTC_YAML_DEFAULT = wx.stc.STC_YAML_DEFAULT
wxSTC_YAML_COMMENT = wx.stc.STC_YAML_COMMENT
wxSTC_YAML_IDENTIFIER = wx.stc.STC_YAML_IDENTIFIER
wxSTC_YAML_KEYWORD = wx.stc.STC_YAML_KEYWORD
wxSTC_YAML_NUMBER = wx.stc.STC_YAML_NUMBER
wxSTC_YAML_REFERENCE = wx.stc.STC_YAML_REFERENCE
wxSTC_YAML_DOCUMENT = wx.stc.STC_YAML_DOCUMENT
wxSTC_YAML_TEXT = wx.stc.STC_YAML_TEXT
wxSTC_YAML_ERROR = wx.stc.STC_YAML_ERROR
wxSTC_TEX_DEFAULT = wx.stc.STC_TEX_DEFAULT
wxSTC_TEX_SPECIAL = wx.stc.STC_TEX_SPECIAL
wxSTC_TEX_GROUP = wx.stc.STC_TEX_GROUP
wxSTC_TEX_SYMBOL = wx.stc.STC_TEX_SYMBOL
wxSTC_TEX_COMMAND = wx.stc.STC_TEX_COMMAND
wxSTC_TEX_TEXT = wx.stc.STC_TEX_TEXT
wxSTC_METAPOST_DEFAULT = wx.stc.STC_METAPOST_DEFAULT
wxSTC_METAPOST_SPECIAL = wx.stc.STC_METAPOST_SPECIAL
wxSTC_METAPOST_GROUP = wx.stc.STC_METAPOST_GROUP
wxSTC_METAPOST_SYMBOL = wx.stc.STC_METAPOST_SYMBOL
wxSTC_METAPOST_COMMAND = wx.stc.STC_METAPOST_COMMAND
wxSTC_METAPOST_TEXT = wx.stc.STC_METAPOST_TEXT
wxSTC_METAPOST_EXTRA = wx.stc.STC_METAPOST_EXTRA
wxSTC_ERLANG_DEFAULT = wx.stc.STC_ERLANG_DEFAULT
wxSTC_ERLANG_COMMENT = wx.stc.STC_ERLANG_COMMENT
wxSTC_ERLANG_VARIABLE = wx.stc.STC_ERLANG_VARIABLE
wxSTC_ERLANG_NUMBER = wx.stc.STC_ERLANG_NUMBER
wxSTC_ERLANG_KEYWORD = wx.stc.STC_ERLANG_KEYWORD
wxSTC_ERLANG_STRING = wx.stc.STC_ERLANG_STRING
wxSTC_ERLANG_OPERATOR = wx.stc.STC_ERLANG_OPERATOR
wxSTC_ERLANG_ATOM = wx.stc.STC_ERLANG_ATOM
wxSTC_ERLANG_FUNCTION_NAME = wx.stc.STC_ERLANG_FUNCTION_NAME
wxSTC_ERLANG_CHARACTER = wx.stc.STC_ERLANG_CHARACTER
wxSTC_ERLANG_MACRO = wx.stc.STC_ERLANG_MACRO
wxSTC_ERLANG_RECORD = wx.stc.STC_ERLANG_RECORD
wxSTC_ERLANG_SEPARATOR = wx.stc.STC_ERLANG_SEPARATOR
wxSTC_ERLANG_NODE_NAME = wx.stc.STC_ERLANG_NODE_NAME
wxSTC_ERLANG_UNKNOWN = wx.stc.STC_ERLANG_UNKNOWN
wxSTC_MSSQL_DEFAULT = wx.stc.STC_MSSQL_DEFAULT
wxSTC_MSSQL_COMMENT = wx.stc.STC_MSSQL_COMMENT
wxSTC_MSSQL_LINE_COMMENT = wx.stc.STC_MSSQL_LINE_COMMENT
wxSTC_MSSQL_NUMBER = wx.stc.STC_MSSQL_NUMBER
wxSTC_MSSQL_STRING = wx.stc.STC_MSSQL_STRING
wxSTC_MSSQL_OPERATOR = wx.stc.STC_MSSQL_OPERATOR
wxSTC_MSSQL_IDENTIFIER = wx.stc.STC_MSSQL_IDENTIFIER
wxSTC_MSSQL_VARIABLE = wx.stc.STC_MSSQL_VARIABLE
wxSTC_MSSQL_COLUMN_NAME = wx.stc.STC_MSSQL_COLUMN_NAME
wxSTC_MSSQL_STATEMENT = wx.stc.STC_MSSQL_STATEMENT
wxSTC_MSSQL_DATATYPE = wx.stc.STC_MSSQL_DATATYPE
wxSTC_MSSQL_SYSTABLE = wx.stc.STC_MSSQL_SYSTABLE
wxSTC_MSSQL_GLOBAL_VARIABLE = wx.stc.STC_MSSQL_GLOBAL_VARIABLE
wxSTC_MSSQL_FUNCTION = wx.stc.STC_MSSQL_FUNCTION
wxSTC_MSSQL_STORED_PROCEDURE = wx.stc.STC_MSSQL_STORED_PROCEDURE
wxSTC_MSSQL_DEFAULT_PREF_DATATYPE = wx.stc.STC_MSSQL_DEFAULT_PREF_DATATYPE
wxSTC_MSSQL_COLUMN_NAME_2 = wx.stc.STC_MSSQL_COLUMN_NAME_2
wxSTC_V_DEFAULT = wx.stc.STC_V_DEFAULT
wxSTC_V_COMMENT = wx.stc.STC_V_COMMENT
wxSTC_V_COMMENTLINE = wx.stc.STC_V_COMMENTLINE
wxSTC_V_COMMENTLINEBANG = wx.stc.STC_V_COMMENTLINEBANG
wxSTC_V_NUMBER = wx.stc.STC_V_NUMBER
wxSTC_V_WORD = wx.stc.STC_V_WORD
wxSTC_V_STRING = wx.stc.STC_V_STRING
wxSTC_V_WORD2 = wx.stc.STC_V_WORD2
wxSTC_V_WORD3 = wx.stc.STC_V_WORD3
wxSTC_V_PREPROCESSOR = wx.stc.STC_V_PREPROCESSOR
wxSTC_V_OPERATOR = wx.stc.STC_V_OPERATOR
wxSTC_V_IDENTIFIER = wx.stc.STC_V_IDENTIFIER
wxSTC_V_STRINGEOL = wx.stc.STC_V_STRINGEOL
wxSTC_V_USER = wx.stc.STC_V_USER
wxSTC_KIX_DEFAULT = wx.stc.STC_KIX_DEFAULT
wxSTC_KIX_COMMENT = wx.stc.STC_KIX_COMMENT
wxSTC_KIX_STRING1 = wx.stc.STC_KIX_STRING1
wxSTC_KIX_STRING2 = wx.stc.STC_KIX_STRING2
wxSTC_KIX_NUMBER = wx.stc.STC_KIX_NUMBER
wxSTC_KIX_VAR = wx.stc.STC_KIX_VAR
wxSTC_KIX_MACRO = wx.stc.STC_KIX_MACRO
wxSTC_KIX_KEYWORD = wx.stc.STC_KIX_KEYWORD
wxSTC_KIX_FUNCTIONS = wx.stc.STC_KIX_FUNCTIONS
wxSTC_KIX_OPERATOR = wx.stc.STC_KIX_OPERATOR
wxSTC_KIX_IDENTIFIER = wx.stc.STC_KIX_IDENTIFIER
wxSTC_GC_DEFAULT = wx.stc.STC_GC_DEFAULT
wxSTC_GC_COMMENTLINE = wx.stc.STC_GC_COMMENTLINE
wxSTC_GC_COMMENTBLOCK = wx.stc.STC_GC_COMMENTBLOCK
wxSTC_GC_GLOBAL = wx.stc.STC_GC_GLOBAL
wxSTC_GC_EVENT = wx.stc.STC_GC_EVENT
wxSTC_GC_ATTRIBUTE = wx.stc.STC_GC_ATTRIBUTE
wxSTC_GC_CONTROL = wx.stc.STC_GC_CONTROL
wxSTC_GC_COMMAND = wx.stc.STC_GC_COMMAND
wxSTC_GC_STRING = wx.stc.STC_GC_STRING
wxSTC_GC_OPERATOR = wx.stc.STC_GC_OPERATOR
wxSTC_SN_DEFAULT = wx.stc.STC_SN_DEFAULT
wxSTC_SN_CODE = wx.stc.STC_SN_CODE
wxSTC_SN_COMMENTLINE = wx.stc.STC_SN_COMMENTLINE
wxSTC_SN_COMMENTLINEBANG = wx.stc.STC_SN_COMMENTLINEBANG
wxSTC_SN_NUMBER = wx.stc.STC_SN_NUMBER
wxSTC_SN_WORD = wx.stc.STC_SN_WORD
wxSTC_SN_STRING = wx.stc.STC_SN_STRING
wxSTC_SN_WORD2 = wx.stc.STC_SN_WORD2
wxSTC_SN_WORD3 = wx.stc.STC_SN_WORD3
wxSTC_SN_PREPROCESSOR = wx.stc.STC_SN_PREPROCESSOR
wxSTC_SN_OPERATOR = wx.stc.STC_SN_OPERATOR
wxSTC_SN_IDENTIFIER = wx.stc.STC_SN_IDENTIFIER
wxSTC_SN_STRINGEOL = wx.stc.STC_SN_STRINGEOL
wxSTC_SN_REGEXTAG = wx.stc.STC_SN_REGEXTAG
wxSTC_SN_SIGNAL = wx.stc.STC_SN_SIGNAL
wxSTC_SN_USER = wx.stc.STC_SN_USER
wxSTC_AU3_DEFAULT = wx.stc.STC_AU3_DEFAULT
wxSTC_AU3_COMMENT = wx.stc.STC_AU3_COMMENT
wxSTC_AU3_COMMENTBLOCK = wx.stc.STC_AU3_COMMENTBLOCK
wxSTC_AU3_NUMBER = wx.stc.STC_AU3_NUMBER
wxSTC_AU3_FUNCTION = wx.stc.STC_AU3_FUNCTION
wxSTC_AU3_KEYWORD = wx.stc.STC_AU3_KEYWORD
wxSTC_AU3_MACRO = wx.stc.STC_AU3_MACRO
wxSTC_AU3_STRING = wx.stc.STC_AU3_STRING
wxSTC_AU3_OPERATOR = wx.stc.STC_AU3_OPERATOR
wxSTC_AU3_VARIABLE = wx.stc.STC_AU3_VARIABLE
wxSTC_AU3_SENT = wx.stc.STC_AU3_SENT
wxSTC_AU3_PREPROCESSOR = wx.stc.STC_AU3_PREPROCESSOR
wxSTC_AU3_SPECIAL = wx.stc.STC_AU3_SPECIAL
wxSTC_AU3_EXPAND = wx.stc.STC_AU3_EXPAND
wxSTC_AU3_COMOBJ = wx.stc.STC_AU3_COMOBJ
wxSTC_APDL_DEFAULT = wx.stc.STC_APDL_DEFAULT
wxSTC_APDL_COMMENT = wx.stc.STC_APDL_COMMENT
wxSTC_APDL_COMMENTBLOCK = wx.stc.STC_APDL_COMMENTBLOCK
wxSTC_APDL_NUMBER = wx.stc.STC_APDL_NUMBER
wxSTC_APDL_STRING = wx.stc.STC_APDL_STRING
wxSTC_APDL_OPERATOR = wx.stc.STC_APDL_OPERATOR
wxSTC_APDL_WORD = wx.stc.STC_APDL_WORD
wxSTC_APDL_PROCESSOR = wx.stc.STC_APDL_PROCESSOR
wxSTC_APDL_COMMAND = wx.stc.STC_APDL_COMMAND
wxSTC_APDL_SLASHCOMMAND = wx.stc.STC_APDL_SLASHCOMMAND
wxSTC_APDL_STARCOMMAND = wx.stc.STC_APDL_STARCOMMAND
wxSTC_APDL_ARGUMENT = wx.stc.STC_APDL_ARGUMENT
wxSTC_APDL_FUNCTION = wx.stc.STC_APDL_FUNCTION
wxSTC_SH_DEFAULT = wx.stc.STC_SH_DEFAULT
wxSTC_SH_ERROR = wx.stc.STC_SH_ERROR
wxSTC_SH_COMMENTLINE = wx.stc.STC_SH_COMMENTLINE
wxSTC_SH_NUMBER = wx.stc.STC_SH_NUMBER
wxSTC_SH_WORD = wx.stc.STC_SH_WORD
wxSTC_SH_STRING = wx.stc.STC_SH_STRING
wxSTC_SH_CHARACTER = wx.stc.STC_SH_CHARACTER
wxSTC_SH_OPERATOR = wx.stc.STC_SH_OPERATOR
wxSTC_SH_IDENTIFIER = wx.stc.STC_SH_IDENTIFIER
wxSTC_SH_SCALAR = wx.stc.STC_SH_SCALAR
wxSTC_SH_PARAM = wx.stc.STC_SH_PARAM
wxSTC_SH_BACKTICKS = wx.stc.STC_SH_BACKTICKS
wxSTC_SH_HERE_DELIM = wx.stc.STC_SH_HERE_DELIM
wxSTC_SH_HERE_Q = wx.stc.STC_SH_HERE_Q
wxSTC_ASN1_DEFAULT = wx.stc.STC_ASN1_DEFAULT
wxSTC_ASN1_COMMENT = wx.stc.STC_ASN1_COMMENT
wxSTC_ASN1_IDENTIFIER = wx.stc.STC_ASN1_IDENTIFIER
wxSTC_ASN1_STRING = wx.stc.STC_ASN1_STRING
wxSTC_ASN1_OID = wx.stc.STC_ASN1_OID
wxSTC_ASN1_SCALAR = wx.stc.STC_ASN1_SCALAR
wxSTC_ASN1_KEYWORD = wx.stc.STC_ASN1_KEYWORD
wxSTC_ASN1_ATTRIBUTE = wx.stc.STC_ASN1_ATTRIBUTE
wxSTC_ASN1_DESCRIPTOR = wx.stc.STC_ASN1_DESCRIPTOR
wxSTC_ASN1_TYPE = wx.stc.STC_ASN1_TYPE
wxSTC_ASN1_OPERATOR = wx.stc.STC_ASN1_OPERATOR
wxSTC_VHDL_DEFAULT = wx.stc.STC_VHDL_DEFAULT
wxSTC_VHDL_COMMENT = wx.stc.STC_VHDL_COMMENT
wxSTC_VHDL_COMMENTLINEBANG = wx.stc.STC_VHDL_COMMENTLINEBANG
wxSTC_VHDL_NUMBER = wx.stc.STC_VHDL_NUMBER
wxSTC_VHDL_STRING = wx.stc.STC_VHDL_STRING
wxSTC_VHDL_OPERATOR = wx.stc.STC_VHDL_OPERATOR
wxSTC_VHDL_IDENTIFIER = wx.stc.STC_VHDL_IDENTIFIER
wxSTC_VHDL_STRINGEOL = wx.stc.STC_VHDL_STRINGEOL
wxSTC_VHDL_KEYWORD = wx.stc.STC_VHDL_KEYWORD
wxSTC_VHDL_STDOPERATOR = wx.stc.STC_VHDL_STDOPERATOR
wxSTC_VHDL_ATTRIBUTE = wx.stc.STC_VHDL_ATTRIBUTE
wxSTC_VHDL_STDFUNCTION = wx.stc.STC_VHDL_STDFUNCTION
wxSTC_VHDL_STDPACKAGE = wx.stc.STC_VHDL_STDPACKAGE
wxSTC_VHDL_STDTYPE = wx.stc.STC_VHDL_STDTYPE
wxSTC_VHDL_USERWORD = wx.stc.STC_VHDL_USERWORD
wxSTC_CAML_DEFAULT = wx.stc.STC_CAML_DEFAULT
wxSTC_CAML_IDENTIFIER = wx.stc.STC_CAML_IDENTIFIER
wxSTC_CAML_TAGNAME = wx.stc.STC_CAML_TAGNAME
wxSTC_CAML_KEYWORD = wx.stc.STC_CAML_KEYWORD
wxSTC_CAML_KEYWORD2 = wx.stc.STC_CAML_KEYWORD2
wxSTC_CAML_KEYWORD3 = wx.stc.STC_CAML_KEYWORD3
wxSTC_CAML_LINENUM = wx.stc.STC_CAML_LINENUM
wxSTC_CAML_OPERATOR = wx.stc.STC_CAML_OPERATOR
wxSTC_CAML_NUMBER = wx.stc.STC_CAML_NUMBER
wxSTC_CAML_CHAR = wx.stc.STC_CAML_CHAR
wxSTC_CAML_STRING = wx.stc.STC_CAML_STRING
wxSTC_CAML_COMMENT = wx.stc.STC_CAML_COMMENT
wxSTC_CAML_COMMENT1 = wx.stc.STC_CAML_COMMENT1
wxSTC_CAML_COMMENT2 = wx.stc.STC_CAML_COMMENT2
wxSTC_CAML_COMMENT3 = wx.stc.STC_CAML_COMMENT3
wxSTC_HA_DEFAULT = wx.stc.STC_HA_DEFAULT
wxSTC_HA_IDENTIFIER = wx.stc.STC_HA_IDENTIFIER
wxSTC_HA_KEYWORD = wx.stc.STC_HA_KEYWORD
wxSTC_HA_NUMBER = wx.stc.STC_HA_NUMBER
wxSTC_HA_STRING = wx.stc.STC_HA_STRING
wxSTC_HA_CHARACTER = wx.stc.STC_HA_CHARACTER
wxSTC_HA_CLASS = wx.stc.STC_HA_CLASS
wxSTC_HA_MODULE = wx.stc.STC_HA_MODULE
wxSTC_HA_CAPITAL = wx.stc.STC_HA_CAPITAL
wxSTC_HA_DATA = wx.stc.STC_HA_DATA
wxSTC_HA_IMPORT = wx.stc.STC_HA_IMPORT
wxSTC_HA_OPERATOR = wx.stc.STC_HA_OPERATOR
wxSTC_HA_INSTANCE = wx.stc.STC_HA_INSTANCE
wxSTC_HA_COMMENTLINE = wx.stc.STC_HA_COMMENTLINE
wxSTC_HA_COMMENTBLOCK = wx.stc.STC_HA_COMMENTBLOCK
wxSTC_HA_COMMENTBLOCK2 = wx.stc.STC_HA_COMMENTBLOCK2
wxSTC_HA_COMMENTBLOCK3 = wx.stc.STC_HA_COMMENTBLOCK3
wxSTC_T3_DEFAULT = wx.stc.STC_T3_DEFAULT
wxSTC_T3_X_DEFAULT = wx.stc.STC_T3_X_DEFAULT
wxSTC_T3_PREPROCESSOR = wx.stc.STC_T3_PREPROCESSOR
wxSTC_T3_BLOCK_COMMENT = wx.stc.STC_T3_BLOCK_COMMENT
wxSTC_T3_LINE_COMMENT = wx.stc.STC_T3_LINE_COMMENT
wxSTC_T3_OPERATOR = wx.stc.STC_T3_OPERATOR
wxSTC_T3_KEYWORD = wx.stc.STC_T3_KEYWORD
wxSTC_T3_NUMBER = wx.stc.STC_T3_NUMBER
wxSTC_T3_IDENTIFIER = wx.stc.STC_T3_IDENTIFIER
wxSTC_T3_S_STRING = wx.stc.STC_T3_S_STRING
wxSTC_T3_D_STRING = wx.stc.STC_T3_D_STRING
wxSTC_T3_X_STRING = wx.stc.STC_T3_X_STRING
wxSTC_T3_LIB_DIRECTIVE = wx.stc.STC_T3_LIB_DIRECTIVE
wxSTC_T3_MSG_PARAM = wx.stc.STC_T3_MSG_PARAM
wxSTC_T3_HTML_TAG = wx.stc.STC_T3_HTML_TAG
wxSTC_T3_HTML_DEFAULT = wx.stc.STC_T3_HTML_DEFAULT
wxSTC_T3_HTML_STRING = wx.stc.STC_T3_HTML_STRING
wxSTC_T3_USER1 = wx.stc.STC_T3_USER1
wxSTC_T3_USER2 = wx.stc.STC_T3_USER2
wxSTC_T3_USER3 = wx.stc.STC_T3_USER3
wxSTC_REBOL_DEFAULT = wx.stc.STC_REBOL_DEFAULT
wxSTC_REBOL_COMMENTLINE = wx.stc.STC_REBOL_COMMENTLINE
wxSTC_REBOL_COMMENTBLOCK = wx.stc.STC_REBOL_COMMENTBLOCK
wxSTC_REBOL_PREFACE = wx.stc.STC_REBOL_PREFACE
wxSTC_REBOL_OPERATOR = wx.stc.STC_REBOL_OPERATOR
wxSTC_REBOL_CHARACTER = wx.stc.STC_REBOL_CHARACTER
wxSTC_REBOL_QUOTEDSTRING = wx.stc.STC_REBOL_QUOTEDSTRING
wxSTC_REBOL_BRACEDSTRING = wx.stc.STC_REBOL_BRACEDSTRING
wxSTC_REBOL_NUMBER = wx.stc.STC_REBOL_NUMBER
wxSTC_REBOL_PAIR = wx.stc.STC_REBOL_PAIR
wxSTC_REBOL_TUPLE = wx.stc.STC_REBOL_TUPLE
wxSTC_REBOL_BINARY = wx.stc.STC_REBOL_BINARY
wxSTC_REBOL_MONEY = wx.stc.STC_REBOL_MONEY
wxSTC_REBOL_ISSUE = wx.stc.STC_REBOL_ISSUE
wxSTC_REBOL_TAG = wx.stc.STC_REBOL_TAG
wxSTC_REBOL_FILE = wx.stc.STC_REBOL_FILE
wxSTC_REBOL_EMAIL = wx.stc.STC_REBOL_EMAIL
wxSTC_REBOL_URL = wx.stc.STC_REBOL_URL
wxSTC_REBOL_DATE = wx.stc.STC_REBOL_DATE
wxSTC_REBOL_TIME = wx.stc.STC_REBOL_TIME
wxSTC_REBOL_IDENTIFIER = wx.stc.STC_REBOL_IDENTIFIER
wxSTC_REBOL_WORD = wx.stc.STC_REBOL_WORD
wxSTC_REBOL_WORD2 = wx.stc.STC_REBOL_WORD2
wxSTC_REBOL_WORD3 = wx.stc.STC_REBOL_WORD3
wxSTC_REBOL_WORD4 = wx.stc.STC_REBOL_WORD4
wxSTC_REBOL_WORD5 = wx.stc.STC_REBOL_WORD5
wxSTC_REBOL_WORD6 = wx.stc.STC_REBOL_WORD6
wxSTC_REBOL_WORD7 = wx.stc.STC_REBOL_WORD7
wxSTC_REBOL_WORD8 = wx.stc.STC_REBOL_WORD8
wxSTC_SQL_DEFAULT = wx.stc.STC_SQL_DEFAULT
wxSTC_SQL_COMMENT = wx.stc.STC_SQL_COMMENT
wxSTC_SQL_COMMENTLINE = wx.stc.STC_SQL_COMMENTLINE
wxSTC_SQL_COMMENTDOC = wx.stc.STC_SQL_COMMENTDOC
wxSTC_SQL_NUMBER = wx.stc.STC_SQL_NUMBER
wxSTC_SQL_WORD = wx.stc.STC_SQL_WORD
wxSTC_SQL_STRING = wx.stc.STC_SQL_STRING
wxSTC_SQL_CHARACTER = wx.stc.STC_SQL_CHARACTER
wxSTC_SQL_SQLPLUS = wx.stc.STC_SQL_SQLPLUS
wxSTC_SQL_SQLPLUS_PROMPT = wx.stc.STC_SQL_SQLPLUS_PROMPT
wxSTC_SQL_OPERATOR = wx.stc.STC_SQL_OPERATOR
wxSTC_SQL_IDENTIFIER = wx.stc.STC_SQL_IDENTIFIER
wxSTC_SQL_SQLPLUS_COMMENT = wx.stc.STC_SQL_SQLPLUS_COMMENT
wxSTC_SQL_COMMENTLINEDOC = wx.stc.STC_SQL_COMMENTLINEDOC
wxSTC_SQL_WORD2 = wx.stc.STC_SQL_WORD2
wxSTC_SQL_COMMENTDOCKEYWORD = wx.stc.STC_SQL_COMMENTDOCKEYWORD
wxSTC_SQL_COMMENTDOCKEYWORDERROR = wx.stc.STC_SQL_COMMENTDOCKEYWORDERROR
wxSTC_SQL_USER1 = wx.stc.STC_SQL_USER1
wxSTC_SQL_USER2 = wx.stc.STC_SQL_USER2
wxSTC_SQL_USER3 = wx.stc.STC_SQL_USER3
wxSTC_SQL_USER4 = wx.stc.STC_SQL_USER4
wxSTC_SQL_QUOTEDIDENTIFIER = wx.stc.STC_SQL_QUOTEDIDENTIFIER
wxSTC_ST_DEFAULT = wx.stc.STC_ST_DEFAULT
wxSTC_ST_STRING = wx.stc.STC_ST_STRING
wxSTC_ST_NUMBER = wx.stc.STC_ST_NUMBER
wxSTC_ST_COMMENT = wx.stc.STC_ST_COMMENT
wxSTC_ST_SYMBOL = wx.stc.STC_ST_SYMBOL
wxSTC_ST_BINARY = wx.stc.STC_ST_BINARY
wxSTC_ST_BOOL = wx.stc.STC_ST_BOOL
wxSTC_ST_SELF = wx.stc.STC_ST_SELF
wxSTC_ST_SUPER = wx.stc.STC_ST_SUPER
wxSTC_ST_NIL = wx.stc.STC_ST_NIL
wxSTC_ST_GLOBAL = wx.stc.STC_ST_GLOBAL
wxSTC_ST_RETURN = wx.stc.STC_ST_RETURN
wxSTC_ST_SPECIAL = wx.stc.STC_ST_SPECIAL
wxSTC_ST_KWSEND = wx.stc.STC_ST_KWSEND
wxSTC_ST_ASSIGN = wx.stc.STC_ST_ASSIGN
wxSTC_ST_CHARACTER = wx.stc.STC_ST_CHARACTER
wxSTC_ST_SPEC_SEL = wx.stc.STC_ST_SPEC_SEL
wxSTC_FS_DEFAULT = wx.stc.STC_FS_DEFAULT
wxSTC_FS_COMMENT = wx.stc.STC_FS_COMMENT
wxSTC_FS_COMMENTLINE = wx.stc.STC_FS_COMMENTLINE
wxSTC_FS_COMMENTDOC = wx.stc.STC_FS_COMMENTDOC
wxSTC_FS_COMMENTLINEDOC = wx.stc.STC_FS_COMMENTLINEDOC
wxSTC_FS_COMMENTDOCKEYWORD = wx.stc.STC_FS_COMMENTDOCKEYWORD
wxSTC_FS_COMMENTDOCKEYWORDERROR = wx.stc.STC_FS_COMMENTDOCKEYWORDERROR
wxSTC_FS_KEYWORD = wx.stc.STC_FS_KEYWORD
wxSTC_FS_KEYWORD2 = wx.stc.STC_FS_KEYWORD2
wxSTC_FS_KEYWORD3 = wx.stc.STC_FS_KEYWORD3
wxSTC_FS_KEYWORD4 = wx.stc.STC_FS_KEYWORD4
wxSTC_FS_NUMBER = wx.stc.STC_FS_NUMBER
wxSTC_FS_STRING = wx.stc.STC_FS_STRING
wxSTC_FS_PREPROCESSOR = wx.stc.STC_FS_PREPROCESSOR
wxSTC_FS_OPERATOR = wx.stc.STC_FS_OPERATOR
wxSTC_FS_IDENTIFIER = wx.stc.STC_FS_IDENTIFIER
wxSTC_FS_DATE = wx.stc.STC_FS_DATE
wxSTC_FS_STRINGEOL = wx.stc.STC_FS_STRINGEOL
wxSTC_FS_CONSTANT = wx.stc.STC_FS_CONSTANT
wxSTC_FS_ASM = wx.stc.STC_FS_ASM
wxSTC_FS_LABEL = wx.stc.STC_FS_LABEL
wxSTC_FS_ERROR = wx.stc.STC_FS_ERROR
wxSTC_FS_HEXNUMBER = wx.stc.STC_FS_HEXNUMBER
wxSTC_FS_BINNUMBER = wx.stc.STC_FS_BINNUMBER
wxSTC_CSOUND_DEFAULT = wx.stc.STC_CSOUND_DEFAULT
wxSTC_CSOUND_COMMENT = wx.stc.STC_CSOUND_COMMENT
wxSTC_CSOUND_NUMBER = wx.stc.STC_CSOUND_NUMBER
wxSTC_CSOUND_OPERATOR = wx.stc.STC_CSOUND_OPERATOR
wxSTC_CSOUND_INSTR = wx.stc.STC_CSOUND_INSTR
wxSTC_CSOUND_IDENTIFIER = wx.stc.STC_CSOUND_IDENTIFIER
wxSTC_CSOUND_OPCODE = wx.stc.STC_CSOUND_OPCODE
wxSTC_CSOUND_HEADERSTMT = wx.stc.STC_CSOUND_HEADERSTMT
wxSTC_CSOUND_USERKEYWORD = wx.stc.STC_CSOUND_USERKEYWORD
wxSTC_CSOUND_COMMENTBLOCK = wx.stc.STC_CSOUND_COMMENTBLOCK
wxSTC_CSOUND_PARAM = wx.stc.STC_CSOUND_PARAM
wxSTC_CSOUND_ARATE_VAR = wx.stc.STC_CSOUND_ARATE_VAR
wxSTC_CSOUND_KRATE_VAR = wx.stc.STC_CSOUND_KRATE_VAR
wxSTC_CSOUND_IRATE_VAR = wx.stc.STC_CSOUND_IRATE_VAR
wxSTC_CSOUND_GLOBAL_VAR = wx.stc.STC_CSOUND_GLOBAL_VAR
wxSTC_CSOUND_STRINGEOL = wx.stc.STC_CSOUND_STRINGEOL
wxSTC_CMD_REDO = wx.stc.STC_CMD_REDO
wxSTC_CMD_SELECTALL = wx.stc.STC_CMD_SELECTALL
wxSTC_CMD_UNDO = wx.stc.STC_CMD_UNDO
wxSTC_CMD_CUT = wx.stc.STC_CMD_CUT
wxSTC_CMD_COPY = wx.stc.STC_CMD_COPY
wxSTC_CMD_PASTE = wx.stc.STC_CMD_PASTE
wxSTC_CMD_CLEAR = wx.stc.STC_CMD_CLEAR
wxSTC_CMD_LINEDOWN = wx.stc.STC_CMD_LINEDOWN
wxSTC_CMD_LINEDOWNEXTEND = wx.stc.STC_CMD_LINEDOWNEXTEND
wxSTC_CMD_LINEUP = wx.stc.STC_CMD_LINEUP
wxSTC_CMD_LINEUPEXTEND = wx.stc.STC_CMD_LINEUPEXTEND
wxSTC_CMD_CHARLEFT = wx.stc.STC_CMD_CHARLEFT
wxSTC_CMD_CHARLEFTEXTEND = wx.stc.STC_CMD_CHARLEFTEXTEND
wxSTC_CMD_CHARRIGHT = wx.stc.STC_CMD_CHARRIGHT
wxSTC_CMD_CHARRIGHTEXTEND = wx.stc.STC_CMD_CHARRIGHTEXTEND
wxSTC_CMD_WORDLEFT = wx.stc.STC_CMD_WORDLEFT
wxSTC_CMD_WORDLEFTEXTEND = wx.stc.STC_CMD_WORDLEFTEXTEND
wxSTC_CMD_WORDRIGHT = wx.stc.STC_CMD_WORDRIGHT
wxSTC_CMD_WORDRIGHTEXTEND = wx.stc.STC_CMD_WORDRIGHTEXTEND
wxSTC_CMD_HOME = wx.stc.STC_CMD_HOME
wxSTC_CMD_HOMEEXTEND = wx.stc.STC_CMD_HOMEEXTEND
wxSTC_CMD_LINEEND = wx.stc.STC_CMD_LINEEND
wxSTC_CMD_LINEENDEXTEND = wx.stc.STC_CMD_LINEENDEXTEND
wxSTC_CMD_DOCUMENTSTART = wx.stc.STC_CMD_DOCUMENTSTART
wxSTC_CMD_DOCUMENTSTARTEXTEND = wx.stc.STC_CMD_DOCUMENTSTARTEXTEND
wxSTC_CMD_DOCUMENTEND = wx.stc.STC_CMD_DOCUMENTEND
wxSTC_CMD_DOCUMENTENDEXTEND = wx.stc.STC_CMD_DOCUMENTENDEXTEND
wxSTC_CMD_PAGEUP = wx.stc.STC_CMD_PAGEUP
wxSTC_CMD_PAGEUPEXTEND = wx.stc.STC_CMD_PAGEUPEXTEND
wxSTC_CMD_PAGEDOWN = wx.stc.STC_CMD_PAGEDOWN
wxSTC_CMD_PAGEDOWNEXTEND = wx.stc.STC_CMD_PAGEDOWNEXTEND
wxSTC_CMD_EDITTOGGLEOVERTYPE = wx.stc.STC_CMD_EDITTOGGLEOVERTYPE
wxSTC_CMD_CANCEL = wx.stc.STC_CMD_CANCEL
wxSTC_CMD_DELETEBACK = wx.stc.STC_CMD_DELETEBACK
wxSTC_CMD_TAB = wx.stc.STC_CMD_TAB
wxSTC_CMD_BACKTAB = wx.stc.STC_CMD_BACKTAB
wxSTC_CMD_NEWLINE = wx.stc.STC_CMD_NEWLINE
wxSTC_CMD_FORMFEED = wx.stc.STC_CMD_FORMFEED
wxSTC_CMD_VCHOME = wx.stc.STC_CMD_VCHOME
wxSTC_CMD_VCHOMEEXTEND = wx.stc.STC_CMD_VCHOMEEXTEND
wxSTC_CMD_ZOOMIN = wx.stc.STC_CMD_ZOOMIN
wxSTC_CMD_ZOOMOUT = wx.stc.STC_CMD_ZOOMOUT
wxSTC_CMD_DELWORDLEFT = wx.stc.STC_CMD_DELWORDLEFT
wxSTC_CMD_DELWORDRIGHT = wx.stc.STC_CMD_DELWORDRIGHT
wxSTC_CMD_LINECUT = wx.stc.STC_CMD_LINECUT
wxSTC_CMD_LINEDELETE = wx.stc.STC_CMD_LINEDELETE
wxSTC_CMD_LINETRANSPOSE = wx.stc.STC_CMD_LINETRANSPOSE
wxSTC_CMD_LINEDUPLICATE = wx.stc.STC_CMD_LINEDUPLICATE
wxSTC_CMD_LOWERCASE = wx.stc.STC_CMD_LOWERCASE
wxSTC_CMD_UPPERCASE = wx.stc.STC_CMD_UPPERCASE
wxSTC_CMD_LINESCROLLDOWN = wx.stc.STC_CMD_LINESCROLLDOWN
wxSTC_CMD_LINESCROLLUP = wx.stc.STC_CMD_LINESCROLLUP
wxSTC_CMD_DELETEBACKNOTLINE = wx.stc.STC_CMD_DELETEBACKNOTLINE
wxSTC_CMD_HOMEDISPLAY = wx.stc.STC_CMD_HOMEDISPLAY
wxSTC_CMD_HOMEDISPLAYEXTEND = wx.stc.STC_CMD_HOMEDISPLAYEXTEND
wxSTC_CMD_LINEENDDISPLAY = wx.stc.STC_CMD_LINEENDDISPLAY
wxSTC_CMD_LINEENDDISPLAYEXTEND = wx.stc.STC_CMD_LINEENDDISPLAYEXTEND
wxSTC_CMD_HOMEWRAP = wx.stc.STC_CMD_HOMEWRAP
wxSTC_CMD_HOMEWRAPEXTEND = wx.stc.STC_CMD_HOMEWRAPEXTEND
#
# Copyright (c) 2022 <NAME>
#
# MIT License - See LICENSE file accompanying this package.
#
"""
Implementation of CloudInitDoc, a container that can render cloud-init
user-data documents that are single-part or multi-part.
"""
from base64 import b64encode
from typing import Optional, List, Union
from io import BytesIO
import gzip
from .typehints import JsonableDict
from .exceptions import CloudInitGenError
from .renderable import CloudInitRenderable
from .part import (
CloudInitPart,
CloudInitPartConvertible,
MimeHeadersConvertible
)
GZIP_FIXED_MTIME: float = 0.0
"""A fixed mktime() value that is used for the timestamp when gzipping cloud-init data.
This makes the rendered data deterministic and stable, which helps keep infrastructure
automation tools like terraform and Pulumi from needlessly updating cloud instances. """
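# Illustrative sketch (not part of the original module): a fixed mtime such as
# GZIP_FIXED_MTIME is what gets passed to gzip so that compressing identical
# input always yields byte-identical output, e.g.:
#
#     buf = BytesIO()
#     with gzip.GzipFile(fileobj=buf, mode='wb', mtime=GZIP_FIXED_MTIME) as gz:
#         gz.write(b"#cloud-config\n")
#     deterministic_bytes = buf.getvalue()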
CloudInitDocConvertible = Optional[
Union[
CloudInitRenderable,
str,
bytes,
JsonableDict,
]
]
"""Type hint for values that can be used as initialization content for a CloudInitDoc"""
class CloudInitDoc(CloudInitRenderable):
"""
A container for a complete cloud-init user-data document, which may consist of
zero or more CloudInitPart objects, or can be a raw bytes value to
be passed directly to cloud-init.
"""
parts: List[CloudInitRenderable]
"""If raw_binary is None, a List of renderable parts to be rendered. An empty list
indicates an empty/null user-data document. A list with a single item
results in that item being directly rendered. A list with more than
one item is rendered as a multipart MIME document. Ignored if
raw_binary is not None."""
raw_binary: Optional[bytes]=None
"""If not None, a raw binary encoding of the entire user-data document,
which can be passed directly to cloud-init. This field exists only
so that users can choose to render user-data themselves, and still
pass the result to an API that expects CloudInitDoc."""
def __init__(
self,
content: CloudInitDocConvertible=None,
mime_type: Optional[str]=None,
headers: MimeHeadersConvertible=None
):
"""Create a container for a complete cloud-init user-data document,
which may consist of zero or more CloudInitPart objects, or can
be a raw bytes value to be passed directly to cloud-init.
Args:
content (CloudInitDocConvertible, optional):
If None, an empty document is created--parts can be added before rendering with add().
If another CloudInitDoc object, creates a clone of that object.
If a bytes value, then this parameter is directly used for final raw binary
rendering of the document (This option exists only
so that users can choose to render user-data themselves, and still
pass the result to an API that expects CloudInitDoc).
Otherwise, causes a single part to be immediately added to the document
as if add() had been called. Included for convenience in creating single-part
documents, which is common. Defaults to None.
mime_type (Optional[str], optional):
If content is not None and not bytes, as described for add(). Ignored if content is None. Defaults to None.
headers (MimeHeadersConvertible, optional):
If content is not None and not bytes, as described for add(). Ignored if content is None. Defaults to None.
Raises:
CloudInitGenError: An error occurred building the first part of the document.
"""
self.parts = []
if not content is None:
if isinstance(content, CloudInitDoc):
self.parts = content.parts[:]
self.raw_binary = content.raw_binary
elif isinstance(content, bytes):
if len(content) > 16383:
raise CloudInitGenError(f"raw binary user data too big: {len(content)}")
self.raw_binary = content
else:
self.add(content, mime_type=mime_type, headers=headers)
def add(self,
content: Union[CloudInitRenderable, CloudInitPartConvertible],
mime_type: Optional[str]=None,
headers: MimeHeadersConvertible=None
):
"""Add a single renderable part of a potentially multi-part cloud-init document.
Args:
content (Optional[Union[CloudInitPart, str, JsonableDict]]):
The content to be rendered for the part. If mime_type is None, this can be:
1. None, indicating this is a null part. No action will be taken.
2. A string beginning with "#". The first line is interpreted as
a cloud-init comment header that identifies the type. The remainder
becomes the content of the part (for shebang-style parts the comment
line is also left in the part's content).
3. A string beginning with "Content-Type:" or "MIME-Version:". The string
is parsed as a MIME document with embedded headers. The headers in the
document are merged with and override any headers passed to this constructor.
The MIME type of the part is obtained from the "Content-Type" header, and
the payload becomes the part's content.
4. A JsonableDict. The content is converted to YAML and the MIME type is
set to "text/cloud-config". This is a common type of input to cloud-init.
5. A CloudInitRenderable that has already been initialized. The item is
directly added as a part.
If mime_type is not None, then content may be:
1. A string. The string will be used as the content of the part without further
interpretation.
2. A JsonableDict. The dict is converted to YAML, and the YAML string is used
as the part's content.
mime_type (Optional[str], optional):
The full MIME type of the part, or None to infer the MIME type from the
content argument, as described above. Defaults to None.
headers (MimeHeadersConvertible, optional):
An optional ordered dict of MIME headers to associate with the part. Content-Type
and MIME-Version are explicitly removed from this dict and handled specially. Any
additional headers will be included in the rendering of this part if MIME
rendering is selected. If comment-header rendering is selected, the headers are
discarded. Defaults to None.
Raises:
CloudInitGenError: An attempt was made to add a part to a document that was created with raw_binary
CloudInitGenError: An error occurred building the part
"""
if not content is None:
if not self.raw_binary is None:
raise CloudInitGenError(f"Cannot add parts to CloudInitDoc initialized with raw binary payload")
if not isinstance(content, CloudInitRenderable):
content = CloudInitPart(content, mime_type=mime_type, headers=headers)
if not content.is_null_content():
self.parts.append(content)
def is_null_content(self) -> bool:
"""Return True if this is a null document
Returns:
bool: True if rendering this document will return None
"""
return self.raw_binary is None and len(self.parts) == 0
def render(
self,
include_mime_version: bool=True,
force_mime: bool=False,
include_from: bool=False
) -> Optional[str]:
"""Renders the entire cloudinit user-data document to a string suitable for passing
to cloud-init directly. For single-part documents, renders them directly. For
multi-part documents, wraps the parts in a multipart MIME encoding.
Args:
include_mime_version (bool, optional):
True if a MIME-Version header should be included.
Ignored if a single-part document and comment-style
headers are selected. Note that cloud-init REQUIRES
this header for the outermost MIME document, so for
compatibility it should be left at True. Defaults to True.
force_mime (bool, optional): If True, MIME-style headers will be used.
By default, a comment-style header will be used if this
is a single-part document and there is an appropriate
comment header for the single part's MIME type.
Defaults to False.
include_from (bool, optional): If True, any 'From' header associated with
the part will be included; otherwise it will be stripped.
Defaults to False. This parameter is included as part of
CloudInitRenderable interface, but it has no effect on
CloudInitDoc.
Returns:
Optional[str]: The entire document rendered as a string suitable for passing to cloud-init
directly, or None if this is a null/empty document (with zero parts). If
raw_binary was provided at construction time, then that value
is simply decoded as UTF-8.
"""
result: Optional[str]
if self.raw_binary is None:
if not len(self.parts) > 0 and not include_mime_version:
raise CloudInitGenError("include_mime_version MUST be True for the outermost cloud_init_data part")
if len(self.parts) == 0:
result = None
elif len(self.parts) == 1:
result = self.parts[0].render(include_mime_version=include_mime_version, force_mime=force_mime)
else:
# Parts of a multi-part document are forced into MIME mode
rendered_parts = [ part.render(force_mime=True, include_mime_version=False) for part in self.parts ]
# Find a unique boundary string that is not in any of the rendered parts
unique = 0
while True:
boundary = f'::{unique}::'
for rp in rendered_parts:
assert not rp is None
if boundary in rp:
break
else:
break
unique += 1
result = f'Content-Type: multipart/mixed; boundary="{boundary}"\n'
if include_mime_version:
result += 'MIME-Version: 1.0\n'
result += '\n'
for rp in rendered_parts:
result += f"--{boundary}\n{rp}\n"
result += f"--{boundary}--\n"
else:
result = self.raw_binary.decode('utf-8')
return result
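# Minimal usage sketch (illustrative only; the part contents below are
# placeholder examples, not taken from the original module):
#
#     doc = CloudInitDoc()
#     doc.add({"packages": ["git"]})          # dict part -> text/cloud-config
#     doc.add("#!/bin/sh\necho hello\n")      # shebang-style comment header part
#     user_data = doc.render()                # multipart MIME, since len(parts) > 1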
def render_binary(self, include_mime_version: bool=True) -> Optional[bytes]:
"""Renders the entire cloudinit user-data document to a binary bytes buffer suitable for passing
to cloud-init directly. For single-part documents, renders them directly. For
multi-part documents, wraps the parts in a multipart MIME encoding.
given user.
Parameters
----------
conn : Connection
The database connection to use.
user : DBUser or int
The linked user to search for. May be a `DBUser` object or an `int`
referring to a user ID.
"""
try:
user_id = user.id
except AttributeError:
user_id = int(user)
async with conn.cursor() as cur:
await cur.execute(
f'SELECT * FROM {cls.table_name} WHERE user_id = ?',
(user_id,))
row = await cur.fetchone()
if row is None:
return None
if not isinstance(user, DBUser):
user = await DBUser.from_id(conn, row['user_id'])
return cls.from_row(conn, row, user)
async def fetch_user(self) -> DBUser:
"""Fetch the database user linked to this participant.
Sets `self.user` to the fetched `DBUser` and returns it.
"""
if self.user_id is None:
raise ValueError('Participant has no linked user.')
self.user = await DBUser.from_id(self._connection, self.user_id)
return self.user
async def save(self):
"""Save this `Participant` to the database."""
async with self._connection.cursor() as cur:
await cur.execute(f"""
INSERT INTO {self.table_name} VALUES (?, ?, ?)
ON CONFLICT (participant_id) DO UPDATE SET
name = excluded.name,
user_id = excluded.user_id
""", (self.id, self.name, self.user_id))
self.id = cur.lastrowid
await self._connection.commit()
async def get_participant(conn: Connection, name: str,
ignore_case: bool = True) -> Participant:
"""Get an existing `Participant` or create a new one.
This is a convenient and generalized function for ZeroBot modules that
enables the most common actions related to database participants: lookup of
existing participants and the creation of new ones.
Parameters
----------
conn : Connection
The database connection to use.
name : str
The name to look up; usually from a message source or command argument.
ignore_case : bool, optional
Ignore case even for aliases marked as case-sensitive. ``True`` by
default.
Returns
-------
Participant
An existing participant matching `name` or a totally new one if there
were no matches for `name`.
Notes
-----
ZeroBot's Core defines triggers that, along with foreign key constraints,
prevents Users and Participants from becoming inconsistent. As long as
these measures are not circumvented, you shouldn't need to worry about any
user/participant discrepencies, getting a `Participant` without its
associated `DBUser`, having a user with no associated participant, etc.
"""
if name is None:
return None
if name.strip() == '':
raise ValueError('Name is empty or whitespace')
if ignore_case:
criteria = 'lower(pan.name) = lower(?1)'
else:
criteria = ('pan.name = ?1 OR case_sensitive = 0 '
'AND lower(pan.name) = lower(?1)')
async with conn.cursor() as cur:
await cur.execute(f"""
SELECT participant_id, name, user_id
FROM participants_all_names AS "pan"
WHERE {criteria}
""", (name,))
row = await cur.fetchone()
if row is None:
# Create a new Participant
participant = Participant(conn, None, name)
await participant.save()
else:
participant = Participant.from_row(conn, row)
try:
await participant.fetch_user()
except ValueError: # user_id was NULL
pass
return participant
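# Illustrative usage sketch (the connection and name are placeholders, not
# taken from the surrounding code); a feature module would typically pass the
# sender of a message:
#
#     participant = await get_participant(conn, "Alice")
#     print(participant.name, participant.user_id)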
async def find_participant(conn: Connection, pattern: str,
case_sensitive: bool = False) -> Optional[Participant]:
"""Return the first Participant that matches `pattern`.
Parameters
----------
conn : Connection
The database connection to use.
pattern : str
A regular expression to match participants.
case_sensitive : bool, optional
Whether or not the pattern should be case sensitive. Defaults to
`False`.
"""
if pattern is None:
return None
# The `m` flag is included because of the use of
# `group_concat(name, char(10))` in queries needing to match aliases.
re_flags = 'm' if case_sensitive else 'mi'
pattern = f'(?{re_flags}:{pattern})'
async with conn.cursor() as cur:
await cur.execute(f"""
SELECT participant_id,
group_concat(name, char(10)) AS "name_list"
FROM participants_all_names
GROUP BY participant_id
HAVING name_list REGEXP ?
""", (pattern,))
row = await cur.fetchone()
if row is None:
return None
return await Participant.from_id(conn, row[0])
async def get_user(conn: Connection, name: str,
ignore_case: bool = True) -> Optional[DBUser]:
"""Get an existing user by their name or an alias.
This is a convenient and generalized function for ZeroBot modules that
facilitates simple user lookup. The given `name` will match against both
canonical user names and any aliases associated with a user, taking alias
case sensitivity into account.
Parameters
----------
conn : Connection
The database connection to use.
name : str
The name to look up; usually from a message source or command argument.
ignore_case : bool, optional
Ignore case even for aliases marked as case-sensitive. ``True`` by
default.
Returns
-------
Optional[DBUser]
The matched user or ``None`` if there were no matches for `name`.
"""
if name.strip() == '':
raise ValueError('Name is empty or whitespace')
if ignore_case:
criteria = 'lower(name) = lower(?1)'
else:
criteria = ('name = ?1 OR case_sensitive = 0 '
'AND lower(name) = lower(?1)')
user = None
async with conn.cursor() as cur:
await cur.execute(f"""
SELECT user_id FROM users_all_names WHERE {criteria}
""", (name,))
row = await cur.fetchone()
if row is not None:
user = await DBUser.from_id(conn, row[0])
return user
class Source(DBModel):
"""A specific combination of protocol, server, and channel.
A source is a particular combination of a protocol, server, and channel
where something (typically a message) originated. ZeroBot modules should
create and reference sources when possible as they can be used by all
feature modules in a centralized manner, as opposed to each module creating
its own unique channel and/or server references.
Parameters
----------
source_id : int
The source's ID.
protocol : str
A protocol identifier, e.g. ``discord``.
server : str, optional
The name of a server on the given `protocol`.
channel : str, optional
The name of a channel on the given `server`.
Attributes
----------
id
"""
table_name = 'sources'
def __init__(self, conn: Connection, source_id: int, protocol: str,
server: str = None, channel: str = None):
super().__init__(conn)
self.id = source_id
if protocol is not None:
self.protocol = protocol
else:
raise TypeError('protocol cannot be None')
self.server = server
self.channel = channel
def __repr__(self):
attrs = ['id', 'protocol', 'server', 'channel']
repr_str = ' '.join(f'{a}={getattr(self, a)!r}' for a in attrs)
return f'<{self.__class__.__name__} {repr_str}>'
def __str__(self):
return ', '.join(
attr for attr in (self.server, self.channel) if attr is not None)
def __eq__(self, other):
return self.id == other.id
@classmethod
def from_row(cls, conn: Connection, row: sqlite3.Row) -> Source:
"""Construct a `Source` from a database row.
Parameters
----------
conn : Connection
The database connection to use.
row : sqlite3.Row
A row returned from the database.
"""
return cls(conn, *row)
@classmethod
async def from_id(cls, conn: Connection,
source_id: int) -> Optional[Source]:
"""Construct a `Source` by ID from the database.
Returns `None` if there was no `Source` with the given ID.
Parameters
----------
conn : Connection
The database connection to use.
source_id : int
The ID of the source to fetch.
"""
async with conn.cursor() as cur:
await cur.execute(
f'SELECT * FROM {cls.table_name} WHERE source_id = ?',
(source_id,))
row = await cur.fetchone()
if row is None:
return None
return cls.from_row(conn, row)
async def save(self):
"""Save this `Source` to the database."""
async with self._connection.cursor() as cur:
await cur.execute(f"""
INSERT INTO {self.table_name} VALUES (?, ?, ?, ?)
ON CONFLICT (source_id) DO UPDATE SET
protocol = excluded.protocol,
server = excluded.server,
channel = excluded.channel
""", (self.id, self.protocol, self.server, self.channel))
self.id = cur.lastrowid
await self._connection.commit()
async def get_source(conn: Connection, protocol: str, server: str = None,
channel: str = None) -> int:
"""Get an existing source or create a new one.
This is a convenient and generalized function for ZeroBot modules that
enables the most common actions related to database sources: lookup of
existing sources and the creation of new ones.
Parameters
----------
conn : Connection
The database connection to use.
protocol : str
A protocol identifier, e.g. ``discord``.
server : str, optional
The name of a server on the given `protocol`.
channel : str, optional
The name of a channel on the given `server`.
Returns
-------
int
The ``source_id`` of either an existing or newly created source.
"""
for attr, value in (('protocol', protocol), ('server', server), ('channel', channel)):
if value is not None and value.strip() == '':
raise ValueError(f'{attr} is empty or whitespace')
async with conn.cursor() as cur:
await cur.execute(f"""
SELECT source_id, protocol, server, channel
FROM {Source.table_name}
WHERE protocol = ? AND server = ? AND channel = ?
""", (protocol, server, channel))
row = await cur.fetchone()
if row is None:
# Create new Source
source = Source(conn, None, protocol, server, channel)
await source.save()
else:
source = Source.from_row(conn, row)
# Set the size range (or exact size) of this element if it must be of a certain size. For use during
# CDDL parsing.
def set_size_range(self, min_size, max_size):
if min_size == max_size:
return self.set_size(min_size)
elif min_size > max_size:
raise TypeError(
"Invalid size range (min %d, max %d)" %
(min_size, max_size))
else:
self.set_size(None)
self.set_min_size(min_size)
self.set_max_size(max_size)
# Set self.minSize, and self.minValue if type is UINT.
def set_min_size(self, minSize):
if self.type is "UINT":
self.minValue = 256**min(0, abs(minSize-1))
self.minSize = minSize
# Set self.max_size, and self.maxValue if type is UINT.
def set_max_size(self, max_size):
if self.type is "UINT" and max_size is not None:
if max_size > 8:
raise TypeError(
"Size too large for integer. size %d" %
max_size)
self.maxValue = 256**max_size - 1
self.max_size = max_size
# Set the self.cbor of this element. For use during CDDL parsing.
def set_cbor(self, cbor, cborseq):
if self.type != "BSTR":
raise TypeError(
"%s must be used with bstr." %
(".cborseq" if cborseq else ".cbor",))
self.cbor = cbor
if cborseq:
self.cbor.maxQ = self.DEFAULT_MAXQ
self.cbor.set_base_name("cbor")
# Set the self.key of this element. For use during CDDL parsing.
def set_key(self, key):
if self.key is not None:
raise TypeError("Cannot have two keys: " + key)
if key.type == "GROUP":
raise TypeError("A key cannot be a group")
self.key = key
self.key.set_base_name("key")
# Set the self.label OR self.key of this element. In the CDDL "foo: bar", foo can be either a label or a key
# depending on whether it is in a map. This code uses a slightly different method for choosing between label and
# key. If the string is recognized as a type, it is treated as a key. For
# use during CDDL parsing.
def set_key_or_label(self, key_or_label):
if key_or_label.type == "OTHER" and key_or_label.value not in my_types:
self.set_label(key_or_label.value)
else:
if key_or_label.type == "OTHER" and self.label is None:
self.set_label(key_or_label.value)
self.set_key(key_or_label)
# Append to the self.value of this element. Used with the "MAP", "LIST", "UNION", and "GROUP" types, which all have
# a python list as self.value. The list represents the "children" of the
# type. For use during CDDL parsing.
def add_value(self, value):
self.value.append(value)
# Parse from the beginning of instr (string) until a full element has been parsed. self will become that element.
# This function is recursive, so if a nested element ("MAP"/"LIST"/"UNION"/"GROUP") is encountered, this function
# will create new instances and add them to self.value as a list. Likewise, if a key or cbor definition is
# encountered, a new element will be created and assigned to self.key or self.cbor. When new elements are created,
# getValue() is called on those elements, via parse().
def get_value(self, instr):
# The following regexes match different parts of the element. The order of the list is important because it
# implements the operator precedence defined in the CDDL spec. Note that some regexes are inserted afterwards
# because they involve a match of a concatenation of all the initial
# regexes (with a '|' between each element).
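# Illustrative walk-through (not an exhaustive trace): for an input such as
# "uint .size 4,", the 'uint(?!\w)' regex below sets the type to UINT, the
# '.size' regex then constrains the size to 4, and parsing stops at the
# trailing comma, which is stripped before get_value() returns.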
types = [
(r'\A(?!\/\/).+?(?=\/\/)',
lambda union_str:
self.type_and_value("UNION", lambda: parse("(%s)" % union_str if ',' in union_str else union_str))),
(r'\/\/\s*(?P<item>.+?)(?=\/\/|\Z)',
lambda union_str: self.add_value(parse("(%s)" % union_str if ',' in union_str else union_str)[0])),
(r'([+*?])',
self.set_quantifier),
(r'(\d*\*\*\d*)',
self.set_quantifier),
(r'uint(?!\w)',
lambda _: self.type_and_value("UINT", lambda: None)),
(r'nint(?!\w)',
lambda _: self.type_and_value("NINT", lambda: None)),
(r'int(?!\w)',
lambda _: self.type_and_value("INT", lambda:None)),
(r'float(?!\w)',
lambda _: self.type_and_value("FLOAT", lambda: None)),
(r'float16(?!\w)',
lambda _: self.type_value_size("FLOAT", None, 2)),
(r'float32(?!\w)',
lambda _: self.type_value_size("FLOAT", None, 4)),
(r'float64(?!\w)',
lambda _: self.type_value_size("FLOAT", None, 8)),
(r'\-?\d*\.\d+',
lambda num: self.type_and_value("FLOAT", lambda: int(num))),
(r'\d+\.\.\d+',
lambda _range: self.type_and_range("UINT", *map(int, _range.split("..")))),
(r'\-\d+\.\.\d+',
lambda _range: self.type_and_range("INT", *map(int, _range.split("..")))),
(r'\-\d+\.\.\-\d+',
lambda _range: self.type_and_range("NINT", *map(int, _range.split("..")))),
(r'\-\d+',
lambda num: self.type_and_value("NINT", lambda: int(num))),
(r'0[xbo]\w+',
lambda num: self.type_and_value("UINT", lambda: int(num, 0))),
(r'\d+',
lambda num: self.type_and_value("UINT", lambda: int(num))),
(r'bstr(?!\w)',
lambda _: self.type_and_value("BSTR", lambda: None)),
(r'tstr(?!\w)',
lambda _: self.type_and_value("TSTR", lambda: None)),
(r'\".*?\"(?<!\\)',
lambda string: self.type_and_value("TSTR", lambda: string.strip('"'))),
(r'\[(?P<item>(?>[^[\]]+|(?R))*)\]',
lambda list_str: self.type_and_value("LIST", lambda: parse(list_str))),
(r'\((?P<item>(?>[^\(\)]+|(?R))*)\)',
lambda group_str: self.type_and_value("GROUP", lambda: parse(group_str))),
(r'{(?P<item>(?>[^{}]+|(?R))*)}',
lambda _map: self.type_and_value("MAP", lambda: parse(_map))),
(r'bool(?!\w)',
lambda _: self.type_and_value("BOOL", lambda: None)),
(r'true(?!\w)',
lambda _: self.type_and_value("BOOL", lambda: True)),
(r'false(?!\w)',
lambda _: self.type_and_value("BOOL", lambda: False)),
(r'nil(?!\w)',
lambda _: self.type_and_value("NIL", lambda: None)),
(r'any(?!\w)',
lambda _: self.type_and_value("ANY", lambda: None)),
(r'(\$?\$?[\w-]+)',
lambda other_str: self.type_and_value("OTHER", lambda: other_str.strip("$"))),
(r'\.size \(?(?P<item>\d+\.\.\d+)\)?',
lambda _range: self.set_size_range(*map(int, _range.split("..")))),
(r'\.size \(?(?P<item>\d+)\)?',
lambda size: self.set_size(int(size))),
(r'\.cbor (?P<item>[\w-]+)',
lambda type_str: self.set_cbor(parse(type_str)[0], False)),
(r'\.cborseq (?P<item>[\w-]+)',
lambda type_str: self.set_cbor(parse(type_str)[0], True))
]
all_type_regex = '|'.join([regex for (regex, _) in types[3:]])
for i in range(0, all_type_regex.count("item")):
all_type_regex = all_type_regex.replace("item", "it%dem" % i, 1)
types.insert(3, (r'(?P<item>'+all_type_regex+r')\s*\:',
lambda key_str: self.set_key_or_label(parse(key_str)[0])))
types.insert(4, (r'(?P<item>'+all_type_regex+r')\s*\=\>',
lambda key_str: self.set_key(parse(key_str)[0])))
types.insert(5, (r'(?P<item>(('+all_type_regex+r')\s*)+?)(?=\/)',
lambda union_str: self.type_and_value("UNION", lambda: parse(union_str))))
types.insert(6, (r'\/\s*(?P<item>(('+all_type_regex+r')\s*)+?)(?=\/|\,|\Z)',
lambda union_str: self.add_value(parse(union_str)[0])))
# Keep parsing until a comma, or to the end of the string.
while instr != '' and instr[0] != ',':
match_obj = None
for (reg, handler) in types:
match_obj = match(reg, instr)
if match_obj:
try:
match_str = match_obj.group("item")
except IndexError:
match_str = match_obj.group(0)
try:
handler(match_str)
except Exception as e:
raise Exception("Failed while parsing this: '%s'" % match_str) from e
self.match_str += match_str
old_len = len(instr)
instr = sub(reg, '', instr, count=1).lstrip()
if old_len == len(instr):
raise Exception("empty match")
break
if not match_obj:
raise TypeError("Could not parse this: '%s'" % instr)
instr = instr[1:]
if not self.type:
raise ValueError("No proper value while parsing: %s" % instr)
# Return the unparsed part of the string.
return instr
# For checking whether this element has a key (i.e. to check that it is a valid "MAP" child).
# This must have some recursion since CDDL allows the key to be hidden
# behind layers of indirection.
def elem_has_key(self):
return self.key is not None\
or (self.type == "OTHER" and my_types[self.value].elem_has_key())\
or (self.type in ["GROUP", "UNION"] and all(child.elem_has_key() for child in self.value))
# Function for performing validations that must be done after all parsing is complete. This is recursive, so
# it will post_validate all its children + key + cbor.
def post_validate(self):
# Validation of this element.
if self.type == "MAP":
none_keys = [child for child in self.value if not child.elem_has_key()]
if none_keys:
raise TypeError("Map entry must have key: " + str(none_keys))
if self.type == "OTHER":
if self.value not in my_types.keys() or not isinstance(
my_types[self.value], type(self)):
raise TypeError("%s has not been parsed." % self.value)
if self.type == "LIST":
for child in self.value[:-1]:
if child.type == "ANY":
if child.minQ != child.maxQ:
raise TypeError(f"ambiguous quantity of 'any' is not supported in list, "
+ "except as last element:\n{str(child)}")
if self.type == "UNION" and len(self.value) > 1:
if any(((not child.key and child.type == "ANY") or (
child.key and child.key.type == "ANY")) for child in self.value):
raise TypeError(
"'any' inside union is not supported since it would always be triggered.")
# Validation of child elements.
if self.type in ["MAP", "LIST", "UNION", "GROUP"]:
for child in self.value:
child.post_validate()
if self.key:
self.key.post_validate()
if self.cbor:
self.cbor.post_validate()
def __repr__(self):
return self.mrepr(False)
# Class for generating C code that decodes CBOR and validates it according
# to the CDDL.
class CodeGenerator(CddlParser):
indentation = "\t"
newl_ind = "\n" + indentation
def __init__(self, base_name=None):
super(CodeGenerator, self).__init__()
# The prefix used for C code accessing this element, i.e. the struct
# hierarchy leading
self.accessPrefix = None
# up to this element. This can change multiple times during generation to suit
# different situations.
# The delimiter used between elements in the accessPrefix.
self.access_delimiter = "."
# Used as a guard against endless recursion in self.dependsOn()
self.dependsOnCall = False
self.base_name = base_name # Used as default for self.get_base_name()
# Base name used for functions, variables, and typedefs.
def get_base_name(self):
return ((self.base_name or self.label
or (self.key.value if self.key and self.key.type in ["TSTR", "OTHER"] else None)
or (f"{self.value}_{self.type.lower()}" if self.type == "TSTR" and self.value is not None else None)
or (f"{self.type.lower()}{self.value}" if self.type in ["INT", "UINT"] and self.value is not None else None)
or (next((key for key, value in my_types.items() if value == self), None))
or ("_"+self.value if self.type == "OTHER" else None)
or ("_"+self.value[0].get_base_name()
if self.type in ["LIST", "GROUP"] and self.value is not None else None)
or (self.cbor.value if self.cbor and self.cbor.type in ["TSTR",
2*mckin**2*q_cut + q_cut**2)/mbkin**4)))**2)/mbkin**4 -
(8640*mckin**8*((mbkin**4 - 2*mbkin**2*mckin**2 + mckin**4 - 2*mbkin**2*q_cut -
2*mckin**2*q_cut + q_cut**2)/mbkin**4)**(3/2)*(72*mckin**2*muG -
(144*mckin**4*muG)/mbkin**2 - (432*mckin**6*muG)/mbkin**4 -
(36*mckin**2*muG**2)/mbkin**2 + (72*mckin**4*muG**2)/mbkin**4 +
(216*mckin**6*muG**2)/mbkin**6 + (36*mckin**2*muG*mupi)/mbkin**2 -
(72*mckin**4*muG*mupi)/mbkin**4 - (216*mckin**6*muG*mupi)/mbkin**6 +
(96 + (368*mckin**2)/mbkin**2 - (192*mckin**4)/mbkin**4)*rE +
4*(3 - (35*mckin**2)/mbkin**2 + (78*mckin**4)/mbkin**4 +
(66*mckin**6)/mbkin**6)*rG + 72*mbkin*rhoD + (408*mckin**2*rhoD)/
mbkin + (1680*mckin**4*rhoD)/mbkin**3 + (528*mckin**6*rhoD)/mbkin**5 +
36*sB + (516*mckin**2*sB)/mbkin**2 + (696*mckin**4*sB)/mbkin**4 +
(264*mckin**6*sB)/mbkin**6 - (304*mckin**2*sE)/mbkin**2 -
(192*mckin**4*sE)/mbkin**4 - 9*sqB - (67*mckin**2*sqB)/mbkin**2 -
(138*mckin**4*sqB)/mbkin**4 - (66*mckin**6*sqB)/mbkin**6)*
np.log((mbkin**2 + mckin**2 - q_cut - mbkin**2*np.sqrt(0j + (mbkin**4 - 2*mbkin**2*
mckin**2 + mckin**4 - 2*mbkin**2*q_cut - 2*mckin**2*q_cut + q_cut**2)/
mbkin**4))/(mbkin**2 + mckin**2 - q_cut + mbkin**2*
np.sqrt(0j + (mbkin**4 - 2*mbkin**2*mckin**2 + mckin**4 - 2*mbkin**2*q_cut -
2*mckin**2*q_cut + q_cut**2)/mbkin**4)))**3)/mbkin**8)/
(540*((mbkin**4 - 2*mbkin**2*mckin**2 + mckin**4 - 2*mbkin**2*q_cut -
2*mckin**2*q_cut + q_cut**2)/mbkin**4)**(3/2)*
((np.sqrt(0j + (mbkin**4 - 2*mbkin**2*mckin**2 + mckin**4 - 2*mbkin**2*q_cut -
2*mckin**2*q_cut + q_cut**2)/mbkin**4)*(mbkin**6 - 7*mbkin**4*mckin**2 -
7*mbkin**2*mckin**4 + mckin**6 - mbkin**4*q_cut - mckin**4*q_cut -
mbkin**2*q_cut**2 - mckin**2*q_cut**2 + q_cut**3))/mbkin**6 -
(12*mckin**4*np.log((mbkin**2 + mckin**2 - q_cut - mbkin**2*np.sqrt(0j +
(mbkin**4 - 2*mbkin**2*mckin**2 + mckin**4 - 2*mbkin**2*q_cut -
2*mckin**2*q_cut + q_cut**2)/mbkin**4))/(mbkin**2 + mckin**2 - q_cut +
mbkin**2*np.sqrt(0j + (mbkin**4 - 2*mbkin**2*mckin**2 + mckin**4 -
2*mbkin**2*q_cut - 2*mckin**2*q_cut + q_cut**2)/mbkin**4))))/mbkin**4)**
3) +
(api4*((72*mbkin**4*((-8*(mbkin - mckin)**2*(mbkin + mckin)*
(3*mbkin + 3*mckin + 8*mbkin*mckin))/(9*mbkin**6) +
(16*(4*mbkin**3 - 4*mbkin**2*mckin + 3*mckin**2 + 8*mbkin*mckin**2)*
q_cut)/(9*mbkin**6) - (8*(3 + 8*mbkin)*q_cut**2)/(9*mbkin**6))*
((-1 + mckin**2/mbkin**2)**2*(1 - (7*mckin**2)/mbkin**2 - (7*mckin**4)/
mbkin**4 + mckin**6/mbkin**6) + ((-3 + (14*mckin**2)/mbkin**2 +
(26*mckin**4)/mbkin**4 + (14*mckin**6)/mbkin**6 - (3*mckin**8)/
mbkin**8)*q_cut)/mbkin**2 + (2*(mbkin**6 - 2*mbkin**4*mckin**2 -
2*mbkin**2*mckin**4 + mckin**6)*q_cut**2)/mbkin**10 +
(2*(mbkin**4 + mbkin**2*mckin**2 + mckin**4)*q_cut**3)/mbkin**10 -
(3*(mbkin**2 + mckin**2)*q_cut**4)/mbkin**10 + q_cut**5/mbkin**10)**2*
(1 - (23*mckin**2)/mbkin**2 - (398*mckin**4)/mbkin**4 -
(398*mckin**6)/mbkin**6 - (23*mckin**8)/mbkin**8 +
mckin**10/mbkin**10 + ((mbkin**8 - 20*mbkin**6*mckin**2 - 102*mbkin**4*
mckin**4 - 20*mbkin**2*mckin**6 + mckin**8)*q_cut)/mbkin**10 +
((mbkin**6 - 15*mbkin**4*mckin**2 - 15*mbkin**2*mckin**4 + mckin**6)*
q_cut**2)/mbkin**10 + ((-4 + (2*mckin**2)/mbkin**2 - (4*mckin**4)/
mbkin**4)*q_cut**3)/mbkin**6 - (4*(mbkin**2 + mckin**2)*q_cut**4)/
mbkin**10 + (5*q_cut**5)/mbkin**10) + ((-1 + mckin**2/mbkin**2)**2 -
(2*(mbkin**2 + mckin**2)*q_cut)/mbkin**4 + q_cut**2/mbkin**4)*
((64*mbkin*(-((-1 + mckin**2/mbkin**2)**4*(1 + mckin**2/mbkin**2)**2*
(503 - (9464*mckin**2)/mbkin**2 + (69322*mckin**4)/mbkin**4 -
(179128*mckin**6)/mbkin**6 - (217124*mckin**8)/mbkin**8 +
(134968*mckin**10)/mbkin**10 - (44170*mckin**12)/mbkin**12 +
(3064*mckin**14)/mbkin**14 + (109*mckin**16)/mbkin**16)) +
((-1 + mckin**2/mbkin**2)**2*(2903 - (41547*mckin**2)/mbkin**2 +
(196111*mckin**4)/mbkin**4 + (84389*mckin**6)/mbkin**6 -
(2226350*mckin**8)/mbkin**8 - (4060522*mckin**10)/mbkin**10 -
(2007262*mckin**12)/mbkin**12 + (332278*mckin**14)/mbkin**14 +
(144215*mckin**16)/mbkin**16 - (185931*mckin**18)/mbkin**18 +
(19663*mckin**20)/mbkin**20 + (613*mckin**22)/mbkin**22)*q_cut)/
mbkin**2 - (2*(2870 - (34643*mckin**2)/mbkin**2 +
(127322*mckin**4)/mbkin**4 + (190963*mckin**6)/mbkin**6 -
(1214040*mckin**8)/mbkin**8 - (2984930*mckin**10)/mbkin**10 -
(3081596*mckin**12)/mbkin**12 - (1121550*mckin**14)/mbkin**14 +
(431494*mckin**16)/mbkin**16 + (81077*mckin**18)/mbkin**18 -
(159838*mckin**20)/mbkin**20 + (20891*mckin**22)/mbkin**22 +
(540*mckin**24)/mbkin**24)*q_cut**2)/mbkin**4 -
(2*(-969 + (3860*mckin**2)/mbkin**2 + (20318*mckin**4)/mbkin**4 -
(143553*mckin**6)/mbkin**6 + (43662*mckin**8)/mbkin**8 +
(243236*mckin**10)/mbkin**10 - (166416*mckin**12)/mbkin**12 -
(159266*mckin**14)/mbkin**14 + (150619*mckin**16)/mbkin**16 +
(20136*mckin**18)/mbkin**18 - (11694*mckin**20)/mbkin**20 +
(67*mckin**22)/mbkin**22)*q_cut**3)/mbkin**6 +
((8985 - (76768*mckin**2)/mbkin**2 + (136327*mckin**4)/mbkin**4 +
(661168*mckin**6)/mbkin**6 + (524386*mckin**8)/mbkin**8 +
(437592*mckin**10)/mbkin**10 + (491062*mckin**12)/mbkin**12 -
(151632*mckin**14)/mbkin**14 - (284475*mckin**16)/mbkin**16 +
(57128*mckin**18)/mbkin**18 + (2563*mckin**20)/mbkin**20)*q_cut**4)/
mbkin**8 - ((12407 - (70063*mckin**2)/mbkin**2 + (19312*mckin**4)/
mbkin**4 + (726800*mckin**6)/mbkin**6 + (1038918*mckin**8)/
mbkin**8 + (304322*mckin**10)/mbkin**10 - (490544*mckin**12)/
mbkin**12 - (209264*mckin**14)/mbkin**14 + (102019*mckin**16)/
mbkin**16 + (1997*mckin**18)/mbkin**18)*q_cut**5)/mbkin**10 -
(4*(-354 + (2543*mckin**2)/mbkin**2 - (2380*mckin**4)/mbkin**4 -
(59099*mckin**6)/mbkin**6 - (24326*mckin**8)/mbkin**8 +
(44915*mckin**10)/mbkin**10 + (6624*mckin**12)/mbkin**12 -
(8999*mckin**14)/mbkin**14 + (564*mckin**16)/mbkin**16)*q_cut**6)/
mbkin**12 + (4*(2103 - (3452*mckin**2)/mbkin**2 -
(14684*mckin**4)/mbkin**4 - (17263*mckin**6)/mbkin**6 -
(5615*mckin**8)/mbkin**8 + (7734*mckin**10)/mbkin**10 +
(11390*mckin**12)/mbkin**12 + (1035*mckin**14)/mbkin**14)*q_cut**7)/
mbkin**14 + ((-5829 + (9694*mckin**2)/mbkin**2 + (45611*mckin**4)/
mbkin**4 + (25144*mckin**6)/mbkin**6 - (44863*mckin**8)/
mbkin**8 - (44134*mckin**10)/mbkin**10 - (1047*mckin**12)/
mbkin**12)*q_cut**8)/mbkin**16 - ((343 + (6467*mckin**2)/
mbkin**2 + (16652*mckin**4)/mbkin**4 - (4820*mckin**6)/
mbkin**6 - (5767*mckin**8)/mbkin**8 + (2021*mckin**10)/
mbkin**10)*q_cut**9)/mbkin**18 + (2*(830 + (1771*mckin**2)/
mbkin**2 + (3242*mckin**4)/mbkin**4 + (3405*mckin**6)/
mbkin**6 + (928*mckin**8)/mbkin**8)*q_cut**10)/mbkin**20 -
(2*(247 + (420*mckin**2)/mbkin**2 + (1194*mckin**4)/mbkin**4 +
(291*mckin**6)/mbkin**6)*q_cut**11)/mbkin**22 +
((3 + (92*mckin**2)/mbkin**2 + (65*mckin**4)/mbkin**4)*q_cut**12)/
mbkin**24 - ((9 + (19*mckin**2)/mbkin**2)*q_cut**13)/mbkin**26 +
(8*q_cut**14)/mbkin**28))/3 + 72*((-4*((-1 + mckin**2/mbkin**2)**2*
(1 - (7*mckin**2)/mbkin**2 - (7*mckin**4)/mbkin**4 +
mckin**6/mbkin**6) + ((-3 + (14*mckin**2)/mbkin**2 +
(26*mckin**4)/mbkin**4 + (14*mckin**6)/mbkin**6 -
(3*mckin**8)/mbkin**8)*q_cut)/mbkin**2 + (2*(mbkin**6 -
2*mbkin**4*mckin**2 - 2*mbkin**2*mckin**4 + mckin**6)*q_cut**2)/
mbkin**10 + (2*(mbkin**4 + mbkin**2*mckin**2 + mckin**4)*q_cut**3)/
mbkin**10 - (3*(mbkin**2 + mckin**2)*q_cut**4)/mbkin**10 +
q_cut**5/mbkin**10)**2*(69*mbkin**10 + 184*mbkin**10*mckin +
2319*mbkin**8*mckin**2 - 184*mbkin**9*mckin**2 + 6368*mbkin**8*
mckin**3 + 1194*mbkin**6*mckin**4 - 6368*mbkin**7*mckin**4 +
9552*mbkin**6*mckin**5 - 3306*mbkin**4*mckin**6 - 9552*mbkin**5*
mckin**6 + 736*mbkin**4*mckin**7 - 291*mbkin**2*mckin**8 -
736*mbkin**3*mckin**8 - 40*mbkin**2*mckin**9 + 15*mckin**10 +
40*mbkin*mckin**10 + 63*mbkin**8*q_cut + 8*mbkin**9*q_cut +
160*mbkin**8*mckin*q_cut + 492*mbkin**6*mckin**2*q_cut - 320*mbkin**7*
mckin**2*q_cut + 1632*mbkin**6*mckin**3*q_cut - 738*mbkin**4*mckin**4*
q_cut - 2448*mbkin**5*mckin**4*q_cut + 480*mbkin**4*mckin**5*q_cut -
252*mbkin**2*mckin**6*q_cut - 640*mbkin**3*mckin**6*q_cut -
32*mbkin**2*mckin**7*q_cut + 15*mckin**8*q_cut + 40*mbkin*mckin**8*
q_cut + 51*mbkin**6*q_cut**2 + 16*mbkin**7*q_cut**2 + 120*mbkin**6*mckin*
q_cut**2 - 45*mbkin**4*mckin**2*q_cut**2 - 360*mbkin**5*mckin**2*q_cut**2 +
240*mbkin**4*mckin**3*q_cut**2 - 189*mbkin**2*mckin**4*q_cut**2 -
480*mbkin**3*mckin**4*q_cut**2 - 24*mbkin**2*mckin**5*q_cut**2 +
15*mckin**6*q_cut**2 + 40*mbkin*mckin**6*q_cut**2 - 42*mbkin**4*q_cut**3 -
96*mbkin**5*q_cut**3 - 16*mbkin**4*mckin*q_cut**3 + 48*mbkin**2*mckin**2*
q_cut**3 + 64*mbkin**3*mckin**2*q_cut**3 + 64*mbkin**2*mckin**3*q_cut**3 -
60*mckin**4*q_cut**3 - 160*mbkin*mckin**4*q_cut**3 - 36*mbkin**2*q_cut**4 -
128*mbkin**3*q_cut**4 + 32*mbkin**2*mckin*q_cut**4 - 60*mckin**2*q_cut**4 -
160*mbkin*mckin**2*q_cut**4 + 75*q_cut**5 + 200*mbkin*q_cut**5))/(9*
mbkin**8) + (1 - (23*mckin**2)/mbkin**2 - (398*mckin**4)/
mbkin**4 - (398*mckin**6)/mbkin**6 - (23*mckin**8)/mbkin**8 +
mckin**10/mbkin**10 + ((mbkin**8 - 20*mbkin**6*mckin**2 -
102*mbkin**4*mckin**4 - 20*mbkin**2*mckin**6 + mckin**8)*q_cut)/
mbkin**10 + ((mbkin**6 - 15*mbkin**4*mckin**2 - 15*mbkin**2*
mckin**4 + mckin**6)*q_cut**2)/mbkin**10 +
((-4 + (2*mckin**2)/mbkin**2 - (4*mckin**4)/mbkin**4)*q_cut**3)/
mbkin**6 - (4*(mbkin**2 + mckin**2)*q_cut**4)/mbkin**10 +
(5*q_cut**5)/mbkin**10)*((8*mbkin**2*(3 + 8*mbkin)*
((-1 + mckin**2/mbkin**2)**2*(1 - (7*mckin**2)/mbkin**2 -
(7*mckin**4)/mbkin**4 + mckin**6/mbkin**6) +
((-3 + (14*mckin**2)/mbkin**2 + (26*mckin**4)/mbkin**4 +
(14*mckin**6)/mbkin**6 - (3*mckin**8)/mbkin**8)*q_cut)/
mbkin**2 + (2*(mbkin**6 - 2*mbkin**4*mckin**2 - 2*mbkin**2*
mckin**4 + mckin**6)*q_cut**2)/mbkin**10 + (2*(mbkin**4 +
mbkin**2*mckin**2 + mckin**4)*q_cut**3)/mbkin**10 -
(3*(mbkin**2 + mckin**2)*q_cut**4)/mbkin**10 + q_cut**5/mbkin**10)**2)/
9 + 2*mbkin**4*((-1 + mckin**2/mbkin**2)**2*(1 - (7*mckin**2)/
mbkin**2 - (7*mckin**4)/mbkin**4 + mckin**6/mbkin**6) +
((-3 + (14*mckin**2)/mbkin**2 + (26*mckin**4)/mbkin**4 +
(14*mckin**6)/mbkin**6 - (3*mckin**8)/mbkin**8)*q_cut)/
mbkin**2 + (2*(mbkin**6 - 2*mbkin**4*mckin**2 - 2*mbkin**2*
mckin**4 + mckin**6)*q_cut**2)/mbkin**10 +
(2*(mbkin**4 + mbkin**2*mckin**2 + mckin**4)*q_cut**3)/mbkin**10 -
(3*(mbkin**2 + mckin**2)*q_cut**4)/mbkin**10 + q_cut**5/mbkin**10)*
((-4*(mbkin - mckin)**2*(mbkin + mckin)*(27*mbkin**7 +
27*mbkin**6*mckin + 72*mbkin**7*mckin - 21*mbkin**5*
mckin**2 - 21*mbkin**4*mckin**3 - 56*mbkin**5*mckin**3 -
93*mbkin**3*mckin**4 - 93*mbkin**2*mckin**5 - 248*mbkin**3*
mckin**5 + 15*mbkin*mckin**6 + 15*mckin**7 + 40*mbkin*
mckin**7))/(9*mbkin**12) + (4*(51*mbkin**8 + 24*mbkin**9 +
112*mbkin**8*mckin + 72*mbkin**6*mckin**2 - 224*mbkin**7*
mckin**2 + 416*mbkin**6*mckin**3 - 108*mbkin**4*mckin**4 -
624*mbkin**5*mckin**4 + 336*mbkin**4*mckin**5 - 204*mbkin**2*
mckin**6 - 448*mbkin**3*mckin**6 - 96*mbkin**2*mckin**7 +
45*mckin**8 + 120*mbkin*mckin**8)*q_cut)/(9*mbkin**12) -
(8*(12*mbkin**6 + 16*mbkin**7 + 16*mbkin**6*mckin - 6*mbkin**4*
mckin**2 - 48*mbkin**5*mckin**2 + 32*mbkin**4*mckin**3 -
33*mbkin**2*mckin**4 - 64*mbkin**3*mckin**4 - 24*mbkin**2*
mckin**5 + 15*mckin**6 + 40*mbkin*mckin**6)*q_cut**2)/
(9*mbkin**12) - (8*(6*mbkin**4 + 24*mbkin**5 - 8*mbkin**4*
mckin + 6*mbkin**2*mckin**2 + 32*mbkin**3*mckin**2 -
16*mbkin**2*mckin**3 + 15*mckin**4 + 40*mbkin*mckin**4)*
q_cut**3)/(9*mbkin**12) + (4*(9*mbkin**2 + 32*mbkin**3 -
8*mbkin**2*mckin + 15*mckin**2 + 40*mbkin*mckin**2)*q_cut**4)/
(3*mbkin**12) - (20*(3 + 8*mbkin)*q_cut**5)/(9*mbkin**12))))) -
12*((-128*mckin**4*(mbkin**2 - 2*mbkin*mckin + mckin**2 - q_cut)**2*
(mbkin**2 + 2*mbkin*mckin + mckin**2 - q_cut)**2*(3*mbkin**4 + 8*
mbkin**4*mckin - 6*mbkin**2*mckin**2 - 8*mbkin**3*mckin**2 - 8*
mbkin**2*mckin**3 + 3*mckin**4 + 8*mbkin*mckin**4 - 3*mbkin**2*
q_cut - 8*mbkin**2*mckin*q_cut - 3*mckin**2*q_cut - 8*mbkin*mckin**2*q_cut)*
(17*mbkin**16 - 230*mbkin**14*mckin**2 - 508*mbkin**12*mckin**4 +
7790*mbkin**10*mckin**6 + 16102*mbkin**8*mckin**8 + 7790*mbkin**6*
mckin**10 - 508*mbkin**4*mckin**12 - 230*mbkin**2*mckin**14 + 17*
mckin**16 - 30*mbkin**14*q_cut + 122*mbkin**12*mckin**2*q_cut + 1566*
mbkin**10*mckin**4*q_cut + 3382*mbkin**8*mckin**6*q_cut + 3382*mbkin**6*
mckin**8*q_cut + 1566*mbkin**4*mckin**10*q_cut + 122*mbkin**2*mckin**12*
q_cut - 30*mckin**14*q_cut - 17*mbkin**12*q_cut**2 + 180*mbkin**10*mckin**2*
q_cut**2 + 2125*mbkin**8*mckin**4*q_cut**2 + 3656*mbkin**6*mckin**6*
q_cut**2 + 2125*mbkin**4*mckin**8*q_cut**2 + 180*mbkin**2*mckin**10*
q_cut**2 - 17*mckin**12*q_cut**2 + 50*mbkin**10*q_cut**3 + 62*mbkin**8*
mckin**2*q_cut**3 - 1104*mbkin**6*mckin**4*q_cut**3 - 1104*mbkin**4*
mckin**6*q_cut**3 + 62*mbkin**2*mckin**8*q_cut**3 + 50*mckin**10*q_cut**3 -
15*mbkin**8*q_cut**4 + 22*mbkin**6*mckin**2*q_cut**4 + 34*mbkin**4*mckin**4*
q_cut**4 + 22*mbkin**2*mckin**6*q_cut**4 - 15*mckin**8*q_cut**4 - 2*mbkin**6*
q_cut**5 - 198*mbkin**4*mckin**2*q_cut**5 - 198*mbkin**2*mckin**4*q_cut**5 -
2*mckin**6*q_cut**5 + 5*mbkin**4*q_cut**6 + 60*mbkin**2*mckin**2*q_cut**6 + 5*
mckin**4*q_cut**6 - 18*mbkin**2*q_cut**7 - 18*mckin**2*q_cut**7 + 10*q_cut**8))/
(mbkin**26*(mbkin**2 + mckin**2 - q_cut + mbkin**2*np.sqrt(0j +
(mbkin**4 - 2*mbkin**2*mckin**2 + mckin**4 - 2*mbkin**2*q_cut -
2*mckin**2*q_cut + q_cut**2)/mbkin**4))*(-mbkin**2 - mckin**2 + q_cut +
mbkin**2*np.sqrt(0j + (mbkin**4 - 2*mbkin**2*mckin**2 + mckin**4 -
2*mbkin**2*q_cut - 2*mckin**2*q_cut + q_cut**2)/mbkin**4))) +
((-32*mckin**4*(mbkin**4 - 2*mbkin**2*mckin**2 + mckin**4 -
2*mbkin**2*q_cut -
from abc import ABC, abstractmethod
from copy import deepcopy
import numpy as np
import tensorflow as tf
# source: http://geomalgorithms.com/a06-_intersect-2.html
# source: https://www.erikrotteveel.com/python/three-dimensional-ray-tracing-in-python/
gpu_phy_devices = tf.config.list_physical_devices('GPU')
try:
for gpu in gpu_phy_devices:
tf.config.experimental.set_memory_growth(gpu, True)
except RuntimeError:
pass
faraway = 99999 # faraway distance
precision = tf.float64 # default precision
pi = tf.constant(np.pi, dtype=precision)
# for numerical stability epsilon
if precision == tf.float32:
epsilon = tf.constant(1.e-07, precision)
elif precision == tf.float64:
epsilon = tf.constant(1.e-15, precision)
def set_precision(p):
global precision
precision = p
def mag(tensor):
"""
Calculate magnitude of the vector, return scalar tensor
"""
if tf.equal(tensor.get_shape().rank, 1):
_mag = tf.sqrt(tf.tensordot(tensor, tensor, 1))
else:
_mag = tf.sqrt(tf.reduce_sum(tensor*tensor, 1))
return _mag
def ray_reflection(rays, normal):
"""
Calculate reflection of rays `rays` with normal `normal`
:param rays: Rays directional vector, shape Nx3
:type rays: Ray
:param normal: normal vector
:type normal: tf.Tensor
"""
ray_direction = rays.p1 - tf.multiply(normal, tf.expand_dims(tf.reduce_sum(normal * rays.p1, 1), 1)) * 2.
# if directional vector small enough, then assume 0.
ray_direction = tf.where(tf.greater(tf.abs(ray_direction), epsilon), ray_direction, tf.zeros_like(ray_direction))
return ray_direction
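# Worked example of the mirror-reflection formula r = d - 2*(d.n)*n implemented
# above (values chosen purely for illustration): with direction d = (1, -1, 0)
# and unit normal n = (0, 1, 0), d.n = -1, so r = (1, -1, 0) - 2*(-1)*(0, 1, 0)
# = (1, 1, 0); the component along the normal flips while the rest is unchanged.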
def norm(tensor):
"""
Calculate norm of the vector, return normalized vector
"""
_mag = mag(tensor)
if tf.equal(tensor.get_shape().rank, 1):
return tensor * (1.0 / tf.where(tf.less_equal(_mag, epsilon), tf.ones_like(_mag), _mag))
else:
return tensor * tf.expand_dims(1.0 / tf.where(tf.less_equal(_mag, epsilon), tf.ones_like(_mag), _mag), 1)
def tile_vector(tensor, num):
return tf.tile(tf.expand_dims(tensor, 0), [num, 1])
def polar(tensor):
_norm = norm(tensor)
phi, theta = tf.math.atan2((_norm[:, 0]+epsilon), _norm[:, 1]), tf.math.acos(_norm[:, 2])
return tf.where(tf.less(phi, 0.), 2*pi+phi, phi), theta
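# Illustrative sketch (not part of the original module): the helpers above operate on
# batches of 3D vectors, e.g.
#   v = tf.constant([[3., 4., 0.]], dtype=precision)
#   mag(v)    # -> [5.]
#   norm(v)   # -> [[0.6, 0.8, 0.]]
#   polar(norm(v))  # -> (azimuth phi in [0, 2*pi), inclination theta)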
class Ray:
def __init__(self, p0, p1, intensity, interact_num):
"""
        Basic Ray class, originating from `p0` with a directional vector of `p1`
:param p0: 3D vectors for the origins of rays
:type p0: tf.Tensor
        :param p1: 3D vectors for the directions of rays
:type p1: tf.Tensor
:param intensity: Initial intensity of rays
:type intensity: tf.Tensor
:param interact_num: Initial number of interaction experienced by rays
:type interact_num: tf.Tensor
"""
self.p0 = p0 # ray origins
self.p1 = p1 # ray direction
self.intensity = intensity
self.interact_num = interact_num
p0_rows, p0_columns = p0.get_shape()
tf.debugging.assert_equal(tf.size(self.p0), tf.size(self.p1), message="Rays shape not equal")
tf.debugging.assert_equal(p0_rows, tf.size(self.intensity), message="Rays shape not equal")
tf.debugging.assert_equal(p0_rows, tf.size(self.interact_num), message="Rays shape not equal")
def __getitem__(self, key):
return Ray(self.p0[key], self.p1[key], self.intensity[key], self.interact_num[key])
def __setitem__(self, key, value):
if key.dtype == tf.bool:
key_3 = tf.concat([tf.expand_dims(key, 1), tf.expand_dims(key, 1), tf.expand_dims(key, 1)], 1)
self.p0 = tf.where(key_3, value.p0, self.p0)
self.p1 = tf.where(key_3, value.p1, self.p1)
self.intensity = tf.where(key, value.intensity, self.intensity)
self.interact_num = tf.where(key, value.interact_num, self.interact_num)
else:
self.p0[key] = value.p0
self.p1[key] = value.p1
self.intensity[key] = value.intensity
self.interact_num[key] = value.interact_num
def size(self):
num_rays = tf.size(self.p0) // 3
return num_rays
def copy(self):
return deepcopy(self)
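# Illustrative sketch (assumed usage, not from the original module): a batch of two rays
# starting at the origin, one along +z and one along +x, with unit intensity and no
# prior interactions.
#   origins    = tf.zeros((2, 3), dtype=precision)
#   directions = tf.constant([[0., 0., 1.], [1., 0., 0.]], dtype=precision)
#   rays = Ray(origins, directions,
#              intensity=tf.ones(2, dtype=precision),
#              interact_num=tf.zeros(2, dtype=tf.int32))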
class Surface(ABC):
"""
Basic class for surfaces
"""
def __init__(self):
pass
@abstractmethod
def vertices(self):
pass
class Triangle(Surface):
def __init__(self, v0, v1, v2, reflectivity=1.):
"""
A triangle with vertices `v0`, `v1`, `v2` and `reflectivity`
:param v0: 3D vectors for a vertex
:type v0: tf.Tensor
:param v1: 3D vectors for a vertex
:type v1: tf.Tensor
:param v2: 3D vectors for a vertex
:type v2: tf.Tensor
:param reflectivity: Reflectivity of the surface
:type reflectivity: float
"""
super().__init__()
self.v0 = tf.cast(v0, precision)
self.v1 = tf.cast(v1, precision)
self.v2 = tf.cast(v2, precision)
self.u = self.v1 - self.v0
self.v = self.v2 - self.v0
self.reflectivity = reflectivity
self.normal = norm(tf.linalg.cross(self.u, self.v))
@property
def vertices(self):
return tf.stack([self.v0, self.v1, self.v2])
def intersect(self, rays):
num_rays = rays.size()
tiled_v = tile_vector(self.v, num_rays)
tiled_u = tile_vector(self.u, num_rays)
tiled_normal = tile_vector(self.normal, num_rays)
b = tf.reduce_sum(tiled_normal*rays.p1, 1)
a = tf.reduce_sum(tiled_normal*(self.v0 - rays.p0), 1)
# check if the ray is close enough to be parallel or close enough to lie in the plane
cond_0_1 = tf.greater(tf.abs(b), epsilon)
cond_0_2 = tf.greater(tf.abs(a), epsilon)
cond_0 = tf.logical_and(cond_0_1, cond_0_2)
rI = tf.expand_dims(tf.where(tf.logical_or(cond_0, tf.less(a/b, 0.)), a/b, tf.zeros_like(a)), -1)
rI = tf.where(tf.greater(tf.abs(rI), epsilon), rI, tf.zeros_like(rI))
p_intersect = rays.p0 + rays.p1 * rI
w = p_intersect - self.v0 # p0 + rI * p1 - v0
wv_dot = tf.reduce_sum(w*tiled_v, 1)
wu_dot = tf.reduce_sum(w*tiled_u, 1)
uv_dot = tf.tensordot(self.u, self.v, 1)
uu_dot = tf.tensordot(self.u, self.u, 1)
vv_dot = tf.tensordot(self.v, self.v, 1)
denom = uv_dot * uv_dot - uu_dot * vv_dot
si = (uv_dot * wv_dot - vv_dot * wu_dot) / denom
ti = (uv_dot * wu_dot - uu_dot * wv_dot) / denom
ray_direction = ray_reflection(rays, tiled_normal)
cond_1 = tf.less_equal(tf.squeeze(rI), 0.)
cond_2 = tf.less(si, 0.)
cond_3 = tf.greater(si, 1.)
cond_4 = tf.less(ti, 0.)
cond_5 = tf.greater(si + ti, 1.)
no_interaction_idx = tf.logical_or(tf.logical_or(tf.logical_or(tf.logical_or(cond_1, cond_2), cond_3), cond_4), cond_5)
no_interaction_idx_3 = tf.concat([tf.expand_dims(no_interaction_idx, 1), tf.expand_dims(no_interaction_idx, 1), tf.expand_dims(no_interaction_idx, 1)], 1)
_p_intersect = tf.where(no_interaction_idx_3, rays.p0, p_intersect)
ray_direction = tf.where(no_interaction_idx_3, rays.p1, ray_direction)
new_interact_num = tf.where(no_interaction_idx, rays.interact_num, rays.interact_num+1)
new_intensity = tf.where(no_interaction_idx, rays.intensity, rays.intensity*self.reflectivity)
return Ray(_p_intersect, ray_direction, intensity=new_intensity, interact_num=new_interact_num)
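# Illustrative sketch (assumed usage): intersecting a batch of rays with a triangle in the
# z = 1 plane; rays that hit are reflected and get interact_num + 1, rays that miss are
# returned unchanged.
#   tri = Triangle([0., 0., 1.], [2., 0., 1.], [0., 2., 1.])
#   reflected = tri.intersect(rays)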
class Plane(Surface):
def __init__(self, v0, v1, v2, v3, reflectivity=1.):
"""
        A plane with vertices `v0`, `v1`, `v2`, `v3` and `reflectivity`
:param v0: 3D vectors for a vertex
:type v0: tf.Tensor
:param v1: 3D vectors for a vertex
:type v1: tf.Tensor
:param v2: 3D vectors for a vertex
:type v2: tf.Tensor
:param v3: 3D vectors for a vertex
:type v3: tf.Tensor
:param reflectivity: Reflectivity of the surface
        :type reflectivity: float
"""
super().__init__()
self.v0 = tf.cast(v0, precision)
self.v1 = tf.cast(v1, precision)
self.v2 = tf.cast(v2, precision)
self.v3 = tf.cast(v3, precision)
self.u = self.v1 - self.v0
self.v = self.v3 - self.v0
self.reflectivity = reflectivity
self.normal = norm(tf.linalg.cross(self.u, self.v))
@property
def vertices(self):
return tf.stack([self.v0, self.v1, self.v2, self.v3])
def intersect(self, rays):
num_rays = rays.size()
tiled_v = tile_vector(self.v, num_rays)
tiled_u = tile_vector(self.u, num_rays)
tiled_normal = tile_vector(self.normal, num_rays)
b = tf.reduce_sum(tiled_normal*rays.p1, 1)
a = tf.reduce_sum(tiled_normal*(self.v0 - rays.p0), 1)
# check if the ray is close enough to be parallel or close enough to lie in the plane
cond_0_1 = tf.greater(tf.abs(b), epsilon)
cond_0_2 = tf.greater(tf.abs(a), epsilon)
cond_0 = tf.logical_and(cond_0_1, cond_0_2)
rI = tf.expand_dims(tf.where(tf.logical_or(cond_0, tf.less(a/b, 0.)), a/b, tf.zeros_like(a)), -1)
p_intersect = rays.p0 + rays.p1 * rI
w = p_intersect - self.v0 # p0 + rI * p1 - v0
wv_dot = tf.reduce_sum(w*tiled_v, 1)
wu_dot = tf.reduce_sum(w*tiled_u, 1)
uv_dot = tf.tensordot(self.u, self.v, 1)
uu_dot = tf.tensordot(self.u, self.u, 1)
vv_dot = tf.tensordot(self.v, self.v, 1)
denom = uv_dot * uv_dot - uu_dot * vv_dot
si = (uv_dot * wv_dot - vv_dot * wu_dot) / denom
ti = (uv_dot * wu_dot - uu_dot * wv_dot) / denom
ray_direction = ray_reflection(rays, tiled_normal)
cond_1 = tf.less(tf.squeeze(rI), epsilon)
cond_2 = tf.less(si, 0.)
cond_3 = tf.greater(si, 1.)
cond_4 = tf.less(ti, 0.)
cond_5 = tf.greater(ti, 1.)
no_interaction_idx = tf.logical_or(tf.logical_or(tf.logical_or(tf.logical_or(cond_1, cond_2), cond_3), cond_4), cond_5)
no_interaction_idx_3 = tf.concat([tf.expand_dims(no_interaction_idx, 1), tf.expand_dims(no_interaction_idx, 1), tf.expand_dims(no_interaction_idx, 1)], 1)
p_intersect = tf.where(no_interaction_idx_3, rays.p0, p_intersect)
ray_direction = tf.where(no_interaction_idx_3, rays.p1, ray_direction)
new_interact_num = tf.where(no_interaction_idx, rays.interact_num, rays.interact_num+1)
new_intensity = tf.where(no_interaction_idx, rays.intensity, rays.intensity*self.reflectivity)
return Ray(p_intersect, ray_direction, intensity=new_intensity, interact_num=new_interact_num)
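# Note: Plane.intersect differs from Triangle.intersect mainly in the inside test - the
# surface is parameterised as a parallelogram spanned by u and v, so the conditions are
# 0 <= si <= 1 and 0 <= ti <= 1 rather than si, ti >= 0 with si + ti <= 1.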
class Pyramid:
def __init__(self, center, width, height, reflectivity=1.):
"""
A pyramid
:param center: 3D vectors for the center of the base
:type center: tf.Tensor
:param width: width of the base
:type width: float
        :param height: height of the pyramid (apex above the base)
:type height: float
:param reflectivity: Reflectivity of the surface
:type reflectivity: float
"""
self.center = tf.cast(center, precision) # center of the pyramid base
self.width = tf.cast(width, precision) # width of the pyramid base
self.height = tf.cast(height, precision)
self.reflectivity = reflectivity
self.top_left = self.center + tf.stack([-1. * self.width / 2., self.width / 2., 0.])
self.top_right = self.center + tf.stack([self.width / 2., self.width / 2., 0.])
self.bottom_left = self.center + tf.stack([-1. * self.width / 2., -1. * self.width / 2., 0.])
self.bottom_right = self.center + tf.stack([self.width / 2., -1. * self.width / 2., 0.])
self.top_v = self.center + tf.stack([0., 0., self.height])
self.vertices = tf.stack([self.top_left, self.top_right, self.bottom_right, self.bottom_left, self.top_v])
self.tri_1 = Triangle(self.top_v, self.top_left, self.top_right, self.reflectivity)
self.tri_2 = Triangle(self.top_v, self.top_right, self.bottom_right, self.reflectivity)
self.tri_3 = Triangle(self.top_v, self.bottom_right, self.bottom_left, self.reflectivity)
self.tri_4 = Triangle(self.top_v, self.bottom_left, self.top_left, self.reflectivity)
self.tris = [self.tri_1, self.tri_2, self.tri_3, self.tri_4]
def intersect(self, rays):
_pt = rays.copy() # by default assume not intersecting with pyramid
distance = tf.ones(rays.size(), dtype=precision) * faraway
for tri in self.tris:
pt = tri.intersect(rays)
interacted_idx = tf.greater(pt.interact_num, rays.interact_num)
dist = mag(rays.p0-pt.p0) # get the distance
interacted_w_shortest_idx = tf.logical_and(interacted_idx, tf.less(dist, distance))
            if tf.math.count_nonzero(interacted_w_shortest_idx) ==
from __future__ import print_function
from itertools import chain
from math import log
from .utility import init_matrix, init_3d_matrix
class HiddenMarkovModel:
"""
Notation used:
HMM: Hidden Markov Model
O: Observation sequence
S: Hidden state sequence
A: State transition probability distribution matrix
B: Observation emission probability distribution matrix
pi: Initial state probability distribution vector
lambda: A HMM comprised of (A,B,pi)
"""
def __init__(self, A, B, pi, all_obs, all_states, single_states=None, order=1):
if(single_states == None):
self._single_states = all_states
else:
self._single_states = single_states
self._all_states = all_states
self._all_obs = all_obs
self._A = A
self._B = B
self._pi = pi
self._highest_order = order
def evaluate(self, sequence):
"""
Evaluation Problem: Calculate P(O|lambda).
Calculates the probability of emitting the given observation
sequence based on the HMM. Uses the forward algorithm.
Args:
sequence (list<char>): observation sequence O
Returns:
float: probability of sequence being emitted
"""
self._check_legal_sequence(sequence)
if(len(sequence) == 0):
return 0
alpha = self._forward(sequence)
fwd_probability = sum(map(
lambda s: alpha[s][len(sequence) - 1],
range(len(self._all_states)))
)
return fwd_probability
def decode(self, sequence):
"""
Decoding Problem: Given O and lambda, find S such that S 'best'
describes O using lambda. Uses the Viterbi Algorithm.
Args:
sequence (list<char>): observation sequence O
Returns:
list<string>: hidden state sequence S
"""
self._check_legal_sequence(sequence)
if(len(sequence) == 0):
return []
return self._viterbi(sequence)
def learn(self, sequences, delta=0.0001, k_smoothing=0.0, iterations=-1):
"""
Learning Problem: Reestimate the model parameters (A,B,pi) iteratively
using the Baum-Welch Algorithm (EM). Maximize P(O|lambda).
It should be known that pi is currently not fully updated for HMMs
of order greater than one.
Args:
sequences (list<O>): list of observations O = (O1,O2,...On) used
to train the initial (A,B,pi) parameters.
delta (float): log value of iterative improvement such that when
evaluation probabilities improve by less than delta the
learning process is complete.
k_smoothing (float): Smoothing parameter for add-k smoothing to
avoid zero probability. Value should be between [0.0, 1.0].
iterations (int): number of iterations to perform. Will return
if convergence is found before all iterations
have been performed.
Returns:
(int): number of iterations to achieve convergence.
"""
self._check_legal_sequence(set(chain.from_iterable(sequences)))
num_sequences = len(sequences)
cur_iterations = 0
if(num_sequences == 0):
return cur_iterations
prior_score = sum(map(
lambda O: log(self.evaluate(O)),
sequences
)) / num_sequences
while True:
for seq in sequences:
self._train(seq, k_smoothing)
cur_iterations += 1
new_score = sum(map(
lambda O: log(self.evaluate(O)),
sequences
)) / num_sequences
if(abs(prior_score - new_score) < delta):
break
if(iterations > -1 and cur_iterations >= iterations):
break
prior_score = new_score
return cur_iterations
def get_parameters(self):
""" Dictionary of all model parameters. """
return {
"A": self._A,
"B": self._B,
"pi": self._pi,
"all_obs": self._all_obs,
"all_states": self._all_states,
"single_states": self._single_states
}
def display_parameters(self):
""" Display the lambda parameters (A,B,pi) on the console. """
names = [
"Starting probabilities (pi):",
"Transition probabilities (A):",
"Emission probabilities (B):"
]
for i, parameter in enumerate([self._pi, self._A, self._B]):
print(names[i])
for element in parameter:
print(element)
# ----------------- #
# Private #
# ----------------- #
def _check_legal_sequence(self, seq):
""" Throws ValueError if an element of seq is not in self._all_obs """
illegal_obs = list([x for x in seq if x not in self._all_obs])
if(len(illegal_obs) == 0):
return True
if(len(illegal_obs) == 1):
msg = "Observation out of vocabulary: '"
else:
msg = "Observations out of vocabulary: '"
raise ValueError(msg + ", ".join(illegal_obs) + "'")
def _forward(self, sequence):
rows = len(self._all_states)
columns = len(sequence)
alpha = init_matrix(rows, columns, "float")
# initialization step
for s_index, state in enumerate(self._single_states):
o_index = self._all_obs.index(sequence[0])
alpha[s_index][0] = (
self._pi[0][state]
* self._B[s_index][o_index]
)
# iterative step
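        # Forward recursion: alpha[s][t+1] = sum_{s'} alpha[s'][t] * a(s'->s) * B[s][O_{t+1}],
        # where the transition term a(s'->s) is taken from pi while t+1 < highest_order
        # and from A afterwards.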
for t_index in range(columns - 1):
obs = sequence[t_index + 1]
for s_index, state in enumerate(self._all_states):
single_state_index = self._single_states.index(
self._get_state_by_order(state, 1)
)
for s_prime in range(len(self._all_states)):
if(t_index + 1 < self._highest_order):
state_by_order = self._get_state_by_order(
self._all_states[s_index],
t_index + 2
)
a_prob = self._pi[t_index + 1][state_by_order]
else:
a_prob = self._A[s_prime][s_index]
alpha[s_index][t_index + 1] += (
alpha[s_prime][t_index]
* a_prob
* self._B[single_state_index][self._all_obs.index(obs)]
)
return alpha
def _backward(self, sequence):
rows = len(self._all_states)
columns = len(sequence)
beta = init_matrix(rows, columns, "float")
# initialization step
for s_index, state in enumerate(self._all_states):
beta[s_index][-1] = 1
# iterative step
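        # Backward recursion: beta[s][t] = sum_{s'} A[s][s'] * B[s'][O_{t+1}] * beta[s'][t+1]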
for t_index in reversed(range(columns-1)):
obs = sequence[t_index + 1]
for s_index in range(len(self._all_states)):
for s_prime, state in enumerate(self._all_states):
single_state_index = self._single_states.index(
self._get_state_by_order(state, 1)
)
beta[s_index][t_index] += (
beta[s_prime][t_index + 1]
* self._A[s_index][s_prime]
* self._B[single_state_index][self._all_obs.index(obs)]
)
return beta
def _viterbi(self, sequence):
"""
Notation used:
delta: matrix holding the highest probability state path
at observation time t.
psi: backpointer matrix maintaining which state maximized delta.
Args:
sequence (list<char>): observation sequence O
Returns:
list<string>: hidden state sequence S
"""
delta, psi = self._viterbi_forward(sequence)
return self._viterbi_backward(delta, psi, sequence)
def _viterbi_forward(self, sequence):
""" build probability quantities delta and backpointers psi """
rows = len(self._all_states)
columns = len(sequence)
delta = init_matrix(rows, columns, "int")
psi = init_matrix(rows, columns, 'int,int')
# initialization step
obs_index = self._all_obs.index(sequence[0])
for s_index, state in enumerate(self._all_states):
single_state = self._get_state_by_order(state, 1)
single_state_index = self._single_states.index(single_state)
delta[s_index][0] = (
self._pi[0][single_state]
* self._B[single_state_index][obs_index]
)
# iterative step
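        # Viterbi recursion: delta[s][t] = max_{s'} delta[s'][t-1] * a(s'->s) * B[s][O_t],
        # with psi[s][t] recording the argmax cell so the best path can be traced back.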
for o_index in range(1, columns):
o_master_index = self._all_obs.index(sequence[o_index])
for s_index, state in enumerate(self._all_states):
max_prob = 0
row_back = 0
col_back = 0
single_state_index = self._single_states.index(self._get_state_by_order(state, 1))
emission_multiplier = self._B[single_state_index][o_master_index]
                # a multiplier of 0.0 nullifies the following computation
if emission_multiplier == 0.0:
continue
for prev_s_index in range(rows):
transition_multiplier = 0
if(o_index < self._highest_order):
state_by_order = self._get_state_by_order(
self._all_states[s_index],
o_index + 1
)
transition_multiplier = self._pi[o_index][state_by_order]
else:
transition_multiplier = self._A[prev_s_index][s_index]
cur_prob = (
delta[prev_s_index][o_index - 1]
* transition_multiplier
* emission_multiplier
)
if cur_prob > max_prob:
max_prob = cur_prob
row_back = prev_s_index
col_back = o_index - 1
delta[s_index][o_index] = max_prob
psi[s_index][o_index] = (row_back, col_back)
return delta, psi
def _viterbi_backward(self, delta, psi, sequence):
""" Decode by following the backpointers of psi """
rev_output = []
j_max = len(sequence)
max_final = 0
i_final = 0
# find highest probability start state
for i in range(len(self._all_states)):
current_final = delta[i][j_max - 1]
if current_final > max_final:
max_final = current_final
i_final = i
rev_output.append(self._get_state_by_order(self._all_states[i_final], 1))
i_cur = psi[i_final][j_max - 1][0]
j_cur = psi[i_final][j_max - 1][1]
for j in range(j_max - 2, -1, -1):
rev_output.append(self._get_state_by_order(self._all_states[i_cur], 1))
i_cur_old = i_cur
i_cur = psi[i_cur][j_cur][0]
j_cur = psi[i_cur_old][j_cur][1]
return rev_output[::-1]
def _train(self, sequence, k_smoothing=0.0):
"""
Use the Baum-Welch Algorithm which utilizes Expectation-Maximization
and the Forward-Backward algorithm to find the maximum likelihood
estimate for parameters (A,B,pi).
Notation used:
gamma: Probability of being in state i at time t
given O and (A,B,pi).
Row: state. Column: observation
xi: Joint probability of being in state i at time t and
state (i + 1) at time (t + 1) given O and (A,B,pi).
xi[state i][state j][time t]
Args:
sequence (list<char>): Observation sequence O
k_smoothing (float): Smoothing parameter for add-k smoothing to
avoid zero probability. Value should be between [0.0, 1.0].
"""
rows = len(self._all_states)
columns = len(sequence)
alpha = self._forward(sequence)
beta = self._backward(sequence)
# build gamma
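        # gamma[s][t] = alpha[s][t] * beta[s][t] / sum_j alpha[j][t] * beta[j][t]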
gamma = init_matrix(rows, columns, "float")
for s_index in range(rows):
for o_index in range(columns):
prob = alpha[s_index][o_index] * beta[s_index][o_index]
prob /= sum(map(
lambda j: alpha[j][o_index] * beta[j][o_index],
range(rows)
))
gamma[s_index][o_index] = prob
        # build xi
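        # xi[i][j][t] = alpha[i][t] * A[i][j] * B[j][O_{t+1}] * beta[j][t+1], normalised over (i, j)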
xi = init_3d_matrix(rows, rows, columns - 1)
for o_index in range(columns - 1):
obs = sequence[o_index]
obs_next = sequence[o_index + 1]
denominator = 0.0
for s_from in range(rows):
for s_to, state_to in enumerate(self._all_states):
single_state_index = self._single_states.index(
self._get_state_by_order(state_to, 1)
)
prob = (
alpha[s_from][o_index]
* beta[s_to][o_index + 1]
* self._A[s_from][s_to]
* self._B[single_state_index][self._all_obs.index(obs_next)]
)
xi[s_from][s_to][o_index] = prob
denominator += prob
if denominator == 0:
continue
for s_from in range(rows):
for s_to in range(rows):
xi[s_from][s_to][o_index] /= denominator
# update all parameters (A,B,pi).
for s_index, state in enumerate(self._all_states):
# update pi
self._pi[self._highest_order - 1][state] = (
(gamma[s_index][0] + k_smoothing)
/ (1 + rows * k_smoothing)
)
# update A
gamma_sum = sum(map(
lambda o_index: gamma[s_index][o_index],
range(columns - 1)
))
if(gamma_sum == 0):
for s_prime in range(rows):
self._A[s_index][s_prime] = 0
else:
for s_prime in range(rows):
xi_sum = sum(map(
lambda o_index: xi[s_index][s_prime][o_index],
range(columns - 1)
))
self._A[s_index][s_prime] = (
(xi_sum + k_smoothing)
/ (gamma_sum + (rows * k_smoothing))
)
# update B
gamma_sum += gamma[s_index][columns - 1]
single_state_index = self._single_states.index(
self._get_state_by_order(state, 1)
)
if(gamma_sum == 0):
                for
#!/usr/bin/python
from krrt.utils import get_opts, write_file
from krrt.planning.strips.representation import parse_problem, generate_action, Action
from krrt.planning import parse_output_FF, parse_output_popf, parse_output_ipc
from krrt.sat.CNF import OptimizedLevelWeightedFormula
from linearizer import count_linearizations
from lifter import lift_POP, make_layered_POP
from pop import POP
from gurobipy import *
import time
import networkx as nx
USAGE_STRING = "\n\
Usage: python encoder.py -<option> <argument> -<option> <argument> ... <FLAG> <FLAG> ...\n\n\
\n\
Where options are:\n\
-domain <pddl domain file>\n\
-prob <pddl problem file>\n\
-ffout <captured FF output>\n\
-mercout <captured Mercury output>\n\
"
def encode_POP_v1(dom, prob, pop, flags, popfile):
# For sanitization, make sure we close the pop
pop.transativly_close()
allF, allA, I, G = parse_problem(dom, prob)
F = pop.F
A = pop.A
I = pop.I
G = pop.G
init = pop.init
goal = pop.goal
adders = {}
deleters = {}
needers = {}
for f in F:
adders[f] = set([])
deleters[f] = set([])
needers[f] = set([])
for a in A:
for f in a.adds:
adders[f].add(a)
for f in a.dels:
deleters[f].add(a)
for f in a.precond:
needers[f].add(a)
times = [time.time()]
try:
# Create a new model
m = Model("min_reorder")
m.Params.Threads = 1
# Create support variables
sup_vars = []
sup_var_mapping = {}
for f in F:
sup_var_mapping[f] = {}
for a2 in needers[f]:
sup_var_mapping[f][a2] = {}
for a1 in adders[f]:
if a1 != a2:
sup_vars.append(m.addVar(vtype=GRB.BINARY, name="X_%s_%s_%s" % (a1, a2, f)))
sup_var_mapping[f][a2][a1] = sup_vars[-1]
# Create the promotion / demotion variables
interference_vars = []
interference_var_mapping = {}
for f in F:
interference_var_mapping[f] = {}
for a1 in (needers[f] | adders[f]):
for a2 in deleters[f]:
if a1 != a2:
if a1 not in interference_var_mapping[f]:
interference_var_mapping[f][a1] = {}
if a2 not in interference_var_mapping[f]:
interference_var_mapping[f][a2] = {}
if a2 not in interference_var_mapping[f][a1]:
interference_vars.append(m.addVar(vtype=GRB.BINARY, name="I_%s_%s_%s" % (a1, a2, f)))
interference_var_mapping[f][a1][a2] = interference_vars[-1]
if a1 not in interference_var_mapping[f][a2]:
interference_vars.append(m.addVar(vtype=GRB.BINARY, name="I_%s_%s_%s" % (a2, a1, f)))
interference_var_mapping[f][a2][a1] = interference_vars[-1]
# Create the ordering variables
order_vars = []
order_var_mapping = {}
for a1 in A:
order_var_mapping[a1] = {}
for a2 in A:
order_vars.append(m.addVar(vtype=GRB.BINARY, name="O_%s_%s" % (a1, a2)))
order_var_mapping[a1][a2] = order_vars[-1]
# Integrate new variables
m.update()
# Set objective
m.setObjective(quicksum(order_vars), GRB.MINIMIZE)
#################
## Constraints ##
#################
# Mutual exclusion
counter = 1
for f in interference_var_mapping:
for a1 in interference_var_mapping[f]:
for a2 in interference_var_mapping[f][a1]:
if a1 != a2:
x = interference_var_mapping[f][a1][a2]
y = interference_var_mapping[f][a2][a1]
m.addConstr(x + y == 1, "mut_ex_%d" % counter)
counter += 1
# Single supporter
counter = 1
for f in F:
for a2 in sup_var_mapping[f]:
m.addConstr(quicksum(list(sup_var_mapping[f][a2].values())) == 1, "sup_%d" % counter)
counter += 1
# Causal-link protection
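        # If a1 supports a2 with f (x = 1), every other deleter ad of f must be demoted
        # before a1 (y) or promoted after a2 (z): encoded as (1 - x) + y + z >= 1.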
counter = 1
for f in F:
for a2 in sup_var_mapping[f]:
for a1 in sup_var_mapping[f][a2]:
for ad in deleters[f]:
if a1 != a2 and a1 != ad and a2 != ad:
x = sup_var_mapping[f][a2][a1]
y = interference_var_mapping[f][ad][a1]
z = interference_var_mapping[f][a2][ad]
m.addConstr((1 - x) + (y + z) >= 1, "prom_dom_%d" % counter)
counter += 1
# Link support with ordering
counter = 1
for f in F:
for a2 in sup_var_mapping[f]:
for a1 in sup_var_mapping[f][a2]:
if a1 != a2:
x = sup_var_mapping[f][a2][a1]
y = order_var_mapping[a1][a2]
m.addConstr(y - x >= 0, "order_sup_%d" % counter)
counter += 1
        # Link interference with ordering
counter = 1
for f in interference_var_mapping:
for a1 in interference_var_mapping[f]:
for a2 in interference_var_mapping[f][a1]:
if a1 != a2:
x = interference_var_mapping[f][a1][a2]
y = order_var_mapping[a1][a2]
m.addConstr(y - x >= 0, "order_inter_%d" % counter)
counter += 1
# Transitive closure
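        # O(a1,a2) and O(a2,a3) imply O(a1,a3); the implication x & y -> z is linearised
        # as (1 - x) + (1 - y) + z >= 1.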
counter = 1
for a1 in A:
for a2 in A:
for a3 in A:
x = order_var_mapping[a1][a2]
y = order_var_mapping[a2][a3]
z = order_var_mapping[a1][a3]
m.addConstr((1 - x) + (1 - y) + z >= 1, "trans_%d" % counter)
counter += 1
# Forbid self loops
counter = 1
for a in A:
m.addConstr(order_var_mapping[a][a] == 0, "noloop_%d" % counter)
counter += 1
# Init and goal
m.addConstr(order_var_mapping[init][goal] == 1)
for a in A - set([init, goal]):
m.addConstr(order_var_mapping[init][a] == 1)
m.addConstr(order_var_mapping[a][goal] == 1)
#############################
times.append(time.time())
m.optimize()
#for v in m.getVars():
# print v.varName, v.x
print('\nObj:', m.objVal)
times.append(time.time())
print("Encoding Time: %f" % (times[1] - times[0]))
print("Solving Time: %f\n" % (times[2] - times[1]))
except GurobiError as e:
print('Error reported:', end=' ')
print(e)
def encode_POP_v2(dom, prob, pop, flags, popfile):
# For sanitization, make sure we close the pop
pop.transativly_close()
allF, allA, I, G = parse_problem(dom, prob)
F = pop.F
A = pop.A
I = pop.I
G = pop.G
init = pop.init
goal = pop.goal
adders = {}
deleters = {}
needers = {}
for f in F:
adders[f] = set([])
deleters[f] = set([])
needers[f] = set([])
for a in A:
for f in a.adds:
adders[f].add(a)
for f in a.dels:
deleters[f].add(a)
for f in a.precond:
needers[f].add(a)
times = [time.time()]
# Create a new model
m = Model("min_reorder")
m.Params.Threads = 1
# Create the vars for each action
v2a = {}
a2v = {}
for a in A:
a2v[a] = m.addVar(vtype=GRB.BINARY, name="act_%s" % str(a))
m.update()
v2a[a2v[a]] = a
# Create the vars for each action ordering
v2o = {}
o2v = {}
for a1 in A:
for a2 in A:
o2v[(a1,a2)] = m.addVar(vtype=GRB.BINARY, name="ord_%s_%s" % (str(a1), str(a2)))
m.update()
v2o[o2v[(a1,a2)]] = (a1,a2)
# Create the vars for each possible action support
v2s = {}
s2v = {}
for a2 in A:
for p in a2.precond:
for a1 in adders[p]:
s2v[(a1,p,a2)] = m.addVar(vtype=GRB.BINARY, name="sup_%s_%s_%s" % (str(a1), str(p), str(a2)))
m.update()
v2s[s2v[(a1,p,a2)]] = (a1,p,a2)
# Integrate new variables
m.update()
order_count = 1 + len(list(o2v.keys()))
# Set objective
# Use the first if only optimizing for the number of ordering constraints
m.setObjective(quicksum(list(v2o.keys())), GRB.MINIMIZE)
#m.setObjective(quicksum(v2o.keys() + [order_count * var for var in v2a.keys()]), GRB.MINIMIZE)
#################
## Constraints ##
#################
    # The following forces every action to be included (comment it out to allow actions to be dropped)
for a in A:
m.addConstr(a2v[a] == 1)
    # Forbid self-orderings (no action is ordered before itself)
for a in A:
m.addConstr(o2v[(a,a)] == 0)
# Add the transitivity constraints
for a1 in A:
for a2 in A:
for a3 in A:
x = o2v[(a1,a2)]
y = o2v[(a2,a3)]
z = o2v[(a1,a3)]
m.addConstr((1 - x) + (1 - y) + z >= 1)
# Add the ordering -> actions constraints
for a1 in A:
for a2 in A:
m.addConstr(o2v[(a1,a2)] <= a2v[a1])
m.addConstr(o2v[(a1,a2)] <= a2v[a2])
# Init and goal
m.addConstr(o2v[(init,goal)] == 1)
for a in A - set([init, goal]):
m.addConstr((1 - a2v[a]) + o2v[(init,a)] == 1)
m.addConstr((1 - a2v[a]) + o2v[(a,goal)] == 1)
# Orderings exclude one another
for a1 in A:
for a2 in A:
m.addConstr(o2v[(a1,a2)] + o2v[(a2,a1)] <= 1)
# Ensure that we have a goal and init action.
m.addConstr(a2v[init] == 1)
m.addConstr(a2v[goal] == 1)
# Satisfy all the preconditions
for a2 in A:
for p in a2.precond:
m.addConstr((1 - a2v[a2]) + quicksum([s2v[(a1,p,a2)] for a1 in [x for x in adders[p] if x is not a2]]) >= 1)
# Create unthreatened support
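    # For every potential causal link a1 --p--> a2 and every deleter ad of p: either the
    # link is unused, ad is excluded from the plan, or ad is ordered before a1 or after a2.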
for a2 in A:
for p in a2.precond:
            # Can't support yourself (not strictly necessary, but useful for visualizing output)
if (a2, p, a2) in s2v:
m.addConstr(s2v[(a2, p, a2)] == 0)
for a1 in [x for x in adders[p] if x is not a2]:
# Support implies ordering
m.addConstr((1 - s2v[(a1,p,a2)]) + o2v[(a1,a2)] >= 1)
# Forbid threats
#print "\n%s--%s-->%s: %s" % (str(a1), str(p), str(a2), str(deleters[p]))
for ad in [x for x in deleters[p] if x not in set([a1,a2])]:
#print "...%s--%s-->%s: %s" % (str(a1), str(p), str(a2), str(ad))
m.addConstr((1 - s2v[(a1,p,a2)]) + (1 - a2v[ad]) + o2v[(ad,a1)] + o2v[(a2,ad)] >= 1)
#############################
times.append(time.time())
m.optimize()
#for v in m.getVars():
# print v.varName, v.x
print('\nObj:', m.objVal)
print("Actions: %d / %d" % (sum([int(v.x) for v in list(v2a.keys())]), len(A)))
print('Orderings:', sum([int(v.x) for v in list(v2o.keys())]))
times.append(time.time())
print("Encoding Time: %f" % (times[1] - times[0]))
print("Solving Time: %f\n" % (times[2] - times[1]))
if popfile:
p = POP()
for act in A:
p.add_action(act)
for v in list(v2s.keys()):
if 1 == int(v.x):
p.link_actions(v2s[v][0], v2s[v][2], str(v2s[v][1]))
for v in list(v2o.keys()):
if 1 == int(v.x):
p.link_actions(v2o[v][0], v2o[v][1], '')
######################
## OUTPUT SETTINGS ##
######################
# Comment out if you want to see all of the edges in the closure
p.transitivly_reduce()
        # Comment out if you want the
# Copyright (c) 2016 Iotic Labs Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://github.com/Iotic-Labs/py-IoticAgent/blob/master/LICENSE
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Wrapper object for Iotic Points.
Points come in two types:
* Feeds, which output data from a Thing
* Controls, which are a way of sending data to a Thing
"""
from __future__ import unicode_literals
import logging
logger = logging.getLogger(__name__)
from IoticAgent.Core.Validation import Validation
from IoticAgent.Core.Const import R_FEED, R_CONTROL
from IoticAgent.Core.compat import Sequence, Mapping, raise_from, string_types, ensure_unicode
from .Resource import Resource
from .utils import private_names_for, foc_to_str
from .PointMeta import PointMeta
_POINT_TYPES = frozenset((R_FEED, R_CONTROL))
class Point(Resource):
# overridden by subclasses (e.g. R_FEED)
_type = None
"""
Point class. A base class for feed or control.
"""
def __init__(self, client, lid, pid, guid):
if self._type not in _POINT_TYPES:
raise TypeError('_type not set to a valid point type')
super(Point, self).__init__(client, guid)
self.__lid = Validation.lid_check_convert(lid)
self.__pid = Validation.pid_check_convert(pid)
def __hash__(self):
# Why not just hash guid? Because Point is used before knowing guid in some cases
        # Why not hash without guid? Because in two separate containers one could have identical points
# (if not taking guid into account)
return hash(self.__lid) ^ hash(self.__pid) ^ hash(self._type) ^ hash(self.guid)
def __eq__(self, other):
return (isinstance(other, Point) and
self.guid == other.guid and
self._type == other._type and
self.__lid == other.__lid and
self.__pid == other.__pid)
def __str__(self):
return '%s (%s: %s, %s)' % (self.guid, foc_to_str(self._type), self.__lid, self.__pid)
@property
def lid(self):
"""
The local id of the Thing that advertises this Point. This is unique to you on this container.
"""
return self.__lid
@property
def pid(self):
"""
Point id - the local id of this Point. This is unique to you on this container.
Think of it as a nickname for the Point
"""
return self.__pid
@property
def foc(self):
"""
Whether this Point is a feed or control. String of either `"feed"` or `"control"`
"""
return foc_to_str(self._type)
def rename(self, new_pid):
"""
Rename the Point.
Raises:
IOTException: Infrastructure problem detected
LinkException: Communications problem between you and the infrastructure
Args:
new_pid (string): The new local identifier of your Point
"""
logger.info("rename(new_pid=\"%s\") [lid=%s, pid=%s]", new_pid, self.__lid, self.__pid)
evt = self._client._request_point_rename(self._type, self.__lid, self.__pid, new_pid)
self._client._wait_and_except_if_failed(evt)
self.__pid = new_pid
def list(self, limit=50, offset=0):
"""
List `all` the values on this Point.
Returns:
QAPI list function payload
Raises:
IOTException: Infrastructure problem detected
LinkException: Communications problem between you and the infrastructure
Args:
limit (integer, optional): Return this many value details
offset (integer, optional): Return value details starting at this offset
"""
logger.info("list(limit=%s, offset=%s) [lid=%s,pid=%s]", limit, offset, self.__lid, self.__pid)
evt = self._client._request_point_value_list(self.__lid, self.__pid, self._type, limit=limit, offset=offset)
self._client._wait_and_except_if_failed(evt)
return evt.payload['values']
def list_followers(self):
"""
List followers for this point, i.e. remote follows for feeds and remote attaches for controls.
Returns:
QAPI subscription list function payload
::
{
"<Subscription GUID 1>": "<GUID of follower1>",
"<Subscription GUID 2>": "<GUID of follower2>"
}
Raises:
IOTException: Infrastructure problem detected
LinkException: Communications problem between you and the infrastructure
"""
evt = self._client._request_point_list_detailed(self._type, self.__lid, self.__pid)
self._client._wait_and_except_if_failed(evt)
return evt.payload['subs']
def get_meta(self):
"""
Get the metadata object for this Point
Returns:
A :doc:`IoticAgent.IOT.PointMeta` PointMeta object
**OR**
Raises:
IOTException: Infrastructure problem detected
LinkException: Communications problem between you and the infrastructure
"""
rdf = self.get_meta_rdf(fmt='n3')
return PointMeta(self, rdf, self._client.default_lang, fmt='n3')
def get_meta_rdf(self, fmt='n3'):
"""
Get the metadata for this Point in rdf fmt
        For advanced users who want to manipulate the RDF for this Point directly without the
        :doc:`IoticAgent.IOT.PointMeta` PointMeta helper object.
Returns:
The RDF in the format you specify.
**OR**
Raises:
IOTException: Infrastructure problem detected
LinkException: Communications problem between you and the infrastructure
Args:
fmt (string, optional): The format of RDF you want returned. Valid formats are: "xml", "n3", "turtle"
"""
evt = self._client._request_point_meta_get(self._type, self.__lid, self.__pid, fmt=fmt)
self._client._wait_and_except_if_failed(evt)
return evt.payload['meta']
def set_meta_rdf(self, rdf, fmt='n3'):
"""
Set the metadata for this Point in rdf fmt
"""
evt = self._client._request_point_meta_set(self._type, self.__lid, self.__pid, rdf, fmt=fmt)
self._client._wait_and_except_if_failed(evt)
def create_tag(self, tags):
"""
Create tags for a Point in the language you specify. Tags can only contain alphanumeric (unicode) characters
and the underscore. Tags will be stored lower-cased.
Raises:
IOTException: Infrastructure problem detected
LinkException: Communications problem between you and the infrastructure
Args:
tags (list): The list of tags you want to add to your Point, e.g. ["garden", "soil"]
"""
if isinstance(tags, str):
tags = [tags]
evt = self._client._request_point_tag_update(self._type, self.__lid, self.__pid, tags, delete=False)
self._client._wait_and_except_if_failed(evt)
def delete_tag(self, tags):
"""
Delete tags for a Point in the language you specify. Case will be ignored and any tags matching lower-cased
will be deleted.
Raises:
IOTException: Infrastructure problem detected
LinkException: Communications problem between you and the infrastructure
Args:
tags (list): The list of tags you want to delete from your Point, e.g. ["garden", "soil"]
"""
if isinstance(tags, str):
tags = [tags]
evt = self._client._request_point_tag_update(self._type, self.__lid, self.__pid, tags, delete=True)
self._client._wait_and_except_if_failed(evt)
def list_tag(self, limit=50, offset=0):
"""
List `all` the tags for this Point
Returns:
List of tags, as below
::
[
"mytag1",
"mytag2"
"ein_name",
"nochein_name"
]
**OR**
Raises:
IOTException: Infrastructure problem detected
LinkException: Communications problem between you and the infrastructure
Args:
limit (integer, optional): Return at most this many tags
offset (integer, optional): Return tags starting at this offset
"""
evt = self._client._request_point_tag_list(self._type, self.__lid, self.__pid, limit=limit, offset=offset)
self._client._wait_and_except_if_failed(evt)
return evt.payload['tags']
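    # Illustrative sketch (assumed usage): tags can be round-tripped with the methods above,
    # e.g.
    #   point.create_tag(["garden", "soil"])
    #   point.list_tag()          # -> ["garden", "soil", ...]
    #   point.delete_tag(["soil"])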
def create_value(self, label, vtype, lang=None, description=None, unit=None):
"""
Create a value on this Point. Values are descriptions in semantic metadata of the individual data items
you are sharing (or expecting to receive, if this Point is a control). This will help others to search for
your feed or control. If a value with the given label (and language) already exists, its fields are updated
with the provided ones (or unset, if None).
Raises:
IOTException: Infrastructure problem detected
LinkException: Communications problem between you and the infrastructure
Args:
label (string): The label for this value e.g. "Temperature". The label must be unique for this
Point. E.g. You can't have two data values called "Volts" but you can have "volts1" and "volts2".
lang (string, optional): The two-character ISO 639-1 language code to use for the description. None means
use the default language for your agent. See :doc:`IoticAgent.IOT.Config`
vtype (xsd:datatype): The datatype of the data you are describing, e.g. dateTime. We recommend
                you use an Iotic Labs-defined constant from :doc:`IoticAgent.Datatypes`
description (string, optional): The longer descriptive text for this value.
unit (ontology url, optional): The url of the ontological description of the unit of your value. We
recommend you use a constant from :doc:`IoticAgent.Units`
::
# example with no units as time is unit-less
my_feed.create_value("timestamp",
Datatypes.DATETIME,
"en",
"time of reading")
::
# example with a unit from the Units class
my_feed.create_value("temperature",
Datatypes.DECIMAL,
"en",
"Fish-tank temperature in celsius",
Units.CELSIUS)
"""
evt = self._client._request_point_value_create(self.__lid, self.__pid, self._type, label, vtype, lang,
description, unit)
self._client._wait_and_except_if_failed(evt)
def delete_value(self, label=None):
"""
Delete the labelled value (or all values) on this Point
Raises:
IOTException: Infrastructure problem detected
LinkException: Communications problem between you and the infrastructure
Args:
label (string, optional): The label for the value you want to delete. If not specified, all values for this
point will be removed.
"""
evt = self._client._request_point_value_delete(self.__lid, self.__pid, self._type, label=label)
self._client._wait_and_except_if_failed(evt)
class Feed(Point):
"""
`Feeds` are advertised when a Thing has data to share. They are for out-going data which will get shared with
any remote Things that have followed them. Feeds are one-to-many.
"""
_type = R_FEED
def get_template(self):
"""
Get new :doc:`IoticAgent.IOT.PointValueHelper` PointDataObject instance to use for sharing data.
"""
return self._client._get_point_data_handler_for(self).get_template()
def share(self, data, mime=None, time=None):
"""
Share some data from this Feed
Raises:
# psyneulink/library/components/mechanisms/processing/objective/predictionerrormechanism.py
# Princeton University licenses this file to You under the Apache License,
# Version 2.0 (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at:
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# ***************************** PredictionErrorMechanism ***********************
"""
Contents
--------
* `PredictionErrorMechanism_Overview`
* `PredictionErrorMechanism_Creation`
* `PredictionErrorMechanism_Structure`
* `PredictionErrorMechanism_Execution`
* `PredictionErrorMechanism_Example`
* `PredictionErrorMechanism_Class_Reference`
.. _PredictionErrorMechanism_Overview:
Overview
--------
A PredictionErrorMechanism is a subclass of `ComparatorMechanism` that receives two inputs (a sample and a target),
and calculates the temporal difference prediction error as found in `Montague, Dayan, and Sejnowski (1996)
<http://www.jneurosci.org/content/jneuro/16/5/1936.full.pdf>`_ using its `function
<PredictionErrorMechanism.function>`, and places the delta values (the difference between the actual and predicted
reward) in its *OUTCOME* `OutputPort <PredictionErrorMechanism.output_port>`.
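In the standard temporal-difference formulation this error at time t is
delta(t) = r(t) + gamma * V(t+1) - V(t) (the reward plus the discounted next prediction,
minus the current prediction); the exact form computed here is determined by the
Mechanism's `function <PredictionErrorMechanism.function>`, a `PredictionErrorDeltaFunction`
by default.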
.. _PredictionErrorMechanism_Creation:
Creating a PredictionErrorMechanism
-----------------------------------
A PredictionErrorMechanism is usually created automatically when a `LearningMechanism`
`is created <LearningMechanism_Creation>` (using the `TDLearning` function).
A PredictionErrorMechanism can also be created directly by calling its constructor.
Its **sample** and **target** arguments are used to specify the OutputPorts
that provide the sample and target inputs, respectively (see
`ObjectiveMechanism Monitored Output Ports <ObjectiveMechanism_Monitor>`
for details). When the PredictionErrorMechanism is created, two InputPorts are
created, one each for its sample and target inputs (and named, by default
*SAMPLE* and *TARGET*). Each is assigned a MappingProjection from the
corresponding OutputPort specified in the **sample** and **target** arguments.
It is important to recognize that the value of the *SAMPLE* and *TARGET*
InputPorts must have the same length and type, so that they can be compared
using the PredictionErrorMechanism's `function
<PredictionErrorMechanism.function>`. By default, they use the format of the
OutputPorts specified in the **sample** and **target** arguments, respectively,
and the `MappingProjection` to each uses an `IDENTITY_MATRIX`. Therefore, for
the default configuration, the OutputPorts specified in the **sample** and
**target** arguments must have values of the same length and type. If these
differ, the **input_ports** argument can be used to explicitly specify the
format of the PredictionErrorMechanism's *SAMPLE* and *TARGET* InputPorts, to
insure they are compatible with one another (as well as to customize their
names, if desired). If the **input_ports** argument is used, *both* the sample
and target InputPorts must be specified. Any of the formats for `specifying
InputPorts <InputPort_Specification>` can be used in the argument. If values
are assigned for the InputPorts, they must be of equal length and type. Their
types must also be compatible with the value of the OutputPorts specified in
the **sample** and **target** arguments. However, the length specified for an
InputPort can differ from its corresponding OutputPort; in that case, by
default, the MappingProjection created uses a `FULL_CONNECTIVITY` matrix. Thus,
OutputPorts of differing lengths can be mapped to the sample and target
InputPorts of a PredictionErrorMechanism (see the `example
<PredictionErrorMechanism_Example>` below), so long as the latter are of the
same length. If a projection other than a `FULL_CONNECTIVITY` matrix is
needed, this can be specified using the *PROJECTION* entry of a `Port
specification dictionary <Port_Specification>` for the InputPort in the
**input_ports** argument.
.. _PredictionErrorMechanism_Structure:
Structure
---------
A PredictionErrorMechanism has two `input_ports
<ComparatorMechanism.input_ports>`, each of which receives a
`MappingProjection` from a corresponding OutputPort specified in the
**sample** and **target** arguments of its constructor. The InputPorts are
listed in the Mechanism's `input_ports <ComparatorMechanism.input_ports>`
attribute and named, respectively, *SAMPLE* and *TARGET*. The OutputPorts
from which they receive their projections (specified in the **sample** and
**target** arguments) are listed in the Mechanism's `sample
<ComparatorMechanism.sample>` and `target
<ComparatorMechanism.target>` attributes as well as in its
`monitored_output_ports <ComparatorMechanism.monitored_output_ports>`
attribute. The PredictionErrorMechanism's `function
<PredictionErrorMechanism.function>` calculates the difference between the
predicted reward and the true reward at each timestep in **SAMPLE**. By
default, it uses a `PredictionErrorDeltaFunction`. However, the
`function <PredictionErrorMechanism.function>` can be customized, so long as it
is replaced with one that takes two arrays with the same format as its inputs
and generates a similar array as its result. The result is assigned as the
value of the PredictionErrorMechanism's *OUTCOME* (`primary
<OutputPort_Primary>`) OutputPort.
.. _PredictionErrorMechanism_Execution:
Execution
---------
When a PredictionErrorMechanism is executed, it updates its input_ports with
the values of the OutputPorts specified in its **sample** and **target**
arguments, and then uses its `function <PredictionErrorMechanism.function>` to
compare these. By default, the result is assigned to the `value
<PredictionErrorMechanism.value>` of its *OUTCOME* `output_port
<PredictionErrorMechanism.output_port>`, and as the first item of the
Mechanism's `output_values <PredictionErrorMechanism.output_values>` attribute.
.. _PredictionErrorMechanism_Example:
Example
-------
.. _PredictionErrorMechanism_Default_Input_Value_Example:
*Formatting InputPort values*
The **default_variable** argument can be used to specify a particular format
for the SAMPLE and/or TARGET InputPorts of a PredictionErrorMechanism. This
can be useful when one or both of these differ from the format of the
OutputPort(s) specified in the **sample** and **target** arguments. For
example, for `Temporal Difference Learning <TDLearning>`, a
PredictionErrorMechanism is used to compare the predicted reward from the
sample with the true reward (the target). In the example below, the sample
Mechanism is a `TransferMechanism` that uses the `Linear` function to output
the sample values. Because the output is a vector, specifying it as the
PredictionErrorMechanism's **sample** argument will generate a corresponding
InputPort with a vector as its value. This should match the reward
signal specified in the PredictionErrorMechanism's **target** argument, the
value of which is a vector of the same length as the output of sample.
>>> import psyneulink as pnl
>>> sample_mech = pnl.TransferMechanism(size=5,
... function=pnl.Linear())
>>> reward_mech = pnl.TransferMechanism(size=5)
>>> prediction_error_mech = pnl.PredictionErrorMechanism(sample=sample_mech,
... target=reward_mech)
Note that ``sample_mech`` is specified to take an array of length 5 as its
input, and therefore generate one of the same length as its `primary output
<OutputPort_Primary>`. Since it is assigned as the **sample** of the
PredictionErrorMechanism, by default this will create a *SAMPLE* InputPort of
length 5, that will match the length of the *TARGET* InputPort.
Currently the default method of implementing temporal difference learning in
PsyNeuLink requires the values of *SAMPLE* and *TARGET* to be provided as an
array representing a full time series as an experiment. See
`MontagueDayanSejnowski.py` in the Scripts folder for an example.
.. _PredictionErrorMechanism_Class_Reference:
Class Reference
---------------
"""
from typing import Iterable
import numpy as np
import typecheck as tc
from psyneulink.core.components.functions.combinationfunctions import PredictionErrorDeltaFunction
from psyneulink.core.components.mechanisms.mechanism import Mechanism_Base
from psyneulink.core.components.ports.outputport import OutputPort
from psyneulink.core.globals.keywords import INITIALIZING, OUTCOME, PREDICTION_ERROR_MECHANISM, SAMPLE, TARGET
from psyneulink.core.globals.parameters import Parameter
from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set, REPORT_OUTPUT_PREF
from psyneulink.core.globals.preferences.preferenceset import PreferenceEntry, PreferenceLevel, PREFERENCE_SET_NAME
from psyneulink.core.globals.utilities import is_numeric
from psyneulink.library.components.mechanisms.processing.objective.comparatormechanism import ComparatorMechanism, ComparatorMechanismError
__all__ = [
'PredictionErrorMechanism',
'PredictionErrorMechanismError'
]
class PredictionErrorMechanismError(ComparatorMechanismError):
def __init__(self, error_value):
self.error_value = error_value
def __str__(self):
return repr(self.error_value)
class PredictionErrorMechanism(ComparatorMechanism):
"""
PredictionErrorMechanism( \
sample, \
target, \
function=PredictionErrorDeltaFunction, \
output_ports=[OUTCOME], \
params=None, \
name=None, \
prefs=None)
Subclass of ComparatorMechanism that calculates the prediction error between the predicted reward and the target.
See `ComparatorMechanism <ComparatorMechanism_Class_Reference>` for additional arguments and attributes.
Arguments
---------
sample : OutputPort, Mechanism_Base, dict, number, or str
specifies the *SAMPLE* InputPort, that is evaluated by the `function <PredictionErrorMechanism.function>`.
target : OutputPort, Mechanism_Base, dict, number, or str
specifies the *TARGET* InputPort used by the function to evaluate `sample<PredictionErrorMechanism.sample>`.
function : CombinationFunction, ObjectiveFunction, function, or method : default PredictionErrorDeltaFunction
the function used to evaluate the SAMPLE and TARGET inputs.
learning_rate : Number : default 0.3
controls the weight of later timesteps compared to earlier ones; higher rates weight later timesteps more
heavily than previous ones.
Attributes
----------
sample : OutputPort, Mechanism_Base, dict, number, or str
the *SAMPLE* `InputPort`, the `value <InputPort.value>` of which will be evaluated by the function.
target : OutputPort, Mechanism_Base, dict, number, or str
the *TARGET* `InputPort`, the `value <InputPort.value>` of which will be used to evaluate `sample
<PredictionErrorMechanism.sample>`.
function : CombinationFunction, ObjectiveFunction, Function, or method : default PredictionErrorDeltaFunction
the function used to evaluate the sample and target inputs.
output_ports : str, Iterable : default OUTCOME
by default, contains only the *OUTCOME* (`primary <OutputPort_Primary>`) `OutputPort` of the
PredictionErrorMechanism.
learning_rate : Number : default 0.3
controls the weight of later timesteps compared to earlier ones; higher rates weight later timesteps more
heavily than previous ones.
"""
componentType = PREDICTION_ERROR_MECHANISM
classPreferenceLevel = PreferenceLevel.SUBTYPE
classPreferences = {
PREFERENCE_SET_NAME: 'PredictionErrorMechanismCustomClassPreferences',
REPORT_OUTPUT_PREF: PreferenceEntry(False, PreferenceLevel.INSTANCE)
}
class Parameters(ComparatorMechanism.Parameters):
"""
Attributes
----------
variable
see `variable <PredictionErrorMechanism.variable>`
:default value: None
:type:
:read only: True
function
see `function <PredictionErrorMechanism.function>`
:default value: `PredictionErrorDeltaFunction`
:type: `Function`
learning_rate
see `learning_rate <PredictionErrorMechanism.learning_rate>`
:default value: 0.3
:type: ``float``
"""
variable = Parameter(None, read_only=True, pnl_internal=True, constructor_argument='default_variable')
learning_rate = Parameter(0.3, modulable=True)
function = Parameter(PredictionErrorDeltaFunction, stateful=False, loggable=False)
sample = None
target = None
@tc.typecheck
def __init__(self,
                 sample: tc.optional(tc.any(OutputPort,
= angle, s_s = s_s, w_text = w_text, font = font)
self.collection = []
self.changeFlag = True
self.enumerator_p()
self.kill()
    #GET OBJECT PARAMETERS
def get_conf(self, obj):
return get_conf.get_conf(obj, graf)
def get_circle_conf(self, obj):
return get_conf.get_circle_conf(obj, graf)
def get_arc_conf(self, obj):
return get_conf.get_arc_conf(obj, graf)
def get_line_conf(self, obj):
return get_conf.get_line_conf(obj, graf)
def get_line_coord(self, obj):
return get_conf.get_line_coord(obj, graf)
def get_text_conf(self, obj):
return get_conf.get_text_conf(obj, graf)
def get_dim_conf(self, obj):
return get_conf.get_dim_conf(obj, graf)
def get_dimR_conf(self, obj):
return get_conf.get_dimR_conf(obj, graf)
    #EDIT NODES
def editEvent(self, event):
self.curent_class = edit.Edit_node(graf)
#СОПРЯЖЕНИЕ
def filletEvent(self, event=None):
self.curent_class = fillet.Fillet(graf)
    #GET THE PRIMITIVE CLOSEST TO A POINT
def get_obj(self, x, y, t_obj = 'line'):
return get_object.get_obj(x, y, graf, t_obj)
    #OFFSET
def offsetEvent(self, event=None):
self.curent_class = offset.Offset(graf)
    #SCALE OBJECTS
def scaleEvent(self, event=None):
self.curent_class = scale_object.Scale_object(graf)
    #ROTATE
def rotateEvent(self, event=None):
self.curent_class = rotate_object.Rotate_object(graf)
    #MIRROR (not applied to complex objects containing text)
def mirrorEvent(self, event=None):
self.curent_class = mirror_object.Mirror_object(graf)
    #MOVE
def moveEvent(self, event=None):
self.curent_class = move_object.Move_object(graf)
    #COPY
def copyEvent(self,event=None):
self.curent_class = copy_object.Copy_object(graf)
    #SELECT
def lapping_sel(self,event):
grab_object.lapping2(graf, select = 'select')
    #DESELECT
def lapping_desel(self, event):
grab_object.lapping2(graf, select = 'deselect')
def resRect(self, event):
self.rectx2=event.x
self.recty2=event.y
self.priv_coord = (self.rectx2, self.recty2)
self.rectx,self.recty = self.coordinator(self.rectx,self.recty)
self.set_coord()
        if self.rectx2<self.rectx:# Color depends on the x coordinates
color = self.left_color
else:
color = self.right_color
if self.rect:
self.c.coords(self.rect, self.rectx, self.recty, self.rectx2, self.recty2)
self.c.itemconfig(self.rect, outline = color)
else:
            self.rect=self.c.create_rectangle(self.rectx, self.recty, self.rectx2, self.recty2, fill=None,outline=color, tags=['line', 'obj', 'rect'])# Redraw from scratch at the new coordinates
def set_coord(self):
self.xynachres=self.c.coords(self.nachCoordy)
self.zoomOLDres = self.zoomOLD
    def delete(self, event=None, elements = None, add_history = None): # Delete objects
        def dele(i, h = None):# Removes the given object from the canvas and from ALLOBJECT
if h:
e = self.get_conf(i)
self.e_list.append(e)
self.c.delete(i)
del self.ALLOBJECT[i]
if ('c_', i) in self.history_undo:
self.history_undo.remove(('c_', i))
t1 = time.time()
        if elements == None:# If no elements to delete were specified
self.set_coord()
self.e_list = []
            map(lambda x: dele(x, h = 'add'), self.collection)# Iterate over the collection
self.collection = []
self.history_undo.append(('delete', (self.e_list, self.xynachres, self.zoomOLDres)))
self.changeFlag = True
self.enumerator_p()
self.kill()
        else:# If elements to delete were specified
map(dele, elements)
t2 = time.time()
print ('delete', t2-t1)
    def sbros(self):# Resets the collection - moves the list of selected objects into collectionBack.
t1 = time.time()
self.collectionBack = self.collection
self.c.delete('clone')
self.collection = []
t2 = time.time()
print ('sbros', t2-t1)
    def BackCol(self, event):# core-feature!!! - Puts the previous selection set back into the collection
        if self.resFlag == False and (not self.collection):# If nothing is being drawn and the collection is empty
def BC(i):
                if i in self.ALLOBJECT:# If the object is in the global list (i.e. was not deleted)
                    self.collection.append(i)# Add it to the collection
print (111)
            map(BC, self.collectionBack)# Iterate over the previous collection
select_clone.Select_clone(self.collection, graf)
            self.colObj()# Count the number of selected objects
draft_gui.gui.update_prop()
    def colObj(self):# Displays information about the number of selected objects
if self.collection:
self.info.config(text = ('Selected %s objects') %(len(self.collection)))
else:
self.info.config(text ='')
def back_color(self, color, obj):
if obj[0] in ['c', 'a']:
for i in self.ALLOBJECT[obj]['id']:
tag = self.ALLOBJECT[obj]['id'][i]
if 'line' in tag:
self.c.itemconfig(i, fill = color)
if 'cir' in tag or 'a' in tag:
self.c.itemconfig(i, outline = color)
else:
self.c.itemconfig(obj, fill = color)
def collektor_sel(self, event):
x = event.x
y = event.y
self.collektor(x, y, select = 'select')
def collektor_desel(self, event):
x = event.x
y = event.y
self.collektor(x, y, select = 'deselect')
    def collektor(self, x, y, select):# Adds to the collection the object whose primitives are active (under the cursor)
        # Get the object id from the currently active primitive
Num = get_object.get_obj(x, y, graf, 'all')
#Если не нажат Shift
if select == 'select':
#Если объект отсутствует в коллекции - добавить, сменить цвет
if Num not in self.collection and Num in self.ALLOBJECT:
self.collection.append(Num)
select_clone.Select_clone((Num,), graf)
self.Old_sel = None
#If Shift is pressed
else:
#If the object is in the collection - remove it and restore its color
if Num in self.collection:
self.collection.remove(Num)
self.c.delete('C'+Num)
draft_gui.gui.update_prop()
#Count the number of selected objects
self.colObj()
def mass_collektor(self, mass, select):#Adds objects to the collection from a batch of primitives
t1 = time.time()
old_col = self.collection
if select == 'select':#If adding
append_list = []#Used in place of the collection
gettags = self.c.gettags
append = append_list.append
for content in mass:
Num = gettags(content)[1]#Get the object id from the primitive
if Num not in self.collection and Num not in append_list and Num[0] != 'C':#If the object is not yet in the collection - add it and change its color
append(Num)
select_clone.Select_clone(append_list, graf)
self.collection.extend(append_list)
else: #If removing
delete_list = []
for content in mass:
Num = self.c.gettags(content)[1]#Get the object id from the primitive
if Num in self.collection and Num not in delete_list and Num[0] != 'C':#If the object is in the collection - remove it and restore its color
#If the object is not already in the deletion list
delete_list.append(Num)
#iterate over delete_list and remove all of its objects from the collection
for i in delete_list:
self.collection.remove(i)
self.c.delete('C'+i)
if old_col != self.collection:
draft_gui.gui.update_prop()
t2 = time.time()
print ('mass_collektor', t2-t1)
def edit_collektor(self, edit_mass): #Adds objects to the collection from a batch of primitives; if the batch contains dimensions, all other objects are kept out of the collection
prov = True #True until a dimension is encountered
append_list = []
for content in edit_mass:
non_ap = False
Num = self.c.gettags(content)[1]#Get the object id from the primitive
if Num not in append_list and Num[0] != 'C':
if Num[0] in ('d', 'r'):
prov = False
if Num[0] == 'r':
line1 = self.get_snap_line(Num)[0]
c = self.c.coords(line1) #get_conf.get_line_coord(line1, graf)#
xc = c[0]
yc = c[1]
if (xc, yc) == (self.ex, self.ey):
non_ap = True
elif Num[0] == 'c':
x0, y0, R, fill, width, sloy = get_conf.get_circle_conf(Num, graf)
if (x0, y0) == (self.ex, self.ey):
non_ap = True
elif Num[0] == 'a':
xc, yc, dx1, dy1, dx2, dy2, fill, width, sloy = get_conf.get_arc_conf(Num, graf)
if (xc, yc) == (self.ex, self.ey):
non_ap = True
if non_ap == False:
append_list.append(Num)
select_clone.Select_clone(append_list, graf)
if self.Old_sel in append_list:
self.Old_sel = None
self.collection.extend(append_list)
if self.tip_p == 'c' and prov == True and len(self.collection)==1:#If there is exactly one object, it is a line, and the snap is to its midpoint
return 'line_c'#Enables Move mode
else:
return 'another'#Enables Edit mode
def edit_c(self, edit_mass): #Checks what is in the collection - if it holds only dimensions along one line the collection is left unchanged; if other objects are present, only those that currently have a snap point are kept
delete_list = []#Objects from the collection that have no snap point
dim_list = []#Dimensions from the collection
line_dim_edit = True#Stays True until a non-dimension is encountered
for content in edit_mass:#Iterate over the incoming collection
if content[0] == 'd':#If the object is a dimension
dim_list.append(content)#Add it to the dimension list
else:
line_dim_edit = False#A non-dimension was encountered
undel_obj = False#If it stays False - remove the object from the collection
find = self.ALLOBJECT[content]['id']#self.c.find_withtag(content)#Get the object's primitives
for i in find:#Iterate over them
if i in self.find_privs2:#If the primitive is in the list of snap primitives
undel_obj = True#Keep the object in the collection
if undel_obj == False:#If the object is not to be kept
delete_list.append(content)#Add it to the deletion list
self.c.delete('C'+content)
map(lambda i: self.collection.remove(i), delete_list)#iterate over delete_list and remove all of its objects from the collection
#core-feature!!! - determines whether all the dimensions lie along one line. If so, the whole dimension chain can be moved at once
if line_dim_edit == True:#If no non-dimension was encountered
if len(dim_list) > 1:#If there is more than one dimension
line3_list = []#List of the first coordinates of the dimension lines
ort1 = None#orientation of the first dimension
ort2 = None#same for the second
bFlag = False#If it stays False - all dimension lines share a common coordinate (x or y) and lie along one line
for i in dim_list:# Iterate over the dimension list
if dim_list.index(i) == 0: #If this is the first dimension in the list
ort1 = self.ALLOBJECT[i]['ort']#Assign its orientation to the first variable
else:
ort2 = self.ALLOBJECT[i]['ort']#Otherwise to the second
if ort1 != ort2:#If the orientations differ - bail out, do not modify the collection any further
bFlag = True
break
line3 = self.get_snap_line(i)[2]#Take the dimension line of this dimension
coord = self.c.coords(line3)#Take the coordinates of the dimension line
line3_list.append(coord[0:2])#Append only the first two coordinates to the coordinate list
if bFlag == False:#If no bail-out occurred
for ind, i in enumerate(line3_list):#Iterate over the coordinate list
if ort1 == 'vertical':#If the orientation is vertical
if i == line3_list[-1]:#If the element is the last in the list
ii = -1#Take the previous element as the second one
else:
ii = 1#Otherwise take the next one
if i[1] != line3_list[ind + ii][1]:#If the y coordinate of the second differs from that of the first - bail out, do not modify the collection any further
bFlag = True
break
else:
if i == line3_list[-1]:
ii = -1
else:
ii = 1
if i[0] != line3_list[ind + ii][0]:#If the x coordinate of the second differs from that of the first - bail out, do not modify the collection any further
bFlag = True
break
if bFlag == False:#If no bail-out and
9 or bufsz)
def fileno(self):
return self.f.fileno()
def _write_header(self, pkt):
self.header_present=1
if self.linktype == None:
if type(pkt) is list or type(pkt) is tuple:
pkt = pkt[0]
self.linktype = LLNumTypes.get(pkt.__class__,1)
if self.append:
# Even if prone to race conditions, this seems to be the
# safest way to tell whether the header is already present
# because we have to handle compressed streams that
# are not as flexible as basic files
g = [open,gzip.open][self.gz](self.filename,"rb")
if g.read(16):
return
self.f.write(struct.pack(self.endian+"IHHIIII", 0xa1b2c3d4L,
2, 4, 0, 0, MTU, self.linktype))
self.f.flush()
def write(self, pkt):
"""accepts a either a single packet or a list of packets
to be written to the dumpfile
"""
if not self.header_present:
self._write_header(pkt)
for p in pkt:
self._write_packet(p)
def _write_packet(self, packet):
"""writes a single packet to the pcap file
"""
s = str(packet)
l = len(s)
sec = int(packet.time)
usec = int((packet.time-sec)*1000000)
self.f.write(struct.pack(self.endian+"IIII", sec, usec, l, l))
self.f.write(s)
if self.gz and self.sync:
self.f.flush()
def flush(self):
return self.f.flush()
def close(self):
return self.f.close()
re_extract_hexcap = re.compile("^(0x[0-9a-fA-F]{2,}[ :\t]|(0x)?[0-9a-fA-F]{2,}:|(0x)?[0-9a-fA-F]{3,}[: \t]|) *(([0-9a-fA-F]{2} {,2}){,16})")
def import_hexcap():
p = ""
try:
while 1:
l = raw_input().strip()
try:
p += re_extract_hexcap.match(l).groups()[3]
except:
warning("Parsing error during hexcap")
continue
except EOFError:
pass
p = p.replace(" ","")
p2=""
for i in range(len(p)/2):
p2 += chr(int(p[2*i:2*i+2],16))
return p2
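# A hedged illustration (hypothetical input, not taken from a real capture) of the
# hexdump formats accepted by re_extract_hexcap / import_hexcap above: the optional
# offset prefix is discarded and only the captured hex-byte columns are kept, e.g.
#   0x0000:  45 00 00 28 00 01 00 00 40 06 7c cd 7f 00 00 01
#   0010   7f 00 00 01 00 14 00 50 00 00 00 00 00 00 00 00
# Both lines would contribute their hex bytes to the reassembled packet string.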
def wireshark(pktlist):
f = os.tempnam("scapy")
wrpcap(f, pktlist)
os.spawnlp(os.P_NOWAIT, conf.prog.wireshark, conf.prog.wireshark, "-r", f)
def hexedit(x):
x = str(x)
f = os.tempnam("scapy")
open(f,"w").write(x)
os.spawnlp(os.P_WAIT, conf.prog.hexedit, conf.prog.hexedit, f)
x = open(f).read()
os.unlink(f)
return x
#####################
## knowledge bases ##
#####################
class KnowledgeBase:
def __init__(self, filename):
self.filename = filename
self.base = None
def lazy_init(self):
self.base = ""
def reload(self, filename = None):
if filename is not None:
self.filename = filename
oldbase = self.base
self.base = None
self.lazy_init()
if self.base is None:
self.base = oldbase
def get_base(self):
if self.base is None:
self.lazy_init()
return self.base
##########################
## IP location database ##
##########################
class IPCountryKnowledgeBase(KnowledgeBase):
"""
How to generate the base :
db = []
for l in open("GeoIPCountryWhois.csv").readlines():
s,e,c = l.split(",")[2:5]
db.append((int(s[1:-1]),int(e[1:-1]),c[1:-1]))
cPickle.dump(db, gzip.open("xxx","w"))
"""
def lazy_init(self):
self.base = load_object(self.filename)
class CountryLocKnowledgeBase(KnowledgeBase):
def lazy_init(self):
f=open(self.filename)
self.base = {}
while 1:
l = f.readline()
if not l:
break
l = l.strip().split(",")
if len(l) != 3:
continue
c,lat,long = l
self.base[c] = (float(long),float(lat))
f.close()
def locate_ip(ip):
ip=map(int,ip.split("."))
ip = ip[3]+(ip[2]<<8L)+(ip[1]<<16L)+(ip[0]<<24L)
cloc = country_loc_kdb.get_base()
db = IP_country_kdb.get_base()
d=0
f=len(db)-1
while (f-d) > 1:
guess = (d+f)/2
if ip > db[guess][0]:
d = guess
else:
f = guess
s,e,c = db[guess]
if s <= ip and ip <= e:
return cloc.get(c,None)
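# Hedged sketch of the lookup above: "8.8.8.8" becomes the integer
# 8*2**24 + 8*2**16 + 8*2**8 + 8 = 134744072, which is then bisected against the
# (start, end, country) ranges held by IP_country_kdb and finally mapped to a
# (longitude, latitude) pair through country_loc_kdb.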
###############
## p0f stuff ##
###############
# File format (according to p0f.fp) :
#
# wwww:ttt:D:ss:OOO...:QQ:OS:Details
#
# wwww - window size
# ttt - initial TTL
# D - don't fragment bit (0=unset, 1=set)
# ss - overall SYN packet size
# OOO - option value and order specification
# QQ - quirks list
# OS - OS genre
# details - OS description
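#
# A hypothetical example line, shown only to illustrate the format (not copied
# from p0f.fp):
#
#   S4:64:1:60:M*,N,W0,N,N,T:.:Linux:2.6 (example)
#
# p0fKnowledgeBase.lazy_init() below would store it as the tuple
#   ('S4', 64, 1, 60, 'M*,N,W0,N,N,T', '.', 'Linux', '2.6 (example)')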
class p0fKnowledgeBase(KnowledgeBase):
def __init__(self, filename):
KnowledgeBase.__init__(self, filename)
#self.ttl_range=[255]
def lazy_init(self):
try:
f=open(self.filename)
except IOError:
warning("Can't open base %s" % self.filename)
return
try:
self.base = []
for l in f:
if l[0] in ["#","\n"]:
continue
l = tuple(l.split(":"))
if len(l) < 8:
continue
li = map(int,l[1:4])
#if li[0] not in self.ttl_range:
# self.ttl_range.append(li[0])
# self.ttl_range.sort()
self.base.append((l[0], li[0], li[1], li[2], l[4], l[5], l[6], l[7][:-1]))
except:
warning("Can't parse p0f database (new p0f version ?)")
self.base = None
f.close()
def packet2p0f(pkt):
while pkt.haslayer(IP) and pkt.haslayer(TCP):
pkt = pkt.getlayer(IP)
if isinstance(pkt.payload, TCP):
break
pkt = pkt.payload
if not isinstance(pkt, IP) or not isinstance(pkt.payload, TCP):
raise TypeError("Not a TCP/IP packet")
if pkt.payload.flags & 0x13 != 0x02: #S,!A,!F
raise TypeError("Not a syn packet")
#t = p0f_kdb.ttl_range[:]
#t += [pkt.ttl]
#t.sort()
#ttl=t[t.index(pkt.ttl)+1]
ttl = pkt.ttl
df = (pkt.flags & 2) / 2
ss = len(pkt)
# from p0f/config.h : PACKET_BIG = 100
if ss > 100:
ss = 0
ooo = ""
mss = -1
qqT = False
qqP = False
#qqBroken = False
ilen = (pkt[TCP].dataofs << 2) - 20 # from p0f.c
for option in pkt.payload.options:
ilen -= 1
if option[0] == "MSS":
ooo += "M" + str(option[1]) + ","
mss = option[1]
# FIXME: qqBroken
ilen -= 3
elif option[0] == "WScale":
ooo += "W" + str(option[1]) + ","
# FIXME: qqBroken
ilen -= 2
elif option[0] == "Timestamp":
if option[1][0] == 0:
ooo += "T0,"
else:
ooo += "T,"
if option[1][1] != 0:
qqT = True
ilen -= 9
elif option[0] == "SAckOK":
ooo += "S,"
ilen -= 1
elif option[0] == "NOP":
ooo += "N,"
elif option[0] == "EOL":
ooo += "E,"
if ilen > 0:
qqP = True
else:
ooo += "?,"
# FIXME: ilen
ooo = ooo[:-1]
if ooo == "": ooo = "."
win = pkt.payload.window
if mss != -1:
if win % mss == 0:
win = "S" + str(win/mss)
elif win % (mss + 40) == 0:
win = "T" + str(win/(mss+40))
win = str(win)
qq = ""
if qqP:
qq += "P"
if pkt[IP].id == 0:
qq += "Z"
if pkt[IP].options != '':
qq += "I"
if pkt[TCP].urgptr != 0:
qq += "U"
if pkt[TCP].reserved != 0:
qq += "X"
if pkt[TCP].ack != 0:
qq += "A"
if qqT:
qq += "T"
if pkt[TCP].flags & 40 != 0:
# U or P
qq += "F"
if not isinstance(pkt[TCP].payload, NoPayload):
qq += "D"
# FIXME : "!" - broken options segment
if qq == "":
qq = "."
return (win,
ttl,
df,
ss,
ooo,
qq)
def p0f_correl(x,y):
d = 0
# wwww can be "*" or "%nn"
d += (x[0] == y[0] or y[0] == "*" or (y[0][0] == "%" and x[0].isdigit() and (int(x[0]) % int(y[0][1:])) == 0))
# ttl
d += (y[1] >= x[1] and y[1] - x[1] < 32)
for i in [2, 3, 5]:
d += (x[i] == y[i])
xopt = x[4].split(",")
yopt = y[4].split(",")
if len(xopt) == len(yopt):
same = True
for i in range(len(xopt)):
if not (xopt[i] == yopt[i] or
(len(yopt[i]) == 2 and len(xopt[i]) > 1 and
yopt[i][1] == "*" and xopt[i][0] == yopt[i][0]) or
(len(yopt[i]) > 2 and len(xopt[i]) > 1 and
yopt[i][1] == "%" and xopt[i][0] == yopt[i][0] and
int(xopt[i][1:]) % int(yopt[i][2:]) == 0)):
same = False
break
if same:
d += len(xopt)
return d
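# Hand-worked note (hedged) on the scoring above: a base entry is reported by p0f()
# below only when every component scores, i.e. d == 5 + len(options) -- one point
# each for window, ttl, df, ss and quirks, plus one per matching option.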
def p0f(pkt):
"""Passive OS fingerprinting: which OS emitted this TCP SYN ?
p0f(packet) -> accuracy, [list of guesses]
"""
pb = p0f_kdb.get_base()
if not pb:
warning("p0f base empty.")
return []
s = len(pb[0][0])
r = []
sig = packet2p0f(pkt)
max = len(sig[4].split(",")) + 5
for b in pb:
d = p0f_correl(sig,b)
if d == max:
r.append((b[6], b[7], b[1] - pkt[IP].ttl))
return r
def prnp0f(pkt):
try:
r = p0f(pkt)
except:
return
if r == []:
r = ("UNKNOWN", "[" + ":".join(map(str, packet2p0f(pkt))) + ":?:?]", None)
else:
r = r[0]
uptime = None
try:
uptime = pkt2uptime(pkt)
except:
pass
if uptime == 0:
uptime = None
res = pkt.sprintf("%IP.src%:%TCP.sport% - " + r[0] + " " + r[1])
if uptime is not None:
res += pkt.sprintf(" (up: " + str(uptime/3600) + " hrs)\n -> %IP.dst%:%TCP.dport%")
else:
res += pkt.sprintf("\n -> %IP.dst%:%TCP.dport%")
if r[2] is not None:
res += " (distance " + str(r[2]) + ")"
print res
def pkt2uptime(pkt, HZ=100):
"""Calculate the date the machine which emitted the packet booted using TCP timestamp
pkt2uptime(pkt, [HZ=100])"""
if not isinstance(pkt, Packet):
raise TypeError("Not a TCP packet")
if isinstance(pkt,NoPayload):
raise TypeError("Not a TCP packet")
if not isinstance(pkt, TCP):
return pkt2uptime(pkt.payload)
for opt in pkt.options:
if opt[0] == "Timestamp":
#t = pkt.time - opt[1][0] * 1.0/HZ
#return time.ctime(t)
t = opt[1][0] / HZ
return t
raise TypeError("No timestamp option")
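# Hedged usage sketch (assumes a live capture is possible and that the remote host
# sends the TCP timestamp option; otherwise pkt2uptime() raises TypeError):
#
#   p = sniff(filter="tcp[13] & 2 != 0", count=1)[0]
#   prnp0f(p)                # prints the OS guess, distance and uptime if known
#   print pkt2uptime(p)      # approximate seconds since the remote host booted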
#################
## Queso stuff ##
#################
def quesoTCPflags(flags):
if flags == "-":
return "-"
flv = "FSRPAUXY"
v = 0
for i in flags:
v |= 2**flv.index(i)
return "%x" % v
class QuesoKnowledgeBase(KnowledgeBase):
def lazy_init(self):
try:
f = open(self.filename)
except IOError:
return
self.base = {}
p = None
try:
for l in f:
l = l.strip()
if not l or l[0] == ';':
continue
if l[0] == '*':
if p is not None:
p[""] = name
name | |
# constants.py
# Copyright 2010, 2020 <NAME>
# Licence: See LICENCE (BSD licence)
"""Constants for Portable Game Notation (PGN) parser.
The defined constants are used when parsing PGN and FEN text, and when checking
the PGN game score represents a legal sequence of moves and variations.
"""
# r'(?:([a-h])x)?(([a-h])(?:[2-7]|([18])))(?:=([QRBN]))?' is the alternative
# considered for the pawn element of PGN_FORMAT.
# Its good point is the destination square is in a single captured group.
# Its bad point is the extra choices at the start of the element.
# The chosen version is probably slightly quicker, but 233,768 games were
# processed in just under 22 minutes to see a possible difference of 10 seconds
# in elapsed time.
# Changed to r'(?:([a-h])(?:x([a-h]))?(?:([2-7])|([18])(?:=([QRBN]))))' because
# it is easy to convert to the FIDE style for pawn promotion, 'e8Q' not 'e8=Q',
# additionally allowed in TEXT_FORMAT. (It's another 10 seconds quicker too.)
PGN_FORMAT = r'|'.join((
r''.join((r'(?#Start Tag)\[\s*',
r'(?#Tag Name)([A-Za-z0-9_]+)\s*',
r'(?#Tag Value)"((?:[^\\"]|\\.)*)"\s*',
r'(?#End Tag)(\])')),
r'(?:(?#Moves)(?#Piece)([KQRBN])([a-h1-8]?)(x?)([a-h][1-8])',
r'(?#Pawn)(?:([a-h])(?:x([a-h]))?(?:([2-7])|([18])(?:=([QRBN]))))',
r'(?#Castle)(O-O-O|O-O)(?#sevoM))',
r'(?#Game termination)(1-0|1/2-1/2|0-1|\*)',
r'(?#Move number)([1-9][0-9]*)',
r'(?#Dots)(\.+)',
r'(?#EOL comment)(;(?:[^\n]*))',
r'(?#Comment)(\{[^}]*\})',
r'(?#Start RAV)(\()',
r'(?#End RAV)(\))',
r'(?#Numeric Annotation Glyph)(\$(?:[1-9][0-9]{0,2}))',
r'(?#Reserved)(<[^>]*>)',
r'(?#Escaped)(\A%[^\n]*|\n%[^\n]*)',
r'(?#Pass)(--)',
r'(?#Check indicators)(?<=[1-8QRBNO])([+#])',
r'(?#Traditional Annonations)(?<=[1-8QRBNO+#])([!?][!?]?)',
r'(?#Bad Comment)(\{[^}]*)',
r'(?#Bad Reserved)(<[^>]*)',
r'(?#Bad Tag)(\[[^"]*".*?"\s*\])',
r'(?#End of file marker)(\032)(?=\[[^"]*".*?"\s*\])',
))
PGN_DISAMBIGUATION = r''.join(
(r'(?#Disambiguation PGN)',
r'(x?[a-h][1-8]',
))
TEXT_DISAMBIGUATION = r''.join(
(r'(?#Disambiguation Text)',
r'((?:-|x[QRBN]?)?[a-h][1-8]',
))
IGNORE_CASE_DISAMBIGUATION = r''.join(
(r'(?#Disambiguation Text)',
r'((?:(?:-|[xX][QRBNqrbn]?)?[a-hA-H][1-8](?:=[QRBNqrbn])?)|',
r'(?:b[xX][QRBNqrbn]?[a-hA-H][18])|',
r'(?#Promotion)',
r'=[QRBNqrbn]',
))
ANYTHING_ELSE = r'(?#Anything else)\S+[ \t\r\f\v]*)'
IMPORT_FORMAT = r'|'.join((
PGN_FORMAT,
PGN_DISAMBIGUATION,
ANYTHING_ELSE))
TEXT_FORMAT = r'|'.join((
PGN_FORMAT,
TEXT_DISAMBIGUATION,
ANYTHING_ELSE)).replace(
r'O-O-O|O-O', r'O-O-O|O-O|0-0-0|0-0').replace(
r'(?:=([QRBN])', r'(?:=?([QRBN])').replace(
r'8QRBNO', r'8QRBNO0').replace(
r'?:x', r'?:(?:[2-7][-x]|x)').replace(
r'x?', r'[-x]?')
# Assume 'B' means bishop unless followed by '[1-8]', and 'b' means bishop
# unless followed by '[1-8xX]'. There are positions where both a bishop and
# a pawn on the b-file can capture on a square on the 'a' and 'c' files: upper
# or lower case is the only practical way to decide (the following moves may
# be legal after either bishop or pawn capture). It is almost certain a SAN
# sequence like 'B8d5' will not be used in games to distinguish between two
# bishops able to move to 'd5'.
# The FIDE notation for pawn promotion is not supported when ignoring case
# because the sequence 'bxc8q' is ambiguous, in upper or lower case, until
# after the position has been examined.
IGNORE_CASE_FORMAT = r'|'.join((
PGN_FORMAT,
IGNORE_CASE_DISAMBIGUATION,
ANYTHING_ELSE)).replace(
r'QRBNO', r'QRBNOqrbno0').replace(
r'[KQRBN]', r'[KQRNkqrn]|B(?![1-8])|b(?![1-8xX])').replace(
r'O-O-O|O-O',
r'[Oo0]-[Oo0]-[Oo0]|[Oo0]-[Oo0]').replace(
r'[a-h][1-8]', r'[a-hA-H][1-8]').replace(
r'[a-h1-8]', r'[a-hA-H1-8]').replace(
r'[a-h]', r'[a-hA-H]').replace(
r'(x?)', r'([xX]?)').replace(
r'?:x', r'?:(?:[2-7][-xX]|[xX])').replace(
r'[QRBN]', r'[QRBNqrbn]')
# Indices of captured groups in PGN input format for match.group.
IFG_TAG_NAME = 1
IFG_TAG_VALUE = 2
IFG_END_TAG = 3
IFG_PIECE_MOVE = 4
IFG_PIECE_MOVE_FROM_FILE_OR_RANK = 5
IFG_PIECE_CAPTURE = 6
IFG_PIECE_DESTINATION = 7
IFG_PAWN_FROM_FILE = 8
IFG_PAWN_CAPTURE_TO_FILE = 9
IFG_PAWN_TO_RANK = 10
IFG_PAWN_PROMOTE_TO_RANK = 11
IFG_PAWN_PROMOTE_PIECE = 12
IFG_CASTLES = 13
IFG_GAME_TERMINATION = 14
IFG_MOVE_NUMBER = 15
IFG_DOTS = 16
IFG_COMMENT_TO_EOL = 17
IFG_COMMENT = 18
IFG_START_RAV = 19
IFG_END_RAV = 20
IFG_NUMERIC_ANNOTATION_GLYPH = 21
IFG_RESERVED = 22
IFG_ESCAPE = 23
IFG_PASS = 24
IFG_CHECK_INDICATOR = 25
IFG_TRADITIONAL_ANNOTATION = 26
IFG_BAD_COMMENT = 27
IFG_BAD_RESERVED = 28
IFG_BAD_TAG = 29
IFG_END_OF_FILE_MARKER = 30
IFG_OTHER_WITH_NON_NEWLINE_WHITESPACE = 31
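# A minimal sketch of how the capture-group indices above are meant to be used
# (illustrative only; the real tokeniser lives in the parser module):
#
#   import re
#   movetext_re = re.compile(IMPORT_FORMAT)
#   m = movetext_re.match('Nxe5')
#   assert m.group(IFG_PIECE_MOVE) == 'N'
#   assert m.group(IFG_PIECE_CAPTURE) == 'x'
#   assert m.group(IFG_PIECE_DESTINATION) == 'e5'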
# For spotting the pawn-move-like string which is the destination of a fully
# disambiguated piece move, say 'Qb4d4+??' including optional suffixes, where
# Qb4 has been rejected as a move because there is a Q on b4.
DISAMBIGUATE_TEXT = r'\A(x?)([a-h][1-8])'
DISAMBIGUATE_PGN = r'\Ax?[a-h][1-8]'
# The game.GameIgnoreCasePGN class may need to ignore '=Q', and similar, if,
# for example, it were processed by peeking ahead to decide between a pawn and
# bishop move for 'bxc8'.
DISAMBIGUATE_PROMOTION = r'\A=[QRBNqrbn]'
# Indices of captured groups for fully disambiguated piece move.
DG_CAPTURE = 1
DG_DESTINATION = 2
# For spotting the second part, of two, of a movetext token in long algebraic
# format (LAN). The first part, such as 'Qe2', will have been found by the
# IMPORT_FORMAT rules. LAN_FORMAT is similar to DISAMBIGUATE_TEXT.
LAN_FORMAT = r'(?#Lower case)\A([-x]?)([a-h][1-8])(?:=(qrbn))?'
LAN_MOVE_SEPARATOR = '-'
# Indices of captured groups for long algebraic notation move.
LAN_CAPTURE_OR_MOVE = 1
LAN_DESTINATION = 2
LAN_PROMOTE_PIECE = 3
# For normalising a text promotion move to PGN.
TEXT_PROMOTION = r'(?#Lower case)([a-h](?:[x-][a-h])?[18]=?)([qrbn])'
# Indices of captured groups for normalising promotion move to PGN.
TP_MOVE = 1
TP_PROMOTE_TO_PIECE = 2
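# Hedged illustration of how TEXT_PROMOTION and the two indices above can normalise
# lower-case or FIDE-style promotions to PGN (assumes re.sub is used for the fix-up):
#
#   import re
#   promotion_re = re.compile(TEXT_PROMOTION)
#   fix = lambda m: m.group(TP_MOVE) + m.group(TP_PROMOTE_TO_PIECE).upper()
#   promotion_re.sub(fix, 'bxa8q gxh1=n')   # -> 'bxa8Q gxh1=N'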
# For spotting rejected possible SAN b-pawn move tokens which may be first
# part of bishop move, ignoring case if necessary.
# The token is assumed to not represent a pawn move.
PAWN_MOVE_TOKEN_POSSIBLE_BISHOP = r'\A[Bb][1-8]\Z'
# The parser.PGN.read_games method uses UNTERMINATED when deciding if a PGN Tag
# found in an error sequence should start a new game.
UNTERMINATED = '<{'
# Traditional annotations are mapped to Numeric Annotation Glyphs (NAG).
# About 100 NAGs are defined in the PGN standard.
SUFFIX_ANNOTATION_TO_NAG = {
'!!': '$3', '!?': '$5', '!': '$1', '??': '$4', '?!': '$6', '?': '$2'}
# Seven Tag Roster.
TAG_EVENT = 'Event'
TAG_SITE = 'Site'
TAG_DATE = 'Date'
TAG_ROUND = 'Round'
TAG_WHITE = 'White'
TAG_BLACK = 'Black'
TAG_RESULT = 'Result'
SEVEN_TAG_ROSTER = (
TAG_EVENT,
TAG_SITE,
TAG_DATE,
TAG_ROUND,
TAG_WHITE,
TAG_BLACK,
TAG_RESULT,
)
# Default Seven Tag Roster values.
DEFAULT_TAG_VALUE = '?'
DEFAULT_TAG_DATE_VALUE = '????.??.??'
DEFAULT_TAG_RESULT_VALUE = '*'
DEFAULT_SORT_TAG_VALUE = DEFAULT_TAG_VALUE.replace('?', ' ')
DEFAULT_SORT_TAG_RESULT_VALUE = DEFAULT_TAG_RESULT_VALUE.replace('*', ' ')
SEVEN_TAG_ROSTER_DEFAULTS = {
TAG_DATE: DEFAULT_TAG_DATE_VALUE,
TAG_RESULT: DEFAULT_TAG_RESULT_VALUE,
}
# Supplemental tags with defined default values.
# Other supplemental tags exist; the ones defined here have a default value.
TAG_WHITETITLE = 'WhiteTitle'
TAG_BLACKTITLE = 'BlackTitle'
TAG_WHITEELO = 'WhiteElo'
TAG_BLACKELO = 'BlackElo'
TAG_WHITENA = 'WhiteNA'
TAG_BLACKNA = 'BlackNA'
SUPPLEMENTAL_TAG_ROSTER = (
TAG_WHITETITLE,
TAG_BLACKTITLE,
TAG_WHITEELO,
TAG_BLACKELO,
TAG_WHITENA,
TAG_BLACKNA,
)
DEFAULT_SUPPLEMENTAL_TAG_VALUE = '-'
# FEN Tags.
TAG_FEN = 'FEN'
TAG_SETUP = 'SetUp'
SETUP_VALUE_FEN_ABSENT = '0'
SETUP_VALUE_FEN_PRESENT = '1'
# PGN constants
PGN_CAPTURE_MOVE = 'x'
PGN_PAWN = ''
PGN_KING = 'K'
PGN_QUEEN = 'Q'
PGN_ROOK = 'R'
PGN_BISHOP = 'B'
PGN_KNIGHT = 'N'
PGN_O_O = 'O-O'
PGN_O_O_O = 'O-O-O'
PGN_PROMOTION = '='
PGN_NAMED_PIECES = PGN_KING + PGN_QUEEN + PGN_ROOK + PGN_BISHOP + PGN_KNIGHT
# Maximum line length in PGN file for movetext excluding EOL ('\n') is 79.
# Some PGN Tags are allowed to exceed this.
# The rule may not be enforceable for comments, especially any re-exported,
# without disturbing any formatting attempts with EOL and spaces.
PGN_MAXIMUM_LINE_LENGTH = 79
PGN_LINE_SEPARATOR = '\n'
PGN_TOKEN_SEPARATOR = ' '
PGN_DOT = '.'
# FEN constants
FEN_FIELD_COUNT = 6
FEN_PIECE_PLACEMENT_FIELD_INDEX = 0
FEN_ACTIVE_COLOR_FIELD_INDEX = 1
FEN_CASTLING_AVAILABILITY_FIELD_INDEX = 2
FEN_EN_PASSANT_TARGET_SQUARE_FIELD_INDEX = 3
FEN_HALFMOVE_CLOCK_FIELD_INDEX = 4
FEN_FULLMOVE_NUMBER_FIELD_INDEX = 5
FEN_WHITE_ACTIVE = 'w'
FEN_BLACK_ACTIVE = 'b'
FEN_FIELD_DELIM = ' '
FEN_RANK_DELIM = '/'
FEN_NULL = '-'
FEN_WHITE_KING = 'K'
FEN_WHITE_QUEEN = 'Q'
FEN_WHITE_ROOK = 'R'
FEN_WHITE_BISHOP = 'B'
FEN_WHITE_KNIGHT = 'N'
FEN_WHITE_PAWN = 'P'
FEN_BLACK_KING = 'k'
FEN_BLACK_QUEEN = 'q'
FEN_BLACK_ROOK = 'r'
FEN_BLACK_BISHOP = 'b'
FEN_BLACK_KNIGHT = 'n'
FEN_BLACK_PAWN = 'p'
FEN_TO_PGN = {FEN_WHITE_KING: PGN_KING,
FEN_WHITE_QUEEN: PGN_QUEEN,
FEN_WHITE_ROOK: PGN_ROOK,
FEN_WHITE_BISHOP: PGN_BISHOP,
FEN_WHITE_KNIGHT: PGN_KNIGHT,
FEN_WHITE_PAWN: PGN_PAWN,
FEN_BLACK_KING: PGN_KING,
FEN_BLACK_QUEEN: PGN_QUEEN,
FEN_BLACK_ROOK: PGN_ROOK,
FEN_BLACK_BISHOP: PGN_BISHOP,
FEN_BLACK_KNIGHT: PGN_KNIGHT,
FEN_BLACK_PAWN: PGN_PAWN,
}
FEN_PAWNS = {FEN_WHITE_PAWN: FEN_WHITE_ACTIVE, FEN_BLACK_PAWN: FEN_BLACK_ACTIVE}
FEN_INITIAL_CASTLING = (
FEN_WHITE_KING + FEN_WHITE_QUEEN + FEN_BLACK_KING + FEN_BLACK_QUEEN)
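# Hedged sketch relating the field-index constants above to a FEN string (the
# standard starting position is used purely as an illustration):
#
#   fen = 'rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1'
#   fields = fen.split(FEN_FIELD_DELIM)
#   assert len(fields) == FEN_FIELD_COUNT
#   assert fields[FEN_ACTIVE_COLOR_FIELD_INDEX] == FEN_WHITE_ACTIVE
#   assert fields[FEN_CASTLING_AVAILABILITY_FIELD_INDEX] == FEN_INITIAL_CASTLING
#   assert fields[FEN_EN_PASSANT_TARGET_SQUARE_FIELD_INDEX] == FEN_NULL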
# Mapping for FEN string to piece-square names: 'Pp' missing because pawns are
# not named in moves, and 'a4' as a piece-square name means a black pawn.
FEN_WHITE_PIECES = ''.join(
(FEN_WHITE_KING,
FEN_WHITE_QUEEN,
FEN_WHITE_ROOK,
FEN_WHITE_BISHOP,
FEN_WHITE_KNIGHT,
FEN_WHITE_PAWN,
))
FEN_BLACK_PIECES = ''.join(
(FEN_BLACK_KING,
FEN_BLACK_QUEEN,
FEN_BLACK_ROOK,
FEN_BLACK_BISHOP,
FEN_BLACK_KNIGHT,
FEN_BLACK_PAWN,
))
FEN_PIECE_NAMES = FEN_WHITE_PIECES + FEN_BLACK_PIECES
FILE_NAMES = 'abcdefgh'
RANK_NAMES = '87654321'
CASTLING_RIGHTS = {
FILE_NAMES[0] + RANK_NAMES[-1]: FEN_WHITE_QUEEN,
FILE_NAMES[-1] + RANK_NAMES[-1]: FEN_WHITE_KING,
FILE_NAMES[0] + RANK_NAMES[0]: FEN_BLACK_QUEEN,
FILE_NAMES[-1] + RANK_NAMES[0]: FEN_BLACK_KING,
FILE_NAMES[4] + RANK_NAMES[-1]: FEN_WHITE_KING + FEN_WHITE_QUEEN,
FILE_NAMES[4] + RANK_NAMES[0]: FEN_BLACK_KING + FEN_BLACK_QUEEN,
}
CASTLING_PIECE_FOR_SQUARE = {
FILE_NAMES[0] + RANK_NAMES[-1]: FEN_WHITE_ROOK,
FILE_NAMES[-1] + RANK_NAMES[-1]: FEN_WHITE_ROOK,
FILE_NAMES[0] + RANK_NAMES[0]: FEN_BLACK_ROOK,
FILE_NAMES[-1] + RANK_NAMES[0]: FEN_BLACK_ROOK,
FILE_NAMES[4] + RANK_NAMES[-1]: FEN_WHITE_KING,
FILE_NAMES[4] + RANK_NAMES[0]: FEN_BLACK_KING,
}
CASTLING_MOVE_RIGHTS = {
(FEN_WHITE_ACTIVE, PGN_O_O): FEN_WHITE_KING,
(FEN_WHITE_ACTIVE, PGN_O_O_O): FEN_WHITE_QUEEN,
(FEN_BLACK_ACTIVE, PGN_O_O): FEN_BLACK_KING,
(FEN_BLACK_ACTIVE, PGN_O_O_O): FEN_BLACK_QUEEN,
}
OTHER_SIDE = {FEN_WHITE_ACTIVE: FEN_BLACK_ACTIVE,
FEN_BLACK_ACTIVE: FEN_WHITE_ACTIVE}
PIECE_TO_KING = {
FEN_WHITE_KING: FEN_WHITE_KING,
FEN_WHITE_QUEEN: FEN_WHITE_KING,
FEN_WHITE_ROOK: FEN_WHITE_KING,
FEN_WHITE_BISHOP: FEN_WHITE_KING,
FEN_WHITE_KNIGHT: FEN_WHITE_KING,
FEN_WHITE_PAWN: FEN_WHITE_KING,
FEN_BLACK_KING: FEN_BLACK_KING,
FEN_BLACK_QUEEN: FEN_BLACK_KING,
FEN_BLACK_ROOK: FEN_BLACK_KING,
FEN_BLACK_BISHOP: FEN_BLACK_KING,
FEN_BLACK_KNIGHT: FEN_BLACK_KING,
FEN_BLACK_PAWN: FEN_BLACK_KING,
}
PROMOTED_PIECE_NAME = {
FEN_WHITE_ACTIVE: {
PGN_QUEEN: FEN_WHITE_QUEEN,
PGN_ROOK: FEN_WHITE_ROOK,
PGN_BISHOP: FEN_WHITE_BISHOP,
PGN_KNIGHT: FEN_WHITE_KNIGHT,
},
FEN_BLACK_ACTIVE: {
PGN_QUEEN: FEN_BLACK_QUEEN,
PGN_ROOK: FEN_BLACK_ROOK,
PGN_BISHOP: FEN_BLACK_BISHOP,
PGN_KNIGHT: FEN_BLACK_KNIGHT,
},
}
files = {}
for f in FILE_NAMES:
files[f] = {f + r for r in RANK_NAMES}
ranks = {}
for r in RANK_NAMES:
ranks[r] = {f + r for f in FILE_NAMES}
ROOK_MOVES = {}
for f in files:
for r in ranks:
ROOK_MOVES[f+r] = files[f].union(ranks[r])
ROOK_MOVES[f+r].remove(f+r)
left_to_right = []
right_to_left = []
for e in range(len(FILE_NAMES)):
left_to_right.append(set())
for x, y in zip(FILE_NAMES[e:], RANK_NAMES):
left_to_right[-1].add(x + y)
right_to_left.append(set())
for x, y in zip(reversed(FILE_NAMES[:e+1]), RANK_NAMES[:e+2]):
right_to_left[-1].add(x + y)
for e in range(len(RANK_NAMES) - 1):
left_to_right.append(set())
for x, y in zip(FILE_NAMES[:-e-1], RANK_NAMES[e+1:]):
left_to_right[-1].add(x + y)
right_to_left.append(set())
for x, y in zip(reversed(FILE_NAMES[-e-1:]), RANK_NAMES[-e-1:]):
right_to_left[-1].add(x + y)
BISHOP_MOVES = {}
for f in FILE_NAMES:
for r in RANK_NAMES:
sq = f + r
for eltr, ltr in enumerate(left_to_right):
if sq in ltr:
for ertl, rtl in enumerate(right_to_left):
if sq in rtl:
BISHOP_MOVES[sq] = ltr.union(rtl)
BISHOP_MOVES[sq].remove(sq)
break
break
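# Hand-derived illustration (hedged) of the move tables built above: ROOK_MOVES['a1']
# is the rest of the a-file plus the rest of the first rank, and BISHOP_MOVES['c1']
# is the union of the two diagonals through c1, i.e.
# {'b2', 'a3', 'd2', 'e3', 'f4', 'g5', 'h6'}.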
KNIGHT_MOVES = | |
__all__",
"undefined-all-variable",
"Used when an undefined variable name is referenced in __all__.",
),
"E0604": (
"Invalid object %r in __all__, must contain only strings",
"invalid-all-object",
"Used when an invalid (non-string) object occurs in __all__.",
),
"E0605": (
"Invalid format for __all__, must be tuple or list",
"invalid-all-format",
"Used when __all__ has an invalid format.",
),
"E0611": (
"No name %r in module %r",
"no-name-in-module",
"Used when a name cannot be found in a module.",
),
"W0601": (
"Global variable %r undefined at the module level",
"global-variable-undefined",
'Used when a variable is defined through the "global" statement '
"but the variable is not defined in the module scope.",
),
"W0602": (
"Using global for %r but no assignment is done",
"global-variable-not-assigned",
'Used when a variable is defined through the "global" statement '
"but no assignment to this variable is done.",
),
"W0603": (
"Using the global statement", # W0121
"global-statement",
'Used when you use the "global" statement to update a global '
"variable. Pylint just try to discourage this "
"usage. That doesn't mean you cannot use it !",
),
"W0604": (
"Using the global statement at the module level", # W0103
"global-at-module-level",
'Used when you use the "global" statement at the module level '
"since it has no effect",
),
"W0611": (
"Unused %s",
"unused-import",
"Used when an imported module or variable is not used.",
),
"W0612": (
"Unused variable %r",
"unused-variable",
"Used when a variable is defined but not used.",
),
"W0613": (
"Unused argument %r",
"unused-argument",
"Used when a function or method argument is not used.",
),
"W0614": (
"Unused import(s) %s from wildcard import of %s",
"unused-wildcard-import",
"Used when an imported module or variable is not used from a "
"`'from X import *'` style import.",
),
"W0621": (
"Redefining name %r from outer scope (line %s)",
"redefined-outer-name",
"Used when a variable's name hides a name defined in the outer scope.",
),
"W0622": (
"Redefining built-in %r",
"redefined-builtin",
"Used when a variable or function override a built-in.",
),
"W0631": (
"Using possibly undefined loop variable %r",
"undefined-loop-variable",
"Used when a loop variable (i.e. defined by a for loop or "
"a list comprehension or a generator expression) is used outside "
"the loop.",
),
"W0632": (
"Possible unbalanced tuple unpacking with "
"sequence%s: "
"left side has %d label(s), right side has %d value(s)",
"unbalanced-tuple-unpacking",
"Used when there is an unbalanced tuple unpacking in assignment",
{"old_names": [("E0632", "old-unbalanced-tuple-unpacking")]},
),
"E0633": (
"Attempting to unpack a non-sequence%s",
"unpacking-non-sequence",
"Used when something which is not "
"a sequence is used in an unpack assignment",
{"old_names": [("W0633", "old-unpacking-non-sequence")]},
),
"W0640": (
"Cell variable %s defined in loop",
"cell-var-from-loop",
"A variable used in a closure is defined in a loop. "
"This will result in all closures using the same value for "
"the closed-over variable.",
),
"W0641": (
"Possibly unused variable %r",
"possibly-unused-variable",
"Used when a variable is defined but might not be used. "
"The possibility comes from the fact that locals() might be used, "
"which could consume or not the said variable",
),
"W0642": (
"Invalid assignment to %s in method",
"self-cls-assignment",
"Invalid assignment to self or cls in instance or class method "
"respectively.",
),
}
class ScopeConsumer(NamedTuple):
"""Store nodes and their consumption states."""
to_consume: Dict[str, List[nodes.NodeNG]]
consumed: Dict[str, List[nodes.NodeNG]]
consumed_uncertain: DefaultDict[str, List[nodes.NodeNG]]
scope_type: str
class NamesConsumer:
"""A simple class to handle consumed, to consume and scope type info of node locals."""
def __init__(self, node, scope_type):
self._atomic = ScopeConsumer(
copy.copy(node.locals), {}, collections.defaultdict(list), scope_type
)
self.node = node
def __repr__(self):
to_consumes = [f"{k}->{v}" for k, v in self._atomic.to_consume.items()]
consumed = [f"{k}->{v}" for k, v in self._atomic.consumed.items()]
consumed_uncertain = [
f"{k}->{v}" for k, v in self._atomic.consumed_uncertain.items()
]
to_consumes = ", ".join(to_consumes)
consumed = ", ".join(consumed)
consumed_uncertain = ", ".join(consumed_uncertain)
return f"""
to_consume : {to_consumes}
consumed : {consumed}
consumed_uncertain: {consumed_uncertain}
scope_type : {self._atomic.scope_type}
"""
def __iter__(self):
return iter(self._atomic)
@property
def to_consume(self):
return self._atomic.to_consume
@property
def consumed(self):
return self._atomic.consumed
@property
def consumed_uncertain(self) -> DefaultDict[str, List[nodes.NodeNG]]:
"""Retrieves nodes filtered out by get_next_to_consume() that may not
have executed, such as statements in except blocks, or statements
in try blocks (when evaluating their corresponding except and finally
blocks). Checkers that want to treat the statements as executed
(e.g. for unused-variable) may need to add them back.
"""
return self._atomic.consumed_uncertain
@property
def scope_type(self):
return self._atomic.scope_type
def mark_as_consumed(self, name, consumed_nodes):
"""Mark the given nodes as consumed for the name.
If all of the nodes for the name were consumed, delete the name from
the to_consume dictionary
"""
unconsumed = [n for n in self.to_consume[name] if n not in set(consumed_nodes)]
self.consumed[name] = consumed_nodes
if unconsumed:
self.to_consume[name] = unconsumed
else:
del self.to_consume[name]
def get_next_to_consume(self, node: nodes.Name) -> Optional[List[nodes.NodeNG]]:
"""Return a list of the nodes that define `node` from this scope.
If it is uncertain whether a node will be consumed, such as for statements in
except blocks, add it to self.consumed_uncertain instead of returning it.
Return None to indicate a special case that needs to be handled by the caller.
"""
name = node.name
parent_node = node.parent
found_nodes = self.to_consume.get(name)
node_statement = node.statement(future=True)
if (
found_nodes
and isinstance(parent_node, nodes.Assign)
and parent_node == found_nodes[0].parent
):
lhs = found_nodes[0].parent.targets[0]
if lhs.name == name: # this name is defined in this very statement
found_nodes = None
if (
found_nodes
and isinstance(parent_node, nodes.For)
and parent_node.iter == node
and parent_node.target in found_nodes
):
found_nodes = None
# Before filtering, check that this node's name is not a nonlocal
if any(
isinstance(child, nodes.Nonlocal) and node.name in child.names
for child in node.frame(future=True).get_children()
):
return found_nodes
# And is not part of a test in a filtered comprehension
if VariablesChecker._has_homonym_in_comprehension_test(node):
return found_nodes
# Filter out assignments in ExceptHandlers that node is not contained in
if found_nodes:
found_nodes = [
n
for n in found_nodes
if not isinstance(n.statement(future=True), nodes.ExceptHandler)
or n.statement(future=True).parent_of(node)
]
# Filter out assignments in an Except clause that the node is not
# contained in, assuming they may fail
if found_nodes:
uncertain_nodes = self._uncertain_nodes_in_except_blocks(
found_nodes, node, node_statement
)
self.consumed_uncertain[node.name] += uncertain_nodes
uncertain_nodes_set = set(uncertain_nodes)
found_nodes = [n for n in found_nodes if n not in uncertain_nodes_set]
# If this node is in a Finally block of a Try/Finally,
# filter out assignments in the try portion, assuming they may fail
if found_nodes:
uncertain_nodes = (
self._uncertain_nodes_in_try_blocks_when_evaluating_finally_blocks(
found_nodes, node_statement
)
)
self.consumed_uncertain[node.name] += uncertain_nodes
uncertain_nodes_set = set(uncertain_nodes)
found_nodes = [n for n in found_nodes if n not in uncertain_nodes_set]
# If this node is in an ExceptHandler,
# filter out assignments in the try portion, assuming they may fail
if found_nodes:
uncertain_nodes = (
self._uncertain_nodes_in_try_blocks_when_evaluating_except_blocks(
found_nodes, node_statement
)
)
self.consumed_uncertain[node.name] += uncertain_nodes
uncertain_nodes_set = set(uncertain_nodes)
found_nodes = [n for n in found_nodes if n not in uncertain_nodes_set]
return found_nodes
@staticmethod
def _uncertain_nodes_in_except_blocks(
found_nodes: List[nodes.NodeNG],
node: nodes.NodeNG,
node_statement: nodes.Statement,
) -> List[nodes.NodeNG]:
"""Return any nodes in ``found_nodes`` that should be treated as uncertain
because they are in an except block.
"""
uncertain_nodes = []
for other_node in found_nodes:
other_node_statement = other_node.statement(future=True)
# Only testing for statements in the except block of TryExcept
closest_except_handler = utils.get_node_first_ancestor_of_type(
other_node_statement, nodes.ExceptHandler
)
if not closest_except_handler:
continue
# If the other node is in the same scope as this node, assume it executes
if closest_except_handler.parent_of(node):
continue
closest_try_except: nodes.TryExcept = closest_except_handler.parent
try_block_returns = any(
isinstance(try_statement, nodes.Return)
for try_statement in closest_try_except.body
)
# If the try block returns, assume the except blocks execute.
if try_block_returns:
# Exception: if this node is in the final block of the other_node_statement,
# it will execute before returning. Assume the except statements are uncertain.
if (
isinstance(node_statement.parent, nodes.TryFinally)
and node_statement in node_statement.parent.finalbody
and closest_try_except.parent.parent_of(node_statement)
):
uncertain_nodes.append(other_node)
# Assume the except blocks execute, so long as each handler
# defines the name, raises, or returns.
elif all(
NamesConsumer._defines_name_raises_or_returns(node.name, handler)
for handler in closest_try_except.handlers
):
continue
if NamesConsumer._check_loop_finishes_via_except(node, closest_try_except):
continue
# Passed all tests for uncertain | |
to use
a proxy mesh."""
mod = False
m_count = len(ob.modifiers)
if m_count > 0:
show = np.zeros(m_count, dtype=np.bool)
ren_set = np.copy(show)
ob.modifiers.foreach_get('show_render', show)
ob.modifiers.foreach_set('show_render', ren_set)
mod = True
#v_count = len(mesh.vertices)
#normal = np.zeros(v_count * 3)#, dtype=type)
mesh.vertices.foreach_get('normal', arr.ravel())
#normal.shape = (v_count, 3)
if mod:
ob.modifiers.foreach_set('show_render', show)
def get_v_nor(ob, nor_arr):
ob.data.vertices.foreach_get('normal', nor_arr.ravel())
return nor_arr
def closest_point_edge(e1, e2, p):
'''Returns the location of the point on the edge'''
vec1 = e2 - e1
vec2 = p - e1
d = np.dot(vec2, vec1) / np.dot(vec1, vec1)
cp = e1 + vec1 * d
return cp
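# Quick hand-checked example (illustrative only): with e1 = np.array((0.0, 0.0, 0.0)),
# e2 = np.array((2.0, 0.0, 0.0)) and p = np.array((1.0, 1.0, 0.0)), the projection
# factor d is 0.5, so closest_point_edge(e1, e2, p) returns array([1., 0., 0.]).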
def create_vertex_groups(groups=['common', 'not_used'], weights=[0.0, 0.0], ob=None):
'''Creates vertex groups and sets weights. "groups" is a list of strings
for the names of the groups. "weights" is a list of weights corresponding
to the strings. Each vertex is assigned a weight for each vertex group to
avoid calling vertex weights that are not assigned. If the groups are
already present, the previous weights will be preserved. To reset weights
delete the created groups'''
if ob is None:
ob = bpy.context.object
vg = ob.vertex_groups
for g in range(0, len(groups)):
if groups[g] not in vg.keys(): # Don't create groups if they are already there
vg.new(groups[g])
vg[groups[g]].add(range(0,len(ob.data.vertices)), weights[g], 'REPLACE')
else:
vg[groups[g]].add(range(0,len(ob.data.vertices)), 0, 'ADD') # This way we avoid resetting the weights for existing groups.
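# Hedged usage sketch ('drag' is a hypothetical group name, not one the add-on requires):
#   create_vertex_groups(groups=['drag'], weights=[1.0])
# adds a 'drag' group to the active object with every vertex weighted 1.0, while the
# weights of groups that already exist are left untouched.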
def get_bmesh(obj=None):
ob = get_last_object()[1]
if ob is None:
ob = obj
obm = bmesh.new()
if ob.mode == 'OBJECT':
obm.from_mesh(ob.data)
elif ob.mode == 'EDIT':
obm = bmesh.from_edit_mesh(ob.data)
return obm
def get_minimal_edges(ob):
obm = get_bmesh(ob)
obm.edges.ensure_lookup_table()
obm.verts.ensure_lookup_table()
obm.faces.ensure_lookup_table()
# get sew edges:
sew = [i.index for i in obm.edges if len(i.link_faces)==0]
# so if I have a vertex with one or more sew edges attached
# I need to get the mean location of all verts shared by those edges
# every one of those verts needs to move towards the total mean
# get linear edges
e_count = len(obm.edges)
eidx = np.zeros(e_count * 2, dtype=np.int32)
e_bool = np.zeros(e_count, dtype=np.bool)
e_bool[sew] = True
ob.data.edges.foreach_get('vertices', eidx)
eidx.shape = (e_count, 2)
# get diagonal edges:
diag_eidx = []
start = 0
stop = 0
step_size = [len(i.verts) for i in obm.faces]
p_v_count = np.sum(step_size)
p_verts = np.ones(p_v_count, dtype=np.int32)
ob.data.polygons.foreach_get('vertices', p_verts)
# can only be understood on a good day when the coffee flows (uses rolling and slicing)
# creates unique diagonal edge sets
for f in obm.faces:
fv_count = len(f.verts)
stop += fv_count
if fv_count > 3: # triangles are already connected by linear springs
skip = 2
f_verts = p_verts[start:stop]
for fv in range(len(f_verts)):
if fv > 1: # as we go around the loop of verts in face we start overlapping
skip = fv + 1 # this lets us skip the overlap so we don't have mirror duplicates
roller = np.roll(f_verts, fv)
for r in roller[skip:-1]:
diag_eidx.append([roller[0], r])
start += fv_count
# eidx groups
sew_eidx = eidx[e_bool]
lin_eidx = eidx[~e_bool]
diag_eidx = np.array(diag_eidx)
# deal with sew verts connected to more than one edge
s_t_rav = sew_eidx.T.ravel()
s_uni, s_inv, s_counts = np.unique(s_t_rav,return_inverse=True, return_counts=True)
s_multi = s_counts > 1
multi_groups = None
if np.any(s_counts):
multi_groups = []
ls = sew_eidx[:,0]
rs = sew_eidx[:,1]
for i in s_uni[s_multi]:
gr = np.array([i])
gr = np.append(gr, ls[rs==i])
gr = np.append(gr, rs[ls==i])
multi_groups.append(gr)
return lin_eidx, diag_eidx, sew_eidx, multi_groups
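# Worked illustration (hedged, hand-derived) of the roll-and-slice pass in
# get_minimal_edges() above: for a single quad whose polygon vertices are [0, 1, 2, 3],
# fv_count is 4, so the loop emits exactly the two unique diagonals (0, 2) and (3, 1);
# linear edges are skipped because they are already covered by the edge springs, and
# triangles (fv_count == 3) produce no diagonals at all.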
def add_remove_virtual_springs(remove=False):
ob = get_last_object()[1]
cloth = get_cloth_data(ob)
obm = get_bmesh()
obm.verts.ensure_lookup_table()
count = len(obm.verts)
idxer = np.arange(count, dtype=np.int32)
sel = np.array([v.select for v in obm.verts])
selected = idxer[sel]
virtual_springs = np.array([[vs.vertex_id_1, vs.vertex_id_2] for vs in ob.mclo.virtual_springs])
if virtual_springs.shape[0] == 0:
virtual_springs.shape = (0, 2)
if remove:
ls = virtual_springs[:, 0]
in_sel = np.in1d(ls, idxer[sel])
deleter = np.arange(ls.shape[0], dtype=np.int32)[in_sel]
for i in reversed(deleter):
ob.mclo.virtual_springs.remove(i)
return
existing = np.append(cloth.eidx, virtual_springs, axis=0)
flip = existing[:, ::-1]
existing = np.append(existing, flip, axis=0)
ls = existing[:,0]
#springs = []
for i in idxer[sel]:
# to avoid duplicates:
# where this vert occurs on the left side of the existing spring list
v_in = existing[i == ls]
v_in_r = v_in[:,1]
not_in = selected[~np.in1d(selected, v_in_r)]
idx_set = not_in[not_in != i]
for sv in idx_set:
#springs.append([i, sv])
new_vs = ob.mclo.virtual_springs.add()
new_vs.vertex_id_1 = i
new_vs.vertex_id_2 = sv
# gets appended to eidx in the cloth_init function after calling get connected polys in case geometry changes
def generate_guide_mesh():
"""Makes the arrow that appears when creating pins"""
verts = [[0.0, 0.0, 0.0], [-0.01, -0.01, 0.1], [-0.01, 0.01, 0.1], [0.01, -0.01, 0.1], [0.01, 0.01, 0.1], [-0.03, -0.03, 0.1], [-0.03, 0.03, 0.1], [0.03, 0.03, 0.1], [0.03, -0.03, 0.1], [-0.01, -0.01, 0.2], [-0.01, 0.01, 0.2], [0.01, -0.01, 0.2], [0.01, 0.01, 0.2]]
edges = [[0, 5], [5, 6], [6, 7], [7, 8], [8, 5], [1, 2], [2, 4], [4, 3], [3, 1], [5, 1], [2, 6], [4, 7], [3, 8], [9, 10], [10, 12], [12, 11], [11, 9], [3, 11], [9, 1], [2, 10], [12, 4], [6, 0], [7, 0], [8, 0]]
faces = [[0, 5, 6], [0, 6, 7], [0, 7, 8], [0, 8, 5], [1, 3, 11, 9], [1, 2, 6, 5], [2, 4, 7, 6], [4, 3, 8, 7], [3, 1, 5, 8], [12, 10, 9, 11], [4, 2, 10, 12], [3, 4, 12, 11], [2, 1, 9, 10]]
name = 'ModelingClothPinGuide'
if 'ModelingClothPinGuide' in bpy.data.objects:
mesh_ob = bpy.data.objects['ModelingClothPinGuide']
else:
mesh = bpy.data.meshes.new('ModelingClothPinGuide')
mesh.from_pydata(verts, edges, faces)
mesh.update()
mesh_ob = bpy.data.objects.new(name, mesh)
bpy.context.scene.objects.link(mesh_ob)
mesh_ob.show_x_ray = True
return mesh_ob
def create_guide():
"""Spawns the guide"""
if 'ModelingClothPinGuide' in bpy.data.objects:
mesh_ob = bpy.data.objects['ModelingClothPinGuide']
return mesh_ob
mesh_ob = generate_guide_mesh()
bpy.context.scene.objects.active = mesh_ob
bpy.ops.object.material_slot_add()
if 'ModelingClothPinGuide' in bpy.data.materials:
mat = bpy.data.materials['ModelingClothPinGuide']
else:
mat = bpy.data.materials.new(name='ModelingClothPinGuide')
mat.use_transparency = True
mat.alpha = 0.35
mat.emit = 2
mat.game_settings.alpha_blend = 'ALPHA_ANTIALIASING'
mat.diffuse_color = (1, 1, 0)
mesh_ob.material_slots[0].material = mat
return mesh_ob
def delete_guide():
"""Deletes the arrow"""
if 'ModelingClothPinGuide' in bpy.data.objects:
bpy.data.objects.remove(bpy.data.objects['ModelingClothPinGuide'])
if 'ModelingClothPinGuide' in bpy.data.meshes:
guide_mesh = bpy.data.meshes['ModelingClothPinGuide']
guide_mesh.user_clear()
bpy.data.meshes.remove(guide_mesh)
def scale_source(multiplier):
"""grow or shrink the source shape"""
ob = get_last_object()[1]
if ob is not None:
if ob.mclo.enable:
count = len(ob.data.vertices)
co = np.zeros(count*3, dtype=np.float32)
ob.data.shape_keys.key_blocks['modeling cloth source key'].data.foreach_get('co', co)
co.shape = (count, 3)
mean = np.mean(co, axis=0)
co -= mean
co *= multiplier
co += mean
ob.data.shape_keys.key_blocks['modeling cloth source key'].data.foreach_set('co', co.ravel())
cloth = get_cloth_data(ob)
if hasattr(cloth, 'cy_dists'):
cloth.cy_dists *= multiplier
def reset_shapes(ob=None):
"""Sets the modeling cloth key to match the source key.
Will regenerate shape keys if they are missing"""
if ob is None:
if bpy.context.object.mclo.enable:
ob = bpy.context.object
else:
ob = bpy.context.scene.mclo.last_object
if ob.data.shape_keys == None:
ob.shape_key_add('Basis')
if 'modeling cloth source key' not in ob.data.shape_keys.key_blocks:
ob.shape_key_add('modeling cloth source key')
if 'modeling cloth key' not in ob.data.shape_keys.key_blocks:
ob.shape_key_add('modeling cloth key')
ob.data.shape_keys.key_blocks['modeling cloth key'].value=1
keys = ob.data.shape_keys.key_blocks
count = len(ob.data.vertices)
co = np.zeros(count * 3, dtype=np.float32)
keys['Basis'].data.foreach_get('co', co)
#co = applied_key_co(ob, None, 'modeling cloth source key')
#keys['modeling cloth source key'].data.foreach_set('co', co)
keys['modeling cloth key'].data.foreach_set('co', co)
# reset the data stored in the class
cloth = get_cloth_data(ob)
cloth.vel[:] = 0
co.shape = (co.shape[0]//3, 3)
cloth.co = co
keys['modeling cloth key'].mute = True
keys['modeling cloth key'].mute = False
def get_spring_mix(ob, eidx):
rs = []
ls = []
minrl = []
for i in eidx:
r = eidx[eidx == i[1]].shape[0]
l = eidx[eidx == i[0]].shape[0]
rs.append (min(r,l))
ls.append (min(r,l))
mix = 1 / np.array(rs + ls, dtype=np.float32) ** 1.2
return mix
def collision_data_update(self, context):
ob = self.id_data
if ob.mclo.self_collision:
create_cloth_data(ob)
def refresh_noise(self, context):
ob = self.id_data
cloth = get_cloth_data(ob)
if cloth:
zeros = np.zeros(cloth.count, dtype=np.float32)
random = np.random.random(cloth.count)
zeros[:] = random
cloth.noise = ((zeros + -0.5) * ob.mclo.noise * 0.1)[:, nax]
def generate_wind(wind_vec, ob, cloth):
"""Maintains a wind array and adds it to the cloth vel"""
tri_nor = cloth.normals # non-unit calculated by tri_normals_in_place() per each triangle
w_vec = revert_rotation(ob, wind_vec)
turb = ob.mclo.turbulence
if turb != 0:
w_vec += np.random.random(3).astype(np.float32) * | |
# coding: utf-8
# Alpha factors with too many variables are set aside for now. The RANK function also
# seems a little odd, because its range is very large - should a window be applied to it?
#
#
# ## Dropped Index:
# - Alpha30 (requires the Fama three factors)
# - Alpha75 (requires BENCHMARKINDEX)
# - Alpha143 (requires the SELF function)
# - Alpha149 (requires BENCHMARKINDEX)
# - Alpha181 (requires BENCHMARKINDEX)
# - Alpha182 (requires BENCHMARKINDEX)
### For the more complex cond?a:b (ternary) expressions, intermediate variables are used
### to store intermediate results; this convention is used throughout and is not repeated below.
import numpy as np, pandas as pd, matplotlib.pyplot as plt
from scipy.stats.stats import pearsonr
from scipy.stats.stats import spearmanr
from BTC_Alpha_func import *
def Alpha1(para_list):
return -1 * CORR(RANK(DELTA(LOG(VOLUME),para_list[0])), RANK((CLOSE-OPEN)/OPEN), para_list[1])
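# Hedged usage sketch: this module assumes OPEN/HIGH/LOW/CLOSE/VWAP/VOLUME/RET and the
# operators (RANK, CORR, DELTA, ...) are pandas Series / Series-returning helpers coming
# from BTC_Alpha_func. The window sizes below are hypothetical and would be tuned elsewhere:
#
#   signal = Alpha1([6, 6])
#   ic = signal.corr(RET.shift(-1), method='spearman')   # rough information-coefficient check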
def Alpha2(para_list):
return (-1 * DELTA((((CLOSE - LOW) - (HIGH - CLOSE)) / (HIGH - LOW)), para_list[0])).fillna(0)
def Alpha3(para_list):
cache = CLOSE - ((CLOSE>DELAY(CLOSE,para_list[0]))*MIN(LOW,DELAY(CLOSE,para_list[0]))\
+ (~(CLOSE>DELAY(CLOSE,para_list[0])))*MAX(HIGH,DELAY(CLOSE,para_list[0])))
return SUM((~(CLOSE==DELAY(CLOSE,1)) * cache), para_list[1])
#the constant 1 is kept here, because Volume/mean(volume, window_size) still has a clear interpretation
def Alpha4(para_list):
#tail computes the result that follows the second-to-last colon of the original nested ternary
tail = (((VOLUME / MEAN(VOLUME,para_list[0])) <= 1) * 1\
- ~((VOLUME / MEAN(VOLUME,para_list[0])) <= 1) * (-1))
#med computes the result of the middle conditional (the part after the first colon)
med = ((SUM(CLOSE, para_list[1]) / para_list[1]) < ((SUM(CLOSE, para_list[2]) / para_list[2]) - STD(CLOSE, para_list[2]))) * 1\
+ ~(((SUM(CLOSE, para_list[1]) / para_list[1]) < ((SUM(CLOSE, para_list[2]) / para_list[2]) - STD(CLOSE, para_list[2])))) * tail
return (((SUM(CLOSE, para_list[2]) / para_list[2]) + STD(CLOSE, para_list[2])) < (SUM(CLOSE, para_list[1]) / para_list[1])) * (-1)\
+ ~(((SUM(CLOSE, para_list[2]) / para_list[2]) + STD(CLOSE, para_list[2])) < (SUM(CLOSE, para_list[1]) / para_list[1])) * med
def Alpha5(para_list):
return (-1 * TSMAX(CORR(TSRANK(VOLUME, para_list[0]), TSRANK(HIGH, para_list[0]), para_list[0]), para_list[1]))
#here para_list[0] is a float between(0,1)
def Alpha6(para_list):
return (RANK(SIGN(DELTA((((OPEN * para_list[0]) + (HIGH * (1.0-para_list[0])))), para_list[1])))* (-1))
def Alpha7(para_list):
return ((RANK(MAX((VWAP - CLOSE), para_list[0])) + RANK(MIN((VWAP - CLOSE), para_list[0]))) * RANK(DELTA(VOLUME, para_list[0])))
#here para_list[0] is a float between(0,1)
def Alpha8(para_list):
return RANK(DELTA(((((HIGH + LOW) / 2) * para_list[0]) + (VWAP * (1.0-para_list[0]))), para_list[1]) * -1)
#an assert is added before every SMA call; the SMA function itself also asserts this, so it is not repeated below
def Alpha9(para_list):
assert para_list[2] <= para_list[1]
return SMA(((HIGH+LOW)/2-(DELAY(HIGH,para_list[0])+DELAY(LOW,para_list[0]))/2)*(HIGH-LOW)/VOLUME,para_list[1],para_list[2])
#the exponent was originally a square (2); it has been replaced with para_list[2] here
def Alpha10(para_list):
return RANK(MAX((STD(RET, para_list[0]) * (RET < 0) + (CLOSE * (~(RET < 0)))**para_list[2], para_list[1])))
def Alpha11(para_list):
return SUM(((CLOSE-LOW)-(HIGH-CLOSE))/(HIGH-LOW)*VOLUME, para_list[0])
def Alpha12(para_list):
return (RANK((OPEN - (SUM(VWAP, para_list[0]) / para_list[0])))) * (-1 * (RANK(ABS((CLOSE - VWAP)))))
#the exponent was originally a square root (1/2); it has been replaced with para_list[0] here as well
def Alpha13(para_list):
return (((HIGH * LOW)**para_list[0]) - VWAP) #this term is a (geometric) mean of HIGH and LOW; treat it as not needing optimization
def Alpha14(para_list):
return CLOSE-DELAY(CLOSE, para_list[0])
#the 1.0 here is kept
def Alpha15(para_list):
return OPEN/DELAY(CLOSE,para_list[0])-1.0
def Alpha16(para_list):
return (-1 * TSMAX(RANK(CORR(RANK(VOLUME), RANK(VWAP), para_list[0])), para_list[0]))
def Alpha17(para_list):
return RANK((VWAP - MAX(VWAP, para_list[0])))**(DELTA(CLOSE, para_list[1]))
def Alpha18(para_list):
return CLOSE/DELAY(CLOSE,para_list[0])
def Alpha19(para_list):
return (CLOSE <= DELAY(CLOSE,para_list[0])) * (CLOSE - DELAY(CLOSE,para_list[0]))/DELAY(CLOSE,para_list[0])\
+ (CLOSE > DELAY(CLOSE,para_list[0])) * (CLOSE - DELAY(CLOSE,para_list[0])/CLOSE)
#the 100.0 is kept - it expresses the value as a percentage; same below
def Alpha20(para_list):
return (CLOSE-DELAY(CLOSE,para_list[0]))/DELAY(CLOSE,para_list[0])*100.0
def Alpha21(para_list):
return REGBETA(MEAN(CLOSE,para_list[0]),SEQUENCE(para_list[0]),para_list[0])
def Alpha22(para_list):
return MEAN((CLOSE-MEAN(CLOSE,para_list[0]))/MEAN(CLOSE,para_list[0])\
-DELAY((CLOSE-MEAN(CLOSE,para_list[0]))/MEAN(CLOSE,para_list[0]),para_list[1]),para_list[2])
def Alpha23(para_list):
return SMA((CLOSE> DELAY(CLOSE,para_list[0]))*STD(CLOSE,para_list[1]),para_list[1],para_list[2])\
/(SMA((CLOSE> DELAY(CLOSE,para_list[0]))*STD(CLOSE,para_list[1]),para_list[1],para_list[2])\
+SMA((CLOSE<=DELAY(CLOSE,para_list[0]))*STD(CLOSE,para_list[1]),para_list[1],para_list[2]))*100.0
def Alpha24(para_list):
return SMA(CLOSE-DELAY(CLOSE,para_list[0]),para_list[0],para_list[1])
def Alpha25(para_list):
return ((-1 * RANK((DELTA(CLOSE,para_list[0]) * (1 - RANK(DECAYLINEAR((VOLUME / MEAN(VOLUME,para_list[1])), para_list[2])))))) * (1.0 + RANK(SUM(RET, para_list[3]))))
def Alpha26(para_list):
return (((SUM(CLOSE, para_list[0]) / para_list[0]) - CLOSE)) + ((CORR(VWAP, DELAY(CLOSE, para_list[1]), para_list[2])))
def Alpha27(para_list):
return WMA((CLOSE-DELAY(CLOSE,para_list[0]))/DELAY(CLOSE,para_list[0])*100.0\
+(CLOSE-DELAY(CLOSE,para_list[1]))/DELAY(CLOSE,para_list[1])*100.0,para_list[2])
#para_list[3] was originally set to 9; para_list[4] and para_list[5] were originally 3 and 2 respectively
def Alpha28(para_list):
return para_list[4]*SMA((CLOSE-TSMIN(LOW,para_list[0]))/(TSMAX(HIGH,para_list[0])-TSMIN(LOW,para_list[0]))*100,para_list[1],para_list[2])\
-para_list[5]*SMA(SMA((CLOSE-TSMIN(LOW,para_list[0]))/(MAX( HIGH,para_list[3])-TSMAX(LOW,para_list[0]))*100,para_list[1],para_list[2]),para_list[1],para_list[2])
def Alpha29(para_list):
return (CLOSE-DELAY(CLOSE,para_list[0]))/DELAY(CLOSE,para_list[0])*VOLUME
def Alpha30(para_list):
return CLOSE - CLOSE # dropped factor (requires the Fama three factors); placeholder that returns zeros
def Alpha31(para_list):
return (CLOSE-MEAN(CLOSE,para_list[0]))/MEAN(CLOSE,para_list[0])*100.0
def Alpha32(para_list):
return (-1 * SUM(RANK(CORR(RANK(HIGH), RANK(VOLUME), para_list[0])), para_list[0]))
def Alpha33(para_list):
return ((((-1 * TSMIN(LOW, para_list[0])) + DELAY(TSMIN(LOW, para_list[0]), para_list[0])) * RANK(((SUM(RET, para_list[1]) - SUM(RET, para_list[2])) / (para_list[3]))))* TSRANK(VOLUME, para_list[0]))
def Alpha34(para_list):
return MEAN(CLOSE,para_list[0])/CLOSE
#here para_list[2] is a float between(0,1)
def Alpha35(para_list):
return (-MIN(RANK(DECAYLINEAR(DELTA(OPEN, para_list[0]), para_list[1])),\
RANK(DECAYLINEAR(CORR((VOLUME), ((OPEN * para_list[2]) + (OPEN *(1-para_list[2]))), para_list[3]),para_list[4]))))
def Alpha36(para_list):
return RANK(SUM(CORR(RANK(VOLUME), RANK(VWAP), para_list[0]), para_list[1]))
def Alpha37(para_list):
return (- RANK(((SUM(OPEN, para_list[0]) * SUM(RET, para_list[0]))\
- DELAY((SUM(OPEN, para_list[0]) * SUM(RET, para_list[0])), para_list[1]))))
def Alpha38(para_list):
return ((SUM(HIGH, para_list[0])/para_list[0]) < HIGH) * (-1.0 * DELTA(HIGH, para_list[1]))
def Alpha39(para_list):
return (-(RANK(DECAYLINEAR(DELTA((CLOSE), para_list[0]),para_list[1]))\
-RANK(DECAYLINEAR(CORR(((VWAP * para_list[2]) + (OPEN * (1-para_list[2]))), SUM(MEAN(VOLUME,para_list[3]), para_list[4]), para_list[5]), para_list[6]))))
def Alpha40(para_list):
return SUM((CLOSE > DELAY(CLOSE,para_list[0]))*VOLUME, para_list[1])\
/SUM((CLOSE<= DELAY(CLOSE,para_list[0]))*VOLUME, para_list[1])*100.0
def Alpha41(para_list):
return (RANK(-MAX(DELTA((VWAP), para_list[0]), para_list[1])))
def Alpha42(para_list):
return ((-RANK(STD(HIGH, para_list[0]))) * CORR(HIGH, VOLUME, para_list[0]))
def Alpha43(para_list):
return SUM(VOLUME * (CLOSE>DELAY(CLOSE,para_list[0]))\
-VOLUME *(~(CLOSE>DELAY(CLOSE,para_list[0]))) * (CLOSE<DELAY(CLOSE,para_list[0])), para_list[1])
def Alpha44(para_list):
return TSRANK(DECAYLINEAR(CORR(LOW, MEAN(VOLUME,para_list[0]), para_list[1]), para_list[2]), para_list[3])\
+ TSRANK(DECAYLINEAR(DELTA(VWAP, para_list[4]), para_list[5]), para_list[6])
def Alpha45(para_list):
return RANK(DELTA(CLOSE * para_list[0] + OPEN * (1-para_list[0]), para_list[1]))\
* RANK(CORR(VWAP, MEAN(VOLUME, para_list[2]), para_list[3]))
#the 4.0 here also has a clear meaning: it averages over the 4 windows
def Alpha46(para_list):
return (MEAN(CLOSE,para_list[0])\
+ MEAN(CLOSE,para_list[1])\
+ MEAN(CLOSE,para_list[2])\
+ MEAN(CLOSE,para_list[3]))/(4.0*CLOSE)
def Alpha47(para_list):
return SMA((TSMAX(HIGH,para_list[0])-CLOSE)/(TSMAX(HIGH,para_list[0]) - TSMIN(LOW,para_list[0]))*100.0, para_list[1], para_list[2])
def Alpha48(para_list):
return (-(RANK(SIGN(CLOSE - DELAY(CLOSE, para_list[0]))\
+ SIGN(DELAY(CLOSE, para_list[0]) - DELAY(CLOSE, para_list[1]))\
+ SIGN(DELAY(CLOSE, para_list[1]) - DELAY(CLOSE, para_list[2])))\
* SUM(VOLUME, para_list[1] + para_list[2])) / SUM(VOLUME, para_list[3]))
def Alpha49(para_list):
dividend = SUM(MAX(ABS(HIGH-DELAY(HIGH,para_list[0])),ABS(LOW-DELAY(LOW,para_list[0]))),para_list[1])
divisor = SUM(~((HIGH+LOW) >= (DELAY(HIGH,para_list[0]) + DELAY(LOW,para_list[0])))\
*MAX(ABS(HIGH-DELAY(HIGH,para_list[0])),ABS(LOW-DELAY(LOW,para_list[0]))),para_list[1])
return divisor/dividend
def Alpha50(para_list):
subtend = SUM(~((HIGH+LOW) <= (DELAY(HIGH,para_list[0]) + DELAY(LOW,para_list[0])))\
*MAX(ABS(HIGH-DELAY(HIGH,para_list[0])),ABS(LOW-DELAY(LOW,para_list[0]))),para_list[1])\
/(SUM(MAX(ABS(HIGH-DELAY(HIGH,para_list[0])),ABS(LOW-DELAY(LOW,para_list[0]))),para_list[1]))
minuend = SUM(~((HIGH+LOW) >= (DELAY(HIGH,para_list[0]) + DELAY(LOW,para_list[0])))\
*MAX(ABS(HIGH-DELAY(HIGH,para_list[0])),ABS(LOW-DELAY(LOW,para_list[0]))),para_list[1])\
/(SUM(MAX(ABS(HIGH-DELAY(HIGH,para_list[0])),ABS(LOW-DELAY(LOW,para_list[0]))),para_list[1]))
return subtend - minuend
def Alpha51(para_list):
return SUM(~((HIGH+LOW) <= (DELAY(HIGH,para_list[0]) + DELAY(LOW,para_list[0])))\
*MAX(ABS(HIGH-DELAY(HIGH,para_list[0])),ABS(LOW-DELAY(LOW,para_list[0]))),para_list[1])\
/(SUM(MAX(ABS(HIGH-DELAY(HIGH,para_list[0])),ABS(LOW-DELAY(LOW,para_list[0]))),para_list[1]))
def Alpha52(para_list):
return SUM(MAX(0, HIGH-DELAY((HIGH+LOW+CLOSE)/3,para_list[0])), para_list[1])\
/SUM(MAX(0, DELAY((HIGH+LOW+CLOSE)/3,para_list[0]) - LOW),para_list[1])* 100.0
def Alpha53(para_list):
return COUNT(CLOSE>DELAY(CLOSE,para_list[0]),para_list[1])/para_list[1]*100.0
def Alpha54(para_list):
return (-RANK((STD(ABS(CLOSE - OPEN), para_list[0]) + (CLOSE - OPEN)) + CORR(CLOSE, OPEN, para_list[0])))
# part_B1_value in the original divides by 2 and by 4 (a kind of decayed sum); those constants have been replaced here by two parameters.
def Alpha55(para_list):
part_C_value = MAX(ABS(HIGH-DELAY(CLOSE,para_list[0])),\
ABS(LOW- DELAY(CLOSE,para_list[0])))
part_A_value = (CLOSE+(CLOSE-OPEN)/2.0-DELAY(OPEN,para_list[0]))
part_B1_cond = (ABS(HIGH-DELAY(CLOSE,para_list[0])) > ABS(LOW -DELAY(CLOSE,para_list[0])))\
&(ABS(HIGH-DELAY(CLOSE,para_list[0])) > ABS(HIGH-DELAY(LOW, para_list[0])))
part_B2_cond = (ABS(LOW- DELAY(CLOSE,para_list[0])) > ABS(HIGH-DELAY(LOW, para_list[0])))\
&(ABS(LOW- DELAY(CLOSE,para_list[0])) > ABS(HIGH-DELAY(CLOSE,para_list[0])))
part_B1_value= ABS(HIGH-DELAY(CLOSE,para_list[0])) + ABS(LOW -DELAY(CLOSE,para_list[0]))/para_list[1]\
+ ABS(DELAY(CLOSE,para_list[0])-DELAY(OPEN, para_list[0]))/para_list[2]
part_B2nvalue= (ABS(HIGH-DELAY(LOW ,para_list[0])) + ABS(DELAY(CLOSE,para_list[0])-DELAY(OPEN,para_list[0]))/para_list[2])
part_B_value = (part_B1_cond | (~part_B1_cond) & part_B2_cond) * part_B1_value\
+ ((~part_B1_cond) & (~part_B2_cond)) * part_B2nvalue
return SUM(part_A_value/part_B_value*part_C_value, para_list[1])
# This signal returns a boolean series; it has been checked against the original paper and the expression matches, puzzling as it looks.
def Alpha56(para_list):
return RANK((OPEN - TSMIN(OPEN, para_list[0]))) < RANK((RANK(CORR(SUM(((HIGH + LOW)/2.0), para_list[1]), SUM(MEAN(VOLUME,para_list[2]), para_list[3]), para_list[4]))**para_list[5]))
def Alpha57(para_list):
return SMA((CLOSE-TSMIN(LOW,para_list[0]))/(TSMAX(HIGH,para_list[0])-TSMIN(LOW,para_list[0])),para_list[1],para_list[2])
def Alpha58(para_list):
return COUNT(CLOSE>DELAY(CLOSE,para_list[0]),para_list[1])/para_list[1]
def Alpha59(para_list):
return SUM((CLOSE!=DELAY(CLOSE,para_list[0]))*CLOSE\
- ((CLOSE>DELAY(CLOSE,para_list[0]))* MIN(LOW, DELAY(CLOSE,para_list[0]))\
           + (~(CLOSE>DELAY(CLOSE,para_list[0]))) * MAX(HIGH,DELAY(CLOSE,para_list[0]))), para_list[1])
def Alpha60(para_list):
return SUM(((CLOSE-LOW)-(HIGH-CLOSE))/(HIGH-LOW)*VOLUME,para_list[0])
def Alpha61(para_list):
return (-MAX(RANK(DECAYLINEAR(DELTA(VWAP,para_list[0]),para_list[1])),\
RANK(DECAYLINEAR(RANK(CORR(LOW,MEAN(VOLUME,para_list[2]), para_list[3])),para_list[4]))))
def Alpha62(para_list):
return (-CORR(HIGH, RANK(VOLUME), para_list[0]))
def Alpha63(para_list):
return (SMA(MAX(CLOSE-DELAY(CLOSE,para_list[0]),0),para_list[1],para_list[2])\
/SMA(ABS(CLOSE-DELAY(CLOSE,para_list[0])) ,para_list[1],para_list[2]))
def Alpha64(para_list):
return -MAX(RANK(DECAYLINEAR(CORR(RANK(VWAP), RANK(VOLUME), para_list[0]),para_list[0])),\
RANK(DECAYLINEAR(MAX(CORR(RANK(CLOSE), RANK(MEAN(VOLUME,para_list[1])), para_list[0]), para_list[2]), para_list[3])))
def Alpha65(para_list):
return MEAN(CLOSE,para_list[0])/CLOSE
def Alpha66(para_list):
return (CLOSE-MEAN(CLOSE,para_list[0]))/MEAN(CLOSE,para_list[0])
def Alpha67(para_list):
    return SMA(MAX(CLOSE-DELAY(CLOSE,para_list[0]),0),para_list[1],para_list[2])\
/SMA(ABS(CLOSE-DELAY(CLOSE,para_list[0])),para_list[1],para_list[2])
def Alpha68(para_list):
    return SMA(((HIGH+LOW)/2-(DELAY(HIGH,para_list[0])+DELAY(LOW,para_list[0]))/2.0)*(HIGH-LOW)/VOLUME,para_list[1],para_list[2])
def Alpha69(para_list):
    cond_up = SUM(DTM,para_list[0]) > SUM(DBM,para_list[0])
    cond_dn = (~cond_up) & (SUM(DTM,para_list[0]) != SUM(DBM,para_list[0]))
    cache = cond_up * (SUM(DTM,para_list[0]) - SUM(DBM,para_list[0]))/SUM(DTM,para_list[0])\
          + cond_dn * (SUM(DTM,para_list[0]) - SUM(DBM,para_list[0]))/SUM(DBM,para_list[0])
return cache.fillna(method='ffill').fillna(method='bfill')
def Alpha70(para_list):
return STD(AMOUNT,para_list[0])
def Alpha71(para_list):
return (CLOSE-MEAN(CLOSE,para_list[0]))/MEAN(CLOSE,para_list[0])
def Alpha72(para_list):
return SMA((TSMAX(HIGH,para_list[0])-CLOSE)/(TSMAX(HIGH,para_list[0])-TSMIN(LOW,para_list[0])),para_list[1],para_list[2])
def Alpha73(para_list):
return (TSRANK(DECAYLINEAR(DECAYLINEAR(CORR(CLOSE, VOLUME,para_list[0]),para_list[1]),para_list[2]),para_list[3])-RANK(DECAYLINEAR(CORR(VWAP, MEAN(VOLUME,30),4),3))) * -1
#para_list[0] is a float between (0,1)
def Alpha74(para_list):
return RANK(CORR(SUM(((LOW * para_list[0]) + VWAP*(1-para_list[0])), para_list[1]), SUM(MEAN(VOLUME,para_list[2]),para_list[1]), para_list[3])) + RANK(CORR(RANK(VWAP), RANK(VOLUME), para_list[4]))
def Alpha75(para_list):
return CLOSE - CLOSE
def Alpha76(para_list):
return STD(ABS((CLOSE/DELAY(CLOSE,para_list[0])-1.0))/VOLUME,para_list[1])/MEAN(ABS((CLOSE/DELAY(CLOSE,para_list[0])-1.0))/VOLUME,para_list[1])
def Alpha77(para_list):
return MIN(RANK(DECAYLINEAR(((((HIGH + LOW) / 2) + HIGH) - (VWAP+HIGH)),para_list[0])),RANK(DECAYLINEAR(CORR(((HIGH + LOW) / 2), MEAN(VOLUME,para_list[1]),para_list[2]),para_list[3])))
#here para_list[1] is a float
def Alpha78(para_list):
return ((HIGH+LOW+CLOSE)/3-MEAN((HIGH+LOW+CLOSE)/3,para_list[0]))/(para_list[1]*MEAN(ABS(CLOSE-MEAN((HIGH+LOW+CLOSE)/3,para_list[0])),para_list[0]))
def Alpha79(para_list):
return SMA(MAX(CLOSE-DELAY(CLOSE,para_list[0]),0),para_list[1],para_list[2])\
/SMA(ABS(CLOSE-DELAY(CLOSE,para_list[0])) ,para_list[1],para_list[2])
def Alpha80(para_list):
return (VOLUME-DELAY(VOLUME,para_list[0]))/DELAY(VOLUME,para_list[0])
def Alpha81(para_list):
return SMA(VOLUME,para_list[0],para_list[1])
def Alpha82(para_list):
return SMA((TSMAX(HIGH,para_list[0])-CLOSE)/(TSMAX(HIGH,para_list[0])-TSMIN(LOW,para_list[0])),para_list[1],para_list[2])
def Alpha83(para_list):
return (-RANK(COVIANCE(RANK(HIGH), RANK(VOLUME), para_list[0])))
def Alpha84(para_list):
return SUM((CLOSE>DELAY(CLOSE,para_list[0]))*VOLUME+\
(~(CLOSE>DELAY(CLOSE,para_list[0]))&(CLOSE<DELAY(CLOSE,para_list[0])))*(-VOLUME),para_list[1])
def Alpha85(para_list):
return TSRANK((VOLUME / MEAN(VOLUME,para_list[0])),para_list[0])\
* TSRANK((-1 * DELTA(CLOSE, para_list[1])), para_list[2])
#para_list[0] is a float
def Alpha86(para_list):
return ( para_list[0] < (((DELAY(CLOSE, para_list[1]) - DELAY(CLOSE, para_list[2])) / para_list[2]) - ((DELAY(CLOSE, para_list[3]) - CLOSE) / para_list[3]))) *(-1.0)\
+ (~(para_list[0] < (((DELAY(CLOSE, para_list[1]) - DELAY(CLOSE, para_list[2])) / para_list[2]) - ((DELAY(CLOSE, para_list[3]) - CLOSE) / para_list[3]))))\
* ((((( DELAY(CLOSE, para_list[1]) - DELAY(CLOSE, para_list[2])) / para_list[2]) - ((DELAY(CLOSE, para_list[3]) - CLOSE) / para_list[3])) < 0) * 1.0\
+ (~((((DELAY(CLOSE, para_list[1]) - DELAY(CLOSE, para_list[2])) / para_list[2]) - ((DELAY(CLOSE, para_list[3]) - CLOSE) / para_list[3])) < 0)) *(-1.0))
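# ----------------------------------------------------------------------------
# Note on the idiom used above and throughout this file: conditional branches
# of the original formulas are vectorised as cond * a + (~cond) * b, where
# cond is a boolean series. A tiny hypothetical example of the pattern:
def _piecewise_demo(x):
    cond = x > 0
    return cond * 1.0 + (~cond) * (-1.0)   # +1 where x > 0, otherwise -1
# ----------------------------------------------------------------------------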
# LOW*0.9 + LOW*0.1 in the original is just LOW, so it is rewritten as HIGH*para_list[4] + LOW*(1-para_list[4]); para_list[4] is therefore a float between 0 and 1.
def Alpha87(para_list):
return (-(RANK(DECAYLINEAR(DELTA(VWAP, para_list[0]), para_list[1]))\
+ TSRANK(DECAYLINEAR((((LOW) - VWAP) / (OPEN - ((HIGH*para_list[4] + LOW*(1-para_list[4])) / 2))), para_list[2]), para_list[3])))
def Alpha88(para_list):
return (CLOSE-DELAY(CLOSE,para_list[0]))/DELAY(CLOSE,para_list[0])
def Alpha89(para_list):
return (SMA(CLOSE,para_list[0],para_list[3])\
-SMA(CLOSE,para_list[1],para_list[4])\
-SMA(SMA(CLOSE,para_list[0],para_list[3])\
-SMA(CLOSE,para_list[1],para_list[4]),para_list[2],para_list[5]))
def Alpha90(para_list):
return (-RANK(CORR(RANK(VWAP), RANK(VOLUME), para_list[0])))
def Alpha91(para_list):
return (-(RANK((CLOSE - MAX(CLOSE, para_list[0])))\
*RANK(CORR((MEAN(VOLUME,para_list[1])), LOW, para_list[0]))))
#para_list[0] is a float between 0 and 1
def Alpha92(para_list):
return -MAX(RANK(DECAYLINEAR(DELTA(((CLOSE* para_list[0])+ (VWAP*(1-para_list[0]))),para_list[1]),para_list[2])),\
TSRANK(DECAYLINEAR(ABS(CORR((MEAN(VOLUME,para_list[3])), CLOSE, para_list[4])), para_list[5]), para_list[6]))
def Alpha93(para_list):
return SUM(~(OPEN>=DELAY(OPEN,para_list[0]))*MAX((OPEN-LOW),(OPEN-DELAY(OPEN,para_list[0]))),para_list[1])
def Alpha94(para_list):
    return SUM((CLOSE>DELAY(CLOSE,para_list[0]))*VOLUME\
               + (~(CLOSE>DELAY(CLOSE,para_list[0])))*(-VOLUME)*(CLOSE<DELAY(CLOSE,para_list[0])),para_list[1])
def Alpha95(para_list):
return STD(AMOUNT,para_list[0])
def Alpha96(para_list):
return SMA(SMA((CLOSE-TSMIN(LOW,para_list[0]))/(TSMAX(HIGH,para_list[0])-TSMIN(LOW,para_list[0])),para_list[1],para_list[2]),para_list[3],para_list[4])
# near-duplicate of Alpha95 (uses VOLUME instead of AMOUNT)
def Alpha97(para_list):
return STD(VOLUME,para_list[0])
#para_list[2] is a float
def Alpha98(para_list):
condition = ((DELTA((SUM(CLOSE, para_list[0]) / para_list[0]), para_list[0]) / DELAY(CLOSE, para_list[0])) <= para_list[2])
return -(condition * ((CLOSE - TSMIN(CLOSE, para_list[0])))\
+(~condition) * DELTA(CLOSE, para_list[1]))
def Alpha99(para_list):
return (-RANK(COVIANCE(RANK(CLOSE), RANK(VOLUME), para_list[0])))
# duplicate of Alpha97, and near-duplicate of Alpha95
def Alpha100(para_list):
return STD(VOLUME,para_list[0])
'''returns a boolean (True/False) series; para_list[4] is a float between 0 and 1'''
def Alpha101(para_list):
return (-(RANK(CORR(CLOSE, SUM(MEAN(VOLUME,para_list[0]), para_list[1]), para_list[2])) <
RANK(CORR(RANK(((HIGH * para_list[4]) + (VWAP * (1-para_list[4])))), RANK(VOLUME), para_list[3]))))
def Alpha102(para_list):
return SMA(MAX(VOLUME-DELAY(VOLUME,para_list[0]),0),para_list[1],para_list[2])\
/SMA(ABS(VOLUME-DELAY(VOLUME,para_list[0])) ,para_list[1],para_list[2])
def Alpha103(para_list):
return ((para_list[0]-LOWDAY(LOW,para_list[0]))/para_list[0])
def Alpha104(para_list):
return (-(DELTA(CORR(HIGH, VOLUME, para_list[0]), para_list[0]) * RANK(STD(CLOSE, para_list[1]))))
def Alpha105(para_list):
return (-1 * CORR(RANK(OPEN), RANK(VOLUME), para_list[0]))
def Alpha106(para_list):
return CLOSE-DELAY(CLOSE,para_list[0])
def Alpha107(para_list):
return -RANK(OPEN - DELAY(HIGH, para_list[0]))\
* RANK(OPEN - DELAY(CLOSE, para_list[0]))\
* RANK(OPEN - DELAY(LOW, para_list[0]))
def Alpha108(para_list):
return (-(RANK((HIGH - MIN(HIGH, para_list[0])))**RANK(CORR((VWAP), (MEAN(VOLUME,para_list[1])), para_list[2]))))
def Alpha109(para_list):
return SMA(HIGH-LOW,para_list[0],para_list[1])/SMA(SMA(HIGH-LOW,para_list[0],para_list[1]),para_list[0],para_list[1])
def Alpha110(para_list):
return SUM(MAX(0,HIGH-DELAY(CLOSE,para_list[0])),para_list[1])\
/SUM(MAX(0,-LOW+DELAY(CLOSE,para_list[0])),para_list[1])
def Alpha111(para_list):
return SMA(VOLUME*((CLOSE-LOW)-(HIGH-CLOSE))/(HIGH-LOW),para_list[0],para_list[2])\
-SMA(VOLUME*((CLOSE-LOW)-(HIGH-CLOSE))/(HIGH-LOW),para_list[1],para_list[3])
def Alpha112(para_list):
return (SUM((CLOSE-DELAY(CLOSE,para_list[0])>0) * (CLOSE-DELAY(CLOSE,para_list[1])),para_list[2])\
-SUM((CLOSE-DELAY(CLOSE,para_list[0])<0) * ABS(CLOSE-DELAY(CLOSE,para_list[1])),para_list[2]))\
/(SUM((CLOSE-DELAY(CLOSE,para_list[0])>0) * (CLOSE-DELAY(CLOSE,para_list[1])),para_list[2])\
+SUM((CLOSE-DELAY(CLOSE,para_list[0])<0) * ABS(CLOSE-DELAY(CLOSE,para_list[1])),para_list[2]))
def Alpha113(para_list):
return -(RANK(SUM(DELAY(CLOSE, para_list[0]), para_list[1]) / para_list[1]) * CORR(CLOSE, VOLUME, para_list[2]))\
* RANK(CORR(SUM( CLOSE, para_list[0]), SUM(CLOSE, para_list[1]), para_list[2]))
def Alpha114(para_list):
return ((RANK(DELAY(((HIGH - LOW) / (SUM(CLOSE, para_list[0]) / para_list[0])), para_list[1])) * RANK(RANK(VOLUME)))
/ (((HIGH - LOW) / (SUM(CLOSE, para_list[0]) / para_list[0])) / (VWAP - CLOSE)))
#para_list[0] is a float between 0 and 1
def Alpha115(para_list):
return RANK(CORR(((HIGH * para_list[0]) + (CLOSE * (1-para_list[0]))), MEAN(VOLUME, para_list[1]),para_list[2]))\
import re
from flaski import app
from flask_login import current_user
from flask_caching import Cache
from flaski.routines import check_session_app
import dash
from dash.dependencies import Input, Output, State
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from ._utils import handle_dash_exception, parse_table, protect_dashviews, validate_user_access, \
make_navbar, make_footer, make_options, make_table, META_TAGS, make_min_width, \
change_table_minWidth, change_fig_minWidth
from ._aadatalake import read_results_files, read_gene_expression, read_genes, read_significant_genes, \
filter_samples, filter_genes, filter_gene_expression, nFormat, read_dge,\
make_volcano_plot, make_ma_plot, make_pca_plot, make_annotated_col
import uuid
from werkzeug.utils import secure_filename
import json
from flask import session
import pandas as pd
import os
CURRENTAPP="aadatalake"
navbar_title="RNAseq data lake"
dashapp = dash.Dash(CURRENTAPP,url_base_pathname=f'/{CURRENTAPP}/' , meta_tags=META_TAGS, server=app, external_stylesheets=[dbc.themes.BOOTSTRAP], title="FLASKI", assets_folder="/flaski/flaski/static/dash/")
protect_dashviews(dashapp)
cache = Cache(dashapp.server, config={
'CACHE_TYPE': 'redis',
'CACHE_REDIS_URL': 'redis://:%s@%s' %( os.environ.get('REDIS_PASSWORD'), os.environ.get('REDIS_ADDRESS') ) #'redis://localhost:6379'),
})
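# Illustration only (hypothetical helper, not used below): a Cache configured
# as above is typically applied through its memoize decorator so that expensive
# data-lake reads are served from Redis on repeated callback invocations.
@cache.memoize(timeout=3600)
def _demo_cached_read(dataset_name):
    # stand-in for an expensive read such as read_results_files()
    return dataset_name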
controls = [
html.H5("Filters", style={"margin-top":10}),
html.Label('Data sets'), dcc.Dropdown( id='opt-datasets', multi=True),
html.Label('Groups',style={"margin-top":10}), dcc.Dropdown( id='opt-groups', multi=True),
html.Label('Samples',style={"margin-top":10}), dcc.Dropdown( id='opt-samples', multi=True),
html.Label('Gene names',style={"margin-top":10}), dcc.Dropdown( id='opt-genenames', multi=True),
html.Label('Gene IDs',style={"margin-top":10}), dcc.Dropdown( id='opt-geneids', multi=True),
html.Label('Download file prefix',style={"margin-top":10}), dcc.Input(id='download_name', value="data.lake", type='text') ]
side_bar=[ dbc.Card(controls, body=True),
html.Button(id='submit-button-state', n_clicks=0, children='Submit', style={"width": "100%","margin-top":4, "margin-bottom":4} )
]
# Define Layout
dashapp.layout = html.Div( [ html.Div(id="navbar"), dbc.Container(
fluid=True,
children=[
html.Div(id="app_access"),
html.Div(id="redirect-pca"),
html.Div(id="redirect-volcano"),
html.Div(id="redirect-ma"),
dcc.Store(data=str(uuid.uuid4()), id='session-id'),
dbc.Row(
[
dbc.Col( dcc.Loading(
id="loading-output-1",
type="default",
children=html.Div(id="side_bar"),
style={"margin-top":"0%"}
),
md=3, style={"height": "100%",'overflow': 'scroll'} ),
dbc.Col( dcc.Loading(
id="loading-output-2",
type="default",
children=[ html.Div(id="my-output")],
style={"margin-top":"50%","height": "100%"} ),
md=9, style={"height": "100%","width": "100%",'overflow': 'scroll'})
],
style={"min-height": "87vh"}),
] )
] + make_footer()
)
## callback elements declared as `State` are only read when the submit button is pressed
## callback elements declared as `Input` trigger the callback every time their value changes
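# Illustration of the rule above (hypothetical mini-app, never instantiated by
# this module): the button is the only Input, so the callback fires on clicks,
# while the text box is passed as State and is merely read at that moment.
def _input_vs_state_demo():
    demo = dash.Dash("input_vs_state_demo")
    demo.layout = html.Div([
        dcc.Input(id="text", value=""),
        html.Button("Go", id="go", n_clicks=0),
        html.Div(id="out"),
    ])
    @demo.callback(Output("out", "children"),
                   Input("go", "n_clicks"),
                   State("text", "value"))
    def _show(n_clicks, text):
        return f"clicked {n_clicks} times with text: {text}"
    return demo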
@dashapp.callback(
Output(component_id='my-output', component_property='children'),
Input('session-id', 'data'),
Input('submit-button-state', 'n_clicks'),
State("opt-datasets", "value"),
State("opt-groups", "value"),
State("opt-samples", "value"),
State("opt-genenames", "value"),
State("opt-geneids", "value"),
State(component_id='download_name', component_property='value'),
)
def update_output(session_id, n_clicks, datasets, groups, samples, genenames, geneids, download_name):
if not validate_user_access(current_user,CURRENTAPP):
return None
selected_results_files, ids2labels=filter_samples(datasets=datasets,groups=groups, reps=samples, cache=cache)
## samples
results_files=selected_results_files[["Set","Group","Reps"]]
results_files.columns=["Set","Group","Sample"]
results_files=results_files.drop_duplicates()
results_files_=make_table(results_files,"results_files")
# results_files_ = dbc.Table.from_dataframe(results_files, striped=True, bordered=True, hover=True)
download_samples=html.Div(
[
html.Button(id='btn-samples', n_clicks=0, children='Download', style={"margin-top":4, 'background-color': "#5474d8", "color":"white"}),
dcc.Download(id="download-samples")
]
)
## gene expression
if datasets or groups or samples or genenames or geneids :
gene_expression=filter_gene_expression(ids2labels,genenames,geneids,cache)
gene_expression_=make_table(gene_expression,"gene_expression")#,fixed_columns={'headers': True, 'data': 2} )
# gene_expression_ = dbc.Table.from_dataframe(gene_expression, striped=True, bordered=True, hover=True)
download_geneexp=html.Div(
[
html.Button(id='btn-geneexp', n_clicks=0, children='Download', style={"margin-top":4, 'background-color': "#5474d8", "color":"white"}),
dcc.Download(id="download-geneexp")
]
)
gene_expression_bol=True
else:
gene_expression_bol=False
## PCA
selected_sets=list(set(selected_results_files["Set"]))
if len(selected_sets) == 1 :
pca_data=filter_gene_expression(ids2labels,None,None,cache)
pca_plot, pca_pa, pca_df=make_pca_plot(pca_data,selected_sets[0])
pca_config={ 'toImageButtonOptions': { 'format': 'svg', 'filename': download_name+".pca" }}
pca_plot=dcc.Graph(figure=pca_plot, config=pca_config, style={"width":"100%","overflow-x":"auto"})
iscatter_pca=html.Div(
[
html.Button(id='btn-iscatter_pca', n_clicks=0, children='iScatterplot',
style={"margin-top":4, \
"margin-left":4,\
"margin-right":4,\
'background-color': "#5474d8", \
"color":"white"})
])
pca_bol=True
else:
pca_bol=False
## differential gene expression
dge_bol=False
volcano_plot=None
if not samples:
if len(selected_sets) == 1 :
dge_groups=list(set(selected_results_files["Group"]))
if len(dge_groups) == 2:
dge=read_dge(selected_sets[0], dge_groups, cache)
dge_plots=dge.copy()
if genenames:
dge=dge[dge["gene name"].isin(genenames)]
if geneids:
dge=dge[dge["gene id"].isin(geneids)]
dge_=make_table(dge,"dge")
download_dge=html.Div(
[
html.Button(id='btn-dge', n_clicks=0, children='Download', style={"margin-top":4, 'background-color': "#5474d8", "color":"white"}),
dcc.Download(id="download-dge")
]
)
annotate_genes=[]
if genenames:
genenames_=dge[dge["gene name"].isin(genenames)]["gene name"].tolist()
annotate_genes=annotate_genes+genenames_
if geneids:
genenames_=dge[dge["gene id"].isin(geneids)]["gene name"].tolist()
annotate_genes=annotate_genes+genenames_
volcano_config={ 'toImageButtonOptions': { 'format': 'svg', 'filename': download_name+".volcano" }}
volcano_plot, volcano_pa, volcano_df=make_volcano_plot(dge_plots, selected_sets[0], annotate_genes)
volcano_plot.update_layout(clickmode='event+select')
volcano_plot=dcc.Graph(figure=volcano_plot, config=volcano_config, style={"width":"100%","overflow-x":"auto"}, id="volcano_plot")
iscatter_volcano=html.Div(
[
html.Button(id='btn-iscatter_volcano', n_clicks=0, children='iScatterplot',
style={"margin-top":4, \
"margin-left":4,\
"margin-right":4,\
'background-color': "#5474d8", \
"color":"white"})
])
ma_config={ 'toImageButtonOptions': { 'format': 'svg', 'filename': download_name+".ma" }}
ma_plot, ma_pa, ma_df=make_ma_plot(dge_plots, selected_sets[0],annotate_genes )
ma_plot.update_layout(clickmode='event+select')
ma_plot=dcc.Graph(figure=ma_plot, config=ma_config, style={"width":"100%","overflow-x":"auto"}, id="ma_plot")
iscatter_ma=html.Div(
[
html.Button(id='btn-iscatter_ma', n_clicks=0, children='iScatterplot',
style={"margin-top":4, \
"margin-left":4,\
"margin-right":4,\
'background-color': "#5474d8", \
"color":"white"})
])
dge_bol=True
    if dge_bol and pca_bol:
minwidth=["Samples","Expression", "PCA", "DGE","Volcano","MA"]
minwidth=len(minwidth) * 150
minwidth = str(minwidth) + "px"
results_files_=change_table_minWidth(results_files_,minwidth)
gene_expression_=change_table_minWidth(gene_expression_,minwidth)
dge_=change_table_minWidth(dge_,minwidth)
pca_plot=change_fig_minWidth(pca_plot,minwidth)
out=dcc.Tabs( [
dcc.Tab([ results_files_, download_samples],
label="Samples", id="tab-samples",
style={"margin-top":"0%"}),
dcc.Tab( [ pca_plot, iscatter_pca ],
label="PCA", id="tab-pca",
style={"margin-top":"0%"}),
dcc.Tab( [ gene_expression_, download_geneexp],
label="Expression", id="tab-geneexpression",
style={"margin-top":"0%"}),
dcc.Tab( [ dge_, download_dge],
label="DGE", id="tab-dge",
style={"margin-top":"0%"}),
dcc.Tab( [ dbc.Row( [
dbc.Col(volcano_plot),
dbc.Col( [ html.Div(id="volcano-plot-table") ]
) ],
style={"minWidth":minwidth}),
dbc.Row([iscatter_volcano,html.Div(id="volcano-bt")]),
],
label="Volcano", id="tab-volcano",
style={"margin-top":"0%"}),
dcc.Tab( [ dbc.Row( [
dbc.Col(ma_plot),
dbc.Col( [ html.Div(id="ma-plot-table") ]
) ],
style={"minWidth":minwidth}),
dbc.Row([iscatter_ma,html.Div(id="ma-bt")]),
] ,
label="MA", id="tab-ma",
style={"margin-top":"0%"})
],
mobile_breakpoint=0,
style={"height":"50px","margin-top":"0px","margin-botom":"0px", "width":"100%","overflow-x":"auto", "minWidth":minwidth} )
elif pca_bol :
minwidth=["Samples","Expression", "PCA"]
minwidth=len(minwidth) * 150
minwidth = str(minwidth) + "px"
results_files_=change_table_minWidth(results_files_,minwidth)
gene_expression_=change_table_minWidth(gene_expression_,minwidth)
pca_plot=change_fig_minWidth(pca_plot,minwidth)
out=dcc.Tabs( [
dcc.Tab([ results_files_, download_samples],
label="Samples", id="tab-samples",
style={"margin-top":"0%"}),
dcc.Tab( [ pca_plot, iscatter_pca ],
label="PCA", id="tab-pca",
style={"margin-top":"0%"}),
dcc.Tab( [ gene_expression_, download_geneexp],
label="Expression", id="tab-geneexpression",
style={"margin-top":"0%"}),
],
mobile_breakpoint=0,
style={"height":"50px","margin-top":"0px","margin-botom":"0px", "width":"100%","overflow-x":"auto", "minWidth":minwidth} )
elif gene_expression_bol:
minwidth=["Samples","Expression"]
minwidth=len(minwidth) * 150
minwidth = str(minwidth) + "px"
results_files_=change_table_minWidth(results_files_,minwidth)
gene_expression_=change_table_minWidth(gene_expression_,minwidth)
out=dcc.Tabs( [
dcc.Tab([ results_files_, download_samples],
label="Samples", id="tab-samples",
style={"margin-top":"0%"}),
dcc.Tab( [ gene_expression_, download_geneexp],
label="Expression", id="tab-geneexpression",
style={"margin-top":"0%"}),
],
mobile_breakpoint=0,
style={"height":"50px","margin-top":"0px","margin-botom":"0px", "width":"100%","overflow-x":"auto", "minWidth":minwidth} )
else:
minwidth=["Samples"]
minwidth=len(minwidth) * 150
minwidth = str(minwidth) + "px"
results_files_=change_table_minWidth(results_files_,minwidth)
out=dcc.Tabs( [
dcc.Tab([ results_files_, download_samples],
label="Samples", id="tab-samples",
style={"margin-top":"0%"}),
],
mobile_breakpoint=0,
style={"height":"50px","margin-top":"0px","margin-botom":"0px", "width":"100%","overflow-x":"auto", "minWidth":minwidth} )
return out
@dashapp.callback(
Output('volcano-plot-table', 'children'),
Output('volcano-bt', 'children'),
Input('volcano_plot', 'selectedData')
)
def display_volcano_data(selectedData):
if selectedData:
selected_genes=selectedData["points"]
selected_genes=[ s["text"] for s in selected_genes ]
df=pd.DataFrame({"Selected genes":selected_genes})
df=make_table(df,"selected_volcano")
st=df.style_table
st["width"]="50%"
st["margin-top"]="40px"
st["align"]="center"
st["margin-left"]="auto"
st["margin-right"]="auto"
df.style_table=st
df.style_cell={'whiteSpace': 'normal', 'textAlign': 'center'}
download_selected_volcano=html.Div(
[
html.Button(id='btn-selected_volcano', n_clicks=0, children='Excel',
style={"margin-top":4, \
"margin-left":4,\
"margin-right":4,\
'background-color': "#5474d8", \
"color":"white"}),
dcc.Download(id="download-selected_volcano")
])
return df, download_selected_volcano
else:
return None, None
@dashapp.callback(
Output("download-selected_volcano", "data"),
Input("btn-selected_volcano", "n_clicks"),
State('volcano_plot', 'selectedData'),
State("opt-datasets", "value"),
State("opt-groups", "value"),
State('download_name', 'value'),
prevent_initial_call=True,
)
def download_selected_volcano(n_clicks,selectedData,datasets,groups,download_name):
selected_results_files, ids2labels=filter_samples(datasets=datasets,groups=groups, reps=None, cache=cache)
selected_genes=selectedData["points"]
selected_genes=[ s["text"] for s in selected_genes ]
dge_datasets=list(set(selected_results_files["Set"]))
dge_groups=list(set(selected_results_files["Group"]))
dge=read_dge(dge_datasets[0], dge_groups, cache)
dge=dge[dge["gene name"].isin(selected_genes)]
fileprefix=secure_filename(str(download_name))
filename="%s.dge.volcano_selected.xlsx" %fileprefix
return dcc.send_data_frame(dge.to_excel, filename, sheet_name="dge.volcano", index=False)
@dashapp.callback(
Output("redirect-volcano", 'children'),
Input("btn-iscatter_volcano", "n_clicks"),
State("opt-datasets", "value"),
State("opt-groups", "value"),
State("opt-genenames", "value"),
State("opt-geneids", "value"),
prevent_initial_call=True,
)
def volcano_to_iscatterplot(n_clicks,datasets, groups, genenames, geneids):
if n_clicks:
selected_results_files, ids2labels=filter_samples(datasets=datasets,groups=groups, reps=None, cache=cache)
dge_datasets=list(set(selected_results_files["Set"]))
dge_groups=list(set(selected_results_files["Group"]))
dge=read_dge(dge_datasets[0], dge_groups, cache)
annotate_genes=[]
if genenames:
genenames_=dge[dge["gene name"].isin(genenames)]["gene name"].tolist()
annotate_genes=annotate_genes+genenames_
if geneids:
genenames_=dge[dge["gene id"].isin(geneids)]["gene name"].tolist()
annotate_genes=annotate_genes+genenames_
volcano_plot, volcano_pa, volcano_df=make_volcano_plot(dge, dge_datasets[0], annotate_genes)
reset_info=check_session_app(session,"iscatterplot",current_user.user_apps)
volcano_pa["xcols"]=volcano_df.columns.tolist()
volcano_pa["ycols"]=volcano_df.columns.tolist()
volcano_pa["groups"]=["None"]+volcano_df.columns.tolist()
volcano_df["datalake_search"]=volcano_df["gene name"].apply(lambda x: make_annotated_col(x, annotate_genes) )
volcano_pa["labels_col"]=["select a column.."]+volcano_df.columns.tolist()
volcano_pa["labels_col_value"]="select a column.."
volcano_df=volcano_df.drop(["___label___"],axis=1)
session["filename"]="<from RNAseq lake>"
session["plot_arguments"]=volcano_pa
session["COMMIT"]=app.config['COMMIT']
session["app"]="iscatterplot"
session["df"]=volcano_df.to_json()
return dcc.Location(pathname="/iscatterplot", id="index")
@dashapp.callback(
Output('ma-plot-table', 'children'),
Output('ma-bt', 'children'),
Input('ma_plot', 'selectedData')
)
def display_ma_data(selectedData):
if selectedData:
selected_genes=selectedData["points"]
selected_genes=[ s["text"] for s in selected_genes ]
df=pd.DataFrame({"Selected genes":selected_genes})
df=make_table(df,"selected_ma")
st=df.style_table
st["width"]="50%"
st["margin-top"]="40px"
st["align"]="center"
st["margin-left"]="auto"
st["margin-right"]="auto"
df.style_table=st
df.style_cell={'whiteSpace': 'normal', 'textAlign': 'center'}
download_selected_ma=html.Div(
[
html.Button(id='btn-selected_ma', n_clicks=0, children='Excel',
style={"margin-top":4, \
"margin-left":4,\
"margin-right":4,\
'background-color': "#5474d8", \
"color":"white"}),
dcc.Download(id="download-selected_ma")
])
return df, download_selected_ma
else:
return None, None
@dashapp.callback(
Output("download-selected_ma", "data"),
Input("btn-selected_ma", "n_clicks"),
State('ma_plot', 'selectedData'),
State("opt-datasets", "value"),
State("opt-groups", "value"),
State('download_name', 'value'),
prevent_initial_call=True,
)
def download_selected_ma(n_clicks,selectedData,datasets,groups,download_name):
selected_results_files, ids2labels=filter_samples(datasets=datasets,groups=groups, reps=None, cache=cache)
selected_genes=selectedData["points"]
selected_genes=[ s["text"] for s in selected_genes ]
dge_datasets=list(set(selected_results_files["Set"]))
dge_groups=list(set(selected_results_files["Group"]))
dge=read_dge(dge_datasets[0], dge_groups, cache)
dge=dge[dge["gene name"].isin(selected_genes)]
fileprefix=secure_filename(str(download_name))
filename="%s.dge.ma_selected.xlsx" %fileprefix
return dcc.send_data_frame(dge.to_excel, filename, sheet_name="dge.ma", index=False)
@dashapp.callback(
Output("redirect-ma", 'children'),
Input("btn-iscatter_ma", "n_clicks"),
State("opt-datasets", "value"),
State("opt-groups", "value"),
State("opt-genenames", "value"),
State("opt-geneids", "value"),
prevent_initial_call=True,
)
def ma_to_iscatterplot(n_clicks,datasets, groups, genenames, geneids):
if n_clicks:
selected_results_files, ids2labels=filter_samples(datasets=datasets,groups=groups, reps=None, cache=cache)
dge_datasets=list(set(selected_results_files["Set"]))
dge_groups=list(set(selected_results_files["Group"]))
dge=read_dge(dge_datasets[0], dge_groups, cache)
annotate_genes=[]
if genenames:
genenames_=dge[dge["gene name"].isin(genenames)]["gene name"].tolist()
annotate_genes=annotate_genes+genenames_
if geneids:
genenames_=dge[dge["gene id"].isin(geneids)]["gene name"].tolist()
annotate_genes=annotate_genes+genenames_
ma_plot, ma_pa, ma_df=make_ma_plot(dge, dge_datasets[0],annotate_genes )
reset_info=check_session_app(session,"iscatterplot",current_user.user_apps)
ma_pa["xcols"]=ma_df.columns.tolist()
ma_pa["ycols"]=ma_df.columns.tolist()
ma_pa["groups"]=["None"]+ma_df.columns.tolist()
ma_df["datalake_search"]=ma_df["gene name"].apply(lambda x: make_annotated_col(x, annotate_genes) )
ma_df=ma_df.drop(["___label___"],axis=1)
ma_pa["labels_col"]=["select a column.."]+ma_df.columns.tolist()
ma_pa["labels_col_value"]="select a column.."
session["filename"]="<from RNAseq lake>"
session["plot_arguments"]=ma_pa
session["COMMIT"]=app.config['COMMIT']
session["app"]="iscatterplot"
session["df"]=ma_df.to_json()
return dcc.Location(pathname="/iscatterplot", id="index")
@dashapp.callback(
Output("redirect-pca", 'children'),
Input("btn-iscatter_pca", "n_clicks"),
State("opt-datasets", "value"),
State("opt-groups", "value"),
prevent_initial_call=True,
)
def pca_to_iscatterplot(n_clicks,datasets, groups):
if n_clicks:
selected_results_files, ids2labels=filter_samples(datasets=datasets,groups=groups, reps=None, cache=cache)
pca_data=filter_gene_expression(ids2labels,None,None,cache)
selected_sets=list(set(selected_results_files["Set"]))
pca_plot, pca_pa, pca_df=make_pca_plot(pca_data,selected_sets[0])
reset_info=check_session_app(session,"iscatterplot",current_user.user_apps)
pca_pa["xcols"]=pca_df.columns.tolist()
pca_pa["ycols"]=pca_df.columns.tolist()
pca_pa["groups"]=["None"]+pca_df.columns.tolist()
pca_pa["labels_col"]=["select a column.."]+pca_df.columns.tolist()
pca_pa["labels_col_value"]="select a column.."
session["filename"]="<from RNAseq lake>"
session["plot_arguments"]=pca_pa
session["COMMIT"]=app.config['COMMIT']
session["app"]="iscatterplot"
session["df"]=pca_df.to_json()
return dcc.Location(pathname="/iscatterplot", id="index")
@dashapp.callback(
Output("download-samples", "data"),
Input("btn-samples", "n_clicks"),
State("opt-datasets", "value"),
State("opt-groups", "value"),
State("opt-samples", "value"),
State('download_name', 'value'),
prevent_initial_call=True,
)
def download_samples(n_clicks,datasets, groups, samples, fileprefix):
selected_results_files, ids2labels=filter_samples(datasets=datasets,groups=groups, reps=samples, cache=cache)
results_files=selected_results_files[["Set","Group","Reps"]]
results_files.columns=["Set","Group","Sample"]
results_files=results_files.drop_duplicates()
fileprefix=secure_filename(str(fileprefix))
filename="%s.samples.xlsx" %fileprefix
return dcc.send_data_frame(results_files.to_excel, filename, sheet_name="samples", index=False)
@dashapp.callback(
Output("download-geneexp", "data"),
Input("btn-geneexp", "n_clicks"),
State("opt-datasets", "value"),
State("opt-groups", "value"),
State("opt-samples", "value"),
State("opt-genenames", "value"),
State("opt-geneids", "value"),
State('download_name', 'value'),
prevent_initial_call=True,
)
def download_geneexp(n_clicks,datasets, groups, samples, genenames, geneids, fileprefix):
selected_results_files, ids2labels=filter_samples(datasets=datasets,groups=groups, reps=samples, cache=cache)
gene_expression=filter_gene_expression(ids2labels,genenames,geneids,cache)
fileprefix=secure_filename(str(fileprefix))
filename="%s.gene_expression.xlsx" %fileprefix
return dcc.send_data_frame(gene_expression.to_excel, filename, sheet_name="gene exp.", index=False)
@dashapp.callback(
Output("download-dge", "data"),
Input("btn-dge", "n_clicks"),
State("opt-datasets", "value"),
State("opt-groups", "value"),
State("opt-samples", "value"),
State("opt-genenames", "value"),
State("opt-geneids", "value"),
State('download_name', 'value'),
prevent_initial_call=True,
)
def download_dge(n_clicks,datasets, groups, samples, genenames, geneids, fileprefix):
selected_results_files, ids2labels=filter_samples(datasets=datasets,groups=groups, reps=samples, cache=cache)
# gene_expression=filter_gene_expression(ids2labels,genenames,geneids,cache)
if not samples:
dge_datasets=list(set(selected_results_files["Set"]))
if len(dge_datasets) == 1 :
dge_groups=list(set(selected_results_files["Group"]))
if len(dge_groups) == 2:
dge=read_dge(dge_datasets[0], dge_groups, cache, html=False)
if genenames:
dge=dge[dge["gene name"].isin(genenames)]
if geneids:
dge=dge[dge["gene id"].isin(geneids)]
fileprefix=secure_filename(str(fileprefix))
filename="%s.dge.xlsx" %fileprefix
return dcc.send_data_frame(dge.to_excel, filename, sheet_name="dge", index=False)
@dashapp.callback(
Output(component_id='opt-datasets', component_property='options'),
Output(component_id='opt-genenames', component_property='options'),
Output(component_id='opt-geneids', component_property='options'),
<filename>Generate Files/template.py
#template.py module
import tkinter as tk
from tkinter import ttk
import tkinter.filedialog
from tkinter import messagebox
from pandas import concat, read_csv
from style import window_style, LayoutManager, check_size
from dragdrop import SourceDragManager, TargetDragManager
import misc
class Template(tk.Toplevel):
def __init__(self, master, conditions, abundance=None, localization=None, n=False, weightcutoff=0.5):
self.master = master
self.n = n
self.abundance = abundance
self.localization = localization
self.conditions = conditions
self.weightcutoff = weightcutoff
master.iconify()
tk.Toplevel.__init__(self, master)
self.populate_template()
window_style()
LM = LayoutManager(self, children = 1)
LM.apply_default_layout()
def populate_template(self):
self.files = tk.StringVar()
self.files.set('Choose file(s) to import')
#frames
self.frame = ttk.LabelFrame(self, text = 'Instructions', labelanchor = tk.N)
# label
self.instructions = ttk.Label(self.frame, text = '1. Click "Open" to choose the .csv or .txt file(s) with edge data you want to import.\n'
'2. Click "Expand All" to view the imported column headers.\n'
'3. Drag and drop the appropriate columns into the correct headers in the window to the right.\n'
'4. Click "Continue" once done to check that the information has been imported correctly.\n'
'\n'
'Available shortcuts and mouse combos for drag-and-drop: \n'
'Shift-LeftClick will select consecutive table items \n'
'Control-LeftClick will add items one by one to the selection \n'
'Backspace or delete will clear items from the tables \n'
'LeftClick and drag will attempt to copy a single item to the data table \n'
'RightClick or MiddleClick and drag will copy a group of items to consecutive cells on the other table \n'
'Items can only be copied unidirectionally from the import table to the export table \n'
'If you accidentally delete an item from the data table, re-copy it from the import table \n'
'If you accidentally delete an item from the import table, re-import the file and only the missing node(s) will be re-added to the table \n',
wraplength = 1000, justify = tk.LEFT)
# Entries
self.file_entry = ttk.Entry(self, textvariable = self.files)
# Treeview tables
# source file tree
self.file_tree = TreeviewMaker(self)
self.file_tree.config(columns = ['Column Number', 'Header'])
self.file_tree.column('#0', width = 150)
self.file_tree.heading('#0', text = 'click to sort', command = self.file_tree.sort_AZ)
self.file_tree.column('Column Number', anchor = tk.CENTER)
for col in self.file_tree['columns']:
self.file_tree.heading(col, text = col, command = lambda col=col: self.file_tree.hide_show_cols(col))
self.file_tree.column(col, width = 150)
self.file_tree.df_dict = {}
self.file_tree.moveable_nodes = []
# target file tree
self.target_tree = TreeviewMaker(self)
self.target_tree.config(columns = ['Source', 'File Name', 'Header', 'Column Number'])
self.target_tree.populate_target_tree(self.conditions, self.abundance, self.localization, self.n)
for col in self.target_tree['columns']:
self.target_tree.heading(col, text = col, command = lambda col=col: self.target_tree.hide_show_cols(col))
self.target_tree.column(col, width = 150)
self.target_tree['displaycolumns'] = ['File Name', 'Header']
self.target_tree.expand_all(self.target_tree.get_children()[0])
self.target_tree.tag_configure('curritem', background = 'sky blue')
# Buttons
self.open_button = ttk.Button(self, text = 'Open', command = lambda: self.file_tree.populate_file_tree(self.files))
self.file_tree_expand = ttk.Button(self, text = 'Expand All', command = self.file_tree.expand_all)
self.file_tree_collapse = ttk.Button(self, text = 'Collapse All', command = self.file_tree.collapse_all)
self.target_tree_expand = ttk.Button(self, text = 'Expand All', command = self.target_tree.expand_all)
self.target_tree_collapse = ttk.Button(self, text = 'Collapse All', command = self.target_tree.collapse_all)
self.GO_button = ttk.Button(self, text = 'Continue', command = lambda: self.ConfirmInput())
# layout
self.frame.grid(row=0, column=0, columnspan = 4, sticky = 'NSEW')
self.instructions.pack(side = tk.LEFT)
self.file_entry.grid(row = 1, column = 0, sticky = 'NSEW')
self.open_button.grid(row = 1, column = 1, sticky = 'NSEW')
self.file_tree.grid(row = 3, column = 0, columnspan = 2, rowspan =2, sticky = 'NSEW')
self.GO_button.grid(row = 1, column = 2, columnspan =2, sticky ='NSEW')
self.file_tree_expand.grid(row = 2, column = 0, sticky = 'NSEW')
self.file_tree_collapse.grid(row = 2, column = 1, sticky = 'NSEW')
self.target_tree_expand.grid(row = 2, column = 2, sticky = 'NSEW')
self.target_tree_collapse.grid(row = 2, column = 3, sticky = 'NSEW')
self.target_tree.grid(row=3, column=2, rowspan = 2, columnspan=2, sticky = 'NSEW')
# make the window and widgets scalable
for row in range(0, 4):
for col in range(0, 4):
self.rowconfigure(row, weight=1)
self.columnconfigure(col, weight=1)
self.frame.rowconfigure(0, weight=1)
for col in range(0, 4):
self.frame.columnconfigure(col, weight=1)
# make sure window size is not larger than the screen
check_size(self)
# configure dnd
sourceDND = SourceDragManager()
sourceDND.make_dragable(self.file_tree)
targetDND = TargetDragManager()
targetDND.make_dragable(self.target_tree)
#############################################################################
def ConfirmInput(self):
        '''Show the user how the program will import the data, so they can confirm it looks correct before continuing.'''
self.top = tk.Toplevel(self.master)
self.top.iconify()
if self.n:
self.nodes = self.get_tree_data()
# some sort of check here to make sure that all the nodes are represented in the nodes file
if self.nodes is not None:
# visualize nodes data in GUI
self.tree_from_df(self.nodes)
self.populate_confirm()
self.top.deiconify()
else:
self.top.destroy()
else:
self.edges = self.get_tree_data()
if self.edges is not None:
# visualize edges data in GUI
self.tree_from_df(self.edges)
self.populate_confirm()
self.top.deiconify()
else:
self.top.destroy()
def get_nodes(self, conditions):
# clear existing widgets
for wid in misc.all_children(self):
wid.destroy()
# variables
localization = tk.BooleanVar()
# labels
label2 = ttk.Label(self, text = 'Do you have localization data for your bait and preys?\nIf not, we will assign them using UniProt localization annotations', justify = tk.LEFT, wraplength=500)
# RadioButtons
radiobutton3 = tk.Radiobutton(self, text = 'Yes' , variable = localization, value = True, indicatoron=False)
radiobutton4 = tk.Radiobutton(self, text = 'No' , variable = localization, value = False, indicatoron=False)
# buttons
GO = ttk.Button(self, text = "Submit", command = lambda: self.repopulate_template(localization.get()))
# layout
label2.grid(row=1, column=0, sticky = tk.W, padx=5, pady=5)
radiobutton3.grid(row=1, column=1,ipadx=5, ipady=5, sticky = tk.N+tk.E+tk.S+tk.W)
radiobutton4.grid(row=1, column=2,ipadx=5, ipady=5, sticky = tk.N+tk.E+tk.S+tk.W)
GO.grid(row=2, column=1, columnspan=2, sticky=tk.N+tk.E+tk.S+tk.W)
def repopulate_template(self, loc):
self.localization = loc
self.populate_template()
def populate_confirm(self):
# Buttons
continue_button = ttk.Button(self.top, text = 'Continue', command = lambda: self.cont(self.top))
go_back_button = ttk.Button(self.top, text = 'Return to input template', command = self.destroy)
# Labels
l1 = ttk.Label(self.top, text = 'This is the data that the program will analyze, does it look correct?\n If so, click continue. \n Otherwise, click Return to input template to return to the previous screen.')
#scrollbar
scroll = ttk.Scrollbar(self.top, orient = tk.VERTICAL)
self.tree.config(yscrollcommand = scroll.set)
scroll.config(command = self.tree.yview)
x_scroll = ttk.Scrollbar(self.top, orient = tk.HORIZONTAL)
self.tree.config(xscrollcommand = x_scroll.set)
x_scroll.config(command = self.tree.xview)
# layout
l1.grid(row = 0, column = 0, columnspan = 2)
continue_button.grid(row=1, column = 1)
go_back_button.grid(row = 1, column = 0)
self.tree.grid(row = 2, column = 0, columnspan = 2)
scroll.grid(row = 2, column = 2)
x_scroll.grid(row=3, column=0, columnspan=2, sticky=tk.N+tk.E+tk.S+tk.W)
# styling
window_style()
LM = LayoutManager(self.top, children = True)
LM.apply_default_layout()
# make the window and widgets scalable
for row in range(0, 3):
for col in range(0, 2):
self.top.rowconfigure(row, weight=1)
self.top.columnconfigure(col, weight=1)
# make sure window size is not larger than the screen
check_size(self.top)
def get_tree_data(self):
dfs = []
ERR = None
ERR_node = []
for C in self.target_tree.get_children():
dfs_ = []
for node in self.target_tree.get_children(C):
x = self.target_tree.item(node)
vals = x['values']
if vals:
source = vals[0]
col = vals[3]-1
series = self.file_tree.df_dict[source].iloc[:, col]
series.name = node
dfs_.append(series)
else:
ERR = 1
ERR_node.append(node)
if len(dfs_)>0:
df = concat(dfs_, axis=1)
if self.n:
df = df.set_index(['Uniprot Accession #'])
else:
df = df.set_index(['Bait Uniprot Accession #_{}'.format(C), 'Prey Uniprot Accession #_{}'.format(C)])
df.index.names = ['Bait Uniprot Accession #', 'Prey Uniprot Accession #']
dfs.append(df)
if ERR:
messagebox.showerror("Error", "Please enter data for {}".format(str(ERR_node)))
return None
elif len(dfs)>0:
return concat(dfs, axis=1)
def tree_from_df(self, df):
df = df.reset_index()
tree = TreeviewMaker(self.top)
tree.config(columns = list(df.columns.values))
tree.column('#0', width=0)
for col in df.columns.values:
tree.heading(col, text = col)
tree.column(col, width = 200)
for row in range(0, df.shape[0]):
iid = tree.insert('', 'end')
for i, col in enumerate(df.columns.values):
tree.set(iid, col, value = df.iloc[row, i])
self.tree = tree
def cont(self, top):
if self.n:
self.nodes.to_csv('nodes.csv')
self.master.destroy()
else:
self.edges.to_csv('edges_{}.csv'.format(self.weightcutoff))
self.n = True
self.get_nodes(self.target_tree.get_children())
top.destroy()
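# ----------------------------------------------------------------------------
# Illustration only (hypothetical data, not used by the GUI): get_tree_data()
# above concatenates the selected source columns side by side and indexes the
# result on the bait/prey accession columns. A minimal standalone sketch of
# that pandas pattern:
def _edge_frame_demo():
    from pandas import Series, concat
    bait  = Series(['P1', 'P1'], name='Bait Uniprot Accession #')
    prey  = Series(['Q1', 'Q2'], name='Prey Uniprot Accession #')
    score = Series([12, 3], name='Spectral Counts')                # hypothetical column
    df = concat([bait, prey, score], axis=1)                       # columns side by side
    return df.set_index(['Bait Uniprot Accession #', 'Prey Uniprot Accession #'])
# ----------------------------------------------------------------------------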
class TreeviewMaker(ttk.Treeview): ## call the method from outside the class structure
    '''Creates Treeview widgets for the data-insertion template window.

    Methods: populate_target_tree, get_files, populate_file_tree, tree_to_df,
    expand_all, collapse_all, all_nodes, hide_show_cols, sort_AZ.
    '''
def __init__(self, master):
ttk.Treeview.__init__(self, master, height = 10)
self.master = master
def populate_target_tree(self, conditions, abundance, localization, n):
'''Creates an empty treeview defined by the user parameters provided for conditions, abundance (if | |
'61875269':{'en': 'Meningie East'},
'61875270':{'en': 'Mypolonga'},
'61875271':{'en': 'New Well'},
'61875272':{'en': 'Parndana'},
'61875273':{'en': 'Paruna'},
'61875274':{'en': 'Peebinga'},
'61875275':{'en': 'Penneshaw'},
'61875276':{'en': 'Robertstown'},
'61875277':{'en': 'Sanderston'},
'61875278':{'en': 'Sandleton'},
'61875279':{'en': 'Sedan'},
'61875280':{'en': 'Taldra'},
'61875281':{'en': 'Taplan'},
'61875282':{'en': 'Taylorville'},
'61875283':{'en': 'Truro'},
'61875284':{'en': '<NAME>'},
'61875285':{'en': 'Wanbi'},
'61875286':{'en': 'Wunkar'},
'61875287':{'en': 'Wynarka'},
'61875288':{'en': 'Yumali'},
'61875289':{'en': 'Riverton'},
'61875290':{'en': 'Barmera'},
'61875291':{'en': 'Kingscote'},
'618752920':{'en': 'Berri'},
'618752921':{'en': 'Blanchetown'},
'618752922':{'en': '<NAME>'},
'618752923':{'en': 'Bower'},
'618752924':{'en': 'Callington'},
'618752925':{'en': 'Coonalpyn'},
'618752926':{'en': 'Copeville'},
'618752927':{'en': 'Culburra'},
'618752928':{'en': 'Eudunda'},
'618752929':{'en': 'Geranium'},
'618752930':{'en': 'Goolwa'},
'618752931':{'en': 'Gurrai'},
'618752932':{'en': 'Halidon'},
'618752933':{'en': 'Hartley'},
'618752934':{'en': 'Kapunda'},
'618752935':{'en': 'Karatta'},
'618752936':{'en': 'Karoonda'},
'618752937':{'en': 'Kingscote'},
'618752938':{'en': 'Lameroo'},
'618752939':{'en': 'Langhorne Creek'},
'618752940':{'en': 'Lowbank'},
'618752941':{'en': 'Loxton'},
'618752942':{'en': 'Malinong'},
'618752943':{'en': 'Mallala'},
'618752944':{'en': 'Mannum'},
'618752945':{'en': 'Mantung'},
'618752946':{'en': 'Marama'},
'618752947':{'en': 'Meningie'},
'618752948':{'en': 'Meningie East'},
'618752949':{'en': 'Milang'},
'618752950':{'en': 'Morgan'},
'618752951':{'en': 'Mount Pleasant'},
'618752952':{'en': '<NAME>'},
'618752953':{'en': 'Mypolonga'},
'618752954':{'en': 'Narrung'},
'618752955':{'en': 'New Well'},
'618752956':{'en': 'Parndana'},
'618752957':{'en': 'Paruna'},
'618752958':{'en': 'Peake'},
'618752959':{'en': 'Peebinga'},
'618752960':{'en': 'Penneshaw'},
'618752961':{'en': 'Perponda'},
'618752962':{'en': 'Pinnaroo'},
'618752963':{'en': '<NAME>'},
'618752964':{'en': 'Renmark'},
'618752965':{'en': 'Robertstown'},
'618752966':{'en': 'Sanderston'},
'618752967':{'en': 'Sandleton'},
'618752968':{'en': 'Sedan'},
'618752969':{'en': 'Strathalbyn'},
'618752970':{'en': '<NAME>'},
'618752971':{'en': '<NAME>'},
'618752972':{'en': 'Taldra'},
'618752973':{'en': 'Tanunda'},
'618752974':{'en': 'Taplan'},
'618752975':{'en': 'Taylorville'},
'618752976':{'en': 'Truro'},
'618752977':{'en': '<NAME>'},
'618752978':{'en': '<NAME>'},
'618752979':{'en': 'Waikerie'},
'618752980':{'en': '<NAME>'},
'618752981':{'en': 'Wanbi'},
'618752982':{'en': 'Willunga'},
'618752983':{'en': 'Windsor'},
'618752984':{'en': 'Wunkar'},
'618752985':{'en': 'Wynarka'},
'618752986':{'en': 'Yankalilla'},
'618752987':{'en': 'Yumali'},
'618752988':{'en': 'Barmera'},
'618752989':{'en': 'Berri'},
'61875299':{'en': 'Yankalilla'},
'618753000':{'en': 'Blanchetown'},
'618753001':{'en': 'Bow Hill'},
'618753002':{'en': 'Bower'},
'618753003':{'en': 'Callington'},
'618753004':{'en': 'Coonalpyn'},
'618753005':{'en': 'Copeville'},
'618753006':{'en': 'Culburra'},
'618753007':{'en': 'Eudunda'},
'618753008':{'en': 'Geranium'},
'618753009':{'en': 'Goolwa'},
'618753010':{'en': 'Gurrai'},
'618753011':{'en': 'Halidon'},
'618753012':{'en': 'Hartley'},
'618753013':{'en': 'Kapunda'},
'618753014':{'en': 'Karatta'},
'618753015':{'en': 'Karoonda'},
'618753016':{'en': 'Kingscote'},
'618753017':{'en': 'Lameroo'},
'618753018':{'en': 'Langhorne Creek'},
'618753019':{'en': 'Lowbank'},
'618753020':{'en': 'Loxton'},
'618753021':{'en': 'Malinong'},
'618753022':{'en': 'Mallala'},
'618753023':{'en': 'Mannum'},
'618753024':{'en': 'Mantung'},
'618753025':{'en': 'Marama'},
'618753026':{'en': 'Meningie'},
'618753027':{'en': 'Meningie East'},
'618753028':{'en': 'Milang'},
'618753029':{'en': 'Morgan'},
'618753030':{'en': 'Mount Pleasant'},
'618753031':{'en': 'Murray Bridge'},
'618753032':{'en': 'Mypolonga'},
'618753033':{'en': 'Narrung'},
'618753034':{'en': 'New Well'},
'618753035':{'en': 'Parndana'},
'618753036':{'en': 'Paruna'},
'618753037':{'en': 'Peake'},
'618753038':{'en': 'Peebinga'},
'618753039':{'en': 'Penneshaw'},
'618753040':{'en': 'Perponda'},
'618753041':{'en': 'Pinnaroo'},
'618753042':{'en': '<NAME>'},
'618753043':{'en': 'Renmark'},
'618753044':{'en': 'Robertstown'},
'618753045':{'en': 'Sanderston'},
'618753046':{'en': 'Sandleton'},
'618753047':{'en': 'Sedan'},
'618753048':{'en': 'Strathalbyn'},
'618753049':{'en': '<NAME>'},
'618753050':{'en': '<NAME>'},
'618753051':{'en': 'Taldra'},
'618753052':{'en': 'Tanunda'},
'618753053':{'en': 'Taplan'},
'618753054':{'en': 'Taylorville'},
'618753055':{'en': 'Truro'},
'618753056':{'en': '<NAME>'},
'618753057':{'en': '<NAME>'},
'618753058':{'en': 'Waikerie'},
'618753059':{'en': '<NAME>'},
'61875306':{'en': 'Gawler'},
'61875307':{'en': 'Gawler'},
'61875308':{'en': 'Berri'},
'618753090':{'en': 'Wanbi'},
'618753091':{'en': 'Willunga'},
'618753092':{'en': 'Windsor'},
'618753093':{'en': 'Wunkar'},
'618753094':{'en': 'Wynarka'},
'618753095':{'en': 'Yankalilla'},
'618753096':{'en': 'Yumali'},
'618753097':{'en': 'Barmera'},
'618753098':{'en': 'Berri'},
'618753099':{'en': 'Blanchetown'},
'618753100':{'en': '<NAME>'},
'618753101':{'en': 'Bower'},
'618753102':{'en': 'Callington'},
'618753103':{'en': 'Coonalpyn'},
'618753104':{'en': 'Copeville'},
'618753105':{'en': 'Culburra'},
'618753106':{'en': 'Eudunda'},
'618753107':{'en': 'Geranium'},
'618753108':{'en': 'Goolwa'},
'618753109':{'en': 'Gurrai'},
'61875311':{'en': '<NAME>'},
'61875312':{'en': 'Tanunda'},
'618753130':{'en': 'Halidon'},
'618753131':{'en': 'Hartley'},
'618753132':{'en': 'Kapunda'},
'618753133':{'en': 'Karatta'},
'618753134':{'en': 'Karoonda'},
'618753135':{'en': 'Kingscote'},
'618753136':{'en': 'Lameroo'},
'618753137':{'en': '<NAME>'},
'618753138':{'en': 'Lowbank'},
'618753139':{'en': 'Loxton'},
'618753140':{'en': 'Malinong'},
'618753141':{'en': 'Mallala'},
'618753142':{'en': 'Mannum'},
'618753143':{'en': 'Mantung'},
'618753144':{'en': 'Marama'},
'618753145':{'en': 'Meningie'},
'618753146':{'en': 'Barmera'},
'618753147':{'en': 'Berri'},
'618753148':{'en': 'Blanchetown'},
'618753149':{'en': '<NAME>'},
'618753150':{'en': 'Bower'},
'618753151':{'en': 'Callington'},
'618753152':{'en': 'Coonalpyn'},
'618753153':{'en': 'Copeville'},
'618753154':{'en': 'Culburra'},
'618753155':{'en': 'Eudunda'},
'618753156':{'en': 'Geranium'},
'618753157':{'en': 'Goolwa'},
'618753158':{'en': 'Gurrai'},
'618753159':{'en': 'Halidon'},
'618753160':{'en': 'Hartley'},
'618753161':{'en': 'Kapunda'},
'618753162':{'en': 'Karatta'},
'618753163':{'en': 'Karoonda'},
'618753164':{'en': 'Kingscote'},
'618753165':{'en': 'Lameroo'},
'618753166':{'en': 'Langhorne Creek'},
'618753167':{'en': 'Lowbank'},
'618753168':{'en': 'Loxton'},
'618753169':{'en': 'Malinong'},
'618753170':{'en': 'Mallala'},
'618753171':{'en': 'Mannum'},
'618753172':{'en': 'Mantung'},
'618753173':{'en': 'Marama'},
'618753174':{'en': 'Meningie'},
'618753175':{'en': 'Meningie East'},
'618753176':{'en': 'Milang'},
'618753177':{'en': 'Morgan'},
'618753178':{'en': 'Mount Pleasant'},
'618753179':{'en': 'Murray Bridge'},
'618753180':{'en': 'Mypolonga'},
'618753181':{'en': 'Narrung'},
'618753182':{'en': 'New Well'},
'618753183':{'en': 'Parndana'},
'618753184':{'en': 'Paruna'},
'618753185':{'en': 'Peake'},
'618753186':{'en': 'Peebinga'},
'618753187':{'en': 'Penneshaw'},
'618753188':{'en': 'Perponda'},
'618753189':{'en': 'Pinnaroo'},
'618753190':{'en': '<NAME>'},
'618753191':{'en': 'Renmark'},
'618753192':{'en': 'Robertstown'},
'618753193':{'en': 'Sanderston'},
'618753194':{'en': 'Sandleton'},
'618753195':{'en': 'Sedan'},
'618753196':{'en': 'Strathalbyn'},
'618753197':{'en': '<NAME>'},
'618753198':{'en': '<NAME>'},
'618753199':{'en': 'Taldra'},
'618753200':{'en': 'Tanunda'},
'618753201':{'en': 'Taplan'},
'618753202':{'en': 'Taylorville'},
'618753203':{'en': 'Truro'},
'618753204':{'en': '<NAME>'},
'618753205':{'en': '<NAME>'},
'618753206':{'en': 'Waikerie'},
'618753207':{'en': '<NAME>'},
'618753208':{'en': 'Wanbi'},
'618753209':{'en': 'Willunga'},
'618753210':{'en': 'Windsor'},
'618753211':{'en': 'Wunkar'},
'618753212':{'en': 'Wynarka'},
'618753213':{'en': 'Yankalilla'},
'618753214':{'en': 'Yumali'},
'618753215':{'en': 'Meningie East'},
'618753216':{'en': 'Milang'},
'618753217':{'en': 'Morgan'},
'618753218':{'en': 'Mount Pleasant'},
'618753219':{'en': '<NAME>'},
'61875322':{'en': 'Lameroo'},
'61875323':{'en': 'Lameroo'},
'61875324':{'en': 'Lameroo'},
'61875325':{'en': '<NAME>'},
'61875326':{'en': '<NAME>'},
'61875327':{'en': '<NAME>'},
'618753280':{'en': 'Mypolonga'},
'618753281':{'en': 'Narrung'},
'618753282':{'en': '<NAME>'},
'618753283':{'en': 'Parndana'},
'618753284':{'en': 'Paruna'},
'618753285':{'en': 'Peake'},
'618753286':{'en': 'Peebinga'},
'618753287':{'en': 'Penneshaw'},
'618753288':{'en': 'Perponda'},
'618753289':{'en': 'Pinnaroo'},
'618753290':{'en': '<NAME>'},
'618753291':{'en': 'Renmark'},
'618753292':{'en': 'Robertstown'},
'618753293':{'en': 'Sanderston'},
'618753294':{'en': 'Sandleton'},
'618753295':{'en': 'Sedan'},
'618753296':{'en': 'Strathalbyn'},
'618753297':{'en': '<NAME>'},
'618753298':{'en': '<NAME>'},
'618753299':{'en': 'Taldra'},
'618753300':{'en': 'Tanunda'},
'618753301':{'en': 'Taplan'},
'618753302':{'en': 'Taylorville'},
'618753303':{'en': 'Truro'},
'618753304':{'en': '<NAME>'},
'618753305':{'en': '<NAME>'},
'618753306':{'en': 'Waikerie'},
'618753307':{'en': '<NAME>'},
'618753308':{'en': 'Wanbi'},
'618753309':{'en': 'Willunga'},
'618753310':{'en': 'Windsor'},
'618753311':{'en': 'Wunkar'},
'618753312':{'en': 'Wynarka'},
'618753313':{'en': 'Yankalilla'},
'618753314':{'en': 'Yumali'},
'618753315':{'en': 'Barmera'},
'618753316':{'en': 'Berri'},
'618753317':{'en': 'Blanchetown'},
'618753318':{'en': 'Bow Hill'},
'618753319':{'en': 'Bower'},
'618753320':{'en': 'Callington'},
'618753321':{'en': 'Coonalpyn'},
'618753322':{'en': 'Copeville'},
'618753323':{'en': 'Culburra'},
'618753324':{'en': 'Eudunda'},
'618753325':{'en': 'Geranium'},
'618753326':{'en': 'Goolwa'},
'618753327':{'en': 'Gurrai'},
'618753328':{'en': 'Halidon'},
'618753329':{'en': 'Hartley'},
'618753330':{'en': 'Kapunda'},
'618753331':{'en': 'Karatta'},
'618753332':{'en': 'Karoonda'},
'618753333':{'en': 'Kingscote'},
'618753334':{'en': 'Lameroo'},
'618753335':{'en': 'Langhorne Creek'},
'618753336':{'en': 'Lowbank'},
'618753337':{'en': 'Loxton'},
'618753338':{'en': 'Malinong'},
'618753339':{'en': 'Mallala'},
'618753340':{'en': 'Mannum'},
'618753341':{'en': 'Mantung'},
'618753342':{'en': 'Marama'},
'618753343':{'en': 'Meningie'},
'618753344':{'en': 'Meningie East'},
'618753345':{'en': 'Milang'},
'618753346':{'en': 'Morgan'},
'618753347':{'en': '<NAME>'},
'618753348':{'en': '<NAME>'},
'618753349':{'en': 'Mypolonga'},
'618753350':{'en': 'Narrung'},
'618753351':{'en': '<NAME>'},
'618753352':{'en': 'Parndana'},
'618753353':{'en': 'Paruna'},
'618753354':{'en': 'Peake'},
'618753355':{'en': 'Peebinga'},
'618753356':{'en': 'Penneshaw'},
'618753357':{'en': 'Perponda'},
'618753358':{'en': 'Pinnaroo'},
'618753359':{'en': '<NAME>'},
'618753360':{'en': 'Renmark'},
'618753361':{'en': 'Robertstown'},
'618753362':{'en': 'Sanderston'},
'618753363':{'en': 'Sandleton'},
'618753364':{'en': 'Sedan'},
'618753365':{'en': 'Strathalbyn'},
'618753366':{'en': '<NAME>'},
'618753367':{'en': '<NAME>'},
'618753368':{'en': 'Taldra'},
'618753369':{'en': 'Tanunda'},
'618753370':{'en': 'Taplan'},
'618753371':{'en': 'Taylorville'},
'618753372':{'en': 'Truro'},
'618753373':{'en': '<NAME>'},
'618753374':{'en': '<NAME>'},
'618753375':{'en': 'Waikerie'},
'618753376':{'en': '<NAME>'},
'618753377':{'en': 'Wanbi'},
'618753378':{'en': 'Willunga'},
'618753379':{'en': 'Windsor'},
'618753380':{'en': 'Wunkar'},
'618753381':{'en': 'Wynarka'},
'618753382':{'en': 'Yankalilla'},
'618753383':{'en': 'Yumali'},
'618753384':{'en': 'Barmera'},
'618753385':{'en': 'Berri'},
'618753386':{'en': 'Blanchetown'},
'618753387':{'en': '<NAME>'},
'618753388':{'en': 'Bower'},
'618753389':{'en': 'Callington'},
'618753390':{'en': 'Coonalpyn'},
'618753391':{'en': 'Copeville'},
'618753392':{'en': 'Culburra'},
'618753393':{'en': 'Eudunda'},
'618753394':{'en': 'Geranium'},
'618753395':{'en': 'Goolwa'},
'618753396':{'en': 'Gurrai'},
'618753397':{'en': 'Halidon'},
'618753398':{'en': 'Hartley'},
'618753399':{'en': 'Kapunda'},
'618753400':{'en': 'Karatta'},
'618753401':{'en': 'Karoonda'},
'618753402':{'en': 'Kingscote'},
'618753403':{'en': 'Lameroo'},
'618753404':{'en': 'Langhorne Creek'},
'618753405':{'en': 'Lowbank'},
'618753406':{'en': 'Loxton'},
'618753407':{'en': 'Malinong'},
'618753408':{'en': 'Mallala'},
'618753409':{'en': 'Mannum'},
'618753410':{'en': 'Mantung'},
'618753411':{'en': 'Marama'},
'618753412':{'en': 'Meningie'},
'618753413':{'en': 'Meningie East'},
'618753414':{'en': 'Milang'},
'618753415':{'en': 'Morgan'},
'618753416':{'en': 'Mount Pleasant'},
'618753417':{'en': '<NAME>'},
'618753418':{'en': 'Mypolonga'},
'618753419':{'en': 'Narrung'},
'618753420':{'en': 'New Well'},
'618753421':{'en': 'Parndana'},
'618753422':{'en': 'Paruna'},
'618753423':{'en': 'Peake'},
'618753424':{'en': 'Peebinga'},
'618753425':{'en': 'Penneshaw'},
'618753426':{'en': 'Perponda'},
'618753427':{'en': 'Pinnaroo'},
'618753428':{'en': '<NAME>'},
'618753429':{'en': 'Renmark'},
'618753430':{'en': 'Robertstown'},
'618753431':{'en': 'Sanderston'},
'618753432':{'en': 'Sandleton'},
'618753433':{'en': 'Sedan'},
'618753434':{'en': 'Strathalbyn'},
'618753435':{'en': '<NAME>'},
'618753436':{'en': '<NAME>'},
'618753437':{'en': 'Taldra'},
'618753438':{'en': 'Tanunda'},
'618753439':{'en': 'Taplan'},
'618753440':{'en': 'Taylorville'},
'618753441':{'en': 'Truro'},
'618753442':{'en': '<NAME>'},
'618753443':{'en': '<NAME>'},
'618753444':{'en': 'Waikerie'},
'618753445':{'en': '<NAME>'},
'618753446':{'en': 'Wanbi'},
'618753447':{'en': 'Willunga'},
'618753448':{'en': 'Windsor'},
'618753449':{'en': 'Wunkar'},
'618753450':{'en': 'Wynarka'},
'618753451':{'en': 'Yankalilla'},
'618753452':{'en': 'Yumali'},
'618753453':{'en': 'Barmera'},
'618753454':{'en': 'Berri'},
'618753455':{'en': 'Blanchetown'},
'618753456':{'en': 'Bow Hill'},
'618753457':{'en': 'Bower'},
'618753458':{'en': 'Callington'},
'618753459':{'en': 'Coonalpyn'},
'61875346':{'en': '<NAME>'},
'618753470':{'en': 'Copeville'},
'618753471':{'en': 'Culburra'},
'618753472':{'en': 'Eudunda'},
'618753473':{'en': 'Geranium'},
'618753474':{'en': 'Goolwa'},
'618753475':{'en': 'Gurrai'},
'618753476':{'en': 'Halidon'},
'618753477':{'en': 'Hartley'},
'618753478':{'en': 'Kapunda'},
'618753479':{'en': 'Karatta'},
'618753480':{'en': 'Karoonda'},
'618753481':{'en': 'Kingscote'},
'618753482':{'en': 'Lameroo'},
'618753483':{'en': 'Langhorne Creek'},
'618753484':{'en': 'Lowbank'},
'618753485':{'en': 'Loxton'},
'618753486':{'en': 'Malinong'},
'618753487':{'en': 'Mallala'},
'618753488':{'en': 'Mannum'},
'618753489':{'en': 'Mantung'},
'618753490':{'en': 'Marama'},
'618753491':{'en': 'Meningie'},
'618753492':{'en': 'Meningie East'},
'618753493':{'en': 'Milang'},
'618753494':{'en': 'Morgan'},
'618753495':{'en': 'Mount Pleasant'},
'618753496':{'en': 'Murray Bridge'},
'618753497':{'en': 'Mypolonga'},
'618753498':{'en': 'Narrung'},
'618753499':{'en': 'New Well'},
'618753500':{'en': 'Parndana'},
'618753501':{'en': 'Paruna'},
'618753502':{'en': 'Peake'},
'618753503':{'en': 'Peebinga'},
'618753504':{'en': 'Penneshaw'},
'618753505':{'en': 'Perponda'},
'618753506':{'en': 'Pinnaroo'},
'618753507':{'en': '<NAME>'},
'618753508':{'en': 'Renmark'},
'618753509':{'en': 'Robertstown'},
'618753510':{'en': 'Sanderston'},
'618753511':{'en': 'Sandleton'},
'618753512':{'en': 'Sedan'},
'618753513':{'en': 'Strathalbyn'},
'618753514':{'en': '<NAME>'},
'618753515':{'en': '<NAME>'},
'618753516':{'en': 'Taldra'},
'618753517':{'en': 'Tanunda'},
'618753518':{'en': 'Taplan'},
'618753519':{'en': 'Taylorville'},
'618753520':{'en': 'Truro'},
'618753521':{'en': '<NAME>'},
'618753522':{'en': '<NAME>'},
'618753523':{'en': 'Waikerie'},
'618753524':{'en': '<NAME>'},
'618753525':{'en': 'Wanbi'},
'618753526':{'en': 'Willunga'},
'618753527':{'en': 'Windsor'},
'618753528':{'en': 'Wunkar'},
'618753529':{'en': 'Wynarka'},
'618753530':{'en': 'Yankalilla'},
'618753531':{'en': 'Yumali'},
'61875354':{'en': 'Kapunda'},
'61875360':{'en': 'Callington'},
'61875379':{'en': '<NAME>'},
'61875390':{'en': 'Meningie'},
'61875409':{'en': '<NAME>'},
'61875420':{'en': '<NAME>'},
'61875433':{'en': 'Mannum'},
'61875550':{'en': '<NAME>'},
'61875551':{'en': 'Windsor'},
'61875552':{'en': 'Willunga'},
'61875553':{'en': 'Berri'},
'61875554':{'en': 'Berri'},
'61875555':{'en': 'Berri'},
'61875556':{'en': 'Gawler'},
'61875557':{'en': 'Wunkar'},
'61875558':{'en': 'Wunkar'},
'61875559':{'en': 'Paruna'},
'61875560':{'en': 'Paruna'},
'61875561':{'en': 'Taldra'},
'61875562':{'en': 'Taldra'},
'61875563':{'en': 'Taplan'},
'61875564':{'en': 'Taplan'},
'61875565':{'en': 'Wanbi'},
'61875566':{'en': 'Wanbi'},
'61875567':{'en': 'Copeville'},
'61875568':{'en': 'Copeville'},
'61875569':{'en': 'Geranium'},
'61875570':{'en': 'Geranium'},
'61875571':{'en': 'Gurrai'},
'61875572':{'en': 'Gurrai'},
'61875573':{'en': 'Mount Pleasant'},
'61875574':{'en': 'Langhorne Creek'},
'61875575':{'en': 'Freeling'},
'61875576':{'en': 'Callington'},
'61875577':{'en': 'Gawler'},
'61875578':{'en': '<NAME>'},
'61875579':{'en': 'Gawler'},
'6187558':{'en': 'Gawler'},
'6187559':{'en': '<NAME>'},
'61875998':{'en': 'Willunga'},
'61875999':{'en': 'Strathalbyn'},
'61876000':{'en': 'Ceduna'},
'61876001':{'en': 'Coorabie'},
'61876002':{'en': 'Courela'},
'61876003':{'en': '<NAME>'},
'61876004':{'en': 'Mudamuckla'},
'61876005':{'en': 'Nunjikompita'},
'61876006':{'en': 'Penong'},
'61876007':{'en': 'Poochera'},
'61876008':{'en': 'Port Kenny'},
'61876009':{'en': 'Streaky Bay'},
'61876010':{'en': 'Wirrulla'},
'61876011':{'en': 'Everard'},
'61876012':{'en': 'Indian Pacific'},
'61876013':{'en': 'Marla'},
'61876014':{'en': 'Mintabie'},
'61876015':{'en': 'Booleroo Centre'},
'61876016':{'en': 'Caltowie'},
'61876017':{'en': 'Georgetown'},
'61876018':{'en': 'Gladstone'},
'61876019':{'en': 'Jamestown'},
'61876020':{'en': 'Laura'},
'61876021':{'en': 'Melrose'},
'61876022':{'en': 'Willowie'},
'61876023':{'en': 'Wilmington'},
'61876024':{'en': 'Carrieton'},
'61876025':{'en': 'Herbert'},
'61876026':{'en': 'Morchard'},
'61876027':{'en': 'Orroroo'},
'61876028':{'en': 'Peterborough'},
'61876029':{'en': 'Terowie'},
'61876030':{'en': 'Yunta'},
'61876031':{'en': 'Hawker'},
'61876032':{'en': '<NAME>'},
'61876033':{'en': '<NAME>'},
'61876034':{'en': 'Kelly'},
'61876035':{'en': 'Kimba'},
'61876036':{'en': '<NAME>'},
'61876037':{'en': 'Quorn'},
'61876038':{'en': 'The Ranges'},
'61876039':{'en': 'Whyalla'},
'61876040':{'en': 'Wilpena'},
'61876041':{'en': '<NAME>'},
'61876042':{'en': '<NAME>'},
'61876043':{'en': 'Cleve'},
'61876044':{'en': '<NAME>'},
'61876045':{'en': 'Coulta'},
'61876046':{'en': 'Cowell'},
'61876047':{'en': 'Cummins'},
'61876048':{'en': '<NAME>'},
'61876049':{'en': 'Elliston'},
'61876050':{'en': 'Kapinnie'},
'61876051':{'en': 'Koongawa'},
'61876052':{'en': 'Koppio'},
'61876053':{'en': 'Kyancutta'},
'61876054':{'en': 'Lock'},
'61876055':{'en': 'Miltalie'},
'61876056':{'en': 'Minnipa'},
'61876057':{'en': '<NAME>'},
'61876058':{'en': '<NAME>'},
'61876059':{'en': '<NAME>'},
'61876060':{'en': 'Rudall'},
'61876061':{'en': | |
# not (A | B) == not A & not B
# not (A & B) == not A | not B
def primitive_datatypes():
# int, 32 bit 10
# long Integer > 32 bits 10L (Py2 only)
# float 10.0
# complex 1.2j
# bool True, False
# str "mystr"
# tuple (immutable sequence) (2,4,7)
# list (mutable sequence) [2,x,3.1]
# dict (Mapping) {x:2, y:2}
pass
def math_ops():
# a OP b, OP in +, -, *, **, %, >, <=, >=, !=, ==
# Note fractions:
4 / 3 # Py3: 1.333... (true division) Py2: 1 (floor division)
4.0 / 3.0 # = 1.3333 # float division, same as float(4) / float(3)
4.0 // 3.0 # = 1.0 # integer division
# print (10.0 / 4.0) # = 2.5
# print (2 ** 3) # = 8 (exponential, to power of)
# print (11 % 5) # = 1 (mod)
def incrementing():
# You can't i++, instead:
i = 0
i += 1 # ref: https://stackoverflow.com/questions/2632677/python-integer-incrementing-with/2632687#2632687
def fractions():
from fractions import Fraction
f = Fraction(10, 5) # Fraction(2, 1)
# f = f1 OP f2 OP + * etc..
f.numerator
f.denominator
f.conjugate()
# Ref: https://docs.python.org/2/library/fractions.html
# Getting median value from a sorted array
from math import ceil, floor
def median(L):
if len(L) % 2 == 1:
med_i = ((len(L) + 1) / 2) - 1
return L[int(med_i)]
else:
med_i = ((len(L) + 1.0) / 2.0) - 1
left = int(floor(med_i))
right = int(ceil(med_i))
return (L[left] + L[right]) / 2.0
# Test:
for i in [1, 2, 3, 4, 5]:
L = list(range(i))
med = median(L)
print(L, med)
def Loops():
# (for | while) can contain (break | continue)
def while_loop():
while True:
print("infLoop")
pass
def for_loops():
# for VAR in <ITERABLE>:
# stmt
for i in [1, 2, 3, 4]:
print((i * i))
def for_loop_reverse():
# To traverse a list in reverse, can use negative range
L = [1, 2, 3, 4]
for i in range(len(L) - 1, -1, -1):
print(L[i])
def for_loop_with_else():
# "else" part in a for loop is executed if a break never occured. As a "last" entry.
mystr = "helloMoto123"
for s in mystr:
if s.isdigit():
print(s)
break
else:
print("no digit found")
# -> 1
def for_enumerate():
for i, w in enumerate(['a', 'b', 'c'], 1): # starting at 1 instead of 0
print(i, w)
# 1 a
# 2 b
# 3 c
def while_else_break():
# When to use 'else' in a while loop?
# Else is executed if while loop did not break.
# I kinda like to think of it with a 'runner' metaphor.
# The "else" is like crossing the finish line, irrelevant of whether you started at the beginning or end of the track. "else" is only not executed if you break somewhere in between.
runner_at = 0 # or 10 makes no difference, if unlucky_sector is not 0-10
unlucky_sector = 6
while runner_at < 10:
print("Runner at: ", runner_at)
if runner_at == unlucky_sector:
print("Runner fell and broke his foot. Will not reach finish.")
break
runner_at += 1
else:
print("Runner has finished the race!") # Not executed if runner broke his foot.
# ref: https://stackoverflow.com/questions/3295938/else-clause-on-python-while-statement/57247169#57247169
# E.g. breaking out of a nested loop # LTAG
for i in [1, 2, 3]:
for j in ['a', 'unlucky', 'c']:
print(i, j)
if j == 'unlucky':
break
else:
continue # Only executed if inner loop didn't break.
break # Only reached if the inner loop broke out, since 'continue' didn't run.
print("Finished")
# 1 a
# 1 unlucky
# Finished
def lambda_map_filter_reduce():
numbers = [1, 2, 3, 4]
# Lambda Map
double = lambda x: x + x
numbers_doubled = list(map(double, numbers)) # -> [2, 4, 6, 8]
# Problem: https://www.hackerrank.com/challenges/map-and-lambda-expression/problem
# Pass lambda around to functions
def pass_lambda(lambdaFunc, i):
return lambdaFunc(i)
print(pass_lambda(lambda x: x + 1, 1))
# Lambda filter
keep_even = lambda x: x % 2 == 0
numbers_filtered = list(filter(keep_even, numbers)) # ref: https://stackabuse.com/lambda-functions-in-python/
# Reduce
# Concept: reduce a sequence to a single value by applying the function cumulatively, left to right.
# In Python 3, reduce must be imported:
from functools import reduce
reduce(lambda x, y: x + y, [1, 2, 3]) #-> 6
reduce(lambda x, y: x + y, [1, 2, 3], -1) # -> 5 # with an initial value.
class String_Theory:
def string_comparison(self):
"leo" == "leo" # Equality. Preferred.
"leo" is "leo" # identity. id("leo"). Work, but equality generally preffered.
# https://stackoverflow.com/questions/2988017/string-comparison-in-python-is-vs
def string_multiline(self):
# -) For multi-line strings, indentation is kept as-is. You can use '\' to suppress a line break though.
ml = """\
line
hello
world\
"""
# -) Or manually join:
var = ("implicitly\n"
"Joined\n"
"string\n")
# ref: https://stackoverflow.com/questions/2504411/proper-indentation-for-python-multiline-strings
def string_splicing(self):
myString = "RedHat"
# Mnemonic: str[from:till)
myString[1:] # edHat
myString[:2] # Re
myString[0] # R # indexing starts at 0
def spring_splitting(self):
# Splitting & Joining:
# Note: Delimiter matters. By default, spaces are removed.
# "A B".split() #-> ["A","B"]
# "A B".split(" ") #-> ["A", "", "", "B"]
mystr = "hello world how are you"
mySplitStrList = mystr.split(" ") # delimiter. -> ["hello", "world" ...
concatenated_by_delimiter = "-".join(mySplitStrList) # -> "hello-world-how-.... #ref: https://www.programiz.com/python-programming/methods/string/join
# See also re.split()
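# A minimal re.split() sketch for comparison (assumes only the standard 're' module; the pattern below splits on runs of non-word characters):
import re
re.split(r'\W+', "hello, world  how-are you") # -> ['hello', 'world', 'how', 'are', 'you']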
def string_reversal(self):
"leo"[::-1] #-> "ih" #reverser string #anki-todo
# ==
"".join(reversed(list("leo")))
def raw_strings(self):
# prefix 'r' to ignore escape sequences.
print("hello\nworld")
#hello
#world
print(r"hello\nworld")
#hello\nworld
# \n new line (other '\<char>' sequences are hazardous in non-raw strings)
# \t tab
# \\ backslash
# \' \"
# ref: https://docs.python.org/2.0/ref/strings.html
def string_concatenation(self):
# " ".join(..) has effective run-time. http://blog.mclemon.io/python-efficient-string-concatenation-in-python-2016-edition
# delimiter.join([str1,str2,str3])
" ".join(["hello", "world", "war", "Z"]) #'hello world war Z'
"".join(["hello ", "world ", "war ", "Z"]) #'hello world war Z'
" ".join(map(str, [1,2,3])) # useful to print data structures.
# For building some pretty output, consider string formatting:
self.string_formatting()
def string_formatting(self):
# 2 methods:
# - .format(): Python >= 2.6. Better support for native data structures (dict etc.); use for new code.
# - % == Old C-Printf style. Discouraged due to quirky behaviour but not deprecated.
# Manual or Automatic field numbering:
"Welcome {} {}!".format("Bob", "Young")
# "{arg_index} | {arg_name}".format([args])
"{0} {0} {1} {2} {named}".format("hello", "world", 99, named="NamedArg")
'hello hello world 99 NamedArg'
# (!) DANGER: padding with None can throw.
"{}".format(None) # OK.
"{:<5}".format(None) # (!) Throws TypeError: unsupported format string passed to NoneType.__format__
"{:<5}".format(str(None)) # OK
# Padding: # {: Filler_Char | > | ^ | < NUMS}
"{:>10}".format("Hello") # {:>10} Pad Right 10 Note {:XX} == {:>XX}
' Hello'
"{:<10}".format("Hello") # {:<10} Pad Left 10
'Hello '
"{:_>10}".format("Hello") # Pad with character.
'_____Hello'
"{:_^10}".format("Hello") # Center Align.
'__Hello___'
# Truncating ".len" like str[:2]
"{:.2}".format("Hello")
'He'
# Numbers:
"Number:{:d}".format(42) # d = int. f = float.
'Number:42'
# Numbers: Float, truncate last digits
"{:.1f}".format(1.134) #-> 1.1
# Numbers with padding: (min, but prints full number if len(str) > min)
"Lucky Number:{:3d}".format(42)
'Lucky Number: 42'
"{:<10.2f}".format(233.146) # Truncate by 2 and pad 10. {
'233.15 '
# For signs & signs + padding, see ref 1.
# Dictionary
d = {"leo": 31, "bob": 55}
"{d[bob]} {d[leo]}".format(d=d)
'55 31'
# List
"{l[0]} {l[1]}".format(l=['a','b'])
'a b'
# Parametrized Format:
"{:{padding}.{truncate}f}".format(3.14632, padding=10, truncate=2)
'      3.15'
# Also: DateTime formatting (see the datetime class)
# Refs:
# [1] Lots of examples: https://pyformat.info/
# [1] Which String Formatting method to use?: https://realpython.com/python-string-formatting
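# For comparison, a small sketch of the old C-printf '%' style mentioned above (same results, different syntax):
"Welcome %s %s!" % ("Bob", "Young") # 'Welcome Bob Young!'
"Number: %d, Float: %.1f" % (42, 1.134) # 'Number: 42, Float: 1.1'
"%(name)s is %(age)d" % {"name": "leo", "age": 31} # 'leo is 31'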
def string_update_char(self):
# Strings are immutable. To update one:
mystr = "absde"
# 1) Turn into a list. Manipulate list.
strlist = list(mystr)
strlist[2] = "c"
mystr2 = "".join(strlist)
print(mystr2)
# Src: https://stackoverflow.com/questions/19926089/python-equivalent-of-java-stringbuffer
# ExProb: https://www.hackerrank.com/challenges/swap-case/problem
# 2) Slice
mystr3 = mystr[:2] + "c" + mystr[3:]
print(mystr3)
#lref_pystr_index Anki-x # HackerR: https://www.hackerrank.com/challenges/python-mutations/problem
def string_validation(self):
"a-z,A-Z,0-9".isalnum() # Alpha Numeric. (!= regex '\w')
"a-Z".isalpha() # Alphabetical. # Not quite the same as regex '\w' since \w contains '_'
"123".isdigit()
"a-z123".islower()
"ABC123".isupper()
# Example problem: https://www.hackerrank.com/challenges/string-validators/problem
def string_constants____kwds_lower_upper_letters_alphabet(self):
import string
string.ascii_lowercase
# 'abcdefghijklmnopqrstuvwxyz'
string.ascii_uppercase
# 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
string.ascii_letters
# 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
def string_alignment(self):
"Leo".ljust(20, "#")
#'Leo#################'
"Leo".center(20, "#")
#'########Leo#########'
"Leo".rjust(20, "#")
#'#################Leo'
#ex: https://www.hackerrank.com/challenges/text-alignment/problem
#ex: https://www.hackerrank.com/challenges/designer-door-mat/problem
| |
def crc32(filename, slice = OneM):
with open(filename, "rb") as f:
buf = f.read(slice)
crc = binascii.crc32(buf)
while True:
buf = f.read(slice)
if buf:
crc = binascii.crc32(buf, crc)
else:
break
return crc & 0xffffffff
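# Usage sketch (assumes a file named 'example.bin' exists; prints the checksum as 8 hex digits):
# print("%08x" % crc32('example.bin'))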
def enable_http_logging():
httplib.HTTPConnection.debuglevel = 1
logging.basicConfig() # you need to initialize logging, otherwise you will not see anything from requests
logging.getLogger().setLevel(logging.DEBUG)
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(logging.DEBUG)
requests_log.propagate = True
def ls_type(isdir):
return 'D' if isdir else 'F'
def ls_time(itime):
return time.strftime('%Y-%m-%d, %H:%M:%S', time.localtime(itime))
def print_pcs_list(json, foundmsg = "Found:", notfoundmsg = "Nothing found."):
list = json['list']
if list:
pr(foundmsg)
for f in list:
pr("{} {} {} {} {} {}".format(
ls_type(f['isdir']),
f['path'],
f['size'],
ls_time(f['ctime']),
ls_time(f['mtime']),
f['md5']))
else:
pr(notfoundmsg)
# tree represented using a dictionary (obsolete note: OrderedDict is no longer required)
# NOTE: No own-name is kept, so the caller needs to keep track of that
# NOTE: Case-sensitive, as I don't want to waste time wrapping up a case-insensitive one
# single-linked-list, no backwards travelling capability
class PathDictTree(dict):
def __init__(self, type = 'D', **kwargs):
self.type = type
self.extra = {}
for k, v in kwargs.items():
self.extra[k] = v
super(PathDictTree, self).__init__()
def __str__(self):
return self.__str('')
def __str(self, prefix):
result = ''
for k, v in self.iteritems():
result += "{} - {}/{} - size: {} - md5: {} \n".format(
v.type, prefix, k,
v.extra['size'] if 'size' in v.extra else '',
binascii.hexlify(v.extra['md5']) if 'md5' in v.extra else '')
for k, v in self.iteritems():
if v.type == 'D':
result += v.__str(prefix + '/' + k)
return result
def add(self, name, child):
self[name] = child
return child
# returns the child tree at the given path
# assume that path is only separated by '/', instead of '\\'
def get(self, path):
place = self
if path:
# Linux can have file / folder names with '\\'?
if sys.platform.startswith('win32'):
assert '\\' not in path
route = filter(None, path.split('/'))
for part in route:
if part in place:
sub = place[part]
assert place.type == 'D' # sanity check
place = sub
else:
return None
return place
# return a string list of all 'path's in the tree
def allpath(self):
result = []
for k, v in self.items():
result.append(k)
if v.type == 'D':
for p in self.get(k).allpath():
result.append(k + '/' + p)
return result
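# Minimal usage sketch of PathDictTree (hypothetical names and sizes, for illustration only):
# root = PathDictTree()
# docs = root.add('docs', PathDictTree('D'))
# docs.add('readme.txt', PathDictTree('F', size=1024, md5=b'\x00' * 16))
# print(root.allpath()) # -> ['docs', 'docs/readme.txt']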
class ByPy(object):
'''The main class of the bypy program'''
# public static properties
HelpMarker = "Usage:"
ListFormatDict = {
'$t' : (lambda json: ls_type(json['isdir'])),
'$f' : (lambda json: json['path'].split('/')[-1]),
'$c' : (lambda json: ls_time(json['ctime'])),
'$m' : (lambda json: ls_time(json['mtime'])),
'$d' : (lambda json: str(json['md5'] if 'md5' in json else '')),
'$s' : (lambda json: str(json['size'])),
'$i' : (lambda json: str(json['fs_id'])),
'$b' : (lambda json: str(json['block_list'] if 'block_list' in json else '')),
'$u' : (lambda json: 'HasSubDir' if 'ifhassubdir' in json and json['ifhassubdir'] else 'NoSubDir'),
'$$' : (lambda json: '$')
}
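# Usage sketch for ListFormatDict (hypothetical format string and entry; each '$x' token renders one JSON field):
# fmt = '$t $f $s'
# entry = {'isdir': 0, 'path': '/apps/bypy/readme.txt', 'size': 123}
# line = fmt
# for token, render in ByPy.ListFormatDict.items():
#     if token in line:
#         line = line.replace(token, render(entry))
# print(line) # -> 'F readme.txt 123'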
# Old setting locations, should be moved to ~/.bypy to be clean
OldTokenFilePath = HomeDir + os.sep + '.bypy.json'
OldHashCachePath = HomeDir + os.sep + '.bypy.pickle'
@staticmethod
def migratesettings():
result = ENoError
filesToMove = [
[ByPy.OldTokenFilePath, TokenFilePath],
[ByPy.OldHashCachePath, HashCachePath]
]
result = makedir(ConfigDir, 0o700) and result # make it secretive
# this directory must exist
if result != ENoError:
perr("Fail to create config directory '{}'".format(ConfigDir))
return result
for tomove in filesToMove:
oldfile = tomove[0]
newfile = tomove[1]
if os.path.exists(oldfile):
dst = newfile
if os.path.exists(newfile):
dst = TokenFilePath + '.old'
result = movefile(oldfile, dst) and result
return result
@staticmethod
def getcertfile():
result = ENoError
if not os.path.exists(ByPyCertsPath):
if os.path.exists(ByPyCertsFile):
result = copyfile(ByPyCertsFile, ByPyCertsPath)
else:
try:
# perform a simple download from github
urllib.urlretrieve(
'https://raw.githubusercontent.com/houtianze/bypy/master/bypy.cacerts.pem', ByPyCertsPath)
except IOError as ex:
perr("Fail download CA Certs to '{}'.\n" + \
"Exception:\n{}\nStack:{}\n".format(
ByPyCertsPath, ex, traceback.format_exc()))
result = EDownloadCerts
return result
def __init__(self,
slice_size = DefaultSliceSize,
dl_chunk_size = DefaultDlChunkSize,
verify = True,
retry = 5, timeout = None,
quit_when_fail = False,
listfile = None,
resumedownload = True,
extraupdate = lambda: (),
incregex = '',
ondup = '',
followlink = True,
checkssl = True,
cacerts = None,
verbose = 0, debug = False):
# handle backward compatibility
sr = ByPy.migratesettings()
if sr != ENoError:
# bail out
perr("Failed to migrate old settings.")
onexit(EMigrationFailed)
# it doesn't matter if it failed, we can disable SSL verification anyway
ByPy.getcertfile()
self.__slice_size = slice_size
self.__dl_chunk_size = dl_chunk_size
self.__verify = verify
self.__retry = retry
self.__quit_when_fail = quit_when_fail
self.__timeout = timeout
self.__listfile = listfile
self.__resumedownload = resumedownload
self.__extraupdate = extraupdate
self.__incregex = incregex
self.__incregmo = re.compile(incregex)
if ondup and len(ondup) > 0:
self.__ondup = ondup[0].upper()
else:
self.__ondup = 'O' # O - Overwrite* S - Skip P - Prompt
# TODO: whether this works is still to be tried out
self.__isrev = False
self.__followlink = followlink
# TODO: properly fix this InsecurePlatformWarning
checkssl = False
# using a mirror, which has name mismatch SSL error,
# so need to disable SSL check
if pcsurl != PcsUrl:
# TODO: print a warning
checkssl = False
self.__checkssl = checkssl
self.Verbose = verbose
self.Debug = debug
if self.__checkssl:
# sort of undocumented by requests
# http://stackoverflow.com/questions/10667960/python-requests-throwing-up-sslerror
if cacerts is not None:
if os.path.isfile(cacerts):
self.__checkssl = cacerts
else:
perr("Invalid CA Bundle '{}' specified")
# falling through here means no customized CA Certs specified
if self.__checkssl is True:
# use our own CA Bundle if possible
if os.path.isfile(ByPyCertsPath):
self.__checkssl = ByPyCertsPath
else:
# Well, disable cert verification
pwarn(
"** SSL Certificate Verification has been disabled **\n\n" + \
"If you are confident that your CA Bundle can verify " + \
"Baidu PCS's certs, you can run the prog with the '" + CaCertsOption + \
" <your ca cert path>' argument to enable SSL cert verification.\n\n" + \
"However, most of the time, you can ignore this warning, " + \
"you are going to send sensitive data to the cloud plainly right?")
self.__checkssl = False
if not checkssl:
disable_urllib3_warning()
# the prophet said: thou shalt initialize
self.__existing_size = 0
self.__json = {}
self.__access_token = ''
self.__bduss = ''
self.__pancookies = {}
self.__remote_json = {}
self.__slice_md5s = []
if self.__listfile and os.path.exists(self.__listfile):
with open(self.__listfile, 'r') as f:
self.__list_file_contents = f.read()
else:
self.__list_file_contents = None
# only if user specifies '-ddd' or more 'd's, the following
# debugging information will be shown, as it's very talkative.
if self.Debug >= 3:
# these two lines enable debugging at httplib level (requests->urllib3->httplib)
# you will see the REQUEST, including HEADERS and DATA, and RESPONSE with HEADERS but without DATA.
# the only thing missing will be the response.body which is not logged.
enable_http_logging()
if not self.__load_local_json():
# no need to call __load_local_json() again as __auth() will load the json & access token.
result = self.__auth()
if result != ENoError:
perr("Program authorization FAILED.\n" + \
"You need to authorize this program before using any PCS functions.\n" + \
"Quitting...\n")
onexit(result)
if not self.__load_local_bduss():
self.pv("BDUSS not found at '{}'.".format(BDUSSPath))
def pv(self, msg, **kwargs):
if self.Verbose:
pr(msg)
def pd(self, msg, level = 1, **kwargs):
if self.Debug >= level:
pdbg(msg, kwargs)
def shalloverwrite(self, prompt):
if self.__ondup == 'S':
return False
elif self.__ondup == 'P':
ans = ask(prompt, False).upper()
if not ans.startswith('Y'):
return False
return True
def __print_error_json(self, r):
try:
dj = r.json()
if 'error_code' in dj and 'error_msg' in dj:
ec = dj['error_code']
et = dj['error_msg']
msg = ''
if ec == IEMD5NotFound:
pf = pinfo
msg = et
else:
pf = perr
msg = "Error code: {}\nError Description: {}".format(ec, et)
pf(msg)
except Exception:
perr('Error parsing JSON Error Code from:\n{}'.format(rb(r.text)))
perr('Exception:\n{}'.format(traceback.format_exc()))
def __dump_exception(self, ex, url, pars, r, act):
if self.Debug or self.Verbose:
perr("Error accessing '{}'".format(url))
if ex and isinstance(ex, Exception) and self.Debug:
perr("Exception:\n{}".format(ex))
tb = traceback.format_exc()
if tb:
pr(tb)
perr("Function: {}".format(act.__name__))
perr("Website parameters: {}".format(pars))
if hasattr(r, 'status_code'):
perr("HTTP Response Status Code: {}".format(r.status_code))
if (r.status_code != 200 and r.status_code != 206) or (not (pars.has_key('method') and pars['method'] == 'download') and url.find('method=download') == -1 and url.find('baidupcs.com/file/') == -1):
self.__print_error_json(r)
perr("Website returned: {}".format(rb(r.text)))
# always append / replace the 'access_token' parameter in the https request
def __request_work(self, url, pars, act, method, actargs = None, addtoken = True, dumpex = True, **kwargs):
result = ENoError
r = None
self.__extraupdate()
parsnew = pars.copy()
if addtoken:
parsnew['access_token'] = self.__access_token
try:
self.pd(method + ' ' + url)
self.pd("actargs: {}".format(actargs))
self.pd("Params: {}".format(pars))
if method.upper() == 'GET':
r = requests.get(url,
params = parsnew, timeout = self.__timeout, verify = self.__checkssl, **kwargs)
elif method.upper() == 'POST':
r = requests.post(url,
params = parsnew, timeout = self.__timeout, verify = self.__checkssl, **kwargs)
# BUGFIX: DON'T do this, if we are downloading a big file, the program sticks and dies
#self.pd("Request Headers: {}".format(
# pprint.pformat(r.request.headers)), 2)
sc = r.status_code
self.pd("HTTP Status Code: {}".format(sc))
# BUGFIX: DON'T do this, if we are downloading a big file, the program sticks and dies
#self.pd("Header returned: {}".format(pprint.pformat(r.headers)), 2)
#self.pd("Website returned: {}".format(rb(r.text)), 3)
if sc == requests.codes.ok or sc == 206: # 206 Partial Content
if sc == requests.codes.ok:
self.pd("Request OK, processing action")
else:
self.pd("206 Partial Content")
result = act(r, actargs)
if result == ENoError:
self.pd("Request all goes fine")
else:
ec = 0
try:
j = r.json()
ec = j['error_code']
# error print is done in __dump_exception()
# self.__print_error_json(r)
except ValueError:
perr("Not valid error JSON")
# 6 (sc: 403): No permission to access user data
# 110 (sc: 401): Access token invalid or no longer valid
# 111 (sc: 401): Access token expired
if ec == 111 or ec == 110 or ec == 6: # and sc == 401:
self.pd("Need to refresh token, refreshing")
if ENoError == self.__refresh_token(): # refresh the token and re-request
# TODO: avoid dead recursive loops
# TODO: properly pass retry
result = self.__request(url, pars, act, method, actargs, True, addtoken, dumpex, **kwargs)
else:
result = EFatal
perr("FATAL: Token refreshing failed, can't continue.\nQuitting...\n")
onexit(result)
# File md5 not found, you should use upload API to upload the whole file.
elif ec == IEMD5NotFound: # and sc == 404:
self.pd("MD5 not found, rapidupload failed")
result = ec
# user not exists
elif ec == 31045: # and sc == 403:
self.pd("BDUSS has expired")
result = IEBDUSSExpired
# superfile create failed
elif ec == 31081: # and sc == 404:
self.pd("Failed to combine files from MD5 slices |