import logging
import random
from typing import Any, Dict
from django.utils import timezone
import yaml
from faker import Faker
from .schema_mock import read_schema
logger = logging.getLogger(__name__)
fake = Faker()
def generate_oas_component(
service: str,
component: str,
**properties,
) -> Dict[str, Any]:
"""
Generate an object conforming to the OAS schema definition.
Any extra kwargs passed in are used as explicit values for properties.
"""
schema = yaml.safe_load(read_schema(service))
definition = schema["components"]
for bit in component.split("/"):
definition = definition[bit]
assert (
definition["type"] == "object"
), "Types other than object are not supported (yet)"
return generate_object(schema, definition, **properties)
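# Illustrative usage sketch (not part of the original module). Assuming a
# registered service alias "documenten" whose schema defines
# components/schemas/Document (both names hypothetical), a mock object can be
# generated with selected fields pinned explicitly:
#
#   doc = generate_oas_component(
#       "documenten",
#       "schemas/Document",
#       url="https://example.com/documenten/123",
#   )
#
# Properties passed as kwargs are kept verbatim; the remaining properties are
# filled with Faker values matching the types declared in the schema.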
def generate_object(schema: dict, definition: dict, **properties):
obj = properties.copy()
if "discriminator" in definition:
# Not implemented yet...
logger.debug("discriminator is not implemented yet")
if "properties" not in definition:
return {}
for prop, prop_def in definition["properties"].items():
if prop in obj:
continue
obj[prop] = generate_prop(schema, prop_def)
return obj
def generate_prop(schema: dict, prop_definition: dict) -> Any:
if "$ref" in prop_definition:
ref_bits = prop_definition["$ref"].replace("#/", "", 1).split("/")
prop_definition = schema
for bit in ref_bits:
prop_definition = prop_definition[bit]
prop_type = prop_definition["type"]
if prop_definition.get("nullable"):
return None
enum = prop_definition.get("enum")
if enum:
return random.choice(enum)
if prop_type == "string":
fmt = prop_definition.get("format")
if fmt == "uri":
return fake.url(schemes=["https"])
elif fmt == "duration":
return "P3W"
elif fmt == "date":
return fake.date()
elif fmt == "date-time":
return fake.date_time(tzinfo=timezone.utc).isoformat()
elif fmt is None:
return fake.pystr(
min_chars=prop_definition.get("minLength"),
max_chars=prop_definition.get("maxLength", 20),
)
elif prop_type == "boolean":
return fake.pybool()
elif prop_type == "array":
item = generate_prop(schema, prop_definition["items"])
return [item]
elif prop_type == "object":
return generate_object(schema, prop_definition)

# [end of zgw_consumers/test/component_generation.py, package zgw-consumers]
# Copyright (c) 2013, Vitaly Babiy
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of Django REST Framework JSON CamelCase nor the names of
# its contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import re
from django.core.files import File
from django.http import QueryDict
def get_underscoreize_re(options):
if options.get("no_underscore_before_number"):
pattern = r"([a-z]|[0-9]+[a-z]?|[A-Z]?)([A-Z])"
else:
pattern = r"([a-z]|[0-9]+[a-z]?|[A-Z]?)([A-Z0-9])"
return re.compile(pattern)
def camel_to_underscore(name, **options):
underscoreize_re = get_underscoreize_re(options)
return underscoreize_re.sub(r"\1_\2", name).lower()
def _get_iterable(data):
if isinstance(data, QueryDict):
return data.lists()
else:
return data.items()
def underscoreize(data, **options):
if isinstance(data, dict):
new_dict = {}
for key, value in _get_iterable(data):
if isinstance(key, str):
new_key = camel_to_underscore(key, **options)
else:
new_key = key
new_dict[new_key] = underscoreize(value, **options)
if isinstance(data, QueryDict):
new_query = QueryDict(mutable=True)
for key, value in new_dict.items():
new_query.setlist(key, value)
return new_query
return new_dict
if is_iterable(data) and not isinstance(data, (str, File)):
return [underscoreize(item, **options) for item in data]
return data
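# Illustrative sketch (not part of the original module):
#
#   underscoreize({"fullName": "John", "address2City": "Oslo"})
#   # -> {"full_name": "John", "address_2_city": "Oslo"}
#
#   underscoreize({"address2City": "Oslo"}, no_underscore_before_number=True)
#   # -> {"address2_city": "Oslo"}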
def is_iterable(obj):
try:
iter(obj)
except TypeError:
return False
else:
return True

# [end of zgw_consumers/api_models/_camel_case.py, package zgw-consumers]
from django.db import models
from django.utils.translation import gettext as _
class VertrouwelijkheidsAanduidingen(models.TextChoices):
openbaar = "openbaar", "Openbaar"
beperkt_openbaar = "beperkt_openbaar", "Beperkt openbaar"
intern = "intern", "Intern"
zaakvertrouwelijk = "zaakvertrouwelijk", "Zaakvertrouwelijk"
vertrouwelijk = "vertrouwelijk", "Vertrouwelijk"
confidentieel = "confidentieel", "Confidentieel"
geheim = "geheim", "Geheim"
zeer_geheim = "zeer_geheim", "Zeer geheim"
class RolTypes(models.TextChoices):
natuurlijk_persoon = "natuurlijk_persoon", "Natuurlijk persoon"
niet_natuurlijk_persoon = "niet_natuurlijk_persoon", "Niet-natuurlijk persoon"
vestiging = "vestiging", "Vestiging"
organisatorische_eenheid = "organisatorische_eenheid", "Organisatorische eenheid"
medewerker = "medewerker", "Medewerker"
class RolOmschrijving(models.TextChoices):
# "Kennis in dienst stellen van de behandeling van (een deel van) een zaak."
adviseur = "adviseur", "Adviseur"
# "De vakinhoudelijke behandeling doen van (een deel van) een zaak."
behandelaar = "behandelaar", "Behandelaar"
# "Vanuit eigen en objectief belang rechtstreeks betrokken "
# "zijn bij de behandeling en/of de uitkomst van een zaak."
belanghebbende = "belanghebbende", "Belanghebbende"
# "Nemen van besluiten die voor de uitkomst van een zaak noodzakelijk zijn."
beslisser = "beslisser", "Beslisser"
# "Aanleiding geven tot de start van een zaak .."
initiator = "initiator", "Initiator"
# "Het eerste aanspreekpunt zijn voor vragen van burgers en bedrijven .."
klantcontacter = "klantcontacter", "Klantcontacter"
# "Er voor zorg dragen dat de behandeling van de zaak in samenhang "
# "uitgevoerd wordt conform de daarover gemaakte afspraken."
zaakcoordinator = "zaakcoordinator", "Zaakcoördinator"
medeinitiator = "mede_initiator", "Mede-initiator"
class VervalRedenen(models.TextChoices):
tijdelijk = "tijdelijk", "Besluit met tijdelijke werking"
ingetrokken_overheid = "ingetrokken_overheid", "Besluit ingetrokken door overheid"
ingetrokken_belanghebbende = (
"ingetrokken_belanghebbende",
"Besluit ingetrokken o.v.v. belanghebbende",
)
class AardRelatieChoices(models.TextChoices):
vervolg = "vervolg", _("Vervolg")
bijdrage = "bijdrage", _("Bijdrage")
onderwerp = "onderwerp", _("Onderwerp") | zgw-consumers | /zgw-consumers-0.26.2.tar.gz/zgw-consumers-0.26.2/zgw_consumers/api_models/constants.py | constants.py |
from dataclasses import dataclass, field
from datetime import date, datetime
from decimal import Decimal
from typing import List, Optional, Union
from dateutil.parser import parse
from dateutil.relativedelta import relativedelta
from .base import Model, ZGWModel, factory
@dataclass
class Catalogus(ZGWModel):
url: str # bug: not required according to OAS
domein: str
rsin: str
contactpersoon_beheer_naam: str
contactpersoon_beheer_emailadres: str = ""
contactpersoon_beheer_telefoonnummer: str = ""
besluittypen: list = field(default_factory=list)
informatieobjecttypen: list = field(default_factory=list)
zaaktypen: list = field(default_factory=list)
@dataclass
class ZaakType(ZGWModel):
url: str # bug: not required according to OAS
catalogus: str
identificatie: str # bug: not required according to OAS
omschrijving: str
vertrouwelijkheidaanduiding: str
doel: str
aanleiding: str
indicatie_intern_of_extern: str
handeling_initiator: str
onderwerp: str
handeling_behandelaar: str
doorlooptijd: relativedelta
opschorting_en_aanhouding_mogelijk: bool
verlenging_mogelijk: bool
publicatie_indicatie: bool
producten_of_diensten: list
besluittypen: list
begin_geldigheid: date
versiedatum: date
omschrijving_generiek: str = ""
toelichting: str = ""
servicenorm: Optional[relativedelta] = None
verlengingstermijn: Optional[relativedelta] = None
trefwoorden: list = field(default_factory=list)
publicatietekst: str = ""
verantwoordingsrelatie: list = field(default_factory=list)
# selectielijst_procestype: ProcesType
statustypen: list = field(default_factory=list)
resultaattypen: list = field(default_factory=list)
eigenschappen: list = field(default_factory=list)
informatieobjecttypen: list = field(default_factory=list)
roltypen: list = field(default_factory=list)
deelzaaktypen: list = field(default_factory=list)
einde_geldigheid: Optional[date] = None
concept: bool = False
@dataclass
class StatusType(ZGWModel):
url: str # bug: not required according to OAS
zaaktype: str
omschrijving: str
volgnummer: int
omschrijving_generiek: str = ""
statustekst: str = ""
is_eindstatus: bool = False
@dataclass
class InformatieObjectType(ZGWModel):
url: str # bug: not required according to OAS
catalogus: str
omschrijving: str
vertrouwelijkheidaanduiding: str
begin_geldigheid: date
einde_geldigheid: Optional[date] = None
concept: bool = False
@dataclass
class ResultaatType(ZGWModel):
url: str # bug: not required according to OAS
zaaktype: str
omschrijving: str
resultaattypeomschrijving: str
selectielijstklasse: str
omschrijving_generiek: str = ""
toelichting: str = ""
archiefnominatie: str = ""
archiefactietermijn: Optional[relativedelta] = None
brondatum_archiefprocedure: Optional[dict] = None
@dataclass
class EigenschapSpecificatie(Model):
formaat: str
lengte: str
kardinaliteit: str
groep: str = ""
waardenverzameling: list = field(default_factory=list)
EIGENSCHAP_FORMATEN = {
"tekst": str,
"getal": lambda val: Decimal(val.replace(",", ".")),
"datum": date.fromisoformat,
"datum_tijd": parse,
}
@dataclass
class Eigenschap(ZGWModel):
url: str # bug: not required according to OAS
zaaktype: str
naam: str
definitie: str
specificatie: dict
toelichting: str = ""
def __post_init__(self):
super().__post_init__()
self.specificatie = factory(EigenschapSpecificatie, self.specificatie)
def to_python(self, value: str) -> Union[str, Decimal, date, datetime]:
"""
Cast the string value into the appropriate python type based on the spec.
"""
formaat = self.specificatie.formaat
assert formaat in EIGENSCHAP_FORMATEN, f"Unknown format {formaat}"
converter = EIGENSCHAP_FORMATEN[formaat]
return converter(value)
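# Illustrative sketch (not part of the original module), using hypothetical
# data: for a "getal" (number) specificatie, to_python() converts the Dutch
# comma-decimal notation to a Decimal.
#
#   eigenschap = factory(Eigenschap, {
#       "url": "https://example.com/eigenschappen/1",
#       "zaaktype": "https://example.com/zaaktypen/1",
#       "naam": "bedrag",
#       "definitie": "Bedrag in euro",
#       "specificatie": {"formaat": "getal", "lengte": "10",
#                        "kardinaliteit": "1"},
#   })
#   eigenschap.to_python("12,50")   # -> Decimal("12.50")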
@dataclass
class RolType(ZGWModel):
url: str # bug: not required according to OAS
zaaktype: str
omschrijving: str
omschrijving_generiek: str
@dataclass
class BesluitType(ZGWModel):
url: str # bug: not required according to OAS
catalogus: str
zaaktypen: List[str]
publicatie_indicatie: bool
informatieobjecttypen: List[str]
begin_geldigheid: date
omschrijving: str = ""
omschrijving_generiek: str = ""
besluitcategorie: str = ""
reactietermijn: Optional[relativedelta] = None
publicatietekst: str = ""
publicatietermijn: Optional[relativedelta] = None
toelichting: str = ""
einde_geldigheid: Optional[date] = None
concept: bool = False

# [end of zgw_consumers/api_models/catalogi.py, package zgw-consumers]
from dataclasses import dataclass, field
from datetime import date, datetime
from typing import Any, Optional
from .base import ZGWModel
from .catalogi import Eigenschap
from .constants import RolOmschrijving, RolTypes, VertrouwelijkheidsAanduidingen
@dataclass
class Zaak(ZGWModel):
url: str # bug: not required according to OAS
bronorganisatie: str
zaaktype: str
identificatie: str # bug: not required according to OAS
registratiedatum: date # bug: not required according to OAS
verantwoordelijke_organisatie: str
startdatum: date
vertrouwelijkheidaanduiding: str # bug: not required according to OAS
omschrijving: str = ""
toelichting: str = ""
einddatum: Optional[date] = None
einddatum_gepland: Optional[date] = None
uiterlijke_einddatum_afdoening: Optional[date] = None
publicatiedatum: Optional[date] = None
status: Optional[str] = None
resultaat: Optional[str] = None
relevante_andere_zaken: list = field(default_factory=list)
zaakgeometrie: dict = field(default_factory=dict)
def get_vertrouwelijkheidaanduiding_display(self):
return VertrouwelijkheidsAanduidingen.values[self.vertrouwelijkheidaanduiding]
@dataclass
class Status(ZGWModel):
url: str # bug: not required according to OAS
zaak: str
statustype: str
datum_status_gezet: datetime
statustoelichting: str = ""
@dataclass
class ZaakObject(ZGWModel):
url: str # bug: not required according to OAS
zaak: str
object_type: str
object: str = ""
object_type_overige: str = ""
relatieomschrijving: str = ""
object_identificatie: dict = field(default_factory=dict)
@dataclass
class ZaakEigenschap(ZGWModel):
url: str # bug: not required according to OAS
# uuid: uuid.UUID
zaak: str
eigenschap: str
waarde: str
naam: str = ""
def get_waarde(self) -> Any:
assert isinstance(
self.eigenschap, Eigenschap
), "Ensure eigenschap has been resolved"
return self.eigenschap.to_python(self.waarde)
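# Illustrative note (not part of the original module): ``eigenschap`` holds a
# URL string as fetched; it must first be replaced by a resolved instance,
# e.g. ``zeig.eigenschap = factory(Eigenschap, fetched_data)``, after which
# ``zeig.get_waarde()`` returns the value cast according to the specificatie
# ("getal" -> Decimal, "datum" -> date, "datum_tijd" -> datetime).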
@dataclass
class Resultaat(ZGWModel):
url: str # bug: not required according to OAS
zaak: str
resultaattype: str
toelichting: str = ""
@dataclass
class Rol(ZGWModel):
url: str # bug: not required according to OAS
zaak: str
betrokkene_type: str
roltype: str
roltoelichting: str
betrokkene: str = ""
omschrijving: str = ""
omschrijving_generiek: str = ""
registratiedatum: Optional[datetime] = None
indicatie_machtiging: str = ""
betrokkene_identificatie: dict = field(default_factory=dict)
def get_betrokkene_type_display(self):
return RolTypes.values[self.betrokkene_type]
def get_omschrijving_generiek_display(self):
return RolOmschrijving.values[self.omschrijving_generiek]
@dataclass
class ZaakInformatieObject(ZGWModel):
url: str # bug: not required according to OAS
informatieobject: str
zaak: str
aard_relatie_weergave: str = ""
titel: str = ""
beschrijving: str = ""
registratiedatum: Optional[datetime] = None

# [end of zgw_consumers/api_models/zaken.py, package zgw-consumers]
import uuid
from dataclasses import Field, fields
from datetime import date, datetime
from functools import partial
from typing import Any, Dict, List, Type, TypeVar, Union, overload
from dateutil.parser import parse
from dateutil.relativedelta import relativedelta
from ._camel_case import underscoreize
from .compat import parse_relativedelta
from .types import JSONObject
__all__ = ["CONVERTERS", "factory", "Model", "ZGWModel"]
def noop(value: Any) -> Any:
return value
CONVERTERS = {
type(None): lambda x: None,
str: noop,
int: noop,
float: noop,
dict: noop, # TODO: recurse?
uuid.UUID: lambda value: uuid.UUID(value),
datetime: parse,
date: date.fromisoformat,
relativedelta: parse_relativedelta,
bool: noop,
}
class Model:
def __init__(self, *args, **kwargs):
pass
def __post_init__(self):
self._type_cast()
def _type_cast(self):
model_fields = get_model_fields(self)
for attr, field in model_fields.items():
typehint = field.type
value = getattr(self, attr)
if typehint is None:
typehint = type(None)
# support for Optional / List
if hasattr(typehint, "__origin__"):
if typehint.__origin__ is list and typehint.__args__:
subtypehint = typehint.__args__[0]
if issubclass(subtypehint, Model):
setattr(self, attr, factory(subtypehint, value))
else:
converter = CONVERTERS[subtypehint]
new_value = [converter(x) for x in value]
setattr(self, attr, new_value)
continue
if typehint.__origin__ is Union:
typehint = typehint.__args__
if value is None:
continue
# Optional is ONE type combined with None
typehint = next(t for t in typehint if t is not None)
if isinstance(value, typehint):
continue
if issubclass(typehint, Model):
converter = partial(factory, typehint)
else:
converter = CONVERTERS[typehint]
setattr(self, attr, converter(value))
M = TypeVar("M", bound=Model)
class ZGWModel(Model):
@property
def uuid(self) -> uuid.UUID:
"""
Because of the usage of UUID4, we can rely on the UUID as identifier.
"""
_uuid = self.url.split("/")[-1]
return uuid.UUID(_uuid)
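# Illustrative sketch (not part of the original module): ZGW resources end
# their URL with the object's UUID, e.g. for a hypothetical model:
#
#   @dataclass
#   class Thing(ZGWModel):
#       url: str
#
#   thing = factory(Thing, {"url": "https://example.com/api/v1/things/"
#                                  "095be615-a8ad-4c33-8e9c-c7612fbf6c9f"})
#   thing.uuid   # -> UUID("095be615-a8ad-4c33-8e9c-c7612fbf6c9f")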
def get_model_fields(model: Union[type, Model]) -> Dict[str, Field]:
return {field.name: field for field in fields(model)}
@overload
def factory(model: Type[M], data: JSONObject) -> M:
...
@overload
def factory(model: Type[M], data: List[JSONObject]) -> List[M]:
...
def factory(
model: Type[M], data: Union[JSONObject, List[JSONObject]]
) -> Union[M, List[M]]:
_is_collection = isinstance(data, list)
model_fields = get_model_fields(model)
known_kwargs = list(model_fields.keys())
def _normalize(kwargs: dict):
# TODO: this should be an explicit mapping, but *most* of the time with ZGW
# API's this is fine.
kwargs = underscoreize(kwargs)
to_keep = {key: value for key, value in kwargs.items() if key in known_kwargs}
return to_keep
if not _is_collection:
data = [data]
instances = [model(**_normalize(_raw)) for _raw in data]
if not _is_collection:
instances = instances[0]
return instances

# [end of zgw_consumers/api_models/base.py, package zgw-consumers]
# zgyio
[](https://opensource.org/licenses/Apache-2.0)
[](https://github.com/equinor/zgyio/actions/workflows/python-app.yml)
[](https://pypi.org/project/zgyio/)
Convenience wrapper around Schlumberger's OpenZGY Python package which enables
reading of ZGY files with a syntax familiar to users of segyio.
---
### Installation
Requires **openzgy** package from Schlumberger, which is (for now) bundled here under Apache v2.0 license
- Wheels from [PyPI](https://pypi.org/project/zgyio/) without zgy support: `pip install zgyio`
- Source from [Github](https://github.com/equinor/zgyio): `git clone https://github.com/equinor/zgyio.git`
---
### Usage
#### Use segyio-like interface to read ZGY files ####
```python
import zgyio
with zgyio.open("in.vds")) as zgyfile:
il_slice = zgyfile.iline[zgyfile.ilines[LINE_IDX]]
xl_slice = zgyfile.xline[LINE_NUMBER]
zslice = zgyfile.depth_slice[SLICE_IDX]
trace = zgyfile.trace[TRACE_IDX]
trace_header = zgyfile.header[TRACE_IDX]
text_file_header = zgyfile.text[0]
```
#### Read a ZGY file with underlying functions ####
```python
from zgyio.accessors import SeismicReader
with SeismicReader("in.zgy") as reader:
inline_slice = reader.read_inline_number(LINE_NUMBER)
crossline_slice = reader.read_crossline(LINE_IDX)
z_slice = reader.read_zslice_coord(SLICE_COORD)
sub_vol = reader.read_subvolume(min_il=min_il, max_il=max_il,
min_xl=min_xl, max_xl=max_xl,
min_z=min_z, max_z=max_z)
```
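#### Iterate over all inlines ####
A minimal sketch building on the interface shown above; it assumes `in.zgy`
exists and that `zgyfile.ilines` lists the inline numbers, as in the first
example:
```python
import zgyio

with zgyio.open("in.zgy") as zgyfile:
    for il in zgyfile.ilines:          # inline numbers
        section = zgyfile.iline[il]    # one inline section
        print(il, section.shape)
```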
<!-- end of README.md, package zgyio -->
##@package openzgy.iterator
#@brief Efficient iterator for reading an entire ZGY file.
import numpy as np
from .api import _map_SampleDataTypeToDataType
from .impl.enum import _map_DataTypeToNumpyType
def _roundup(x, step):
return ((x + step - 1) // step) * step
def _choose_blocksize(surveysize, bricksize, samplesize, maxbytes):
"""
Get a reasonable blocksize (a.k.a. buffer size) to use when reading.
Always read full traces, even if this means exceeding the specified
maxbytes. Always read full bricks. Read more if it will fit inside
the specified maxbytes.
In each dimension the block size will either be the full survey size
padded to bricksize or a power-of-two multiple of the brick size.
In the simplest case caller can pass maxbytes=0 meaning read as little
as possible; this will return (bricksize[0], bricksize[1], surveysize[2])
a.k.a. one brick column.
Note: It is desirable to have blocksize being a multiple of chunksize
and chunksize being a multiple of bricksize. _choose_blocksize() tries
to help with this: If there is room for more than one brick-column it
will multiply the size in some direction, but only with a power of 2.
This means that the adjustment will not break this condition.
If it had not been for the power-of-two limitation this might not work.
If the condition is met then the consumer knows that if a chunk smaller
than the requested chunksize is received then this can only be due to
reading at the end or bottom of the survey.
"""
surveysize = list([_roundup(surveysize[i], bricksize[i]) for i in range(3)])
bs = [bricksize[0], bricksize[1], surveysize[2]]
while bs[0] * bs[1] * bs[2] * samplesize * 2 < maxbytes:
bs[1] *= 2
bs[1] = min(bs[1], surveysize[1])
while bs[0] * bs[1] * bs[2] * samplesize * 2 < maxbytes:
bs[0] *= 2
bs[0] = min(bs[0], surveysize[0])
return bs
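# Worked example (illustrative, not part of the original module): for a survey
# of (201, 301, 501) samples, bricksize (64, 64, 64), 4-byte samples and the
# default maxbytes of 128 MiB, the padded survey is (256, 320, 512). The block
# starts as one brick-column (64, 64, 512), grows to (64, 320, 512) in the
# crossline direction (clamped to the padded survey), then doubles once in the
# inline direction to (128, 320, 512), i.e. 80 MiB per read. The factor of 2
# in the loop test ensures a block still fits within maxbytes after doubling.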
def _readall_1(surveysize, blocksize, dtype, readfn):
"""
Iterates over the entire file and returns data in blocks.
This is the internal part of readall_2, it handles blocking the
read but not chunking the data to be returned to the caller.
"""
data = np.zeros(blocksize, dtype=dtype) if readfn else None
done = np.int64(0)
total = np.product(surveysize)
for ii in range(0, surveysize[0], blocksize[0]):
for jj in range(0, surveysize[1], blocksize[1]):
for kk in range(0, surveysize[2], blocksize[2]):
start = np.array((ii, jj, kk), dtype=np.int64)
count = np.minimum(start + blocksize, surveysize) - start
view = data[:count[0],:count[1],:count[2]] if data is not None else None
#print("Reading", start, count, view.shape)
if readfn: readfn(start, view)
done += np.product(count)
yield start, count, view
assert done == np.product(surveysize)
def _readall_2(surveysize, blocksize, chunksize, dtype, readfn, progress):
"""
Iterates over the entire file and returns data in chunks.
All numeric and array parameters use numpy types.
"""
done = np.int64(0)
total = np.product(surveysize)
# Give a chance to abort before we even start.
if progress and not progress(done, total): return
alldata = _readall_1(surveysize=surveysize,
blocksize=blocksize,
dtype=dtype,
readfn=readfn)
for datastart, datasize, data in alldata:
for ii in range(0, datasize[0], chunksize[0]):
for jj in range(0, datasize[1], chunksize[1]):
# After reading a block but before yielding.
if progress and not progress(done, total): return
for kk in range(0, datasize[2], chunksize[2]):
start = np.array((ii, jj, kk), dtype=np.int64)
end = np.minimum(start + chunksize, datasize)
count = end - start
view = data[start[0]:end[0],start[1]:end[1],start[2]:end[2]] if data is not None else None
done += np.product(count)
yield datastart + start, count, view
# After yielding, give a chance to abort before the next read.
# Also makes sure the final done==total is sent.
if progress and not progress(done, total): return
assert done == np.product(surveysize)
def _readall_3(surveysize, bricksize, blocksize, chunksize, dtype, readfn, maxbytes, progress):
"""
Handle default arguments, make sure all data has numpy types,
and then pass control to _readall_2.
"""
surveysize = np.array(surveysize, dtype=np.int64)
bricksize = np.array(bricksize, dtype=np.int64)
maxbytes = np.int64(maxbytes if maxbytes is not None else 128*1024*1024)
blocksize = np.array(blocksize if blocksize is not None else _choose_blocksize(
surveysize, bricksize, np.dtype(dtype).itemsize, maxbytes),
dtype=np.int64)
chunksize = np.array(chunksize if chunksize is not None else blocksize, dtype=np.int64)
blocksize[blocksize==0] = surveysize[blocksize==0]
chunksize[chunksize==0] = blocksize[chunksize==0]
if False:
fmt = lambda x: "{0} = {1} voxels, {2:.1f} MB".format(str(tuple(x)), np.product(x), np.product(x) * dtype.itemsize/(1024*1024))
print("survey", fmt(surveysize), "of", np.dtype(dtype).name)
print("brick ", fmt(bricksize))
print("block ", fmt(blocksize))
print("chunk ", fmt(chunksize))
# Since _readall_2 contains a yield, the function won't actually be
# invoked until somebody consumes its output.
result = _readall_2(surveysize=surveysize,
blocksize=blocksize,
chunksize=chunksize,
dtype=dtype,
readfn=readfn,
progress=progress)
return result
def readall(r, *, blocksize = None, chunksize=None, maxbytes=128*1024*1024, dtype=None, cropsize = None, cropoffset = None, lod = 0, sizeonly = False, progress=None):
"""
Convenience function to iterate over an entire cube, trying to use
an efficient block size on read and optionally returning a smaller
chunk size to the caller. The returned data buffer belongs to this
iterator and may be overwritten on the next call. Two different
iterators do not share their buffers.
Note: If blocksize is a multiple of chunksize and chunksize is a
multiple of bricksize then the following holds: if a chunk smaller
than the requested chunksize is received then this can only be due
to reading at the end or bottom of the survey. The caller might rely
on this. E.g. when computing low resolution bricks and writing them
out in a way that avoids a read/modify/write.
CAVEAT: This function might be overkill in some cases.
parameters:
r: ZgyReader -- The open ZGY file
blocksize: (i,j,k) -- How much to read (at most) in one call.
If omitted, use a reasonable default.
Dims set to 0 mean "as much as possible".
chunksize: (i,j,k) -- How much to return (at most) at once.
Defaults to blocksize, i.e. return the
same blocks that are read. Will never
return more than blocksize, so if set to
more than 64 you probably want to set an
explicit blocksize as well.
maxbytes: int64 -- Max bytes to read at once, defaults to 128 MB
Ignored if an explicit blocksize is specified.
dtype: np.dtype -- Data type of returned buffer. Defaults to
what is in the file. Use np.float32 to
convert and scale (if needed) to float.
cropsize: (i,j,k) -- Only read this amount of data.
Note, no sanity check of this parameter.
cropoffset: (i,j,k) -- Start reading at this point.
Note, no sanity check of this parameter.
lod: int -- Pass >0 for decimated data.
sizeonly: bool -- If True, return sizes but not data.
progress: callable -- Invoked after each read from disk.
"""
if sizeonly:
readfn = None
elif cropoffset is None or tuple(cropoffset) == (0, 0, 0):
readfn = lambda pos, data: r.read(pos, data, lod=lod)
else:
readfn = lambda pos, data: r.read(
(pos[0]+cropoffset[0],
pos[1]+cropoffset[1],
pos[2]+cropoffset[2]),
data, lod=lod)
impl_dtype = _map_SampleDataTypeToDataType(r.datatype)
dtype = dtype or np.dtype(_map_DataTypeToNumpyType(impl_dtype))
size = np.array(r.size, dtype=np.int64)
size = (size + ((1<<lod) - 1)) // (1<<lod)
return _readall_3(surveysize=cropsize if cropsize is not None else size,
bricksize=r.bricksize,
blocksize=blocksize,
chunksize=chunksize,
dtype=dtype,
readfn=readfn,
maxbytes=maxbytes,
progress=progress)
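# Illustrative usage sketch (not part of the original module):
#
#   from openzgy.api import ZgyReader
#   with ZgyReader("in.zgy") as r:
#       for start, count, data in readall(r, maxbytes=64*1024*1024):
#           pass   # 'data' is the chunk of samples starting at ordinal 'start'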
# Copyright 2017-2020, Schlumberger
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# [end of openzgy/iterator.py, package zgyio]
##@package openzgy
#@brief The top level only has package members.
##@package openzgy.api
#@brief User visible apis are here.
##
# \mainpage
#
# The %OpenZGY Python API allows read/write access to files
# stored in the ZGY format. The main part of the API is here:
#
# \li ZgyReader and its ZgyMeta base class.
# \li ZgyWriter also extending ZgyMeta.
# \li ZgyUtils for anything not read or write.
# \li \ref exception.ZgyError
# you might want to catch.
# \li ProgressWithDots example of progress reporting.
# \li \ref Example Example application.
#
# If you are reading this document from doxygen/pure/apidoc.pdf
# in the source tree then please see doxygen/README.md for an
# explanation of why the documentation produced by the build might
# be better.
#
# \if IMPL
# If you are viewing the full Doxygen documentation then this
# covers both the API and most of the implementation. So if you
# look at the list of classes and methods this might seem a bit
# daunting. All you really need to use the API should be in the
# above list. Excluding trivial structs that will be cross
# referenced as needed. So you don't need to go looking for them.
# Of course, if you want to work om %OpenZGY itself then you
# probably need everything.
#
# See also the following related pages:
#
# \li \ref physicalformat
# \li \ref implementation
# \li \ref lowres
# \li \ref migration
#
# \endif
# \page Example
# \include simplecopy.py
import numpy as np
import json
import sys
from collections import namedtuple
from enum import Enum
from .exception import *
from .impl import enum as impl_enum
from .impl.meta import ZgyInternalMeta
from .impl.bulk import ZgyInternalBulk, ScalarBuffer
from .impl.transform import generalTransform
from .impl.file import FileFactory
from .impl.stats import StatisticData
from .impl.lodalgo import DecimationType
from .impl.compress import CompressFactoryImpl
from .impl.zfp_compress import ZfpCompressPlugin
from .impl.genlod import GenLodC
##@cond IMPL
##@brief Explicit control of imported symbols.
class _internal:
"""
This class is only used to rename some internal code.
I want to be explicit about which classes I need from impl.*
but at the same time I don't want to pollute the user-visible api
namespace with names not even starting with an underscore that the
user has no business accessing. I am not sure whether my way of
achieving this is considered a kludge.
What I really want is to say something like:
from .impl.genlod import GenLodC as _internal.GenLodC
but apparently this is not allowed.
"""
pass
_internal.enum = impl_enum; del impl_enum
_internal.ZgyInternalMeta = ZgyInternalMeta; del ZgyInternalMeta
_internal.ZgyInternalBulk = ZgyInternalBulk; del ZgyInternalBulk
_internal.ScalarBuffer = ScalarBuffer; del ScalarBuffer
_internal.generalTransform = generalTransform; del generalTransform
_internal.FileFactory = FileFactory; del FileFactory
_internal.StatisticData = StatisticData; del StatisticData
_internal.DecimationType = DecimationType; del DecimationType
_internal.CompressFactoryImpl = CompressFactoryImpl; del CompressFactoryImpl
_internal.ZfpCompressPlugin = ZfpCompressPlugin; del ZfpCompressPlugin
_internal.GenLodC = GenLodC; del GenLodC
##@endcond
##@brief Sample data type used in the public API.
class SampleDataType(Enum):
"""
Sample data type used in the public API.
Corresponds to RawDataType used in the ZGY file format.
"""
unknown = 1000
int8 = 1001
int16 = 1002
float = 1003
##@brief Horizontal or vertical dimension as used in the public API.
class UnitDimension(Enum):
"""
Horizontal or vertical dimension as used in the public API.
Horizontal dimension may be length or arc angle, although most
applications only support length. Vertical dimension may be time
or length. Vertical length is of course the same as depth.
Arguably there should have been separate enums for horizontal and
vertical dimension since the allowed values differ.
"""
unknown = 2000
time = 2001
length = 2002
arcangle = 2003
##@brief Base class shared between ZgyReader and ZgyWriter.
class ZgyMeta:
"""
Base class shared between ZgyReader and ZgyWriter.
"""
def __init__(self, meta):
"""Create an instance, providing the ZgyInternalMeta to use."""
assert meta is not None
assert isinstance(meta, _internal.ZgyInternalMeta)
self._meta = meta
@property
def size(self): # (iii)
"""
Number of inlines, crosslines, and samples in that order.
"""
return self._meta._ih._size
@property
def datatype(self): # s -> Enum
"""
Sample data type.
The ZGY-Public API uses enums: "int8", "int16", "float".
In some cases these are also passed as strings.
The old Python wrapper for ZGY-Public is stringly typed.
Instead of returning a real enum it returns the name.
"""
return _map_DataTypeToSampleDataType(self._meta._ih._datatype)
@property
def datarange(self): # (ff), a.k.a. DataMinMax
"""
For integral data this is the lowest and highest sample value
than can be represented in storage. The lowest storage value
(e.g. -128 for SignedInt8 data) will be returned as DataMinMax[0]
when read as float. Similarly the highest storage value e.g. +127
will be returned as DataMinMax[1]. When integer data is read as
the "native" integral type then no automatic scaling is applied.
Note that in this case the actual range of the samples on file might
be smaller (for int8, not all of the range -128..+127 might be used)
but it cannot be larger.
For floating point data these numbers are supposed to be the actual
value range of the samples on file. It is a good idea to enforce
this here, as the values stored by older writers cannot be trusted.
Note: Also enforced on write in impl.meta.InfoHeaderV2.calculate_write.
TODO-Worry: In some float32 datasets the bulk data might have
ridiculously large spikes wich will be included in the statistical
range but not in the codingrange. So, codingrange is actually the
one that is correct. Also, can we have a situation where stats
are not filled in while the codingrange is set? I am not sure
this is currently handled.
"""
if self._meta._ih._datatype == _internal.enum.RawDataType.Float32:
return (self._meta._ih._smin, self._meta._ih._smax)
else:
return (self._meta._ih._safe_codingrange[0], self._meta._ih._safe_codingrange[1])
@property
def raw_datarange(self): # (ff), a.k.a. DataMinMax
"""
As datarange, but the actual values read from the file before
they might have been changed to try to fix a bad file.
Only use this property if you want to handle such files
differently than the library does.
"""
if self._meta._ih._datatype == _internal.enum.RawDataType.Float32:
return (self._meta._ih._smin, self._meta._ih._smax)
else:
return (self._meta._ih._file_codingrange[0], self._meta._ih._file_codingrange[1])
@property
def zunitdim(self):
"""
Dimension in the vertical direction. "time" or "length".
"time" might on file be "SeismicTWT" or "SeismicOWT".
The old Python wrapper for ZGY-Public is stringly typed.
Instead of returning a real enum it returns the name.
"""
return _map_VerticalDimensionToUnitDimension(self._meta._ih._vdim)
@property
def hunitdim(self):
"""
Dimension in the horizontal direction. Should always be "length".
The original specification called for supporting "arcangle" as well,
i.e. coordinates in degrees instead of a projection. But most
application code that use ZGY will not support this.
The old Python wrapper for ZGY-Public is stringly typed.
Instead of returning a real enum it returns the name.
"""
return _map_HorizontalDimensionToUnitDimension(self._meta._ih._hdim)
@property
def zunitname(self):
"""
Unit in the horizontal direction. E.g. "ms", "m", or "ft".
Note that Petrel might ignore this settings and instead
prompt the user to state what the unit should be.
"""
return self._meta._ih._vunitname
@property
def hunitname(self):
"""
Unit in the horizontal direction. E.g. "m" or "ft".
"""
return self._meta._ih._hunitname
@property
def zunitfactor(self):
"""
Factor to multiply stored vertical values with to get SI units.
E.g. 0.001 for ms, 1.0 for m or 0.3048 for ft.
"""
return self._meta._ih._vunitfactor
@property
def hunitfactor(self):
"""
Factor to multiply stored horizontal values with to get SI units.
"""
return self._meta._ih._hunitfactor
@property
def zstart(self):
"""
Distance from surface/MSL to first sample, given in the vertical unit.
"""
return self._meta._ih._orig[2]
@property
def zinc(self):
"""
Sample interval, given in the vertical unit.
"""
return self._meta._ih._inc[2]
@property
def annotstart(self):
"""
First inline and crossline numbers.
"""
return self._meta._ih._orig[0:2]
@property
def annotinc(self):
"""
Inline and crossline number increments between adjacent
sections of the cube.
"""
return self._meta._ih._inc[0:2]
@property
def corners(self):
"""
World XY coordinates of each of the 4 corners.
The same coordinates in ordinal numbers are
((0, 0), (Size[0]-1, 0), (0, Size[1]-1), (Size[0]-1, Size[1]-1))
"""
return self._meta._ih._ocp_world
@property
def indexcorners(self):
"""
Redundant with Size.
Ordinal coordinates of each of the 4 corners, ordered as "corners".
"""
return self._meta._ih._ocp_index
@property
def annotcorners(self):
"""
Redundant with Start, Inc, Size.
Annotation coordinates of each of the 4 corners, ordered as HCorners.
"""
return self._meta._ih._ocp_annot
@property
def bricksize(self):
"""
Size of a brick. Should always be (64, 64, 64).
"""
return self._meta._ih._bricksize
@property
def brickcount(self):
"""
Number of bricks (including empties) ordered by [lod][dimension].
"""
return self._meta._ih._lodsizes
@property
def nlods(self):
"""
Number of level-of-detail layers, including lod 0 a.k.a. full resolution.
Unlike the C++ version, nlods is NOT returned as 1 if lowres data is missing.
"""
return self._meta._ih._nlods
@staticmethod
def _formatUUID(uuid):
"""
Convert a little-endian binary UUID to a big-endian string version.
See the C++ version for details.
First part byteswaps as an uint32_t.
Second and third part byteswaps as two uint16_t.
Remaining two parts are not byteswapped.
Hyphens added between parts.
"""
return ("{3:02x}{2:02x}{1:02x}{0:02x}-" +
"{5:02x}{4:02x}-{7:02x}{6:02x}-" +
"{8:02x}{9:02x}-" +
"{10:02x}{11:02x}{12:02x}{13:02x}{14:02x}{15:02x}").format(*uuid)
#@property
#def dataid(self):
# """
# GUID set on file creation.
# """
# return self._formatUUID(self._meta._ih._dataid)
@property
def verid(self):
"""
GUID set each time the file is changed.
"""
return self._formatUUID(self._meta._ih._verid)
#@property
#def previd(self):
# """
# GUID before last change.
# """
# return self._formatUUID(self._meta._ih._previd)
@property
def meta(self):
"""
A dictionary of all the meta information, which can
later be passed as **kwargs to the ZgyWriter constructor.
and "indexcorners", "annotcorners", "brickcount", "nlods"
are all derived properties that will never be settable.
"numthreads" is a property of the implementation, not the file.
"""
return {
"size": self.size,
"bricksize": self.bricksize,
"datatype": self.datatype,
"datarange": self.datarange,
"zunitdim": self.zunitdim,
"hunitdim": self.hunitdim,
"zunitname": self.zunitname,
"hunitname": self.hunitname,
"zunitfactor": self.zunitfactor,
"hunitfactor": self.hunitfactor,
"zstart": self.zstart,
"zinc": self.zinc,
"annotstart": self.annotstart,
"annotinc": self.annotinc,
"corners": self.corners,
}
@property
def numthreads(self):
"""
How many threads to use when reading. Currently ignored.
"""
return 1
@numthreads.setter
def numthreads(self, x):
print("Warning: numthreads is ignored.")
def dump(self, file=None):
file = file or sys.stdout
print("{", file=file)
for e in sorted(self.meta.items()):
value = '"'+e[1]+'"' if isinstance(e[1], str) else str(e[1])
print(' "{0}": {1},'.format(e[0], value), file=file)
print("}", file=file)
### New in OpenZGY ###
_statisticsType = namedtuple("Statistics", "cnt sum ssq min max")
@property
def statistics(self):
"""
Return the statistics stored in the file header as a named tuple.
NOTE, I might want to change this to another type if there is a
need to implement the same method in the ZGY-Public wrapper,
as it might be trickier to define a namedtuple there.
"""
return self._statisticsType(self._meta._ih._scnt,
self._meta._ih._ssum,
self._meta._ih._sssq,
self._meta._ih._smin,
self._meta._ih._smax)
_histogramType = namedtuple("Histogram", "cnt min max bin")
@property
def histogram(self):
"""
Return the statistics stored in the file header as a named tuple.
NOTE, I might want to change this to another type if there is a
need to implement the same method in the ZGY-Public wrapper,
as it might be trickier to define a namedtuple there.
"""
if not self._meta._hh: return None
return self._histogramType(self._meta._hh._cnt,
self._meta._hh._min,
self._meta._hh._max,
self._meta._hh._bin)
##@brief Base class shared between ZgyReader and ZgyWriter.
class ZgyMetaAndTools(ZgyMeta):
"""
Base class shared between ZgyReader and ZgyWriter.
Adds coordinate conversion tools.
"""
@staticmethod
def transform(A, B, data):
"""
Linear transformation of an array of double-precision coordinates.
The coordinate systems to convert between are defined by
three arbitrary points in the source system and the target.
Arguments: ((ax0,ay0), (ax1,ay1), (ax2,ay2)),
((bx0,by0), (bx1,by1), (bx2,by2)),
data
where data is a 2d array of size (length, 2)
"""
# performance note: In Python it can be far more efficient to
# build and cache 6 transformation matrices between index/annot/world
# and use those for the 6 transforms. But if we only transform
# a few values at a time anyway, or if we are planning to convert
# the accessor back to C++ fairly soon, this is a non-issue.
_internal.generalTransform(
A[0][0], A[0][1], A[1][0], A[1][1], A[2][0], A[2][1],
B[0][0], B[0][1], B[1][0], B[1][1], B[2][0], B[2][1],
data)
@staticmethod
def transform1(A, B, point):
data = [[point[0], point[1]]]
ZgyMetaAndTools.transform(A, B, data)
return tuple(data[0])
def annotToIndex(self, point):
"""Convert inline, crossline to ordinal"""
return self.transform1(self.annotcorners, self.indexcorners, point)
def annotToWorld(self, point):
"""Convert inline, crossline to world X,Y"""
return self.transform1(self.annotcorners, self.corners, point)
def indexToAnnot(self, point):
"""Convert ordinal to inline, crossline"""
return self.transform1(self.indexcorners, self.annotcorners, point)
def indexToWorld(self, point):
"""Convert ordinal to world X,Y"""
return self.transform1(self.indexcorners, self.corners, point)
def worldToAnnot(self, point):
"""Convert world X,Y to inline, crossline"""
return self.transform1(self.corners, self.annotcorners, point)
def worldToIndex(self, point):
"""Convert world X,Y to ordinal"""
return self.transform1(self.corners, self.indexcorners, point)
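# Illustrative sketch (not part of the original source): the six helpers can
# be chained, e.g.
#
#   with ZgyReader("in.zgy") as r:
#       world = r.annotToWorld(r.annotstart)   # first inline/crossline -> X,Y
#       ij = r.worldToIndex(world)             # back to ordinal (0.0, 0.0)
#
# Each helper is a thin wrapper around transform1() using the corner points
# stored in the file header (indexcorners, annotcorners, corners).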
##@brief Main entry point for reading ZGY files.
class ZgyReader(ZgyMetaAndTools):
"""
Main entry point for reading ZGY files.
Obtain a concrete instance by calling the constructor.
You can then use the instance to read both meta data and bulk data.
It is recommended to explicitly close the file when done with it.
"""
def __init__(self, filename, *, _update=False, iocontext=None):
# No "with" statement for the FileFactory, so we must remember
# to close it ourselves in our own __exit__.
self._fd = _internal.FileFactory(filename, ("r+b" if _update else "rb"), iocontext)
self._meta = _internal.ZgyInternalMeta(self._fd)
# self._meta._ih and friends will all be allocated.
# Prove that all the tests for "._ih is not None" are redundant.
self._meta._assert_all_headers_allocated()
# At the implementation level the bulk- and meta access are separate,
# and the bulk accessor needs some of the meta information to work.
self._accessor = _internal.ZgyInternalBulk(self._fd, self._meta)
# This causes an assignment to the parent's self._meta
# which in Python is a no-op but in C++ the parent might
# have its own _meta that we shadow here. Or not.
super().__init__(self._meta)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
# Note that if the block was exited due to an exception, and if we
# also get an exception from close, then it is the second one
# that gets caught in a try/catch block placed outside the "while".
# Callers will in this case often want to report just the first
# exception since the close() probably failed as a cause of it.
# Caller needs to do e.g. "ex2 = ex.__cause__ or ex.__context__".
# It is possible to instead suppress any errors from close if
# another exception is already pending. Simplifying the caller.
# But then the close() would not be available at all. Bad idea.
self.close()
##@brief Read bulk data into a caller specified buffer.
def read(self, start, data, *, lod = 0, verbose = None, zeroed_data = False):
"""
Read an arbitrary region of bulk data into a caller specified buffer.
The buffer's type must be float, short, or char. Any file may
be read as float. If the buffer is of type short or char then
the file must be of precisely that type.
Arguments: (i0,j0,k0), buffer, lod=0
"""
file_dtype = _internal.enum._map_DataTypeToNumpyType(self._meta._ih._datatype)
if (not isinstance(data, np.ndarray) or
data.dtype not in (np.float32, file_dtype) or
len(data.shape) != 3 or
not data.flags.writeable
):
raise ZgyUserError("Expected a writeable 3d numpy array of np.{0} or np.{1}".format(
np.dtype(np.float32).name, np.dtype(file_dtype).name))
if not self._accessor: raise ZgyUserError("ZGY file is not open for read.")
self._accessor.readToExistingBuffer(data, start, lod=lod,
as_float=(data.dtype==np.float32),
verbose=verbose, zeroed_result=zeroed_data)
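# Illustrative sketch (not part of the original source): read one brick as
# float regardless of the file's storage type.
#
#   buf = np.zeros((64, 64, 64), dtype=np.float32)
#   with ZgyReader("in.zgy") as reader:
#       reader.read((0, 0, 0), buf)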
##@brief Get hint about all constant region.
##@image html readconst-fig1.png
##@image latex readconst-fig1.png
def readconst(self, start, size, *, lod = 0, as_float = True, verbose = None):
"""
Get hint about all constant region.
Check to see if the specified region is known to have all samples
set to the same value. Returns that value, or None if it isn't.
The function only makes inexpensive checks so it might return
None even if the region was in fact constant. It will not make
the opposite mistake. This method is only intended as a hint
to improve performance.
For int8 and int16 files the caller may specify whether to scale
the values or not.
"""
if not self._accessor: raise ZgyUserError("ZGY file not open for read")
return self._accessor.readConstantValue(start, size, lod=lod,
as_float=as_float,
verbose=verbose)
def close(self):
if self._fd:
self._fd.xx_close()
self._fd = None
if self._accessor:
self._accessor = None # No other cleanup needed.
# Metadata remains accessible. Not sure whether this is a good idea.
##@brief Main API for creating ZGY files.
class ZgyWriter(ZgyMetaAndTools):
"""
Main API for creating ZGY files.
Obtain a concrete instance by calling the constructor.
All meta data is specified in the call to open(), so meta data
will appear to be read only. You can use the instance to write
bulk data. The file becomes read only once the instance is closed.
It is recommended to call finalize() and close() after all bulk
has been written. But if you forget then this will be done when
the writer goes out of scope, provided of course that you used a
"with" block.
"""
def __init__(self, filename, *,
iocontext=None, compressor=None, lodcompressor=None, **kwargs):
"""
Create a new ZGY file.
Optionally pass templatename = otherfile to create a new file
similar to otherfile. Explicit keyword arguments override
information from otherfile.
Optionally pass templatename = filename to erase all data blocks
from filename but keep the metadata. New data blocks can then
be written to the file. Petrel/BASE might need this feature,
due to the way it writes new files. They tend to get opened
several times to add meta information. Caveat: Behind the
scenes the file is simply deleted and re-created. This is
slightly less efficient than opening the file for read/write.
templatename: string
Optionally create a file similar to this one.
TODO-Low: In the future might also accept a ZgyReader instance.
This is convenient if a file is being copied, so as to not
needing to open it twice.
filename: string
The local or cloud file to create.
size: (int, int, int)
Number of inlines, crosslines, samples.
bricksize: (int, int, int)
Size of a single brick. Defaults to (64, 64, 64).
Please use the default unless you really know what
you are doing. In any case, each size needs to be
a power of 2.
datatype: SampleDataType
Specify int8, int16, or float.
datarange = (float, float)
Used only if datatype is integral, to convert from storage to
actual sample values. The lowest possible storage value, i.e.
-128 or -32768, maps to datarange[0] and the highest possible
storage value maps to datarange[1].
zunitdim: UnitDimension. time, length, or unknown.
zunitname: string
zunitfactor: float
Describe how to convert between storage units and SI units
in the vertical direction. Petrel ignores these settings and
prompts the user.
hunitdim: UnitDimension. length, arcangle, or unknown.
hunitname: string
hunitfactor: float
Describe how to convert between storage units and SI units
in the horizontal direction. Most applications cannot handle
arcangle. Petrel ignores these settings and prompts the user.
zstart: float
The time or depth corresponding to the shallowest sample.
zinc: float
The vertical time or depth distance between neighboring samples.
annotstart: (float, float)
The inline and crossline numbers corresponding to the ordinal
position (0, 0) i.e. the first sample on the file.
annotinc: (float, float)
The inline / crossline step between neighboring samples.
The samples at ordinal (1, 1) will have annotation
annotstart + annotinc.
corners: (float, float)[4]
World coordinates of each corner, order as
First inline / first crossline,
last inline / first crossline,
first inline / last crossline,
last inline / last crossline.
compressor, lodcompressor: callable
If set, attempt to compress each block with this callable.
Typically this should be a lambda or a class, because it
needs to capture the compression parameters.
Example:
compressor = ZgyCompressFactory("ZFP", snr = 30)
If different compression parameters are desired for
full- and low resolution bricks then lodcompressor can
be provided as well. It defaults to compressor. Using
two different instances, even if the parameters match,
may also cause statistics to be reported separately
for fullres and lowres.
TODO-Low: Future: passing zfp_compressor = snr is equivalent
to compressor = ZgyCompressFactory("ZFP", snr = snr).
Unlike the compressor keyword this also works in the wrapper.
"""
# The following arguments are not passed on to _create:
# - templatename is handled locally, using the template for defaults.
# - iocontext is only made available to the FileADT layer
# - compressor is explicitly passed as an argument to those
# functions (write, writeconst, finalize) that need it.
if "templatename" in kwargs:
with ZgyReader(kwargs["templatename"]) as t:
for k, v in t.meta.items():
if not k in kwargs:
kwargs[k] = v
del kwargs["templatename"]
# Compressing a file as integer is not useful, it just adds more noise
# at least as long as we are talking about seismic. Even if we allowed
# int8 or int16 here the code in impl_zfp_compress will currently
# use the ZFP float interface.
#
# Flagging as int8 or int16 would save memory at the cost of adding
# even more noise. Converting the decompressed data to integral before
# returning it as that type. But if the applicaton wants this then
# it can easily convert the result itself.
#
# There are other subtle issues with int8 / int16. Even with enabled
# compression, individual bricks are allowed to be stored uncompressed
# if neither lossy nor lossless compression works as desired. The
# declared value type then controls how these blocks are stored.
# Storing those (presumably very few) blocks as integral means
# more that can go wrong and more unit tests. And keep in mind
# that float->int8 and float->int16 are also a form of compression
# (4x and 2x respectively) but is a lot noisier than ZFP at the same
# compression factor.
#
# We may need to revisit this if switching to another compression
# algorithm where integral compression works better.
if compressor and kwargs.get("datatype", SampleDataType.float) != SampleDataType.float:
raise ZgyUserError("Compressed files need to be stored as float.")
# After this, self._meta._ih and friends exists but will be None.
self._meta = _internal.ZgyInternalMeta(None)
# This causes an assignment to the parent's self._meta
# which in Python is a no-op but in C++ the parent might
# have its own _meta that we shadow here. Or not.
super().__init__(self._meta)
self._create(filename, compressed = bool(compressor or lodcompressor), **kwargs)
# Now self._meta._ih and friends will all be allocated.
# Prove that all the tests for "._ih is not None" are redundant.
self._meta._assert_all_headers_allocated()
# The file creation was deferred until after the consistency checks.
# No "with" statement for the FileFactory, so we must remember
to close it ourselves in our own __exit__.
self._fd = _internal.FileFactory(filename, "w+b", iocontext)
self._meta._flush_meta(self._fd)
# The accessor needs to know whether we will do compression or not,
# because this determines whether bricks will be written aligned
# and possibly whether updates are allowed. The actual
# compression algorithm is passed on each write etc.
# TODO-Low consider storing the compressor as context of the accessor
# instead. Less precise control though. We might want a different
# snr on the low resolution bricks.
self._accessor = _internal.ZgyInternalBulk(
self._fd, self._meta,
compressed = bool(compressor or lodcompressor))
self._dirty = False # If True need LOD, stats, histogram.
self._compressor = compressor or lodcompressor
self._lodcompressor = lodcompressor or compressor
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
# Note that if the block was exited due to an exception, and if we
# also get an exception from close, then it is the second one
# that gets caught in a try/catch block placed outside the "while".
# Callers will in this case often want to report just the first
# exception since the close() probably failed as a cause of it.
# Caller needs to do e.g. "ex2 = ex.__cause__ or ex.__context__".
# This is mitigated by the close() method skipping work and/or
# suppressing exceptions if the file has already been flagged bad.
self.close()
def _create(self, filename, *, size = None, compressed = False,
bricksize = None,
datatype = SampleDataType.float, datarange = None,
zunitdim = UnitDimension.unknown,
hunitdim = UnitDimension.unknown,
zunitname = None, hunitname = None,
zunitfactor = 0.0, hunitfactor = 0.0, zstart = 0.0, zinc = 0.0,
annotstart = (0, 0), annotinc = (0, 0),
corners = ((0,0),(0,0),(0,0),(0,0))):
"""
Called from __init__. Do not call directly.
The user should use a "using" statement when creating the reader.
Datatype can be "int8", "int16", "float".
Dimension can be "time", "length", or "arcangle".
"""
self._meta._init_from_scratch(filename = filename,
size = size,
compressed = compressed, # If True this is V4.
bricksize = bricksize,
datatype = _map_SampleDataTypeToDataType(datatype),
datarange = datarange,
zunitdim = _map_UnitDimensionToVerticalDimension(zunitdim),
hunitdim = _map_UnitDimensionToHorizontalDimension(hunitdim),
zunitname = zunitname,
hunitname = hunitname,
zunitfactor = zunitfactor,
hunitfactor = hunitfactor,
zstart = zstart,
zinc = zinc,
annotstart = annotstart,
annotinc = annotinc,
corners = corners)
##@brief Write an arbitrary region.
def write(self, start, data, *, verbose = None):
"""
Write bulk data. Type must be np.float32, np.int16, or np.int8.
np.float32 may be written to any file and will be converted
if needed before storing. Writing an integral type implies that
values are to be written without conversion. In that case the
type of the buffer must match exactly the file's storage type.
You cannot write int8 data to an int16 file or vice versa.
Arguments:
start: tuple(i0,j0,k0) where to start writing.
data: np.ndarray of np.int8, np.int16, or np.float32
"""
file_dtype = _internal.enum._map_DataTypeToNumpyType(self._meta._ih._datatype)
if (not isinstance(data, np.ndarray) or
data.dtype not in (np.float32, file_dtype) or
len(data.shape) != 3
):
raise ZgyUserError("Expected a 3d numpy array of np.{0} or np.{1}".format(
np.dtype(np.float32).name, np.dtype(file_dtype).name))
if not self._accessor: raise ZgyUserError("ZGY file is not open.")
if self._accessor._is_bad or self._meta._is_bad:
raise ZgyCorruptedFile("Cannot continue due to previous errors.")
self._accessor._writeRegion(data, start, lod=0,
compressor=self._compressor,
is_storage=(data.dtype != np.float32),
verbose=verbose)
self._dirty = True
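# Illustrative sketch (not part of the original source): create a small float
# file and write one region.
#
#   with ZgyWriter("out.zgy", size=(64, 64, 128)) as writer:
#       data = np.full((64, 64, 128), 1.5, dtype=np.float32)
#       writer.write((0, 0, 0), data)
#       writer.finalize()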
##@brief Write a single value to a region of the file.
def writeconst(self, start, value, size, is_storage, *, verbose = None):
"""
Write a single value to a region of the file.
This is equivalent to creating a constant-value array with np.full()
and write that. But this version might be considerably faster.
If is_storage is false and the input value cannot be converted to
storage values due to being outside range after conversion then
the normal rules (use closest valid value) apply. If
is_storage is True then an error is raised if the supplied value
cannot be represented.
Arguments:
start: tuple(i0,j0,k0) where to start writing.
size: tuple(ni,nj,nk) size of region to write.
value: Scalar to be written.
is_storage: False if the value shall be converted to storage
True if it is already storage and should be written
unchanged. Ignored if the storage type is float.
"""
if self._meta._ih._datatype == _internal.enum.RawDataType.Float32:
dtype = np.float32 # "Convert" to user now. Actually a no-op.
is_storage = False
elif not is_storage:
dtype = np.float32 # Force conversion.
elif self._meta._ih._datatype == _internal.enum.RawDataType.SignedInt16:
dtype = np.int16
elif self._meta._ih._datatype == _internal.enum.RawDataType.SignedInt8:
dtype = np.int8
else:
raise ZgyFormatError("Unrecognized datatype on file")
if np.issubdtype(dtype, np.integer) and not np.isfinite(value):
raise ZgyUserError("Cannot store {0} in a {1}".format(value, np.dtype(dtype)))
self._accessor._writeRegion(_internal.ScalarBuffer(size, value, dtype),
start, lod=0,
compressor=self._compressor,
is_storage=is_storage,
verbose=verbose)
self._dirty = True
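# Usage sketch (hypothetical values, assuming "writer" is an open ZgyWriter):
# establish a default value for a region without allocating a full
# np.full() buffer.
#
#   writer.writeconst((0, 0, 0), value=0.0, size=(64, 64, 64),
#                     is_storage=False)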
def finalize(self, *, decimation=None, progress=None, force=False, verbose=None):
"""
Generate low resolution data, statistics, and histogram.
This will be called automatically from close(), but in
that case it is not possible to request a progress callback.
If the processing raises an exception the data is still marked
as clean. The caller can force a retry by passing force=True.
Arguments:
decimation: Optionally override the decimation algorithms by
passing an array of impl.lodalgo.DecimationType
with one entry for each level of detail. If the
array is too short then the last entry is used for
subsequent levels.
TODO-Low: The expected enum type is technically internal
and ought to have been mapped to an enum api.XXX.
progress: Function(done, total) called to report progress.
If it returns False the computation is aborted.
Will be called at least once, even if there is no work.
force: If true, generate the low resolution data even if
it appears to not be needed. Use with caution.
Especially if writing to the cloud, where data
should only be written once.
verbose: optional function to print diagnostic information.
"""
if self._dirty or force:
self._dirty = False
stats, histo = _internal.GenLodC(
accessor = self._accessor,
compressor = self._lodcompressor,
decimation = decimation,
progress = progress,
verbose = verbose)()
# TODO-Low: Refactor:
# violating encapsulation rather more than usual.
# Note that _accessor._metadata is private; it is a copy
# or actually a reference letting ZgyInternalBulk use
# some parts of the metadata.
(a, b) = self._accessor._scaleDataFactorsStorageToFloat()
stats.scale(a, b)
histo.scale(a, b)
histo.resize(256)
self._meta._ih._scnt = stats._cnt
self._meta._ih._ssum = stats._sum
self._meta._ih._sssq = stats._ssq
self._meta._ih._smin = stats._min
self._meta._ih._smax = stats._max
self._meta._hh._min = histo.vv_range[0]
self._meta._hh._max = histo.vv_range[1]
self._meta._hh._bin = histo.bins
self._meta._hh._cnt = np.sum(histo.bins)
else:
if progress:
progress(0, 0)
# For debugging and measurements only.
if False:
if self._compressor:
self._compressor.dump(msg="Compress")
if self._lodcompressor and self._lodcompressor is not self._compressor:
self._lodcompressor.dump(msg="LOD_data")
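# Usage sketch (hypothetical, assuming "writer" is an open ZgyWriter):
# finalize explicitly to get a progress callback, then close.
#
#   writer.finalize(progress=ProgressWithDots())
#   writer.close()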
# TODO-Low: Refactor: the accessor should logically have a close(),
# shouldn't it? And maybe I shouldn't set self._fd here.
# Allowing the accessor to manage it.
def close(self):
"""
Close the currently open file.
Failure to close the file will corrupt it.
"""
if self._fd:
# Both the meta accessor and the bulk accessor has an _is_bad
# flag that is set if we got an error while writing to the file.
# If set then don't bother with statistics, histogram, and lowres.
# The file will probably just be discarded anyway.
if not (self._meta._is_bad or self._accessor._is_bad):
self.finalize()
# Flushing metadata is a bit more ambiguous. Parts of the file
# might still be salvageable, so especially when testing we might
# still want to flush a file marked as bad. But, ignore any
# secondary errors as the user already knows something is wrong.
if not (self._meta._is_bad or self._accessor._is_bad):
self._meta._flush_meta(self._fd)
else:
try:
self._meta._is_bad = False
self._accessor._is_bad = False
self._meta._flush_meta(self._fd)
except Exception:
pass
finally:
self._meta._is_bad = True
self._accessor._is_bad = True
# TODO-Low it would be more consistent if the final write was
# of the updated headers, in case the app crashes before
# xx_close. This is true for local file access. But for
# cloud access the last bulk data segment might be written
# on xx_close(). Difficult to change without complicating
# the internal FileADT api.
# Closing the local or cloud handle is always needed as there
# might be resources that need to be cleaned up.
self._fd.xx_close()
self._fd = None
# The client code is strongly advised to delete the file if it was
# opened for create. OpenZGY might have deleted the file itself but
# this is probably too harsh. A file opened for update (which is not
# actually supported yet) might still be usable. So in the future we
# may need an additional flag telling whether the writer has ever
# had any successful writes to the file. If not then the file is
# still good. Note that the suggestions below need more work.
# TODO-Low: Later: this is in the bells & whistles category.
#if not self._precious_set_on_open:
# self._fd.xx_delete_on_close(); self._fd.xx_close()
# self._fd.xx_close_of_needed_and_delete()
# ZgyUtils(saved_iocontext).delete(self._filename)
##@cond IMPL
@property
def errorflag(self):
"""
If true this means that an error happened during a critical operation
such as a write. This means the file being written is probably bad.
It is recommended to close and delete the file. In the future
OpenZGY might even do that automatically.
"""
return self._meta._is_bad or self._accessor._is_bad
@errorflag.setter
def errorflag(self, value):
"""
Set or reset the flag indicating that the file got corrupted.
"""
self._meta._is_bad = value
self._accessor._is_bad = value
##@endcond
##@brief Simple progress bar.
class ProgressWithDots:
"""
Progress bar that writes dots (51 by default) to standard error.
This can be used as-is for simple command line apps, or you can use
the source code as an example on how to write your own.
The default of 51 dots will print one dot at startup and then one
additional dot for each 2% work done.
If you are using this to write to the cloud a file that is smaller
than ~10 GB then the progress bar will probably move in larger
jumps, because writing to a cloud back-end uses very large buffers.
Most cloud back-ends cannot report progress inside a "write block".
When passing a progress reporter to a function, make sure you do not
pass the class itself. You need to create an instance of it.
"""
def __init__(self, length=51, outfile=sys.stderr):
self._dots_printed = 0
self._length = length
self._outfile = outfile
def __call__(self, done, total):
#print("Progress: {0}/{1}".format(done, total))
if self._dots_printed == 0:
print("[" + (" " * self._length) + "]\r[", end='', file=self._outfile, flush=True)
needed = 1 if not total else 1 + ((done * (self._length-1)) // total)
if needed > self._dots_printed:
print("." * (needed - self._dots_printed),
file=self._outfile, flush=True, end='')
self._dots_printed = needed
if done == total:
print("", file=self._outfile)
return True
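# Usage sketch: remember to pass an instance, not the class itself.
#
#   p = ProgressWithDots()
#   p(0, 100)    # prints the frame and the first dot
#   p(100, 100)  # fills the bar and terminates the line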
##@brief Operations other than read and write.
class ZgyUtils:
"""
Operations other than read and write.
Any operations that don't fit into ZgyReader or ZgyWriter go here,
such as deleting a file, or any other operation that does not need
the file to be open first.
"""
##@brief Create a new concrete instance of ZgyUtils.
def __init__(self, iocontext=None):
"""
Create a new concrete instance of ZgyUtils.
The reason you need to supply a file name or a file name prefix is that
you need to provide enough information to identify the back-end that
this instance will be bound to. So if you have registered a back-end
named "xx", both "xx://some/bogus/file.zgy" and just "xx://" will
produce an instance that works for your XX backend.
For performance reasons you should consider caching one ZgyUtils
instance for each back end you will be using, instead of creating
a new one each time you want to invoke a method. Just remember that
most operations need an instance created with the same prefix.
"""
self._iocontext = iocontext
##@brief Delete a file. Works both for local and cloud files.
def delete(self, filename):
"""
Delete a file. Works both for local and cloud files.
Note that the instance must be of the correct (local or cloud) type.
"""
with _internal.FileFactory(filename, "d", self._iocontext) as f:
f.xx_close()
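# Usage sketch (the file name is hypothetical):
#
#   ZgyUtils().delete("oldfile.zgy")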
def ZgyCompressFactory(name, *args, **kwargs):
"""
Look up a compression algorithm by name and instantiate a compressor,
passing the required compression parameters. Using this approach
reduces the coupling between client code and the compressor.
"""
return _internal.CompressFactoryImpl.factory(name, *args, **kwargs)
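# Usage sketch, matching the example in the registerCompressor docs;
# the snr value shown is just a plausible choice.
#
#   compressor = ZgyCompressFactory("ZFP", snr=30)
#   # ZgyWriter(filename, ..., compressor=compressor)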
def ZgyKnownCompressors():
"""
Return the names of all compressors known to the system.
This is primarily for logging, but might in principle be used
in a GUI to present a list of compressors to choose from.
The problem with that is how to handle the argument list.
"""
return _internal.CompressFactoryImpl.knownCompressors()
def ZgyKnownDecompressors():
"""
Return the names of all decompressors known to the system.
This is primarily for logging.
"""
return _internal.CompressFactoryImpl.knownDecompressors()
#############################################################################
### Define enums used in the public API. These are separate from the ###
### enums used inside ZGY files, to improve isolation. ###
#############################################################################
def _map_DataTypeToSampleDataType(e):
return _mapEnum(e, {
_internal.enum.RawDataType.SignedInt8: SampleDataType.int8,
_internal.enum.RawDataType.SignedInt16: SampleDataType.int16,
_internal.enum.RawDataType.Float32: SampleDataType.float,
None: SampleDataType.unknown,
})
def _map_SampleDataTypeToDataType(e):
return _mapEnum(e, {
SampleDataType.int8: _internal.enum.RawDataType.SignedInt8,
SampleDataType.int16: _internal.enum.RawDataType.SignedInt16,
SampleDataType.float: _internal.enum.RawDataType.Float32,
})
def _map_HorizontalDimensionToUnitDimension(e):
return _mapEnum(e, {
_internal.enum.RawHorizontalDimension.Length: UnitDimension.length,
_internal.enum.RawHorizontalDimension.ArcAngle: UnitDimension.arcangle,
None: UnitDimension.unknown,
})
def _map_VerticalDimensionToUnitDimension(e):
return _mapEnum(e, {
_internal.enum.RawVerticalDimension.Depth: UnitDimension.length,
_internal.enum.RawVerticalDimension.SeismicTWT: UnitDimension.time,
_internal.enum.RawVerticalDimension.SeismicOWT: UnitDimension.time,
None: UnitDimension.unknown,
})
def _map_UnitDimensionToHorizontalDimension(e):
return _mapEnum(e, {
UnitDimension.length: _internal.enum.RawHorizontalDimension.Length,
UnitDimension.arcangle: _internal.enum.RawHorizontalDimension.ArcAngle,
UnitDimension.unknown: _internal.enum.RawHorizontalDimension.Unknown,
})
def _map_UnitDimensionToVerticalDimension(e):
return _mapEnum(e, {
UnitDimension.time: _internal.enum.RawVerticalDimension.SeismicTWT,
UnitDimension.length: _internal.enum.RawVerticalDimension.Depth,
UnitDimension.unknown: _internal.enum.RawVerticalDimension.Unknown,
})
def _mapEnum(e, lookup):
"""
Internal method to map between impl.enum tags used in the file format
and those used in the API, to better isolate the API from changes.
An unrecognized tag when mapping from file format to api is
usually treated as a warning. The application might be able to
handle it. In the lookup table, if there is an entry with key None
then this is considered to be the default value. If there is no
such entry then we are expected to raise an exception on error.
When mapping in the other direction the value came
from user code so raising an exception is usually warranted.
"""
if e in lookup: return lookup[e]
if None in lookup: return lookup[None]
valid = ", ".join([str(e) for e in sorted(lookup, key = lambda x: x.value)])
raise ZgyUserError("Value " + str(e) + " not accepted." +
" It should be one of " + valid + ".")
if __name__ == "__main__":
help(ZgyReader)
help(ZgyWriter)
# Copyright 2017-2021, Schlumberger
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | zgyio | /zgyio-0.0.4.tar.gz/zgyio-0.0.4/openzgy/api.py | api.py |
##@package openzgy.exception
#@brief Exceptions that may be raised by OpenZGY.
##@brief Base class for all exceptions thrown by %OpenZGY.
class ZgyError(Exception):
"""
Base class for all exceptions thrown by %OpenZGY.
"""
pass
##@brief Corrupted or unsupported ZGY file.
class ZgyFormatError(ZgyError):
"""
Corrupted or unsupported ZGY file.
In some cases a corrupted file might lead to a ZgyInternalError
or ZgyEndOfFile being thrown instead of this one. Because it isn't
always easy to figure out the root cause.
"""
pass
##@brief The ZGY file became corrupted while writing to it.
class ZgyCorruptedFile(ZgyError):
"""
The ZGY file became corrupted while writing to it.
No further writes are allowed on this file because a previous write
raised an exception and we don't know the file's state. Subsequent
writes will also throw this exception.
The safe approach is to assume that the error caused the file to
become corrupted. It is recommended that the application closes and
deletes the file.
"""
pass
##@brief Exception that might be caused by the calling application.
class ZgyUserError(ZgyError):
"""
Exception that might be caused by the calling application.
Determining whether a problem is the fault of the calling application
or the %OpenZGY library itself can be guesswork. Application code
might choose to treat ZgyUserError and ZgyInternalError the same way.
"""
pass
##@brief Exception that might be caused by a bug in %OpenZGY.
class ZgyInternalError(ZgyError):
"""
Exception that might be caused by a bug in %OpenZGY.
Determining whether a problem is the fault of the calling application
or the %OpenZGY library itself can be guesswork. Application code
might choose to treat ZgyUserError and ZgyInternalError the same way.
A corrupt file might also be reported as ZgyInternalError instead of
the more appropriate ZgyFormatError.
"""
pass
##@brief Trying to read past EOF.
class ZgyEndOfFile(ZgyError):
"""
Trying to read past EOF.
This is always considered an error, and is often due to a corrupted
ZGY file. So this error should probably be treated as a ZgyFormatError.
"""
pass
##@brief Exception used internally to request a retry.
class ZgySegmentIsClosed(ZgyError):
"""
Exception used internally to request a retry.
A write to the cloud failed because the region that was attempted
written had already been flushed. And the cloud back-end does not
allow writing it again. The calling code, still inside the OpenZGY
library, should be able to catch and recover from this problem.
"""
pass
##@brief User aborted the operation.
class ZgyAborted(ZgyError):
"""
User aborted the operation.
If the user supplied a progress callback and this callback returned
false then the operation in progress will end by throwing this
exception, which means that this is not an error; it is a consequence
of the abort.
"""
pass
##@brief Missing feature.
class ZgyMissingFeature(ZgyError):
"""
Missing feature.
Raised if some optional plug-in (e.g. some cloud back end or a
compressor) was loaded or explicitly requested, so we know about
it, but the plug-in is not operational for some reason.
"""
pass
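# Usage sketch (hypothetical file name, assuming ZgyReader is imported from
# the api module): every OpenZGY exception derives from ZgyError, so
# application code can catch the base class.
#
#   try:
#       with ZgyReader("maybe-corrupt.zgy") as reader:
#           pass
#   except ZgyError as ex:
#       print("OpenZGY error:", ex)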
# Copyright 2017-2020, Schlumberger
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | zgyio | /zgyio-0.0.4.tar.gz/zgyio-0.0.4/openzgy/exception.py | exception.py |
##@package openzgy.zgypublic
#@brief Consolidate old and new Python API.
import zgy
from .api import SampleDataType, UnitDimension
def _fixKeywordArgs(kwargs):
"""
The new API uses real enums instead of stringly typed values.
The names of each tag matches, so the conversion is trivial.
Also handles "iocontext" and "sourcetype" although those two
are not in either interface yet.
"""
result = {}
for key, value in kwargs.items():
if key in ("zunitdim", "hunitdim", "datatype", "sourcetype"):
result[key] = value.name
elif key in ("iocontext", "verbose"):
pass
else:
result[key] = value
return result
def _fixResultData(kwdata):
"""
Inverse of _fixKeywordArgs. Convert strings to enums.
"""
result = {}
for key, value in kwdata.items():
if key in ("datatype", "sourcetype"):
result[key] = SampleDataType[value]
elif key in ("zunitdim", "hunitdim"):
result[key] = UnitDimension[value]
else:
result[key] = value
return result
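# Sketch of the round trip performed by the two helpers above:
#
#   _fixKeywordArgs({"datatype": SampleDataType.float})
#   #   -> {"datatype": "float"}
#   _fixResultData({"datatype": "float"})
#   #   -> {"datatype": SampleDataType.float}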
# Note, using mostly Doxygen style annotation because for this file
# it isn't really useful to have help inside Python.
class ZgyReader(zgy.ZgyReader):
##@brief Ignore iocontext and verbose arguments.
def __init__(self, *args, **kwargs):
return super().__init__(*args, **_fixKeywordArgs(kwargs))
##@brief Ignore iocontext and verbose arguments.
def read(self, *args, **kwargs):
return super().read(*args, **_fixKeywordArgs(kwargs))
##@brief Old: strings, new: enums for 4 properties.
#@details datatype, sourcetype, zunitdim, hunitdim need to be mapped.
@property
def meta(self):
return _fixResultData(super().meta)
##@brief Old: string, new: openzgy.api.SampleDataType.
@property
def datatype(self):
return SampleDataType[super().datatype]
##@brief Attribute doesn't exist in old api
@property
def raw_datarange(self):
return super().datarange
##@brief Old: string, new: openzgy.api.UnitDimension.
@property
def zunitdim(self):
return UnitDimension[super().zunitdim]
##@brief Old: string, new: openzgy.api.UnitDimension.
@property
def hunitdim(self):
return UnitDimension[super().hunitdim]
##@brief Old: missing from api.
#@details For the old API the bricksize will always be returned as
#(64,64,64) on read and always set to that value on file create.
@property
def bricksize(self):
return (64, 64, 64)
class ZgyWriter(zgy.ZgyWriter):
##@brief Ignore iocontext and verbose, and map 4 enums to string.
def __init__(self, *args, **kwargs):
return super().__init__(*args, **_fixKeywordArgs(kwargs))
##@brief Ignore iocontext and verbose arguments.
def write(self, *args, **kwargs):
return super().write(*args, **_fixKeywordArgs(kwargs))
##@brief Ignore and let _\_exit_\_ handle it instead.
def finalize(*args, **kwargs):
return None
##@brief Old: strings, new: enums for 4 properties.
#@details datatype, sourcetype, zunitdim, hunitdim need to be mapped.
@property
def meta(self):
return _fixResultData(super().meta)
##@brief Old: string, new: openzgy.api.SampleDataType.
@property
def datatype(self):
return SampleDataType[super().datatype]
##@brief Attribute doesn't exist in old api
@property
def raw_datarange(self):
return super().datarange
##@brief Old: string, new: openzgy.api.UnitDimension.
@property
def zunitdim(self):
return UnitDimension[super().zunitdim]
##@brief Old: string, new: openzgy.api.UnitDimension.
@property
def hunitdim(self):
return UnitDimension[super().hunitdim]
##@brief Old: missing from api.
#@details For the old API the bricksize will always be returned as
#(64,64,64) on read and always set to that value on file create.
@property
def bricksize(self):
return (64, 64, 64)
# Copyright 2017-2020, Schlumberger
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | zgyio | /zgyio-0.0.4.tar.gz/zgyio-0.0.4/openzgy/zgypublic.py | zgypublic.py |
##@package openzgy.impl.histogram
import numpy as np
class HistogramData:
def __init__(self, range_hint=None, dtype=np.float32):
self._hmin, self._hmax = self._suggestHistogramRange(range_hint, dtype)
self._dtype = dtype
self._size = 256
# Uncomment the next lines to enable the temporary large histogram.
# One problem is that the histogram may lose the zero centric
# property that the user specified range might have, making the
# result look bad.
#if np.issubdtype(dtype, np.integer):
# self._size = max(256, min(65536, self._hmax - self._hmin + 1))
self._bins = np.zeros(self._size, dtype=np.int64)
if False:
print("@ Histogram ", self._hmin, self._hmax, "for data", range_hint)
@staticmethod
def _suggestHistogramRange(range_hint, dtype):
"""
Choose the histogram range to use.
The histogram range is normally set to the coding range for
integral data and the actual data range for float data.
This method takes care of corner cases and (future) subtle
tweaks such as making the range zero centric.
"""
# This logic in C++ is found in GenLodImpl::suggestHistogramRange()
# and has a much more detailed explanation of what is going on.
bogus = (-128, +127)
if np.issubdtype(dtype, np.integer):
# The histogram is built from storage values so its maximum
# possible range is already known. For int8 there will be one
# bin for every possible value. For int16 we might do the same,
# temporarily producing a histogram with 65,536 values and then
# whittling it down to 256 bins before storing it. But for now
# just map to the user provided coding range and hope that the
# user didn't decide to use just a small part of the available
# integer storage values.
return (np.iinfo(dtype).min, np.iinfo(dtype).max)
else:
# Choose histogram range based on the min/max value collected
# while writing lod 0. Always end up with a sane interval with
# min < max to avoid problems generating the histogram and also
# for applications reading the file later. For completely empty
# files just use (-1,+1) which is as good a default as any.
if (not range_hint or
not np.isfinite(range_hint[0]) or
not np.isfinite(range_hint[1]) or
range_hint[0] > range_hint[1]):
return bogus # nothing written or error.
elif range_hint[0] < range_hint[1]:
# This is the normal case for floating point data.
# Don't return numpy types. They have weird rules.
return (float(range_hint[0]), float(range_hint[1]))
elif range_hint[0] > 0: # At this point, hint[0] == hint[1]
return (0, range_hint[0]) # single positive value
elif range_hint[0] < 0:
return (range_hint[0], 0) # single negative value
else:
return bogus # all zero
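# Sketch of the range selection above:
#
#   HistogramData._suggestHistogramRange((-2.5, 7.0), np.float32)
#   #   -> (-2.5, 7.0)   normal float case, use the data range
#   HistogramData._suggestHistogramRange(None, np.int8)
#   #   -> (-128, 127)   full range of the integral storage type
#   HistogramData._suggestHistogramRange((5.0, 5.0), np.float32)
#   #   -> (0, 5.0)      single positive value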
def _histogram_data(self, data):
if not np.issubdtype(self._dtype, np.integer):
# numpy.histogram is documented to ignore values outside range.
# Handling of NaN is undocumented and currently reports errors
# from low level code. So, map NaN to +Inf just in case.
# TODO-Performance: This is not really a good idea.
data = np.copy(data)
data[np.isnan(data)] = np.inf
return np.histogram(data, bins=self._size, range=self.np_range)[0]
def add(self, data, factor = 1):
tmp = self._histogram_data(data)
if factor != 1:
tmp *= factor
self._bins += tmp
def scale(self, a, b):
self._hmin = a * self._hmin + b
self._hmax = a * self._hmax + b
def resize(self, newsize):
binwidth = (self._hmax - self._hmin) / (self._size - 1)
oldbins = self._bins
oldsize = self._size
self._size = newsize
self._bins = np.zeros(self._size, dtype=np.int64)
if np.count_nonzero(oldbins) == 0:
return
if newsize >= oldsize:
self._bins[:oldsize] = oldbins
self._hmax = self._hmin + binwidth * (self._size - 1)
return
skiplo = np.argmax(oldbins[::1] != 0)
skiphi = np.argmax(oldbins[::-1] != 0)
factor = max(1, (oldsize-skiplo-skiphi + (newsize-1)) // (newsize-2))
factor = ((factor // 2) * 2) + 1 # Round up to make it odd.
# Very minor issue: I reserve the first and last bin to hold
# data from the misaligned part. If everything ends up aligned
# those two end up unused. I am absolutely sure no one will
# notice. *except possibly* when running unit tests.
# Adjust skiplo and skiphi upwards so that (a) neither moves
# more than "factor", (b) neither becomes negative, (c) the
# remaining size - skiphi - skiplo is a multiple of "factor",
# and (d) any zero-centric property is preserved by making
# sure the "zero" bin in the input ends up in the middle of
# one of the output bins. The last one is where it gets really
# tricky and TODO-High must be implemented. Or YAGNI, remove
# the capability to resize.
# Combine "factor" input bins into each output bin
center_count = ((oldsize-skiphi-skiplo)//factor)*factor
skiphi = oldsize - skiplo - center_count
partial = np.sum(oldbins[skiplo:oldsize-skiphi].reshape(-1,factor),axis=1)
# Mop up the ends that might have fewer than "factor" entries.
head = np.sum(oldbins[:skiplo])
tail = np.sum(oldbins[oldsize-skiphi:])
self._bins[1:(center_count//factor)+1] = partial
self._bins[0] = head
self._bins[(center_count//factor)+1] = tail
# The new binwidth must be binwidth*factor.
# The new bin[1] corresponds to old bin[skiplo], so new bin[0]
# must be new binwidth less than that.
self._hmin = (self._hmin + binwidth * skiplo) - (binwidth*factor)
self._hmax = self._hmin + (binwidth*factor) * (self._size-1)
@property
def bins(self):
return self._bins
@property
def vv_range(self):
"""
Histogram range, voxelvision and zgy style, with numbers
representing the center value of the first and last bin.
"""
return (self._hmin, self._hmax)
@property
def np_range(self):
"""
Histogram range, numpy and salmon style, with numbers
representing the edges of the first and last bin.
"""
binwidth = (self._hmax - self._hmin) / (self._size - 1)
return (self._hmin - binwidth/2, self._hmax + binwidth/2)
def binvalue(self, bin_number):
"""
Convert a single bin number to the center value of this bin.
Note that in ZGY this will refer to storage values, so you
may need to explicitly convert the result.
"""
binwidth = (self._hmax - self._hmin) / (self._size - 1)
return self._hmin + bin_number * binwidth
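# Usage sketch (hypothetical data):
#
#   h = HistogramData(range_hint=(0.0, 1.0), dtype=np.float32)
#   h.add(np.linspace(0.0, 1.0, 100, dtype=np.float32))
#   h.vv_range      # -> (0.0, 1.0), centers of first and last bin
#   h.binvalue(0)   # -> 0.0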
# Copyright 2017-2020, Schlumberger
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | zgyio | /zgyio-0.0.4.tar.gz/zgyio-0.0.4/openzgy/impl/histogram.py | histogram.py |
##@package openzgy.impl.compress
import math
import sys
import numpy as np
from collections import namedtuple
from ..impl import enum as impl_enum
from ..exception import *
CompressionResult = namedtuple("Compression", "cdata csize isize signal noise ctime dtime snr_result snr_wanted snr_rounded snr_step")
CompressionResult.__doc__ = """The result returned from _xxx_try_one() functions.
The first member, cdata, is the one that really matters.
The snr_xxx members may be useful for goal seek.
All members except cdata may be useful logging and testing.
"""
CompressionResult.cdata.__doc__ = "bytes-like compressed data"
CompressionResult.csize.__doc__ = "Size in bytes of the compressed data"
CompressionResult.isize.__doc__ = "Size in bytes of the original input data"
CompressionResult.signal.__doc__ = "Sum of all sample values in the input"
CompressionResult.noise.__doc__ = "Sum of all absolute errors in round trip"
CompressionResult.ctime.__doc__ = "Time in seconds used for compression"
CompressionResult.dtime.__doc__ = "Time in seconds used for decompression"
CompressionResult.snr_result.__doc__ = "The SNR that was achieved"
CompressionResult.snr_wanted.__doc__ = "The SNR that was requested by the user"
CompressionResult.snr_rounded.__doc__ = "The SNR that was requested from the algorithm"
CompressionResult.snr_step.__doc__ = "Distance to next or previous quality level"
class CompressPlugin:
"""
Base class for OpenZGY compression plug-ins.
If anybody wants to add additional compression algorithms it
is recommended but not required to use this base class. See
CompressFactoryImpl.register{Compressor,Decompressor} for how to
use plain functors (C++) or callables (Python) instead.
This class performs triple duty as it handles both compression
and decompression static methods (need not have been together)
and an instance of the class can be used a compressor functor
if a lambda is too limiting. To invoke the methods:
MyCompressPlugin.factory(...)(data)
MyCompressPlugin.compress(data, ...) (NOT recommended)
MyCompressPlugin.decompress(cdata,status,shape,file_dtype,user_dtype)
The following will also work but should only be used for very simple
compressors that have no parameters. In the first case MyCompressPlugin
won't have the option to return None for certain parameters, and in the
second case handling a variable argument list becomes trickier.
To register this class:
CompressFactoryImpl.registerCompressor("My",MyCompressPlugin.factory)
CompressFactoryImpl.registerDecompressor("My",MyCompressPlugin.decompress)
To use the compression part from client code:
compressor = ZgyCompressFactory("My", ...)
"""
@staticmethod
def compress(data, *args, **kwargs):
"""
This is an abstract method.
Compress a 3d or (TODO-Low 2d) numpy array, returning a bytes-like
result. If called with a single "data" argument the compression
will be done with default parameters and no extended logging.
Additional arguments are specific to the compression type.
The function can be used directly as the compression hook.
But you probably want a lambda expression or a real instance
of this class instead, to be able to specify parameters.
The compression algorithm that is used is assumed to handle big / little
endian conversion itself. TODO-Worry this is not quite true for ZFP.
See the documentation. A special compilation flag is needed
on big endian machines. Also I suspect the optional header
(which this code uses) might need byte swapping.
"""
raise ZgyInternalError("Attempt to invoke abstract method")
@staticmethod
def decompress(cdata, status, shape, file_dtype, user_dtype):
"""
This is an abstract method.
Decompress bytes or similar into a numpy.ndarray.
Arguments:
cdata -- bytes or bytes-like compressed data,
possibly with trailing garbage.
status -- Currently always BrickStatus.Compressed,
in the future the status might be used to
distinguish between different compression
algorithms instead of relying on magic numbers.
shape -- Rank and size of the result in case this is
not encoded by the compression algorithm.
file_dtype -- Original value type before compression,
in case the decompressor cannot figure it out.
This will exactly match the dtype of the
data buffer passed to the compressor.
user_dtype -- Required value type of returned array.
Passing an uncompressed brick to this function is an error.
We don't have enough context to handle uncompressed bricks
that might require byteswapping and fix for legacy quirks.
Also cannot handle constant bricks, missing bricks, etc.
The reason user_dtype is needed is to avoid additional
quantization noise when the user requests integer compressed data
to be read as float. The decompressor might need to convert
float data to int, only to have it converted back to float later.
Current assumptions made of all candidate algorithms:
- The compressed data stream may have trailing garbage;
this will be silently ignored by the decompressor.
- The compressed data stream will never be longer than
the uncompressed data. This needs to be enforced by
the compressor. The compressor is allowed to give up
and tell the caller to not compress this brick.
- The reason for the two assumptions above is an
implementation detail; the reported size of a
compressed brick is not completely reliable.
This might change in the next version
- The compressed data stream must start with a magic
number so the decompressor can figure out whether
this is the correct algorithm to use.
If the assumptions cannot be met, the compressor / decompressor
for this particular type could be modified to add an extra header
with the compressed size and a magic number. Or we might add a
(size, algorithm number) header to every compressed block to
relieve the specific compressor / decompressor from worrying
about this. Or the brick status could be used to encode which
algorithm was used, picked up from the MSB of the lup entry.
Which would also require the compressor to return both the
actual compressed data and the code to identify the decompressor.
That is the main reason we are also passed the "status" arg.
Caveat: If adding an extra header, keep in mind that this header
must be included when checking that the compressed stream is not
too big.
"""
raise ZgyInternalError("Attempt to invoke abstract method")
def __init__(self, *args, **kwargs):
"""
Create an instance that remembers the arguments it was created with.
When the instance is called as a function it will invoke compress()
with those arguments. So you can use either of the following:
compressor = CompressPlugin.compress # no arguments
compressor = CompressPlugin(...)
compressor = lambda x: CompressPlugin.compress(x, ...)
Derived classes don't need to redefine __init__ and __call__.
But they might want to in order to get argument checking.
The __init__ in the base class accepts any arguments so an error
won't be caught until the first time the compressor is invoked.
"""
self._args = args
self._kwargs = kwargs
def __call__(self, data):
"""
Invoke the compressor with arguments passed by the constructor.
"""
return self.compress(data, *self._args, **self._kwargs)
def dump(*args, **kwargs):
"""
Output statistics to standard output, if possible.
"""
pass
class CompressFactoryImpl:
"""
Registry of known compress and decompress algorithms.
Those two are completely separate but we might as well
handle both in the same class.
"""
_compress_registry = {}
_decompress_registry = []
@staticmethod
def registerCompressor(name, fn):
"""
Register a factory function that will be called to create
a function that in turn can be used to compress a data block.
Pass fn = None if you for some reason need to remove a registration.
The registered function can have any signature; the signature
needs to include whatever parameters the actual compressor wants.
The function that is created by the factory must have the signature:
raw: bytes.
brickstatus: impl.enum.BrickStatus,
bricksize: tuple(int,int,int),
file_dtype: np.dtype,
result_dtype: np.dtype
The function's return value:
np.ndarray with rank 3, shape bricksize, and dtype result_dtype.
Example usage of the factory:
old: with ZgyWriter(snr=snr)
new: with ZgyWriter(compressor = ZgyCompressFactory("ZFP", snr=30),
The example shows that this is a bit more inconvenient for the end
user. But it allows for using different compression plug-ins with
arbitrary parameters.
Note that user code doesn't need to use ZgyCompressFactory() and
its list of known compressors. Instead a compression function
can be provided directly. But most likely the factory method will
be simpler to maintain.
fn() is allowed to return None, which will have the same effect
as if the user did not specify any compression. E.g. there might be
a convention that snr<0 means store uncompressed. Allowing the
factory to return None in that case means the example above would
still work. Otherwise the snr<0 test would be included in the
client code. Which is messy.
"""
CompressFactoryImpl._compress_registry[name] = fn
@staticmethod
def registerDecompressor(name, fn):
"""
Register a factory function that is able to decompress one or more
types of compressed data. The registered functions will be called
in reverse order of registration until one of them indicates that
it has decompressed the data. You cannot remove a registration
but you can effectively disable it by registering another one
that recognizes the same input data.
The supplied name is only for information.
The function that is created by the factory must have the signature:
raw: bytes.
brickstatus: impl.enum.BrickStatus,
bricksize: tuple(int,int,int),
file_dtype: np.dtype,
result_dtype: np.dtype
"""
CompressFactoryImpl._decompress_registry.insert(0, (name, fn))
@staticmethod
def knownCompressors():
"""
Return the names of all compressors known to the system.
This is primarily for logging, but might in principle be used
in a GUI to present a list of compressors to choose from.
The problem with that is how to handle the argument list.
"""
return list([k for k, v in CompressFactoryImpl._compress_registry.items()])
@staticmethod
def knownDecompressors():
"""
Return the names of all decompressors known to the system.
This is primarily for logging.
"""
return list([k for k, v in CompressFactoryImpl._decompress_registry])
@staticmethod
def factory(name, *args, **kwargs):
fn = CompressFactoryImpl._compress_registry.get(name, None)
if not fn:
known = ",".join(CompressFactoryImpl.knownCompressors())
raise ZgyMissingFeature('Compression algorithm "{0}" not recognized. Must be one of ({1}).'.format(name, known))
return fn(*args, **kwargs)
@staticmethod
def decompress(cdata, status, shape, file_dtype, user_dtype):
"""
Loop over all registered decompressors and try to find one that
can handle this particular brick. Raises an error if none found.
See CompressPlugin.decompress() for parameter descriptions.
"""
result = None
if status != impl_enum.BrickStatus.Compressed:
raise ZgyInternalError("Tried to decompress uncompressed data.")
for k, v in CompressFactoryImpl._decompress_registry:
result = v(cdata, status, shape, file_dtype, user_dtype)
if result is not None:
break
if result is None:
raise ZgyFormatError("Compression algorithm not recognized.")
elif tuple(result.shape) != tuple(shape):
raise ZgyFormatError("Decompression returned unexpected data.")
return result
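# Registration sketch for a hypothetical "Null" algorithm, only to show the
# expected signatures. The compressor callable may return None to store the
# brick uncompressed; a decompressor returns None when it does not recognize
# the data so other registered decompressors get a chance.
#
#   def _null_factory(*args, **kwargs):
#       return lambda data: None
#   def _null_decompress(cdata, status, shape, file_dtype, user_dtype):
#       return None
#   CompressFactoryImpl.registerCompressor("Null", _null_factory)
#   CompressFactoryImpl.registerDecompressor("Null", _null_decompress)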
class CompressStats:
def __init__(self, details):
self._details = details # Parameters etc, just for logging.
self._signal = 0
self._noise = 0
self._original = 0
self._packed = 0
self._types = dict()
self._lossy = 0
self._perfect = 0
self._all_info = [] # (measured_snr, measured_compressed_size_%)
self._timed_bytes = 0
self._ctime = 0
self._dtime = 0
def add_data(self, idata, csize, ddata, *, ctime = None, dtime = None, msg = None):
signal, noise = self._collect_snr(idata, ddata)
isize = idata.size * idata.itemsize
snr = self._compute_snr(signal, noise)
self.add(signal=signal, noise=noise, snr=snr, isize=isize, csize=csize,
ctime=ctime, dtime=dtime, msg=msg)
#print("signal {0:8.0f} noise {1:8.0f} snr {2:.2f}".format(signal, noise, snr))
def add(self, signal, noise, snr, isize, csize, *, ctime = None, dtime = None, msg = None):
self._all_info.append([snr, 100*csize/isize])
if snr < 99:
# s/n only logged for lossy compression.
self._lossy += 1
self._signal += signal
self._noise += noise
else:
self._perfect += 1
self._original += isize
self._packed += csize
if ctime is not None and dtime is not None:
self._timed_bytes += isize
self._ctime += ctime
self._dtime += dtime
if msg == "compress" and snr >= 99: msg = "compres*"
if msg:
self._types[msg] = 1 + self._types.setdefault(msg, 0)
if False and msg:
print("@@ yoo-hoo {0} {1} -> {2} factor {3:.1f} snr {4:.1f}".format(
msg, isize, csize, isize/csize if csize else np.nan, snr))
@staticmethod
def _collect_snr(idata, ddata):
"""
This function along with _compute_snr defines the cost function
used when trying to quantify noise. The input data should not
contain NaN or Inf. We won't be applying lossy compression to
that kind of data anyway.
There is no perfect solution. We want to avoid having
neighboring bricks showing significant differences in
quality. This sounds easy enough, but "similar quality"
depends very much on the cost function.
"""
# Computationally inexpensive, but has problems with spikes.
# A brick containing many spikes will appear to have a higher
# average amplitude, which allows the compressor to leave
# more noise in it. It would have been better to use the
# average signal level of the entire survey instead of the
# brick. But that information is not available yet.
signal = np.sum(np.abs(idata), dtype=np.float64)
noise = np.sum(np.abs(idata - ddata), dtype=np.float64)
# Expensive, and has problems with dead traces and bricks
# that contain more than 50% water. The signal and noise
# in these cases would be measuring only the dead and/or
# water samples.
#signal = np.median(np.abs(idata)) * idata.size
#noise = np.median(np.abs(idata - ddata)) * idata.size
return signal, noise
@staticmethod
def _compute_snr(s, n):
return 99 if n == 0 else -10 if s/n < 0.315 else min(99, 6 * math.log2(s/n))
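# Worked examples of the cost function above:
#
#   CompressStats._compute_snr(64.0, 1.0)   # -> 36.0 (6 * log2(64))
#   CompressStats._compute_snr(1.0, 0.0)    # -> 99   (lossless)
#   CompressStats._compute_snr(1.0, 10.0)   # -> -10  (s/n below 0.315)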
def snr(self):
return self._compute_snr(self._signal, self._noise)
def dump(self, msg = None, *, outfile=None, text=True, csv=False):
outfile = outfile or sys.stdout
# Measured SNR for each brick, excluding lossless unless everything
# is lossless. In which case we pretend there is just one brick.
# The reason is that the median compression makes more sense
# that way. If I had been plotting all percentiles instead of
# just printing the median it would have made more sense to include
# the lossless (SNR 99) bricks as well.
all_snr = list([x[0] for x in self._all_info if x[0] < 99]) or [99.0]
all_snr.sort()
# Measured compression ratio for each brick, excluding uncompressed
# unless everything is uncompressed. In which case we pretend there
# is just one brick. Note that lossless can still be compressed.
ratios = list([ 100.0/e[1] for e in self._all_info if e[1] != 1 and e[1] != 0 ]) or [1.0]
ratios.sort()
good = self._original and self._packed
args = {
"msg": msg or "Processed",
"details": self._details,
"orig_mb": self._original / (1024*1024),
"pack_mb": self._packed,
"factor": self._original / self._packed if good else -1,
"percent": 100 * self._packed / self._original if good else 1,
"n_perfect": self._perfect,
"n_lossy": self._lossy,
"snr": self.snr(),
"snr_min": all_snr[0],
"snr_med": all_snr[len(all_snr)//2],
"snr_max": all_snr[-1],
"factor_min": ratios[0],
"factor_med": ratios[len(ratios)//2],
"factor_max": ratios[-1],
"percent_max": 100 / ratios[0],
"percent_med": 100 / ratios[len(ratios)//2],
"percent_min": 100 / ratios[-1],
"ctime": 0 if not self._ctime else (self._timed_bytes / self._ctime) / (1024*1024),
"dtime": 0 if not self._dtime else (self._timed_bytes / self._dtime) / (1024*1024),
"_ctime": self._ctime,
"_dtime": self._dtime,
"_cplusdtime": self._ctime + self._dtime,
"_timed_bytes": self._timed_bytes / (1024*1024),
}
if text:
print("{msg} {orig_mb:.0f} MB, compression factor {factor:.1f} (median {factor_med:.1f} with {n_perfect} bricks lossless and {n_lossy} bricks min snr {snr_min:.1f} median {snr_med:.1f} overall {snr:.1f}) compress {ctime:.1f} MB/s decompress {dtime:.1f} MB/s".format(**args), file=outfile)
for k in sorted(self._types):
print(" ", k, self._types[k], file=outfile)
if csv:
if csv == "header":
print(";msg;requested snr;median snr;snr;compressed size%;median size%;compress MB/s;decompress MB/s;compress elapsed;decompress elapsed;sum elapsed;Total data MB", file=outfile)
else:
print(";{msg};{details};{snr_med:.1f};{snr:.1f};{percent:.1f};{percent_med:.1f};{ctime:.1f};{dtime:.1f};{_ctime:.1f};{_dtime:.1f};{_cplusdtime:.1f};{_timed_bytes:.1f}".format(**args), file=outfile)
def empty(self):
return self._lossy + self._perfect == 0
# Copyright 2017-2020, Schlumberger
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | zgyio | /zgyio-0.0.4.tar.gz/zgyio-0.0.4/openzgy/impl/compress.py | compress.py |
##@package openzgy.impl.enum
#@brief Enums not visible to the public API.
from enum import Enum
import numpy as np
class RawDataType(Enum):
"""
Sample data type as stored on the file.
In the public API this maps to SampleDataType.
Source: BrickedFileVersion.cpp, MetaDataValue.h
This enum is used for all versions.
Note that the existing public ZGY library
only recognizes SignedInt8, SignedInt16, and Float32.
"""
SignedInt8 = 0
UnsignedInt8 = 1
SignedInt16 = 2
UnsignedInt16 = 3
SignedInt32 = 4
UnsignedInt32 = 5
Float32 = 6
IbmFloat32 = 7
def _map_DataTypeToStructFormatCode(dt):
"""Internal method to get the "struct" format code."""
return {
RawDataType.SignedInt8: "<b",
RawDataType.UnsignedInt8: "<B",
RawDataType.SignedInt16: "<h",
RawDataType.UnsignedInt16: "<H",
RawDataType.SignedInt32: "<i",
RawDataType.UnsignedInt32: "<I",
RawDataType.Float32: "<f",
}[dt]
def _map_DataTypeToNumpyType(dt):
"""
Internal method to get the "numpy" format code.
Caveat: This returns a type object inheriting numpy.number,
not a numpy.dtype instance. These are often interchangeable
but not always.
"""
return {
RawDataType.SignedInt8: np.int8,
RawDataType.UnsignedInt8: np.uint8,
RawDataType.SignedInt16: np.int16,
RawDataType.UnsignedInt16: np.uint16,
RawDataType.SignedInt32: np.int32,
RawDataType.UnsignedInt32: np.uint32,
RawDataType.Float32: np.float32,
}[dt]
class RawCoordType(Enum):
"""
Coordinate type codes as stored in V1 files only.
The values are stored on the file, so the numbers must not be changed.
Source: BrickedFileVersion.cpp.
There is no corresponding enum in the API layer.
"""
Unknown = 0
Meters = 1
Feet = 2
ArcSec = 3 # value = deg*3600 + min*60 + sec
ArcDeg = 4 # value = deg + min/60 + sec/3600
ArcDegMinSec = 5 # value = deg*10000 + min*100 + sec
class RawHorizontalDimension(Enum):
"""
Horizontal dimension as seen in V2 files and later.
In the public API this maps to UnitDimension.
The values are stored in the file, so the numbers must not be changed.
Source: PoststackSeis3dInfo.h, MetaDataValue.h, ReaderImp::getMetaData
"""
Unknown = 0
Length = 1
ArcAngle = 2
class RawVerticalDimension(Enum):
"""
Vertical dimension as seen in V2 files and later.
In the public API this maps to UnitDimension.
The values are stored in the file, so the numbers must not be changed.
Source: PoststackSeis3dInfo.h, MetaDataValue.h, ReaderImp::getMetaData
"""
Unknown = 0
Depth = 1
SeismicTWT = 2
SeismicOWT = 3
class RawGridDefinition(Enum):
"""
Method used to define the geometry. Only FourPoint is allowed for write,
and only ThreePoint (treated as FourPoint) and FourPoint supported on read.
The values are stored in the file, so the numbers must not be changed.
There is no corresponding enum in the API layer.
"""
Unknown = 0
Parametric = 1
ThreePoint = 2
FourPoint = 3
class BrickStatus(Enum):
"""
Brick status as used in the internal API only.
"""
Missing = 0
Constant = 1
Normal = 2
Compressed = 3
class UpdateMode(Enum):
"""
WORK IN PROGRESS, potential configurable behavior.
A ZGY file cannot be updated once created, but individual bricks might
be written to more than once while the file is still open for create.
Updating a brick might cause loss of quality if the update was made
as part of a read/modify/write cycle. It might also cause space to be
wasted in the file since ZGY does not try to recycle freed bricks.
For this reason the application should explicitly indicate that it
accepts the loss of quality and/or leakage.
Kinds of leakage:
- Brick to be overwritten is in a closed segment. This is
expected to be rare, and only relevant for cloud storage.
- Brick to be overwritten and/or new brick is compressed
and the new data is smaller. Leaks the size difference,
although for implementation reasons we might want to leak
the entire old brick ("Pedantic" mode).
- Brick to be overwritten and/or new brick is compressed
and the new data is larger. Leaks the old brick.
The default is "Always" for uncompressed local files and "Constant"
otherwise.
It is fairly safe to set an uncompressed cloud file to "Always" but
there are some scenarios where very small regions are written to a
large file where this might cause much leakage. So the caller
needs to confirm he knows what he is doing.
Compressed files should only be set to "Always" in very special cases
or in unit tests. The potential leakage is much larger, as is the
problem of multiple compress and decompress cycles causing noise.
"""
Never = 0 # Never allow updating. Can only write to "Missing" bricks.
Constant = 1 # Can write to both "Missing" and "Constant" bricks. This
# permission is needed if the client wants to establish
# a default value for missing samples by setting the entire
# survey to this default. Followed by writing real data.
# "Never" and "Constant" require that the application
# writes brick aligned data. If a read/modify/write is
# indicated then this will raise an exception.
# The only drawback of "Constant" over "Never" is that
# "Constant" can cause a slight confusion: A particular
# read/modify/write might be allowed if the previous write
# just happened to have all constant values.
#NoCompress = 2 # Not recommended because it gives confusing behavior.
# Update is only allowed when both the old and the new
# brick is uncompressed. The only leaks allowed are those
# caused by the target block being in a closed segment on
# the cloud. The problem is that the decision is made
# per brick. It is more consistent to decide up front
# whether any compression might happen. If so, use
# Constant or Never. If not, just use Always.
#NoLeaks = 3 # Not recommended because it gives confusing behavior.
# As "Always" but if it turns out that a brick would be
# leaked, even in the rare "closed segment" case, the
# code will raise an exception.
Always = 4 # Always allow updating. This may cause leaked data in
# some cases. Uncompressed local data will not leak.
# Uncompressed cloud data can only leak when the target
# block being in a closed segment on the cloud.
# This can be the default for uncompressed data.
# For compressed data it is usually a bad idea.
# Not only could there be a lot more leakage, but if
# the reason for the overwrite is a read/modify/write
# and the data had lossy compression then the
# compression noise will accumulate.
Pedantic = 5 # As "Always", but when either the existing or the new
# brick is compressed then the old brick is leaked
# unconditionally. This gives more reproducible behavior
# but wastes more space.
# Copyright 2017-2020, Schlumberger
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | zgyio | /zgyio-0.0.4.tar.gz/zgyio-0.0.4/openzgy/impl/enum.py | enum.py |
##@package openzgy.impl.stats
#@brief Code to compute statistics.
import numpy as np
class StatisticData:
"""
Accumulate statistics: count, sum, sum of squares, and value range.
"""
def __init__(self, other = None):
self._cnt = other._cnt if other else 0 # Number of added samples.
self._inf = other._inf if other else 0 # Number of rejected samples.
self._sum = other._sum if other else 0 # Sum of added samples.
self._ssq = other._ssq if other else 0 # Sum-of-squares of samples.
self._min = other._min if other else 1 # Minimum sample value.
self._max = other._max if other else -1 # Maximum sample value.
# Note for min and max: These initial values are only interesting
# if storing an empty file. As soon as real data is seen they get
# replaced (due to min > max) and the value type may also change.
def __repr__(self):
return "StatisticData(cnt={_cnt}, inf={_inf}, sum={_sum}, ssq={_ssq}, min={_min}, max={_max})".format(**self.__dict__)
def __str__(self):
return "StatisticData({_cnt} samples)".format(**self.__dict__)
def _add_numpy_array(self, value):
"""
Add the data in the provided numpy array.
Technically this function should also work for Python lists or
even scalars since all the numpy functions called are robust
enough to handle that. But that complication isn't really needed
so I don't want to worry about testing it.
TODO-Low make a simpler and more efficient version for integral types.
TODO-Low performance boost to include a "factor" argument.
"""
if not isinstance(value, np.ndarray):
raise TypeError("Only numpy arrays accepted when adding statistics.")
valid = np.isfinite(value).astype(bool)
scnt = np.count_nonzero(valid)
sinf = np.count_nonzero(valid == False)
if sinf == 0: valid = True
# Obscure problem: initial is required for smin, smax when
# where is used, in spite of me knowing that there is at least
# one valid value. But, if initial=np.inf and value is an
# integral type this goes rather badly. Using value[0] as
# the initial is also problematic because it might be NaN.
# To solve this I need to compute the range as float.
# TODO-Low: or I could use int64 for all integral types but do I
# really gain anything? Slightly more accurate results, yes,
# but more corner cases to test and might I theoretically
# overflow even an int64? The old code just used double.
# TODO-Worry the existing schema stores value range as float32,
# which is inaccurate if we ever decide to support int32 data.
value = value.astype(np.float64, copy=False)
if scnt:
ssum = np.sum(value, where=valid, dtype=np.float64)
sssq = np.sum(np.square(value, dtype=np.float64), where=valid, dtype=np.float64)
smin = np.amin(value, where=valid, initial=np.inf)
smax = np.amax(value, where=valid, initial=-np.inf)
self._cnt += int(scnt)
self._sum += ssum
self._ssq += sssq
if self._min is None or self._max is None or self._min > self._max:
self._min = smin
self._max = smax
else:
self._min = min(self._min, smin)
self._max = max(self._max, smax)
self._inf += int(sinf)
return self
def _add_other(self, other):
"""
Add more samples to an existing StatisticData.
other is also allowed to hold negative counts,
this will cause samples to be removed. But the
min/max range will still be expanded.
"""
# if other._cnt == 0 then cnt, sum, ssq should also be zero
# and min, max should be uninitialized. Don't trust that.
if other._cnt != 0:
if self._cnt != 0:
# already have data, so the range needs to be combined.
self._min = min(self._min, other._min)
self._max = max(self._max, other._max)
else:
# our own min/max is bogus since we don't have any samples yet.
self._min = other._min
self._max = other._max
self._cnt += int(other._cnt)
self._sum += other._sum
self._ssq += other._ssq
self._inf += int(other._inf)
return self
def _multiply_scalar(self, factor):
"""
Multiply StatisticData with a constant N, equivalent to creating
a new instance and adding the old one to it N times. N can also be
negative. The min/max range is not affected.
"""
if factor != int(factor):
raise TypeError("Multiplication by integers only.")
factor = int(factor) # In case it is np.int32 now.
self._cnt *= factor
self._inf *= factor
self._sum *= factor
self._ssq *= factor
return self
def _equal(self, other):
if other is None:
return False
elif not isinstance(other, type(self)):
raise TypeError()
else:
return (self._cnt == other._cnt and
self._inf == other._inf and
self._sum == other._sum and
self._ssq == other._ssq and
self._min == other._min and
self._max == other._max)
def add(self, data, factor):
if factor == 1:
self._add_numpy_array(data)
else:
tmp = StatisticData()
tmp._add_numpy_array(data)
            tmp._multiply_scalar(factor)
self._add_other(tmp)
def scale(self, slope, intercept):
"""
Calculate the linear transform needed to convert from one range
(typically the natural data range of the integral storage type)
to the data range that the application wants to see.
Then update the statistics in place so they look like the transform
had been done on every single data point before adding it.
The decoded value Y is given by a linear transform of the coded value X:
Y = intercept + slope*X
where intercept and slope are given by the coding range and the value range
of type T (see below). The statistics of Y are then:
SUM_Y = SUM(intercept + slope*x)
= n*intercept + slope*SUM(x) = n*intercept + slope*SUM_X
SSQ_Y = SUM((intercept + slope*x)^2)
= SUM(intercept^2 + 2*intercept*slope*x + slope^2*x^2)
= n*intercept^2 + 2*intercept*slope*SUM(x) + slope^2*SUM(x^2)
= n*intercept^2 + 2*intercept*slope*SUM_X + slope^2*SSQ_X
MIN_Y = MIN(intercept + slope*x)
= intercept + slope*MIN(x)
= intercept + slope*MIN_X
MAX_Y = MAX(intercept + slope*x)
= intercept + slope*MAX(x)
= intercept + slope*MAX_X
"""
#slope = (newmax - newmin) / (oldmax - oldmin)
#intercept = newmin - oldmin * slope
        self._ssq = self._cnt*intercept*intercept + 2*intercept*slope*self._sum + slope*slope*self._ssq
        self._sum = self._cnt*intercept + slope*self._sum
        self._min = intercept + slope*self._min
        self._max = intercept + slope*self._max
return self
def __eq__(self, other):
return self._equal(other)
def __ne__(self, other):
return not self._equal(other)
def __add__(self, other):
if isinstance(other, StatisticData):
return StatisticData(self)._add_other(other)
elif isinstance(other, np.ndarray):
return StatisticData(self)._add_numpy_array(other)
else:
raise TypeError("Can only add numpy arrays and other StatisticData instances")
def __iadd__(self, other):
if isinstance(other, StatisticData):
return self._add_other(other)
elif isinstance(other, np.ndarray):
return self._add_numpy_array(other)
else:
raise TypeError("Can only add numpy arrays and other StatisticData instances")
def __mul__(self, factor):
return StatisticData(self)._multiply_scalar(factor)
def __rmul__(self, factor):
return StatisticData(self)._multiply_scalar(factor)
def __imul__(self, factor):
return self._multiply_scalar(factor)
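# --- Hedged usage sketch (not part of the library) ---
# Demonstrates that scale() updates the accumulated statistics as if the
# linear transform Y = intercept + slope*X had been applied to every sample
# before it was added. The sample values below are made up, and the block is
# guarded by __main__ so nothing runs on a normal import.
if __name__ == "__main__":
    demo = np.array([[[1.0, 2.0], [3.0, 4.0]]])
    stats = StatisticData()
    stats += demo                 # accumulate cnt, sum, ssq, min, max
    stats.scale(2.0, 10.0)        # slope=2, intercept=10, i.e. Y = 10 + 2*X
    direct = StatisticData()
    direct += 10.0 + 2.0 * demo   # same samples, transformed up front
    print(stats, direct)
    assert stats == direct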
# Copyright 2017-2020, Schlumberger
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | zgyio | /zgyio-0.0.4.tar.gz/zgyio-0.0.4/openzgy/impl/stats.py | stats.py |
##@package openzgy.impl.file
#@brief Low level I/O, abstract layer.
import os
import json
from enum import Enum
from contextlib import contextmanager, suppress
from collections import namedtuple
from ..exception import *
try:
import sdglue as sd
except Exception as ex:
print("seismic store access is not available:", ex)
sd = None
class UsageHint(Enum):
Unknown = 0x00
TextFile = 0x01
Header = 0x10
Data = 0x20
    Compressed = 0x40
class Config:
_deprecation_context_warned = False
@staticmethod
def _deprecated_context(attrname):
if not Config._deprecation_context_warned:
print('DEPRECATION WARNING: IOContext should be a dict. Found "' +
attrname + '" as an attribute.')
Config._deprecation_context_warned = True
@staticmethod
def _get_string_env(context, attrname, name, default):
try:
value = context[attrname]
name = attrname
except (TypeError, KeyError):
if hasattr(context, attrname):
value = getattr(context, attrname)
name = attrname
Config._deprecated_context(attrname)
else:
value = os.getenv(name, str(default))
if default and not value:
print("WARNING: ${0} cannot be empty.".format(name))
value = default
return value
@staticmethod
def _get_numeric_env(context, attrname, name, default, min_value, max_value):
try:
value = context[attrname]
name = attrname
except (TypeError, KeyError):
if hasattr(context, attrname):
value = getattr(context, attrname)
name = attrname
Config._deprecated_context(attrname)
else:
try:
value = int(os.getenv(name, str(default)))
except ValueError:
print("WARNING: badly formed number in ${0} ignored.".format(name))
value = default
if value < min_value or value > max_value:
print("WARNING: ${0} must be be between {1} and {2}.".format(name, min_value, max_value))
value = default
return value
class FileConfig(Config):
def __init__(self, context):
"""
        Currently the on-prem file reader has no user-settable configuration.
"""
self.maxsize = 0 # No consolidation of read requests.
self.maxhole = 0 # No fuzzy consolidation.
self.aligned = 0 # No additional alignment on reads.
self.segsize = 0 # No buffering of writes.
self.threads = 1 # No multi threading.
@staticmethod
def _redact(s):
return s if len(s) < 20 else s[:4] + "..." + s[-4:]
def dump(self):
return """FileConfig: No user settable configuration."""
class SDConfig(Config):
def __init__(self, context):
"""
Process an iocontext for seismic store, doing consistency checks
and applying fallbacks from environment variables and hard coded
defaults.
The context itself should be a dict or equivalent. The code also
supports the older style using a class instance with attributes.
That feature is deprecated and will be removed. It violates the
principle of least surprise.
A fully specified iocontext has the following attributes:
sdurl: string
Where to contact the seismic store service.
Defaults to $OPENZGY_SDURL.
sdapikey: string
Authorization for application to access the seismic store API.
Defaults to $OPENZGY_SDAPIKEY.
sdtoken: string
User credentials. Set to $OPENZGY_TOKEN if not found,
beware that this might not be secure. The code will no longer
use the token last saved by sdcfg as a fallback. If this is
desired you must specify "FILE:carbon.slbapp.com" as the token.
Caveat: The sdcfg token is not refreshed so it might time out
after an hour. Run "sdutil auth idtoken > /dev/null" to refresh.
maxsize: int specified in MB between 0 and 1024.
Zero is taken to mean do not consolidate.
Tell the reader to try to consolidate neighboring bricks
when reading from seismic store. This is usually possible
when the application requests full traces or at least traces
            longer than 64 samples. Setting maxsize limits this
consolidation to the specified size. The assumption is that
for really large blocks the per-block overhead becomes
insignificant compared to the transfer time.
Consolidating requests has higher priority than using
multiple threads. So, capping maxsize might allow more
data to be read in parallel.
            Note that currently the splitting isn't really smart. With a
64 MB limit and 65 contiguous 1 MB buffers it might end up
reading 64+1 MB instead of e.g. 32+33 MB.
Note that the low level reader should not assume that
requests are capped at this size. They might be larger
e.g. when reading the header information.
Defaults to $OPENZGY_MAXSIZE_MB if not specified, or 2 MB.
maxhole: int specified in MB between 0 and 1024.
            This applies when consolidating neighboring bricks while
reading from seismic store. Setting maxhole > 0 tells the
reader that it is ok to also consolidate requests that are
almost neighbors, with a gap up to and including maxhole.
The data read from the gap will be discarded unless picked
up by some (not yet implemented) cache.
For cloud access with high bandwidth (cloud-to-cloud) this
should be at least 2 MB because smaller blocks will take
just as long to read. For low bandwidth cloud access
(cloud-to-on-prem) it should be less. If a fancy cache
is implemented it should be more. For accessing on-prem
ZGY files it probably makes no difference.
Defaults to $OPENZGY_MAXHOLE_MB if not specified, or 2 MB.
aligned: int in MB between 0 and 1024.
This is similar to the maxhole parameter. If set, starting
and ending offsets are extended so they both align to the
specified value. Set this parameter if the lower levels
implement a cache with a fixed blocksize and when there is
            an assumption that most reads will be aligned anyway.
TODO-Worry: Handling reads past EOF may become a challenge
for the implementation.
Defaults to $OPENZGY_ALIGNED_MB if not specified, or zero.
segsize: int in MB between 0 and 16*1024 (i.e. 16 GB).
Defaults to $OPENZGY_SEGSIZE_MB if not specified, or 1 GB.
threads: int between 1 and 1024.
Use up to this many parallel requests to seismic store
in order to speed up processing. This applies to individual
reads in the main API. So the reads must be for a large
area (i.e. covering many bricks) for the setting to be
of any use. Set to $OPENZGY_NUMTHREADS if not found,
and 1 (i.e. no threading) if the environment setting is
also missing.
            Whether it is useful to set the variable depends on the
application. Apps such as Petrel/BASE generally do their
own multi threading, issuing multiple read requests to
the high level API in parallel. In that case it might
not be useful to also parallelize individual requests.
legaltag: string, possibly empty.
The legaltag stored in the file. Used only on create.
writeid:
I don't know what this is for. Ask the seismic store team.
seismicmeta:
a dictionary of additional information to be associated
with this dataset in the data ecosystem. Currently used
only on create, although SDAPI allows this to be set on
an existing file by calling {get,set}SeismicMeta().
When set via an environment variable (strictly for testing)
this needs to be the string representation of the json data.
When set from a program a Python dict is expected.
_debug_trace:
For debugging and unit tests only.
Callback to be invoked immediately before a read or write
is passed on to seismic store. Typically used to verify
that consolidating bricks works as expected. Can only be
set programmatically. Not by an environment variable.
"""
self.sdurl = self._get_string_env(context, "sdurl",
"OPENZGY_SDURL",
"")
self.sdapikey = self._get_string_env(context, "sdapikey",
"OPENZGY_SDAPIKEY",
"")
self.sdtoken = self._get_string_env(context, "sdtoken",
"OPENZGY_TOKEN",
"")
self.maxsize = self._get_numeric_env(context, "maxsize",
"OPENZGY_MAXSIZE_MB",64,0,1024) * (1024*1024)
self.maxhole = self._get_numeric_env(context, "maxhole",
"OPENZGY_MAXHOLE_MB",2,0,1024) * (1024*1024)
self.aligned = self._get_numeric_env(context, "aligned",
"OPENZGY_ALIGNED_MB",0,0,1024) * (1024*1024)
self.segsize = self._get_numeric_env(context, "segsize",
"OPENZGY_SEGSIZE_MB",1024,0,1024*16) * (1024*1024)
self.threads = self._get_numeric_env(context, "threads",
"OPENZGY_NUMTHREADS",1,1,1024)
# All the numeric options ought to be integral, but for unit
# tests it might be desirable to allow odd sizes. When reading
# from environment variables only integral numbers are accepted.
self.maxsize = int(round(self.maxsize))
self.maxhole = int(round(self.maxhole))
self.aligned = int(round(self.aligned))
self.segsize = int(round(self.segsize))
self.legaltag = self._get_string_env(
context, "legaltag", "OPENZGY_LEGALTAG", "")
self.writeid = self._get_string_env(
context, "writeid", "OPENZGY_WRITEID", "")
self.seismicmeta = self._get_string_env(
context, "seismicmeta", "OPENZGY_SEISMICMETA", "")
try:
self._debug_trace = context["_debug_trace"]
except (TypeError, KeyError):
if hasattr(context, "_debug_trace"):
self._debug_trace = getattr(context, "_debug_trace")
else:
self._debug_trace = lambda *args, **kwargs: False
@property
def extra(self):
"""
Legaltag, writeid, and seismicmeta are usually packed
into a single "extra" dictionary when creating a new file.
If any of them are unset they will be excluded from the
dictionary instead of being passed as some default value.
CAVEAT: The keys in the "extra" dictionary are not
supposed to be hard coded as I do here. They are defined in
seismic-store-client-api-cpp/src/src/core/Constants.{cc,h}.
Cannot access that file here.
NOTE: Python dicts have an undefined sort order, as does
json. To simplify testing I sort the keys in the "extra" dict.
If SDAPI for some reason should require a specific ordering
then "seismicmeta" needs to be passed as a string.
"""
result = {}
if self.legaltag:
result["legal-tag"] = self.legaltag
if self.writeid:
result["writeid"] = self.writeid
if self.seismicmeta:
if not isinstance(self.seismicmeta, str):
result["seismicmeta"] = json.dumps(
self.seismicmeta, sort_keys=True)
else:
result["seismicmeta"] = self.seismicmeta
return result
@staticmethod
def _redact(s):
return s if len(s) < 20 else s[:4] + "..." + s[-4:]
def dump(self):
return """SDConfig:
sdurl: {sdurl}
sdapikey: {sdapikey}
sdtoken: {sdtoken}
maxsize: {maxsize} MB
maxhole: {maxhole} MB
aligned: {aligned} MB
segsize: {segsize} MB
threads: {threads}
extra: {extra}""".format(
sdurl=self.sdurl,
sdapikey=self._redact(self.sdapikey),
sdtoken=self._redact(self.sdtoken),
maxsize=self.maxsize // (1024*1024),
maxhole=self.maxhole // (1024*1024),
aligned=self.aligned // (1024*1024),
segsize=self.segsize // (1024*1024),
threads=self.threads,
extra=json.dumps(self.extra, sort_keys=True))
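# --- Hedged usage sketch (not part of the library) ---
# Shows how an iocontext dict is turned into an SDConfig. All values below
# are made up; real code would pass genuine credentials. Sizes given in the
# dict are in MB, mirroring the corresponding environment variables.
if __name__ == "__main__":
    _demo_cfg = SDConfig({"sdurl": "https://example.invalid/api/v3",
                          "sdapikey": "demo-api-key",
                          "sdtoken": "demo-token",
                          "maxsize": 4,
                          "threads": 8})
    print(_demo_cfg.dump())
    assert _demo_cfg.maxsize == 4 * 1024 * 1024
    assert _demo_cfg.threads == 8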
class FileADT:
def __init__(self, filename, mode, iocontext):
"""
Open a file in the specified mode, which must be "rb" or "w+b".
Caller should use a "with" block to ensure the file gets closed.
The iocontext is an optional data structure that the user may
specify when a reader is created. It might be used to hold
user credentials etc. needed to access the low level file.
TODO-Low: support "r+b" (update) at some point in the future.
"""
if not mode in ("rb", "w+b", "r+b", "d"):
raise ZgyUserError("Opening ZGY as " + mode + " is not supported.")
# Currently no need to keep this, as the derived classes will
# copy the relevant information to self._config.
#self._iocontext = iocontext
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.xx_close()
def xx_close(self):
"""
Close a previously opened file.
No action if the file is already closed.
"""
raise NotImplementedError
def xx_read(self, offset, size, *, usagehint=UsageHint.Unknown):
"""
Read binary data from the file. Both size and offset are mandatory.
I.e. caller is not allowed to read "the entire file", and not
allowed to "read from where I left off the last time".
The actual reading will be done in a derived class.
The base class only validates the arguments.
"""
raise NotImplementedError
def xx_write(self, data, offset, *, usagehint=UsageHint.Unknown):
"""
Write binary data to the file. Offset is mandatory. I.e. caller
is not allowed to "write to where I left off the last time".
The actual writing will be done in a derived class.
The base class only validates the arguments.
"""
raise NotImplementedError
# Might want to pass this tuple to the delivery functor instead of
# discrete arguments. Probably doesn't change much either way, though.
# Or maybe consider passing everything as keyword arguments.
_deliveryType = namedtuple("Delivery", "data this_offset this_size want_offset want_size iocontext")
_requestType = namedtuple("Request", "offset size delivery")
def xx_readv(self, requests, *, parallel_ok=False, immutable_ok=False, transient_ok=False, usagehint=UsageHint.Unknown):
"""
Read binary data from multiple regions in the file. Each part
of the request specifies offset, size, and a delivery functor
which will be invoked to pass back the returned bulk.
Arguments:
parallel_ok: If true then the delivery functor might be called
simultaneously from multiple worker threads.
The function itself will block until all the data
has been read or an error occurs.
immutable_ok: If true the caller promises that the delivery
functor will not try to modify the data buffer.
Pass False e.g. if the functor may need to byteswap
the data it has read from file.
transient_ok: If true the caller promises that the delivery
functor will not keep a reference to the data buffer
after the functor returns.
The delivery functor is called as
fn(data)
FUTURE: a new argument partial_ok may be set to True if it is ok to
call the delivery functor with less data than requested, and to keep
calling it until all data has been delivered. The signature of the
delivery functor gets changed to fn(data, offset, size). Offset is the
absolute file offset. I.e. not relative to the requested offset.
Passing partial_ok=True might elide some buffer copies if the
caller is doing something simple (such as reading an uncompressed
brick) where partial copies are possible, and the backend is in the
cloud, and a longer lived cache is being maintained, and the cache
block size is smaller than the requested size. That is a lot of ifs.
There was some code to handle partial_ok but it has been removed.
Get it from the git history if you really want it.
"""
raise NotImplementedError
@staticmethod
def _nice(n):
"""Human readable number."""
if n >= 1024*1024 and (n % (1024*1024)) == 0:
return str(n//(1024*1024)) + " MB" # whole number of MB
elif n >= 256*1024 and (n % (256*1024)) == 0:
            return str(n/(1024*1024)) + " MB" # e.g. 42.75 MB
elif n >= 1024 and (n % 1024) == 0:
return str(n/1024) + " kB"
else:
return str(n) + " bytes"
def _validate_read(self, offset, size):
if self._mode not in ("rb", "w+b", "r+b"):
raise ZgyUserError("The file is not open for reading.")
if offset is None or offset < 0:
raise ZgyUserError("Invalid offset {0} {1} for reading.".format(
str(type(offset)), str(offset)))
if size is None or size < 1:
raise ZgyUserError("Invalid size {0} {1} for reading.".format(
str(type(size)), str(size)))
# Beware of mixing python and numpy scalars on Windows.
# If offset fits in np.int32 then this is what it gets
# cast to, which could make the sum overflow. On Linux
# with a slightly older Python but same numpy version
# the sum always ends up as np.int64. However, in this
# particular case the exception should never occur so
# the consequence was only less robust code.
if int(offset) + int(size) > self.xx_eof:
# Might be an internal error or a corrupted file,
# but let's report only the immediate error and not
# try to guess.
raise ZgyEndOfFile("Offset {0} size {1} is past EOF at {2}".format(
self._nice(offset), size, self._nice(self.xx_eof)))
def _validate_write(self, data, offset):
if self._mode not in ("w+b", "r+b"):
raise ZgyUserError("The file is not open for writing.")
if offset is None or offset < 0:
raise ZgyUserError("Invalid offset for writing.")
if data is None or len(data) < 1:
raise ZgyUserError("Invalid size for writing.")
def _validate_readv(self, requests):
for offset, size, delivery in requests:
self._validate_read(offset, size)
def _check_short_read(self, offset, size, got):
"""
Throw a descriptive error if there was something wrong with the read.
Currently works for local files only.
"""
# TODO-Low, can I get this to work also for seismic store?
# Either make a virtual _get_real_size(), or admit this is
# local-file spacific and move it down to class LocalFile.
# Work around issue with mixing numpy and Python ints.
offset, size, got = (int(offset), int(size), int(got))
if got == size:
return
msg = "Cannot read offset {0} size {1}: ".format(
self._nice(offset), self._nice(size))
if got > size:
raise ZgyInternalError(msg + "got too much data: {0}.".format(
self._nice(got)))
elif offset + size > self.xx_eof:
# This can only happen if I (bug!) forgot to call _validate_read.
            raise ZgyEndOfFile(msg + "past EOF at {0}.".format(
self._nice(self.xx_eof)))
elif os.stat(self._file.fileno()).st_size < self.xx_eof:
# This can happen if opening /dev/null for read/write,
# or if a write failed due to a full disk (and was not checked),
# or I somehow (bug!) failed to keep track of eof while writing.
# Or maybe some other process truncated the file.
raise ZgyEndOfFile(msg + "File is shorter than expected: {0} / {1}.".format(
self._nice(os.stat(self._file.fileno()).st_size),
self._nice(self.xx_eof)))
else:
# The os returned a short read for no apparent reason.
# Maybe the file is a special device other than /dev/null.
raise ZgyEndOfFile(msg + "short read for unknown reason.")
@staticmethod
def _consolidate_requests(requests, *,
max_hole = 2*1024*1024,
max_size = 64*1024*1024,
force_align = None,
consolidate_overlaps = False,
eof = None):
"""
Given a list of requests as passed to xx_readv, try to reduce
the number of requests by consolidating adjacent or nearly
adjacent reads. If successful this means we will be reading
with larger block sizes.
Return a new list of requests that the caller may pass on
to xx_readv instead of the original.
Remember that the callback functions specified with the
original requests need to be called with the exact data
that they expected. This means that in the consolidated
list the callback functions need to be wrappers.
Parameters:
max_hole: See class SDConfig for a description.
max_size: See class SDConfig for a description.
force_align: See class SDConfig for a description.
consolidate_overlaps: Set to True if you expect some of the
individual requests to overlap, and you are ok with
risking some corner cases. For example, if you
request a mutable buffer then the overlapping area
will be delivered to more than one recipient and
the buffer may or may not be shared between the two.
The default is False which causes the code to not
attempt consolidation of these. Less efficient
but also less surprises. In practice there should
never be any overlap anyway.
"""
class ConsolidatedDelivery:
"""
Helper function to distribute a single delivery from
a consolidated read request to all the requesters.
Slice the data so each requester gets exactly what
they originally asked for.
Note that if the delivered data is bytes or bytearray
the slicing will create a copy. If it is a numpy array
the slice will just return a more efficient view.
Should I perhaps create a numpy array here?
Note that if the original request had overlapping reads
we might want to force a copy anyway. Because we don't
know whether the recipient asked for a mutable buffer.
It is tempting to disallow overlapping reads completely.
Caveat: Handling reads past EOF may be tricky.
I need some specific unit tests for that.
"""
def __init__(self, group, begin):
self._group = group
self._begin = begin
def __call__(self, data):
for offset, size, delivery in self._group:
if delivery:
end = min(offset + size - self._begin, len(data))
beg = min(offset - self._begin, end)
delivery(data[beg:end])
def _groupsize(g, force_align, *, eof):
"""
Given a list of (offset, size, functor)
return offset and size for the entire group.
The offset is the linear offset from the start of the file;
it has not yet been converted to segment and local offset.
The returned value includes any padding for force_align.
TODO-High the padding is WRONG, because the alignment should be
done per segment. We may end up crossing segment boundaries
needlessly. And/or going past EOF. Going past EOF is critical
because in the subsequent call to _split_by_segment() we will
end up trying to actually read that part.
Crossing segment boundaries is less of a problem.
- It will not happen if the headers are aligned at least to
force_align, which is typically the cache bricksize.
- It will not happen if the file was uploaded with sdutil.
In that case there will be just one segment.
            - It is (almost) not an issue if a proper cache is attached.
- A naive cache can align to 256 KB, this virtually guarantees
the header area will be sufficiently aligned if the file
was initially stored on the cloud.
- In other cases there will be a small performance penalty but
only when reading close to a segment boundary or when reading
the headers. Opening a file may see a noticeable slowdown
but not I think anything dramatic.
"""
beg = min([e[0] for e in g])
end = max([e[0] + e[1] for e in g])
assert beg == g[0][0]
#assert end == g[-1][0] + g[-1][1] # might fail if overlap.
if force_align:
beg = (beg // force_align) * force_align
end = ((end + force_align - 1) // force_align) * force_align
if eof: end = min(end, eof)
return beg, end - beg
def _split_requests(requests, force_align, *, eof):
"""
Make a list of lists, grouping requests that should be read
in a single operation. Operates on linear addresses, so if
any of the input requests crossed a segment boundary then
this will also be the case for the output.
"""
# TODO-Low: In the Python code some parameters are inherited from
# calling method; this is confusing and wasn't actually intended.
all_requests = []
prev_request = (0, 0, None)
for request in sorted(requests):
hole = request[0] - (prev_request[0] + prev_request[1])
if not all_requests:
all_requests = [[request]]
elif (hole <= max_hole and
(consolidate_overlaps or hole >= 0) and
(not max_size or _groupsize(all_requests[-1] + [request], force_align, eof=eof)[1] <= max_size)):
all_requests[-1].append(request)
else:
all_requests.append([request])
prev_request = request
return all_requests
def _join_requests(all_requests, force_align, *, eof):
"""Create the final result containing one entry for each
consolidated group."""
new_requests = []
for group in all_requests:
# Short cut, probably not worth the trouble.
#if len(group)==1 and not force_align: new_requests.append(group[0])
offset, size = _groupsize(group, force_align, eof=eof)
new_requests.append((offset, size, ConsolidatedDelivery(group, offset)))
return new_requests
def _print_requests(all_requests, name = "Requests:"):
"""For debugging only, print a list of list of requests."""
if len(all_requests) == 0 or (len(all_requests) == 1 and len(all_requests[0]) == 0):
print(" (empty)")
return
print(name)
for group in all_requests:
if len(all_requests) > 1: print(" Group:")
prev_offset, prev_size = (None, None)
msg = " {0} offset {1:8X} end {2:8X} size {3:6X}"
for offset, size, delivery in group:
if prev_offset is not None:
skip_offset = prev_offset + prev_size
skip_size = offset - (prev_offset + prev_size)
if skip_size != 0:
print(msg.format("skip", skip_offset, offset, skip_size))
print(msg.format("read", offset, offset+size, size))
prev_offset, prev_size = (offset, size)
# main part of _consolidate_requests().
all_requests = _split_requests(requests, force_align, eof=eof)
new_requests = _join_requests(all_requests, force_align, eof=eof)
if False and len(requests) != len(new_requests):
print("Consolidated", len(requests), "into", len(new_requests))
print("Requests:")
_print_requests([requests])
print("Consolidated:")
_print_requests([new_requests], name="Consolidated:")
old_size = sum([x[1] for x in requests])
new_size = sum([x[1] for x in new_requests])
assert new_size >= old_size
return new_requests
@property
def threadsafe(self):
return False
@property
def xx_iscloud(self):
return False
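# --- Hedged usage sketch (not part of the library) ---
# Shows how _consolidate_requests merges adjacent or nearly adjacent
# (offset, size, delivery) requests into fewer, larger reads, while the
# wrapped delivery functors still hand each caller exactly the bytes it
# asked for. All offsets and sizes below are made up.
if __name__ == "__main__":
    received = {}
    def _remember(key):
        def _delivery(data):
            received[key] = bytes(data)
        return _delivery
    original = [(0, 4, _remember("a")),
                (4, 4, _remember("b")),
                (12, 4, _remember("c"))]
    merged = FileADT._consolidate_requests(original, max_hole=8, eof=16)
    print("consolidated", len(original), "requests into", len(merged))
    # Pretend to be the backend: satisfy each merged request with a buffer
    # whose byte values equal their absolute file offsets.
    for offset, size, delivery in merged:
        delivery(bytes(range(offset, offset + size)))
    assert received["a"] == bytes(range(0, 4))
    assert received["b"] == bytes(range(4, 8))
    assert received["c"] == bytes(range(12, 16))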
class LocalFile(FileADT):
def __init__(self, filename, mode, iocontext):
super().__init__(filename, mode, iocontext)
self._config = FileConfig(iocontext)
self._file = open(filename, mode) if mode != "d" else None
self._mode = mode
self._name = filename
self._eof = 0 if mode in ("wb", "w+b") else os.stat(filename).st_size
@property
def xx_eof(self):
return self._eof
def xx_close(self):
mode = self._mode
self._mode = None
if mode and self._name:
if mode == "d":
with suppress(FileNotFoundError):
os.remove(self._name)
else:
self._file.close()
self._file = None
self._name = None
class LocalFileOther(LocalFile):
def xx_read(self, offset, size, *, usagehint=UsageHint.Unknown):
self._validate_read(offset, size)
self._file.seek(offset, 2 if offset < 0 else 0)
data = self._file.read(size)
self._check_short_read(offset, size, len(data))
return data
def xx_readv(self, requests, *, parallel_ok=False, immutable_ok=False, transient_ok=False, usagehint=UsageHint.Unknown):
for offset, size, delivery in requests:
delivery(LocalFileOther.xx_read(self, offset, size, usagehint=usagehint))
def xx_write(self, data, offset, *, usagehint=UsageHint.Unknown):
self._validate_write(data, offset)
self._file.seek(offset)
self._eof = max(self._eof, offset + len(data))
nbytes = self._file.write(data)
if nbytes != len(data):
raise ZgyInternalError("Short write to local file")
return len(data) # Most callers ignore this anyway.
@property
def threadsafe(self):
return False
class LocalFileLinux(LocalFile):
def xx_read(self, offset, size, *, usagehint=UsageHint.Unknown):
self._validate_read(offset, size)
data = os.pread(self._file.fileno(), size, offset)
self._check_short_read(offset, size, len(data))
return data
def xx_readv(self, requests, *, parallel_ok=False, immutable_ok=False, transient_ok=False, usagehint=UsageHint.Unknown):
for offset, size, delivery in requests:
delivery(LocalFileLinux.xx_read(self, offset, size, usagehint=usagehint))
def xx_write(self, data, offset, *, usagehint=UsageHint.Unknown):
self._validate_write(data, offset)
self._eof = max(self._eof, offset + len(data))
nbytes = os.pwrite(self._file.fileno(), data, offset)
if nbytes != len(data):
raise ZgyInternalError("Short write to local file")
@property
def threadsafe(self):
return True
class SeismicStoreFile(FileADT):
"""
Access data in seismic store as a linear file even when the dataset
has multiple segments. There are some limitations on write.
* Writes starting at EOF are allowed, and will cause a new segment
to be written.
* Writes starting past EOF, signifying a hole in the data, are not
allowed.
* Writes starting before EOF are only allowed if offset,size exactly
matches a previous write. This will cause that segment to be rewritten.
* Possible future extension: For the last segment only offset
needs to match. This means the last segment may be resized.
For read the class provides a readv() method to do scatter/gather reads.
The code will then consolidate adjacent bricks to get larger brick size
sent to SDAPI. Optionally parallelize requests that cannot be consolidated.
"""
def __init__(self, filename, mode, iocontext):
if sd is None:
raise ZgyMissingFeature("Seismic Store is not available")
super().__init__(filename, mode, iocontext)
self._config = SDConfig(iocontext)
#print(self._config.dump())
sdcred = (self._config.sdurl, self._config.sdapikey, self._config.sdtoken)
if not all(sdcred):
raise ZgyUserError("Missing credentials:" +
("" if sdcred[0] else " $OPENZGY_SDURL") +
("" if sdcred[1] else " $OPENZGY_SDAPIKEY") +
("" if sdcred[2] else " $OPENZGY_TOKEN"))
if mode in ("rb"):
self._accessor = sd.SdReader(filename, sdcred)
# Get the size of each segment. For efficiency assume that all
# segments except the first and last will have the same size.
# TODO-Medium: If sizes() is cheap just request all of them.
numseg = self._accessor.count()
if numseg <= 3:
self._sizes = list(self._accessor.sizes(*range(numseg)))
else:
tmp = self._accessor.sizes(0, 1, numseg-1)
self._sizes = [tmp[0]] + (numseg-2) * [tmp[1]] + [tmp[2]]
elif mode in ("w+b"):
#print(self._config.dump(), sep="\n")
# Create new, deleting or truncating existing.
self._accessor = sd.SdWriter(filename, sdcred, False, self._config.extra)
# TODO-Medium: If the file existed already, the mutable parts of the
# metadata is allowed to change. Data can only be written if
# the file was completely empty i.e. with just segment 0.
self._sizes = []
elif False and mode in ("r+b"):
# TODO-High open without truncating not supported yet.
# Limited support would be to open a file with only headers.
# Full support is much trickier, need to re-open last segment
# and also preferably do an incremental update of statistics
# and lowres bricks.
# Also, legaltag (and maybe seismicmeta) might be ignored here.
self._accessor = sd.SdWriter(filename, sdcred, True, self._config.extra)
numseg = self._accessor.count()
if numseg <= 3:
self._sizes = list(self._accessor.sizes(*range(numseg)))
else:
tmp = self._accessor.sizes(0, 1, numseg-1)
self._sizes = [tmp[0]] + (numseg-2) * [tmp[1]] + [tmp[2]]
elif mode in ("d"):
# TODO-Performance keep the SdUtil instance alive.
# There is a cost creating a new one, especially if the
# token needs a refresh.
self._accessor = None
try:
with sd.SdUtil(sdcred) as u:
u.delete(filename)
except RuntimeError as ex:
# File not found is ignored. Other errors are reported back.
mode = None
if str(ex).find("does not exist") < 0:
raise
else:
raise ZgyUserError("Opening ZGY as " + mode + " is not supported.")
self._mode = mode
self._cached_data = None
@property
def xx_eof(self):
return sum(self._sizes)
def xx_close(self):
mode = self._mode
self._mode = None
if self._accessor and mode and mode != "d":
self._accessor.close()
self._accessor = None
def _split_by_segment(self, requests):
"""
Given one or more (offset, size, ...) tuples, convert these
to (segment_number, offset_in_seg, size_in_seg, outpos).
"outpos" is the offset to store the data that was read, if
it is to be stored sequentially in one large buffer.
Request for data past EOF is converted to a request for data
in the last segment plus one. Trying to read that segment from
seismic store will fail. Note that if _sizes is changed to
include the open segment at the end then this special handling
makes not much difference. At least not if the argument has
already been checked to not cross the real EOF.
Note that the callers currently check for reads past EOF
and raises an exception in that case. So for now the above
        paragraph is of academic interest only.
The returned list might be longer than the input if any of the
input requests crossed segment boundaries.
The return might be shorter than the input or even empty if
any input request was for 0 bytes [or data past EOF... NOT]
The algorithm is O(n^2) on segment_count * request_count
but both numbers should be small. If this actually becomes
a problem then use binary search in self._cumsize to find
the starting segment.
Maybe simplify: This logic could be moved inside SDAPI or the
SDAPI wrapper. Reads from segment "-1" imply linear access.
There would be a slight change in that requests split due to
crossing a segment boundary would not be parallelized. But
that is expected to be a very rare occurrence. Caveat, be
careful what happens if reading past EOF. The code can currently
handle that by returning data from the "open segment". That logic
might not be possible to put into SDAPI. So this is probably
not a good idea.
"""
result = []
outpos = 0
for entry in requests:
offset, size = entry[:2]
assert offset >= 0
seg = 0
for segsize in self._sizes:
this_size = max(0, min(size, segsize - offset))
if this_size > 0:
result.append((seg, offset, this_size, outpos))
offset += this_size
outpos += this_size
size -= this_size
# If not crossing segment boundary, size will be 0
# Otherwise offset will be segsize, meaning that in
# the next iteration offset will be 0 and size will
# be the remaining data to be read.
seg += 1
offset -= segsize
if size <= 0: break
if size > 0:
# Data past EOF treated as if it were in segment+1.
result.append((seg, offset, size, outpos))
insize = sum([e[1] for e in requests])
outsize = result[-1][2] + result[-1][3] if result else 0
assert insize == outsize
return result
def _cached_read(self, seg, offset, view):
"""
Very simplistic cache implementation. Only store the most recent
read from seismic store, and only consider a match when the range
matches exactly. Also, always make copies both when copying in to
and out of the cache. I.e. ignore immutable_ok, transient_ok.
The cache may be useful if the upstream caller only asks for a
single block at a time, so we get neither consolidation nor
parallel access. Enabling this cache and setting force_align
to a suitable value will hopefully cause the code to see the
same bulk request happen more than once. If force_align is off
it is very unlikely that the cache will help.
Understand that storing just a single block in the cache will
only help in lab conditions or in applications that we know
for sure will issue requests for sequential parts of the cube.
And if we know this already then we ought to be able to modify
that code to pass down larger requests. Bottom line, it isn't
very useful the way it works today.
"""
if not self._config.aligned:
self._config._debug_trace("read", len(view), len(view), 1)
return self._accessor.read(seg, offset, view)
seg_size = self._sizes[seg]
a_beg = (offset // self._config.aligned) * self._config.aligned
a_end = ((offset + len(view) + self._config.aligned - 1) // self._config.aligned) * self._config.aligned
a_end = min(a_end, seg_size)
c_seg, c_beg, c_data = self._cached_data or (0, 0, bytes())
need = (seg, a_beg, a_end)
have = (c_seg, c_beg, c_beg + len(c_data))
#print('cache', need, ('==' if need == have else '<>'), have)
if need == have:
self._config._debug_trace("cachehit", len(view), a_end-a_beg, 1)
data = c_data
else:
self._config._debug_trace("cachemiss", len(view), a_end-a_beg, 1)
data = bytearray(a_end - a_beg)
self._accessor.read(seg, a_beg, data)
self._cached_data = (seg, a_beg, data)
view[:] = data[offset-a_beg:offset-a_beg+len(view)]
def xx_read(self, in_offset, in_size, *, usagehint=UsageHint.Unknown):
self._validate_read(in_offset, in_size)
work = self._split_by_segment([(in_offset, in_size)])
result = bytearray(in_size)
view = memoryview(result)
maxseg = max([seg for seg, offset, size, outpos in work])
if maxseg >= len(self._sizes):
# This should only happen in white box unit tests.
# The higher levels of ZGY should have checked for EOF already.
# But seismic store can return a really obscure error message
# and/or hang in a retry loop if we don't do this check.
raise ZgyEndOfFile("Attempt to read from segment " + str(maxseg))
for seg, offset, size, outpos in work:
self._cached_read(seg, offset, view[outpos:outpos+size])
return result
def xx_readv(self, requests, *, parallel_ok=False, immutable_ok=False, transient_ok=False, usagehint=UsageHint.Unknown):
"""
Handle both brick consolidation and multi threading.
This implementation will issue a single readv() request to the
seismic store wrapper, wait for all threads to complete, and
then deliver all the results. For this reason it needs to
allocate a buffer to hold the entire data to be read.
In the future it might be possible to have the seismic store
wrapper support delivery callbacks and for it to allocate
the result buffers itself. This saves some memory and also
allows data to be decompressed if needed and copied out to
user space as the bricks become available. Caveat: requests
may need to be split if they cross a segment boundary.
This means that we need support for partial delivery.
Which would complicate things a lot.
"""
self._validate_readv(requests)
# I don't really like this kind of short cut since it creates
# a lot of corner cases to test for. But, if the naive caching
# is in effect then it is needed to make caching work.
# If multiple requests then the cache won't work anyway,
# and we might as well clear any data it contains.
# TODO-Performance, can I move this test after consolidate
# and split? Otherwise it will probably only work for the headers
# and when the application really did fetch just one brick at
# time. It might be good enough for Petrel though.
if self._config.aligned and len(requests) == 1:
for offset, size, delivery in requests:
delivery(SeismicStoreFile.xx_read(self, offset, size, usagehint=usagehint))
return
self._cached_data = None
# For debugging / logging only
asked = sum([e[1] for e in requests])
new_requests = self._consolidate_requests(requests,
max_hole=self._config.maxhole,
max_size=self._config.maxsize,
force_align=self._config.aligned,
eof=self.xx_eof)
work = self._split_by_segment(new_requests)
# TODO-Low: For robustness scan work[] to get realize. As the
# C++ code in impl/file_sd.cpp SeismicStoreFile::xx_readv() does.
realsize = work[-1][2] + work[-1][3] if work else 0
data = bytearray(realsize)
view = memoryview(data)
eof = sum(self._sizes)
# Read bulk data from seismic store using multiple threads.
self._config._debug_trace("readv", asked, len(view), len(work))
self._accessor.readv(work, data, self._config.threads)
# Deliver result to caller.
pos = 0
for offset, size, delivery in new_requests:
size = max(0, min(size, eof - offset))
delivery(view[pos:pos+size])
pos += size
def xx_write(self, data, offset, *, usagehint=UsageHint.Unknown):
self._validate_write(data, offset)
current_eof = SeismicStoreFile.xx_eof.__get__(self) # nonvirtual call
#print("SeismicStoreFile.xx_write(offset={0}, size={1}, current EOF is {2}".format(offset, len(data), current_eof))
if offset == current_eof:
# Sequential write from current EOF.
# Specific limitation for ZGY, for performance reasons only.
# This means we don't need to fetch sizes for all segments
# when opening a file for read. Note that since the last
# segment can have any size we won't discover a violation
# until the following read.
if len(self._sizes) >= 3 and self._sizes[-1] != self._sizes[1]:
raise ZgyUserError("Cannot write arbitrarily sized segment.")
self._config._debug_trace("append", len(data), len(data), 1)
self._accessor.write(len(self._sizes), data, False)
self._sizes.append(len(data))
elif offset < current_eof:
# Rewrite existing block. Resizing not allowed.
seg = 0
for segsize in self._sizes:
if offset == 0:
if len(data) == segsize:
self._config._debug_trace("write", len(data), len(data), 1)
self._accessor.write(seg, data, True)
break
else:
raise ZgySegmentIsClosed("Cannot write resized segment.")
elif offset < segsize:
raise ZgySegmentIsClosed("Cannot write part of segment.")
seg += 1
offset -= segsize
else:
# Attempting to write sparse data.
raise ZgyUserError("Cannot write segments out of order.")
return len(data)
# If I want to disable threading, possibly also consolidation:
#xx_readv = FileADT._forward_consolidated_readv
#xx_readv = FileADT._forward_readv
@property
def threadsafe(self):
return True if self._mode in ("rb") else False
@property
def xx_iscloud(self):
return True
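# --- Hedged usage sketch (not part of the library) ---
# _split_by_segment only needs the _sizes attribute, so a tiny stand-in
# object (the same "duck typed self" trick used elsewhere in this file)
# lets us illustrate the segment splitting without any seismic store
# connection. The segment sizes below are made up.
if __name__ == "__main__":
    class _FakeSDFile:
        _sizes = [100, 50, 50]
    _parts = SeismicStoreFile._split_by_segment(_FakeSDFile(), [(90, 40)])
    # The request crosses the segment 0 / segment 1 boundary and is split
    # into (segment, offset_in_segment, size, outpos) pieces:
    print(_parts)
    assert _parts == [(0, 90, 10, 0), (1, 0, 30, 10)]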
class SeismicStoreFileDelayedWrite(FileADT):
"""
Improve on SeismicStoreFile, have it buffer large chunks of data before
writing it out to a new segment.
* Writes starting at EOF are allowed, and will buffer data in the
"open segment" until explicitly flushed.
* Writes starting past EOF, signifying a hole in the data, are not
allowed.
* Writes fully inside the open segment are allowed.
* Writes starting before the open segment are only allowed if
offset,size exactly matches a previous write. This will cause that
    segment to be rewritten. As a corollary, writes cannot span the
closed segment / open segment boundary.
* Possible future extension: For the last segment only offset
needs to match. This means the last segment may be resized.
Why we might want this: On opening a file with existing
data bricks we might choose to read the last segment and
turn it into an open segment. Then delete (in memory only)
the last segment. When it is time to flush the data it gets
rewritten. This allows adding bricks to a file, while still
ensuring that all segments except first and last need to be
the same size. Note that there are other tasks such as
incrementally updating statistics and histogram that might
turn out to be a lot of work.
* When used to create ZGY files, caller must honor the convention
that all segments except the first and last must have the same size.
* Caveat: The fact that random writes are sometimes allowed, sometimes
not depending on the segment number violates the principle of
least surprise. And makes for more elaborate testing. For ZGY
it is quite useful though. ZGY can recover from a ZgySegmentIsClosed
exception by abandoning (leaking) the current block and write it
to a new location. With a typical access pattern this will happen
only occasionally.
"""
def __init__(self, filename, mode, iocontext):
super().__init__(filename, mode, iocontext)
self._relay = SeismicStoreFile(filename, mode, iocontext)
self._mode = mode
self._open_segment = bytearray()
self._usage_hint = None
self._config = self._relay._config
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.xx_close()
def _flush_part(self, this_segsize):
"""
Flush "this_segsize" pending bytes. Leave any residual data
in the open segment buffer.
"""
assert this_segsize <= len(self._open_segment)
assert len(self._open_segment) > 0
flushme = memoryview(self._open_segment)
nbytes = self._relay.xx_write(flushme[:this_segsize],
self._relay.xx_eof,
usagehint=self._usage_hint)
if nbytes != this_segsize:
raise ZgyInternalError("Short write to seismic store")
self._open_segment = bytearray(flushme[this_segsize:])
def _flush(self, final):
"""
Flush pending writes, but only if we have enough data to fill
one or more complete segments or if the file is being closed.
The last segment is allowed to be smaller than the others.
"""
if self._config.segsize > 0:
while len(self._open_segment) >= self._config.segsize:
self._flush_part(self._config.segsize)
if final and len(self._open_segment) > 0:
self._flush_part(len(self._open_segment))
if len(self._open_segment) == 0:
self._usage_hint = None
@property
def xx_eof(self):
"""
Current size of the zgy file, including any buffered unwritten data.
"""
return self._relay.xx_eof + len(self._open_segment)
def xx_write(self, data, offset, *, usagehint=UsageHint.Unknown):
"""
Write data to seismic store, buffering the writes to get larger
segment sizes. Writes are only allowed at offset 0 and at EOF.
This is less general then the parent type which lets us rewrite
any segment as long as its size does not change.
Segment 0 contains just the headers and is always written in one
operation, so this is not buffered. Segment 0 can be both smaller
and larger than segsize. Which is another reason to bypass the
buffering code. Also, if we are rewriting data we bypass the
buffering and require that the caller updates the entire segment.
ZGY will currently only rewrite segment 0.
If segsize is zero no buffering is done and each write will either
create a new segment or completely rewrite an existing segment.
"""
written = self.xx_eof
committed = self._relay.xx_eof
#print("SeismicStoreFileDelayedWrite.xx_write(offset={0}, size={1}, current EOF is {2}".format(offset, len(data), self.xx_eof))
# TODO-Low: Refactor: Technically I could buffer segment 0 as well,
# and leave it to the caller to immediately flush that segment.
# I probably still need some special handling for the first segment.
# The benefit is that FileADT gets less knowledge about ZGY proper.
if offset == 0 or self._config.segsize <= 0 or offset < committed:
return self._relay.xx_write(data, offset, usagehint=usagehint)
if offset > written:
# Write sparse data with a hole between written and offset.
raise ZgyUserError("Cannot write segments out of order.")
# TODO-Low: Generalize: If caller doesn't catch ZgySegmentIsClosed then all
# rewrites ought to be forbidden. Since ZGY is our only client and
# does in fact catch that exception then this is low priority.
#if offset != self.xx_eof:
# raise ZgyUserError("Can only write at offset 0 or EOF, not {0} when EOF is {1}. Also, size is {2} and current open segment has {3} bytes.".format(offset, self.xx_eof, len(data), len(self._open_segment)))
# TODO-Low: Refactor: Python abuse! Passing a "self" that is not an
# instance of FileADT. But it has the attributes expected by the method.
FileADT._validate_write(self, data, offset)
if offset == written:
# Append data to open segment
self._open_segment += data
elif offset + len(data) <= written:
# Update data fully inside open segment
self._open_segment[offset-committed:offset-committed+len(data)] = bytearray() + data
else:
# part update, part new.
raise NotImplementedError() # TODO-Low support for symmetry.
if self._usage_hint is None:
self._usage_hint = usagehint
elif self._usage_hint != usagehint:
self._usage_hint = UsageHint.Unknown # mixed hints
self._flush(False)
return len(data) # TODO-Low: retval not useful if I throw on short reads.
def xx_read(self, offset, size, *, usagehint=UsageHint.Unknown):
FileADT._validate_read(self, offset, size)
closed_size = max(0, min(size, self._relay.xx_eof - offset))
opened_size = size - closed_size
local_offset = max(0, offset - self._relay.xx_eof)
if local_offset + opened_size > len(self._open_segment):
raise ZgyUserError("Reading past EOF")
data1 = self._relay.xx_read(offset, closed_size, usagehint=usagehint) if closed_size > 0 else None
data2 = memoryview(self._open_segment)[local_offset:local_offset+opened_size] if opened_size > 0 else None
return data1 + data2 if data1 and data2 else data1 or data2
def xx_readv(self, requests, *, parallel_ok=False, immutable_ok=False, transient_ok=False, usagehint=UsageHint.Unknown, **kwargs):
end = max([offset + size for offset, size, delivery in requests])
if end <= self._relay.xx_eof:
# The open segment is not involved, so just forward the request.
self._relay.xx_readv(requests,
parallel_ok=parallel_ok,
immutable_ok=immutable_ok,
transient_ok=transient_ok,
usagehint=usagehint)
else:
# Let xx_read handle the requests one at a time.
# If the requests consisted of both open and closed segments
            # then this is inefficient since SD access won't be parallelized.
# But that case would be a lot of effort to support and it
# won't happen often.
for offset, size, delivery in requests:
delivery(self.xx_read(offset, size, usagehint=usagehint))
def xx_close(self):
self._flush(True)
return self._relay.xx_close()
@property
def threadsafe(self):
return self._relay.threadsafe
@property
def xx_iscloud(self):
return self._relay.xx_iscloud
def FileFactory(filename, mode, iocontext):
"""
Return a FileADT instance able to read and/or write to the named file.
In the future the function might return different types of instances
e.g. if filename refers to something on the cloud.
"""
if filename[:5] == "sd://":
if mode in ("r+b", "w+b"):
myfile = SeismicStoreFileDelayedWrite(filename, mode, iocontext)
else:
myfile = SeismicStoreFile(filename, mode, iocontext)
elif hasattr(os, "pread"):
myfile = LocalFileLinux(filename, mode, iocontext)
else:
myfile = LocalFileOther(filename, mode, iocontext)
return myfile
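# --- Hedged usage sketch (not part of the library) ---
# FileFactory returns a seismic store backend for "sd://" names and a local
# file backend otherwise. The demo below uses a throw-away local file, so it
# needs no cloud credentials and no iocontext.
if __name__ == "__main__":
    import tempfile
    with tempfile.TemporaryDirectory() as _tmpdir:
        _path = os.path.join(_tmpdir, "demo.dat")
        with FileFactory(_path, "w+b", None) as _f:
            _f.xx_write(b"hello world", 0)
        with FileFactory(_path, "rb", None) as _f:
            assert _f.xx_read(0, 5) == b"hello"
            print("eof:", _f.xx_eof, "threadsafe:", _f.threadsafe)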
# Copyright 2017-2021, Schlumberger
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | zgyio | /zgyio-0.0.4.tar.gz/zgyio-0.0.4/openzgy/impl/file.py | file.py |
##@package openzgy.impl.transform
#@brief Deal with spatial location of the cube.
def generalTransform(ax0, ay0, ax1, ay1, ax2, ay2,
bx0, by0, bx1, by1, bx2, by2,
data):
"""
Coordinate conversion based on 3 arbitrary control points
in both the source and the target system, i.e. no need
to calculate the transformation matrix first.
This is my favorite code snippet for coordinate conversion.
Almost no ambiguity about what it does and how to use it.
The data array is converted in place. The code tries to ensure
that the output is floating point even though the input might
not be. This is to prevent surprises if converting the result
to a numpy type (where types are more strict) and forgetting
to give an explicit type.
    See iltf2d.cpp.
TODO-Test, since this Python code was not ported from there
(I used lattice.py in interpretation-sandbox instead),
some extra testing is needed to verify the code.
"""
# Make everything relative to p0
ax1 -= ax0; ay1 -= ay0;
ax2 -= ax0; ay2 -= ay0;
bx1 -= bx0; by1 -= by0;
bx2 -= bx0; by2 -= by0;
det = ax1*ay2 - ax2*ay1; # The determinant
if abs(det) < 1.0e-6:
# If this is used to interpret coords from a ZGY file
# then caller should catch the exception and either
# just substitute a default or raise ZgyFormatError
raise RuntimeError("Colinear or coincident points.")
for pos in range(len(data)):
xq = data[pos][0] - ax0;
yq = data[pos][1] - ay0;
s = (xq*ay2 - ax2*yq)/det;
t = (ax1*yq - xq*ay1)/det;
data[pos][0] = float(bx0 + s*bx1 + t*bx2);
data[pos][1] = float(by0 + s*by1 + t*by2);
def acpToOcp(orig, inc, size, il, xl, wx, wy):
"""
Convert 3 arbitrary control points containing annotation- and world coords
into 4 ordered corner points according to the Petrel Ordered Corner Points
(OCP) definition, which corresponds to these bulk data indices:
( 0, 0)
(size[0] - 1, 0)
( 0, size[1] - 1)
(size[0] - 1, size[1] - 1)
See PetrelOrientationHandling
This is used to convert from a ThreePoint to a FourPoint definition.
If the definition is already FourPoint then calling this function with
the 3 first points should return the same result.
See OrderedCornerPoints.cpp.
TODO-Test, since this Python code was not ported from there
(I used lattice.py in interpretation-sandbox instead),
some extra testing is needed to verify the code.
"""
last = [orig[0] + inc[0] * (size[0]-1), orig[1] + inc[1] * (size[1]-1)]
corners = [[orig[0], orig[1]],
[last[0], orig[1]],
[orig[0], last[1]],
[last[0], last[1]]]
generalTransform(il[0], xl[0], il[1], xl[1], il[2], xl[2],
wx[0], wy[0], wx[1], wy[1], wx[2], wy[2],
corners)
return corners
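# --- Hedged usage sketch (not part of the library) ---
# Three control points tying annotation (inline, crossline) to world (x, y)
# are expanded to the four Petrel-style ordered corner points. The numbers
# are made up and follow world_x = 1000 + 10*(il-100), world_y = 5000 + 10*(xl-200).
if __name__ == "__main__":
    _corners = acpToOcp(orig=(100, 200), inc=(2, 4), size=(5, 5),
                        il=[100, 108, 100], xl=[200, 200, 216],
                        wx=[1000, 1080, 1000], wy=[5000, 5000, 5160])
    print(_corners)
    assert _corners == [[1000.0, 5000.0], [1080.0, 5000.0],
                        [1000.0, 5160.0], [1080.0, 5160.0]]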
# Copyright 2017-2020, Schlumberger
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | zgyio | /zgyio-0.0.4.tar.gz/zgyio-0.0.4/openzgy/impl/transform.py | transform.py |
##@package openzgy.impl.lodalgo
#@brief Decimation algorithms to output low resolution bricks.
import numpy as np
from enum import Enum
import warnings
from ..exception import ZgyUserError
##@brief Possible algorithms to generate LOD bricks.
class DecimationType(Enum):
"""
Possible algorithms to generate LOD bricks.
We might trim this list later to what is actually in use.
The "classic" ZGY only uses the first two.
CAVEAT: Many of these might be expensive to port and/or not
possible to implement efficiently in Python.
TODO-Low: Avoid exposing this enum to the public API.
"""
    LowPass = 0           # Lowpass Z / decimate XY.
    WeightedAverage = 1   # Weighted averaging (depends on global stats).
    Average = 2           # Simple averaging.
    Median = 3            # Somewhat more expensive averaging.
    Minimum = 4           # Minimum value.
    Maximum = 5           # Maximum value.
    MinMax = 6            # Checkerboard of minimum and maximum values.
    Decimate = 7          # Simple decimation, use first sample.
    DecimateSkipNaN = 8   # Use first sample that is not NaN.
    DecimateRandom = 9    # Random decimation using a fixed seed.
    AllZero = 10          # Just fill the LOD brick with zeroes.
    WhiteNoise = 11       # Fill with white noise, hope nobody notices.
    MostFrequent = 12     # The value that occurs most frequently.
    MostFrequentNon0 = 13 # The non-zero value that occurs most frequently.
    AverageNon0 = 14      # Average value, but treat 0 as NaN.
def _reorder(brick):
"""
Reorder the input brick, decimating it by a factor 2 but keeping
each of the 2x2x2 samples that are to be combined in the last
dimension. E.g. an input shape (64, 64, 64) returns a (32, 32, 32, 8).
"""
shape = (brick.shape[0]//2, brick.shape[1]//2, brick.shape[2]//2, 8)
tmp = np.zeros(shape, dtype=brick.dtype)
tmp[...,0] = brick[0::2,0::2,0::2]
tmp[...,1] = brick[0::2,0::2,1::2]
tmp[...,2] = brick[0::2,1::2,0::2]
tmp[...,3] = brick[0::2,1::2,1::2]
tmp[...,4] = brick[1::2,0::2,0::2]
tmp[...,5] = brick[1::2,0::2,1::2]
tmp[...,6] = brick[1::2,1::2,0::2]
tmp[...,7] = brick[1::2,1::2,1::2]
return tmp
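# --- Hedged usage sketch (not part of the library) ---
# _reorder gathers every 2x2x2 neighbourhood into the last axis, so a simple
# average decimation (in the spirit of DecimationType.Average) is just a mean
# over that axis. Input values are made up.
if __name__ == "__main__":
    _full = np.arange(4 * 4 * 4, dtype=np.float32).reshape(4, 4, 4)
    _grouped = _reorder(_full)
    assert _grouped.shape == (2, 2, 2, 8)
    _lod1 = np.mean(_grouped, axis=3)
    print(_lod1.shape, float(_lod1[0, 0, 0]))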
_filter = np.array([
+0.07996591908598821,
-0.06968050331585399,
-0.10596589191287473,
+0.12716426995479813,
+0.44963820678761110,
+0.44963820678761110,
+0.12716426995479813,
-0.10596589191287473,
-0.06968050331585399,
+0.07996591908598821], dtype=np.float64) * 1.0392374478340252
def _decimate_LowPass(a):
# TODO-Test, this needs very careful testing. I am sort of guessing here.
# TODO-Medium, NaN and Inf handling compatible with what the C++ impl does.
# TODO-Worry, if the algoritm doesn't have precisely zero DC response
# I might need to explicitly test for all-constant traces to avoid
# an empty fullres brick turning into a non-empty LOD brick.
# See Salmon/Zgy/ArrayBasic/ArrayTile.cpp
# See Salmon/Zgy/Common/Sampling.cpp
# See numpy.convolve
dtype = a.dtype
a = a[::2,::2,:].astype(np.float32)
a = np.pad(a, ((0,0), (0,0), (5,5)), mode = 'reflect')
a = np.convolve(a.flatten('C'), _filter, mode = 'same').reshape(a.shape)
a = a[..., 5:-5:2]
return a.astype(dtype)
def _decimate_WeightedAverage(data, *, histogram, defaultvalue):
"""
Decimate the input by weighted average, with the weights being the inverse
of how common this sample value is in the entire survey. This means that
the algorithm needs access to the survey histogram. Which might be a
challenge if the histogram is being computed in parallel with the low
resolution blocks.
The computation can be run either in "storage" mode or in "float" mode.
The data buffer and the histogram obviously need to be in the same mode.
defaultvalue is used if all 8 inputs for one output sample are nan/inf.
For float data this would almost always be 0.0 and for integral data
the input samples cannot be nan/inf. So it might as well be 0 always.
    This function looks very different from the C++ version in the old ZGY
accessor. The reason is that in C++ it is no big deal to have a tight
inner loop processing one sample at a time. In Python that approach is
a joke. On the other hand, Python has the fantastic numpy module.
Note that I might choose to remove all NaN and Inf values in the
_decimate_LowPass method that is typically used to generate lod 1.
    In that case the extra testing for NaN might be redundant.
"""
if histogram is None or np.sum(histogram.bins) == 0:
raise ZgyInternalError("The WeightedAverage algorithm needs a histogram.")
# Linear transform from value to bin number.
# Should round to nearest: bin = np.rint(value * factor + offset)
r = histogram.vv_range
n = histogram.bins.size
factor = (n - 1)/ (r[1] - r[0])
offset = -r[0] * factor
# Better to be safe. All computation in double, to avoid surprises
# e.g. with huge sample counts causing numeric underflow. Also handle
# NaN / Inf values explicitly to avoid a lot of testing.
input_dtype = data.dtype
integral = np.issubdtype(input_dtype, np.integer)
data = data.astype(np.float64, copy=True)
if not integral:
ugly = np.logical_not(np.isfinite(data))
if np.any(ugly):
data[ugly] = 0
else:
ugly = None
else:
ugly = None
# Get histogram bin number for every input sample.
bin_numbers = np.rint(data * factor + offset)
np.clip(bin_numbers, 0, histogram.bins.size - 1, out = bin_numbers)
bin_numbers = bin_numbers.astype(np.int32)
# Get the sample's frequency for every input sample.
src_frequency = histogram.bins[bin_numbers]
# If frequency is reported as 0, this is an inconsistency.
# Since we know it occurs at least one time.
np.clip(src_frequency, 1, None, out=src_frequency)
tmp_weight = np.float64(1) / src_frequency
if ugly is not None:
tmp_weight[ugly] = 0
tmp_wsum = data * tmp_weight
# Now sum each 2x2x2 set of samples.
tmp_wsum = np.nansum(_reorder(tmp_wsum), 3)
tmp_weight = np.nansum(_reorder(tmp_weight), 3)
# Zero weights means no valid samples found among the 8 inputs.
# This can only happen if non-finite data was present in the input,
# causing weights to be set to zero earlier in this function.
if ugly is not None:
ugly2 = tmp_weight == 0
tmp_wsum[ugly2] = defaultvalue
tmp_weight[ugly2] = 1
# Final result is the weighted sum of 8 input samples per output sample.
# If all 8 samples were NaN we instead want to substitute an explicit
    # default value, or possibly set them back to NaN.
tmp_wsum /= tmp_weight
# Convert back to user's type, protecting against integer overflow.
if not integral:
tmp_wsum = tmp_wsum.astype(np.float32, copy=False)
else:
cliprange = (np.iinfo(input_dtype).min, np.iinfo(input_dtype).max)
np.clip(tmp_wsum, cliprange[0], cliprange[1], out=tmp_wsum)
tmp_wsum = np.rint(tmp_wsum).astype(input_dtype, copy=False)
return tmp_wsum
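# Minimal usage sketch for _decimate_WeightedAverage(). The _FakeHistogram
# class is invented for the example; only the attribute names "bins" and
# "vv_range" are assumed, because those are what the function above reads.
# Real callers pass the survey histogram collected elsewhere in the library.
def _example_weighted_average_sketch():
    class _FakeHistogram:
        def __init__(self, data):
            self.vv_range = (float(np.min(data)), float(np.max(data)))
            self.bins, _ = np.histogram(data, bins=256, range=self.vv_range)
    data = np.random.default_rng(0).normal(size=(64, 64, 64)).astype(np.float32)
    lod1 = _decimate_WeightedAverage(data, histogram=_FakeHistogram(data),
                                     defaultvalue=0.0)
    return lod1.shape               # (32, 32, 32)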
def _decimate_Average(brick):
tmp = _reorder(brick.astype(np.double))
# All NaN will return NaN, ditto for average of +inf, -inf.
# I have not seen this documented explicitly but it should be
# a safe assumption. Also the consequences if wrong are slight.
# So, ignore the warning.
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
return np.nanmean(tmp, 3).astype(brick.dtype)
def _decimate_Median(brick):
tmp = _reorder(brick.astype(np.double))
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
return np.nanmedian(tmp, 3).astype(brick.dtype)
def _decimate_Minimum(brick):
tmp = _reorder(brick)
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
return np.nanmin(tmp, 3)
def _decimate_Maximum(brick):
tmp = _reorder(brick)
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
return np.nanmax(tmp, 3)
def _decimate_Decimate(brick):
return brick[0::2,0::2,0::2]
def _decimate_AllZero(brick):
shape = (brick.shape[0]//2, brick.shape[1]//2, brick.shape[2]//2)
return np.zeros(shape, dtype=brick.dtype)
def _decimate_AverageNon0(brick, *, defaultvalue = 0):
"""Average not considering 0, nan, inf. Use defaultvalue if all invalid."""
tmp = _reorder(brick.astype(np.double))
mask = np.isfinite(tmp)
np.logical_not(mask, out=mask)
np.logical_or(mask, tmp == 0, out=mask)
tmp = np.ma.array(tmp, mask=mask, fill_value = defaultvalue)
tmp = np.mean(tmp, 3)
tmp.fill_value = defaultvalue # mean() doesn't preserve it
return tmp.filled().astype(brick.dtype)
_decimation_algo = {
DecimationType.LowPass: _decimate_LowPass,
DecimationType.WeightedAverage: _decimate_WeightedAverage,
DecimationType.Average: _decimate_Average,
DecimationType.Median: _decimate_Median,
DecimationType.Minimum: _decimate_Minimum,
DecimationType.Maximum: _decimate_Maximum,
#DecimationType.MinMax: _decimate_MinMax,
DecimationType.Decimate: _decimate_Decimate,
#DecimationType.DecimateSkipNaN: _decimate_DecimateSkipNaN,
#DecimationType.DecimateRandom: _decimate_DecimateRandom,
DecimationType.AllZero: _decimate_AllZero,
#DecimationType.WhiteNoise: _decimate_WhiteNoise,
#DecimationType.MostFrequent: _decimate_MostFrequent,
#DecimationType.MostFrequentNon0: _decimate_MostFrequentNon0,
DecimationType.AverageNon0: _decimate_AverageNon0,
}
def _is_power_of_two_ge_2(n):
for shift in range(1, 32):
if n == 1<<shift:
return True
return False
def _is_all_power_of_two_ge_2(nn):
return np.all([_is_power_of_two_ge_2(n) for n in nn])
def _combine_eight_bricks(bricks):
"""
Paste together 8 equal-sized bricks. Called only from decimate8(),
    which is not called from production code. Should perhaps be moved
to the unit test module.
"""
half = bricks[0].shape
full = (half[0]*2, half[1]*2, half[2]*2)
result = np.zeros(full, dtype=bricks[0].dtype)
result[:half[0], :half[1], :half[2]] = bricks[0]
result[:half[0], :half[1], half[2]:] = bricks[1]
result[:half[0], half[1]:, :half[2]] = bricks[2]
result[:half[0], half[1]:, half[2]:] = bricks[3]
result[half[0]:, :half[1], :half[2]] = bricks[4]
result[half[0]:, :half[1], half[2]:] = bricks[5]
result[half[0]:, half[1]:, :half[2]] = bricks[6]
result[half[0]:, half[1]:, half[2]:] = bricks[7]
return result
def decimate(brick, algo, **kwargs):
"""
Decimate a single input brick to produce one smaller output brick.
"""
# The following test is only a requirement for bricksize.
# decimate() can be called e.g. for entire brick columns,
# and in that case the only requirement is that the sizes
# are all even.
#if not _is_all_power_of_two_ge_2(brick.shape):
# raise ZgyUserError("Brick size must be >= 2 and a power of 2")
if not all([(n%2) == 0 for n in brick.shape]):
raise ZgyUserError("Decimation can only be run on even-sized bricks")
try:
# TODO-Worry ... Remove the need for this kludge ...
# TODO-Worry ... NOT THREADSAFE ... NOT THREADSAFE ... NOT THREADSAFE ...
np_errors = np.seterr(all='print')
if algo in _decimation_algo:
return _decimation_algo[algo](brick, **kwargs)
else:
raise NotImplementedError(str(algo))
finally:
np.seterr(**np_errors)
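# Hedged usage sketch for decimate(): turn one 64^3 brick into its 32^3
# level-of-detail-1 version. The input values are synthetic; note that
# DecimationType.WeightedAverage additionally needs the histogram= and
# defaultvalue= keyword arguments, so the simpler algorithms are shown here.
def _example_decimate_usage():
    brick = np.zeros((64, 64, 64), dtype=np.float32)
    lod1_avg = decimate(brick, DecimationType.Average)
    lod1_lowpass = decimate(brick, DecimationType.LowPass)
    assert lod1_avg.shape == (32, 32, 32)
    assert lod1_lowpass.shape == (32, 32, 32)
    return lod1_avg, lod1_lowpass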
def decimate8(bricks, algo, **kwargs):
"""
Decimate 8 equal-sized input bricks to produce one output.
Currently not used in production code. Only in the unit test.
The genlod module prefers to split up and paste its data itself.
    This function should perhaps be moved to the unit test module.
    Most of the decimation algorithms operate on a local 2x2x2 region
for each output sample. So it makes no difference whether we
decimate each input brick and then combine them, or the other
way around. Combining last *might* use less memory if lazy
evaluation works. Combining first might help the algorithm perform
better (LowPass) or faster (WeightedAverage?)
"""
test1 = decimate(_combine_eight_bricks(bricks), algo, **kwargs)
test2 = _combine_eight_bricks([decimate(b, algo, **kwargs) for b in bricks])
if algo != DecimationType.LowPass:
assert np.all(test1 == test2)
return test1
# Copyright 2017-2020, Schlumberger
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | zgyio | /zgyio-0.0.4.tar.gz/zgyio-0.0.4/openzgy/impl/lodalgo.py | lodalgo.py |
##@package openzgy.impl.zfp_compress
import math
import numpy as np
import time
import sys
from ..impl import enum as impl_enum
from ..exception import *
from ..impl.compress import CompressFactoryImpl, CompressPlugin, CompressStats, CompressionResult
try:
zfpy_ok = False
from zfpy import compress_numpy as zfpy_compress_numpy
from zfpy import decompress_numpy as zfpy_decompress_numpy
zfpy_ok = True
except Exception as ex:
print("Warning: ZFP compression is not available:", str(ex), file=sys.stderr)
class ZfpCompressPlugin(CompressPlugin):
"""
Implement ZFP compression. See the CompressPlugin base type
for details. Most methods are static or class methods.
The exception is __init__, __call__, and dump().
"""
@staticmethod
def compress(data, snr = 30, stats = None):
if zfpy_ok:
return ZfpCompressPlugin._zfp_compress(data, snr, stats)
else:
return None
@staticmethod
def decompress(cdata, status, shape, file_dtype, user_dtype):
if zfpy_ok:
return ZfpCompressPlugin._zfp_decompress(cdata, status, shape)
else:
return None
@staticmethod
def factory(snr = 30):
return None if snr <= 0 else ZfpCompressPlugin(snr)
def __init__(self, snr = 30):
self._snr = snr
self._details = "ZFP[target_snr={0:.1f}]".format(snr)
self._stats = CompressStats(self._details)
def __call__(self, data):
return self.compress(data, self._snr, self._stats)
def __str__(self):
return self._details
def dump(self, msg=None, *, outfile=None, text=True, csv=False, reset=True):
if not self._stats.empty():
self._stats.dump(msg, outfile=outfile, text=text, csv=csv)
if reset:
self._stats = CompressStats(self._details)
@staticmethod
def _zfp_precision_from_snr(data, want_snr):
# Not sure I want this test...
if want_snr < 10 or want_snr > 70:
return 0, 0, 5
if not np.issubdtype(data.dtype, np.integer):
if not np.all(np.isfinite(data)):
return 0, 0, 5 # Only lossless and no-compression handles this.
# ZFP precision has a valid range of 1 to 64 bit planes.
# If compression is done simply by reducing the number of bits:
# If signal uses 8 bits (-128..+127) the average magnitude of the
        # signal would be 64 and the average quantization noise 0.5.
# So SNR with the current metric would be 20*log10(128) ~= 42 dB.
# Assume ZFP is better than that if asked to use N bit planes:
# 48 dB at 8 bits, or simply 6 dB per bit.
# A heuristic taken from hca_filt.zgy gets a somewhat different
# result, but is quite accurate for that data set.
# TODO-Test: test on other data sets; test on int8 and int16 as well.
#return (want_snr + 3) // 6 if want_snr >= 10 and want_snr <= 70 else 0
precision = (int(want_snr) + 23) // 5
snr_rounded = (precision * 5) - 20
return precision, snr_rounded, 5
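    # Worked example of the heuristic above, using made-up requests:
    # want_snr=30 gives precision = (30 + 23) // 5 = 10 bit planes and
    # snr_rounded = 10 * 5 - 20 = 30 dB; want_snr=70 gives precision 18 and
    # snr_rounded 70 dB. Requests outside 10..70 dB (and buffers containing
    # non-finite samples) return precision 0, which the callers treat as
    # "try lossless instead, or store the brick uncompressed".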
@classmethod
def _zfp_try_one_lossless(cls, idata, file_dtype, snr_wanted):
"""
Lossless compression of the input. snr_wanted is only used for
logging since the actual snr will be perfect.
"""
ctime = None
dtime = None
ddata = idata
starttime = time.perf_counter()
cdata = zfpy_compress_numpy(idata.astype(np.float32, copy=False))
if True: # For testing only, for logging compression and time used.
ctime = time.perf_counter() - starttime
ddata = zfpy_decompress_numpy(cdata).astype(idata.dtype)
dtime = time.perf_counter() - starttime - ctime
if not np.allclose(idata, ddata, equal_nan=True):
                raise ZgyInternalError("zfp: claims to be lossless but isn't.")
isize = idata.size * file_dtype.itemsize
csize = len(cdata)
# Noise should be 0 and snr 99, but I go thru the motions
# to get accurate statistics. "signal" gets logged.
if np.all(np.isfinite(idata)):
signal, noise = CompressStats._collect_snr(idata, ddata)
else:
signal, noise = (0, 0)
snr_result = CompressStats._compute_snr(signal, noise)
return CompressionResult(cdata = cdata,
csize = csize,
isize = isize,
signal = signal,
noise = noise,
ctime = ctime,
dtime = dtime,
snr_result = snr_result,
snr_wanted = snr_wanted,
snr_rounded = 99,
snr_step = 0)
@classmethod
def _zfp_try_one_precision(cls, idata, file_dtype, snr_wanted):
# the next method checks for non-finite numbers.
precision, snr_rounded, snr_step = cls._zfp_precision_from_snr(idata, snr_wanted)
if precision:
starttime = time.perf_counter()
cdata = zfpy_compress_numpy(idata, precision=precision)
ctime = time.perf_counter() - starttime
ddata = zfpy_decompress_numpy(cdata)
dtime = time.perf_counter() - starttime - ctime
isize = idata.size * file_dtype.itemsize
csize = len(cdata)
signal, noise = CompressStats._collect_snr(idata, ddata)
snr_result = CompressStats._compute_snr(signal, noise)
return CompressionResult(cdata = cdata,
csize = csize,
isize = isize,
signal = signal,
noise = noise,
ctime = ctime,
dtime = dtime,
snr_result = snr_result,
snr_wanted = snr_wanted,
snr_rounded = snr_rounded,
snr_step = snr_step)
else:
return CompressionResult(*((None,)*11))
@classmethod
def _zfp_compress_precision(cls, idata, file_dtype, want_snr, stats):
r = cls._zfp_try_one_precision(idata, file_dtype, want_snr)
if r.cdata is None or r.csize > 0.9*r.isize:
r = cls._zfp_try_one_lossless(idata, file_dtype, want_snr)
if r.cdata is not None:
# need to make sure the compressed size is not larger than
# the input. Note that r.isize needs to be computed as the size
# the brick has on disk. If idata has been converted from int8
# to int32 or float32 it is still the size on disk that matters.
# The test below should hardly ever fail for int16 or float data
# but for int8 it is a real possibility.
if r.csize <= 0.9 * r.isize:
stats.add(signal=r.signal, noise=r.noise, snr=r.snr_result,
isize=r.isize, csize=r.csize,
ctime=r.ctime, dtime=r.dtime,
msg="forced")
return r.cdata
return None
@classmethod
def _zfp_compress(cls, data, want_snr = 30, stats = None):
"""
Compression plug-in offering ZFP compression of data bricks.
The function can be passed to _writeRegion and _writeAllLODs
as-is but will in that case have a hard coded snr of 30.
And no compression statistics will be recorded.
To be able to change the snr use something like
lambda x, snr=snr: _zfp_compress(x, snr)
which will capture your local "snr" variable.
The input is a 3d or (TODO-Low 2d) numpy array and the output is bytes.
ZFP or whatever algorithm is used is assumed to handle big / little
endian conversion itself. TODO-Worry this is not quite true for ZFP.
See the documentation. A special compilation flag is needed
        on big endian machines. Also I suspect the optional header
(which this code uses) might need byte swapping.
"""
# If forced inside test_compress.run_file, make sure we are really just used to handle float data.
#assert data.dtype == np.float32
if want_snr < 0 or not zfpy_ok:
return None # will end up uncompressed.
# Note, the api currently enforces file_dtype == np.float32,
# to avoid having the user shoot himself in the foot.
file_dtype = data.dtype
data = data.astype(np.float32, copy=False)
cdata = cls._zfp_compress_precision(data, file_dtype, want_snr, stats)
if cdata is not None: return cdata
# Give up. Do not compress this brick.
# Not sure whether I want to count this brick in the statistics.
# Definitely don't include ctime and dtime; timing here is irrelevant.
if stats:
isize = data.size * data.itemsize
signal, noise = CompressStats._collect_snr(data, data)
stats.add(signal=signal, noise=noise, snr=99,
isize=isize, csize=isize, msg="noncompr")
return None
@classmethod
def _zfp_decompress(cls, cdata, status, shape):
"""
Decompress data produced by _zfp_compress.
ZFP will return an ndarray and it is assumed that any byte
swapping has already been taken care of. ZFP encodes size in
its own header so we ignore what is passed by the caller. ZFP
also encodes dtype, but doesn't recognize int8 or int16 so
        these will show up as float or int32 and must be converted.
We need to be told which data type the caller eventually wants,
because the file might contain integral data encoded as float.
If the caller wants float data it would add more noise if we
temporarily convert it to int here.
See CompressPlugin.decompress for the argument list etc.
"""
if len(cdata) < 4 or cdata[:3] != bytes("zfp", "ASCII"):
return None # Not ours.
# Might be e.g. a memoryview if reading from cloud.
        if not isinstance(cdata, bytes): cdata = bytes(cdata)
ddata = zfpy_decompress_numpy(cdata)
#print("Inflated", len(cdata), "->", ddata.size*ddata.itemsize)
return ddata
# Add to list of known factories.
if zfpy_ok:
CompressFactoryImpl.registerCompressor("ZFP", ZfpCompressPlugin.factory)
CompressFactoryImpl.registerDecompressor("ZFP", ZfpCompressPlugin.decompress)
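# Hedged round-trip sketch, only meaningful when the zfpy module is available.
# The brick contents and the snr value are made up; the extra arguments to
# decompress() mirror the signature above but are largely ignored because the
# ZFP stream encodes shape and value type in its own header.
def _example_zfp_roundtrip(snr=30):
    if not zfpy_ok:
        return None
    data = np.linspace(-1.0, 1.0, 64*64*64, dtype=np.float32).reshape(64, 64, 64)
    plugin = ZfpCompressPlugin.factory(snr=snr)
    cdata = plugin(data)   # may be None if compression would not pay off
    if cdata is None:
        return None
    back = ZfpCompressPlugin.decompress(cdata, None, data.shape,
                                        np.float32, np.float32)
    return None if back is None else float(np.max(np.abs(back - data)))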
# Copyright 2017-2020, Schlumberger
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | zgyio | /zgyio-0.0.4.tar.gz/zgyio-0.0.4/openzgy/impl/zfp_compress.py | zfp_compress.py |
##@package openzgy.impl
#@brief All implementation details are in this namespace.
##@package openzgy.impl.meta
#@brief Meta data read/write.
import struct
import sys
import numpy as np
import json
import math
import random
from enum import Enum
from ..impl.transform import acpToOcp
from ..impl import enum as impl_enum
from ..impl import file as impl_file
from ..exception import *
def _checked_read(f, offset, size, *, usagehint=impl_file.UsageHint.Header):
data = f.xx_read(offset, size, usagehint=usagehint)
if len(data) != size:
if len(data) < size:
raise ZgyFormatError("Got EOF reading header.")
else:
raise ZgyInternalError("Got too much data reading header.")
return data
def _fix_codingrange(r, datatype):
"""
Sanity check. If the codingrange for an int cube is bad, silently
use a range that causes no conversion between storage and float.
This avoids several corner cases both inside OpenZGY and in applications.
Rationale: A non-finite range is always bad. A range with min==max
is technically valid when reading, as all storage values would map
to the same float value. But this is almost certainly not what the
writer intended. Similarly a range with min>max technically means
that increasing storage values correspond to decreasing float values.
Again, the values are more likely to be completely bogus.
Leave the range alone if this is a float cube. There is no conversion
involved, and for float the codingrange is ignored by the API anyway.
    For files written by the OpenZGY library an exception would be thrown
on create. So the codingrange should always be ok for those.
Note: The sanity check could also be applied to the histogram range.
That fix would also apply to float cubes. The histogram is less
important though, and it should be ok to let the application worry
about a bad histogram.
"""
file_dtype = impl_enum._map_DataTypeToNumpyType(datatype)
if np.issubdtype(file_dtype, np.integer):
iinfo = np.iinfo(file_dtype)
if (not r or
not np.isfinite(r[0]) or
not np.isfinite(r[1]) or
r[1] <= r[0]):
#print("Bad codingrange", r, "-> use", (iinfo.min, iinfo.max))
r = (float(iinfo.min), float(iinfo.max))
return r
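# Illustrative sketch of the sanity check above, with made-up ranges. It
# assumes impl_enum.RawDataType.SignedInt8 is the int8 member referred to by
# the "int8 = 0" notes in the format descriptions later in this module.
# An unusable range such as (0, 0) falls back to the full int8 range so that
# storage values pass through unchanged; a sane range is returned as-is.
def _example_fix_codingrange():
    bad = _fix_codingrange((0.0, 0.0), impl_enum.RawDataType.SignedInt8)
    good = _fix_codingrange((-1.0, 1.0), impl_enum.RawDataType.SignedInt8)
    return bad, good                # ((-128.0, 127.0), (-1.0, 1.0))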
class ErrorsWillCorruptFile:
"""
Duplicated between impl.bulk and impl.meta. Maybe fix sometime.
"""
def __init__(self, parent): self._parent = parent
def __enter__(self): return None
def __exit__(self, type, value, traceback):
if type:
#print("Meta: Exit critical section", str(value))
self._parent._is_bad = True
class HeaderBase:
"""
Convenience base class for implementing classes that map 1:1 to a
specific header with a specific version that exists in a ZGY file.
The constructor should unpack a supplied byte buffer into a new
instance of this header. Caller is responsible for all I/O, so no
methods in this class need an iocontext except read(). The various
headers don't need to inherit this base class if they don't want to.
"""
@classmethod
def _formats(cls):
"""
Define the physical header layout. This method needs to be
overridden in a subclass. Otherwise the header will be empty.
The second field is the format as recognized by the 'struct' module.
Data on the file is stored packed and little-endian. So for use with
'struct', a '<' should be prepended to the string.
"""
return []
@classmethod
def _format(cls):
"""
Describe the layout of this header block on the file,
showing only the offsets as a string recognized by 'struct'.
May be used as an argument to struct.calcsize(). Can technically
also be used with struct.unpack to read the entire struct at
once. But, that will returns a flat list which is tricky to
assign to the respective attributes.
"""
return ("<" + " ".join([ e[1] for e in cls._formats() ])).strip()
@classmethod
def headersize(cls):
"""
Return the size this header has on disk.
"""
return struct.calcsize(cls._format())
@classmethod
def checkformats(cls, verbose = False, *, file = None):
"""
Helper to compare the python definition of the header layout with
the C++ version. Also check that the same attribute isn't listed twice.
"""
file = file or sys.stdout
mapping = {
"char*": "",
"enum": "B",
"float32": "f",
"float64": "d",
"int32": "i",
"int64": "q",
"uint32": "I",
"uint64": "Q",
"uint8": "B",
}
errors = 0
seen = set()
byteoffset = 0
for e in cls._formats():
if e[0] in seen:
print("# ERROR: attribute {0} is listed twice.".format(e[0]), file=file)
seen.add(e[0])
ctype = e[2]
cname = e[0]
csize = None
p1 = ctype.find("[")
p2 = ctype.find("]")
if p1 > 0 and p2 > p1:
csize = ctype[p1+1:p2]
cname = cname + "[" + csize + "]"
ctype = ctype[0:p1]
expect = (csize if csize else '') + mapping[ctype]
if expect == "16B": expect = "16s"
if expect == "4B": expect = "4s"
actual = e[1]
if actual and actual != expect:
print("# ERROR: Expected code {0}, got {1}".format(expect, e[1]), file=file)
errors += 1
byteoffset += struct.calcsize(e[1])
assert not errors
def pack(self):
"""
Convert the contents of this class to a byte array suitable for
storing in the ZGY file.
"""
self.checkformats()
buf = bytearray(self.headersize())
offset = 0
for e in self._formats():
if e[1]:
value = getattr(self, e[0])
if isinstance(value, Enum):
value = value.value
if isinstance(value, (list, np.ndarray)):
value = tuple(value)
if not type(value) is tuple: value = (value,)
data = struct.pack_into("<" + e[1], buf, offset, *value)
offset += struct.calcsize("<" + e[1])
assert offset == self.headersize()
return buf
def unpack(self, buf = None):
"""
Convert a byte array as read from the ZGY file into a Python object.
Normally a call to unpack() will immediately be followed by a call
        to calculate() to fill in any derived information, convert enums, etc.
        If buf is None, unpacking is done on an all-zero buffer. This ensures
        that all data fields are present in the object, simplifying things
        if the application is creating an instance from scratch.
"""
self.checkformats()
if not buf: buf = bytes(self.headersize())
offset = 0
for e in self._formats():
if e[1]:
data = struct.unpack_from("<" + e[1], buf, offset=offset)
offset += struct.calcsize("<" + e[1])
setattr(self, e[0], data if len(data) > 1 else data[0])
else:
setattr(self, e[0], None)
assert offset == self.headersize()
@classmethod
def read(cls, f, offset):
"""
Read the header from disk and parse it, returning a new instance.
"""
return cls(_checked_read(f, offset, cls.headersize()))
def dump(self, prefix = "", file = None):
"""
Print the entire contents of the object, including derived fields.
"""
file = file or sys.stdout
print("\n{0} takes up {1} bytes with format '{2}'".format(
self.__class__.__name__, self.headersize(), self._format()), file=file)
for e in self._formats():
print("{0}{1:12} = {2}".format(prefix, e[0], getattr(self, e[0])), file=file)
#****************************************************************************
#** FileHeader **************************************************************
#****************************************************************************
class FileHeader(HeaderBase):
"""
Unpack a byte buffer into a new instance of this header.
Caller is responsible for all I/O, so we don't need an iocontext.
"""
def __init__(self, buf = None, *, compressed = None):
assert self.headersize() == 8
# Python isn't really good at overloaded methods.
assert (buf is None and compressed is not None) or (buf is not None and compressed is None)
if buf:
self.unpack(buf)
assert buf == self.pack()
if self._magic == b'VCS\x00': raise ZgyFormatError("Old ZGY compressed files are not supported")
if self._magic != b'VBS\x00': raise ZgyFormatError("Not an uncompressed ZGY file")
            if self._version < 1 or self._version > 4: raise ZgyFormatError("Unsupported ZGY version " + str(self._version))
else:
# Prepare for writing a new file.
self._magic = b'VBS\x00'
self._version = 4 if compressed else 3
@staticmethod
def _formats():
return [
('_magic', '4s', 'uint8[4]', 'Always VBS\\0.'),
('_version', 'I', 'uint32', 'Current version is 3 or 4.'),
]
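# Round-trip sketch for the header plumbing above, using only what is defined
# in this module: create a FileHeader for a new uncompressed file, pack it to
# its 8-byte on-disk form and unpack it again. The same pack()/unpack()
# mechanism from HeaderBase applies to the larger headers below.
def _example_fileheader_roundtrip():
    fh = FileHeader(compressed=False)
    buf = bytes(fh.pack())          # b'VBS\x00' followed by version 3 as uint32
    again = FileHeader(buf)
    return again._version           # 3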
#****************************************************************************
#** OffsetHeader ************************************************************
#****************************************************************************
class OffsetHeaderV1:
def __init__(self):
"""
Unpack a byte buffer into a new instance of this header.
Caller is responsible for all I/O, so we don't need an iocontext.
"""
self._infsize = 0
self._strsize = 0
self._histsize = 0
self._alphalupsize = 0
        self._bricklupsize = 0
self._infoff = -1
self._stroff = 0 # not used in V1
self._histoff = -1
self._alphalupoff = -1
self._bricklupoff = -1
self.calculate()
@classmethod
def _format(self):
return "<8I"
@classmethod
def size(self):
return struct.calcsize(self._format())
@classmethod
def load(self, buf):
data = struct.unpack(self._format(), buf)
r = self()
# Weird combination of big-endian and little-endian.
# Also beware that it was cast to signed int.
# Negative offsets probably yield undefined behavior.
# But that should not have been possible anyway.
r._infoff = (data[0] << 32) + data[1]
r._stroff = 0
r._alphalupoff = (data[2] << 32) + data[3]
r._bricklupoff = (data[4] << 32) + data[5]
r._histoff = (data[6] << 32) + data[7]
#print("###", r._infoff)
return r
def calculate(self, ih = None):
"""
Offsets are stored explicitly, so this only needs to set size.
"""
self._infsize = InfoHeaderV1.headersize()
self._histsize = HistHeaderV1.headersize()
self._alphalupsize = 8 * ih._alphaoffsets[-1] if ih else 0
self._bricklupsize = 8 * ih._brickoffsets[-1] if ih else 0
@classmethod
def read(cls, f):
# The OffsetHeader always starts immediately after the FileHeader,
# which always starts at the beginning of the file.
return cls.load(_checked_read(f, FileHeader.headersize(), cls.size()))
def dump(self, *, file=None):
file = file or sys.stdout
print("\n{0} takes up {1} bytes with format '{2}'".format(self.__class__.__name__, self.size(), self._format()), file=file)
print(" Offsets: info {0:x} str {1:x} hist {2:x} alpha {3:x} brick {4:x}".format(
self._infoff, self._stroff, self._histoff, self._alphalupoff, self._bricklupoff), file=file)
@staticmethod
def _formats():
"""
Describe the layout of this header block on the file.
This class doesn't inherit HeaderBase and is coded by hand.
The implementation won't use _formats but it is included
for the benefit of some tools.
"""
return [
('_infoff', 'Q', 'int64', 'InfoHeader position in file.'),
('_stroff', '', 'int64', 'String table position, N/A in V1 and pulled from InfoHeader in V2.'),
('_alphalupoff', 'Q', 'int64', 'Alpha tile lookup table position in file.'),
('_bricklupoff', 'Q', 'int64', 'Brick data lookup table position in file.'),
('_histoff', 'Q', 'int64', 'Histogram position in file.'),
]
class OffsetHeaderV2:
def __init__(self, buf = None):
"""
Unpack a byte buffer into a new instance of this header.
Caller is responsible for all I/O, so we don't need an iocontext.
"""
self._infsize = 0
self._strsize = 0
self._histsize = 0
self._alphalupsize = 0
        self._bricklupsize = 0
self._infoff = -1
self._stroff = -1
self._histoff = -1
self._alphalupoff = -1
self._bricklupoff = -1
self.calculate()
def pack(self):
return bytes(1)
@classmethod
def _format(self):
return "<B"
@classmethod
def size(self):
return struct.calcsize(self._format())
@classmethod
def read(cls, f):
# This will read and discard a single byte.
# The OffsetHeader always starts immediately after the FileHeader,
# which always starts at the beginning of the file.
return cls(_checked_read(f, FileHeader.headersize(), cls.size()))
def dump(self, *, file=None):
file = file or sys.stdout
print("\n{0} takes up {1} bytes with format '{2}'".format(self.__class__.__name__, self.size(), self._format()), file=file)
print((" Offsets = info {_infoff} str {_stroff} hist {_histoff} alpha {_alphalupoff} brick {_bricklupoff}\n" +
" Sizes = info {_infsize} str {_strsize} hist {_histsize} alpha {_alphalupsize} brick {_bricklupsize}" +
"").format(**self.__dict__), file=file)
def calculate(self, ih = None):
"""
Calculate offsets and sizes for the various headers and tables.
Some information requires the InfoHeader to be already known.
If it isn't we will just calculate as much as we can.
In general the size of a header as written to file might be
larger than the size that the header expects to unpack.
This allows adding more data fields at the end of the header.
Older readers will just unpack the fields they know about.
For ZGY V{2,3,4} this is moot, as all the offsets are implicit
with all the headers written sequentially. So the size needs
to match exactly or the headers following this will be corrupt.
"""
self._infoff = FileHeader.headersize() + OffsetHeaderV2.size()
self._infsize = InfoHeaderV2.headersize()
if ih:
self._strsize = ih._slbufsize
self._histsize = HistHeaderV2.headersize()
self._alphalupsize = 8 * ih._alphaoffsets[-1]
self._bricklupsize = 8 * ih._brickoffsets[-1]
self._stroff = self._infoff + self._infsize
self._histoff = self._stroff + self._strsize
self._alphalupoff = self._histoff + self._histsize
self._bricklupoff = self._alphalupoff + self._alphalupsize
@staticmethod
def _formats():
"""
Describe the layout of this header block on the file.
This class doesn't inherit HeaderBase and is coded by hand.
The implementation won't use _formats but it is included
for the benefit of some tools.
"""
return [
('_infoff', '', 'int64', 'InfoHeader position in file.'),
('_stroff', '', 'int64', 'String table position, N/A in V1 and pulled from InfoHeader in V2.'),
('_alphalupoff', '', 'int64', 'Alpha tile lookup table position in file.'),
('_bricklupoff', '', 'int64', 'Brick data lookup table position in file.'),
('_histoff', '', 'int64', 'Histogram position in file.'),
]
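# Worked example of the sequential V2+ layout computed in calculate() above,
# using sizes from the classes in this module: FileHeader is 8 bytes and
# OffsetHeaderV2 is 1 byte, so InfoHeaderV2 always starts at offset 9 and is
# 337 bytes; the string list then starts at offset 346, followed by the
# 2064-byte HistHeaderV2, the alpha lookup table and the brick lookup table.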
class OffsetHeaderV3(OffsetHeaderV2):
pass
class OffsetHeaderV4(OffsetHeaderV3):
pass
def OffsetHeaderFactory(version):
try:
return [OffsetHeaderV1, OffsetHeaderV2, OffsetHeaderV3, OffsetHeaderV4][version-1]
except IndexError:
raise ZgyFormatError("Version " + str(version) + " is not supported")
#****************************************************************************
#** InfoHeader **************************************************************
#****************************************************************************
class InfoHeaderV1(HeaderBase):
def __init__(self, buf = None):
"""
Unpack a byte buffer into a new instance of this header.
Caller is responsible for all I/O, so we don't need an iocontext.
"""
assert self.headersize() == 146
self.unpack(buf)
self.calculate()
if buf:
# Check the roundtrip.
assert buf == self.pack()
@classmethod
def load(self, buf):
result = self()
data = struct.unpack(self._format(), buf)
#print(data)
assert len(data) == 30
return result
def calculate(self, sl = None, hh = None):
# See FileVersion<1>::InfoHeader::Interpret in the old library.
self._bricksize = (64, 64, 64)
try:
if type(self._datatype) == int: self._datatype = impl_enum.RawDataType(self._datatype)
except ValueError as ex:
raise ZgyFormatError("Invalid enumeration found in ZGY file: " + str(ex))
try:
if type(self._coordtype) == int: self._coordtype = impl_enum.RawCoordType(self._coordtype)
except ValueError as ex:
self._coordtype = impl_enum.RawCoordType.Unknown
self._ocp_world, self._ocp_annot, self._ocp_index = (
_CalcOrderedCorners(self._orig, self._inc, self._size,
self._gpiline, self._gpxline,
self._gpx, self._gpy))
self._lodsizes = _CalcLodSizes(self._size, self._bricksize)
self._nlods = len(self._lodsizes)
self._brickoffsets = _CalcLutOffsets(self._lodsizes, False)
self._alphaoffsets = _CalcLutOffsets(self._lodsizes, True)
# In V1 there is just a single value range, used both for the
# histogram and for scaling integral values to float and for
self._file_codingrange = (hh._min, hh._max) if hh else None
# Sanity check. If codingrange is bad, silently use a range
# that causes no conversion between storage and float.
self._safe_codingrange = _fix_codingrange(self._file_codingrange, self._datatype)
self._smin = hh._min if hh else None
self._smax = hh._max if hh else None
# Convert the V1 "coordtype" member to the V2 equivalent.
if self._coordtype == impl_enum.RawCoordType.Meters:
self._hdim = impl_enum.RawHorizontalDimension.Length
self._hunitname = 'm'
self._hunitfactor = 1.0
elif self._coordtype == impl_enum.RawCoordType.Feet:
self._hdim = impl_enum.RawHorizontalDimension.Length
self._hunitname = 'ft'
self._hunitfactor = 0.3048
elif self._coordtype == impl_enum.RawCoordType.ArcSec:
self._hdim = impl_enum.RawHorizontalDimension.ArcAngle
self._hunitname = 'arcsec'
self._hunitfactor = 1.0
elif self._coordtype == impl_enum.RawCoordType.ArcDeg:
self._hdim = impl_enum.RawHorizontalDimension.ArcAngle
self._hunitname = 'deg'
self._hunitfactor = 3600.0
elif self._coordtype == impl_enum.RawCoordType.ArcDegMinSec:
# value = deg*10000 + min*100 + sec
# Not supported, nor does it work in the old code.
self._hdim = impl_enum.RawHorizontalDimension.Unknown
self._hunitname = '?'
self._hunitfactor = 1.0
else:
self._hdim = impl_enum.RawHorizontalDimension.Unknown
self._hunitname = ''
self._hunitfactor = 1.0
# V1 had no vertical unit information.
self._vdim = impl_enum.RawVerticalDimension.Unknown
self._vunitname = ''
self._vunitfactor = 1.0
@staticmethod
def _formats():
"""
Describe the layout of this header block on the file.
The second field is the format as recognized by the 'struct' module.
Data on the file is stored packed and little-endian. So for use with
'struct', a '<' should be prepended to the string.
"""
return [
('_size', '3i', 'int32[3]', 'Integer size in inline, crossline, vertical directions.'),
('_orig', '3i', 'int32[3]', 'First inline, crossline, time/depth. Only integral values allowed.'),
('_inc', '3i', 'int32[3]', 'Integer increment in inline, crossline, vertical directions.'),
('_incfactor', '3f', 'float32[3]', 'Unused. Write as (1,1,1), ignore on read.'),
('_gpiline', '4i', 'int32[4]', 'Inline component of 4 control points.'),
('_gpxline', '4i', 'int32[4]', 'Crossline component of 4 control points.'),
('_gpx', '4d', 'float64[4]', 'X coordinate of 4 control points.'),
('_gpy', '4d', 'float64[4]', 'Y coordinate of 4 control points.'),
('_datatype', 'B', 'uint8', 'Type of samples in each brick: int8 = 0, int16 = 2, float32 = 6.'),
('_coordtype', 'B', 'uint8', 'Coordinate type: unknown = 0, meters = 1, feet = 2, degrees*3600 = 3, degrees = 4, DMS = 5.'),
# Derived, as they are missing in v1 but present in v2.
('_bricksize', '', 'int32[3]', 'Brick size. Values other than (64,64,64) will likely not work.'),
('_file_codingrange', '', 'float32[2]', 'Rescaling interval used if datatype is non-float. Value range hint otherwise.'),
# Data identifiers
('_dataid', '', 'uint8[16]', 'Data GUID, set on file creation.'),
            ('_verid', '', 'uint8[16]', 'Data version GUID, set each time the file is changed.'),
('_previd', '', 'uint8[16]', 'Previous data version GUID.'),
# Data source
('_srcname', '', 'char*', 'Source name.'),
            ('_srcdesc', '', 'char*', 'Source description.'),
('_srctype', '', 'uint8', 'Source datatype.'),
# Extent
('_curorig', '', 'int32[3]', 'Zero-based origin of extent spanned by the data currently in the file. Unused?'),
('_cursize', '', 'int32[3]', 'Size of extent spanned by the data currently in the file. Unused?'),
# Statistics
('_scnt', '', 'int64', 'Count of values used to compute statistics.'),
('_ssum', '', 'float64', 'Sum of all "scnt" values.'),
('_sssq', '', 'float64', 'Sum of squared "scnt" values.'),
('_smin', '', 'float32', 'Statistical (computed) minimum value.'),
('_smax', '', 'float32', 'Statistical (computed) maximum value.'),
('_srvorig', '', 'float32[3]', 'Unused?'),
('_srvsize', '', 'float32[3]', 'Unused?'),
# Grid definition
('_gdef', '', 'uint8', 'Grid definition type. Ignored on read.'),
('_gazim', '', 'float64[2]', 'Unused.'),
('_gbinsz', '', 'float64[2]', 'Unused.'),
# Horizontal domain
('_hprjsys', '', 'char*', 'Free form description of the projection coordinate system. Usually not parseable into a well known CRS.'),
('_hdim', '', 'uint8', 'Horizontal dimension.'),
('_hunitfactor', '', 'float64', 'Multiply by this factor to convert from storage units to SI units.'),
('_hunitname', '', 'char*', 'For annotation only. Use the factor to convert to or from SI.'),
# Vertical domain
('_vdim', '', 'uint8', 'Vertical dimension.'),
('_vunitfactor', '', 'float64', 'Multiply by this factor to convert from storage units to SI units.'),
('_vunitname', '', 'char*', 'For annotation only. Use the factor to convert to or from SI.'),
# Derived information, both in v1 and v2.
            ('_ocp_world', '', 'enum', 'Ordered corner points: ((i0,j0),(iN,j0),(i0,jM),(iN,jM))'),
('_lodsizes', '', 'int32[lod]', 'Size of the survey at reduced level of detail.'),
('_nlods', '', 'int32', 'How many levels of details. 1 means only full resolution. Currently nlods will always be just enough to make the highest LOD (i.e. lowest resolution) fit in a single brick.'),
('_brickoffsets', '', 'int64[lod]', 'How many entries in the lookup table to skip when dealing with level N.'),
('_alphaoffsets', '', 'int64[lod]', 'How many entries in the lookup table to skip when dealing with level N.'),
]
class InfoHeaderV2(HeaderBase):
def __init__(self, buf = None):
"""
Unpack a byte buffer into a new instance of this header.
Caller is responsible for all I/O, so we don't need an iocontext.
"""
assert self.headersize() == 337
self.unpack(buf)
if buf:
# Check the roundtrip.
if buf != self.pack():
print("FAIL",
" ".join([hex(x) for x in buf]),
" ".join([hex(x) for x in self.pack()]),
sep="\n")
assert buf == self.pack()
# Need to do this at end, because calculate() may patch the header.
self.calculate()
@staticmethod
def _cast_enum(e, cls, default):
"""
Convert an integer read from file to the correctly typed enum.
The numerical values of these enums have been chosen to match
what the file contains. If the file contains an impossible
value then return the supplied default. It is safe to call this
method twice. If the value is already an enum then it is returned
unchanged.
"""
if type(e) == int:
try:
return cls(e)
except ValueError:
return default
else:
return e
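    # Example of the fallback behaviour above, with a made-up corrupt value:
    # _cast_enum(99, impl_enum.RawGridDefinition,
    #            impl_enum.RawGridDefinition.Unknown)
    # returns RawGridDefinition.Unknown because 99 is not a defined grid type,
    # while a value that is already an enum member is returned unchanged.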
def calculate(self, sl = None, hh = None):
# Convert plain ints to enums. If they are already enums then this is a no-op.
try:
if type(self._datatype) == int:
self._datatype = impl_enum.RawDataType(self._datatype)
except ValueError as ex:
# If the data type is not recognized the file will be unuseable.
raise ZgyFormatError("Invalid enumeration found in ZGY file: " + str(ex))
# The other enums on the file are not important; replace corrupt
        # values with a suitable default.
self._srctype = self._cast_enum(
self._srctype, impl_enum.RawDataType, self._datatype)
self._gdef = self._cast_enum(
self._gdef, impl_enum.RawGridDefinition, impl_enum.RawGridDefinition.Unknown)
self._hdim = self._cast_enum(
self._hdim, impl_enum.RawHorizontalDimension, impl_enum.RawHorizontalDimension.Unknown)
self._vdim = self._cast_enum(
self._vdim, impl_enum.RawVerticalDimension, impl_enum.RawVerticalDimension.Unknown)
# Set the 5 strings if a StringHeader is provided, else clear them.
# Normally we get called exactly once without a StringList, followed
# by exactly one call that has the strings.
self._srcname = sl._list[0] if sl else None
self._srcdesc = sl._list[1] if sl else None
self._hprjsys = sl._list[2] if sl else None
self._hunitname = sl._list[3] if sl else None
self._vunitname = sl._list[4] if sl else None
# Geometry can be specified either as 4 ordered corner points
# (_gdef = RawGridDefinition.FourPoint) or 3 arbitrary points
# (_gdef = RawGridDefinition.ThreePoint) where both annotation
# and world coordinates are given. There is also .Parametric
# which specifies azimuth and spacing, but that is no longer
# supported. If it ever was. With FourPoint the writer is still
# required to store the annotation coordinates just in case
# there is some disagreement over how the corners are ordered.
# So, we treat FourPoint the same as ThreePoint. Ignore the last
# point (nominally this is the corner opposite the origin)
        # and treat the remaining 3 points as if they were arbitrary.
self._ocp_world, self._ocp_annot, self._ocp_index = (
_CalcOrderedCorners(self._orig, self._inc, self._size,
self._gpiline, self._gpxline,
self._gpx, self._gpy))
self._lodsizes = _CalcLodSizes(self._size, self._bricksize)
self._nlods = len(self._lodsizes) # NOT _CalcNumberOfLODs(self._size)
self._brickoffsets = _CalcLutOffsets(self._lodsizes, False)
self._alphaoffsets = _CalcLutOffsets(self._lodsizes, True)
# Sanity check. If codingrange is bad, silently use a range
# that causes no conversion between storage and float.
self._safe_codingrange = _fix_codingrange(self._file_codingrange, self._datatype)
def _calculate_strings(self):
strings = [self._srcname or '',
self._srcdesc or '',
self._hprjsys or '',
self._hunitname or '',
self._vunitname or '']
strings = '\x00'.join(strings) + '\x00'
strings = bytes(strings, "ASCII", errors='replace')
return strings
def calculate_write(self):
"""
        Call this when important parts of this struct have changed.
        Note that on write it is this method that computes slbufsize, which is
        then copied to the OffsetHeader and used when the string list is
        written, not the other way around.
"""
self._lodsizes = _CalcLodSizes(self._size, self._bricksize)
self._nlods = len(self._lodsizes)
self._brickoffsets = _CalcLutOffsets(self._lodsizes, False)
self._alphaoffsets = _CalcLutOffsets(self._lodsizes, True)
self._slbufsize = len(self._calculate_strings())
# For floating point data the coding range set by the user
# on file creation is ignored; see _init_from_scratch().
# on write it is set to the actual min/max data range as
# measured in the statistics. This is very useful if the
# file is later converted to an integral type without
# explicitly setting the range. Also in general it is bad
# to have an attribute which might be set incorrectly by
# the user but is 99% unused so the consequence of setting
# it wrong is unknown.
# Note that overriding the codingrange is also done on read,
# see openzgy.api.ZgyMeta.datarange.
# TODO-Low, this does not support the case where data is written
# containing huge spikes and where the codingrange is then
# set to the correct range the data ought to be clipped to.
if self._datatype == impl_enum.RawDataType.Float32:
if self._smin <= self._smax:
self._file_codingrange = (self._smin, self._smax)
else: # No valid samples in file, still need a valid range
self._file_codingrange = (-1, +1)
self._safe_codingrange = self._file_codingrange
@staticmethod
def _formats():
"""
Describe the layout of this header block on the file.
The second field is the format as recognized by the 'struct' module.
Data on the file is stored packed and little-endian. So for use with
'struct', a '<' should be prepended to the string.
In the Info header, the fields bricksize, datatype, and size
are particularly important and immutable because their values
affect the size of other blocks on the file. Codingrange is
also immutable when storage type is integral. Because once
bulk has been written, changing the codingrange would change
all the sample values. Also, dataid is immutable by definition.
Annotation origin and increment could technically be updated
but this doesn't make much sense. Data may have been loaded
indexed by annotation coordinates, and in that case changing
the annotation would invalidate the data.
"""
return [
# Coding
('_bricksize', '3i', 'int32[3]', 'Brick size. Values other than (64,64,64) will likely not work.'),
('_datatype', 'B', 'uint8', 'Type of samples in each brick: int8 = 0, int16 = 2, float32 = 6.'),
('_file_codingrange', '2f', 'float32[2]', 'If datatype is integral, this is the value range samples will be scaled to when read as float. In this case it must be specified on file creation. If datatype is float then this is the value range of the data and should be set automatically when writing the file.'),
# Data identifiers
('_dataid', '16s', 'uint8[16]', 'GUID set on file creation.'),
('_verid', '16s', 'uint8[16]', 'GUID set each time the file is changed.'),
('_previd', '16s', 'uint8[16]', 'GUID before last change.'),
# Data source
('_srcname', '', 'char*', 'Optional name of this data set. Rarely used.'), # In StringList[0]
('_srcdesc', '', 'char*', 'Optional description of this data set. Rarely used.'), # in StringList[1]
('_srctype', 'B', 'uint8', 'Optional datatype the samples had before being stored in this file.'),
# Extent
('_orig', '3f', 'float32[3]', 'First inline, crossline, time/depth. Unlike v1 these are now floating point.'),
('_inc', '3f', 'float32[3]', 'Increment in inline, crossline, vertical directions.'),
('_size', '3i', 'int32[3]', 'Size in inline, crossline, vertical directions.'),
('_curorig', '3i', 'int32[3]', 'Unused. Set to (0,0,0) on write and ignore on read.'),
('_cursize', '3i', 'int32[3]', 'Unused. Set to size on write and ignore on read.'),
# Statistics. Not set directly from user code.
('_scnt', 'q', 'int64', 'Count of values used to compute statistics.'),
('_ssum', 'd', 'float64', 'Sum of all "scnt" values.'),
('_sssq', 'd', 'float64', 'Sum of squared "scnt" values.'),
('_smin', 'f', 'float32', 'Statistical (computed) minimum value.'),
('_smax', 'f', 'float32', 'Statistical (computed) maximum value.'),
# Survey extent. Ignore on read.
('_srvorig', '3f', 'float32[3]', 'Unused. Set equal to orig on write. Ignore on read.'),
('_srvsize', '3f', 'float32[3]', 'Unused. Set to inc*size on write. Ignore on read.'),
# Grid definition
('_gdef', 'B', 'uint8', 'Grid definition type. Set to 3 (enum: "FourPoint") on write. Ignored on read. See notes for a longer explanation.'),
('_gazim', '2d', 'float64[2]', 'Unused.'),
('_gbinsz', '2d', 'float64[2]', 'Unused.'),
('_gpiline', '4f', 'float32[4]', 'Inline component of 4 control points.'),
('_gpxline', '4f', 'float32[4]', 'Crossline component of 4 control points.'),
('_gpx', '4d', 'float64[4]', 'X coordinate of 4 control points.'),
('_gpy', '4d', 'float64[4]', 'Y coordinate of 4 control points.'),
# Horizontal domain
('_hprjsys', '', 'char*', 'Free form description of the projection coordinate system. Usually not parseable into a well known CRS.'), # in StringList[2]
('_hdim', 'B', 'uint8', 'Horizontal dimension. Unknown = 0, Length = 1, ArcAngle = 2. Few applications support ArcAngle.'),
('_hunitfactor', 'd', 'float64', 'Multiply by this factor to convert from storage units to SI units. Applies to gpx, gpy.'),
('_hunitname', '', 'char*', 'For annotation only. Use hunitfactor, not the name, to convert to or from SI.'), # in StringList[3]
# Vertical domain
('_vdim', 'B', 'uint8', 'Vertical dimension. Unknown = 0, Depth = 1, SeismicTWT = 1, SeismicOWT = 3.'),
('_vunitfactor', 'd', 'float64', 'Multiply by this factor to convert from storage units to SI units. Applies to orig[2], inc[2].'),
('_vunitname', '', 'char*', 'For annotation only. Use vunitfactor, not the name, to convert to or from SI.'), # in StringList[4]
# Miscellaneous. Not set directly from user code.
('_slbufsize', 'I', 'uint32', 'Size of the StringList section.'),
# Derived information, not stored in the file.
            ('_ocp_world', '', 'enum', 'Ordered corner points: ((i0,j0),(iN,j0),(i0,jM),(iN,jM))'),
('_lodsizes', '', 'int32[lod]', 'Size of the survey at reduced level of detail.'),
('_nlods', '', 'int32', 'How many levels of details. 1 means only full resolution. Currently nlods will always be just enough to make the highest LOD (i.e. lowest resolution) fit in a single brick.'),
('_brickoffsets', '', 'int64[lod]', 'How many entries in the lookup table to skip when dealing with level N.'),
('_alphaoffsets', '', 'int64[lod]', 'How many entries in the lookup table to skip when dealing with level N.'),
]
class InfoHeaderV3(InfoHeaderV2):
pass
class InfoHeaderV4(InfoHeaderV3):
pass
def InfoHeaderFactory(version):
try:
return [InfoHeaderV1, InfoHeaderV2, InfoHeaderV3, InfoHeaderV4][version-1]
except IndexError:
raise ZgyFormatError("Version " + str(version) + " is not supported")
#****************************************************************************
#** StringList **************************************************************
#****************************************************************************
class StringListV1:
"""
ZGY V1 had no string list, so this section is empty.
"""
def __init__(self, di):
pass
@classmethod
def _format(cls):
return ""
@classmethod
def size(cls, oh = None, ih = None):
return 0
@classmethod
def load(cls, di, buf):
return cls()
@classmethod
def read(cls, f, oh, ih):
return cls(None)
def dump(self, *, file=None):
file = file or sys.stdout
print("\n{0} takes up {1} bytes with format '{2}'".format(self.__class__.__name__, self.size(), self._format()), file=file)
class StringListV2:
"""
The string list holds 5 null terminated strings:
srcname, srcdesc, hprjsys, hunit.name, vunit.name.
The reader will ignore trailing garbage. This is
the one place we might add additional information
without breaking existing readers.
"""
def __init__(self, buf = None, oh = None, ih = None):
"""
Unpack a byte buffer into a new instance of this header.
Caller is responsible for all I/O, so we don't need an iocontext.
"""
self._oh = oh
self._ih = ih
if buf:
buf = buf.split(b'\x00')
self._list = list([e.decode("ASCII", errors='replace') for e in buf])
else:
self._list = ['','','','','']
@classmethod
def _format(cls):
return ""
@classmethod
def size(cls, oh = None, ih = None):
"""
On read the size is stored in the offsetheader,
which may have gotten it from the infoheader.
On write the size depends on the strings written
and we might not be able to trust the offsetheader.
"""
# TODO-Worry handle write, especially the case where one of the strings
        # has been updated. Or isn't this needed because we don't allow
# updating these?
return oh._strsize if oh else 0
@classmethod
def read(cls, f, oh, ih):
if oh._strsize > 0:
return cls(_checked_read(f, oh._stroff, oh._strsize), oh, ih)
else:
return cls()
def dump(self, *, file=None):
file = file or sys.stdout
print("\n{0} takes up {1} bytes with format '{2}'".format(self.__class__.__name__, self.size(self._oh, self._ih), self._format()), file=file)
print(" " + "\n ".join(self._list), file=file)
class StringListV3(StringListV2):
pass
class StringListV4(StringListV3):
pass
def StringListFactory(version):
try:
return [StringListV1, StringListV2, StringListV3, StringListV4][version-1]
except IndexError:
raise ZgyFormatError("Version " + str(version) + " is not supported")
#****************************************************************************
#** HistHeader **************************************************************
#****************************************************************************
class HistHeaderV1(HeaderBase):
def __init__(self, buf = None):
"""
Unpack a byte buffer into a new instance of this header.
Caller is responsible for all I/O, so we don't need an iocontext.
"""
assert self.headersize() == 256*4 + 8
self.unpack(buf)
#self.calculate()
self._cnt = np.sum(self._bin) # In V1 there is no explicit all-count.
if buf:
# Check the roundtrip.
assert buf == self.pack()
@staticmethod
def _formats():
"""
Describe the layout of this header block on the file.
The second field is the format as recognized by the 'struct' module.
Data on the file is stored packed and little-endian. So for use with
'struct', a '<' should be prepended to the string.
"""
return [
('_max', 'f', 'float32', 'Center point of first bin.'),
('_min', 'f', 'float32', 'Center point of last bin.'),
('_bin', '256I', 'uint32[256]', 'Histogram.'),
]
class HistHeaderV2(HeaderBase):
def __init__(self, buf = None):
"""
Unpack a byte buffer into a new instance of this header.
Caller is responsible for all I/O, so we don't need an iocontext.
"""
assert self.headersize() == 256*8 + 16
self.unpack(buf)
#self.calculate()
if buf:
# Check the roundtrip.
assert buf == self.pack()
@staticmethod
def _formats():
"""
Describe the layout of this header block on the file.
The second field is the format as recognized by the 'struct' module.
Data on the file is stored packed and little-endian. So for use with
'struct', a '<' should be prepended to the string.
"""
return [
('_cnt', 'q', 'int64', 'Total number of samples.'),
('_min', 'f', 'float32', 'Center point of first bin.'),
('_max', 'f', 'float32', 'Center point of last bin.'),
('_bin', '256q', 'int64[256]', 'Histogram.'),
]
class HistHeaderV3(HistHeaderV2):
pass
class HistHeaderV4(HistHeaderV3):
pass
def HistHeaderFactory(version):
try:
return [HistHeaderV1, HistHeaderV2, HistHeaderV3, HistHeaderV4][version-1]
except IndexError:
raise ZgyFormatError("Version " + str(version) + " is not supported")
#****************************************************************************
#** Alpha lookup table ******************************************************
#****************************************************************************
class LookupTable:
"""
Both the Alpha lookup table and the Brick lookup table hold a 64-bit
file offset for each tile or brick in the file. Alpha tiles are bitmaps
used to flag dead traces, and only have (i,j) coordinates. Bricks
contain the actual samples and are indexed with (i, j, k).
The size of the lookup tables depend on the survey size. The first
entry in the lookup table is for the brick or tile (always just one)
holding the lowest resolution. This is immediately followed by one
or more entries for the bricks or tiles at level of detail N-1, and
so on until the entries for lod 0. Within one lod level the first
entry is for the lowest numbered i,j,[k]. For subsequent entries the
i numbers vary fastest and the k (or j in the alpha case) varies
slowest. Note that this is somewhat non intuitive as it is the
opposite of the ordering of samples within a tile.
In version 1 of the lookup tables the file offsets are stored in a
somewhat quirky manner. The high 32 bits and the low 32 bits are
both stored as little-endian integers, but the high part is stored
first. So it is part big-endian, part little-endian.
An offset of 0 means the corresponding brick or tile does not exist.
An offset of 1 means the brick or tile contains all zeros and does
not take any space on the file. An offset with the most significant
bit set also means the brick or tile has a constant value. In this
case the actual value is encoded in the least significant 8/16/32
bits (depending on valuetype) of the stored offset.
Offsets 0x8000000000000000 and 0x0000000000000001 are equivalent.
Actually, v2 and later use the first form while v1 used the second.
For robustness both forms should be accepted regardless of version.
"""
def __init__(self, buf, lupsize, mustflip):
"""
Unpack a byte buffer into a new instance of this header.
Caller is responsible for all I/O, so we don't need an iocontext.
"""
self._mustflip = mustflip
self._lupsize = lupsize
if buf:
self._lookup = list(struct.unpack(self._format(lupsize), buf))
if self._mustflip: self._lookup = self._flip_array(self._lookup)
self._lookend = self._calc_lookupsize(self._lookup, None, None)
else:
self._lookup = [0] * (lupsize//8)
self._lookend = [0] * (lupsize//8)
def pack(self):
if self._mustflip: self._lookup = self._flip_array(self._lookup)
result = struct.pack(self._format(len(self._lookup)*8), *self._lookup)
if self._mustflip: self._lookup = self._flip_array(self._lookup)
return result
@staticmethod
def _calc_lookupsize(lookup, eof, maxsize):
"""
Given an index => start_offset lookup table, produce an
index => end_offset table by assuming there are no holes
in the allocated data.
The function understands constant-value and compressed blocks.
If eof and maxsize are known, the code can also make the
following checks:
Blocks that have a start offset > eof are unreadable and
should be ignored. Set them to start and end at eof.
The same applies to offsets of unknown type i.e. the most
significant bit is 1 but the most significant byte is
neither 0x80 (constant) nor 0xC0 (compressed).
Blocks ending past eof should be assumed to end at eof.
Blocks that appear to be larger than an uncompressed block are
probably too large. This may be caused by holes in the allocated
data. Assume the block is the same size as an uncompressed block.
If a compressed block takes up more room than an uncompressed one
then the writer should simply refrain from compressing it.
But for extra robustness the code that makes use of this
information should be prepared to retry the access if the block
really turned out to be larger.
This method might be called unconditionally on file open, or
called only if at least one compressed brick was found, or it
might be deferred until the first time we read a compressed brick.
TODO-Low: If alpha tiles are present then both brick and alpha offsets
ought to be considered in the same pass. The way it will work now
is that for bricks, a few bricks will appear too large because
they are followed by some alpha tiles. This is harmless.
For alpha tiles the end offsets will be hopelessly wrong.
We will need to just assume 4 KB for those.
"""
#print("@@@ _calc_lookupsize", list(map(hex, lookup[:5])))
# make array of (offset, endoffset, type, ordinal)
# Note, the only reason I use a structured array instead of a
# 2d array [][4] is to get sort() to work the way I want it.
# Is there a simpler way? Or is this just as performant?
dtype = [('offset', np.uint64),
('endpos', np.uint64),
('type', np.uint64),
('ordinal', np.uint64)]
tmp = np.zeros(len(lookup), dtype=dtype)
codeshift = np.uint64(56)
codemask = np.uint64(0xFF) << codeshift
tmp['offset'] = lookup
tmp['ordinal'] = np.arange(len(lookup), dtype=np.uint64)
tmp['type'] = tmp['offset']; tmp['type'] &= codemask
tmp['offset'][tmp['type'] == (np.uint64(0x80) << codeshift)] = 0
tmp['offset'][tmp['type'] == (np.uint64(0xC0) << codeshift)] &= ~(codemask)
tmp.sort(order='offset')
# The end of block i is the start of block i+1,
# except the last block which ends at EOF, just use a huge number.
tmp[:-1]['endpos'] = tmp[1:]['offset']
tmp[-1]['endpos'] = eof or ~(np.uint64(1)<<np.uint64(63))
tmp.sort(order='ordinal')
# Several entries may be flagged as starting at offset 0.
# With the above algorithm, an arbitrary one of these will
# have its end set to the start of the first real block.
# The rest will appear to end at zero. Get rid of this
# odd behavior.
tmp['endpos'][tmp['offset']==0] = 0
if eof is not None and maxsize is not None:
#endpos = max(offset, min(endpos, eof, offset + maxsize))
np.minimum(tmp['endpos'], eof, out=tmp['endpos'])
np.minimum(tmp['endpos'], tmp['offset'] + maxsize, out=tmp['endpos'])
np.maximum(tmp['endpos'], tmp['offset'], out=tmp['endpos'])
# This test as written also catches unrecognized block types
# because the offsets are unsigned and all the unrecognized
# types will have the most significant bit set.
#print("@@@ _calc_lookupsize", tmp[:5])
# Returning a numpy array of np.int64 is problematic because
# _lookup is a normal Python array, and arithmetic such as
# int(foo) - np.int64(bar) returns a Python float.
#return tmp['endpos'].copy()
return list(map(int, tmp['endpos']))
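# For illustration: with lookup = [0, 0x2000, 0x1000], eof = 0x3000 and
# maxsize = 0x1000 the method returns [0, 0x3000, 0x2000]. The unused
# entry has no end, the brick stored at 0x1000 ends where the one at
# 0x2000 starts, and the last allocated brick is assumed to end at eof.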
@classmethod
def _flip(cls, n):
return ((n >> 32) & 0xFFFFFFFF) | ((n & 0xFFFFFFFF) << 32)
@classmethod
def _flip_array(cls, a):
return list([cls._flip(n) for n in a])
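# For illustration, the V1 quirk described in the class docstring:
# a stored entry 0x00000001_00000002 decodes to the real file offset
# 0x00000002_00000001, i.e.
#
#   assert LookupTable._flip(0x0000000100000002) == 0x0000000200000001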
@classmethod
def _format(cls, lupsize):
fmt = "<" + str(lupsize//8) + "Q"
assert struct.calcsize(fmt) == lupsize
return fmt
@classmethod
def read(cls, f, offset, lupsize):
return cls(_checked_read(f, offset, lupsize), lupsize)
def dump(self, prefix = "", prefix0 = "", *, file=None):
file = file or sys.stdout
print("\n{0}{1} takes up {2} bytes with format '{3}'".format(
prefix0,
self.__class__.__name__,
self._lupsize,
self._format(self._lupsize)), file=file)
print(prefix+("\n"+prefix).join(map(hex, self._lookup)), file=file)
class AlphaLUPV1(LookupTable):
def __init__(self, buf, lupsize):
super().__init__(buf, lupsize, mustflip = True)
class AlphaLUPV2(LookupTable):
def __init__(self, buf, lupsize):
super().__init__(buf, lupsize, mustflip = False)
class AlphaLUPV3(AlphaLUPV2):
pass
class AlphaLUPV4(AlphaLUPV3):
pass
def AlphaLUPFactory(version):
try:
return [AlphaLUPV1, AlphaLUPV2, AlphaLUPV3, AlphaLUPV4][version-1]
except IndexError:
raise ZgyFormatError("Version " + str(version) + " is not supported")
#****************************************************************************
#** Brick lookup table ******************************************************
#****************************************************************************
class BrickLUPV1(LookupTable):
def __init__(self, buf, lupsize):
super().__init__(buf, lupsize, mustflip = True)
class BrickLUPV2(LookupTable):
def __init__(self, buf, lupsize):
super().__init__(buf, lupsize, mustflip = False)
class BrickLUPV3(BrickLUPV2):
pass
class BrickLUPV4(BrickLUPV3):
pass
def BrickLUPFactory(version):
try:
return [BrickLUPV1, BrickLUPV2, BrickLUPV3, BrickLUPV4][version-1]
except IndexError:
raise ZgyFormatError("Version " + str(version) + " is not supported")
#****************************************************************************
#** All headers combined in a single instance *******************************
#****************************************************************************
class ZgyInternalMeta:
"""
Holds references to all the individual headers needed to access ZGY.
The information is stored in the instance in a format that is tightly
coupled to the file format, so there will be one layer (but hopefully
just one) above us that is responsible for the public API.
"""
def __init__(self, myfile):
self._is_bad = False
if myfile: # when reading
self._init_from_open_file(myfile)
else: # when writing
self._init_from_headers(None, None, None, None, None, None, None)
def _assert_all_headers_allocated(self):
"""
The following asserts might seem paranoid, but for a long time
there was an assumption that every single access to those headers
had to check first whether they were None. I am just proving to
myself that all those checks can be removed.
"""
assert self._fh is not None
assert self._oh is not None
assert self._ih is not None
assert self._sl is not None
assert self._hh is not None
assert self._alup is not None
assert self._blup is not None
def _init_from_headers(self, fh, oh, ih, sl, hh, alup, blup):
self._fh = fh
self._oh = oh
self._ih = ih
self._sl = sl
self._hh = hh
self._alup = alup
self._blup = blup
def _init_from_open_file(self, f):
"""
Read all the headers and save pointers to each of them in this instance.
Some care is needed to read them in the correct order, as there are
several dependencies between them. The next version of the file format
will hopefully be a bit simpler in this respect.
"""
# The file header contains only a magic string and the file version.
# In V{2,3,4} the headers are stored consecutively:
# FileHeader OffsetHeader InfoHeader StringList HistHeader
# AlphaLUT BrickLUT
# In V1 and most likely in the upcoming V4 only FileHeader and
# OffsetHeader are known to be consecutive.
fh = FileHeader.read(f, 0)
# The offset header immediately follows the file header.
# Changed in v2 and v3: Offset header is no longer used
# (all offsets are now implicit), but due to a quirk in the
# implementation it still occupies one byte on the file.
# This is actually a bug waiting to happen, because that
# byte (which is the size of a class with no members) is
# probably compiler dependent.
# Removing the OffsetHeader actually made the files trickier
# to read, as the size of some sections depends on the contents
# of other sections.
oh = OffsetHeaderFactory(fh._version).read(f)
# 'oh' at this point may be incomplete (V{2,3,4}) but the offset
# to the InfoHeader should be known by now. In V1 it is mostly
# complete but is missing a few section sizes.
ih = InfoHeaderFactory(fh._version).read(f, oh._infoff)
# For V{2,3,4}, fill in the rest of the offsets now that
# the InfoHeader is known.
oh.calculate(ih)
# Variable length strings are stored in a separate header.
# Most (currently all) of these logically belong to InfoHeader.
sl = StringListFactory(fh._version).read(f, oh, ih)
hh = HistHeaderFactory(fh._version).read(f, oh._histoff)
# For V2 and later, fill in the missing strings in InfoHeader
# now that the StringList is known.
# For V1 we also need to copy the value range (used for scaling)
# from the histogram header.
ih.calculate(sl, hh)
alup = AlphaLUPFactory(fh._version).read(f, oh._alphalupoff, oh._alphalupsize)
blup = BrickLUPFactory(fh._version).read(f, oh._bricklupoff, oh._bricklupsize)
self._init_from_headers(fh, oh, ih, sl, hh, alup, blup)
def _init_from_scratch(self, filename, *, size = None, compressed = False,
bricksize = None,
datatype = impl_enum.RawDataType.Float32,
datarange = None,
zunitdim = impl_enum.RawVerticalDimension.Unknown,
hunitdim = impl_enum.RawHorizontalDimension.Unknown,
zunitname = None, hunitname = None,
zunitfactor = None, hunitfactor = None,
zstart = 0.0, zinc = 0.0,
annotstart = (0, 0), annotinc = (0, 0),
corners = ((0,0),(0,0),(0,0),(0,0))):
#--- Sanity checks and defaults management ---#
if not size:
raise ZgyUserError("size must be specified.")
elif any([s<1 for s in size]):
raise ZgyUserError("size must be at least 1 in each dimension.")
if not bricksize:
bricksize = (64, 64, 64)
elif len(bricksize) != 3:
raise ZgyUserError("bricksize must be specified in 3 dimensions.")
elif any([(s<4 or not self._is_power_of_two(s)) for s in bricksize]):
raise ZgyUserError("bricksize must be >= 4 and a power of 2.")
# The codingrange for floating point data is special. The user is
# not allowed to set it, and its value is not used internally.
# To ensure that any user supplied range is really ignored we set
# the range to NaN. In _calculate_write it will be set to the
# statistical range before being written out. As an additional
# bullet-proofing, to avoid surprises with older files, this
# rule can also be enforced in api.ZgyMeta.datarange.
# Note: For integral types I might have defaulted the datarange
# to no conversion (-128..+127 or -32768..+32767) and also
# silently re-ordered min and max if needed. But in both cases
# the application is buggy. So, make a fuss.
# A data range for integral data covering just a single value
# (presumably the user will just write that single value to
# the file) is also forbidden because it just isn't useful
# and it triggers several corner cases.
if datatype == impl_enum.RawDataType.Float32:
datarange = (math.nan, math.nan)
elif not datarange or len(datarange) != 2:
raise ZgyUserError("datarange must be specified for integral types.")
elif datarange[0] >= datarange[1]:
raise ZgyUserError("datarange must have min < max.")
elif not np.isfinite(datarange[0]) or not np.isfinite(datarange[1]):
raise ZgyUserError("datarange must be finite.")
zunitname = zunitname or ""
hunitname = hunitname or ""
zunitfactor = zunitfactor or 1
hunitfactor = hunitfactor or 1
#--- End sanity checks and defaults --#
fh = FileHeader(buf = None, compressed = compressed) # Inits to version 3, or 4 if potentially compressed.
oh = OffsetHeaderFactory(fh._version)() # Sets infoff, infsize only.
ih = InfoHeaderFactory(fh._version)() # Inits to all zero
# Fill in the parts of InfoHeader that affects headers elsewhere.
ih._bricksize = tuple(bricksize) or (64, 64, 64)
ih._datatype = datatype
ih._size = tuple(size)
ih._slbufsize = 0
# Meta information caller is allowed to specify.
# Naming is not 100% consistent; this is because
# the parameters to this function were set to match
# the existing Python wrapper for the old ZGY.
ih._file_codingrange = (datarange[0], datarange[1])
ih._safe_codingrange = ih._file_codingrange
ih._vdim = zunitdim
ih._hdim = hunitdim
ih._vunitname = zunitname
ih._hunitname = hunitname
ih._vunitfactor = zunitfactor
ih._hunitfactor = hunitfactor
ih._orig = (annotstart[0], annotstart[1], zstart)
ih._inc = (annotinc[0], annotinc[1], zinc)
ih._ocp_world = ((corners[0][0], corners[0][1]),
(corners[1][0], corners[1][1]),
(corners[2][0], corners[2][1]),
(corners[3][0], corners[3][1]))
ih._gpx = (corners[0][0], corners[1][0], corners[2][0], corners[3][0])
ih._gpy = (corners[0][1], corners[1][1], corners[2][1], corners[3][1])
beg = (ih._orig[0], ih._orig[1])
end = (ih._orig[0] + ih._inc[0] * (ih._size[0] - 1),
ih._orig[1] + ih._inc[1] * (ih._size[1] - 1))
ih._gpiline = (beg[0], end[0], beg[0], end[0])
ih._gpxline = (beg[1], beg[1], end[1], end[1])
# Meta information that might be updated after creation.
# Except for dataid.
def makeUUID():
# See the C++ version for details.
# TODO-Worry: Is the entropy of the random seed good enough?
uuid = bytearray([random.randint(0,255) for i in range(16)])
uuid[8] = (uuid[8] & 0x3f) | 0x80 # variant 1 (DCE)
uuid[7] = (uuid[7] & 0x0f) | 0x40 # version 4 (random)
return uuid
ih._dataid = makeUUID()
ih._verid = makeUUID()
ih._previd = bytes(16)
ih._srcname = ""
ih._srcdesc = ""
ih._srctype = datatype
ih._hprjsys = ""
ih._scnt = 0
ih._ssum = 0.0
ih._sssq = 0.0
ih._smin = 0.0
ih._smax = 0.0
# Unused fields, required to be set this way for compatibility.
ih._curorig = (0, 0, 0)
ih._cursize = ih._size
ih._srvorig = ih._orig
ih._srvsize = (ih._size[0] * ih._inc[0], ih._size[1] * ih._inc[1], ih._size[2] * ih._inc[2])
ih._gdef = impl_enum.RawGridDefinition.FourPoint
ih._gazim = (0.0, 0.0)
ih._gbinsz = (0.0, 0.0)
# Derived information, see calculate()
ih.calculate_write()
# Fill in the rest of the offsets now that the InfoHeader is known.
# Requires _slbufsize, ih._alphaoffsets, _brickoffsets
oh.calculate(ih)
# TODO-Low: Refactor: a more elegant way of handling this?
sl = StringListFactory(fh._version)(ih._calculate_strings(), oh, ih)
# Histogram gets initialized to empty
hh = HistHeaderFactory(fh._version)()
# Lookup tables get initialized to empty
alup = AlphaLUPFactory(fh._version)(None, oh._alphalupsize)
blup = BrickLUPFactory(fh._version)(None, oh._bricklupsize)
self._init_from_headers(fh, oh, ih, sl, hh, alup, blup)
# If "assert contig" in _flush_meta fails, this is also wrong:
# ZGY aligns data to the basic block size, which depends on
# data type and bricksize. This simplifies caching data.
# If all headers are written sequentially from the start
# of the file then it is simpler to add the padding as part
# of the header data instead of before the first data block.
# Forget about using leftover space in the header to store the
# first few alpha tiles. We probably won't be writing those anyway.
code = impl_enum._map_DataTypeToStructFormatCode(self._ih._datatype)
bs = self._ih._bricksize
bs = bs[0] * bs[1] * bs[2] * struct.calcsize(code)
hdrsize = self._oh._bricklupoff + self._oh._bricklupsize
padsize = (((hdrsize+bs-1)//bs)*bs)-hdrsize
self._data_beg = hdrsize + padsize
self._data_end = hdrsize + padsize
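# For illustration of the alignment arithmetic above: with the default
# 64*64*64 bricks of int8 data, bs = 262144 bytes. A header area of,
# say, hdrsize = 10000 bytes is then padded with
# padsize = ((10000 + 262143)//262144)*262144 - 10000 = 252144 bytes,
# which places the first data brick at a multiple of the brick size.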
def _flush_meta(self, f):
# Make sure derived information is up to date and consistent.
self._ih.calculate_write()
self._oh.calculate(self._ih)
# fileheader, infoheader, histheader all derive from HeaderBase
# so they inherit pack() and unpack() from there. alup and blup
# inherit pack() from LookupTable.
fh = self._fh.pack()
oh = self._oh.pack()
ih = self._ih.pack()
sl = self._ih._calculate_strings()
hh = self._hh.pack()
alup = self._alup.pack()
blup = self._blup.pack()
assert len(ih) == self._oh._infsize
assert len(sl) == self._oh._strsize
assert len(hh) == self._oh._histsize
assert len(alup) == self._oh._alphalupsize
assert len(blup) == self._oh._bricklupsize
contig = (self._oh._infoff == len(fh) + len(oh) and
self._oh._stroff == self._oh._infoff + len(ih) and
self._oh._histoff == self._oh._stroff + len(sl) and
self._oh._alphalupoff == self._oh._histoff + len(hh) and
self._oh._bricklupoff == self._oh._alphalupoff + len(alup))
# ZGY aligns data to the basic block size, which depends on
# data type and bricksize. This simplifies caching data.
# If all headers are written sequentially from the start
# of the file then it is simpler to add the padding as part
# of the header data instead of before the first data block.
# Forget about using leftover space in the header to store the
# first few alpha tiles. We probably won't be writing those anyway.
pad = bytes()
self._first_data = None
if contig:
code = impl_enum._map_DataTypeToStructFormatCode(self._ih._datatype)
bs = self._ih._bricksize
bs = bs[0] * bs[1] * bs[2] * struct.calcsize(code)
hdrsize = self._oh._bricklupoff + self._oh._bricklupsize
padsize = (((hdrsize+bs-1)//bs)*bs)-hdrsize
pad = bytes(padsize)
self._first_data = hdrsize + padsize
# If not contiguous then just write the blocks one at a time.
# For V{2,3,4} this will never happen. So I won't add code
# for that case (which I will never be able to test).
assert contig
with ErrorsWillCorruptFile(self):
f.xx_write(fh + oh + ih + sl + hh + alup + blup + pad,
0,
usagehint=impl_file.UsageHint.Header)
def dumpRaw(self, *, file=None):
self._fh.dump(file=file)
self._oh.dump(file=file)
self._ih.dump(' ', file=file)
self._sl.dump(file=file)
self._hh.dump(' ', file=file)
self._alup.dump(' ', file=file)
self._blup.dump(' ', file=file)
@staticmethod
def _is_power_of_two(n):
"""Not efficient, but will only be called a couple of times."""
for shift in range(0, 32):
if n == 1<<shift:
return True
return False
#****************************************************************************
#** Free functions **********************************************************
#****************************************************************************
def _CalcLodSizes(size, bricksize):
"""
Compute the size of the file in bricks, for all LOD levels
and all 3 dimensions. Indirectly also compute the number
of LOD levels, since by definition the last LOD level
is the one that has size (1, 1, 1) i.e. the entire cube
is decimated enough to fit inside a single brick.
"""
if min(bricksize) < 1: return [(0, 0, 0)]
size = ((size[0] + bricksize[0] - 1) // bricksize[0],
(size[1] + bricksize[1] - 1) // bricksize[1],
(size[2] + bricksize[2] - 1) // bricksize[2])
result = [ size ]
while max(result[-1]) > 1:
size = ((size[0]+1)//2, (size[1]+1)//2, (size[2]+1)//2)
result.append(size)
#print("## CalcLodSize", result)
return tuple(result)
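# For illustration: _CalcLodSizes((112, 64, 176), (64, 64, 64)) returns
# ((2, 1, 3), (1, 1, 2), (1, 1, 1)), i.e. the survey needs 2 x 1 x 3
# bricks at lod 0 and has 3 LOD levels in total.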
def _CalcLutOffsets(lods, isalpha):
"""
Compute the offset into the lookup tables by LOD level.
Return an array of offsets indexed by LOD. Also return
(appended to the end of the result) the lookup table size.
The result differs depending on whether this is the alpha
or brick LUT.
The highest LOD is at the start of each table so it by
definition has offset 0. Since the highest LOD level
just has a single brick, the second highest will start
at offset 1.
"""
result = []
pos = 0
for e in reversed(lods):
result.append(pos)
pos = pos + e[0]*e[1]*(1 if isalpha else e[2])
result.reverse()
result.append(pos)
#print("## CalcLutOffsets", result)
return tuple(result)
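# For illustration, continuing the _CalcLodSizes() example above:
# _CalcLutOffsets(((2, 1, 3), (1, 1, 2), (1, 1, 1)), False) returns
# (3, 1, 0, 9). Lod 0 entries start at index 3, lod 1 at index 1,
# lod 2 (the single lowest resolution brick) at index 0, and the
# lookup table holds 9 entries in total.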
def _CalcNumberOfLODs(size):
"""
For illustration only. Use len(_CalcLodSizes(...)) instead.
This method suffers from numerical accuracy issues.
"""
assert False
return math.ceil(math.log(max(size))/math.log(2)) + 1 if size and max(size) >= 1 else 0
def _CalcOrderedCorners(orig, inc, size, gpiline, gpxline, gpx, gpy):
"""
Convert three arbitrary control points to 4 ordered corner points, ordered
(first il, first xl), (last il, first xl), (first il, last xl), (last il, last xl).
Also return the same 4 corners in annotation- and ordinal coordinates.
The last two are redundant with size and annot but are provided as a
convenience for the api layer.
If the file contains world corners but no annotation, assume the writer
used RawGridDefinition.FourPoint but without storing the apparently
redundant annotation corners. This is contrary to spec but we will handle
that case for robustness.
If the conversion fails for another reason then return None because
in that case coordinate conversion will not be possible.
"""
no_world = math.isclose(gpx[0], gpx[1]) and math.isclose(gpy[0], gpy[1])
no_annot = math.isclose(gpiline[0], gpiline[1]) and math.isclose(gpxline[0], gpxline[1])
if no_annot and not no_world:
ocp_world = list([[gpx[i], gpy[i]] for i in range(4)])
else:
try:
ocp_world = acpToOcp(orig, inc, size, gpiline, gpxline, gpx, gpy)
except RuntimeError:
#raise ZgyFormatError("Invalid coordinates in ZGY file.")
ocp_world = None
#ocp_world = list([[gpx[i], gpy[i]] for i in range(4)])
ends = (orig[0] + inc[0] * (size[0]-1),
orig[1] + inc[1] * (size[1]-1))
ocp_annot = ((orig[0], orig[1]),
(ends[0], orig[1]),
(orig[0], ends[1]),
(ends[0], ends[1]))
ocp_index = ((0, 0),
(size[0]-1, 0),
(0, size[1]-1),
(size[0]-1, size[1]-1))
# Make immutable so it is safe to return to application code.
if ocp_world is not None: ocp_world = tuple(map(tuple, ocp_world))
return (ocp_world, ocp_annot, ocp_index)
def checkAllFormats(*, verbose = False, file = None):
FileHeader.checkformats(verbose=verbose, file=file)
InfoHeaderV1.checkformats(verbose=verbose, file=file)
InfoHeaderV2.checkformats(verbose=verbose, file=file)
InfoHeaderV3.checkformats(verbose=verbose, file=file)
InfoHeaderV4.checkformats(verbose=verbose, file=file)
HistHeaderV1.checkformats(verbose=verbose, file=file)
HistHeaderV2.checkformats(verbose=verbose, file=file)
HistHeaderV3.checkformats(verbose=verbose, file=file)
HistHeaderV4.checkformats(verbose=verbose, file=file)
# For now, run the consistency checks also when the module is imported.
checkAllFormats()
if __name__ == "__main__":
pass
#me = InfoHeaderV1()
#me.dump(" ")
#print(me.pack())
# Copyright 2017-2021, Schlumberger
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | zgyio | /zgyio-0.0.4.tar.gz/zgyio-0.0.4/openzgy/impl/meta.py | meta.py |
##@package openzgy.impl.bulk
# \brief Reading and writing bulk data.
import numpy as np
import struct
import os
import math
import sys
from ..impl import enum as impl_enum
from ..impl import file as impl_file
from ..exception import *
from ..impl.lodalgo import decimate, DecimationType
from ..impl.stats import StatisticData
from ..impl.compress import CompressFactoryImpl
# Changing this should be done ONLY FOR TESTING.
# Use a different value for 'outside survey area' than the default.
_padding_fill_value = None
def dprint(*args, **kwargs):
return None # comment out to get debug logging
#print(*args, **kwargs)
class ErrorsWillCorruptFile:
"""
Duplicated between impl.bulk and impl.meta. Maybe fix sometime.
Technically the two differ because they set different flags,
in ZgyInternalBulk and ZgyInternalMeta respectively; but that
distinction isn't easy to see in Python.
Start a critical section where any exception means that the
owner class should be permanently flagged with _is_bad = True.
Typically this is used to prevent secondary errors after a
write failure that has most likely corrupted the entire file.
The exception itself will not be caught.
The _is_bad flag normally means that any further attempts
to access this class, at least for writing, will raise a
ZgyCorruptedFile exception, regardless of what exception
caused the flag to be set.
"""
def __init__(self, parent): self._parent = parent
def __enter__(self): return None
def __exit__(self, type, value, traceback):
if type:
#print("Bulk: Exit critical section", str(value))
self._parent._is_bad = True
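# For illustration, the pattern used by the writer code in this package:
#
#   with ErrorsWillCorruptFile(self):
#       self._file.xx_write(...)  # any exception here sets self._is_bad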
class ScalarBuffer:
"""
Represents an ndarray where all elements have the same value.
This is basically a scalar but additionally has a .shape
attribute telling how large an array it represents.
"""
def __init__(self, shape, value, dtype):
self._shape = tuple(shape)
self._dtype = np.dtype(dtype)
self._value = self._dtype.type(value)
@property
def shape(self): return self._shape
@property
def dtype(self): return self._dtype
@property
def value(self): return self._value
@property
def itemsize(self): return self._dtype.itemsize
def __len__(self): return 0
def __str__(self): return str(self._value)
def __repr__(self): return "numpy.{0}({1})".format(self._dtype.name, self._value)
def inflate(self): return np.full(self._shape, self._value, dtype=self._dtype)
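# For illustration: ScalarBuffer describes a constant region without
# allocating a real array until one is actually needed.
#
#   sb = ScalarBuffer((64, 64, 64), 42, np.int8)
#   sb.shape      # (64, 64, 64)
#   sb.value      # numpy.int8(42)
#   sb.inflate()  # a real np.ndarray of that shape filled with 42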
class ZgyInternalBulk:
"""
Read or write bulk data. The meta data needs to have been read
already. The user-callable API will forward its read requests here.
"""
def __init__(self, f, metadata, *, compressed = False):
self._is_bad = False
self._file = f
# Note, ZgyInternalBulk._metadata and ZgyMeta._meta will point to
# the same instance of ZgyInternalMeta. The members deliberately
# have different names; I believe this will to reduce confusion.
self._metadata = metadata
self._compressed = compressed # If true, do not align bricks.
# Technically only needed if self._metadata._fh._version == 1
# but do it unconditionally to get better test coverage.
self._unscramble_init()
# Keep track of value range as LOD 0 samples are written.
# If samples are overwritten the range might be too wide.
# This range will be used when computing the histogram.
# The range is always user values, not storage. This might
# be of academic interest only, as the information is
# currently ignored when the data on the file is integral.
self._sample_min = np.inf
self._sample_max = -np.inf
# TODO-Low some way for application code to configure this.
# For now don't check for cloud/local because uncompressed
# local files are reasonable to have as Always, especially
# since it is so backward to change.
#self._update_mode = impl_enum.UpdateMode.Always if not compressed and not f.xx_iscloud else impl_enum.UpdateMode.Constant
self._update_mode = impl_enum.UpdateMode.Always if not compressed else impl_enum.UpdateMode.Constant
@staticmethod
def _argType(parameter, accept, name = "parameter"):
"""
Check that the parameter has one of the accepted types.
If None is allowed, include type(None) in the accepted list.
Raise an exception on error.
The main reason I am using this function is that I changed the
convention for passing data buffers around. This used to be
either a np.ndarray or a scalar, with the latter often requiring
a "size" parameter as well. The new convention is to pass either
np.ndarray or ScalarBuffer. Given Python's weak typing this
might easily introduce some bugs.
"""
if not isinstance(parameter, accept):
if not isinstance(accept, tuple): accept = (accept,)
oknames = tuple([t.__name__ for t in accept])
myname = type(parameter).__module__ + "." + type(parameter).__name__
err = "{0} must be in {1} but was {2}".format(name, oknames, myname)
raise TypeError(err)
def _checkCompatibleBufferTypes(self, user_dtype, is_storage):
"""
Several methods now have both an explicit is_storage argument
and a return-buffer value type that can be used to infer whether
the buffer or scalar needs to be converted from a float 'user'
value to an integer 'storage' values. The is_storage parameter
has precedence but to help trap bugs the value type must match.
"""
file_dtype = impl_enum._map_DataTypeToNumpyType(self._metadata._ih._datatype)
if is_storage and user_dtype != file_dtype:
raise ZgyInternalError("Wrong data type " + str(data.dtype) +
" for a file with " + str(file_dtype))
elif not is_storage and user_dtype != np.float32:
raise ZgyInternalError("Wrong data type " + str(data.dtype) +
", when is_storage=False it should be float")
@staticmethod
def _slice(beg, end):
"""For debugging output only."""
return "[{0}:{3},{1}:{4},{2}:{5}]".format(*beg, *end)
#return "[{0}:{3},{1}:{4},{2}:{5}] (size {6},{7},{8})".format(*beg, *end, *(end-beg))
@classmethod
def _getOverlap(cls, src_beg, src_count, dst_beg, dst_count, survey_beg, survey_count):
"""
Determine the overlap between two or three N-dimensional regions.
Typically used to compare what the user wanted with the content
of one particular 3d data block, and possibly also with the
declared total area of the survey. With the intent of copying
only the overlapping area. Return the offsets relative to both
the source and the target region.
"""
src_beg = np.array(src_beg, dtype=np.int64)
src_end = src_beg + np.array(src_count, dtype=np.int64)
dst_beg = np.array(dst_beg, dtype=np.int64)
dst_end = dst_beg + np.array(dst_count, dtype=np.int64)
# Get the overlap, relative to cube origin, between the brick
# we just read (src) and the area that the user requested (dst).
overlap_beg = np.maximum(src_beg, dst_beg)
overlap_end = np.minimum(src_end, dst_end)
# Optionally clip to the survey size as well.
if survey_beg is not None and survey_count is not None:
survey_beg = np.array(survey_beg, dtype=np.int64)
survey_end = survey_beg + np.array(survey_count, dtype=np.int64)
overlap_beg = np.maximum(overlap_beg, survey_beg)
overlap_end = np.minimum(overlap_end, survey_end)
#print("// src relative to cube:", cls._slice(src_beg, src_end))
#print("// dst relative to cube:", cls._slice(dst_beg, dst_end))
#print("// overlap to be copied:", cls._slice(overlap_beg, overlap_end))
# Now convert from offsets relative to the cube orign
# to offsets into the two buffers.
local_src_beg = overlap_beg - src_beg
local_src_end = overlap_end - src_beg
local_dst_beg = overlap_beg - dst_beg
local_dst_end = overlap_end - dst_beg
#print("// src relative to buffer:", cls._slice(local_src_beg, local_src_end))
#print("// dst relative to buffer:", cls._slice(local_dst_beg, local_dst_end))
return local_src_beg, local_src_end, local_dst_beg, local_dst_end
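# For illustration: a 64^3 brick read from cube position (64, 0, 0),
# a user request starting at (60, 0, 0) for (16, 64, 64) samples and
# a survey of size (112, 64, 64) give
#
#   _getOverlap((64,0,0), (64,64,64), (60,0,0), (16,64,64),
#               (0,0,0), (112,64,64))
#
# local_src_beg = (0,0,0), local_src_end = (12,64,64),
# local_dst_beg = (4,0,0), local_dst_end = (16,64,64),
# i.e. src[0:12,...] is copied into dst[4:16,...].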
@classmethod
def _partialCopy(cls, src, src_beg, src_count, dst, dst_beg, dst_count, survey_beg, survey_count, *, verbose = None):
"""
Copy bulk data from src to dst, but only in the overlapping region.
The src buffer will typically be a single brick read from file.
Src can also be a scalar, in which case the overlapping region
will have all its samples set to this particular value.
The method will typically be called several times, once for each
brick that was read. Both src and dest need to be either numpy
arrays or (in the case of src) a scalar. Or a ScalarBuffer.
"""
if isinstance(src, (np.ndarray, ScalarBuffer)): assert tuple(src.shape) == tuple(src_count)
if isinstance(dst, np.ndarray): assert tuple(dst.shape) == tuple(dst_count)
dst_count = dst.shape
(local_src_beg, local_src_end,
local_dst_beg, local_dst_end) = cls._getOverlap(
src_beg, src_count, dst_beg, dst_count, survey_beg, survey_count)
if np.all(local_src_end > local_src_beg):
if isinstance(src, np.ndarray):
if verbose:
verbose("set result",
cls._slice(local_dst_beg, local_dst_end), "= brick",
cls._slice(local_src_beg, local_src_end))
dst[local_dst_beg[0]:local_dst_end[0],
local_dst_beg[1]:local_dst_end[1],
local_dst_beg[2]:local_dst_end[2]] = (
src[local_src_beg[0]:local_src_end[0],
local_src_beg[1]:local_src_end[1],
local_src_beg[2]:local_src_end[2]])
else:
if verbose:
verbose("set result",
cls._slice(local_dst_beg, local_dst_end),
"= constant", src)
value = src.value if isinstance(src, ScalarBuffer) else src
dst[local_dst_beg[0]:local_dst_end[0],
local_dst_beg[1]:local_dst_end[1],
local_dst_beg[2]:local_dst_end[2]] = value
else:
if verbose:
verbose("no ovelap with this brick")
@staticmethod
def _scaleFactorsStorageToFloat(codingrange, file_dtype):
"""
Get the linear transform y = a*x + b for converting from
storage values to actual floating point values.
The smallest value in storage (e.g. -128) should map to codingrange[0]
and the largest value (e.g. +127) should map to codingrange[1].
If file_dtype is a floating point type there is never any conversion.
"""
if not np.issubdtype(file_dtype, np.integer):
(a, b) = (1, 0)
else:
iinfo = np.iinfo(file_dtype)
a = (codingrange[1] - codingrange[0]) / (iinfo.max - iinfo.min)
b = codingrange[0] - a * iinfo.min
return (a, b)
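# For illustration: an int8 file with codingrange (-1.0, +1.0) gives
# a = 2/255 and b = 1/255, so storage -128 maps to -1.0 and storage
# +127 maps to +1.0 under y = a*x + b.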
def _scaleDataFactorsStorageToFloat(self):
ih = self._metadata._ih
file_dtype = impl_enum._map_DataTypeToNumpyType(ih._datatype)
return self._scaleFactorsStorageToFloat(ih._safe_codingrange, file_dtype)
@classmethod
def _scaleToFloat(cls, data, codingrange, file_dtype):
"""
Scale integral data from storage according to the coding range.
Input can be either a scalar or a numpy array.
The operation is a no-op if file_dtype is float.
The data must be known to be in storage domain, we don't try
to guess based on its valuetype.
"""
if data is None: return None
# The compression module might not like the following assert
# because for compressed integral data the decompressor might
# convert from integral to float but leave the scaling for
# later. Currently not a problem.
if isinstance(data, (ScalarBuffer, np.ndarray)): assert file_dtype == data.dtype
if not np.issubdtype(file_dtype, np.integer): return data
(a, b) = cls._scaleFactorsStorageToFloat(codingrange, file_dtype)
if isinstance(data, np.ndarray):
# Numerical accuracy notes:
#
# To match the (arguably broken) old ZGY implementation
# do all the computations, including the codingrange to
# (slope, intercept) calculation, using single precision
# float. The result isn't seriously wrong but values
# close to zero may see a noticeable shift.
#
# To get the result as accurate as possible, convert the
# entire array to float64, then apply the conversion,
# then convert back to float32. The compromise currently
# used is to accurately compute slope, intercept but do
# the scaling on float32 values. The difference compared
# to the most accurate case is hardly noticeable.
#
# See also InfoHeaderAccess::storagetofloat() in OpenZGY/C++.
#
data = data.astype(np.float32)
# In-place operators to avoid copying arrays again.
data *= a
data += b
return data
elif isinstance(data, ScalarBuffer):
return ScalarBuffer(data.shape, a * float(data.value) + b, np.float32)
else:
return np.float32(a * float(data) + b)
def _scaleDataToFloat(self, data):
"""
Convert and/or scale the storage value type to user-supplied data.
Unlike _scaleToFloat() this is an instance method so it already
knows the storage type. The data must be known to be in storage
domain, we don't try to guess based on its valuetype.
"""
file_dtype = impl_enum._map_DataTypeToNumpyType(self._metadata._ih._datatype)
return self._scaleToFloat(data, self._metadata._ih._safe_codingrange, file_dtype)
@staticmethod
def _clipAndCast(data, dtype):
"""
Cast to the specified numpy type, clipping any values outside
the valid range to the closest edge. Works both for scalars
and numpy arrays. Can be made to support lists and tuples also,
but this is a case of yagni.
If the data is a scalar then the result will be of the requested
numpy type and not the generic python 'int'.
"""
cliprange = (np.iinfo(dtype).min, np.iinfo(dtype).max)
if isinstance(data, np.ndarray):
np.clip(data, cliprange[0], cliprange[1], out=data) # in-place clip
# Can I save a few cycles by passing dtype to np.rint and
# dropping the call to astype? Not a big deal and the
# documentation is a bit unclear.
data = np.rint(data).astype(dtype)
elif isinstance(data, ScalarBuffer):
value = np.rint(max(cliprange[0], min(cliprange[1], data.value)))
data = ScalarBuffer(data.shape, value, dtype)
else:
data = np.rint(max(cliprange[0], min(cliprange[1], data)))
data = dtype(data)
return data
@classmethod
def _scaleToStorage(cls, data, codingrange, file_dtype):
"""
Scale floating point data to integral storage values according
to the coding range. Input can be either a scalar or a numpy array.
Or the new ScalarBuffer type.
Input values that cannot be encoded will be clipped to the closest
value that can be represented.
The operation is a no-op if file_dtype is a floating point type rather than an integral type.
The input data must be np.float32 or an array of same; this is to
avoid accidentally passing an input that is already in storage.
"""
# Test: unittests.test_scaleToStorage()
vt = data.dtype if isinstance(data, (np.ndarray, ScalarBuffer)) else type(data)
if vt != np.float32:
raise ZgyInternalError("Input to _scaleToStorage must be np.float32")
if not np.issubdtype(file_dtype, np.integer): return data
(a, b) = cls._scaleFactorsStorageToFloat(codingrange, file_dtype)
if isinstance(data, np.ndarray):
data = data.astype(np.float32)
# In-place operators to avoid copying arrays again.
data -= b
data *= (1.0/a)
elif isinstance(data, ScalarBuffer):
value = (float(data.value) - b) / a
data = ScalarBuffer(data.shape, value, np.float32)
else:
data = (float(data) - b) / a
return cls._clipAndCast(data, file_dtype)
def _defaultValue(self, as_float):
"""
Get the "real" or "storage" value to be used when samples that
have never been written. as_float=False means the return will be
a "storage" value as found in the file. Otherwise convert to "real".
If the data is a scalar then the result will be of the requested
numpy type and not the generic python 'int'.
TODO-Performance, compute this when the file is opened.
"""
defaultstorage = self._scaleDataToStorage(np.float32(0))
defaultreal = self._scaleDataToFloat(defaultstorage)
return defaultreal if as_float else defaultstorage
@staticmethod
def _bricksNeeded(start, size, bricksize):
"""
Return the list of bricks needed to cover the entire area given by
start and size. Each entry in the list gives both the sample position
and the brick position, each as a 3-tuple of integers.
"""
brick0 = [start[0] // bricksize[0],
start[1] // bricksize[1],
start[2] // bricksize[2]]
brickN = [(start[0] + size[0] - 1) // bricksize[0],
(start[1] + size[1] - 1) // bricksize[1],
(start[2] + size[2] - 1) // bricksize[2]]
result = []
for ii in range(brick0[0], brickN[0]+1):
for jj in range(brick0[1], brickN[1]+1):
for kk in range(brick0[2], brickN[2]+1):
result.append(((ii * bricksize[0],
jj * bricksize[1],
kk * bricksize[2]),
(ii, jj, kk)))
return result
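# For illustration: a request starting at (0, 0, 60) with size
# (1, 1, 10) and 64^3 bricks straddles one brick boundary in the last
# dimension, so _bricksNeeded() returns
# [((0, 0, 0), (0, 0, 0)), ((0, 0, 64), (0, 0, 1))],
# i.e. the sample position and brick index of the two bricks needed.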
def _validatePosition(self, i, j, k, lod):
if lod < 0 or lod >= self._metadata._ih._nlods:
raise ZgyUserError("Requested LOD {0} is outside the valid range {1} to {2} inclusive".format(lod, 0, self._metadata._ih._nlods - 1))
size = self._metadata._ih._lodsizes[lod]
if (i<0 or i>=size[0] or j<0 or j>=size[1] or k<0 or k>=size[2]):
raise ZgyUserError("Requested brick position {0} is outside the valid range {1} to {2} inclusive at lod {3}".format((i, j, k), (0, 0, 0), tuple(np.array(size) - 1), lod))
def _getBrickLookupIndex(self, i, j, k, lod):
self._validatePosition(i, j, k, lod)
size = self._metadata._ih._lodsizes[lod]
index = (self._metadata._ih._brickoffsets[lod] +
i + (size[0] * j) + (size[0] * size[1] * k))
if index < 0 or index >= self._metadata._ih._brickoffsets[-1]:
raise ZgyInternalError("Internal error in _getBrickLookupIndex")
return index
def _getAlphaSizeInBytes(self):
return int(self._metadata._ih._bricksize[0] * self._metadata._ih._bricksize[1])
def _getBrickSizeInBytes(self):
"""
Get the size of an uncompressed brick in bytes.
TODO-Performance, this should be cached on file open and
should probably be a derived attribute of self._metadata._ih.
NOTE-Performance: np.product() might be preferable to spelling out
the multiply and needing a temp. But it could be 100 times slower.
"""
file_dtype = np.dtype(impl_enum._map_DataTypeToNumpyType(self._metadata._ih._datatype))
bs = self._metadata._ih._bricksize
maxsize = bs[0] * bs[1] * bs[2] * file_dtype.itemsize
return int(maxsize)
def _getBegAndSize(self, lup, ix, maxsize):
"""
Use only for bricks known to be compressed. Unless testing.
Return both the file offset of the compressed block and its size.
The latter is a hint that should not be fully trusted.
For testing, pass sane = False; this skips the test for too-large
bricks etc., which means I can use the tool to find holes in the
allocated data.
"""
# TODO-Low consider deferring _calc_lookupsize() until first needed.
# The simple version below is NOT THREADSAFE.
#if self._metadata._blup._lookend is None:
# self._metadata._blup._lookend = (
# self._metadata._blup._calc_lookupsize(self._lookup, eof, maxsize))
raw_beg = lup._lookup[ix]
raw_end = lup._lookend[ix]
btype = (raw_beg >> 56) & 0xFF
if btype == 0x80 or raw_beg < 2:
return 0, 0
beg = raw_beg & ~(0xFF << 56)
end = raw_end
# Need extra tests if _calc_lookupsize() was called without
# giving the eof and maxsize parameters.
end = max(beg, min(end, self._file.xx_eof, beg + maxsize))
return (beg, end - beg)
def _getAlphaBegAndSize(self, ix, *, sane = True):
return self._getBegAndSize(self._metadata._alup, ix, self._getAlphaSizeInBytes() if sane else (0xFF<<56))
def _getBrickBegAndSize(self, ix, *, sane = True):
return self._getBegAndSize(self._metadata._blup, ix, self._getBrickSizeInBytes() if sane else (0xFF<<56))
def _getBrickFilePosition(self, i, j, k, lod):
"""
returns (brickstatus, fileoffset, constvalue, bricksize).
"""
ix = self._getBrickLookupIndex(i, j, k, lod)
pos = self._metadata._blup._lookup[ix]
btype = (pos >> 56) & 0xFF
if pos == 0:
return (impl_enum.BrickStatus.Missing, None, 0, 0)
elif pos == 1:
return (impl_enum.BrickStatus.Constant, None, 0, 0)
elif btype == 0x80:
# TODO-Worry probably won't work with big-endian.
# I suspect the old C++ reader won't either,
# but that is an academic question since we
# only support that library for x86.
fmt = impl_enum._map_DataTypeToStructFormatCode(self._metadata._ih._datatype)
tmp = struct.pack("<Q", pos)
constant = struct.unpack_from(fmt, tmp)[0]
return (impl_enum.BrickStatus.Constant, None, constant, 0)
elif btype == 0xC0:
beg, size = self._getBrickBegAndSize(ix)
return (impl_enum.BrickStatus.Compressed, beg, None, size)
elif pos & (1<<63):
raise ZgyFormatError("Unknown brick type " + str(btype))
else:
return (impl_enum.BrickStatus.Normal, pos, None, self._getBrickSizeInBytes())
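# For illustration of the decoding above: an entry of 0 reports
# BrickStatus.Missing, 1 reports a constant-zero brick, and an entry
# such as 0x80000000000000FB reports BrickStatus.Constant with the
# value decoded from the low bits (-5, assuming an int8 file with
# struct code 'b'). A high byte of 0xC0 reports a compressed brick
# whose offset is in the low 56 bits, any other entry with the top
# bit set is an error, and everything else is a normal brick whose
# entry is the file offset itself.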
def _setBrickFilePosition(self, i, j, k, lod, brickstatus, content, bricksize):
ix = self._getBrickLookupIndex(i, j, k, lod)
if brickstatus == impl_enum.BrickStatus.Missing:
self._metadata._blup._lookup[ix] = 0
self._metadata._blup._lookend[ix] = 0
elif brickstatus == impl_enum.BrickStatus.Constant:
fmt = impl_enum._map_DataTypeToStructFormatCode(self._metadata._ih._datatype)
#print("_setBrickFilePosition: constant", type(content), content)
tmp = struct.pack(fmt, content)
tmp += bytes(8 - len(tmp))
content = struct.unpack_from("<Q", tmp)[0]
content |= (1<<63)
self._metadata._blup._lookup[ix] = content
self._metadata._blup._lookend[ix] = 0
elif brickstatus == impl_enum.BrickStatus.Compressed:
self._metadata._blup._lookend[ix] = content + bricksize
content &= ~(0xFF << 56)
content |= (0xC0 << 56)
self._metadata._blup._lookup[ix] = content
else:
self._metadata._blup._lookup[ix] = content
self._metadata._blup._lookend[ix] = content + bricksize
def _getAlphaLookupIndex(self, i, j, lod):
self._validatePosition(i, j, 0, lod)
size = self._metadata._ih._lodsizes[lod]
index = self._metadata._ih._alphaoffsets[lod] + i + (size[0] * j)
if index < 0 or index >= self._metadata._ih._alphaoffsets[-1]:
raise ZgyInternalError("Internal error in _getAlphaLookupIndex")
return index
def _getAlphaFilePosition(self, i, j, lod):
# TODO-Low: Compression, same handling as in _getBrickFilePosition()
pos = self._metadata._alup._lookup[self._getAlphaLookupIndex(i, j, lod)]
if pos == 0:
return (impl_enum.BrickStatus.Missing, None, 0)
elif pos == 1:
return (impl_enum.BrickStatus.Constant, None, 0)
elif pos & (1<<63):
constant = pos & 0xff
return (impl_enum.BrickStatus.Constant, None, constant)
else:
return (impl_enum.BrickStatus.Normal, pos, None)
def _unscramble_init(self):
"""
Create static tables to convert between a regular 3d brick (v2, v3)
and a brick that has 8x8 subtiling in the horizontal direction (v1).
"""
self._srctodst = np.zeros(64*64*64, dtype=np.int32)
self._dsttosrc = np.zeros(64*64*64, dtype=np.int32)
pos = 0
for ii in range(0, 64, 8):
for jj in range(0, 64, 8):
for subi in range(ii, ii+8):
for subj in range(jj, jj+8):
for subk in range(64):
src_pos = pos
pos += 1
dst_pos = subi*64*64 + subj*64 + subk
self._srctodst[src_pos] = dst_pos
self._dsttosrc[dst_pos] = src_pos
def _unscramble(self, brick):
return brick.flat[self._srctodst].reshape(brick.shape)
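# For illustration: in the V1 layout the brick is stored as a sequence
# of 8x8 (i, j) column tiles. The first 4096 stored samples therefore
# map to the sub-block [0:8, 0:8, :] of the unscrambled brick, the next
# 4096 to [0:8, 8:16, :], and so on; e.g. stored sample number 4096
# becomes element (0, 8, 0) after _unscramble().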
def readConstantValue(self, start, size, lod = 0, as_float = True, *, verbose = None):
"""
Check to see if the specified region is known to have all
samples set to the same value. A return value that is not None
signifies that a regular read would return all samples set to
that value. A return value of None means we don't know. This
method is only intended as a hint to improve performance.
"""
defaultstorage = self._defaultValue(as_float=False)
needed = self._bricksNeeded(start, size, self._metadata._ih._bricksize)
bricks = [((e[0],) + self._getBrickFilePosition(*e[1], lod)) for e in needed]
result = None
for startpos, brickstatus, fileoffset, constvalue, bricksize in bricks:
if brickstatus == impl_enum.BrickStatus.Constant:
if result is not None and result != constvalue and not (np.isnan(result) and np.isnan(constvalue)):
return None
result = constvalue
elif brickstatus == impl_enum.BrickStatus.Missing:
if result is not None and result != defaultstorage:
return None
result = defaultstorage
else:
return None
if as_float:
result = self._scaleDataToFloat(result)
return result
def _deliverOneBrick(self, result, start, startpos, raw, brickstatus, as_float, *, verbose = None):
"""
This is the final step in readToExistingBuffer(). The data has
been read from storage, so now it needs to be copied back to
the user. This function may be invoked multiple times if data
was needed from more than one brick.
Arguments:
result -- user's buffer which was passed to readToExistingBuffer()
start -- user's supplied position as a 3-tuple of index values
startpos -- position of the start of this brick
raw -- bulk data for this brick, scalar or bytes or ...
brickstatus -- normal, compressed, constant, ...
This low level function deals with bytes (or similar) and scalars
as input, not the np.ndarray and ScalarBuffer used elsewhere.
"""
self._checkCompatibleBufferTypes(result.dtype, not as_float)
file_dtype = impl_enum._map_DataTypeToNumpyType(self._metadata._ih._datatype)
# TODO-Low: Refactor: this is probably NOT the right place.
if brickstatus == impl_enum.BrickStatus.Compressed:
# Note that when the file contains compressed integral data
# and the application asks for float data back, the decompressor
# is responsible for the int to float conversion but NOT the
# scaling from storage values to real values.
onebrick = CompressFactoryImpl.decompress(raw, brickstatus, self._metadata._ih._bricksize, file_dtype, result.dtype)
if onebrick is None:
raise ZgyFormatError("Compression type not recognized")
elif brickstatus == impl_enum.BrickStatus.Normal:
if len(raw) != np.prod(self._metadata._ih._bricksize) * np.dtype(file_dtype).itemsize:
raise ZgyFormatError("Got wrong count when reading brick.")
onebrick = np.frombuffer(raw, dtype=file_dtype)
# Instead of describing the array as explicitly little-endian
# I will immediately byteswap it to become native-endian.
if np.dtype('<i4') != np.int32: # big-endian
onebrick.byteswap(inplace=True)
onebrick = onebrick.reshape(self._metadata._ih._bricksize, order="C")
# Only needed for normal bricks. No compressed bricks exist in v1.
if self._metadata._fh._version == 1:
onebrick = self._unscramble(onebrick)
else:
_ = float(raw) # just to assert it is in fact a scalar.
onebrick = ScalarBuffer(self._metadata._ih._bricksize, raw, file_dtype)
if as_float and file_dtype != np.float32:
onebrick = self._scaleDataToFloat(onebrick)
#print(onebrick)
# Note, we might pass in the survey range as well to force
# all padding bytes to be set to the default value. Less
# surprises for the caller. It may look a bit odd if the user
# does a flood-fill of the entire survey to a given value and
# later sees that the content is different in the padding area.
# But, the caller should ignore the padding.
#
# On write the padding samples should also be forced to contain
# the same value. If nothing else, to help compression. But for
# efficiency reasons the value is not specified. Typically it
# will be the default "absent" value but if the rest of the
# brick has a const value and no allocated disk space then
# they will inherit that constant.
self._partialCopy(onebrick,
startpos, self._metadata._ih._bricksize,
result, start, result.shape,
(0, 0, 0), self._metadata._ih._size,
verbose=verbose)
def readToExistingBuffer(self, result, start, lod, as_float, *, verbose = None, zeroed_result=False):
"""
Read bulk data starting at "start" in index space and store the
result in the provided 3d numpy array. Start should be in the range
(0,0,0) to Size-1. The count of samples to read is implied by the
size of the provided result array that is passed in. The valid data
types for the result array are float32 (in which case samples stored
as int8 or int16 will be scaled) or the file's storage value type
(in which case there is no scaling). It is valid to pass a count
that includes the padding area between the survey and the end
of the current brick, but not past that point.
"""
self._argType(result, (np.ndarray,))
self._checkCompatibleBufferTypes(result.dtype, not as_float)
file_dtype = impl_enum._map_DataTypeToNumpyType(self._metadata._ih._datatype)
result_dtype = result.dtype
count = result.shape
# Need a default value to use when trying to read a brick that
# was never written, or to fill in a brick that was only partly
# written. To avoid non intuitive behavior the same value should
# be used for both cases, and it should be a value that would
# also be allowed to store as a regular sample. Use the value
# that becomes 0.0 after conversion to float. Or as close as
# possible to that value if the coding range is not zero centric.
# Always use zero for floating point data. (Not NaN...)
# Dead traces don't have any special handling apart from the
# alpha flag. They still contain whatever data was written to them.
defaultstorage = self._defaultValue(as_float=False)
defaultvalue = self._defaultValue(as_float=True)
# Make a separate pass to gather all the bricks we need to read.
# FUTURE: we might fetch some of them in parallel and we might be
# able to combine bricks to read larger blocks at a time. Both changes
# can have a dramatic performance effect on cloud access.
needed = self._bricksNeeded(start, count, self._metadata._ih._bricksize)
bricks = [((e[0],) + self._getBrickFilePosition(*e[1], lod)) for e in needed]
#print(bricks)
#print("Default storage", defaultstorage, " and value", defaultvalue)
if not zeroed_result:
result.fill(defaultvalue if _padding_fill_value is None else _padding_fill_value)
# After all bricks have been processed, the padding past the
# end of the survey might still not have been touched. Just in
# case the request did in fact include such samples we will
# initialize the entire result buffer to the default value.
if not zeroed_result:
result.fill(_padding_fill_value if _padding_fill_value is not None else
defaultvalue if result.dtype == np.float32 else
defaultstorage)
bricksize_bytes = np.prod(self._metadata._ih._bricksize) * file_dtype().itemsize
requests = []
for startpos, brickstatus, fileoffset, constvalue, real_bricksize in bricks:
# Functor that accepts a raw "data" brick and copies it
# into the correct place in result. Note that "startpos" and
# "brickstatus" need to be captured for the current loop
# iteration. Other captured variables should remain the same.
deliverance = lambda data, startpos=startpos, brickstatus=brickstatus: self._deliverOneBrick(result, start, startpos, data, brickstatus, as_float=as_float, verbose=verbose)
if brickstatus == impl_enum.BrickStatus.Constant:
if verbose:
verbose(" Reading brick at {0} as constant {1}".format(
startpos, constvalue))
deliverance(constvalue)
elif brickstatus == impl_enum.BrickStatus.Missing:
if verbose:
verbose(" Reading brick at {0} not found, use {1}".format(
startpos, defaultstorage))
deliverance(defaultstorage)
elif brickstatus == impl_enum.BrickStatus.Normal:
if verbose:
verbose("Reading brick at {0} from file offset {1}".format(
startpos, hex(fileoffset)))
requests.append((fileoffset, bricksize_bytes, deliverance))
elif brickstatus == impl_enum.BrickStatus.Compressed:
if verbose:
verbose("Reading compressed brick at {0} from file offset {1} size {2}".format(
startpos, hex(fileoffset), hex(real_bricksize)))
# TODO-Worry obscure corner case, might need to re-try if we didn't get enough data.
requests.append((fileoffset, real_bricksize, deliverance))
else:
raise ZgyInternalError("Internal error, bad brick status")
# Send all pending read requests to the backend in one operation.
if requests:
self._file.xx_readv(requests,
parallel_ok=False,
immutable_ok=False,
transient_ok=True,
usagehint=impl_file.UsageHint.Data)
# --- WRITE SUPPORT --- #
def _scaleDataToStorage(self, data):
"""
Convert and/or scale the user-supplied data to the storage value type.
Unlike _scaleToStorage() this is an instance method so it already
knows the storage type. Also be more strict about the input type.
The method accepts np.ndarray or ScalarBuffer only.
"""
file_dtype = impl_enum._map_DataTypeToNumpyType(self._metadata._ih._datatype)
data_dtype = data.dtype
if data_dtype != np.float32:
raise ZgyInternalError("Input to _scaleDataToStorage must be np.float32, not " + str(data_dtype))
if file_dtype != np.float32:
data = self._scaleToStorage(data, self._metadata._ih._safe_codingrange, file_dtype)
return data
@classmethod
def _setPaddingToEdge(cls, data, used, modulo, dim):
"""
Pad unused parts of the data buffer by replicating the last samples,
but only up to a multiple of 'modulo' samples. Handles just one
dimension, so caller will typically invoke us three times.
"""
cls._argType(data, (np.ndarray,))
beg = used[dim]
end = min(((used[dim]+modulo-1) // modulo) * modulo, data.shape[dim])
if beg >= end: pass
elif dim == 0: data[beg:end,:,:] = data[beg-1:beg,:,:]
elif dim == 1: data[:,beg:end,:] = data[:,beg-1:beg,:]
elif dim == 2: data[:,:,beg:end] = data[:,:,beg-1:beg]
@classmethod
def _setPaddingToConst(cls, data, used, missingvalue, dim):
"""
Pad unused parts of the data buffer with a constant.
Handles just one dimension, so caller should invoke us three times.
"""
cls._argType(data, (np.ndarray,))
beg = used[dim]
end = data.shape[dim]
if beg >= end: pass
elif dim == 0: data[beg:end,:,:] = missingvalue
elif dim == 1: data[:,beg:end,:] = missingvalue
elif dim == 2: data[:,:,beg:end] = missingvalue
@classmethod
def _setPaddingSamples(cls, data, used, missingvalue, compressor):
"""
Make sure the contents of the padding area, if any, are
deterministic instead of whatever garbage the buffer holds.
The method is allowed to update the buffer in place,
but may also allocate and return a new one.
"""
cls._argType(data, (np.ndarray,))
if tuple(used) == tuple(data.shape):
return data
# TODO-Low let the compressor choose the optimal padding.
# ZFP: replicate up to modulo-4, storage-zero outside.
#if compressor: return compressor.pad(data, used)
for dim in range(3):
cls._setPaddingToConst(data, used, missingvalue, dim)
# To reduce LOD edge artifacts, pad 'edge' up to even size.
# To optimize ZFP compression, pad 'edge' up to modulo-4.
for dim in range(3):
cls._setPaddingToEdge(data, used, 4, dim)
return data
def _writeWithRetry(self, rawdata, brickstatus, fileoffset, brickpos, lod, *, verbose = None):
dprint("@@ _writeWithRetry(pos={0}, count={1}, fileoffset={2:x})".format(brickpos, len(rawdata), fileoffset or self._file.xx_eof))
if fileoffset is not None:
try:
self._file.xx_write(rawdata, fileoffset, usagehint=impl_file.UsageHint.Data)
except ZgySegmentIsClosed:
# The update mode doesn't need to be checked again here
# unless we want to support UpdateMode.NoLeaks which
# would require raising an exception.
# TODO-Test: testing will require careful thought.
fileoffset = None # Write a new brick, abandoning the old one.
if fileoffset is None:
# Write a new brick.
fileoffset = self._file.xx_eof
self._file.xx_write(rawdata, fileoffset, usagehint=impl_file.UsageHint.Data)
self._setBrickFilePosition(brickpos[0], brickpos[1], brickpos[2],
lod, brickstatus,
fileoffset, len(rawdata))
def _writeOneNormalBrick(self, data, fileoffset, brickpos, lod, compressor, *, verbose = None):
"""
Handle padding of area outside the survey and optionally compression.
Also convert from NumPy array to plain bytes.
TODO-Performance
This function is a candidate for running multi threaded due to the
potentially expensive compression. While the next step, actually
writing the file, needs to be serialized both because it may want
to write at EOF and because even if the file offset is explicitly
known, the lower level write might not be thread safe. Also, when
serializing, the order of bricks should be preserved; otherwise
performance on read might suffer.
"""
self._argType(data, (np.ndarray,))
dprint("@@ _writeOneNormalBrick(pos={0}, count={1})".format(brickpos, data.shape))
# TODO-Medium: if scalar(data): return data, impl_enum.BrickStatus.Constant
# Make it possible for this function to be called for all data
# in _writeOneBrick(). Then move it even further up to
# _writeAlignedRegion() which will hopefully enable some
# parallelization on write.
assert tuple(data.shape) == tuple(self._metadata._ih._bricksize)
used = self._usedPartOfBrick(data.size, brickpos, lod)
data = self._setPaddingSamples(data, used,
missingvalue=self._defaultValue(as_float=False),
compressor=compressor)
# TODO-Low: Compression, might also want to check for all-constant
# by calling _isUsedPartOfBrickAllConstant() and if true, return
# (data.flat[0], impl_enum.BrickStatus.Constant) and do not attempt
# to compress. This function would then be called from
# _writeAlignedRegion() instead of _writeWithRetry().
# There is a slight amount of added work because if the data block
# already exists on file (which we don't know yet) the test would
# not be needed.
# With that change both _writeOneNormalBrick() and _writeOneBrick()
# will need to know the current brick status, and _writeOneNormalBrick()
# would return .Constant for constant data.
brickstatus = impl_enum.BrickStatus.Normal
rawdata = None
if compressor:
cdata = compressor(data)
if cdata:
rawdata = cdata
brickstatus = impl_enum.BrickStatus.Compressed
if rawdata is not None:
pass
elif np.dtype('<i4') != np.int32: # big-endian, TODO-Worry not tested.
rawdata = data.byteswap().tobytes()
else:
rawdata = data.tobytes()
self._writeWithRetry(rawdata, brickstatus, fileoffset, brickpos, lod, verbose = verbose)
def _writeOneConstantBrick(self, data, brickpos, lod, *, verbose = None):
self._argType(data, (ScalarBuffer,))
dprint("@@ _writeOneConstantBrick(pos={0}, value={1})".format(brickpos, data))
self._setBrickFilePosition(brickpos[0], brickpos[1], brickpos[2], lod,
impl_enum.BrickStatus.Constant, data.value, 0)
def _usedPartOfBrick(self, size, brickpos, lod):
"""
Compute the size of the used (inside survey) area of a data buffer
with size "size". size will probably always be equal to bricksize
but the function should still work if it isn't.
"""
# TODO-Low: Refactor, should I enforce storing the members as numpy arrays?
bricksize = np.array(self._metadata._ih._bricksize, dtype=np.int64)
surveysize = np.array(self._metadata._ih._size, dtype=np.int64)
brickpos = np.array(brickpos, dtype=np.int64)
surveysize = (surveysize + (1<<lod) - 1) // (1<<lod)
available = surveysize - (brickpos * bricksize)
return np.maximum((0, 0, 0), np.minimum(available, size))
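# Worked example: surveysize (112, 64, 176), 64^3 bricks, lod 0 and
# brickpos (1, 0, 2) gives available == (48, 64, 48), i.e. only a
# 48 x 64 x 48 corner of that brick holds samples inside the survey.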
def _isUsedPartOfBrickAllConstant(self, data, brickpos, lod):
"""
Return True if all useful samples in this brick have the same value.
Padding samples outside the survey are not useful and should not
be checked since they could easily have been set to a different value.
"""
self._argType(data, (np.ndarray,))
used = self._usedPartOfBrick(data.shape, brickpos, lod)
first = data.flat[0]
if np.isnan(first):
return np.all(np.isnan(data[:used[0],:used[1],:used[2]]))
else:
return np.all(data[:used[0],:used[1],:used[2]] == first)
def _mustLeakOldBrick(self, data, compressor, brickstatus):
"""
Return True if this block needs to be leaked by pretending
that its block offset has not been allocated yet.
Raise an exception if the update is disallowed by _update_mode.
Note, in "Always" mode I am only supposed to leak the block
if the new one is larger. But that is too much work to
figure out here. So, treat "Always" as if it were "Pedantic".
Note, there is one other place where bricks might leak:
in _writeWithRetry() when a ZgySegmentIsClosed is caught.
The update mode doesn't need to be checked again at that point
unless we want to support UpdateMode.NoLeaks.
"""
self._argType(data, (np.ndarray, ScalarBuffer))
msg = "Updating a {0} BrickStatus.{1} brick with {2} data is illegal in UpdateMode.{3}.".format(
"cloud" if self._file.xx_iscloud else "local",
brickstatus.name,
"Compressed" if compressor else "Normal" if isinstance(data, np.ndarray) else "Constant",
self._update_mode.name)
if brickstatus != impl_enum.BrickStatus.Missing:
if self._update_mode in (impl_enum.UpdateMode.Never,):
raise ZgyUserError(msg)
if brickstatus == impl_enum.BrickStatus.Normal:
if self._update_mode in (impl_enum.UpdateMode.Never, impl_enum.UpdateMode.Constant):
raise ZgyUserError(msg)
if brickstatus == impl_enum.BrickStatus.Compressed or (brickstatus == impl_enum.BrickStatus.Normal and compressor is not None):
if self._update_mode in (impl_enum.UpdateMode.Pedantic, impl_enum.UpdateMode.Always):
return True
else:
raise ZgyUserError(msg)
return False
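# Informal summary of the checks above: UpdateMode.Never rejects any
# update of an existing (non-Missing) brick. UpdateMode.Constant only
# allows re-writing bricks that are still Missing or Constant.
# Pedantic and Always permit everything, but a brick that is (or is
# about to become) compressed is leaked and re-written at a new
# location instead of being overwritten in place.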
def _writeOneBrick(self, data, brickpos, lod, compressor, *, verbose = None):
"""
Write a single brick to storage. Normally the data should either
be a numpy scalar (all samples have the same value) or a numpy array
with the same shape as the file's brick size.
brickpos is given relative to this lod level. For lod 0 the valid
range is the survey size in bricks. For lod 1 it is half that,
rounded upwards.
The data must already have been scaled to storage values and
converted to the appropriate value type. It must either be a
scalar (to fill the entire brick) or a numpy array.
Caveat: int and float scalars won't work. Be explicit and pass an
np.int8, np.int16, or np.float32.
"""
self._argType(data, (np.ndarray, ScalarBuffer))
self._checkCompatibleBufferTypes(data.dtype, is_storage = True)
brickstatus, fileoffset, constvalue, bricksize = self._getBrickFilePosition(brickpos[0], brickpos[1], brickpos[2], lod)
if self._mustLeakOldBrick(data, compressor, brickstatus):
brickstatus, fileoffset, constvalue, bricksize = (
impl_enum.BrickStatus.Missing, None, 0, 0)
data_const = not isinstance(data, np.ndarray)
file_const = brickstatus not in (impl_enum.BrickStatus.Normal, impl_enum.BrickStatus.Compressed)
if file_const: # also true if never written yet.
if data_const:
# Caller asked to store a constant value.
dprint("@@ Explicit store constant")
self._writeOneConstantBrick(data, brickpos, lod, verbose=verbose)
elif self._isUsedPartOfBrickAllConstant(data, brickpos, lod):
# Caller did not explicitly ask for a constant value,
# but all usable samples (i.e. all samples that are
# inside the survey boundaries) have the same value.
tmp = ScalarBuffer(data.shape, data[0,0,0], data.dtype)
self._writeOneConstantBrick(tmp, brickpos, lod, verbose=verbose)
else:
# Allocate a new disk block and write out this block.
self._writeOneNormalBrick(data, None, brickpos, lod, compressor=compressor, verbose=verbose)
else:
if data_const:
dprint("@@ Const data expanded before storing")
# The brick has already been allocated. Cannot set it to
# constant-value because the file storage for this brick
# would then be leaked.
data = np.full(self._metadata._ih._bricksize[:3], data.value, dtype=data.dtype)
# No compression, for the same reason as above.
self._writeOneNormalBrick(data, fileoffset, brickpos, lod, compressor=None, verbose=verbose)
def _writeAlignedRegion(self, data, start, lod, compressor, *, verbose = None):
"""
Write an arbitrary region covering one or more full bricks.
The data must already be converted to storage.
Start must be a multiple of brick size. start + count must
either be a multiple of block size or set to the end of the
survey.
With current usage the data may or may not have been padded to
the brick size depending on whether a read/modify/write was used.
"""
self._argType(data, (np.ndarray, ScalarBuffer))
# Meta information from the file only.
bs = np.array(self._metadata._ih._bricksize, dtype=np.int64)
file_dtype = impl_enum._map_DataTypeToNumpyType(self._metadata._ih._datatype)
self._checkCompatibleBufferTypes(data.dtype, is_storage=True)
survey_beg = np.array((0, 0, 0), dtype=np.int64)
survey_end = np.array(self._metadata._ih._size, dtype=np.int64)
survey_end = (survey_end + (1<<lod) - 1) // (1<<lod)
defaultstorage = self._defaultValue(as_float=False)
# Massaging of arguments to this function
dprint("@@ _writeAlignedRegion(start {0}, count {1}, type {2} -> {3})".format(
start, data.shape, data.dtype, np.dtype(file_dtype)))
start = np.array(start, dtype=np.int64)
count = np.array(data.shape, dtype=np.int64)
beg_brick = (start // bs) * bs
end_brick = ((start + count + bs - 1) // bs) * bs
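# Worked example: start (0, 0, 64) and data.shape (112, 64, 64) with
# 64^3 bricks gives beg_brick (0, 0, 64) and end_brick (128, 64, 128),
# so the loop below visits brickpos (0, 0, 1) and (1, 0, 1).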
if isinstance(data, np.ndarray):
brick = np.zeros(bs, dtype=file_dtype)
else:
brick = ScalarBuffer(bs, data.value, data.dtype)
for ii in range(beg_brick[0], end_brick[0], bs[0]):
for jj in range(beg_brick[1], end_brick[1], bs[1]):
for kk in range(beg_brick[2], end_brick[2], bs[2]):
this_beg = np.array([ii, jj, kk], dtype=np.int64)
brickpos = this_beg // bs
if isinstance(brick, np.ndarray):
brick.fill(defaultstorage)
self._partialCopy(data, start, data.shape,
brick, this_beg, bs,
survey_beg, survey_end - survey_beg,
verbose=verbose)
# TODO-Medium Note Compression:
# This might be a good extension point, as this is the
# last place where we still have a list of bricks to
# be written. So it would be easier to parallelize here.
# But we don't yet know which bricks will be written as
# all-constant so there would need to be some refactoring.
# _writeOneNormalBrick() probably needs to be called from here.
# See comments in that function; it will need some
# changes. And _writeOneBrick() needs to be told whether
# the data has been compressed or not. Instead of
# being told which compressor (if any) to use.
# If there are any errors during _writeOneBrick() this
# probably means the entire file is a lost cause.
# This is true also for ZgyUserError raised in the file
# layer, because at that layer the "user" is really
# OpenZGY and not some client code. The only acceptable
# error is ZgySegmentIsClosed, and that will be caught
# and handled at lower levels.
# TODO-Low the logic here can probably be refined.
# If there is no buffering (on-prem files) then a
# write can probably just be re-tried if we make
# sure the write is done before updating metadata.
# But do we really want that complexity? The risk
# of transient errors is way higher with cloud access.
# And due to buffering in the file layer those would
# also be a lot harder to recover from.
with ErrorsWillCorruptFile(self):
self._writeOneBrick(brick, brickpos,
lod=lod,
compressor=compressor,
verbose=verbose)
def _writeRegion(self, data, start, lod, compressor, is_storage, *, verbose = None):
"""
Write an arbitrary region. If start and end are not aligned to
the brick size this will do a read/modify/write. Note that when
writing to the cloud it is highly recommended to write aligned
regions only. Otherwise some disk space might be wasted.
Performance note: The read/modify/write could also have been done
one brick at a time. Doing it here means that for large requests
a number of bricks which were already full will also be read.
On the other hand a single read might help parallelism. Either
way the recommendation is for the user to write brick aligned data.
So this might not be a big deal.
"""
self._argType(data, (np.ndarray, ScalarBuffer))
self._checkCompatibleBufferTypes(data.dtype, is_storage)
if not is_storage:
data = self._scaleDataToStorage(data)
beg = np.array(start, dtype=np.int64)
end = np.array(data.shape, dtype=np.int64) + beg
bs = np.array(self._metadata._ih._bricksize, dtype=np.int64)
survey_beg = np.array((0, 0, 0), dtype=np.int64)
survey_end = (np.array(self._metadata._ih._size, dtype=np.int64) + ((1<<lod) - 1)) // (1<<lod)
need_rmw = any([(beg[i]%bs[i]) != 0 or ((end[i]%bs[i]) != 0 and end[i] < survey_end[i]) for i in range(3)])
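# Example: writing a 40 x 41 x 42 region at (16, 16, 16) needs
# read/modify/write because neither end is brick aligned, while a
# 112 x 64 x 64 region at (0, 0, 64) does not, since it is aligned at
# the start and ends at the survey edge in the i direction.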
if need_rmw:
# Since we are doing a read/modify/write, also read any padding
# outside the survey. _writeAlignedRegion will see full
# bricks also at the end of the survey. If no r/m/w needed
# the latter is not guaranteed.
new_start = (beg // bs) * bs
new_count = (((end + bs - 1) // bs) * bs) - new_start
new_data = np.zeros(new_count, dtype=data.dtype)
dprint("@@ _writeRegion r/m/w: user {0} {1} padded {2} {3}".format(
start, data.shape, new_start, new_count))
# The reader takes care of any defaultvalue.
# as_float=False because we converted, and data.dtype==file_dtype
self.readToExistingBuffer(new_data, new_start, lod=lod,
as_float=False,
verbose=verbose)
self._partialCopy(data, start, data.shape,
new_data, new_start, new_data.shape,
survey_beg, survey_end - survey_beg,
verbose=verbose)
data = new_data
start = new_start
else:
dprint("@@ _writeRegion direct: user {0} {1}".format(start, data.shape))
self._writeAlignedRegion(data, start, lod=lod,
compressor=compressor,
verbose=verbose)
data = data.value if isinstance(data, ScalarBuffer) else data
if np.issubdtype(data.dtype, np.integer):
tmp_min = np.amin(data)
tmp_max = np.amax(data)
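# _sample_min > _sample_max is the initial sentinel meaning that no
# samples have been written yet, so the first write just replaces the
# range instead of merging with it.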
if self._sample_min > self._sample_max:
# Changes _sample_min / _sample_max to the correct value type.
self._sample_min = tmp_min
self._sample_max = tmp_max
else:
self._sample_min = min(self._sample_min, tmp_min)
self._sample_max = max(self._sample_max, tmp_max)
else:
valid = np.isfinite(data).astype(bool)
self._sample_min = np.amin(data, where=valid, initial=self._sample_min)
self._sample_max = np.amax(data, where=valid, initial=self._sample_max)
# Copyright 2017-2021, Schlumberger
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | zgyio | /zgyio-0.0.4.tar.gz/zgyio-0.0.4/openzgy/impl/bulk.py | bulk.py |
##@package openzgy.impl.genlod
# TODO-Low: Several tweaks are possible but might not offer
# much value. See a detailed discussion in doc/lowres.html.
# Testing notes:
# test.black.checkLodContents(), checkStatistics(), checkHistogram()
# is part of a reasonably complete end to end test checking this module.
# A new unit test testFinalizeProgress() has been written to verify the
# progress mechanism. Both that the done and total end up precisely
# identical at the end and that the client can abort the calculation.
import sys
import argparse
import numpy as np
from collections import namedtuple
from . import enum as impl_enum
from ..exception import *
from .stats import StatisticData
from .histogram import HistogramData
from .lodalgo import decimate, DecimationType
from .bulk import ScalarBuffer
def _round_down_to_power_of_2(n):
exp = -1
while n > 0:
n >>= 1
exp += 1
return 1<<exp
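# Examples: 12 -> 8, 16 -> 16, 1 -> 1. Callers are expected to pass
# n >= 1; n == 0 would leave exp at -1 and make the final shift invalid.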
def _make_blocksizes(bricksize, surveysize, nlods, dtype, factor=(1,1,1), verbose=None):
"""
CURRENTLY NOT USED.
Calculate the minimum blocksize to read at each lod level. Clip to
the survey size. Also compute the memory needed to hold one buffer
for each lod. Note that the genlod algorithm currently assumes
that the block size is the same for all levels except for the
clipping. And it currently handles clipping itself and might not
like us to do so. Currently this function is not very useful.
"""
blocksizes = np.zeros((nlods, 3), dtype=np.int64)
ss = np.array(surveysize, dtype=np.int64)
bs = np.array([2*factor[0]*bricksize[0],
2*factor[1]*bricksize[1],
ss[2]], dtype=np.int64)
iterations = 0
for lod in range(nlods):
bs = np.minimum(bs, ss)
blocksizes[lod] = bs
iterations += np.product((ss+bs-1) // bs)
ss = (ss + 1) // 2
bytesused = np.sum(np.product(blocksizes, axis=1)) * int(np.dtype(dtype).itemsize)
returntype = namedtuple("BlockSizeInfo", "blocksizes bytesused iterations")
result = returntype(blocksizes, bytesused, iterations)
print(result)
return result
def _choose_blocksizes(bricksize, surveysize, nlods, dtype, maxmem=512*1024*1024):
"""
CURRENTLY NOT USED.
Plan B and C are rather memory intensive already. So we might not
be able to increase the buffer sizes very much.
Calculate the actual blocksize to read at each lod level. If there
is enough memory available we can multiply the basic block size by
some factor in the j direction. And if there is still more memory
left it can be multiplied in the I direction as well. There is a
benefit when the file resides on the cloud because the code will
read larger blocks from lod 0. There is also a benefit both for
cloud and on-prem files in that low resolution bricks end up more
contiguous.
Beware that the current genlod algorithm assumes the block size is
the same at each level. It will do exactly 4 reads at lod-1 to
produce one brick at lod. There is also a requirement that the
factors are a power of 2. Neither of those limitations applies to the
non-recursive plan B, which is currently not implemented.
This means that the factor we multiply with should be the same for
all lods. Had it not been for this, it would probably have made
sense to make the blocks larger in (i,j) since they will be
smaller in k.
For very small surveys there is another caveat. The granularity
of calls to the progress callback will be the block size.
If the buffer gets too large then a progress bar won't be
very useful.
"""
result = _make_blocksizes(bricksize, surveysize, nlods, dtype,
factor=(1,1,1))
jfactor = max(1, maxmem // result.bytesused)
jfactor = _round_down_to_power_of_2(jfactor)
result = _make_blocksizes(bricksize, surveysize, nlods, dtype,
factor=(1,jfactor,1))
ifactor = max(1, maxmem // result.bytesused)
ifactor = _round_down_to_power_of_2(ifactor)
result = _make_blocksizes(bricksize, surveysize, nlods, dtype,
factor=(ifactor,jfactor,1), verbose=None)
return result
class GenLodBase:
"""
Abstract class for generating low resolution bricks, histogram,
and statistics. At this level only define virtual methods for I/O.
The implementation can be used as-is when mocking the class.
The optional nlods parameter is only used as a consistency check.
Note that the WeightedAverage algorithm requires a histogram to work.
If no histogram was provided then the current contents of the
accumulated histogram will be used. This is unfortunate and might
cause brick artifacts. Especially in the first few bricks that are
generated. With a non-recursive algorithm (plan C) where only
lod 2 and above use weighted average, this is unproblematic.
Because in that case we will be done with the histogram once we
need it. TODO-Low consider doing an initial pass with a statistical
sampling of the lod0 data, only for use with weighted average.
There will be a minor issue with some values appearing to have zero
frequency, but this should not cause any trouble. (assume "1").
Note that the WeightedAverage and AverageNon0 algorithms expect a
defaultvalue to use when all inputs are inf/nan or (for AverageNon0)
zero. Only relevant for integral types, to ensure that the default
is whatever will produce the value closest to 0 after conversion.
And integral data can neither be inf nor nan, so this is a pretty
academic issue. For AverageNon0 that algorithm is currently not
used. So it isn't even clear what the desired behavior should be.
"""
def __init__(self, size, *, bricksize = (64, 64, 64), dtype = np.float32,
range_hint = None, nlods = None,
decimation = None, histogram = None, defaultvalue = None,
progress = None, verbose = None):
_nlods = 1 # The loop stops before counting final level
_total = 1 # Total number of bricks in all levels.
bs = np.array(bricksize, dtype=np.int64)
sz = np.array(size, dtype=np.int64)
while np.any(sz > bs):
_nlods += 1
_total += np.product((sz + bs - 1) // bs)
sz = (sz + 1) // 2
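# Example: size (112, 64, 176) with 64^3 bricks gives _nlods == 3 and
# _total == 6 + 2 + 1 == 9, i.e. 6 bricks at lod 0, 2 at lod 1 and the
# single lod 2 brick that terminates the loop.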
assert nlods is None or nlods == _nlods
self._surveysize = np.array(size, dtype=np.int64)
self._bricksize = np.array(bricksize, dtype=np.int64)
self._dtype = dtype
self._range_hint = range_hint or (-1, 1)
self._progress = progress
self._verbose = verbose or (lambda *args, **kw: False)
self._done = 0
self._total = _total
self._nlods = _nlods
self._surveysize.flags.writeable = False
self._bricksize.flags.writeable = False
self._decimation_type = decimation or [DecimationType.LowPass,
DecimationType.WeightedAverage]
self._wa_histogram = histogram # Might become self._histo later.
self._wa_defaultvalue = defaultvalue or 0
self._verbose("@survey {0}".format(tuple(self._surveysize)))
#_choose_blocksizes(bricksize, size, nlods, dtype)
self._report(None)
def _report(self, data):
"""
Invoke the user's progress callback if any.
Keep track of how many bricks we have processed. Both reads and
writes increment the same counter. For plan C the reads will cover
all blocks in lod 0 and the writes will cover all blocks in lod > 0.
For plan D all blocks are written which means the computation of
_total done in __init__ might need to change.
"""
if data is not None:
count = np.product((np.array(data.shape, dtype=np.int64) +
self._bricksize - 1) // self._bricksize)
self._done += count
if self._progress and not self._progress(self._done, self._total):
raise ZgyAborted("Computation of low resolution data was aborted")
def _read(self, lod, pos, size):
"""
This is a stub that must be redefined except for low level unit tests.
Read a block from the ZGY file (plans B and C) or the application
(plan D). The lod parameter will always be 0 for plans C and D.
Returns a ScalarBuffer if all constant, else a 3D numpy array.
"""
result = ScalarBuffer(size, 0, self._dtype)
self._report(result)
return result
def _write(self, lod, pos, data):
"""
This is a stub that must be redefined except for low level unit tests.
Write a block to the ZGY file. Silently ignore writes of data that
is known to have been read directly from the file. For plans B and C
this means ignoring all writes to lod 0.
"""
self._report(data)
def _savestats(self):
"""
This is a stub that must be redefined except for low level unit tests.
Finalize and write the computed statistics and histogram to the file.
"""
pass
def _prefix(self, lod):
"""For debugging and logging only."""
return " " * (self._nlods-1 - lod)
@staticmethod
def _format_result(result):
"""For debugging and logging only."""
if isinstance(result, np.ndarray):
return "array" + str(tuple(result.shape))
else:
return repr(result)
class GenLodImpl(GenLodBase):
"""
Abstract class for generating low resolution bricks, histogram,
and statistics. The inherited methods for I/O are still stubs.
See doc/lowres.html for details. This class implements plan C or D
which is good for compressed data and acceptable for uncompressed.
The ordering of low resolution bricks in the file will not be optimal.
For optimal ordering but working only for uncompressed data consider
implementing plan B in addition to the plan C already implemented.
The implementation can be used as-is in a unit test with mocked I/O.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
#print("GenLodImpl", args, kwargs)
self._stats = StatisticData()
self._histo = HistogramData(range_hint=self._range_hint,
dtype=self._dtype)
# See base constructor. Better than no histogram at all.
self._wa_histogram = self._wa_histogram or self._histo
def __call__(self):
"""
Generate and store statistics, histogram, and all low resolution
bricks. Works for plans C and D. If we also need an implementation
of plan B then this method would need to iterate over all bricks
and lods, and _accumulate would not make any recursive calls.
"""
self._calculate((0,0,0), self._nlods-1)
self._savestats() # Remove?
return self._stats, self._histo
def _accumulate(self, data):
"""
Keep a running tally of statistics and histogram.
"""
if data is None:
return
factor = 1
if isinstance(data, ScalarBuffer):
factor = np.product(data.shape, dtype=np.int64)
data = np.array([data.value], dtype=data.dtype)
self._stats.add(data, factor)
self._histo.add(data, factor)
def _calculate(self, readpos, readlod):
"""
Read data from the specified (readpos, readlod) and store it back.
The function will itself decide how much to read. But with several
constraints. Always read full traces. Size in i and j needs to be
2 * bs * 2^N where bs is the file's brick size in that dimension,
clipped to the survey boundaries. This might give an empty result.
When readlod is 0 and the data was read from the ZGY file then the
writing part is skipped. Since the data is obviously there already.
In addition to reading and writing at the readlod level, the
method will compute a single decimated buffer at readlod+1 and
return it. As with the read/write the buffer might be smaller at
the survey edge. Note that the caller is responsible for storing
the decimated data.
Full resolution data (lod 0) will be read from file (plan C) or the
application (plan D). Low resolution is computed by a recursive call
to this function (plans C and D) or by reading the file (plan B).
Note that currently only plan C is fully implemented.
For plans B and C a single call needs to be made to read the brick
(there is by definition just one) at the highest level of detail.
This will end up computing all possible low resolution bricks and
storing them. For plan B the caller must iterate.
"""
surveysize = (self._surveysize + (1<<readlod) - 1) // (1<<readlod)
readpos = np.array((readpos[0], readpos[1], 0), dtype=np.int64)
if readpos[0] >= surveysize[0] or readpos[1] >= surveysize[1]:
return None
readsize = np.minimum(2*self._bricksize, surveysize - readpos)
readsize[2] = surveysize[2] # Always read full traces.
writesize = (readsize + 1) // 2
self._verbose("@{0}calculate(lod={1}, pos={2})".format(
self._prefix(readlod), readlod, tuple(readpos)))
if readlod == 0:
data = self._read(lod=readlod, pos=readpos, size=readsize)
self._accumulate(data)
else:
offsets = [[0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 1, 0]]
offsets = np.array(offsets, dtype=np.int64) * self._bricksize
hires = [None, None, None, None]
for i in range(4):
hires[i] = self._calculate(2*readpos + 2*offsets[i], readlod-1)
data = self._paste4(hires[0], hires[1], hires[2], hires[3])
self._write(lod=readlod, pos=readpos, data=data)
if readlod == self._nlods - 1:
result = None # Caller will discard it anyway.
assert self._done == self._total
elif not isinstance(data, np.ndarray):
result = ScalarBuffer(writesize, data.value, data.dtype)
else:
result = self._decimate(data, lod=readlod+1)
assert tuple(result.shape) == tuple(writesize)
self._verbose("@{0}calculate returns(lod={1}, pos={2}, data={3})".format(
self._prefix(readlod), readlod+1, tuple(readpos//2),
self._format_result(result)))
return result
def _decimate(self, data, lod):
"""
Return a decimated version of the input buffer with half the size
(rounded up) in each dimension. In total the result will be ~1/8
the size of the input.
"""
if data is None:
return None
elif isinstance(data, ScalarBuffer):
size = ((data.shape[0]+1)//2,
(data.shape[1]+1)//2,
(data.shape[2]+1)//2)
return ScalarBuffer(size, data.value, data.dtype)
else:
di = data.shape[0] % 2
dj = data.shape[1] % 2
dk = data.shape[2] % 2
if di != 0 or dj != 0 or dk != 0:
data = np.pad(data, ((0, di), (0, dj), (0, dk)), mode='edge')
#return data[::2,::2,::2]
dt = self._decimation_type[min(lod, len(self._decimation_type)-1)]
if dt == DecimationType.WeightedAverage:
return decimate(data, dt,
histogram = self._wa_histogram,
defaultvalue = self._wa_defaultvalue)
elif dt == DecimationType.AverageNon0:
return decimate(data, dt,
defaultvalue = self._wa_defaultvalue)
else:
return decimate(data, dt)
def _paste1(self, result, more, ioff, joff):
"""
See _paste4() for details.
"""
if more is not None:
value = more.value if isinstance(more, ScalarBuffer) else more
result[ioff:ioff+more.shape[0], joff:joff+more.shape[1], :] = value
def _paste4(self, d00, d01, d10, d11):
"""
Combine 4 buffers into one. Input buffers may be None (do not
paste) or ScalarBuffer (paste a constant value). If all not-None
buffers are just scalars then the return from this function
will also be a scalar. d01 adds more data in the J direction,
so it starts at i=0, j>0 in the target. Similarly d10 adds
more in the I direction, and d11 in the diagonal.
"""
if d01 is None and d10 is None and d11 is None:
return d00 # Nothing to paste. Also works for None or scalar.
assert d00 is not None # Empty d00 + non-empty others is not good.
assert d01 is None or d01.shape[0] == d00.shape[0]
assert d10 is None or d10.shape[1] == d00.shape[1]
if d01 is not None and d10 is not None:
# The "diagonal" brick must exist with the right size.
assert d11 is not None
assert d11.shape[1] == d01.shape[1] and d11.shape[0] == d10.shape[0]
else:
# The "diagonal" brick should not be needed in this case.
assert d11 is None
ni = d00.shape[0] + (d10.shape[0] if d10 is not None else 0)
nj = d00.shape[1] + (d01.shape[1] if d01 is not None else 0)
nk = d00.shape[2]
all_same = True
for e in (d00, d01, d10, d11):
if all_same and e is not None:
if not isinstance(e, ScalarBuffer) or e.value != d00.value:
all_same = False
if all_same:
result = ScalarBuffer((ni, nj, nk), d00.value, d00.dtype)
else:
result = np.zeros((ni, nj, nk), dtype=d00.dtype)
self._paste1(result, d00, 0, 0)
self._paste1(result, d01, 0, d00.shape[1])
self._paste1(result, d10, d00.shape[0], 0)
self._paste1(result, d11, d00.shape[0], d00.shape[1])
return result
class GenLodC(GenLodImpl):
"""
Generate and store low resolution bricks, histogram, and statistics.
See doc/lowres.html for details. I/O is done via ZgyInternalBulk.
Use this class as part of finalize().
The implementation uses plan C, which means the full resolution data
will be read from the ZGY file. To implement plan D, make a derived
class that redefines _read() to query the client for the required full
resolution data. _read() must then also call _write() to store the
data it just received.
"""
def __init__(self, accessor, *,
compressor = None, decimation = None,
progress = None, verbose = None):
# Could add defaultvalue. And add a unit test for correct operation.
super().__init__(size = accessor._metadata._ih._size,
bricksize = accessor._metadata._ih._bricksize,
dtype = impl_enum._map_DataTypeToNumpyType(
accessor._metadata._ih._datatype),
range_hint = (accessor._sample_min,
accessor._sample_max),
nlods = accessor._metadata._ih._nlods,
decimation = decimation,
progress = progress, verbose = verbose)
self._accessor = accessor
self._compressor = compressor
def _read(self, lod, pos, size):
"""See base class for details."""
as_float = bool(self._dtype == np.float32)
verbose = False
data = self._accessor.readConstantValue(
pos, size, lod=lod, as_float=as_float, verbose=verbose)
if data is not None:
data = ScalarBuffer(size, data, self._dtype)
else:
data = np.zeros(size, dtype=self._dtype)
self._accessor.readToExistingBuffer(
data, pos, lod=lod, as_float=as_float, verbose=verbose)
self._verbose("@{0}read(lod={1}, pos={2}, size={3}) => {4}".format(
self._prefix(lod-1), lod, tuple(pos), tuple(size),
self._format_result(data)))
assert tuple(size) == tuple(data.shape)
self._report(data)
return data
def _write(self, lod, pos, data):
"""See base class for details."""
self._verbose("@{0}write(lod={1}, pos={2}, data={3})".format(
self._prefix(lod-1), lod, tuple(pos),
self._format_result(data)))
is_storage = bool(data.dtype != np.float32)
verbose = False
if lod > 0:
self._accessor._writeRegion(
data, pos, lod=lod, compressor=self._compressor,
is_storage=is_storage, verbose=verbose)
self._report(data)
def _savestats(self):
#print("STATS", repr(self._stats), str(type(self._stats._min)))
pass
def main(filename, snr):
"""
Create or re-create all statistics, histogram, and low resolution data
in the provided open ZGY file. This method is for testing only; normally
class GenLodC will be invoked from inside the api layer as part of
finalizing a newly created file. Normally the genlod module should not
use the api layer itself. Also note that ZGY technically doesn't allow
updating a file once it has been written and closed. A kludge makes
it work, sort of, but not for seismic store and not for versions < 2.
Also, only the lowres data is actually written to the file.
Now that the new code has been hooked up to the rest of OpenZGY there
isn't really much need for this stand alone test; the code should be
covered better by the regular unit tests.
"""
from ..api import ZgyReader, ZgyCompressFactory, ProgressWithDots
from ..test.utils import SDCredentials
with ZgyReader(filename, iocontext=SDCredentials(), _update=True) as reader:
GenLodC(accessor = reader._accessor,
progress = ProgressWithDots(),
verbose = lambda *args, **kw: None, # print(*args, **kw),
compressor = ZgyCompressFactory("ZFP", snr = snr))()
if __name__ == "__main__":
"""
Stand alone app for testing the genlod module. See main() for details.
"""
np.seterr(all='raise')
parser = argparse.ArgumentParser(description="Generate and store low resolution bricks, histogram, and statistics. See doc/lowres.html for details.")
parser.add_argument('input', help='ZGY input cube, local or sd://')
parser.add_argument('--snr', default=0, type=int,
help='Pass 10..70 for lossy compression, 99 for lossless, omit for uncompressed.')
args = parser.parse_args()
if not args.input:
print("File names cannot be empty.", file=sys.stderr)
sys.exit(1)
main(args.input, args.snr)
# Copyright 2017-2020, Schlumberger
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | zgyio | /zgyio-0.0.4.tar.gz/zgyio-0.0.4/openzgy/impl/genlod.py | genlod.py |
#print('Running' if __name__ == '__main__' else 'Importing', __file__)
import numpy as np
import os
import sys
import io
import math
import json
import base64
import time
from contextlib import suppress, ExitStack, contextmanager
from enum import Enum
from collections import namedtuple
try:
from .. import zgypublic as oldzgy
print("Also testing the old ZGY-Public API.")
except Exception as ex:
print("Old ZGY-Public is not available:", ex)
class FakeAPI:
zgy = None
ZgyReader = object()
ZgyWriter = object()
oldzgy = FakeAPI()
from .. import api as newzgy
from ..api import SampleDataType, UnitDimension, ProgressWithDots, ZgyCompressFactory, ZgyKnownCompressors, ZgyKnownDecompressors
from ..impl.lodalgo import DecimationType # TODO-Low encapsulation?
from ..test.utils import SDCredentials, TempFileAutoDelete, LocalFileAutoDelete, CloudFileAutoDelete, HasSeismicStore, HasZFPCompression, SDTestData, SDTestSink
from ..impl.enum import UpdateMode
from ..exception import *
def HasOldZgy():
return oldzgy.zgy is not None
def showZgy(*args):
msg = ""
for a in args:
if a is None: pass
elif a is newzgy.ZgyReader: msg += " and new reader"
elif a is newzgy.ZgyWriter: msg += " and new writer"
elif a is oldzgy.ZgyReader: msg += " and old reader"
elif a is oldzgy.ZgyWriter: msg += " and old writer"
else: msg += " and " + a.__module__ + "." + a.__name__
return msg[5:] if msg else ""
# ----- Called by test code; not runnable by themselves. ----- #
@contextmanager
def TimeMe(name):
#start = time.perf_counter()
yield None
#elapsed = time.perf_counter() - start
#print("TIMED: %-20.20s %7.3f" % (name+":", elapsed), flush=True)
class TraceCallsToSD:
"""
Suitable for use as a _debug_trace callback.
"""
_entry = namedtuple("io", "what nbytes padded parts")
def __init__(self, *, verbose = False):
self.calls = []
self._verbose = verbose
def __call__(self, what, nbytes, padded, parts):
self.calls.append(self._entry(what, nbytes, padded, parts))
if self._verbose:
print(" {0:9s} size {1:10s} padded {2:10s} parts {3:1d}".format(
what, self._pretty(nbytes), self._pretty(padded), parts))
@staticmethod
def _pretty(n):
if (n < 1024) or (n % (1024) != 0):
return "{0:4d} bytes".format(n)
elif (n < 1024*1024) or (n % (1024*1024) != 0):
return "{0:7d} KB".format(n//1024)
else:
return "{0:7d} MB".format(n//(1024*1024))
def reset(self):
self.calls = []
class MustThrow:
"""
Check that we get the expected exception.
"""
def __init__(self, message = None, extypes = None):
self._extypes = extypes
self._message = message
if isinstance(extypes, type) and issubclass(extypes, Exception):
self._extypes = (extypes,)
self._exnames = tuple([e.__name__ for e in self._extypes]) if self._extypes else "Exception"
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
if type is None:
problem = 'Expected {0}, got no exception'.format(self._exnames)
elif self._extypes and type not in self._extypes:
problem = 'Expected {0} got {1} "{2}"'.format(self._exnames, type.__name__, str(value))
elif self._message and str(value).find(self._message) < 0:
problem = 'Expected "{0}" got "{1}"'.format(self._message, str(value))
else:
problem = None
#print('Ok: Expected {0} "{1}" got {2} "{3}"'.format(self._exnames, self._message or "", type.__name__, str(value)))
if problem:
raise AssertionError(problem) from None
return True # suppress the exception.
def pretty(n):
"""
Format a number, assumed to be a size in bytes, as a human readable string.
"""
if type(n) != type(42):
return str(n)
if n >= (1024*1024*1024) and (n % (1024*1024*1024)) == 0:
return str(n//(1024*1024*1024)) + " GB"
if n >= (1024*1024) and (n % (1024*1024)) == 0:
return str(n//(1024*1024)) + " MB"
if n >= (512*1024) and (n % (256*1024)) == 0:
return str(n//(256*1024)) + "*256 KB"
if n >= (1024) and (n % (1024)) == 0:
return str(n//(1024)) + " KB"
return str(n) + " bytes"
def savePNG(data, outfile):
from PIL import Image
def normalize(a):
a = a.astype(np.float32)
dead = np.isnan(a)
amin, amax = (np.nanmin(a), np.nanmax(a))
a[dead] = amin
if amin == amax:
a *= 0
else:
a = (a - amin) / (amax - amin)
a = (a * 255).astype(np.uint8)
return a, dead
data = np.squeeze(data)
data = np.transpose(data)
data = np.flip(data, 1)
data, dead = normalize(data)
tmp = np.zeros((data.shape[0], data.shape[1], 3), dtype=np.uint8)
r = tmp[...,0]
g = tmp[...,1]
b = tmp[...,2]
r += data
g += data
b += data
r[dead] = 255
g[dead] = 255
b[dead] = 0
im = Image.fromarray(tmp, mode="RGB")
im.save(outfile, format="PNG")
def isMutable(obj, *, verbose = False, seen = set()):
"""
Recursive check for whether an object is mutable.
The idea was to check that all members of e.g. ZgyReader are
immutable so the user cannot (a) shoot himself in the foot by
directly modifying a data members, or (b) even worse, change
some cached value by modifying a mutable member of a container.
Unfortunately this was a lot harder than I thought.
- A callable might or might not be const. Need to check the code.
- A property and a data member look rather similar.
- A readonly property may have a stub __set__ that will throw.
- A __setattr__, if present, can make any attribute mutable.
- Python has no frozendict (yet) unless I want to add a
rather pointless dependency, so I copy dicts before
returning them. This is safe, but the code here cannot know.
I might make my own dict-like wrapper but this is getting
way too complicated.
Looks like I just have to rely on dump() followed by eyeballing
the source code.
"""
# Known types
if isinstance(obj, (type(None), type, str, int, bool, float, tuple, bytes, Enum, np.dtype)):
if verbose: print("Immutable type", type(obj).__name__)
return False
elif isinstance(obj, (list, set, dict, bytearray, np.ndarray)):
if verbose: print("MUTABLE type", type(obj).__name__)
return True
elif callable(obj):
if verbose: print("CALLABLE type", type(obj).__name__)
return False
# Recursive checks
if id(obj) in seen:
if verbose: print("skipping cycle of", type(obj).__name__)
return False
if verbose: print("Adding", id(obj), "to seen")
seen |= set((id(obj),))
if isinstance(obj, dict):
obj = obj.items()
if isinstance(obj, tuple):
if verbose: print("recursively checking", type(obj).__name__)
return any([isMutable(e, verbose=verbose, seen=seen) for e in obj])
if verbose: print("unknown type, assuming mutable", type(obj).__name__)
return True
def hasMutableMembers(obj, *, safe = set(), verbose = False):
"""
Try to detect whether obj (which is some kind of instance variable)
has any plain data members or any properties that contain data that
in turn looks like it is mutable. Note that this turned out to be
a lot harder than I first thought. The tests are by no means complete.
"""
if obj is not None:
for x in sorted(dir(obj)):
if x[0] != '_' and not x in safe:
is_prop = isinstance(getattr(type(obj), x, None), property)
is_call = callable(getattr(obj, x))
if not is_prop and not is_call:
if verbose: print(type(obj).__name__ + "." + x,
"looks like a DATA member")
return True
if isMutable(getattr(obj, x), verbose=False, seen=set()):
if verbose: print(type(obj).__name__ + "." + x,
"is of a MUTABLE type")
return True
return False
def dump(message, obj, verbose = False):
if message: print(message)
class Dummy:
"""(no doc)"""
for x in sorted(dir(obj)):
if x[0] != '_':
value = getattr(obj, x)
if isinstance(getattr(type(obj), x, None), property):
vt = "prop "
elif callable(value):
vt = "call "
else:
vt = "DATA "
if isMutable(value, seen=set()):
vt = "MUTABLE " + vt
if verbose:
doc = '\n' + str(getattr(obj.__class__, x, Dummy).__doc__)
doc = doc.replace('\n', '\n\t\t')
print('\t' + vt + x, "=", value, doc)
else:
if not callable(value):
print('\t' + vt + x, "=", value)
else:
print('\t' + vt + x + "()")
def createFancyBuffer(defaultvalue, unwrittenvalue):
"""
Create test data as described elsewhere. This version saves the
data in an in-memory numpy array making the code quite trivial.
There is no point in writing the data in multiple operations
because we aren't testing numpy.
The caller needs to specify the default value that will be
assigned to samples that were never written. Separate defaults
may be given for unwritten samples inside a brick vs. bricks
never written to at all. If these two differ this is arguably
a bug in the implementation.
"""
data = np.full((112, 64, 176), defaultvalue, dtype=np.float32)
data[16:16+40, 16:16+41, 16:16+42] = 31
data[48:48+72, 20:20+10, 24:24+16] = 97
data[:,:,128:176] = unwrittenvalue
return data
def createFancyFile(filename, datatype, datarange, zgyWriterFactory, *, single_write = False, kwargs = dict()):
"""
The layout of this test data is described in detail in doc/testdata.png
The figure also explains how to compute the expected statistics by hand.
As for computing the expected sample values, this is done by
createFancyBuffer().
* Create a ZGY file with size (112, 64, 176), which spans a layout
of 2x1x3 bricks. Other parameters vary.
* Write an oddly sized rectangle "A" inside the first brick.
* Write an oddly sized rectangle "B" covering two cubes and partly
intersecting the first write, and also runs slightly into the
padding area.
* Write an all-zero region "C" that completely covers one brick and
also covers a second brick completely apart from padding area
outside the survey.
Additional arguments such as "snr" can be passed as kwargs={"snr": 99},
note that I have not declared the parameter as **kwargs so the dict
must be created by hand, to make it more explicit what the extras are.
Accounting for existing bugs:
Several of the tests have arguments (defaultvalue,unwrittenvalue,countdead).
- defaultvalue should be the value closest to 0 that can be represented.
- unwrittenvalue ought to have been the same as defaultvalue, but with the
old reader it might be 0 for float access and 0 converted to float for
raw.
- countdead should be True meaning unwritten samples are included in the
statistics and the histogram, but if the file was created by the old
writer then it needs to be set False.
Future: If implementing alpha support (currently not the case) we will
also need a file with alpha tiles set to the horizontal extent of the
actual stored data. In this data set there will still be unwritten
data at the tail end of each trace. Production code rarely does
this though; the assumption is that all traces have the same length
and that traces are written fully or not at all.
Note that currently, neither the old ZGY-Public nor the new OpenZGY
API can write alpha tiles. Only ZGY-Internal can do that. That API
does not have any Python wrapper.
"""
with zgyWriterFactory(filename,
iocontext = SDCredentials(),
size = (112, 64, 176),
datatype = datatype,
datarange = datarange,
zunitdim = UnitDimension.time,
zunitname = "ms",
zunitfactor = 0.001,
hunitdim = UnitDimension.length,
hunitname = "ft",
hunitfactor = 0.3048,
zstart = 2500,
zinc = 4.125,
annotstart = (1234, 5678),
annotinc = (5, 2),
corners = ((1000, 1000),
(3775, 1000),
(1000, 2890),
(3775, 2890)),
**kwargs
) as writer:
expect_datarange_1 = datarange
if datatype == SampleDataType.float and zgyWriterFactory != oldzgy.ZgyWriter:
# The value is unspecified. It could be NaN if the file was never
# flushed, or (0,0) if it was flushed before writing anything.
# Or it could be the (likely not calculated yet) statistical
# range if the code in api.ZgyMeta.datarange chooses to return
# the statistical range instead.
expect_datarange_1 = (0, 0)
#dump(filename, writer)
checkmeta(writer, datatype, expect_datarange_1)
if single_write:
# Read/modify/write is not allowed whan writing compressed data,
# or at least not recommended since noise will accumulate.
writer.write((0, 0, 0), createFancyBuffer(0, 0))
else:
writer.write((16,16,16), np.full((40,41,42), 31, dtype=np.float32))
writer.write((48,20,24), np.full((72,10,16), 97, dtype=np.float32))
writer.write((0,0,64), np.full((112,64,64), 0, dtype=np.float32))
# Statistics haven't been computed yet, so datarange for float cubes
# should still be returned as empty.
checkmeta(writer, datatype, expect_datarange_1)
with newzgy.ZgyReader(filename, iocontext = SDCredentials()) as reader:
expect_datarange_2 = datarange
if datatype == SampleDataType.float:
if True or zgyWriterFactory != oldzgy.ZgyWriter:
# The value has been explicitly set to the statistical range
# if written by the new writer. If api.ZgyMeta.datarange chooses
# to return the statistical range instead then this happens
# also for files written by the old accessor. The second
# conditional should be disabled in that case.
expect_datarange_2 = (reader.statistics.min, reader.statistics.max)
checkmeta(reader, datatype, expect_datarange_2)
def checkmeta(meta, datatype = None, datarange = None):
"""
Verify round trip of metadata. This can be used both by a writer
(ensure the data we set is still available as properties) and a
reader (ensure the roundtrip to a stored file and back worked).
"""
assert(meta.size == (112, 64, 176))
assert(datatype is None or meta.datatype == datatype)
assert(datarange is None or meta.datarange == datarange)
assert(meta.raw_datarange == meta.datarange)
assert(meta.zunitdim == UnitDimension.time)
assert(meta.zunitname == "ms")
assert(abs(meta.zunitfactor - 0.001) < 1.0e-5)
assert(meta.hunitdim == UnitDimension.length)
assert(meta.hunitname == "ft")
assert(abs(meta.hunitfactor - 0.3048) < 0.0001)
assert(meta.zstart == 2500)
assert(abs(meta.zinc - 4.125) < 0.0001)
assert(meta.annotstart == (1234, 5678))
assert(meta.annotinc == (5, 2))
assert np.sum(np.abs(np.array(meta.corners) -
np.array(((1000, 1000),
(3775, 1000),
(1000, 2890),
(3775, 2890))))) < 0.0001
def explaincontents(expect, actual, delta):
"""
Detailed checking of a small part of the standard test cube.
A single trace that covers many special cases. Show an explanation
of what is being tested as well as expected vs. actual results.
See doc/testdata.png. This method is meant to be used to understand
why a particular test has failed.
"""
table = [( 0, 16, "default(r/m/w)"),
( 16, 24, "written once "),
( 24, 40, "written twice "),
( 40, 58, "written once "),
( 58, 64, "default(r/m/w)"),
( 64, 128, "constant-zero "),
(128, 176, "default(empty)")]
print("Displaying the trace at [50,22,:]")
for beg, end, text in table:
ex = expect[50,22,beg:end]
ac = actual[50,22,beg:end]
if np.amin(ex) == np.amax(ex) and np.amin(ac) == np.amax(ac):
print(" ", text, "expect", ex[0], "actual", ac[1])
else:
print(" ", text, "expect", ex, "actual", ac)
print(" largest error in entire cube:", delta)
def checkContents(filename, zgyReaderFactory, defaultvalue, unwrittenvalue, *, maxdelta = 0.001):
"""
Read back the entire survey from one of the files created by
createFancyFile() and compare with the expected results.
Also check the metadata.
"""
if zgyReaderFactory == oldzgy.ZgyReader and not HasOldZgy(): return
expect = createFancyBuffer(defaultvalue, unwrittenvalue)
with zgyReaderFactory(filename, iocontext = SDCredentials()) as reader, io.StringIO() as bitbucket:
# Improve coverage by exercising the debug log statements
verbose = lambda *args, **kwargs: print(*args, file=bitbucket, **kwargs)
checkmeta(reader)
actual = np.zeros((112, 64, 176), dtype=np.float32)
reader.read((0,0,0), actual, verbose = verbose)
delta = np.amax(np.abs(expect - actual))
if not delta <= maxdelta:
explaincontents(expect, actual, delta)
assert delta <= maxdelta
def compareArrays(expect, actual, value_epsilon = 0.02, count_epsilon = 0.01, *, verbose = False):
value_range = np.amax(expect) - np.amin(expect)
count_total = len(expect.flat)
# Error in each sample, relative to the total expected value range.
# Can technically be greater than 1 if "actual" has wild values.
# A value of e.g. <= 0.01 might be considered close enough.
value_delta = np.abs(expect - actual) / (value_range if value_range else 1)
count_bad = np.count_nonzero(value_delta > value_epsilon)
# In addition to the test for not exactly equal, allow a certain
# fraction of samples to differ by any amount. Typically this
# might be needed due to edge effects in lowres data.
relative_bad = count_bad / count_total
ok = relative_bad <= count_epsilon
if verbose:
print("{5}: {0:6d} of {1:7d} samples ({2:.2f}%) differ > {3:.2f}%. Allowed {4:.2f}%.".format(
count_bad, count_total, 100.0 * count_bad / count_total,
100.0 * value_epsilon, 100.0 * count_epsilon,
"pass" if ok else "FAIL"))
return ok
def showdecimation(lod0, lod1):
"""
Input 4 hires traces (2,2,n) and a corresponding decimated
trace (n//2) and display those to manually inspect the result.
"""
print(" decimated from these input samples")
for ii in range(0, lod0.shape[2], 2):
print("{0:10.5g} {1}".format(lod1[ii//2], list(lod0[:,:,ii:ii+2].flat)))
def checkLodContents(filename, zgyReaderFactory, defaultvalue, unwrittenvalue):
"""
As checkContents, but caller specifies which LOD to read and we
allow some slop in the result since the "expect" array uses trivial
decimation while the zgy writer uses something fancier.
NOTE: Due to bugs in the old writer, no checks are done for samples
where the fullres data has never been written. I have given up on
figuring out the current behavior; I just know that it is wrong.
"""
if zgyReaderFactory == oldzgy.ZgyReader and not HasOldZgy(): return
with zgyReaderFactory(filename, iocontext = SDCredentials()) as reader:
nlods = 1
size = np.array(reader.size, dtype=np.int64)
while np.any(size > reader.bricksize):
nlods += 1
size = (size + 1) // 2
assert nlods == reader.nlods
for lod in range(0, nlods):
step = 1<<lod
expect = createFancyBuffer(defaultvalue, unwrittenvalue)
expect = expect[:,:,:128] # Hard coded edge of written data.
expect = expect[::step,::step,::step]
size = (np.array(reader.size, dtype=np.int64) + (step-1)) // step
size[2] = 128//step
actual = np.zeros(size, dtype=np.float32)
reader.read((0,0,0), actual, lod = lod)
ok = compareArrays(expect, actual,
value_epsilon = 0.02 if lod < 2 else 0.04,
count_epsilon = 0.01 if lod < 2 else 0.03)
if not ok:
deltas = np.abs(expect - actual).astype(np.float64)
# A single 2d section in the "interesting" part of the survey.
actual_2d = actual[:,22//step,:]
expect_2d = expect[:,22//step,:]
deltas_2d = deltas[:,22//step,:]
# A single trace in the "interesting" part of the survey.
expect_1d = expect_2d[50//step,:]
actual_1d = actual_2d[50//step,:]
deltas_1d = deltas_2d[50//step,:]
# Now visualize these for debugging
savePNG(actual[:,22//step,:], "actual-" + str(lod) + ".png")
savePNG(expect[:,22//step,:], "expect-" + str(lod) + ".png")
savePNG(deltas[:,22//step,:], "deltas-" + str(lod) + ".png")
print("\n{0} LOD {1} check: {2}".format(
filename, lod, ("pass" if ok else "FAIL")))
print("Default", defaultvalue, "unwritten", unwrittenvalue)
print("first sample expect {0} actual {1}".format(
expect[0,0,0], actual[0,0,0]))
print("last sample expect {0} actual {1}".format(
expect[-1,-1,-1], actual[-1,-1,-1]))
print("interesting trace expect", expect_1d,
"interesting trace actual", actual_1d,
"delta", deltas_1d,
sep="\n")
assert ok
def checkRawContents(filename, zgyReaderFactory, defaultvalue, unwrittenvalue, *, maxdelta = 0.001):
"""
As checkContents, but do the value conversion ourselves.
There may be issues with never written bricks.
"""
if zgyReaderFactory == oldzgy.ZgyReader and not HasOldZgy(): return
expect = createFancyBuffer(defaultvalue, unwrittenvalue)
with zgyReaderFactory(filename, iocontext = SDCredentials()) as reader:
dtype = {SampleDataType.int8: np.int8,
SampleDataType.int16: np.int16,
SampleDataType.float: np.float32 }[reader.datatype]
checkmeta(reader)
actual = np.zeros((112, 64, 176), dtype=dtype)
reader.read((0,0,0), actual)
#print("raw...", actual[50,22,:])
if np.issubdtype(dtype, np.integer):
iinfo = np.iinfo(dtype)
actual = actual.astype(np.float32)
a = (reader.datarange[1]-reader.datarange[0])/(iinfo.max-iinfo.min)
b = reader.datarange[0] - a * iinfo.min
actual *= a
actual += b
delta = np.amax(np.abs(expect - actual))
if not delta <= maxdelta:
# A single trace in the "interesting" part of the survey.
print("expect", expect[50,22,:])
print("actual", actual[50,22,:])
print("delta", delta)
assert delta <= maxdelta
def computeStatisticsByRead(filename, zgyReaderFactory):
"""
Read back the entire survey from one of the files created by
createFancyFile() and compute statistics from the bulk data.
Concentrate on sum of samples and count of samples.
Also check the metadata.
"""
with zgyReaderFactory(filename, iocontext = SDCredentials()) as reader:
checkmeta(reader)
data = np.zeros((112, 64, 176), dtype=np.float32)
reader.read((0,0,0), data)
theSum = np.sum(data.flat, dtype=np.float64)
theCount = len(data.flat)
#print("Read sum {0}, sample count {1}".format(theSum, theCount))
#cnt = 0
#for x in (0, 1, 31, 97):
# c = np.count_nonzero(data == x)
# print(x, c)
# cnt += c
#print("?", theCount - cnt) # unaccounted for
return theSum, theCount
def readStatisticsStoredInFile(filename, zgyReaderFactory):
"""
Open the ZGY file and retrieve only the stored statistics information.
This is only supported in the new API.
"""
with zgyReaderFactory(filename, iocontext = SDCredentials()) as reader:
stats = reader.statistics
#print(stats)
return (stats.sum, stats.cnt)
def computeStatisticsByHand(defaultvalue, unwrittenvalue):
S = 112 * 64 * 176 # Total samples in survey, excluding padding.
P = 128 * 64 * 192 - S # Padding samples to align with 64^3 bricks.
A = 40 * 41 * 42 # Rect A beg (16,16,16) end (56,57,58) value 31.
B = 72 * 10 * 16 # rect B beg (48,20,24) end (120,30,40) value 97.
C = 112 * 64 * 64 # rect C beg (0,0,64) end (112,64,128) value 0.
D = 8 * 10 * 16 # overlap A/B, begin at (48,20,24).
E = 8 * 10 * 16 # Part of B outside survey: i in [112,120), j in [20,30), k in [24,40).
Z = 112 * 64 * 48 # Samples inside survey in never-written bricks.
nSample_31 = A - D
nSample_97 = B - E
nSample_unwritten = Z
nSample_default = S - nSample_31 - nSample_97 - nSample_unwritten
theSum = (31 * nSample_31 +
97 * nSample_97 +
defaultvalue * nSample_default +
(unwrittenvalue or 0) * nSample_unwritten)
theCount = S if unwrittenvalue is not None else S - Z
#print("Expected sum {0} * {1} + {2} * {3} + {4} * {5} + {6} * {7} = {8}, sample count {9}".format(31, nSample_31, 97, nSample_97, defaultvalue, nSample_default, unwrittenvalue, nSample_unwritten, theSum, theCount))
if unwrittenvalue is None:
theHist = { 31: nSample_31, 97: nSample_97,
defaultvalue: nSample_default }
elif defaultvalue == unwrittenvalue:
theHist = { 31: nSample_31, 97: nSample_97,
defaultvalue: nSample_default + nSample_unwritten }
else:
theHist = { 31: nSample_31, 97: nSample_97,
defaultvalue: nSample_default,
unwrittenvalue: nSample_unwritten }
return theSum, theCount, theHist
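# Illustration only, not wired into the test driver: the D and E terms above
# are just axis-aligned box intersections, so they can be cross-checked
# generically. The helper below is introduced here purely as a sketch; the box
# coordinates are the ones listed in the comments of computeStatisticsByHand().
def _sketchCrossCheckOverlaps():
    def boxIntersectionVolume(beg1, end1, beg2, end2):
        """Sample count in the intersection of two half-open boxes."""
        sides = [max(0, min(e1, e2) - max(b1, b2))
                 for b1, e1, b2, e2 in zip(beg1, end1, beg2, end2)]
        return sides[0] * sides[1] * sides[2]
    # D: overlap of rect A (16,16,16)-(56,57,58) and rect B (48,20,24)-(120,30,40).
    assert boxIntersectionVolume((16,16,16), (56,57,58),
                                 (48,20,24), (120,30,40)) == 8 * 10 * 16
    # E: the part of rect B outside the (112,64,176) survey, i.e. B clipped
    # against the padding slab from the survey edge at index 112 up to the
    # brick-aligned size 128 in the first dimension.
    assert boxIntersectionVolume((112,0,0), (128,64,192),
                                 (48,20,24), (120,30,40)) == 8 * 10 * 16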
def checkStatistics(filename, zgyReaderFactory, defaultvalue, unwrittenvalue, countdead, *, maxdelta = 0.001):
if zgyReaderFactory == oldzgy.ZgyReader and not HasOldZgy(): return
byhand = computeStatisticsByHand(defaultvalue, unwrittenvalue)
byread = computeStatisticsByRead(filename, zgyReaderFactory)
if not (abs(byhand[0]-byread[0]) < maxdelta and byhand[1] == byread[1]):
print("stat sum: byhand: {0}, byread {1}, maxdelta {2}, count byhand: {3} byread {4}".format(byhand[0], byread[0], maxdelta, byhand[1], byread[1]))
assert(abs(byhand[0]-byread[0]) < maxdelta and byhand[1] == byread[1])
if zgyReaderFactory is not oldzgy.ZgyReader:
byhand = computeStatisticsByHand(defaultvalue, unwrittenvalue if countdead else None)
byload = readStatisticsStoredInFile(filename, zgyReaderFactory)
assert(abs(byhand[0]-byload[0]) < maxdelta and byhand[1] == byload[1])
def findHistogramSlot(value, histrange):
"""
Which slot this value belongs to in a 256-bin histogram.
The result is guaranteed to be in the range [0..255].
Values outside range are clipped to 0 or 255. This is not
how the actual histogram computation is done, but for the
tests it should not make any difference.
"""
value = 255 * (value - histrange[0]) / (histrange[1] - histrange[0])
return int(np.rint(np.clip(value, 0, 255)))
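# Illustration only, not called by any test: a few hand-checked examples of the
# mapping above. Remember that histrange min/max are the *centers* of bin 0 and
# bin 255, so with range (0, 255) each bin is exactly one sample value wide.
def _sketchFindHistogramSlotExamples():
    assert findHistogramSlot(0, (0, 255)) == 0
    assert findHistogramSlot(2.4, (0, 255)) == 2   # rounds to the nearest bin center
    assert findHistogramSlot(-42, (-42, 0)) == 0   # range minimum maps to bin 0
    assert findHistogramSlot(+1, (-42, 0)) == 255  # above range, clipped
    assert findHistogramSlot(-999, (0, 255)) == 0  # below range, clipped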
def checkHistogram(filename, zgyReaderFactory, defaultvalue, unwrittenvalue, countdead):
if zgyReaderFactory == oldzgy.ZgyReader and not HasOldZgy(): return
if zgyReaderFactory is not oldzgy.ZgyReader:
with zgyReaderFactory(filename, iocontext = SDCredentials()) as reader:
stat = (reader.statistics.min, reader.statistics.max)
hist = (reader.histogram.min, reader.histogram.max)
data = (reader.datarange[0], reader.datarange[1])
if False:
print("checkHistogram:",
"stat", stat, "hist", hist, "data", data,
"type", reader.datatype.name)
if reader.datatype == SampleDataType.float:
# For float data the old writer computes the histogram on the fly
# and may end up with a too wide range. The new reader doesn't do
# this now but it might do so in the future.
# Note that data == stat for float zgy.
assert hist[0] <= data[0] and hist[1] >= data[1]
else:
assert math.isclose(hist[0],data[0]) and math.isclose(hist[1],data[1])
assert reader.histogram.cnt == reader.statistics.cnt
hist = reader.histogram
#print(hist)
_, _, byhand = computeStatisticsByHand(defaultvalue, unwrittenvalue if countdead else None)
#print(byhand)
expect_hist = np.zeros(256, dtype=np.int64)
for value, expect in byhand.items():
slot = findHistogramSlot(value, (hist.min, hist.max))
expect_hist[slot] += expect
for slot in range(256):
actual = hist.bin[slot]
expect = expect_hist[slot]
if actual != expect:
print("histogram value", value, "slot", slot,
"expect", expect, "actual", actual)
#print("actual", hist)
#print("expect", expect_hist)
assert actual == expect
def isReaderOpen(reader):
"""
Return True if the zgy file is open for read.
There isn't a property for that in the API because
typically this is only needed when testing.
"""
tmp = np.zeros((1, 1, 1), dtype=np.float32)
try:
reader.read((0,0,0), tmp)
except (RuntimeError, newzgy.ZgyUserError) as ex:
assert "ot open for" in str(ex)
return False
return True
def checkReadingDeadArea(filename, pos, zgyReaderFactory, expected):
if zgyReaderFactory == oldzgy.ZgyReader and not HasOldZgy(): return
with zgyReaderFactory(filename, iocontext = SDCredentials()) as reader:
tmp = np.full((2, 2, 2), 42, dtype=np.float32)
reader.read(pos, tmp)
#print(list(tmp.flat), "expected", expected)
assert np.all(np.abs(tmp - expected) < 0.001)
def checkReadingOutsideRange(filename, zgyReaderFactory):
if zgyReaderFactory == oldzgy.ZgyReader and not HasOldZgy(): return
with zgyReaderFactory(filename, iocontext = SDCredentials()) as reader:
tmp = np.full((2, 2, 2), 42, dtype=np.float32)
with MustThrow("outside the valid range"):
reader.read((0, 0, 10000), tmp)
with MustThrow("outside the valid range"):
reader.read((0, 0, -9999), tmp)
with MustThrow("outside the valid range"):
reader.readconst((0, 0, 10000), (2, 2, 2))
with MustThrow("outside the valid range"):
reader.readconst((0, 0, -9999), (2, 2, 2))
#with MustThrow("outside the valid range"):
# reader.readconst((0, 0, 0), (1000000, 1000000, 1000000))
def checkReadingOutsideLod(filename, zgyReaderFactory):
if zgyReaderFactory == oldzgy.ZgyReader and not HasOldZgy(): return
with zgyReaderFactory(filename, iocontext = SDCredentials()) as reader:
tmp = np.full((2, 2, 2), 42, dtype=np.float32)
with MustThrow("outside the valid range"):
reader.read((0, 0, 0), tmp, lod=-1)
with MustThrow("outside the valid range"):
reader.read((0, 0, 0), tmp, lod=9)
with MustThrow("outside the valid range"):
reader.readconst((0, 0, 0), (2, 2, 2), lod=-1)
with MustThrow("outside the valid range"):
reader.readconst((0, 0, 0), (2, 2, 2), lod=9)
def checkReadingToWrongValueType(filename, zgyReaderFactory):
"""
This was supposed to cover a test in readToExistingBuffer()
but now the error is caught already in the API layer.
Which is already tested in testBadArgumentsOnReadWrite.
Keeping the test here in case this changes back later.
"""
if zgyReaderFactory == oldzgy.ZgyReader and not HasOldZgy(): return
with zgyReaderFactory(filename, iocontext = SDCredentials()) as reader:
tmp = np.full((2, 2, 2), 42, dtype=np.int16)
#with MustThrow("conversion only supported"):
with MustThrow("array of np.float32 or np.int8"):
reader.read((0, 0, 0), tmp)
def hasSAuthToken():
try:
jwt = json.loads(base64.urlsafe_b64decode(SDCredentials().sdtoken.split(".")[1] + "====").decode("ascii"))
print(json.dumps(jwt, indent=2, sort_keys=True))
timeleft = jwt["exp"] - int(time.time())
print("SAuth token has", timeleft // 60, "minutes to expiry")
return timeleft > 0
except IOError:
# Missing or malformed token, including "FILE:" tokens.
# Unfortunately, impersonation tokens that are still
# good to refresh will also fail here.
return True # optimist.
# ----- Separate tests, but they need testFancy() to create the test files. ----- #
def runCloseOnException(filename, zgyReaderFactory):
"""
Test that the "with" guard is working properly.
On leaving the scope the reader should be closed.
Even if we left via an exception.
"""
class DummyException(Exception):
pass
try:
# If the reader raises an exception in __init__ then "reader"
# remains unassigned. While if we raise an exception ourselves
# it gets caught at the same level but now with "reader" known.
# No big deal as long as we *only* catch the dummy exception.
with zgyReaderFactory(filename, iocontext = SDCredentials()) as reader:
assert isReaderOpen(reader)
raise DummyException("testing...")
except DummyException:
pass
assert not isReaderOpen(reader)
def runErrorOnClose(filename, ZgyReaderFactory):
"""
Only relevant for openzgy. Verify correct behavior when we exit
the context manager due to an exception. For the old zgy wrapper
there is no easy way of forcing an error to be thrown on close,
so while I would like to have tested that one as well, I won't.
"""
# Exception was thrown from inside the block only.
# Make sure the reader was closed. This peeks at internal data.
try:
message = ""
with newzgy.ZgyReader(filename, iocontext = SDCredentials()) as reader:
raise RuntimeError("xyzzy")
except Exception as ex:
message = str(ex)
assert message == "xyzzy"
assert reader._fd is None
# Exception was thrown from the reader's close() method only.
try:
message = ""
with newzgy.ZgyReader(filename, iocontext = SDCredentials()) as reader:
reader._fd.xx_close()
reader._fd = "oops"
except Exception as ex:
message = str(ex)
assert message.find("object has no attribute") >= 0
# Exception was thrown from inside the block, then when handling
# that exception another exception was thrown inside close().
try:
message1 = ""
message2 = ""
with newzgy.ZgyReader(filename, iocontext = SDCredentials()) as reader:
reader._fd.xx_close()
reader._fd = "oops"
raise RuntimeError("xyzzy")
except Exception as ex:
message1 = str(ex)
message2 = str(ex.__cause__ or ex.__context__)
assert message1.find("object has no attribute") >= 0
assert message2 == "xyzzy"
def runConversions(filename, zgyReaderFactory):
"""
Verify that coordinate conversion between index, annot, and world works.
"""
with zgyReaderFactory(filename, iocontext = SDCredentials()) as demo:
#dump("", demo, True)
a = demo.indexToAnnot((3, 7))
i = demo.annotToIndex(a)
#print(a, i)
assert(a == (1249, 5692) and i == (3, 7))
w = demo.indexToWorld((0, 0))
i = demo.worldToIndex(w)
#print(w, i)
assert(w == (1000, 1000) and i == (0, 0))
w = demo.indexToWorld((1, 0))
i = demo.worldToIndex(w)
#print(w, i)
assert(w == (1025, 1000) and i == (1, 0))
w = demo.indexToWorld((0, 1))
i = demo.worldToIndex(w)
#print(w, i)
assert(w == (1000, 1030) and i == (0, 1))
w = demo.indexToWorld((3, 7))
i = demo.worldToIndex(w)
#print(w, i)
assert(w == (1000 + 3*25, 1000 + 7*30) and i == (3, 7))
w = demo.annotToWorld(a)
a = demo.worldToAnnot(w)
#print(w, a)
assert(w == (1000 + 3*25, 1000 + 7*30) and a == (1249, 5692))
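# Illustration only, not called by any test: the asserts above are consistent
# with plain affine mappings. The parameters below (annotation start/increment,
# world origin and spacing) are not taken from createFancyFile() but inferred
# from the expected values, so treat them as assumptions for this sketch.
def _sketchFancyCoordinateMath():
    annotstart, annotinc = (1234, 5678), (5, 2)
    worldorigin, spacing = (1000, 1000), (25, 30)
    def indexToAnnot(ij):
        return (annotstart[0] + ij[0] * annotinc[0],
                annotstart[1] + ij[1] * annotinc[1])
    def indexToWorld(ij):
        # Assumes the world axes are aligned with the index axes, which the
        # expected values above suggest but which is not true in general.
        return (worldorigin[0] + ij[0] * spacing[0],
                worldorigin[1] + ij[1] * spacing[1])
    assert indexToAnnot((3, 7)) == (1249, 5692)
    assert indexToWorld((3, 7)) == (1075, 1210)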
def runErrorIfNotOpenForRead(filename, zgyReaderFactory):
size = (1, 1, 1)
tmp = np.zeros(size, dtype=np.float32)
pos = (0, 0, 0)
with zgyReaderFactory(filename, iocontext = SDCredentials()) as reader:
reader.close()
with MustThrow("ot open for read"):
reader.read(pos, tmp)
if zgyReaderFactory is not oldzgy.ZgyReader:
with MustThrow("ot open for read"):
reader.readconst(pos, size)
def runDumpToDevNull(filename, zgyReaderFactory):
with zgyReaderFactory(filename, iocontext = SDCredentials()) as reader, io.StringIO() as stream:
reader._meta.dumpRaw(file=stream)
# No test on the result, only see that it doesn't crash.
assert len(stream.getvalue()) > 0
def runClone(filename, templatename):
with newzgy.ZgyWriter(filename, iocontext = SDCredentials(), templatename=templatename) as writer:
checkmeta(writer, SampleDataType.int8, (-28,+227))
with newzgy.ZgyReader(filename, iocontext = SDCredentials()) as reader:
checkmeta(reader, SampleDataType.int8, (-28,+227))
def runUpdate(filename):
with newzgy.ZgyWriter(filename, iocontext = SDCredentials(), templatename=filename) as writer:
checkmeta(writer, SampleDataType.int8, (-28,+227))
with newzgy.ZgyReader(filename, iocontext = SDCredentials()) as reader:
checkmeta(reader, SampleDataType.int8, (-28,+227))
def runDumpMembers(filename, templatename):
with newzgy.ZgyWriter(filename, iocontext = SDCredentials(), templatename=templatename) as writer:
#dump("\nZgyWriter contents:", writer, verbose=False)
assert not hasMutableMembers(writer, safe=set(("meta",)), verbose=True)
with newzgy.ZgyReader(filename, iocontext = SDCredentials()) as reader:
#dump("\nZgyReader contents:", reader, verbose=True)
assert not hasMutableMembers(reader, safe=set(("meta",)), verbose=True)
# ----- Separately runnable tests, might need caller to clean up files. ----- #
def testRegisteredCompressors():
#print("Known compressors", ",".join(ZgyKnownCompressors()),
# "decompressors", ",".join(ZgyKnownDecompressors()))
assert "ZFP" in ZgyKnownCompressors()
assert "ZFP" in ZgyKnownDecompressors()
with MustThrow('"XYZZY" not recognized. Must be one of', ZgyMissingFeature):
lossy = ZgyCompressFactory("XYZZY", snr=30)
def testProgressWithDots():
with io.StringIO() as line:
p = ProgressWithDots(length=51, outfile=line)
assert line.getvalue() == ""
p(0, 1000)
assert line.getvalue() == "."
p(1, 1000)
assert line.getvalue() == "."
p(500, 1000)
assert line.getvalue() == "." * 26
p(999, 1000)
assert line.getvalue() == "." * 50
p(1000, 1000)
assert line.getvalue() == "." * 51 + "\n"
def testBadArgumentsOnCreate():
fname = "should-not-exist.zgy"
try:
os.remove(fname)
except FileNotFoundError:
pass
with MustThrow("size must be specified", newzgy.ZgyUserError):
with newzgy.ZgyWriter(fname):
pass
with MustThrow("size must be at least 1", newzgy.ZgyUserError):
with newzgy.ZgyWriter(fname, size=(10,0,20)):
pass
with MustThrow("bricksize must be specified in 3 dimensions", newzgy.ZgyUserError):
with newzgy.ZgyWriter(fname, size=(10,15,20), bricksize=(64,64)):
pass
with MustThrow("bricksize must be >= 4 and a power of 2", newzgy.ZgyUserError):
with newzgy.ZgyWriter(fname, size=(10,15,20), bricksize=(64,64,48)):
pass
with MustThrow("datarange must be specified for integral types", newzgy.ZgyUserError):
with newzgy.ZgyWriter(fname, size=(10,15,20), datatype=SampleDataType.int8):
pass
with MustThrow("datarange must have min < max", newzgy.ZgyUserError):
with newzgy.ZgyWriter(fname, size=(10,15,20), datatype=SampleDataType.int8, datarange=(3,2)):
pass
with MustThrow("datarange must have min < max", newzgy.ZgyUserError):
with newzgy.ZgyWriter(fname, size=(10,15,20), datatype=SampleDataType.int8, datarange=(3,3)):
pass
with MustThrow("datarange must be finite", newzgy.ZgyUserError):
with newzgy.ZgyWriter(fname, size=(10,15,20), datatype=SampleDataType.int8, datarange=(np.nan,np.nan)):
pass
# The consistency checks should be done before actually creating the file.
# Which means that the next call should fail.
with MustThrow(None, FileNotFoundError):
os.remove(fname)
def testBadArgumentsOnReadWrite(filename):
origin = (0, 0, 0)
expect = "Expected a 3d numpy array of np.float32 or np.float32"
with newzgy.ZgyWriter(filename, size=(10,15,20)) as w:
with MustThrow(expect): # no data
w.write(origin, None)
with MustThrow(expect): # not numpy data
w.write(origin, [[[1,1,1]]])
with MustThrow(expect): # wrong data type
w.write(origin, np.array([[[1,1,1]]], dtype=np.int8))
with MustThrow(expect): # wrong number of dimensions
w.write(origin, np.array([1,1,1], dtype=np.float32))
expect = "Expected a writeable 3d numpy array of np.float32 or np.float32"
with newzgy.ZgyReader(filename) as r:
with MustThrow(expect): # no data
r.read(origin, None)
with MustThrow(expect): # not numpy data
r.read(origin, [[[1,1,1]]])
with MustThrow(expect): # wrong data type
r.read(origin, np.array([[[1,1,1]]], dtype=np.int8))
with MustThrow(expect): # wrong number of dimensions
r.read(origin, np.array([1,1,1], dtype=np.float32))
with MustThrow(expect): # buffer not writeable
a = np.array([[[1,1,1]]], dtype=np.float32)
a.setflags(write=False)
r.read(origin, a)
def testAutoDelete():
# It is an error if the expected file is missing.
with MustThrow("", FileNotFoundError):
with LocalFileAutoDelete("xyzzy", silent=True) as fn:
pass
# As above, but if some other error occurred that will have precedence.
with MustThrow("", IndexError):
with LocalFileAutoDelete("xyzzy", silent=True) as fn:
foo = [][1]
# No attempt is made to remove the file if we explicitly disarmed.
with LocalFileAutoDelete("xyzzy") as fn:
assert "/tmp-" in fn.name or "\\tmp-" in fn.name or fn.name[:4] == "tmp-"
fn.disarm()
# Actually try creating the file. Auto cleanup happens.
with LocalFileAutoDelete("xyzzy") as fn:
assert "/tmp-" in fn.name or "\\tmp-" in fn.name or fn.name[:4] == "tmp-"
myname = fn.name
with open(fn.name, "w"):
pass
assert os.path.exists(myname)
assert not os.path.exists(myname)
myname = [None, None]
with ExitStack() as cleanup:
fn1 = LocalFileAutoDelete("one")
myname[0] = fn1.name
cleanup.enter_context(fn1)
with open(fn1.name, "w"):
pass
fn2 = LocalFileAutoDelete("two")
myname[1] = fn2.name
cleanup.enter_context(fn2)
with open(fn2.name, "w"):
pass
assert os.path.exists(myname[0])
assert os.path.exists(myname[1])
assert not os.path.exists(myname[0])
assert not os.path.exists(myname[1])
myname = [None, None]
with MustThrow("", FileNotFoundError):
with ExitStack() as cleanup:
fn1 = LocalFileAutoDelete("one")
myname[0] = fn1.name
cleanup.enter_context(fn1)
with open(fn1.name, "w"):
pass
fn2 = LocalFileAutoDelete("two", silent=True)
myname[1] = fn2.name
cleanup.enter_context(fn2)
# I did not get around to creating the second file.
# This means the fn2 context will raise an exception.
# fn1 should still have been deleted though.
assert not os.path.exists(myname[0])
def testHistogramRangeIsCenterNotEdge(filename):
"""
When the histogram gets generated by the ZGY writer, the range gives
the center value of bin 0 and the center value of bin 255. NOT the
lowest value that maps to bin 0 and the highest value that maps to
bin 255. Which would arguably also make sense. Verify that behavior.
"""
with oldzgy.ZgyWriter(filename, iocontext = SDCredentials(),
size = (64, 64, 64),
datatype = SampleDataType.float,
datarange =(0, 255),
zstart = 0, zinc = 4,
annotstart = (1, 1), annotinc = (1, 1),
corners = ((1000, 1000), (1630, 1000),
(1000, 1630), (1630, 1630))
) as writer:
# With the 0..255 histogram range interpreted as the center of the
# first and last bin, we have the following:
# slot 0 is -0.5..+0.5, slot 2 is 1.5..2.5, slot 5 is 4.5..5.5
# If we instead had a 0..256 histogram range interpreted as the
# extreme edges of the first and last bin, we have this:
# slot 0 is 0..1, slot 2 is 2..3, slot 5 is 5..6, slot 255: 255..256
# That would still be approximately correct at least for the first
# few bins when setting the histogram range to 0..255 instead of
# 0..256. So if the histogram algorithm chooses to use the range
# as the extreme limits (which it is NOT supposed to do),
# 1.8 and 2.2 would end up in different slots. And 4.3 and 4.7
# would end up in the same slot. It should be the other way around.
#
writer.write((0, 0, 0), np.full((1, 10, 10), 1.8, dtype=np.float32))
writer.write((1, 0, 0), np.full((1, 1, 1), 2.2, dtype=np.float32))
writer.write((2, 0, 0), np.full((1, 10, 5), 4.3, dtype=np.float32))
writer.write((3, 0, 0), np.full((1, 1, 2), 4.7, dtype=np.float32))
with newzgy.ZgyReader(filename, iocontext = SDCredentials()) as reader:
#print(reader.histogram)
assert math.isclose(reader.histogram.min, 0.0)
assert math.isclose(reader.histogram.max, 255.0)
assert reader.histogram.bin[2] == 101
assert reader.histogram.bin[4] == 50
assert reader.histogram.bin[5] == 2
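# Illustration only, not called by any test: with the 0..255 histogram range
# interpreted as bin centers, bin k covers [k-0.5, k+0.5), so 1.8 and 2.2 both
# land in bin 2 while 4.3 and 4.7 are split between bins 4 and 5. The counts
# asserted above are then just the region sizes written by the test:
# 1*10*10 + 1*1*1 = 101 in bin 2, 1*10*5 = 50 in bin 4 and 1*1*2 = 2 in bin 5.
def _sketchBinCenterConvention():
    tobin = lambda v: int(np.rint(v)) # 256 one-wide bins centered on 0..255
    assert tobin(1.8) == 2 and tobin(2.2) == 2
    assert tobin(4.3) == 4 and tobin(4.7) == 5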
def testEmptyFile(filename, zgyWriterFactory = newzgy.ZgyWriter, zgyReaderFactory = newzgy.ZgyReader):
"""
Create a file without writing bulk data to it; make sure it is
well behaved both on write and on read back. Ideally test both
on-prem and cloud, and test all 9 combinations of ZGY, OpenZGY/C++,
and OpenZGY/Python readers and writers. With the current test
framework it gets a bit tricky to test the two OpenZGY/C++ vs.
OpenZGY/Python cases. Can I make a test that imports all three?
"""
#print('testEmptyFile("{0}")'.format(filename))
#print(' -> Using ' + showZgy(zgyWriterFactory, zgyReaderFactory))
with zgyWriterFactory(filename,
iocontext = SDCredentials(),
size = (100, 200, 300),
datatype = SampleDataType.float,
datarange = (-1, 1),
zunitdim = UnitDimension.time,
zunitname = "ms",
zunitfactor = 0.001,
hunitdim = UnitDimension.length,
hunitname = "ft",
hunitfactor = 0.3048,
zstart = 2500,
zinc = 4.125,
annotstart = (1234, 5678),
annotinc = (5, 2),
corners = ((1000, 1000),
(1005, 1000),
(1000, 1002),
(1005, 1002))
) as writer:
pass
with zgyReaderFactory(filename, iocontext = SDCredentials()) as reader:
slurp = np.ones(reader.size, dtype=np.float32)
reader.read((0,0,0), slurp)
assert np.count_nonzero(slurp) == 0
if zgyReaderFactory == newzgy.ZgyReader:
assert reader.readconst((0,0,0), reader.size) == 0
def testEmptyExistingFile(filename, zgyReaderFactory = newzgy.ZgyReader):
"""
Access a file that has already been created by the old ZGY accessor
with no written bricks and an invalid coding range.
To create, use the old ZGY-Public Python wrapper:
with zgy.ZgyWriter("OldEmpty2.zgy", size=(512, 640, 1000),
datarange=(101,101), datatype="int16") as w: pass
Can leave the file locally, or upload with ZGY, or with sdutil.
Currently the latter is the most interesting case to test.
"""
#print('testEmptyExistingFile("{0}")'.format(filename))
#print(' -> Using ' + showZgy(zgyReaderFactory))
with zgyReaderFactory(filename, iocontext = SDCredentials()) as reader:
if zgyReaderFactory == oldzgy.ZgyReader:
slurp = np.ones(reader.size, dtype=np.float32)
reader.read((0,0,0), slurp)
value = slurp[0,0,0] if np.all(slurp.flat == slurp[0,0,0]) else None
else:
value = reader.readconst((0,0,0), reader.size, as_float=True)
#print(" -> VALUE", value, "RANGE", reader.datarange)
# In spite of the 101..101 coding range, the file will contain
# all zeros. In the new accessor the coding range is rejected
# as bad, no conversion is done, so empty bricks read as zero.
# In the old accessor there is a "feature" that causes empty
# bricks to read as zero regardless of whether the caller wants conversion.
assert value == 0
def testRmwFile(filename, zgyWriterFactory = newzgy.ZgyWriter):
"""
The layout of this test data is described in detail in doc/testdata-rmw.png.
"""
rmwsize = (((0,0,0), (304,64,384)), # Survey size.
((0,0,192), (304,64,384)), # Half the survey set to constant "1".
((28,0,84), (144,64,304)), # Touches 12 bricks.
((40,0,100), (160,64,288)), # Touches 12 bricks.
((204,0,0), (216,64,384)), # Tall, thin, to fill up this segment.
((52,0,120), (176,64,272)), # Touches 12 bricks.
((256,0,0), (304,64,352)), # Constant-value at survey edge.
((0,0,256), (64,64,320))) # Normal brick changed to constant.
surveysize = rmwsize[0][1]
expect = np.zeros(surveysize, dtype=np.float32)
partnum = 0
for part in rmwsize[1:]:
partnum += 1
beg, end = part
#print("part", part, "beg", beg, "end", end)
expect[beg[0]:end[0],beg[1]:end[1],beg[2]:end[2]] = partnum
with zgyWriterFactory(filename,
iocontext = SDCredentials(segsize=11/4),
size = surveysize,
datatype = SampleDataType.int8,
datarange = (-28,+227),
zunitdim = UnitDimension.time,
zunitname = "ms",
zunitfactor = 0.001,
hunitdim = UnitDimension.length,
hunitname = "ft",
hunitfactor = 0.3048,
zstart = 2500,
zinc = 4.125,
annotstart = (1234, 5678),
annotinc = (5, 2),
corners = ((1000, 1000),
(1005, 1000),
(1000, 1002),
(1005, 1002))
) as writer:
partnum = 0
sizes = [(0,)]
for part in rmwsize[1:]:
partnum += 1
beg, end = part
size = (end[0]-beg[0], end[1]-beg[1], end[2]-beg[2])
#print("part", part, "beg", beg, "end", end, "size", size)
if partnum == 1:
# Just doing this to exercise both the write functions.
data = np.full(size, partnum, dtype=np.float32)
writer.write(beg, data)
else:
data = np.float32(partnum)
writer.writeconst(beg, data, size=size, is_storage=False)
if filename[:5] == "sd://":
closed_sizes = tuple(writer._fd._relay._sizes)
opened_sizes = tuple([len(writer._fd._open_segment)])
sizes.append(closed_sizes + opened_sizes)
else:
sizes.append((writer._fd.xx_eof,))
#print(sizes)
sizes_in_bricks = []
for e in sizes:
    assert all((bytecount % 64) == 0 for bytecount in e)
    sizes_in_bricks.append(tuple(np.array(e, dtype=np.int64) // (256*1024)))
# The expected results have been computed by hand.
# See testdata-rmw.svg for a detailed explanation with figures.
#print(sizes_in_bricks)
local = filename[:5] != "sd://"
assert sizes_in_bricks[1] == (( 1,) if local else (1, 0))
assert sizes_in_bricks[2] == ((11,) if local else (1, 10))
assert sizes_in_bricks[3] == ((11,) if local else (1, 10))
assert sizes_in_bricks[4] == ((17,) if local else (1, 11, 5))
assert sizes_in_bricks[5] == ((17,) if local else (1, 11, 11, 4))
assert sizes_in_bricks[6] == ((18,) if local else (1, 11, 11, 5))
assert sizes_in_bricks[7] == ((18,) if local else (1, 11, 11, 6))
with newzgy.ZgyReader(filename, iocontext = SDCredentials()) as reader:
# Read the entire survey, excluding padding bytes, in a single
# operation. Compare with the survey built in memory.
slurp = np.zeros(reader.size, dtype=np.float32)
reader.read((0,0,0), slurp)
assert np.all(slurp == expect)
# Check each brick for whether it takes up space in the file or
# is flagged as constant value. The expected result is explained
# in the textual and image descriptions of the test data.
is_const = np.zeros((5, 6), dtype=np.float32)
for ii in range(0, 320, 64):
for kk in range(0, 384, 64):
c = reader.readconst((ii, 0, kk), (64, 64, 64))
is_const[ii//64, kk//64] = -1 if c is None else c
expect_const = np.array([[0, -1, -1, -1, -1, 1],
[0, -1, 5, 5, -1, 1],
[0, -1, -1, -1, -1, 1],
[-1, -1, -1, -1, -1, -1],
[6, 6, 6, 6, 6, -1]], dtype=np.float32)
assert np.all(is_const == expect_const)
def testNoRmwInCompressedFile(filename):
lossy = ZgyCompressFactory("ZFP", snr=30)
with newzgy.ZgyWriter(filename, iocontext = SDCredentials(), size=(100, 64, 64), compressor=lossy) as w:
# Writing a constant value should not prevent overwriting later.
w.writeconst((0,0,0), value=42, size=w.size, is_storage=False)
# Write part of a brick for the first time.
data = np.arange(50*64*64, dtype=np.float32).reshape((50, 64, 64))
w.write((0,0,0), data)
# Write needing to update the first brick.
with MustThrow("Updating a local BrickStatus.Compressed brick with Compressed data is illegal"):
w.write((50,0,0), data)
# The above error might have set the global _is_bad flag, in spite of
# this being a recoverable user error. But it probably doesn't
# matter much either way.
w.errorflag = False
# Write entire survey. This is an update, but no read/modify/write.
# The old brick will be leaked if new one compresses larger.
data = np.arange(100*64*64, dtype=np.float32).reshape((100, 64, 64))
with MustThrow("Updating a local BrickStatus.Compressed brick with Compressed data is illegal"):
w.write((0,0,0), data)
w.errorflag = False
# This should actually have been set when we opened the file,
# but that feature isn't implemented yet. Besides, for the purpose
# of this test I need to change it while the file is in use.
w._accessor._update_mode = UpdateMode.Pedantic
w.write((0,0,0), data)
def testFatalErrorFlag(filename):
class BogusFile:
def close(self): pass
with newzgy.ZgyWriter(filename, iocontext = SDCredentials(), size=(100, 64, 64)) as w:
data = np.arange(64*64*64, dtype=np.float32).reshape(64, 64, 64)
w.write((0,0,0), data)
w.write((0,0,0), data)
hack = w._accessor._file._file
w._accessor._file._file = BogusFile()
with MustThrow("BogusFile", AttributeError):
w.write((0,0,0), data)
w._accessor._file._file = hack
# File is now usable again, but the global error flag is set.
with MustThrow("previous errors"):
w.write((0,0,0), data)
# Explicitly reset it and we should be good.
w.errorflag = False
w.write((0,0,0), data)
# Another bad write
w._accessor._file._file = BogusFile()
with MustThrow("BogusFile", AttributeError):
w.write((0,0,0), data)
# Verify that lod generation and meta flush is either
# turned off or is ignoring errors. The final close()
# of the python file descriptor will not throw because
# BogusFile wraps close().
w.close()
hack.close()
def testLargeSparseFile(filename, zgyWriterFactory, zgyReaderFactory):
size = (5000, 6000, 1000)
wbeg = (1000, 9000)
wend = (wbeg[0] + 10 * (size[0]-1), wbeg[1] + 10 * (size[1]-1))
if zgyWriterFactory:
with zgyWriterFactory(filename,
iocontext = SDCredentials(),
size = size,
datatype = SampleDataType.int8,
datarange = (-28,+227),
zunitdim = UnitDimension.time,
zunitname = "ms",
zunitfactor = 0.001,
hunitdim = UnitDimension.length,
hunitname = "ft",
hunitfactor = 0.3048,
zstart = 2500,
zinc = 4.125,
annotstart = (1234, 5678),
annotinc = (5, 2),
corners = ((wbeg[0], wbeg[1]),
(wend[0], wbeg[1]),
(wbeg[0], wend[1]),
(wend[0], wend[1]))) as writer:
writer.write((size[0]-1, size[1]-1, 0), np.array([[[42, 10, 10]]], dtype=np.int8))
writer.finalize(progress=ProgressWithDots(), decimation=[DecimationType.Maximum])
if zgyReaderFactory:
with zgyReaderFactory(filename, iocontext = SDCredentials()) as reader:
assert reader.size == size
data = np.zeros((1,1,4), dtype=np.int8)
pos = np.array((size[0]-1, size[1]-1, 0), dtype=np.int64)
reader.read(pos, data, lod=0)
assert tuple(data.flat) == (42, 10, 10, -100)
reader.read(pos//2, data, lod=1)
assert tuple(data.flat) == (42, 10, -100, -100)
for lod in range(2,8):
reader.read(pos//(1<<lod), data, lod=lod)
assert tuple(data.flat) == (42, -100, -100, -100)
def testNaan(filename, snr = -1):
compressor = ZgyCompressFactory("ZFP", snr = snr) if snr > 0 else None
with newzgy.ZgyWriter(filename,
compressor = compressor,
iocontext = SDCredentials(),
size = (256, 128, 128),
datatype = SampleDataType.float) as writer:
data = np.zeros((64, 64, 64), dtype=np.float32)
count_nan = 0
count_inf = 0
counts = np.zeros(256, dtype=np.int32)
# Some NaN, a few other different values, mostly zero.
data.fill(0)
data[0,0,:3] = np.nan
data[0,0,3] = 2
data[0,0,4] = 3
writer.write((0,0,0), data)
count_nan += 3
counts[2] += 1
counts[3] += 1
# Some NaN, only one other value (42)
data.fill(42)
data[0,0,:5] = np.nan
writer.write((64,0,0), data)
count_nan += 5
counts[42] += (64*64*64) - 5
# NaN only
data.fill(np.nan)
writer.write((128,0,0), data)
count_nan += (64*64*64)
# NaN explicitly written as constant value
writer.writeconst((192, 0, 0), np.nan, (64, 64, 64), is_storage=False)
count_nan += (64*64*64)
# Now repeat for +/- inf
# Some Inf, a few other different values. Mostly zero.
data.fill(0)
data[0,0,0] = np.inf
data[0,0,1] = -np.inf
data[0,0,2] = np.inf
data[0,0,3] = 3
data[0,0,4] = 4
writer.write((0,64,0), data)
count_inf += 3
counts[3] += 1
counts[4] += 1
# Some Inf, only one other value (255).
data.fill(255)
data[0,0,:13] = np.inf
data[0,1,:10] = -np.inf
writer.write((64,64,0), data)
count_inf += 23
counts[255] = (64*64*64) - 23
# +Inf only
data.fill(np.inf) # 64^3 Inf
writer.write((128,64,0), data)
count_inf += (64*64*64)
# -Inf explicitly written as constant value
writer.writeconst((192, 64, 0), -np.inf, (64, 64, 64), is_storage=False)
count_inf += (64*64*64)
counts[0] = 256*128*128 - np.sum(counts[1:]) - count_nan - count_inf
writer.finalize(decimation = [DecimationType.Average])
# Exercise logging & debug code in the compression module.
# Discard the output. Yes, this is a shameless trick to
# increase coverage. But in Python a test that only checks
# that a function is callable is in fact somewhat useful.
if compressor is not None:
with io.StringIO() as devnull:
compressor.dump(msg=None, outfile=devnull,
text=True, csv=True, reset=True)
with newzgy.ZgyReader(filename, iocontext = SDCredentials()) as reader:
# --- statistics and histogram ---
#print(reader.statistics)
#print(reader.histogram)
#print(list(counts))
#print("Expect total size", 256*128*128,
# "nan", count_nan,
# "inf", count_inf,
# "valid", 256*128*128 - count_nan - count_inf)
#print("Got valid",
# "stats", reader.statistics.cnt,
# "histo", reader.histogram.cnt,
# "sampl", np.sum(reader.histogram.bin))
# Limits are set automatically to the value range. I carefully
# chose 0..255 since the histogram then has one bin per sample value.
assert reader.histogram.min == 0 and reader.histogram.max == 255
h = reader.histogram.bin
for i in range(256):
if counts[i] != h[i]:
print("Histogram bin", i, "expected", counts[i], "actual", h[i])
assert reader.statistics.cnt == 256*128*128 - count_nan - count_inf
assert reader.histogram.cnt == 256*128*128 - count_nan - count_inf
assert np.all(np.array(reader.histogram.bin) == counts)
#assert reader.statistics.inf == count_nan + count_inf # not in api
# --- bricks stored as all-constant or not ---
BRICK = (64, 64, 64)
assert reader.readconst((0,0,0), BRICK) is None
assert reader.readconst((64,0,0), BRICK) is None
assert np.isnan(reader.readconst((128,0,0), BRICK))
assert np.isnan(reader.readconst((192,0,0), BRICK))
assert reader.readconst((0,64,0), BRICK) is None
assert reader.readconst((64,64,0), BRICK) is None
assert reader.readconst((128,64,0), BRICK) == np.inf
assert reader.readconst((192,64,0), BRICK) == -np.inf
# -- read back samples ---
reader.read((0,0,0), data)
assert np.all(np.isnan(data[0,0,:3]))
assert data[0,0,3] == 2
assert data[0,0,4] == 3
assert np.count_nonzero(data) == 5
reader.read((64,0,0), data)
assert np.all(np.isnan(data[0,0,:5]))
assert np.count_nonzero(data == 42) == 64*64*64 - 5
reader.read((0,64,0), data)
assert data[0,0,0] == np.inf
assert data[0,0,1] == -np.inf
assert data[0,0,2] == np.inf
assert data[0,0,3] == 3
assert data[0,0,4] == 4
assert np.count_nonzero(data) == 5
reader.read((64,64,0), data)
assert np.all(data[0,0,:13] == np.inf)
assert np.all(data[0,1,:10] == -np.inf)
assert np.count_nonzero(data == 255) == 64*64*64 - 13 - 10
# --- read back low resolution ---
# LOD1 should be sufficient to test.
# Note that this only tests a single decimation algorithm
# and the functions that call it. There needs to be separate
# unit tests to verify that all decimation algorithms have a
# reasonable behavior for nan and inf.
fullres = np.zeros((128, 128, 128), dtype=np.float32)
reader.read((0,0,0), fullres, lod=0)
reader.read((0,0,0), data, lod=1)
# Input first trace: nan, nan, nan, 2, 3
# An extra slop factor is needed because calculation done in float32.
assert math.isclose(data[0,0,0], 0, rel_tol=1.0e-5) # 2 NaN (skipped), the rest zero.
assert math.isclose(data[0,0,1], 2/7, rel_tol=1.0e-5) # 1 NaN (skipped), 1 "2", rest "0"
assert math.isclose(data[0,0,2], 3/8, rel_tol=1.0e-5) # one "3", rest default to zero
# Input trace: 5*nan, rest is 42. With "Average" decimation
# each output sample found at least one finite value.
assert np.all(data[32:64, 0:32, 0:32] == 42)
# Input trace: +inf, -inf, +inf, 3, 4. All others 0.
# Note: The C++ code skips +/- inf. Numpy includes them unless
# told otherwise, and the average of +inf and -inf is NaN.
# These rules are pretty obscure and it is probably easier to
# TODO-Low adopt the C++ strategy both places.
#showdecimation(fullres[0:2,64:66,0:20], data[0,32,0:10])
assert np.isnan(data[0,32,0])
assert data[0,32,1] == np.inf
assert math.isclose(data[0,32,2], 4/8, rel_tol=1.0e-5) # one "4", rest default to zero
# Input trace: 13 * +inf in one trace, 10 * -inf in another.
# So the first 5 samples have average(-inf,+inf) => nan
# the next 2 samples have average(255,+inf) => +inf
# Everything else should be 255.
# UPDATE: In the C++ version (and soon also Python)
# +/- inf is ignored so all decimated samples are 255.
#showdecimation(fullres[64:66,64:66,0:20], data[32,32,0:10])
assert np.all(np.isnan(data[32,32,:5]))
assert data[32,32,5] == np.inf
assert data[32,32,6] == np.inf
assert data[32,32,7] == 255
# Now read the brick built from all-constant input.
reader.read((64,0,0), data, lod=1)
d1 = data[:32,:32,:32] # from data written at (128,0,0)
d2 = data[32:,:32,:32] # from data written at (192,0,0)
d3 = data[:32,32:,:32] # from data written at (128,64,0)
d4 = data[32:,32:,:32] # from data written at (192,64,0)
assert np.all(np.isnan(d1))
assert np.all(np.isnan(d2))
assert np.all(d3 == np.inf)
assert np.all(d4 == -np.inf)
def testWriteNaanToIntegerStorage(filename):
with newzgy.ZgyWriter(filename,
size = (256, 128, 128),
iocontext = SDCredentials(),
datatype = SampleDataType.int8,
datarange = (-128,+127)
) as writer:
data = np.zeros((64, 64, 64), dtype=np.float32)
data[0,0,42] = np.nan
writer.write((0,0,0), data)
def testZeroCentric(filename):
"""
Specific test for the zero-centric property. When the hard-coded
(in this test) datarange is zero-centric, the rounding makes
an equal number of small positive and small negative numbers
end up being returned as zero after a roundtrip.
"""
data = np.array([[[
-1.4, -1.2, -1.0, -0.8, -0.6,
-0.4, -0.2, +0.0, +0.2, +0.4,
+0.6, +0.8, +1.0, +1.2, +1.4,
100.0, 200.0]]], dtype=np.float32)
expect = np.array([[[
-1, -1, -1, -1, -1,
0, 0, 0, 0, 0,
1, 1, 1, 1, 1,
100, 200]]], dtype=np.float32)
with newzgy.ZgyWriter(filename,
iocontext = SDCredentials(),
size = (64, 64, 64),
datatype = SampleDataType.int8,
datarange = (-28,+227),
) as writer:
writer.write((0,0,0), data)
with newzgy.ZgyReader(filename, iocontext = SDCredentials()) as reader:
actual = np.zeros((1, 1, expect.size), dtype=np.float32)
reader.read((0,0,0), actual)
assert np.all(np.isclose(expect, actual))
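# Illustration only, not called by any test: the codingrange (-28,+227) maps
# onto int8 storage with the same linear formula used in checkRawContents():
# a = (227-(-28))/255 = 1 and b = -28 - a*(-128) = 100. User value 0 then maps
# to the exactly representable storage value -100, which is what "zero-centric"
# means, so small values round symmetrically around zero as asserted above.
def _sketchZeroCentricMapping():
    a = (227.0 - (-28.0)) / (127 - (-128))
    b = -28.0 - a * (-128)
    assert a == 1.0 and b == 100.0
    roundtrip = lambda user: a * np.rint((user - b) / a) + b
    assert roundtrip(-0.4) == 0 and roundtrip(+0.4) == 0
    assert roundtrip(-0.6) == -1 and roundtrip(+0.6) == +1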
def testFinalizeProgress(filename, abort = False):
"""
Check the progress callback that can be installed while generating
low resolution bricks. Optionally check that the callback can be
used to abort the generation.
"""
class Progress:
def __init__(self, abort = False):
self._abort = abort
self._complete = False
self._waszero = False
def __call__(self, done, total):
self._complete = bool(done == total)
self._waszero = self._waszero or done == 0
#print("done {0}/{1}".format(done, total))
return not abort or done < total//4
with newzgy.ZgyWriter(filename,
iocontext = SDCredentials(),
size = (112+640, 64+320, 176),
) as writer:
writer.write((16,16,16), np.full((40,41,42), 31, dtype=np.float32))
writer.write((48,20,24), np.full((72,10,16), 97, dtype=np.float32))
writer.write((0,0,64), np.full((112,64,64), 0, dtype=np.float32))
writer.write((512,0,0), np.full((128,128,64), 42, dtype=np.float32))
progress = Progress(abort)
if abort:
# The progress callback will return False on 25% done.
with MustThrow(extypes = newzgy.ZgyAborted):
writer.finalize(progress=progress)
assert progress._waszero
assert not progress._complete
else:
writer.finalize(progress=progress)
assert progress._waszero
assert progress._complete
def testHugeFile(filename):
"""
Create a very sparse file where the declared size is large enough
to make the header area > 1 MB. This can trigger some issues.
Number of bricks:
Lod 0: 64*64*32 bricks = 131072
Lod 1: 32*32*16 bricks = 16384
Lod 2: 16*16*8 bricks = 2048
Lod 3: 8*8*4 bricks = 256
Lod 4: 4*4*2 bricks = 32
Lod 5: 2*2*1 bricks = 4
Lod 6: 1*1*1 brick = 1
SUM: 149797 bricks, 1.14 MB of brick lookup tables
Rounded up to brick size there will be 1.25 MB of headers.
Non-constant bricks: Only one per layer. 1.75 MB total
Total file size: 3 MB.
"""
with newzgy.ZgyWriter(filename,
iocontext = SDCredentials(),
datatype = SampleDataType.int8,
datarange = (-128,+127),
size = (64*64, 64*64, 32*64),
) as writer:
writer.write((640,512,0), np.full((64,64,65), 42, dtype=np.float32))
#writer.finalize(progress=ProgressWithDots())
with newzgy.ZgyReader(filename, iocontext = SDCredentials()) as reader:
assert reader.nlods == 7
c1 = reader.readconst((640,512,0), (64,64,64))
c2 = reader.readconst((640,512,64), (64,64,64))
c3 = reader.readconst((640,512,129), (64,64,64))
assert c1 == 42 # writer detected it was constant
assert c2 is None # partly written
assert c3 == 0 # never written
assert os.stat(filename).st_size == 3 * 1024 * 1024
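# Illustration only, not called by any test: the 3 MB figure asserted above can
# be reproduced from the brick counts in the docstring. The 8 bytes per lookup
# entry is an assumption made here just for the sketch; it is consistent with
# the "1.14 MB of brick lookup tables" figure, and the remaining fixed-size
# headers are assumed to fit inside the rounding slack.
def _sketchHugeFileSizeBudget():
    brick = 64 * 64 * 64                 # bytes per int8 brick, i.e. 256 KB
    lods = [64*64*32, 32*32*16, 16*16*8, 8*8*4, 4*4*2, 2*2*1, 1]
    assert sum(lods) == 149797
    lut = 8 * sum(lods)                  # assumed lookup table size in bytes
    header_bricks = -(-lut // brick)     # headers rounded up to whole bricks
    data_bricks = len(lods)              # one non-constant brick per lod
    assert (header_bricks + data_bricks) * brick == 3 * 1024 * 1024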
def testDecimateOddSize(filename):
"""
At the survey edge, the decimation that normally has 8 samples input
might only have 4, 2, or 1. Make sure the code doesn't include
the padding in its computation.
"""
with newzgy.ZgyWriter(filename, iocontext = SDCredentials(),
size = (7, 13, 64+17)
) as writer:
data = np.full(writer.size, 200, dtype=np.float32)
data[0::2,:,:] = 100
data[:,0::2,:] = 50
assert np.all(data[:,:,:] == data[:,:,0:1])
writer.write((0,0,0), data)
writer.finalize(decimation = [DecimationType.Average])
with newzgy.ZgyReader(filename, iocontext = SDCredentials()) as reader:
assert reader.nlods == 2
data = np.zeros((4, 7, 32+9), dtype=np.float32)
reader.read((0,0,0), data, lod=1)
# Within each trace all samples should be the same, also
# the last one, since this is true also for the input.
assert np.all(data[:,:,:] == data[:,:,0:1])
# Most output values will be avg(200, 100, 50, 50) = 100.
# At the edges in i/j it should be average(50, 100) or (50,50).
# At the corner expect average(50) i.e. 50.
# If the implementation erroneously tried to read the
# padding (which ought to be zero) the numbers will be lower.
# Currently in OpenZGY/C++ the samples not based on 8 neighbors
# might be set to 0.
assert np.all(data[:3, :6, :] == 100)
assert np.all(data[:3, 6, :] == 50)
assert np.all(data[3, :6, :] == 75)
assert np.all(data[3, 6, :] == 50)
def testDecimateWeightedAverage(filename):
"""
As test.lodalgo.testSpecial but very simplified, just to make sure
the default lod2 algorithm is in fact WeightedAverage. The lod1
default is LowPass; to avoid this getting in the way I will
make all traces constant-value. This makes LowPass behave as
Decimate (or Average, or Median, etc.)
"""
with newzgy.ZgyWriter(filename, iocontext = SDCredentials(),
size = (64, 256, 512)
) as writer:
data = np.zeros((64, 64, 512), dtype=np.float32)
# 1/4 brick of 300, 3/4 brick of 100, 3 bricks of unwritten 0.
data[:16,:,:] = 300
data[16:,:,:] = 100
tiny = np.array([[300, 300, 0, 0],
[300, 300, 0, 0],
[0, 0, 100, 100],
[0, 0, 100, 100]], dtype=np.float32)
# In lod 1 this will be just 300, 0, 0, 100
tiny = tiny.reshape((4,4,1))
data[:4,:4,:] = tiny
assert np.all(data[:,:,:] == data[:,:,0:1])
writer.write((0,0,0), data)
#writer.finalize(decimation = [DecimationType.Average])
with newzgy.ZgyReader(filename, iocontext = SDCredentials()) as reader:
assert reader.nlods >= 3
# Checking the lowpass output, including the fact that it is
# supposed to have zero DC bias.
data = np.zeros((2, 2, 256), dtype=np.float32)
reader.read((0,0,0), data, lod=1)
#print(data[:,:,0])
assert np.all(np.isclose(data[0,0,:], 300))
assert np.all(np.isclose(data[0,1,:], 0))
assert np.all(np.isclose(data[1,0,:], 0))
assert np.all(np.isclose(data[1,1,:], 100))
data = np.zeros((1, 1, 1), dtype=np.float32)
reader.read((0,0,0), data, lod=2)
# average(300, 0, 0, 100) is 100 but we expect something closer to
# 300 since this value is relatively more scarce.
#print(data)
assert data.flat[0] > 200
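# Illustration only, not called by any test: one plausible reading of
# "WeightedAverage" is to weight each input sample by the reciprocal of how
# often its value occurs in the survey, so scarce values are not drowned out.
# The exact weights used by OpenZGY may differ; this sketch only shows why the
# lod2 sample is expected to land well above the plain average of 100.
def _sketchInverseFrequencyAverage(values, frequencies):
    weights = [1.0 / frequencies[v] for v in values]
    return sum(w * v for w, v in zip(weights, values)) / sum(weights)
# Example: if 300 were 100 times scarcer than 0 and 100, then
# _sketchInverseFrequencyAverage([300, 0, 0, 100], {300: 1, 0: 100, 100: 100})
# evaluates to about 292, far closer to 300 than the unweighted average.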
def testMixingUserAndStorage(filename):
"""
When the file has an integer type both reading and writing can be done
either in float user sample values or in integral storage values.
Try all 4 combinations.
"""
with newzgy.ZgyWriter(filename, iocontext = SDCredentials(),
datatype = SampleDataType.int8, datarange = (-2,+763),
size = (64, 64, 512)
) as writer:
# user = 3*storage + 382
# storage = (user - 382) / 3
# user 3 -> storage -126.33 -> -126 -> user 4
# user 12 -> storage -123.33 -> -123 -> user 13
# user 40 -> storage -114
# user 71 -> storage -103.66 -> -104 -> user 70
w1 = np.zeros((64, 64, 64), dtype=np.float32)
w2 = np.zeros((64, 64, 64), dtype=np.float32)
w3 = np.zeros((64, 64, 64), dtype=np.int8)
w4 = np.zeros((64, 64, 64), dtype=np.int8)
w1[0,0,0] = 3.0 # user 4 <-> storage -126
w2[0,0,0] = 12.0 # user 13 <-> storage -123
w3[0,0,0] = -114 # user 40 <-> storage -114
w4[0,0,0] = -104 # user 70 <-> storage -104
writer.write((0,0,0), w1)
writer.write((0,0,64), w2)
writer.write((0,0,128), w3)
writer.write((0,0,192), w4)
with newzgy.ZgyReader(filename, iocontext = SDCredentials()) as reader:
r1 = np.zeros((1, 1, 1), dtype=np.float32)
r2 = np.zeros((1, 1, 1), dtype=np.int8)
r3 = np.zeros((1, 1, 1), dtype=np.float32)
r4 = np.zeros((1, 1, 1), dtype=np.int8)
reader.read((0,0,0), r1)
reader.read((0,0,64), r2)
reader.read((0,0,128), r3)
reader.read((0,0,192), r4)
#print("expect", 4.0, -123, 40.0, -114)
#print("actual", r1.flat[0], r2.flat[0], r3.flat[0], r4.flat[0])
assert np.isclose(r1.flat[0], 4.0)
assert r2.flat[0] == -123
assert np.isclose(r3.flat[0], 40.0)
assert r4.flat[0] == -104
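# Illustration only, not called by any test: the "user = 3*storage + 382" rule
# quoted in the comments above follows from the int8 codingrange (-2,+763)
# using the same linear formula as checkRawContents().
def _sketchUserStorageScaling():
    lo, hi = -2.0, +763.0
    a = (hi - lo) / (127 - (-128))       # slope: 765/255 == 3
    b = lo - a * (-128)                  # offset: -2 + 384 == 382
    assert a == 3.0 and b == 382.0
    to_storage = lambda user: int(np.rint((user - b) / a))
    to_user = lambda storage: a * storage + b
    assert to_storage(3.0) == -126 and to_user(-126) == 4.0
    assert to_storage(12.0) == -123 and to_user(-123) == 13.0
    assert to_storage(40.0) == -114 and to_user(-114) == 40.0
    assert to_storage(71.0) == -104 and to_user(-104) == 70.0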
def testSmallConstArea(filename):
"""
Check what happens when writeconst() is called with a region
smaller than one brick. Application code might well specify
a region that doesn't align with the bricks. Actually writing
less than a brick in total would be odd, but the corner cases
that need to be handled are similar.
"""
with newzgy.ZgyWriter(filename, iocontext = SDCredentials(),
datatype = SampleDataType.int8, datarange = (-128,+127),
size = (64, 64, 256)
) as writer:
writer.writeconst((0,0,128), 42, size=(64,64,128), is_storage=True)
# unwritten brick, value matches defaultvalue -> mark as const
# unwritten brick, value does not match default -> inflate
# const brick, value matches previous brick -> no-op
# const brick, value differs -> inflate
writer.writeconst((1,2,3+0), 0, size=(11,12,13), is_storage=True)
writer.writeconst((1,2,3+64), 15, size=(11,12,13), is_storage=True)
writer.writeconst((1,2,3+128), 42, size=(11,12,13), is_storage=True)
writer.writeconst((1,2,3+192), 67, size=(11,12,13), is_storage=True)
with newzgy.ZgyReader(filename, iocontext = SDCredentials()) as reader:
BRICK = (64,64,64)
r1 = reader.readconst((0,0,0), BRICK, as_float = False)
r2 = reader.readconst((0,0,64), BRICK, as_float = False)
r3 = reader.readconst((0,0,128), BRICK, as_float = False)
r4 = reader.readconst((0,0,192), BRICK, as_float = False)
#print("testSmallConstArea:", r1, r2, r3, r4)
assert r1 == 0 # Was converted from "unwritten" to "const zero"
assert r2 is None # Brick now contains a mix of 0 and 15.
assert r3 == 42 # No-op; the brick already contained const 42.
assert r4 is None # Brick now contains a mix of 42 and 67.
onevalue_t = namedtuple("result", "range stats histo stats_count histo_count bins")
def testHistoOneValue(filename, dtype, value, fill, *, datarange = None, verbose = False):
if verbose:
print("Test dtype", dtype, "value", value,
("only" if fill else "and unwritten bricks"))
center = value if np.isfinite(value) else -0.25
with newzgy.ZgyWriter(filename, iocontext = SDCredentials(),
size = (64, 64, 3*64),
datatype = dtype,
datarange = datarange or (center-1, center+1)
) as writer:
if np.isfinite(value):
writer.writeconst((0, 0, 0), value,
size=(64, 64, 64), is_storage=False)
if fill:
writer.writeconst((0, 0, 64), value,
size=(64, 64, 128), is_storage=False)
writer.finalize(force=True)
with newzgy.ZgyReader(filename, iocontext = SDCredentials()) as reader:
if verbose:
print("Data range", reader.datarange)
print("Statistics", reader.statistics)
print("Histogram ", (reader.histogram.min, reader.histogram.max))
return onevalue_t((reader.datarange[0], reader.datarange[1]),
(reader.statistics.min, reader.statistics.max),
(reader.histogram.min, reader.histogram.max),
reader.statistics.cnt,
np.sum(reader.histogram.bin),
reader.histogram.bin)
def testHistoCornercaseFloat(filename):
# Float: datarange with zero size is valid on input,
# in fact the data range isn't specified by the user.
# Reading back data gives the statistical range
# which for float may include defaultvalue.
# The histogram will use the fuzzy algorithm.
# The numbers in brackets correspond to the ones in
# GenLodImpl::suggestHistogramRange().
# [3] nothing written.
# Note that the writer might need to pass force=True to finalize()
# to get the histogram- and statistics information written out even
# when no actual data has been written. I am unsure about how the
# principle of least surprise applies here. As of Oct 2020 the force
# is required. See the ZgyWriter constructor setting _dirty(False).
BRICK = 64*64*64
r = testHistoOneValue(filename, SampleDataType.float, np.nan, False)
assert r.range == r.stats
assert r.histo_count == r.stats_count
assert r.stats == (0, 0)
assert r.histo == (-128, +127)
assert r.stats_count == 3*BRICK # Assuming finalize with force=True
assert r.bins[128] == r.histo_count
# [4] one all zero brick, two never written.
# Expected result same as for nothing written.
r = testHistoOneValue(filename, SampleDataType.float, 0, False)
assert r.range == r.stats
assert r.histo_count == r.stats_count
assert r.stats == (0, 0)
assert r.histo == (-128, +127)
assert r.stats_count == 3*BRICK
assert r.bins[128] == r.histo_count
# [4] three all zero bricks.
# Expected result same as for nothing written.
r = testHistoOneValue(filename, SampleDataType.float, 0, True)
assert r.range == r.stats
assert r.histo_count == r.stats_count
assert r.stats == (0, 0)
assert r.histo == (-128, +127)
assert r.stats_count == 3*BRICK
assert r.bins[128] == r.histo_count
# [6] single negative value, plus two never written bricks.
# The statistics and histogram include the never-written
# samples as if they were zero.
# Note: I won't be testing the "some never written" scenario
# for every remaining case; it is hopefully enough to
# confirm once that never-written is treated as written-zero.
r = testHistoOneValue(filename, SampleDataType.float, -42, False)
assert r.range == r.stats
assert r.histo_count == r.stats_count
assert r.stats == (-42, 0)
assert r.histo == (-42, 0)
assert r.stats_count == 3*BRICK
assert r.bins[0] == BRICK
assert r.bins[255] == 2*BRICK
# [6] single negative value in all three bricks.
# The value range and the statistics should have the true
# range i.e. low==high and the histogram range should be wider.
r = testHistoOneValue(filename, SampleDataType.float, -42, True)
assert r.range == r.stats
assert r.histo_count == r.stats_count
assert r.stats == (-42, -42)
assert r.histo == (-42, 0)
assert r.stats_count == 3*BRICK
assert r.bins[0] == 3*BRICK
assert r.bins[255] == 0
# [6] single positive value in all three bricks.
# Result similar to the above but the ranges are swapped.
r = testHistoOneValue(filename, SampleDataType.float, +42, True)
assert r.range == r.stats
assert r.histo_count == r.stats_count
assert r.stats == (42, 42)
assert r.histo == (0, 42)
assert r.stats_count == 3*BRICK
assert r.bins[0] == 0
assert r.bins[255] == 3*BRICK
def testHistoCornercaseInt(filename):
# Integral data.
# Histogram range should always match the user provided range,
# which for never-written is -1.25 to +0.75 and for the
# remaining cases value +/- 1. This means that value won't be
# exactly representable as an integer (it maps to -0.5) and
# this will be noticeable in the statistics. The 0.5 factor
# may also lead to numerical instability. The samples end up
# either in bin 127 or bin 128.
# Also, range might be wider then statistics (unlike the float
# case) if not all possible storage values have been used.
BRICK = 64*64*64
r = testHistoOneValue(filename, SampleDataType.int8, np.nan, False)
# Invariants for the integer case
assert r.range[0] <= r.stats[0] and r.range[1] >= r.stats[1]
assert r.histo == r.range
assert r.histo_count == r.stats_count
# Data dependent
assert r.stats[0] == r.stats[1]
assert abs(r.stats[0] - 0) < 0.25
assert abs(r.stats[0] - 0) > 0.001 # 0.0 not representable.
assert r.histo[0] == -1.25 and r.histo[1] == 0.75 # user choice exactly.
assert r.stats_count == 3*BRICK # Assuming finalize with force=True
# I don't really care where the "0" samples end up. It won't be the center.
assert r.bins[127] + r.bins[128] == 0
r = testHistoOneValue(filename, SampleDataType.int8, 0, True)
# Invariants for the integer case
assert r.range[0] <= r.stats[0] and r.range[1] >= r.stats[1]
assert r.histo == r.range
assert r.histo_count == r.stats_count
# Data dependent
assert r.stats[0] == r.stats[1]
assert abs(r.stats[0] - 0) < 0.25
assert abs(r.stats[0] - 0) > 0.001 # 0.0 not representable.
assert r.histo[0] == 0-1 and r.histo[1] == 0+1 # user choice exactly.
assert r.stats_count == 3*BRICK
assert r.bins[127] + r.bins[128] == 3*BRICK
r = testHistoOneValue(filename, SampleDataType.int8, -42, True)
# Invariants for the integer case
assert r.range[0] <= r.stats[0] and r.range[1] >= r.stats[1]
assert r.histo == r.range
assert r.histo_count == r.stats_count
# Data dependent
assert r.stats[0] == r.stats[1]
assert abs(r.stats[0] + 42) < 0.25
assert abs(r.stats[0] + 42) > 0.001 # 42.0 not representable.
assert r.histo[0] == -42-1 and r.histo[1] == -42+1 # user choice exactly.
assert r.stats_count == 3*BRICK
assert r.bins[127] + r.bins[128] == 3*BRICK
r = testHistoOneValue(filename, SampleDataType.int8, +42, True)
# Invariants for the integer case
assert r.range[0] <= r.stats[0] and r.range[1] >= r.stats[1]
assert r.histo == r.range
assert r.histo_count == r.stats_count
# Data dependent
assert r.stats[0] == r.stats[1]
assert abs(r.stats[0] - 42) < 0.25
assert abs(r.stats[0] - 42) > 0.001 # 42.0 not representable.
assert r.histo[0] == 42-1 and r.histo[1] == 42+1 # user choice exactly.
assert r.stats_count == 3*BRICK
assert r.bins[127] + r.bins[128] == 3*BRICK
# 16 bit not much different from 8 bit, but the statistics will be
# closer to the supplied value because the quantization error is smaller.
r = testHistoOneValue(filename, SampleDataType.int16, np.nan, False)
# Invariants for the integer case
assert r.range[0] <= r.stats[0] and r.range[1] >= r.stats[1]
assert r.histo == r.range
assert r.histo_count == r.stats_count
# Data dependent
assert r.stats[0] == r.stats[1]
assert abs(r.stats[0] - 0) < 0.25/256
assert abs(r.stats[0] - 0) > 0.001/256 # 0.0 not representable.
assert r.histo[0] == -1.25 and r.histo[1] == 0.75 # user choice exactly.
assert r.stats_count == 3*BRICK
# I don't really care where the "0" samples end up. It won't be the center.
assert r.bins[127] + r.bins[128] == 0
r = testHistoOneValue(filename, SampleDataType.int16, 0, True)
# Invariants for the integer case
assert r.range[0] <= r.stats[0] and r.range[1] >= r.stats[1]
assert r.histo == r.range
assert r.histo_count == r.stats_count
# Data dependent
assert r.stats[0] == r.stats[1]
assert abs(r.stats[0] - 0) < 0.25/256
assert abs(r.stats[0] - 0) > 0.001/256 # 0.0 not representable.
assert r.histo[0] == 0-1 and r.histo[1] == 0+1 # user choice exactly.
assert r.stats_count == 3*BRICK
assert r.bins[127] + r.bins[128] == 3*BRICK
r = testHistoOneValue(filename, SampleDataType.int16, -42, True)
# Invariants for the integer case
assert r.range[0] <= r.stats[0] and r.range[1] >= r.stats[1]
assert r.histo == r.range
assert r.histo_count == r.stats_count
# Data dependent
assert r.stats[0] == r.stats[1]
assert abs(r.stats[0] + 42) < 0.25/256
assert abs(r.stats[0] + 42) > 0.001/256 # 42.0 not representable.
assert r.histo[0] == -42-1 and r.histo[1] == -42+1 # user choice exactly.
assert r.stats_count == 3*BRICK
assert r.bins[127] + r.bins[128] == 3*BRICK
r = testHistoOneValue(filename, SampleDataType.int16, +42, True)
# Invariants for the integer case
assert r.range[0] <= r.stats[0] and r.range[1] >= r.stats[1]
assert r.histo == r.range
assert r.histo_count == r.stats_count
# Data dependent
assert r.stats[0] == r.stats[1]
assert abs(r.stats[0] - 42) < 0.25/256
assert abs(r.stats[0] - 42) > 0.001/256 # 42.0 not representable.
assert r.histo[0] == 42-1 and r.histo[1] == 42+1 # user choice exactly.
assert r.stats_count == 3*BRICK
assert r.bins[127] + r.bins[128] == 3*BRICK
# Behavior when all explicitly written values get clipped.
# Expect both the histogram and the statistics to only reflect
# the clipped value (-5) as if that value and not -42 had been
# written.
r = testHistoOneValue(filename, SampleDataType.int8, -42, True,
datarange = (-5, +760))
# Invariants for the integer case
assert r.range[0] <= r.stats[0] and r.range[1] >= r.stats[1]
assert r.histo == r.range
assert r.histo_count == r.stats_count
# Data dependent
assert r.stats == (-5, -5)
assert r.histo == (-5, +760)
assert r.stats_count == 3*BRICK
assert r.bins[0] == 3*BRICK
# As above, all explicitly written values get clipped but now
# there are a few unwritten bricks. Expect both the histogram
# and the statistics to only reflect the clipped value (-5) as
# if that value and not -42 had been written.
# Defaultvalue is +1 because the range does not give a zero
# centric histogram. The statistics should also reflect that.
# I.e. expect +1 to be part of the range.
r = testHistoOneValue(filename, SampleDataType.int8, -42, False,
datarange = (-5, +760))
# Invariants for the integer case
assert r.range[0] <= r.stats[0] and r.range[1] >= r.stats[1]
assert r.histo == r.range
assert r.histo_count == r.stats_count
# Data dependent
assert r.stats == (-5, +1)
assert r.histo == (-5, +760)
assert r.stats_count == 3*BRICK
assert r.bins[0] == BRICK
assert r.bins[2] == 2*BRICK
# Similar to the above but no values written at all.
# Defaultvalue is still 1 due to the missing zero-centric property
# so this is what should be reflected in the statistics.
r = testHistoOneValue(filename, SampleDataType.int8, np.nan, False,
datarange = (-5, +760))
# Invariants for the integer case
assert r.range[0] <= r.stats[0] and r.range[1] >= r.stats[1]
assert r.histo == r.range
assert r.histo_count == r.stats_count
# Data dependent
assert r.stats == (+1, +1)
assert r.histo == (-5, +760)
assert r.stats_count == 3*BRICK
assert r.bins[2] == 3*BRICK
def testFancyDefaultValue():
"""
Part of the test suite using the same test data stored in different ways.
Check what happens when reading samples that were never written.
The rectangles used are:
a) Dead area of partly written brick
b) Part dead area, part all-constant brick
c) all-constant brick
d) part all-constant brick, part unwritten brick
e) unwritten brick.
In the new reader all should return the default value.
In the old reader the last one might throw a missing brick exception;
it does in the C++ ZGY-Public API, but the Python wrapper catches it.
And the penultimate one might read zero from the unwritten area
while still seeing the default (1 in this case) elsewhere.
Also check reading completely outside range. The new accessor should
raise exceptions; the old one does whatever it feels like doing.
"""
with LocalFileAutoDelete("fancy-2.zgy") as fn:
createFancyFile(fn.name, SampleDataType.int8, (-2,+763),
newzgy.ZgyWriter)
checkReadingDeadArea(fn.name, (5, 22, 1), oldzgy.ZgyReader, 1)
checkReadingDeadArea(fn.name, (5, 22, 63), oldzgy.ZgyReader, 1)
checkReadingDeadArea(fn.name, (5, 22, 65), oldzgy.ZgyReader, 1)
checkReadingDeadArea(fn.name, (5, 22, 127), oldzgy.ZgyReader,
np.array([[[1, 0],[1, 0]],[[1, 0],[1, 0]]]))
checkReadingDeadArea(fn.name, (5, 22, 129), oldzgy.ZgyReader, 0)
#checkReadingOutsideRange(fn.name, oldzgy.ZgyReader)
#checkReadingOutsideLod(fn.name, oldzgy.ZgyReader)
#checkReadingToWrongValueType(fn.name, oldzgy.ZgyReader)
checkReadingDeadArea(fn.name, (5, 22, 1), newzgy.ZgyReader, 1)
checkReadingDeadArea(fn.name, (5, 22, 63), newzgy.ZgyReader, 1)
checkReadingDeadArea(fn.name, (5, 22, 65), newzgy.ZgyReader, 1)
checkReadingDeadArea(fn.name, (5, 22, 127), newzgy.ZgyReader, 1)
checkReadingDeadArea(fn.name, (5, 22, 129), newzgy.ZgyReader, 1)
checkReadingOutsideRange(fn.name, newzgy.ZgyReader)
checkReadingOutsideLod(fn.name, newzgy.ZgyReader)
checkReadingToWrongValueType(fn.name, newzgy.ZgyReader)
def testFancyReadConstant():
"""
Test the new API in openzgy to return brick status.
"""
with LocalFileAutoDelete("fancy-2.zgy") as fn:
createFancyFile(fn.name, SampleDataType.int8, (-2,+763),
newzgy.ZgyWriter)
with newzgy.ZgyReader(fn.name, iocontext = SDCredentials()) as reader, io.StringIO() as bitbucket:
verbose = lambda *args, **kwargs: print(*args, file=bitbucket, **kwargs)
# While the data inside this small rectangle is indeed constant,
# the whole brick is not. So, it won't be flagged as const val.
a = reader.readconst((17,17,17), (2,2,2), as_float = True, verbose=verbose)
b = reader.readconst((17,17,17), (2,2,2), as_float = False)
assert(a is None)
assert(b is None)
# In this case the enclosing brick was explicitly written with
# constant value 0, which will be read back as 1 because
# the range is not zero centric.
a = reader.readconst((1,2,67), (4,5,6), as_float = True)
b = reader.readconst((1,2,67), (4,5,6), as_float = False)
assert math.isclose(a, 1.0)
assert math.isclose(b, -127)
# Brick written as constant value 0 but only the region inside
# the survey. Whether this registers as "constant" may be
# considered an implementation detail. But ideally it ought to.
a = reader.readconst((65,2,67), (4,5,6), as_float = True)
b = reader.readconst((65,2,67), (4,5,6), as_float = False)
assert math.isclose(a, 1.0)
assert math.isclose(b, -127)
# Two bricks never written, two with constant value 0.
a = reader.readconst((0,0,64), (128,64,128), as_float = True)
b = reader.readconst((0,0,64), (128,64,128), as_float = False)
assert math.isclose(a, 1.0)
assert math.isclose(b, -127)
def testFancyMisc():
"""
Part of the test suite using the same test data stored in different ways.
"""
with LocalFileAutoDelete("fancy-1.zgy") as fn:
createFancyFile(fn.name, SampleDataType.int8, (-28,+227),
newzgy.ZgyWriter)
# Doesn't really belong here but doesn't bother to create a test file.
runCloseOnException(fn.name, newzgy.ZgyReader)
runErrorOnClose(fn.name, newzgy.ZgyReader)
runConversions(fn.name, newzgy.ZgyReader)
runErrorIfNotOpenForRead(fn.name, newzgy.ZgyReader)
runDumpToDevNull(fn.name, newzgy.ZgyReader)
if HasOldZgy():
runCloseOnException(fn.name, oldzgy.ZgyReader)
runConversions(fn.name, oldzgy.ZgyReader)
runErrorIfNotOpenForRead(fn.name, oldzgy.ZgyReader)
with LocalFileAutoDelete("fancy-1-clone.zgy") as cloned:
runClone(cloned.name, fn.name)
runUpdate(cloned.name)
runDumpMembers(cloned.name, fn.name)
def testFancy1():
"""
Part of the test suite using the same test data stored in different ways.
OpenZGY writer, both OpenZGY and ZGY-Public reader, local file, int8.
The coding range is asymmetric but zero centric.
"""
with LocalFileAutoDelete("fancy-1.zgy") as fn:
createFancyFile(fn.name, SampleDataType.int8, (-28,+227),
newzgy.ZgyWriter)
checkContents(fn.name, oldzgy.ZgyReader, 0, 0)
checkContents(fn.name, newzgy.ZgyReader, 0, 0)
checkLodContents(fn.name, oldzgy.ZgyReader, 0, 0)
checkLodContents(fn.name, newzgy.ZgyReader, 0, 0)
# The next line reveals a bug in ZGY-Public.
checkRawContents(fn.name, oldzgy.ZgyReader, 0, 100)
checkRawContents(fn.name, newzgy.ZgyReader, 0, 0)
checkStatistics(fn.name, oldzgy.ZgyReader, 0, 0, True)
checkStatistics(fn.name, newzgy.ZgyReader, 0, 0, True)
checkHistogram(fn.name, oldzgy.ZgyReader, 0, 0, True)
checkHistogram(fn.name, newzgy.ZgyReader, 0, 0, True)
def testFancy2():
"""
Part of the test suite using the same test data stored in different ways.
OpenZGY writer, both OpenZGY and ZGY-Public reader, local file, int8.
Unlike #1 the coding range is not zero centric. So 0 cannot be represented.
    What can be stored is -2, +1, +4, ..., +763, i.e. only values 3*n+1.
So my sample data values 31 and 301 are representable, but zero is not.
"""
with LocalFileAutoDelete("fancy-2.zgy") as fn:
createFancyFile(fn.name, SampleDataType.int8, (-2,+763),
newzgy.ZgyWriter)
checkContents(fn.name, oldzgy.ZgyReader, 1, 0)
checkContents(fn.name, newzgy.ZgyReader, 1, 1)
checkLodContents(fn.name, oldzgy.ZgyReader, 1, 0)
checkLodContents(fn.name, newzgy.ZgyReader, 1, 1)
# The next line reveals a bug in ZGY-Public.
checkRawContents(fn.name, oldzgy.ZgyReader, 1, 382)
checkRawContents(fn.name, newzgy.ZgyReader, 1, 1)
checkStatistics(fn.name, oldzgy.ZgyReader, 1, 0, True)
checkStatistics(fn.name, newzgy.ZgyReader, 1, 1, True)
checkHistogram(fn.name, oldzgy.ZgyReader, 1, 0, True)
checkHistogram(fn.name, newzgy.ZgyReader, 1, 0, True)
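# testFancy2 and testFancy3 rely on how an integer coding range
# quantizes sample values. A minimal sketch of that mapping, assuming
# a plain linear scale with 2**bits - 1 steps between the two ends of
# the range (hypothetical helper; the real writer may differ in
# rounding details):
def _representable_sketch(lo, hi, bits=8):
    """Return (spacing between representable values,
    representable value closest to zero)."""
    steps = 2**bits - 1
    step = (hi - lo) / steps
    k = min(max(round((0 - lo) / step), 0), steps)
    return step, lo + k * step
# Values are only illustrative:
#   _representable_sketch(-2, 763, 8)       -> (3.0, 1.0)   as in testFancy2
#   _representable_sketch(20, 16403.75, 16) -> (0.25, 20.0) as in testFancy3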
def testFancy3():
"""
Part of the test suite using the same test data stored in different ways.
OpenZGY writer, both OpenZGY and ZGY-Public reader, local file, int16.
Unlike #1 and #2 zero is not included in the coding range.
    The closest representable value to zero is +20.
The valuetype is now int16 instead of int8 for variation.
"""
with LocalFileAutoDelete("fancy-3.zgy") as fn:
createFancyFile(fn.name, SampleDataType.int16, (+20,+16403.75),
newzgy.ZgyWriter)
checkContents(fn.name, oldzgy.ZgyReader, 20, 0)
checkContents(fn.name, newzgy.ZgyReader, 20, 20)
checkLodContents(fn.name, oldzgy.ZgyReader, 20, 0)
checkLodContents(fn.name, newzgy.ZgyReader, 20, 20)
checkRawContents(fn.name, oldzgy.ZgyReader, 20, 8212)
checkRawContents(fn.name, newzgy.ZgyReader, 20, 20)
checkStatistics(fn.name, oldzgy.ZgyReader, 20, 0, True)
checkStatistics(fn.name, newzgy.ZgyReader, 20, 20, True)
checkHistogram(fn.name, oldzgy.ZgyReader, 20, 0, True)
checkHistogram(fn.name, newzgy.ZgyReader, 20, 20, True)
def testFancy4():
"""
Part of the test suite using the same test data stored in different ways.
OpenZGY writer, both OpenZGY and ZGY-Public reader, local file, float32.
Bad coding range hint.
    For float cubes the coding range is only a hint that might be used
    for the histogram range, or it might be ignored completely if the
    histogram is written in a separate pass where the exact range is
    already known.
"""
with LocalFileAutoDelete("fancy-4.zgy") as fn:
createFancyFile(fn.name, SampleDataType.float, (-1,+1),
newzgy.ZgyWriter)
checkContents(fn.name, oldzgy.ZgyReader, 0, 0)
checkContents(fn.name, newzgy.ZgyReader, 0, 0)
checkLodContents(fn.name, oldzgy.ZgyReader, 0, 0)
checkLodContents(fn.name, newzgy.ZgyReader, 0, 0)
checkRawContents(fn.name, oldzgy.ZgyReader, 0, 0)
checkRawContents(fn.name, newzgy.ZgyReader, 0, 0)
checkStatistics(fn.name, oldzgy.ZgyReader, 0, 0, True)
checkStatistics(fn.name, newzgy.ZgyReader, 0, 0, True)
checkHistogram(fn.name, oldzgy.ZgyReader, 0, 0, True)
checkHistogram(fn.name, newzgy.ZgyReader, 0, 0, True)
def testFancy5():
"""
Part of the test suite using the same test data stored in different ways.
    Unlike 1..4, this uses the old ZGY-Public writer, to help verify that
the old and new code produces the same result. The test uses both OpenZGY
and ZGY-Public reader, local file, int8.
"""
with LocalFileAutoDelete("fancy-5.zgy") as fn:
createFancyFile(fn.name, SampleDataType.int8, (-28,+227),
oldzgy.ZgyWriter)
checkContents(fn.name, oldzgy.ZgyReader, 0, 0)
checkContents(fn.name, newzgy.ZgyReader, 0, 0)
checkLodContents(fn.name, oldzgy.ZgyReader, 0, 0)
checkLodContents(fn.name, newzgy.ZgyReader, 0, 0)
# The next line reveals a bug in ZGY-Public.
checkRawContents(fn.name, oldzgy.ZgyReader, 0, 100)
checkRawContents(fn.name, newzgy.ZgyReader, 0, 0)
checkStatistics(fn.name, oldzgy.ZgyReader, 0, 0, False)
checkStatistics(fn.name, newzgy.ZgyReader, 0, 0, False)
checkHistogram(fn.name, oldzgy.ZgyReader, 0, 0, False)
checkHistogram(fn.name, newzgy.ZgyReader, 0, 0, False)
def testFancy6():
"""
Part of the test suite using the same test data stored in different ways.
OpenZGY Python writer, both OpenZGY and ZGY-Public reader, local file, float.
Compared to the old writer the user specified codingrange
will now be ignored and the statistical range used instead.
Note that if api.ZgyMeta.datarange chooses to enforce this
then only the old reader will be able to verify what was written.
"""
with LocalFileAutoDelete("fancy-6.zgy") as fn:
createFancyFile(fn.name, SampleDataType.float, (-1,+42),
newzgy.ZgyWriter)
checkContents(fn.name, oldzgy.ZgyReader, 0, 0)
checkContents(fn.name, newzgy.ZgyReader, 0, 0)
checkLodContents(fn.name, oldzgy.ZgyReader, 0, 0)
checkLodContents(fn.name, newzgy.ZgyReader, 0, 0)
checkRawContents(fn.name, oldzgy.ZgyReader, 0, 0)
checkRawContents(fn.name, newzgy.ZgyReader, 0, 0)
checkStatistics(fn.name, oldzgy.ZgyReader, 0, 0, True)
checkStatistics(fn.name, newzgy.ZgyReader, 0, 0, True)
checkHistogram(fn.name, oldzgy.ZgyReader, 0, 0, True)
checkHistogram(fn.name, newzgy.ZgyReader, 0, 0, True)
def testFancy7():
"""
Part of the test suite using the same test data stored in different ways.
OpenZGY Python writer, int8 with lossless compression.
Currently this is explicitly forbidden by a test in the api.
See comments in the doc and in the ZgyWriter source code for why. Also,
fewer checks because the old reader cannot handle the new compression.
"""
lossless = ZgyCompressFactory("ZFP", snr = 99)
with LocalFileAutoDelete("fancy-7.zgy") as fn:
with MustThrow("need to be stored as float", newzgy.ZgyUserError):
createFancyFile(fn.name, SampleDataType.int8, (-28,+227),
newzgy.ZgyWriter, single_write=True,
kwargs={"compressor": lossless})
#checkContents(fn.name, newzgy.ZgyReader, 0, 0)
#checkLodContents(fn.name, newzgy.ZgyReader, 0, 0)
#checkRawContents(fn.name, newzgy.ZgyReader, 0, 0)
#checkStatistics(fn.name, newzgy.ZgyReader, 0, 0, True)
#checkHistogram(fn.name, newzgy.ZgyReader, 0, 0, True)
fn.disarm()
def testFancy8():
"""
Part of the test suite using the same test data stored in different ways.
OpenZGY Python writer, float32 with lossy compression.
"""
lossless = ZgyCompressFactory("ZFP", snr = 99)
with LocalFileAutoDelete("fancy-8.zgy") as fn:
createFancyFile(fn.name, SampleDataType.float, (-1,+42),
newzgy.ZgyWriter, single_write=True,
kwargs={"compressor": lossless})
checkContents(fn.name, newzgy.ZgyReader, 0, 0)
checkLodContents(fn.name, newzgy.ZgyReader, 0, 0)
checkRawContents(fn.name, newzgy.ZgyReader, 0, 0)
checkStatistics(fn.name, newzgy.ZgyReader, 0, 0, True)
checkHistogram(fn.name, newzgy.ZgyReader, 0, 0, True)
def testFancy9():
"""
Part of the test suite using the same test data stored in different ways.
OpenZGY Python writer, int8 with lossy compression.
Currently this is explicitly forbidden by a test in the api.
See comments in the doc and in the ZgyWriter source code for why. Also,
fewer checks because the old reader cannot handle the new compression.
"""
lossy = ZgyCompressFactory("ZFP", snr = 30)
with LocalFileAutoDelete("fancy-9.zgy") as fn:
with MustThrow("need to be stored as float", newzgy.ZgyUserError):
createFancyFile(fn.name, SampleDataType.int8, (-28,+227),
newzgy.ZgyWriter, single_write=True,
kwargs={"compressor": lossy})
#checkContents(fn.name, newzgy.ZgyReader, 0, 0, maxdelta=1.5)
#checkLodContents(fn.name, newzgy.ZgyReader, 0, 0)
#checkRawContents(fn.name, newzgy.ZgyReader, 0, 0, maxdelta=2.5)
#checkStatistics(fn.name, newzgy.ZgyReader, 0, 0, True, maxdelta=8000)
#checkHistogram(fn.name, newzgy.ZgyReader, 0, 0, True)
fn.disarm()
def testFancy10():
"""
Part of the test suite using the same test data stored in different ways.
OpenZGY Python writer, float32 with lossy compression.
"""
lossy = ZgyCompressFactory("ZFP", snr = 30)
with LocalFileAutoDelete("fancy-10.zgy") as fn:
createFancyFile(fn.name, SampleDataType.float, (-1,+42),
newzgy.ZgyWriter, single_write=True,
kwargs={"compressor": lossy})
checkContents(fn.name, newzgy.ZgyReader, 0, 0, maxdelta=2.0)
checkLodContents(fn.name, newzgy.ZgyReader, 0, 0)
checkRawContents(fn.name, newzgy.ZgyReader, 0, 0, maxdelta=2.0)
checkStatistics(fn.name, newzgy.ZgyReader, 0, 0, True, maxdelta=5000)
#checkHistogram(fn.name, newzgy.ZgyReader, 0, 0, True)
def testFancy11():
"""
Part of the test suite using the same test data stored in different ways.
New code only, small bricksize, no compression.
"""
with LocalFileAutoDelete("fancy-11.zgy") as fn:
createFancyFile(fn.name, SampleDataType.float, (-28,+227),
newzgy.ZgyWriter,
kwargs={"bricksize": (32,32,32)})
checkContents(fn.name, newzgy.ZgyReader, 0, 0)
checkLodContents(fn.name, newzgy.ZgyReader, 0, 0)
checkRawContents(fn.name, newzgy.ZgyReader, 0, 0)
checkStatistics(fn.name, newzgy.ZgyReader, 0, 0, True)
checkHistogram(fn.name, newzgy.ZgyReader, 0, 0, True)
def testFancy12():
"""
Part of the test suite using the same test data stored in different ways.
New code only, large bricksize, no compression.
"""
with LocalFileAutoDelete("fancy-12.zgy") as fn:
createFancyFile(fn.name, SampleDataType.float, (-28,+227),
newzgy.ZgyWriter,
kwargs={"bricksize": (128,128,128)})
checkContents(fn.name, newzgy.ZgyReader, 0, 0)
checkLodContents(fn.name, newzgy.ZgyReader, 0, 0)
checkRawContents(fn.name, newzgy.ZgyReader, 0, 0)
checkStatistics(fn.name, newzgy.ZgyReader, 0, 0, True)
checkHistogram(fn.name, newzgy.ZgyReader, 0, 0, True)
def testFancy13():
"""
Part of the test suite using the same test data stored in different ways.
    New code only, non-cubic bricks, no compression.
Need single_write=True because with the very small
    bricksize my test code ends up writing more than
one brick past the end of the survey.
"""
with LocalFileAutoDelete("fancy-13.zgy") as fn:
createFancyFile(fn.name, SampleDataType.float, (-28,+227),
newzgy.ZgyWriter, single_write=True,
kwargs={"bricksize": (16,32,128)})
checkContents(fn.name, newzgy.ZgyReader, 0, 0, maxdelta=2.0)
checkLodContents(fn.name, newzgy.ZgyReader, 0, 0)
checkRawContents(fn.name, newzgy.ZgyReader, 0, 0, maxdelta=2.0)
checkStatistics(fn.name, newzgy.ZgyReader, 0, 0, True, maxdelta=5000)
checkHistogram(fn.name, newzgy.ZgyReader, 0, 0, True)
def testFancy14():
"""
Part of the test suite using the same test data stored in different ways.
    New code only, non-cubic bricks, with compression.
"""
lossy = ZgyCompressFactory("ZFP", snr = 30)
with LocalFileAutoDelete("fancy-14.zgy") as fn:
createFancyFile(fn.name, SampleDataType.float, (-28,+227),
newzgy.ZgyWriter, single_write=True,
kwargs={"bricksize": (16,32,128), "compressor": lossy})
checkContents(fn.name, newzgy.ZgyReader, 0, 0, maxdelta=2.0)
checkLodContents(fn.name, newzgy.ZgyReader, 0, 0)
checkRawContents(fn.name, newzgy.ZgyReader, 0, 0, maxdelta=2.0)
checkStatistics(fn.name, newzgy.ZgyReader, 0, 0, True, maxdelta=5000)
#FAILS checkHistogram(fn.name, newzgy.ZgyReader, 0, 0, True)
def testCloudAutoDelete():
with CloudFileAutoDelete("xyzzy", None) as fn:
assert fn.name[:5] == "sd://"
fn.disarm()
# Seismic drive, missing credentials.
with MustThrow("service URL has not been defined", RuntimeError):
with CloudFileAutoDelete("xyzzy", None, silent=True) as fn:
assert fn.name[:5] == "sd://"
# Seismic drive, file not found.
# As of 2021-02-12 it is no longer an error to delete a non-existing file.
#with MustThrow("does not exist", RuntimeError):
with CloudFileAutoDelete("xyzzy", SDCredentials(), silent=True) as fn:
assert fn.name[:5] == "sd://"
def testReadFromCloud(filename):
with newzgy.ZgyReader(filename, iocontext=SDCredentials()) as reader, io.StringIO() as bitbucket:
verbose = lambda *args, **kwargs: print(*args, file=bitbucket, **kwargs)
assert reader.size == (181, 241, 169)
tmp = np.zeros((100, 50, 30), dtype=np.int8)
reader.read((42, 70, 50), tmp, verbose=verbose)
#print(tuple(tmp[0,0,:5]), tuple(tmp[0,0,-5:]))
assert tuple(tmp[0,0,:5]) == (57, 48, 38, 28, 17)
assert tuple(tmp[0,0,-5:]) == (-101, -91, -79, -65, -51)
def testCloudWriter(filename):
"""
    File written by the new code to seismic store.
I haven't hooked up the old API to seismic store, so do the read
checks only with newzgy.
"""
with TimeMe(" createFancyFile"):
createFancyFile(filename, SampleDataType.int8, (-28,+227), newzgy.ZgyWriter)
with TimeMe(" checkContents"):
checkContents(filename, newzgy.ZgyReader, 0, 0)
with TimeMe(" checkLodContents"):
checkLodContents(filename, newzgy.ZgyReader, 0, 0)
with TimeMe(" checkRawContents"):
checkRawContents(filename, newzgy.ZgyReader, 0, 0)
with TimeMe(" checkStatistics"):
checkStatistics(filename, newzgy.ZgyReader, 0, 0, True)
with TimeMe(" checkHistogram"):
checkHistogram(filename, newzgy.ZgyReader, 0, 0, True)
with TimeMe(" delete #1"):
newzgy.ZgyUtils(SDCredentials()).delete(filename)
with TimeMe(" delete #2"):
newzgy.ZgyUtils(SDCredentials()).delete(filename)
def testLegalTag(filename):
meta = {"foo": "bar", "foocount": 42}
meta = {"kind": "slb:openzgy:test:1.0.0", "data": meta}
iocontext = SDCredentials(legaltag="slb-synthetic-seismic",
writeid="test-my-write", seismicmeta=meta)
with newzgy.ZgyWriter(filename,
iocontext = iocontext,
size = (64, 64, 64),
datatype = SampleDataType.float) as writer:
data = np.zeros((64, 64, 64), dtype=np.float32)
writer.write((0, 0, 0), data)
writer.finalize()
#os.system("sdutil stat " + SDTestSink("legaltag.zgy") + " --detailed")
# TODO-Test, read back metadata and confirm it was stored correctly.
# Not possible yet.
# TODO-Question, there is both a {get,set}MetaData and a {get,set}SeismicMeta().
# I suspect the former only sets the "data" portion of SeismicMeta
# but the two might also be completely unrelated.
# TODO-Question, when (and only when) I specify seismicmeta I see that
# sdutil stat --detailed will show me the seismicmeta and this
# includes the legaltag. Is the legaltag in the seismicmeta
# different from the "old" legaltag? Can it be changed, since we
# do have a setSeismicMeta?
def testCloudConsolidateBricks(filename, *, verbose = False):
"""
    When reading from seismic store, bricks that are contiguous in storage
    should be read in a single operation because larger reads are faster
    (up to a point). When not contiguous, the reads should still be issued
    as a single call to seismic store with a scatter/gather array so the
    lower level code might do multi-threading.
This test also enables the single-block caching which will cause
all the headers to be read in a single operation. It can also speed
up regular brick access. Note that this cache is extremely simplistic,
it only remembers the previous result and it only returns a match
if the next request is exactly identical.
TODO-Low consider splitting this into multiple tests.
"""
vprint = ((lambda *args, **kwargs: print(*args, **kwargs)) if verbose
else (lambda *args, **kwargs: None))
trace = TraceCallsToSD(verbose = verbose)
iocontext = SDCredentials(aligned=1, maxsize=64, maxhole=1, threads=1,
_debug_trace = trace
)
bricksize = np.array((64, 64, 64), dtype=np.int64)
brick = np.product(bricksize) * np.dtype(np.float32).itemsize
size = np.array((181, 241, 169), dtype=np.int64)
numbricks = (size + bricksize - 1) // bricksize
vprint("Creating. Expect header written twice, then bulk data once.")
with newzgy.ZgyWriter(filename, iocontext=iocontext,
bricksize = tuple(bricksize),
size = tuple(size)) as writer:
data = np.arange(np.product(size), dtype=np.float32).reshape(size)
writer.write((0,0,0), data)
# lod 0 bricks: 3 * 4 * 3 = 36
# lod 1 bricks: 2 * 2 * 2 = 8
# lod 2 bricks: 1
# sum bricks on file: 45
# Writing the final header is the penultimate and not the last write.
# This is due to how SeismicStoreFileDelayedWrite works. See also
# comments in ZgyWriter.close().
assert len(trace.calls) == 3
assert trace.calls[0] == ("append", brick, brick, 1)
assert trace.calls[1] == ("write", brick, brick, 1)
assert trace.calls[2] == ("append", 45 * brick, 45*brick, 1)
trace.reset()
vprint("Opening. Expect all headers read in just one real access.")
with newzgy.ZgyReader(filename, iocontext = iocontext) as reader:
assert len(trace.calls) >= 1
assert trace.calls[0].what in ("read", "readv", "cachemiss")
assert all([t.what == "cachehit" for t in trace.calls[1:]])
trace.reset()
# The size in bricks, il/xl/slice, is (3, 4, 3).
# Reading a single inline should require just a single access.
# Reading a single crossline should read one brick-column (3 bricks)
        # at a time, i.e. 3 non-contiguous parts in a single scatter/gather
        # read. Each brick is 1 MB (64^3 float32 samples).
ildata = np.zeros((1, size[1], size[2]), dtype=np.float32)
xldata = np.zeros((size[0], 1, size[2]), dtype=np.float32)
vprint("read one il,", numbricks[1] * numbricks[2], "bricks")
reader.read((0,0,0), ildata)
assert len(trace.calls) == 1
assert trace.calls[0] == ("readv",
brick*numbricks[1]*numbricks[2],
brick*numbricks[1]*numbricks[2], 1)
trace.reset()
vprint("read one xl,", numbricks[0], "*", numbricks[2], "bricks")
reader.read((0,0,0), xldata)
# Not contiguous, but a single scatter/gather read.
assert len(trace.calls) == 1
assert trace.calls[0] == ("readv",
brick*numbricks[0]*numbricks[2],
brick*numbricks[0]*numbricks[2], 3)
trace.reset()
sample = np.zeros((1,1,1), dtype=np.float32)
vprint("read one sample. Should require just one brick.")
reader.read((100,100,100), sample)
assert len(trace.calls) == 1
assert trace.calls[0].nbytes == brick
trace.reset()
vprint("read another sample in the same brick. Should be cached.")
reader.read((101,102,103), sample)
assert len(trace.calls) == 1
assert trace.calls[0] == ("cachehit", brick, brick, 1)
trace.reset()
vprint("Opening with 64 MB buffers. Everything ought to be cached.")
# Note that the entire file is smaller than the requested blocking,
    # it is important to verify that this doesn't cause problems when
# hitting EOF. The "simple cache" and the "scatter/gather" cases
# need to be tested separately.
iocontext = SDCredentials(aligned=64, maxsize=64, maxhole=1, threads=1,
_debug_trace = trace
)
with newzgy.ZgyReader(filename, iocontext = iocontext) as reader:
# As with the previous case there should just be a single read.
assert len(trace.calls) >= 1
assert trace.calls[0].what in ("read", "readv", "cachemiss")
assert all([t.what == "cachehit" for t in trace.calls[1:]])
trace.reset()
# This will currently not be very performant. The requested
# padding will be applied but the simplistic cache won't use it.
# Not that big a deal since the padding in real cases should
        # probably be just 4 MB or so, small enough that the wasted
        # bytes don't actually cost anything.
# The test is important though. The padding to align reads
# is still applied, but in a different place in the code.
vprint("read one il,", numbricks[1] * numbricks[2], "bricks")
ildata = np.zeros((1, size[1], size[2]), dtype=np.float32)
reader.read((0,0,0), ildata)
assert len(trace.calls) == 1
# See FileAdt._consolidate_requests._groupsize()
        # The header segment is not aligned to our oversized "align"
# parameter. This causes some needless data access because
# the padding will cross a segment boundary. Segment 0 (headers)
# will be read again even though we don't need it.
# The asserts below reflect the current implementation.
#assert trace.calls[0] == ("readv", 12*brick, 45*brick, 2)
assert trace.calls[0] == ("readv", 12*brick, 46*brick, 2)
trace.reset()
vprint("read one xl,", numbricks[0], "*", numbricks[2], "bricks")
xldata = np.zeros((size[0], 1, size[2]), dtype=np.float32)
reader.read((0,0,0), xldata)
# Consolidate and split causes this to end up as 3 separate
# non contiguous reads. Applying "align" is done too late
# which causes each of these 3 reads to cover the exact same
# area. And those areas in turn consist of two reads since
# we are reading the header also. The naive cache doesn't
# help us here. Fortunately this is a very contrived case.
assert len(trace.calls) == 1
#assert trace.calls[0] == ("readv", 9*brick, 45*brick, 1)
assert trace.calls[0] == ("readv", 9*brick, 3*46*brick, 6)
trace.reset()
# This should trigger the naive cache, tailored specifically
# to how Petrel reads data from ZGY.
vprint("read one il, one brick at a time")
ildata = np.zeros((1, 64, 64), dtype=np.float32)
for xl in range(0, size[1], 64):
for zz in range(0, size[2], 64):
reader.read((0, xl, zz), ildata)
assert len(trace.calls) >= 1
# The cache was cleared after readv, so expect one and just one
# read request to fill it.
assert trace.calls[0].what in ("read", "readv", "cachemiss")
assert all([t.what == "cachehit" for t in trace.calls[1:]])
trace.reset()
vprint("read one xl, one brick at a time")
xldata = np.zeros((64, 1, 64), dtype=np.float32)
for il in range(0, size[0], 64):
for zz in range(0, size[2], 64):
                reader.read((il, 0, zz), xldata)
assert len(trace.calls) >= 1
assert all([t.what == "cachehit" for t in trace.calls[0:]])
trace.reset()
# Re-create the file with 7 MB segment size, to stress some more code.
iocontext = SDCredentials(aligned=1, maxsize=64, maxhole=1, threads=1,
segsize=7, _debug_trace = trace
)
bricksize = np.array((64, 64, 64), dtype=np.int64)
brick = np.product(bricksize) * np.dtype(np.float32).itemsize
size = np.array((181, 241, 169), dtype=np.int64)
numbricks = (size + bricksize - 1) // bricksize
vprint("Creating. Expect header written twice and bulk data in 7 parts.")
with newzgy.ZgyWriter(filename, iocontext=iocontext,
bricksize = tuple(bricksize),
size = tuple(size)) as writer:
data = np.arange(np.product(size), dtype=np.float32).reshape(size)
writer.write((0,0,0), data)
# There may be several reads needed to generate lod 1 bricks
# from data already flushed. Ignore those.
calls = list([ e for e in trace.calls
if e.what not in ("readv", "cachehit", "cachemiss")])
assert len(calls) == 9
assert calls[0] == ("append", brick, brick, 1) # empty header
assert calls[1] == ("append", 7 * brick, 7 * brick, 1)
assert calls[2] == ("append", 7 * brick, 7 * brick, 1)
assert calls[3] == ("append", 7 * brick, 7 * brick, 1)
assert calls[4] == ("append", 7 * brick, 7 * brick, 1)
assert calls[5] == ("append", 7 * brick, 7 * brick, 1)
assert calls[6] == ("append", 7 * brick, 7 * brick, 1)
assert calls[7] == ("write", brick, brick, 1) # actual header
assert calls[8] == ("append", 3 * brick, 3 * brick, 1) # mop up.
trace.reset()
iocontext = SDCredentials(aligned=1, maxsize=64, maxhole=1, threads=1,
_debug_trace = trace
)
with newzgy.ZgyReader(filename, iocontext = iocontext) as reader:
assert len(trace.calls) >= 1
assert trace.calls[0].what in ("read", "readv", "cachemiss")
assert all([t.what == "cachehit" for t in trace.calls[1:]])
trace.reset()
vprint("read one il,", numbricks[1] * numbricks[2], "bricks")
ildata = np.zeros((1, size[1], size[2]), dtype=np.float32)
reader.read((0,0,0), ildata)
        # There will be two parts since it crosses a segment boundary.
assert len(trace.calls) == 1
assert trace.calls[0] == ("readv", 12*brick, 12*brick, 2)
trace.reset()
vprint("read one xl,", numbricks[0], "*", numbricks[2], "bricks")
xldata = np.zeros((size[0], 1, size[2]), dtype=np.float32)
reader.read((0,0,0), xldata)
# Not contiguous, but a single scatter/gather read.
        # More than 3 parts due to crossing segment boundaries.
assert len(trace.calls) == 1
assert trace.calls[0] == ("readv", 9*brick, 9*brick, 4)
trace.reset()
vprint("done.")
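# The "naive cache" exercised in testCloudConsolidateBricks only
# remembers the most recent block read and only reports a hit when the
# next request is exactly identical to it. A minimal sketch of that
# idea (hypothetical class, not the actual code in impl.file):
class _SingleEntryCacheSketch:
    def __init__(self, read_fn):
        self._read_fn = read_fn   # callable(offset, size) -> bytes
        self._key = None          # (offset, size) of the cached block
        self._data = None
    def read(self, offset, size):
        if self._key == (offset, size):
            return self._data                      # "cachehit" in the trace
        self._data = self._read_fn(offset, size)   # "cachemiss"
        self._key = (offset, size)
        return self._data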
def Main():
np.seterr(all='raise')
with TimeMe("ProgressWithDots"):
testProgressWithDots()
with TimeMe("BadArgumentsOnCreate"):
testBadArgumentsOnCreate()
with TimeMe("BadArgumentsOnReadWrite"):
with LocalFileAutoDelete("somefile.zgy") as fn:
testBadArgumentsOnReadWrite(fn.name)
with TimeMe("AutoDelete"):
testAutoDelete()
if HasOldZgy():
with TimeMe("HistogramRangeIsCenterNotEdge"):
with LocalFileAutoDelete("histo.zgy") as fn:
testHistogramRangeIsCenterNotEdge(fn.name)
with TimeMe("EmptyFile_NN"):
with LocalFileAutoDelete("emptyfile.zgy") as fn:
testEmptyFile(fn.name, newzgy.ZgyWriter, newzgy.ZgyReader)
if HasOldZgy():
with TimeMe("EmptyFile_ON"):
with LocalFileAutoDelete("emptyfile.zgy") as fn:
testEmptyFile(fn.name, oldzgy.ZgyWriter, newzgy.ZgyReader)
with TimeMe("EmptyFile_NO"):
with LocalFileAutoDelete("emptyfile.zgy") as fn:
testEmptyFile(fn.name, newzgy.ZgyWriter, oldzgy.ZgyReader)
with TimeMe("EmptyFile_OO"):
with LocalFileAutoDelete("emptyfile.zgy") as fn:
testEmptyFile(fn.name, oldzgy.ZgyWriter, oldzgy.ZgyReader)
with LocalFileAutoDelete("rmwfile.zgy") as fn:
testRmwFile(fn.name, newzgy.ZgyWriter)
with LocalFileAutoDelete("fatal-error.zgy") as fn:
testFatalErrorFlag(fn.name)
if False: # Disabled because it takes too long.
with TimeMe("LargeSparseFile"):
with LocalFileAutoDelete("largesparse.zgy") as fn:
testLargeSparseFile(fn.name, newzgy.ZgyWriter, newzgy.ZgyReader)
with TimeMe("Naan"):
with LocalFileAutoDelete("naan.zgy") as fn:
testNaan(fn.name)
with TimeMe("WriteNaanToIntegerStorage"):
with LocalFileAutoDelete("intnaan.zgy") as fn:
testWriteNaanToIntegerStorage(fn.name)
with TimeMe("ZeroCentric"):
with LocalFileAutoDelete("zerocentric.zgy") as fn:
testZeroCentric(fn.name)
with TimeMe("FinalizeProgress"):
with LocalFileAutoDelete("finalize.zgy") as fn:
testFinalizeProgress(fn.name, abort = False)
with TimeMe("FinalizeProgress"):
with LocalFileAutoDelete("finalize.zgy") as fn:
testFinalizeProgress(fn.name, abort = True)
with TimeMe("HugeFile"):
with LocalFileAutoDelete("huge.zgy") as fn:
testHugeFile(fn.name)
with LocalFileAutoDelete("oddsize.zgy") as fn:
testDecimateOddSize(fn.name)
with TimeMe("DecimateWeightedAverage"):
with LocalFileAutoDelete("weighted.zgy") as fn:
testDecimateWeightedAverage(fn.name)
with TimeMe("MixingUserAndStorage"):
with LocalFileAutoDelete("mixuserstorage.zgy") as fn:
testMixingUserAndStorage(fn.name)
with TimeMe("SmallConstArea"):
with LocalFileAutoDelete("smallconstarea.zgy") as fn:
testSmallConstArea(fn.name)
with LocalFileAutoDelete("testhisto_f.zgy") as fn:
testHistoCornercaseFloat(fn.name)
with LocalFileAutoDelete("testhisto_i.zgy") as fn:
testHistoCornercaseInt(fn.name)
with TimeMe("FancyDefaultValue"):
testFancyDefaultValue()
with TimeMe("FancyReadConstant"):
testFancyReadConstant()
with TimeMe("FancyMisc"):
testFancyMisc()
with TimeMe("TestFancy1"):
testFancy1()
with TimeMe("TestFancy2"):
testFancy2()
with TimeMe("TestFancy3"):
testFancy3()
with TimeMe("TestFancy4"):
testFancy4()
if HasOldZgy():
with TimeMe("TestFancy5"):
testFancy5()
with TimeMe("TestFancy6"):
testFancy6()
with TimeMe("TestFancy11"):
testFancy11()
with TimeMe("TestFancy12"):
testFancy12()
with TimeMe("TestFancy13"):
testFancy13()
# ZFP COMPRESSION
if HasZFPCompression():
with TimeMe("RegisteredCompressors"):
testRegisteredCompressors()
with TimeMe("TestFancy7"):
testFancy7()
with TimeMe("TestFancy8"):
testFancy8()
with TimeMe("TestFancy9"):
testFancy9()
with TimeMe("TestFancy10"):
testFancy10()
with TimeMe("TestFancy14"):
testFancy14()
with TimeMe("NoRmwInCompressedFile"):
with LocalFileAutoDelete("no-rmw.zgy") as fn:
testNoRmwInCompressedFile(fn.name)
with TimeMe("Naan"):
with LocalFileAutoDelete("naan.zgy") as fn:
testNaan(fn.name, 70)
# SEISMIC STORE
if not HasSeismicStore():
print("SKIPPING seismic store tests")
return
with TimeMe("testCloudAutoDelete"):
testCloudAutoDelete()
with TimeMe("testReadFromCloud"):
testReadFromCloud(SDTestData("Synt2.zgy"))
with TimeMe("testCloudWriter"):
with CloudFileAutoDelete("openzgy-rules.zgy", SDCredentials()) as cad:
testCloudWriter(cad.name)
cad.disarm() # The test function cleans up itself, unless it throws.
with TimeMe("EmptyFile"):
with CloudFileAutoDelete("emptyfile.zgy", SDCredentials()) as fn:
testEmptyFile(fn.name)
# oldzgy probably doesn't have zgycloud set up in this test.
if HasOldZgy() and False:
with TimeMe("EmptyFile_ON"):
with CloudFileAutoDelete("emptyfile.zgy", SDCredentials()) as fn:
testEmptyFile(fn.name, oldzgy.ZgyWriter, newzgy.ZgyReader)
with TimeMe("EmptyFile_NO"):
with CloudFileAutoDelete("emptyfile.zgy", SDCredentials()) as fn:
testEmptyFile(fn.name, newzgy.ZgyWriter, oldzgy.ZgyReader)
with TimeMe("EmptyFile_OO"):
with CloudFileAutoDelete("emptyfile.zgy", SDCredentials()) as fn:
testEmptyFile(fn.name, oldzgy.ZgyWriter, oldzgy.ZgyReader)
with TimeMe("EmptyExistingFile"):
testEmptyExistingFile("sd://sntc/testdata/OldEmpty.zgy")
with TimeMe("testRmwFile"):
with CloudFileAutoDelete("rmwfile.zgy", SDCredentials()) as fn:
testRmwFile(fn.name, newzgy.ZgyWriter)
with TimeMe("testLegalTag"):
with CloudFileAutoDelete("legaltag.zgy", SDCredentials()) as fn:
testLegalTag(fn.name)
with CloudFileAutoDelete("consolidate.zgy", SDCredentials()) as fn:
with TimeMe("ConsolidateBricks"):
testCloudConsolidateBricks(fn.name, verbose = False)
if __name__ == "__main__":
Main()
# Copyright 2017-2021, Schlumberger
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | zgyio | /zgyio-0.0.4.tar.gz/zgyio-0.0.4/openzgy/test/black.py | black.py |
import sys
import numpy as np
from ..api import ProgressWithDots, ZgyReader, ZgyWriter, SampleDataType
from ..test.utils import SDCredentials
from ..iterator import readall
def _test_consume(iterator):
ncalls = 0
nvoxels = 0
maxconsumed = [0, 0, 0]
for pos, count, data in iterator:
if tuple(count) != data.shape: return
ncalls += 1
nvoxels += int(data.size)
for i in range(3):
maxconsumed[i] = max(maxconsumed[i], data.shape[i])
return ncalls, nvoxels, tuple(maxconsumed)
def test_iterator():
class MockReader:
def __init__(self):
self.readcount = 0
self.voxelcount = 0
self.maxread = [0, 0, 0]
def read(self, pos, data, *, lod=0):
if False:
print("read", tuple(pos), tuple(data.shape), lod)
self.readcount += 1
self.voxelcount += data.size
for i in range(3):
self.maxread[i] = max(self.maxread[i], data.shape[i])
@property
def size(self):
return (201, 334, 1001)
@property
def bricksize(self):
return (64, 64, 64)
@property
def datatype(self):
return SampleDataType.int16
@property
def result(self):
return self.readcount, self.voxelcount, tuple(self.maxread)
class CountWork:
def __init__(self, abort = None):
self.count = 0
self.done = 0
self.total = 0
self._abort = abort
def __call__(self, done, total):
self.count += 1
self.done = done
self.total = total
return not self._abort or self.count < self._abort
def __str__(self):
return "done {0}/{1} in {2} calls".format(
self.done, self.total, self.count)
ssize = 201 * 334 * 1001
ssize_lod1 = (202//2) * (334//2) * (1002//2)
# For the first set of tests, leave chunksize unset so the
# consumer gets called once for each data read.
# If blocksize is set it will be honored, and zero means all.
mock = MockReader()
tmp = readall(mock, blocksize = (0, 0, 0))
assert mock.readcount == 0
assert _test_consume(tmp) == (1, ssize, tuple(mock.size))
assert mock.result == (1, ssize, tuple(mock.size))
# Try a reasonable blocksize
mock = MockReader()
tmp = readall(mock, blocksize = (128, 128, 0))
assert _test_consume(tmp) == (6, ssize, (128, 128, 1001))
assert mock.result == (6, ssize, (128, 128, 1001))
# Try a silly blocksize
mock = MockReader()
tmp = readall(mock, blocksize = (0, 42, 501))
assert _test_consume(tmp) == (16, ssize, (201, 42, 501))
assert mock.result == (16, ssize, (201, 42, 501))
# If blocksize is not set, should end up reading the entire file
# because it is less than 1 GB
mock = MockReader()
tmp = readall(mock, maxbytes=1024*1024*1024)
assert _test_consume(tmp) == (1, ssize, (201, 334, 1001))
assert mock.result == (1, ssize, (201, 334, 1001))
# If blocksize is not set but max size only has room for 3 brick-columns
# we will be reading more in the crossline direction.
mock = MockReader()
tmp = readall(mock, maxbytes = 3*2048*64*64)
assert _test_consume(tmp) == (12, ssize, (64, 128, 1001))
assert mock.result == (12, ssize, (64, 128, 1001))
# Setting the blocksize to zero means as little as possible,
    # which is one brick column unless you want to be really inefficient.
mock = MockReader()
tmp = readall(mock, maxbytes = 0)
assert _test_consume(tmp) == (24, ssize, (64, 64, 1001))
assert mock.result == (24, ssize, (64, 64, 1001))
# Now test setting the chunksize, leaving blocksize unset
# causing it to read our small data in just one read.
mock = MockReader()
tmp = readall(mock, chunksize = (64, 64, 64), maxbytes=1024*1024*1024)
assert _test_consume(tmp) == (4*6*16, ssize, (64, 64, 64))
assert mock.result == (1, ssize, tuple(mock.size))
# As above but set a silly chunksize.
mock = MockReader()
tmp = readall(mock, chunksize = (100, 42, 0), maxbytes=1024*1024*1024)
assert _test_consume(tmp) == (24, ssize, (100, 42, 1001))
assert mock.result == (1, ssize, tuple(mock.size))
# Setting both.
mock = MockReader()
tmp = readall(mock, blocksize = (128, 128, 0), chunksize = (64, 64, 0))
assert _test_consume(tmp) == (24, ssize, (64, 64, 1001))
assert mock.result == (6, ssize, (128, 128, 1001))
# Setting chunksize to more than blocksize has no effect.
mock = MockReader()
tmp = readall(mock, blocksize = (64, 64, 0), chunksize = (128, 199, 0))
assert _test_consume(tmp) == (24, ssize, (64, 64, 1001))
assert mock.result == (24, ssize, (64, 64, 1001))
# A dry run may be used to get the "total" argument to the progress
# report without actually doing any work. Pass a dummy progress
# that saves the total and then returns False to abort.
    # Make sure that (1) the iterator is consumed, so total can be saved,
# and (2) when the iterator is consumed it doesn't actually read.
mock = MockReader()
progress = CountWork(abort=1)
tmp = readall(mock, chunksize = (100, 42, 0), progress=progress)
assert mock.readcount == 0
assert progress.total == 0 # Because iterator not consumed yet.
assert _test_consume(tmp) == (0, 0, (0, 0, 0))
assert mock.result == (0, 0, (0, 0, 0))
assert mock.readcount == 0
assert progress.total == ssize
# The same run, not dry run this time, to test the progress report.
mock = MockReader()
progress = CountWork()
tmp = readall(mock, chunksize = (100, 42, 0), maxbytes=1024*1024*1024, progress=progress)
assert _test_consume(tmp) == (24, ssize, (100, 42, 1001))
assert mock.result == (1, ssize, tuple(mock.size))
#print(progress)
assert progress.count == 26 # one initial, one final, 24 regular.
assert progress.done == ssize
assert progress.total == ssize
# Decimated data with silly chunksize.
mock = MockReader()
tmp = readall(mock, chunksize = (100, 42, 0), lod=1)
assert _test_consume(tmp) == (8, ssize_lod1, (100, 42, 501))
assert mock.result == (1, ssize_lod1, (101, 167, 501))
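# test_iterator checks that readall reads whole blocks from the file and
# hands them to the consumer in smaller chunks. A minimal 1D sketch of
# that two-level split (hypothetical helper; the real readall works in
# three dimensions and rounds the sizes to whole bricks):
def _blocks_then_chunks_sketch(total, blocksize, chunksize):
    for bstart in range(0, total, blocksize):
        bsize = min(blocksize, total - bstart)           # one read() call
        for cstart in range(bstart, bstart + bsize, chunksize):
            csize = min(chunksize, bstart + bsize - cstart)
            yield cstart, csize                          # one consumer call
# E.g. total=1001, blocksize=128, chunksize=64 gives 8 reads handed out
# as 16 chunks, mirroring the blocksize/chunksize cases asserted above.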
def copyall(srcfilename, dstfilename, *, maxbytes=128*1024*1024):
"""
Simple test that exercises the fancy iterator.
Sets chunksize to a silly value to stress the code more.
This is really inefficient and triggers read/modify/write
in the writer.
"""
p1, p2 = (ProgressWithDots(), ProgressWithDots())
with ZgyReader(srcfilename, iocontext = SDCredentials()) as r:
with ZgyWriter(dstfilename, templatename=srcfilename,
iocontext = SDCredentials()) as w:
alldata = readall(r, maxbytes=maxbytes, progress=p1,
chunksize=(100, 100, 0))
for datastart, datasize, data in alldata:
w.write(datastart, data)
w.finalize(progress=p2)
if __name__ == "__main__":
test_iterator()
if len(sys.argv) > 1:
copyall(sys.argv[1],
sys.argv[2],
maxbytes = int(sys.argv[3]) if len(sys.argv) > 3 else None)
# Copyright 2017-2020, Schlumberger
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | zgyio | /zgyio-0.0.4.tar.gz/zgyio-0.0.4/openzgy/test/iterator.py | iterator.py |
import numpy as np
import os
import io
import sys
from ..impl import file as impl_file
from ..exception import *
from ..test.utils import SDCredentials, LocalFileAutoDelete, CloudFileAutoDelete, HasSeismicStore, SDTestData, SDTestSink
def checkThrows(e, msg, fn):
try:
fn()
assert False and "Should have gotten an exception here."
except e or ZgyUserError as ex:
if msg:
if str(ex).find(msg) < 0:
print('Expect "{0}" got "{1}"'.format(msg, str(ex)))
assert str(ex).find(msg) >= 0 and "Wrong exception message"
except AssertionError:
raise
except Exception as ex:
print('Expect "{0}" got "{1}"'.format(e or ZgyUserError, str(type(ex))))
assert False and "Got the wrong type of exception."
class SimpleReceiver:
def __call__(self, data, *args, **kwargs):
self.data = bytes(data)
def _test_FileADT_read(f):
"""
Check contents of a file created by test_SDFile.
The test can be run both on a file descriptor still
open for write and one recently opened read only.
Also it works both for SD and local access.
"""
# Simple interface
data = f.xx_read(7, 9)
assert data == b"continent"
# Scatter/gather interface, a bit more roundabout to use.
# Note: When reading back a SD file while it is still open
# for write, the last (i.e. open) segment will be ' seg\n'.
# This means that the part3 request spans the closed / open
# segment boundary. Testing that corner case is important
    # because the ZGY library will encounter it very rarely and
# only in conjunction with misaligned (compressed) files.
part1 = SimpleReceiver()
part2 = SimpleReceiver()
part3 = SimpleReceiver()
f.xx_readv([(1, 4, part1), (18, 7, part2), (60, 8, part3)])
assert part1.data == b'ello'
assert part2.data == b'this is'
assert part3.data == b'last seg'
assert f.xx_eof == 69
checkThrows(None, "Invalid offset", lambda: f.xx_read(None, 1))
checkThrows(None, "Invalid size", lambda: f.xx_read(0, None))
checkThrows(None, "Invalid size", lambda: f.xx_read(0, 0))
def test_FileADT(filename):
try:
old_pread = os.pread if hasattr(os, "pread") else None
with impl_file.FileFactory(filename, "w+b", "faux-context") as f:
f.xx_write(b"hello, continent, THIS is a test.\n", 0)
f.xx_write(b"hello again.\n", 34)
f.xx_write(b"third block.\n", 47)
f.xx_write(b"hello, continent, this is a test.\n", 0) # update
f.xx_write(b"last sEG\n", 60)
f.xx_write(b"eg", 66)
checkThrows(None, "Invalid offset", lambda: f.xx_write("X", None))
checkThrows(None, "Invalid offset", lambda: f.xx_write("X", -1))
checkThrows(None, "Invalid size", lambda: f.xx_write(None, 0))
checkThrows(None, "Invalid size", lambda: f.xx_write("", 0))
with impl_file.FileFactory(filename, "rb", "faux-context") as f:
assert f.threadsafe
checkThrows(None, "open for writing", lambda: f.xx_write("X", 0))
_test_FileADT_read(f)
if old_pread: del os.pread
with impl_file.FileFactory(filename, "rb", "faux-context") as f:
assert not f.threadsafe
_test_FileADT_read(f)
checkThrows(None, "Opening ZGY as a is not supported.", lambda: impl_file.FileFactory(filename, "a", "faux-context"))
finally:
if old_pread: os.pread = old_pread
def test_SDFile(filename, buffered):
with impl_file.FileFactory(filename, "w+b", SDCredentials(segsize=10/(1024*1024) if buffered else 0)) as f:
assert not f.threadsafe
assert f.xx_eof == 0
f.xx_write(b"hello, continent, THIS is a test.\n", 0)
f.xx_write(b"hello again.\n", 34)
f.xx_write(b"third block.\n", 47)
f.xx_write(b"hello, continent, this is a test.\n", 0) # update
if not buffered:
f.xx_write(b"last seg\n", 60)
else:
f.xx_write(b"last sEG\n", 60)
f.xx_write(b"eg", 66) # writing to open segment
# With segment size 10 and segment 0 unrestricted,
# this is how the blocks should end up in storage.
# "hello, continent, THIS is a test.\n"
# "hello agai"
# "n.\nthird b"
# "lock.\nlast"
# " seg\n"
assert f.xx_eof == 69
checkThrows(ZgySegmentIsClosed, "write resized segment", lambda: f.xx_write(b"X", 34))
checkThrows(ZgySegmentIsClosed, "write part of segment", lambda: f.xx_write(b"X", 35))
checkThrows(ZgyUserError, "segments out of order", lambda: f.xx_write(b"X", 99))
if not buffered:
# When buffered this will be in the open segment, hence ok.
checkThrows(ZgySegmentIsClosed, "write part of segment", lambda: f.xx_write(b"eg", 66))
# The very ZGY specific limitation that all segments except
# first and last must have the same size. Note that this will
# not fail in the buffered test, because the buffering also
# takes care of making all the segments the same size.
checkThrows(ZgyUserError, "arbitrarily sized seg", lambda: f.xx_write(b"X", 69))
# It should be possible to read back the file, even if some of it
# is still sitting in the open segment. The test below also checks
# reading across the closed / open segment boundary when buffered.
_test_FileADT_read(f)
with impl_file.FileFactory(filename, "rb", SDCredentials()) as f:
assert f.threadsafe
assert f.xx_eof == 69
if not buffered:
assert tuple(f._sizes) == (34, 13, 13, 9)
else:
assert tuple(f._sizes) == (34, 10, 10, 10, 5)
_test_FileADT_read(f)
# Test a read that crosses segment boundaries.
slurp = f.xx_read(18, 45)
assert slurp == b"this is a test.\nhello again.\nthird block.\nlas"
# Test a read of a single, complete segment. There is a short cut.
slurp = f.xx_read(34, 13)
assert slurp == b"hello again.\n"
# Test the readv interface in the same way.
delivery = SimpleReceiver()
f.xx_readv([(18, 45, delivery)])
assert delivery.data == b"this is a test.\nhello again.\nthird block.\nlas"
delivery = SimpleReceiver()
f.xx_readv([(34, 13, delivery)])
        assert delivery.data == b"hello again.\n"
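# test_SDFile relies on how a global file offset maps to a (segment,
# local offset) pair once the file is split into SD segments such as
# (34, 10, 10, 10, 5). A minimal sketch of that mapping (hypothetical
# helper, not the actual logic in impl.file):
def _segment_for_offset_sketch(offset, sizes):
    seg = 0
    for size in sizes:
        if offset < size:
            return seg, offset        # local offset inside this segment
        offset -= size
        seg += 1
    raise ZgyEndOfFile("offset is past the end of the file")
# Example: with sizes (34, 10, 10, 10, 5) the 8-byte read at offset 60
# in _test_FileADT_read starts in segment 3 at local offset 6 and spills
# into segment 4, which is why xx_read must stitch segments together.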
def test_SDFileReadPastEOF(filename):
"""
Reading a block that crosses EOF should throw.
"""
with impl_file.FileFactory(filename, "rb", SDCredentials()) as f:
parts = list([SimpleReceiver() for i in range(8)])
BS = 16 * 256 * 1024
checkThrows(ZgyEndOfFile, None, lambda: (
f.xx_readv([(0*BS, BS, parts[0]),
(1*BS, BS, parts[1]),
(2*BS, BS, parts[2]),
(3*BS, BS, parts[3])])
))
checkThrows(ZgyEndOfFile, None, lambda: (
f.xx_readv([(3*BS, BS, parts[7])])))
def test_Consolidate():
"""
The logic for consolidating multiple requests into one is tricky
enough to warrant a separate unit test.
"""
class Want:
def __init__(self, offset, size):
if offset < 100000:
# EOF is at 100000, we never get more than that.
self._offset = offset
self._size = min(offset + size, 100000) - offset
else:
# At or past EOF, we don't get any data at all.
self._offset = None
self._size = 0
self.called = 0
def __call__(self, data):
self.called += 1
data = np.frombuffer(data, dtype=np.uint8)
if False:
print("Delivery", (data[0], len(data)),
"expected", (self._offset, self._size),
"->", (self._offset % 251, self._size))
assert self.called == 1
assert len(data) == self._size
if len(data):
assert data[0] == self._offset % 251
def test(requests, *args, **kwargs):
# Add a functor that remembers what offset and size we ordered,
# so that this can be checked when data gets delivered.
requests = list([(offset, size, Want(offset, size)) for offset, size, _ in requests])
# Invoke the function we are testing.
result = impl_file.FileADT._consolidate_requests(requests, *args, **kwargs)
# Make a delivery of data where each element contains the absolute
# offset of that element. Unless we pass our EOF marker.
end_of_file = 100000
for r in result:
data = np.arange(r[0], min(r[0] + r[1], end_of_file), dtype=np.uint32)
# Make it fit in a byte. Use a prime number modulus.
data = (data % 251).astype(np.uint8)
r[2](data.tobytes())
# Check that each of our callbacks actually got called.
for offset, size, fn in requests:
assert fn.called == 1
# Strip off the forward_result functor for easier checking of results.
# Also convert to a tuple for the same reason.
result = tuple([(e[0], e[1], None) for e in result])
# Also strip functors off the input request, only for the below print()
requests = tuple([(e[0], e[1], None) for e in requests])
#print("in ", requests)
#print("out", result)
return result
# Hole too large, no consolidation.
requests = ((2, 98, None), (1000, 42, None))
out = test(requests, max_hole = 300, max_size = None, force_align = None)
assert out == requests
# Same input, hole now deemed small enough.
out = test(requests, max_hole = 1000, max_size = None, force_align = None)
assert len(out) == 1
assert out[0] == (2, 1040, None)
# Same input, Hole too large, alignment was added.
out = test(requests, max_hole = 300, max_size = None, force_align = 100)
assert out == ((0, 100, None), (1000, 100, None))
# Same input, hole now deemed small enough, alignment was added.
out = test(requests, max_hole = 1000, max_size = None, force_align = 100)
assert out[0] == (0, 1100, None)
# max_size was exceeded.
# Splitting is not very smart; see comments in source code.
requests = list([(i, 70, None) for i in range(0, 66000, 100)])
out = test(requests, max_hole = 300, max_size = 64000, force_align = None)
assert out == ((0, 64000-30, None), (64000, 2000-30, None))
# Requests larger than max_size
requests = ((0, 1000, None), (1000, 2000, None), (3000, 500, None))
out = test(requests, max_hole = 300, max_size = 10, force_align = None)
assert out == requests
# Request passes EOF
requests = ((90000, 8000, None), # Inside
(98000, 5000, None), # Crosses EOF
(103000, 5000, None), # Past EOF
)
out = test(requests, max_hole = 1000000, max_size = None, force_align = None)
assert out == ((90000, 18000, None),)
# Ditto but no consolidation, due to a small max_size.
# No clipping needed in this case, as the lowest level
# read will handle that.
out = test(requests, max_hole = 300, max_size = 10, force_align = None)
assert out == requests
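# A minimal sketch of the core merging rule that test_Consolidate
# exercises: neighbouring requests are merged when the hole between
# them is at most max_hole and the merged request stays within
# max_size. Hypothetical helper only; it ignores force_align, EOF
# clipping and the delivery functors handled by the real
# FileADT._consolidate_requests.
def _consolidate_sketch(requests, max_hole, max_size):
    """requests: sorted list of (offset, size) pairs; returns merged list."""
    result = []
    for offset, size in requests:
        if result:
            prev_off, prev_size = result[-1]
            hole = offset - (prev_off + prev_size)
            new_size = (offset + size) - prev_off
            if hole <= max_hole and (max_size is None or new_size <= max_size):
                result[-1] = (prev_off, new_size)
                continue
        result.append((offset, size))
    return result
# E.g. _consolidate_sketch([(2, 98), (1000, 42)], 1000, None) returns
# [(2, 1040)], matching the second case tested above, while the same
# input with max_hole=300 is left as two separate requests.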
if __name__ == "__main__":
np.seterr(all='raise')
with LocalFileAutoDelete("testfile.dat") as fn:
test_FileADT(fn.name)
if HasSeismicStore():
with CloudFileAutoDelete("openzgy-1.dat", SDCredentials()) as fn:
test_SDFile(fn.name, False)
with CloudFileAutoDelete("openzgy-2.dat", SDCredentials()) as fn:
test_SDFile(fn.name, True)
test_SDFileReadPastEOF(SDTestData("Synt2.zgy"))
test_Consolidate()
sys.exit(0)
# Copyright 2017-2020, Schlumberger
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | zgyio | /zgyio-0.0.4.tar.gz/zgyio-0.0.4/openzgy/test/file.py | file.py |
import numpy as np
from enum import Enum
from ..impl.lodalgo import decimate, decimate8, DecimationType
from ..impl.histogram import HistogramData
def _make8(brick, special = None):
"""
    Expand the input brick to double size, with each sample in the input
    ending up in a 2x2x2 neighborhood with similar values (in the range
    sample_in..sample_in+6, average sample_in+3). Then chop the result into
    8 bricks having the same size as the original.
Note, if using int8 then make sure the input values are at most 121.
"""
half = brick.shape
full = (half[0]*2, half[1]*2, half[2]*2)
tmp = np.zeros(full, dtype=brick.dtype)
tmp[0::2, 0::2, 0::2] = brick
tmp[0::2, 0::2, 1::2] = brick + 1
tmp[0::2, 1::2, 0::2] = brick + 2
tmp[0::2, 1::2, 1::2] = brick + 3
tmp[1::2, 0::2, 0::2] = brick + 4
tmp[1::2, 0::2, 1::2] = brick + 5
tmp[1::2, 1::2, 0::2] = brick + 6
tmp[1::2, 1::2, 1::2] = brick + 3 if special is None else special
bricks = []
bricks.append(tmp[:half[0], :half[1], :half[2]].copy())
bricks.append(tmp[:half[0], :half[1], half[2]:].copy())
bricks.append(tmp[:half[0], half[1]:, :half[2]].copy())
bricks.append(tmp[:half[0], half[1]:, half[2]:].copy())
bricks.append(tmp[half[0]:, :half[1], :half[2]].copy())
bricks.append(tmp[half[0]:, :half[1], half[2]:].copy())
bricks.append(tmp[half[0]:, half[1]:, :half[2]].copy())
bricks.append(tmp[half[0]:, half[1]:, half[2]:].copy())
return bricks
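# _make8 builds a double-resolution cube where every 2x2x2 neighborhood
# averages to input+3, so Average decimation is expected to return
# input+3. A minimal sketch of plain 2x2x2 mean decimation for
# reference (hypothetical helper, not the library's decimate(); no NaN
# or histogram handling):
def _average_decimate_sketch(cube):
    ni, nj, nk = cube.shape        # all three dimensions must be even
    blocks = cube.reshape(ni // 2, 2, nj // 2, 2, nk // 2, 2)
    return blocks.mean(axis=(1, 3, 5))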
def testLodAlgorithmsWithData(b0):
"""
This will be run once with int8 data and once with float32
data including NaNs. For most of the algorithms the expected
result will be the same. In the int8 case the NaN values are
replaced with a sample equal to the average of the other
samples in that 2x2x2 region.
"""
is_float = not np.issubdtype(b0.dtype, np.integer)
bricks = _make8(b0, np.nan if is_float else None)
# Make a variant of the test data that has one zero in each 2x2x2
# neighborhood instead of a NaN, and that has no other zeros.
b0x = b0.copy()
b0x[b0x <= 0] = 1
bricks_with_0 = _make8(b0x, 0)
histogram = HistogramData(dtype=b0.dtype)
histogram.add(bricks)
result_LowPass = decimate8(bricks, DecimationType.LowPass)
result_WeightedAverage = decimate8(bricks, DecimationType.WeightedAverage, histogram = histogram, defaultvalue = np.nan)
result_Average = decimate8(bricks, DecimationType.Average)
result_Median = decimate8(bricks, DecimationType.Median)
result_Minimum = decimate8(bricks, DecimationType.Minimum)
result_Maximum = decimate8(bricks, DecimationType.Maximum)
result_Decimate = decimate8(bricks, DecimationType.Decimate)
result_AllZero = decimate8(bricks, DecimationType.AllZero)
result_AvgNon0 = decimate8(bricks_with_0, DecimationType.AverageNon0)
assert np.all(result_Average == b0 + 3)
assert np.all(result_Median == b0 + 3)
assert np.all(result_Minimum == b0)
assert np.all(result_Maximum == b0 + 6)
assert np.all(result_Decimate == b0)
assert np.all(result_AllZero == np.zeros_like(bricks[0]))
assert np.all(result_AvgNon0 == b0x + 3)
# Expected result for LowPass is too difficult to compute if
# a random cube is input. For slowly varying data it ought to
# be similar to the Decimate case. And for all-constant data
# the output should be that same constant.
# The same reasoning applies to WeightedAverage.
def testLodAlgorithms():
bricksize = (64, 64, 64)
b0 = (np.random.random(bricksize) * 201 - 100).astype(np.int32)
#print(b0[0,0,:])
testLodAlgorithmsWithData(b0.astype(np.int8))
testLodAlgorithmsWithData(b0.astype(np.float32))
def testLowpassLodAlgorithm():
bricksize = (64, 64, 64)
b0 = np.arange(np.product(bricksize), dtype=np.float64).reshape(bricksize)
#b0.fill(42)
bricks = _make8(b0, None) # TODO-Test also test NaN handling?
lowpass = decimate8(bricks, DecimationType.LowPass)
delta = lowpass - b0
error = np.sqrt(np.average(delta * delta))
#print("input ", bricks[0][0,0,:])
#print("lowpass", lowpass[0,0,:])
#print("error ", error)
assert error < 0.1
b0.fill(42)
for b in bricks: b.fill(42)
lowpass = decimate8(bricks, DecimationType.LowPass)
delta = lowpass - b0
error = np.sqrt(np.average(delta * delta))
#print("input ", bricks[0][0,0,:])
#print("lowpass", lowpass[0,0,:])
#print("error ", error)
assert error < 0.0001
brick = [0.5, 0.1, 0.01] + 23*[0] + [1] + 37*[0]
brick = np.array(4*brick)
brick = brick.reshape((2, 2, len(brick)//4))
d = decimate(brick, DecimationType.LowPass)
# This input is simple enough that it makes sense to just
# eyeball it. Not adding any asserts here.
#print(brick[0,0,:])
#print(d[0,0,:])
def testSpecial(dtype=np.float32, last=0):
"""
There is some overlap with testLodAlgorithmsWithData(), but this
test also does the newly added WeightedAverage algorithm which is
important for Petrel.
"""
cast = np.dtype(dtype).type
cube = np.zeros((6, 10, 20), dtype=dtype)
nibble = np.array([250, 42, 42, -1, -1, -1, 0, last], dtype=dtype)
cube[:2,:2,:2] = nibble.reshape((2,2,2))
# I want a total of 1*250, 5*42, 100*-1, 1094*0
cube[2,0,:3] = 42
cube[3,:10,:10] = -1 # add 100
cube[3,0,:3] = 0 # and remove 3
histogram = HistogramData(range_hint=(-3,+252), dtype=dtype)
if dtype == np.int16:
# Need a 1:1 mapping of inregral input data to bins
histogram._size = 65536
histogram._bins = np.zeros(histogram._size, dtype=np.int64)
histogram.add(cube)
# Expected weighted average computed by hand.
s = 250/1 + 2*42/5 - 3*1/100 + 2*0/1094
w = 1/1 + 2/5 + 3/100 + 2/1094
if not np.isfinite(last): w -= 1/1094
expect_wavg = s / w
# Expected lowpass was just extracted from a previous run.
expect_lowpass = 141.996475
expect = {
DecimationType.LowPass: expect_lowpass,
DecimationType.WeightedAverage: expect_wavg,
DecimationType.Average: np.nanmean(nibble),
DecimationType.Median: 0,
DecimationType.Minimum: -1,
DecimationType.Maximum: max(250, last),
DecimationType.MinMax: None,
DecimationType.Decimate: 250,
DecimationType.DecimateSkipNaN: None,
DecimationType.DecimateRandom: None,
DecimationType.AllZero: 0,
DecimationType.WhiteNoise: None,
DecimationType.MostFrequent: None,
DecimationType.MostFrequentNon0: None,
DecimationType.AverageNon0: None, # Set below
}
# expect0 is the output when all 8 inputs are zero.
expect0 = { k: (None if v is None else 0) for k, v in expect.items() }
expect0[DecimationType.AverageNon0] = 99 # defaultvalue because all 0
expect[DecimationType.AverageNon0] = np.mean([250, 42, 42, -1, -1, -1])
if last == np.inf:
# last entry is now in upper part instead of lower part.
# The two elements at the center are 42 and 0, so the median
# is reported as the average of those two.
expect[DecimationType.Median] = 21
actual = {}
actual0 = {}
for mode in expect:
if expect[mode] is not None:
if mode == DecimationType.WeightedAverage:
a = decimate(cube, mode, histogram=histogram, defaultvalue=0)
elif mode == DecimationType.AverageNon0:
a = decimate(cube, mode, defaultvalue = 99)
else:
a = decimate(cube, mode)
actual[mode] = a[0,0,0]
actual0[mode] = a[2,0,0]
errors = 0
for mode in expect:
if expect[mode] is not None:
if not np.isclose(cast(expect[mode]), cast(actual[mode])):
print("Error in decimate {0}: expect {1} got {2}".format(
mode.name, cast(expect[mode]), cast(actual[mode])))
errors += 1
elif not np.isclose(cast(expect0[mode]), cast(actual0[mode])):
print("Error in decimate#2 {0}: expect {1} got {2}".format(
mode.name, cast(expect0[mode]), cast(actual0[mode])))
errors += 1
#else:
# print("Decimation {0}: got {1} and {2}".format(
# mode.name, cast(actual[mode]), cast(actual0[mode])))
assert not errors
def numpyBugs():
"""
I am fairly sure this is a bug in numpy. In operations on masked
arrays a value of np.inf is sometimes treated as masked and sometimes
not. The behavior is repeatable but changing an expression in a very
minor fashion may change the result. Bottom line, try to avoid code
that depends on this "feature".
    The unit test contains asserts against the currently observed
behavior in numpy 1.18.2. If the test starts failing after a numpy
upgrade then this might mean that the bug has been fixed.
"""
data1d = np.array([250, 42, 42, -1, -1, -1, 0, np.inf])
data1d = np.ma.masked_equal(data1d, 0)
data2d = data1d.reshape(-1,1)
out1d = np.mean(data1d)
out2d = np.mean(data2d, 0)[0]
weird = np.mean(data2d, 1)
def typename(x): return type(x).__module__ + "." + type(x).__name__
#print(out1d, typename(out1d))
#print(out2d, typename(out2d))
#print(weird)
# mean of 1d array with both masked elements and +inf.
# Expected and actual results are positive infinity.
assert str(out1d) == "inf"
# mean over a single dimension of a 2d array, where the
    # other dimension is 1. Effectively the exact same data
# except the result will be an array of one element.
# This is NOT the case. The resulting value is masked.
assert str(out2d) == "--"
# Trying to do a mean over the other dimension, giving
# 8 "mean" values where the input for each of them is
# just a single value. Expected result is the original
# data. Observed result is that the np.inf has been
# replaced with a masked value.
assert str(weird) == "[250.0 42.0 42.0 -1.0 -1.0 -1.0 -- --]"
# To add to the confusion, if the out1d array is still a
# masked array but starts out with nothing masked then the
# behavior of the first case will change. Now this will
# also return a masked value instead of inf.
data1d = np.array([250, 42, 42, -1, -1, -1, 2, np.inf])
data1d = np.ma.masked_equal(data1d, 0)
out1d = np.mean(data1d)
#print(out1d)
assert str(out1d) == "--"
if __name__ == "__main__":
np.seterr(all='raise')
testLodAlgorithms()
testLowpassLodAlgorithm()
testSpecial(np.int16)
testSpecial(np.float32)
testSpecial(np.float32, np.nan)
testSpecial(np.float32, np.inf)
numpyBugs()
# Copyright 2017-2020, Schlumberger
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | zgyio | /zgyio-0.0.4.tar.gz/zgyio-0.0.4/openzgy/test/lodalgo.py | lodalgo.py |
import os, time
import numpy as np
from .. import api
# If SD tests cannot run then in most cases treat this as a warning only.
# There is not much point in letting dozens of tests fail for the same
# reason. Once Seismic Store is fully supported, make one and only one
# test that fails if we cannot connect.
#
# Tests do not hard code credentials, so either check that the fallback
# environment has been set, or explicitly pick those up here.
try:
import sdglue as sd
except Exception as ex:
print("seismic store access via sdglue is not available:", ex)
sd = None
if sd and not (os.getenv("OPENZGY_SDURL") and os.getenv("OPENZGY_SDAPIKEY")):
print("seismic store access requires $OPENZGY_SDURL and $OPENZGY_SDAPIKEY")
sd = None
def SDTestData(name):
"""
Return the full path of a test file stored in Seismic Store.
The default sd:// uri is hard coded but may be overridden by
setting $OPENZGY_SDTESTDATA. The expected test data must have
been uploaded to that location. The tests need read access and
should preferably not have write access. If name is empty then
just return the prefix without a trailing slash.
"""
project = os.getenv("OPENZGY_SDTESTDATA", "sd://sntc/testdata")
if project and project[-1] == "/": project = project[:-1]
return project + "/" + name if name else project
def SDTestSink(name):
"""
Return the full path to where in Seismic Store the specified test
output should be stored. The default sd:// uri is hard coded but
may be overridden by setting $OPENZGY_SDTESTSINK. The tests need
read/write access. If name is empty then just return the prefix
without a trailing slash.
"""
project = os.getenv("OPENZGY_SDTESTSINK", "sd://sntc/testsink/d")
if project and project[-1] == "/": project = project[:-1]
return project + "/" + name if name else project
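# Examples with the defaults above: SDTestData("Synt.zgy") returns
# "sd://sntc/testdata/Synt.zgy" and SDTestSink("out.zgy") returns
# "sd://sntc/testsink/d/out.zgy". The environment variables override this.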
def SDCredentials(**kwargs):
"""
Convenience to hard code credentials for testing. Returns a dict.
Picking up sdurl/sdapikey from the environment is redundant since
the library already does this as a fallback.
"""
result = {
"sdurl": os.getenv("OPENZGY_SDURL", ""),
"sdapikey": os.getenv("OPENZGY_SDAPIKEY", ""),
"sdtoken": os.getenv("OPENZGY_TOKEN", "FILE:carbon.slbapp.com")
}
for k, v in kwargs.items():
result[k] = v
return result
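# A minimal usage sketch (not called by any test): the dict returned by
# SDCredentials() is what gets passed as the iocontext when opening a file
# on seismic store. The sd:// path below is a made-up placeholder.
def _exampleOpenWithCredentials(filename = "sd://example/folder/file.zgy"):
    with api.ZgyReader(filename, iocontext = SDCredentials()) as reader:
        return reader.size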
def HasSeismicStore():
"""
Used to enable or disable unit tests based on what software is loaded.
TODO-Low structure the tests better, to make it less likely to need this.
"""
return not sd is None
def HasZFPCompression():
"""
Used to enable or disable unit tests based on what software is loaded.
TODO-Low structure the tests better, to make it less likely to need this.
"""
return ("ZFP" in api.ZgyKnownCompressors() and
"ZFP" in api.ZgyKnownDecompressors())
class TempFileAutoDelete:
"""
Arrange for this explicitly named temporary file to be deleted when
the instance goes out of scope. Works both for seismic store and locally.
    A missing file is considered an error, since it means the test somehow
    failed to create the file. If the test wants to delete the file itself then
it should call disarm() after the deletion.
If an exception is already pending then a missing or undeletable file
only causes a warning to be printed. The pending exception is presumably
more important.
There is a fairly harmless race condition between the creation of the
instance and the actual creation of the file. If an exception is thrown
in that interval then a message is printed about not being able to delete
the file. You may alternatively pass armed=False to the constructor and
then invoke arm() at the exact point you know the file exists. Good luck.
If you get an exception when creating a file on the cloud, you might not
know whether the file got created or not.
The recommended naming convention is to use both the current time and
a per-process random number as a prefix. This makes it simpler to
    clean up files that in spite of the auto delete got left behind.
The class can also be used for files intended to be persistent, by
ensuring the file gets deleted if any error happened while creating
it. Call disarm() only when you are sure you want to keep it.
At that point your file is no longer considered temporary.
Init takes a "silent" parameter which is only intended for unit tests.
"""
def __init__(self, name, iocontext = None, *, armed = True, silent = False):
self._name = name
self._iocontext = iocontext
self._silent = silent
self._armed = armed
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
if self._armed and self._name:
self.remove(self._name, self._iocontext,
raise_on_error = False if type else True,
silent = self._silent)
self._armed = False
#print("AUTO-DELETE", self._name)
#else:
# print("AUTO-DELETE was disarmed.")
def disarm(self):
"""
Tell the instance to not try to delete the file. Either because we
deleted it ourselves, or because we for some reason decided to keep it,
        or we want to guard against exceptions during create by temporarily
disarming and then arming the file.
"""
#print("DISARM", self._name)
self._armed = False
def arm(self):
"""
Tell the instance to try to delete the file on exit.
"""
#print("ARM", self._name)
self._armed = True
@property
def name(self):
return self._name
@staticmethod
def remove(name, iocontext = None, raise_on_error = True, silent = False):
"""
Delete a file locally or in seismic store.
Not using openzgy.api.ZgyUtils.delete() because that method
        will always suppress "file not found" errors. Don't copy this approach
        unless you are ok with the explicit sdapi dependency.
"""
credentials = ("", "", "")
if iocontext:
try:
credentials = (iocontext["sdurl"],
iocontext["sdapikey"],
iocontext["sdtoken"])
except (TypeError, KeyError):
credentials = (iocontext.sdurl,
iocontext.sdapikey,
iocontext.sdtoken)
if name:
try:
if name and name[:5] == "sd://":
if sd is not None:
with sd.SdUtil(credentials) as u:
u.delete(name)
else:
os.remove(name)
except Exception as ex:
if not silent:
print("WARNING: Cannot AUTO-DELETE", name, str(type(ex)), str(ex))
if raise_on_error:
raise
@staticmethod
def _randomname():
try:
with open("/dev/urandom", "rb") as random:
n = np.frombuffer(random.read(4), dtype=np.uint32)[0]
except FileNotFoundError: # Probably on a windows box.
            # Use os.urandom here; the "random" module is not imported and
            # would in any case be shadowed by the "as random" name above.
            n = np.frombuffer(os.urandom(4), dtype=np.uint32)[0]
return "{0:08x}-".format(n)
class LocalFileAutoDelete(TempFileAutoDelete):
"""
As TempFileAutoDelete, but explicitly requesting a local file with
    a random prefix in front of the name.
"""
_prefix = "tmp-{0:08x}-".format(int(time.time()))
_prefix = os.path.join(os.getenv("TESTRUNDIR", '.'), _prefix)
def __init__(self, suffix, *, silent = False):
name = self._prefix + self._randomname() + suffix
super().__init__(name, None, silent=silent)
class CloudFileAutoDelete(TempFileAutoDelete):
"""
As TempFileAutoDelete, but explicitly requesting a file on the
    seismic store with a random prefix in front of the name.
"""
_prefix = SDTestSink(None)
_prefix += "/tmp-{0:08x}-".format(int(time.time()))
def __init__(self, suffix, iocontext,*, silent = False):
name = self._prefix + self._randomname() + (suffix or "tmp.tmp")
super().__init__(name, iocontext, silent=silent)
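# A minimal usage sketch (not called by any test) of the auto-delete pattern
# described in TempFileAutoDelete. The file name and survey size are
# placeholders; the point is the with-block and the optional disarm().
def _exampleTempFilePattern(keep = False):
    with LocalFileAutoDelete("example.zgy") as fn:
        with api.ZgyWriter(fn.name, size = (64, 64, 64)) as writer:
            writer.finalize()
        if keep:
            fn.disarm() # The file is now considered permanent.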
# Copyright 2017-2020, Schlumberger
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | zgyio | /zgyio-0.0.4.tar.gz/zgyio-0.0.4/openzgy/test/utils.py | utils.py |
import numpy as np
import os
import io
import sys
import math
from ..impl import bulk as impl_bulk
from ..impl import meta as impl_meta
from ..impl import file as impl_file
from ..impl import stats as impl_stats
from ..impl import enum as impl_enum
from ..impl import histogram as impl_histogram
from .. import api as openzgy
from ..exception import *
from ..test.utils import SDCredentials, LocalFileAutoDelete
def test_ScaleToStorage():
"""
Unit test for ZgyInternalBulk._scaleToStorage.
"""
fn = impl_bulk.ZgyInternalBulk._scaleToStorage
codingrange = (-10.0, +30.0)
# Converting the min- and max of the coding range should yield
# the first and last valid storage number.
assert fn(np.float32(-10.0), codingrange, np.int8) == -128
assert fn(np.float32(+30.0), codingrange, np.int8) == 127
assert fn(np.float32(-10.0), codingrange, np.uint16) == 0
assert fn(np.float32(+30.0), codingrange, np.uint16) == 65535
# Outside valid range should silently clip.
assert fn(np.float32(-15.0), codingrange, np.int8) == -128
assert fn(np.float32(+35.0), codingrange, np.int8) == 127
# Conversion should round to nearest. For this coding range
# each slot covers a bit more than +/- 0.0783
assert fn(np.float32(-10.0 + 0.0783), codingrange, np.int8) == -128
assert fn(np.float32(-10.0 + 0.0786), codingrange, np.int8) == -127
assert fn(np.float32(-10.0 + 3*0.0783), codingrange, np.int8) == -127
assert fn(np.float32(-10.0 + 3*0.0786), codingrange, np.int8) == -126
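# For reference, the mapping exercised above is a plain linear transform.
# Hedged sketch; the authoritative code is ZgyInternalBulk._scaleToStorage:
#   storage = clip(round((value - lo) / (hi - lo) * (imax - imin)) + imin)
# With codingrange (-10, 30) and int8 one storage step covers 40/255, about
# 0.1569, so values less than roughly 0.0784 above -10.0 still round down
# to -128. That is why the asserts probe with 0.0783 and 0.0786.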
def test_LookupTables():
"""
Unit test for the following related functions:
_get{Brick,Alpha}FilePosition()
_get{Brick,Alpha}LookupIndex()
_validatePosition()
lod out of range
ijk out of range for lod 0
ijk out of range for lod N, would not have been if lod == 0
    Calls these functions to get mocked data; these are assumed to be tested
    elsewhere, or to be so trivial that they don't need testing.
"""
#test all-constant brick (both types) and missing brick
# Arrange
class MockInternalMeta: pass
class MockInfoHeader: pass
mock = MockInternalMeta()
mock._ih = MockInfoHeader()
mock._ih._size = (112, 64, 176) # bricks: (2, 1, 3)
mock._ih._bricksize = (64, 64, 64)
mock._ih._lodsizes = impl_meta._CalcLodSizes(mock._ih._size, mock._ih._bricksize)
mock._ih._nlods = len(mock._ih._lodsizes)
mock._ih._brickoffsets = impl_meta._CalcLutOffsets(mock._ih._lodsizes, False)
mock._ih._alphaoffsets = impl_meta._CalcLutOffsets(mock._ih._lodsizes, True)
mock._blup = [0] * mock._ih._brickoffsets[mock._ih._nlods]
mock._alup = [0] * mock._ih._alphaoffsets[mock._ih._nlods]
assert mock._ih._nlods == 3
assert type(mock._ih._lodsizes) == tuple
assert type(mock._ih._lodsizes[0]) == tuple
assert tuple(mock._ih._lodsizes[0]) == (2, 1, 3)
assert tuple(mock._ih._lodsizes[1]) == (1, 1, 2)
assert tuple(mock._ih._lodsizes[2]) == (1, 1, 1)
def test_throws(e, f):
try:
f(None)
return False
except e:
return True
def test_MapEnums():
"""
Unit test for conversion between various internal and api enums.
"""
from ..impl import enum as internal
# (internal) RawDataType to (public) SampleDataType
# Errors returned as unknown because the code should
# be tolerant with regards to garbage in the file.
assert openzgy._map_DataTypeToSampleDataType(internal.RawDataType.SignedInt8) == openzgy.SampleDataType.int8
assert openzgy._map_DataTypeToSampleDataType(internal.RawDataType.SignedInt16) == openzgy.SampleDataType.int16
assert openzgy._map_DataTypeToSampleDataType(internal.RawDataType.Float32) == openzgy.SampleDataType.float
# Maps to unknown because I have not added this one to the enum
assert openzgy._map_DataTypeToSampleDataType(internal.RawDataType.UnsignedInt8) == openzgy.SampleDataType.unknown
# (public) SampleDataType to (internal) RawDataType
# Errors should raise exceptions.
assert openzgy._map_SampleDataTypeToDataType(openzgy.SampleDataType.int8) == internal.RawDataType.SignedInt8
assert openzgy._map_SampleDataTypeToDataType(openzgy.SampleDataType.int16) == internal.RawDataType.SignedInt16
assert openzgy._map_SampleDataTypeToDataType(openzgy.SampleDataType.float) == internal.RawDataType.Float32
# (public) UnitDimension to (internal) Horizontal- and Vertical dim
# Errors should raise exceptions.
assert openzgy._map_UnitDimensionToHorizontalDimension(openzgy.UnitDimension.length) == internal.RawHorizontalDimension.Length
assert openzgy._map_UnitDimensionToHorizontalDimension(openzgy.UnitDimension.arcangle) == internal.RawHorizontalDimension.ArcAngle
# time is not allowed horizontally.
assert test_throws(openzgy.ZgyUserError, lambda x: openzgy._map_UnitDimensionToHorizontalDimension(openzgy.UnitDimension.time))
assert openzgy._map_UnitDimensionToVerticalDimension(openzgy.UnitDimension.length) == internal.RawVerticalDimension.Depth
assert openzgy._map_UnitDimensionToVerticalDimension(openzgy.UnitDimension.time) == internal.RawVerticalDimension.SeismicTWT
# arcangle not allowed vertically.
assert test_throws(openzgy.ZgyUserError, lambda x: openzgy._map_UnitDimensionToVerticalDimension(openzgy.UnitDimension.arcangle))
# (internal) Horizontal- and Vertical dim to (public) UnitDimension
# Errors should return unknown, but if there was a bad tag it should
# have been mapped to unknown already.
assert openzgy._map_VerticalDimensionToUnitDimension(internal.RawVerticalDimension.SeismicTWT) == openzgy.UnitDimension.time
assert openzgy._map_VerticalDimensionToUnitDimension(internal.RawVerticalDimension.SeismicOWT) == openzgy.UnitDimension.time
assert openzgy._map_VerticalDimensionToUnitDimension(internal.RawVerticalDimension.Depth) == openzgy.UnitDimension.length
assert openzgy._map_VerticalDimensionToUnitDimension(internal.RawVerticalDimension.Unknown) == openzgy.UnitDimension.unknown
assert openzgy._map_HorizontalDimensionToUnitDimension(internal.RawHorizontalDimension.Length) == openzgy.UnitDimension.length
assert openzgy._map_HorizontalDimensionToUnitDimension(internal.RawHorizontalDimension.ArcAngle) == openzgy.UnitDimension.arcangle
assert openzgy._map_HorizontalDimensionToUnitDimension(internal.RawHorizontalDimension.Unknown) == openzgy.UnitDimension.unknown
del internal
def test_FormatDescription():
for verbose in (0, 1, 2):
with io.StringIO() as stream:
impl_meta.checkAllFormats(verbose=verbose, file=stream)
# The part of checkAllFormats that would dump the definitions
# in C++ or HTML have been moved to openzgy/tools/cppmeta.py.
            # The checks are now not expected to output anything.
if verbose == 0: assert len(stream.getvalue()) == 0
if verbose > 0: assert len(stream.getvalue()) == 0
# No test on the result, only see that it doesn't crash.
#print("FORMATS TYPE", verbose, stream.getvalue(), "@END")
def test_Statistics():
stats = impl_stats.StatisticData()
# adding scalars
# -NOT. StatisticData() changed to only accept numpy arrays.
stats += np.array(42)
stats += np.array(2)
stats += np.array(np.inf)
stats += np.array(-np.inf)
stats += np.array(-np.nan)
#print(repr(stats))
assert stats._cnt == 2
assert stats._inf == 3
assert stats._sum == 42 + 2
assert stats._ssq == 42*42 + 2*2
assert stats._min == 2
assert stats._max == 42
# adding arrays
a = np.array([42, 2, np.inf, -np.inf, np.nan])
stats2 = impl_stats.StatisticData()
stats2 += a
assert stats == stats2
# Multiply by integral factor
stats *= 3
stats2 += a
stats2 += a
assert stats == stats2
# Add two instances
stats = stats + 3 * (impl_stats.StatisticData() + a)
stats2 = stats2 + stats2
# Scaling
a = np.array([1, 0, 2, 3, 42], dtype=np.int8)
stats = impl_stats.StatisticData()
stats2 = impl_stats.StatisticData()
stats += a
stats2 += np.array([3.14*x + 2.72 for x in a])
# old signature: stats.scale(0, 1, 2.72, 3.14+2.72)
stats.scale(3.14, 2.72)
#print(repr(stats))
#print(repr(stats2))
assert stats._cnt == stats2._cnt
assert stats._inf == stats2._inf
assert math.isclose(stats._sum, stats2._sum)
assert math.isclose(stats._ssq, stats2._ssq)
assert math.isclose(stats._min, stats2._min)
assert math.isclose(stats._max, stats2._max)
def alltiles(reader):
for lod in range(reader.nlods):
size = reader.brickcount[lod]
for ii in range(size[0]):
for jj in range(size[1]):
yield ii, jj, lod
def allbricks(reader):
for lod in range(reader.nlods):
size = reader.brickcount[lod]
for ii in range(size[0]):
for jj in range(size[1]):
for kk in range(size[2]):
yield ii, jj, kk, lod
def _typename(t):
return t.__name__ if t.__module__ is None or t.__module__ == type.__module__ else t.__module__ + "." + t.__name__
def test_bricksize(filename):
with openzgy.ZgyReader(filename, iocontext = SDCredentials()) as reader:
if False:
print("Opened file size", reader.size, "bricks", reader.brickcount)
print(list(allbricks(reader)))
for ii, jj, kk, lod in allbricks(reader):
ix = reader._accessor._getBrickLookupIndex(ii, jj, kk, lod)
raw_beg = reader._accessor._metadata._blup._lookup[ix]
raw_end = reader._accessor._metadata._blup._lookend[ix]
if False:
print("begin: {0}({1}), end: {2}({3})".format(
_typename(type(raw_beg)), hex(raw_beg),
_typename(type(raw_end)), hex(raw_end)))
beg, size = reader._accessor._getBrickBegAndSize(ix, sane = False)
if False:
print("ix {ix} lod {lod} brickpos ({ii},{jj},{kk}) beg 0x{beg:x} size 0x{size:x}".format(
ix=ix, lod=lod, ii=ii, jj=jj, kk=kk, beg=beg, size=size))
# Size will be > 64^3 sometimes due to interleaved alpha tiles
# and it will be very large for the last brick since we skipped
# the test for EOF. For Salt2-v3.zgy, lowres alpha tiles exist
# between 0x440000 and 0x480000 so the brick at 0x400000 will
# appear twice as large as it ought to be.
assert beg == 0 or size >= 64*64*64
for ii, jj, lod in alltiles(reader):
ix = reader._accessor._getAlphaLookupIndex(ii, jj, lod)
raw_beg = reader._accessor._metadata._alup._lookup[ix]
raw_end = reader._accessor._metadata._alup._lookend[ix]
if False:
print("begin: {0}({1}), end: {2}({3})".format(
_typename(type(raw_beg)), hex(raw_beg),
_typename(type(raw_end)), hex(raw_end)))
beg, size = reader._accessor._getAlphaBegAndSize(ix, sane = False)
if False:
print("ix {ix} lod {lod} alphapos ({ii},{jj}) beg 0x{beg:x} size 0x{size:x}".format(
ix=ix, lod=lod, ii=ii, jj=jj, beg=beg, size=size))
assert beg == 0 or size >= 64*64
def test_PaddingOutsideSurvey(filename):
with openzgy.ZgyWriter(filename,
iocontext = SDCredentials(),
size = (7, 13, 17),
datatype = openzgy.SampleDataType.int16,
datarange = (-32768,+32767)) as writer:
data = np.arange(1, 7*13*17+1, dtype=np.int16).reshape((7,13,17))
data = np.pad(data, ((0,64-7),(0,64-13),(0,64-17)),
mode='constant', constant_values=-999)
writer.write((0,0,0), data)
(brickstatus, fileoffset, constvalue, bricksize) = (
writer._accessor._getBrickFilePosition(0, 0, 0, 0))
# With ZgyReader the padding area should be filled with a default value,
# which is zero in this simple case where there is no scale/offset.
# So the only nonzero data should be our real samples.
with openzgy.ZgyReader(filename, iocontext = SDCredentials()) as reader:
readback = np.full((64, 64, 64), 42, dtype=np.int16)
reader.read((0,0,0), readback)
assert np.count_nonzero(readback) == 7 * 13 * 17
# Reading using basic file I/O to get the data that the compressor saw.
with open(filename, "rb") as f:
f.seek(fileoffset, 0)
check = f.read(64*64*64*2)
check = np.frombuffer(check, dtype=np.int16).reshape((64, 64, 64))
samples_in_survey = 7 * 13 * 17
expect_nonzero_samples = 8 * 16 * 20
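        # (Hedged interpretation: the writer appears to pad each survey edge
        # out to a multiple of 4 samples by replicating the last real sample,
        # which is why 7 x 13 x 17 real samples show up as 8 x 16 x 20
        # nonzero samples in the stored brick.)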
actual_nonzero_samples = np.count_nonzero(check)
assert np.all(data[:7,:13,:17] == check[:7,:13,:17])
assert brickstatus == impl_enum.BrickStatus.Normal
assert fileoffset == 64*64*64*2 # Not really a problem if it fails
assert actual_nonzero_samples == expect_nonzero_samples
assert check[7,12,16] == data[6,12,16] # one past lower corner
assert check[6,13,16] == data[6,12,16]
assert check[6,12,17] == data[6,12,16]
assert np.all(check[7,:13,:17] == check[6,:13,:17])
assert np.all(check[:7,13,:17] == check[:7,12,:17])
assert np.all(check[:7,14,:17] == check[:7,12,:17])
assert np.all(check[:7,15,:17] == check[:7,12,:17])
assert np.all(check[:7,:13,17] == check[:7,:13,16])
assert np.all(check[:7,:13,18] == check[:7,:13,16])
assert np.all(check[:7,:13,19] == check[:7,:13,16])
def test_histogram16():
hist = impl_histogram.HistogramData(dtype=np.int16)
assert hist.vv_range == (-32768, +32767)
if hist.bins.shape[0] != 65536:
# I might not have enabled the feature of using oversize histograms.
# But I can fake it by resizing the new, empty histogram.
hist.resize(65536)
assert hist.vv_range == (-32768, +32767)
assert hist.bins.shape[0] == 65536
# Using only 4096 of the possible 65536 values
hist.add(1000+np.arange(4096, dtype=np.int16))
assert np.all(hist.bins[32768+1000:32768+1000+4096] == 1)
# The first number, 1000, ended up in bin 32768+1000.
assert np.isclose(1000, hist.binvalue(32768+1000))
hist.resize(256)
# Current implementation reserves slot 0 for zero-centric
# adjustment which is not implemented yet. The factor will
# end up as 17, so we will have 4096/17 = 240 bins with count
# 17 and the 16 left over in the bin immediately after those.
assert len(hist.bins) == 256
assert hist.bins[0] == 0
assert np.all(hist.bins[1:241] == 17)
assert hist.bins[241] == 16
assert np.all(hist.bins[242:] == 0)
#print(hist._hmin, hist._hmax, hist._bins, sep="\n")
# The first number, 1000, should have moved to bin 1.
assert np.isclose(1000, hist.binvalue(1))
def test_histogram8():
hist = impl_histogram.HistogramData(dtype=np.int8)
assert hist.vv_range == (-128, +127)
# Using only 90 of the possible 256 values
hist.add(10+np.arange(80, dtype=np.int8))
assert np.all(hist.bins[128+10:128+10+80] == 1)
# The first number, 10, ended up in bin 128+10.
assert np.isclose(10, hist.binvalue(128+10))
hist.resize(32)
# Current implementation reserves slot 0 for zero-centric
# adjustment which is not implemented yet. The factor will
# end up as 3, so we will have 80/3 = 26 bins with count
# 3 and the 2 left over in the bin immediately after those.
assert len(hist.bins) == 32
assert hist.bins[0] == 0
assert np.all(hist.bins[1:1+26] == 3)
assert hist.bins[27] == 2
assert np.all(hist.bins[28:] == 0)
#print(hist._hmin, hist._hmax, hist._bins, sep="\n")
    # The first number, 10, should have moved to bin 1.
assert np.isclose(10, hist.binvalue(1))
# Now resize it larger. All the bins end up clumped together
    # in the lower part; the resize will simply add more empty
# bins at the end.
hist.resize(1000)
assert len(hist.bins) == 1000
assert hist.bins[0] == 0
assert np.all(hist.bins[1:1+26] == 3)
assert hist.bins[27] == 2
assert np.all(hist.bins[28:] == 0)
#print(hist._hmin, hist._hmax, hist._bins, sep="\n")
    # The first number, 10, should still be in bin 1.
assert np.isclose(10, hist.binvalue(1))
if __name__ == "__main__":
np.seterr(all='raise')
test_histogram16()
test_histogram8()
test_ScaleToStorage()
test_LookupTables()
test_MapEnums()
test_FormatDescription()
test_Statistics()
# TODO-High re-enable, use test data reachable from the cloud.
#test_bricksize("/home/paal/git/Salmon/UnitTestData/Salmon/UnitTest/Salt2-v3.zgy")
#test_bricksize("/home/paal/git/Salmon/UnitTestData/Salmon/UnitTest/Salt2-32.zgy")
with LocalFileAutoDelete("padding.zgy") as fn:
test_PaddingOutsideSurvey(fn.name)
# Copyright 2017-2020, Schlumberger
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | zgyio | /zgyio-0.0.4.tar.gz/zgyio-0.0.4/openzgy/test/white.py | white.py |
#print('Running' if __name__ == '__main__' else 'Importing', __file__)
import numpy as np
import sys
import os
import time
from PIL import Image
from .. import api as newzgy
try:
from .. import zgypublic as oldzgy
except Exception:
oldzgy = None
from ..exception import ZgyMissingFeature
from ..test.utils import SDCredentials, SDTestData
from .viewzgy import savePNG
def read_data_all_at_once(reader, lod, start, size):
section = np.zeros(size, dtype=np.float32)
reader.read(start, section, lod = lod)
return section
def read_data_b_at_a_time(reader, lod, start, size):
bs = np.array(reader.bricksize, dtype=np.int64)
padsize = ((np.array(size, np.int64) + bs - 1) // bs) * bs
brick = np.zeros(bs, dtype=np.float32)
section = np.zeros(padsize, dtype=np.float32)
for ii in range(0, size[0], bs[0]):
for jj in range(0, size[1], bs[1]):
for kk in range(0, size[2], bs[2]):
reader.read((start[0]+ii, start[1]+jj, start[2]+kk),brick, lod = lod)
section[ii:ii+bs[0], jj:jj+bs[1], kk:kk+bs[2]] = brick
return section[:size[0],:size[1],:size[2]]
def timing_report(reader, lod, start, size, elapsed):
bs = np.array(reader.bricksize if isinstance(reader, newzgy.ZgyReader) else (64, 64, 64), dtype=np.int64)
padsize = ((np.array(size, np.int64) + bs - 1) // bs) * bs
bandwidth = np.product(padsize) / elapsed # should I use size or padsize?
bandwidth /= (1024*1024)
print("Elapsed {0:6.2f} seconds, bandwidth {1:6.2f} MVoxel/s reading {2} lod {3} size {4} start {5}".format(elapsed, bandwidth, reader.datatype, lod, tuple(size), tuple(start)))
_zgycloud_inited = False
def init_zgycloud():
global _zgycloud_inited
if _zgycloud_inited or oldzgy is None: return
import zgycloud
zgy = oldzgy.zgy
#print("Glue together error handling", flush=True)
zgy.setErrorHooks(zgycloud.resetError, zgycloud.lastError)
#print("Handle logging inside Python", flush=True)
zgycloud.setLogger(lambda pri,x: print("log:", x, end='', flush=True), 0)
# Modify this to run the tests in a different environment.
if False:
zgycloud.configure('sd://', '',
'@/etc/delfi/demo-sdapi-key',
'@/etc/delfi/demo-sdapi-url')
#print("Get test token", flush=True)
token = zgycloud.getTestToken("sd://")
zgycloud.setToken("sd://", token, "stoken", True)
zgycloud.enableRealCache(True, 1024)
zgycloud.setSegmentSize(1024)
_zgycloud_inited = True
def run(filename, *, lods = [0], direction = 0, slurp=True,
readerfactory = newzgy.ZgyReader, outname = None, iocontext=None):
"""
Read 64 traces or slices in the specified direction.
Optionally save the first of these as PNG.
"""
if iocontext is None and filename[:5] == "sd://":
iocontext = SDCredentials()
if False:
print("Read", filename,
("64 inlines", "64 crosslines", "64 slices")[direction],
"slurp" if slurp else "block at a time")
allslices = []
with readerfactory(filename, iocontext=iocontext) as reader:
slicenumber = reader.size[direction] // 2
#reader.dump()
for lod in lods:
step = 1<<lod
start = [0, 0, 0]
size = np.array(reader.size, dtype=np.int64) // (1 << lod)
start[direction] = slicenumber >> lod
size[direction] = 1
if direction == 2:
size[0] = min(size[0], 1024)
size[1] = min(size[1], 1024)
start = tuple(start)
size = tuple(size)
starttime = time.time()
if slurp:
section = read_data_all_at_once(reader, lod, start, size)
else:
section = read_data_b_at_a_time(reader, lod, start, size)
#timing_report(reader, lod, start, size, time.time() - starttime)
# Display only the first section or slice
if outname:
if direction == 0:
myslice = section[0,...]
elif direction == 1:
myslice = section[:,0,:]
else:
myslice = section[...,0]
#savePNG(myslice, outname + "_lod" + str(lod) + ".png")
allslices.append(myslice)
if outname:
s = allslices[0].shape
w = np.sum([allslices[i].shape[0] for i in range(3)])
combined = np.zeros((w, s[1]), dtype=allslices[0].dtype)
combined[0:s[0],:] = allslices[0]
ss = allslices[1].shape
combined[s[0]:s[0]+ss[0], 0:ss[1]] = allslices[1]
sss = allslices[2].shape
combined[s[0]+ss[0]:s[0]+ss[0]+sss[0], 0:sss[1]] = allslices[2]
combined = combined[::-1,::]
savePNG(combined, outname + ".png")
def Rema1000(filelist):
"""
Specific test for the Rema 1000 solution: What is the best case
and worst case effect of using 4 MB block size regardless of how
    the data is laid out?
The file list should be one completely sorted, one completely random
In all cases, read one brick at a time, strictly ordered by
inline slowest, crossline, vertical fastest
Currently the cache only holds the last block read, but
in these lab conditions that should be enough.
Control: config aligned = 0. Access direction and sorted or not
shouldn't have any effect. If it does then there is probably
some caching that I am not aware of, and it will be more
difficult to interpret the results. Test all combinations of
unsorted/sorted and access directions.
Test: config aligned = 4 MB. What I expect / hope is that for a
sorted file the inline- and crossline access should be
significantly better. With inline access possibly a bit better
than crossline. Slice access and all access to scrambled files
should all be similar (assuming the controls were all similar)
and hopefully not very much worse than the control.
The entire test suite should be run both for 8-bit and float.
Primarily test on cloud, but perhaps also on-prem with limited
bandwidth.
"""
for alignment in [0, 4]:
print("Force alignment:", alignment)
for filename in filelist:
for direction in (0, 1, 2):
run(filename, direction=direction, slurp=False,
readerfactory=newzgy.ZgyReader,
iocontext=SDCredentials(aligned=alignment))
# For comparison, best case for the fancy reader.
run(filelist[0], direction=0, slurp=True, readerfactory=newzgy.ZgyReader)
def Main(args):
if not args:
args = ["../build/testdata/Empty-v3.zgy",
"../build/testdata/Empty-v1.zgy",
SDTestData("SyntFixed.zgy")]
if any([name[:5] == "sd://" for name in args]):
init_zgycloud()
suffix = 1
for filename in args:
out = os.path.join(os.getenv("TESTRUNDIR", '.'), "new" + str(suffix))
try:
run(filename, lods=range(3), direction=0, slurp=True,
readerfactory=newzgy.ZgyReader, outname = out)
except ZgyMissingFeature as ex:
print("{0}: {1}".format(filename, str(ex)))
out = os.path.join(os.getenv("TESTRUNDIR", '.'), "old" + str(suffix))
if oldzgy is not None:
run(filename, lods=range(3), direction=0, slurp=True,
readerfactory=oldzgy.ZgyReader, outname = out)
suffix += 1
if __name__ == "__main__":
np.seterr(all='raise')
Main(sys.argv[1:])
sys.exit(0)
Rema1000([SDTestData("bigtestdata/synt-50gb-8bit-sorted.zgy"),
SDTestData("bigtestdata/synt-50gb-8bit-randomized.zgy")])
# Copyright 2017-2020, Schlumberger
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | zgyio | /zgyio-0.0.4.tar.gz/zgyio-0.0.4/openzgy/tools/show.py | show.py |
import numpy as np
import os, sys, argparse
import matplotlib.pyplot as plt
from ..api import ZgyReader, ZgyWriter, SampleDataType, ProgressWithDots
from ..impl.enum import BrickStatus
from ..impl.compress import CompressStats as Stats
from ..test.utils import SDCredentials
from ..iterator import readall
def compare_stats(r1, r2):
old = r1.statistics
new = r2.statistics
old_avg = old.sum / old.cnt
old_rms = float(np.sqrt(old.ssq / old.cnt))
new_avg = new.sum / new.cnt
new_rms = float(np.sqrt(new.ssq / new.cnt))
# Note, use "rms" as the reference also for avg, as old_avg may be ~0.
delta_avg = (new_avg - old_avg) / old_rms
delta_rms = (new_rms - old_rms) / old_rms
# Also check the value range.
delta_min = abs(old.min - new.min) / (old.max - old.min)
delta_max = abs(old.max - new.max) / (old.max - old.min)
delta_range = max(delta_min, delta_max)
print("in: ", old)
print("out:", new)
print('"Difference in stats" "avg:" {0:7.4f}% "rms:" {1:7.4f}% "range:" {2:7.4f}% "count:" {3}'.format(
100*delta_avg, 100*delta_rms, 100*delta_range, new.cnt - old.cnt))
def compare_open_files(r1, r2, w, progress, histogram, label):
def _roundup(x, step): return ((x + step - 1) // step) * step
filesize1 = r1._fd.xx_eof
filesize2 = r2._fd.xx_eof
stats = Stats("MyStats")
bricksize = np.minimum(np.array(r1.bricksize, dtype=np.int64),
np.array(r2.bricksize, dtype=np.int64))
bufsize = np.zeros(3, dtype=np.int64)
delta_histogram_clip = 0.01 # @@ change me!
delta_histogram = None
delta_histogram = np.zeros(200, dtype=np.int64)
input_rms = np.sqrt(r1.statistics.ssq / r1.statistics.cnt)
delta_hist_range = (-input_rms * delta_histogram_clip,
+input_rms * delta_histogram_clip)
for start, count, _ in readall(r1, sizeonly=True):
bufsize = np.maximum(bufsize, count)
data1 = np.zeros(bufsize, dtype=np.float32)
data2 = np.zeros(bufsize, dtype=np.float32)
for start, count, _ in readall(r1, sizeonly=True, progress=progress):
view1 = data1[:count[0],:count[1],:count[2]]
view2 = data2[:count[0],:count[1],:count[2]]
r1.read(start, view1)
r2.read(start, view2)
if w:
w.write(start, view2 - view1)
ddelta = (view2 - view1)
np.clip(ddelta, delta_hist_range[0], delta_hist_range[1], out=ddelta)
dhist = np.histogram(ddelta, bins=len(delta_histogram), range=delta_hist_range)
delta_histogram += dhist[0]
# readall() can't help with the inner loop because we had to
# read from two files, not just one.
for ii in range(0, count[0], bricksize[0]):
for jj in range(0, count[1], bricksize[1]):
for kk in range(0, count[2], bricksize[2]):
beg = np.array((ii, jj, kk), dtype=np.int64)
end = np.minimum(beg + bricksize, count)
bpos = beg // r2.bricksize
bstatus, _, _, bsize = r2._accessor._getBrickFilePosition(*bpos, 0)
d1 = view1[beg[0]:end[0],beg[1]:end[1],beg[2]:end[2]]
d2 = view2[beg[0]:end[0],beg[1]:end[1],beg[2]:end[2]]
if bstatus in (BrickStatus.Normal, BrickStatus.Compressed):
stats.add_data(d1, bsize, d2, msg=str(bstatus))
MEGA = float(1024*1024)
print("Actual file size {0:.0f} MB / {1:.0f} MB = ratio {2:.1f} relative {3:.1f}%".format(
filesize1 / MEGA, filesize2 / MEGA, filesize1 / filesize2, 100 * (filesize2 / filesize1)))
compare_stats(r1, r2)
stats.dump()
if histogram:
#print(stats._all_info)
# For the histogram I want to also include the lossless bricks;
# unlike what is done in CompressStats.dump().
allsnr = list([x[0] for x in stats._all_info]) or [99.0]
allsnr = np.clip(allsnr, 0, 99)
allrat = list([100.0/e[1] for e in stats._all_info if e[1] != 0]) or [1]
allrat = np.clip(allrat, 0.8001, 99.9999)
allrat = 100.0 / allrat # Now in range 1% .. 125%
fig, (ax1, ax2, ax3) = plt.subplots(3, figsize=(8, 9), dpi=100)
fig.subplots_adjust(hspace=0.5)
ax1.hist(allsnr, bins=100, range=(0, 100))
#ax1.title.set_text("Signal to Noise (dB) for each brick.")
#ax1.text(0.5, 0.9, 'Signal to Noise (dB) for each brick.',
# transform=ax1.transAxes, ha="center")
ax1.set_xlabel('Measured signal to noise (dB)')
ax1.set_ylabel('% bricks with this SNR')
ax2.hist(allrat, bins=100, range=(1, 125))
#ax2.title.set_text("Compression ratio for each brick.")
#ax2.text(0.5, 0.9, 'Compression ratio for each brick.',
# transform=ax2.transAxes, ha="center")
ax2.set_xlabel('Size in % of input')
ax2.set_ylabel('% bricks with this size')
xbins = len(delta_histogram)
xaxis = np.arange(xbins, dtype=np.float32) # 0..nbins
xaxis /= xbins # 0..1
xaxis = 2*xaxis - 1 # -1..+1
xaxis *= delta_histogram_clip
#print("@@@ sample count", np.sum(delta_histogram), "zeros", delta_histogram[xbins//2])
ax3.plot(xaxis, delta_histogram * (100 / np.sum(delta_histogram)), color='orange')
#ax3.title.set_text("Histogram of errors, relative to RMS of data.")
#ax3.text(0.5, 0.9, 'Histogram of errors, relative to RMS of data.',
# transform=ax3.transAxes, ha="center")
ax3.set_xlabel('Error relative to RMS of data ({0:.0f})'.format(input_rms))
ax3.set_ylabel('% samples with this error')
fig.suptitle(label)
plt.savefig(histogram)
def compare(filename1, filename2, outfilename, histogram, label):
with ZgyReader(filename1, iocontext = SDCredentials()) as r1:
with ZgyReader(filename2, iocontext = SDCredentials()) as r2:
if r1.size != r2.size:
print("Survey size mismatch:", r1.size, r2.size)
else:
if outfilename:
with ZgyWriter(outfilename,
templatename=filename1,
datatype=SampleDataType.float,
iocontext = SDCredentials()) as w:
compare_open_files(r1, r2, w, progress=ProgressWithDots(), histogram=histogram, label=label)
w.finalize(progress=ProgressWithDots())
else:
compare_open_files(r1, r2, None, progress=ProgressWithDots(), histogram=histogram, label=label)
if __name__ == "__main__":
np.seterr(all='raise')
parser = argparse.ArgumentParser(description='Compare two presumed equal ZGY files. Report any noise that might be caused by compression and/or quantization.', epilog=None)
parser.add_argument('input1', help='ZGY input cube, local or sd://')
parser.add_argument('input2', help='ZGY input cube, local or sd://')
parser.add_argument('--output', help='Optional difference ZGY output cube, local or sd://, overwritten if it already exists.')
parser.add_argument('--histogram', help='Optional histograms of noise and compression saved to this png file. Local files only.')
parser.add_argument('--label', help='Title for the histogram. Defaults to the file names.')
args = parser.parse_args()
#print(args)
if not args.label:
args.label = "File 1: {0}\nFile 2: {1}".format(args.input1, args.input2)
compare(args.input1, args.input2, args.output, args.histogram, args.label)
# Copyright 2017-2020, Schlumberger
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | zgyio | /zgyio-0.0.4.tar.gz/zgyio-0.0.4/openzgy/tools/compare.py | compare.py |
import numpy as np
import os, sys, time, argparse
import matplotlib.pyplot as plt
from ..api import ZgyReader, ZgyWriter, SampleDataType, ProgressWithDots
from ..test.utils import SDCredentials
from ..iterator import readall
def find_center(r):
"""
Crop partial bricks outside or below the survey since they
would just confuse the statistics.
If the file is large then crop out an area around the center.
Try to read the full traces but limit the il and xl.
"""
size = np.array(r.size, dtype=np.int64)
cropsize = np.minimum(size, np.array([640,640,12800], dtype=np.int64))
cropsize = (cropsize//64)*64
cropoffset = ((size - cropsize)//128)*64
if np.any(size - cropsize >= 64):
print("Reading center", tuple(cropsize),
"offset", tuple(cropoffset),
"of survey size", tuple(r.size))
return cropsize, cropoffset
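# Worked example: for a survey of size (2000, 3000, 1500) the crop becomes
# (640, 640, 1472) samples starting at offset (640, 1152, 0), i.e. roughly
# the 640 x 640 traces around the center, with the trace length rounded
# down to whole bricks.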
def run_open_file(r, datarange, *, progress = None, bincount = 256, crop = False):
histogram = np.zeros(bincount, dtype=np.int64)
(cropsize, cropoffset) = find_center(r) if crop else (None, None)
for _, _, data in readall(r, dtype=np.float32, cropsize=cropsize, cropoffset=cropoffset, progress=progress):
histogram += np.histogram(data, bins=bincount, range=datarange)[0]
return histogram
def run(srcfilename, outfilename, datarange=None, bincount=256, crop=False):
with ZgyReader(srcfilename, iocontext = SDCredentials()) as r:
if not datarange:
datarange = tuple(r.datarange)
# Make symmetric
if datarange[0] * datarange[1] < 0:
lim = max(abs(datarange[0]), abs(datarange[1]))
datarange = (-lim, lim)
print("Data type", r.datatype,
"range", datarange, "codingrange", r.datarange,
"legacy", r._meta._ih._codingrange,
"statistical", (r.statistics.min, r.statistics.max))
if r.datatype == SampleDataType.int8:
bincount=256
histogram = run_open_file(r, datarange, bincount=bincount, crop=crop,
progress=ProgressWithDots())
assert len(histogram) == bincount
plot_histogram("file: " + srcfilename, histogram)
plt.savefig(outfilename)
def plot_histogram(title, histogram):
bincount = len(histogram)
nz = np.count_nonzero(histogram)
zz = len(histogram) - nz
#print(list(sorted(histogram)))
#clipvalue = list(sorted(histogram))[-nz//10] # at 90-percentile
clipvalue = list(sorted(histogram))[-5] # clip a fixed number of bins
# Clipping so a large number of zero or close-to-zero won't cause trouble.
if False:
print("raw histogram ", histogram)
print("sorted histogram", np.array(sorted(histogram))[-6:])
print("clip from", list(sorted(histogram))[-1],
"to", clipvalue, "samples/bin")
# But: If there are fewer than 5 buckets with data in them, don't bother.
# This can really happen. If a data set has a single huge spike then only
# the "spike" bin and the "close to zero" bin will be populated.
# The histogram will be useless, but at least try to show what happened
# instead of bombing out with a divide by zero.
# Example data set: sd://sntc/millet/usgs/zgy/B-09-88-LA.zgy
    # which has a real value range +/- 100000 but spikes at -9.1e15
if clipvalue != 0:
np.clip(histogram, 0, clipvalue, out=histogram)
#print("clipped hist at ", clipvalue, ":", histogram)
else:
print("WARNING: Fewer than 5 buckets filled. Are there spikes?")
topvalue = list(sorted(histogram))[-1]
sumvalue = np.sum(histogram)
print("{0:.1f}% of the {1} histogram bins are empty.".format(
100.0*zz/len(histogram),
len(histogram)))
if sumvalue == 0:
print("ERROR: histogram is empty.")
else:
print("{0:.1f}% of all samples fall in the same bin.".format(
100.0*topvalue/sumvalue))
# The plot only allows displaying 512 distinct values without the plot
# itself starting to generate artifacts. So we must compress, or zoom,
# or preferably both.
factor = max(1, bincount // 512)
zoomed = histogram[(factor-1)*bincount//(2*factor):(factor+1)*bincount//(2*factor)]
compressed = np.array([np.sum(histogram[ii:ii+factor]) for ii in range(0, bincount, factor)], dtype=np.int64)
#plt.figure(num=None, figsize=(20, 10), dpi=80)
fig, (ax1, ax2) = plt.subplots(2, figsize=(12, 7), dpi=100, facecolor="red", edgecolor="green")
#ax1.set_facecolor('#dddddd')
ax1.plot(np.arange(len(zoomed)), zoomed)
ax2.plot(np.arange(len(compressed)), compressed)
ax1.title.set_text("Zoomed in {0}x, showing the {1} bins at the center of a {2} bin histogram".format(factor, bincount//factor, bincount))
ax2.title.set_text("Entire histogram, compressed to {0} bins".format(bincount//factor))
#x = [111,122,155,192,11,123,120,]
#y = [3,4,3,5,9,10,23]
#plt.bar(x,y)
fig.suptitle(title)
if __name__ == "__main__":
np.seterr(all='raise')
parser = argparse.ArgumentParser(description='Compute and show histogram')
parser.add_argument('input', help='ZGY input cube, local or sd://')
parser.add_argument('--output', default=None, help='PNG output file')
parser.add_argument('--datarange', nargs=2, default=None, metavar=('MIN', 'MAX'), type=float, help='Histogram limits')
parser.add_argument('--bins', nargs=1, default=[4096], type=int, help='Number of bins in histogram')
parser.add_argument("--crop", action='store_true', help='Limit the number of traces scanned')
args = parser.parse_args()
args.datarange = tuple(args.datarange) if args.datarange is not None else None
args.bins = args.bins[0]
if not args.output:
args.output = (args.input.rstrip('/').split('/')[-1]) + "-histogram.png"
print(args)
run(args.input, args.output, args.datarange, args.bins, args.crop)
# Copyright 2017-2020, Schlumberger
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | zgyio | /zgyio-0.0.4.tar.gz/zgyio-0.0.4/openzgy/tools/dumphist.py | dumphist.py |
import numpy as np
import os, sys
import argparse
from ..api import ZgyReader, SampleDataType
from ..test.utils import SDCredentials
_brief_info = """
File name = '{name}'
File size (bytes) = {r._fd.xx_eof:,d}
File format and version = {r.datatype.name} ZGY version {r._accessor._metadata._fh._version}
Current data Version = {r.verid}
Brick size I,J,K = {r.bricksize}
Number of bricks I,J,K = {r.brickcount[0]}
Number of LODs = {r.nlods}
Coding range min/max = {r.datarange[0]:.6g} {r.datarange[1]:.6g} (raw: {r.raw_datarange[0]:.6g} {r.raw_datarange[1]:.6g}) {nsamples:,d}
Statistical min/max/count = {r.statistics.min:.6g} {r.statistics.max:.6g} {r.statistics.cnt:,d}
Histogram range min/max/count = {r.histogram.min:.6g} {r.histogram.max:.6g} {r.histogram.cnt:,d}
Inline start/increment/count = {r.annotstart[0]} {r.annotinc[0]} {r.size[0]}
Xline start/increment/count = {r.annotstart[1]} {r.annotinc[1]} {r.size[1]}
Sample start/increment/count = {r.zstart} {r.zinc} {r.size[2]}
Horizontal projection system = {r._accessor._metadata._ih._hprjsys}
Horizontal dim/factor/name = {r.hunitdim.name} {r.hunitfactor} '{r.hunitname}'
Vertical dim/factor/name = {r.zunitdim.name} {r.zunitfactor} '{r.zunitname}'
Ordered Corner Points Legend = [ <i>, <j>] {{ <inline>, <xline>}} ( <easting>, <northing>)
Ordered Corner Point 1 = [{r.indexcorners[0][0]:5d}, {r.indexcorners[0][1]:5d}] {{{r.annotcorners[0][0]:9g}, {r.annotcorners[0][1]:9g}}} ({r.corners[0][0]:11.2f}, {r.corners[0][1]:11.2f})
Ordered Corner Point 2 = [{r.indexcorners[1][0]:5d}, {r.indexcorners[1][1]:5d}] {{{r.annotcorners[1][0]:9g}, {r.annotcorners[1][1]:9g}}} ({r.corners[1][0]:11.2f}, {r.corners[1][1]:11.2f})
Ordered Corner Point 3 = [{r.indexcorners[2][0]:5d}, {r.indexcorners[2][1]:5d}] {{{r.annotcorners[2][0]:9g}, {r.annotcorners[2][1]:9g}}} ({r.corners[2][0]:11.2f}, {r.corners[2][1]:11.2f})
Ordered Corner Point 4 = [{r.indexcorners[3][0]:5d}, {r.indexcorners[3][1]:5d}] {{{r.annotcorners[3][0]:9g}, {r.annotcorners[3][1]:9g}}} ({r.corners[3][0]:11.2f}, {r.corners[3][1]:11.2f})
"""
_hist_info = "Histogram bin {0:3d} = {1:11d}"
def all_brick(reader):
for lod in range(reader.nlods):
for ii in range(reader.brickcount[lod][0]):
for jj in range(reader.brickcount[lod][1]):
for kk in range(reader.brickcount[lod][2]):
info = reader._accessor._getBrickFilePosition(ii,jj,kk,lod)
yield ((lod, ii, jj, kk),) + info
def all_alpha(reader):
for lod in range(reader.nlods):
for ii in range(reader.brickcount[lod][0]):
for jj in range(reader.brickcount[lod][1]):
info = reader._accessor._getAlphaFilePosition(ii,jj,lod)
yield ((lod, ii, jj),) + info + (0,)
def summary_brick_offsets(reader):
alpha = dict()
brick = dict()
for info in all_alpha(reader):
alpha[info[1].name] = alpha.get(info[1].name, 0) + 1
for info in all_brick(reader):
brick[info[1].name] = brick.get(info[1].name, 0) + 1
print("{0:30s} = {1}".format("Alpha status", str(alpha)))
print("{0:30s} = {1}".format("Brick status", str(brick)))
def summary_normal_size(reader, *, header = True):
"""
Useful for performance measurements.
1) The number of allocated LOD0 bricks, used to compute bandwidth after
timing the read of the entire file. Specified as bricks and MB.
2) Size of one brick_column, allocated or not, given as above.
3) Brick size in bytes.
Note that size in MB is computed by integer division.
If you need them exact, do the (float) division yourself.
"""
x = [e for e in all_brick(reader) if e[0][0] == 0 and e[1].name == "Normal"]
normal = len(x)
bytespersample = {SampleDataType.int8: 1,
SampleDataType.int16: 2,
SampleDataType.float: 4}[reader.datatype]
bytesperbrick = np.product(reader.bricksize) * bytespersample
colsize = (reader.size[2] + reader.bricksize[2] - 1) // reader.bricksize[2]
bytespercolumn = bytesperbrick * colsize
fmt = "{0:30s} = LOD0: {1} {2} MB column {3} {4} MB brick {5}"
if not header: fmt = "{1} {2} {3} {4} {5}"
print(fmt.format(
"Normal LOD0 bricks & col size", normal,
(normal * bytesperbrick) // (1024*1024),
colsize,
(colsize * bytesperbrick) // (1024*1024),
bytesperbrick))
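# Example of how the first two numbers are typically used: if LOD0 holds
# 1200 normal bricks of 64*64*64 int8 samples, that is 300 MB; if a timed
# read of the whole file took 60 seconds, the bandwidth was about 5 MB/s.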
def dump_brick_offsets(reader, sort):
print("BRICK offsets:")
table = all_brick(reader)
if sort:
table = sorted(table, key=lambda x: x[2] or 0)
for (pos, brickstatus, fileoffset, constvalue, bricksize) in table:
addr = "[{0}][{1}][{2}][{3}]".format(pos[0], pos[1], pos[2], pos[3])
if fileoffset is not None:
print("{addr:20s} = {fileoffset:16x} {brickstatus.name} Size {bricksize:8x}".format(
addr=addr,
brickstatus=brickstatus, fileoffset=fileoffset,
constvalue=constvalue, bricksize=bricksize))
else:
print("{addr:20s} = {fileoffset:16s} {brickstatus.name} {constvalue}".format(
addr=addr,
brickstatus=brickstatus, fileoffset="",
constvalue=constvalue, bricksize=""))
def dump_alpha_offsets(reader, sort):
print("ALPHA offsets:")
table = all_alpha(reader)
if sort:
table = sorted(table, key=lambda x: x[2] or 0)
for (pos, brickstatus, fileoffset, constvalue, bricksize) in table:
addr = "[{0}][{1}][{2}]".format(pos[0], pos[1], pos[2])
if fileoffset is not None:
print("{addr:20s} = {fileoffset:16x} {brickstatus.name}".format(
addr=addr,
brickstatus=brickstatus, fileoffset=fileoffset,
constvalue=constvalue))
else:
print("{addr:20s} = {fileoffset:16s} {brickstatus.name} {constvalue}".format(
addr=addr,
brickstatus=brickstatus, fileoffset="",
constvalue=constvalue))
def dump_combined_offsets(reader, sort):
print("BRICK and ALPHA offsets sorted by address:")
table = list(all_brick(reader)) + list(all_alpha(reader))
if sort:
table.sort(key=lambda x: x[2] or 0)
for (pos, brickstatus, fileoffset, constvalue, bricksize) in table:
if len(pos) == 4:
addr = "brick [{0}][{1}][{2}][{3}]".format(pos[0], pos[1], pos[2], pos[3])
elif len(pos) == 3:
addr = "alpha [{0}][{1}][{2}]".format(pos[0], pos[1], pos[2])
if fileoffset is not None:
print("{addr:26s} = {fileoffset:16x} {brickstatus.name} Size {bricksize:8x}".format(
addr=addr,
brickstatus=brickstatus, fileoffset=fileoffset,
constvalue=constvalue, bricksize=bricksize))
else:
print("{addr:26s} = {fileoffset:16s} {brickstatus.name} {constvalue}".format(
addr=addr,
brickstatus=brickstatus, fileoffset="",
constvalue=constvalue, bricksize=""))
def run(filename, options):
with ZgyReader(filename, iocontext = SDCredentials()) as reader:
if options.only_lod0_info:
summary_normal_size(reader, header=False)
return
args = dict(name=filename,
nsamples=np.product(reader.size),
r=reader)
#print(_brief_info.format(**args))
for line in _brief_info.split('\n'):
if line:
try:
print(line.format(**args))
except Exception as ex:
print(line.split('=')[0] + "= N/A " + str(ex))
summary_brick_offsets(reader)
summary_normal_size(reader)
if options.histogram:
hh = reader.histogram.bin
for ii in range(len(hh)):
print(_hist_info.format(ii, hh[ii]))
if options.sorted_offsets:
dump_combined_offsets(reader, True)
elif options.offsets:
dump_brick_offsets(reader, False)
dump_alpha_offsets(reader, False)
# Features from the old C++ zgydump
# -b --brief BriefInfo()
# (default) FullInfo()
# In new code, --histogram must be requested explicitly.
# There is no --brief since the two outputs then end up
# rather similar.
# -p --performance PerfInfo()
# normal/empty/const counts now printed unconditionally.
# Can also consider merging test_zgydump and test_isoptimal.
# -o --offset Offsets()
# Better than the original, handles const(value) and compressed(size).
# -s --slice Slice()
# Covered by test_show. Might as well keep this as a separate exe.
# -a --alpha Alpha()
# Not implemented, as the alpha tiles are deprecated.
def Main():
np.seterr(all='raise')
parser = argparse.ArgumentParser(description='Show ZGY file details')
parser.add_argument('files', nargs="+", help='ZGY files, local or sd://')
parser.add_argument('--histogram', action='store_true', help="Show the 256-bin histogram")
parser.add_argument('--offsets', action='store_true', help="Show offset of each brick")
parser.add_argument('--sorted-offsets', action='store_true', help="Sort by file offset")
parser.add_argument('--only-lod0-info', action='store_true', help="Only lod0 stats etc.")
args = parser.parse_args()
for filename in args.files:
run(filename, args)
if __name__ == "__main__":
Main()
# Copyright 2017-2021, Schlumberger
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | zgyio | /zgyio-0.0.4.tar.gz/zgyio-0.0.4/openzgy/tools/zgydump.py | zgydump.py |
import numpy as np
import os, sys, math
from ..api import ZgyReader, ZgyWriter, ProgressWithDots
from ..test.utils import SDCredentials
from ..iterator import readall
def round_sig(x, sig=2):
return 0 if not x else round(x, sig-1-int(math.floor(math.log10(abs(x)))))
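# Illustrative sketch (added for clarity, not called by the tool): with the
# default sig=2, round_sig keeps two significant digits, which is how the
# 99-percentile amplitude below gets turned into a round, human-friendly cutoff.
def _round_sig_examples():
    assert round_sig(0.0123456) == 0.012
    assert round_sig(98765.0) == 99000
    assert round_sig(0) == 0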
def scan_open_file(r, progress):
limits = []
for datastart, datasize, data in readall(r, progress=progress):
lim = np.amax(np.abs(data), axis=2)
limits.extend(map(float, lim.flat))
limits.sort()
cutoff = limits[(len(limits)*99)//100]
print("Trace count: {0:,d}, 99-percentile: {1:.6g}".format(
len(limits), cutoff))
return round_sig(cutoff * 10)
def copy_open_file(r, w, cutoff, progress):
nuked = 0
total = 0
for datastart, datasize, data in readall(r, progress=progress):
lim = np.amax(np.abs(data), axis=2)
for ii in range(lim.shape[0]):
for jj in range(lim.shape[1]):
total += 1
if lim[ii,jj] > cutoff:
nuked += 1
data[ii,jj,:] = 0
if False:
print("trace {0},{1} max {2:.6g}".format(
datastart[0]+ii, datastart[1]+jj, lim[ii,jj]))
w.write(datastart, data)
print("{0:,d} of {1:,d} traces spiked > {2:.6g} and were zeroed.".format(
nuked, total, cutoff))
def copy(srcfilename, dstfilename):
ProgressWithDots()(100, 100)
with ZgyReader(srcfilename, iocontext = SDCredentials()) as r:
with ZgyWriter(dstfilename, templatename=srcfilename,
iocontext = SDCredentials()) as w:
cutoff = scan_open_file(r, progress=ProgressWithDots())
copy_open_file(r, w, cutoff, progress=ProgressWithDots())
w.finalize(progress=ProgressWithDots())
if __name__ == "__main__":
np.seterr(all='raise')
copy(sys.argv[1], sys.argv[2])
# Copyright 2017-2020, Schlumberger
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | zgyio | /zgyio-0.0.4.tar.gz/zgyio-0.0.4/openzgy/tools/badtrace.py | badtrace.py |
import numpy as np
import os, sys
from ..api import ZgyReader, ZgyWriter, ProgressWithDots, SampleDataType
from ..test.utils import SDCredentials
from ..iterator import readall
def suggest_range(value, dt):
"""Special case handling for inconsistent all-constant files."""
dt_lo, dt_hi = {SampleDataType.int8: (-128, +127),
SampleDataType.int16: (-32768, +32767)}[dt]
if value == 0:
return (dt_lo / dt_hi, 1)
elif value > 0:
return (0, value * (1 - dt_hi / dt_lo))
else:
return (value * (1 - dt_lo / dt_hi), 0)
def copy_open_file(r, w, progress):
"""Simple example of manually iterating over files."""
def _roundup(x, step): return ((x + step - 1) // step) * step
blocksize = (r.bricksize[0], r.bricksize[1],
_roundup(r.size[2], r.bricksize[2]))
total = (((r.size[0] + blocksize[0] - 1) // blocksize[0]) *
((r.size[1] + blocksize[1] - 1) // blocksize[1]))
done = 0
data = np.zeros(blocksize, dtype=np.float32)
for ii in range(0, r.size[0], blocksize[0]):
for jj in range(0, r.size[1], blocksize[1]):
r.read((ii, jj, 0), data)
w.write((ii, jj, 0), data)
done += 1
if progress: progress(done, total)
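# Worked example (added for clarity; the survey size is hypothetical): for a
# (700, 900, 350) survey with 64^3 bricks, copy_open_file reads full-depth
# brick columns of shape (64, 64, 384) and visits 11 * 15 = 165 of them.
def _blocksize_example():
    def _roundup(x, step): return ((x + step - 1) // step) * step
    size, bricksize = (700, 900, 350), (64, 64, 64)
    blocksize = (bricksize[0], bricksize[1], _roundup(size[2], bricksize[2]))
    total = (((size[0] + blocksize[0] - 1) // blocksize[0]) *
             ((size[1] + blocksize[1] - 1) // blocksize[1]))
    assert blocksize == (64, 64, 384) and total == 165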
def copy_open_v2(r, w, progress):
"""Fewer lines of code but more going on behind the scenes."""
for datastart, datasize, data in readall(r, progress=progress):
w.write(datastart, data)
def copy(srcfilename, dstfilename):
with ZgyReader(srcfilename, iocontext = SDCredentials()) as r:
if r.datatype in (SampleDataType.int8, SampleDataType.int16):
if r.raw_datarange[0] == r.raw_datarange[1]:
datarange = suggest_range(r.raw_datarange[0], r.datatype)
with ZgyWriter(dstfilename, templatename=srcfilename,
datarange = datarange,
iocontext = SDCredentials()) as w:
w.writeconst((0,0,0), r.raw_datarange[0], w.size, False)
w.finalize(progress=ProgressWithDots())
return
with ZgyWriter(dstfilename, templatename=srcfilename,
iocontext = SDCredentials()) as w:
copy_open_file(r, w, progress=ProgressWithDots())
w.finalize(progress=ProgressWithDots())
if __name__ == "__main__":
np.seterr(all='raise')
copy(sys.argv[1], sys.argv[2])
# Copyright 2017-2020, Schlumberger
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | zgyio | /zgyio-0.0.4.tar.gz/zgyio-0.0.4/openzgy/tools/simplecopy.py | simplecopy.py |
import numpy as np
import os, sys, time, argparse
from ..api import ZgyReader, ZgyWriter, SampleDataType, ProgressWithDots, ZgyCompressFactory
from ..test.utils import SDCredentials
from ..iterator import readall
def suggest_range(value, dt):
"""
Special case handling for inconsistent all-constant files.
A coding range with min==max is not valid, so we must choose
something else.
"""
dt_lo, dt_hi = {SampleDataType.int8: (-128, +127),
SampleDataType.int16: (-32768, +32767)}[dt]
if value == 0:
# Choose the range -1..+1, but slightly wider at the low end
# to make float-0 map exactly to int-0, zero centric.
return (dt_lo / dt_hi, 1)
elif value > 0:
# Choose the range 0..2*value, but slightly less at the high end
# to make float-value map exactly to int-0, zero centric.
return (0, value * (1 - dt_hi / dt_lo))
else:
# Choose the range 2*value..0, but slightly wider at the low end
# to make float-value map exactly to int-0, zero centric.
return (value * (1 - dt_lo / dt_hi), 0)
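# Illustrative sketch (added for clarity, not used by the tool): what
# suggest_range() yields for an int8 file whose single stored value is
# 0, +5 or -5. In every case the constant maps exactly to storage code 0,
# i.e. the suggested coding range is zero centric as claimed above.
def _suggest_range_examples():
    lo, hi = suggest_range(0, SampleDataType.int8)
    assert abs(lo - (-128 / 127)) < 1e-9 and hi == 1
    lo, hi = suggest_range(5, SampleDataType.int8)
    assert lo == 0 and abs(hi - 5 * 255 / 128) < 1e-9
    lo, hi = suggest_range(-5, SampleDataType.int8)
    assert abs(lo - (-5 * 255 / 127)) < 1e-9 and hi == 0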
def find_center(r, maxsize):
"""
Crop partial bricks outside or below the survey since they
would just confuse the statistics.
If the file is large then crop out an area around the center.
Try to read the full traces but limit the il and xl.
"""
size = np.array(r.size, dtype=np.int64)
cropsize = np.minimum(size, np.array(maxsize, dtype=np.int64))
cropsize = (cropsize//64)*64
cropoffset = ((size - cropsize)//128)*64
if np.any(size - cropsize >= 64):
print("Reading center", tuple(cropsize),
"offset", tuple(cropoffset),
"of survey size", tuple(r.size))
return tuple(cropsize), tuple(cropoffset)
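# Worked example (added for clarity; the numbers are hypothetical): for a
# survey of size (1000, 2000, 500) and the default maxsize (640, 640, 12800),
# cropsize becomes (640, 640, 448) after rounding down to whole 64-sample
# bricks, and cropoffset becomes (128, 640, 0), i.e. roughly centered while
# still starting on a brick boundary.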
def copy_open_file(r, w, *, progress, offset=(0,0,0), noisefactor=0):
noiselevel = abs(r.datarange[1] - r.datarange[0]) / noisefactor if noisefactor > 0 else None
for datastart, datasize, data in readall(r, dtype=np.float32, cropsize=w.size, cropoffset=offset, progress=progress):
if noiselevel:
# Note, maybe multiply with a random factor as well,
# if the goal is to test compression when white noise
# is present but where the input used to be integral.
data += (np.random.random_sample(data.shape) - 0.5) * noiselevel
w.write(datastart, data)
def read_and_discard(r, cropsize, *, progress, offset=(0,0,0), noisefactor=0):
"""This is for performance testing only."""
for datastart, datasize, data in readall(r, dtype=np.float32, cropsize=cropsize, cropoffset=offset, progress=progress):
pass
def copy(srcfilename, dstfilename, crop_offset=None, crop_size=None, crop_center=None, forcetype='unchanged', datarange=None, noisefactor=0, snr = 0, progress1 = None, progress2 = None):
starttime = time.time()
with ZgyReader(srcfilename, iocontext = SDCredentials()) as r:
if crop_center:
if crop_offset and crop_offset != (0,0,0):
print("WARNING: ignoring --offset because --center specified.")
if not crop_size or crop_size == (0,0,0):
crop_size = (640, 640, 12800)
crop_size, crop_offset = find_center(r, crop_size)
# Zero cropped size means no cropping, i.e. "to end of survey".
# TODO-Low passing crop_size but not crop_offset could mean "center".
crop_beg = crop_offset or (0,0,0)
crop_size = crop_size or (0,0,0)
crop_beg = tuple([crop_beg[i] if crop_beg[i] >= 0 else r.size[i] + crop_beg[i] for i in range(3)])
crop_size = tuple([crop_size[i] or r.size[i]-crop_beg[i] for i in range(3)])
crop_end = tuple([crop_beg[i] + crop_size[i] - 1 for i in range(3)])
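        # Worked example (added for clarity; sizes are hypothetical): with
        # r.size == (100, 200, 300), crop_offset == (-64, 0, 0) and
        # crop_size == (0, 0, 100), the negative offset counts from the end
        # and the zero sizes mean "to end of survey", so crop_beg becomes
        # (36, 0, 0), crop_size (64, 200, 100) and crop_end (99, 199, 99).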
# Need to re-calculate the corners and the annotation
index_corners = [ [ crop_beg[0], crop_beg[1] ],
[ crop_end[0], crop_beg[1] ],
[ crop_beg[0], crop_end[1] ],
[ crop_end[0], crop_end[1] ] ]
try:
world_corners = [ r.indexToWorld(tuple(x)) for x in index_corners ]
except Exception:
# World coordinates bad, e.g. all might have been zero.
# Keeping the original corners can hardly make it worse.
world_corners = [ [0, 0], [0, 0], [0, 0], [0, 0] ]
annotstart = r.indexToAnnot((crop_beg[0], crop_beg[1]))
zstart = r.zstart + crop_beg[2] * r.zinc
# Optionally change the datatype (given in bytes per sample)
datatype = {
'unchanged': r.datatype,
'int8': SampleDataType.int8,
'int16': SampleDataType.int16,
'float': SampleDataType.float
} [forcetype]
singlevalue = None
if not datarange:
if r.datatype in (SampleDataType.int8, SampleDataType.int16):
if r.raw_datarange[0] == r.raw_datarange[1]:
singlevalue = r.raw_datarange[0]
datarange = suggest_range(singlevalue, r.datatype)
# Note, if read datatype != write datatype the
# data range no longer is zero centric. On the
# other hand, the write datatype is then probably
# float which means it doesn't matter.
print("Cropping: offset ", crop_beg, "size", crop_size, "of", r.size)
#print("World corners now", world_corners, "was", r.corners)
#print("Annot start now ", annotstart, "was", r.annotstart)
print("Data type", r.datatype, "->", datatype)
with ZgyWriter(dstfilename,
compressor = ZgyCompressFactory("ZFP", snr = snr),
iocontext = SDCredentials(),
size = crop_size or r.size,
datatype = datatype if snr<=0 else SampleDataType.float,
datarange = datarange or r.datarange,
zunitdim = r.zunitdim,
zunitname = r.zunitname,
zunitfactor = r.zunitfactor,
hunitdim = r.hunitdim,
hunitname = r.hunitname,
hunitfactor = r.hunitfactor,
zstart = zstart,
zinc = r.zinc,
annotstart = r.annotstart,
annotinc = r.annotinc,
corners = world_corners) as w:
opentime = time.time()
if dstfilename != "/dev/null":
if singlevalue is not None:
# The input file is known to contain just a single value.
# And the coding range is degenerate. How this is handled
# by the reader is not well defined. So, ignore the values
# that were read and just use the constant. Don't honor
                    # noiselevel in this case. That kludge doesn't really
# make sense for integral files anyway.
if True:
print("Writing a constant-value file, range",
datarange[0], "..", datarange[1],
"value", singlevalue)
w.writeconst((0,0,0), singlevalue, w.size, False)
else:
copy_open_file(r, w, progress = progress1 or ProgressWithDots(), offset=crop_beg, noisefactor=noisefactor)
else:
read_and_discard(r, crop_size or r.size, progress = progress1 or ProgressWithDots(), offset=crop_beg)
copytime = time.time()
if dstfilename != "/dev/null":
w.finalize(progress = progress2 or ProgressWithDots())
finaltime = time.time()
flushtime = time.time()
if True:
timing_report(w, flushtime - starttime)
if True:
print("Times: open {0:.2f} copy {1:.2f} final {2:.2f} flush {3:.2f}".format(
opentime - starttime,
copytime - opentime,
finaltime - copytime,
flushtime - finaltime))
def timing_report(writer, elapsed):
bs = np.array(writer.bricksize, dtype=np.int64)
size = np.array(writer.size, dtype=np.int64)
paddedsize = ((size + bs - 1) // bs) * bs
bandwidth = np.product(paddedsize) / elapsed # should I use size or padsize?
bandwidth /= (1024*1024)
print("Elapsed {0:7.2f} seconds, bandwidth {1:6.2f} MVoxel/s copying {2} {3} samples, exact {4:.0f} MVoxel, padded {5:.0f} MVoxel".format(
elapsed, bandwidth, writer.datatype, tuple(size),
np.product(size) / (1024*1024),
np.product(paddedsize) / (1024*1024)))
def parseints(s):
return tuple(map(int,s.split(",")))
def Main():
# Can do this in a stand alone app, but not as part of a library.
# The main problem is expressions like x = int(offset) + np.int32(size)
# which works fine on Linux but fails occasionally on windows. The
# reason is that offset gets changed to np.int32 on windows if it is
# small enough; np.float64 otherwise. On Linux it is always changed
# to np.int64. On windows the expression will overflow if and only
# if offset is slightly less than np.int32.max.
# Since I am adding extra tests anyway I'll cause exceptions to be
    # raised also on divide by zero, underflow, etc.
np.seterr(all='raise')
parser = argparse.ArgumentParser(description='Copy a ZGY file.', epilog="""
The output cube will have its data bricks sorted by lod, I, J, K
for optimized reads from the cloud.""")
parser.add_argument('input', help='ZGY input cube, local or sd://')
parser.add_argument('output', help='ZGY output cube, local or sd://')
parser.add_argument('--offset', nargs=1, default=["0,0,0"], type=str,
help='i,j,k Starting point in the source cube. Negative numbers count from the end.')
parser.add_argument('--size', nargs=1, default=["0,0,0"], type=str,
help='i,j,k size of data to be copied. Zero means to end of cube.')
parser.add_argument('--center', action='store_true', help="Ignore --offset, crop out center of cube.")
parser.add_argument('--forcetype', default='unchanged', choices=['unchanged', 'int8', 'int16', 'float'], help='Produce a cube of this type instead of keeping the input type. If converting from float to int then the coding range must already be correct.')
parser.add_argument('--datarange', nargs=2, default=None, metavar=('MIN', 'MAX'), type=float, help='Required when converting from float to integral types.')
parser.add_argument('--noisefactor', nargs=1, default=[0], metavar='FACTOR', type=float, help='Amount of noise to add. E.g. 5000 means add 1/5000 of total data range.')
parser.add_argument('--overwrite', action='store_true', help="Quietly overwrite the output file if it exists already.")
parser.add_argument('--snr', default=0, type=int,
help='Pass 10..70 for lossy compression, 99 for lossless, 0 for uncompressed.')
args = parser.parse_args()
#print(args)
args.offset = parseints(args.offset[0])
args.size = parseints(args.size[0])
args.datarange = tuple(args.datarange) if args.datarange is not None else None
args.noisefactor = args.noisefactor[0]
#print(args)
if not args.input or not args.output:
print("File names cannot be empty.", file=sys.stderr)
sys.exit(1)
if not args.overwrite and os.path.exists(args.output):
print('Output file "{0}" already exists.'.format(args.output), file=sys.stderr)
print('If you really meant to overwrite it, specify --overwrite.', file=sys.stderr)
sys.exit(1)
copy(args.input, args.output, args.offset, args.size, args.center, args.forcetype, args.datarange, args.noisefactor, args.snr)
if __name__ == "__main__":
Main()
# Copyright 2017-2021, Schlumberger
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | zgyio | /zgyio-0.0.4.tar.gz/zgyio-0.0.4/openzgy/tools/copy.py | copy.py |
#print('Running' if __name__ == '__main__' else 'Importing', __file__)
import numpy as np
import sys
import os
import argparse
from PIL import Image
import tkinter as tk
from ..api import ZgyReader
from ..test.utils import SDCredentials
from ..tools.viewzgy import savePNG, showFileInTk
def run(filename, *, lods = [0], direction = 0, datarange=None,
outname = None, iocontext=None):
"""
Read one trace or slices in the specified direction, save and display.
"""
allslices = []
with ZgyReader(filename, iocontext=SDCredentials()) as reader:
for lod in lods:
step = 1<<lod
start = [0, 0, 0]
size = np.array(reader.size, dtype=np.int64) // (1 << lod)
start[direction] = size[direction] // 2
size[direction] = 1
if direction == 2:
size[0] = min(size[0], 1024)
size[1] = min(size[1], 1024)
section = np.zeros(size, dtype=np.float32)
reader.read(start, section, lod)
allslices.append(np.squeeze(section))
s = allslices[0].shape
w = np.sum([allslices[i].shape[0] for i in range(3)])
combined = np.zeros((w, s[1]), dtype=allslices[0].dtype)
combined[0:s[0],:] = allslices[0]
ss = allslices[1].shape
combined[s[0]:s[0]+ss[0], 0:ss[1]] = allslices[1]
sss = allslices[2].shape
combined[s[0]+ss[0]:s[0]+ss[0]+sss[0], 0:sss[1]] = allslices[2]
combined = combined[::-1,::]
savePNG(combined, outname, datarange=datarange)
showFileInTk(outname, title=os.path.basename(outname))
def Main(args, *, gain=1, prefix=""):
"""
    Show 3 ZGY files, each displayed with its first 3 LODs.
    The files are assumed to be the uncompressed original, the same file after
    compression, and the difference between those two. The diff is displayed
    using a fraction (1/gain) of the value range of the first input. This makes
    it possible to compare the quality of different compression settings.
"""
with ZgyReader(args[0], iocontext=None) as reader:
datarange0 = reader.datarange
datarange1 = ((reader.datarange[0]/gain, reader.datarange[1]/gain))
run(args[0], lods=range(3), direction=0, datarange = datarange0,
outname = prefix + "-showorig.png")
run(args[1], lods=range(3), direction=0, datarange = datarange0,
outname = prefix + "-showcomp.png")
run(args[2], lods=range(3), direction=0, datarange = datarange1,
outname = prefix + "-showdiff.png")
if __name__ == "__main__":
np.seterr(all='raise')
parser = argparse.ArgumentParser(description='Evaluate compression')
parser.add_argument('orig', help='Original ZGY input file')
parser.add_argument('comp', help='Data after compression')
parser.add_argument('diff', help='Compression noise')
parser.add_argument('--gain', nargs=1, default=[50], type=int,
help='Scaling of diff cube relative to orig range.')
parser.add_argument('--prefix', nargs=1, default=["check"], type=str,
help='prefix for output file name.')
args = parser.parse_args()
#print(args)
Main([args.orig, args.comp, args.diff], gain=args.gain[0], prefix=args.prefix[0])
sys.exit(0)
# Copyright 2017-2020, Schlumberger
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | zgyio | /zgyio-0.0.4.tar.gz/zgyio-0.0.4/openzgy/tools/evalnoise.py | evalnoise.py |
import numpy as np
import sys
import os
from PIL import Image, ImageTk
import tkinter as tk
seismic_default = [
[161, 255, 255], [160, 253, 255], [159, 252, 254], [158, 250, 254],
[157, 249, 253], [156, 247, 253], [155, 246, 253], [154, 244, 252],
[153, 242, 252], [152, 241, 251], [151, 239, 251], [150, 237, 251],
[148, 236, 250], [147, 234, 250], [146, 232, 249], [145, 230, 249],
[144, 229, 248], [143, 227, 248], [142, 225, 247], [140, 223, 247],
[139, 221, 246], [138, 219, 246], [137, 217, 246], [136, 215, 245],
[134, 213, 245], [133, 211, 244], [132, 209, 243], [130, 207, 243],
[129, 205, 242], [128, 203, 242], [126, 200, 241], [125, 198, 241],
[123, 196, 240], [122, 194, 240], [120, 191, 239], [119, 189, 238],
[117, 186, 238], [116, 184, 237], [114, 181, 237], [113, 179, 236],
[111, 177, 235], [110, 174, 235], [108, 171, 234], [106, 169, 233],
[104, 166, 233], [103, 163, 232], [101, 160, 231], [99, 157, 231],
[97, 155, 230], [96, 152, 229], [94, 149, 228], [92, 146, 228],
[90, 143, 227], [88, 139, 226], [86, 136, 225], [84, 133, 225],
[82, 130, 224], [80, 126, 223], [77, 123, 222], [75, 119, 221],
[73, 116, 220], [71, 112, 219], [68, 109, 218], [66, 105, 217],
[64, 101, 217], [61, 97, 216], [59, 93, 215], [56, 89, 214],
[54, 85, 213], [51, 81, 211], [48, 76, 210], [45, 72, 209],
[43, 68, 208], [40, 63, 207], [37, 59, 206], [34, 54, 205],
[31, 49, 203], [28, 44, 202], [24, 39, 201], [21, 34, 200],
[18, 29, 198], [14, 23, 197], [11, 17, 196], [8, 12, 194],
[4, 6, 193], [0, 0, 191], [5, 5, 184], [9, 9, 178],
[13, 13, 171], [18, 18, 164], [22, 22, 158], [27, 27, 151],
[32, 32, 144], [36, 36, 138], [40, 40, 131], [45, 45, 124],
[49, 49, 117], [54, 54, 110], [58, 58, 104], [63, 63, 97],
[67, 67, 90], [72, 72, 83], [77, 77, 77], [81, 81, 81],
[86, 86, 86], [92, 92, 92], [96, 96, 96], [101, 101, 101],
[107, 107, 107], [111, 111, 111], [116, 116, 116], [122, 122, 122],
[126, 126, 126], [131, 131, 131], [137, 137, 137], [141, 141, 141],
[146, 146, 146], [152, 152, 152], [156, 156, 156], [162, 162, 162],
[167, 167, 167], [172, 172, 172], [177, 177, 177], [182, 182, 182],
[187, 187, 187], [192, 192, 192], [197, 197, 197], [202, 202, 202],
[202, 201, 200], [198, 196, 192], [193, 190, 184], [189, 185, 176],
[185, 180, 168], [181, 175, 160], [177, 170, 152], [172, 164, 144],
[168, 159, 136], [164, 153, 128], [160, 148, 120], [156, 143, 112],
[151, 137, 104], [147, 132, 96], [143, 127, 88], [139, 122, 80],
[135, 116, 72], [130, 111, 64], [126, 106, 56], [122, 101, 48],
[118, 95, 40], [114, 90, 32], [109, 85, 24], [105, 79, 16],
[101, 74, 8], [97, 69, 0], [103, 65, 0], [108, 61, 0],
[114, 56, 0], [119, 53, 0], [125, 48, 0], [130, 44, 0],
[136, 40, 0], [141, 36, 0], [147, 32, 0], [152, 28, 0],
[158, 24, 0], [164, 20, 0], [169, 16, 0], [175, 12, 0],
[180, 8, 0], [186, 4, 0], [191, 0, 0], [193, 6, 0],
[194, 12, 0], [196, 17, 0], [197, 23, 0], [198, 29, 0],
[200, 34, 0], [201, 39, 0], [202, 44, 0], [203, 49, 0],
[205, 54, 0], [206, 59, 0], [207, 63, 0], [208, 68, 0],
[209, 72, 0], [210, 76, 0], [211, 81, 0], [213, 85, 0],
[214, 89, 0], [215, 93, 0], [216, 97, 0], [217, 101, 0],
[217, 105, 0], [218, 109, 0], [219, 112, 0], [220, 116, 0],
[221, 120, 0], [222, 123, 0], [223, 126, 0], [224, 130, 0],
[225, 133, 0], [225, 136, 0], [226, 140, 0], [227, 143, 0],
[228, 146, 0], [228, 149, 0], [229, 152, 0], [230, 155, 0],
[231, 158, 0], [231, 160, 0], [232, 163, 0], [233, 166, 0],
[233, 169, 0], [234, 171, 0], [235, 174, 0], [235, 177, 0],
[236, 179, 0], [237, 182, 0], [237, 184, 0], [238, 187, 0],
[238, 189, 0], [239, 191, 0], [240, 194, 0], [240, 196, 0],
[241, 198, 0], [241, 200, 0], [242, 203, 0], [242, 205, 0],
[243, 207, 0], [244, 209, 0], [244, 211, 0], [245, 213, 0],
[245, 215, 0], [246, 217, 0], [246, 219, 0], [247, 221, 0],
[247, 223, 0], [247, 225, 0], [248, 227, 0], [248, 229, 0],
[249, 230, 0], [249, 232, 0], [250, 234, 0], [250, 236, 0],
[251, 237, 0], [251, 239, 0], [251, 241, 0], [252, 242, 0],
[252, 244, 0], [253, 246, 0], [253, 247, 0], [253, 249, 0],
[254, 250, 0], [254, 252, 0], [255, 254, 0], [255, 255, 0],
]
seismic_default = np.array(seismic_default, dtype=np.uint8)
def savePNG(data, outfile, *, title="Seismic", datarange=None):
def normalize(a, *, datarange = None):
a = a.astype(np.float32)
dead = np.isnan(a)
amin, amax = datarange or (np.nanmin(a), np.nanmax(a))
# Zero should be at the center
if amin * amax < 0:
x = max(abs(amin), abs(amax))
amin, amax = (-x, x)
# NaN and Inf show as smallest number
a[dead] = amin
if amin == amax:
a *= 0
else:
# Avoid underflow, because app might have np.seterr(all='raise')
a = a.astype(np.float64)
a = (a - amin) / (amax - amin)
a = (a * 255).astype(np.uint8)
return a, dead
if not outfile:
raise ValueError("outfile must be specified")
#elif outfile[:-4] != ".png":
# raise ValueError("outfile must end in .png:", outfile[:-4])
data = np.squeeze(data)
data = np.transpose(data)
data = np.flip(data, 1)
data, dead = normalize(data, datarange=datarange)
tmp = np.zeros((data.shape[0], data.shape[1], 3), dtype=np.uint8)
r = tmp[...,0]
g = tmp[...,1]
b = tmp[...,2]
ind = seismic_default[data]
r += ind[...,0] # data
g += ind[...,1] # data
b += ind[...,2] # data
r[dead] = 255
g[dead] = 255
b[dead] = 0
im = Image.fromarray(tmp, mode="RGB")
im.save(outfile, format="PNG")
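# Minimal usage sketch (added for clarity; the array is hypothetical):
#   savePNG(np.random.rand(300, 1, 200).astype(np.float32), "slice.png")
# squeezes the singleton axis, maps amplitudes through the 256-entry seismic
# colormap (with zero pinned to the center whenever the range spans zero),
# and paints any NaN/Inf samples yellow before writing the PNG.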
def showFileInTk(filename, title):
window = tk.Tk()
#Creates a Tkinter-compatible photo image, which can be used everywhere Tkinter expects an image object.
img = ImageTk.PhotoImage(Image.open(filename))
#This creates the main window of an application
window.title(title)
window.geometry("{0}x{1}".format(img.width(), img.height()))
window.configure(background='grey')
#The Label widget is a standard Tkinter widget used to display a text or image on the screen.
panel = tk.Label(window, image = img)
#The Pack geometry manager packs widgets in rows or columns.
panel.pack(side = "bottom", fill = "both", expand = "yes")
#Start the GUI
window.mainloop()
# Copyright 2017-2020, Schlumberger
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | zgyio | /zgyio-0.0.4.tar.gz/zgyio-0.0.4/openzgy/tools/viewzgy.py | viewzgy.py |
import sys
import struct
from ..impl import meta
from collections import namedtuple
_fixedType = namedtuple("Attribute", "cname dims ptype ctype csize comment atype aread")
def fixtuple(e):
"""
from: ('_size', '3i', 'int32[3]', '...')
to: ('_size[3]', '3i', 'int32', 3, '...')
"""
cname = e[0]
ptype = e[1]
ctype = e[2]
csize = 0
dims = ""
p1 = ctype.find("[")
p2 = ctype.find("]")
if p1 > 0 and p2 > p1:
csize = ctype[p1+1:p2]
dims = "[" + csize + "]"
try:
csize = int(csize)
except ValueError:
csize = -1
#cname = cname + "[" + csize + "]"
ctype = ctype[0:p1]
try:
ctype = {
"float32": "float",
"float64": "double",
"uint8": "std::uint8_t",
"uint16": "std::uint16_t",
"uint32": "std::uint32_t",
"uint64": "std::uint64_t",
"int8": "std::int8_t",
"int16": "std::int16_t",
"int32": "std::int32_t",
"int64": "std::int64_t",
}[ctype]
except KeyError:
pass
# Also suggest a fixed-length std::array type,
# which might work better in access functions.
if ctype == "char*":
atype = "std::string"
aread = 'std::string(_pod.{0} ? _pod.{0} : "")'.format(cname)
elif csize:
atype = "std::array<{0},{1}>".format(ctype, csize)
aread = "ptr_to_array<{0},{1}>(_pod.{2})".format(ctype, csize, cname)
else:
atype = ctype
aread = "align(_pod." + cname + ")"
return _fixedType(cname, dims, ptype, ctype, csize, e[3], atype, aread)
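# Illustrative sketch (added for clarity, not used by dumpformats itself):
# how one Python format tuple, like the one in the docstring above, is
# expanded into the richer C++-oriented description.
def _fixtuple_example():
    e = fixtuple(('_size', '3i', 'int32[3]', '...'))
    assert e.ctype == "std::int32_t" and e.csize == 3 and e.dims == "[3]"
    assert e.atype == "std::array<std::int32_t,3>"
    assert e.aread == "ptr_to_array<std::int32_t,3>(_pod._size)"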
def dumpformats(cls, *, file = None):
file = file or sys.stdout
say = lambda *args, **kwargs: print(*args, **kwargs, file=file)
classname = cls.__name__
basename = "I" + (classname[:-2] if classname[-2]=="V" else classname)+"Access"
if classname[-2:] == "V1":
say()
say("/" * 77)
say("/// " + classname[:-2] + " " + "/" * (70 - len(classname)))
say("/" * 77)
say("""
class {0} : public IHeaderAccess
{{
public:
static std::shared_ptr<{0}> factory(std::uint32_t version);
// TO""""""DO: Add pure virtual functions, more or less matching the signatures
// not of {1} but of the latest version. Then modify older versions
// so they match the expected api. And while you are at it, move dump()
// in the latest version to the base class and remove the others.
public:
}};""".format(basename, classname))
fixedformats = list([fixtuple(ee) for ee in cls._formats()]) if hasattr(cls, "_formats") else []
# ----- Physical layout: class FooV{n}
say("\n// Python layout is:", cls._format())
say("// Size in storage:", cls.headersize() if hasattr(cls, "headersize") else 0, "bytes")
say("#pragma pack(1)\nclass {0}POD\n{{".format(classname))
say("public:")
for e in fixedformats:
say(" {3}{0:17} {1:15} // {2}".format(e.ctype, e.cname + e.dims + ";", e.comment, ("" if e.ptype else "//")))
say("};\n#pragma pack()")
# ------ Read accessors class FooV{n}Access: aggregates FooV{n}
# and inherits FooBase. Note that no example FooBase
# is generated. Use the latest FooV{n} and replace all
# overridden methods with pure virtual methods.
say("\nclass {0}Access : public {1}\n{{\npublic:\n {0} _pod;".format(classname, basename))
say(" virtual void read(const std::shared_ptr<FileADT> file, std::int64_t offset) override;")
say(" virtual void byteswap() override;")
say(" virtual void rawdump(std::ostream& out, const std::string& prefix = \"\") override;")
say(" virtual void dump(std::ostream& out, const std::string& prefix = \"\") override;")
say("public:")
notimpl = 'throw OpenZGY::Errors::ZgyInternalError("Not implemented");'
for e in fixedformats:
sig1 ="virtual {0}".format(e.atype)
sig =" {0:34} {1}() const override".format(sig1, e.cname[1:])
if e.ptype or e.atype == "std::string":
say(sig + " {{ return {0.aread}; }}".format(e))
else:
say(sig + " { " + notimpl + " }")
say("};")
# ----- FooV{n}Access::read() implementation.
say("\nvoid\n{0}Access::read(const std::shared_ptr<FileADT>& file, std::int64_t offset, std::int64_t size)".format(classname))
say("{\n file->xx_read(&this->_pod, offset, sizeof(this->_pod));\n byteswap();\n}")
# ----- FooV{n}Access::byteswap() implementation.
say("\nvoid\n{0}Access::byteswap()\n{{".format(classname))
for e in fixedformats:
if e.ptype and e.atype != "std::string" and e.ctype in (
"float", "double",
"std::int16_t", "std::int32_t", "std::int64_t",
"std::uint16_t", "std::uint32_t", "std::uint64_t"):
if e.csize == 0:
say(" byteswapT(&_pod.{e.cname});".format(e=e))
else:
say(" byteswapT(&_pod.{e.cname}[0], {e.csize});".format(e=e))
else:
if e.ptype:
say(" // byteswap not needed for {e.ctype} {e.cname}{e.dims} because of its type.".format(e=e))
else:
say(" // byteswap not needed for {e.ctype} {e.cname}{e.dims} because it is not stored.".format(e=e))
say("}")
# ----- Debugging: FooV{n}Access::rawdump() implementation.
# This uses the POD data members directly.
say("\nvoid\n{0}Access::rawdump(std::ostream& out, const std::string& prefix)\n{{".format(classname))
say(' out')
for e in fixedformats:
if e.ptype:
say(' << prefix << "{0:20s}"'.format(e.cname+":"), end="")
else:
say(' //<< prefix << "{0:20s}"'.format(e.cname+":"), end="")
if not e.csize:
say(' << _pod.{0} << "\\n"'.format(e.cname))
else:
for i in range(e.csize):
say(' << _pod.{0}[{1}] << " "'.format(e.cname, i), end="");
say(' << "\\n"')
say(" ;")
say("}")
# ----- Debugging: FooV{n}Access::dump() implementation.
# This uses the generated access methods.
# NOTE: Should probably move the highest version of
# FooV{n}Access::dump to the FooBase base class
# and remove the others.
say("\nvoid\n{0}Access::dump(std::ostream& out, const std::string& prefix)\n{{".format(classname))
say(' out')
for e in fixedformats:
say(' << prefix << "{0:20s}"'.format(e.cname[1:]+"():"), end="")
if not e.ptype:
say(' << "N/A\\n"')
elif not e.csize:
say(' << {0}() << "\\n"'.format(e.cname[1:]))
else:
say(' << array_to_string({0}()) << "\\n"'.format(e.cname[1:]))
say(" ;")
say("}")
@classmethod
def checkformats(cls, verbose = False, *, file = None):
"""
This is the old class-dumper, moved from impl.meta because it is
    not production code. In fact, its functionality is superseded by
dumpformats but might, like dumpformats, turn out to be useful
at some point. Yagni doesn't apply here.
Helper to compare the python definition of the header layout with
the C++ version. If verbose is True output the entire definition
as it would appear in a C++ header. With verbose False it only
does the consistency check. This is cheap enough to be permanently
enabled. Also check that the same attribute isn't listed twice.
"""
file = file or sys.stdout
mapping = {
"char*": "",
"enum": "B",
"float32": "f",
"float64": "d",
"int32": "i",
"int64": "q",
"uint32": "I",
"uint64": "Q",
"uint8": "B",
}
errors = 0
seen = set()
byteoffset = 0
if verbose == 1:
print("// Python layout is:", cls._format(), file=file)
print("// Size in storage:", cls.headersize(), "bytes", file=file)
print("class {0}\n{{".format(cls.__name__), file=file)
if verbose == 2:
print("<h3>{0}</h3>".format(cls.__name__), file=file)
print('<table border="1" style="border-collapse: collapse">', file=file)
print("<tr><th>offset</th><th>size</th><th>type</th><th>name</th><th>remarks</th></tr>", file=file)
for e in cls._formats():
if e[0] in seen:
print("# ERROR: attribute {0} is listed twice.".format(e[0]), file=file)
seen.add(e[0])
ctype = e[2]
cname = e[0]
csize = None
p1 = ctype.find("[")
p2 = ctype.find("]")
if p1 > 0 and p2 > p1:
csize = ctype[p1+1:p2]
cname = cname + "[" + csize + "]"
ctype = ctype[0:p1]
if verbose == 1:
#print(" // offset", byteoffset, file=file)
print(" {3}{0:10} {1:15} // {2}".format(ctype, cname + ";", e[3], ("" if e[1] else "//")), file=file)
if verbose == 2:
print("<tr><td>{3:3}</td><td>{4:2}</td><td>{0:10}</td><td>{1:15}</td><td>{2}</td></tr>".format(ctype, cname, e[3], byteoffset, struct.calcsize(e[1])), file=file)
expect = (csize if csize else '') + mapping[ctype]
if expect == "16B": expect = "16s"
if expect == "4B": expect = "4s"
actual = e[1]
if actual and actual != expect:
print("# ERROR: Expected code {0}, got {1}".format(expect, e[1]), file=file)
errors += 1
byteoffset += struct.calcsize(e[1])
if verbose == 1:
print("};", file=file)
if verbose == 2:
print('<tr><td>{0:3}</td><td colspan="3"> </td><td>end</td></tr>'.format(byteoffset), file=file)
print("</table>", file=file)
assert not errors
def checkAllFormats(*, verbose = False, file = None):
meta.FileHeader.checkformats(verbose=verbose, file=file)
meta.InfoHeaderV1.checkformats(verbose=verbose, file=file)
meta.InfoHeaderV2.checkformats(verbose=verbose, file=file)
meta.InfoHeaderV3.checkformats(verbose=verbose, file=file)
meta.HistHeaderV1.checkformats(verbose=verbose, file=file)
meta.HistHeaderV2.checkformats(verbose=verbose, file=file)
meta.HistHeaderV3.checkformats(verbose=verbose, file=file)
def dumpAllFormats(*, file = None):
print("// AUTOGENERATED -- NEEDS MAJOR EDITS BEFORE USE\n", file=f)
dumpformats(meta.FileHeader, file=f)
dumpformats(meta.OffsetHeaderV1, file=f)
dumpformats(meta.OffsetHeaderV2, file=f)
dumpformats(meta.InfoHeaderV1, file=f)
dumpformats(meta.InfoHeaderV2, file=f)
dumpformats(meta.HistHeaderV1, file=f)
dumpformats(meta.HistHeaderV2, file=f)
if __name__ == "__main__":
# Consistency check only
checkAllFormats()
# Simple C++ header file (dumpformats does much more)
#checkAllFormats(verbose=1)
# HTML formatted documentation
#checkAllFormats(verbose=2)
with open("tmpmetatmp.h", "w") as f:
dumpAllFormats(file = f)
# Copyright 2017-2020, Schlumberger
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | zgyio | /zgyio-0.0.4.tar.gz/zgyio-0.0.4/openzgy/tools/cppmeta.py | cppmeta.py |
import numpy as np
import os, sys, math
from collections import namedtuple
from ..api import ZgyReader
from ..test.utils import SDCredentials
from ..impl import enum as impl_enum
_statisticsType = namedtuple("Statistics", "perfect partial noncont")
def iterate_InlineCrosslineSlice(reader, lod):
lodsize = reader.brickcount[lod]
for ii in range(lodsize[0]):
for jj in range(lodsize[1]):
for kk in range(lodsize[2]):
yield (lod, ii, jj, kk)
def iterate_SliceInlineCrossline(reader, lod):
lodsize = reader.brickcount[lod]
for kk in range(lodsize[2]):
for ii in range(lodsize[0]):
for jj in range(lodsize[1]):
yield (lod, ii, jj, kk)
def iterate_CrosslineSliceInline(reader, lod):
lodsize = reader.brickcount[lod]
for jj in range(lodsize[1]):
for kk in range(lodsize[2]):
for ii in range(lodsize[0]):
yield (lod, ii, jj, kk)
def iterate_CrosslineInlineSlice(reader, lod):
lodsize = reader.brickcount[lod]
for jj in range(lodsize[1]):
for ii in range(lodsize[0]):
for kk in range(lodsize[2]):
yield (lod, ii, jj, kk)
def iterate_InlineSliceCrossline(reader, lod):
lodsize = reader.brickcount[lod]
for ii in range(lodsize[0]):
for kk in range(lodsize[2]):
for jj in range(lodsize[1]):
yield (lod, ii, jj, kk)
def iterate_SliceCrosslineInline(reader, lod):
lodsize = reader.brickcount[lod]
for kk in range(lodsize[2]):
for jj in range(lodsize[1]):
for ii in range(lodsize[0]):
yield (lod, ii, jj, kk)
orderings = [
("InlineCrosslineSlice", iterate_InlineCrosslineSlice),
("SliceInlineCrossline", iterate_SliceInlineCrossline),
("CrosslineSliceInline", iterate_CrosslineSliceInline),
("CrosslineInlineSlice", iterate_CrosslineInlineSlice),
("InlineSliceCrossline", iterate_InlineSliceCrossline),
("SliceCrosslineInline", iterate_SliceCrosslineInline),
]
def isContiguous(reader, work, maxhole):
"""
A single brick-column is considered to be fully optimized if bricks
are contiguous and partly optimized if the gap between any two bricks
is small. It is not optimized if any gap is negative, because the
logic that consolidates bricks might refuse to handle that case.
Note that sum(perfect, partial, noncont) will be one less than the actual
number of bricks since the method only deals with gaps between bricks.
"""
perfect, partial, noncont, totalsize, waste = (0, 0, 0, 0, 0)
prev = None
for lod, ii, jj, kk in work:
(brickstatus, fileoffset, constvalue, bricksize) = reader._accessor._getBrickFilePosition(ii, jj, kk, lod)
if brickstatus in (impl_enum.BrickStatus.Compressed,
impl_enum.BrickStatus.Normal):
totalsize += bricksize
if prev is not None:
delta = fileoffset - prev
if delta == 0:
perfect += 1
elif delta > 0 and delta <= maxhole:
partial += 1
waste += delta
else:
noncont += 1
prev = fileoffset + bricksize
return perfect, partial, noncont, totalsize, waste
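# Worked example (added for clarity; offsets are hypothetical): five 64 KB
# bricks stored at file offsets 0, 64k, 128k, 256k and 192k with the default
# maxhole of 2 MB give two zero gaps (perfect), one 64 KB gap that counts as
# partial with 64 KB wasted, and one negative jump back to 192k that counts
# as noncont, so the function returns (2, 1, 1, 320 KB, 64 KB).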
def run_one_ordering(reader, lods, maxhole, ordering):
"""
Run isContiguous() for a range of lods and sum the results.
Also convert the result to a named tuple.
"""
count_perfect = 0
count_partial = 0
count_noncont = 0
for lod in lods:
work = ordering(reader, lod)
perfect, partial, noncont, _, _ = isContiguous(reader, work, maxhole)
count_perfect += perfect
count_partial += partial
count_noncont += noncont
return _statisticsType(count_perfect, count_partial, count_noncont)
def show_one_ordering(filename, reader, maxhole, ordername, ordering):
"""
For a single ordering, e.g. InlineCrosslineSlice, check both the full
resolution and the low resolution bricks to see how many are contiguous.
Bricks that are less than maxhole distant count as 50% contiguous.
Return the result as a human readable string. Also return the score
as a number, to make it possible to pick the access order that gives
the best result.
"""
lod0 = run_one_ordering(reader, [0], maxhole, ordering)
lodN = run_one_ordering(reader, range(1, reader.nlods), maxhole, ordering)
total = (lod0.perfect + lod0.partial + lod0.noncont +
lodN.perfect + lodN.partial + lodN.noncont)
score = (total -
(lod0.partial + lodN.partial) / 2 -
(lod0.noncont + lodN.noncont)) * (100.0 / total)
if score < 95.0:
overall = "Suboptimal:"
elif lod0.partial or lod0.noncont or lodN.partial or lodN.noncont:
overall = "Acceptable:"
else:
overall = "Optimized: "
if True or lod0.partial or lod0.noncont or lodN.partial or lodN.noncont:
fmt = ("{overall} " +
"contig {lod0.perfect:4d} " +
"partial {lod0.partial:4d} " +
"noncont {lod0.noncont:4d} " +
"lowres: " +
"contig {lodN.perfect:4d} " +
"partial {lodN.partial:4d} " +
"noncont {lodN.noncont:4d} " +
"score {score:6.2f} "
"{name} {ordername}")
else:
fmt = ("{overall} " +
" size {lod0.perfect}+{lodN.perfect} bricks {name}")
message = fmt.format(name=filename, overall=overall, lod0=lod0, lodN=lodN, score=score, ordername=ordername)
return score, ordername, ordering, message
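# Worked example (added for clarity): if lod0 and lodN together cover 100
# brick-to-brick gaps of which 10 are partial and 5 are noncont, the score is
# (100 - 10/2 - 5) * 100/100 = 90.0, which is labeled "Suboptimal:" because
# it falls below the 95.0 threshold used above.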
def show_6_orderings(filename, reader, maxhole):
"""
Try all 6 access orders and see which of them gives the highest number
of contiguous bricks.
"""
results = []
for ordername, ordering in orderings:
result = show_one_ordering(filename, reader, maxhole, ordername, ordering)
results.append(result)
print(result[3])
best = sorted(results)[-1]
print("Recommended access order {0} score {1:6.2f} for {2}".format(best[1], best[0], filename))
return best[1]
def run(filename, maxhole = 2*1024*1024):
with ZgyReader(filename, iocontext = SDCredentials()) as reader:
show_6_orderings(filename, reader, maxhole)
if __name__ == "__main__":
np.seterr(all='raise')
for filename in sys.argv[1:]:
run(filename)
#import cProfile
#cProfile.run('run(sys.argv[1])', sort="cumulative")
# To optimize a file using the old tools:
# env SALMON_LOD_AUTOFLUSH=0 zgycopy -v -b 64,1024,0 IN OUT
# To un-optimize it completely use --random.
# There is currently no simple way to optimize for a different ordering.
# Copyright 2017-2021, Schlumberger
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | zgyio | /zgyio-0.0.4.tar.gz/zgyio-0.0.4/openzgy/tools/isoptimal.py | isoptimal.py |
print('Running' if __name__ == '__main__' else 'Importing', __file__)
import numpy as np
import sys
from time import time
# The new, pure Python API
from ..api import ZgyReader
# The old Python wrapper on top of C++ ZGY-Public
#from zgy import ZgyReader
from ..iterator import readall
def scanForRange(reader, verbose = False):
realmin = []
realmax = []
begtime = time()
for datastart, datasize, brick in readall(reader, dtype=np.float32):
realmin.append(np.nanmin(brick))
realmax.append(np.nanmax(brick))
valuerange = (np.nanmin(realmin), np.nanmax(realmax))
print("VALUE RANGE", valuerange)
elapsed = time() - begtime
voxels = np.product(reader.size) / (1024*1024)
print(" {0:.1f} MVoxel read in {1:.1f} sec, {2:.1f} Mvoxel/s".format(
voxels, elapsed, voxels/elapsed))
return valuerange
def scanForHistogram(reader, valuerange, verbose = False):
"""
Generate a histogram for all samples on the file.
Note that if you want to verify the histogram on the file,
the saved histogram range gives you the center of the first
and last range but numpy wants the outer edges.
"""
hh = None
begtime = time()
for datastart, datasize, brick in readall(reader, dtype=np.float32):
h = np.histogram(brick, bins=256, range=valuerange)
if hh is None:
hh = h[0]
else:
hh += h[0]
elapsed = time() - begtime
voxels = np.product(reader.size) / (1024*1024)
print(" {0:.1f} MVoxel read in {1:.1f} sec, {2:.1f} Mvoxel/s".format(
voxels, elapsed, voxels/elapsed))
return hh
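# Worked example (added for clarity): a stored histogram range of (-1, +1)
# with 256 bins has binwidth 2/255, so the outer-edge range handed to
# np.histogram must be (-1 - 1/255, +1 + 1/255); verifyHistogram() below
# performs exactly that center-to-edge conversion.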
def verifyHistogram(reader, verbose = False):
h = reader.histogram
binwidth = (h.max - h.min) / 255.0
nprange = (h.min - binwidth/2, h.max + binwidth/2)
hh = scanForHistogram(reader, nprange, verbose=verbose)
stored = np.array(h.bin)
delta = hh - stored
#print("STORED", stored, sep="\n")
#print("COMPUTED", hh, sep="\n")
#print("COMPARED", delta, sep="\n")
mismatch = np.array([(x, delta[x]) for x in range(len(delta)) if delta[x]])
if len(mismatch):
print("MISMATCH (bin, count) =", mismatch)
else:
print("HISTOGRAM CORRECT")
if __name__ == "__main__":
np.seterr(all='raise')
if len(sys.argv) <= 1:
args = [ "/home/paal/git/Salmon/UnitTestData/Salmon/UnitTest/Salt2-v3.zgy" ]
else:
args = sys.argv[1:]
for filename in args:
with ZgyReader(filename) as reader:
#reader.dump()
print("size", reader.size, "datatype", reader.datatype)
valuerange = scanForRange(reader, verbose=True)
verifyHistogram(reader, verbose=True)
# Copyright 2017-2020, Schlumberger
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | zgyio | /zgyio-0.0.4.tar.gz/zgyio-0.0.4/openzgy/tools/histcheck.py | histcheck.py |
import os
import time
from datetime import datetime
import traceback
import tkinter as tk
import tkinter.filedialog
import tkinter.messagebox
import tkinter.ttk as ttk
from PIL import Image, ImageTk
from ..tools import copy
class TemporaryWidget(tk.Frame):
def __init__(self, parent=None):
super().__init__(parent, bd=2)
self._parent = parent
self.create_widgets()
def create_widgets(self):
if False:
load = Image.open("dialog.png")
render = ImageTk.PhotoImage(load)
self.orig_label = tk.Label(self, image=render)
self.orig_label.image = render
elif False:
self.orig_label = tk.Label(self, text="Compress or decompress ZGY files")
else:
self.orig_label = tk.Label(self)
self.orig_label.grid(sticky="nsew")
class RadioOne(tk.Frame):
def __init__(self, parent=None):
super().__init__(parent)
self._parent = parent
self.create_widgets()
def create_widgets(self):
tk.Radiobutton(self, text="Lossy", padx=0, justify=tk.LEFT,
variable=self._parent._parent.compressmode,
value=1).grid(row=0, column=1)
tk.Radiobutton(self, text="Lossless", padx=0, justify=tk.LEFT,
variable=self._parent._parent.compressmode,
value=2).grid(row=0, column=2)
tk.Radiobutton(self, text="Uncompressed", padx=0, justify=tk.LEFT,
variable=self._parent._parent.compressmode,
value=3).grid(row=0, column=3)
class UserInputWidget(tk.Frame):
def __init__(self, parent=None):
super().__init__(parent, relief=tk.GROOVE, bd=2)
self._parent = parent
self.create_widgets()
def create_widgets(self):
self._lbl_inputfile = tk.Label(self, text="Input file:")
self._lbl_inputfile.grid(row=0, column=0, sticky="w")
self._txt_inputfile = tk.Entry(self, width=50, textvariable=self._parent.inputname)
self._txt_inputfile.grid(row=0, column=1, sticky="ew")
self._btn_inputfile_select = tk.Button(self, text="...", command=self._parent.open_input_dialog)
self._btn_inputfile_select.grid(row=0, column=2, sticky="w")
self._lbl_outputfile = tk.Label(self, text="Output file:")
self._lbl_outputfile.grid(row=1, column=0, sticky="w")
self._txt_outputfile = tk.Entry(self, width=50, textvariable=self._parent.outputname)
self._txt_outputfile.grid(row=1, column=1, sticky="ew")
self._btn_outputfile_select = tk.Button(self, text="...", command=self._parent.open_output_dialog)
self._btn_outputfile_select.grid(row=1, column=2, sticky="w")
self._lbl_radio_one = tk.Label(self, text="Output is:")
self._lbl_radio_one.grid(row=2, column=0, sticky="w")
self._radio_one = RadioOne(self)
self._radio_one.grid(row=2, column=1, columnspan=2, sticky="w")
self._scale_sqnr = tk.Scale(self, orient=tk.HORIZONTAL, from_=20, to=70, tickinterval=10, label="Signal/noise ratio in dB", resolution=5, variable=self._parent.snr)
self._scale_sqnr.set(30)
self._scale_sqnr.grid(row=3, column=0, columnspan=3, sticky="eww")
self._lbl_1 = tk.Label(self, text="<-- Smaller files Better fidelity -->")
self._lbl_1.grid(row=4, column=0, columnspan=3, sticky="eww")
self.grid_columnconfigure(1, weight=1)
class ProgressWidget(tk.Frame):
def __init__(self, parent=None):
super().__init__(parent, bd=2)
self._parent = parent
self.create_widgets()
def create_widgets(self):
self._lbl_last_completed_hdr = tk.Label(self, text="Last completed")
self._lbl_input_size_hdr = tk.Label(self, text="Input size")
self._lbl_output_percent_hdr = tk.Label(self, text="New size")
self._lbl_output_size_hdr = tk.Label(self, text="New size")
self._lbl_last_completed = tk.Label(self, text="(name)")
self._lbl_input_size = tk.Label(self, text="XX MB")
self._lbl_output_percent = tk.Label(self, text="XX %")
self._lbl_output_size = tk.Label(self, text="XX MB")
self._lbl_last_completed_hdr.grid(row=0, column=2, padx=5, sticky="w")
self._lbl_input_size_hdr.grid(row=0, column=3, padx=5)
self._lbl_output_percent_hdr.grid(row=0, column=4, padx=5)
self._lbl_output_size_hdr.grid(row=0, column=5, padx=5)
self._lbl_last_completed.grid(row=1, column=2, padx=5, sticky="w")
self._lbl_input_size.grid(row=1, column=3, padx=5)
self._lbl_output_percent.grid(row=1, column=4, padx=5)
self._lbl_output_size.grid(row=1, column=5, padx=5)
self._lbl_time_elapsed = tk.Label(self, text="00:00:00", font='TkFixedFont')
self._lbl_time_elapsed.grid(row=2, column=0, sticky="w")
self._lbl_time_remain = tk.Label(self, text="00:00:00", font='TkFixedFont')
self._lbl_time_remain.grid(row=3, column=0)
self._lbl_elapsed = tk.Label(self, text="elapsed")
self._lbl_elapsed.grid(row=2, column=1)
self._lbl_remain = tk.Label(self, text="remain")
self._lbl_remain.grid(row=3, column=1)
self._pb_progress = ttk.Progressbar(self)
self._pb_progress.grid(row=2, column=2, rowspan=2, columnspan=4, sticky="ew")
self.grid_columnconfigure(2, weight=1)
class BottomWidget(tk.Frame):
def __init__(self, parent=None):
super().__init__(parent, bd=2)
self._parent = parent
self.create_widgets()
def create_widgets(self):
self._estimate = tk.Button(self, text="Estimate size...", state=tk.DISABLED)
self._estimate.grid(row=0, column=0, padx=5)
self._run = tk.Button(self, text="Run", command=self._parent.clicked_run)
self._run.grid(row=0, column=1, padx=5)
self._stop = tk.Button(self, text="Stop", state=tk.DISABLED, command=self._parent.clicked_stop)
self._stop.grid(row=0, column=2, padx=5)
self._quit = tk.Button(self, text="Quit", command=self._parent.master.destroy)
self._quit.grid(row=0, column=3, padx=5)
class Application(tk.Frame):
def __init__(self, master=None):
super().__init__(master)
self.master = master
self.grid(sticky="nsew")
self.create_variables()
self.create_widgets()
self.grid_columnconfigure(0, weight=1)
self.grid_rowconfigure(3, weight=1)
self._running = False
self._stopping = False
self._has_run = False
self._start_time = 0
self._stop_time = 0
self.update_dependencies()
def report_callback_exception(self, *args):
err = traceback.format_exception(*args)
tkinter.messagebox.showerror("Internal error", err)
self.master.destroy()
def set_overwrite_confirmed(self, value):
"""
True means the output file does not exist or that the user has
answered "Yes" to the prompt about overwriting. Note that the
file selection dialog pops up this question itself, so if the
output file was filled in from there it will already be confirmed.
Otherwise we wait until the user clicks "Run" before we check.
"""
#print("overwrite_confirmed(" + str(bool(value)) + ")")
self.confirmed = value
def validate(self):
if not self.inputname.get():
tkinter.messagebox.showerror("Error", "Input file must be specified.")
return False
elif not os.path.exists(self.inputname.get()):
tkinter.messagebox.showerror("Error", "Input file does not exist.")
return False
if not self.outputname.get():
tkinter.messagebox.showerror("Error", "Output file must be specified.")
return False
elif self.outputname.get() == self.inputname.get():
tkinter.messagebox.showerror("Error", "Input and output cannot be the same file.")
return False
# Note that user can sneak around this when typing the name,
# e.g. foo.zgy and ./foo.zgy.
elif not self.confirmed:
if not os.path.exists(self.outputname.get()):
self.confirmed = True
else:
self.confirmed = tkinter.messagebox.askokcancel(
"Question",
'File "{0}" already exists.\nDo you want to overwrite it?'.format(self.outputname.get()),
icon=tkinter.messagebox.WARNING)
if not self.confirmed:
return False
return True
def update_dependencies(self):
self.set_state(self._user_input_widget, not self._running)
self.set_state(self._user_input_widget._scale_sqnr,
self.compressmode.get() == 1 and not self._running)
self.set_state(self._user_input_widget._lbl_1,
self.compressmode.get() == 1 and not self._running)
self.set_state(self._progress_widget, self._running or self._has_run)
self.set_state(self._bottom_widget._run, not self._running)
self.set_state(self._bottom_widget._stop, self._running)
@staticmethod
def nicenumber(n):
"""
Format a number with units. Output is always 7 chars.
"""
if not n: return " "
if n < 10000: return "{0:4d} ".format(n)
if n < 1024*10000: return "{0:4d} KB".format(n // 1024)
if n < 1024*1024*10000: return "{0:4d} MB".format(n // (1024*1024))
return "{0:4d} GB".format(n // (1024*1024*1024))
def show_results(self):
isize, osize = (None, None)
try:
isize = os.path.getsize(self.inputname.get())
except Exception:
pass
if not self._running and self._has_run:
try:
osize = os.path.getsize(self.outputname.get())
except Exception:
pass
isize_str = self.nicenumber(isize) if isize else ""
osize_str = self.nicenumber(osize) if osize else ""
osize_pct = "{0:3d} %".format(
100 * osize // isize) if isize and osize else " "
pw = self._progress_widget
pw._lbl_last_completed_hdr.configure(
text = "Current file" if self._running else "Last completed")
shortname = self.inputname.get()
shortname = os.path.basename(shortname)
if len(shortname) > 23: shortname = "---" + shortname[-20:]
pw._lbl_last_completed.configure(text=shortname)
pw._lbl_input_size.configure(text=isize_str)
pw._lbl_output_percent.configure(text=osize_pct)
pw._lbl_output_size.configure(text=osize_str)
elapsed = " : : "
elapsed_sec = int(self._stop_time - self._start_time)
if elapsed_sec > 0 and elapsed_sec < 24*60*60:
elapsed = datetime.utcfromtimestamp(elapsed_sec).strftime('%H:%M:%S')
pw._lbl_time_elapsed.configure(text=elapsed)
pw._lbl_time_remain.configure(text=" : : ")
#self._pb_progress
def set_state(self, w, state):
if isinstance(w, tk.Frame):
for child in w.winfo_children():
self.set_state(child, state)
elif isinstance(w, ttk.Progressbar):
pass # has no "state"
else:
s = tk.NORMAL if state else tk.DISABLED
if False:
print("Change {0} from {1} to {2}".format(str(w), w.cget('state'), s))
if isinstance(w, tk.Scale):
w.configure(state=s, fg="black" if state else "gray")
w.configure(state=s)
def set_running(self, on):
if on and not self._running: self._start_time = time.time()
if not on and self._running: self._stop_time = time.time()
self._running = on
self._stopping = False
if on: self._has_run = True
self.update_dependencies()
self.show_results()
def clicked_run(self):
#print("RUN", self)
if self.validate():
self.set_running(True)
mode = self.compressmode.get()
snr = int(self.snr.get()) if mode==1 else 99 if mode==2 else 0
cmd = [
"python3", "-m", "openzgy.tools.copy",
"--force", "float",
"--snr", str(snr),
self.inputname.get(),
self.outputname.get(),
]
print(*cmd)
self.set_overwrite_confirmed(False)
if True:
copy.copy(
self.inputname.get(), self.outputname.get(),
progress1=self.update_progress,
progress2=lambda done, total: self.update_progress(done, total, True),
forcetype='float', snr=snr)
if self._stopping:
# User clicked 'Stop', so the output, if it exists,
# is most likely corrupt.
self.update_progress(0, 100)
if tkinter.messagebox.askokcancel(
"Question",
'You clicked "Stop",\nso the output file\nis probably unusable.\nDelete it now?', icon=tkinter.messagebox.WARNING):
try:
os.remove(self.outputname.get())
except IOError as ex:
tkinter.messagebox.showerror("Error", "While deteling output:\n" + str(ex))
else:
self.update_progress(100, 100, True)
# If the actual copy is disabled,
# clicked_stop needs to do this for us.
self.set_running(False)
else:
self.set_running(False)
def update_progress(self, done, total, flushing=False):
percent = int(50.0 * done / total)
if done != 0: percent = max(1, percent)
if done >= total: percent = 50
if flushing: percent += 50
self._progress_widget._pb_progress['value'] = percent
if False:
print("DONE:", self._progress_widget._pb_progress['value'],
"of", self._progress_widget._pb_progress['maximum'],
"flushing LODs" if flushing else "copying")
root.update()
return self._running and not self._stopping
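    # Worked example (added for clarity): during the copy phase done == total
    # maps to 50% of the progress bar; the finalize phase (flushing=True)
    # fills the remaining half, so being halfway through writing the LOD
    # bricks shows as int(50 * 0.5) + 50 = 75%.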
def clicked_stop(self):
#print("STOP", self)
self._stopping = True
# Only when debugging, when the copy process isn't started.
# Otherwise it will be called when the process finishes.
#self.set_running(False)
def showvar(self, name):
var = getattr(self, name)
value = var.get()
        if isinstance(value, str): value = '"' + value + '"'
print('trace {0} {1} = {2}'.format(name, var, value))
def create_variables(self):
self.confirmed = False
self.inputname = tk.StringVar(self)
self.outputname = tk.StringVar(self)
self.outputname.trace("w", lambda *args: self.set_overwrite_confirmed(False))
self.compressmode = tk.IntVar(self, 1)
self.compressmode.trace("w", lambda *args: self.update_dependencies())
self.snr = tk.DoubleVar(self, 30)
if False: # debug
self.inputname.trace("w", lambda *args: self.showvar("inputname"))
self.outputname.trace("w", lambda *args: self.showvar("outputname"))
self.compressmode.trace("w", lambda *args: self.showvar("compressmode"))
self.snr.trace("w", lambda *args: self.showvar("snr"))
def create_widgets(self):
#self._temporary_widget = TemporaryWidget(self)
#self._temporary_widget.grid(row=0, column=0, sticky="we")
self._user_input_widget = UserInputWidget(self)
self._user_input_widget.grid(row=1, column=0, padx=5, pady=5, sticky="ew")
self._progress_widget = ProgressWidget(self)
        self._progress_widget.grid(row=2, column=0, sticky="ew")
self._bottom_widget = BottomWidget(self)
self._bottom_widget.grid(row=3, column=0, sticky="se", padx=5, pady=5)
def open_input_dialog(self):
name = tk.filedialog.askopenfilename(filetypes=[("ZGY", "*.zgy")])
if name:
self.inputname.set(name)
def open_output_dialog(self):
name = tk.filedialog.asksaveasfilename(filetypes=[("ZGY", "*.zgy")], defaultextension=".zgy")
if name:
self.outputname.set(name)
self.set_overwrite_confirmed(True)
root, app = (None, None)
def Main():
global root, app
root = tk.Tk()
root.title("Compress or decompress ZGY files")
root.grid_columnconfigure(0, weight=1)
root.grid_rowconfigure(0, weight=1)
app = Application(master=root)
root.report_callback_exception = app.report_callback_exception
app.mainloop()
if __name__ == "__main__":
Main()
# Copyright 2017-2020, Schlumberger
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | zgyio | /zgyio-0.0.4.tar.gz/zgyio-0.0.4/openzgy/tools/gui_copy.py | gui_copy.py |
<div align="center">
<p>
<a align="left" href="https://ultralytics.com/yolov5" target="_blank">
<img width="850" src="https://github.com/ultralytics/yolov5/releases/download/v1.0/splash.jpg"></a>
</p>
<br>
<div>
<a href="https://github.com/ultralytics/yolov5/actions"><img src="https://github.com/ultralytics/yolov5/workflows/CI%20CPU%20testing/badge.svg" alt="CI CPU testing"></a>
<a href="https://zenodo.org/badge/latestdoi/264818686"><img src="https://zenodo.org/badge/264818686.svg" alt="YOLOv5 Citation"></a>
<br>
<a href="https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a>
<a href="https://www.kaggle.com/ultralytics/yolov5"><img src="https://kaggle.com/static/images/open-in-kaggle.svg" alt="Open In Kaggle"></a>
<a href="https://hub.docker.com/r/ultralytics/yolov5"><img src="https://img.shields.io/docker/pulls/ultralytics/yolov5?logo=docker" alt="Docker Pulls"></a>
</div>
<br>
<div align="center">
<a href="https://github.com/ultralytics">
<img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-social-github.png" width="2%"/>
</a>
<img width="2%" />
<a href="https://www.linkedin.com/company/ultralytics">
<img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-social-linkedin.png" width="2%"/>
</a>
<img width="2%" />
<a href="https://twitter.com/ultralytics">
<img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-social-twitter.png" width="2%"/>
</a>
<img width="2%" />
<a href="https://youtube.com/ultralytics">
<img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-social-youtube.png" width="2%"/>
</a>
<img width="2%" />
<a href="https://www.facebook.com/ultralytics">
<img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-social-facebook.png" width="2%"/>
</a>
<img width="2%" />
<a href="https://www.instagram.com/ultralytics/">
<img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-social-instagram.png" width="2%"/>
</a>
</div>
<br>
<p>
YOLOv5 🚀 is a family of object detection architectures and models pretrained on the COCO dataset, and represents <a href="https://ultralytics.com">Ultralytics</a>
open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development.
</p>
<!--
<a align="center" href="https://ultralytics.com/yolov5" target="_blank">
<img width="800" src="https://github.com/ultralytics/yolov5/releases/download/v1.0/banner-api.png"></a>
-->
</div>
## <div align="center">Documentation</div>
See the [YOLOv5 Docs](https://docs.ultralytics.com) for full documentation on training, testing and deployment.
## <div align="center">Quick Start Examples</div>
<details open>
<summary>Install</summary>
[**Python>=3.6.0**](https://www.python.org/) is required, with all
[requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) dependencies installed, including
[**PyTorch>=1.7**](https://pytorch.org/get-started/locally/):
<!-- $ sudo apt update && apt install -y libgl1-mesa-glx libsm6 libxext6 libxrender-dev -->
```bash
$ git clone https://github.com/ultralytics/yolov5
$ cd yolov5
$ pip install -r requirements.txt
```
</details>
<details open>
<summary>Inference</summary>
Inference with YOLOv5 and [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36). Models automatically download
from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases).
```python
import torch
# Model
model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # or yolov5m, yolov5l, yolov5x, custom
# Images
img = 'https://ultralytics.com/images/zidane.jpg' # or file, Path, PIL, OpenCV, numpy, list
# Inference
results = model(img)
# Results
results.print() # or .show(), .save(), .crop(), .pandas(), etc.
```
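The returned `results` object can also be inspected programmatically. A minimal sketch using the
`Detections` helpers shipped with the repository (illustrative only):
```python
df = results.pandas().xyxy[0]  # pandas DataFrame: xmin, ymin, xmax, ymax, confidence, class, name
print(df)
boxes = results.xyxy[0]  # raw tensor, one (x1, y1, x2, y2, conf, cls) row per detection
```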
</details>
<details>
<summary>Inference with detect.py</summary>
`detect.py` runs inference on a variety of sources, downloading models automatically from
the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases) and saving results to `runs/detect`.
```bash
$ python detect.py --source 0 # webcam
file.jpg # image
file.mp4 # video
path/ # directory
path/*.jpg # glob
'https://youtu.be/NUsoVlDFqZg' # YouTube
'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream
```
</details>
<details>
<summary>Training</summary>
Run the commands below to reproduce results
on the [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh) dataset (the dataset auto-downloads on
first use). Training times for YOLOv5s/m/l/x are 2/4/6/8 days on a single V100 (multi-GPU training is proportionally faster). Use the
largest `--batch-size` your GPU allows (batch sizes shown are for 16 GB devices).
```bash
$ python train.py --data coco.yaml --cfg yolov5s.yaml --weights '' --batch-size 64
yolov5m 40
yolov5l 24
yolov5x 16
```
<img width="800" src="https://user-images.githubusercontent.com/26833433/90222759-949d8800-ddc1-11ea-9fa1-1c97eed2b963.png">
</details>
<details open>
<summary>Tutorials</summary>
* [Train Custom Data](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data) 🚀 RECOMMENDED
* [Tips for Best Training Results](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results) ☘️
RECOMMENDED
* [Weights & Biases Logging](https://github.com/ultralytics/yolov5/issues/1289) 🌟 NEW
* [Supervisely Ecosystem](https://github.com/ultralytics/yolov5/issues/2518) 🌟 NEW
* [Multi-GPU Training](https://github.com/ultralytics/yolov5/issues/475)
* [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36) ⭐ NEW
* [TorchScript, ONNX, CoreML Export](https://github.com/ultralytics/yolov5/issues/251) 🚀
* [Test-Time Augmentation (TTA)](https://github.com/ultralytics/yolov5/issues/303)
* [Model Ensembling](https://github.com/ultralytics/yolov5/issues/318)
* [Model Pruning/Sparsity](https://github.com/ultralytics/yolov5/issues/304)
* [Hyperparameter Evolution](https://github.com/ultralytics/yolov5/issues/607)
* [Transfer Learning with Frozen Layers](https://github.com/ultralytics/yolov5/issues/1314) ⭐ NEW
* [TensorRT Deployment](https://github.com/wang-xinyu/tensorrtx)
</details>
## <div align="center">Environments and Integrations</div>
Get started in seconds with our verified environments and integrations,
including [Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme) for automatic YOLOv5 experiment
logging. Click each icon below for details.
<div align="center">
<a href="https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb">
<img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-colab-small.png" width="15%"/>
</a>
<a href="https://www.kaggle.com/ultralytics/yolov5">
<img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-kaggle-small.png" width="15%"/>
</a>
<a href="https://hub.docker.com/r/ultralytics/yolov5">
<img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-docker-small.png" width="15%"/>
</a>
<a href="https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart">
<img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-aws-small.png" width="15%"/>
</a>
<a href="https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart">
<img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-gcp-small.png" width="15%"/>
</a>
<a href="https://wandb.ai/site?utm_campaign=repo_yolo_readme">
<img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-wb-small.png" width="15%"/>
</a>
</div>
## <div align="center">Compete and Win</div>
We are super excited about our first-ever Ultralytics YOLOv5 🚀 EXPORT Competition with **$10,000** in cash prizes!
<p align="center">
<a href="https://github.com/ultralytics/yolov5/discussions/3213">
<img width="850" src="https://github.com/ultralytics/yolov5/releases/download/v1.0/banner-export-competition.png"></a>
</p>
## <div align="center">Why YOLOv5</div>
<p align="center"><img width="800" src="https://user-images.githubusercontent.com/26833433/114313216-f0a5e100-9af5-11eb-8445-c682b60da2e3.png"></p>
<details>
<summary>YOLOv5-P5 640 Figure (click to expand)</summary>
<p align="center"><img width="800" src="https://user-images.githubusercontent.com/26833433/114313219-f1d70e00-9af5-11eb-9973-52b1f98d321a.png"></p>
</details>
<details>
<summary>Figure Notes (click to expand)</summary>
* GPU Speed measures end-to-end time per image averaged over 5000 COCO val2017 images using a V100 GPU with batch size
32, and includes image preprocessing, PyTorch FP16 inference, postprocessing and NMS.
* EfficientDet data from [google/automl](https://github.com/google/automl) at batch size 8.
* **Reproduce** by
`python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt`
</details>
### Pretrained Checkpoints
[assets]: https://github.com/ultralytics/yolov5/releases
|Model |size<br><sup>(pixels) |mAP<sup>val<br>0.5:0.95 |mAP<sup>test<br>0.5:0.95 |mAP<sup>val<br>0.5 |Speed<br><sup>V100 (ms) | |params<br><sup>(M) |FLOPs<br><sup>640 (B)
|--- |--- |--- |--- |--- |--- |---|--- |---
|[YOLOv5s][assets] |640 |36.7 |36.7 |55.4 |**2.0** | |7.3 |17.0
|[YOLOv5m][assets] |640 |44.5 |44.5 |63.1 |2.7 | |21.4 |51.3
|[YOLOv5l][assets] |640 |48.2 |48.2 |66.9 |3.8 | |47.0 |115.4
|[YOLOv5x][assets] |640 |**50.4** |**50.4** |**68.8** |6.1 | |87.7 |218.8
| | | | | | | | |
|[YOLOv5s6][assets] |1280 |43.3 |43.3 |61.9 |**4.3** | |12.7 |17.4
|[YOLOv5m6][assets] |1280 |50.5 |50.5 |68.7 |8.4 | |35.9 |52.4
|[YOLOv5l6][assets] |1280 |53.4 |53.4 |71.1 |12.3 | |77.2 |117.7
|[YOLOv5x6][assets] |1280 |**54.4** |**54.4** |**72.0** |22.4 | |141.8 |222.9
| | | | | | | | |
|[YOLOv5x6][assets] TTA |1280 |**55.0** |**55.0** |**72.0** |70.8 | |- |-
<details>
<summary>Table Notes (click to expand)</summary>
* AP<sup>test</sup> denotes COCO [test-dev2017](http://cocodataset.org/#upload) server results, all other AP results
denote val2017 accuracy.
* AP values are for single-model single-scale unless otherwise noted. **Reproduce mAP**
by `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65`
* Speed<sub>GPU</sub> averaged over 5000 COCO val2017 images using a
GCP [n1-standard-16](https://cloud.google.com/compute/docs/machine-types#n1_standard_machine_types) V100 instance, and
includes FP16 inference, postprocessing and NMS. **Reproduce speed**
by `python val.py --data coco.yaml --img 640 --conf 0.25 --iou 0.45 --half`
* All checkpoints are trained to 300 epochs with default settings and hyperparameters (no autoaugmentation).
* Test Time Augmentation ([TTA](https://github.com/ultralytics/yolov5/issues/303)) includes reflection and scale
augmentation. **Reproduce TTA** by `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment`
</details>
## <div align="center">Contribute</div>
We love your input! We want to make contributing to YOLOv5 as easy and transparent as possible. Please see
our [Contributing Guide](CONTRIBUTING.md) to get started.
## <div align="center">Contact</div>
For issues running YOLOv5 please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues). For business or
professional support requests please visit [https://ultralytics.com/contact](https://ultralytics.com/contact).
<br>
<div align="center">
<a href="https://github.com/ultralytics">
<img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-social-github.png" width="3%"/>
</a>
<img width="3%" />
<a href="https://www.linkedin.com/company/ultralytics">
<img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-social-linkedin.png" width="3%"/>
</a>
<img width="3%" />
<a href="https://twitter.com/ultralytics">
<img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-social-twitter.png" width="3%"/>
</a>
<img width="3%" />
<a href="https://youtube.com/ultralytics">
<img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-social-youtube.png" width="3%"/>
</a>
<img width="3%" />
<a href="https://www.facebook.com/ultralytics">
<img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-social-facebook.png" width="3%"/>
</a>
<img width="3%" />
<a href="https://www.instagram.com/ultralytics/">
<img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-social-instagram.png" width="3%"/>
</a>
</div>
| zh-ito-yolov5 | /zh-ito-yolov5-1.0.0.0.tar.gz/zh-ito-yolov5-1.0.0.0/README.md | README.md |
import logging
import math
import warnings
from copy import copy
from pathlib import Path
import numpy as np
import pandas as pd
import requests
import torch
import torch.nn as nn
from PIL import Image
from torch.cuda import amp
from utils.datasets import exif_transpose, letterbox
from utils.general import colorstr, increment_path, is_ascii, make_divisible, non_max_suppression, save_one_box, \
scale_coords, xyxy2xywh
from utils.plots import Annotator, colors
from utils.torch_utils import time_sync
LOGGER = logging.getLogger(__name__)
def autopad(k, p=None): # kernel, padding
# Pad to 'same'
if p is None:
p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad
return p
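# Example: autopad(3) -> 1 and autopad(5) -> 2, so a stride-1 Conv keeps the spatial size;
# for a tuple kernel such as (1, 3) it returns [0, 1], padding each dimension to 'same'.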
class Conv(nn.Module):
# Standard convolution
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups
super().__init__()
self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)
self.bn = nn.BatchNorm2d(c2)
self.act = nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity())
def forward(self, x):
return self.act(self.bn(self.conv(x)))
def forward_fuse(self, x):
return self.act(self.conv(x))
class DWConv(Conv):
# Depth-wise convolution class
def __init__(self, c1, c2, k=1, s=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups
super().__init__(c1, c2, k, s, g=math.gcd(c1, c2), act=act)
class TransformerLayer(nn.Module):
# Transformer layer https://arxiv.org/abs/2010.11929 (LayerNorm layers removed for better performance)
def __init__(self, c, num_heads):
super().__init__()
self.q = nn.Linear(c, c, bias=False)
self.k = nn.Linear(c, c, bias=False)
self.v = nn.Linear(c, c, bias=False)
self.ma = nn.MultiheadAttention(embed_dim=c, num_heads=num_heads)
self.fc1 = nn.Linear(c, c, bias=False)
self.fc2 = nn.Linear(c, c, bias=False)
def forward(self, x):
x = self.ma(self.q(x), self.k(x), self.v(x))[0] + x
x = self.fc2(self.fc1(x)) + x
return x
class TransformerBlock(nn.Module):
# Vision Transformer https://arxiv.org/abs/2010.11929
def __init__(self, c1, c2, num_heads, num_layers):
super().__init__()
self.conv = None
if c1 != c2:
self.conv = Conv(c1, c2)
self.linear = nn.Linear(c2, c2) # learnable position embedding
self.tr = nn.Sequential(*[TransformerLayer(c2, num_heads) for _ in range(num_layers)])
self.c2 = c2
def forward(self, x):
if self.conv is not None:
x = self.conv(x)
b, _, w, h = x.shape
p = x.flatten(2).unsqueeze(0).transpose(0, 3).squeeze(3)
return self.tr(p + self.linear(p)).unsqueeze(3).transpose(0, 3).reshape(b, self.c2, w, h)
class Bottleneck(nn.Module):
# Standard bottleneck
def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion
super().__init__()
c_ = int(c2 * e) # hidden channels
self.cv1 = Conv(c1, c_, 1, 1)
self.cv2 = Conv(c_, c2, 3, 1, g=g)
self.add = shortcut and c1 == c2
def forward(self, x):
return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
class BottleneckCSP(nn.Module):
# CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
super().__init__()
c_ = int(c2 * e) # hidden channels
self.cv1 = Conv(c1, c_, 1, 1)
self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)
self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)
self.cv4 = Conv(2 * c_, c2, 1, 1)
self.bn = nn.BatchNorm2d(2 * c_) # applied to cat(cv2, cv3)
self.act = nn.LeakyReLU(0.1, inplace=True)
self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])
def forward(self, x):
y1 = self.cv3(self.m(self.cv1(x)))
y2 = self.cv2(x)
return self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1))))
class C3(nn.Module):
# CSP Bottleneck with 3 convolutions
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
super().__init__()
c_ = int(c2 * e) # hidden channels
self.cv1 = Conv(c1, c_, 1, 1)
self.cv2 = Conv(c1, c_, 1, 1)
self.cv3 = Conv(2 * c_, c2, 1) # act=FReLU(c2)
self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])
# self.m = nn.Sequential(*[CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n)])
def forward(self, x):
return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), dim=1))
class C3TR(C3):
# C3 module with TransformerBlock()
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
super().__init__(c1, c2, n, shortcut, g, e)
c_ = int(c2 * e)
self.m = TransformerBlock(c_, c_, 4, n)
class C3SPP(C3):
# C3 module with SPP()
def __init__(self, c1, c2, k=(5, 9, 13), n=1, shortcut=True, g=1, e=0.5):
super().__init__(c1, c2, n, shortcut, g, e)
c_ = int(c2 * e)
self.m = SPP(c_, c_, k)
class C3Ghost(C3):
# C3 module with GhostBottleneck()
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
super().__init__(c1, c2, n, shortcut, g, e)
c_ = int(c2 * e) # hidden channels
self.m = nn.Sequential(*[GhostBottleneck(c_, c_) for _ in range(n)])
class SPP(nn.Module):
# Spatial Pyramid Pooling (SPP) layer https://arxiv.org/abs/1406.4729
def __init__(self, c1, c2, k=(5, 9, 13)):
super().__init__()
c_ = c1 // 2 # hidden channels
self.cv1 = Conv(c1, c_, 1, 1)
self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1)
self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])
def forward(self, x):
x = self.cv1(x)
with warnings.catch_warnings():
warnings.simplefilter('ignore') # suppress torch 1.9.0 max_pool2d() warning
return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1))
class SPPF(nn.Module):
# Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher
def __init__(self, c1, c2, k=5): # equivalent to SPP(k=(5, 9, 13))
super().__init__()
c_ = c1 // 2 # hidden channels
self.cv1 = Conv(c1, c_, 1, 1)
self.cv2 = Conv(c_ * 4, c2, 1, 1)
self.m = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2)
def forward(self, x):
x = self.cv1(x)
with warnings.catch_warnings():
warnings.simplefilter('ignore') # suppress torch 1.9.0 max_pool2d() warning
y1 = self.m(x)
y2 = self.m(y1)
return self.cv2(torch.cat([x, y1, y2, self.m(y2)], 1))
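# Note: cascading the same stride-1 5x5 max-pool gives effective receptive fields of 5, 9 and 13,
# which is why SPPF(k=5) matches SPP(k=(5, 9, 13)) while re-using the intermediate poolings.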
class Focus(nn.Module):
# Focus wh information into c-space
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups
super().__init__()
self.conv = Conv(c1 * 4, c2, k, s, p, g, act)
# self.contract = Contract(gain=2)
def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2)
return self.conv(torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1))
# return self.conv(self.contract(x))
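    # Example: an input of shape (b, 3, 640, 640) produces four (b, 3, 320, 320) slices, so
    # self.conv sees (b, 12, 320, 320) - a space-to-depth step trading resolution for channels.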
class GhostConv(nn.Module):
# Ghost Convolution https://github.com/huawei-noah/ghostnet
def __init__(self, c1, c2, k=1, s=1, g=1, act=True): # ch_in, ch_out, kernel, stride, groups
super().__init__()
c_ = c2 // 2 # hidden channels
self.cv1 = Conv(c1, c_, k, s, None, g, act)
self.cv2 = Conv(c_, c_, 5, 1, None, c_, act)
def forward(self, x):
y = self.cv1(x)
return torch.cat([y, self.cv2(y)], 1)
class GhostBottleneck(nn.Module):
# Ghost Bottleneck https://github.com/huawei-noah/ghostnet
def __init__(self, c1, c2, k=3, s=1): # ch_in, ch_out, kernel, stride
super().__init__()
c_ = c2 // 2
self.conv = nn.Sequential(GhostConv(c1, c_, 1, 1), # pw
DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw
GhostConv(c_, c2, 1, 1, act=False)) # pw-linear
self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False),
Conv(c1, c2, 1, 1, act=False)) if s == 2 else nn.Identity()
def forward(self, x):
return self.conv(x) + self.shortcut(x)
class Contract(nn.Module):
# Contract width-height into channels, i.e. x(1,64,80,80) to x(1,256,40,40)
def __init__(self, gain=2):
super().__init__()
self.gain = gain
def forward(self, x):
        b, c, h, w = x.size()  # assert (h % s == 0) and (w % s == 0), 'Indivisible gain'
s = self.gain
x = x.view(b, c, h // s, s, w // s, s) # x(1,64,40,2,40,2)
x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # x(1,2,2,64,40,40)
return x.view(b, c * s * s, h // s, w // s) # x(1,256,40,40)
class Expand(nn.Module):
# Expand channels into width-height, i.e. x(1,64,80,80) to x(1,16,160,160)
def __init__(self, gain=2):
super().__init__()
self.gain = gain
def forward(self, x):
        b, c, h, w = x.size()  # assert c % s ** 2 == 0, 'Indivisible gain'
s = self.gain
x = x.view(b, s, s, c // s ** 2, h, w) # x(1,2,2,16,80,80)
x = x.permute(0, 3, 4, 1, 5, 2).contiguous() # x(1,16,80,2,80,2)
return x.view(b, c // s ** 2, h * s, w * s) # x(1,16,160,160)
class Concat(nn.Module):
# Concatenate a list of tensors along dimension
def __init__(self, dimension=1):
super().__init__()
self.d = dimension
def forward(self, x):
return torch.cat(x, self.d)
class AutoShape(nn.Module):
# YOLOv5 input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS
conf = 0.25 # NMS confidence threshold
iou = 0.45 # NMS IoU threshold
classes = None # (optional list) filter by class
max_det = 1000 # maximum number of detections per image
def __init__(self, model):
super().__init__()
self.model = model.eval()
def autoshape(self):
LOGGER.info('AutoShape already enabled, skipping... ') # model already converted to model.autoshape()
return self
@torch.no_grad()
def forward(self, imgs, size=640, augment=False, profile=False):
# Inference from various sources. For height=640, width=1280, RGB images example inputs are:
# file: imgs = 'data/images/zidane.jpg' # str or PosixPath
# URI: = 'https://ultralytics.com/images/zidane.jpg'
# OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(640,1280,3)
# PIL: = Image.open('image.jpg') or ImageGrab.grab() # HWC x(640,1280,3)
# numpy: = np.zeros((640,1280,3)) # HWC
# torch: = torch.zeros(16,3,320,640) # BCHW (scaled to size=640, 0-1 values)
# multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...] # list of images
t = [time_sync()]
p = next(self.model.parameters()) # for device and type
if isinstance(imgs, torch.Tensor): # torch
with amp.autocast(enabled=p.device.type != 'cpu'):
return self.model(imgs.to(p.device).type_as(p), augment, profile) # inference
# Pre-process
n, imgs = (len(imgs), imgs) if isinstance(imgs, list) else (1, [imgs]) # number of images, list of images
shape0, shape1, files = [], [], [] # image and inference shapes, filenames
for i, im in enumerate(imgs):
f = f'image{i}' # filename
if isinstance(im, (str, Path)): # filename or uri
im, f = Image.open(requests.get(im, stream=True).raw if str(im).startswith('http') else im), im
im = np.asarray(exif_transpose(im))
elif isinstance(im, Image.Image): # PIL Image
im, f = np.asarray(exif_transpose(im)), getattr(im, 'filename', f) or f
files.append(Path(f).with_suffix('.jpg').name)
if im.shape[0] < 5: # image in CHW
im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1)
im = im[..., :3] if im.ndim == 3 else np.tile(im[..., None], 3) # enforce 3ch input
s = im.shape[:2] # HWC
shape0.append(s) # image shape
g = (size / max(s)) # gain
shape1.append([y * g for y in s])
imgs[i] = im if im.data.contiguous else np.ascontiguousarray(im) # update
shape1 = [make_divisible(x, int(self.stride.max())) for x in np.stack(shape1, 0).max(0)] # inference shape
x = [letterbox(im, new_shape=shape1, auto=False)[0] for im in imgs] # pad
x = np.stack(x, 0) if n > 1 else x[0][None] # stack
x = np.ascontiguousarray(x.transpose((0, 3, 1, 2))) # BHWC to BCHW
x = torch.from_numpy(x).to(p.device).type_as(p) / 255. # uint8 to fp16/32
t.append(time_sync())
with amp.autocast(enabled=p.device.type != 'cpu'):
# Inference
y = self.model(x, augment, profile)[0] # forward
t.append(time_sync())
# Post-process
y = non_max_suppression(y, self.conf, iou_thres=self.iou, classes=self.classes, max_det=self.max_det) # NMS
for i in range(n):
scale_coords(shape1, y[i][:, :4], shape0[i])
t.append(time_sync())
return Detections(imgs, y, files, t, self.names, x.shape)
class Detections:
# YOLOv5 detections class for inference results
def __init__(self, imgs, pred, files, times=None, names=None, shape=None):
super().__init__()
d = pred[0].device # device
gn = [torch.tensor([*[im.shape[i] for i in [1, 0, 1, 0]], 1., 1.], device=d) for im in imgs] # normalizations
self.imgs = imgs # list of images as numpy arrays
self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls)
self.names = names # class names
self.ascii = is_ascii(names) # names are ascii (use PIL for UTF-8)
self.files = files # image filenames
self.xyxy = pred # xyxy pixels
self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels
self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized
self.xywhn = [x / g for x, g in zip(self.xywh, gn)] # xywh normalized
self.n = len(self.pred) # number of images (batch size)
        self.times = times  # raw timestamps, reused by tolist()
        self.t = tuple((times[i + 1] - times[i]) * 1000 / self.n for i in range(3))  # timestamps (ms)
self.s = shape # inference BCHW shape
def display(self, pprint=False, show=False, save=False, crop=False, render=False, save_dir=Path('')):
for i, (im, pred) in enumerate(zip(self.imgs, self.pred)):
            s = f'image {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} '  # print string (avoid shadowing built-in str)
if pred.shape[0]:
for c in pred[:, -1].unique():
n = (pred[:, -1] == c).sum() # detections per class
                    s += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, "  # add to string
if show or save or render or crop:
annotator = Annotator(im, pil=not self.ascii)
for *box, conf, cls in reversed(pred): # xyxy, confidence, class
label = f'{self.names[int(cls)]} {conf:.2f}'
if crop:
save_one_box(box, im, file=save_dir / 'crops' / self.names[int(cls)] / self.files[i])
else: # all others
annotator.box_label(box, label, color=colors(cls))
im = annotator.im
else:
                s += '(no detections)'
im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np
if pprint:
                LOGGER.info(s.rstrip(', '))
if show:
im.show(self.files[i]) # show
if save:
f = self.files[i]
im.save(save_dir / f) # save
if i == self.n - 1:
LOGGER.info(f"Saved {self.n} image{'s' * (self.n > 1)} to {colorstr('bold', save_dir)}")
if render:
self.imgs[i] = np.asarray(im)
def print(self):
self.display(pprint=True) # print results
LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {tuple(self.s)}' %
self.t)
def show(self):
self.display(show=True) # show results
def save(self, save_dir='runs/detect/exp'):
save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True) # increment save_dir
self.display(save=True, save_dir=save_dir) # save results
def crop(self, save_dir='runs/detect/exp'):
save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True) # increment save_dir
self.display(crop=True, save_dir=save_dir) # crop results
LOGGER.info(f'Saved results to {save_dir}\n')
def render(self):
self.display(render=True) # render results
return self.imgs
def pandas(self):
# return detections as pandas DataFrames, i.e. print(results.pandas().xyxy[0])
new = copy(self) # return copy
ca = 'xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class', 'name' # xyxy columns
cb = 'xcenter', 'ycenter', 'width', 'height', 'confidence', 'class', 'name' # xywh columns
for k, c in zip(['xyxy', 'xyxyn', 'xywh', 'xywhn'], [ca, ca, cb, cb]):
a = [[x[:5] + [int(x[5]), self.names[int(x[5])]] for x in x.tolist()] for x in getattr(self, k)] # update
setattr(new, k, [pd.DataFrame(x, columns=c) for x in a])
return new
def tolist(self):
# return a list of Detections objects, i.e. 'for result in results.tolist():'
        x = [Detections([self.imgs[i]], [self.pred[i]], [self.files[i]], self.times, self.names, self.s) for i in range(self.n)]
for d in x:
for k in ['imgs', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']:
setattr(d, k, getattr(d, k)[0]) # pop out of list
return x
def __len__(self):
return self.n
class Classify(nn.Module):
# Classification head, i.e. x(b,c1,20,20) to x(b,c2)
def __init__(self, c1, c2, k=1, s=1, p=None, g=1): # ch_in, ch_out, kernel, stride, padding, groups
super().__init__()
self.aap = nn.AdaptiveAvgPool2d(1) # to x(b,c1,1,1)
self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g) # to x(b,c2,1,1)
self.flat = nn.Flatten()
def forward(self, x):
z = torch.cat([self.aap(y) for y in (x if isinstance(x, list) else [x])], 1) # cat if list
return self.flat(self.conv(z)) # flatten to x(b,c2) | zh-ito-yolov5 | /zh-ito-yolov5-1.0.0.0.tar.gz/zh-ito-yolov5-1.0.0.0/models/common.py | common.py |
import argparse
import sys
from copy import deepcopy
from pathlib import Path
FILE = Path(__file__).absolute()
sys.path.append(FILE.parents[1].as_posix()) # add yolov5/ to path
from models.common import *
from models.experimental import *
from utils.autoanchor import check_anchor_order
from utils.general import make_divisible, check_file, set_logging
from utils.plots import feature_visualization
from utils.torch_utils import time_sync, fuse_conv_and_bn, model_info, scale_img, initialize_weights, \
select_device, copy_attr
try:
import thop # for FLOPs computation
except ImportError:
thop = None
LOGGER = logging.getLogger(__name__)
class Detect(nn.Module):
stride = None # strides computed during build
onnx_dynamic = False # ONNX export parameter
def __init__(self, nc=80, anchors=(), ch=(), inplace=True): # detection layer
super().__init__()
self.nc = nc # number of classes
self.no = nc + 5 # number of outputs per anchor
self.nl = len(anchors) # number of detection layers
self.na = len(anchors[0]) // 2 # number of anchors
self.grid = [torch.zeros(1)] * self.nl # init grid
a = torch.tensor(anchors).float().view(self.nl, -1, 2)
self.register_buffer('anchors', a) # shape(nl,na,2)
self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2)) # shape(nl,1,na,1,1,2)
self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv
self.inplace = inplace # use in-place ops (e.g. slice assignment)
def forward(self, x):
z = [] # inference output
for i in range(self.nl):
x[i] = self.m[i](x[i]) # conv
bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85)
x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()
if not self.training: # inference
if self.grid[i].shape[2:4] != x[i].shape[2:4] or self.onnx_dynamic:
self.grid[i] = self._make_grid(nx, ny).to(x[i].device)
y = x[i].sigmoid()
if self.inplace:
y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy
y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh
else: # for YOLOv5 on AWS Inferentia https://github.com/ultralytics/yolov5/pull/2953
xy = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy
wh = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i].view(1, self.na, 1, 1, 2) # wh
y = torch.cat((xy, wh, y[..., 4:]), -1)
z.append(y.view(bs, -1, self.no))
return x if self.training else (torch.cat(z, 1), x)
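    # Box decoding used above (before multiplying by stride / anchors):
    #   xy = (2 * sigmoid(t_xy) - 0.5 + grid) * stride   -> centre offset limited to (-0.5, 1.5) per cell
    #   wh = (2 * sigmoid(t_wh)) ** 2 * anchor           -> width/height limited to (0, 4) * anchor
    # The bounded sigmoid form replaces the unbounded exp() wh term of earlier YOLO versions.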
@staticmethod
def _make_grid(nx=20, ny=20):
yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)])
return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float()
class Model(nn.Module):
def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, input channels, number of classes
super().__init__()
if isinstance(cfg, dict):
self.yaml = cfg # model dict
else: # is *.yaml
import yaml # for torch hub
self.yaml_file = Path(cfg).name
with open(cfg) as f:
self.yaml = yaml.safe_load(f) # model dict
# Define model
ch = self.yaml['ch'] = self.yaml.get('ch', ch) # input channels
if nc and nc != self.yaml['nc']:
LOGGER.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}")
self.yaml['nc'] = nc # override yaml value
if anchors:
LOGGER.info(f'Overriding model.yaml anchors with anchors={anchors}')
self.yaml['anchors'] = round(anchors) # override yaml value
self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch]) # model, savelist
self.names = [str(i) for i in range(self.yaml['nc'])] # default names
self.inplace = self.yaml.get('inplace', True)
# LOGGER.info([x.shape for x in self.forward(torch.zeros(1, ch, 64, 64))])
# Build strides, anchors
m = self.model[-1] # Detect()
if isinstance(m, Detect):
s = 256 # 2x min stride
m.inplace = self.inplace
m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward
m.anchors /= m.stride.view(-1, 1, 1)
check_anchor_order(m)
self.stride = m.stride
self._initialize_biases() # only run once
# LOGGER.info('Strides: %s' % m.stride.tolist())
# Init weights, biases
initialize_weights(self)
self.info()
LOGGER.info('')
def forward(self, x, augment=False, profile=False, visualize=False):
if augment:
return self.forward_augment(x) # augmented inference, None
return self.forward_once(x, profile, visualize) # single-scale inference, train
def forward_augment(self, x):
img_size = x.shape[-2:] # height, width
s = [1, 0.83, 0.67] # scales
f = [None, 3, None] # flips (2-ud, 3-lr)
y = [] # outputs
for si, fi in zip(s, f):
xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max()))
yi = self.forward_once(xi)[0] # forward
# cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1]) # save
yi = self._descale_pred(yi, fi, si, img_size)
y.append(yi)
return torch.cat(y, 1), None # augmented inference, train
def forward_once(self, x, profile=False, visualize=False):
y, dt = [], [] # outputs
for m in self.model:
if m.f != -1: # if not from previous layer
x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers
if profile:
c = isinstance(m, Detect) # copy input as inplace fix
o = thop.profile(m, inputs=(x.copy() if c else x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPs
t = time_sync()
for _ in range(10):
m(x.copy() if c else x)
dt.append((time_sync() - t) * 100)
if m == self.model[0]:
LOGGER.info(f"{'time (ms)':>10s} {'GFLOPs':>10s} {'params':>10s} {'module'}")
LOGGER.info(f'{dt[-1]:10.2f} {o:10.2f} {m.np:10.0f} {m.type}')
x = m(x) # run
y.append(x if m.i in self.save else None) # save output
if visualize:
feature_visualization(x, m.type, m.i, save_dir=visualize)
if profile:
LOGGER.info('%.1fms total' % sum(dt))
return x
def _descale_pred(self, p, flips, scale, img_size):
# de-scale predictions following augmented inference (inverse operation)
if self.inplace:
p[..., :4] /= scale # de-scale
if flips == 2:
p[..., 1] = img_size[0] - p[..., 1] # de-flip ud
elif flips == 3:
p[..., 0] = img_size[1] - p[..., 0] # de-flip lr
else:
x, y, wh = p[..., 0:1] / scale, p[..., 1:2] / scale, p[..., 2:4] / scale # de-scale
if flips == 2:
y = img_size[0] - y # de-flip ud
elif flips == 3:
x = img_size[1] - x # de-flip lr
p = torch.cat((x, y, wh, p[..., 4:]), -1)
return p
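    # _descale_pred() undoes the test-time augmentation applied in forward_augment(): predictions
    # from the 1.0 / 0.83 / 0.67 scaled passes are divided by their scale, and boxes from the
    # left-right flipped pass (flips == 3) are mirrored back with x = img_width - x; flips == 2
    # would similarly undo an up-down flip.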
def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency
# https://arxiv.org/abs/1708.02002 section 3.3
# cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1.
m = self.model[-1] # Detect() module
for mi, s in zip(m.m, m.stride): # from
b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85)
b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image)
b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls
mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
def _print_biases(self):
m = self.model[-1] # Detect() module
for mi in m.m: # from
b = mi.bias.detach().view(m.na, -1).T # conv.bias(255) to (3,85)
LOGGER.info(
('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean()))
# def _print_weights(self):
# for m in self.model.modules():
# if type(m) is Bottleneck:
# LOGGER.info('%10.3g' % (m.w.detach().sigmoid() * 2)) # shortcut weights
def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers
LOGGER.info('Fusing layers... ')
for m in self.model.modules():
if isinstance(m, (Conv, DWConv)) and hasattr(m, 'bn'):
m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv
delattr(m, 'bn') # remove batchnorm
m.forward = m.forward_fuse # update forward
self.info()
return self
def autoshape(self): # add AutoShape module
LOGGER.info('Adding AutoShape... ')
m = AutoShape(self) # wrap model
copy_attr(m, self, include=('yaml', 'nc', 'hyp', 'names', 'stride'), exclude=()) # copy attributes
return m
def info(self, verbose=False, img_size=640): # print model information
model_info(self, verbose, img_size)
def parse_model(d, ch): # model_dict, input_channels(3)
LOGGER.info('\n%3s%18s%3s%10s %-40s%-30s' % ('', 'from', 'n', 'params', 'module', 'arguments'))
anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple']
na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors
no = na * (nc + 5) # number of outputs = anchors * (classes + 5)
layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out
for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']): # from, number, module, args
m = eval(m) if isinstance(m, str) else m # eval strings
for j, a in enumerate(args):
try:
args[j] = eval(a) if isinstance(a, str) else a # eval strings
except:
pass
n = n_ = max(round(n * gd), 1) if n > 1 else n # depth gain
if m in [Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, SPPF, DWConv, MixConv2d, Focus, CrossConv,
BottleneckCSP, C3, C3TR, C3SPP, C3Ghost]:
c1, c2 = ch[f], args[0]
if c2 != no: # if not output
c2 = make_divisible(c2 * gw, 8)
args = [c1, c2, *args[1:]]
if m in [BottleneckCSP, C3, C3TR, C3Ghost]:
args.insert(2, n) # number of repeats
n = 1
elif m is nn.BatchNorm2d:
args = [ch[f]]
elif m is Concat:
c2 = sum([ch[x] for x in f])
elif m is Detect:
args.append([ch[x] for x in f])
if isinstance(args[1], int): # number of anchors
args[1] = [list(range(args[1] * 2))] * len(f)
elif m is Contract:
c2 = ch[f] * args[0] ** 2
elif m is Expand:
c2 = ch[f] // args[0] ** 2
else:
c2 = ch[f]
m_ = nn.Sequential(*[m(*args) for _ in range(n)]) if n > 1 else m(*args) # module
t = str(m)[8:-2].replace('__main__.', '') # module type
np = sum([x.numel() for x in m_.parameters()]) # number params
m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params
LOGGER.info('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n_, np, t, args)) # print
save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist
layers.append(m_)
if i == 0:
ch = []
ch.append(c2)
return nn.Sequential(*layers), sorted(save)
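# Example of the scaling above (values from yolov5s.yaml: depth_multiple=0.33, width_multiple=0.50):
# a C3 block requested with n=9 is built with max(round(9 * 0.33), 1) = 3 repeats, and a
# 1024-channel layer is narrowed to make_divisible(1024 * 0.50, 8) = 512 output channels.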
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='model.yaml')
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
parser.add_argument('--profile', action='store_true', help='profile model speed')
opt = parser.parse_args()
opt.cfg = check_file(opt.cfg) # check file
set_logging()
device = select_device(opt.device)
# Create model
model = Model(opt.cfg).to(device)
model.train()
# Profile
if opt.profile:
img = torch.rand(8 if torch.cuda.is_available() else 1, 3, 640, 640).to(device)
y = model(img, profile=True)
# Tensorboard (not working https://github.com/ultralytics/yolov5/issues/2898)
# from torch.utils.tensorboard import SummaryWriter
# tb_writer = SummaryWriter('.')
# LOGGER.info("Run 'tensorboard --logdir=models' to view tensorboard at http://localhost:6006/")
# tb_writer.add_graph(torch.jit.trace(model, img, strict=False), []) # add model graph | zh-ito-yolov5 | /zh-ito-yolov5-1.0.0.0.tar.gz/zh-ito-yolov5-1.0.0.0/models/yolo.py | yolo.py |
import numpy as np
import torch
import torch.nn as nn
from models.common import Conv
from utils.downloads import attempt_download
class CrossConv(nn.Module):
# Cross Convolution Downsample
def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False):
# ch_in, ch_out, kernel, stride, groups, expansion, shortcut
super().__init__()
c_ = int(c2 * e) # hidden channels
self.cv1 = Conv(c1, c_, (1, k), (1, s))
self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g)
self.add = shortcut and c1 == c2
def forward(self, x):
return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
class Sum(nn.Module):
# Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070
def __init__(self, n, weight=False): # n: number of inputs
super().__init__()
self.weight = weight # apply weights boolean
self.iter = range(n - 1) # iter object
if weight:
self.w = nn.Parameter(-torch.arange(1., n) / 2, requires_grad=True) # layer weights
def forward(self, x):
y = x[0] # no weight
if self.weight:
w = torch.sigmoid(self.w) * 2
for i in self.iter:
y = y + x[i + 1] * w[i]
else:
for i in self.iter:
y = y + x[i + 1]
return y
class MixConv2d(nn.Module):
# Mixed Depth-wise Conv https://arxiv.org/abs/1907.09595
def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True):
super().__init__()
groups = len(k)
if equal_ch: # equal c_ per group
i = torch.linspace(0, groups - 1E-6, c2).floor() # c2 indices
c_ = [(i == g).sum() for g in range(groups)] # intermediate channels
else: # equal weight.numel() per group
b = [c2] + [0] * groups
a = np.eye(groups + 1, groups, k=-1)
a -= np.roll(a, 1, axis=1)
a *= np.array(k) ** 2
a[0] = 1
c_ = np.linalg.lstsq(a, b, rcond=None)[0].round() # solve for equal weight indices, ax = b
self.m = nn.ModuleList([nn.Conv2d(c1, int(c_[g]), k[g], s, k[g] // 2, bias=False) for g in range(groups)])
self.bn = nn.BatchNorm2d(c2)
self.act = nn.LeakyReLU(0.1, inplace=True)
def forward(self, x):
return x + self.act(self.bn(torch.cat([m(x) for m in self.m], 1)))
class Ensemble(nn.ModuleList):
# Ensemble of models
def __init__(self):
super().__init__()
def forward(self, x, augment=False, profile=False, visualize=False):
y = []
for module in self:
y.append(module(x, augment, profile, visualize)[0])
# y = torch.stack(y).max(0)[0] # max ensemble
# y = torch.stack(y).mean(0) # mean ensemble
y = torch.cat(y, 1) # nms ensemble
return y, None # inference, train output
def attempt_load(weights, map_location=None, inplace=True, fuse=True):
from models.yolo import Detect, Model
# Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a
model = Ensemble()
for w in weights if isinstance(weights, list) else [weights]:
ckpt = torch.load(attempt_download(w), map_location=map_location) # load
if fuse:
model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().fuse().eval()) # FP32 model
else:
model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().eval()) # without layer fuse
# Compatibility updates
for m in model.modules():
if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model]:
m.inplace = inplace # pytorch 1.7.0 compatibility
elif type(m) is Conv:
m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility
if len(model) == 1:
return model[-1] # return model
else:
print(f'Ensemble created with {weights}\n')
for k in ['names']:
setattr(model, k, getattr(model[-1], k))
model.stride = model[torch.argmax(torch.tensor([m.stride.max() for m in model])).int()].stride # max stride
return model # return ensemble | zh-ito-yolov5 | /zh-ito-yolov5-1.0.0.0.tar.gz/zh-ito-yolov5-1.0.0.0/models/experimental.py | experimental.py |
import argparse
import logging
import os
import sys
import traceback
from copy import deepcopy
from pathlib import Path
sys.path.append('./') # to run '$ python *.py' files in subdirectories
import numpy as np
import tensorflow as tf
import torch
import torch.nn as nn
import yaml
from tensorflow import keras
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
from models.common import Conv, Bottleneck, SPP, DWConv, Focus, BottleneckCSP, Concat, autopad, C3
from models.experimental import MixConv2d, CrossConv, attempt_load
from models.yolo import Detect
from utils.datasets import LoadImages
from utils.general import make_divisible, check_file, check_dataset
logger = logging.getLogger(__name__)
class tf_BN(keras.layers.Layer):
# TensorFlow BatchNormalization wrapper
def __init__(self, w=None):
super(tf_BN, self).__init__()
self.bn = keras.layers.BatchNormalization(
beta_initializer=keras.initializers.Constant(w.bias.numpy()),
gamma_initializer=keras.initializers.Constant(w.weight.numpy()),
moving_mean_initializer=keras.initializers.Constant(w.running_mean.numpy()),
moving_variance_initializer=keras.initializers.Constant(w.running_var.numpy()),
epsilon=w.eps)
def call(self, inputs):
return self.bn(inputs)
class tf_Pad(keras.layers.Layer):
def __init__(self, pad):
super(tf_Pad, self).__init__()
self.pad = tf.constant([[0, 0], [pad, pad], [pad, pad], [0, 0]])
def call(self, inputs):
return tf.pad(inputs, self.pad, mode='constant', constant_values=0)
class tf_Conv(keras.layers.Layer):
# Standard convolution
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None):
# ch_in, ch_out, weights, kernel, stride, padding, groups
super(tf_Conv, self).__init__()
assert g == 1, "TF v2.2 Conv2D does not support 'groups' argument"
assert isinstance(k, int), "Convolution with multiple kernels are not allowed."
# TensorFlow convolution padding is inconsistent with PyTorch (e.g. k=3 s=2 'SAME' padding)
# see https://stackoverflow.com/questions/52975843/comparing-conv2d-with-padding-between-tensorflow-and-pytorch
conv = keras.layers.Conv2D(
c2, k, s, 'SAME' if s == 1 else 'VALID', use_bias=False,
kernel_initializer=keras.initializers.Constant(w.conv.weight.permute(2, 3, 1, 0).numpy()))
self.conv = conv if s == 1 else keras.Sequential([tf_Pad(autopad(k, p)), conv])
self.bn = tf_BN(w.bn) if hasattr(w, 'bn') else tf.identity
# YOLOv5 activations
if isinstance(w.act, nn.LeakyReLU):
self.act = (lambda x: keras.activations.relu(x, alpha=0.1)) if act else tf.identity
elif isinstance(w.act, nn.Hardswish):
self.act = (lambda x: x * tf.nn.relu6(x + 3) * 0.166666667) if act else tf.identity
elif isinstance(w.act, nn.SiLU):
self.act = (lambda x: keras.activations.swish(x)) if act else tf.identity
def call(self, inputs):
return self.act(self.bn(self.conv(inputs)))
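# Padding note for tf_Conv: with k=3, s=2 PyTorch pads 1 pixel on every side, while TF 'SAME'
# pads asymmetrically (extra pixel on the bottom/right when the total padding is odd), so the
# two frameworks would disagree. The explicit tf_Pad + 'VALID' convolution above reproduces
# PyTorch's alignment; 'SAME' is only used when s == 1.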
class tf_Focus(keras.layers.Layer):
# Focus wh information into c-space
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None):
# ch_in, ch_out, kernel, stride, padding, groups
super(tf_Focus, self).__init__()
self.conv = tf_Conv(c1 * 4, c2, k, s, p, g, act, w.conv)
def call(self, inputs): # x(b,w,h,c) -> y(b,w/2,h/2,4c)
# inputs = inputs / 255. # normalize 0-255 to 0-1
return self.conv(tf.concat([inputs[:, ::2, ::2, :],
inputs[:, 1::2, ::2, :],
inputs[:, ::2, 1::2, :],
inputs[:, 1::2, 1::2, :]], 3))
class tf_Bottleneck(keras.layers.Layer):
# Standard bottleneck
def __init__(self, c1, c2, shortcut=True, g=1, e=0.5, w=None): # ch_in, ch_out, shortcut, groups, expansion
super(tf_Bottleneck, self).__init__()
c_ = int(c2 * e) # hidden channels
self.cv1 = tf_Conv(c1, c_, 1, 1, w=w.cv1)
self.cv2 = tf_Conv(c_, c2, 3, 1, g=g, w=w.cv2)
self.add = shortcut and c1 == c2
def call(self, inputs):
return inputs + self.cv2(self.cv1(inputs)) if self.add else self.cv2(self.cv1(inputs))
class tf_Conv2d(keras.layers.Layer):
# Substitution for PyTorch nn.Conv2D
def __init__(self, c1, c2, k, s=1, g=1, bias=True, w=None):
super(tf_Conv2d, self).__init__()
assert g == 1, "TF v2.2 Conv2D does not support 'groups' argument"
self.conv = keras.layers.Conv2D(
c2, k, s, 'VALID', use_bias=bias,
kernel_initializer=keras.initializers.Constant(w.weight.permute(2, 3, 1, 0).numpy()),
bias_initializer=keras.initializers.Constant(w.bias.numpy()) if bias else None, )
def call(self, inputs):
return self.conv(inputs)
class tf_BottleneckCSP(keras.layers.Layer):
# CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None):
# ch_in, ch_out, number, shortcut, groups, expansion
super(tf_BottleneckCSP, self).__init__()
c_ = int(c2 * e) # hidden channels
self.cv1 = tf_Conv(c1, c_, 1, 1, w=w.cv1)
self.cv2 = tf_Conv2d(c1, c_, 1, 1, bias=False, w=w.cv2)
self.cv3 = tf_Conv2d(c_, c_, 1, 1, bias=False, w=w.cv3)
self.cv4 = tf_Conv(2 * c_, c2, 1, 1, w=w.cv4)
self.bn = tf_BN(w.bn)
self.act = lambda x: keras.activations.relu(x, alpha=0.1)
self.m = keras.Sequential([tf_Bottleneck(c_, c_, shortcut, g, e=1.0, w=w.m[j]) for j in range(n)])
def call(self, inputs):
y1 = self.cv3(self.m(self.cv1(inputs)))
y2 = self.cv2(inputs)
return self.cv4(self.act(self.bn(tf.concat((y1, y2), axis=3))))
class tf_C3(keras.layers.Layer):
# CSP Bottleneck with 3 convolutions
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None):
# ch_in, ch_out, number, shortcut, groups, expansion
super(tf_C3, self).__init__()
c_ = int(c2 * e) # hidden channels
self.cv1 = tf_Conv(c1, c_, 1, 1, w=w.cv1)
self.cv2 = tf_Conv(c1, c_, 1, 1, w=w.cv2)
self.cv3 = tf_Conv(2 * c_, c2, 1, 1, w=w.cv3)
self.m = keras.Sequential([tf_Bottleneck(c_, c_, shortcut, g, e=1.0, w=w.m[j]) for j in range(n)])
def call(self, inputs):
return self.cv3(tf.concat((self.m(self.cv1(inputs)), self.cv2(inputs)), axis=3))
class tf_SPP(keras.layers.Layer):
# Spatial pyramid pooling layer used in YOLOv3-SPP
def __init__(self, c1, c2, k=(5, 9, 13), w=None):
super(tf_SPP, self).__init__()
c_ = c1 // 2 # hidden channels
self.cv1 = tf_Conv(c1, c_, 1, 1, w=w.cv1)
self.cv2 = tf_Conv(c_ * (len(k) + 1), c2, 1, 1, w=w.cv2)
self.m = [keras.layers.MaxPool2D(pool_size=x, strides=1, padding='SAME') for x in k]
def call(self, inputs):
x = self.cv1(inputs)
return self.cv2(tf.concat([x] + [m(x) for m in self.m], 3))
class tf_Detect(keras.layers.Layer):
def __init__(self, nc=80, anchors=(), ch=(), w=None): # detection layer
super(tf_Detect, self).__init__()
self.stride = tf.convert_to_tensor(w.stride.numpy(), dtype=tf.float32)
self.nc = nc # number of classes
self.no = nc + 5 # number of outputs per anchor
self.nl = len(anchors) # number of detection layers
self.na = len(anchors[0]) // 2 # number of anchors
self.grid = [tf.zeros(1)] * self.nl # init grid
self.anchors = tf.convert_to_tensor(w.anchors.numpy(), dtype=tf.float32)
self.anchor_grid = tf.reshape(tf.convert_to_tensor(w.anchor_grid.numpy(), dtype=tf.float32),
[self.nl, 1, -1, 1, 2])
self.m = [tf_Conv2d(x, self.no * self.na, 1, w=w.m[i]) for i, x in enumerate(ch)]
self.export = False # onnx export
self.training = True # set to False after building model
for i in range(self.nl):
ny, nx = opt.img_size[0] // self.stride[i], opt.img_size[1] // self.stride[i]
self.grid[i] = self._make_grid(nx, ny)
def call(self, inputs):
# x = x.copy() # for profiling
z = [] # inference output
self.training |= self.export
x = []
for i in range(self.nl):
x.append(self.m[i](inputs[i]))
# x(bs,20,20,255) to x(bs,3,20,20,85)
ny, nx = opt.img_size[0] // self.stride[i], opt.img_size[1] // self.stride[i]
x[i] = tf.transpose(tf.reshape(x[i], [-1, ny * nx, self.na, self.no]), [0, 2, 1, 3])
if not self.training: # inference
y = tf.sigmoid(x[i])
xy = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy
wh = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i]
# Normalize xywh to 0-1 to reduce calibration error
xy /= tf.constant([[opt.img_size[1], opt.img_size[0]]], dtype=tf.float32)
wh /= tf.constant([[opt.img_size[1], opt.img_size[0]]], dtype=tf.float32)
y = tf.concat([xy, wh, y[..., 4:]], -1)
z.append(tf.reshape(y, [-1, 3 * ny * nx, self.no]))
return x if self.training else (tf.concat(z, 1), x)
@staticmethod
def _make_grid(nx=20, ny=20):
# yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)])
# return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float()
xv, yv = tf.meshgrid(tf.range(nx), tf.range(ny))
return tf.cast(tf.reshape(tf.stack([xv, yv], 2), [1, 1, ny * nx, 2]), dtype=tf.float32)
class tf_Upsample(keras.layers.Layer):
def __init__(self, size, scale_factor, mode, w=None):
super(tf_Upsample, self).__init__()
assert scale_factor == 2, "scale_factor must be 2"
# self.upsample = keras.layers.UpSampling2D(size=scale_factor, interpolation=mode)
if opt.tf_raw_resize:
# with default arguments: align_corners=False, half_pixel_centers=False
self.upsample = lambda x: tf.raw_ops.ResizeNearestNeighbor(images=x,
size=(x.shape[1] * 2, x.shape[2] * 2))
else:
self.upsample = lambda x: tf.image.resize(x, (x.shape[1] * 2, x.shape[2] * 2), method=mode)
def call(self, inputs):
return self.upsample(inputs)
class tf_Concat(keras.layers.Layer):
def __init__(self, dimension=1, w=None):
super(tf_Concat, self).__init__()
assert dimension == 1, "convert only NCHW to NHWC concat"
self.d = 3
def call(self, inputs):
return tf.concat(inputs, self.d)
def parse_model(d, ch, model): # model_dict, input_channels(3)
logger.info('\n%3s%18s%3s%10s %-40s%-30s' % ('', 'from', 'n', 'params', 'module', 'arguments'))
anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple']
na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors
no = na * (nc + 5) # number of outputs = anchors * (classes + 5)
layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out
for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']): # from, number, module, args
m_str = m
m = eval(m) if isinstance(m, str) else m # eval strings
for j, a in enumerate(args):
try:
args[j] = eval(a) if isinstance(a, str) else a # eval strings
except:
pass
n = max(round(n * gd), 1) if n > 1 else n # depth gain
if m in [nn.Conv2d, Conv, Bottleneck, SPP, DWConv, MixConv2d, Focus, CrossConv, BottleneckCSP, C3]:
c1, c2 = ch[f], args[0]
c2 = make_divisible(c2 * gw, 8) if c2 != no else c2
args = [c1, c2, *args[1:]]
if m in [BottleneckCSP, C3]:
args.insert(2, n)
n = 1
elif m is nn.BatchNorm2d:
args = [ch[f]]
elif m is Concat:
c2 = sum([ch[-1 if x == -1 else x + 1] for x in f])
elif m is Detect:
args.append([ch[x + 1] for x in f])
if isinstance(args[1], int): # number of anchors
args[1] = [list(range(args[1] * 2))] * len(f)
else:
c2 = ch[f]
tf_m = eval('tf_' + m_str.replace('nn.', ''))
m_ = keras.Sequential([tf_m(*args, w=model.model[i][j]) for j in range(n)]) if n > 1 \
else tf_m(*args, w=model.model[i]) # module
torch_m_ = nn.Sequential(*[m(*args) for _ in range(n)]) if n > 1 else m(*args) # module
t = str(m)[8:-2].replace('__main__.', '') # module type
np = sum([x.numel() for x in torch_m_.parameters()]) # number params
m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params
logger.info('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n, np, t, args)) # print
save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist
layers.append(m_)
ch.append(c2)
return keras.Sequential(layers), sorted(save)
class tf_Model():
def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, model=None): # model, input channels, number of classes
super(tf_Model, self).__init__()
if isinstance(cfg, dict):
self.yaml = cfg # model dict
else: # is *.yaml
import yaml # for torch hub
self.yaml_file = Path(cfg).name
with open(cfg) as f:
self.yaml = yaml.load(f, Loader=yaml.FullLoader) # model dict
# Define model
if nc and nc != self.yaml['nc']:
print('Overriding %s nc=%g with nc=%g' % (cfg, self.yaml['nc'], nc))
self.yaml['nc'] = nc # override yaml value
self.model, self.savelist = parse_model(deepcopy(self.yaml), ch=[ch], model=model) # model, savelist, ch_out
def predict(self, inputs, profile=False):
y = [] # outputs
x = inputs
for i, m in enumerate(self.model.layers):
if m.f != -1: # if not from previous layer
x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers
x = m(x) # run
y.append(x if m.i in self.savelist else None) # save output
# Add TensorFlow NMS
if opt.tf_nms:
boxes = xywh2xyxy(x[0][..., :4])
probs = x[0][:, :, 4:5]
classes = x[0][:, :, 5:]
scores = probs * classes
if opt.agnostic_nms:
nms = agnostic_nms_layer()((boxes, classes, scores))
return nms, x[1]
else:
boxes = tf.expand_dims(boxes, 2)
nms = tf.image.combined_non_max_suppression(
boxes, scores, opt.topk_per_class, opt.topk_all, opt.iou_thres, opt.score_thres, clip_boxes=False)
return nms, x[1]
return x[0] # output only first tensor [1,6300,85] = [xywh, conf, class0, class1, ...]
# x = x[0][0] # [x(1,6300,85), ...] to x(6300,85)
# xywh = x[..., :4] # x(6300,4) boxes
# conf = x[..., 4:5] # x(6300,1) confidences
# cls = tf.reshape(tf.cast(tf.argmax(x[..., 5:], axis=1), tf.float32), (-1, 1)) # x(6300,1) classes
# return tf.concat([conf, cls, xywh], 1)
class agnostic_nms_layer(keras.layers.Layer):
# wrap map_fn to avoid TypeSpec related error https://stackoverflow.com/a/65809989/3036450
def call(self, input):
return tf.map_fn(agnostic_nms, input,
fn_output_signature=(tf.float32, tf.float32, tf.float32, tf.int32),
name='agnostic_nms')
def agnostic_nms(x):
boxes, classes, scores = x
class_inds = tf.cast(tf.argmax(classes, axis=-1), tf.float32)
scores_inp = tf.reduce_max(scores, -1)
selected_inds = tf.image.non_max_suppression(
boxes, scores_inp, max_output_size=opt.topk_all, iou_threshold=opt.iou_thres, score_threshold=opt.score_thres)
selected_boxes = tf.gather(boxes, selected_inds)
padded_boxes = tf.pad(selected_boxes,
paddings=[[0, opt.topk_all - tf.shape(selected_boxes)[0]], [0, 0]],
mode="CONSTANT", constant_values=0.0)
selected_scores = tf.gather(scores_inp, selected_inds)
padded_scores = tf.pad(selected_scores,
paddings=[[0, opt.topk_all - tf.shape(selected_boxes)[0]]],
mode="CONSTANT", constant_values=-1.0)
selected_classes = tf.gather(class_inds, selected_inds)
padded_classes = tf.pad(selected_classes,
paddings=[[0, opt.topk_all - tf.shape(selected_boxes)[0]]],
mode="CONSTANT", constant_values=-1.0)
valid_detections = tf.shape(selected_inds)[0]
return padded_boxes, padded_scores, padded_classes, valid_detections
def xywh2xyxy(xywh):
# Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
x, y, w, h = tf.split(xywh, num_or_size_splits=4, axis=-1)
return tf.concat([x - w / 2, y - h / 2, x + w / 2, y + h / 2], axis=-1)
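# Added usage sketch (comments only, not part of the original file): a centre-format box
# [x, y, w, h] = [50, 50, 20, 10] becomes the corner-format box [40, 45, 60, 55], e.g.
#   xywh2xyxy(tf.constant([[50., 50., 20., 10.]]))  # -> [[40., 45., 60., 55.]]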
def representative_dataset_gen():
# Representative dataset for use with converter.representative_dataset
n = 0
for path, img, im0s, vid_cap in dataset:
# Get sample input data as a numpy array in a method of your choosing.
n += 1
input = np.transpose(img, [1, 2, 0])
input = np.expand_dims(input, axis=0).astype(np.float32)
input /= 255.0
yield [input]
if n >= opt.ncalib:
break
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='cfg path')
parser.add_argument('--weights', type=str, default='yolov5s.pt', help='weights path')
parser.add_argument('--img-size', nargs='+', type=int, default=[320, 320], help='image size') # height, width
parser.add_argument('--batch-size', type=int, default=1, help='batch size')
parser.add_argument('--dynamic-batch-size', action='store_true', help='dynamic batch size')
parser.add_argument('--source', type=str, default='../data/coco128.yaml', help='dir of images or data.yaml file')
parser.add_argument('--ncalib', type=int, default=100, help='number of calibration images')
parser.add_argument('--tfl-int8', action='store_true', dest='tfl_int8', help='export TFLite int8 model')
parser.add_argument('--tf-nms', action='store_true', dest='tf_nms', help='TF NMS (without TFLite export)')
parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
parser.add_argument('--tf-raw-resize', action='store_true', dest='tf_raw_resize',
help='use tf.raw_ops.ResizeNearestNeighbor for resize')
parser.add_argument('--topk-per-class', type=int, default=100, help='topk per class to keep in NMS')
parser.add_argument('--topk-all', type=int, default=100, help='topk for all classes to keep in NMS')
parser.add_argument('--iou-thres', type=float, default=0.5, help='IOU threshold for NMS')
parser.add_argument('--score-thres', type=float, default=0.4, help='score threshold for NMS')
opt = parser.parse_args()
opt.cfg = check_file(opt.cfg) # check file
opt.img_size *= 2 if len(opt.img_size) == 1 else 1 # expand
print(opt)
# Input
img = torch.zeros((opt.batch_size, 3, *opt.img_size)) # image size(1,3,320,192) iDetection
# Load PyTorch model
model = attempt_load(opt.weights, map_location=torch.device('cpu'), inplace=True, fuse=False)
model.model[-1].export = False # keep Detect() layer export=False so the dry run returns inference output
y = model(img) # dry run
nc = y[0].shape[-1] - 5
# TensorFlow saved_model export
try:
print('\nStarting TensorFlow saved_model export with TensorFlow %s...' % tf.__version__)
tf_model = tf_Model(opt.cfg, model=model, nc=nc)
img = tf.zeros((opt.batch_size, *opt.img_size, 3)) # NHWC Input for TensorFlow
m = tf_model.model.layers[-1]
assert isinstance(m, tf_Detect), "the last layer must be Detect"
m.training = False
y = tf_model.predict(img)
inputs = keras.Input(shape=(*opt.img_size, 3), batch_size=None if opt.dynamic_batch_size else opt.batch_size)
keras_model = keras.Model(inputs=inputs, outputs=tf_model.predict(inputs))
keras_model.summary()
path = opt.weights.replace('.pt', '_saved_model') # filename
keras_model.save(path, save_format='tf')
print('TensorFlow saved_model export success, saved as %s' % path)
except Exception as e:
print('TensorFlow saved_model export failure: %s' % e)
traceback.print_exc(file=sys.stdout)
# TensorFlow GraphDef export
try:
print('\nStarting TensorFlow GraphDef export with TensorFlow %s...' % tf.__version__)
# https://github.com/leimao/Frozen_Graph_TensorFlow
full_model = tf.function(lambda x: keras_model(x))
full_model = full_model.get_concrete_function(
tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype))
frozen_func = convert_variables_to_constants_v2(full_model)
frozen_func.graph.as_graph_def()
f = opt.weights.replace('.pt', '.pb') # filename
tf.io.write_graph(graph_or_graph_def=frozen_func.graph,
logdir=os.path.dirname(f),
name=os.path.basename(f),
as_text=False)
print('TensorFlow GraphDef export success, saved as %s' % f)
except Exception as e:
print('TensorFlow GraphDef export failure: %s' % e)
traceback.print_exc(file=sys.stdout)
# TFLite model export
if not opt.tf_nms:
try:
print('\nStarting TFLite export with TensorFlow %s...' % tf.__version__)
# fp32 TFLite model export ---------------------------------------------------------------------------------
# converter = tf.lite.TFLiteConverter.from_keras_model(keras_model)
# converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS]
# converter.allow_custom_ops = False
# converter.experimental_new_converter = True
# tflite_model = converter.convert()
# f = opt.weights.replace('.pt', '.tflite') # filename
# open(f, "wb").write(tflite_model)
# fp16 TFLite model export ---------------------------------------------------------------------------------
converter = tf.lite.TFLiteConverter.from_keras_model(keras_model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
# converter.representative_dataset = representative_dataset_gen
# converter.target_spec.supported_types = [tf.float16]
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS]
converter.allow_custom_ops = False
converter.experimental_new_converter = True
tflite_model = converter.convert()
f = opt.weights.replace('.pt', '-fp16.tflite') # filename
open(f, "wb").write(tflite_model)
print('\nTFLite export success, saved as %s' % f)
# int8 TFLite model export ---------------------------------------------------------------------------------
if opt.tfl_int8:
# Representative Dataset
if opt.source.endswith('.yaml'):
with open(check_file(opt.source)) as f:
data = yaml.load(f, Loader=yaml.FullLoader) # data dict
check_dataset(data) # check
opt.source = data['train']
dataset = LoadImages(opt.source, img_size=opt.img_size, auto=False)
converter = tf.lite.TFLiteConverter.from_keras_model(keras_model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset_gen
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.uint8 # or tf.int8
converter.inference_output_type = tf.uint8 # or tf.int8
converter.allow_custom_ops = False
converter.experimental_new_converter = True
converter.experimental_new_quantizer = False
tflite_model = converter.convert()
f = opt.weights.replace('.pt', '-int8.tflite') # filename
open(f, "wb").write(tflite_model)
print('\nTFLite (int8) export success, saved as %s' % f)
except Exception as e:
print('\nTFLite export failure: %s' % e)
traceback.print_exc(file=sys.stdout) | zh-ito-yolov5 | /zh-ito-yolov5-1.0.0.0.tar.gz/zh-ito-yolov5-1.0.0.0/models/tf.py | tf.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
# SiLU https://arxiv.org/pdf/1606.08415.pdf ----------------------------------------------------------------------------
class SiLU(nn.Module): # export-friendly version of nn.SiLU()
@staticmethod
def forward(x):
return x * torch.sigmoid(x)
class Hardswish(nn.Module): # export-friendly version of nn.Hardswish()
@staticmethod
def forward(x):
# return x * F.hardsigmoid(x) # for torchscript and CoreML
return x * F.hardtanh(x + 3, 0., 6.) / 6. # for torchscript, CoreML and ONNX
# Mish https://github.com/digantamisra98/Mish --------------------------------------------------------------------------
class Mish(nn.Module):
@staticmethod
def forward(x):
return x * F.softplus(x).tanh()
class MemoryEfficientMish(nn.Module):
class F(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
return x.mul(torch.tanh(F.softplus(x))) # x * tanh(ln(1 + exp(x)))
@staticmethod
def backward(ctx, grad_output):
x = ctx.saved_tensors[0]
sx = torch.sigmoid(x)
fx = F.softplus(x).tanh()
return grad_output * (fx + x * sx * (1 - fx * fx))
def forward(self, x):
return self.F.apply(x)
# FReLU https://arxiv.org/abs/2007.11824 -------------------------------------------------------------------------------
class FReLU(nn.Module):
def __init__(self, c1, k=3): # ch_in, kernel
super().__init__()
self.conv = nn.Conv2d(c1, c1, k, 1, 1, groups=c1, bias=False)
self.bn = nn.BatchNorm2d(c1)
def forward(self, x):
return torch.max(x, self.bn(self.conv(x)))
# ACON https://arxiv.org/pdf/2009.04759.pdf ----------------------------------------------------------------------------
class AconC(nn.Module):
r""" ACON activation (activate or not).
AconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is a learnable parameter
according to "Activate or Not: Learning Customized Activation" <https://arxiv.org/pdf/2009.04759.pdf>.
"""
def __init__(self, c1):
super().__init__()
self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1))
self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1))
self.beta = nn.Parameter(torch.ones(1, c1, 1, 1))
def forward(self, x):
dpx = (self.p1 - self.p2) * x
return dpx * torch.sigmoid(self.beta * dpx) + self.p2 * x
class MetaAconC(nn.Module):
r""" ACON activation (activate or not).
MetaAconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is generated by a small network
according to "Activate or Not: Learning Customized Activation" <https://arxiv.org/pdf/2009.04759.pdf>.
"""
def __init__(self, c1, k=1, s=1, r=16): # ch_in, kernel, stride, r
super().__init__()
c2 = max(r, c1 // r)
self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1))
self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1))
self.fc1 = nn.Conv2d(c1, c2, k, s, bias=True)
self.fc2 = nn.Conv2d(c2, c1, k, s, bias=True)
# self.bn1 = nn.BatchNorm2d(c2)
# self.bn2 = nn.BatchNorm2d(c1)
def forward(self, x):
y = x.mean(dim=2, keepdims=True).mean(dim=3, keepdims=True)
# batch-size 1 bug/instabilities https://github.com/ultralytics/yolov5/issues/2891
# beta = torch.sigmoid(self.bn2(self.fc2(self.bn1(self.fc1(y))))) # bug/unstable
beta = torch.sigmoid(self.fc2(self.fc1(y))) # bug patch BN layers removed
dpx = (self.p1 - self.p2) * x
return dpx * torch.sigmoid(beta * dpx) + self.p2 * x | zh-ito-yolov5 | /zh-ito-yolov5-1.0.0.0.tar.gz/zh-ito-yolov5-1.0.0.0/utils/activations.py | activations.py |
import math
from copy import copy
from pathlib import Path
import cv2
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sn
import torch
from PIL import Image, ImageDraw, ImageFont
from utils.general import is_ascii, xyxy2xywh, xywh2xyxy
from utils.metrics import fitness
# Settings
matplotlib.rc('font', **{'size': 11})
matplotlib.use('Agg') # for writing to files only
FILE = Path(__file__).absolute()
ROOT = FILE.parents[1] # yolov5/ dir
class Colors:
# Ultralytics color palette https://ultralytics.com/
def __init__(self):
# hex = matplotlib.colors.TABLEAU_COLORS.values()
hex = ('FF3838', 'FF9D97', 'FF701F', 'FFB21D', 'CFD231', '48F90A', '92CC17', '3DDB86', '1A9334', '00D4BB',
'2C99A8', '00C2FF', '344593', '6473FF', '0018EC', '8438FF', '520085', 'CB38FF', 'FF95C8', 'FF37C7')
self.palette = [self.hex2rgb('#' + c) for c in hex]
self.n = len(self.palette)
def __call__(self, i, bgr=False):
c = self.palette[int(i) % self.n]
return (c[2], c[1], c[0]) if bgr else c
@staticmethod
def hex2rgb(h): # rgb order (PIL)
return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))
colors = Colors() # create instance for 'from utils.plots import colors'
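# Added usage sketch (comments only, not part of the original file): colors(i) cycles the 20-colour palette,
# e.g. colors(2) -> (255, 112, 31) in RGB order and colors(2, bgr=True) -> (31, 112, 255) for OpenCV drawing.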
def check_font(font='Arial.ttf', size=10):
# Return a PIL TrueType Font, downloading to ROOT dir if necessary
font = Path(font)
font = font if font.exists() else (ROOT / font.name)
try:
return ImageFont.truetype(str(font) if font.exists() else font.name, size)
except Exception as e: # download if missing
url = "https://ultralytics.com/assets/" + font.name
print(f'Downloading {url} to {font}...')
torch.hub.download_url_to_file(url, str(font))
return ImageFont.truetype(str(font), size)
class Annotator:
check_font() # download TTF if necessary
# YOLOv5 Annotator for train/val mosaics and jpgs and detect/hub inference annotations
def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=True):
assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to Annotator() input images.'
self.pil = pil
if self.pil: # use PIL
self.im = im if isinstance(im, Image.Image) else Image.fromarray(im)
self.draw = ImageDraw.Draw(self.im)
self.font = check_font(font, size=font_size or max(round(sum(self.im.size) / 2 * 0.035), 12))
self.fh = self.font.getsize('a')[1] - 3 # font height
else: # use cv2
self.im = im
self.lw = line_width or max(round(sum(im.shape) / 2 * 0.003), 2) # line width
def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)):
# Add one xyxy box to image with label
if self.pil or not is_ascii(label):
self.draw.rectangle(box, width=self.lw, outline=color) # box
if label:
w = self.font.getsize(label)[0] # text width
self.draw.rectangle([box[0], box[1] - self.fh, box[0] + w + 1, box[1] + 1], fill=color)
self.draw.text((box[0], box[1]), label, fill=txt_color, font=self.font, anchor='ls')
else: # cv2
c1, c2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3]))
cv2.rectangle(self.im, c1, c2, color, thickness=self.lw, lineType=cv2.LINE_AA)
if label:
tf = max(self.lw - 1, 1) # font thickness
w, h = cv2.getTextSize(label, 0, fontScale=self.lw / 3, thickness=tf)[0]
c2 = c1[0] + w, c1[1] - h - 3
cv2.rectangle(self.im, c1, c2, color, -1, cv2.LINE_AA) # filled
cv2.putText(self.im, label, (c1[0], c1[1] - 2), 0, self.lw / 3, txt_color, thickness=tf,
lineType=cv2.LINE_AA)
def rectangle(self, xy, fill=None, outline=None, width=1):
# Add rectangle to image (PIL-only)
self.draw.rectangle(xy, fill, outline, width)
def text(self, xy, text, txt_color=(255, 255, 255)):
# Add text to image (PIL-only)
w, h = self.font.getsize(text) # text width, height
self.draw.text((xy[0], xy[1] - h + 1), text, fill=txt_color, font=self.font)
def result(self):
# Return annotated image as array
return np.asarray(self.im)
def hist2d(x, y, n=100):
# 2d histogram used in labels.png and evolve.png
xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n)
hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges))
xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1)
yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1)
return np.log(hist[xidx, yidx])
def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5):
from scipy.signal import butter, filtfilt
# https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy
def butter_lowpass(cutoff, fs, order):
nyq = 0.5 * fs
normal_cutoff = cutoff / nyq
return butter(order, normal_cutoff, btype='low', analog=False)
b, a = butter_lowpass(cutoff, fs, order=order)
return filtfilt(b, a, data) # forward-backward filter
def output_to_target(output):
# Convert model output to target format [batch_id, class_id, x, y, w, h, conf]
targets = []
for i, o in enumerate(output):
for *box, conf, cls in o.cpu().numpy():
targets.append([i, cls, *list(*xyxy2xywh(np.array(box)[None])), conf])
return np.array(targets)
def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=1920, max_subplots=16):
# Plot image grid with labels
if isinstance(images, torch.Tensor):
images = images.cpu().float().numpy()
if isinstance(targets, torch.Tensor):
targets = targets.cpu().numpy()
if np.max(images[0]) <= 1:
images *= 255.0 # de-normalise (optional)
bs, _, h, w = images.shape # batch size, _, height, width
bs = min(bs, max_subplots) # limit plot images
ns = np.ceil(bs ** 0.5) # number of subplots (square)
# Build Image
mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init
for i, im in enumerate(images):
if i == max_subplots: # stop once the subplot limit is reached (last batch may have fewer images)
break
x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin
im = im.transpose(1, 2, 0)
mosaic[y:y + h, x:x + w, :] = im
# Resize (optional)
scale = max_size / ns / max(h, w)
if scale < 1:
h = math.ceil(scale * h)
w = math.ceil(scale * w)
mosaic = cv2.resize(mosaic, tuple(int(x * ns) for x in (w, h)))
# Annotate
fs = int((h + w) * ns * 0.01) # font size
annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs)
for i in range(i + 1):
x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin
annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders
if paths:
annotator.text((x + 5, y + 5 + h), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames
if len(targets) > 0:
ti = targets[targets[:, 0] == i] # image targets
boxes = xywh2xyxy(ti[:, 2:6]).T
classes = ti[:, 1].astype('int')
labels = ti.shape[1] == 6 # labels if no conf column
conf = None if labels else ti[:, 6] # check for confidence presence (label vs pred)
if boxes.shape[1]:
if boxes.max() <= 1.01: # if normalized with tolerance 0.01
boxes[[0, 2]] *= w # scale to pixels
boxes[[1, 3]] *= h
elif scale < 1: # absolute coords need scale if image scales
boxes *= scale
boxes[[0, 2]] += x
boxes[[1, 3]] += y
for j, box in enumerate(boxes.T.tolist()):
cls = classes[j]
color = colors(cls)
cls = names[cls] if names else cls
if labels or conf[j] > 0.25: # 0.25 conf thresh
label = f'{cls}' if labels else f'{cls} {conf[j]:.1f}'
annotator.box_label(box, label, color=color)
annotator.im.save(fname) # save
def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''):
# Plot LR simulating training for full epochs
optimizer, scheduler = copy(optimizer), copy(scheduler) # do not modify originals
y = []
for _ in range(epochs):
scheduler.step()
y.append(optimizer.param_groups[0]['lr'])
plt.plot(y, '.-', label='LR')
plt.xlabel('epoch')
plt.ylabel('LR')
plt.grid()
plt.xlim(0, epochs)
plt.ylim(0)
plt.savefig(Path(save_dir) / 'LR.png', dpi=200)
plt.close()
def plot_val_txt(): # from utils.plots import *; plot_val()
# Plot val.txt histograms
x = np.loadtxt('val.txt', dtype=np.float32)
box = xyxy2xywh(x[:, :4])
cx, cy = box[:, 0], box[:, 1]
fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True)
ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0)
ax.set_aspect('equal')
plt.savefig('hist2d.png', dpi=300)
fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True)
ax[0].hist(cx, bins=600)
ax[1].hist(cy, bins=600)
plt.savefig('hist1d.png', dpi=200)
def plot_targets_txt(): # from utils.plots import *; plot_targets_txt()
# Plot targets.txt histograms
x = np.loadtxt('targets.txt', dtype=np.float32).T
s = ['x targets', 'y targets', 'width targets', 'height targets']
fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)
ax = ax.ravel()
for i in range(4):
ax[i].hist(x[i], bins=100, label='%.3g +/- %.3g' % (x[i].mean(), x[i].std()))
ax[i].legend()
ax[i].set_title(s[i])
plt.savefig('targets.jpg', dpi=200)
def plot_study_txt(path='', x=None): # from utils.plots import *; plot_study_txt()
# Plot study.txt generated by val.py
plot2 = False # plot additional results
if plot2:
ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)[1].ravel()
fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True)
# for f in [Path(path) / f'study_coco_{x}.txt' for x in ['yolov5s6', 'yolov5m6', 'yolov5l6', 'yolov5x6']]:
for f in sorted(Path(path).glob('study*.txt')):
y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T
x = np.arange(y.shape[1]) if x is None else np.array(x)
if plot2:
s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_preprocess (ms/img)', 't_inference (ms/img)', 't_NMS (ms/img)']
for i in range(7):
ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8)
ax[i].set_title(s[i])
j = y[3].argmax() + 1
ax2.plot(y[5, 1:j], y[3, 1:j] * 1E2, '.-', linewidth=2, markersize=8,
label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO'))
ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5],
'k.-', linewidth=2, markersize=8, alpha=.25, label='EfficientDet')
ax2.grid(alpha=0.2)
ax2.set_yticks(np.arange(20, 60, 5))
ax2.set_xlim(0, 57)
ax2.set_ylim(30, 55)
ax2.set_xlabel('GPU Speed (ms/img)')
ax2.set_ylabel('COCO AP val')
ax2.legend(loc='lower right')
plt.savefig(str(Path(path).name) + '.png', dpi=300)
def plot_labels(labels, names=(), save_dir=Path('')):
# plot dataset labels
print('Plotting labels... ')
c, b = labels[:, 0], labels[:, 1:].transpose() # classes, boxes
nc = int(c.max() + 1) # number of classes
x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height'])
# seaborn correlogram
sn.pairplot(x, corner=True, diag_kind='auto', kind='hist', diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9))
plt.savefig(save_dir / 'labels_correlogram.jpg', dpi=200)
plt.close()
# matplotlib labels
matplotlib.use('svg') # faster
ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel()
y = ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8)
# [y[2].patches[i].set_color([x / 255 for x in colors(i)]) for i in range(nc)] # update colors bug #3195
ax[0].set_ylabel('instances')
if 0 < len(names) < 30:
ax[0].set_xticks(range(len(names)))
ax[0].set_xticklabels(names, rotation=90, fontsize=10)
else:
ax[0].set_xlabel('classes')
sn.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9)
sn.histplot(x, x='width', y='height', ax=ax[3], bins=50, pmax=0.9)
# rectangles
labels[:, 1:3] = 0.5 # center
labels[:, 1:] = xywh2xyxy(labels[:, 1:]) * 2000
img = Image.fromarray(np.ones((2000, 2000, 3), dtype=np.uint8) * 255)
for cls, *box in labels[:1000]:
ImageDraw.Draw(img).rectangle(box, width=1, outline=colors(cls)) # plot
ax[1].imshow(img)
ax[1].axis('off')
for a in [0, 1, 2, 3]:
for s in ['top', 'right', 'left', 'bottom']:
ax[a].spines[s].set_visible(False)
plt.savefig(save_dir / 'labels.jpg', dpi=200)
matplotlib.use('Agg')
plt.close()
def profile_idetection(start=0, stop=0, labels=(), save_dir=''):
# Plot iDetection '*.txt' per-image logs. from utils.plots import *; profile_idetection()
ax = plt.subplots(2, 4, figsize=(12, 6), tight_layout=True)[1].ravel()
s = ['Images', 'Free Storage (GB)', 'RAM Usage (GB)', 'Battery', 'dt_raw (ms)', 'dt_smooth (ms)', 'real-world FPS']
files = list(Path(save_dir).glob('frames*.txt'))
for fi, f in enumerate(files):
try:
results = np.loadtxt(f, ndmin=2).T[:, 90:-30] # clip first and last rows
n = results.shape[1] # number of rows
x = np.arange(start, min(stop, n) if stop else n)
results = results[:, x]
t = (results[0] - results[0].min()) # set t0=0s
results[0] = x
for i, a in enumerate(ax):
if i < len(results):
label = labels[fi] if len(labels) else f.stem.replace('frames_', '')
a.plot(t, results[i], marker='.', label=label, linewidth=1, markersize=5)
a.set_title(s[i])
a.set_xlabel('time (s)')
# if fi == len(files) - 1:
# a.set_ylim(bottom=0)
for side in ['top', 'right']:
a.spines[side].set_visible(False)
else:
a.remove()
except Exception as e:
print('Warning: Plotting error for %s; %s' % (f, e))
ax[1].legend()
plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200)
def plot_evolve(evolve_csv='path/to/evolve.csv'): # from utils.plots import *; plot_evolve()
# Plot evolve.csv hyp evolution results
evolve_csv = Path(evolve_csv)
data = pd.read_csv(evolve_csv)
keys = [x.strip() for x in data.columns]
x = data.values
f = fitness(x)
j = np.argmax(f) # max fitness index
plt.figure(figsize=(10, 12), tight_layout=True)
matplotlib.rc('font', **{'size': 8})
for i, k in enumerate(keys[7:]):
v = x[:, 7 + i]
mu = v[j] # best single result
plt.subplot(6, 5, i + 1)
plt.scatter(v, f, c=hist2d(v, f, 20), cmap='viridis', alpha=.8, edgecolors='none')
plt.plot(mu, f.max(), 'k+', markersize=15)
plt.title('%s = %.3g' % (k, mu), fontdict={'size': 9}) # limit to 40 characters
if i % 5 != 0:
plt.yticks([])
print('%15s: %.3g' % (k, mu))
f = evolve_csv.with_suffix('.png') # filename
plt.savefig(f, dpi=200)
plt.close()
print(f'Saved {f}')
def plot_results(file='path/to/results.csv', dir=''):
# Plot training results.csv. Usage: from utils.plots import *; plot_results('path/to/results.csv')
save_dir = Path(file).parent if file else Path(dir)
fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True)
ax = ax.ravel()
files = list(save_dir.glob('results*.csv'))
assert len(files), f'No results.csv files found in {save_dir.resolve()}, nothing to plot.'
for fi, f in enumerate(files):
try:
data = pd.read_csv(f)
s = [x.strip() for x in data.columns]
x = data.values[:, 0]
for i, j in enumerate([1, 2, 3, 4, 5, 8, 9, 10, 6, 7]):
y = data.values[:, j]
# y[y == 0] = np.nan # don't show zero values
ax[i].plot(x, y, marker='.', label=f.stem, linewidth=2, markersize=8)
ax[i].set_title(s[j], fontsize=12)
# if j in [8, 9, 10]: # share train and val loss y axes
# ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])
except Exception as e:
print(f'Warning: Plotting error for {f}: {e}')
ax[1].legend()
fig.savefig(save_dir / 'results.png', dpi=200)
plt.close()
def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')):
"""
x: Features to be visualized
module_type: Module type
stage: Module stage within model
n: Maximum number of feature maps to plot
save_dir: Directory to save results
"""
if 'Detect' not in module_type:
batch, channels, height, width = x.shape # batch, channels, height, width
if height > 1 and width > 1:
f = f"stage{stage}_{module_type.split('.')[-1]}_features.png" # filename
blocks = torch.chunk(x[0].cpu(), channels, dim=0) # select batch index 0, block by channels
n = min(n, channels) # number of plots
fig, ax = plt.subplots(math.ceil(n / 8), 8, tight_layout=True) # 8 rows x n/8 cols
ax = ax.ravel()
plt.subplots_adjust(wspace=0.05, hspace=0.05)
for i in range(n):
ax[i].imshow(blocks[i].squeeze()) # cmap='gray'
ax[i].axis('off')
print(f'Saving {save_dir / f}... ({n}/{channels})')
plt.savefig(save_dir / f, dpi=300, bbox_inches='tight')
plt.close() | zh-ito-yolov5 | /zh-ito-yolov5-1.0.0.0.tar.gz/zh-ito-yolov5-1.0.0.0/utils/plots.py | plots.py |
import math
import warnings
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import torch
def fitness(x):
# Model fitness as a weighted combination of metrics
w = [0.0, 0.0, 0.1, 0.9] # weights for [P, R, mAP@0.5, mAP@0.5:0.95]
return (x[:, :4] * w).sum(1)
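# Worked example (added comment, not in the original): for metrics
# [P, R, mAP@0.5, mAP@0.5:0.95] = [0.8, 0.7, 0.6, 0.5] the fitness is
# 0.0*0.8 + 0.0*0.7 + 0.1*0.6 + 0.9*0.5 = 0.51, so mAP@0.5:0.95 dominates model selection.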
def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=()):
""" Compute the average precision, given the recall and precision curves.
Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.
# Arguments
tp: True positives (nparray, nx1 or nx10).
conf: Objectness value from 0-1 (nparray).
pred_cls: Predicted object classes (nparray).
target_cls: True object classes (nparray).
plot: Plot precision-recall curve at mAP@0.5
save_dir: Plot save directory
# Returns
The average precision as computed in py-faster-rcnn.
"""
# Sort by objectness
i = np.argsort(-conf)
tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]
# Find unique classes
unique_classes = np.unique(target_cls)
nc = unique_classes.shape[0] # number of classes, number of detections
# Create Precision-Recall curve and compute AP for each class
px, py = np.linspace(0, 1, 1000), [] # for plotting
ap, p, r = np.zeros((nc, tp.shape[1])), np.zeros((nc, 1000)), np.zeros((nc, 1000))
for ci, c in enumerate(unique_classes):
i = pred_cls == c
n_l = (target_cls == c).sum() # number of labels
n_p = i.sum() # number of predictions
if n_p == 0 or n_l == 0:
continue
else:
# Accumulate FPs and TPs
fpc = (1 - tp[i]).cumsum(0)
tpc = tp[i].cumsum(0)
# Recall
recall = tpc / (n_l + 1e-16) # recall curve
r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0) # negative x, xp because xp decreases
# Precision
precision = tpc / (tpc + fpc) # precision curve
p[ci] = np.interp(-px, -conf[i], precision[:, 0], left=1) # p at pr_score
# AP from recall-precision curve
for j in range(tp.shape[1]):
ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j])
if plot and j == 0:
py.append(np.interp(px, mrec, mpre)) # precision at mAP@0.5
# Compute F1 (harmonic mean of precision and recall)
f1 = 2 * p * r / (p + r + 1e-16)
if plot:
plot_pr_curve(px, py, ap, Path(save_dir) / 'PR_curve.png', names)
plot_mc_curve(px, f1, Path(save_dir) / 'F1_curve.png', names, ylabel='F1')
plot_mc_curve(px, p, Path(save_dir) / 'P_curve.png', names, ylabel='Precision')
plot_mc_curve(px, r, Path(save_dir) / 'R_curve.png', names, ylabel='Recall')
i = f1.mean(0).argmax() # max F1 index
return p[:, i], r[:, i], ap, f1[:, i], unique_classes.astype('int32')
def compute_ap(recall, precision):
""" Compute the average precision, given the recall and precision curves
# Arguments
recall: The recall curve (list)
precision: The precision curve (list)
# Returns
Average precision, precision curve, recall curve
"""
# Append sentinel values to beginning and end
mrec = np.concatenate(([0.0], recall, [1.0]))
mpre = np.concatenate(([1.0], precision, [0.0]))
# Compute the precision envelope
mpre = np.flip(np.maximum.accumulate(np.flip(mpre)))
# Integrate area under curve
method = 'interp' # methods: 'continuous', 'interp'
if method == 'interp':
x = np.linspace(0, 1, 101) # 101-point interp (COCO)
ap = np.trapz(np.interp(x, mrec, mpre), x) # integrate
else: # 'continuous'
i = np.where(mrec[1:] != mrec[:-1])[0] # points where x axis (recall) changes
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) # area under curve
return ap, mpre, mrec
class ConfusionMatrix:
# Updated version of https://github.com/kaanakan/object_detection_confusion_matrix
def __init__(self, nc, conf=0.25, iou_thres=0.45):
self.matrix = np.zeros((nc + 1, nc + 1))
self.nc = nc # number of classes
self.conf = conf
self.iou_thres = iou_thres
def process_batch(self, detections, labels):
"""
Return intersection-over-union (Jaccard index) of boxes.
Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
Arguments:
detections (Array[N, 6]), x1, y1, x2, y2, conf, class
labels (Array[M, 5]), class, x1, y1, x2, y2
Returns:
None, updates confusion matrix accordingly
"""
detections = detections[detections[:, 4] > self.conf]
gt_classes = labels[:, 0].int()
detection_classes = detections[:, 5].int()
iou = box_iou(labels[:, 1:], detections[:, :4])
x = torch.where(iou > self.iou_thres)
if x[0].shape[0]:
matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()
if x[0].shape[0] > 1:
matches = matches[matches[:, 2].argsort()[::-1]]
matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
matches = matches[matches[:, 2].argsort()[::-1]]
matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
else:
matches = np.zeros((0, 3))
n = matches.shape[0] > 0
m0, m1, _ = matches.transpose().astype(np.int16)
for i, gc in enumerate(gt_classes):
j = m0 == i
if n and sum(j) == 1:
self.matrix[detection_classes[m1[j]], gc] += 1 # correct
else:
self.matrix[self.nc, gc] += 1 # background FP
if n:
for i, dc in enumerate(detection_classes):
if not any(m1 == i):
self.matrix[dc, self.nc] += 1 # background FN
def matrix(self):
return self.matrix
def plot(self, normalize=True, save_dir='', names=()):
try:
import seaborn as sn
array = self.matrix / ((self.matrix.sum(0).reshape(1, -1) + 1E-6) if normalize else 1) # normalize columns
array[array < 0.005] = np.nan # don't annotate (would appear as 0.00)
fig = plt.figure(figsize=(12, 9), tight_layout=True)
sn.set(font_scale=1.0 if self.nc < 50 else 0.8) # for label size
labels = (0 < len(names) < 99) and len(names) == self.nc # apply names to ticklabels
with warnings.catch_warnings():
warnings.simplefilter('ignore') # suppress empty matrix RuntimeWarning: All-NaN slice encountered
sn.heatmap(array, annot=self.nc < 30, annot_kws={"size": 8}, cmap='Blues', fmt='.2f', square=True,
xticklabels=names + ['background FP'] if labels else "auto",
yticklabels=names + ['background FN'] if labels else "auto").set_facecolor((1, 1, 1))
fig.axes[0].set_xlabel('True')
fig.axes[0].set_ylabel('Predicted')
fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250)
plt.close()
except Exception as e:
print(f'WARNING: ConfusionMatrix plot failure: {e}')
def print(self):
for i in range(self.nc + 1):
print(' '.join(map(str, self.matrix[i])))
def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7):
# Returns the IoU of box1 to box2. box1 is 4, box2 is nx4
box2 = box2.T
# Get the coordinates of bounding boxes
if x1y1x2y2: # x1, y1, x2, y2 = box1
b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
else: # transform from xywh to xyxy
b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2
b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2
b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2
b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2
# Intersection area
inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \
(torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0)
# Union Area
w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps
w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps
union = w1 * h1 + w2 * h2 - inter + eps
iou = inter / union
if GIoU or DIoU or CIoU:
cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width
ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height
if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared
rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 +
(b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center distance squared
if DIoU:
return iou - rho2 / c2 # DIoU
elif CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
with torch.no_grad():
alpha = v / (v - iou + (1 + eps))
return iou - (rho2 / c2 + v * alpha) # CIoU
else: # GIoU https://arxiv.org/pdf/1902.09630.pdf
c_area = cw * ch + eps # convex area
return iou - (c_area - union) / c_area # GIoU
else:
return iou # IoU
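# Added usage sketch (comments only, not part of the original file): identical boxes give IoU ~1.0, e.g.
#   bbox_iou(torch.tensor([0., 0., 10., 10.]), torch.tensor([[0., 0., 10., 10.]]))  # ~tensor([1.])
# ComputeLoss calls this with x1y1x2y2=False, CIoU=True for the box regression term.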
def box_iou(box1, box2):
# https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
"""
Return intersection-over-union (Jaccard index) of boxes.
Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
Arguments:
box1 (Tensor[N, 4])
box2 (Tensor[M, 4])
Returns:
iou (Tensor[N, M]): the NxM matrix containing the pairwise
IoU values for every element in boxes1 and boxes2
"""
def box_area(box):
# box = 4xn
return (box[2] - box[0]) * (box[3] - box[1])
area1 = box_area(box1.T)
area2 = box_area(box2.T)
# inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)
inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)
return inter / (area1[:, None] + area2 - inter) # iou = inter / (area1 + area2 - inter)
def bbox_ioa(box1, box2, eps=1E-7):
""" Returns the intersection over box2 area given box1, box2. Boxes are x1y1x2y2
box1: np.array of shape(4)
box2: np.array of shape(nx4)
returns: np.array of shape(n)
"""
box2 = box2.transpose()
# Get the coordinates of bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
# Intersection area
inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
(np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)
# box2 area
box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + eps
# Intersection over box2 area
return inter_area / box2_area
def wh_iou(wh1, wh2):
# Returns the nxm IoU matrix. wh1 is nx2, wh2 is mx2
wh1 = wh1[:, None] # [N,1,2]
wh2 = wh2[None] # [1,M,2]
inter = torch.min(wh1, wh2).prod(2) # [N,M]
return inter / (wh1.prod(2) + wh2.prod(2) - inter) # iou = inter / (area1 + area2 - inter)
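# Worked example (added comment, not in the original): wh_iou assumes boxes share a corner, so
# wh1 = [[2., 2.]] vs wh2 = [[2., 4.]] gives inter = 4, union = 4 + 8 - 4 = 8 and IoU = 0.5.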
# Plots ----------------------------------------------------------------------------------------------------------------
def plot_pr_curve(px, py, ap, save_dir='pr_curve.png', names=()):
# Precision-recall curve
fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True)
py = np.stack(py, axis=1)
if 0 < len(names) < 21: # display per-class legend if < 21 classes
for i, y in enumerate(py.T):
ax.plot(px, y, linewidth=1, label=f'{names[i]} {ap[i, 0]:.3f}') # plot(recall, precision)
else:
ax.plot(px, py, linewidth=1, color='grey') # plot(recall, precision)
ax.plot(px, py.mean(1), linewidth=3, color='blue', label='all classes %.3f mAP@0.5' % ap[:, 0].mean())
ax.set_xlabel('Recall')
ax.set_ylabel('Precision')
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
fig.savefig(Path(save_dir), dpi=250)
plt.close()
def plot_mc_curve(px, py, save_dir='mc_curve.png', names=(), xlabel='Confidence', ylabel='Metric'):
# Metric-confidence curve
fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True)
if 0 < len(names) < 21: # display per-class legend if < 21 classes
for i, y in enumerate(py):
ax.plot(px, y, linewidth=1, label=f'{names[i]}') # plot(confidence, metric)
else:
ax.plot(px, py.T, linewidth=1, color='grey') # plot(confidence, metric)
y = py.mean(0)
ax.plot(px, y, linewidth=3, color='blue', label=f'all classes {y.max():.2f} at {px[y.argmax()]:.3f}')
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
fig.savefig(Path(save_dir), dpi=250)
plt.close() | zh-ito-yolov5 | /zh-ito-yolov5-1.0.0.0.tar.gz/zh-ito-yolov5-1.0.0.0/utils/metrics.py | metrics.py |
import os
import platform
import subprocess
import time
import urllib
from pathlib import Path
import requests
import torch
def gsutil_getsize(url=''):
# gs://bucket/file size https://cloud.google.com/storage/docs/gsutil/commands/du
s = subprocess.check_output(f'gsutil du {url}', shell=True).decode('utf-8')
return eval(s.split(' ')[0]) if len(s) else 0 # bytes
def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''):
# Attempts to download file from url or url2, checks and removes incomplete downloads < min_bytes
file = Path(file)
assert_msg = f"Downloaded file '{file}' does not exist or size is < min_bytes={min_bytes}"
try: # url1
print(f'Downloading {url} to {file}...')
torch.hub.download_url_to_file(url, str(file))
assert file.exists() and file.stat().st_size > min_bytes, assert_msg # check
except Exception as e: # url2
file.unlink(missing_ok=True) # remove partial downloads
print(f'ERROR: {e}\nRe-attempting {url2 or url} to {file}...')
os.system(f"curl -L '{url2 or url}' -o '{file}' --retry 3 -C -") # curl download, retry and resume on fail
finally:
if not file.exists() or file.stat().st_size < min_bytes: # check
file.unlink(missing_ok=True) # remove partial downloads
print(f"ERROR: {assert_msg}\n{error_msg}")
print('')
def attempt_download(file, repo='ultralytics/yolov5'): # from utils.downloads import *; attempt_download()
# Attempt file download if does not exist
file = Path(str(file).strip().replace("'", ''))
if not file.exists():
# URL specified
name = Path(urllib.parse.unquote(str(file))).name # decode '%2F' to '/' etc.
if str(file).startswith(('http:/', 'https:/')): # download
url = str(file).replace(':/', '://') # Pathlib turns :// -> :/
name = name.split('?')[0] # parse authentication https://url.com/file.txt?auth...
safe_download(file=name, url=url, min_bytes=1E5)
return name
# GitHub assets
file.parent.mkdir(parents=True, exist_ok=True) # make parent dir (if required)
try:
response = requests.get(f'https://api.github.com/repos/{repo}/releases/latest').json() # github api
assets = [x['name'] for x in response['assets']] # release assets, i.e. ['yolov5s.pt', 'yolov5m.pt', ...]
tag = response['tag_name'] # i.e. 'v1.0'
except: # fallback plan
assets = ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt',
'yolov5s6.pt', 'yolov5m6.pt', 'yolov5l6.pt', 'yolov5x6.pt']
try:
tag = subprocess.check_output('git tag', shell=True, stderr=subprocess.STDOUT).decode().split()[-1]
except:
tag = 'v5.0' # current release
if name in assets:
safe_download(file,
url=f'https://github.com/{repo}/releases/download/{tag}/{name}',
# url2=f'https://storage.googleapis.com/{repo}/ckpt/{name}', # backup url (optional)
min_bytes=1E5,
error_msg=f'{file} missing, try downloading from https://github.com/{repo}/releases/')
return str(file)
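# Added usage sketch (comments only, not part of the original file): attempt_download('yolov5s.pt') is a
# no-op when the file already exists; otherwise it fetches the matching asset from the GitHub release, e.g.
#   weights = attempt_download('yolov5s.pt')  # downloads from ultralytics/yolov5 releases if missing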
def gdrive_download(id='16TiPfZj7htmTyhntwcZyEEAejOUxuT6m', file='tmp.zip'):
# Downloads a file from Google Drive. from yolov5.utils.downloads import *; gdrive_download()
t = time.time()
file = Path(file)
cookie = Path('cookie') # gdrive cookie
print(f'Downloading https://drive.google.com/uc?export=download&id={id} as {file}... ', end='')
file.unlink(missing_ok=True) # remove existing file
cookie.unlink(missing_ok=True) # remove existing cookie
# Attempt file download
out = "NUL" if platform.system() == "Windows" else "/dev/null"
os.system(f'curl -c ./cookie -s -L "drive.google.com/uc?export=download&id={id}" > {out}')
if os.path.exists('cookie'): # large file
s = f'curl -Lb ./cookie "drive.google.com/uc?export=download&confirm={get_token()}&id={id}" -o {file}'
else: # small file
s = f'curl -s -L -o {file} "drive.google.com/uc?export=download&id={id}"'
r = os.system(s) # execute, capture return
cookie.unlink(missing_ok=True) # remove existing cookie
# Error check
if r != 0:
file.unlink(missing_ok=True) # remove partial
print('Download error ') # raise Exception('Download error')
return r
# Unzip if archive
if file.suffix == '.zip':
print('unzipping... ', end='')
os.system(f'unzip -q {file}') # unzip
file.unlink() # remove zip to free space
print(f'Done ({time.time() - t:.1f}s)')
return r
def get_token(cookie="./cookie"):
with open(cookie) as f:
for line in f:
if "download" in line:
return line.split()[-1]
return ""
# Google utils: https://cloud.google.com/storage/docs/reference/libraries ----------------------------------------------
#
#
# def upload_blob(bucket_name, source_file_name, destination_blob_name):
# # Uploads a file to a bucket
# # https://cloud.google.com/storage/docs/uploading-objects#storage-upload-object-python
#
# storage_client = storage.Client()
# bucket = storage_client.get_bucket(bucket_name)
# blob = bucket.blob(destination_blob_name)
#
# blob.upload_from_filename(source_file_name)
#
# print('File {} uploaded to {}.'.format(
# source_file_name,
# destination_blob_name))
#
#
# def download_blob(bucket_name, source_blob_name, destination_file_name):
# # Uploads a blob from a bucket
# storage_client = storage.Client()
# bucket = storage_client.get_bucket(bucket_name)
# blob = bucket.blob(source_blob_name)
#
# blob.download_to_filename(destination_file_name)
#
# print('Blob {} downloaded to {}.'.format(
# source_blob_name,
# destination_file_name)) | zh-ito-yolov5 | /zh-ito-yolov5-1.0.0.0.tar.gz/zh-ito-yolov5-1.0.0.0/utils/downloads.py | downloads.py |
class Callbacks:
""""
Handles all registered callbacks for YOLOv5 Hooks
"""
_callbacks = {
'on_pretrain_routine_start': [],
'on_pretrain_routine_end': [],
'on_train_start': [],
'on_train_epoch_start': [],
'on_train_batch_start': [],
'optimizer_step': [],
'on_before_zero_grad': [],
'on_train_batch_end': [],
'on_train_epoch_end': [],
'on_val_start': [],
'on_val_batch_start': [],
'on_val_image_end': [],
'on_val_batch_end': [],
'on_val_end': [],
'on_fit_epoch_end': [], # fit = train + val
'on_model_save': [],
'on_train_end': [],
'teardown': [],
}
def __init__(self):
return
def register_action(self, hook, name='', callback=None):
"""
Register a new action to a callback hook
Args:
hook The callback hook name to register the action to
name The name of the action
callback The callback to fire
"""
assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}"
assert callable(callback), f"callback '{callback}' is not callable"
self._callbacks[hook].append({'name': name, 'callback': callback})
def get_registered_actions(self, hook=None):
""""
Returns all the registered actions by callback hook
Args:
hook The name of the hook to check, defaults to all
"""
if hook:
return self._callbacks[hook]
else:
return self._callbacks
def run_callbacks(self, hook, *args, **kwargs):
"""
Loop through the registered actions and fire all callbacks
"""
for logger in self._callbacks[hook]:
# print(f"Running callbacks.{logger['callback'].__name__}()")
logger['callback'](*args, **kwargs)
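# Added usage sketch (comments only, not part of the original class):
#   callbacks = Callbacks()
#   callbacks.register_action('on_train_start', name='greet', callback=lambda: print('training started'))
#   callbacks.on_train_start()  # fires every callback registered on the 'on_train_start' hook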
def on_pretrain_routine_start(self, *args, **kwargs):
"""
Fires all registered callbacks at the start of each pretraining routine
"""
self.run_callbacks('on_pretrain_routine_start', *args, **kwargs)
def on_pretrain_routine_end(self, *args, **kwargs):
"""
Fires all registered callbacks at the end of each pretraining routine
"""
self.run_callbacks('on_pretrain_routine_end', *args, **kwargs)
def on_train_start(self, *args, **kwargs):
"""
Fires all registered callbacks at the start of each training
"""
self.run_callbacks('on_train_start', *args, **kwargs)
def on_train_epoch_start(self, *args, **kwargs):
"""
Fires all registered callbacks at the start of each training epoch
"""
self.run_callbacks('on_train_epoch_start', *args, **kwargs)
def on_train_batch_start(self, *args, **kwargs):
"""
Fires all registered callbacks at the start of each training batch
"""
self.run_callbacks('on_train_batch_start', *args, **kwargs)
def optimizer_step(self, *args, **kwargs):
"""
Fires all registered callbacks on each optimizer step
"""
self.run_callbacks('optimizer_step', *args, **kwargs)
def on_before_zero_grad(self, *args, **kwargs):
"""
Fires all registered callbacks before zero grad
"""
self.run_callbacks('on_before_zero_grad', *args, **kwargs)
def on_train_batch_end(self, *args, **kwargs):
"""
Fires all registered callbacks at the end of each training batch
"""
self.run_callbacks('on_train_batch_end', *args, **kwargs)
def on_train_epoch_end(self, *args, **kwargs):
"""
Fires all registered callbacks at the end of each training epoch
"""
self.run_callbacks('on_train_epoch_end', *args, **kwargs)
def on_val_start(self, *args, **kwargs):
"""
Fires all registered callbacks at the start of the validation
"""
self.run_callbacks('on_val_start', *args, **kwargs)
def on_val_batch_start(self, *args, **kwargs):
"""
Fires all registered callbacks at the start of each validation batch
"""
self.run_callbacks('on_val_batch_start', *args, **kwargs)
def on_val_image_end(self, *args, **kwargs):
"""
Fires all registered callbacks at the end of each val image
"""
self.run_callbacks('on_val_image_end', *args, **kwargs)
def on_val_batch_end(self, *args, **kwargs):
"""
Fires all registered callbacks at the end of each validation batch
"""
self.run_callbacks('on_val_batch_end', *args, **kwargs)
def on_val_end(self, *args, **kwargs):
"""
Fires all registered callbacks at the end of the validation
"""
self.run_callbacks('on_val_end', *args, **kwargs)
def on_fit_epoch_end(self, *args, **kwargs):
"""
Fires all registered callbacks at the end of each fit (train+val) epoch
"""
self.run_callbacks('on_fit_epoch_end', *args, **kwargs)
def on_model_save(self, *args, **kwargs):
"""
Fires all registered callbacks after each model save
"""
self.run_callbacks('on_model_save', *args, **kwargs)
def on_train_end(self, *args, **kwargs):
"""
Fires all registered callbacks at the end of training
"""
self.run_callbacks('on_train_end', *args, **kwargs)
def teardown(self, *args, **kwargs):
"""
Fires all registered callbacks before teardown
"""
self.run_callbacks('teardown', *args, **kwargs) | zh-ito-yolov5 | /zh-ito-yolov5-1.0.0.0.tar.gz/zh-ito-yolov5-1.0.0.0/utils/callbacks.py | callbacks.py |
import torch
import torch.nn as nn
from utils.metrics import bbox_iou
from utils.torch_utils import is_parallel
def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441
# return positive, negative label smoothing BCE targets
return 1.0 - 0.5 * eps, 0.5 * eps
class BCEBlurWithLogitsLoss(nn.Module):
# BCEWithLogitsLoss() with reduced missing label effects.
def __init__(self, alpha=0.05):
super(BCEBlurWithLogitsLoss, self).__init__()
self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none') # must be nn.BCEWithLogitsLoss()
self.alpha = alpha
def forward(self, pred, true):
loss = self.loss_fcn(pred, true)
pred = torch.sigmoid(pred) # prob from logits
dx = pred - true # reduce only missing label effects
# dx = (pred - true).abs() # reduce missing label and false label effects
alpha_factor = 1 - torch.exp((dx - 1) / (self.alpha + 1e-4))
loss *= alpha_factor
return loss.mean()
class FocalLoss(nn.Module):
# Wraps focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
super(FocalLoss, self).__init__()
self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss()
self.gamma = gamma
self.alpha = alpha
self.reduction = loss_fcn.reduction
self.loss_fcn.reduction = 'none' # required to apply FL to each element
def forward(self, pred, true):
loss = self.loss_fcn(pred, true)
# p_t = torch.exp(-loss)
# loss *= self.alpha * (1.000001 - p_t) ** self.gamma # non-zero power for gradient stability
# TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py
pred_prob = torch.sigmoid(pred) # prob from logits
p_t = true * pred_prob + (1 - true) * (1 - pred_prob)
alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)
modulating_factor = (1.0 - p_t) ** self.gamma
loss *= alpha_factor * modulating_factor
if self.reduction == 'mean':
return loss.mean()
elif self.reduction == 'sum':
return loss.sum()
else: # 'none'
return loss
class QFocalLoss(nn.Module):
# Wraps Quality focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
super(QFocalLoss, self).__init__()
self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss()
self.gamma = gamma
self.alpha = alpha
self.reduction = loss_fcn.reduction
self.loss_fcn.reduction = 'none' # required to apply FL to each element
def forward(self, pred, true):
loss = self.loss_fcn(pred, true)
pred_prob = torch.sigmoid(pred) # prob from logits
alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)
modulating_factor = torch.abs(true - pred_prob) ** self.gamma
loss *= alpha_factor * modulating_factor
if self.reduction == 'mean':
return loss.mean()
elif self.reduction == 'sum':
return loss.sum()
else: # 'none'
return loss
class ComputeLoss:
# Compute losses
def __init__(self, model, autobalance=False):
super(ComputeLoss, self).__init__()
self.sort_obj_iou = False
device = next(model.parameters()).device # get model device
h = model.hyp # hyperparameters
# Define criteria
BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device))
BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device))
# Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0)) # positive, negative BCE targets
# Focal loss
g = h['fl_gamma'] # focal loss gamma
if g > 0:
BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)
det = model.module.model[-1] if is_parallel(model) else model.model[-1] # Detect() module
self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, .02]) # P3-P7
self.ssi = list(det.stride).index(16) if autobalance else 0 # stride 16 index
self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, 1.0, h, autobalance
for k in 'na', 'nc', 'nl', 'anchors':
setattr(self, k, getattr(det, k))
def __call__(self, p, targets): # predictions, targets, model
device = targets.device
lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device)
tcls, tbox, indices, anchors = self.build_targets(p, targets) # targets
# Losses
for i, pi in enumerate(p): # layer index, layer predictions
b, a, gj, gi = indices[i] # image, anchor, gridy, gridx
tobj = torch.zeros_like(pi[..., 0], device=device) # target obj
n = b.shape[0] # number of targets
if n:
ps = pi[b, a, gj, gi] # prediction subset corresponding to targets
# Regression
pxy = ps[:, :2].sigmoid() * 2. - 0.5
pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i]
pbox = torch.cat((pxy, pwh), 1) # predicted box
iou = bbox_iou(pbox.T, tbox[i], x1y1x2y2=False, CIoU=True) # iou(prediction, target)
lbox += (1.0 - iou).mean() # iou loss
# Objectness
score_iou = iou.detach().clamp(0).type(tobj.dtype)
if self.sort_obj_iou:
sort_id = torch.argsort(score_iou)
b, a, gj, gi, score_iou = b[sort_id], a[sort_id], gj[sort_id], gi[sort_id], score_iou[sort_id]
tobj[b, a, gj, gi] = (1.0 - self.gr) + self.gr * score_iou # iou ratio
# Classification
if self.nc > 1: # cls loss (only if multiple classes)
t = torch.full_like(ps[:, 5:], self.cn, device=device) # targets
t[range(n), tcls[i]] = self.cp
lcls += self.BCEcls(ps[:, 5:], t) # BCE
# Append targets to text file
# with open('targets.txt', 'a') as file:
# [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)]
obji = self.BCEobj(pi[..., 4], tobj)
lobj += obji * self.balance[i] # obj loss
if self.autobalance:
self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item()
if self.autobalance:
self.balance = [x / self.balance[self.ssi] for x in self.balance]
lbox *= self.hyp['box']
lobj *= self.hyp['obj']
lcls *= self.hyp['cls']
bs = tobj.shape[0] # batch size
return (lbox + lobj + lcls) * bs, torch.cat((lbox, lobj, lcls)).detach()
def build_targets(self, p, targets):
# Build targets for compute_loss(), input targets(image,class,x,y,w,h)
na, nt = self.na, targets.shape[0] # number of anchors, targets
tcls, tbox, indices, anch = [], [], [], []
gain = torch.ones(7, device=targets.device) # normalized to gridspace gain
ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt)
targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2) # append anchor indices
g = 0.5 # bias
off = torch.tensor([[0, 0],
[1, 0], [0, 1], [-1, 0], [0, -1], # j,k,l,m
# [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm
], device=targets.device).float() * g # offsets
for i in range(self.nl):
anchors = self.anchors[i]
gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]] # xyxy gain
# Match targets to anchors
t = targets * gain
if nt:
# Matches
r = t[:, :, 4:6] / anchors[:, None] # wh ratio
j = torch.max(r, 1. / r).max(2)[0] < self.hyp['anchor_t'] # compare
# j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2))
t = t[j] # filter
# Offsets
gxy = t[:, 2:4] # grid xy
gxi = gain[[2, 3]] - gxy # inverse
j, k = ((gxy % 1. < g) & (gxy > 1.)).T
l, m = ((gxi % 1. < g) & (gxi > 1.)).T
j = torch.stack((torch.ones_like(j), j, k, l, m))
t = t.repeat((5, 1, 1))[j]
offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j]
else:
t = targets[0]
offsets = 0
# Define
b, c = t[:, :2].long().T # image, class
gxy = t[:, 2:4] # grid xy
gwh = t[:, 4:6] # grid wh
gij = (gxy - offsets).long()
gi, gj = gij.T # grid xy indices
# Append
a = t[:, 6].long() # anchor indices
indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1))) # image, anchor, grid indices
tbox.append(torch.cat((gxy - gij, gwh), 1)) # box
anch.append(anchors[a]) # anchors
tcls.append(c) # class
return tcls, tbox, indices, anch | zh-ito-yolov5 | /zh-ito-yolov5-1.0.0.0.tar.gz/zh-ito-yolov5-1.0.0.0/utils/loss.py | loss.py |
import contextlib
import glob
import logging
import math
import os
import platform
import random
import re
import signal
import time
import urllib
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from subprocess import check_output
import cv2
import numpy as np
import pandas as pd
import pkg_resources as pkg
import torch
import torchvision
import yaml
from utils.downloads import gsutil_getsize
from utils.metrics import box_iou, fitness
from utils.torch_utils import init_torch_seeds
# Settings
torch.set_printoptions(linewidth=320, precision=5, profile='long')
np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5
pd.options.display.max_columns = 10
cv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader)
os.environ['NUMEXPR_MAX_THREADS'] = str(min(os.cpu_count(), 8)) # NumExpr max threads
class Profile(contextlib.ContextDecorator):
# Usage: @Profile() decorator or 'with Profile():' context manager
def __enter__(self):
self.start = time.time()
def __exit__(self, type, value, traceback):
print(f'Profile results: {time.time() - self.start:.5f}s')
class Timeout(contextlib.ContextDecorator):
# Usage: @Timeout(seconds) decorator or 'with Timeout(seconds):' context manager
def __init__(self, seconds, *, timeout_msg='', suppress_timeout_errors=True):
self.seconds = int(seconds)
self.timeout_message = timeout_msg
self.suppress = bool(suppress_timeout_errors)
def _timeout_handler(self, signum, frame):
raise TimeoutError(self.timeout_message)
def __enter__(self):
signal.signal(signal.SIGALRM, self._timeout_handler) # Set handler for SIGALRM
signal.alarm(self.seconds) # start countdown for SIGALRM to be raised
def __exit__(self, exc_type, exc_val, exc_tb):
signal.alarm(0) # Cancel SIGALRM if it's scheduled
if self.suppress and exc_type is TimeoutError: # Suppress TimeoutError
return True
def try_except(func):
# try-except function. Usage: @try_except decorator
def handler(*args, **kwargs):
try:
func(*args, **kwargs)
except Exception as e:
print(e)
return handler
def methods(instance):
# Get class/instance methods
return [f for f in dir(instance) if callable(getattr(instance, f)) and not f.startswith("__")]
def set_logging(rank=-1, verbose=True):
logging.basicConfig(
format="%(message)s",
level=logging.INFO if (verbose and rank in [-1, 0]) else logging.WARN)
def init_seeds(seed=0):
# Initialize random number generator (RNG) seeds
random.seed(seed)
np.random.seed(seed)
init_torch_seeds(seed)
def get_latest_run(search_dir='.'):
# Return path to most recent 'last.pt' in /runs (i.e. to --resume from)
last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True)
return max(last_list, key=os.path.getctime) if last_list else ''
def is_docker():
# Is environment a Docker container?
return Path('/workspace').exists() # or Path('/.dockerenv').exists()
def is_colab():
# Is environment a Google Colab instance?
try:
import google.colab
return True
except Exception as e:
return False
def is_pip():
# Is file in a pip package?
return 'site-packages' in Path(__file__).absolute().parts
def is_ascii(s=''):
# Is string composed of all ASCII (no UTF) characters?
s = str(s) # convert list, tuple, None, etc. to str
return len(s.encode().decode('ascii', 'ignore')) == len(s)
def emojis(s=''):
    # Return platform-dependent emoji-safe version of string
    return s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s
def file_size(file):
# Return file size in MB
return Path(file).stat().st_size / 1e6
def check_online():
# Check internet connectivity
import socket
try:
socket.create_connection(("1.1.1.1", 443), 5) # check host accessibility
return True
except OSError:
return False
@try_except
def check_git_status():
# Recommend 'git pull' if code is out of date
msg = ', for updates see https://github.com/ultralytics/yolov5'
print(colorstr('github: '), end='')
assert Path('.git').exists(), 'skipping check (not a git repository)' + msg
assert not is_docker(), 'skipping check (Docker image)' + msg
assert check_online(), 'skipping check (offline)' + msg
cmd = 'git fetch && git config --get remote.origin.url'
    url = check_output(cmd, shell=True, timeout=5).decode().strip()  # git fetch
    url = url[:-4] if url.endswith('.git') else url  # strip '.git' suffix (str.rstrip strips characters, not a suffix)
branch = check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip() # checked out
n = int(check_output(f'git rev-list {branch}..origin/master --count', shell=True)) # commits behind
if n > 0:
s = f"⚠️ YOLOv5 is out of date by {n} commit{'s' * (n > 1)}. Use `git pull` or `git clone {url}` to update."
else:
s = f'up to date with {url} ✅'
print(emojis(s)) # emoji-safe
def check_python(minimum='3.6.2'):
# Check current python version vs. required python version
check_version(platform.python_version(), minimum, name='Python ')
def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=False):
# Check version vs. required version
current, minimum = (pkg.parse_version(x) for x in (current, minimum))
result = (current == minimum) if pinned else (current >= minimum)
assert result, f'{name}{minimum} required by YOLOv5, but {name}{current} is currently installed'
@try_except
def check_requirements(requirements='requirements.txt', exclude=(), install=True):
# Check installed dependencies meet requirements (pass *.txt file or list of packages)
prefix = colorstr('red', 'bold', 'requirements:')
check_python() # check python version
if isinstance(requirements, (str, Path)): # requirements.txt file
file = Path(requirements)
assert file.exists(), f"{prefix} {file.resolve()} not found, check failed."
requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(file.open()) if x.name not in exclude]
else: # list or tuple of packages
requirements = [x for x in requirements if x not in exclude]
n = 0 # number of packages updates
for r in requirements:
try:
pkg.require(r)
except Exception as e: # DistributionNotFound or VersionConflict if requirements not met
s = f"{prefix} {r} not found and is required by YOLOv5"
if install:
print(f"{s}, attempting auto-update...")
try:
assert check_online(), f"'pip install {r}' skipped (offline)"
print(check_output(f"pip install '{r}'", shell=True).decode())
n += 1
except Exception as e:
print(f'{prefix} {e}')
else:
print(f'{s}. Please install and rerun your command.')
if n: # if packages updated
source = file.resolve() if 'file' in locals() else requirements
s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \
f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n"
print(emojis(s))
def check_img_size(imgsz, s=32, floor=0):
# Verify image size is a multiple of stride s in each dimension
if isinstance(imgsz, int): # integer i.e. img_size=640
new_size = max(make_divisible(imgsz, int(s)), floor)
else: # list i.e. img_size=[640, 480]
new_size = [max(make_divisible(x, int(s)), floor) for x in imgsz]
if new_size != imgsz:
print(f'WARNING: --img-size {imgsz} must be multiple of max stride {s}, updating to {new_size}')
return new_size
def check_imshow():
# Check if environment supports image displays
try:
assert not is_docker(), 'cv2.imshow() is disabled in Docker environments'
assert not is_colab(), 'cv2.imshow() is disabled in Google Colab environments'
cv2.imshow('test', np.zeros((1, 1, 3)))
cv2.waitKey(1)
cv2.destroyAllWindows()
cv2.waitKey(1)
return True
except Exception as e:
print(f'WARNING: Environment does not support cv2.imshow() or PIL Image.show() image displays\n{e}')
return False
def check_file(file):
# Search/download file (if necessary) and return path
file = str(file) # convert to str()
if Path(file).is_file() or file == '': # exists
return file
elif file.startswith(('http:/', 'https:/')): # download
url = str(Path(file)).replace(':/', '://') # Pathlib turns :// -> :/
file = Path(urllib.parse.unquote(file)).name.split('?')[0] # '%2F' to '/', split https://url.com/file.txt?auth
print(f'Downloading {url} to {file}...')
torch.hub.download_url_to_file(url, file)
assert Path(file).exists() and Path(file).stat().st_size > 0, f'File download failed: {url}' # check
return file
else: # search
files = glob.glob('./**/' + file, recursive=True) # find file
assert len(files), f'File not found: {file}' # assert file was found
assert len(files) == 1, f"Multiple files match '{file}', specify exact path: {files}" # assert unique
return files[0] # return file
def check_dataset(data, autodownload=True):
# Download and/or unzip dataset if not found locally
# Usage: https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128_with_yaml.zip
# Download (optional)
extract_dir = ''
if isinstance(data, (str, Path)) and str(data).endswith('.zip'): # i.e. gs://bucket/dir/coco128.zip
download(data, dir='../datasets', unzip=True, delete=False, curl=False, threads=1)
data = next((Path('../datasets') / Path(data).stem).rglob('*.yaml'))
extract_dir, autodownload = data.parent, False
# Read yaml (optional)
if isinstance(data, (str, Path)):
with open(data, errors='ignore') as f:
data = yaml.safe_load(f) # dictionary
# Parse yaml
path = extract_dir or Path(data.get('path') or '') # optional 'path' default to '.'
for k in 'train', 'val', 'test':
if data.get(k): # prepend path
data[k] = str(path / data[k]) if isinstance(data[k], str) else [str(path / x) for x in data[k]]
assert 'nc' in data, "Dataset 'nc' key missing."
if 'names' not in data:
data['names'] = [f'class{i}' for i in range(data['nc'])] # assign class names if missing
train, val, test, s = [data.get(x) for x in ('train', 'val', 'test', 'download')]
if val:
val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path
if not all(x.exists() for x in val):
print('\nWARNING: Dataset not found, nonexistent paths: %s' % [str(x) for x in val if not x.exists()])
if s and autodownload: # download script
if s.startswith('http') and s.endswith('.zip'): # URL
f = Path(s).name # filename
print(f'Downloading {s} ...')
torch.hub.download_url_to_file(s, f)
root = path.parent if 'path' in data else '..' # unzip directory i.e. '../'
Path(root).mkdir(parents=True, exist_ok=True) # create root
r = os.system(f'unzip -q {f} -d {root} && rm {f}') # unzip
elif s.startswith('bash '): # bash script
print(f'Running {s} ...')
r = os.system(s)
else: # python script
r = exec(s, {'yaml': data}) # return None
print('Dataset autodownload %s\n' % ('success' if r in (0, None) else 'failure')) # print result
else:
raise Exception('Dataset not found.')
return data # dictionary
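# Example (editor's sketch, not part of the original source): resolve a dataset YAML before training,
# e.g. with the bundled coco128 config (path assumed):
#   data_dict = check_dataset('data/coco128.yaml')   # downloads the dataset if missing and autodownload=True
#   train_path, val_path, nc = data_dict['train'], data_dict['val'], data_dict['nc']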
def download(url, dir='.', unzip=True, delete=True, curl=False, threads=1):
# Multi-threaded file download and unzip function, used in data.yaml for autodownload
def download_one(url, dir):
# Download 1 file
f = dir / Path(url).name # filename
if Path(url).is_file(): # exists in current path
Path(url).rename(f) # move to dir
elif not f.exists():
print(f'Downloading {url} to {f}...')
if curl:
os.system(f"curl -L '{url}' -o '{f}' --retry 9 -C -") # curl download, retry and resume on fail
else:
torch.hub.download_url_to_file(url, f, progress=True) # torch download
if unzip and f.suffix in ('.zip', '.gz'):
print(f'Unzipping {f}...')
if f.suffix == '.zip':
s = f'unzip -qo {f} -d {dir}' # unzip -quiet -overwrite
elif f.suffix == '.gz':
s = f'tar xfz {f} --directory {f.parent}' # unzip
if delete: # delete zip file after unzip
s += f' && rm {f}'
os.system(s)
dir = Path(dir)
dir.mkdir(parents=True, exist_ok=True) # make directory
if threads > 1:
pool = ThreadPool(threads)
pool.imap(lambda x: download_one(*x), zip(url, repeat(dir))) # multi-threaded
pool.close()
pool.join()
else:
for u in [url] if isinstance(url, (str, Path)) else url:
download_one(u, dir)
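# Example (editor's sketch, not part of the original source; the URL below is a placeholder):
#   download('https://example.com/coco128.zip', dir='../datasets', unzip=True, delete=False, threads=1)
#   # pass a list of URLs together with threads > 1 for parallel downloads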
def make_divisible(x, divisor):
    # Returns x rounded up to the nearest multiple of divisor
return math.ceil(x / divisor) * divisor
def clean_str(s):
# Cleans a string by replacing special characters with underscore _
return re.sub(pattern="[|@#!¡·$€%&()=?¿^*;:,¨´><+]", repl="_", string=s)
def one_cycle(y1=0.0, y2=1.0, steps=100):
# lambda function for sinusoidal ramp from y1 to y2 https://arxiv.org/pdf/1812.01187.pdf
return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1
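# Example (editor's sketch, not part of the original source): used as an LR-schedule lambda, as in
# train.py (hyperparameter names assumed):
#   lf = one_cycle(1, hyp['lrf'], epochs)  # cosine ramp from 1.0 down to hyp['lrf']
#   scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)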
def colorstr(*input):
# Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. colorstr('blue', 'hello world')
*args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string
colors = {'black': '\033[30m', # basic colors
'red': '\033[31m',
'green': '\033[32m',
'yellow': '\033[33m',
'blue': '\033[34m',
'magenta': '\033[35m',
'cyan': '\033[36m',
'white': '\033[37m',
'bright_black': '\033[90m', # bright colors
'bright_red': '\033[91m',
'bright_green': '\033[92m',
'bright_yellow': '\033[93m',
'bright_blue': '\033[94m',
'bright_magenta': '\033[95m',
'bright_cyan': '\033[96m',
'bright_white': '\033[97m',
'end': '\033[0m', # misc
'bold': '\033[1m',
'underline': '\033[4m'}
return ''.join(colors[x] for x in args) + f'{string}' + colors['end']
def labels_to_class_weights(labels, nc=80):
# Get class weights (inverse frequency) from training labels
if labels[0] is None: # no labels loaded
return torch.Tensor()
labels = np.concatenate(labels, 0) # labels.shape = (866643, 5) for COCO
    classes = labels[:, 0].astype(int)  # labels = [class xywh]
weights = np.bincount(classes, minlength=nc) # occurrences per class
# Prepend gridpoint count (for uCE training)
# gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum() # gridpoints per image
# weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5 # prepend gridpoints to start
weights[weights == 0] = 1 # replace empty bins with 1
weights = 1 / weights # number of targets per class
weights /= weights.sum() # normalize
return torch.from_numpy(weights)
def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
# Produces image weights based on class_weights and image contents
    class_counts = np.array([np.bincount(x[:, 0].astype(int), minlength=nc) for x in labels])
image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1)
# index = random.choices(range(n), weights=image_weights, k=1) # weight image sample
return image_weights
def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper)
# https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/
# a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n')
# b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n')
# x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco
# x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)] # coco to darknet
x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34,
35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]
return x
def xyxy2xywh(x):
# Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center
y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center
y[:, 2] = x[:, 2] - x[:, 0] # width
y[:, 3] = x[:, 3] - x[:, 1] # height
return y
def xywh2xyxy(x):
# Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x
y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y
y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x
y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y
return y
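# Worked example (editor's note, not part of the original source): a box with center (50, 50),
# width 20 and height 10 converts as
#   xywh2xyxy(np.array([[50., 50., 20., 10.]]))  ->  [[40., 45., 60., 55.]]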
def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):
# Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + padw # top left x
y[:, 1] = h * (x[:, 1] - x[:, 3] / 2) + padh # top left y
y[:, 2] = w * (x[:, 0] + x[:, 2] / 2) + padw # bottom right x
y[:, 3] = h * (x[:, 1] + x[:, 3] / 2) + padh # bottom right y
return y
def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0):
# Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] normalized where xy1=top-left, xy2=bottom-right
if clip:
clip_coords(x, (h - eps, w - eps)) # warning: inplace clip
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[:, 0] = ((x[:, 0] + x[:, 2]) / 2) / w # x center
y[:, 1] = ((x[:, 1] + x[:, 3]) / 2) / h # y center
y[:, 2] = (x[:, 2] - x[:, 0]) / w # width
y[:, 3] = (x[:, 3] - x[:, 1]) / h # height
return y
def xyn2xy(x, w=640, h=640, padw=0, padh=0):
# Convert normalized segments into pixel segments, shape (n,2)
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[:, 0] = w * x[:, 0] + padw # top left x
y[:, 1] = h * x[:, 1] + padh # top left y
return y
def segment2box(segment, width=640, height=640):
# Convert 1 segment label to 1 box label, applying inside-image constraint, i.e. (xy1, xy2, ...) to (xyxy)
x, y = segment.T # segment xy
inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height)
    x, y = x[inside], y[inside]
return np.array([x.min(), y.min(), x.max(), y.max()]) if any(x) else np.zeros((1, 4)) # xyxy
def segments2boxes(segments):
# Convert segment labels to box labels, i.e. (cls, xy1, xy2, ...) to (cls, xywh)
boxes = []
for s in segments:
x, y = s.T # segment xy
boxes.append([x.min(), y.min(), x.max(), y.max()]) # cls, xyxy
return xyxy2xywh(np.array(boxes)) # cls, xywh
def resample_segments(segments, n=1000):
# Up-sample an (n,2) segment
for i, s in enumerate(segments):
x = np.linspace(0, len(s) - 1, n)
xp = np.arange(len(s))
segments[i] = np.concatenate([np.interp(x, xp, s[:, i]) for i in range(2)]).reshape(2, -1).T # segment xy
return segments
def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
# Rescale coords (xyxy) from img1_shape to img0_shape
if ratio_pad is None: # calculate from img0_shape
gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new
pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding
else:
gain = ratio_pad[0][0]
pad = ratio_pad[1]
coords[:, [0, 2]] -= pad[0] # x padding
coords[:, [1, 3]] -= pad[1] # y padding
coords[:, :4] /= gain
clip_coords(coords, img0_shape)
return coords
def clip_coords(boxes, shape):
    # Clip xyxy bounding boxes to image shape (height, width)
if isinstance(boxes, torch.Tensor): # faster individually
boxes[:, 0].clamp_(0, shape[1]) # x1
boxes[:, 1].clamp_(0, shape[0]) # y1
boxes[:, 2].clamp_(0, shape[1]) # x2
boxes[:, 3].clamp_(0, shape[0]) # y2
else: # np.array (faster grouped)
boxes[:, [0, 2]] = boxes[:, [0, 2]].clip(0, shape[1]) # x1, x2
boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(0, shape[0]) # y1, y2
def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False,
labels=(), max_det=300):
"""Runs Non-Maximum Suppression (NMS) on inference results
Returns:
list of detections, on (n,6) tensor per image [xyxy, conf, cls]
"""
nc = prediction.shape[2] - 5 # number of classes
xc = prediction[..., 4] > conf_thres # candidates
# Checks
assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0'
assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0'
# Settings
min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height
max_nms = 30000 # maximum number of boxes into torchvision.ops.nms()
time_limit = 10.0 # seconds to quit after
redundant = True # require redundant detections
multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img)
merge = False # use merge-NMS
t = time.time()
output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]
for xi, x in enumerate(prediction): # image index, image inference
# Apply constraints
# x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height
x = x[xc[xi]] # confidence
# Cat apriori labels if autolabelling
if labels and len(labels[xi]):
l = labels[xi]
v = torch.zeros((len(l), nc + 5), device=x.device)
v[:, :4] = l[:, 1:5] # box
v[:, 4] = 1.0 # conf
v[range(len(l)), l[:, 0].long() + 5] = 1.0 # cls
x = torch.cat((x, v), 0)
# If none remain process next image
if not x.shape[0]:
continue
# Compute conf
x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf
# Box (center x, center y, width, height) to (x1, y1, x2, y2)
box = xywh2xyxy(x[:, :4])
# Detections matrix nx6 (xyxy, conf, cls)
if multi_label:
i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T
x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)
else: # best class only
conf, j = x[:, 5:].max(1, keepdim=True)
x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]
# Filter by class
if classes is not None:
x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]
# Apply finite constraint
# if not torch.isfinite(x).all():
# x = x[torch.isfinite(x).all(1)]
# Check shape
n = x.shape[0] # number of boxes
if not n: # no boxes
continue
elif n > max_nms: # excess boxes
x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence
# Batched NMS
c = x[:, 5:6] * (0 if agnostic else max_wh) # classes
boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores
i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS
if i.shape[0] > max_det: # limit detections
i = i[:max_det]
if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean)
# update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix
weights = iou * scores[None] # box weights
x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes
if redundant:
i = i[iou.sum(1) > 1] # require redundancy
output[xi] = x[i]
if (time.time() - t) > time_limit:
print(f'WARNING: NMS time limit {time_limit}s exceeded')
break # time limit exceeded
return output
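# Example (editor's sketch, not part of the original source): typical inference-time use, as in
# detect.py (variable names assumed):
#   pred = model(img)[0]                                  # raw predictions, shape (bs, n, nc + 5)
#   dets = non_max_suppression(pred, conf_thres=0.25, iou_thres=0.45, max_det=300)
#   for det in dets:                                      # one (n, 6) tensor [xyxy, conf, cls] per image
#       ...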
def strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_optimizer()
# Strip optimizer from 'f' to finalize training, optionally save as 's'
x = torch.load(f, map_location=torch.device('cpu'))
if x.get('ema'):
x['model'] = x['ema'] # replace model with ema
for k in 'optimizer', 'training_results', 'wandb_id', 'ema', 'updates': # keys
x[k] = None
x['epoch'] = -1
x['model'].half() # to FP16
for p in x['model'].parameters():
p.requires_grad = False
torch.save(x, s or f)
mb = os.path.getsize(s or f) / 1E6 # filesize
print(f"Optimizer stripped from {f},{(' saved as %s,' % s) if s else ''} {mb:.1f}MB")
def print_mutation(results, hyp, save_dir, bucket):
evolve_csv, results_csv, evolve_yaml = save_dir / 'evolve.csv', save_dir / 'results.csv', save_dir / 'hyp_evolve.yaml'
keys = ('metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95',
'val/box_loss', 'val/obj_loss', 'val/cls_loss') + tuple(hyp.keys()) # [results + hyps]
keys = tuple(x.strip() for x in keys)
vals = results + tuple(hyp.values())
n = len(keys)
# Download (optional)
if bucket:
url = f'gs://{bucket}/evolve.csv'
if gsutil_getsize(url) > (os.path.getsize(evolve_csv) if os.path.exists(evolve_csv) else 0):
os.system(f'gsutil cp {url} {save_dir}') # download evolve.csv if larger than local
# Log to evolve.csv
s = '' if evolve_csv.exists() else (('%20s,' * n % keys).rstrip(',') + '\n') # add header
with open(evolve_csv, 'a') as f:
f.write(s + ('%20.5g,' * n % vals).rstrip(',') + '\n')
# Print to screen
print(colorstr('evolve: ') + ', '.join(f'{x.strip():>20s}' for x in keys))
print(colorstr('evolve: ') + ', '.join(f'{x:20.5g}' for x in vals), end='\n\n\n')
# Save yaml
with open(evolve_yaml, 'w') as f:
data = pd.read_csv(evolve_csv)
data = data.rename(columns=lambda x: x.strip()) # strip keys
        i = np.argmax(fitness(data.values[:, :7]))  # index of the best generation
f.write(f'# YOLOv5 Hyperparameter Evolution Results\n' +
f'# Best generation: {i}\n' +
f'# Last generation: {len(data)}\n' +
f'# ' + ', '.join(f'{x.strip():>20s}' for x in keys[:7]) + '\n' +
f'# ' + ', '.join(f'{x:>20.5g}' for x in data.values[i, :7]) + '\n\n')
yaml.safe_dump(hyp, f, sort_keys=False)
if bucket:
os.system(f'gsutil cp {evolve_csv} {evolve_yaml} gs://{bucket}') # upload
def apply_classifier(x, model, img, im0):
# Apply a second stage classifier to yolo outputs
im0 = [im0] if isinstance(im0, np.ndarray) else im0
for i, d in enumerate(x): # per image
if d is not None and len(d):
d = d.clone()
# Reshape and pad cutouts
b = xyxy2xywh(d[:, :4]) # boxes
b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # rectangle to square
b[:, 2:] = b[:, 2:] * 1.3 + 30 # pad
d[:, :4] = xywh2xyxy(b).long()
# Rescale boxes from img_size to im0 size
scale_coords(img.shape[2:], d[:, :4], im0[i].shape)
# Classes
pred_cls1 = d[:, 5].long()
ims = []
for j, a in enumerate(d): # per item
cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])]
im = cv2.resize(cutout, (224, 224)) # BGR
# cv2.imwrite('example%i.jpg' % j, cutout)
im = im[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
im = np.ascontiguousarray(im, dtype=np.float32) # uint8 to float32
im /= 255.0 # 0 - 255 to 0.0 - 1.0
ims.append(im)
pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1) # classifier prediction
x[i] = x[i][pred_cls1 == pred_cls2] # retain matching class detections
return x
def save_one_box(xyxy, im, file='image.jpg', gain=1.02, pad=10, square=False, BGR=False, save=True):
# Save image crop as {file} with crop size multiple {gain} and {pad} pixels. Save and/or return crop
xyxy = torch.tensor(xyxy).view(-1, 4)
b = xyxy2xywh(xyxy) # boxes
if square:
b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # attempt rectangle to square
b[:, 2:] = b[:, 2:] * gain + pad # box wh * gain + pad
xyxy = xywh2xyxy(b).long()
clip_coords(xyxy, im.shape)
crop = im[int(xyxy[0, 1]):int(xyxy[0, 3]), int(xyxy[0, 0]):int(xyxy[0, 2]), ::(1 if BGR else -1)]
if save:
cv2.imwrite(str(increment_path(file, mkdir=True).with_suffix('.jpg')), crop)
return crop
def increment_path(path, exist_ok=False, sep='', mkdir=False):
# Increment file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc.
path = Path(path) # os-agnostic
if path.exists() and not exist_ok:
suffix = path.suffix
path = path.with_suffix('')
dirs = glob.glob(f"{path}{sep}*") # similar paths
matches = [re.search(rf"%s{sep}(\d+)" % path.stem, d) for d in dirs]
i = [int(m.groups()[0]) for m in matches if m] # indices
n = max(i) + 1 if i else 2 # increment number
path = Path(f"{path}{sep}{n}{suffix}") # update path
dir = path if path.suffix == '' else path.parent # directory
if not dir.exists() and mkdir:
dir.mkdir(parents=True, exist_ok=True) # make directory
return path | zh-ito-yolov5 | /zh-ito-yolov5-1.0.0.0.tar.gz/zh-ito-yolov5-1.0.0.0/utils/general.py | general.py |
import random
import numpy as np
import torch
import yaml
from tqdm import tqdm
from utils.general import colorstr
def check_anchor_order(m):
# Check anchor order against stride order for YOLOv5 Detect() module m, and correct if necessary
a = m.anchor_grid.prod(-1).view(-1) # anchor area
da = a[-1] - a[0] # delta a
ds = m.stride[-1] - m.stride[0] # delta s
    if da.sign() != ds.sign():  # anchor order and stride order differ
print('Reversing anchor order')
m.anchors[:] = m.anchors.flip(0)
m.anchor_grid[:] = m.anchor_grid.flip(0)
def check_anchors(dataset, model, thr=4.0, imgsz=640):
# Check anchor fit to data, recompute if necessary
prefix = colorstr('autoanchor: ')
print(f'\n{prefix}Analyzing anchors... ', end='')
m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1] # Detect()
shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True)
scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1)) # augment scale
wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(shapes * scale, dataset.labels)])).float() # wh
def metric(k): # compute metric
r = wh[:, None] / k[None]
x = torch.min(r, 1. / r).min(2)[0] # ratio metric
best = x.max(1)[0] # best_x
aat = (x > 1. / thr).float().sum(1).mean() # anchors above threshold
bpr = (best > 1. / thr).float().mean() # best possible recall
return bpr, aat
anchors = m.anchor_grid.clone().cpu().view(-1, 2) # current anchors
bpr, aat = metric(anchors)
print(f'anchors/target = {aat:.2f}, Best Possible Recall (BPR) = {bpr:.4f}', end='')
if bpr < 0.98: # threshold to recompute
print('. Attempting to improve anchors, please wait...')
na = m.anchor_grid.numel() // 2 # number of anchors
try:
anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False)
except Exception as e:
print(f'{prefix}ERROR: {e}')
new_bpr = metric(anchors)[0]
if new_bpr > bpr: # replace anchors
anchors = torch.tensor(anchors, device=m.anchors.device).type_as(m.anchors)
m.anchor_grid[:] = anchors.clone().view_as(m.anchor_grid) # for inference
m.anchors[:] = anchors.clone().view_as(m.anchors) / m.stride.to(m.anchors.device).view(-1, 1, 1) # loss
check_anchor_order(m)
print(f'{prefix}New anchors saved to model. Update model *.yaml to use these anchors in the future.')
else:
print(f'{prefix}Original anchors better than new anchors. Proceeding with original anchors.')
print('') # newline
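# Example (editor's sketch, not part of the original source): called once before training starts, as in
# train.py (names assumed):
#   check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)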
def kmean_anchors(dataset='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True):
""" Creates kmeans-evolved anchors from training dataset
Arguments:
dataset: path to data.yaml, or a loaded dataset
n: number of anchors
img_size: image size used for training
thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0
gen: generations to evolve anchors using genetic algorithm
verbose: print all results
Return:
k: kmeans evolved anchors
Usage:
from utils.autoanchor import *; _ = kmean_anchors()
"""
from scipy.cluster.vq import kmeans
thr = 1. / thr
prefix = colorstr('autoanchor: ')
def metric(k, wh): # compute metrics
r = wh[:, None] / k[None]
x = torch.min(r, 1. / r).min(2)[0] # ratio metric
# x = wh_iou(wh, torch.tensor(k)) # iou metric
return x, x.max(1)[0] # x, best_x
def anchor_fitness(k): # mutation fitness
_, best = metric(torch.tensor(k, dtype=torch.float32), wh)
return (best * (best > thr).float()).mean() # fitness
def print_results(k):
k = k[np.argsort(k.prod(1))] # sort small to large
x, best = metric(k, wh0)
bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n # best possible recall, anch > thr
print(f'{prefix}thr={thr:.2f}: {bpr:.4f} best possible recall, {aat:.2f} anchors past thr')
print(f'{prefix}n={n}, img_size={img_size}, metric_all={x.mean():.3f}/{best.mean():.3f}-mean/best, '
f'past_thr={x[x > thr].mean():.3f}-mean: ', end='')
for i, x in enumerate(k):
print('%i,%i' % (round(x[0]), round(x[1])), end=', ' if i < len(k) - 1 else '\n') # use in *.cfg
return k
if isinstance(dataset, str): # *.yaml file
with open(dataset, errors='ignore') as f:
data_dict = yaml.safe_load(f) # model dict
from utils.datasets import LoadImagesAndLabels
dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True)
# Get label wh
shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True)
wh0 = np.concatenate([l[:, 3:5] * s for s, l in zip(shapes, dataset.labels)]) # wh
# Filter
i = (wh0 < 3.0).any(1).sum()
if i:
print(f'{prefix}WARNING: Extremely small objects found. {i} of {len(wh0)} labels are < 3 pixels in size.')
wh = wh0[(wh0 >= 2.0).any(1)] # filter > 2 pixels
# wh = wh * (np.random.rand(wh.shape[0], 1) * 0.9 + 0.1) # multiply by random scale 0-1
# Kmeans calculation
print(f'{prefix}Running kmeans for {n} anchors on {len(wh)} points...')
s = wh.std(0) # sigmas for whitening
k, dist = kmeans(wh / s, n, iter=30) # points, mean distance
    assert len(k) == n, f'{prefix}ERROR: scipy.cluster.vq.kmeans requested {n} points but returned only {len(k)}'
k *= s
wh = torch.tensor(wh, dtype=torch.float32) # filtered
wh0 = torch.tensor(wh0, dtype=torch.float32) # unfiltered
k = print_results(k)
# Plot
# k, d = [None] * 20, [None] * 20
# for i in tqdm(range(1, 21)):
# k[i-1], d[i-1] = kmeans(wh / s, i) # points, mean distance
# fig, ax = plt.subplots(1, 2, figsize=(14, 7), tight_layout=True)
# ax = ax.ravel()
# ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.')
# fig, ax = plt.subplots(1, 2, figsize=(14, 7)) # plot wh
# ax[0].hist(wh[wh[:, 0]<100, 0],400)
# ax[1].hist(wh[wh[:, 1]<100, 1],400)
# fig.savefig('wh.png', dpi=200)
# Evolve
npr = np.random
    f, sh, mp, s = anchor_fitness(k), k.shape, 0.9, 0.1  # fitness, anchor shape, mutation probability, sigma
pbar = tqdm(range(gen), desc=f'{prefix}Evolving anchors with Genetic Algorithm:') # progress bar
for _ in pbar:
v = np.ones(sh)
while (v == 1).all(): # mutate until a change occurs (prevent duplicates)
v = ((npr.random(sh) < mp) * random.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0)
kg = (k.copy() * v).clip(min=2.0)
fg = anchor_fitness(kg)
if fg > f:
f, k = fg, kg.copy()
pbar.desc = f'{prefix}Evolving anchors with Genetic Algorithm: fitness = {f:.4f}'
if verbose:
print_results(k)
return print_results(k) | zh-ito-yolov5 | /zh-ito-yolov5-1.0.0.0.tar.gz/zh-ito-yolov5-1.0.0.0/utils/autoanchor.py | autoanchor.py |
import logging
import math
import random
import cv2
import numpy as np
from utils.general import colorstr, segment2box, resample_segments, check_version
from utils.metrics import bbox_ioa
class Albumentations:
# YOLOv5 Albumentations class (optional, only used if package is installed)
def __init__(self):
self.transform = None
try:
import albumentations as A
check_version(A.__version__, '1.0.3') # version requirement
self.transform = A.Compose([
A.Blur(p=0.1),
A.MedianBlur(p=0.1),
A.ToGray(p=0.01)],
bbox_params=A.BboxParams(format='yolo', label_fields=['class_labels']))
logging.info(colorstr('albumentations: ') + ', '.join(f'{x}' for x in self.transform.transforms if x.p))
except ImportError: # package not installed, skip
pass
except Exception as e:
logging.info(colorstr('albumentations: ') + f'{e}')
def __call__(self, im, labels, p=1.0):
if self.transform and random.random() < p:
new = self.transform(image=im, bboxes=labels[:, 1:], class_labels=labels[:, 0]) # transformed
im, labels = new['image'], np.array([[c, *b] for c, b in zip(new['class_labels'], new['bboxes'])])
return im, labels
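# Example (editor's sketch, not part of the original source): the wrapper is a no-op unless the
# albumentations package is installed; inside the dataloader it is applied roughly as
#   albumentations = Albumentations()                # build the pipeline once
#   im, labels = albumentations(im, labels, p=1.0)   # labels: nx5 array (cls, normalized xywh)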
def augment_hsv(im, hgain=0.5, sgain=0.5, vgain=0.5):
# HSV color-space augmentation
if hgain or sgain or vgain:
r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains
hue, sat, val = cv2.split(cv2.cvtColor(im, cv2.COLOR_BGR2HSV))
dtype = im.dtype # uint8
x = np.arange(0, 256, dtype=r.dtype)
lut_hue = ((x * r[0]) % 180).astype(dtype)
lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
im_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val)))
cv2.cvtColor(im_hsv, cv2.COLOR_HSV2BGR, dst=im) # no return needed
def hist_equalize(im, clahe=True, bgr=False):
# Equalize histogram on BGR image 'im' with im.shape(n,m,3) and range 0-255
yuv = cv2.cvtColor(im, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV)
if clahe:
c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
yuv[:, :, 0] = c.apply(yuv[:, :, 0])
else:
yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0]) # equalize Y channel histogram
return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB) # convert YUV image to RGB
def replicate(im, labels):
# Replicate labels
h, w = im.shape[:2]
boxes = labels[:, 1:].astype(int)
x1, y1, x2, y2 = boxes.T
s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels)
for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices
x1b, y1b, x2b, y2b = boxes[i]
bh, bw = y2b - y1b, x2b - x1b
yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y
x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh]
im[y1a:y2a, x1a:x2a] = im[y1b:y2b, x1b:x2b] # im4[ymin:ymax, xmin:xmax]
labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0)
return im, labels
def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32):
# Resize and pad image while meeting stride-multiple constraints
shape = im.shape[:2] # current shape [height, width]
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
# Scale ratio (new / old)
r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
if not scaleup: # only scale down, do not scale up (for better val mAP)
r = min(r, 1.0)
# Compute padding
ratio = r, r # width, height ratios
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
if auto: # minimum rectangle
dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding
elif scaleFill: # stretch
dw, dh = 0.0, 0.0
new_unpad = (new_shape[1], new_shape[0])
ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios
dw /= 2 # divide padding into 2 sides
dh /= 2
if shape[::-1] != new_unpad: # resize
im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR)
top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border
return im, ratio, (dw, dh)
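# Example (editor's sketch, not part of the original source): standalone use on a BGR image loaded
# with OpenCV (file path assumed):
#   im0 = cv2.imread('data/images/bus.jpg')                            # HWC BGR
#   im, ratio, (dw, dh) = letterbox(im0, new_shape=640, stride=32, auto=True)
#   # im keeps the aspect ratio of im0 and is padded with gray (114) to a stride-multiple shape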
def random_perspective(im, targets=(), segments=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0,
border=(0, 0)):
# torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
# targets = [cls, xyxy]
height = im.shape[0] + border[0] * 2 # shape(h,w,c)
width = im.shape[1] + border[1] * 2
# Center
C = np.eye(3)
C[0, 2] = -im.shape[1] / 2 # x translation (pixels)
C[1, 2] = -im.shape[0] / 2 # y translation (pixels)
# Perspective
P = np.eye(3)
P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y)
P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x)
# Rotation and Scale
R = np.eye(3)
a = random.uniform(-degrees, degrees)
# a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
s = random.uniform(1 - scale, 1 + scale)
# s = 2 ** random.uniform(-scale, scale)
R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)
# Shear
S = np.eye(3)
S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)
S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)
# Translation
T = np.eye(3)
T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels)
T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels)
# Combined rotation matrix
M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT
if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed
if perspective:
im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114))
else: # affine
im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114))
# Visualize
# import matplotlib.pyplot as plt
# ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
# ax[0].imshow(im[:, :, ::-1]) # base
# ax[1].imshow(im2[:, :, ::-1]) # warped
# Transform label coordinates
n = len(targets)
if n:
use_segments = any(x.any() for x in segments)
new = np.zeros((n, 4))
if use_segments: # warp segments
segments = resample_segments(segments) # upsample
for i, segment in enumerate(segments):
xy = np.ones((len(segment), 3))
xy[:, :2] = segment
xy = xy @ M.T # transform
xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2] # perspective rescale or affine
# clip
new[i] = segment2box(xy, width, height)
else: # warp boxes
xy = np.ones((n * 4, 3))
xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
xy = xy @ M.T # transform
xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8) # perspective rescale or affine
# create new boxes
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
# clip
new[:, [0, 2]] = new[:, [0, 2]].clip(0, width)
new[:, [1, 3]] = new[:, [1, 3]].clip(0, height)
# filter candidates
i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10)
targets = targets[i]
targets[:, 1:5] = new[i]
return im, targets
def copy_paste(im, labels, segments, p=0.5):
# Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy)
n = len(segments)
if p and n:
h, w, c = im.shape # height, width, channels
im_new = np.zeros(im.shape, np.uint8)
for j in random.sample(range(n), k=round(p * n)):
l, s = labels[j], segments[j]
box = w - l[3], l[2], w - l[1], l[4]
ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area
if (ioa < 0.30).all(): # allow 30% obscuration of existing labels
labels = np.concatenate((labels, [[l[0], *box]]), 0)
segments.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1))
cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (255, 255, 255), cv2.FILLED)
result = cv2.bitwise_and(src1=im, src2=im_new)
result = cv2.flip(result, 1) # augment segments (flip left-right)
i = result > 0 # pixels to replace
# i[:, :] = result.max(2).reshape(h, w, 1) # act over ch
im[i] = result[i] # cv2.imwrite('debug.jpg', im) # debug
return im, labels, segments
def cutout(im, labels, p=0.5):
# Applies image cutout augmentation https://arxiv.org/abs/1708.04552
if random.random() < p:
h, w = im.shape[:2]
scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction
for s in scales:
mask_h = random.randint(1, int(h * s)) # create random masks
mask_w = random.randint(1, int(w * s))
# box
xmin = max(0, random.randint(0, w) - mask_w // 2)
ymin = max(0, random.randint(0, h) - mask_h // 2)
xmax = min(w, xmin + mask_w)
ymax = min(h, ymin + mask_h)
# apply random color mask
im[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]
# return unobscured labels
if len(labels) and s > 0.03:
box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area
labels = labels[ioa < 0.60] # remove >60% obscured labels
return labels
def mixup(im, labels, im2, labels2):
# Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf
r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0
im = (im * r + im2 * (1 - r)).astype(np.uint8)
labels = np.concatenate((labels, labels2), 0)
return im, labels
def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n)
# Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio
w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio
return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates | zh-ito-yolov5 | /zh-ito-yolov5-1.0.0.0.tar.gz/zh-ito-yolov5-1.0.0.0/utils/augmentations.py | augmentations.py |
import datetime
import logging
import math
import os
import platform
import subprocess
import time
from contextlib import contextmanager
from copy import deepcopy
from pathlib import Path
import torch
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
import torchvision
try:
import thop # for FLOPs computation
except ImportError:
thop = None
LOGGER = logging.getLogger(__name__)
@contextmanager
def torch_distributed_zero_first(local_rank: int):
"""
    Context manager to make all processes in distributed training wait for the local master to do something first.
"""
if local_rank not in [-1, 0]:
dist.barrier(device_ids=[local_rank])
yield
if local_rank == 0:
dist.barrier(device_ids=[0])
def init_torch_seeds(seed=0):
# Speed-reproducibility tradeoff https://pytorch.org/docs/stable/notes/randomness.html
torch.manual_seed(seed)
if seed == 0: # slower, more reproducible
cudnn.benchmark, cudnn.deterministic = False, True
else: # faster, less reproducible
cudnn.benchmark, cudnn.deterministic = True, False
def date_modified(path=__file__):
# return human-readable file modification date, i.e. '2021-3-26'
t = datetime.datetime.fromtimestamp(Path(path).stat().st_mtime)
return f'{t.year}-{t.month}-{t.day}'
def git_describe(path=Path(__file__).parent): # path must be a directory
# return human-readable git description, i.e. v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe
s = f'git -C {path} describe --tags --long --always'
try:
return subprocess.check_output(s, shell=True, stderr=subprocess.STDOUT).decode()[:-1]
except subprocess.CalledProcessError as e:
return '' # not a git repository
def select_device(device='', batch_size=None):
# device = 'cpu' or '0' or '0,1,2,3'
s = f'YOLOv5 🚀 {git_describe() or date_modified()} torch {torch.__version__} ' # string
device = str(device).strip().lower().replace('cuda:', '') # to string, 'cuda:0' to '0'
cpu = device == 'cpu'
if cpu:
os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False
elif device: # non-cpu device requested
os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable
assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested' # check availability
cuda = not cpu and torch.cuda.is_available()
if cuda:
devices = device.split(',') if device else '0' # range(torch.cuda.device_count()) # i.e. 0,1,6,7
n = len(devices) # device count
if n > 1 and batch_size: # check batch_size is divisible by device_count
assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}'
space = ' ' * (len(s) + 1)
for i, d in enumerate(devices):
p = torch.cuda.get_device_properties(i)
s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2}MB)\n" # bytes to MB
else:
s += 'CPU\n'
LOGGER.info(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s) # emoji-safe
return torch.device('cuda:0' if cuda else 'cpu')
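# Example (editor's sketch, not part of the original source):
#   device = select_device('0', batch_size=16)   # first CUDA device, or select_device('cpu')
#   model = model.to(device)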
def time_sync():
# pytorch-accurate time
if torch.cuda.is_available():
torch.cuda.synchronize()
return time.time()
def profile(input, ops, n=10, device=None):
# YOLOv5 speed/memory/FLOPs profiler
#
# Usage:
# input = torch.randn(16, 3, 640, 640)
# m1 = lambda x: x * torch.sigmoid(x)
# m2 = nn.SiLU()
# profile(input, [m1, m2], n=100) # profile over 100 iterations
results = []
logging.basicConfig(format="%(message)s", level=logging.INFO)
device = device or select_device()
print(f"{'Params':>12s}{'GFLOPs':>12s}{'GPU_mem (GB)':>14s}{'forward (ms)':>14s}{'backward (ms)':>14s}"
f"{'input':>24s}{'output':>24s}")
for x in input if isinstance(input, list) else [input]:
x = x.to(device)
x.requires_grad = True
for m in ops if isinstance(ops, list) else [ops]:
m = m.to(device) if hasattr(m, 'to') else m # device
m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m
tf, tb, t = 0., 0., [0., 0., 0.] # dt forward, backward
try:
flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 # GFLOPs
            except Exception:  # thop not installed or op unsupported
flops = 0
try:
for _ in range(n):
t[0] = time_sync()
y = m(x)
t[1] = time_sync()
try:
_ = (sum([yi.sum() for yi in y]) if isinstance(y, list) else y).sum().backward()
t[2] = time_sync()
except Exception as e: # no backward method
print(e)
t[2] = float('nan')
tf += (t[1] - t[0]) * 1000 / n # ms per op forward
tb += (t[2] - t[1]) * 1000 / n # ms per op backward
mem = torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0 # (GB)
s_in = tuple(x.shape) if isinstance(x, torch.Tensor) else 'list'
s_out = tuple(y.shape) if isinstance(y, torch.Tensor) else 'list'
p = sum(list(x.numel() for x in m.parameters())) if isinstance(m, nn.Module) else 0 # parameters
print(f'{p:12}{flops:12.4g}{mem:>14.3f}{tf:14.4g}{tb:14.4g}{str(s_in):>24s}{str(s_out):>24s}')
results.append([p, flops, mem, tf, tb, s_in, s_out])
except Exception as e:
print(e)
results.append(None)
torch.cuda.empty_cache()
return results
def is_parallel(model):
# Returns True if model is of type DP or DDP
return type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)
def de_parallel(model):
# De-parallelize a model: returns single-GPU model if model is of type DP or DDP
return model.module if is_parallel(model) else model
def intersect_dicts(da, db, exclude=()):
# Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values
return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape}
def initialize_weights(model):
for m in model.modules():
t = type(m)
if t is nn.Conv2d:
pass # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif t is nn.BatchNorm2d:
m.eps = 1e-3
m.momentum = 0.03
elif t in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6]:
m.inplace = True
def find_modules(model, mclass=nn.Conv2d):
# Finds layer indices matching module class 'mclass'
return [i for i, m in enumerate(model.module_list) if isinstance(m, mclass)]
def sparsity(model):
# Return global model sparsity
a, b = 0., 0.
for p in model.parameters():
a += p.numel()
b += (p == 0).sum()
return b / a
def prune(model, amount=0.3):
# Prune model to requested global sparsity
import torch.nn.utils.prune as prune
print('Pruning model... ', end='')
for name, m in model.named_modules():
if isinstance(m, nn.Conv2d):
prune.l1_unstructured(m, name='weight', amount=amount) # prune
prune.remove(m, 'weight') # make permanent
print(' %.3g global sparsity' % sparsity(model))
def fuse_conv_and_bn(conv, bn):
# Fuse convolution and batchnorm layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/
fusedconv = nn.Conv2d(conv.in_channels,
conv.out_channels,
kernel_size=conv.kernel_size,
stride=conv.stride,
padding=conv.padding,
groups=conv.groups,
bias=True).requires_grad_(False).to(conv.weight.device)
# prepare filters
w_conv = conv.weight.clone().view(conv.out_channels, -1)
w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))
fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape))
# prepare spatial bias
b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias
b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))
fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)
return fusedconv
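# Example (editor's sketch, not part of the original source): fusing is applied layer by layer at
# inference time, roughly as the model's fuse() method does (attribute names assumed):
#   for m in model.modules():
#       if hasattr(m, 'conv') and hasattr(m, 'bn'):
#           m.conv = fuse_conv_and_bn(m.conv, m.bn)  # fused convolution replaces the original
#           delattr(m, 'bn')                         # batchnorm is no longer needed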
def model_info(model, verbose=False, img_size=640):
# Model information. img_size may be int or list, i.e. img_size=640 or img_size=[640, 320]
n_p = sum(x.numel() for x in model.parameters()) # number parameters
n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients
if verbose:
print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma'))
for i, (name, p) in enumerate(model.named_parameters()):
name = name.replace('module_list.', '')
print('%5g %40s %9s %12g %20s %10.3g %10.3g' %
(i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))
try: # FLOPs
from thop import profile
stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32
img = torch.zeros((1, model.yaml.get('ch', 3), stride, stride), device=next(model.parameters()).device) # input
flops = profile(deepcopy(model), inputs=(img,), verbose=False)[0] / 1E9 * 2 # stride GFLOPs
img_size = img_size if isinstance(img_size, list) else [img_size, img_size] # expand if int/float
fs = ', %.1f GFLOPs' % (flops * img_size[0] / stride * img_size[1] / stride) # 640x640 GFLOPs
    except Exception:  # thop not installed or profiling failed
fs = ''
LOGGER.info(f"Model Summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}")
def load_classifier(name='resnet101', n=2):
# Loads a pretrained model reshaped to n-class output
model = torchvision.models.__dict__[name](pretrained=True)
# ResNet model properties
# input_size = [3, 224, 224]
# input_space = 'RGB'
# input_range = [0, 1]
# mean = [0.485, 0.456, 0.406]
# std = [0.229, 0.224, 0.225]
# Reshape output to n classes
filters = model.fc.weight.shape[1]
model.fc.bias = nn.Parameter(torch.zeros(n), requires_grad=True)
model.fc.weight = nn.Parameter(torch.zeros(n, filters), requires_grad=True)
model.fc.out_features = n
return model
def scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,416)
# scales img(bs,3,y,x) by ratio constrained to gs-multiple
if ratio == 1.0:
return img
else:
h, w = img.shape[2:]
s = (int(h * ratio), int(w * ratio)) # new size
img = F.interpolate(img, size=s, mode='bilinear', align_corners=False) # resize
if not same_shape: # pad/crop img
h, w = [math.ceil(x * ratio / gs) * gs for x in (h, w)]
return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean
def copy_attr(a, b, include=(), exclude=()):
# Copy attributes from b to a, options to only include [...] and to exclude [...]
for k, v in b.__dict__.items():
if (len(include) and k not in include) or k.startswith('_') or k in exclude:
continue
else:
setattr(a, k, v)
class EarlyStopping:
# YOLOv5 simple early stopper
def __init__(self, patience=30):
self.best_fitness = 0.0 # i.e. mAP
self.best_epoch = 0
self.patience = patience # epochs to wait after fitness stops improving to stop
def __call__(self, epoch, fitness):
if fitness >= self.best_fitness: # >= 0 to allow for early zero-fitness stage of training
self.best_epoch = epoch
self.best_fitness = fitness
stop = (epoch - self.best_epoch) >= self.patience # stop training if patience exceeded
if stop:
LOGGER.info(f'EarlyStopping patience {self.patience} exceeded, stopping training.')
return stop
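# Example (editor's sketch, not part of the original source): typical use in the epoch loop, where
# fi is the fitness metric (weighted mAP) of the current epoch (names assumed):
#   stopper = EarlyStopping(patience=30)
#   for epoch in range(epochs):
#       ...
#       if stopper(epoch=epoch, fitness=fi):
#           break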
class ModelEMA:
""" Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models
Keep a moving average of everything in the model state_dict (parameters and buffers).
This is intended to allow functionality like
https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
A smoothed version of the weights is necessary for some training schemes to perform well.
    This class is sensitive to where it is initialized in the sequence of model init,
GPU assignment and distributed training wrappers.
"""
def __init__(self, model, decay=0.9999, updates=0):
# Create EMA
self.ema = deepcopy(model.module if is_parallel(model) else model).eval() # FP32 EMA
# if next(model.parameters()).device.type != 'cpu':
# self.ema.half() # FP16 EMA
self.updates = updates # number of EMA updates
self.decay = lambda x: decay * (1 - math.exp(-x / 2000)) # decay exponential ramp (to help early epochs)
for p in self.ema.parameters():
p.requires_grad_(False)
def update(self, model):
# Update EMA parameters
with torch.no_grad():
self.updates += 1
d = self.decay(self.updates)
msd = model.module.state_dict() if is_parallel(model) else model.state_dict() # model state_dict
for k, v in self.ema.state_dict().items():
if v.dtype.is_floating_point:
v *= d
v += (1. - d) * msd[k].detach()
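    # Example (editor's sketch, not part of the original source): typical use during training
    # (names assumed):
    #   ema = ModelEMA(model)         # create once after the model is built
    #   ema.update(model)             # call after every optimizer step
    #   results = validate(ema.ema)   # evaluate / save the smoothed weights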
def update_attr(self, model, include=(), exclude=('process_group', 'reducer')):
# Update EMA attributes
copy_attr(self.ema, model, include, exclude) | zh-ito-yolov5 | /zh-ito-yolov5-1.0.0.0.tar.gz/zh-ito-yolov5-1.0.0.0/utils/torch_utils.py | torch_utils.py |
import glob
import hashlib
import json
import logging
import os
import random
import shutil
import time
from itertools import repeat
from multiprocessing.pool import ThreadPool, Pool
from pathlib import Path
from threading import Thread
import cv2
import numpy as np
import torch
import torch.nn.functional as F
import yaml
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm
from utils.augmentations import Albumentations, augment_hsv, copy_paste, letterbox, mixup, random_perspective
from utils.general import check_requirements, check_file, check_dataset, xywh2xyxy, xywhn2xyxy, xyxy2xywhn, \
xyn2xy, segments2boxes, clean_str
from utils.torch_utils import torch_distributed_zero_first
# Parameters
HELP_URL = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
IMG_FORMATS = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo'] # acceptable image suffixes
VID_FORMATS = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes
NUM_THREADS = min(8, os.cpu_count()) # number of multiprocessing threads
# Get orientation exif tag
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation] == 'Orientation':
break
def get_hash(paths):
# Returns a single hash value of a list of paths (files or dirs)
size = sum(os.path.getsize(p) for p in paths if os.path.exists(p)) # sizes
h = hashlib.md5(str(size).encode()) # hash sizes
h.update(''.join(paths).encode()) # hash paths
return h.hexdigest() # return hash
def exif_size(img):
# Returns exif-corrected PIL size
s = img.size # (width, height)
try:
rotation = dict(img._getexif().items())[orientation]
if rotation == 6: # rotation 270
s = (s[1], s[0])
elif rotation == 8: # rotation 90
s = (s[1], s[0])
except:
pass
return s
def exif_transpose(image):
"""
Transpose a PIL image accordingly if it has an EXIF Orientation tag.
From https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageOps.py
:param image: The image to transpose.
:return: An image.
"""
exif = image.getexif()
orientation = exif.get(0x0112, 1) # default 1
if orientation > 1:
method = {2: Image.FLIP_LEFT_RIGHT,
3: Image.ROTATE_180,
4: Image.FLIP_TOP_BOTTOM,
5: Image.TRANSPOSE,
6: Image.ROTATE_270,
7: Image.TRANSVERSE,
8: Image.ROTATE_90,
}.get(orientation)
if method is not None:
image = image.transpose(method)
del exif[0x0112]
image.info["exif"] = exif.tobytes()
return image
def create_dataloader(path, imgsz, batch_size, stride, single_cls=False, hyp=None, augment=False, cache=False, pad=0.0,
rect=False, rank=-1, workers=8, image_weights=False, quad=False, prefix=''):
    # Make sure only the first process in DDP processes the dataset first, so the others can use the cache
with torch_distributed_zero_first(rank):
dataset = LoadImagesAndLabels(path, imgsz, batch_size,
augment=augment, # augment images
hyp=hyp, # augmentation hyperparameters
rect=rect, # rectangular training
cache_images=cache,
single_cls=single_cls,
stride=int(stride),
pad=pad,
image_weights=image_weights,
prefix=prefix)
batch_size = min(batch_size, len(dataset))
nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, workers]) # number of workers
sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None
loader = torch.utils.data.DataLoader if image_weights else InfiniteDataLoader
# Use torch.utils.data.DataLoader() if dataset.properties will update during training else InfiniteDataLoader()
dataloader = loader(dataset,
batch_size=batch_size,
num_workers=nw,
sampler=sampler,
pin_memory=True,
collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn)
return dataloader, dataset
class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader):
""" Dataloader that reuses workers
Uses same syntax as vanilla DataLoader
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
self.iterator = super().__iter__()
def __len__(self):
return len(self.batch_sampler.sampler)
def __iter__(self):
for i in range(len(self)):
yield next(self.iterator)
class _RepeatSampler(object):
""" Sampler that repeats forever
Args:
sampler (Sampler)
"""
def __init__(self, sampler):
self.sampler = sampler
def __iter__(self):
while True:
yield from iter(self.sampler)
class LoadImages: # for inference
def __init__(self, path, img_size=640, stride=32, auto=True):
p = str(Path(path).absolute()) # os-agnostic absolute path
if '*' in p:
files = sorted(glob.glob(p, recursive=True)) # glob
elif os.path.isdir(p):
files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir
elif os.path.isfile(p):
files = [p] # files
else:
raise Exception(f'ERROR: {p} does not exist')
images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS]
videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS]
ni, nv = len(images), len(videos)
self.img_size = img_size
self.stride = stride
self.files = images + videos
self.nf = ni + nv # number of files
self.video_flag = [False] * ni + [True] * nv
self.mode = 'image'
self.auto = auto
if any(videos):
self.new_video(videos[0]) # new video
else:
self.cap = None
assert self.nf > 0, f'No images or videos found in {p}. ' \
f'Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}'
def __iter__(self):
self.count = 0
return self
def __next__(self):
if self.count == self.nf:
raise StopIteration
path = self.files[self.count]
if self.video_flag[self.count]:
# Read video
self.mode = 'video'
ret_val, img0 = self.cap.read()
if not ret_val:
self.count += 1
self.cap.release()
if self.count == self.nf: # last video
raise StopIteration
else:
path = self.files[self.count]
self.new_video(path)
ret_val, img0 = self.cap.read()
self.frame += 1
print(f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: ', end='')
else:
# Read image
self.count += 1
img0 = cv2.imread(path) # BGR
assert img0 is not None, 'Image Not Found ' + path
print(f'image {self.count}/{self.nf} {path}: ', end='')
# Padded resize
img = letterbox(img0, self.img_size, stride=self.stride, auto=self.auto)[0]
# Convert
img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
img = np.ascontiguousarray(img)
return path, img, img0, self.cap
def new_video(self, path):
self.frame = 0
self.cap = cv2.VideoCapture(path)
self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
def __len__(self):
return self.nf # number of files
class LoadWebcam: # for inference
def __init__(self, pipe='0', img_size=640, stride=32):
self.img_size = img_size
self.stride = stride
self.pipe = eval(pipe) if pipe.isnumeric() else pipe
self.cap = cv2.VideoCapture(self.pipe) # video capture object
self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if cv2.waitKey(1) == ord('q'): # q to quit
self.cap.release()
cv2.destroyAllWindows()
raise StopIteration
# Read frame
ret_val, img0 = self.cap.read()
img0 = cv2.flip(img0, 1) # flip left-right
# Print
assert ret_val, f'Camera Error {self.pipe}'
img_path = 'webcam.jpg'
print(f'webcam {self.count}: ', end='')
# Padded resize
img = letterbox(img0, self.img_size, stride=self.stride)[0]
# Convert
img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
img = np.ascontiguousarray(img)
return img_path, img, img0, None
def __len__(self):
return 0
class LoadStreams: # multiple IP or RTSP cameras
def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True):
self.mode = 'stream'
self.img_size = img_size
self.stride = stride
if os.path.isfile(sources):
with open(sources, 'r') as f:
sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())]
else:
sources = [sources]
n = len(sources)
self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n
self.sources = [clean_str(x) for x in sources] # clean source names for later
self.auto = auto
for i, s in enumerate(sources): # index, source
# Start thread to read frames from video stream
print(f'{i + 1}/{n}: {s}... ', end='')
if 'youtube.com/' in s or 'youtu.be/' in s: # if source is YouTube video
check_requirements(('pafy', 'youtube_dl'))
import pafy
s = pafy.new(s).getbest(preftype="mp4").url # YouTube URL
s = eval(s) if s.isnumeric() else s # i.e. s = '0' local webcam
cap = cv2.VideoCapture(s)
assert cap.isOpened(), f'Failed to open {s}'
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
self.fps[i] = max(cap.get(cv2.CAP_PROP_FPS) % 100, 0) or 30.0 # 30 FPS fallback
self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf') # infinite stream fallback
_, self.imgs[i] = cap.read() # guarantee first frame
self.threads[i] = Thread(target=self.update, args=([i, cap]), daemon=True)
print(f" success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)")
self.threads[i].start()
print('') # newline
# check for common shapes
s = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0].shape for x in self.imgs])
self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal
if not self.rect:
print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')
def update(self, i, cap):
# Read stream `i` frames in daemon thread
        n, f, read = 0, self.frames[i], 1  # frame number, frame count, inference every 'read' frame
while cap.isOpened() and n < f:
n += 1
# _, self.imgs[index] = cap.read()
cap.grab()
if n % read == 0:
success, im = cap.retrieve()
self.imgs[i] = im if success else self.imgs[i] * 0
time.sleep(1 / self.fps[i]) # wait time
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'): # q to quit
cv2.destroyAllWindows()
raise StopIteration
# Letterbox
img0 = self.imgs.copy()
img = [letterbox(x, self.img_size, stride=self.stride, auto=self.rect and self.auto)[0] for x in img0]
# Stack
img = np.stack(img, 0)
# Convert
img = img[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW
img = np.ascontiguousarray(img)
return self.sources, img, img0, None
def __len__(self):
return len(self.sources) # 1E12 frames = 32 streams at 30 FPS for 30 years
def img2label_paths(img_paths):
# Define label paths as a function of image paths
sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep # /images/, /labels/ substrings
return [sb.join(x.rsplit(sa, 1)).rsplit('.', 1)[0] + '.txt' for x in img_paths]
class LoadImagesAndLabels(Dataset): # for training/testing
def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
cache_images=False, single_cls=False, stride=32, pad=0.0, prefix=''):
self.img_size = img_size
self.augment = augment
self.hyp = hyp
self.image_weights = image_weights
self.rect = False if image_weights else rect
self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)
self.mosaic_border = [-img_size // 2, -img_size // 2]
self.stride = stride
self.path = path
self.albumentations = Albumentations() if augment else None
try:
f = [] # image files
for p in path if isinstance(path, list) else [path]:
p = Path(p) # os-agnostic
if p.is_dir(): # dir
f += glob.glob(str(p / '**' / '*.*'), recursive=True)
# f = list(p.rglob('**/*.*')) # pathlib
elif p.is_file(): # file
with open(p, 'r') as t:
t = t.read().strip().splitlines()
parent = str(p.parent) + os.sep
f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path
# f += [p.parent / x.lstrip(os.sep) for x in t] # local to global path (pathlib)
else:
raise Exception(f'{prefix}{p} does not exist')
self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS])
# self.img_files = sorted([x for x in f if x.suffix[1:].lower() in img_formats]) # pathlib
assert self.img_files, f'{prefix}No images found'
except Exception as e:
raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {HELP_URL}')
# Check cache
self.label_files = img2label_paths(self.img_files) # labels
cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache')
try:
cache, exists = np.load(cache_path, allow_pickle=True).item(), True # load dict
assert cache['version'] == 0.4 and cache['hash'] == get_hash(self.label_files + self.img_files)
except:
cache, exists = self.cache_labels(cache_path, prefix), False # cache
# Display cache
nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupted, total
if exists:
d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted"
tqdm(None, desc=prefix + d, total=n, initial=n) # display cache results
if cache['msgs']:
logging.info('\n'.join(cache['msgs'])) # display warnings
assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {HELP_URL}'
# Read cache
[cache.pop(k) for k in ('hash', 'version', 'msgs')] # remove items
labels, shapes, self.segments = zip(*cache.values())
self.labels = list(labels)
self.shapes = np.array(shapes, dtype=np.float64)
self.img_files = list(cache.keys()) # update
self.label_files = img2label_paths(cache.keys()) # update
if single_cls:
for x in self.labels:
x[:, 0] = 0
n = len(shapes) # number of images
bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index
nb = bi[-1] + 1 # number of batches
self.batch = bi # batch index of image
self.n = n
self.indices = range(n)
# Rectangular Training
if self.rect:
# Sort by aspect ratio
s = self.shapes # wh
ar = s[:, 1] / s[:, 0] # aspect ratio
irect = ar.argsort()
self.img_files = [self.img_files[i] for i in irect]
self.label_files = [self.label_files[i] for i in irect]
self.labels = [self.labels[i] for i in irect]
self.shapes = s[irect] # wh
ar = ar[irect]
# Set training image shapes
shapes = [[1, 1]] * nb
for i in range(nb):
ari = ar[bi == i]
mini, maxi = ari.min(), ari.max()
if maxi < 1:
shapes[i] = [maxi, 1]
elif mini > 1:
shapes[i] = [1, 1 / mini]
self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride
# Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
self.imgs, self.img_npy = [None] * n, [None] * n
if cache_images:
if cache_images == 'disk':
self.im_cache_dir = Path(Path(self.img_files[0]).parent.as_posix() + '_npy')
self.img_npy = [self.im_cache_dir / Path(f).with_suffix('.npy').name for f in self.img_files]
self.im_cache_dir.mkdir(parents=True, exist_ok=True)
gb = 0 # Gigabytes of cached images
self.img_hw0, self.img_hw = [None] * n, [None] * n
results = ThreadPool(NUM_THREADS).imap(lambda x: load_image(*x), zip(repeat(self), range(n)))
pbar = tqdm(enumerate(results), total=n)
for i, x in pbar:
if cache_images == 'disk':
if not self.img_npy[i].exists():
np.save(self.img_npy[i].as_posix(), x[0])
gb += self.img_npy[i].stat().st_size
else:
self.imgs[i], self.img_hw0[i], self.img_hw[i] = x # im, hw_orig, hw_resized = load_image(self, i)
gb += self.imgs[i].nbytes
pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB {cache_images})'
pbar.close()
def cache_labels(self, path=Path('./labels.cache'), prefix=''):
# Cache dataset labels, check images and read shapes
x = {} # dict
nm, nf, ne, nc, msgs = 0, 0, 0, 0, [] # number missing, found, empty, corrupt, messages
desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels..."
with Pool(NUM_THREADS) as pool:
pbar = tqdm(pool.imap_unordered(verify_image_label, zip(self.img_files, self.label_files, repeat(prefix))),
desc=desc, total=len(self.img_files))
for im_file, l, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar:
nm += nm_f
nf += nf_f
ne += ne_f
nc += nc_f
if im_file:
x[im_file] = [l, shape, segments]
if msg:
msgs.append(msg)
pbar.desc = f"{desc}{nf} found, {nm} missing, {ne} empty, {nc} corrupted"
pbar.close()
if msgs:
logging.info('\n'.join(msgs))
if nf == 0:
logging.info(f'{prefix}WARNING: No labels found in {path}. See {HELP_URL}')
x['hash'] = get_hash(self.label_files + self.img_files)
x['results'] = nf, nm, ne, nc, len(self.img_files)
x['msgs'] = msgs # warnings
x['version'] = 0.4 # cache version
try:
np.save(path, x) # save cache for next time
path.with_suffix('.cache.npy').rename(path) # remove .npy suffix
logging.info(f'{prefix}New cache created: {path}')
except Exception as e:
logging.info(f'{prefix}WARNING: Cache directory {path.parent} is not writeable: {e}') # path not writeable
return x
def __len__(self):
return len(self.img_files)
# def __iter__(self):
# self.count = -1
# print('ran dataset iter')
# #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
# return self
def __getitem__(self, index):
index = self.indices[index] # linear, shuffled, or image_weights
hyp = self.hyp
mosaic = self.mosaic and random.random() < hyp['mosaic']
if mosaic:
# Load mosaic
img, labels = load_mosaic(self, index)
shapes = None
# MixUp augmentation
if random.random() < hyp['mixup']:
img, labels = mixup(img, labels, *load_mosaic(self, random.randint(0, self.n - 1)))
else:
# Load image
img, (h0, w0), (h, w) = load_image(self, index)
# Letterbox
shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape
img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
labels = self.labels[index].copy()
if labels.size: # normalized xywh to pixel xyxy format
labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1])
if self.augment:
img, labels = random_perspective(img, labels,
degrees=hyp['degrees'],
translate=hyp['translate'],
scale=hyp['scale'],
shear=hyp['shear'],
perspective=hyp['perspective'])
nl = len(labels) # number of labels
if nl:
labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1E-3)
if self.augment:
# Albumentations
img, labels = self.albumentations(img, labels)
nl = len(labels) # update after albumentations
# HSV color-space
augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
# Flip up-down
if random.random() < hyp['flipud']:
img = np.flipud(img)
if nl:
labels[:, 2] = 1 - labels[:, 2]
# Flip left-right
if random.random() < hyp['fliplr']:
img = np.fliplr(img)
if nl:
labels[:, 1] = 1 - labels[:, 1]
# Cutouts
# labels = cutout(img, labels, p=0.5)
labels_out = torch.zeros((nl, 6))
if nl:
labels_out[:, 1:] = torch.from_numpy(labels)
# Convert
img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
img = np.ascontiguousarray(img)
return torch.from_numpy(img), labels_out, self.img_files[index], shapes
@staticmethod
def collate_fn(batch):
img, label, path, shapes = zip(*batch) # transposed
for i, l in enumerate(label):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img, 0), torch.cat(label, 0), path, shapes
@staticmethod
def collate_fn4(batch):
img, label, path, shapes = zip(*batch) # transposed
n = len(shapes) // 4
img4, label4, path4, shapes4 = [], [], path[:n], shapes[:n]
ho = torch.tensor([[0., 0, 0, 1, 0, 0]])
wo = torch.tensor([[0., 0, 1, 0, 0, 0]])
s = torch.tensor([[1, 1, .5, .5, .5, .5]]) # scale
for i in range(n): # zidane torch.zeros(16,3,720,1280) # BCHW
i *= 4
if random.random() < 0.5:
im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2., mode='bilinear', align_corners=False)[
0].type(img[i].type())
l = label[i]
else:
im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2)
l = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s
img4.append(im)
label4.append(l)
for i, l in enumerate(label4):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img4, 0), torch.cat(label4, 0), path4, shapes4
# Ancillary functions --------------------------------------------------------------------------------------------------
def load_image(self, i):
# loads 1 image from dataset index 'i', returns im, original hw, resized hw
im = self.imgs[i]
if im is None: # not cached in ram
npy = self.img_npy[i]
if npy and npy.exists(): # load npy
im = np.load(npy)
else: # read image
path = self.img_files[i]
im = cv2.imread(path) # BGR
assert im is not None, 'Image Not Found ' + path
h0, w0 = im.shape[:2] # orig hw
r = self.img_size / max(h0, w0) # ratio
if r != 1: # if sizes are not equal
im = cv2.resize(im, (int(w0 * r), int(h0 * r)),
interpolation=cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR)
return im, (h0, w0), im.shape[:2] # im, hw_original, hw_resized
else:
return self.imgs[i], self.img_hw0[i], self.img_hw[i] # im, hw_original, hw_resized
def load_mosaic(self, index):
# loads images in a 4-mosaic
labels4, segments4 = [], []
s = self.img_size
yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y
indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
# place img in img4
if i == 0: # top left
img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
elif i == 1: # top right
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
elif i == 2: # bottom left
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
elif i == 3: # bottom right
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
padw = x1a - x1b
padh = y1a - y1b
# Labels
labels, segments = self.labels[index].copy(), self.segments[index].copy()
if labels.size:
labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format
segments = [xyn2xy(x, w, h, padw, padh) for x in segments]
labels4.append(labels)
segments4.extend(segments)
# Concat/clip labels
labels4 = np.concatenate(labels4, 0)
for x in (labels4[:, 1:], *segments4):
np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective()
# img4, labels4 = replicate(img4, labels4) # replicate
# Augment
img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp['copy_paste'])
img4, labels4 = random_perspective(img4, labels4, segments4,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
perspective=self.hyp['perspective'],
border=self.mosaic_border) # border to remove
return img4, labels4
def load_mosaic9(self, index):
# loads images in a 9-mosaic
labels9, segments9 = [], []
s = self.img_size
indices = [index] + random.choices(self.indices, k=8) # 8 additional image indices
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
# place img in img9
if i == 0: # center
            img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8)  # base image with 9 tiles
h0, w0 = h, w
c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates
elif i == 1: # top
c = s, s - h, s + w, s
elif i == 2: # top right
c = s + wp, s - h, s + wp + w, s
elif i == 3: # right
c = s + w0, s, s + w0 + w, s + h
elif i == 4: # bottom right
c = s + w0, s + hp, s + w0 + w, s + hp + h
elif i == 5: # bottom
c = s + w0 - w, s + h0, s + w0, s + h0 + h
elif i == 6: # bottom left
c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h
elif i == 7: # left
c = s - w, s + h0 - h, s, s + h0
elif i == 8: # top left
c = s - w, s + h0 - hp - h, s, s + h0 - hp
padx, pady = c[:2]
x1, y1, x2, y2 = [max(x, 0) for x in c] # allocate coords
# Labels
labels, segments = self.labels[index].copy(), self.segments[index].copy()
if labels.size:
labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady) # normalized xywh to pixel xyxy format
segments = [xyn2xy(x, w, h, padx, pady) for x in segments]
labels9.append(labels)
segments9.extend(segments)
# Image
img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:] # img9[ymin:ymax, xmin:xmax]
hp, wp = h, w # height, width previous
# Offset
yc, xc = [int(random.uniform(0, s)) for _ in self.mosaic_border] # mosaic center x, y
img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s]
# Concat/clip labels
labels9 = np.concatenate(labels9, 0)
labels9[:, [1, 3]] -= xc
labels9[:, [2, 4]] -= yc
c = np.array([xc, yc]) # centers
segments9 = [x - c for x in segments9]
for x in (labels9[:, 1:], *segments9):
np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective()
# img9, labels9 = replicate(img9, labels9) # replicate
# Augment
img9, labels9 = random_perspective(img9, labels9, segments9,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
perspective=self.hyp['perspective'],
border=self.mosaic_border) # border to remove
return img9, labels9
def create_folder(path='./new'):
# Create folder
if os.path.exists(path):
shutil.rmtree(path) # delete output folder
os.makedirs(path) # make new output folder
def flatten_recursive(path='../datasets/coco128'):
# Flatten a recursive directory by bringing all files to top level
new_path = Path(path + '_flat')
create_folder(new_path)
for file in tqdm(glob.glob(str(Path(path)) + '/**/*.*', recursive=True)):
shutil.copyfile(file, new_path / Path(file).name)
def extract_boxes(path='../datasets/coco128'): # from utils.datasets import *; extract_boxes()
# Convert detection dataset into classification dataset, with one directory per class
path = Path(path) # images dir
shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None # remove existing
files = list(path.rglob('*.*'))
n = len(files) # number of files
for im_file in tqdm(files, total=n):
if im_file.suffix[1:] in IMG_FORMATS:
# image
im = cv2.imread(str(im_file))[..., ::-1] # BGR to RGB
h, w = im.shape[:2]
# labels
lb_file = Path(img2label_paths([str(im_file)])[0])
if Path(lb_file).exists():
with open(lb_file, 'r') as f:
lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels
for j, x in enumerate(lb):
c = int(x[0]) # class
f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg' # new filename
if not f.parent.is_dir():
f.parent.mkdir(parents=True)
b = x[1:] * [w, h, w, h] # box
# b[2:] = b[2:].max() # rectangle to square
b[2:] = b[2:] * 1.2 + 3 # pad
b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int)
b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}'
def autosplit(path='../datasets/coco128/images', weights=(0.9, 0.1, 0.0), annotated_only=False):
""" Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files
Usage: from utils.datasets import *; autosplit()
Arguments
path: Path to images directory
weights: Train, val, test weights (list, tuple)
annotated_only: Only use images with an annotated txt file
"""
path = Path(path) # images dir
files = sum([list(path.rglob(f"*.{img_ext}")) for img_ext in IMG_FORMATS], []) # image files only
n = len(files) # number of files
random.seed(0) # for reproducibility
indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split
txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files
[(path.parent / x).unlink(missing_ok=True) for x in txt] # remove existing
print(f'Autosplitting images from {path}' + ', using *.txt labeled images only' * annotated_only)
for i, img in tqdm(zip(indices, files), total=n):
if not annotated_only or Path(img2label_paths([str(img)])[0]).exists(): # check label
with open(path.parent / txt[i], 'a') as f:
f.write('./' + img.relative_to(path.parent).as_posix() + '\n') # add image to txt file
def verify_image_label(args):
# Verify one image-label pair
im_file, lb_file, prefix = args
nm, nf, ne, nc, msg, segments = 0, 0, 0, 0, '', [] # number (missing, found, empty, corrupt), message, segments
try:
# verify images
im = Image.open(im_file)
im.verify() # PIL verify
shape = exif_size(im) # image size
assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels'
assert im.format.lower() in IMG_FORMATS, f'invalid image format {im.format}'
if im.format.lower() in ('jpg', 'jpeg'):
with open(im_file, 'rb') as f:
f.seek(-2, 2)
if f.read() != b'\xff\xd9': # corrupt JPEG
Image.open(im_file).save(im_file, format='JPEG', subsampling=0, quality=100) # re-save image
msg = f'{prefix}WARNING: corrupt JPEG restored and saved {im_file}'
# verify labels
if os.path.isfile(lb_file):
nf = 1 # label found
with open(lb_file, 'r') as f:
l = [x.split() for x in f.read().strip().splitlines() if len(x)]
if any([len(x) > 8 for x in l]): # is segment
classes = np.array([x[0] for x in l], dtype=np.float32)
segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in l] # (cls, xy1...)
l = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh)
l = np.array(l, dtype=np.float32)
if len(l):
assert l.shape[1] == 5, 'labels require 5 columns each'
assert (l >= 0).all(), 'negative labels'
assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels'
assert np.unique(l, axis=0).shape[0] == l.shape[0], 'duplicate labels'
else:
ne = 1 # label empty
l = np.zeros((0, 5), dtype=np.float32)
else:
nm = 1 # label missing
l = np.zeros((0, 5), dtype=np.float32)
return im_file, l, shape, segments, nm, nf, ne, nc, msg
except Exception as e:
nc = 1
msg = f'{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}'
return [None, None, None, None, nm, nf, ne, nc, msg]
def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False, profile=False, hub=False):
""" Return dataset statistics dictionary with images and instances counts per split per class
To run in parent directory: export PYTHONPATH="$PWD/yolov5"
Usage1: from utils.datasets import *; dataset_stats('coco128.yaml', autodownload=True)
Usage2: from utils.datasets import *; dataset_stats('../datasets/coco128_with_yaml.zip')
Arguments
path: Path to data.yaml or data.zip (with data.yaml inside data.zip)
autodownload: Attempt to download dataset if not found locally
verbose: Print stats dictionary
"""
def round_labels(labels):
# Update labels to integer class and 6 decimal place floats
return [[int(c), *[round(x, 4) for x in points]] for c, *points in labels]
def unzip(path):
# Unzip data.zip TODO: CONSTRAINT: path/to/abc.zip MUST unzip to 'path/to/abc/'
if str(path).endswith('.zip'): # path is data.zip
assert Path(path).is_file(), f'Error unzipping {path}, file not found'
assert os.system(f'unzip -q {path} -d {path.parent}') == 0, f'Error unzipping {path}'
dir = path.with_suffix('') # dataset directory
return True, str(dir), next(dir.rglob('*.yaml')) # zipped, data_dir, yaml_path
else: # path is data.yaml
return False, None, path
def hub_ops(f, max_dim=1920):
# HUB ops for 1 image 'f'
im = Image.open(f)
r = max_dim / max(im.height, im.width) # ratio
if r < 1.0: # image too large
im = im.resize((int(im.width * r), int(im.height * r)))
im.save(im_dir / Path(f).name, quality=75) # save
zipped, data_dir, yaml_path = unzip(Path(path))
with open(check_file(yaml_path), errors='ignore') as f:
data = yaml.safe_load(f) # data dict
if zipped:
data['path'] = data_dir # TODO: should this be dir.resolve()?
check_dataset(data, autodownload) # download dataset if missing
hub_dir = Path(data['path'] + ('-hub' if hub else ''))
stats = {'nc': data['nc'], 'names': data['names']} # statistics dictionary
for split in 'train', 'val', 'test':
if data.get(split) is None:
stats[split] = None # i.e. no test set
continue
x = []
dataset = LoadImagesAndLabels(data[split]) # load dataset
for label in tqdm(dataset.labels, total=dataset.n, desc='Statistics'):
x.append(np.bincount(label[:, 0].astype(int), minlength=data['nc']))
x = np.array(x) # shape(128x80)
stats[split] = {'instance_stats': {'total': int(x.sum()), 'per_class': x.sum(0).tolist()},
'image_stats': {'total': dataset.n, 'unlabelled': int(np.all(x == 0, 1).sum()),
'per_class': (x > 0).sum(0).tolist()},
'labels': [{str(Path(k).name): round_labels(v.tolist())} for k, v in
zip(dataset.img_files, dataset.labels)]}
if hub:
im_dir = hub_dir / 'images'
im_dir.mkdir(parents=True, exist_ok=True)
for _ in tqdm(ThreadPool(NUM_THREADS).imap(hub_ops, dataset.img_files), total=dataset.n, desc='HUB Ops'):
pass
# Profile
stats_path = hub_dir / 'stats.json'
if profile:
for _ in range(1):
file = stats_path.with_suffix('.npy')
t1 = time.time()
np.save(file, stats)
t2 = time.time()
x = np.load(file, allow_pickle=True)
print(f'stats.npy times: {time.time() - t2:.3f}s read, {t2 - t1:.3f}s write')
file = stats_path.with_suffix('.json')
t1 = time.time()
with open(file, 'w') as f:
json.dump(stats, f) # save stats *.json
t2 = time.time()
with open(file, 'r') as f:
x = json.load(f) # load hyps dict
print(f'stats.json times: {time.time() - t2:.3f}s read, {t2 - t1:.3f}s write')
# Save, print and return
if hub:
print(f'Saving {stats_path.resolve()}...')
with open(stats_path, 'w') as f:
json.dump(stats, f) # save stats.json
if verbose:
print(json.dumps(stats, indent=2, sort_keys=False))
return stats | zh-ito-yolov5 | /zh-ito-yolov5-1.0.0.0.tar.gz/zh-ito-yolov5-1.0.0.0/utils/datasets.py | datasets.py |
import warnings
from threading import Thread
import torch
from torch.utils.tensorboard import SummaryWriter
from utils.general import colorstr, emojis
from utils.loggers.wandb.wandb_utils import WandbLogger
from utils.plots import plot_images, plot_results
from utils.torch_utils import de_parallel
LOGGERS = ('csv', 'tb', 'wandb') # text-file, TensorBoard, Weights & Biases
try:
import wandb
assert hasattr(wandb, '__version__') # verify package import not local dir
except (ImportError, AssertionError):
wandb = None
class Loggers():
# YOLOv5 Loggers class
def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, include=LOGGERS):
self.save_dir = save_dir
self.weights = weights
self.opt = opt
self.hyp = hyp
self.logger = logger # for printing results to console
self.include = include
self.keys = ['train/box_loss', 'train/obj_loss', 'train/cls_loss', # train loss
'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', # metrics
'val/box_loss', 'val/obj_loss', 'val/cls_loss', # val loss
'x/lr0', 'x/lr1', 'x/lr2'] # params
for k in LOGGERS:
setattr(self, k, None) # init empty logger dictionary
self.csv = True # always log to csv
# Message
if not wandb:
prefix = colorstr('Weights & Biases: ')
s = f"{prefix}run 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs (RECOMMENDED)"
print(emojis(s))
# TensorBoard
s = self.save_dir
if 'tb' in self.include and not self.opt.evolve:
prefix = colorstr('TensorBoard: ')
self.logger.info(f"{prefix}Start with 'tensorboard --logdir {s.parent}', view at http://localhost:6006/")
self.tb = SummaryWriter(str(s))
# W&B
if wandb and 'wandb' in self.include:
wandb_artifact_resume = isinstance(self.opt.resume, str) and self.opt.resume.startswith('wandb-artifact://')
run_id = torch.load(self.weights).get('wandb_id') if self.opt.resume and not wandb_artifact_resume else None
self.opt.hyp = self.hyp # add hyperparameters
self.wandb = WandbLogger(self.opt, run_id)
else:
self.wandb = None
def on_pretrain_routine_end(self):
# Callback runs on pre-train routine end
paths = self.save_dir.glob('*labels*.jpg') # training labels
if self.wandb:
self.wandb.log({"Labels": [wandb.Image(str(x), caption=x.name) for x in paths]})
def on_train_batch_end(self, ni, model, imgs, targets, paths, plots, sync_bn):
# Callback runs on train batch end
if plots:
if ni == 0:
if not sync_bn: # tb.add_graph() --sync known issue https://github.com/ultralytics/yolov5/issues/3754
with warnings.catch_warnings():
warnings.simplefilter('ignore') # suppress jit trace warning
self.tb.add_graph(torch.jit.trace(de_parallel(model), imgs[0:1], strict=False), [])
if ni < 3:
f = self.save_dir / f'train_batch{ni}.jpg' # filename
Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start()
if self.wandb and ni == 10:
files = sorted(self.save_dir.glob('train*.jpg'))
self.wandb.log({'Mosaics': [wandb.Image(str(f), caption=f.name) for f in files if f.exists()]})
def on_train_epoch_end(self, epoch):
# Callback runs on train epoch end
if self.wandb:
self.wandb.current_epoch = epoch + 1
def on_val_image_end(self, pred, predn, path, names, im):
# Callback runs on val image end
if self.wandb:
self.wandb.val_one_image(pred, predn, path, names, im)
def on_val_end(self):
# Callback runs on val end
if self.wandb:
files = sorted(self.save_dir.glob('val*.jpg'))
self.wandb.log({"Validation": [wandb.Image(str(f), caption=f.name) for f in files]})
def on_fit_epoch_end(self, vals, epoch, best_fitness, fi):
# Callback runs at the end of each fit (train+val) epoch
x = {k: v for k, v in zip(self.keys, vals)} # dict
if self.csv:
file = self.save_dir / 'results.csv'
n = len(x) + 1 # number of cols
s = '' if file.exists() else (('%20s,' * n % tuple(['epoch'] + self.keys)).rstrip(',') + '\n') # add header
with open(file, 'a') as f:
f.write(s + ('%20.5g,' * n % tuple([epoch] + vals)).rstrip(',') + '\n')
if self.tb:
for k, v in x.items():
self.tb.add_scalar(k, v, epoch)
if self.wandb:
self.wandb.log(x)
self.wandb.end_epoch(best_result=best_fitness == fi)
def on_model_save(self, last, epoch, final_epoch, best_fitness, fi):
# Callback runs on model save event
if self.wandb:
if ((epoch + 1) % self.opt.save_period == 0 and not final_epoch) and self.opt.save_period != -1:
self.wandb.log_model(last.parent, self.opt, epoch, fi, best_model=best_fitness == fi)
def on_train_end(self, last, best, plots, epoch):
# Callback runs on training end
if plots:
plot_results(file=self.save_dir / 'results.csv') # save results.png
files = ['results.png', 'confusion_matrix.png', *[f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R')]]
files = [(self.save_dir / f) for f in files if (self.save_dir / f).exists()] # filter
if self.tb:
import cv2
for f in files:
self.tb.add_image(f.stem, cv2.imread(str(f))[..., ::-1], epoch, dataformats='HWC')
if self.wandb:
self.wandb.log({"Results": [wandb.Image(str(f), caption=f.name) for f in files]})
# Calling wandb.log. TODO: Refactor this into WandbLogger.log_model
if not self.opt.evolve:
wandb.log_artifact(str(best if best.exists() else last), type='model',
name='run_' + self.wandb.wandb_run.id + '_model',
aliases=['latest', 'best', 'stripped'])
self.wandb.finish_run()
else:
self.wandb.finish_run()
self.wandb = WandbLogger(self.opt) | zh-ito-yolov5 | /zh-ito-yolov5-1.0.0.0.tar.gz/zh-ito-yolov5-1.0.0.0/utils/loggers/__init__.py | __init__.py |
import logging
import os
import sys
from contextlib import contextmanager
from pathlib import Path
import yaml
from tqdm import tqdm
FILE = Path(__file__).absolute()
sys.path.append(FILE.parents[3].as_posix()) # add yolov5/ to path
from utils.datasets import LoadImagesAndLabels
from utils.datasets import img2label_paths
from utils.general import check_dataset, check_file
try:
import wandb
assert hasattr(wandb, '__version__') # verify package import not local dir
except (ImportError, AssertionError):
wandb = None
RANK = int(os.getenv('RANK', -1))
WANDB_ARTIFACT_PREFIX = 'wandb-artifact://'
def remove_prefix(from_string, prefix=WANDB_ARTIFACT_PREFIX):
return from_string[len(prefix):]
def check_wandb_config_file(data_config_file):
wandb_config = '_wandb.'.join(data_config_file.rsplit('.', 1)) # updated data.yaml path
if Path(wandb_config).is_file():
return wandb_config
return data_config_file
def check_wandb_dataset(data_file):
is_wandb_artifact = False
if check_file(data_file) and data_file.endswith('.yaml'):
with open(data_file, errors='ignore') as f:
data_dict = yaml.safe_load(f)
is_wandb_artifact = (data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX) or
data_dict['val'].startswith(WANDB_ARTIFACT_PREFIX))
if is_wandb_artifact:
return data_dict
else:
return check_dataset(data_file)
def get_run_info(run_path):
run_path = Path(remove_prefix(run_path, WANDB_ARTIFACT_PREFIX))
run_id = run_path.stem
project = run_path.parent.stem
entity = run_path.parent.parent.stem
model_artifact_name = 'run_' + run_id + '_model'
return entity, project, run_id, model_artifact_name
def check_wandb_resume(opt):
process_wandb_config_ddp_mode(opt) if RANK not in [-1, 0] else None
if isinstance(opt.resume, str):
if opt.resume.startswith(WANDB_ARTIFACT_PREFIX):
if RANK not in [-1, 0]: # For resuming DDP runs
entity, project, run_id, model_artifact_name = get_run_info(opt.resume)
api = wandb.Api()
artifact = api.artifact(entity + '/' + project + '/' + model_artifact_name + ':latest')
modeldir = artifact.download()
opt.weights = str(Path(modeldir) / "last.pt")
return True
return None
def process_wandb_config_ddp_mode(opt):
with open(check_file(opt.data), errors='ignore') as f:
data_dict = yaml.safe_load(f) # data dict
train_dir, val_dir = None, None
if isinstance(data_dict['train'], str) and data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX):
api = wandb.Api()
train_artifact = api.artifact(remove_prefix(data_dict['train']) + ':' + opt.artifact_alias)
train_dir = train_artifact.download()
train_path = Path(train_dir) / 'data/images/'
data_dict['train'] = str(train_path)
if isinstance(data_dict['val'], str) and data_dict['val'].startswith(WANDB_ARTIFACT_PREFIX):
api = wandb.Api()
val_artifact = api.artifact(remove_prefix(data_dict['val']) + ':' + opt.artifact_alias)
val_dir = val_artifact.download()
val_path = Path(val_dir) / 'data/images/'
data_dict['val'] = str(val_path)
if train_dir or val_dir:
ddp_data_path = str(Path(val_dir) / 'wandb_local_data.yaml')
with open(ddp_data_path, 'w') as f:
yaml.safe_dump(data_dict, f)
opt.data = ddp_data_path
class WandbLogger():
"""Log training runs, datasets, models, and predictions to Weights & Biases.
This logger sends information to W&B at wandb.ai. By default, this information
includes hyperparameters, system configuration and metrics, model metrics,
and basic data metrics and analyses.
By providing additional command line arguments to train.py, datasets,
models and predictions can also be logged.
For more on how this logger is used, see the Weights & Biases documentation:
https://docs.wandb.com/guides/integrations/yolov5
"""
def __init__(self, opt, run_id=None, job_type='Training'):
"""
- Initialize WandbLogger instance
- Upload dataset if opt.upload_dataset is True
        - Set up training processes if job_type is 'Training'
arguments:
opt (namespace) -- Commandline arguments for this run
run_id (str) -- Run ID of W&B run to be resumed
job_type (str) -- To set the job_type for this run
"""
# Pre-training routine --
self.job_type = job_type
self.wandb, self.wandb_run = wandb, None if not wandb else wandb.run
self.val_artifact, self.train_artifact = None, None
self.train_artifact_path, self.val_artifact_path = None, None
self.result_artifact = None
self.val_table, self.result_table = None, None
self.bbox_media_panel_images = []
self.val_table_path_map = None
self.max_imgs_to_log = 16
self.wandb_artifact_data_dict = None
self.data_dict = None
# It's more elegant to stick to 1 wandb.init call, but useful config data is overwritten in the WandbLogger's wandb.init call
if isinstance(opt.resume, str): # checks resume from artifact
if opt.resume.startswith(WANDB_ARTIFACT_PREFIX):
entity, project, run_id, model_artifact_name = get_run_info(opt.resume)
model_artifact_name = WANDB_ARTIFACT_PREFIX + model_artifact_name
assert wandb, 'install wandb to resume wandb runs'
# Resume wandb-artifact:// runs here| workaround for not overwriting wandb.config
self.wandb_run = wandb.init(id=run_id,
project=project,
entity=entity,
resume='allow',
allow_val_change=True)
opt.resume = model_artifact_name
elif self.wandb:
self.wandb_run = wandb.init(config=opt,
resume="allow",
project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem,
entity=opt.entity,
name=opt.name if opt.name != 'exp' else None,
job_type=job_type,
id=run_id,
allow_val_change=True) if not wandb.run else wandb.run
if self.wandb_run:
if self.job_type == 'Training':
if opt.upload_dataset:
if not opt.resume:
self.wandb_artifact_data_dict = self.check_and_upload_dataset(opt)
if opt.resume:
# resume from artifact
if isinstance(opt.resume, str) and opt.resume.startswith(WANDB_ARTIFACT_PREFIX):
self.data_dict = dict(self.wandb_run.config.data_dict)
else: # local resume
self.data_dict = check_wandb_dataset(opt.data)
else:
self.data_dict = check_wandb_dataset(opt.data)
self.wandb_artifact_data_dict = self.wandb_artifact_data_dict or self.data_dict
# write data_dict to config. useful for resuming from artifacts. Do this only when not resuming.
self.wandb_run.config.update({'data_dict': self.wandb_artifact_data_dict},
allow_val_change=True)
self.setup_training(opt)
if self.job_type == 'Dataset Creation':
self.data_dict = self.check_and_upload_dataset(opt)
def check_and_upload_dataset(self, opt):
"""
Check if the dataset format is compatible and upload it as W&B artifact
arguments:
opt (namespace)-- Commandline arguments for current run
returns:
        Updated dataset info dictionary where local dataset paths are replaced by WANDB_ARTIFACT_PREFIX links.
"""
assert wandb, 'Install wandb to upload dataset'
config_path = self.log_dataset_artifact(opt.data,
opt.single_cls,
'YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem)
print("Created dataset config file ", config_path)
with open(config_path, errors='ignore') as f:
wandb_data_dict = yaml.safe_load(f)
return wandb_data_dict
def setup_training(self, opt):
"""
        Set up the necessary processes for training YOLO models:
          - Attempt to download model checkpoint and dataset artifacts if opt.resume starts with WANDB_ARTIFACT_PREFIX
          - Update data_dict to contain info of the previous run if resumed, and the paths of dataset artifacts if downloaded
          - Set up log_dict, initialize bbox_interval
arguments:
opt (namespace) -- commandline arguments for this run
"""
self.log_dict, self.current_epoch = {}, 0
self.bbox_interval = opt.bbox_interval
if isinstance(opt.resume, str):
modeldir, _ = self.download_model_artifact(opt)
if modeldir:
self.weights = Path(modeldir) / "last.pt"
config = self.wandb_run.config
opt.weights, opt.save_period, opt.batch_size, opt.bbox_interval, opt.epochs, opt.hyp = str(
self.weights), config.save_period, config.batch_size, config.bbox_interval, config.epochs, \
config.hyp
data_dict = self.data_dict
if self.val_artifact is None: # If --upload_dataset is set, use the existing artifact, don't download
self.train_artifact_path, self.train_artifact = self.download_dataset_artifact(data_dict.get('train'),
opt.artifact_alias)
self.val_artifact_path, self.val_artifact = self.download_dataset_artifact(data_dict.get('val'),
opt.artifact_alias)
if self.train_artifact_path is not None:
train_path = Path(self.train_artifact_path) / 'data/images/'
data_dict['train'] = str(train_path)
if self.val_artifact_path is not None:
val_path = Path(self.val_artifact_path) / 'data/images/'
data_dict['val'] = str(val_path)
if self.val_artifact is not None:
self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation")
self.result_table = wandb.Table(["epoch", "id", "ground truth", "prediction", "avg_confidence"])
self.val_table = self.val_artifact.get("val")
if self.val_table_path_map is None:
self.map_val_table_path()
if opt.bbox_interval == -1:
self.bbox_interval = opt.bbox_interval = (opt.epochs // 10) if opt.epochs > 10 else 1
train_from_artifact = self.train_artifact_path is not None and self.val_artifact_path is not None
        # Update the data_dict to point to the local artifacts dir
if train_from_artifact:
self.data_dict = data_dict
def download_dataset_artifact(self, path, alias):
"""
        download the dataset artifact if the path starts with WANDB_ARTIFACT_PREFIX
arguments:
path -- path of the dataset to be used for training
alias (str)-- alias of the artifact to be download/used for training
returns:
        (str, wandb.Artifact) -- path of the downloaded dataset and its corresponding artifact object if the dataset
        is found, otherwise returns (None, None)
"""
if isinstance(path, str) and path.startswith(WANDB_ARTIFACT_PREFIX):
artifact_path = Path(remove_prefix(path, WANDB_ARTIFACT_PREFIX) + ":" + alias)
dataset_artifact = wandb.use_artifact(artifact_path.as_posix().replace("\\", "/"))
assert dataset_artifact is not None, "'Error: W&B dataset artifact doesn\'t exist'"
datadir = dataset_artifact.download()
return datadir, dataset_artifact
return None, None
def download_model_artifact(self, opt):
"""
download the model checkpoint artifact if the resume path starts with WANDB_ARTIFACT_PREFIX
arguments:
opt (namespace) -- Commandline arguments for this run
"""
if opt.resume.startswith(WANDB_ARTIFACT_PREFIX):
model_artifact = wandb.use_artifact(remove_prefix(opt.resume, WANDB_ARTIFACT_PREFIX) + ":latest")
assert model_artifact is not None, 'Error: W&B model artifact doesn\'t exist'
modeldir = model_artifact.download()
epochs_trained = model_artifact.metadata.get('epochs_trained')
total_epochs = model_artifact.metadata.get('total_epochs')
is_finished = total_epochs is None
assert not is_finished, 'training is finished, can only resume incomplete runs.'
return modeldir, model_artifact
return None, None
def log_model(self, path, opt, epoch, fitness_score, best_model=False):
"""
Log the model checkpoint as W&B artifact
arguments:
path (Path) -- Path of directory containing the checkpoints
opt (namespace) -- Command line arguments for this run
epoch (int) -- Current epoch number
fitness_score (float) -- fitness score for current epoch
best_model (boolean) -- Boolean representing if the current checkpoint is the best yet.
"""
model_artifact = wandb.Artifact('run_' + wandb.run.id + '_model', type='model', metadata={
'original_url': str(path),
'epochs_trained': epoch + 1,
'save period': opt.save_period,
'project': opt.project,
'total_epochs': opt.epochs,
'fitness_score': fitness_score
})
model_artifact.add_file(str(path / 'last.pt'), name='last.pt')
wandb.log_artifact(model_artifact,
aliases=['latest', 'last', 'epoch ' + str(self.current_epoch), 'best' if best_model else ''])
print("Saving model artifact on epoch ", epoch + 1)
def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config=False):
"""
Log the dataset as W&B artifact and return the new data file with W&B links
arguments:
data_file (str) -- the .yaml file with information about the dataset like - path, classes etc.
        single_cls (boolean) -- train multi-class data as single-class
project (str) -- project name. Used to construct the artifact path
overwrite_config (boolean) -- overwrites the data.yaml file if set to true otherwise creates a new
file with _wandb postfix. Eg -> data_wandb.yaml
returns:
the new .yaml file with artifact links. it can be used to start training directly from artifacts
"""
self.data_dict = check_dataset(data_file) # parse and check
data = dict(self.data_dict)
nc, names = (1, ['item']) if single_cls else (int(data['nc']), data['names'])
names = {k: v for k, v in enumerate(names)} # to index dictionary
self.train_artifact = self.create_dataset_table(LoadImagesAndLabels(
data['train'], rect=True, batch_size=1), names, name='train') if data.get('train') else None
self.val_artifact = self.create_dataset_table(LoadImagesAndLabels(
data['val'], rect=True, batch_size=1), names, name='val') if data.get('val') else None
if data.get('train'):
data['train'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'train')
if data.get('val'):
data['val'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'val')
path = Path(data_file).stem
path = (path if overwrite_config else path + '_wandb') + '.yaml' # updated data.yaml path
data.pop('download', None)
data.pop('path', None)
with open(path, 'w') as f:
yaml.safe_dump(data, f)
if self.job_type == 'Training': # builds correct artifact pipeline graph
self.wandb_run.use_artifact(self.val_artifact)
self.wandb_run.use_artifact(self.train_artifact)
self.val_artifact.wait()
self.val_table = self.val_artifact.get('val')
self.map_val_table_path()
else:
self.wandb_run.log_artifact(self.train_artifact)
self.wandb_run.log_artifact(self.val_artifact)
return path
def map_val_table_path(self):
"""
        Map the validation dataset Table: file name -> its id in the W&B Table.
        Useful for referencing artifacts for evaluation.
"""
self.val_table_path_map = {}
print("Mapping dataset")
for i, data in enumerate(tqdm(self.val_table.data)):
self.val_table_path_map[data[3]] = data[0]
def create_dataset_table(self, dataset, class_to_id, name='dataset'):
"""
Create and return W&B artifact containing W&B Table of the dataset.
arguments:
dataset (LoadImagesAndLabels) -- instance of LoadImagesAndLabels class used to iterate over the data to build Table
class_to_id (dict(int, str)) -- hash map that maps class ids to labels
name (str) -- name of the artifact
returns:
dataset artifact to be logged or used
"""
        # TODO: Explore multiprocessing to split this loop in parallel | This is essential for speeding up the logging
artifact = wandb.Artifact(name=name, type="dataset")
img_files = tqdm([dataset.path]) if isinstance(dataset.path, str) and Path(dataset.path).is_dir() else None
img_files = tqdm(dataset.img_files) if not img_files else img_files
for img_file in img_files:
if Path(img_file).is_dir():
artifact.add_dir(img_file, name='data/images')
labels_path = 'labels'.join(dataset.path.rsplit('images', 1))
artifact.add_dir(labels_path, name='data/labels')
else:
artifact.add_file(img_file, name='data/images/' + Path(img_file).name)
label_file = Path(img2label_paths([img_file])[0])
artifact.add_file(str(label_file),
name='data/labels/' + label_file.name) if label_file.exists() else None
table = wandb.Table(columns=["id", "train_image", "Classes", "name"])
class_set = wandb.Classes([{'id': id, 'name': name} for id, name in class_to_id.items()])
for si, (img, labels, paths, shapes) in enumerate(tqdm(dataset)):
box_data, img_classes = [], {}
for cls, *xywh in labels[:, 1:].tolist():
cls = int(cls)
box_data.append({"position": {"middle": [xywh[0], xywh[1]], "width": xywh[2], "height": xywh[3]},
"class_id": cls,
"box_caption": "%s" % (class_to_id[cls])})
img_classes[cls] = class_to_id[cls]
boxes = {"ground_truth": {"box_data": box_data, "class_labels": class_to_id}} # inference-space
table.add_data(si, wandb.Image(paths, classes=class_set, boxes=boxes), list(img_classes.values()),
Path(paths).name)
artifact.add(table, name)
return artifact
def log_training_progress(self, predn, path, names):
"""
Build evaluation Table. Uses reference from validation dataset table.
arguments:
predn (list): list of predictions in the native space in the format - [xmin, ymin, xmax, ymax, confidence, class]
path (str): local path of the current evaluation image
names (dict(int, str)): hash map that maps class ids to labels
"""
class_set = wandb.Classes([{'id': id, 'name': name} for id, name in names.items()])
box_data = []
total_conf = 0
for *xyxy, conf, cls in predn.tolist():
if conf >= 0.25:
box_data.append(
{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
"class_id": int(cls),
"box_caption": "%s %.3f" % (names[cls], conf),
"scores": {"class_score": conf},
"domain": "pixel"})
total_conf = total_conf + conf
boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space
id = self.val_table_path_map[Path(path).name]
self.result_table.add_data(self.current_epoch,
id,
self.val_table.data[id][1],
wandb.Image(self.val_table.data[id][1], boxes=boxes, classes=class_set),
total_conf / max(1, len(box_data))
)
def val_one_image(self, pred, predn, path, names, im):
"""
        Log validation data for one image. Updates the result Table if the validation dataset is uploaded and logs the bbox media panel.
arguments:
pred (list): list of scaled predictions in the format - [xmin, ymin, xmax, ymax, confidence, class]
predn (list): list of predictions in the native space - [xmin, ymin, xmax, ymax, confidence, class]
path (str): local path of the current evaluation image
"""
if self.val_table and self.result_table: # Log Table if Val dataset is uploaded as artifact
self.log_training_progress(predn, path, names)
if len(self.bbox_media_panel_images) < self.max_imgs_to_log and self.current_epoch > 0:
if self.current_epoch % self.bbox_interval == 0:
box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
"class_id": int(cls),
"box_caption": "%s %.3f" % (names[cls], conf),
"scores": {"class_score": conf},
"domain": "pixel"} for *xyxy, conf, cls in pred.tolist()]
boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space
self.bbox_media_panel_images.append(wandb.Image(im, boxes=boxes, caption=path.name))
def log(self, log_dict):
"""
save the metrics to the logging dictionary
arguments:
log_dict (Dict) -- metrics/media to be logged in current step
"""
if self.wandb_run:
for key, value in log_dict.items():
self.log_dict[key] = value
def end_epoch(self, best_result=False):
"""
commit the log_dict, model artifacts and Tables to W&B and flush the log_dict.
arguments:
best_result (boolean): Boolean representing if the result of this evaluation is best or not
"""
if self.wandb_run:
with all_logging_disabled():
if self.bbox_media_panel_images:
self.log_dict["Bounding Box Debugger/Images"] = self.bbox_media_panel_images
wandb.log(self.log_dict)
self.log_dict = {}
self.bbox_media_panel_images = []
if self.result_artifact:
self.result_artifact.add(self.result_table, 'result')
wandb.log_artifact(self.result_artifact, aliases=['latest', 'last', 'epoch ' + str(self.current_epoch),
('best' if best_result else '')])
wandb.log({"evaluation": self.result_table})
self.result_table = wandb.Table(["epoch", "id", "ground truth", "prediction", "avg_confidence"])
self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation")
def finish_run(self):
"""
Log metrics if any and finish the current W&B run
"""
if self.wandb_run:
if self.log_dict:
with all_logging_disabled():
wandb.log(self.log_dict)
wandb.run.finish()
@contextmanager
def all_logging_disabled(highest_level=logging.CRITICAL):
""" source - https://gist.github.com/simon-weber/7853144
A context manager that will prevent any logging messages triggered during the body from being processed.
:param highest_level: the maximum logging level in use.
This would only need to be changed if a custom level greater than CRITICAL is defined.
"""
previous_level = logging.root.manager.disable
logging.disable(highest_level)
try:
yield
finally:
        logging.disable(previous_level)
# 基于本地知识库的 ChatGLM 等大语言模型应用实现
## 介绍
🌍 [_READ THIS IN ENGLISH_](README_en.md)
🤖️ 一种利用 [langchain](https://github.com/hwchase17/langchain) 思想实现的基于本地知识库的问答应用,目标期望建立一套对中文场景与开源模型支持友好、可离线运行的知识库问答解决方案。
💡 受 [GanymedeNil](https://github.com/GanymedeNil) 的项目 [document.ai](https://github.com/GanymedeNil/document.ai) 和 [AlexZhangji](https://github.com/AlexZhangji) 创建的 [ChatGLM-6B Pull Request](https://github.com/THUDM/ChatGLM-6B/pull/216) 启发,建立了全流程可使用开源模型实现的本地知识库问答应用。现已支持使用 [ChatGLM-6B](https://github.com/THUDM/ChatGLM-6B)、 [ClueAI/ChatYuan-large-v2](https://huggingface.co/ClueAI/ChatYuan-large-v2) 等大语言模型的接入。
✅ 本项目中 Embedding 默认选用的是 [GanymedeNil/text2vec-large-chinese](https://huggingface.co/GanymedeNil/text2vec-large-chinese/tree/main),LLM 默认选用的是 [ChatGLM-6B](https://github.com/THUDM/ChatGLM-6B)。依托上述模型,本项目可实现全部使用**开源**模型**离线私有部署**。
⛓️ 本项目实现原理如下图所示,过程包括加载文件 -> 读取文本 -> 文本分割 -> 文本向量化 -> 问句向量化 -> 在文本向量中匹配出与问句向量最相似的`top k`个 -> 匹配出的文本作为上下文和问题一起添加到`prompt`中 -> 提交给`LLM`生成回答。

从文档处理角度来看,实现流程如下:

🚩 本项目未涉及微调、训练过程,但可利用微调或训练对本项目效果进行优化。
🌐 [AutoDL 镜像](https://www.codewithgpu.com/i/imClumsyPanda/langchain-ChatGLM/langchain-ChatGLM)
📓 [ModelWhale 在线运行项目](https://www.heywhale.com/mw/project/643977aa446c45f4592a1e59)
## 变更日志
参见 [变更日志](docs/CHANGELOG.md)。
## 硬件需求
- ChatGLM-6B 模型硬件需求
注:如未将模型下载至本地,请执行前检查`$HOME/.cache/huggingface/`文件夹剩余空间,模型文件下载至本地需要 15 GB 存储空间。
模型下载方法可参考 [常见问题](docs/FAQ.md) 中 Q8。
| **量化等级** | **最低 GPU 显存**(推理) | **最低 GPU 显存**(高效参数微调) |
| -------------- | ------------------------- | --------------------------------- |
| FP16(无量化) | 13 GB | 14 GB |
| INT8 | 8 GB | 9 GB |
| INT4 | 6 GB | 7 GB |
- MOSS 模型硬件需求
注:如未将模型下载至本地,请执行前检查`$HOME/.cache/huggingface/`文件夹剩余空间,模型文件下载至本地需要 70 GB 存储空间
模型下载方法可参考 [常见问题](docs/FAQ.md) 中 Q8。
| **量化等级** | **最低 GPU 显存**(推理) | **最低 GPU 显存**(高效参数微调) |
|-------------------|-----------------------| --------------------------------- |
| FP16(无量化) | 68 GB | - |
| INT8 | 20 GB | - |
- Embedding 模型硬件需求
本项目中默认选用的 Embedding 模型 [GanymedeNil/text2vec-large-chinese](https://huggingface.co/GanymedeNil/text2vec-large-chinese/tree/main) 约占用显存 3GB,也可修改为在 CPU 中运行。
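For reference, the default embedding model can be loaded on the CPU with a langchain call roughly like the following (a sketch only — the project itself reads the device setting from `configs/model_config.py`):

```python
from langchain.embeddings.huggingface import HuggingFaceEmbeddings

# run the embedding model on CPU instead of GPU (illustrative configuration)
embeddings = HuggingFaceEmbeddings(
    model_name="GanymedeNil/text2vec-large-chinese",
    model_kwargs={"device": "cpu"},
)
```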
## Docker 部署
为了能让容器使用主机GPU资源,需要在主机上安装 [NVIDIA Container Toolkit](https://github.com/NVIDIA/nvidia-container-toolkit)。具体安装步骤如下:
```shell
sudo apt-get update
sudo apt-get install -y nvidia-container-toolkit-base
sudo systemctl daemon-reload
sudo systemctl restart docker
```
安装完成后,可以使用以下命令编译镜像和启动容器:
```
docker build -f Dockerfile-cuda -t chatglm-cuda:latest .
docker run --gpus all -d --name chatglm -p 7860:7860 chatglm-cuda:latest
#若要使用离线模型,请配置好模型路径,然后此repo挂载到Container
docker run --gpus all -d --name chatglm -p 7860:7860 -v ~/github/langchain-ChatGLM:/chatGLM chatglm-cuda:latest
```
## 开发部署
### 软件需求
本项目已在 Python 3.8 - 3.10,CUDA 11.7 环境下完成测试。已在 Windows、ARM 架构的 macOS、Linux 系统中完成测试。
vue前端需要node18环境
### 从本地加载模型
请参考 [THUDM/ChatGLM-6B#从本地加载模型](https://github.com/THUDM/ChatGLM-6B#从本地加载模型)
### 1. 安装环境
参见 [安装指南](docs/INSTALL.md)。
### 2. 设置模型默认参数
在开始执行 Web UI 或命令行交互前,请先检查 [configs/model_config.py](configs/model_config.py) 中的各项模型参数设计是否符合需求。
### 3. 执行脚本体验 Web UI 或命令行交互
> 注:鉴于环境部署过程中可能遇到问题,建议首先测试命令行脚本。建议命令行脚本测试可正常运行后再运行 Web UI。
执行 [cli_demo.py](cli_demo.py) 脚本体验**命令行交互**:
```shell
$ python cli_demo.py
```
或执行 [webui.py](webui.py) 脚本体验 **Web 交互**
```shell
$ python webui.py
```
或执行 [api.py](api.py) 利用 fastapi 部署 API
```shell
$ python api.py
```
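Once the API is running (default port 7861), it can be exercised with a short client script such as the one below (an illustrative sketch — host, port, file path and knowledge base name are assumptions):

```python
import requests

base = "http://localhost:7861"

# plain LLM chat
r = requests.post(f"{base}/chat", json={"question": "你好", "history": []})
print(r.json()["response"])

# upload a document into a knowledge base, then ask a question against it
with open("docs/CHANGELOG.md", "rb") as f:
    requests.post(f"{base}/local_doc_qa/upload_file",
                  files={"file": f}, data={"knowledge_base_id": "kb1"})
r = requests.post(f"{base}/local_doc_qa/local_doc_chat",
                  json={"knowledge_base_id": "kb1", "question": "工伤保险如何办理?", "history": []})
print(r.json()["response"])
```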
或成功部署 API 后,执行以下脚本体验基于 VUE 的前端页面
```shell
$ cd views
$ pnpm i
$ npm run dev
```
执行后效果如下图所示:
1. `对话` Tab 界面

2. `知识库测试 Beta` Tab 界面

3. `模型配置` Tab 界面

Web UI 可以实现如下功能:
1. 运行前自动读取`configs/model_config.py`中`LLM`及`Embedding`模型枚举及默认模型设置运行模型,如需重新加载模型,可在 `模型配置` Tab 重新选择后点击 `重新加载模型` 进行模型加载;
2. 可手动调节保留对话历史长度、匹配知识库文段数量,可根据显存大小自行调节;
3. `对话` Tab 具备模式选择功能,可选择 `LLM对话` 与 `知识库问答` 模式进行对话,支持流式对话;
4. 添加 `配置知识库` 功能,支持选择已有知识库或新建知识库,并可向知识库中**新增**上传文件/文件夹,使用文件上传组件选择好文件后点击 `上传文件并加载知识库`,会将所选上传文档数据加载至知识库中,并基于更新后知识库进行问答;
5. 新增 `知识库测试 Beta` Tab,可用于测试不同文本切分方法与检索相关度阈值设置,暂不支持将测试参数作为 `对话` Tab 设置参数。
6. 后续版本中将会增加对知识库的修改或删除,及知识库中已导入文件的查看。
### 常见问题
参见 [常见问题](docs/FAQ.md)。
## Demo
以问题`chatglm-6b 的局限性具体体现在哪里,如何实现改进`为例:
未使用 langchain 接入本地文档时:
>ChatGLM-6B 是一个基于 GPT-3 的聊天机器人,它的局限性主要体现在以下几个方面:
>1. 语言模型的精度:GPT-3 是一种大型的语言模型,它的精度比较高,但是仍然有一些局限性。例如,它可能无法准确地理解某些方言、口音或者复杂的句子结构。
>2. 知识图谱的完整性:GPT-3 可以生成一些知识图谱,但是其完整性和准确性可能不够。例如,它可能无法准确地理解某些领域的专业知识和术语。
>3. 对话生成的连贯性:GPT-3 可以生成一些连贯的对话,但是其生成的对话可能不够自然。例如,它可能会生成一些虚假的、不连贯的对话或者误解用户的意图。
>
>为了改进 ChatGLM-6B,可以考虑以下几个方面:
>1. 提高语言模型的精度:可以通过增加训练数据、使用更好的语言模型架构、优化模型训练过程等方式来提高语言模型的精度。
>2. 增强知识图谱的完整性:可以通过增加知识图谱的维度、使用更好的知识图谱构建方法、优化知识图谱的匹配算法等方式来增强知识图谱的完整性。
>3. 提高对话生成的连贯性:可以通过增加对话生成模型的上下文信息、使用更好的上下文信息提取方法、优化对话生成模型的算法等方式来提高对话生成的连贯性。
>4. 引入更多的人类反馈:人类反馈可以帮助 ChatGLM-6B 更好地理解用户的意图和需求,从而提高对话生成的质量和准确性。可以通过引入更多的人类客服、使用人类反馈收集器等方式来引入更多的人类反馈。
使用 langchain 接入 ChatGLM-6B 项目的 README.md 文件后 ChatGLM 的回答:
>ChatGLM-6B 是一个基于深度学习的自然语言处理模型,它在回答问题方面表现出色。但是,它也有一些局限性,具体如下:
>1. 语言理解有限:ChatGLM-6B 只学习了中文自然语言,对于其他语言可能会存在理解上的局限性。
>2. 知识储备不足:ChatGLM-6B 的训练数据集只包含了中文语料,因此它可能无法回答一些非中文的问题或者某些特定领域的问题。
>3. 数据量有限:ChatGLM-6B 的训练数据集只有几百万条记录,因此它可能无法回答一些非常具体或者复杂的问题。
>
>为了改进 ChatGLM-6B,可以考虑以下几个方面:
>1. 学习更多的语言知识:可以学习其他语言的自然语言处理技术,扩大语言理解的范围。
>2. 扩大知识储备:可以收集更多的中文语料,或者使用其他语言的数据集来扩充知识储备。
>3. 增加数据量:可以使用更大的数据集来训练 ChatGLM-6B,提高模型的表现。
>4. 引入更多的评估指标:可以引入更多的评估指标来评估模型的表现,从而发现 ChatGLM-6B 存在的不足和局限性。
>5. 改进模型架构:可以改进 ChatGLM-6B 的模型架构,提高模型的性能和表现。例如,可以使用更大的神经网络或者改进的卷积神经网络结构。
## 路线图
- [ ] Langchain 应用
- [x] 接入非结构化文档(已支持 md、pdf、docx、txt 文件格式)
- [x] jpg 与 png 格式图片的 OCR 文字识别
- [ ] 搜索引擎与本地网页接入
- [ ] 结构化数据接入(如 csv、Excel、SQL 等)
- [ ] 知识图谱/图数据库接入
- [ ] Agent 实现
- [ ] 增加更多 LLM 模型支持
- [x] [THUDM/chatglm-6b](https://huggingface.co/THUDM/chatglm-6b)
- [x] [THUDM/chatglm-6b-int8](https://huggingface.co/THUDM/chatglm-6b-int8)
- [x] [THUDM/chatglm-6b-int4](https://huggingface.co/THUDM/chatglm-6b-int4)
- [x] [THUDM/chatglm-6b-int4-qe](https://huggingface.co/THUDM/chatglm-6b-int4-qe)
- [x] [ClueAI/ChatYuan-large-v2](https://huggingface.co/ClueAI/ChatYuan-large-v2)
- [x] [fnlp/moss-moon-003-sft](https://huggingface.co/fnlp/moss-moon-003-sft)
- [ ] 增加更多 Embedding 模型支持
- [x] [nghuyong/ernie-3.0-nano-zh](https://huggingface.co/nghuyong/ernie-3.0-nano-zh)
- [x] [nghuyong/ernie-3.0-base-zh](https://huggingface.co/nghuyong/ernie-3.0-base-zh)
- [x] [shibing624/text2vec-base-chinese](https://huggingface.co/shibing624/text2vec-base-chinese)
- [x] [GanymedeNil/text2vec-large-chinese](https://huggingface.co/GanymedeNil/text2vec-large-chinese)
- [ ] Web UI
- [x] 利用 gradio 实现 Web UI DEMO
- [x] 添加输出内容及错误提示
- [x] 引用标注
- [ ] 增加知识库管理
- [x] 选择知识库开始问答
- [x] 上传文件/文件夹至知识库
- [x] 知识库测试
- [ ] 删除知识库中文件
- [ ] 利用 streamlit 实现 Web UI Demo
- [ ] 增加 API 支持
- [x] 利用 fastapi 实现 API 部署方式
- [ ] 实现调用 API 的 Web UI Demo
## 项目交流群

🎉 langchain-ChatGLM 项目交流群,如果你也对本项目感兴趣,欢迎加入群聊参与讨论交流。
import os
import shutil
from .chains.local_doc_qa import LocalDocQA
from .configs.model_config import *
def construct_vector_store(vs_id, files, sentence_size, history, one_conent, one_content_segmentation, text2vec):
for file in files:
assert os.path.exists(file), "输入文件不存在"
import nltk
if NLTK_DATA_PATH not in nltk.data.path: nltk.data.path = [NLTK_DATA_PATH] + nltk.data.path
local_doc_qa = LocalDocQA()
local_doc_qa.init_cfg()
vs_path = os.path.join(VS_ROOT_PATH, vs_id)
filelist = []
if not os.path.exists(os.path.join(UPLOAD_ROOT_PATH, vs_id)):
os.makedirs(os.path.join(UPLOAD_ROOT_PATH, vs_id))
if isinstance(files, list):
for file in files:
file_name = file.name if not isinstance(file, str) else file
filename = os.path.split(file_name)[-1]
shutil.copyfile(file_name, os.path.join(UPLOAD_ROOT_PATH, vs_id, filename))
filelist.append(os.path.join(UPLOAD_ROOT_PATH, vs_id, filename))
vs_path, loaded_files = local_doc_qa.init_knowledge_vector_store(filelist, vs_path, sentence_size, text2vec)
else:
vs_path, loaded_files = local_doc_qa.one_knowledge_add(vs_path, files, one_conent, one_content_segmentation,
sentence_size, text2vec)
if len(loaded_files):
file_status = f"已添加 {'、'.join([os.path.split(i)[-1] for i in loaded_files if i])} 内容至知识库,并已加载知识库,请开始提问"
else:
pass
# file_status = "文件未成功加载,请重新上传文件"
# print(file_status)
return local_doc_qa, vs_path
def get_prompt(query, vs_path, score_threshold=VECTOR_SEARCH_SCORE_THRESHOLD,
vector_search_top_k=VECTOR_SEARCH_TOP_K, chunk_conent: bool = True,
chunk_size=CHUNK_SIZE):
resp, prompt = local_doc_qa.get_knowledge_based_conent_test(query=query, vs_path=vs_path,
score_threshold=score_threshold,
vector_search_top_k=vector_search_top_k,
chunk_conent=chunk_conent,
chunk_size=chunk_size)
return resp, prompt
if __name__ == '__main__':
    # construct_vector_store() requires a text2vec embeddings object; the instance built
    # below is an illustrative assumption (any langchain-compatible embeddings object works).
    from langchain.embeddings.huggingface import HuggingFaceEmbeddings
    text2vec = HuggingFaceEmbeddings(model_name="GanymedeNil/text2vec-large-chinese")
    local_doc_qa, vs_path = construct_vector_store(
        vs_id='bit',
        files=["docs/CHANGELOG.md", "docs/Issue-with-Installing-Packages-Using-pip-in-Anaconda.md"],
        sentence_size=100,
        history=[],
        one_conent="",
        one_content_segmentation="",
        text2vec=text2vec,
    )
resp, prompt = get_prompt(
query = "hello world",
vs_path = vs_path,
score_threshold=VECTOR_SEARCH_SCORE_THRESHOLD,
vector_search_top_k=VECTOR_SEARCH_TOP_K,
chunk_conent=True,
chunk_size=CHUNK_SIZE,
)
print(resp)
print(prompt)
    input()
import gradio as gr
import os
import shutil
from .chains.local_doc_qa import LocalDocQA
from .configs.model_config import *
import nltk
nltk.data.path = [NLTK_DATA_PATH] + nltk.data.path
def get_vs_list():
lst_default = ["新建知识库"]
if not os.path.exists(VS_ROOT_PATH):
return lst_default
lst = os.listdir(VS_ROOT_PATH)
if not lst:
return lst_default
lst.sort()
return lst_default + lst
vs_list = get_vs_list()
embedding_model_dict_list = list(embedding_model_dict.keys())
llm_model_dict_list = list(llm_model_dict.keys())
local_doc_qa = LocalDocQA()
flag_csv_logger = gr.CSVLogger()
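# get_answer drives the three chat modes of the UI: plain LLM chat, knowledge-base QA
# (answers with collapsible source citations) and the "knowledge base test" mode, which
# only returns the matched chunks together with their similarity scores.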
def get_answer(query, vs_path, history, mode, score_threshold=VECTOR_SEARCH_SCORE_THRESHOLD,
vector_search_top_k=VECTOR_SEARCH_TOP_K, chunk_conent: bool = True,
chunk_size=CHUNK_SIZE, streaming: bool = STREAMING):
if mode == "知识库问答" and vs_path is not None and os.path.exists(vs_path):
for resp, history in local_doc_qa.get_knowledge_based_answer(
query=query, vs_path=vs_path, chat_history=history, streaming=streaming):
source = "\n\n"
source += "".join(
[f"""<details> <summary>出处 [{i + 1}] {os.path.split(doc.metadata["source"])[-1]}</summary>\n"""
f"""{doc.page_content}\n"""
f"""</details>"""
for i, doc in
enumerate(resp["source_documents"])])
history[-1][-1] += source
yield history, ""
elif mode == "知识库测试":
if os.path.exists(vs_path):
resp, prompt = local_doc_qa.get_knowledge_based_conent_test(query=query, vs_path=vs_path,
score_threshold=score_threshold,
vector_search_top_k=vector_search_top_k,
chunk_conent=chunk_conent,
chunk_size=chunk_size)
if not resp["source_documents"]:
yield history + [[query,
"根据您的设定,没有匹配到任何内容,请确认您设置的知识相关度 Score 阈值是否过小或其他参数是否正确。"]], ""
else:
source = "\n".join(
[
f"""<details open> <summary>【知识相关度 Score】:{doc.metadata["score"]} - 【出处{i + 1}】: {os.path.split(doc.metadata["source"])[-1]} </summary>\n"""
f"""{doc.page_content}\n"""
f"""</details>"""
for i, doc in
enumerate(resp["source_documents"])])
history.append([query, "以下内容为知识库中满足设置条件的匹配结果:\n\n" + source])
yield history, ""
else:
yield history + [[query,
"请选择知识库后进行测试,当前未选择知识库。"]], ""
else:
for resp, history in local_doc_qa.llm._call(query, history, streaming=streaming):
history[-1][-1] = resp + (
"\n\n当前知识库为空,如需基于知识库进行问答,请先加载知识库后,再进行提问。" if mode == "知识库问答" else "")
yield history, ""
logger.info(f"flagging: username={FLAG_USER_NAME},query={query},vs_path={vs_path},mode={mode},history={history}")
flag_csv_logger.flag([query, vs_path, history, mode], username=FLAG_USER_NAME)
def init_model():
try:
local_doc_qa.init_cfg()
local_doc_qa.llm._call("你好")
reply = """模型已成功加载,可以开始对话,或从右侧选择模式后开始对话"""
logger.info(reply)
return reply
except Exception as e:
logger.error(e)
reply = """模型未成功加载,请到页面左上角"模型配置"选项卡中重新选择后点击"加载模型"按钮"""
if str(e) == "Unknown platform: darwin":
logger.info("该报错可能因为您使用的是 macOS 操作系统,需先下载模型至本地后执行 Web UI,具体方法请参考项目 README 中本地部署方法及常见问题:"
" https://github.com/imClumsyPanda/langchain-ChatGLM")
else:
logger.info(reply)
return reply
def reinit_model(llm_model, embedding_model, llm_history_len, use_ptuning_v2, use_lora, top_k, history):
try:
local_doc_qa.init_cfg(llm_model=llm_model,
embedding_model=embedding_model,
llm_history_len=llm_history_len,
use_ptuning_v2=use_ptuning_v2,
use_lora=use_lora,
top_k=top_k, )
model_status = """模型已成功重新加载,可以开始对话,或从右侧选择模式后开始对话"""
logger.info(model_status)
except Exception as e:
logger.error(e)
model_status = """模型未成功重新加载,请到页面左上角"模型配置"选项卡中重新选择后点击"加载模型"按钮"""
logger.info(model_status)
return history + [[None, model_status]]
def get_vector_store(vs_id, files, sentence_size, history, one_conent, one_content_segmentation):
vs_path = os.path.join(VS_ROOT_PATH, vs_id)
filelist = []
if not os.path.exists(os.path.join(UPLOAD_ROOT_PATH, vs_id)):
os.makedirs(os.path.join(UPLOAD_ROOT_PATH, vs_id))
if isinstance(files, list):
for file in files:
filename = os.path.split(file.name)[-1]
shutil.move(file.name, os.path.join(UPLOAD_ROOT_PATH, vs_id, filename))
filelist.append(os.path.join(UPLOAD_ROOT_PATH, vs_id, filename))
vs_path, loaded_files = local_doc_qa.init_knowledge_vector_store(filelist, vs_path, sentence_size)
else:
vs_path, loaded_files = local_doc_qa.one_knowledge_add(vs_path, files, one_conent, one_content_segmentation,
sentence_size)
if len(loaded_files):
file_status = f"已添加 {'、'.join([os.path.split(i)[-1] for i in loaded_files if i])} 内容至知识库,并已加载知识库,请开始提问"
else:
file_status = "文件未成功加载,请重新上传文件"
logger.info(file_status)
return vs_path, None, history + [[None, file_status]]
# sentence_size 100
# vs_id 知识库名字
# files 文件路径
# history 历史对话
# one_conent '添加至知识库选项'
def change_vs_name_input(vs_id, history):
if vs_id == "新建知识库":
return gr.update(visible=True), gr.update(visible=True), gr.update(visible=False), None, history
else:
file_status = f"已加载知识库{vs_id},请开始提问"
return gr.update(visible=False), gr.update(visible=False), gr.update(visible=True), os.path.join(VS_ROOT_PATH,
vs_id), history + [
[None, file_status]]
knowledge_base_test_mode_info = ("【注意】\n\n"
"1. 您已进入知识库测试模式,您输入的任何对话内容都将用于进行知识库查询,"
"并仅输出知识库匹配出的内容及相似度分值和及输入的文本源路径,查询的内容并不会进入模型查询。\n\n"
"2. 知识相关度 Score 经测试,建议设置为 500 或更低,具体设置情况请结合实际使用调整。"
"""3. 使用"添加单条数据"添加文本至知识库时,内容如未分段,则内容越多越会稀释各查询内容与之关联的score阈值。\n\n"""
"4. 单条内容长度建议设置在100-150左右。\n\n"
"5. 本界面用于知识入库及知识匹配相关参数设定,但当前版本中,"
"本界面中修改的参数并不会直接修改对话界面中参数,仍需前往`configs/model_config.py`修改后生效。"
"相关参数将在后续版本中支持本界面直接修改。")
def change_mode(mode, history):
if mode == "知识库问答":
return gr.update(visible=True), gr.update(visible=False), history
# + [[None, "【注意】:您已进入知识库问答模式,您输入的任何查询都将进行知识库查询,然后会自动整理知识库关联内容进入模型查询!!!"]]
elif mode == "知识库测试":
return gr.update(visible=True), gr.update(visible=True), [[None,
knowledge_base_test_mode_info]]
else:
return gr.update(visible=False), gr.update(visible=False), history
def change_chunk_conent(mode, label_conent, history):
conent = ""
if "chunk_conent" in label_conent:
conent = "搜索结果上下文关联"
elif "one_content_segmentation" in label_conent: # 这里没用上,可以先留着
conent = "内容分段入库"
if mode:
return gr.update(visible=True), history + [[None, f"【已开启{conent}】"]]
else:
return gr.update(visible=False), history + [[None, f"【已关闭{conent}】"]]
def add_vs_name(vs_name, vs_list, chatbot):
if vs_name in vs_list:
vs_status = "与已有知识库名称冲突,请重新选择其他名称后提交"
chatbot = chatbot + [[None, vs_status]]
return gr.update(visible=True), vs_list, gr.update(visible=True), gr.update(visible=True), gr.update(
visible=False), chatbot
else:
vs_status = f"""已新增知识库"{vs_name}",将在上传文件并载入成功后进行存储。请在开始对话前,先完成文件上传。 """
chatbot = chatbot + [[None, vs_status]]
return gr.update(visible=True, choices=[vs_name] + vs_list, value=vs_name), [vs_name] + vs_list, gr.update(
visible=False), gr.update(visible=False), gr.update(visible=True), chatbot
block_css = """.importantButton {
background: linear-gradient(45deg, #7e0570,#5d1c99, #6e00ff) !important;
border: none !important;
}
.importantButton:hover {
background: linear-gradient(45deg, #ff00e0,#8500ff, #6e00ff) !important;
border: none !important;
}"""
webui_title = """
# 🎉langchain-ChatGLM WebUI🎉
👍 [https://github.com/imClumsyPanda/langchain-ChatGLM](https://github.com/imClumsyPanda/langchain-ChatGLM)
"""
default_vs = vs_list[0] if len(vs_list) > 1 else "为空"
init_message = f"""欢迎使用 langchain-ChatGLM Web UI!
请在右侧切换模式,目前支持直接与 LLM 模型对话或基于本地知识库问答。
知识库问答模式,选择知识库名称后,即可开始问答,当前知识库{default_vs},如有需要可以在选择知识库名称后上传文件/文件夹至知识库。
知识库暂不支持文件删除,该功能将在后续版本中推出。
"""
model_status = init_model()
default_theme_args = dict(
font=["Source Sans Pro", 'ui-sans-serif', 'system-ui', 'sans-serif'],
font_mono=['IBM Plex Mono', 'ui-monospace', 'Consolas', 'monospace'],
)
with gr.Blocks(css=block_css, theme=gr.themes.Default(**default_theme_args)) as demo:
vs_path, file_status, model_status, vs_list = gr.State(
os.path.join(VS_ROOT_PATH, vs_list[0]) if len(vs_list) > 1 else ""), gr.State(""), gr.State(
model_status), gr.State(vs_list)
gr.Markdown(webui_title)
with gr.Tab("对话"):
with gr.Row():
with gr.Column(scale=10):
chatbot = gr.Chatbot([[None, init_message], [None, model_status.value]],
elem_id="chat-box",
show_label=False).style(height=750)
query = gr.Textbox(show_label=False,
placeholder="请输入提问内容,按回车进行提交").style(container=False)
with gr.Column(scale=5):
mode = gr.Radio(["LLM 对话", "知识库问答"],
label="请选择使用模式",
value="知识库问答", )
knowledge_set = gr.Accordion("知识库设定", visible=False)
vs_setting = gr.Accordion("配置知识库")
mode.change(fn=change_mode,
inputs=[mode, chatbot],
outputs=[vs_setting, knowledge_set, chatbot])
with vs_setting:
select_vs = gr.Dropdown(vs_list.value,
label="请选择要加载的知识库",
interactive=True,
value=vs_list.value[0] if len(vs_list.value) > 0 else None
)
vs_name = gr.Textbox(label="请输入新建知识库名称,当前知识库命名暂不支持中文",
lines=1,
interactive=True,
visible=True)
vs_add = gr.Button(value="添加至知识库选项", visible=True)
file2vs = gr.Column(visible=False)
with file2vs:
# load_vs = gr.Button("加载知识库")
gr.Markdown("向知识库中添加文件")
sentence_size = gr.Number(value=SENTENCE_SIZE, precision=0,
label="文本入库分句长度限制",
interactive=True, visible=True)
with gr.Tab("上传文件"):
files = gr.File(label="添加文件",
file_types=['.txt', '.md', '.docx', '.pdf'],
file_count="multiple",
show_label=False)
load_file_button = gr.Button("上传文件并加载知识库")
with gr.Tab("上传文件夹"):
folder_files = gr.File(label="添加文件",
# file_types=['.txt', '.md', '.docx', '.pdf'],
file_count="directory",
show_label=False)
load_folder_button = gr.Button("上传文件夹并加载知识库")
vs_add.click(fn=add_vs_name,
inputs=[vs_name, vs_list, chatbot],
outputs=[select_vs, vs_list, vs_name, vs_add, file2vs, chatbot])
select_vs.change(fn=change_vs_name_input,
inputs=[select_vs, chatbot],
outputs=[vs_name, vs_add, file2vs, vs_path, chatbot])
load_file_button.click(get_vector_store,
show_progress=True,
inputs=[select_vs, files, sentence_size, chatbot, vs_add, vs_add],
outputs=[vs_path, files, chatbot], )
load_folder_button.click(get_vector_store,
show_progress=True,
inputs=[select_vs, folder_files, sentence_size, chatbot, vs_add,
vs_add],
outputs=[vs_path, folder_files, chatbot], )
flag_csv_logger.setup([query, vs_path, chatbot, mode], "flagged")
query.submit(get_answer,
[query, vs_path, chatbot, mode],
[chatbot, query])
with gr.Tab("知识库测试 Beta"):
with gr.Row():
with gr.Column(scale=10):
chatbot = gr.Chatbot([[None, knowledge_base_test_mode_info]],
elem_id="chat-box",
show_label=False).style(height=750)
query = gr.Textbox(show_label=False,
placeholder="请输入提问内容,按回车进行提交").style(container=False)
with gr.Column(scale=5):
mode = gr.Radio(["知识库测试"], # "知识库问答",
label="请选择使用模式",
value="知识库测试",
visible=False)
knowledge_set = gr.Accordion("知识库设定", visible=True)
vs_setting = gr.Accordion("配置知识库", visible=True)
mode.change(fn=change_mode,
inputs=[mode, chatbot],
outputs=[vs_setting, knowledge_set, chatbot])
with knowledge_set:
score_threshold = gr.Number(value=VECTOR_SEARCH_SCORE_THRESHOLD,
label="知识相关度 Score 阈值,分值越低匹配度越高",
precision=0,
interactive=True)
vector_search_top_k = gr.Number(value=VECTOR_SEARCH_TOP_K, precision=0,
label="获取知识库内容条数", interactive=True)
chunk_conent = gr.Checkbox(value=False,
label="是否启用上下文关联",
interactive=True)
chunk_sizes = gr.Number(value=CHUNK_SIZE, precision=0,
label="匹配单段内容的连接上下文后最大长度",
interactive=True, visible=False)
chunk_conent.change(fn=change_chunk_conent,
inputs=[chunk_conent, gr.Textbox(value="chunk_conent", visible=False), chatbot],
outputs=[chunk_sizes, chatbot])
with vs_setting:
select_vs = gr.Dropdown(vs_list.value,
label="请选择要加载的知识库",
interactive=True,
value=vs_list.value[0] if len(vs_list.value) > 0 else None)
vs_name = gr.Textbox(label="请输入新建知识库名称,当前知识库命名暂不支持中文",
lines=1,
interactive=True,
visible=True)
vs_add = gr.Button(value="添加至知识库选项", visible=True)
file2vs = gr.Column(visible=False)
with file2vs:
# load_vs = gr.Button("加载知识库")
gr.Markdown("向知识库中添加单条内容或文件")
sentence_size = gr.Number(value=SENTENCE_SIZE, precision=0,
label="文本入库分句长度限制",
interactive=True, visible=True)
with gr.Tab("上传文件"):
files = gr.File(label="添加文件",
file_types=['.txt', '.md', '.docx', '.pdf'],
file_count="multiple",
show_label=False
)
load_file_button = gr.Button("上传文件并加载知识库")
with gr.Tab("上传文件夹"):
folder_files = gr.File(label="添加文件",
# file_types=['.txt', '.md', '.docx', '.pdf'],
file_count="directory",
show_label=False)
load_folder_button = gr.Button("上传文件夹并加载知识库")
with gr.Tab("添加单条内容"):
one_title = gr.Textbox(label="标题", placeholder="请输入要添加单条段落的标题", lines=1)
one_conent = gr.Textbox(label="内容", placeholder="请输入要添加单条段落的内容", lines=5)
one_content_segmentation = gr.Checkbox(value=True, label="禁止内容分句入库",
interactive=True)
load_conent_button = gr.Button("添加内容并加载知识库")
# 将上传的文件保存到content文件夹下,并更新下拉框
vs_add.click(fn=add_vs_name,
inputs=[vs_name, vs_list, chatbot],
outputs=[select_vs, vs_list, vs_name, vs_add, file2vs, chatbot])
select_vs.change(fn=change_vs_name_input,
inputs=[select_vs, chatbot],
outputs=[vs_name, vs_add, file2vs, vs_path, chatbot])
load_file_button.click(get_vector_store,
show_progress=True,
inputs=[select_vs, files, sentence_size, chatbot, vs_add, vs_add],
outputs=[vs_path, files, chatbot], )
load_folder_button.click(get_vector_store,
show_progress=True,
inputs=[select_vs, folder_files, sentence_size, chatbot, vs_add,
vs_add],
outputs=[vs_path, folder_files, chatbot], )
load_conent_button.click(get_vector_store,
show_progress=True,
inputs=[select_vs, one_title, sentence_size, chatbot,
one_conent, one_content_segmentation],
outputs=[vs_path, files, chatbot], )
flag_csv_logger.setup([query, vs_path, chatbot, mode], "flagged")
query.submit(get_answer,
[query, vs_path, chatbot, mode, score_threshold, vector_search_top_k, chunk_conent,
chunk_sizes],
[chatbot, query])
with gr.Tab("模型配置"):
llm_model = gr.Radio(llm_model_dict_list,
label="LLM 模型",
value=LLM_MODEL,
interactive=True)
llm_history_len = gr.Slider(0, 10,
value=LLM_HISTORY_LEN,
step=1,
label="LLM 对话轮数",
interactive=True)
use_ptuning_v2 = gr.Checkbox(USE_PTUNING_V2,
label="使用p-tuning-v2微调过的模型",
interactive=True)
use_lora = gr.Checkbox(USE_LORA,
label="使用lora微调的权重",
interactive=True)
embedding_model = gr.Radio(embedding_model_dict_list,
label="Embedding 模型",
value=EMBEDDING_MODEL,
interactive=True)
top_k = gr.Slider(1, 20, value=VECTOR_SEARCH_TOP_K, step=1,
label="向量匹配 top k", interactive=True)
load_model_button = gr.Button("重新加载模型")
load_model_button.click(reinit_model, show_progress=True,
inputs=[llm_model, embedding_model, llm_history_len, use_ptuning_v2, use_lora,
top_k, chatbot], outputs=chatbot)
(demo
.queue(concurrency_count=3)
.launch(server_name='0.0.0.0',
server_port=7860,
show_api=False,
share=False,
         inbrowser=False))
import argparse
import json
import os
import shutil
from typing import List, Optional
import nltk
import pydantic
import uvicorn
from fastapi import Body, FastAPI, File, Form, Query, UploadFile, WebSocket
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
from typing_extensions import Annotated
from starlette.responses import RedirectResponse
from .chains.local_doc_qa import LocalDocQA
from .configs.model_config import (VS_ROOT_PATH, UPLOAD_ROOT_PATH, EMBEDDING_DEVICE,
                                   EMBEDDING_MODEL, LLM_MODEL, NLTK_DATA_PATH,
                                   VECTOR_SEARCH_TOP_K, LLM_HISTORY_LEN, OPEN_CROSS_DOMAIN)
nltk.data.path = [NLTK_DATA_PATH] + nltk.data.path
class BaseResponse(BaseModel):
code: int = pydantic.Field(200, description="HTTP status code")
msg: str = pydantic.Field("success", description="HTTP status message")
class Config:
schema_extra = {
"example": {
"code": 200,
"msg": "success",
}
}
class ListDocsResponse(BaseResponse):
data: List[str] = pydantic.Field(..., description="List of document names")
class Config:
schema_extra = {
"example": {
"code": 200,
"msg": "success",
"data": ["doc1.docx", "doc2.pdf", "doc3.txt"],
}
}
class ChatMessage(BaseModel):
question: str = pydantic.Field(..., description="Question text")
response: str = pydantic.Field(..., description="Response text")
history: List[List[str]] = pydantic.Field(..., description="History text")
source_documents: List[str] = pydantic.Field(
..., description="List of source documents and their scores"
)
class Config:
schema_extra = {
"example": {
"question": "工伤保险如何办理?",
"response": "根据已知信息,可以总结如下:\n\n1. 参保单位为员工缴纳工伤保险费,以保障员工在发生工伤时能够获得相应的待遇。\n2. 不同地区的工伤保险缴费规定可能有所不同,需要向当地社保部门咨询以了解具体的缴费标准和规定。\n3. 工伤从业人员及其近亲属需要申请工伤认定,确认享受的待遇资格,并按时缴纳工伤保险费。\n4. 工伤保险待遇包括工伤医疗、康复、辅助器具配置费用、伤残待遇、工亡待遇、一次性工亡补助金等。\n5. 工伤保险待遇领取资格认证包括长期待遇领取人员认证和一次性待遇领取人员认证。\n6. 工伤保险基金支付的待遇项目包括工伤医疗待遇、康复待遇、辅助器具配置费用、一次性工亡补助金、丧葬补助金等。",
"history": [
[
"工伤保险是什么?",
"工伤保险是指用人单位按照国家规定,为本单位的职工和用人单位的其他人员,缴纳工伤保险费,由保险机构按照国家规定的标准,给予工伤保险待遇的社会保险制度。",
]
],
"source_documents": [
"出处 [1] 广州市单位从业的特定人员参加工伤保险办事指引.docx:\n\n\t( 一) 从业单位 (组织) 按“自愿参保”原则, 为未建 立劳动关系的特定从业人员单项参加工伤保险 、缴纳工伤保 险费。",
"出处 [2] ...",
"出处 [3] ...",
],
}
}
def get_folder_path(local_doc_id: str):
return os.path.join(UPLOAD_ROOT_PATH, local_doc_id)
def get_vs_path(local_doc_id: str):
return os.path.join(VS_ROOT_PATH, local_doc_id)
def get_file_path(local_doc_id: str, doc_name: str):
return os.path.join(UPLOAD_ROOT_PATH, local_doc_id, doc_name)
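# Directory layout used by the endpoints below: uploaded files live under
# UPLOAD_ROOT_PATH/<knowledge_base_id>/ and the corresponding FAISS index under
# VS_ROOT_PATH/<knowledge_base_id>/.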
async def upload_file(
file: UploadFile = File(description="A single binary file"),
knowledge_base_id: str = Form(..., description="Knowledge Base Name", example="kb1"),
):
saved_path = get_folder_path(knowledge_base_id)
if not os.path.exists(saved_path):
os.makedirs(saved_path)
file_content = await file.read() # 读取上传文件的内容
file_path = os.path.join(saved_path, file.filename)
if os.path.exists(file_path) and os.path.getsize(file_path) == len(file_content):
file_status = f"文件 {file.filename} 已存在。"
return BaseResponse(code=200, msg=file_status)
with open(file_path, "wb") as f:
f.write(file_content)
vs_path = get_vs_path(knowledge_base_id)
vs_path, loaded_files = local_doc_qa.init_knowledge_vector_store([file_path], vs_path)
if len(loaded_files) > 0:
file_status = f"文件 {file.filename} 已上传至新的知识库,并已加载知识库,请开始提问。"
return BaseResponse(code=200, msg=file_status)
else:
file_status = "文件上传失败,请重新上传"
return BaseResponse(code=500, msg=file_status)
async def upload_files(
files: Annotated[
List[UploadFile], File(description="Multiple files as UploadFile")
],
knowledge_base_id: str = Form(..., description="Knowledge Base Name", example="kb1"),
):
saved_path = get_folder_path(knowledge_base_id)
if not os.path.exists(saved_path):
os.makedirs(saved_path)
filelist = []
for file in files:
file_content = ''
file_path = os.path.join(saved_path, file.filename)
file_content = file.file.read()
if os.path.exists(file_path) and os.path.getsize(file_path) == len(file_content):
continue
with open(file_path, "ab+") as f:
f.write(file_content)
filelist.append(file_path)
if filelist:
vs_path, loaded_files = local_doc_qa.init_knowledge_vector_store(filelist, get_vs_path(knowledge_base_id))
if len(loaded_files):
file_status = f"已上传 {'、'.join([os.path.split(i)[-1] for i in loaded_files])} 至知识库,并已加载知识库,请开始提问"
return BaseResponse(code=200, msg=file_status)
file_status = "文件未成功加载,请重新上传文件"
return BaseResponse(code=500, msg=file_status)
async def list_docs(
knowledge_base_id: Optional[str] = Query(default=None, description="Knowledge Base Name", example="kb1")
):
if knowledge_base_id:
local_doc_folder = get_folder_path(knowledge_base_id)
if not os.path.exists(local_doc_folder):
return {"code": 1, "msg": f"Knowledge base {knowledge_base_id} not found"}
all_doc_names = [
doc
for doc in os.listdir(local_doc_folder)
if os.path.isfile(os.path.join(local_doc_folder, doc))
]
return ListDocsResponse(data=all_doc_names)
else:
if not os.path.exists(UPLOAD_ROOT_PATH):
all_doc_ids = []
else:
all_doc_ids = [
folder
for folder in os.listdir(UPLOAD_ROOT_PATH)
if os.path.isdir(os.path.join(UPLOAD_ROOT_PATH, folder))
]
return ListDocsResponse(data=all_doc_ids)
async def delete_docs(
knowledge_base_id: str = Form(...,
description="Knowledge Base Name(注意此方法仅删除上传的文件并不会删除知识库(FAISS)内数据)",
example="kb1"),
doc_name: Optional[str] = Form(
None, description="doc name", example="doc_name_1.pdf"
),
):
if not os.path.exists(os.path.join(UPLOAD_ROOT_PATH, knowledge_base_id)):
return {"code": 1, "msg": f"Knowledge base {knowledge_base_id} not found"}
if doc_name:
doc_path = get_file_path(knowledge_base_id, doc_name)
if os.path.exists(doc_path):
os.remove(doc_path)
else:
            return BaseResponse(code=1, msg=f"document {doc_name} not found")
        remain_docs = await list_docs(knowledge_base_id)
        # list_docs returns a ListDocsResponse on success; remove the whole knowledge base
        # folder if listing failed or no documents remain, otherwise rebuild the index.
        if not isinstance(remain_docs, ListDocsResponse) or len(remain_docs.data) == 0:
            shutil.rmtree(get_folder_path(knowledge_base_id), ignore_errors=True)
        else:
            local_doc_qa.init_knowledge_vector_store(
                get_folder_path(knowledge_base_id), get_vs_path(knowledge_base_id)
            )
else:
shutil.rmtree(get_folder_path(knowledge_base_id))
return BaseResponse()
async def local_doc_chat(
knowledge_base_id: str = Body(..., description="Knowledge Base Name", example="kb1"),
question: str = Body(..., description="Question", example="工伤保险是什么?"),
history: List[List[str]] = Body(
[],
description="History of previous questions and answers",
example=[
[
"工伤保险是什么?",
"工伤保险是指用人单位按照国家规定,为本单位的职工和用人单位的其他人员,缴纳工伤保险费,由保险机构按照国家规定的标准,给予工伤保险待遇的社会保险制度。",
]
],
),
):
vs_path = os.path.join(VS_ROOT_PATH, knowledge_base_id)
if not os.path.exists(vs_path):
# return BaseResponse(code=1, msg=f"Knowledge base {knowledge_base_id} not found")
return ChatMessage(
question=question,
response=f"Knowledge base {knowledge_base_id} not found",
history=history,
source_documents=[],
)
else:
for resp, history in local_doc_qa.get_knowledge_based_answer(
query=question, vs_path=vs_path, chat_history=history, streaming=True
):
pass
source_documents = [
f"""出处 [{inum + 1}] {os.path.split(doc.metadata['source'])[-1]}:\n\n{doc.page_content}\n\n"""
f"""相关度:{doc.metadata['score']}\n\n"""
for inum, doc in enumerate(resp["source_documents"])
]
return ChatMessage(
question=question,
response=resp["result"],
history=history,
source_documents=source_documents,
)
async def chat(
question: str = Body(..., description="Question", example="工伤保险是什么?"),
history: List[List[str]] = Body(
[],
description="History of previous questions and answers",
example=[
[
"工伤保险是什么?",
"工伤保险是指用人单位按照国家规定,为本单位的职工和用人单位的其他人员,缴纳工伤保险费,由保险机构按照国家规定的标准,给予工伤保险待遇的社会保险制度。",
]
],
),
):
for resp, history in local_doc_qa.llm._call(
prompt=question, history=history, streaming=True
):
pass
return ChatMessage(
question=question,
response=resp,
history=history,
source_documents=[],
)
async def stream_chat(websocket: WebSocket, knowledge_base_id: str):
await websocket.accept()
turn = 1
while True:
input_json = await websocket.receive_json()
        question, history, knowledge_base_id = input_json["question"], input_json["history"], input_json["knowledge_base_id"]
vs_path = os.path.join(VS_ROOT_PATH, knowledge_base_id)
if not os.path.exists(vs_path):
await websocket.send_json({"error": f"Knowledge base {knowledge_base_id} not found"})
await websocket.close()
return
await websocket.send_json({"question": question, "turn": turn, "flag": "start"})
last_print_len = 0
for resp, history in local_doc_qa.get_knowledge_based_answer(
query=question, vs_path=vs_path, chat_history=history, streaming=True
):
await websocket.send_text(resp["result"][last_print_len:])
last_print_len = len(resp["result"])
source_documents = [
f"""出处 [{inum + 1}] {os.path.split(doc.metadata['source'])[-1]}:\n\n{doc.page_content}\n\n"""
f"""相关度:{doc.metadata['score']}\n\n"""
for inum, doc in enumerate(resp["source_documents"])
]
await websocket.send_text(
json.dumps(
{
"question": question,
"turn": turn,
"flag": "end",
"sources_documents": source_documents,
},
ensure_ascii=False,
)
)
turn += 1
async def document():
return RedirectResponse(url="/docs")
def api_start(host, port):
global app
global local_doc_qa
app = FastAPI()
# Add CORS middleware to allow all origins
# 在config.py中设置OPEN_DOMAIN=True,允许跨域
# set OPEN_DOMAIN=True in config.py to allow cross-domain
if OPEN_CROSS_DOMAIN:
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
app.websocket("/local_doc_qa/stream-chat/{knowledge_base_id}")(stream_chat)
app.get("/", response_model=BaseResponse)(document)
app.post("/chat", response_model=ChatMessage)(chat)
app.post("/local_doc_qa/upload_file", response_model=BaseResponse)(upload_file)
app.post("/local_doc_qa/upload_files", response_model=BaseResponse)(upload_files)
app.post("/local_doc_qa/local_doc_chat", response_model=ChatMessage)(local_doc_chat)
app.get("/local_doc_qa/list_files", response_model=ListDocsResponse)(list_docs)
app.delete("/local_doc_qa/delete_file", response_model=BaseResponse)(delete_docs)
local_doc_qa = LocalDocQA()
local_doc_qa.init_cfg(
llm_model=LLM_MODEL,
embedding_model=EMBEDDING_MODEL,
embedding_device=EMBEDDING_DEVICE,
llm_history_len=LLM_HISTORY_LEN,
top_k=VECTOR_SEARCH_TOP_K,
)
uvicorn.run(app, host=host, port=port)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--host", type=str, default="0.0.0.0")
parser.add_argument("--port", type=int, default=7861)
args = parser.parse_args()
    api_start(args.host, args.port)
from typing import List
from langchain.document_loaders.unstructured import UnstructuredFileLoader
from paddleocr import PaddleOCR
import os
import fitz
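# UnstructuredPaddlePDFLoader extracts the text layer of every PDF page with PyMuPDF (fitz),
# runs PaddleOCR over any embedded images, appends the OCR results, and finally feeds the
# combined text file to unstructured's partition_text() to produce elements.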
class UnstructuredPaddlePDFLoader(UnstructuredFileLoader):
"""Loader that uses unstructured to load image files, such as PNGs and JPGs."""
def _get_elements(self) -> List:
def pdf_ocr_txt(filepath, dir_path="tmp_files"):
full_dir_path = os.path.join(os.path.dirname(filepath), dir_path)
if not os.path.exists(full_dir_path):
os.makedirs(full_dir_path)
filename = os.path.split(filepath)[-1]
ocr = PaddleOCR(lang="ch", use_gpu=False, show_log=False)
doc = fitz.open(filepath)
txt_file_path = os.path.join(full_dir_path, "%s.txt" % (filename))
img_name = os.path.join(full_dir_path, '.tmp.png')
with open(txt_file_path, 'w', encoding='utf-8') as fout:
for i in range(doc.page_count):
page = doc[i]
text = page.get_text("")
fout.write(text)
fout.write("\n")
img_list = page.get_images()
for img in img_list:
pix = fitz.Pixmap(doc, img[0])
pix.save(img_name)
result = ocr.ocr(img_name)
ocr_result = [i[1][0] for line in result for i in line]
fout.write("\n".join(ocr_result))
os.remove(img_name)
return txt_file_path
txt_file_path = pdf_ocr_txt(self.file_path)
from unstructured.partition.text import partition_text
return partition_text(filename=txt_file_path, **self.unstructured_kwargs)
if __name__ == "__main__":
filepath = os.path.join(os.path.dirname(os.path.dirname(__file__)), "content", "samples", "test.pdf")
loader = UnstructuredPaddlePDFLoader(filepath, mode="elements")
docs = loader.load()
for doc in docs:
        print(doc)
from langchain.text_splitter import CharacterTextSplitter
import re
from typing import List
from ..configs.model_config import SENTENCE_SIZE
class ChineseTextSplitter(CharacterTextSplitter):
def __init__(self, pdf: bool = False, sentence_size: int = None, **kwargs):
super().__init__(**kwargs)
self.pdf = pdf
self.sentence_size = sentence_size
def split_text1(self, text: str) -> List[str]:
if self.pdf:
text = re.sub(r"\n{3,}", "\n", text)
text = re.sub('\s', ' ', text)
text = text.replace("\n\n", "")
sent_sep_pattern = re.compile('([﹒﹔﹖﹗.。!?]["’”」』]{0,2}|(?=["‘“「『]{1,2}|$))') # del :;
sent_list = []
for ele in sent_sep_pattern.split(text):
if sent_sep_pattern.match(ele) and sent_list:
sent_list[-1] += ele
elif ele:
sent_list.append(ele)
return sent_list
def split_text(self, text: str) -> List[str]: ##此处需要进一步优化逻辑
if self.pdf:
text = re.sub(r"\n{3,}", r"\n", text)
text = re.sub('\s', " ", text)
text = re.sub("\n\n", "", text)
text = re.sub(r'([;;.!?。!?\?])([^”’])', r"\1\n\2", text) # 单字符断句符
text = re.sub(r'(\.{6})([^"’”」』])', r"\1\n\2", text) # 英文省略号
text = re.sub(r'(\…{2})([^"’”」』])', r"\1\n\2", text) # 中文省略号
text = re.sub(r'([;;!?。!?\?]["’”」』]{0,2})([^;;!?,。!?\?])', r'\1\n\2', text)
# 如果双引号前有终止符,那么双引号才是句子的终点,把分句符\n放到双引号后,注意前面的几句都小心保留了双引号
text = text.rstrip() # 段尾如果有多余的\n就去掉它
# 很多规则中会考虑分号;,但是这里我把它忽略不计,破折号、英文双引号等同样忽略,需要的再做些简单调整即可。
ls = [i for i in text.split("\n") if i]
for ele in ls:
if len(ele) > self.sentence_size:
ele1 = re.sub(r'([,,.]["’”」』]{0,2})([^,,.])', r'\1\n\2', ele)
ele1_ls = ele1.split("\n")
for ele_ele1 in ele1_ls:
if len(ele_ele1) > self.sentence_size:
ele_ele2 = re.sub(r'([\n]{1,}| {2,}["’”」』]{0,2})([^\s])', r'\1\n\2', ele_ele1)
ele2_ls = ele_ele2.split("\n")
for ele_ele2 in ele2_ls:
if len(ele_ele2) > self.sentence_size:
ele_ele3 = re.sub('( ["’”」』]{0,2})([^ ])', r'\1\n\2', ele_ele2)
ele2_id = ele2_ls.index(ele_ele2)
ele2_ls = ele2_ls[:ele2_id] + [i for i in ele_ele3.split("\n") if i] + ele2_ls[
ele2_id + 1:]
ele_id = ele1_ls.index(ele_ele1)
ele1_ls = ele1_ls[:ele_id] + [i for i in ele2_ls if i] + ele1_ls[ele_id + 1:]
id = ls.index(ele)
ls = ls[:id] + [i for i in ele1_ls if i] + ls[id + 1:]
        return ls
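# Illustrative usage (expected output is approximate and depends on the regex rules above):
#   splitter = ChineseTextSplitter(pdf=False, sentence_size=100)
#   splitter.split_text("第一句。第二句!第三句呢?")
#   -> ['第一句。', '第二句!', '第三句呢?']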
Pretrained Punkt Models -- Jan Strunk (New version trained after issues 313 and 514 had been corrected)
Most models were prepared using the test corpora from Kiss and Strunk (2006). Additional models have
been contributed by various people using NLTK for sentence boundary detection.
For information about how to use these models, please confer the tokenization HOWTO:
http://nltk.googlecode.com/svn/trunk/doc/howto/tokenize.html
and chapter 3.8 of the NLTK book:
http://nltk.googlecode.com/svn/trunk/doc/book/ch03.html#sec-segmentation
There are pretrained tokenizers for the following languages:
File Language Source Contents Size of training corpus(in tokens) Model contributed by
=======================================================================================================================================================================
czech.pickle Czech Multilingual Corpus 1 (ECI) Lidove Noviny ~345,000 Jan Strunk / Tibor Kiss
Literarni Noviny
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------
danish.pickle Danish Avisdata CD-Rom Ver. 1.1. 1995 Berlingske Tidende ~550,000 Jan Strunk / Tibor Kiss
(Berlingske Avisdata, Copenhagen) Weekend Avisen
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------
dutch.pickle Dutch Multilingual Corpus 1 (ECI) De Limburger ~340,000 Jan Strunk / Tibor Kiss
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------
english.pickle English Penn Treebank (LDC) Wall Street Journal ~469,000 Jan Strunk / Tibor Kiss
(American)
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------
estonian.pickle Estonian University of Tartu, Estonia Eesti Ekspress ~359,000 Jan Strunk / Tibor Kiss
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------
finnish.pickle Finnish Finnish Parole Corpus, Finnish Books and major national ~364,000 Jan Strunk / Tibor Kiss
Text Bank (Suomen Kielen newspapers
Tekstipankki)
Finnish Center for IT Science
(CSC)
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------
french.pickle French Multilingual Corpus 1 (ECI) Le Monde ~370,000 Jan Strunk / Tibor Kiss
(European)
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------
german.pickle German Neue Zürcher Zeitung AG Neue Zürcher Zeitung ~847,000 Jan Strunk / Tibor Kiss
(Switzerland) CD-ROM
(Uses "ss"
instead of "ß")
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------
greek.pickle Greek Efstathios Stamatatos To Vima (TO BHMA) ~227,000 Jan Strunk / Tibor Kiss
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------
italian.pickle Italian Multilingual Corpus 1 (ECI) La Stampa, Il Mattino ~312,000 Jan Strunk / Tibor Kiss
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------
norwegian.pickle Norwegian Centre for Humanities Bergens Tidende ~479,000 Jan Strunk / Tibor Kiss
(Bokmål and Information Technologies,
Nynorsk) Bergen
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------
polish.pickle Polish Polish National Corpus Literature, newspapers, etc. ~1,000,000 Krzysztof Langner
(http://www.nkjp.pl/)
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------
portuguese.pickle Portuguese CETENFolha Corpus Folha de São Paulo ~321,000 Jan Strunk / Tibor Kiss
(Brazilian) (Linguateca)
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------
slovene.pickle Slovene TRACTOR Delo ~354,000 Jan Strunk / Tibor Kiss
Slovene Academy for Arts
and Sciences
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------
spanish.pickle Spanish Multilingual Corpus 1 (ECI) Sur ~353,000 Jan Strunk / Tibor Kiss
(European)
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------
swedish.pickle Swedish Multilingual Corpus 1 (ECI) Dagens Nyheter ~339,000 Jan Strunk / Tibor Kiss
(and some other texts)
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------
turkish.pickle Turkish METU Turkish Corpus Milliyet ~333,000 Jan Strunk / Tibor Kiss
(Türkçe Derlem Projesi)
University of Ankara
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------
The corpora contained about 400,000 tokens on average and mostly consisted of newspaper text converted to
Unicode using the codecs module.
Kiss, Tibor and Strunk, Jan (2006): Unsupervised Multilingual Sentence Boundary Detection.
Computational Linguistics 32: 485-525.
---- Training Code ----
# import punkt
import nltk.tokenize.punkt
# Make a new Tokenizer
tokenizer = nltk.tokenize.punkt.PunktSentenceTokenizer()
# Read in training corpus (one example: Slovene)
import codecs
text = codecs.open("slovene.plain","Ur","iso-8859-2").read()
# Train tokenizer
tokenizer.train(text)
# Dump pickled tokenizer
import pickle
out = open("slovene.pickle","wb")
pickle.dump(tokenizer, out)
out.close()
---------
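---- Usage Example ----

# Load one of the pretrained models listed above and tokenize some text
# (illustrative snippet; requires the punkt models to be available to NLTK)

import nltk.data

tokenizer = nltk.data.load("tokenizers/punkt/english.pickle")
tokenizer.tokenize("Hello world. This is a test.")

---------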
The Carnegie Mellon Pronouncing Dictionary [cmudict.0.7a]
ftp://ftp.cs.cmu.edu/project/speech/dict/
https://cmusphinx.svn.sourceforge.net/svnroot/cmusphinx/trunk/cmudict/cmudict.0.7a
Copyright (C) 1993-2008 Carnegie Mellon University. All rights reserved.
File Format: Each line consists of an uppercased word,
a counter (for alternative pronunciations), and a transcription.
Vowels are marked for stress (1=primary, 2=secondary, 0=no stress).
E.g.: NATURAL 1 N AE1 CH ER0 AH0 L
The dictionary contains 127069 entries. Of these, 119400 words are assigned
a unique pronunciation, 6830 words have two pronunciations, and 839 words have
three or more pronunciations. Many of these are fast-speech variants.
Phonemes: There are 39 phonemes, as shown below:
Phoneme Example Translation Phoneme Example Translation
------- ------- ----------- ------- ------- -----------
AA odd AA D AE at AE T
AH hut HH AH T AO ought AO T
AW cow K AW AY hide HH AY D
B be B IY CH cheese CH IY Z
D dee D IY DH thee DH IY
EH Ed EH D ER hurt HH ER T
EY ate EY T F fee F IY
G green G R IY N HH he HH IY
IH it IH T IY eat IY T
JH gee JH IY K key K IY
L lee L IY M me M IY
N knee N IY NG ping P IH NG
OW oat OW T OY toy T OY
P pee P IY R read R IY D
S sea S IY SH she SH IY
T tea T IY TH theta TH EY T AH
UH hood HH UH D UW two T UW
V vee V IY W we W IY
Y yield Y IY L D Z zee Z IY
ZH seizure S IY ZH ER
(For NLTK, entries have been sorted so that, e.g. FIRE 1 and FIRE 2
are contiguous, and not separated by FIRE'S 1.)
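With NLTK the entries can be queried as a pronouncing dictionary, e.g. (illustrative
snippet; the cmudict corpus must have been downloaded via nltk.download('cmudict')):

    >>> from nltk.corpus import cmudict
    >>> prondict = cmudict.dict()
    >>> prondict['natural']
    [['N', 'AE1', 'CH', 'ER0', 'AH0', 'L']]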
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
The contents of this file are deemed to be source code.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
This work was supported in part by funding from the Defense Advanced
Research Projects Agency, the Office of Naval Research and the National
Science Foundation of the United States of America, and by member
companies of the Carnegie Mellon Sphinx Speech Consortium. We acknowledge
the contributions of many volunteers to the expansion and improvement of
this dictionary.
THIS SOFTWARE IS PROVIDED BY CARNEGIE MELLON UNIVERSITY ``AS IS'' AND
ANY EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY
NOR ITS EMPLOYEES BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
from langchain.document_loaders import UnstructuredFileLoader
import datetime
from typing import List, Tuple
from langchain.docstore.document import Document
import numpy as np
from tqdm import tqdm
from pypinyin import lazy_pinyin
from ..configs.model_config import *
from ..textsplitter import ChineseTextSplitter
def torch_gc():
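    """Free cached GPU (CUDA) or MPS memory held by PyTorch, if such a device is available."""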
import torch
if torch.cuda.is_available():
# with torch.cuda.device(DEVICE):
torch.cuda.empty_cache()
torch.cuda.ipc_collect()
elif torch.backends.mps.is_available():
try:
from torch.mps import empty_cache
empty_cache()
except Exception as e:
print(e)
print("如果您使用的是 macOS 建议将 pytorch 版本升级至 2.0.0 或更高版本,以支持及时清理 torch 产生的内存占用。")
def load_file(filepath, sentence_size=SENTENCE_SIZE):
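    """Load ``filepath`` with UnstructuredFileLoader (mode="elements") and split it into documents using ChineseTextSplitter."""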
loader = UnstructuredFileLoader(filepath, mode="elements")
textsplitter = ChineseTextSplitter(pdf=False, sentence_size=sentence_size)
docs = loader.load_and_split(text_splitter=textsplitter)
# write_check_file(filepath, docs)
return docs
def write_check_file(filepath, docs):
folder_path = os.path.join(os.path.dirname(filepath), "tmp_files")
if not os.path.exists(folder_path):
os.makedirs(folder_path)
fp = os.path.join(folder_path, 'load_file.txt')
    fout = open(fp, 'a', encoding='utf-8')
fout.write("filepath=%s,len=%s" % (filepath, len(docs)))
fout.write('\n')
for i in docs:
fout.write(str(i))
fout.write('\n')
fout.close()
def generate_prompt(related_docs: List[str], query: str,
prompt_template=PROMPT_TEMPLATE) -> str:
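    """Fill ``prompt_template`` by replacing {question} with ``query`` and {context} with the joined page contents of ``related_docs``."""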
context = "\n".join([doc.page_content for doc in related_docs])
prompt = prompt_template.replace("{question}", query).replace("{context}", context)
return prompt
def seperate_list(ls: List[int]) -> List[List[int]]:
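    """Split a list of sorted indices into runs of consecutive values, e.g. [1, 2, 3, 7, 9] -> [[1, 2, 3], [7], [9]]."""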
lists = []
ls1 = [ls[0]]
for i in range(1, len(ls)):
if ls[i - 1] + 1 == ls[i]:
ls1.append(ls[i])
else:
lists.append(ls1)
ls1 = [ls[i]]
lists.append(ls1)
return lists
def similarity_search_with_score_by_vector(
self, embedding: List[float], k: int = 4
) -> List[Tuple[Document, float]]:
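    """FAISS similarity search that optionally merges neighbouring chunks: returns the top ``k``
    documents for ``embedding``; hits whose distance exceeds a positive ``score_threshold`` are
    skipped, and when ``chunk_conent`` is enabled adjacent chunks from the same source file are
    appended (up to ``chunk_size`` characters) so the returned context is not cut off mid-passage.
    """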
scores, indices = self.index.search(np.array([embedding], dtype=np.float32), k)
docs = []
id_set = set()
store_len = len(self.index_to_docstore_id)
for j, i in enumerate(indices[0]):
if i == -1 or 0 < self.score_threshold < scores[0][j]:
# This happens when not enough docs are returned.
continue
_id = self.index_to_docstore_id[i]
doc = self.docstore.search(_id)
if not self.chunk_conent:
if not isinstance(doc, Document):
raise ValueError(f"Could not find document for id {_id}, got {doc}")
doc.metadata["score"] = int(scores[0][j])
docs.append(doc)
continue
id_set.add(i)
docs_len = len(doc.page_content)
        for expand in range(1, max(i, store_len - i)):  # expansion radius; renamed so it does not shadow the top-k parameter ``k``
            break_flag = False
            for l in [i + expand, i - expand]:
if 0 <= l < len(self.index_to_docstore_id):
_id0 = self.index_to_docstore_id[l]
doc0 = self.docstore.search(_id0)
if docs_len + len(doc0.page_content) > self.chunk_size:
break_flag = True
break
elif doc0.metadata["source"] == doc.metadata["source"]:
docs_len += len(doc0.page_content)
id_set.add(l)
if break_flag:
break
if not self.chunk_conent:
return docs
if len(id_set) == 0 and self.score_threshold > 0:
return []
id_list = sorted(list(id_set))
id_lists = seperate_list(id_list)
for id_seq in id_lists:
for id in id_seq:
if id == id_seq[0]:
_id = self.index_to_docstore_id[id]
doc = self.docstore.search(_id)
else:
_id0 = self.index_to_docstore_id[id]
doc0 = self.docstore.search(_id0)
doc.page_content += " " + doc0.page_content
if not isinstance(doc, Document):
raise ValueError(f"Could not find document for id {_id}, got {doc}")
doc_score = min([scores[0][id] for id in [indices[0].tolist().index(i) for i in id_seq if i in indices[0]]])
doc.metadata["score"] = int(doc_score)
docs.append(doc)
torch_gc()
return docs
class LocalDocQA:
llm: object = None
embeddings: object = None
top_k: int = VECTOR_SEARCH_TOP_K
chunk_size: int = CHUNK_SIZE
chunk_conent: bool = True
score_threshold: int = VECTOR_SEARCH_SCORE_THRESHOLD
def init_cfg(self,
embedding_model: str = EMBEDDING_MODEL,
embedding_device=EMBEDDING_DEVICE,
llm_history_len: int = LLM_HISTORY_LEN,
llm_model: str = LLM_MODEL,
llm_device=LLM_DEVICE,
top_k=VECTOR_SEARCH_TOP_K,
use_ptuning_v2: bool = USE_PTUNING_V2,
use_lora: bool = USE_LORA,
):
self.llm = None
self.top_k = top_k
def init_knowledge_vector_store(self,
filepath: str or List[str],
vs_path: str or os.PathLike = None,
sentence_size=SENTENCE_SIZE,
text2vec=None):
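        """Load a single file, a directory, or a list of file paths, split the contents into
        documents, and build (or extend) a FAISS vector store with the ``text2vec`` embeddings,
        saving it to ``vs_path``. Returns ``(vs_path, loaded_files)`` on success."""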
loaded_files = []
failed_files = []
if isinstance(filepath, str):
if not os.path.exists(filepath):
print("路径不存在")
return None
elif os.path.isfile(filepath):
file = os.path.split(filepath)[-1]
try:
docs = load_file(filepath, sentence_size)
print(f"{file} 已成功加载")
loaded_files.append(filepath)
except Exception as e:
print(e)
print(f"{file} 未能成功加载")
return None
elif os.path.isdir(filepath):
docs = []
for file in tqdm(os.listdir(filepath), desc="加载文件"):
fullfilepath = os.path.join(filepath, file)
try:
docs += load_file(fullfilepath, sentence_size)
loaded_files.append(fullfilepath)
except Exception as e:
print(e)
failed_files.append(file)
if len(failed_files) > 0:
print("以下文件未能成功加载:")
for file in failed_files:
print(f"{file}\n")
else:
docs = []
for file in filepath:
try:
docs += load_file(file)
print(f"{file} 已成功加载")
loaded_files.append(file)
except Exception as e:
print(e)
print(f"{file} 未能成功加载")
if len(docs) > 0:
print("文件加载完毕,正在生成向量库")
if vs_path and os.path.isdir(vs_path):
self.vector_store = FAISS.load_local(vs_path, text2vec)
self.vector_store.add_documents(docs)
torch_gc()
else:
                if not vs_path:
                    raise ValueError("vs_path 不能为空:新建向量库时需要指定保存路径")
self.vector_store = FAISS.from_documents(docs, text2vec) # docs 为Document列表
torch_gc()
self.vector_store.save_local(vs_path)
return vs_path, loaded_files
else:
self.vector_store = FAISS.load_local(vs_path, text2vec)
torch_gc()
return vs_path, loaded_files
def get_loaded_file(self):
ds = self.vector_store.docstore
return set([ds._dict[k].metadata['source'].split(UPLOAD_ROOT_PATH)[-1] for k in ds._dict])
# query 查询内容
# vs_path 知识库路径
# chunk_conent 是否启用上下文关联
# score_threshold 搜索匹配score阈值
# vector_search_top_k 搜索知识库内容条数,默认搜索5条结果
    # chunk_size 匹配单段内容的连接上下文长度
def get_knowledge_based_conent_test(self, query, vs_path, chunk_conent,
score_threshold=VECTOR_SEARCH_SCORE_THRESHOLD,
vector_search_top_k=VECTOR_SEARCH_TOP_K, chunk_size=CHUNK_SIZE,
text2vec=None):
self.vector_store = FAISS.load_local(vs_path, text2vec)
self.vector_store.chunk_conent = chunk_conent
self.vector_store.score_threshold = score_threshold
self.vector_store.chunk_size = chunk_size
embedding = self.vector_store.embedding_function(query)
related_docs_with_score = similarity_search_with_score_by_vector(self.vector_store, embedding, k=vector_search_top_k)
if not related_docs_with_score:
response = {"query": query,
"source_documents": []}
return response, ""
torch_gc()
# prompt = f"{query}. You should answer this question using information from following documents: \n\n"
prompt = f"{query}. 你必须利用以下文档中包含的信息回答这个问题: \n\n---\n\n"
prompt += "\n\n".join([f"({k}): " + doc.page_content for k, doc in enumerate(related_docs_with_score)])
prompt += "\n\n---\n\n"
prompt = prompt.encode('utf-8', 'ignore').decode() # avoid reading non-utf8 chars
# print(prompt)
response = {"query": query, "source_documents": related_docs_with_score}
return response, prompt | zh-langchain | /zh-langchain-0.2.1.tar.gz/zh-langchain-0.2.1/zh_langchain/chains/local_doc_qa.py | local_doc_qa.py |
CHINESEYEARCODE = [
19416,
19168, 42352, 21717, 53856, 55632, 91476, 22176, 39632,
21970, 19168, 42422, 42192, 53840, 119381, 46400, 54944,
44450, 38320, 84343, 18800, 42160, 46261, 27216, 27968,
109396, 11104, 38256, 21234, 18800, 25958, 54432, 59984,
92821, 23248, 11104, 100067, 37600, 116951, 51536, 54432,
120998, 46416, 22176, 107956, 9680, 37584, 53938, 43344,
46423, 27808, 46416, 86869, 19872, 42416, 83315, 21168,
43432, 59728, 27296, 44710, 43856, 19296, 43748, 42352,
21088, 62051, 55632, 23383, 22176, 38608, 19925, 19152,
42192, 54484, 53840, 54616, 46400, 46752, 103846, 38320,
18864, 43380, 42160, 45690, 27216, 27968, 44870, 43872,
38256, 19189, 18800, 25776, 29859, 59984, 27480, 23232,
43872, 38613, 37600, 51552, 55636, 54432, 55888, 30034,
22176, 43959, 9680, 37584, 51893, 43344, 46240, 47780,
44368, 21977, 19360, 42416, 86390, 21168, 43312, 31060,
27296, 44368, 23378, 19296, 42726, 42208, 53856, 60005,
54576, 23200, 30371, 38608, 19195, 19152, 42192, 118966,
53840, 54560, 56645, 46496, 22224, 21938, 18864, 42359,
42160, 43600, 111189, 27936, 44448, 84835, 37744, 18936,
18800, 25776, 92326, 59984, 27296, 108228, 43744, 37600,
53987, 51552, 54615, 54432, 55888, 23893, 22176, 42704,
21972, 21200, 43448, 43344, 46240, 46758, 44368, 21920,
43940, 42416, 21168, 45683, 26928, 29495, 27296, 44368,
84821, 19296, 42352, 21732, 53600, 59752, 54560, 55968,
92838, 22224, 19168, 43476, 41680, 53584, 62034, 54560
]
'''
从1900年到2100年的农历月份数据代码 20位二进制代码表示一个年份的数据。
前四位0:表示闰月为29天,1:表示闰月为30天
中间12位:从左起表示1-12月每月的大小,1为30天,0为29天
最后四位:表示闰月的月份,0表示当年无闰月
前四位和最后四位应该结合使用,如果最后四位为0,则不考虑前四位
例:
1901年代码为 19168,转成二进制为 0b100101011100000, 最后四位为0,当年无闰月,月份数据为 010010101110 分别代表12月的大小情况
1903年代码为 21717,转成二进制为 0b101010011010101,最后四位为5,当年为闰五月,首四位为0,闰月为29天,月份数据为 010101001101 分别代表12月的大小情况
'''
CHINESENEWYEAR = [
'19000131',
'19010219', '19020208', '19030129', '19040216', '19050204',
'19060125', '19070213', '19080202', '19090122', '19100210',
'19110130', '19120218', '19130206', '19140126', '19150214',
'19160203', '19170123', '19180211', '19190201', '19200220',
'19210208', '19220128', '19230216', '19240205', '19250124',
'19260213', '19270202', '19280123', '19290210', '19300130',
'19310217', '19320206', '19330126', '19340214', '19350204',
'19360124', '19370211', '19380131', '19390219', '19400208',
'19410127', '19420215', '19430205', '19440125', '19450213',
'19460202', '19470122', '19480210', '19490129', '19500217',
'19510206', '19520127', '19530214', '19540203', '19550124',
'19560212', '19570131', '19580218', '19590208', '19600128',
'19610215', '19620205', '19630125', '19640213', '19650202',
'19660121', '19670209', '19680130', '19690217', '19700206',
'19710127', '19720215', '19730203', '19740123', '19750211',
'19760131', '19770218', '19780207', '19790128', '19800216',
'19810205', '19820125', '19830213', '19840202', '19850220',
'19860209', '19870129', '19880217', '19890206', '19900127',
'19910215', '19920204', '19930123', '19940210', '19950131',
'19960219', '19970207', '19980128', '19990216', '20000205',
'20010124', '20020212', '20030201', '20040122', '20050209',
'20060129', '20070218', '20080207', '20090126', '20100214',
'20110203', '20120123', '20130210', '20140131', '20150219',
'20160208', '20170128', '20180216', '20190205', '20200125',
'20210212', '20220201', '20230122', '20240210', '20250129',
'20260217', '20270206', '20280126', '20290213', '20300203',
'20310123', '20320211', '20330131', '20340219', '20350208',
'20360128', '20370215', '20380204', '20390124', '20400212',
'20410201', '20420122', '20430210', '20440130', '20450217',
'20460206', '20470126', '20480214', '20490202', '20500123',
'20510211', '20520201', '20530219', '20540208', '20550128',
'20560215', '20570204', '20580124', '20590212', '20600202',
'20610121', '20620209', '20630129', '20640217', '20650205',
'20660126', '20670214', '20680203', '20690123', '20700211',
'20710131', '20720219', '20730207', '20740127', '20750215',
'20760205', '20770124', '20780212', '20790202', '20800122',
'20810209', '20820129', '20830217', '20840206', '20850126',
'20860214', '20870203', '20880124', '20890210', '20900130',
'20910218', '20920207', '20930127', '20940215', '20950205',
'20960125', '20970212', '20980201', '20990121', '21000209'
]
'''
从1900年,至2100年每年的农历春节的公历日期
''' | zh-lunar-date | /zh_lunar_date-0.0.3-py3-none-any.whl/zhdate/constants.py | constants.py |
"""
changed: Saturday, 21st January 2023
by: Eilles Wan ([email protected])
"""
from datetime import datetime, timedelta
from itertools import accumulate
from .constants import CHINESENEWYEAR, CHINESEYEARCODE
from .model import ZhModel
class ZhDate:
def __init__(self, lunar_year, lunar_month, lunar_day, leap_month=False):
"""初始化函数
Arguments:
lunar_year {int} -- 农历年
lunar_month {int} -- 农历月份
lunar_day {int} -- 农历日
Keyword Arguments:
leap_month {bool} -- 是否是在农历闰月中 (default: {False})
"""
self.lunar_year = lunar_year
self.lunar_month = lunar_month
self.lunar_day = lunar_day
self.leap_month = leap_month
self.year_code = CHINESEYEARCODE[self.lunar_year - 1900]
self.newyear = datetime.strptime(
CHINESENEWYEAR[self.lunar_year - 1900], "%Y%m%d"
)
if not ZhDate.validate(lunar_year, lunar_month, lunar_day, leap_month):
raise TypeError(
"农历日期不支持所谓“{}”,超出农历1900年1月1日至2100年12月29日,或日期不存在".format(self)
)
def to_datetime(self):
"""农历日期转换称公历日期
Returns:
datetime -- 当前农历对应的公历日期
"""
return self.newyear + timedelta(days=self.__days_passed())
@staticmethod
def from_datetime(dt):
"""静态方法,从公历日期生成农历日期
Arguments:
dt {datetime} -- 公历的日期
Returns:
ZhDate -- 生成的农历日期对象
"""
lunar_year = dt.year
# 如果还没有到农历正月初一 农历年份减去1
lunar_year -= (
datetime.strptime(CHINESENEWYEAR[lunar_year - 1900], "%Y%m%d") - dt
).total_seconds() > 0
# 当时农历新年时的日期对象
newyear_dt = datetime.strptime(CHINESENEWYEAR[lunar_year - 1900], "%Y%m%d")
# 查询日期距离当年的春节差了多久
days_passed = (dt - newyear_dt).days
# 被查询日期的年份码
year_code = CHINESEYEARCODE[lunar_year - 1900]
# 取得本年的月份列表
month_days = ZhDate.decode(year_code)
for pos, days in enumerate(accumulate(month_days)):
if days_passed + 1 <= days:
month = pos + 1
lunar_day = month_days[pos] - (days - days_passed) + 1
break
leap_month = False
if (year_code & 0xF) == 0 or month <= (year_code & 0xF):
lunar_month = month
else:
lunar_month = month - 1
if (year_code & 0xF) != 0 and month == (year_code & 0xF) + 1:
leap_month = True
return ZhDate(lunar_year, lunar_month, lunar_day, leap_month)
@staticmethod
def today():
return ZhDate.from_datetime(datetime.now())
def __days_passed(self):
"""私有方法,计算当前农历日期和当年农历新年之间的天数差值
Returns:
int -- 差值天数
"""
month_days = ZhDate.decode(self.year_code)
        # 当前农历年的闰月,为0表示无闰月
month_leap = self.year_code & 0xF
# 当年无闰月,或者有闰月但是当前月小于闰月
if (month_leap == 0) or (self.lunar_month < month_leap):
days_passed_month = sum(month_days[: self.lunar_month - 1])
# 当前不是闰月,并且当前月份和闰月相同
elif (not self.leap_month) and (self.lunar_month == month_leap):
days_passed_month = sum(month_days[: self.lunar_month - 1])
else:
days_passed_month = sum(month_days[: self.lunar_month])
return days_passed_month + self.lunar_day - 1
def chinese(self):
ZHNUMS = "〇一二三四五六七八九十"
zh_year = ""
for i in range(0, 4):
zh_year += ZHNUMS[int(str(self.lunar_year)[i])]
if self.leap_month:
zh_month = "闰"
else:
zh_month = ""
if self.lunar_month == 1:
zh_month += "正"
elif self.lunar_month == 12:
zh_month += "腊"
elif self.lunar_month <= 10:
zh_month += ZHNUMS[self.lunar_month]
else:
zh_month += "十{}".format(ZHNUMS[self.lunar_month - 10])
zh_month += "月"
if self.lunar_day <= 10:
zh_day = "初{}".format(ZHNUMS[self.lunar_day])
elif self.lunar_day < 20:
zh_day = "十{}".format(ZHNUMS[self.lunar_day - 10])
elif self.lunar_day == 20:
zh_day = "二十"
elif self.lunar_day < 30:
zh_day = "廿{}".format(ZHNUMS[self.lunar_day - 20])
else:
zh_day = "三十"
year_tiandi = "{}年".format(ZhDate.__tiandi(self.lunar_year - 1900 + 36))
shengxiao = "鼠牛虎兔龙蛇马羊猴鸡狗猪"
return ZhModel(
zh_year=zh_year,
zh_month=zh_month,
zh_day=zh_day,
year_tiandi=year_tiandi,
shengxiao=shengxiao[(self.lunar_year - 1900) % 12],
)
def __str__(self):
"""打印字符串的方法
Returns:
str -- 标准格式农历日期字符串
"""
return "农历{}年{}{}月{}日".format(
self.lunar_year,
"闰" if self.leap_month else "",
self.lunar_month,
self.lunar_day,
)
def __repr__(self):
return self.__str__()
def __eq__(self, another):
if not isinstance(another, ZhDate):
raise TypeError("比较必须都是ZhDate类型")
cond1 = self.lunar_year == another.lunar_year
cond2 = self.lunar_month == another.lunar_month
cond3 = self.lunar_day == another.lunar_day
cond4 = self.leap_month == another.leap_month
return cond1 and cond2 and cond3 and cond4
def __add__(self, another):
if not isinstance(another, int):
raise TypeError("加法只支持整数天数相加")
return ZhDate.from_datetime(self.to_datetime() + timedelta(days=another))
def __sub__(self, another):
if isinstance(another, int):
return ZhDate.from_datetime(self.to_datetime() - timedelta(days=another))
elif isinstance(another, ZhDate):
return (self.to_datetime() - another.to_datetime()).days
elif isinstance(another, datetime):
return (self.to_datetime() - another).days
else:
raise TypeError("减法只支持整数,ZhDate, Datetime类型")
"""
以下为帮助函数
"""
@staticmethod
def __tiandi(anum):
tian = "甲乙丙丁戊己庚辛壬癸"
di = "子丑寅卯辰巳午未申酉戌亥"
return "{}{}".format(tian[anum % 10], di[anum % 12])
@staticmethod
def validate(year, month, day, leap):
"""农历日期校验
Arguments:
year {int} -- 农历年份
month {int} -- 农历月份
day {int} -- 农历日期
leap {bool} -- 农历是否为闰月日期
Returns:
bool -- 校验是否通过
"""
# 年份低于1900,大于2100,或者月份不属于 1-12,或者日期不属于 1-30,返回校验失败
if not (1900 <= year <= 2100 and 1 <= month <= 12 and 1 <= day <= 30):
return False
year_code = CHINESEYEARCODE[year - 1900]
# 有闰月标志
if leap:
if (year_code & 0xF) != month: # 年度闰月和校验闰月不一致的话,返回校验失败
return False
elif day == 30: # 如果日期是30的话,直接返回年度代码首位是否为1,即闰月是否为大月
return (year_code >> 16) == 1
else: # 年度闰月和当前月份相同,日期不为30的情况,返回通过
return True
elif day <= 29: # 非闰月,并且日期小于等于29,返回通过
return True
else: # 非闰月日期为30,返回年度代码中的月份位是否为1,即是否为大月
return ((year_code >> (12 - month) + 4) & 1) == 1
@staticmethod
def decode(year_code):
"""解析年度农历代码函数
Arguments:
year_code {int} -- 从年度代码数组中获取的代码整数
Returns:
list[int, ] -- 当前年度代码解析以后形成的每月天数数组,已将闰月嵌入对应位置,即有闰月的年份返回的列表长度为13,否则为12
"""
        # 中间 12 位(第 4 位到第 15 位)自低位到高位依次对应 12 月到 1 月,
        # 每次 insert(0, ...) 把新解析的月份放到列表开头,最终得到按 1~12 月排序的天数列表
month_days = []
for i in range(4, 16):
# 向右移动相应的位数
# 1 这个数只有一位,与任何数进行 按位与 都只能获得其
# 从后往前第一位,对!是获得这一位
month_days.insert(0, 30 if (year_code >> i) & 1 else 29)
# 0xf 即 15 即二进制的 1111
# 所以 1111 与任何数进行 按位与
# 都将获得其最后四位,对!是获得这最后四位
# 后四位非0则表示有闰月(多一月),则插入一次月份
# 而首四位表示闰月的天数
if year_code & 0xF:
month_days.insert((year_code & 0xF), 30 if year_code >> 16 else 29)
# 返回一个列表
return month_days
@staticmethod
def month_days(year):
"""根据年份返回当前农历月份天数list
Arguments:
year {int} -- 1900到2100的之间的整数
Returns:
[int] -- 农历年份所对应的农历月份天数列表
"""
return ZhDate.decode(CHINESEYEARCODE[year - 1900]) | zh-lunar-date | /zh_lunar_date-0.0.3-py3-none-any.whl/zhdate/__init__.py | __init__.py |
# 中文錯誤類型文字增量
## 安裝
```bash
pip install zh-mistake-text-aug
```
## 使用 (Pipeline)
```python
from zh_mistake_text_aug import Pipeline
import random
random.seed(7)
pipeline = Pipeline()
augs = pipeline("中文語料生成")
for aug in augs:
print(aug)
```
```
type='MissingWordMaker' correct='中文語料生成' incorrect='中文料生成' incorrect_start_at=2 incorrect_end_at=2 span='語'
type='MissingVocabMaker' correct='中文語料生成' incorrect='語料生成' incorrect_start_at=0 incorrect_end_at=2 span='中文'
type='PronounceSimilarWordMaker' correct='中文語料生成' incorrect='中文語尥生成' incorrect_start_at=3 incorrect_end_at=3 span='尥'
type='PronounceSameWordMaker' correct='中文語料生成' incorrect='諥文語料生成' incorrect_start_at=0 incorrect_end_at=0 span='諥'
type='PronounceSimilarVocabMaker' correct='中文語料生成' incorrect='鍾文語料生成' incorrect_start_at=0 incorrect_end_at=2 span='鍾文'
type='PronounceSameVocabMaker' correct='中文語料生成' incorrect='中文预料生成' incorrect_start_at=2 incorrect_end_at=4 span='预料'
type='RedundantWordMaker' correct='中文語料生成' incorrect='成中文語料生成' incorrect_start_at=0 incorrect_end_at=0 span='成'
type='MistakWordMaker' correct='中文語料生成' incorrect='谁文語料生成' incorrect_start_at=0 incorrect_end_at=0 span='谁'
```
## 可用方法
```python
from zh_mistake_text_aug.data_maker import ...
```
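除了 Pipeline,也可以單獨呼叫某一個 DataMaker(以下以 `MissingWordMaker` 為例,僅為示意,實際輸出欄位以所安裝版本為準):
```python
from zh_mistake_text_aug.data_maker import MissingWordMaker
maker = MissingWordMaker()
noise = maker("中文語料生成")  # 回傳 NoiseCorpus,含 correct / incorrect 等欄位
print(noise.correct, noise.incorrect)
```
下表列出可用的 DataMaker: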
|Data Maker|Description|
|---|---|
|MissingWordMaker|隨機缺字|
|MissingVocabMaker|隨機缺詞|
|PronounceSimilarWordMaker|隨機相似字替換|
|PronounceSimilarWordPlusMaker|編輯距離找發音相似並且用高頻字替換|
|PronounceSimilarVocabMaker|發音相似詞替換|
|PronounceSameWordMaker|發音相同字替換|
|PronounceSameVocabMaker|發音相同詞替換|
|RedundantWordMaker|隨機複製旁邊一個字作為冗餘字|
|MistakWordMaker|隨機替換字|
|MistakeWordHighFreqMaker|隨機替換高頻字|
|MissingWordHighFreqMaker|隨機刪除高頻字| | zh-mistake-text-aug | /zh-mistake-text-aug-0.2.1.tar.gz/zh-mistake-text-aug-0.2.1/README.md | README.md |
import random
from loguru import logger
import jieba
from .utils import Pronounce2Word
from abc import ABC
from opencc import OpenCC
from typing import Any
from .data_model import NoiseCorpus
from .exception import DataNotFundError,FindOrConvertError
import os
import py_chinese_pronounce
from copy import copy
high_freq_zh_char_path = os.path.join(
os.path.dirname(__file__),
'high_freq_zh_char.txt'
)
class BaseDataMaker(ABC):
def __init__(self,*args,**kwargs) -> None:
self.t2s = OpenCC('t2s.json').convert
self.setup()
def setup(self):
"""
Do something if needed
"""
pass
def make(self,x)->NoiseCorpus:
raise NotImplementedError
def __call__(self, *args: Any, **kwargs: Any)-> NoiseCorpus:
data = self.make(*args,**kwargs)
data.type = self.__class__.__name__
if self.t2s(data.correct) == self.t2s(data.incorrect):
raise FindOrConvertError('After t2s compare is same')
return data
class MissingWordMaker(BaseDataMaker):
def make(self, x)->NoiseCorpus:
correct = x[:]
rand = random.randint(0, len(x)-1)
x = list(x)
span = x.pop(rand)
x = ''.join(x)
return NoiseCorpus(
correct=correct,
incorrect=x,
)
class MissingVocabMaker(BaseDataMaker):
def make(self,x)->NoiseCorpus:
correct = x[:]
seg_list = list(jieba.cut(x))
rand = random.randint(0, len(seg_list)-1)
span = seg_list.pop(rand)
return NoiseCorpus(
correct=correct,
incorrect=''.join(seg_list)
)
class PronounceSimilarWordMaker(BaseDataMaker):
"""
去除聲符找相似
"""
def __init__(self,*args,p2w=None,**kwargs):
super().__init__()
if p2w is not None:
self.p2w = p2w
else:
self.p2w = Pronounce2Word()
def make(self,x):
correct = x[:]
rand = random.randint(0, len(x)-1)
replace_word = x[rand]
try:
similar_vocab = self.p2w.find_similar(replace_word)
except:
raise FindOrConvertError('p2w find similar error')
rand_for_select_similar_word = random.randint(0, len(similar_vocab)-1)
select_similar_word = similar_vocab[rand_for_select_similar_word]
x = [c for c in x]
x[rand] = select_similar_word
x = ''.join(x)
return NoiseCorpus(
correct=correct,
incorrect=x
)
class PronounceSimilarWordPlusMaker(BaseDataMaker):
"""
編輯距離找相似+高頻字
"""
def __init__(self,*args,p2w=None,level=1,**kwargs):
super().__init__()
self.level = level
if p2w is not None:
self.p2w = p2w
else:
self.p2w = Pronounce2Word()
def setup(self):
self.high_freq_zh_char = []
f = open(high_freq_zh_char_path, encoding='utf-8')
for line in f.readlines():
self.high_freq_zh_char.append(line.replace('\n', ''))
f.close()
def make(self,x):
correct = x[:]
rand = random.randint(0, len(x)-1)
replace_word = x[rand]
try:
new_han_pronounces = self.p2w._find_similar_han_pronounces(self.p2w.to_han(replace_word),level=self.level)
except:
raise FindOrConvertError
random.shuffle(new_han_pronounces)
new_han_pronounce = new_han_pronounces[0]
new_words = self.p2w.han2word(new_han_pronounce)
new_words = list(filter(lambda x:x in self.high_freq_zh_char,new_words))
if len(new_words) == 0:
raise DataNotFundError("No high freq char in string")
random.shuffle(new_words)
new_word = new_words[0]
if new_word == replace_word:
raise DataNotFundError("same word")
rand_for_select_similar_word = random.randint(0, len(new_word)-1)
select_similar_word = new_word[rand_for_select_similar_word]
x = [c for c in x]
x[rand] = select_similar_word
x = ''.join(x)
return NoiseCorpus(
correct=correct,
incorrect=x
)
class PronounceSameWordMaker(BaseDataMaker):
def __init__(self,*args,p2w=None,**kwargs):
super().__init__()
if p2w is not None:
self.p2w = p2w
else:
self.p2w = Pronounce2Word()
def make(self,x):
correct = x[:]
rand = random.randint(0, len(x)-1)
replace_word = x[rand]
try:
similar_vocab = self.p2w.find_same(replace_word)
except:
raise FindOrConvertError
if len(similar_vocab) == 0:
raise DataNotFundError('similar_vocab not found')
rand_for_select_similar_word = random.randint(0, len(similar_vocab)-1)
select_similar_word = similar_vocab[rand_for_select_similar_word]
x = [c for c in x]
x[rand] = select_similar_word
x = ''.join(x)
return NoiseCorpus(
correct=correct,
incorrect=x
)
class PronounceSimilarVocabMaker(BaseDataMaker):
def __init__(self,*args,p2w=None,**kwargs):
super().__init__()
if p2w is not None:
self.p2w = p2w
else:
self.p2w = Pronounce2Word()
def make(self,x):
correct = x[:]
seg_list = list(jieba.cut(x))
rand = random.randint(0, len(seg_list)-1)
span = seg_list[:].pop(rand)
try:
similar_pronounce_spans = self.p2w.find_similar_vocab(span)
except:
raise FindOrConvertError
if len(similar_pronounce_spans) == 0:
raise DataNotFundError('similar_pronounce_spans not found')
random.shuffle(similar_pronounce_spans)
similar_pronounce_span = similar_pronounce_spans[0]
seg_list[rand] = similar_pronounce_span
x = seg_list
x = ''.join(x)
return NoiseCorpus(
correct=correct,
incorrect=x
)
class PronounceSameVocabMaker(BaseDataMaker):
def __init__(self,*args,p2w=None,**kwargs):
super().__init__()
if p2w is not None:
self.p2w = p2w
else:
self.p2w = Pronounce2Word()
def make(self,x):
correct = x[:]
seg_list = list(jieba.cut(x))
rand = random.randint(0, len(seg_list)-1)
span = seg_list[:].pop(rand)
try:
similar_pronounce_spans = self.p2w.find_same_vocab(span)
except:
raise FindOrConvertError
if len(similar_pronounce_spans) == 0:
raise DataNotFundError('similar_pronounce_spans not found')
random.shuffle(similar_pronounce_spans)
similar_pronounce_span = similar_pronounce_spans[0]
# logger.debug(f"{rand} {seg_list}")
seg_list[rand] = similar_pronounce_span
x = seg_list
x = ''.join(x)
return NoiseCorpus(
correct=correct,
incorrect=x
)
class RedundantWordMaker(BaseDataMaker):
def __init__(self,*args,p2w=None,**kwargs):
super().__init__()
if p2w is not None:
self.p2w = p2w
else:
self.p2w = Pronounce2Word()
def make(self,x):
correct = x[:]
rand = random.randint(0, len(x)-1)
x = list(x)
span = x[rand-1]
x.insert(rand, x[rand-1])
x = ''.join(x)
return NoiseCorpus(
correct=correct,
incorrect=x
)
class MistakWordMaker(BaseDataMaker):
def __init__(self,*args,p2w=None,**kwargs):
super().__init__()
if p2w is not None:
self.p2w = p2w
else:
self.p2w = Pronounce2Word()
def make(self,x):
ch_unis = list(self.p2w.uni2cns_map.keys())
        random_ch_uni_index = random.randint(0, len(ch_unis) - 1)
try:
random_ch = self.p2w._uni2word(ch_unis[random_ch_uni_index])
except:
raise FindOrConvertError("p2w._uni2word out of range")
correct = x[:]
rand = random.randint(0, len(x)-1)
span = random_ch
x = list(x)
x[rand] = span
x = ''.join(x)
return NoiseCorpus(
correct=correct,
incorrect=x
)
class MistakeWordHighFreqMaker(BaseDataMaker):
def setup(self):
self.high_freq_zh_char = []
f = open(high_freq_zh_char_path, encoding='utf-8')
for line in f.readlines():
self.high_freq_zh_char.append(line.replace('\n', ''))
f.close()
def make(self,x):
random_ch_uni_index = random.randint(0, len(self.high_freq_zh_char)-1)
random_ch = self.high_freq_zh_char[random_ch_uni_index]
correct = x[:]
rand = random.randint(0, len(x)-1)
span = random_ch
x = list(x)
x[rand] = span
x = ''.join(x)
return NoiseCorpus(
correct=correct,
incorrect=x
)
class MissingWordHighFreqMaker(BaseDataMaker):
def setup(self):
self.high_freq_zh_char = []
f = open(high_freq_zh_char_path, encoding='utf-8')
for line in f.readlines():
self.high_freq_zh_char.append(line.replace('\n', ''))
f.close()
def make(self,x):
high_freq_char_list = []
for char_x in list(x):
if char_x in self.high_freq_zh_char:
high_freq_char_list.append(char_x)
if len(high_freq_char_list) == 0:
raise DataNotFundError("No high freq char in string")
random_ch = random.choice(high_freq_char_list)
correct = x[:]
x = list(x)
rand = x.index(random_ch)
span = x.pop(rand)
x = ''.join(x)
return NoiseCorpus(
correct=correct,
incorrect=x
)
class RandomInsertVacabMaker(BaseDataMaker):
def setup(self):
sc_dict_path = os.path.join(
os.path.dirname(py_chinese_pronounce.__file__),
'sc-dict.txt'
)
self.sc_dict = open(sc_dict_path,'r').read().split()
def make(self,x):
correct = x[:]
x = x[:]
random_vocab = self.sc_dict[random.randint(0,len(self.sc_dict)-1)]
rand_ins_postion = random.randint(0,len(x))
x = list(x)
x.insert(rand_ins_postion,random_vocab)
x = ''.join(x)
return NoiseCorpus(
correct=correct,
incorrect=x
)
class Pipeline():
def __init__(self,makers=None,maker_weight=None):
self.maker_weight = maker_weight
self.makers = makers
if makers is None:
self.makers = []
for g_var_name in copy(globals()):
try:
if g_var_name != BaseDataMaker.__name__ and issubclass(globals()[g_var_name],BaseDataMaker):
print("O",g_var_name)
self.makers.append(globals()[g_var_name]())
except:
print("X",g_var_name)
if self.maker_weight != None:
assert len(self.maker_weight) == len(self.makers),'While have `maker_weight` must provide maker_weight for each maker'
def _noraml_call(self,x,k,verbose=True,makers=None):
out = []
if makers == None:
makers = self.makers
for maker in makers:
retry = 0
while retry<3:
try:
res = maker(x)
out.append(res)
break
except Exception as e:
retry += 1
if verbose:
logger.warning(f"{x} - {e} - {type(e)} - {maker} retry:{retry}")
random.shuffle(out)
return out[:k]
def _weight_call(self,x,k, verbose=True):
makers = random.choices(
population=self.makers,
weights=self.maker_weight,
k=k
)
return self._noraml_call(x,k,verbose,makers)
def __call__(self,x, k=1,verbose=True):
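        """Generate up to ``k`` noisy variants of sentence ``x`` using the configured makers (see ``_noraml_call`` / ``_weight_call``)."""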
if self.maker_weight == None:
return self._noraml_call(x,k,verbose)
return self._weight_call(x,k,verbose) | zh-mistake-text-aug | /zh-mistake-text-aug-0.2.1.tar.gz/zh-mistake-text-aug-0.2.1/zh_mistake_text_aug/data_maker.py | data_maker.py |
import random
import jieba
from abc import ABC
from opencc import OpenCC
from typing import Any
import os
import py_chinese_pronounce
from .utils import Pronounce2Word,is_mistake_happend_on_disable_words
from .data_model import NoiseCorpus
from .exception import *
high_freq_zh_char_path = os.path.join(
os.path.dirname(__file__),
'high_freq_zh_char.txt'
)
class BaseDataMaker(ABC):
"""
抽像基類
:meta private:
"""
def __init__(self, *args, **kwargs) -> None:
self.t2s = OpenCC('t2s.json').convert
self.setup()
def setup(self):
"""
Do something if needed
"""
pass
def make(self, x):
raise NotImplementedError
def __call__(self, *args: Any, **kwargs: Any):
data = self.make(*args, **kwargs)
data.type = self.__class__.__name__
if self.t2s(data.correct) == self.t2s(data.incorrect):
raise TraditionalSimplifiedSameError('After t2s compare is same')
if is_mistake_happend_on_disable_words(data.correct,data.incorrect):
raise MistakeTextHappendOnDisableWordsError()
return data
class NoChangeMaker(BaseDataMaker):
"""
保持不變換
"""
def make(self, x):
return NoiseCorpus(
correct=x,
incorrect=x,
)
def __call__(self, *args: Any, **kwargs: Any) -> NoiseCorpus:
data = self.make(*args, **kwargs)
data.type = self.__class__.__name__
return data
class MissingWordMaker(BaseDataMaker):
"""
隨機缺字
"""
def make(self, x):
correct = x[:]
rand = random.randint(0, len(x)-1)
x = list(x)
span = x.pop(rand)
x = ''.join(x)
return NoiseCorpus(
correct=correct,
incorrect=x,
)
class MissingVocabMaker(BaseDataMaker):
"""
隨機缺詞
"""
def make(self, x):
correct = x[:]
seg_list = list(jieba.cut(x))
rand = random.randint(0, len(seg_list)-1)
span = seg_list.pop(rand)
return NoiseCorpus(
correct=correct,
incorrect=''.join(seg_list)
)
class PronounceSimilarWordMaker(BaseDataMaker):
"""
去除聲符找相似
"""
def __init__(self, *args, p2w=None, **kwargs):
super().__init__()
if p2w is not None:
self.p2w = p2w
else:
self.p2w = Pronounce2Word()
def make(self, x):
correct = x[:]
rand = random.randint(0, len(x)-1)
replace_word = x[rand]
try:
similar_vocab = self.p2w.find_similar(replace_word)
except Exception as exc:
raise FindOrConvertError('p2w find similar error') from exc
rand_for_select_similar_word = random.randint(0, len(similar_vocab)-1)
select_similar_word = similar_vocab[rand_for_select_similar_word]
x = [c for c in x]
x[rand] = select_similar_word
x = ''.join(x)
return NoiseCorpus(
correct=correct,
incorrect=x
)
class PronounceSimilarWordPlusMaker(BaseDataMaker):
"""
編輯距離找相似+高頻字
"""
def __init__(self, *args, p2w=None, level=1, limit_k=10, **kwargs):
super().__init__()
self.level = level
self.limit_k = limit_k
if p2w is not None:
self.p2w = p2w
else:
self.p2w = Pronounce2Word()
def setup(self):
self.high_freq_zh_char = []
f = open(high_freq_zh_char_path, encoding='utf-8')
for line in f.readlines():
self.high_freq_zh_char.append(line.replace('\n', ''))
f.close()
def make(self, x):
correct = x[:]
rand = random.randint(0, len(x)-1)
replace_word = x[rand]
try:
new_han_pronounces = self.p2w._find_similar_han_pronounces(
self.p2w.to_han(replace_word), level=self.level)
except Exception as exc:
raise FindOrConvertError from exc
new_han_pronounces = new_han_pronounces[:self.limit_k]
random.shuffle(new_han_pronounces)
new_han_pronounce = new_han_pronounces[0]
new_words = self.p2w.han2word(new_han_pronounce)
new_words = list(
filter(lambda x: x in self.high_freq_zh_char, new_words))
if len(new_words) == 0:
raise ZeorSearchResultsError("No high freq char in string")
random.shuffle(new_words)
new_word = new_words[0]
if new_word == replace_word:
raise ZeorSearchResultsError("same word")
rand_for_select_similar_word = random.randint(0, len(new_word)-1)
select_similar_word = new_word[rand_for_select_similar_word]
x = [c for c in x]
x[rand] = select_similar_word
x = ''.join(x)
return NoiseCorpus(
correct=correct,
incorrect=x
)
class PronounceSameWordMaker(BaseDataMaker):
"""
相同發音字替換
"""
def __init__(self, *args, p2w=None, **kwargs):
super().__init__()
if p2w is not None:
self.p2w = p2w
else:
self.p2w = Pronounce2Word()
def make(self, x):
correct = x[:]
rand = random.randint(0, len(x)-1)
replace_word = x[rand]
try:
similar_vocab = self.p2w.find_same(replace_word)
except Exception as exc:
raise FindOrConvertError from exc
if len(similar_vocab) == 0:
raise ZeorSearchResultsError('similar_vocab not found')
rand_for_select_similar_word = random.randint(0, len(similar_vocab)-1)
select_similar_word = similar_vocab[rand_for_select_similar_word]
x = [c for c in x]
x[rand] = select_similar_word
x = ''.join(x)
return NoiseCorpus(
correct=correct,
incorrect=x
)
class PronounceSimilarVocabMaker(BaseDataMaker):
"""
相似發聲詞彙替換
"""
def __init__(self, *args, p2w=None, **kwargs):
super().__init__()
if p2w is not None:
self.p2w = p2w
else:
self.p2w = Pronounce2Word()
def make(self, x):
correct = x[:]
seg_list = list(jieba.cut(x))
rand = random.randint(0, len(seg_list)-1)
span = seg_list[:].pop(rand)
try:
similar_pronounce_spans = self.p2w.find_similar_vocab(span)
except Exception as exc:
raise FindOrConvertError from exc
if len(similar_pronounce_spans) == 0:
raise ZeorSearchResultsError('similar_pronounce_spans not found')
random.shuffle(similar_pronounce_spans)
similar_pronounce_span = similar_pronounce_spans[0]
seg_list[rand] = similar_pronounce_span
x = seg_list
x = ''.join(x)
return NoiseCorpus(
correct=correct,
incorrect=x
)
class PronounceSimilarVocabPlusMaker(BaseDataMaker):
def __init__(self, *args, p2w=None, level=1, **kwargs):
super().__init__()
self.level = level
if p2w is not None:
self.p2w = p2w
else:
self.p2w = Pronounce2Word()
def make(self, x):
correct = x[:]
seg_list = list(jieba.cut(x))
rand = random.randint(0, len(seg_list)-1)
span = seg_list[:].pop(rand)
try:
similar_pronounce_spans = self.p2w.find_similar_vocab_level(
span, level=self.level)
except Exception as exc:
raise FindOrConvertError from exc
if len(similar_pronounce_spans) == 0:
raise ZeorSearchResultsError('similar_pronounce_spans not found')
random.shuffle(similar_pronounce_spans)
similar_pronounce_span = similar_pronounce_spans[0]
seg_list[rand] = similar_pronounce_span
x = seg_list
x = ''.join(x)
return NoiseCorpus(
correct=correct,
incorrect=x
)
class PronounceSameVocabMaker(BaseDataMaker):
"""
相同發聲詞彙替換
"""
def __init__(self, *args, p2w=None, **kwargs):
super().__init__()
if p2w is not None:
self.p2w = p2w
else:
self.p2w = Pronounce2Word()
def make(self, x):
correct = x[:]
seg_list = list(jieba.cut(x))
rand = random.randint(0, len(seg_list)-1)
span = seg_list[:].pop(rand)
try:
similar_pronounce_spans = self.p2w.find_same_vocab(span)
except Exception as exc:
raise FindOrConvertError from exc
if len(similar_pronounce_spans) == 0:
raise ZeorSearchResultsError('similar_pronounce_spans not found')
random.shuffle(similar_pronounce_spans)
similar_pronounce_span = similar_pronounce_spans[0]
# logger.debug(f"{rand} {seg_list}")
seg_list[rand] = similar_pronounce_span
x = seg_list
x = ''.join(x)
return NoiseCorpus(
correct=correct,
incorrect=x
)
class RedundantWordMaker(BaseDataMaker):
"""
隨機複製旁邊字插入
"""
def __init__(self, *args, p2w=None, **kwargs):
super().__init__()
if p2w is not None:
self.p2w = p2w
else:
self.p2w = Pronounce2Word()
def make(self, x):
correct = x[:]
rand = random.randint(0, len(x)-1)
x = list(x)
span = x[rand-1]
x.insert(rand, x[rand-1])
x = ''.join(x)
return NoiseCorpus(
correct=correct,
incorrect=x
)
class MistakWordMaker(BaseDataMaker):
"""
隨機替換字
"""
def __init__(self, *args, p2w=None, **kwargs):
super().__init__()
if p2w is not None:
self.p2w = p2w
else:
self.p2w = Pronounce2Word()
def make(self, x):
ch_unis = list(self.p2w.uni2cns_map.keys())
        random_ch_uni_index = random.randint(0, len(ch_unis) - 1)
try:
random_ch = self.p2w._uni2word(ch_unis[random_ch_uni_index])
except Exception as exc:
raise FindOrConvertError("p2w._uni2word out of range") from exc
correct = x[:]
rand = random.randint(0, len(x)-1)
span = random_ch
x = list(x)
x[rand] = span
x = ''.join(x)
return NoiseCorpus(
correct=correct,
incorrect=x
)
class MistakeWordHighFreqMaker(BaseDataMaker):
"""
隨機替換高頻字用字
"""
def setup(self):
self.high_freq_zh_char = []
f = open(high_freq_zh_char_path, encoding='utf-8')
for line in f.readlines():
self.high_freq_zh_char.append(line.replace('\n', ''))
f.close()
def make(self, x):
random_ch_uni_index = random.randint(0, len(self.high_freq_zh_char)-1)
random_ch = self.high_freq_zh_char[random_ch_uni_index]
correct = x[:]
rand = random.randint(0, len(x)-1)
span = random_ch
x = list(x)
x[rand] = span
x = ''.join(x)
return NoiseCorpus(
correct=correct,
incorrect=x
)
class MissingWordHighFreqMaker(BaseDataMaker):
"""
隨機移除高頻字
"""
def setup(self):
self.high_freq_zh_char = []
f = open(high_freq_zh_char_path, encoding='utf-8')
for line in f.readlines():
self.high_freq_zh_char.append(line.replace('\n', ''))
f.close()
def make(self, x):
high_freq_char_list = []
for char_x in list(x):
if char_x in self.high_freq_zh_char:
high_freq_char_list.append(char_x)
if len(high_freq_char_list) == 0:
raise ZeorSearchResultsError("No high freq char in string")
random_ch = random.choice(high_freq_char_list)
correct = x[:]
x = list(x)
rand = x.index(random_ch)
span = x.pop(rand)
x = ''.join(x)
return NoiseCorpus(
correct=correct,
incorrect=x
)
class RandomInsertVacabMaker(BaseDataMaker):
"""
隨機插入詞彙
"""
def setup(self):
sc_dict_path = os.path.join(
os.path.dirname(py_chinese_pronounce.__file__),
'sc-dict.txt'
)
self.sc_dict = open(sc_dict_path, 'r', encoding='utf-8').read().split()
def make(self, x):
correct = x[:]
x = x[:]
random_vocab = self.sc_dict[random.randint(0, len(self.sc_dict)-1)]
rand_ins_postion = random.randint(0, len(x))
x = list(x)
x.insert(rand_ins_postion, random_vocab)
x = ''.join(x)
return NoiseCorpus(
correct=correct,
incorrect=x
) | zh-mistake-text-gen | /zh_mistake_text_gen-0.3.8-py3-none-any.whl/zh_mistake_text_gen/data_maker.py | data_maker.py |
from .data_maker import *
from .exception import *
from copy import copy
from loguru import logger
class Pipeline():
def __init__(self, makers=None, maker_weight=None):
"""
管道類用於快速呼叫多個`data_maker`方法
:param makers: Optional 自訂傳入多個`DataMaker`實例
:param maker_weight: Optional 為每一個 `DataMaker` 設定被選中機率
"""
self.maker_weight = maker_weight
self.makers = makers
if makers is None:
self.makers = []
for g_var_name in copy(globals()):
try:
if g_var_name not in [NoChangeMaker.__name__, BaseDataMaker.__name__] and issubclass(globals()[g_var_name], BaseDataMaker):
print("O", g_var_name)
self.makers.append(globals()[g_var_name]())
else:
print("X", g_var_name)
except:
print("X", g_var_name)
if self.maker_weight != None:
assert len(self.maker_weight) == len(
self.makers), 'While have `maker_weight` must provide maker_weight for each maker'
def _noraml_call(self, x, k, no_change_on_gen_fail=False, verbose=True, makers=None):
out = []
if makers is None:
makers = self.makers
for maker in makers:
retry = 0
while retry < 5:
try:
res = maker(x)
out.append(res)
break
except Exception as e:
retry += 1
if verbose:
logger.warning(
f"{x} - {e} - {type(e)} - {maker} retry:{retry}")
if len(out) == 0 and not no_change_on_gen_fail:
raise ZeorSearchResultsError("Data gen fail, len(out) == 0")
elif len(out) == 0 and no_change_on_gen_fail:
return [NoiseCorpus(
correct=x,
incorrect=x,
type=NoChangeMaker.__name__
)]
random.shuffle(out)
return out[:k]
def _weight_call(self, x, k, no_change_on_gen_fail, verbose=True):
makers = random.choices(
population=self.makers,
weights=self.maker_weight,
k=k
)
return self._noraml_call(x, k, no_change_on_gen_fail, verbose, makers)
def __call__(self, x, error_per_sent=1, no_change_on_gen_fail=False, verbose=True):
"""
呼叫管道生成資料
:param x: 一段正確的中文句子
:param error_per_sent: Optional 在句子中生成多少錯誤
:param no_change_on_gen_fail: 當生成失敗的時候允許使用原句(即不變換),啟用時不拋出錯誤,反之。預設:`False`
:param verbose: 除錯或額外訊息
:type x: str
:type error_pre_sent: int
:type no_change_on_gen_fail: bool
:type verbose: bool
:retrun: 包含錯誤句子的物件
"""
ori_x = x
assert error_per_sent >= 1
error_types = []
for i in range(error_per_sent):
if self.maker_weight is None:
out = self._noraml_call(x, 1, no_change_on_gen_fail, verbose)
x = out[0].incorrect
else:
out = self._weight_call(x, 1, no_change_on_gen_fail, verbose)
x = out[0].incorrect
#
error_types.append(out[0].type)
for o in out:
o.correct = ori_x
out[0].type = '_'.join(error_types)
return out[0] | zh-mistake-text-gen | /zh_mistake_text_gen-0.3.8-py3-none-any.whl/zh_mistake_text_gen/pipeline.py | pipeline.py |
import logging
import os
import pickle
from typing import Any, Dict, Optional, Text
from rasa.nlu.config import RasaNLUModelConfig
from rasa.nlu.extractors.extractor import EntityExtractor
from rasa.nlu.model import Metadata
from rasa.nlu.components import Component
from rasa.shared.nlu.training_data.message import Message
from rasa.shared.nlu.training_data.training_data import TrainingData
from rasa.shared.nlu.constants import ENTITIES, TEXT
logger = logging.getLogger(__name__)
class TFNLUExtractor(EntityExtractor):
name = "addons_ner_tfnlu"
provides = ["entities"]
requires = ["tensorflow", "tfnlu"]
def __init__(self,
component_config: Optional[Dict[Text, Any]] = None,
model=None) -> None:
self.model = model
self.result_dir = None if 'result_dir' not in component_config else component_config['result_dir']
self.batch_size = component_config.get("batch_size", 32)
self.epochs = component_config.get("epochs", 20)
self.encoder_path = component_config.get('encoder_path', None)
super(TFNLUExtractor, self).__init__(component_config)
@classmethod
def required_packages(cls):
return ["tensorflow", "tfnlu"]
def train(
self, training_data: TrainingData, cfg: RasaNLUModelConfig, **kwargs: Any
) -> None:
from tfnlu import Tagger
X = []
Y = []
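        # Build character-level label sequences: every position defaults to 'O', then each
        # entity span is overwritten with 'B<entity>' on its first character and 'I<entity>' on the rest.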
for ex in training_data.nlu_examples:
text = ex.get(TEXT)
entities = ex.get(ENTITIES)
x = list(text)
y = ['O'] * len(x)
if entities is not None:
for e in entities:
for i in range(e['start'], e['end']):
y[i] = 'I' + e['entity']
y[e['start']] = 'B' + e['entity']
X.append(x)
Y.append(y)
self.model = model = Tagger(encoder_path=self.encoder_path)
model.fit(X, Y, validation_data=(X, Y), batch_size=min(len(X), self.batch_size), epochs=self.epochs)
@classmethod
def load(
cls,
meta: Dict[Text, Any],
model_dir: Optional[Text] = None,
model_metadata: Optional["Metadata"] = None,
cached_component: Optional["Component"] = None,
**kwargs: Any
) -> Component:
if cached_component:
return cached_component
else:
path = os.path.join(model_dir, meta['name'] + '.pkl')
with open(path, 'rb') as fp:
model = pickle.load(fp)
return cls(meta, model)
def process(self, message: Message, **kwargs: Any) -> None:
from tfnlu.tagger.extract_entities import extract_entities
text = message.get(TEXT)
if text:
logger.debug('predict entities %s', text)
pred = self.model.predict([list(text)], verbose=0)
entities = extract_entities(pred[0], text)
ent_data = []
for ent in entities:
ent_data.append({
"entity": ent[2],
"value": ent[3],
"start": ent[0],
"end": ent[1],
"confidence": None
})
logger.debug('predict entities %s %s', text, str(ent_data))
message.set("entities",
message.get(ENTITIES, []) + ent_data,
add_to_output=True)
def persist(self, file_name: Text, model_dir: Text) -> Optional[Dict[Text, Any]]:
"""Persist this model into the passed directory.
Returns the metadata necessary to load the model again."""
path = os.path.join(model_dir, self.name + '.pkl')
with open(path, 'wb') as fp:
pickle.dump(self.model, fp)
return { 'name': self.name } | zh-rasa | /zh_rasa-0.0.11-py3-none-any.whl/zh_rasa/extractors/tfnlu_extractor.py | tfnlu_extractor.py |
import logging
import os
import pickle
from typing import Any, Dict, Optional, Text
from rasa.nlu.model import Metadata
from rasa.nlu.components import Component
from rasa.nlu.config import RasaNLUModelConfig
from rasa.shared.nlu.training_data.message import Message
from rasa.shared.nlu.training_data.training_data import TrainingData
from rasa.shared.nlu.constants import INTENT, TEXT
logger = logging.getLogger(__name__)
class TFNLUClassifier(Component):
supported_language_list = ["zh"]
name = "addons_intent_classifier_tfnlu"
provides = ["intent", "intent_ranking"]
def __init__(self,
component_config: Optional[Dict[Text, Any]],
model=None) -> None:
self.model = model
self.result_dir = None if 'result_dir' not in component_config else component_config['result_dir']
self.batch_size = component_config.get("batch_size", 32)
self.epochs = component_config.get("epochs", 20)
self.encoder_path = component_config.get('encoder_path', None)
super(TFNLUClassifier, self).__init__(component_config)
@classmethod
def required_packages(cls):
return ["tensorflow", "tfnlu"]
def train(self,
training_data: TrainingData,
config: RasaNLUModelConfig,
**kwargs: Any) -> None:
from tfnlu import Classification
X = []
Y = []
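        # Each training example becomes a (character list, intent label) pair for tfnlu's Classification model.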
for ex in training_data.intent_examples:
text = ex.get(TEXT)
intent = ex.get(INTENT)
X.append(list(text))
Y.append(intent)
self.model = model = Classification(encoder_path=self.encoder_path)
model.fit(X, Y, batch_size=min(self.batch_size, len(X)), epochs=self.epochs)
@classmethod
def load(
cls,
meta: Dict[Text, Any],
model_dir: Optional[Text] = None,
model_metadata: Optional["Metadata"] = None,
cached_component: Optional["Component"] = None,
**kwargs: Any
) -> Component:
if cached_component:
return cached_component
else:
path = os.path.join(model_dir, meta['name'] + '.pkl')
with open(path, 'rb') as fp:
model = pickle.load(fp)
return cls(meta, model)
def process(self, message: Message, **kwargs: Any) -> None:
text = message.get(TEXT)
if text:
logger.debug('predict intent %s', text)
pred, probs = self.model.predict_proba([list(text)], verbose=0)
intent = {"name": pred[0], "confidence": probs[0]}
logger.debug('predict intent %s %s', text, pred[0])
message.set(INTENT, intent, add_to_output=True)
if message.get(INTENT) is not None:
return
def persist(self, file_name: Text, model_dir: Text) -> Dict[Text, Any]:
"""Persist this model into the passed directory.
Returns the metadata necessary to load the model again."""
path = os.path.join(model_dir, self.name + '.pkl')
with open(path, 'wb') as fp:
pickle.dump(self.model, fp)
return { 'name': self.name } | zh-rasa | /zh_rasa-0.0.11-py3-none-any.whl/zh_rasa/classifiers/tfnlu_classifier.py | tfnlu_classifier.py |
# zh-doclint
Note: This project is highly related to Chinese, so the document is written in Chinese.
## 简介
一个检查文档风格的工具。
## 安装
```
pip install zh-doclint
```
## 使用
```shell
$ zh-doclint --help
Usage: zh-doclint [OPTIONS] FPATH
Options:
--version Show the version and exit.
--help Show this message and exit.
$ ccat doc.md
我跟你讲,这里有问题. 这个
case一看就是“药丸”
$ zh-doclint doc.md
==========================================
E101: 英文与非标点的中文之间需要有一个空格
==========================================
LINE: 2
case一看就是“
--
...............
==================================================
E201: 只有中文或中英文混排中,一律使用中文全角标点
==================================================
LINE: 1
里有问题.
-
.........
==========================================================
E204: 中文文案中使用中文引号「」和『』,其中「」为外层引号
==========================================================
LINE: 2
一看就是“药丸
-
..............
LINE: 2
是“药丸”
-
..........
```
## 支持的检查项目
| 错误码 | 检查范围 | 描述 |
| ---- | -------- | ------------------------------------------------------------------------------ |
| E101 | 段落 | 英文与非标点的中文之间需要有一个空格 |
| E102 | 段落 | 数字与非标点的中文之间需要有一个空格 |
| E103 | 段落 | 除了「%」、「℃」、以及倍数单位(如 2x、3n)之外,其余数字与单位之间需要加空格 |
| E104 | 段落 | 书写时括号中全为数字,则括号用半角括号且首括号前要空一格 |
| E201 | 句子 | 只有中文或中英文混排中,一律使用中文全角标点 |
| E202 | 句子 | 如果出现整句英文,则在这句英文中使用英文、半角标点 |
| E203 | 段落 | 中文标点与其他字符间一律不加空格 |
| E204 | 句子 | 中文文案中使用中文引号「」和『』,其中「」为外层引号 |
| E205 | 段落 | 省略号请使用「……」标准用法 |
| E206 | 段落 | 感叹号请使用「!」标准用法 |
| E207 | 段落 | 请勿在文章内使用「~」 |
| E301 | 段落 | 常用名词错误 |
详情见 [写作规范和格式规范,DaoCloud 文档](http://docs-static.daocloud.io/write-docs/format)。
| zh_doclint | /zh_doclint-0.1.2.tar.gz/zh_doclint-0.1.2/README.md | README.md |
A sample Python project
=======================
A sample project that exists as an aid to the `Python Packaging User Guide
<https://packaging.python.org>`_'s `Tutorial on Packaging and Distributing
Projects <https://packaging.python.org/en/latest/distributing.html>`_.
This project does not aim to cover best practices for Python project
development as a whole. For example, it does not provide guidance or tool
recommendations for version control, documentation, or testing.
----
This is the README file for the project.
The file should use UTF-8 encoding and be written using ReStructured Text. It
will be used to generate the project webpage on PyPI and will be displayed as
the project homepage on common code-hosting services, and should be written for
that purpose.
Typical contents for this file would include an overview of the project, basic
usage examples, etc. Generally, including the project changelog in here is not
a good idea, although a simple "What's New" section for the most recent version
may be appropriate. | zh_recover | /zh_recover-1.2.0.tar.gz/zh_recover-1.2.0/README.rst | README.rst |
Python Word Segmentation
========================
`zh_segment`_ is an Apache2 licensed module for English word
segmentation, written in pure-Python, and based on a trillion-word corpus.
Based on code from the chapter "`Natural Language Corpus Data`_" by Peter
Norvig from the book "`Beautiful Data`_" (Segaran and Hammerbacher, 2009).
Data files are derived from the `Google Web Trillion Word Corpus`_, as
described by Thorsten Brants and Alex Franz, and `distributed`_ by the
Linguistic Data Consortium. This module contains only a subset of that
data. The unigram data includes only the most common 333,000 words. Similarly,
bigram data includes only the most common 250,000 phrases. Every word and
phrase is lowercased with punctuation removed.
.. _`zh_segment`: https://github.com/wuhaifengdhu/zh_segment/tree/master/docs
.. _`Natural Language Corpus Data`: http://norvig.com/ngrams/
.. _`Beautiful Data`: http://oreilly.com/catalog/9780596157111/
.. _`Google Web Trillion Word Corpus`: http://googleresearch.blogspot.com/2006/08/all-our-n-gram-are-belong-to-you.html
.. _`distributed`: https://catalog.ldc.upenn.edu/LDC2006T13
Features
--------
- Pure-Python
- Fully documented
- 100% Test Coverage
- Includes unigram and bigram data
- Command line interface for batch processing
- Easy to hack (e.g. different scoring, new data, different language)
- Developed on Python 2.7
- Tested on CPython 2.6, 2.7, 3.2, 3.3, 3.4 and PyPy 2.5+, PyPy3 2.4+
.. image:: https://github.com/wuhaifengdhu/zh_segment/blob/master/docs/_static/zh_segment.png?raw=true
:target: https://github.com/wuhaifengdhu/zh_segment
Quickstart
----------
Installing zh_segment is simple with
`pip <http://www.pip-installer.org/>`_::
$ pip install zh_segment
You can access documentation in the interpreter with Python's built-in help
function::
>>> import zh_segment
>>> help(zh_segment)
Tutorial
--------
In your own Python programs, you'll mostly want to use `segment` to divide a
phrase into a list of its parts::
>>> from zh_segment import segment
>>> segment('1077501; 1296599; 5000; 5000; 4975; 36 months; 10.64%; 162.87; B; B2;;10+ years;RENT')
['1077501', '1296599', '5000', '5000', '4975', '36', 'months', '10.64%', '162.87', 'B', 'B', '2', '10+', 'years', 'RENT']
zh_segment also provides a command-line interface for batch processing. This
interface accepts two arguments: in-file and out-file. Lines from in-file are
iteratively segmented, joined by a space, and written to out-file. Input and
output default to stdin and stdout respectively. ::
$ echo thisisatest | python -m zh_segment
this is a test
The maximum segmented word length is 24 characters. Neither the unigram nor
bigram data contain words exceeding that length. The corpus also excludes
punctuation and all letters have been lowercased. Before segmenting text,
`clean` is called to transform the input to a canonical form::
>>> from zh_segment import clean
>>> clean('She said, "Python rocks!"')
'shesaidpythonrocks'
>>> segment('She said, "Python rocks!"')
['she', 'said', 'python', 'rocks']
Sometimes it's interesting to explore the unigram and bigram counts
themselves. These are stored in Python dictionaries mapping word to count. ::
>>> import zh_segment as ws
>>> ws.load()
>>> ws.UNIGRAMS['the']
23135851162.0
>>> ws.UNIGRAMS['gray']
21424658.0
>>> ws.UNIGRAMS['grey']
18276942.0
Above we see that the spelling `gray` is more common than the spelling `grey`.
Bigrams are joined by a space::
>>> import heapq
>>> from pprint import pprint
>>> from operator import itemgetter
>>> pprint(heapq.nlargest(10, ws.BIGRAMS.items(), itemgetter(1)))
[('of the', 2766332391.0),
('in the', 1628795324.0),
('to the', 1139248999.0),
('on the', 800328815.0),
('for the', 692874802.0),
('and the', 629726893.0),
('to be', 505148997.0),
('is a', 476718990.0),
('with the', 461331348.0),
('from the', 428303219.0)]
Some bigrams begin with `<s>`. This is to indicate the start of a sentence::
>>> ws.BIGRAMS['<s> where']
15419048.0
>>> ws.BIGRAMS['<s> what']
11779290.0
The unigram and bigram data are stored in the `zh_segment_data` directory in
the `unigrams.txt` and `bigrams.txt` files respectively.
Reference and Indices
---------------------
* `zh_segment Documentation`_
* `zh_segment at PyPI`_
* `zh_segment at Github`_
* `zh_segment Issue Tracker`_
.. _`zh_segment Documentation`: https://github.com/wuhaifengdhu/zh_segment/tree/master/docs/docs
.. _`zh_segment at PyPI`: https://pypi.python.org/pypi/zh_segment
.. _`zh_segment at Github`: https://github.com/wuhaifengdhu/zh_segment
.. _`zh_segment Issue Tracker`: https://github.com/wuhaifengdhu/zh_segment/issues
zh_segment License
-------------------
Copyright 2017 Z&H
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
| zh_segment | /zh_segment-1.2.1.tar.gz/zh_segment-1.2.1/README.rst | README.rst |
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
| zha-quirks | /zha_quirks-0.0.103-py3-none-any.whl/zha_quirks-0.0.103.dist-info/LICENSE.md | LICENSE.md |
from zigpy.quirks import (
SIG_ENDPOINTS,
SIG_EP_INPUT,
SIG_EP_OUTPUT,
SIG_EP_PROFILE,
SIG_EP_TYPE,
SIG_MANUFACTURER,
SIG_MODEL,
SIG_MODELS_INFO,
SIG_NODE_DESC,
SIG_SKIP_CONFIG,
)
ARGS = "args"
ATTR_ID = "attr_id"
ATTRIBUTE_ID = "attribute_id"
ATTRIBUTE_NAME = "attribute_name"
BUTTON = "button"
BUTTON_1 = "button_1"
BUTTON_2 = "button_2"
BUTTON_3 = "button_3"
BUTTON_4 = "button_4"
BUTTON_5 = "button_5"
BUTTON_6 = "button_6"
CLICK_TYPE = "click_type"
CLOSE = "close"
CLUSTER_COMMAND = "cluster_command"
CLUSTER_ID = "cluster_id"
COMMAND = "command"
COMMAND_ATTRIBUTE_UPDATED = "attribute_updated"
COMMAND_BUTTON_DOUBLE = "button_double"
COMMAND_BUTTON_HOLD = "button_hold"
COMMAND_BUTTON_SINGLE = "button_single"
COMMAND_CLICK = "click"
COMMAND_DOUBLE = "double"
COMMAND_FURIOUS = "furious"
COMMAND_HOLD = "hold"
COMMAND_ID = "command_id"
COMMAND_M_INITIAL_PRESS = "initial_press"
COMMAND_M_LONG_PRESS = "long_press"
COMMAND_M_LONG_RELEASE = "long_release"
COMMAND_M_MULTI_PRESS_COMPLETE = "multi_press_complete"
COMMAND_M_MULTI_PRESS_ONGOING = "multi_press_ongoing"
COMMAND_M_SHORT_RELEASE = "short_release"
COMMAND_M_SWLATCHED = "switch_latched"
COMMAND_MOVE = "move"
COMMAND_MOVE_COLOR_TEMP = "move_color_temp"
COMMAND_MOVE_ON_OFF = "move_with_on_off"
COMMAND_MOVE_SATURATION = "move_saturation"
COMMAND_MOVE_TO_SATURATION = "move_to_saturation"
COMMAND_MOVE_TO_LEVEL_ON_OFF = "move_to_level_with_on_off"
COMMAND_OFF = "off"
COMMAND_OFF_WITH_EFFECT = "off_with_effect"
COMMAND_ON = "on"
COMMAND_PRESS = "press"
COMMAND_QUAD = "quadruple"
COMMAND_RELEASE = "release"
COMMAND_SHAKE = "shake"
COMMAND_SINGLE = "single"
COMMAND_STEP = "step"
COMMAND_STEP_COLOR_TEMP = "step_color_temp"
COMMAND_STEP_HUE = "step_hue"
COMMAND_STEP_ON_OFF = "step_with_on_off"
COMMAND_STEP_SATURATION = "step_saturation"
COMMAND_STOP = "stop"
COMMAND_STOP_MOVE_STEP = "stop_move_step"
COMMAND_STOP_ON_OFF = "stop_with_on_off"
COMMAND_TILT = "Tilt"
COMMAND_TOGGLE = "toggle"
COMMAND_TRIPLE = "triple"
DESCRIPTION = "description"
DEVICE_TYPE = SIG_EP_TYPE
DIM_DOWN = "dim_down"
DIM_UP = "dim_up"
DOUBLE_PRESS = "remote_button_double_press"
ALT_DOUBLE_PRESS = "remote_button_alt_double_press"
ENDPOINT_ID = "endpoint_id"
ENDPOINTS = SIG_ENDPOINTS
INPUT_CLUSTERS = SIG_EP_INPUT
LEFT = "left"
LONG_PRESS = "remote_button_long_press"
LONG_RELEASE = "remote_button_long_release"
ALT_LONG_PRESS = "remote_button_alt_long_press"
ALT_LONG_RELEASE = "remote_button_alt_long_release"
MANUFACTURER = SIG_MANUFACTURER
MODEL = SIG_MODEL
MODELS_INFO = SIG_MODELS_INFO
MOTION_EVENT = "motion_event"
NODE_DESCRIPTOR = SIG_NODE_DESC
OCCUPANCY_EVENT = "occupancy_event"
OCCUPANCY_STATE = 0
OFF = 0
ON = 1
OPEN = "open"
OUTPUT_CLUSTERS = SIG_EP_OUTPUT
PARAMS = "params"
PRESS_TYPE = "press_type"
PRESSED = "initial_switch_press"
PROFILE_ID = SIG_EP_PROFILE
QUADRUPLE_PRESS = "remote_button_quadruple_press"
QUINTUPLE_PRESS = "remote_button_quintuple_press"
RELATIVE_DEGREES = "relative_degrees"
RIGHT = "right"
ROTATED = "device_rotated"
ROTATED_FAST = "device_rotated_fast"
ROTATED_SLOW = "device_rotated_slow"
STOP = "stop"
SHAKEN = "device_shaken"
SHORT_PRESS = "remote_button_short_press"
ALT_SHORT_PRESS = "remote_button_alt_short_press"
SKIP_CONFIGURATION = SIG_SKIP_CONFIG
SHORT_RELEASE = "remote_button_short_release"
TOGGLE = "toggle"
TRIPLE_PRESS = "remote_button_triple_press"
TURN_OFF = "turn_off"
TURN_ON = "turn_on"
UNKNOWN = "Unknown"
VALUE = "value"
ZHA_SEND_EVENT = "zha_send_event"
ZONE_STATUS_CHANGE_COMMAND = 0x0000
ZONE_STATE = 0x0000
ZONE_TYPE = 0x0001
ZONE_STATUS = 0x0002 | zha-quirks | /zha_quirks-0.0.103-py3-none-any.whl/zhaquirks/const.py | const.py |
from __future__ import annotations
import asyncio
import importlib
import importlib.util
import logging
import pathlib
import pkgutil
import sys
from typing import Any
import zigpy.device
import zigpy.endpoint
from zigpy.quirks import CustomCluster, CustomDevice
import zigpy.types as t
from zigpy.util import ListenableMixin
from zigpy.zcl import foundation
from zigpy.zcl.clusters.general import PowerConfiguration
from zigpy.zcl.clusters.measurement import OccupancySensing
from zigpy.zcl.clusters.security import IasZone
from zigpy.zdo import types as zdotypes
from .const import (
ATTRIBUTE_ID,
ATTRIBUTE_NAME,
CLUSTER_COMMAND,
COMMAND_ATTRIBUTE_UPDATED,
DEVICE_TYPE,
ENDPOINTS,
INPUT_CLUSTERS,
MANUFACTURER,
MODEL,
MODELS_INFO,
MOTION_EVENT,
NODE_DESCRIPTOR,
OCCUPANCY_EVENT,
OCCUPANCY_STATE,
OFF,
ON,
OUTPUT_CLUSTERS,
PROFILE_ID,
UNKNOWN,
VALUE,
ZHA_SEND_EVENT,
ZONE_STATUS_CHANGE_COMMAND,
)
_LOGGER = logging.getLogger(__name__)
class Bus(ListenableMixin):
"""Event bus implementation."""
def __init__(self, *args, **kwargs):
"""Init event bus."""
super().__init__(*args, **kwargs)
self._listeners = {}
class LocalDataCluster(CustomCluster):
"""Cluster meant to prevent remote calls."""
_CONSTANT_ATTRIBUTES = {}
async def bind(self):
"""Prevent bind."""
return (foundation.Status.SUCCESS,)
async def unbind(self):
"""Prevent unbind."""
return (foundation.Status.SUCCESS,)
async def _configure_reporting(self, *args, **kwargs): # pylint: disable=W0221
"""Prevent remote configure reporting."""
return (foundation.ConfigureReportingResponse.deserialize(b"\x00")[0],)
async def read_attributes_raw(self, attributes, manufacturer=None):
"""Prevent remote reads."""
records = [
foundation.ReadAttributeRecord(
attr, foundation.Status.UNSUPPORTED_ATTRIBUTE, foundation.TypeValue()
)
for attr in attributes
]
for record in records:
if record.attrid in self._CONSTANT_ATTRIBUTES:
record.value.value = self._CONSTANT_ATTRIBUTES[record.attrid]
else:
record.value.value = self._attr_cache.get(record.attrid)
if record.value.value is not None:
record.status = foundation.Status.SUCCESS
return (records,)
async def write_attributes(self, attributes, manufacturer=None):
"""Prevent remote writes."""
for attrid, value in attributes.items():
if isinstance(attrid, str):
attrid = self.attributes_by_name[attrid].id
elif attrid not in self.attributes:
self.error("%d is not a valid attribute id", attrid)
continue
self._update_attribute(attrid, value)
return ([foundation.WriteAttributesStatusRecord(foundation.Status.SUCCESS)],)
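# Illustrative sketch (not part of zhaquirks): a quirk can pin attributes that
# the device never reports via _CONSTANT_ATTRIBUTES, while every other read is
# answered from the attribute cache instead of going out over the radio.
#
#     class VirtualPowerSource(LocalDataCluster, PowerConfiguration):
#         """Hypothetical example cluster."""
#
#         _CONSTANT_ATTRIBUTES = {0x0020: 30}  # battery_voltage pinned to 3.0 V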
class EventableCluster(CustomCluster):
"""Cluster that generates events."""
def handle_cluster_request(
self,
hdr: foundation.ZCLHeader,
args: list[Any],
*,
dst_addressing: None
| (t.Addressing.Group | t.Addressing.IEEE | t.Addressing.NWK) = None,
):
"""Send cluster requests as events."""
if (
self.server_commands is not None
and self.server_commands.get(hdr.command_id) is not None
):
self.listener_event(
ZHA_SEND_EVENT,
self.server_commands.get(hdr.command_id, (hdr.command_id)).name,
args,
)
def _update_attribute(self, attrid, value):
super()._update_attribute(attrid, value)
if attrid in self.attributes:
attribute_name = self.attributes[attrid].name
else:
attribute_name = UNKNOWN
self.listener_event(
ZHA_SEND_EVENT,
COMMAND_ATTRIBUTE_UPDATED,
{
ATTRIBUTE_ID: attrid,
ATTRIBUTE_NAME: attribute_name,
VALUE: value,
},
)
class GroupBoundCluster(CustomCluster):
"""Cluster that can only bind to a group instead of direct to hub.
Binding this cluster results in binding to a group that the coordinator
is a member of.
"""
COORDINATOR_GROUP_ID = 0x30 # Group id with only coordinator as a member
async def bind(self):
"""Bind cluster to a group."""
# Ensure coordinator is a member of the group
application = self._endpoint.device.application
coordinator = application.get_device(application.state.node_info.ieee)
await coordinator.add_to_group(
self.COORDINATOR_GROUP_ID,
name="Coordinator Group - Created by ZHAQuirks",
)
# Bind cluster to group
dstaddr = zdotypes.MultiAddress()
dstaddr.addrmode = 1
dstaddr.nwk = self.COORDINATOR_GROUP_ID
dstaddr.endpoint = self._endpoint.endpoint_id
return await self._endpoint.device.zdo.Bind_req(
self._endpoint.device.ieee,
self._endpoint.endpoint_id,
self.cluster_id,
dstaddr,
)
class DoublingPowerConfigurationCluster(CustomCluster, PowerConfiguration):
"""PowerConfiguration cluster implementation.
This implementation doubles battery pct remaining for non standard devices
that don't follow the reporting spec.
"""
cluster_id = PowerConfiguration.cluster_id
BATTERY_PERCENTAGE_REMAINING = 0x0021
def _update_attribute(self, attrid, value):
if attrid == self.BATTERY_PERCENTAGE_REMAINING:
value = value * 2
super()._update_attribute(attrid, value)
class PowerConfigurationCluster(CustomCluster, PowerConfiguration):
"""Common use power configuration cluster."""
cluster_id = PowerConfiguration.cluster_id
BATTERY_VOLTAGE_ATTR = 0x0020
BATTERY_PERCENTAGE_REMAINING = 0x0021
MIN_VOLTS = 1.5 # old 2.1
MAX_VOLTS = 2.8 # old 3.2
def _update_attribute(self, attrid, value):
super()._update_attribute(attrid, value)
if attrid == self.BATTERY_VOLTAGE_ATTR and value not in (0, 255):
super()._update_attribute(
self.BATTERY_PERCENTAGE_REMAINING,
self._calculate_battery_percentage(value),
)
def _calculate_battery_percentage(self, raw_value):
volts = raw_value / 10
volts = max(volts, self.MIN_VOLTS)
volts = min(volts, self.MAX_VOLTS)
percent = round(
((volts - self.MIN_VOLTS) / (self.MAX_VOLTS - self.MIN_VOLTS)) * 200
)
self.debug(
"Voltage [RAW]:%s [Max]:%s [Min]:%s, Battery Percent: %s",
raw_value,
self.MAX_VOLTS,
self.MIN_VOLTS,
percent / 2,
)
return percent
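# Worked example (illustrative): a raw battery_voltage report of 25 (2.5 V)
# yields round(((2.5 - 1.5) / (2.8 - 1.5)) * 200) == 154 in half-percent ZCL
# units, i.e. roughly 77 % battery remaining as surfaced to the user.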
class _Motion(CustomCluster, IasZone):
"""Self reset Motion cluster."""
reset_s: int = 30
def __init__(self, *args, **kwargs):
"""Init."""
super().__init__(*args, **kwargs)
self._loop = asyncio.get_running_loop()
self._timer_handle = None
def _turn_off(self):
self._timer_handle = None
self.debug("%s - Resetting motion sensor", self.endpoint.device.ieee)
self.listener_event(
CLUSTER_COMMAND, 253, ZONE_STATUS_CHANGE_COMMAND, [OFF, 0, 0, 0]
)
class MotionWithReset(_Motion):
"""Self reset Motion cluster.
Optionally send event over device bus.
"""
send_occupancy_event: bool = False
def handle_cluster_request(
self,
hdr: foundation.ZCLHeader,
args: list[Any],
*,
dst_addressing: None
| (t.Addressing.Group | t.Addressing.IEEE | t.Addressing.NWK) = None,
):
"""Handle the cluster command."""
# check if the command is for a zone status change of ZoneStatus.Alarm_1 or ZoneStatus.Alarm_2
if hdr.command_id == ZONE_STATUS_CHANGE_COMMAND and args[0] & 3:
if self._timer_handle:
self._timer_handle.cancel()
self._timer_handle = self._loop.call_later(self.reset_s, self._turn_off)
if self.send_occupancy_event:
self.endpoint.device.occupancy_bus.listener_event(OCCUPANCY_EVENT)
class MotionOnEvent(_Motion):
"""Motion based on received events from occupancy."""
reset_s: int = 120
def __init__(self, *args, **kwargs):
"""Init."""
super().__init__(*args, **kwargs)
self.endpoint.device.motion_bus.add_listener(self)
def motion_event(self):
"""Motion event."""
super().listener_event(
CLUSTER_COMMAND, 254, ZONE_STATUS_CHANGE_COMMAND, [ON, 0, 0, 0]
)
self.debug("%s - Received motion event message", self.endpoint.device.ieee)
if self._timer_handle:
self._timer_handle.cancel()
self._timer_handle = self._loop.call_later(self.reset_s, self._turn_off)
class _Occupancy(CustomCluster, OccupancySensing):
"""Self reset Occupancy cluster."""
reset_s: int = 600
def __init__(self, *args, **kwargs):
"""Init."""
super().__init__(*args, **kwargs)
self._timer_handle = None
self._loop = asyncio.get_running_loop()
def _turn_off(self):
self._timer_handle = None
self._update_attribute(OCCUPANCY_STATE, OFF)
class OccupancyOnEvent(_Occupancy):
"""Self reset occupancy from bus."""
def __init__(self, *args, **kwargs):
"""Init."""
super().__init__(*args, **kwargs)
self.endpoint.device.occupancy_bus.add_listener(self)
def occupancy_event(self):
"""Occupancy event."""
self._update_attribute(OCCUPANCY_STATE, ON)
if self._timer_handle:
self._timer_handle.cancel()
self._timer_handle = self._loop.call_later(self.reset_s, self._turn_off)
class OccupancyWithReset(_Occupancy):
"""Self reset Occupancy cluster and send event on motion bus."""
def _update_attribute(self, attrid, value):
super()._update_attribute(attrid, value)
if attrid == OCCUPANCY_STATE and value == ON:
if self._timer_handle:
self._timer_handle.cancel()
self.endpoint.device.motion_bus.listener_event(MOTION_EVENT)
self._timer_handle = self._loop.call_later(self.reset_s, self._turn_off)
class QuickInitDevice(CustomDevice):
"""Devices with quick initialization from quirk signature."""
signature: dict[str, Any] | None = None
@classmethod
def from_signature(
cls, device: zigpy.device.Device, model: str | None = None
) -> zigpy.device.Device:
"""Update device accordingly to quirk signature."""
assert isinstance(cls.signature, dict)
if model is None:
model = cls.signature[MODEL]
manufacturer = cls.signature.get(MANUFACTURER)
if manufacturer is None:
manufacturer = cls.signature[MODELS_INFO][0][0]
device.node_desc = cls.signature[NODE_DESCRIPTOR]
endpoints = cls.signature[ENDPOINTS]
for ep_id, ep_data in endpoints.items():
endpoint = device.add_endpoint(ep_id)
endpoint.profile_id = ep_data[PROFILE_ID]
endpoint.device_type = ep_data[DEVICE_TYPE]
for cluster_id in ep_data[INPUT_CLUSTERS]:
cluster = endpoint.add_input_cluster(cluster_id)
if cluster.ep_attribute == "basic":
manuf_attr_id = cluster.attributes_by_name[MANUFACTURER].id
cluster._update_attribute( # pylint: disable=W0212
manuf_attr_id, manufacturer
)
cluster._update_attribute( # pylint: disable=W0212
cluster.attributes_by_name[MODEL].id, model
)
for cluster_id in ep_data[OUTPUT_CLUSTERS]:
endpoint.add_output_cluster(cluster_id)
endpoint.status = zigpy.endpoint.Status.ZDO_INIT
device.status = zigpy.device.Status.ENDPOINTS_INIT
device.manufacturer = manufacturer
device.model = model
return device
class NoReplyMixin:
"""A simple mixin.
Allows a cluster to have configurable list of command
ids that do not generate an explicit reply.
"""
    void_input_commands: set[int] = set()
async def command(self, command, *args, expect_reply=None, **kwargs):
"""Override the default Cluster command.
expect_reply behavior is based on void_input_commands.
Note that this method changes the default value of
expect_reply to None. This allows the caller to explicitly force
expect_reply to true.
"""
if expect_reply is None and command in self.void_input_commands:
cmd_expect_reply = False
elif expect_reply is None:
cmd_expect_reply = True # the default
else:
cmd_expect_reply = expect_reply
rsp = await super().command(
command, *args, expect_reply=cmd_expect_reply, **kwargs
)
if expect_reply is None and command in self.void_input_commands:
# Pretend we received a default reply
return foundation.GENERAL_COMMANDS[
foundation.GeneralCommand.Default_Response
].schema(command_id=command, status=foundation.Status.SUCCESS)
return rsp
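# Hypothetical usage sketch (example only, not part of zhaquirks): a cluster
# whose command 0x00 is known to never produce an explicit reply can list it
# in void_input_commands, so callers receive a synthesized Default_Response
# instead of waiting for a timeout.
#
#     class NoReplyOnOff(NoReplyMixin, CustomCluster, OnOff):
#         """Hypothetical example cluster."""
#
#         void_input_commands = {0x00}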
def setup(custom_quirks_path: str | None = None) -> None:
"""Register all quirks with zigpy, including optional custom quirks."""
# Import all quirks in the `zhaquirks` package first
for _importer, modname, _ispkg in pkgutil.walk_packages(
path=__path__,
prefix=__name__ + ".",
):
_LOGGER.debug("Loading quirks module %r", modname)
importlib.import_module(modname)
if custom_quirks_path is None:
return
path = pathlib.Path(custom_quirks_path)
_LOGGER.debug("Loading custom quirks from %r", path)
loaded = False
# Treat the custom quirk path (e.g. `/config/custom_quirks/`) itself as a module
for importer, modname, _ispkg in pkgutil.walk_packages(path=[str(path)]):
_LOGGER.debug("Loading custom quirk module %r", modname)
try:
spec = importer.find_spec(modname)
module = importlib.util.module_from_spec(spec)
sys.modules[modname] = module
spec.loader.exec_module(module)
except Exception:
_LOGGER.exception("Unexpected exception importing custom quirk %r", modname)
else:
loaded = True
if loaded:
_LOGGER.warning(
"Loaded custom quirks. Please contribute them to"
" https://github.com/zigpy/zha-device-handlers"
) | zha-quirks | /zha_quirks-0.0.103-py3-none-any.whl/zhaquirks/__init__.py | __init__.py |
from zigpy.profiles import zha
from zigpy.quirks import CustomDevice
from zigpy.zcl.clusters.general import (
Basic,
Groups,
Identify,
LevelControl,
OnOff,
Ota,
Scenes,
)
from zigpy.zcl.clusters.homeautomation import Diagnostic
from zigpy.zcl.clusters.lighting import Color
from zhaquirks.const import (
DEVICE_TYPE,
ENDPOINTS,
INPUT_CLUSTERS,
MODELS_INFO,
OUTPUT_CLUSTERS,
PROFILE_ID,
)
from zhaquirks.ledvance import LEDVANCE, LedvanceLightCluster
class LedvanceA19RGBW(CustomDevice):
"""Ledvance A19 RGBW device."""
signature = {
# <SimpleDescriptor endpoint=1 profile=260 device_type=258
# device_version=2 input_clusters=[0, 3, 4, 5, 6, 8, 768, 2821, 64513]
# output_clusters=[25]>
MODELS_INFO: [(LEDVANCE, "A19 RGBW")],
ENDPOINTS: {
1: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.COLOR_DIMMABLE_LIGHT,
INPUT_CLUSTERS: [
Basic.cluster_id,
Identify.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
Color.cluster_id,
Diagnostic.cluster_id,
LedvanceLightCluster.cluster_id,
],
OUTPUT_CLUSTERS: [Ota.cluster_id],
}
},
}
replacement = {
ENDPOINTS: {
1: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.COLOR_DIMMABLE_LIGHT,
INPUT_CLUSTERS: [
Basic.cluster_id,
Identify.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
Color.cluster_id,
Diagnostic.cluster_id,
LedvanceLightCluster,
],
OUTPUT_CLUSTERS: [Ota.cluster_id],
}
}
} | zha-quirks | /zha_quirks-0.0.103-py3-none-any.whl/zhaquirks/ledvance/a19rgbw.py | a19rgbw.py |
from zigpy.profiles import zha
from zigpy.quirks import CustomDevice
from zigpy.zcl.clusters.general import (
Basic,
Groups,
Identify,
LevelControl,
OnOff,
Ota,
Scenes,
)
from zigpy.zcl.clusters.homeautomation import Diagnostic
from zigpy.zcl.clusters.lighting import Color
from zhaquirks.const import (
DEVICE_TYPE,
ENDPOINTS,
INPUT_CLUSTERS,
MODELS_INFO,
OUTPUT_CLUSTERS,
PROFILE_ID,
)
from zhaquirks.ledvance import LEDVANCE, LedvanceLightCluster
class FlexRGBW(CustomDevice):
"""Ledvance Flex RGBW LED strip."""
signature = {
# <SimpleDescriptor endpoint=1 profile=260 device_type=258
# device_version=2 input_clusters=[0, 3, 4, 5, 6, 8, 768, 2821, 64527]
# output_clusters=[25]>
MODELS_INFO: [(LEDVANCE, "FLEX RGBW")],
ENDPOINTS: {
1: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.COLOR_DIMMABLE_LIGHT,
INPUT_CLUSTERS: [
Basic.cluster_id,
Identify.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
Color.cluster_id,
LedvanceLightCluster.cluster_id,
Diagnostic.cluster_id,
],
OUTPUT_CLUSTERS: [Ota.cluster_id],
}
},
}
replacement = {
ENDPOINTS: {
1: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.COLOR_DIMMABLE_LIGHT,
INPUT_CLUSTERS: [
Basic.cluster_id,
Identify.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
Color.cluster_id,
LedvanceLightCluster,
Diagnostic.cluster_id,
],
OUTPUT_CLUSTERS: [Ota.cluster_id],
}
}
} | zha-quirks | /zha_quirks-0.0.103-py3-none-any.whl/zhaquirks/ledvance/flexrgbw.py | flexrgbw.py |
from zigpy.profiles import zha, zll
from zigpy.quirks import CustomDevice
from zigpy.zcl.clusters.general import (
Basic,
BinaryInput,
Groups,
Identify,
LevelControl,
OnOff,
Ota,
PowerConfiguration,
Scenes,
)
from zhaquirks.const import (
DEVICE_TYPE,
ENDPOINTS,
INPUT_CLUSTERS,
MODELS_INFO,
OUTPUT_CLUSTERS,
PROFILE_ID,
)
from zhaquirks.philips import (
HUE_REMOTE_DEVICE_TRIGGERS,
PHILIPS,
SIGNIFY,
PhilipsBasicCluster,
PhilipsRemoteCluster,
)
DEVICE_SPECIFIC_UNKNOWN = 64512
class PhilipsRWLFirstGen(CustomDevice):
"""Philips updated RWL020 and RWL021 devices."""
signature = {
# <SimpleDescriptor endpoint=1 profile=49246 device_type=2096
# device_version=2
# input_clusters=[0]
# output_clusters=[0, 3, 4, 6, 8, 5]>
MODELS_INFO: [
(PHILIPS, "RWL020"),
(SIGNIFY, "RWL020"),
(PHILIPS, "RWL021"),
(SIGNIFY, "RWL021"),
],
ENDPOINTS: {
1: {
PROFILE_ID: zll.PROFILE_ID,
DEVICE_TYPE: zll.DeviceType.SCENE_CONTROLLER,
INPUT_CLUSTERS: [Basic.cluster_id],
OUTPUT_CLUSTERS: [
Basic.cluster_id,
Identify.cluster_id,
Groups.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
Scenes.cluster_id,
],
},
# <SimpleDescriptor endpoint=2 profile=260 device_type=12
# device_version=0
# input_clusters=[0, 1, 3, 15, 64512]
# output_clusters=[25]>
2: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.SIMPLE_SENSOR,
INPUT_CLUSTERS: [
Basic.cluster_id,
PowerConfiguration.cluster_id,
Identify.cluster_id,
BinaryInput.cluster_id,
DEVICE_SPECIFIC_UNKNOWN,
],
OUTPUT_CLUSTERS: [Ota.cluster_id],
},
},
}
replacement = {
ENDPOINTS: {
1: {
INPUT_CLUSTERS: [Basic.cluster_id],
OUTPUT_CLUSTERS: [
Basic.cluster_id,
Identify.cluster_id,
Groups.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
Scenes.cluster_id,
],
},
2: {
INPUT_CLUSTERS: [
PhilipsBasicCluster,
PowerConfiguration.cluster_id,
Identify.cluster_id,
BinaryInput.cluster_id,
PhilipsRemoteCluster,
],
OUTPUT_CLUSTERS: [Ota.cluster_id],
},
}
}
device_automation_triggers = HUE_REMOTE_DEVICE_TRIGGERS
class PhilipsRWLFirstGen2(CustomDevice):
"""Philips older RWL020 and RWL021 devices."""
signature = {
# <SimpleDescriptor endpoint=1 profile=49246 device_type=2080
# device_version=2
# input_clusters=[0]
# output_clusters=[0, 3, 4, 6, 8]>
MODELS_INFO: [
(PHILIPS, "RWL020"),
(SIGNIFY, "RWL020"),
(PHILIPS, "RWL021"),
(SIGNIFY, "RWL021"),
],
ENDPOINTS: {
1: {
PROFILE_ID: zll.PROFILE_ID,
DEVICE_TYPE: zll.DeviceType.CONTROLLER,
INPUT_CLUSTERS: [Basic.cluster_id],
OUTPUT_CLUSTERS: [
Basic.cluster_id,
Identify.cluster_id,
Groups.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
],
},
# <SimpleDescriptor endpoint=2 profile=260 device_type=12
# device_version=0
# input_clusters=[0, 1, 3, 15, 64512]
# output_clusters=[25]>
2: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.SIMPLE_SENSOR,
INPUT_CLUSTERS: [
Basic.cluster_id,
PowerConfiguration.cluster_id,
Identify.cluster_id,
BinaryInput.cluster_id,
DEVICE_SPECIFIC_UNKNOWN,
],
OUTPUT_CLUSTERS: [Ota.cluster_id],
},
},
}
replacement = {
ENDPOINTS: {
1: {
INPUT_CLUSTERS: [Basic.cluster_id],
OUTPUT_CLUSTERS: [
Basic.cluster_id,
Identify.cluster_id,
Groups.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
],
},
2: {
INPUT_CLUSTERS: [
PhilipsBasicCluster,
PowerConfiguration.cluster_id,
Identify.cluster_id,
BinaryInput.cluster_id,
PhilipsRemoteCluster,
],
OUTPUT_CLUSTERS: [Ota.cluster_id],
},
}
}
device_automation_triggers = HUE_REMOTE_DEVICE_TRIGGERS | zha-quirks | /zha_quirks-0.0.103-py3-none-any.whl/zhaquirks/philips/rwlfirstgen.py | rwlfirstgen.py |
from zigpy.profiles import zha
from zigpy.quirks import CustomDevice
from zigpy.zcl.clusters.general import (
Basic,
Groups,
Identify,
LevelControl,
OnOff,
Ota,
PowerConfiguration,
Scenes,
)
from zigpy.zcl.clusters.lightlink import LightLink
from zhaquirks.const import (
DEVICE_TYPE,
ENDPOINTS,
INPUT_CLUSTERS,
MODELS_INFO,
OUTPUT_CLUSTERS,
PROFILE_ID,
)
from zhaquirks.philips import (
HUE_REMOTE_DEVICE_TRIGGERS,
SIGNIFY,
PhilipsBasicCluster,
PhilipsRemoteCluster,
)
DEVICE_SPECIFIC_UNKNOWN = 64512
class PhilipsRWL022(CustomDevice):
"""Philips RWL022 device."""
signature = {
# <SimpleDescriptor endpoint=1 profile=260 device_type=2096
# device_version=1
# input_clusters=[0, 1, 3, 64512, 4096]
# output_clusters=[25, 0, 3, 4, 6, 8, 5, 4096]>
MODELS_INFO: [(SIGNIFY, "RWL022")],
ENDPOINTS: {
1: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.NON_COLOR_SCENE_CONTROLLER,
INPUT_CLUSTERS: [
Basic.cluster_id,
PowerConfiguration.cluster_id,
Identify.cluster_id,
DEVICE_SPECIFIC_UNKNOWN,
LightLink.cluster_id,
],
OUTPUT_CLUSTERS: [
Ota.cluster_id,
Basic.cluster_id,
Identify.cluster_id,
Groups.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
Scenes.cluster_id,
LightLink.cluster_id,
],
}
},
}
replacement = {
ENDPOINTS: {
1: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.NON_COLOR_SCENE_CONTROLLER,
INPUT_CLUSTERS: [
PhilipsBasicCluster,
PowerConfiguration.cluster_id,
Identify.cluster_id,
PhilipsRemoteCluster,
LightLink.cluster_id,
],
OUTPUT_CLUSTERS: [
Ota.cluster_id,
Basic.cluster_id,
Identify.cluster_id,
Groups.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
Scenes.cluster_id,
LightLink.cluster_id,
],
}
}
}
device_automation_triggers = HUE_REMOTE_DEVICE_TRIGGERS | zha-quirks | /zha_quirks-0.0.103-py3-none-any.whl/zhaquirks/philips/rwl022.py | rwl022.py |
from zigpy.profiles import zha, zll
from zigpy.quirks import CustomCluster, CustomDevice
import zigpy.types as t
from zigpy.zcl.clusters.general import (
Basic,
Groups,
Identify,
LevelControl,
OnOff,
Ota,
PowerConfiguration,
Scenes,
)
from zigpy.zcl.clusters.lighting import Color
from zigpy.zcl.clusters.measurement import (
IlluminanceMeasurement,
OccupancySensing,
TemperatureMeasurement,
)
from zhaquirks.const import (
DEVICE_TYPE,
ENDPOINTS,
INPUT_CLUSTERS,
MODELS_INFO,
OUTPUT_CLUSTERS,
PROFILE_ID,
)
from zhaquirks.philips import PHILIPS, SIGNIFY, PhilipsOccupancySensing
class BasicCluster(CustomCluster, Basic):
"""Hue Motion Basic cluster."""
attributes = Basic.attributes.copy()
attributes[0x0033] = ("trigger_indicator", t.Bool, True)
class PhilipsMotion(CustomDevice):
"""Old Philips motion sensor devices."""
signature = {
MODELS_INFO: [(PHILIPS, "SML001"), (PHILIPS, "SML002")],
ENDPOINTS: {
# <SimpleDescriptor endpoint=1 profile=49246 device_type=2128
# device_version=?
# input_clusters=[0]
# output_clusters=[0, 3, 4, 5, 6, 8, 768]>
1: {
PROFILE_ID: zll.PROFILE_ID,
DEVICE_TYPE: zll.DeviceType.ON_OFF_SENSOR,
INPUT_CLUSTERS: [Basic.cluster_id],
OUTPUT_CLUSTERS: [
Basic.cluster_id,
Identify.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
Color.cluster_id,
],
},
# <SimpleDescriptor endpoint=2 profile=260 device_type=0x0107
# device_version=?
# input_clusters=[0, 1, 3, 1024, 1026, 1030]
# output_clusters=[25]>
2: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.OCCUPANCY_SENSOR,
INPUT_CLUSTERS: [
Basic.cluster_id,
PowerConfiguration.cluster_id,
Identify.cluster_id,
IlluminanceMeasurement.cluster_id,
TemperatureMeasurement.cluster_id,
OccupancySensing.cluster_id,
],
OUTPUT_CLUSTERS: [Ota.cluster_id],
},
},
}
replacement = {
ENDPOINTS: {
1: {
PROFILE_ID: zll.PROFILE_ID,
DEVICE_TYPE: zll.DeviceType.ON_OFF_SENSOR,
INPUT_CLUSTERS: [Basic.cluster_id],
OUTPUT_CLUSTERS: [
Basic.cluster_id,
Identify.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
Color.cluster_id,
],
},
2: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.OCCUPANCY_SENSOR,
INPUT_CLUSTERS: [
BasicCluster,
PowerConfiguration.cluster_id,
Identify.cluster_id,
IlluminanceMeasurement.cluster_id,
TemperatureMeasurement.cluster_id,
PhilipsOccupancySensing,
],
OUTPUT_CLUSTERS: [Ota.cluster_id],
},
}
}
class SignifyMotion(CustomDevice):
"""New Philips motion sensor devices."""
signature = {
MODELS_INFO: [(SIGNIFY, "SML003"), (SIGNIFY, "SML004")],
ENDPOINTS: {
2: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.OCCUPANCY_SENSOR,
INPUT_CLUSTERS: [
Basic.cluster_id,
PowerConfiguration.cluster_id,
Identify.cluster_id,
IlluminanceMeasurement.cluster_id,
TemperatureMeasurement.cluster_id,
OccupancySensing.cluster_id,
],
OUTPUT_CLUSTERS: [
Basic.cluster_id,
Identify.cluster_id,
OnOff.cluster_id,
Ota.cluster_id,
],
},
},
}
replacement = {
ENDPOINTS: {
2: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.OCCUPANCY_SENSOR,
INPUT_CLUSTERS: [
BasicCluster,
PowerConfiguration.cluster_id,
Identify.cluster_id,
IlluminanceMeasurement.cluster_id,
TemperatureMeasurement.cluster_id,
PhilipsOccupancySensing,
],
OUTPUT_CLUSTERS: [
Basic.cluster_id,
Identify.cluster_id,
OnOff.cluster_id,
Ota.cluster_id,
],
},
}
} | zha-quirks | /zha_quirks-0.0.103-py3-none-any.whl/zhaquirks/philips/motion.py | motion.py |
from zigpy.profiles import zha
from zigpy.quirks import CustomDevice
from zigpy.zcl.clusters.general import (
Basic,
Groups,
Identify,
LevelControl,
OnOff,
Ota,
PowerConfiguration,
Scenes,
)
from zigpy.zcl.clusters.lightlink import LightLink
from zhaquirks.const import (
COMMAND,
DEVICE_TYPE,
DOUBLE_PRESS,
ENDPOINTS,
INPUT_CLUSTERS,
LONG_PRESS,
LONG_RELEASE,
MODELS_INFO,
OUTPUT_CLUSTERS,
PROFILE_ID,
QUADRUPLE_PRESS,
QUINTUPLE_PRESS,
SHORT_PRESS,
SHORT_RELEASE,
TRIPLE_PRESS,
TURN_ON,
)
from zhaquirks.philips import (
PHILIPS,
SIGNIFY,
PhilipsBasicCluster,
PhilipsRemoteCluster,
)
DEVICE_SPECIFIC_UNKNOWN = 64512
class PhilipsROM001(CustomDevice):
"""Philips ROM001 device."""
signature = {
# <SimpleDescriptor endpoint=1 profile=260 device_type=2096
# device_version=1
# input_clusters=[0, 1, 3, 64512, 4096]
# output_clusters=[25, 0, 3, 4, 6, 8, 5, 4096]>
MODELS_INFO: [(PHILIPS, "ROM001"), (SIGNIFY, "ROM001")],
ENDPOINTS: {
1: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.NON_COLOR_SCENE_CONTROLLER,
INPUT_CLUSTERS: [
Basic.cluster_id,
PowerConfiguration.cluster_id,
Identify.cluster_id,
DEVICE_SPECIFIC_UNKNOWN,
LightLink.cluster_id,
],
OUTPUT_CLUSTERS: [
Ota.cluster_id,
Basic.cluster_id,
Identify.cluster_id,
Groups.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
Scenes.cluster_id,
LightLink.cluster_id,
],
}
},
}
replacement = {
ENDPOINTS: {
1: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.NON_COLOR_SCENE_CONTROLLER,
INPUT_CLUSTERS: [
PhilipsBasicCluster,
PowerConfiguration.cluster_id,
Identify.cluster_id,
PhilipsRemoteCluster,
LightLink.cluster_id,
],
OUTPUT_CLUSTERS: [
Ota.cluster_id,
Basic.cluster_id,
Identify.cluster_id,
Groups.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
Scenes.cluster_id,
LightLink.cluster_id,
],
}
}
}
device_automation_triggers = {
(SHORT_PRESS, TURN_ON): {COMMAND: "on_press"},
(LONG_PRESS, TURN_ON): {COMMAND: "on_hold"},
(DOUBLE_PRESS, TURN_ON): {COMMAND: "on_double_press"},
(TRIPLE_PRESS, TURN_ON): {COMMAND: "on_triple_press"},
(QUADRUPLE_PRESS, TURN_ON): {COMMAND: "on_quadruple_press"},
(QUINTUPLE_PRESS, TURN_ON): {COMMAND: "on_quintuple_press"},
(SHORT_RELEASE, TURN_ON): {COMMAND: "on_short_release"},
(LONG_RELEASE, TURN_ON): {COMMAND: "on_long_release"},
} | zha-quirks | /zha_quirks-0.0.103-py3-none-any.whl/zhaquirks/philips/rom001.py | rom001.py |
import logging
from typing import Any, List, Optional, Union
from zigpy.profiles import zha
from zigpy.quirks import CustomCluster, CustomDevice
import zigpy.types as t
from zigpy.zcl import foundation
from zigpy.zcl.clusters.general import (
Basic,
Groups,
Identify,
LevelControl,
OnOff,
Ota,
PowerConfiguration,
)
from zhaquirks.const import (
ARGS,
BUTTON,
COMMAND,
COMMAND_ID,
DEVICE_TYPE,
DOUBLE_PRESS,
ENDPOINTS,
INPUT_CLUSTERS,
LONG_PRESS,
LONG_RELEASE,
MODELS_INFO,
OUTPUT_CLUSTERS,
PRESS_TYPE,
PROFILE_ID,
QUADRUPLE_PRESS,
QUINTUPLE_PRESS,
RIGHT,
SHORT_PRESS,
SHORT_RELEASE,
TRIPLE_PRESS,
TURN_ON,
ZHA_SEND_EVENT,
)
from zhaquirks.philips import PHILIPS, SIGNIFY
DEVICE_SPECIFIC_UNKNOWN = 64512
_LOGGER = logging.getLogger(__name__)
class PhilipsBasicCluster(CustomCluster, Basic):
"""Philips Basic cluster."""
attributes = Basic.attributes.copy()
attributes.update(
{
0x0031: ("philips", t.bitmap16, True),
0x0034: ("mode", t.enum8, True),
}
)
attr_config = {0x0031: 0x000B, 0x0034: 0x02}
async def bind(self):
"""Bind cluster."""
result = await super().bind()
await self.write_attributes(self.attr_config, manufacturer=0x100B)
return result
class PhilipsRemoteCluster(CustomCluster):
"""Philips remote cluster."""
cluster_id = 64512
name = "PhilipsRemoteCluster"
ep_attribute = "philips_remote_cluster"
client_commands = {
0x00: foundation.ZCLCommandDef(
"notification",
{
"param1": t.uint8_t,
"param2": t.uint24_t,
"param3": t.uint8_t,
"param4": t.uint8_t,
"param5": t.uint8_t,
"param6": t.uint8_t,
},
is_manufacturer_specific=True,
direction=foundation.Direction.Server_to_Client,
)
}
BUTTONS = {
1: "left",
2: "right",
}
PRESS_TYPES = {0: "press", 1: "hold", 2: "press_release", 3: "hold_release"}
def handle_cluster_request(
self,
hdr: foundation.ZCLHeader,
args: List[Any],
*,
dst_addressing: Optional[
Union[t.Addressing.Group, t.Addressing.IEEE, t.Addressing.NWK]
] = None,
):
"""Handle the cluster command."""
_LOGGER.debug(
"PhilipsRemoteCluster - handle_cluster_request tsn: [%s] command id: %s - args: [%s]",
hdr.tsn,
hdr.command_id,
args,
)
button = self.BUTTONS.get(args[0], args[0])
press_type = self.PRESS_TYPES.get(args[2], args[2])
event_args = {
BUTTON: button,
PRESS_TYPE: press_type,
COMMAND_ID: hdr.command_id,
ARGS: args,
}
action = f"{button}_{press_type}"
self.listener_event(ZHA_SEND_EVENT, action, event_args)
class PhilipsRDM001(CustomDevice):
    """Philips RDM001 device."""
signature = {
# <SimpleDescriptor endpoint=1 profile=260 device_type=2080
# device_version=1
# input_clusters=[0, 1, 3, 64512]
# output_clusters=[3, 4, 6, 8, 25]>
MODELS_INFO: [(PHILIPS, "RDM001"), (SIGNIFY, "RDM001")],
ENDPOINTS: {
1: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.NON_COLOR_CONTROLLER,
INPUT_CLUSTERS: [
Basic.cluster_id,
PowerConfiguration.cluster_id,
Identify.cluster_id,
DEVICE_SPECIFIC_UNKNOWN,
],
OUTPUT_CLUSTERS: [
Identify.cluster_id,
Groups.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
Ota.cluster_id,
],
}
},
}
replacement = {
ENDPOINTS: {
1: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.NON_COLOR_CONTROLLER,
INPUT_CLUSTERS: [
PhilipsBasicCluster,
PowerConfiguration.cluster_id,
Identify.cluster_id,
PhilipsRemoteCluster,
],
OUTPUT_CLUSTERS: [
Ota.cluster_id,
Identify.cluster_id,
Groups.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
],
}
}
}
device_automation_triggers = {
(SHORT_PRESS, TURN_ON): {COMMAND: "left_press"},
(LONG_PRESS, TURN_ON): {COMMAND: "left_hold"},
(DOUBLE_PRESS, TURN_ON): {COMMAND: "left_double_press"},
(TRIPLE_PRESS, TURN_ON): {COMMAND: "left_triple_press"},
(QUADRUPLE_PRESS, TURN_ON): {COMMAND: "left_quadruple_press"},
(QUINTUPLE_PRESS, TURN_ON): {COMMAND: "left_quintuple_press"},
(SHORT_RELEASE, TURN_ON): {COMMAND: "left_short_release"},
(LONG_RELEASE, TURN_ON): {COMMAND: "left_long_release"},
(SHORT_PRESS, RIGHT): {COMMAND: "right_press"},
(LONG_PRESS, RIGHT): {COMMAND: "right_hold"},
(DOUBLE_PRESS, RIGHT): {COMMAND: "right_double_press"},
(TRIPLE_PRESS, RIGHT): {COMMAND: "right_triple_press"},
(QUADRUPLE_PRESS, RIGHT): {COMMAND: "right_quadruple_press"},
(QUINTUPLE_PRESS, RIGHT): {COMMAND: "right_quintuple_press"},
(SHORT_RELEASE, RIGHT): {COMMAND: "right_short_release"},
(LONG_RELEASE, RIGHT): {COMMAND: "right_long_release"},
} | zha-quirks | /zha_quirks-0.0.103-py3-none-any.whl/zhaquirks/philips/rdm001.py | rdm001.py |
import asyncio
import logging
import time
from typing import Any, List, Optional, Union
from zigpy.quirks import CustomCluster
import zigpy.types as t
from zigpy.zcl import foundation
from zigpy.zcl.clusters.general import Basic
from zigpy.zcl.clusters.measurement import OccupancySensing
from zhaquirks.const import (
ARGS,
BUTTON,
COMMAND,
COMMAND_ID,
DIM_DOWN,
DIM_UP,
DOUBLE_PRESS,
LONG_PRESS,
LONG_RELEASE,
PRESS_TYPE,
QUADRUPLE_PRESS,
QUINTUPLE_PRESS,
SHORT_PRESS,
SHORT_RELEASE,
TRIPLE_PRESS,
TURN_OFF,
TURN_ON,
ZHA_SEND_EVENT,
)
PHILIPS = "Philips"
SIGNIFY = "Signify Netherlands B.V."
_LOGGER = logging.getLogger(__name__)
HUE_REMOTE_DEVICE_TRIGGERS = {
(SHORT_PRESS, TURN_ON): {COMMAND: "on_press"},
(SHORT_PRESS, TURN_OFF): {COMMAND: "off_press"},
(SHORT_PRESS, DIM_UP): {COMMAND: "up_press"},
(SHORT_PRESS, DIM_DOWN): {COMMAND: "down_press"},
(LONG_PRESS, TURN_ON): {COMMAND: "on_hold"},
(LONG_PRESS, TURN_OFF): {COMMAND: "off_hold"},
(LONG_PRESS, DIM_UP): {COMMAND: "up_hold"},
(LONG_PRESS, DIM_DOWN): {COMMAND: "down_hold"},
(DOUBLE_PRESS, TURN_ON): {COMMAND: "on_double_press"},
(DOUBLE_PRESS, TURN_OFF): {COMMAND: "off_double_press"},
(DOUBLE_PRESS, DIM_UP): {COMMAND: "up_double_press"},
(DOUBLE_PRESS, DIM_DOWN): {COMMAND: "down_double_press"},
(TRIPLE_PRESS, TURN_ON): {COMMAND: "on_triple_press"},
(TRIPLE_PRESS, TURN_OFF): {COMMAND: "off_triple_press"},
(TRIPLE_PRESS, DIM_UP): {COMMAND: "up_triple_press"},
(TRIPLE_PRESS, DIM_DOWN): {COMMAND: "down_triple_press"},
(QUADRUPLE_PRESS, TURN_ON): {COMMAND: "on_quadruple_press"},
(QUADRUPLE_PRESS, TURN_OFF): {COMMAND: "off_quadruple_press"},
(QUADRUPLE_PRESS, DIM_UP): {COMMAND: "up_quadruple_press"},
(QUADRUPLE_PRESS, DIM_DOWN): {COMMAND: "down_quadruple_press"},
(QUINTUPLE_PRESS, TURN_ON): {COMMAND: "on_quintuple_press"},
(QUINTUPLE_PRESS, TURN_OFF): {COMMAND: "off_quintuple_press"},
(QUINTUPLE_PRESS, DIM_UP): {COMMAND: "up_quintuple_press"},
(QUINTUPLE_PRESS, DIM_DOWN): {COMMAND: "down_quintuple_press"},
(SHORT_RELEASE, TURN_ON): {COMMAND: "on_short_release"},
(SHORT_RELEASE, TURN_OFF): {COMMAND: "off_short_release"},
(SHORT_RELEASE, DIM_UP): {COMMAND: "up_short_release"},
(SHORT_RELEASE, DIM_DOWN): {COMMAND: "down_short_release"},
(LONG_RELEASE, TURN_ON): {COMMAND: "on_long_release"},
(LONG_RELEASE, TURN_OFF): {COMMAND: "off_long_release"},
(LONG_RELEASE, DIM_UP): {COMMAND: "up_long_release"},
(LONG_RELEASE, DIM_DOWN): {COMMAND: "down_long_release"},
}
class PhilipsOccupancySensing(CustomCluster):
"""Philips occupancy cluster."""
cluster_id = OccupancySensing.cluster_id
ep_attribute = "philips_occupancy"
attributes = OccupancySensing.attributes.copy()
attributes[0x0030] = ("sensitivity", t.uint8_t, True)
attributes[0x0031] = ("sensitivity_max", t.uint8_t, True)
server_commands = OccupancySensing.server_commands.copy()
client_commands = OccupancySensing.client_commands.copy()
class PhilipsBasicCluster(CustomCluster, Basic):
"""Philips Basic cluster."""
attributes = Basic.attributes.copy()
attributes[0x0031] = ("philips", t.bitmap16, True)
attr_config = {0x0031: 0x000B}
async def bind(self):
"""Bind cluster."""
result = await super().bind()
await self.write_attributes(self.attr_config, manufacturer=0x100B)
return result
class ButtonPressQueue:
"""Philips button queue to derive multiple press events."""
def __init__(self):
"""Init."""
self._ms_threshold = 300
self._ms_last_click = 0
self._click_counter = 1
self._button = None
self._callback = lambda x: None
self._task = None
async def _job(self):
await asyncio.sleep(self._ms_threshold / 1000)
self._callback(self._click_counter)
def _reset(self, button):
if self._task:
self._task.cancel()
self._click_counter = 1
self._button = button
def press(self, callback, button):
"""Process a button press."""
self._callback = callback
now_ms = time.time() * 1000
if self._button != button:
self._reset(button)
elif now_ms - self._ms_last_click > self._ms_threshold:
self._click_counter = 1
else:
self._task.cancel()
self._click_counter += 1
self._ms_last_click = now_ms
self._task = asyncio.ensure_future(self._job())
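# Illustrative timing example: three presses of the same button arriving about
# 120 ms apart each fall within the 300 ms threshold, so the queue keeps
# deferring the callback and finally fires it once with click_count == 3,
# letting the remote cluster emit a single "<button>_triple_press" event
# instead of three separate "<button>_press" events.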
class PhilipsRemoteCluster(CustomCluster):
"""Philips remote cluster."""
cluster_id = 0xFC00
name = "PhilipsRemoteCluster"
ep_attribute = "philips_remote_cluster"
client_commands = {
0x0000: foundation.ZCLCommandDef(
"notification",
{
"button": t.uint8_t,
"param2": t.uint24_t,
"press_type": t.uint8_t,
"param4": t.uint8_t,
"param5": t.uint8_t,
"param6": t.uint8_t,
},
False,
is_manufacturer_specific=True,
)
}
BUTTONS = {1: "on", 2: "up", 3: "down", 4: "off"}
PRESS_TYPES = {0: "press", 1: "hold", 2: "short_release", 3: "long_release"}
button_press_queue = ButtonPressQueue()
def handle_cluster_request(
self,
hdr: foundation.ZCLHeader,
args: List[Any],
*,
dst_addressing: Optional[
Union[t.Addressing.Group, t.Addressing.IEEE, t.Addressing.NWK]
] = None,
):
"""Handle the cluster command."""
_LOGGER.debug(
"PhilipsRemoteCluster - handle_cluster_request tsn: [%s] command id: %s - args: [%s]",
hdr.tsn,
hdr.command_id,
args,
)
button = self.BUTTONS.get(args[0], args[0])
press_type = self.PRESS_TYPES.get(args[2], args[2])
event_args = {
BUTTON: button,
PRESS_TYPE: press_type,
COMMAND_ID: hdr.command_id,
ARGS: args,
}
def send_press_event(click_count):
_LOGGER.debug(
"PhilipsRemoteCluster - send_press_event click_count: [%s]", click_count
)
press_type = None
if click_count == 1:
press_type = "press"
elif click_count == 2:
press_type = "double_press"
elif click_count == 3:
press_type = "triple_press"
elif click_count == 4:
press_type = "quadruple_press"
elif click_count > 4:
press_type = "quintuple_press"
if press_type:
# Override PRESS_TYPE
event_args[PRESS_TYPE] = press_type
action = f"{button}_{press_type}"
self.listener_event(ZHA_SEND_EVENT, action, event_args)
# Derive Multiple Presses
if press_type == "press":
self.button_press_queue.press(send_press_event, button)
else:
action = f"{button}_{press_type}"
self.listener_event(ZHA_SEND_EVENT, action, event_args) | zha-quirks | /zha_quirks-0.0.103-py3-none-any.whl/zhaquirks/philips/__init__.py | __init__.py |
from zigpy.profiles import zgp, zha
from zigpy.quirks import CustomDevice
from zigpy.zcl.clusters.closures import WindowCovering
from zigpy.zcl.clusters.general import (
Basic,
GreenPowerProxy,
Groups,
Identify,
LevelControl,
OnOff,
Ota,
Scenes,
)
from zigpy.zcl.clusters.lighting import Color
from zhaquirks import (
DEVICE_TYPE,
ENDPOINTS,
INPUT_CLUSTERS,
MODELS_INFO,
OUTPUT_CLUSTERS,
PROFILE_ID,
)
from zhaquirks.const import (
ALT_SHORT_PRESS,
BUTTON,
CLOSE,
COMMAND,
COMMAND_MOVE,
COMMAND_MOVE_ON_OFF,
COMMAND_OFF,
COMMAND_ON,
COMMAND_STOP,
COMMAND_TOGGLE,
DIM_DOWN,
DIM_UP,
ENDPOINT_ID,
OPEN,
SHORT_PRESS,
STOP,
TURN_OFF,
TURN_ON,
)
from zhaquirks.insta import INSTA
COMMAND_OPEN = "up_open"
COMMAND_CLOSE = "down_close"
COMMAND_STORE = "store"
COMMAND_RECALL = "recall"
class InstaNexentroPushbuttonInterface(CustomDevice):
"""Insta NEXENTRO Pushbutton Interface device."""
signature = {
MODELS_INFO: [(INSTA, "NEXENTRO Pushbutton Interface")],
ENDPOINTS: {
# <SimpleDescriptor endpoint=4 profile=260 device_type=261
# device_version=1
# input_clusters=[0, 3]
# output_clusters=[3, 4, 5, 6, 8, 25, 768]>
4: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.COLOR_DIMMER_SWITCH,
INPUT_CLUSTERS: [
Basic.cluster_id,
Identify.cluster_id,
],
OUTPUT_CLUSTERS: [
Identify.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
Ota.cluster_id,
Color.cluster_id,
],
},
# <SimpleDescriptor endpoint=5 profile=260 device_type=261
# device_version=1
# input_clusters=[0, 3]
# output_clusters=[3, 4, 5, 6, 8, 25, 768]>
5: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.COLOR_DIMMER_SWITCH,
INPUT_CLUSTERS: [
Basic.cluster_id,
Identify.cluster_id,
],
OUTPUT_CLUSTERS: [
Identify.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
Ota.cluster_id,
Color.cluster_id,
],
},
# <SimpleDescriptor endpoint=7 profile=260 device_type=515
# device_version=1
# input_clusters=[0, 3]
# output_clusters=[3, 4, 25, 258]>
7: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_CONTROLLER,
INPUT_CLUSTERS: [
Basic.cluster_id,
Identify.cluster_id,
],
OUTPUT_CLUSTERS: [
Identify.cluster_id,
Groups.cluster_id,
Ota.cluster_id,
WindowCovering.cluster_id,
],
},
# <SimpleDescriptor endpoint=242 profile=41440 device_type=97
# device_version=1
# input_clusters=[]
# output_clusters=[33]>
242: {
PROFILE_ID: zgp.PROFILE_ID,
DEVICE_TYPE: zgp.DeviceType.PROXY_BASIC,
INPUT_CLUSTERS: [],
OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],
},
},
}
replacement = {
ENDPOINTS: {
4: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.COLOR_DIMMER_SWITCH,
INPUT_CLUSTERS: [
Basic.cluster_id,
Identify.cluster_id,
],
OUTPUT_CLUSTERS: [
Identify.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
Ota.cluster_id,
Color.cluster_id,
],
},
5: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.COLOR_DIMMER_SWITCH,
INPUT_CLUSTERS: [
Basic.cluster_id,
Identify.cluster_id,
],
OUTPUT_CLUSTERS: [
Identify.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
Ota.cluster_id,
Color.cluster_id,
],
},
7: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_CONTROLLER,
INPUT_CLUSTERS: [
Basic.cluster_id,
Identify.cluster_id,
],
OUTPUT_CLUSTERS: [
Identify.cluster_id,
Groups.cluster_id,
Ota.cluster_id,
WindowCovering.cluster_id,
],
},
242: {
PROFILE_ID: zgp.PROFILE_ID,
DEVICE_TYPE: zgp.DeviceType.PROXY_BASIC,
INPUT_CLUSTERS: [],
OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],
},
}
}
device_automation_triggers = {
(SHORT_PRESS, TURN_ON): {COMMAND: COMMAND_ON, ENDPOINT_ID: 4},
(ALT_SHORT_PRESS, TURN_ON): {COMMAND: COMMAND_ON, ENDPOINT_ID: 5},
(SHORT_PRESS, TURN_OFF): {COMMAND: COMMAND_OFF, ENDPOINT_ID: 4},
(ALT_SHORT_PRESS, TURN_OFF): {COMMAND: COMMAND_OFF, ENDPOINT_ID: 5},
(SHORT_PRESS, BUTTON): {COMMAND: COMMAND_TOGGLE, ENDPOINT_ID: 4},
(ALT_SHORT_PRESS, BUTTON): {COMMAND: COMMAND_TOGGLE, ENDPOINT_ID: 5},
(SHORT_PRESS, OPEN): {COMMAND: COMMAND_OPEN},
(SHORT_PRESS, CLOSE): {COMMAND: COMMAND_CLOSE},
(SHORT_PRESS, DIM_UP): {COMMAND: COMMAND_MOVE_ON_OFF, ENDPOINT_ID: 4},
(ALT_SHORT_PRESS, DIM_UP): {COMMAND: COMMAND_MOVE_ON_OFF, ENDPOINT_ID: 5},
(SHORT_PRESS, DIM_DOWN): {COMMAND: COMMAND_MOVE, ENDPOINT_ID: 4},
(ALT_SHORT_PRESS, DIM_DOWN): {COMMAND: COMMAND_MOVE, ENDPOINT_ID: 5},
(SHORT_PRESS, STOP): {COMMAND: COMMAND_STOP, ENDPOINT_ID: 4},
(ALT_SHORT_PRESS, STOP): {COMMAND: COMMAND_STOP, ENDPOINT_ID: 5},
} | zha-quirks | /zha_quirks-0.0.103-py3-none-any.whl/zhaquirks/insta/nexentro_pushbutton_interface.py | nexentro_pushbutton_interface.py |
from zigpy.profiles import zgp, zha
from zigpy.profiles.zha import DeviceType
from zigpy.quirks import CustomDevice
from zigpy.zcl.clusters.general import (
Basic,
GreenPowerProxy,
Groups,
Identify,
LevelControl,
OnOff,
Ota,
Scenes,
)
from zigpy.zcl.clusters.homeautomation import Diagnostic
from zhaquirks.const import (
DEVICE_TYPE,
ENDPOINTS,
INPUT_CLUSTERS,
MODELS_INFO,
OUTPUT_CLUSTERS,
PROFILE_ID,
)
from zhaquirks.inovelli import INOVELLI_AUTOMATION_TRIGGERS, Inovelli_VZM35SN_Cluster
INOVELLI_VZM35SN_CLUSTER_ID = 64561
WWAH_CLUSTER_ID = 64599
class InovelliVZM35SN(CustomDevice):
"""VZM35-SN Fan Switch"""
signature = {
MODELS_INFO: [("Inovelli", "VZM35-SN")],
ENDPOINTS: {
1: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: DeviceType.DIMMABLE_LIGHT,
INPUT_CLUSTERS: [
Basic.cluster_id,
Identify.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
Diagnostic.cluster_id,
INOVELLI_VZM35SN_CLUSTER_ID,
WWAH_CLUSTER_ID,
],
OUTPUT_CLUSTERS: [
Ota.cluster_id,
],
},
2: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: DeviceType.DIMMER_SWITCH,
INPUT_CLUSTERS: [
Basic.cluster_id,
Identify.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
],
OUTPUT_CLUSTERS: [
Identify.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
],
},
242: {
PROFILE_ID: zgp.PROFILE_ID,
DEVICE_TYPE: zgp.DeviceType.PROXY_BASIC,
INPUT_CLUSTERS: [],
OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],
},
},
}
replacement = {
ENDPOINTS: {
1: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: DeviceType.DIMMABLE_LIGHT,
INPUT_CLUSTERS: [
Basic.cluster_id,
Identify.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
Diagnostic.cluster_id,
Inovelli_VZM35SN_Cluster,
WWAH_CLUSTER_ID,
],
OUTPUT_CLUSTERS: [
Ota.cluster_id,
],
},
2: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: DeviceType.DIMMER_SWITCH,
INPUT_CLUSTERS: [
Basic.cluster_id,
Identify.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
],
OUTPUT_CLUSTERS: [
Identify.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
],
},
242: {
PROFILE_ID: zgp.PROFILE_ID,
DEVICE_TYPE: zgp.DeviceType.PROXY_BASIC,
INPUT_CLUSTERS: [],
OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],
},
},
}
device_automation_triggers = INOVELLI_AUTOMATION_TRIGGERS | zha-quirks | /zha_quirks-0.0.103-py3-none-any.whl/zhaquirks/inovelli/VZM35SN.py | VZM35SN.py |
from zigpy.profiles import zgp, zha
from zigpy.profiles.zha import DeviceType
from zigpy.quirks import CustomDevice
from zigpy.zcl.clusters.general import (
Basic,
GreenPowerProxy,
Groups,
Identify,
LevelControl,
OnOff,
Ota,
Scenes,
)
from zigpy.zcl.clusters.homeautomation import Diagnostic, ElectricalMeasurement
from zigpy.zcl.clusters.smartenergy import Metering
from zhaquirks.const import (
DEVICE_TYPE,
ENDPOINTS,
INPUT_CLUSTERS,
MODELS_INFO,
OUTPUT_CLUSTERS,
PROFILE_ID,
)
from zhaquirks.inovelli import INOVELLI_AUTOMATION_TRIGGERS, Inovelli_VZM31SN_Cluster
INOVELLI_VZM31SN_CLUSTER_ID = 64561
WWAH_CLUSTER_ID = 64599
class InovelliVZM31SNv12(CustomDevice):
"""VZM31-SN 2 in 1 Switch/Dimmer Module Firmware version 2.08 and above."""
signature = {
MODELS_INFO: [("Inovelli", "VZM31-SN")],
ENDPOINTS: {
1: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: DeviceType.DIMMABLE_LIGHT,
INPUT_CLUSTERS: [
Basic.cluster_id,
Identify.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
Metering.cluster_id,
ElectricalMeasurement.cluster_id,
Diagnostic.cluster_id,
INOVELLI_VZM31SN_CLUSTER_ID,
WWAH_CLUSTER_ID,
],
OUTPUT_CLUSTERS: [Ota.cluster_id],
},
2: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: DeviceType.DIMMER_SWITCH,
INPUT_CLUSTERS: [
Basic.cluster_id,
Identify.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
],
OUTPUT_CLUSTERS: [
Identify.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
INOVELLI_VZM31SN_CLUSTER_ID,
],
},
242: {
PROFILE_ID: zgp.PROFILE_ID,
DEVICE_TYPE: zgp.DeviceType.PROXY_BASIC,
INPUT_CLUSTERS: [],
OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],
},
},
}
replacement = {
ENDPOINTS: {
1: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: DeviceType.DIMMABLE_LIGHT,
INPUT_CLUSTERS: [
Basic.cluster_id,
Identify.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
Metering.cluster_id,
ElectricalMeasurement.cluster_id,
Diagnostic.cluster_id,
Inovelli_VZM31SN_Cluster,
WWAH_CLUSTER_ID,
],
OUTPUT_CLUSTERS: [
Ota.cluster_id,
],
},
2: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: DeviceType.DIMMER_SWITCH,
INPUT_CLUSTERS: [Basic.cluster_id, Identify.cluster_id],
OUTPUT_CLUSTERS: [
Identify.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
Inovelli_VZM31SN_Cluster,
],
},
242: {
PROFILE_ID: zgp.PROFILE_ID,
DEVICE_TYPE: zgp.DeviceType.PROXY_BASIC,
INPUT_CLUSTERS: [],
OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],
},
},
}
device_automation_triggers = INOVELLI_AUTOMATION_TRIGGERS
class InovelliVZM31SNv11(CustomDevice):
"""VZM31-SN 2 in 1 Switch/Dimmer Module."""
signature = {
MODELS_INFO: [("Inovelli", "VZM31-SN")],
ENDPOINTS: {
1: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: DeviceType.DIMMABLE_LIGHT,
INPUT_CLUSTERS: [
Basic.cluster_id,
Identify.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
Metering.cluster_id,
ElectricalMeasurement.cluster_id,
Diagnostic.cluster_id,
INOVELLI_VZM31SN_CLUSTER_ID,
WWAH_CLUSTER_ID,
],
OUTPUT_CLUSTERS: [Ota.cluster_id],
},
2: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: DeviceType.DIMMER_SWITCH,
INPUT_CLUSTERS: [Basic.cluster_id, Identify.cluster_id],
OUTPUT_CLUSTERS: [
Identify.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
INOVELLI_VZM31SN_CLUSTER_ID,
],
},
242: {
PROFILE_ID: zgp.PROFILE_ID,
DEVICE_TYPE: zgp.DeviceType.PROXY_BASIC,
INPUT_CLUSTERS: [],
OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],
},
},
}
replacement = {
ENDPOINTS: {
1: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: DeviceType.DIMMABLE_LIGHT,
INPUT_CLUSTERS: [
Basic.cluster_id,
Identify.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
Metering.cluster_id,
ElectricalMeasurement.cluster_id,
Diagnostic.cluster_id,
Inovelli_VZM31SN_Cluster,
WWAH_CLUSTER_ID,
],
OUTPUT_CLUSTERS: [
Identify.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
Ota.cluster_id,
Inovelli_VZM31SN_Cluster,
],
},
2: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: DeviceType.DIMMER_SWITCH,
INPUT_CLUSTERS: [Basic.cluster_id, Identify.cluster_id],
OUTPUT_CLUSTERS: [
Identify.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
Inovelli_VZM31SN_Cluster,
],
},
242: {
PROFILE_ID: zgp.PROFILE_ID,
DEVICE_TYPE: zgp.DeviceType.PROXY_BASIC,
INPUT_CLUSTERS: [],
OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],
},
},
}
device_automation_triggers = INOVELLI_AUTOMATION_TRIGGERS
class InovelliVZM31SNv10(CustomDevice):
"""VZM31-SN 2 in 1 Switch/Dimmer Module."""
signature = {
MODELS_INFO: [("Inovelli", "VZM31-SN")],
ENDPOINTS: {
1: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: DeviceType.DIMMABLE_LIGHT,
INPUT_CLUSTERS: [
Basic.cluster_id,
Identify.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
Metering.cluster_id,
ElectricalMeasurement.cluster_id,
Diagnostic.cluster_id,
INOVELLI_VZM31SN_CLUSTER_ID,
WWAH_CLUSTER_ID,
],
OUTPUT_CLUSTERS: [Identify.cluster_id, Ota.cluster_id],
},
2: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: DeviceType.DIMMER_SWITCH,
INPUT_CLUSTERS: [Basic.cluster_id, Identify.cluster_id],
OUTPUT_CLUSTERS: [
Identify.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
INOVELLI_VZM31SN_CLUSTER_ID,
],
},
},
}
replacement = {
ENDPOINTS: {
1: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: DeviceType.DIMMABLE_LIGHT,
INPUT_CLUSTERS: [
Basic.cluster_id,
Identify.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
Metering.cluster_id,
ElectricalMeasurement.cluster_id,
Diagnostic.cluster_id,
Inovelli_VZM31SN_Cluster,
WWAH_CLUSTER_ID,
],
OUTPUT_CLUSTERS: [
Identify.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
Ota.cluster_id,
Inovelli_VZM31SN_Cluster,
],
},
2: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: DeviceType.DIMMER_SWITCH,
INPUT_CLUSTERS: [Basic.cluster_id, Identify.cluster_id],
OUTPUT_CLUSTERS: [
Identify.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
Inovelli_VZM31SN_Cluster,
],
},
},
}
device_automation_triggers = INOVELLI_AUTOMATION_TRIGGERS
class InovelliVZM31SNv9(CustomDevice):
"""VZM31-SN 2 in 1 Switch/Dimmer Module."""
signature = {
MODELS_INFO: [("Inovelli", "VZM31-SN")],
ENDPOINTS: {
1: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: DeviceType.DIMMER_SWITCH,
INPUT_CLUSTERS: [
Basic.cluster_id,
Identify.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
Metering.cluster_id,
ElectricalMeasurement.cluster_id,
Diagnostic.cluster_id,
INOVELLI_VZM31SN_CLUSTER_ID,
WWAH_CLUSTER_ID,
],
OUTPUT_CLUSTERS: [Identify.cluster_id, Ota.cluster_id],
},
2: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: DeviceType.DIMMER_SWITCH,
INPUT_CLUSTERS: [Identify.cluster_id],
OUTPUT_CLUSTERS: [
Identify.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
INOVELLI_VZM31SN_CLUSTER_ID,
],
},
},
}
replacement = {
ENDPOINTS: {
1: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: DeviceType.DIMMABLE_LIGHT,
INPUT_CLUSTERS: [
Basic.cluster_id,
Identify.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
Metering.cluster_id,
ElectricalMeasurement.cluster_id,
Diagnostic.cluster_id,
Inovelli_VZM31SN_Cluster,
WWAH_CLUSTER_ID,
],
OUTPUT_CLUSTERS: [
Identify.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
Ota.cluster_id,
Inovelli_VZM31SN_Cluster,
],
},
2: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: DeviceType.DIMMER_SWITCH,
INPUT_CLUSTERS: [Identify.cluster_id],
OUTPUT_CLUSTERS: [
Identify.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
Inovelli_VZM31SN_Cluster,
],
},
},
}
device_automation_triggers = INOVELLI_AUTOMATION_TRIGGERS
class InovelliVZM31SN(CustomDevice):
"""VZM31-SN 2 in 1 Switch/Dimmer Module."""
signature = {
MODELS_INFO: [("Inovelli", "VZM31-SN")],
ENDPOINTS: {
1: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: DeviceType.DIMMER_SWITCH,
INPUT_CLUSTERS: [
Basic.cluster_id,
Identify.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
Metering.cluster_id,
ElectricalMeasurement.cluster_id,
INOVELLI_VZM31SN_CLUSTER_ID,
],
OUTPUT_CLUSTERS: [Identify.cluster_id, Ota.cluster_id],
},
2: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: DeviceType.DIMMER_SWITCH,
INPUT_CLUSTERS: [Identify.cluster_id],
OUTPUT_CLUSTERS: [
Identify.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
INOVELLI_VZM31SN_CLUSTER_ID,
],
},
},
}
replacement = {
ENDPOINTS: {
1: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: DeviceType.DIMMABLE_LIGHT,
INPUT_CLUSTERS: [
Basic.cluster_id,
Identify.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
Metering.cluster_id,
ElectricalMeasurement.cluster_id,
Diagnostic.cluster_id,
Inovelli_VZM31SN_Cluster,
],
OUTPUT_CLUSTERS: [
Identify.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
Ota.cluster_id,
Inovelli_VZM31SN_Cluster,
],
},
2: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: DeviceType.DIMMER_SWITCH,
INPUT_CLUSTERS: [Identify.cluster_id],
OUTPUT_CLUSTERS: [
Identify.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
Inovelli_VZM31SN_Cluster,
],
},
},
}
device_automation_triggers = INOVELLI_AUTOMATION_TRIGGERS | zha-quirks | /zha_quirks-0.0.103-py3-none-any.whl/zhaquirks/inovelli/VZM31SN.py | VZM31SN.py |
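"""Shared clusters, constants, and automation triggers for Inovelli devices."""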
import logging
from typing import Any, List, Optional, Union
from zigpy.quirks import CustomCluster
import zigpy.types as t
from zigpy.zcl import foundation
from zigpy.zcl.clusters.manufacturer_specific import ManufacturerSpecificCluster
from zhaquirks.const import (
BUTTON,
BUTTON_1,
BUTTON_2,
BUTTON_3,
COMMAND,
COMMAND_DOUBLE,
COMMAND_HOLD,
COMMAND_ID,
COMMAND_PRESS,
COMMAND_QUAD,
COMMAND_RELEASE,
COMMAND_TRIPLE,
DOUBLE_PRESS,
PRESS_TYPE,
QUADRUPLE_PRESS,
QUINTUPLE_PRESS,
TRIPLE_PRESS,
ZHA_SEND_EVENT,
)
_LOGGER = logging.getLogger(__name__)
INOVELLI_VZM31SN_CLUSTER_ID = 64561
# Press Types
# 0 - pressed
# 1 - released
# 2 - held
# 3 - 2x
# 4 - 3x
# 5 - 4x
# 6 - 5x
COMMAND_QUINTUPLE = "quintuple"
PRESS_TYPES = {
0: COMMAND_PRESS,
1: COMMAND_RELEASE,
2: COMMAND_HOLD,
3: COMMAND_DOUBLE,
4: COMMAND_TRIPLE,
5: COMMAND_QUAD,
6: COMMAND_QUINTUPLE,
}
LED_NOTIFICATION_TYPES = {
0: "LED_1",
1: "LED_2",
2: "LED_3",
3: "LED_4",
4: "LED_5",
5: "LED_6",
6: "LED_7",
16: "ALL_LEDS",
255: "CONFIG_BUTTON_DOUBLE_PRESS",
}
# Buttons
# 1 - down button
# 2 - up button
# 3 - config button
BUTTONS = {1: BUTTON_1, 2: BUTTON_2, 3: BUTTON_3}
ON = "Up"
OFF = "Down"
CONFIG = "Config"
NOTIFICATION_TYPE = "notification_type"
class Inovelli_VZM31SN_Cluster(CustomCluster):
"""Inovelli VZM31-SN custom cluster."""
cluster_id = 0xFC31
name = "InovelliVZM31SNCluster"
ep_attribute = "inovelli_vzm31sn_cluster"
attributes = ManufacturerSpecificCluster.attributes.copy()
attributes.update(
{
0x0001: ("dimming_speed_up_remote", t.uint8_t, True),
0x0002: ("dimming_speed_up_local", t.uint8_t, True),
0x0003: ("ramp_rate_off_to_on_local", t.uint8_t, True),
0x0004: ("ramp_rate_off_to_on_remote", t.uint8_t, True),
0x0005: ("dimming_speed_down_remote", t.uint8_t, True),
0x0006: ("dimming_speed_down_local", t.uint8_t, True),
0x0007: ("ramp_rate_on_to_off_local", t.uint8_t, True),
0x0008: ("ramp_rate_on_to_off_remote", t.uint8_t, True),
0x0009: ("minimum_level", t.uint8_t, True),
0x000A: ("maximum_level", t.uint8_t, True),
0x000B: ("invert_switch", t.Bool, True),
0x000C: ("auto_off_timer", t.uint16_t, True),
0x000D: ("default_level_local", t.uint8_t, True),
0x000E: ("default_level_remote", t.uint8_t, True),
0x000F: ("state_after_power_restored", t.uint8_t, True),
0x0010: ("disable_remote_control", t.uint8_t, True),
0x0011: ("load_level_indicator_timeout", t.uint8_t, True),
0x0012: ("active_power_reports", t.uint8_t, True),
0x0013: ("periodic_power_and_energy_reports", t.uint8_t, True),
0x0014: ("active_energy_reports", t.uint16_t, True),
0x0015: ("power_type", t.uint8_t, True),
0x0016: ("switch_type", t.uint8_t, True),
0x0019: ("increased_non_neutral_output", t.Bool, True),
0x0032: ("button_delay", t.uint8_t, True),
0x0033: ("device_bind_number", t.uint8_t, True),
0x0034: ("smart_bulb_mode", t.Bool, True),
0x0035: ("double_tap_up_enabled", t.Bool, True),
0x0036: ("double_tap_down_enabled", t.Bool, True),
0x0037: ("double_tap_up_level", t.uint8_t, True),
0x0038: ("double_tap_down_level", t.uint8_t, True),
0x003C: ("default_led1_strip_color_when_on", t.uint8_t, True),
0x003D: ("default_led1_strip_color_when_off", t.uint8_t, True),
0x003E: ("default_led1_strip_intensity_when_on", t.uint8_t, True),
0x003F: ("default_led1_strip_intensity_when_off", t.uint8_t, True),
0x0041: ("default_led2_strip_color_when_on", t.uint8_t, True),
0x0042: ("default_led2_strip_color_when_off", t.uint8_t, True),
0x0043: ("default_led2_strip_intensity_when_on", t.uint8_t, True),
0x0044: ("default_led2_strip_intensity_when_off", t.uint8_t, True),
0x0046: ("default_led3_strip_color_when_on", t.uint8_t, True),
0x0047: ("default_led3_strip_color_when_off", t.uint8_t, True),
0x0048: ("default_led3_strip_intensity_when_on", t.uint8_t, True),
0x0049: ("default_led3_strip_intensity_when_off", t.uint8_t, True),
0x004B: ("default_led4_strip_color_when_on", t.uint8_t, True),
0x004C: ("default_led4_strip_color_when_off", t.uint8_t, True),
0x004D: ("default_led4_strip_intensity_when_on", t.uint8_t, True),
0x004E: ("default_led4_strip_intensity_when_off", t.uint8_t, True),
0x0050: ("default_led5_strip_color_when_on", t.uint8_t, True),
0x0051: ("default_led5_strip_color_when_off", t.uint8_t, True),
0x0052: ("default_led5_strip_intensity_when_on", t.uint8_t, True),
0x0053: ("default_led5_strip_intensity_when_off", t.uint8_t, True),
0x0055: ("default_led6_strip_color_when_on", t.uint8_t, True),
0x0056: ("default_led6_strip_color_when_off", t.uint8_t, True),
0x0057: ("default_led6_strip_intensity_when_on", t.uint8_t, True),
0x0058: ("default_led6_strip_intensity_when_off", t.uint8_t, True),
0x005A: ("default_led7_strip_color_when_on", t.uint8_t, True),
0x005B: ("default_led7_strip_color_when_off", t.uint8_t, True),
0x005C: ("default_led7_strip_intensity_when_on", t.uint8_t, True),
0x005D: ("default_led7_strip_intensity_when_off", t.uint8_t, True),
0x005F: ("led_color_when_on", t.uint8_t, True),
0x0060: ("led_color_when_off", t.uint8_t, True),
0x0061: ("led_intensity_when_on", t.uint8_t, True),
0x0062: ("led_intensity_when_off", t.uint8_t, True),
0x0064: ("led_scaling_mode", t.Bool, True),
0x007B: ("aux_switch_scenes", t.Bool, True),
0x007D: ("binding_off_to_on_sync_level", t.Bool, True),
0x0100: ("local_protection", t.Bool, True),
0x0101: ("remote_protection", t.Bool, True),
0x0102: ("output_mode", t.Bool, True),
0x0103: ("on_off_led_mode", t.Bool, True),
0x0104: ("firmware_progress_led", t.Bool, True),
0x0105: ("relay_click_in_on_off_mode", t.Bool, True),
0x0106: ("disable_clear_notifications_double_tap", t.Bool, True),
}
)
server_commands = {
0x00: foundation.ZCLCommandDef(
"button_event",
{"button_pressed": t.uint8_t, "press_type": t.uint8_t},
direction=foundation.Direction.Server_to_Client,
is_manufacturer_specific=True,
),
0x01: foundation.ZCLCommandDef(
"led_effect",
{
"led_effect": t.uint8_t,
"led_color": t.uint8_t,
"led_level": t.uint8_t,
"led_duration": t.uint8_t,
},
direction=foundation.Direction.Server_to_Client,
is_manufacturer_specific=True,
),
0x02: foundation.ZCLCommandDef(
"reset_energy_meter",
{},
direction=foundation.Direction.Server_to_Client,
is_manufacturer_specific=True,
),
0x03: foundation.ZCLCommandDef(
"individual_led_effect",
{
"led_number": t.uint8_t,
"led_effect": t.uint8_t,
"led_color": t.uint8_t,
"led_level": t.uint8_t,
"led_duration": t.uint8_t,
},
direction=foundation.Direction.Server_to_Client,
is_manufacturer_specific=True,
),
0x24: foundation.ZCLCommandDef(
"led_effect_complete",
{
"notification_type": t.uint8_t,
},
direction=foundation.Direction.Server_to_Client,
is_manufacturer_specific=True,
),
}
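    # Illustrative sketch (not part of the shipped quirk): zigpy exposes the
    # server commands defined above as awaitable methods on a cluster instance,
    # so an application holding a reference to this cluster could request an
    # LED notification roughly like
    #   await cluster.led_effect(led_effect=1, led_color=170, led_level=100, led_duration=10)
    # where all numeric values here are hypothetical placeholders.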
def handle_cluster_request(
self,
hdr: foundation.ZCLHeader,
args: List[Any],
*,
dst_addressing: Optional[
Union[t.Addressing.Group, t.Addressing.IEEE, t.Addressing.NWK]
] = None,
):
"""Handle a cluster request."""
_LOGGER.debug(
"%s: handle_cluster_request - Command: %s Data: %s",
self.name,
hdr.command_id,
args,
)
if hdr.command_id == self.commands_by_name["button_event"].id:
button = BUTTONS[args.button_pressed]
press_type = PRESS_TYPES[args.press_type]
action = f"{button}_{press_type}"
event_args = {
BUTTON: button,
PRESS_TYPE: press_type,
COMMAND_ID: hdr.command_id,
}
self.listener_event(ZHA_SEND_EVENT, action, event_args)
return
if hdr.command_id == self.commands_by_name["led_effect_complete"].id:
notification_type = LED_NOTIFICATION_TYPES.get(
args.notification_type, "unknown"
)
action = f"led_effect_complete_{notification_type}"
event_args = {
NOTIFICATION_TYPE: notification_type,
COMMAND_ID: hdr.command_id,
}
self.listener_event(ZHA_SEND_EVENT, action, event_args)
return
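# Attribute ids defined on the VZM31-SN dimmer that do not apply to the VZM35-SN
# fan switch (power/energy reporting, non-neutral output, smart bulb mode,
# LED scaling, binding level sync, and relay click); filtered out below.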
VZM35SN_REMOVES = [
0x0012,
0x0013,
0x0014,
0x0019,
0x0034,
0x0064,
0x007D,
0x0105,
]
class Inovelli_VZM35SN_Cluster(Inovelli_VZM31SN_Cluster):
"""Inovelli VZM35-SN custom cluster."""
attributes = {
key: Inovelli_VZM31SN_Cluster.attributes[key]
for key in Inovelli_VZM31SN_Cluster.attributes
if key not in VZM35SN_REMOVES
}
attributes.update(
{
0x0017: ("quick_start_time", t.uint8_t, True),
0x001E: ("non_neutral_aux_med_gear_learn_value", t.uint8_t, True),
0x001F: ("non_neutral_aux_low_gear_learn_value", t.uint8_t, True),
0x0034: ("smart_fan_mode", t.Bool, True),
0x0106: ("smart_fan_led_display_levels", t.uint8_t, True),
}
)
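# Device automation triggers: keys are (press type, button label) pairs and the
# values name the ZHA events emitted by handle_cluster_request above.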
INOVELLI_AUTOMATION_TRIGGERS = {
(COMMAND_PRESS, ON): {COMMAND: f"{BUTTON_2}_{COMMAND_PRESS}"},
(COMMAND_PRESS, OFF): {COMMAND: f"{BUTTON_1}_{COMMAND_PRESS}"},
(COMMAND_PRESS, CONFIG): {COMMAND: f"{BUTTON_3}_{COMMAND_PRESS}"},
(COMMAND_HOLD, ON): {COMMAND: f"{BUTTON_2}_{COMMAND_HOLD}"},
(COMMAND_HOLD, OFF): {COMMAND: f"{BUTTON_1}_{COMMAND_HOLD}"},
(COMMAND_HOLD, CONFIG): {COMMAND: f"{BUTTON_3}_{COMMAND_HOLD}"},
(DOUBLE_PRESS, ON): {COMMAND: f"{BUTTON_2}_{COMMAND_DOUBLE}"},
(DOUBLE_PRESS, CONFIG): {COMMAND: f"{BUTTON_3}_{COMMAND_DOUBLE}"},
(DOUBLE_PRESS, OFF): {COMMAND: f"{BUTTON_1}_{COMMAND_DOUBLE}"},
(TRIPLE_PRESS, ON): {COMMAND: f"{BUTTON_2}_{COMMAND_TRIPLE}"},
(TRIPLE_PRESS, CONFIG): {COMMAND: f"{BUTTON_3}_{COMMAND_TRIPLE}"},
(TRIPLE_PRESS, OFF): {COMMAND: f"{BUTTON_1}_{COMMAND_TRIPLE}"},
(QUADRUPLE_PRESS, ON): {COMMAND: f"{BUTTON_2}_{COMMAND_QUAD}"},
(QUADRUPLE_PRESS, CONFIG): {COMMAND: f"{BUTTON_3}_{COMMAND_QUAD}"},
(QUADRUPLE_PRESS, OFF): {COMMAND: f"{BUTTON_1}_{COMMAND_QUAD}"},
(QUINTUPLE_PRESS, ON): {COMMAND: f"{BUTTON_2}_{COMMAND_QUINTUPLE}"},
(QUINTUPLE_PRESS, OFF): {COMMAND: f"{BUTTON_1}_{COMMAND_QUINTUPLE}"},
(QUINTUPLE_PRESS, CONFIG): {COMMAND: f"{BUTTON_3}_{COMMAND_QUINTUPLE}"},
(COMMAND_RELEASE, ON): {COMMAND: f"{BUTTON_2}_{COMMAND_RELEASE}"},
(COMMAND_RELEASE, OFF): {COMMAND: f"{BUTTON_1}_{COMMAND_RELEASE}"},
(COMMAND_RELEASE, CONFIG): {COMMAND: f"{BUTTON_3}_{COMMAND_RELEASE}"},
} | zha-quirks | /zha_quirks-0.0.103-py3-none-any.whl/zhaquirks/inovelli/__init__.py | __init__.py |
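"""Shared clusters and device base class for Elko thermostats."""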
from zigpy.quirks import CustomCluster, CustomDevice
from zigpy.zcl.clusters.homeautomation import ElectricalMeasurement
from zigpy.zcl.clusters.hvac import Thermostat, UserInterface
from zhaquirks import Bus, LocalDataCluster
ELKO = "ELKO"
class ElkoThermostatCluster(CustomCluster, Thermostat):
"""Thermostat cluster for Elko Thermostats."""
def __init__(self, *args, **kwargs):
"""Init thermostat cluster."""
super().__init__(*args, **kwargs)
self.endpoint.device.thermostat_bus.add_listener(self)
def heating_active_change(self, value):
"""State update from device."""
if value == 0:
mode = self.RunningMode.Off
state = self.RunningState.Idle
else:
mode = self.RunningMode.Heat
state = self.RunningState.Heat_State_On
self._update_attribute(self.attributes_by_name["running_mode"].id, mode)
self._update_attribute(self.attributes_by_name["running_state"].id, state)
class ElkoUserInterfaceCluster(LocalDataCluster, UserInterface):
"""User interface cluster for Elko Thermostats."""
def __init__(self, *args, **kwargs):
"""Init UI cluster."""
super().__init__(*args, **kwargs)
self.endpoint.device.ui_bus.add_listener(self)
def child_lock_change(self, mode):
"""Enable/disable child lock."""
if mode:
lockout = self.KeypadLockout.Level_1_lockout
else:
lockout = self.KeypadLockout.No_lockout
self._update_attribute(self.attributes_by_name["keypad_lockout"].id, lockout)
class ElkoElectricalMeasurementCluster(LocalDataCluster, ElectricalMeasurement):
"""Electrical measurement cluster for Elko Thermostats."""
cluster_id = ElectricalMeasurement.cluster_id
ACTIVE_POWER_ID = 0x050B
def __init__(self, *args, **kwargs):
"""Init electrical measurement cluster."""
super().__init__(*args, **kwargs)
self.endpoint.device.power_bus.add_listener(self)
def power_reported(self, value):
"""Report consumption."""
self._update_attribute(self.ACTIVE_POWER_ID, value)
class ElkoThermostat(CustomDevice):
"""Generic Elko Thermostat device."""
def __init__(self, *args, **kwargs):
"""Init device."""
self.thermostat_bus = Bus()
self.ui_bus = Bus()
self.power_bus = Bus()
super().__init__(*args, **kwargs) | zha-quirks | /zha_quirks-0.0.103-py3-none-any.whl/zhaquirks/elko/__init__.py | __init__.py |
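"""Quirk for the Elko Smart Super TR thermostat."""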
import zigpy.profiles.zha as zha_p
import zigpy.types as t
from zigpy.zcl.clusters.general import Basic, Groups, Identify, Ota, Scenes
from zigpy.zcl.clusters.hvac import Thermostat
from zhaquirks.const import (
DEVICE_TYPE,
ENDPOINTS,
INPUT_CLUSTERS,
MODELS_INFO,
OUTPUT_CLUSTERS,
PROFILE_ID,
)
from zhaquirks.elko import (
ELKO,
ElkoElectricalMeasurementCluster,
ElkoThermostat,
ElkoThermostatCluster,
ElkoUserInterfaceCluster,
)
LOCAL_TEMP = 0x0000
UNKNOWN_1 = 0x0401
DISPLAY_TEXT = 0x0402
ACTIVE_SENSOR = 0x0403
UNKNOWN_2 = 0x0404
REGULATOR_MODE = 0x0405
DEVICE_ON = 0x0406
UNKNOWN_3 = 0x0407
POWER_CONSUMPTION = 0x0408
FLOOR_SENSOR_TEMPERATURE = 0x0409
UNKNOWN_4 = 0x0410
NIGHT_LOWERING = 0x0411
UNKNOWN_5 = 0x0412
CHILD_LOCK = 0x0413
PROTECTION_MAX_TEMP = 0x0414
HEATING_ACTIVE = 0x0415
UNKNOWN_6 = 0x0416
UNKNOWN_7 = 0x0417
UNKNOWN_8 = 0x0418
UNKNOWN_9 = 0x0419
class ElkoSuperTRThermostatCluster(ElkoThermostatCluster):
"""Elko custom thermostat cluster."""
class Sensor(t.enum8):
"""Working modes of the thermostat."""
AIR = 0x00
FLOOR = 0x01
PROTECTION = 0x03
attributes = ElkoThermostatCluster.attributes.copy()
attributes.update(
{
UNKNOWN_1: ("unknown_1", t.uint16_t),
DISPLAY_TEXT: ("display_text", t.CharacterString),
ACTIVE_SENSOR: ("active_sensor", Sensor),
UNKNOWN_2: ("unknown_2", t.uint8_t),
REGULATOR_MODE: ("regulator_mode", t.Bool),
DEVICE_ON: ("device_on", t.Bool),
UNKNOWN_3: ("unknown_3", t.LongOctetString),
POWER_CONSUMPTION: ("power_consumtion", t.uint16_t),
FLOOR_SENSOR_TEMPERATURE: ("floor_sensor_temperature", t.int16s),
UNKNOWN_4: ("unknown_4", t.uint16_t),
NIGHT_LOWERING: ("night_lowering", t.Bool),
UNKNOWN_5: ("unknown_5", t.Bool),
CHILD_LOCK: ("child_lock", t.Bool),
PROTECTION_MAX_TEMP: ("protection_max_temp", t.uint8_t),
HEATING_ACTIVE: ("heating_active", t.Bool),
UNKNOWN_6: ("unknown_6", t.LongOctetString),
UNKNOWN_7: ("unknown_7", t.int8s),
UNKNOWN_8: ("unknown_8", t.uint8_t),
UNKNOWN_9: ("unknown_9", t.uint8_t),
}
)
def __init__(self, *args, **kwargs):
"""Init Elko thermostat."""
super().__init__(*args, **kwargs)
self.active_sensor = None
async def write_attributes(self, attributes, manufacturer=None):
"""Override writes to thermostat attributes."""
if "system_mode" in attributes:
val = attributes.get("system_mode")
night_lowering = 0
if val == Thermostat.SystemMode.Off:
device_on = 0
elif val == Thermostat.SystemMode.Auto:
device_on = 1
night_lowering = 1
elif val == Thermostat.SystemMode.Heat:
device_on = 1
attributes["device_on"] = device_on
attributes["night_lowering"] = night_lowering
return await super().write_attributes(attributes, manufacturer=manufacturer)
def _update_attribute(self, attrid, value):
if attrid == HEATING_ACTIVE:
self.endpoint.device.thermostat_bus.listener_event(
"heating_active_change", value
)
elif attrid == CHILD_LOCK:
self.endpoint.device.ui_bus.listener_event("child_lock_change", value)
elif attrid == ACTIVE_SENSOR:
self.active_sensor = value
elif attrid == LOCAL_TEMP:
if (
self.active_sensor is not None
and self.active_sensor == self.Sensor.FLOOR
):
# Ignore the air sensor reading if the floor sensor is selected
return
elif attrid == FLOOR_SENSOR_TEMPERATURE:
if (
self.active_sensor is not None
and self.active_sensor == self.Sensor.FLOOR
):
attrid = LOCAL_TEMP
elif attrid == POWER_CONSUMPTION:
if value is not None and value >= 0:
self.endpoint.device.power_bus.listener_event("power_reported", value)
super()._update_attribute(attrid, value)
class ElkoSuperTRThermostat(ElkoThermostat):
"""Elko thermostat custom device."""
manufacturer_id_override = 0
signature = {
MODELS_INFO: [(ELKO, "Super TR")],
ENDPOINTS: {
1: {
PROFILE_ID: zha_p.PROFILE_ID,
DEVICE_TYPE: zha_p.DeviceType.THERMOSTAT,
INPUT_CLUSTERS: [
Basic.cluster_id,
Identify.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
Thermostat.cluster_id,
],
OUTPUT_CLUSTERS: [
Identify.cluster_id,
Ota.cluster_id,
],
}
},
}
replacement = {
ENDPOINTS: {
1: {
INPUT_CLUSTERS: [
Basic.cluster_id,
Identify.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
ElkoSuperTRThermostatCluster,
ElkoUserInterfaceCluster,
ElkoElectricalMeasurementCluster,
],
OUTPUT_CLUSTERS: [
Identify.cluster_id,
Ota.cluster_id,
],
}
}
} | zha-quirks | /zha_quirks-0.0.103-py3-none-any.whl/zhaquirks/elko/smart_super_thermostat.py | smart_super_thermostat.py |
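"""Quirk for LIDL RGB+CCT bulbs (TS0505A)."""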
from zigpy.profiles import zgp, zha
from zigpy.quirks import CustomCluster, CustomDevice
from zigpy.zcl.clusters.general import (
Basic,
GreenPowerProxy,
Groups,
Identify,
LevelControl,
OnOff,
Ota,
Scenes,
Time,
)
from zigpy.zcl.clusters.lighting import Color
from zigpy.zcl.clusters.lightlink import LightLink
from zhaquirks.const import (
DEVICE_TYPE,
ENDPOINTS,
INPUT_CLUSTERS,
MODELS_INFO,
OUTPUT_CLUSTERS,
PROFILE_ID,
)
class LidlRGBCCTColorCluster(CustomCluster, Color):
"""Lidl RGB+CCT Lighting custom cluster."""
# Set correct capabilities to ct, xy, hs
# LIDL bulbs do not correctly report this attribute (comes back as None in Home Assistant)
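    # 0x400A (color_capabilities) = 0b11001: hue/saturation (bit 0) + XY (bit 3) + color temperature (bit 4)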
_CONSTANT_ATTRIBUTES = {0x400A: 0b11001}
class RGBCCTLight(CustomDevice):
"""Lidl RGB+CCT Lighting device."""
signature = {
MODELS_INFO: [("_TZ3000_dbou1ap4", "TS0505A")],
ENDPOINTS: {
1: {
                # <SimpleDescriptor endpoint=1 profile=260 device_type=268
# device_version=1
# input_clusters=[0, 3, 4, 5, 6, 8, 768, 4096]
# output_clusters=[10, 25]
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.EXTENDED_COLOR_LIGHT,
INPUT_CLUSTERS: [
Basic.cluster_id,
Identify.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
Color.cluster_id,
LightLink.cluster_id,
],
OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],
},
242: {
# <SimpleDescriptor endpoint=242 profile=41440 device_type=97
# device_version=0
# input_clusters=[]
# output_clusters=[33]
PROFILE_ID: zgp.PROFILE_ID,
DEVICE_TYPE: zgp.DeviceType.PROXY_BASIC,
INPUT_CLUSTERS: [],
OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],
},
},
}
replacement = {
ENDPOINTS: {
1: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.EXTENDED_COLOR_LIGHT,
INPUT_CLUSTERS: [
Basic.cluster_id,
Identify.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
LidlRGBCCTColorCluster,
LightLink.cluster_id,
],
OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],
},
242: {
PROFILE_ID: zgp.PROFILE_ID,
DEVICE_TYPE: zgp.DeviceType.PROXY_BASIC,
INPUT_CLUSTERS: [],
OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],
},
}
} | zha-quirks | /zha_quirks-0.0.103-py3-none-any.whl/zhaquirks/lidl/rgbcct.py | rgbcct.py |
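"""Quirk for LIDL CCT bulbs (TS0502A)."""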
from zigpy.profiles import zgp, zha
from zigpy.quirks import CustomCluster, CustomDevice
from zigpy.zcl.clusters.general import (
Basic,
GreenPowerProxy,
Groups,
Identify,
LevelControl,
OnOff,
Ota,
Scenes,
Time,
)
from zigpy.zcl.clusters.lighting import Color
from zigpy.zcl.clusters.lightlink import LightLink
from zhaquirks.const import (
DEVICE_TYPE,
ENDPOINTS,
INPUT_CLUSTERS,
MODELS_INFO,
OUTPUT_CLUSTERS,
PROFILE_ID,
)
class LidlCCTColorCluster(CustomCluster, Color):
"""Lidl CCT Lighting custom cluster."""
# Remove RGB color wheel for CCT Lighting: only expose color temperature
# LIDL bulbs do not correctly report this attribute (comes back as None in Home Assistant)
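    # 0x400A (color_capabilities) = 16: color temperature only (bit 4)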
_CONSTANT_ATTRIBUTES = {0x400A: 16}
class CCTLight(CustomDevice):
"""Lidl CCT Lighting device."""
signature = {
MODELS_INFO: [
("_TZ3000_49qchf10", "TS0502A"),
("_TZ3000_oborybow", "TS0502A"),
("_TZ3000_9evm3otq", "TS0502A"),
("_TZ3000_rylaozuc", "TS0502A"),
("_TZ3000_el5kt5im", "TS0502A"),
("_TZ3000_oh7jddmx", "TS0502A"),
("_TZ3000_8uaoilu9", "TS0502A"),
],
ENDPOINTS: {
1: {
# <SimpleDescriptor endpoint=1 profile=260 device_type=268
# device_version=1
# input_clusters=[0, 3, 4, 5, 6, 8, 768, 4096]
# output_clusters=[10, 25]
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.COLOR_TEMPERATURE_LIGHT,
INPUT_CLUSTERS: [
Basic.cluster_id,
Identify.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
Color.cluster_id,
LightLink.cluster_id,
],
OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],
},
242: {
# <SimpleDescriptor endpoint=242 profile=41440 device_type=97
# device_version=0
# input_clusters=[]
# output_clusters=[33]
PROFILE_ID: zgp.PROFILE_ID,
DEVICE_TYPE: zgp.DeviceType.PROXY_BASIC,
INPUT_CLUSTERS: [],
OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],
},
},
}
replacement = {
ENDPOINTS: {
1: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.COLOR_TEMPERATURE_LIGHT,
INPUT_CLUSTERS: [
Basic.cluster_id,
Identify.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
LidlCCTColorCluster,
LightLink.cluster_id,
],
OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],
},
242: {
PROFILE_ID: zgp.PROFILE_ID,
DEVICE_TYPE: zgp.DeviceType.PROXY_BASIC,
INPUT_CLUSTERS: [],
OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],
},
}
} | zha-quirks | /zha_quirks-0.0.103-py3-none-any.whl/zhaquirks/lidl/cct.py | cct.py |
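"""Quirk for LIDL dimmable bulbs (TS0501A)."""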
from zigpy.profiles import zgp, zha
from zigpy.quirks import CustomDevice
from zigpy.zcl.clusters.general import (
Basic,
GreenPowerProxy,
Groups,
Identify,
LevelControl,
OnOff,
Ota,
Scenes,
Time,
)
from zigpy.zcl.clusters.lighting import Color
from zigpy.zcl.clusters.lightlink import LightLink
from zhaquirks.const import (
DEVICE_TYPE,
ENDPOINTS,
INPUT_CLUSTERS,
MODELS_INFO,
OUTPUT_CLUSTERS,
PROFILE_ID,
)
class DimmableBulb(CustomDevice):
"""Lidl dimmable bulb."""
signature = {
MODELS_INFO: [
("_TZ3000_nosnx7im", "TS0501A"),
("_TZ3000_nbnmw9nc", "TS0501A"),
("_TZ3000_7dcddnye", "TS0501A"),
],
ENDPOINTS: {
# <SimpleDescriptor endpoint=1 profile=260 device_type=257
# input_clusters=[0, 3, 4, 5, 6, 8, 768, 4096]
# output_clusters=[10, 25]>
1: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.DIMMABLE_LIGHT,
INPUT_CLUSTERS: [
Basic.cluster_id,
Identify.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
Color.cluster_id,
LightLink.cluster_id,
],
OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],
},
242: {
# <SimpleDescriptor endpoint=242 profile=41440 device_type=97
# input_clusters=[]
# output_clusters=[33]
PROFILE_ID: zgp.PROFILE_ID,
DEVICE_TYPE: zgp.DeviceType.PROXY_BASIC,
INPUT_CLUSTERS: [],
OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],
},
},
}
replacement = {
ENDPOINTS: {
1: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.DIMMABLE_LIGHT,
INPUT_CLUSTERS: [
Basic.cluster_id,
Identify.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
OnOff.cluster_id,
LevelControl.cluster_id,
LightLink.cluster_id,
],
OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],
},
242: {
PROFILE_ID: zgp.PROFILE_ID,
DEVICE_TYPE: zgp.DeviceType.PROXY_BASIC,
INPUT_CLUSTERS: [],
OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],
},
},
} | zha-quirks | /zha_quirks-0.0.103-py3-none-any.whl/zhaquirks/lidl/TS0501A.py | TS0501A.py |