content stringlengths 7-928k | avg_line_length float64 3.5-33.8k | max_line_length int64 6-139k | alphanum_fraction float64 0.08-0.96 | licenses sequence | repository_name stringlengths 7-104 | path stringlengths 4-230 | size int64 7-928k | lang stringclasses 1 value
---|---|---|---|---|---|---|---|---|
#!/usr/bin/env python
import cv2
import numpy as np
# from scipy import ndimage
maskgridL = np.meshgrid(np.r_[0:359],np.r_[0:130])
maskgridR = np.meshgrid(np.r_[0:359],np.r_[639-130:639])
# key value
# cam.set(3 , 640) # width
# cam.set(4 , 480) # height
# cam.set(10, 120) # brightness min: 0 , max: 255 , increment:1
# cam.set(11, 50) # contrast min: 0 , max: 255 , increment:1
# cam.set(12, 70) # saturation min: 0 , max: 255 , increment:1
# cam.set(13, 13) # hue
# cam.set(14, 50) # gain min: 0 , max: 127 , increment:1
# cam.set(15, -3) # exposure min: -7 , max: -1 , increment:1
# cam.set(17, 5000) # white_balance min: 4000, max: 7000, increment:1
# cam.set(28, 0) # focus min: 0 , max: 255 , increment:5
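# Illustrative note (not part of the original script): the numeric keys above map to
# OpenCV's named capture properties, so the same tuning can be written more readably,
# assuming the same V4L2 webcam at index 0:
#
#   cam = cv2.VideoCapture(0, cv2.CAP_V4L2)
#   cam.set(cv2.CAP_PROP_BRIGHTNESS, 120)   # equivalent to cam.set(10, 120)
#   cam.set(cv2.CAP_PROP_EXPOSURE, -3)      # equivalent to cam.set(15, -3)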
def callback(value):
pass
def setup_trackbars(range_filter):
cv2.namedWindow("Thresholds",cv2.WINDOW_NORMAL)
cv2.resizeWindow("Thresholds", 720, 720)
for i in ["MIN", "MAX"]:
v = 0 if i == "MIN" else 255
for j in range_filter:
cv2.createTrackbar("%s_%s" % (j, i), "Thresholds", v, 255, callback)
def get_trackbar_values(range_filter):
values = []
for i in ["MIN", "MAX"]:
for j in range_filter:
v = cv2.getTrackbarPos("%s_%s" % (j, i), "Thresholds")
values.append(v)
return values
got_lowpass = 0
# range_filter = 'RGB'
range_filter = 'HSV'
cam = cv2.VideoCapture(0,cv2.CAP_V4L2)
cam.set(cv2.CAP_PROP_AUTOFOCUS, 0)
cam.set(28, 0)
cam.set(cv2.CAP_PROP_GAIN,0)
cam.set(cv2.CAP_PROP_BRIGHTNESS,0)
cam.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
cam.set(cv2.CAP_PROP_FRAME_HEIGHT, 360)
cam.set(cv2.CAP_PROP_BRIGHTNESS, 100)
setup_trackbars(range_filter)
while True:
success, image = cam.read()
# image[maskgridL] = 0
# image[maskgridR] = 0
if range_filter == 'RGB':
frame_to_thresh = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
else:
frame_to_thresh = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
v1_min, v2_min, v3_min, v1_max, v2_max, v3_max = get_trackbar_values(range_filter)
thresh = cv2.inRange(frame_to_thresh, (v1_min, v2_min, v3_min), (v1_max, v2_max, v3_max))
preview = cv2.bitwise_and(image, image, mask=thresh)
cv2.imshow("Thresholds", preview)
    if cv2.waitKey(1) & 0xFF == ord('q'):
cam.release()
cv2.destroyAllWindows()
break
| 33.027397 | 93 | 0.62754 | ["Apache-2.0"] | garethnisbet/T-BOTS | Python/Development/T-Bot_Tracking/getHSVThresh.py | 2,411 | Python |
from __future__ import absolute_import, print_function
import sys
def _verbose_message(message, *args, **kwargs):
"""Print the message to stderr if -v/PYTHONVERBOSE is turned on."""
verbosity = kwargs.pop('verbosity', 1)
if sys.flags.verbose >= verbosity:
if not message.startswith(('#', 'import ')):
message = '# ' + message
print(message.format(*args), file=sys.stderr)
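# For example (illustrative, not part of the original module): under ``python -v``
# (sys.flags.verbose >= 1), _verbose_message("could not load {}", "spam") writes
# "# could not load spam" to stderr; with verbosity=2 it only prints under ``-vv``.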
try:
ImportError('msg', name='name', path='path')
except TypeError:
class _ImportError(ImportError):
def __init__(self, *args, **kwargs):
self.name = kwargs.pop('name', None)
self.path = kwargs.pop('path', None)
super(_ImportError, self).__init__(*args, **kwargs)
else:
_ImportError = ImportError
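# Illustrative usage sketch (not part of the original module): whichever class
# _ImportError ends up bound to, callers can attach module metadata uniformly, e.g.
#
#     raise _ImportError("could not import {!r}".format("spam"), name="spam", path="/tmp/spam.py")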
| 30.8 | 71 | 0.637662 | ["MIT"] | asmodehn/lark_import | palimport/_utils.py | 770 | Python |
# -*- coding: utf-8 -*-
#
# ns-3 documentation build configuration file, created by
# sphinx-quickstart on Tue Dec 14 09:00:39 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.pngmath']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'ns-3'
copyright = u'2011, ns-3 project'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'ns-3-dev'
# The full version, including alpha/beta/rc tags.
release = 'ns-3-dev'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ns-3doc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'ns-3-model-library.tex', u'ns-3 Model Library',
u'ns-3 project', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'ns-3-model-library', u'ns-3 Model Library',
[u'ns-3 project'], 1)
]
| 32.267281 | 80 | 0.716938 | ["BSD-3-Clause"] | maxvonhippel/snake | ns-3-dev/doc/models/source/conf.py | 7,002 | Python |
# Generated by Django 3.2.8 on 2022-01-04 10:37
import datetime
from django.db import migrations, models
import django.db.models.deletion
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('amcm', '0031_auto_20220104_0431'),
]
operations = [
migrations.AddField(
model_name='eventoelegibles',
name='evento',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='amcm.evento'),
preserve_default=False,
),
migrations.AlterField(
model_name='credito',
name='fecha_pago',
field=models.DateField(blank=True, default=datetime.datetime(2022, 1, 4, 10, 37, 3, 977886, tzinfo=utc), null=True, verbose_name='Fecha de pago'),
),
migrations.AlterField(
model_name='credito',
name='fecha_registro',
field=models.DateField(default=datetime.datetime(2022, 1, 4, 10, 37, 3, 977861, tzinfo=utc), verbose_name='Fecha de registro'),
),
migrations.AlterField(
model_name='cuentaspago',
name='fecha_registro',
field=models.DateField(default=datetime.datetime(2022, 1, 4, 10, 37, 3, 961284, tzinfo=utc), verbose_name='Fecha de Registro'),
),
migrations.AlterField(
model_name='elegible',
name='fecha_registro',
field=models.DateField(default=datetime.datetime(2022, 1, 4, 10, 37, 3, 962608, tzinfo=utc), verbose_name='Fecha de registro'),
),
migrations.AlterField(
model_name='pago',
name='fechaPago',
field=models.DateField(blank=True, default=datetime.datetime(2022, 1, 4, 10, 37, 3, 959833, tzinfo=utc), null=True, verbose_name='Fecha del Pago'),
),
migrations.AlterField(
model_name='pago',
name='fechaRegistro',
field=models.DateField(default=datetime.datetime(2022, 1, 4, 10, 37, 3, 959863, tzinfo=utc), verbose_name='Fecha de Registro'),
),
migrations.AlterField(
model_name='recibo',
name='fecha_registro',
field=models.DateField(default=datetime.datetime(2022, 1, 4, 10, 37, 3, 976856, tzinfo=utc), verbose_name='Fecha de registro'),
),
]
| 40.637931 | 159 | 0.614765 | ["MIT"] | agsneutron/asociacion_mexicana_cuarto_milla | apps/amcm/migrations/0032_auto_20220104_0437.py | 2,357 | Python |
"""Support for Australian BOM (Bureau of Meteorology) weather service."""
import datetime
import ftplib
import gzip
import io
import json
import logging
import os
import re
import zipfile
import requests
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_MONITORED_CONDITIONS,
TEMP_CELSIUS,
CONF_NAME,
ATTR_ATTRIBUTION,
CONF_LATITUDE,
CONF_LONGITUDE,
)
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_RESOURCE = "http://www.bom.gov.au/fwo/{}/{}.{}.json"
_LOGGER = logging.getLogger(__name__)
ATTR_LAST_UPDATE = "last_update"
ATTR_SENSOR_ID = "sensor_id"
ATTR_STATION_ID = "station_id"
ATTR_STATION_NAME = "station_name"
ATTR_ZONE_ID = "zone_id"
ATTRIBUTION = "Data provided by the Australian Bureau of Meteorology"
CONF_STATION = "station"
CONF_ZONE_ID = "zone_id"
CONF_WMO_ID = "wmo_id"
MIN_TIME_BETWEEN_UPDATES = datetime.timedelta(seconds=60)
SENSOR_TYPES = {
"wmo": ["wmo", None],
"name": ["Station Name", None],
"history_product": ["Zone", None],
"local_date_time": ["Local Time", None],
"local_date_time_full": ["Local Time Full", None],
"aifstime_utc": ["UTC Time Full", None],
"lat": ["Lat", None],
"lon": ["Long", None],
"apparent_t": ["Feels Like C", TEMP_CELSIUS],
"cloud": ["Cloud", None],
"cloud_base_m": ["Cloud Base", None],
"cloud_oktas": ["Cloud Oktas", None],
"cloud_type_id": ["Cloud Type ID", None],
"cloud_type": ["Cloud Type", None],
"delta_t": ["Delta Temp C", TEMP_CELSIUS],
"gust_kmh": ["Wind Gust kmh", "km/h"],
"gust_kt": ["Wind Gust kt", "kt"],
"air_temp": ["Air Temp C", TEMP_CELSIUS],
"dewpt": ["Dew Point C", TEMP_CELSIUS],
"press": ["Pressure mb", "mbar"],
"press_qnh": ["Pressure qnh", "qnh"],
"press_msl": ["Pressure msl", "msl"],
"press_tend": ["Pressure Tend", None],
"rain_trace": ["Rain Today", "mm"],
"rel_hum": ["Relative Humidity", "%"],
"sea_state": ["Sea State", None],
"swell_dir_worded": ["Swell Direction", None],
"swell_height": ["Swell Height", "m"],
"swell_period": ["Swell Period", None],
"vis_km": ["Visability km", "km"],
"weather": ["Weather", None],
"wind_dir": ["Wind Direction", None],
"wind_spd_kmh": ["Wind Speed kmh", "km/h"],
"wind_spd_kt": ["Wind Speed kt", "kt"],
}
def validate_station(station):
"""Check that the station ID is well-formed."""
if station is None:
return
station = station.replace(".shtml", "")
if not re.fullmatch(r"ID[A-Z]\d\d\d\d\d\.\d\d\d\d\d", station):
raise vol.error.Invalid("Malformed station ID")
return station
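# For example (illustrative): "IDN60901.94768.shtml" is normalised to "IDN60901.94768",
# while anything not matching ID<letter><5 digits>.<5 digits> raises vol.error.Invalid.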
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Inclusive(CONF_ZONE_ID, "Deprecated partial station ID"): cv.string,
vol.Inclusive(CONF_WMO_ID, "Deprecated partial station ID"): cv.string,
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_STATION): validate_station,
vol.Required(CONF_MONITORED_CONDITIONS, default=[]): vol.All(
cv.ensure_list, [vol.In(SENSOR_TYPES)]
),
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the BOM sensor."""
station = config.get(CONF_STATION)
zone_id, wmo_id = config.get(CONF_ZONE_ID), config.get(CONF_WMO_ID)
if station is not None:
if zone_id and wmo_id:
_LOGGER.warning(
"Using config %s, not %s and %s for BOM sensor",
CONF_STATION,
CONF_ZONE_ID,
CONF_WMO_ID,
)
elif zone_id and wmo_id:
station = "{}.{}".format(zone_id, wmo_id)
else:
station = closest_station(
config.get(CONF_LATITUDE),
config.get(CONF_LONGITUDE),
hass.config.config_dir,
)
if station is None:
_LOGGER.error("Could not get BOM weather station from lat/lon")
return
bom_data = BOMCurrentData(station)
try:
bom_data.update()
except ValueError as err:
_LOGGER.error("Received error from BOM Current: %s", err)
return
add_entities(
[
BOMCurrentSensor(bom_data, variable, config.get(CONF_NAME))
for variable in config[CONF_MONITORED_CONDITIONS]
]
)
class BOMCurrentSensor(Entity):
"""Implementation of a BOM current sensor."""
def __init__(self, bom_data, condition, stationname):
"""Initialize the sensor."""
self.bom_data = bom_data
self._condition = condition
self.stationname = stationname
@property
def name(self):
"""Return the name of the sensor."""
if self.stationname is None:
return "BOM {}".format(SENSOR_TYPES[self._condition][0])
return "BOM {} {}".format(self.stationname, SENSOR_TYPES[self._condition][0])
@property
def state(self):
"""Return the state of the sensor."""
return self.bom_data.get_reading(self._condition)
@property
def device_state_attributes(self):
"""Return the state attributes of the device."""
attr = {
ATTR_ATTRIBUTION: ATTRIBUTION,
ATTR_LAST_UPDATE: self.bom_data.last_updated,
ATTR_SENSOR_ID: self._condition,
ATTR_STATION_ID: self.bom_data.latest_data["wmo"],
ATTR_STATION_NAME: self.bom_data.latest_data["name"],
ATTR_ZONE_ID: self.bom_data.latest_data["history_product"],
}
return attr
@property
def unit_of_measurement(self):
"""Return the units of measurement."""
return SENSOR_TYPES[self._condition][1]
def update(self):
"""Update current conditions."""
self.bom_data.update()
class BOMCurrentData:
"""Get data from BOM."""
def __init__(self, station_id):
"""Initialize the data object."""
self._zone_id, self._wmo_id = station_id.split(".")
self._data = None
self.last_updated = None
def _build_url(self):
"""Build the URL for the requests."""
url = _RESOURCE.format(self._zone_id, self._zone_id, self._wmo_id)
_LOGGER.debug("BOM URL: %s", url)
return url
@property
def latest_data(self):
"""Return the latest data object."""
if self._data:
return self._data[0]
return None
def get_reading(self, condition):
"""Return the value for the given condition.
BOM weather publishes condition readings for weather (and a few other
conditions) at intervals throughout the day. To avoid a `-` value in
the frontend for these conditions, we traverse the historical data
for the latest value that is not `-`.
Iterators are used in this method to avoid iterating needlessly
through the entire BOM provided dataset.
"""
condition_readings = (entry[condition] for entry in self._data)
return next((x for x in condition_readings if x != "-"), None)
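        # For example (illustrative): if self._data were
        #   [{"weather": "-"}, {"weather": "Sunny"}, {"weather": "Rain"}]
        # then get_reading("weather") returns "Sunny", the newest non-"-" reading.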
def should_update(self):
"""Determine whether an update should occur.
BOM provides updated data every 30 minutes. We manually define
refreshing logic here rather than a throttle to keep updates
in lock-step with BOM.
If 35 minutes has passed since the last BOM data update, then
an update should be done.
"""
if self.last_updated is None:
# Never updated before, therefore an update should occur.
return True
now = datetime.datetime.now()
update_due_at = self.last_updated + datetime.timedelta(minutes=35)
return now > update_due_at
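        # For example (illustrative): if the last successful update was at 10:00,
        # calls before 10:35 return False and the cached observations are reused.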
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Get the latest data from BOM."""
if not self.should_update():
_LOGGER.debug(
"BOM was updated %s minutes ago, skipping update as"
" < 35 minutes, Now: %s, LastUpdate: %s",
(datetime.datetime.now() - self.last_updated),
datetime.datetime.now(),
self.last_updated,
)
return
try:
result = requests.get(self._build_url(), timeout=10).json()
self._data = result["observations"]["data"]
# set lastupdate using self._data[0] as the first element in the
# array is the latest date in the json
self.last_updated = datetime.datetime.strptime(
str(self._data[0]["local_date_time_full"]), "%Y%m%d%H%M%S"
)
return
except ValueError as err:
_LOGGER.error("Check BOM %s", err.args)
self._data = None
raise
def _get_bom_stations():
"""Return {CONF_STATION: (lat, lon)} for all stations, for auto-config.
This function does several MB of internet requests, so please use the
caching version to minimise latency and hit-count.
"""
latlon = {}
with io.BytesIO() as file_obj:
with ftplib.FTP("ftp.bom.gov.au") as ftp:
ftp.login()
ftp.cwd("anon2/home/ncc/metadata/sitelists")
ftp.retrbinary("RETR stations.zip", file_obj.write)
file_obj.seek(0)
with zipfile.ZipFile(file_obj) as zipped:
with zipped.open("stations.txt") as station_txt:
for _ in range(4):
station_txt.readline() # skip header
while True:
line = station_txt.readline().decode().strip()
if len(line) < 120:
break # end while loop, ignoring any footer text
wmo, lat, lon = (
line[a:b].strip() for a, b in [(128, 134), (70, 78), (79, 88)]
)
if wmo != "..":
latlon[wmo] = (float(lat), float(lon))
zones = {}
pattern = (
r'<a href="/products/(?P<zone>ID[A-Z]\d\d\d\d\d)/'
r'(?P=zone)\.(?P<wmo>\d\d\d\d\d).shtml">'
)
for state in ("nsw", "vic", "qld", "wa", "tas", "nt"):
url = "http://www.bom.gov.au/{0}/observations/{0}all.shtml".format(state)
for zone_id, wmo_id in re.findall(pattern, requests.get(url).text):
zones[wmo_id] = zone_id
return {"{}.{}".format(zones[k], k): latlon[k] for k in set(latlon) & set(zones)}
def bom_stations(cache_dir):
"""Return {CONF_STATION: (lat, lon)} for all stations, for auto-config.
Results from internet requests are cached as compressed JSON, making
subsequent calls very much faster.
"""
cache_file = os.path.join(cache_dir, ".bom-stations.json.gz")
if not os.path.isfile(cache_file):
stations = _get_bom_stations()
with gzip.open(cache_file, "wt") as cache:
json.dump(stations, cache, sort_keys=True)
return stations
with gzip.open(cache_file, "rt") as cache:
return {k: tuple(v) for k, v in json.load(cache).items()}
def closest_station(lat, lon, cache_dir):
"""Return the ZONE_ID.WMO_ID of the closest station to our lat/lon."""
if lat is None or lon is None or not os.path.isdir(cache_dir):
return
stations = bom_stations(cache_dir)
def comparable_dist(wmo_id):
"""Create a psudeo-distance from latitude/longitude."""
station_lat, station_lon = stations[wmo_id]
return (lat - station_lat) ** 2 + (lon - station_lon) ** 2
return min(stations, key=comparable_dist)
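# Illustrative usage sketch (not part of the original component): given a writable
# Home Assistant config directory,
#
#     station = closest_station(-33.86, 151.21, "/config")
#
# returns a "ZONE_ID.WMO_ID" string (of the form "IDN60901.94768"), which
# setup_platform then passes to BOMCurrentData.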
| 33.880466 | 86 | 0.61234 | ["Apache-2.0"] | 5mauggy/home-assistant | homeassistant/components/bom/sensor.py | 11,621 | Python |
class BaseORMException(Exception):
"""
Base ORM Exception.
"""
class FieldError(BaseORMException):
"""
The FieldError exception is raised when there is a problem with a model field.
"""
class ParamsError(BaseORMException):
"""
    The ParamsError is raised when a function cannot be run with the given parameters.
"""
class ConfigurationError(BaseORMException):
"""
The ConfigurationError exception is raised when the configuration of the ORM is invalid.
"""
class TransactionManagementError(BaseORMException):
"""
The TransactionManagementError is raised when any transaction error occurs.
"""
class OperationalError(BaseORMException):
"""
The OperationalError exception is raised when an operational error occurs.
"""
class IntegrityError(OperationalError):
"""
The IntegrityError exception is raised when there is an integrity error.
"""
class NoValuesFetched(OperationalError):
"""
The NoValuesFetched exception is raised when the related model was never fetched.
"""
class MultipleObjectsReturned(OperationalError):
"""
The MultipleObjectsReturned exception is raised when doing a ``.get()`` operation,
and more than one object is returned.
"""
class DoesNotExist(OperationalError):
"""
The DoesNotExist exception is raised when expecting data, such as a ``.get()`` operation.
"""
class IncompleteInstanceError(OperationalError):
"""
The IncompleteInstanceError exception is raised when a partial model is attempted to be persisted.
"""
class DBConnectionError(BaseORMException, ConnectionError):
"""
    The DBConnectionError is raised when a problem occurs while connecting to the database.
"""
| 24.236111 | 102 | 0.716905 | ["Apache-2.0"] | Abnormally/tortoise-orm | tortoise/exceptions.py | 1,745 | Python |
import logging
log = logging.getLogger("MPP-Solar")
class baseoutput:
def __str__(self):
return "baseoutput - the base class for the output processors, not used directly"
def get_kwargs(self, kwargs, key, default=None):
        if key not in kwargs or not kwargs[key]:
return default
return kwargs[key]
| 22.933333 | 89 | 0.665698 | ["MIT"] | 20after4/mpp-solar | mppsolar/outputs/baseoutput.py | 344 | Python |
# -*- coding: utf-8 -*-
#
# FHash documentation build configuration file, created by
# sphinx-quickstart on Fri Apr 21 20:02:16 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import sphinx_rtd_theme
import os
import sys
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.coverage']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'FHash'
copyright = u'2017, Aditya Patil'
author = u'Aditya Patil'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.1'
# The full version, including alpha/beta/rc tags.
release = u'0.1.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'FHashdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'FHash.tex', u'FHash Documentation',
u'Aditya Patil', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'fhash', u'FHash Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'FHash', u'FHash Documentation',
author, 'FHash', 'One line description of project.',
'Miscellaneous'),
]
| 29.609756 | 79 | 0.682043 | ["Unlicense"] | aditya-a-patil/FHash | docs/source/conf.py | 4,856 | Python |
import setuptools
import label_studio
print('Label Studio', label_studio.__version__)
# Readme
with open('README.md', 'r') as f:
long_description = f.read()
# Module dependencies
with open('requirements.txt') as f:
requirements = f.read().splitlines()
setuptools.setup(
name='label-studio',
version=label_studio.__version__,
author='Heartex',
author_email="[email protected]",
description='Label Studio annotation tool',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/heartexlabs/label-studio',
packages=setuptools.find_packages(),
include_package_data=True,
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
],
install_requires=requirements,
python_requires='>=3.5',
entry_points={
'console_scripts': [
'label-studio=label_studio.server:main',
'label-studio-ml=label_studio.ml.server:main'
],
}
)
| 27.666667 | 57 | 0.674699 | ["Apache-2.0"] | Dingbro/label-studio | setup.py | 1,079 | Python |
from caffe_all import *
def parseProtoString(s):
from google.protobuf import text_format
proto_net = pb.NetParameter()
text_format.Merge(s, proto_net)
return proto_net
def get_param(l, exclude=set(['top', 'bottom', 'name', 'type'])):
if not hasattr(l,'ListFields'):
if hasattr(l,'__delitem__'):
return [get_param(i) for i in l]
return l
r = dict()
for f, v in l.ListFields():
if f.name not in exclude:
r[f.name] = get_param(v, [])
return r
class ProtoDesc:
def __init__(self, prototxt):
from os import path
self.prototxt = prototxt
self.parsed_proto = parseProtoString(open(self.prototxt, 'r').read())
# Guess the input dimension
self.input_dim = (3, 227, 227)
net = self.parsed_proto
if len(net.input_dim) > 0:
self.input_dim = net.input_dim[1:]
else:
lrs = net.layer
cs = [l.transform_param.crop_size for l in lrs
if l.HasField('transform_param')]
if len(cs):
self.input_dim = (3, cs[0], cs[0])
def __call__(self, clip=None, **inputs):
from collections import OrderedDict
net = self.parsed_proto
blobs = OrderedDict(inputs)
for l in net.layer:
if l.type not in ['Data', 'ImageData']:
in_place = l.top == l.bottom
param = get_param(l)
tops = getattr(L, l.type)(*[blobs[b] for b in l.bottom],
ntop=len(l.top), in_place=in_place,
name=l.name,
**param)
if len(l.top) <= 1:
tops = [tops]
for i, t in enumerate(l.top):
blobs[t] = tops[i]
if l.name == clip:
break
return list(blobs.values())[-1]
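# Illustrative usage sketch (not part of the original file), assuming a deploy
# prototxt at "deploy.prototxt" and an existing NetSpec blob named `data`:
#
#     desc = ProtoDesc('deploy.prototxt')
#     top = desc(data=data)   # rebuilds every non-data layer on top of `data`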
| 34.315789 | 77 | 0.506646 | ["BSD-2-Clause"] | jeffdonahue/voc-classification | src/load.py | 1,956 | Python |
def deal(infilename, outfilename):
    # Move the class label (the first character of the last field) to the front of each row.
    with open(infilename) as infile:
        lines = infile.readlines()
    out = []
    for line in lines:
        line = line.split(',')
        val = line[-1][0]
        line = line[:-1]
        line.insert(0, val)
        out.append(line)
    print(out)
    # Re-join the reordered fields into comma-separated lines.
    out_str = ''
    for line in out:
        line_str = ''
        for each in line:
            line_str = line_str + '{},'.format(each)
        out_str = out_str + line_str[:-1] + '\n'
    with open(outfilename, 'w') as outfile:
        outfile.write(out_str)
deal('Haberman Data Set.txt', 'Haberman_data.txt')
| 27.95 | 52 | 0.545617 | ["Apache-2.0"] | hrsu/disturb | Haberman Data/deal_data.py | 559 | Python |
import secrets
import os
from PIL import Image
from flask import render_template, url_for, flash, redirect, request, abort
from blog import app, db, bcrypt, mail
from blog.forms import (RegistrationForm, LoginForm, UpdateAccountForm, PostForm, \
RequestResetForm, ResetPasswordForm)
from blog.models import User, Post
from flask_login import login_user, current_user, logout_user, login_required
from flask_mail import Message
@app.route("/")
@app.route("/home")
def home():
page = request.args.get('page', 1, type=int)
posts = Post.query.order_by(Post.date_posted.desc()).paginate(page=page, per_page=2)
return render_template('home.html', posts=posts)
@app.route("/about")
def about():
return render_template('about.html', title='About')
@app.route("/register", methods=['GET', 'POST'])
def register():
if current_user.is_authenticated:
return redirect(url_for('home'))
form = RegistrationForm()
if form.validate_on_submit():
hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
user = User(username=form.username.data, email=form.email.data, password=hashed_password)
db.session.add(user)
db.session.commit()
flash(f'Account created for {form.username.data}! You can now log in', 'success')
return redirect(url_for('login'))
return render_template('register.html', title='Register', form=form)
@app.route("/login", methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('home'))
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user and bcrypt.check_password_hash(user.password, form.password.data):
login_user(user, remember=form.remember.data)
            next_page = request.args.get('next')
            return redirect(next_page) if next_page else redirect(url_for('account'))
else:
flash(f'Login Unsuccessful...Please check email and password!', 'danger')
return render_template('login.html', title='Login', form=form)
@app.route("/logout")
def logout():
logout_user()
return redirect(url_for('home'))
def save_picture(form_picture):
random_hex = secrets.token_hex(8)
_, f_ext = os.path.splitext(form_picture.filename)
picture_fn = random_hex + f_ext
picture_path = os.path.join(app.root_path, 'static/profile_pics', picture_fn)
output_size = (125, 125)
i = Image.open(form_picture)
i.thumbnail(output_size)
i.save(picture_path)
return picture_fn
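# Note (illustrative, not part of the original app): Image.thumbnail() resizes in
# place while preserving aspect ratio, so a 1000x600 upload is saved as roughly
# 125x75 under static/profile_pics/<random_hex><original extension>.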
@app.route("/account", methods=['GET', 'POST'])
@login_required # accessible only if logged in
def account():
form = UpdateAccountForm()
if form.validate_on_submit():
if form.picture.data:
picture_file = save_picture(form.picture.data)
old_picture = current_user.image_file
old_picture_path = os.path.join(app.root_path, 'static/profile_pics', old_picture)
if os.path.exists(old_picture_path):
os.remove(old_picture_path)
else:
print("The file does not exist " + old_picture)
current_user.image_file = picture_file
current_user.username = form.username.data
current_user.email = form.email.data
db.session.commit()
flash('Your account has been updated!', 'success')
return redirect(url_for('account'))
elif request.method == 'GET':
form.username.data = current_user.username
form.email.data = current_user.email
image_file = url_for('static', filename='profile_pics/' + current_user.image_file)
return render_template('account.html', title='Account', image_file=image_file, form=form)
@app.route("/post/new", methods=['GET', 'POST'])
@login_required
def new_post():
form = PostForm()
if form.validate_on_submit():
post = Post(title=form.title.data, content=form.content.data, author=current_user)
db.session.add(post)
db.session.commit()
flash('Your post has been created!', 'success')
return redirect(url_for('home'))
return render_template('create_post.html', title='New Post', form=form, legend='New Post')
@app.route("/post/<int:post_id>")
def post(post_id):
post = Post.query.get_or_404(post_id)
return render_template('post.html', title=post.title, post=post)
@app.route("/post/<int:post_id>/update", methods=['GET', 'POST'])
@login_required
def update_post(post_id):
post = Post.query.get_or_404(post_id)
if post.author != current_user:
abort(403)
form = PostForm()
if form.validate_on_submit():
post.title = form.title.data
post.content = form.content.data
db.session.commit()
flash('Your post has been updated!', 'success')
return redirect(url_for('post', post_id=post.id))
elif request.method == 'GET':
form.title.data = post.title
form.content.data = post.content
return render_template('create_post.html', title='Update Post', form=form, legend='Update Post')
@app.route("/post/<int:post_id>/delete", methods=['POST'])
@login_required
def delete_post(post_id):
post = Post.query.get_or_404(post_id)
if post.author != current_user:
abort(403)
db.session.delete(post)
db.session.commit()
flash('Your post has been deleted!', 'success')
return redirect(url_for('home'))
@app.route("/user/<string:username>")
def user_posts(username):
page = request.args.get('page', 1, type=int)
user = User.query.filter_by(username=username).first_or_404()
posts = Post.query.filter_by(author=user) \
.order_by(Post.date_posted.desc()) \
.paginate(page=page, per_page=5)
return render_template('user_posts.html', posts=posts, user=user)
def send_reset_email(user):
token = user.get_reset_token()
msg = Message('Password Reset Request', sender='[email protected]',
recipients=['[email protected]']) # recipients=[user.email])
msg.body = f'''To reset your password, visit the following link:
{url_for('reset_token', token=token, _external=True)}
If you did not make this request then simply ignore this email and no changes will be made.
'''
mail.send(msg)
@app.route("/reset_password", methods=['GET', 'POST'])
def reset_request():
if current_user.is_authenticated:
return redirect(url_for('home'))
form = RequestResetForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
send_reset_email(user)
flash('An email has been sent with instructions to reset your password', 'info')
return redirect(url_for('login'))
    return render_template('reset_request.html', title='Reset Password', form=form)
@app.route("/reset_password/<token>", methods=['GET', 'POST'])
def reset_token(token):
if current_user.is_authenticated:
return redirect(url_for('home'))
user = User.verify_reset_token(token)
if user is None:
flash("That is an invalid or expired token", 'warning')
return redirect(url_for('reset_request'))
form = ResetPasswordForm()
if form.validate_on_submit():
hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
user.password = hashed_password
db.session.commit()
flash(f'Your password has been updated! You can now log in', 'success')
return redirect(url_for('login'))
return render_template('reset_token.html', title='Reset Password', form=form)
| 37.283654 | 100 | 0.676209 | ["Apache-2.0"] | mlewan01/flaskblog01 | blog/routes.py | 7,755 | Python |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import contextmanager, suppress
from copy import copy, deepcopy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from pytorch_lightning.core.optimizer import LightningOptimizer
from pytorch_lightning.core.step_result import Result
from pytorch_lightning.plugins import ParallelPlugin
from pytorch_lightning.trainer.states import TrainerState
from pytorch_lightning.trainer.supporters import TensorRunningAccum
from pytorch_lightning.utilities import _TPU_AVAILABLE, AMPType, DeviceType
from pytorch_lightning.utilities.distributed import rank_zero_info
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.finite_checks import detect_nan_parameters
from pytorch_lightning.utilities.grads import grad_norm
from pytorch_lightning.utilities.model_helpers import is_overridden
from pytorch_lightning.utilities.parsing import AttributeDict
from pytorch_lightning.utilities.signature_utils import is_param_in_hook_signature
from pytorch_lightning.utilities.warnings import WarningCache
class TrainLoop:
def __init__(self, trainer, multiple_trainloader_mode: str):
self.trainer = trainer
self.accumulated_loss = None
self.warning_cache = WarningCache()
self._teardown_already_run = False
self.running_loss = TensorRunningAccum(window_length=20)
self._curr_step_result = None
self._cur_grad_norm_dict = None
self._multiple_trainloader_mode = multiple_trainloader_mode
self._skip_backward = False
self.trainer._multiple_trainloader_mode = multiple_trainloader_mode
self._optimizer_freq_cumsum = None
def on_trainer_init(
self,
max_epochs: Optional[int],
min_epochs: Optional[int],
max_steps: Optional[int],
min_steps: Optional[int],
num_sanity_val_steps: int,
) -> None:
self.trainer.global_step = 0
self.trainer.current_epoch = 0
self.trainer.should_stop = False
self.trainer.state = TrainerState()
self.trainer.total_batch_idx = 0
self.trainer.batch_idx = 0
self.trainer.num_training_batches = 0
self.trainer.train_dataloader = None
        # If neither max_epochs nor max_steps is set, then use existing default of max_epochs = 1000
self.trainer.max_epochs = 1000 if (max_epochs is None and max_steps is None) else max_epochs
        # If neither min_epochs nor min_steps is set, then use existing default of min_epochs = 1
self.trainer.min_epochs = 1 if (min_epochs is None and min_steps is None) else min_epochs
self.trainer.max_steps = max_steps
self.trainer.min_steps = min_steps
if num_sanity_val_steps == -1:
self.trainer.num_sanity_val_steps = float("inf")
else:
self.trainer.num_sanity_val_steps = num_sanity_val_steps
@property
def num_optimizers(self):
num_optimizers = len(self.get_optimizers_iterable())
return num_optimizers
@property
def optimizer_freq_cumsum(self):
if self._optimizer_freq_cumsum is None:
self._optimizer_freq_cumsum = np.cumsum(self.trainer.optimizer_frequencies)
return self._optimizer_freq_cumsum
def should_skip_training(self):
should_by_max_steps = self.trainer.max_steps is not None and self.trainer.global_step >= self.trainer.max_steps
should_by_epoch = self.trainer.max_epochs is not None and self.trainer.current_epoch >= self.trainer.max_epochs
return should_by_max_steps or should_by_epoch or self.trainer.num_training_batches == 0
def on_train_start(self):
# hook
self.trainer.call_hook("on_train_start")
def on_train_end(self):
if self._teardown_already_run:
return
self._teardown_already_run = True
# trigger checkpoint check. need to temporarily decrease the global step to avoid saving duplicates
# when a checkpoint was saved at the last step
self.trainer.global_step -= 1
self.check_checkpoint_callback(should_update=True, is_last=True)
self.trainer.global_step += 1
# hook
self.trainer.call_hook("on_train_end")
# todo: TPU 8 cores hangs in flush with TensorBoard. Might do for all loggers.
# It might be related to xla tensors blocked when moving the cpu
# kill loggers
if self.trainer.logger is not None:
self.trainer.logger.finalize("success")
# summarize profile results
self.trainer.profiler.describe()
# give accelerators a chance to finish
self.trainer.accelerator.on_train_end()
# reset bookkeeping
self.trainer.state.stage = None
def check_checkpoint_callback(self, should_update, is_last=False):
# TODO bake this logic into the ModelCheckpoint callback
if should_update and self.trainer.checkpoint_connector.has_trained:
callbacks = self.trainer.checkpoint_callbacks
if is_last and any(cb.save_last and cb.verbose for cb in callbacks):
rank_zero_info("Saving latest checkpoint...")
model = self.trainer.lightning_module
for cb in callbacks:
cb.on_validation_end(self.trainer, model)
def on_train_epoch_start(self, epoch):
# update training progress in trainer
self.trainer.current_epoch = epoch
model = self.trainer.lightning_module
# reset train dataloader
if epoch != 0 and self.trainer.reload_dataloaders_every_epoch:
self.trainer.reset_train_dataloader(model)
# todo: specify the possible exception
with suppress(Exception):
# set seed for distributed sampler (enables shuffling for each epoch)
self.trainer.train_dataloader.sampler.set_epoch(epoch)
# changing gradient according accumulation_scheduler
self.trainer.accumulation_scheduler.on_train_epoch_start(self.trainer, self.trainer.lightning_module)
# stores accumulated grad fractions per batch
self.accumulated_loss = TensorRunningAccum(window_length=self.trainer.accumulate_grad_batches)
# hook
self.trainer.call_hook("on_epoch_start")
self.trainer.call_hook("on_train_epoch_start")
def on_train_batch_end(self, epoch_output, batch_end_outputs, batch, batch_idx, dataloader_idx):
batch_end_outputs = [opt_idx_out for opt_idx_out in batch_end_outputs if len(opt_idx_out)]
processed_batch_end_outputs = TrainLoop._prepare_outputs(batch_end_outputs, batch_mode=True)
# hook
self.trainer.call_hook('on_train_batch_end', processed_batch_end_outputs, batch, batch_idx, dataloader_idx)
self.trainer.call_hook('on_batch_end')
# figure out what to track for epoch end
self.track_epoch_end_reduce_metrics(epoch_output, batch_end_outputs)
# reset batch logger internals
self.trainer.logger_connector.on_train_batch_end()
def reset_train_val_dataloaders(self, model) -> None:
"""
Resets train and val dataloaders if none are attached to the trainer.
The val dataloader must be initialized before training loop starts, as the training loop
inspects the val dataloader to determine whether to run the evaluation loop.
"""
if self.trainer.train_dataloader is None:
self.trainer.reset_train_dataloader(model)
if self.trainer.val_dataloaders is None:
self.trainer.reset_val_dataloader(model)
def track_epoch_end_reduce_metrics(self, epoch_output, batch_end_outputs):
hook_overridden = self._should_add_batch_output_to_epoch_output()
# track the outputs to reduce at the end of the epoch
for opt_idx, opt_outputs in enumerate(batch_end_outputs):
sample_output = opt_outputs[-1]
# decide if we need to reduce at the end of the epoch automatically
auto_reduce_tng_result = isinstance(sample_output, Result) and sample_output.should_reduce_on_epoch_end
# only track when a) it needs to be autoreduced OR b) the user wants to manually reduce on epoch end
if not (hook_overridden or auto_reduce_tng_result):
continue
# with 1 step (no tbptt) don't use a sequence at epoch end
if isinstance(opt_outputs, list) and len(opt_outputs) == 1 and not isinstance(opt_outputs[0], Result):
opt_outputs = opt_outputs[0]
epoch_output[opt_idx].append(opt_outputs)
def _should_add_batch_output_to_epoch_output(self) -> bool:
# We add to the epoch outputs if
# 1. The model defines training_epoch_end OR
# 2. The model overrides on_train_epoch_end which has `outputs` in the signature
# TODO: in v1.5 this only needs to check if training_epoch_end is overridden
lightning_module = self.trainer.lightning_module
if is_overridden("training_epoch_end", model=lightning_module):
return True
if is_overridden("on_train_epoch_end", model=lightning_module):
model_hook_fx = getattr(lightning_module, "on_train_epoch_end")
if is_param_in_hook_signature(model_hook_fx, "outputs"):
return True
return False
def get_optimizers_iterable(self, batch_idx=None):
"""
Generates an iterable with (idx, optimizer) for each optimizer.
"""
if not self.trainer.optimizer_frequencies:
# call training_step once per optimizer
return list(enumerate(self.trainer.optimizers))
if batch_idx is None:
batch_idx = self.trainer.total_batch_idx
optimizers_loop_length = self.optimizer_freq_cumsum[-1]
current_place_in_loop = batch_idx % optimizers_loop_length
        # find optimizer index by looking for the first {item > current_place} in the cumsum list
opt_idx = np.argmax(self.optimizer_freq_cumsum > current_place_in_loop)
return [[opt_idx, self.trainer.optimizers[opt_idx]]]
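        # For example (illustrative): with optimizer_frequencies = [2, 1] the cumulative
        # sums are [2, 3], so batches 0-1 step optimizer 0, batch 2 steps optimizer 1,
        # and the pattern repeats every 3 batches.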
def on_after_backward(self, training_step_output, batch_idx, untouched_loss):
training_step_output.detach()
# insert after step hook
self.trainer.call_hook("on_after_backward")
# when in dev debugging track the losses
self.trainer.dev_debugger.track_train_loss_history(batch_idx, untouched_loss.detach())
def _check_training_step_output(self, training_step_output):
if isinstance(training_step_output, torch.Tensor) and not self.trainer.lightning_module.automatic_optimization:
if training_step_output.grad_fn is None:
# TODO: Find why - RuntimeError: Expected to mark a variable ready only once ...
raise MisconfigurationException("In manual optimization, `training_step` should not return a Tensor")
def training_step(self, split_batch, batch_idx, opt_idx, hiddens):
# give the PL module a result for logging
model_ref = self.trainer.lightning_module
with self.trainer.profiler.profile("model_forward"):
args = self.build_train_args(split_batch, batch_idx, opt_idx, hiddens)
# manually capture logged metrics
model_ref._current_fx_name = 'training_step'
model_ref._results = Result()
with self.trainer.profiler.profile("training_step"):
training_step_output = self.trainer.accelerator.training_step(args)
self.trainer.accelerator.post_training_step()
self.trainer.logger_connector.cache_logged_metrics()
self._check_training_step_output(training_step_output)
training_step_output = self.trainer.call_hook("training_step_end", training_step_output)
training_step_output_for_epoch_end, training_step_output = self._process_training_step_output(
training_step_output, split_batch
)
if training_step_output_for_epoch_end is None:
return
# enable empty loss when using manual opt
closure_loss = None
untouched_loss = None
if self.trainer.lightning_module.automatic_optimization:
# accumulate loss. if accumulate_grad_batches==1, no effect
closure_loss = training_step_output.minimize / self.trainer.accumulate_grad_batches
# the loss will get scaled for amp. avoid any modifications to it
untouched_loss = closure_loss.detach().clone()
# result
result = AttributeDict(
closure_loss=closure_loss,
loss=untouched_loss,
training_step_output=training_step_output,
training_step_output_for_epoch_end=training_step_output_for_epoch_end,
)
return result
def _process_training_step_output(self, training_step_output, split_batch):
training_step_output_for_epoch_end = training_step_output
# enable validation_step return None
if training_step_output_for_epoch_end is None:
return None, None
result = self.trainer.lightning_module._results
loss = None
hiddens = None
result["extra"] = {}
# handle dict return
if isinstance(training_step_output, dict):
loss = training_step_output.pop("loss", None)
hiddens = training_step_output.pop("hiddens", None)
if hiddens is not None:
hiddens = hiddens.detach()
result["extra"] = training_step_output
# handle scalar return
elif isinstance(training_step_output, torch.Tensor):
loss = training_step_output
# map to results under the hood
result.minimize = loss
self.trainer.hiddens = hiddens
# track batch for manual reduction with result
result.track_batch_size(len(split_batch))
# track metrics without grads for epoch reduction
training_step_output_for_epoch_end = copy(result)
training_step_output_for_epoch_end = training_step_output_for_epoch_end.detach()
if self.trainer.move_metrics_to_cpu:
training_step_output_for_epoch_end = training_step_output_for_epoch_end.cpu()
return training_step_output_for_epoch_end, result
@staticmethod
def _prepare_outputs(
outputs: List[List[List[Result]]],
batch_mode: bool,
) -> Union[List[List[List[Dict]]], List[List[Dict]], List[Dict], Dict]:
"""
Extract required information from batch or epoch end results.
Args:
outputs: A 3-dimensional list of ``Result`` objects with dimensions:
[optimizer outs][batch outs][tbptt steps].
batch_mode: If True, ignore the batch output dimension.
Returns:
The cleaned outputs with ``Result`` objects converted to dictionaries. All list dimensions of size one will
be collapsed.
"""
processed_outputs = []
for opt_outputs in outputs:
# handle an edge case where an optimizer output is the empty list
if len(opt_outputs) == 0:
continue
processed_batch_outputs = []
if batch_mode:
opt_outputs = [opt_outputs]
for batch_outputs in opt_outputs:
processed_tbptt_outputs = []
for tbptt_output in batch_outputs:
out = tbptt_output.extra
out['loss'] = tbptt_output.minimize
processed_tbptt_outputs.append(out)
# if there was only one tbptt step then we can collapse that dimension
if len(processed_tbptt_outputs) == 1:
processed_tbptt_outputs = processed_tbptt_outputs[0]
processed_batch_outputs.append(processed_tbptt_outputs)
# batch_outputs should be just one dict (or a list of dicts if using tbptt) per optimizer
if batch_mode:
processed_batch_outputs = processed_batch_outputs[0]
processed_outputs.append(processed_batch_outputs)
# if there is only one optimiser then we collapse that dimension
if len(processed_outputs) == 1:
processed_outputs = processed_outputs[0]
return processed_outputs
def optimizer_step(self, optimizer, opt_idx, batch_idx, train_step_and_backward_closure):
model_ref = self.trainer.lightning_module
is_lbfgs = isinstance(optimizer, torch.optim.LBFGS)
using_native_amp = self.trainer.amp_backend == AMPType.NATIVE
# native amp + lbfgs is a no go right now
if using_native_amp and is_lbfgs:
raise MisconfigurationException(
'native PyTorch amp and lbfgs are not compatible.'
' To request, please file a Github issue in PyTorch and tag @mcarilli'
)
# wraps into LightningOptimizer only for running step
optimizer = LightningOptimizer._to_lightning_optimizer(optimizer, self.trainer, opt_idx)
# model hook
model_ref.optimizer_step(
self.trainer.current_epoch,
batch_idx,
optimizer,
opt_idx,
train_step_and_backward_closure,
on_tpu=self.trainer._device_type == DeviceType.TPU and _TPU_AVAILABLE,
using_native_amp=using_native_amp,
using_lbfgs=is_lbfgs,
)
def on_before_zero_grad(self, optimizer):
self.trainer.call_hook('on_before_zero_grad', optimizer)
def optimizer_zero_grad(self, batch_idx, optimizer, opt_idx):
self.trainer.accelerator.optimizer_zero_grad(self.trainer.current_epoch, batch_idx, optimizer, opt_idx)
def track_and_norm_grad(self, optimizer):
# track gradient norms
grad_norm_dic = self._track_gradient_norm()
# clip gradients
self.trainer.accelerator.clip_gradients(
optimizer, self.trainer.gradient_clip_val, gradient_clip_algorithm=self.trainer.gradient_clip_algorithm
)
self._cur_grad_norm_dict = grad_norm_dic
def _track_gradient_norm(self):
grad_norm_dict = {}
if (self.trainer.global_step + 1) % self.trainer.log_every_n_steps == 0:
if float(self.trainer.track_grad_norm) > 0:
model = self.trainer.lightning_module
grad_norm_dict = grad_norm(model, self.trainer.track_grad_norm)
return grad_norm_dict
def _tbptt_split_batch(self, batch: Any) -> List[Any]:
splits = [batch]
truncated_bptt_enabled = self._truncated_bptt_enabled()
if truncated_bptt_enabled:
model_ref = self.trainer.lightning_module
with self.trainer.profiler.profile("tbptt_split_batch"):
splits = model_ref.tbptt_split_batch(batch, self._truncated_bptt_steps())
return splits
def run_training_epoch(self):
# modify dataloader if needed (ddp, etc...)
train_dataloader = self.trainer.accelerator.process_dataloader(self.trainer.train_dataloader)
# track epoch output
epoch_output = [[] for _ in range(self.num_optimizers)]
train_dataloader = self.trainer.data_connector.get_profiled_train_dataloader(train_dataloader)
dataloader_idx = 0
val_loop_called = False
batch_idx = None
is_last_batch = None
for batch_idx, (batch, is_last_batch) in train_dataloader:
self.trainer.batch_idx = batch_idx
self.trainer.is_last_batch = is_last_batch
# ------------------------------------
# TRAINING_STEP + TRAINING_STEP_END
# ------------------------------------
with self.trainer.profiler.profile("run_training_batch"):
batch_output = self.run_training_batch(batch, batch_idx, dataloader_idx)
# when returning -1 from train_step, we end epoch early
if batch_output.signal == -1:
break
# hook
# TODO: add outputs to batches
self.on_train_batch_end(
epoch_output,
batch_output.training_step_output_for_epoch_end,
batch,
batch_idx,
dataloader_idx,
)
# -----------------------------------------
# SAVE METRICS TO LOGGERS
# -----------------------------------------
self.trainer.logger_connector.log_train_step_metrics(batch_output)
# -----------------------------------------
# VALIDATE IF NEEDED
# -----------------------------------------
should_check_val = self._should_check_val_fx(batch_idx, is_last_batch)
if should_check_val:
self.trainer.validating = True
self.trainer.run_evaluation()
self.trainer.training = True
val_loop_called = True
# -----------------------------------------
# SAVE LOGGERS (ie: Tensorboard, etc...)
# -----------------------------------------
self.save_loggers_on_train_batch_end()
# update LR schedulers
monitor_metrics = deepcopy(self.trainer.logger_connector.callback_metrics)
self.update_train_loop_lr_schedulers(monitor_metrics=monitor_metrics)
self.trainer.checkpoint_connector.has_trained = True
# max steps reached, end training
if (
self.trainer.max_steps is not None and self.trainer.max_steps <= self.trainer.global_step + 1
and self._accumulated_batches_reached()
):
break
# end epoch early
# stop when the flag is changed or we've gone past the amount
# requested in the batches
if self.trainer.should_stop:
break
self.trainer.total_batch_idx += 1
# stop epoch if we limited the number of training batches
if self._num_training_batches_reached(is_last_batch):
break
# progress global step according to grads progress
self.increment_accumulated_grad_global_step()
if batch_idx is None:
# dataloader/iterator did not produce a batch
return
# handle epoch_output on epoch end
self.on_train_epoch_end(epoch_output)
# log epoch metrics
self.trainer.logger_connector.log_train_epoch_end_metrics(epoch_output)
should_check_val = self._should_check_val_fx(batch_idx, is_last_batch, on_epoch=True)
should_skip_eval = self.trainer.evaluation_loop.should_skip_evaluation(self.trainer.num_val_batches)
should_train_only = self.trainer.disable_validation or should_skip_eval
# update epoch level lr_schedulers if no val loop outside train loop is triggered
if (val_loop_called and not should_check_val) or should_train_only:
self.trainer.optimizer_connector.update_learning_rates(interval='epoch')
if should_train_only:
self.check_checkpoint_callback(True)
if should_check_val:
self.trainer.validating = True
self.trainer.run_evaluation(on_epoch=True)
self.trainer.training = True
# increment the global step once
# progress global step according to grads progress
self.increment_accumulated_grad_global_step()
def on_train_epoch_end(self, epoch_output: List[List[List[Result]]]) -> None:
# inform logger the batch loop has finished
self.trainer.logger_connector.on_train_epoch_end()
# prepare epoch output
processed_epoch_output = TrainLoop._prepare_outputs(epoch_output, batch_mode=False)
# get the model and call model.training_epoch_end
model = self.trainer.lightning_module
if is_overridden('training_epoch_end', model=model):
# run training_epoch_end
# refresh the result for custom logging at the epoch level
model._current_fx_name = 'training_epoch_end'
# lightningmodule hook
training_epoch_end_output = model.training_epoch_end(processed_epoch_output)
if training_epoch_end_output is not None:
raise MisconfigurationException(
'training_epoch_end expects a return of None. '
'HINT: remove the return statement in training_epoch_end'
)
# capture logging
self.trainer.logger_connector.cache_logged_metrics()
# call train epoch end hooks
self._on_train_epoch_end_hook(processed_epoch_output)
self.trainer.call_hook('on_epoch_end')
def _on_train_epoch_end_hook(self, processed_epoch_output) -> None:
# We cannot rely on Trainer.call_hook because the signatures might be different across
# lightning module and callback
# As a result, we need to inspect if the module accepts `outputs` in `on_train_epoch_end`
# This implementation is copied from Trainer.call_hook
hook_name = "on_train_epoch_end"
# set hook_name to model + reset Result obj
skip = self.trainer._reset_result_and_set_hook_fx_name(hook_name)
# always profile hooks
with self.trainer.profiler.profile(hook_name):
# first call trainer hook
if hasattr(self.trainer, hook_name):
trainer_hook = getattr(self.trainer, hook_name)
trainer_hook(processed_epoch_output)
# next call hook in lightningModule
model_ref = self.trainer.lightning_module
if is_overridden(hook_name, model_ref):
hook_fx = getattr(model_ref, hook_name)
if is_param_in_hook_signature(hook_fx, "outputs"):
self.warning_cache.warn(
"The signature of `ModelHooks.on_train_epoch_end` has changed in v1.3."
" `outputs` parameter has been deprecated."
" Support for the old signature will be removed in v1.5", DeprecationWarning
)
model_ref.on_train_epoch_end(processed_epoch_output)
else:
model_ref.on_train_epoch_end()
# if the PL module doesn't have the hook then call the accelerator
# used to auto-reduce things for the user with Results obj
elif hasattr(self.trainer.accelerator, hook_name):
accelerator_hook = getattr(self.trainer.accelerator, hook_name)
accelerator_hook()
if not skip:
self.trainer._cache_logged_metrics()
def run_training_batch(self, batch, batch_idx, dataloader_idx):
# track grad norms
grad_norm_dic = {}
# bookkeeping
self.trainer.hiddens = None
optimizers = self.prepare_optimizers()
# track all outputs across time and num of optimizers
batch_outputs = [[] for _ in range(len(optimizers))]
if batch is None:
self.warning_cache.warn("train_dataloader yielded None. If this was on purpose, ignore this warning...")
return AttributeDict(
signal=0,
grad_norm_dic=grad_norm_dic,
training_step_output_for_epoch_end=batch_outputs,
)
# hook
response = self.trainer.call_hook("on_batch_start")
if response == -1:
return AttributeDict(signal=-1, grad_norm_dic=grad_norm_dic)
# hook
response = self.trainer.call_hook("on_train_batch_start", batch, batch_idx, dataloader_idx)
if response == -1:
return AttributeDict(signal=-1, grad_norm_dic=grad_norm_dic)
# lightning module hook
splits = self._tbptt_split_batch(batch)
for split_idx, split_batch in enumerate(splits):
# create an iterable for optimizers and loop over them
for opt_idx, optimizer in optimizers:
# toggle model params + set info to logger_connector
self.run_train_split_start(split_idx, split_batch, opt_idx, optimizer)
if self.should_accumulate():
# For gradient accumulation
# -------------------
# calculate loss (train step + train step end)
# -------------------
# automatic_optimization=True: perform dpp sync only when performing optimizer_step
# automatic_optimization=False: don't block synchronization here
with self.block_ddp_sync_behaviour():
self.training_step_and_backward(
split_batch, batch_idx, opt_idx, optimizer, self.trainer.hiddens
)
batch_outputs = self._process_closure_result(
batch_outputs=batch_outputs,
opt_idx=opt_idx,
)
# ------------------------------
# BACKWARD PASS
# ------------------------------
# gradient update with accumulated gradients
else:
if self.trainer.lightning_module.automatic_optimization:
def train_step_and_backward_closure():
result = self.training_step_and_backward(
split_batch, batch_idx, opt_idx, optimizer, self.trainer.hiddens
)
return None if result is None else result.loss
# optimizer step
self.optimizer_step(optimizer, opt_idx, batch_idx, train_step_and_backward_closure)
else:
self._curr_step_result = self.training_step(
split_batch, batch_idx, opt_idx, self.trainer.hiddens
)
if self._curr_step_result is None:
# user decided to skip optimization
# make sure to zero grad.
continue
batch_outputs = self._process_closure_result(
batch_outputs=batch_outputs,
opt_idx=opt_idx,
)
                    # TODO: properly aggregate grad_norm across opt_idx and split_idx
grad_norm_dic = self._cur_grad_norm_dict
self._cur_grad_norm_dict = None
# update running loss + reset accumulated loss
self.update_running_loss()
result = AttributeDict(
signal=0,
grad_norm_dic=grad_norm_dic,
training_step_output_for_epoch_end=batch_outputs,
)
return result
@contextmanager
def block_ddp_sync_behaviour(self, should_block_sync: bool = False):
"""
automatic_optimization = True
Blocks ddp sync gradients behaviour on backwards pass.
This is useful for skipping sync when accumulating gradients, reducing communication overhead
automatic_optimization = False
do not block ddp gradient sync when using manual optimization
as gradients are needed within the training step
Returns:
context manager with sync behaviour off
"""
if (
isinstance(self.trainer.training_type_plugin, ParallelPlugin)
and (self.trainer.lightning_module.automatic_optimization or should_block_sync)
):
with self.trainer.training_type_plugin.block_backward_sync():
yield None
else:
yield None
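    # Illustrative sketch: ``block_backward_sync`` plays the same role as raw PyTorch's
    # ``DistributedDataParallel.no_sync`` context manager, e.g.:
    #
    #     with ddp_model.no_sync():   # hypothetical DDP-wrapped module; no all-reduce here
    #         loss.backward()         # gradients only accumulate locally
    #     loss.backward()             # the next un-blocked backward triggers the all-reduce
    #
    # which is why sync is skipped while accumulating and re-enabled on the stepping batch.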
def _process_closure_result(self, batch_outputs: list, opt_idx: int) -> list:
opt_closure_result = self._curr_step_result
if opt_closure_result is not None:
# cache metrics
self.trainer.logger_connector.cache_training_step_metrics(opt_closure_result)
# check if loss or model weights are nan
if self.trainer.terminate_on_nan:
self._check_finite(opt_closure_result.loss)
# track all the outputs across all steps
batch_opt_idx = opt_idx if len(batch_outputs) > 1 else 0
batch_outputs[batch_opt_idx].append(opt_closure_result.training_step_output_for_epoch_end)
if self.trainer.lightning_module.automatic_optimization:
# track total loss for logging (avoid mem leaks)
self.accumulated_loss.append(opt_closure_result.loss)
self._curr_step_result = None
return batch_outputs
def training_step_and_backward(self, split_batch, batch_idx, opt_idx, optimizer, hiddens):
"""Wrap forward, zero_grad and backward in a closure so second order methods work"""
with self.trainer.profiler.profile("training_step_and_backward"):
# lightning module hook
result = self.training_step(split_batch, batch_idx, opt_idx, hiddens)
self._curr_step_result = result
if not self._skip_backward and self.trainer.lightning_module.automatic_optimization:
is_first_batch_to_accumulate = batch_idx % self.trainer.accumulate_grad_batches == 0
if is_first_batch_to_accumulate:
self.on_before_zero_grad(optimizer)
self.optimizer_zero_grad(batch_idx, optimizer, opt_idx)
# backward pass
if result is not None:
with self.trainer.profiler.profile("backward"):
self.backward(result, optimizer, opt_idx)
# hook - call this hook only
# when gradients have finished to accumulate
if not self.should_accumulate():
self.on_after_backward(result.training_step_output, batch_idx, result.loss)
# check if loss or model weights are nan
if self.trainer.terminate_on_nan:
self._check_finite(result.loss)
else:
self.warning_cache.warn(
"training_step returned None. If this was on purpose, ignore this warning..."
)
if len(self.trainer.optimizers) > 1:
# revert back to previous state
self.trainer.lightning_module.untoggle_optimizer(opt_idx)
return result
def _check_finite(self, loss: torch.Tensor) -> None:
if not torch.isfinite(loss).all():
raise ValueError(f'The loss returned in `training_step` is {loss}.')
model = self.trainer.lightning_module
detect_nan_parameters(model)
def backward(self, result, optimizer, opt_idx, *args, **kwargs):
self.trainer.dev_debugger.track_event("backward_call")
should_accumulate = self.should_accumulate()
# backward can be called manually in the training loop
if isinstance(result, torch.Tensor):
self.trainer.accelerator.backward(result, optimizer, opt_idx, should_accumulate, *args, **kwargs)
else:
result.closure_loss = self.trainer.accelerator.backward(
result.closure_loss, optimizer, opt_idx, should_accumulate, *args, **kwargs
)
if not self.should_accumulate():
# track gradients
self.track_and_norm_grad(optimizer=optimizer)
def update_train_loop_lr_schedulers(self, monitor_metrics=None):
num_accumulated_batches_reached = self._accumulated_batches_reached()
num_training_batches_reached = self._num_training_batches_reached()
if num_accumulated_batches_reached or num_training_batches_reached:
# update lr
self.trainer.optimizer_connector.update_learning_rates(
interval="step",
monitor_metrics=monitor_metrics,
opt_indices=[opt_idx for opt_idx, _ in self.get_optimizers_iterable()],
)
def increment_accumulated_grad_global_step(self):
num_accumulated_batches_reached = self._accumulated_batches_reached()
num_training_batches_reached = self._num_training_batches_reached()
# progress global step according to grads progress
if num_accumulated_batches_reached or num_training_batches_reached:
self.trainer.global_step = self.trainer.accelerator.update_global_step(
self.trainer.total_batch_idx, self.trainer.global_step
)
def _accumulated_batches_reached(self):
return (self.trainer.batch_idx + 1) % self.trainer.accumulate_grad_batches == 0
def _num_training_batches_reached(self, is_last_batch=False):
return (self.trainer.batch_idx + 1) == self.trainer.num_training_batches or is_last_batch
def should_accumulate(self):
# checks if backward or backward + optimizer step (via closure)
accumulation_done = self._accumulated_batches_reached()
is_final_batch = self._num_training_batches_reached()
return not (accumulation_done or is_final_batch)
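    # Worked example (assuming ``accumulate_grad_batches=4`` and 10 training batches):
    # batch_idx 0, 1, 2 -> (batch_idx + 1) % 4 != 0, so should_accumulate() is True (backward only);
    # batch_idx 3       -> (3 + 1) % 4 == 0, so should_accumulate() is False (optimizer step);
    # batch_idx 9       -> final batch, _num_training_batches_reached() is True, so a step happens
    #                      even though (9 + 1) % 4 != 0.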
def _should_check_val_fx(self, batch_idx: int, is_last_batch: bool, on_epoch: bool = False) -> bool:
""" Decide if we should run validation. """
if not self.trainer.enable_validation:
return False
# check if this epoch is eligible to run validation
if (self.trainer.current_epoch + 1) % self.trainer.check_val_every_n_epoch != 0:
return False
# val_check_batch is inf for iterable datasets with no length defined
# TODO: let training/eval loop handle logic around limit_*_batches and val_check_batch
is_val_check_batch = False
if isinstance(self.trainer.limit_train_batches, int) and self.trainer.val_check_batch == float('inf'):
is_val_check_batch = (batch_idx + 1) % self.trainer.limit_train_batches == 0
elif self.trainer.val_check_batch != float('inf'):
is_val_check_batch = (batch_idx + 1) % self.trainer.val_check_batch == 0
# Note: num_training_batches is also inf for iterable datasets with no length defined
epoch_end_val_check = (batch_idx + 1) % self.trainer.num_training_batches == 0
is_last_batch_for_infinite_dataset = is_last_batch and self.trainer.val_check_batch == float("inf")
if on_epoch:
return (
is_val_check_batch and epoch_end_val_check
) or self.trainer.should_stop or is_last_batch_for_infinite_dataset
else:
return is_val_check_batch and not epoch_end_val_check
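    # Worked example (assuming ``val_check_batch=100``, 250 training batches and an eligible epoch):
    # with on_epoch=False, validation runs after batch_idx 99 and 199, because (batch_idx + 1) is a
    # multiple of 100 but not of 250; at the epoch-end batch (batch_idx 249), on_epoch=True only
    # returns True if that batch is also a val-check batch, the trainer should stop, or the data is
    # an infinite iterable whose last batch was just produced.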
def build_train_args(self, batch, batch_idx, opt_idx, hiddens):
# enable not needing to add opt_idx to training_step
args = [batch, batch_idx]
if len(self.trainer.optimizers) > 1:
if self.trainer.has_arg("training_step", "optimizer_idx"):
if not self.trainer.lightning_module.automatic_optimization:
self.warning_cache.warn(
"`training_step` hook signature has changed in v1.3."
" `optimizer_idx` argument has been removed in case of manual optimization. Support for"
" the old signature will be removed in v1.5", DeprecationWarning
)
args.append(opt_idx)
elif not self.trainer.has_arg(
"training_step", "optimizer_idx"
) and self.trainer.lightning_module.automatic_optimization:
raise ValueError(
f"Your LightningModule defines {len(self.trainer.optimizers)} optimizers but"
' `training_step` is missing the `optimizer_idx` argument.'
)
# pass hiddens if using tbptt
if self._truncated_bptt_enabled():
args.append(hiddens)
return args
def _truncated_bptt_enabled(self) -> bool:
""" Temporary tbptt utilities until this flag is fully migrated to the lightning module. """
return self._truncated_bptt_steps() > 0
def _truncated_bptt_steps(self) -> int:
lightning_module = self.trainer.lightning_module
# Give precedence to the LightningModule as the Trainer flag will be removed in v1.5
if lightning_module.truncated_bptt_steps > 0:
return lightning_module.truncated_bptt_steps
return self.trainer.truncated_bptt_steps or 0
def save_loggers_on_train_batch_end(self):
# when loggers should save to disk
should_flush_logs = self.trainer.logger_connector.should_flush_logs
if should_flush_logs and self.trainer.is_global_zero and self.trainer.logger is not None:
self.trainer.logger.save()
def prepare_optimizers(self):
# in manual optimization we loop over all optimizers at once
optimizers = self.get_optimizers_iterable()
if not self.trainer.lightning_module.automatic_optimization:
optimizers = [optimizers[0]]
return optimizers
def run_train_split_start(self, split_idx, split_batch, opt_idx, optimizer):
# set split_idx to trainer for tracking
self.trainer.split_idx = split_idx
# make sure only the gradients of the current optimizer's parameters are calculated
# in the training step to prevent dangling gradients in multiple-optimizer setup.
if self.trainer.lightning_module.automatic_optimization and len(self.trainer.optimizers) > 1:
model = self.trainer.lightning_module
model.toggle_optimizer(optimizer, opt_idx)
# use to track metrics internally
self.trainer.logger_connector.on_train_split_start(split_idx, opt_idx, split_batch)
def update_running_loss(self):
accumulated_loss = self.accumulated_loss.mean()
if accumulated_loss is not None:
# calculate running loss for display
self.running_loss.append(self.accumulated_loss.mean() * self.trainer.accumulate_grad_batches)
# reset for next set of accumulated grads
self.accumulated_loss.reset()
| 42.735119 | 119 | 0.654874 | ["Apache-2.0"] | dcfidalgo/pytorch-lightning | pytorch_lightning/trainer/training_loop.py | 43,077 | Python |
# coding: utf-8
import gettext
# Make the gettext function _() available in the global namespace, even if no i18n is in use
gettext.install("bookworm", names=["ngettext"])
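# Illustrative example: after the install() call above, ``_`` and ``ngettext`` are available as
# builtins in every module of the package without importing gettext, e.g.:
#
#     title = _("Table of contents")
#     label = ngettext("{n} bookmark", "{n} bookmarks", n).format(n=n)
#
# The message strings above are hypothetical; only the injected names come from install().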
| 24.857143 | 92 | 0.741379 | ["MIT"] | blindpandas/bookworm | bookworm/__init__.py | 174 | Python |
# Copyright Pincer 2021-Present
# Full MIT License can be found in `LICENSE` at the project root.
from __future__ import annotations
from dataclasses import dataclass
from typing import TYPE_CHECKING
from ...utils.api_object import APIObject
if TYPE_CHECKING:
from ..message.emoji import Emoji
@dataclass
class Reaction(APIObject):
"""
Represents a Discord Reaction object
:param count:
times this emoji has been used to react
:param me:
whether the current user reacted using this emoji
:param emoji:
emoji information
"""
count: int
me: bool
emoji: Emoji
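    # Illustrative sketch: because Reaction is a plain dataclass it can also be built directly,
    # e.g. ``Reaction(count=2, me=False, emoji=some_emoji)`` for an already-constructed Emoji.
    # In normal use it is presumably produced from the raw gateway payload
    # (e.g. ``{"count": 2, "me": false, "emoji": {...}}``) through the APIObject machinery.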
| 19.6875 | 65 | 0.706349 | ["MIT"] | gillesigot/Pincer | pincer/objects/message/reaction.py | 630 | Python |
"""
.. module:: category_encoders
:synopsis:
:platform:
"""
from category_encoders.backward_difference import BackwardDifferenceEncoder
from category_encoders.binary import BinaryEncoder
from category_encoders.count import CountEncoder
from category_encoders.hashing import HashingEncoder
from category_encoders.helmert import HelmertEncoder
from category_encoders.one_hot import OneHotEncoder
from category_encoders.ordinal import OrdinalEncoder
from category_encoders.sum_coding import SumEncoder
from category_encoders.polynomial import PolynomialEncoder
from category_encoders.basen import BaseNEncoder
from category_encoders.leave_one_out import LeaveOneOutEncoder
from category_encoders.target_encoder import TargetEncoder
from category_encoders.woe import WOEEncoder
from category_encoders.m_estimate import MEstimateEncoder
from category_encoders.james_stein import JamesSteinEncoder
from category_encoders.cat_boost import CatBoostEncoder
from category_encoders.glmm import GLMMEncoder
__version__ = '2.2.2'
__author__ = 'willmcginnis'
__all__ = [
'BackwardDifferenceEncoder',
'BinaryEncoder',
'CountEncoder',
'HashingEncoder',
'HelmertEncoder',
'OneHotEncoder',
'OrdinalEncoder',
'SumEncoder',
'PolynomialEncoder',
'BaseNEncoder',
'LeaveOneOutEncoder',
'TargetEncoder',
'WOEEncoder',
'MEstimateEncoder',
'JamesSteinEncoder',
'CatBoostEncoder',
'GLMMEncoder'
]
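# Illustrative usage sketch: every encoder exported above follows the scikit-learn transformer API
# (fit / transform / fit_transform), e.g.:
#
#     import pandas as pd
#     import category_encoders as ce
#
#     df = pd.DataFrame({"color": ["red", "green", "red"], "y": [1, 0, 1]})  # hypothetical data
#     encoder = ce.TargetEncoder(cols=["color"])
#     encoded = encoder.fit_transform(df[["color"]], df["y"])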
| 28.96 | 75 | 0.812155 | ["BSD-3-Clause"] | Fayel-cyber/category_encoders | category_encoders/__init__.py | 1,448 | Python |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for spectral_ops."""
import itertools
from absl.testing import parameterized
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.signal import spectral_ops
from tensorflow.python.ops.signal import window_ops
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class SpectralOpsTest(test.TestCase, parameterized.TestCase):
@staticmethod
def _np_hann_periodic_window(length):
if length == 1:
return np.ones(1)
odd = length % 2
if not odd:
length += 1
window = 0.5 - 0.5 * np.cos(2.0 * np.pi * np.arange(length) / (length - 1))
if not odd:
window = window[:-1]
return window
@staticmethod
def _np_frame(data, window_length, hop_length):
num_frames = 1 + int(np.floor((len(data) - window_length) // hop_length))
shape = (num_frames, window_length)
strides = (data.strides[0] * hop_length, data.strides[0])
return np.lib.stride_tricks.as_strided(data, shape=shape, strides=strides)
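  # Worked example: for len(data) == 512, window_length == 64 and hop_length == 32,
  # num_frames = 1 + (512 - 64) // 32 = 15, so as_strided returns a (15, 64) view whose rows start
  # every 32 samples -- the same framing spectral_ops.stft applies before the windowed FFT.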
@staticmethod
def _np_stft(data, fft_length, hop_length, window_length):
frames = SpectralOpsTest._np_frame(data, window_length, hop_length)
window = SpectralOpsTest._np_hann_periodic_window(window_length)
return np.fft.rfft(frames * window, fft_length)
@staticmethod
def _np_inverse_stft(stft, fft_length, hop_length, window_length):
frames = np.fft.irfft(stft, fft_length)
# Pad or truncate frames's inner dimension to window_length.
frames = frames[..., :window_length]
frames = np.pad(frames, [[0, 0]] * (frames.ndim - 1) +
[[0, max(0, window_length - frames.shape[-1])]], "constant")
window = SpectralOpsTest._np_hann_periodic_window(window_length)
return SpectralOpsTest._np_overlap_add(frames * window, hop_length)
@staticmethod
def _np_overlap_add(stft, hop_length):
num_frames, window_length = np.shape(stft)
# Output length will be one complete window, plus another hop_length's
# worth of points for each additional window.
output_length = window_length + (num_frames - 1) * hop_length
output = np.zeros(output_length)
for i in range(num_frames):
output[i * hop_length:i * hop_length + window_length] += stft[i,]
return output
def _compare(self, signal, frame_length, frame_step, fft_length, tol):
actual_stft = spectral_ops.stft(
signal, frame_length, frame_step, fft_length, pad_end=False)
signal_ph = array_ops.placeholder_with_default(signal, shape=signal.shape)
actual_stft_from_ph = spectral_ops.stft(
signal_ph, frame_length, frame_step, fft_length, pad_end=False)
actual_inverse_stft = spectral_ops.inverse_stft(
actual_stft, frame_length, frame_step, fft_length)
actual_stft, actual_stft_from_ph, actual_inverse_stft = self.evaluate(
[actual_stft, actual_stft_from_ph, actual_inverse_stft])
actual_stft_ph = array_ops.placeholder_with_default(
actual_stft, shape=actual_stft.shape)
actual_inverse_stft_from_ph = self.evaluate(
spectral_ops.inverse_stft(
actual_stft_ph, frame_length, frame_step, fft_length))
# Confirm that there is no difference in output when shape/rank is fully
# unknown or known.
self.assertAllClose(actual_stft, actual_stft_from_ph)
self.assertAllClose(actual_inverse_stft, actual_inverse_stft_from_ph)
expected_stft = SpectralOpsTest._np_stft(
signal, fft_length, frame_step, frame_length)
self.assertAllClose(expected_stft, actual_stft, rtol=tol, atol=tol)
expected_inverse_stft = SpectralOpsTest._np_inverse_stft(
expected_stft, fft_length, frame_step, frame_length)
self.assertAllClose(
expected_inverse_stft, actual_inverse_stft, rtol=tol, atol=tol)
def test_shapes(self):
signal = np.zeros((512,)).astype(np.float32)
# If fft_length is not provided, the smallest enclosing power of 2 of
# frame_length (8) is used.
stft = spectral_ops.stft(signal, frame_length=7, frame_step=8,
pad_end=True)
self.assertAllEqual([64, 5], stft.shape.as_list())
self.assertAllEqual([64, 5], self.evaluate(stft).shape)
stft = spectral_ops.stft(signal, frame_length=8, frame_step=8,
pad_end=True)
self.assertAllEqual([64, 5], stft.shape.as_list())
self.assertAllEqual([64, 5], self.evaluate(stft).shape)
stft = spectral_ops.stft(signal, frame_length=8, frame_step=8,
fft_length=16, pad_end=True)
self.assertAllEqual([64, 9], stft.shape.as_list())
self.assertAllEqual([64, 9], self.evaluate(stft).shape)
stft = spectral_ops.stft(signal, frame_length=16, frame_step=8,
fft_length=8, pad_end=True)
self.assertAllEqual([64, 5], stft.shape.as_list())
self.assertAllEqual([64, 5], self.evaluate(stft).shape)
stft = np.zeros((32, 9)).astype(np.complex64)
inverse_stft = spectral_ops.inverse_stft(stft, frame_length=8,
fft_length=16, frame_step=8)
expected_length = (stft.shape[0] - 1) * 8 + 8
self.assertAllEqual([256], inverse_stft.shape.as_list())
self.assertAllEqual([expected_length], self.evaluate(inverse_stft).shape)
@parameterized.parameters(
(512, 64, 32, 64, np.float32, 1e-4),
(512, 64, 32, 64, np.float64, 1e-8),
(512, 64, 64, 64, np.float32, 1e-4),
(512, 64, 64, 64, np.float64, 1e-8),
(512, 72, 64, 64, np.float32, 1e-4),
(512, 72, 64, 64, np.float64, 1e-8),
(512, 64, 25, 64, np.float32, 1e-4),
(512, 64, 25, 64, np.float64, 1e-8),
(512, 25, 15, 36, np.float32, 1e-4),
(512, 25, 15, 36, np.float64, 1e-8),
(123, 23, 5, 42, np.float32, 1e-4),
(123, 23, 5, 42, np.float64, 1e-8))
def test_stft_and_inverse_stft(self, signal_length, frame_length,
frame_step, fft_length, np_rtype, tol):
"""Test that spectral_ops.stft/inverse_stft match a NumPy implementation."""
signal = np.random.random(signal_length).astype(np_rtype)
self._compare(signal, frame_length, frame_step, fft_length, tol)
@parameterized.parameters(
# 87.5% overlap.
(4096, 256, 32, 256, np.float32, 1e-5, 1e-6),
(4096, 256, 32, 256, np.float64, 1e-8, 1e-8),
# 75% overlap.
(4096, 256, 64, 256, np.float32, 1e-5, 1e-6),
(4096, 256, 64, 256, np.float64, 1e-8, 1e-8),
# Odd frame hop.
(4096, 128, 25, 128, np.float32, 1e-3, 1e-6),
(4096, 128, 25, 128, np.float64, 5e-4, 1e-8),
# Odd frame length.
(4096, 127, 32, 128, np.float32, 1e-3, 1e-6),
(4096, 127, 32, 128, np.float64, 1e-3, 1e-8),
# 50% overlap.
(4096, 128, 64, 128, np.float32, 0.4, 1e-6),
(4096, 128, 64, 128, np.float64, 0.4, 1e-8))
def test_stft_round_trip(self, signal_length, frame_length, frame_step,
fft_length, np_rtype, threshold,
corrected_threshold):
# Generate a random white Gaussian signal.
signal = np.random.normal(size=signal_length).astype(np_rtype)
stft = spectral_ops.stft(signal, frame_length, frame_step, fft_length,
pad_end=False)
inverse_stft = spectral_ops.inverse_stft(stft, frame_length, frame_step,
fft_length)
inverse_stft_corrected = spectral_ops.inverse_stft(
stft, frame_length, frame_step, fft_length,
window_fn=spectral_ops.inverse_stft_window_fn(frame_step))
inverse_stft, inverse_stft_corrected = self.evaluate(
[inverse_stft, inverse_stft_corrected])
# Truncate signal to the size of inverse stft.
signal = signal[:inverse_stft.shape[0]]
# Ignore the frame_length samples at either edge.
signal = signal[frame_length:-frame_length]
inverse_stft = inverse_stft[frame_length:-frame_length]
inverse_stft_corrected = inverse_stft_corrected[
frame_length:-frame_length]
# Check that the inverse and original signal are close up to a scale
# factor.
inverse_stft_scaled = inverse_stft / np.mean(np.abs(inverse_stft))
signal_scaled = signal / np.mean(np.abs(signal))
self.assertLess(np.std(inverse_stft_scaled - signal_scaled), threshold)
# Check that the inverse with correction and original signal are close.
self.assertLess(np.std(inverse_stft_corrected - signal),
corrected_threshold)
@parameterized.parameters(
(256, 32),
(256, 64),
(128, 25),
(127, 32),
(128, 64))
def test_inverse_stft_window_fn(self, frame_length, frame_step):
"""Test that inverse_stft_window_fn has unit gain at each window phase."""
hann_window = window_ops.hann_window(frame_length, dtype=dtypes.float32)
inverse_window_fn = spectral_ops.inverse_stft_window_fn(frame_step)
inverse_window = inverse_window_fn(frame_length, dtype=dtypes.float32)
hann_window, inverse_window = self.evaluate([hann_window, inverse_window])
# Expect unit gain at each phase of the window.
product_window = hann_window * inverse_window
for i in range(frame_step):
self.assertAllClose(1.0, np.sum(product_window[i::frame_step]))
@parameterized.parameters((256, 64), (128, 32))
def test_inverse_stft_window_fn_special_case(self, frame_length, frame_step):
"""Test inverse_stft_window_fn in special overlap = 3/4 case."""
# Cases in which frame_length is an integer multiple of 4 * frame_step are
# special because they allow exact reproduction of the waveform with a
# squared Hann window (Hann window in both forward and reverse transforms).
# In the case where frame_length = 4 * frame_step, that combination
# produces a constant gain of 1.5, and so the corrected window will be the
# Hann window / 1.5.
hann_window = window_ops.hann_window(frame_length, dtype=dtypes.float32)
inverse_window_fn = spectral_ops.inverse_stft_window_fn(frame_step)
inverse_window = inverse_window_fn(frame_length, dtype=dtypes.float32)
self.assertAllClose(hann_window, inverse_window * 1.5)
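  # Derivation sketch: with the periodic Hann window w[n] = 0.5 - 0.5*cos(2*pi*n/N) and hop N/4,
  # the four overlapping squared windows sum to a constant,
  #   sum_k w[n + k*N/4]^2 = 4 * 0.25 * (1 + 1/2) = 1.5,
  # since the cosine terms cancel over the four equally spaced phases -- hence the exact 1.5
  # factor asserted above.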
@staticmethod
def _compute_stft_gradient(signal, frame_length=32, frame_step=16,
fft_length=32):
"""Computes the gradient of the STFT with respect to `signal`."""
stft = spectral_ops.stft(signal, frame_length, frame_step, fft_length)
magnitude_stft = math_ops.abs(stft)
loss = math_ops.reduce_sum(magnitude_stft)
return gradients_impl.gradients([loss], [signal])[0]
def test_gradients(self):
"""Test that spectral_ops.stft has a working gradient."""
# TODO(rjryan): Update gradient tests for Eager.
if context.executing_eagerly():
return
with self.session() as sess:
signal_length = 512
# An all-zero signal has all zero gradients with respect to the sum of the
# magnitude STFT.
empty_signal = array_ops.zeros([signal_length], dtype=dtypes.float32)
empty_signal_gradient = sess.run(
self._compute_stft_gradient(empty_signal))
self.assertTrue((empty_signal_gradient == 0.0).all())
# A sinusoid will have non-zero components of its gradient with respect to
# the sum of the magnitude STFT.
sinusoid = math_ops.sin(
2 * np.pi * math_ops.linspace(0.0, 1.0, signal_length))
sinusoid_gradient = self.evaluate(self._compute_stft_gradient(sinusoid))
self.assertFalse((sinusoid_gradient == 0.0).all())
@parameterized.parameters(
(64, 16, 8, 16, np.float32, 2e-3, 5e-4),
(64, 16, 8, 16, np.float64, 1e-8, 1e-8),
(64, 16, 16, 16, np.float32, 2e-3, 5e-4),
(64, 16, 16, 16, np.float64, 1e-8, 1e-8),
(64, 16, 7, 16, np.float32, 2e-3, 5e-4),
(64, 16, 7, 16, np.float64, 1e-8, 1e-8),
(64, 7, 4, 9, np.float32, 2e-3, 5e-4),
(64, 7, 4, 9, np.float64, 1e-8, 1e-8),
(29, 5, 1, 10, np.float32, 2e-3, 5e-4),
(29, 5, 1, 10, np.float64, 1e-8, 1e-8))
@test.disable_with_predicate(
pred=test.is_built_with_rocm,
skip_message="On ROCm, this fails with mismatches at some locations "
"(possibly due to peculiarities of rocFFT - investigate)")
def test_gradients_numerical(self, signal_length, frame_length, frame_step,
fft_length, np_rtype, forward_tol, backward_tol):
# TODO(rjryan): Investigate why STFT gradient error is so high.
signal = np.random.rand(signal_length).astype(np_rtype) * 2 - 1
def forward(signal):
return spectral_ops.stft(
signal, frame_length, frame_step, fft_length, pad_end=False)
((f_jacob_t,), (f_jacob_n,)) = gradient_checker_v2.compute_gradient(
forward, [signal])
self.assertAllClose(f_jacob_t, f_jacob_n,
rtol=forward_tol, atol=forward_tol)
def backward(stft):
return spectral_ops.inverse_stft(
stft, frame_length, frame_step, fft_length)
stft = forward(signal)
((b_jacob_t,), (b_jacob_n,)) = gradient_checker_v2.compute_gradient(
backward, [stft])
self.assertAllClose(b_jacob_t, b_jacob_n,
rtol=backward_tol, atol=backward_tol)
@parameterized.parameters(
itertools.product(
(4000,),
(256,),
(np.float32, np.float64),
("ortho", None),
("vorbis", "kaiser_bessel_derived", None),
(False, True)))
def test_mdct_round_trip(self, signal_length, frame_length, np_rtype,
norm, window_type, pad_end):
if np_rtype == np.float32:
tol = 1e-5
else:
if window_type == "kaiser_bessel_derived":
tol = 1e-6
else:
tol = 1e-8
# Generate a random white Gaussian signal.
signal = np.random.normal(size=signal_length).astype(np_rtype)
if window_type == "vorbis":
window_fn = window_ops.vorbis_window
elif window_type == "kaiser_bessel_derived":
window_fn = window_ops.kaiser_bessel_derived_window
elif window_type is None:
window_fn = None
mdct = spectral_ops.mdct(signal, frame_length, norm=norm,
window_fn=window_fn, pad_end=pad_end)
inverse_mdct = spectral_ops.inverse_mdct(mdct, norm=norm,
window_fn=window_fn)
inverse_mdct = self.evaluate(inverse_mdct)
# Truncate signal and inverse_mdct to their minimum length.
min_length = np.minimum(signal.shape[0], inverse_mdct.shape[0])
# Ignore the half_len samples at either edge.
half_len = frame_length // 2
signal = signal[half_len:min_length-half_len]
inverse_mdct = inverse_mdct[half_len:min_length-half_len]
# Check that the inverse and original signal are close.
self.assertAllClose(inverse_mdct, signal, atol=tol, rtol=tol)
if __name__ == "__main__":
test.main()
| 43.661157 | 80 | 0.678087 | ["Apache-2.0"] | 05259/tensorflow | tensorflow/python/kernel_tests/signal/spectral_ops_test.py | 15,849 | Python |
import pytest
from api.providers.permissions import GroupHelper
from osf_tests.factories import (
ReviewActionFactory,
AuthUserFactory,
PreprintFactory,
PreprintProviderFactory,
)
from osf.utils import permissions as osf_permissions
@pytest.mark.django_db
class ReviewActionCommentSettingsMixin(object):
@pytest.fixture()
def url(self):
raise NotImplementedError
@pytest.fixture()
def provider(self):
return PreprintProviderFactory()
@pytest.fixture()
def preprint(self, provider):
return PreprintFactory(provider=provider)
@pytest.fixture()
def actions(self, preprint):
return [ReviewActionFactory(target=preprint) for _ in range(5)]
@pytest.fixture()
def provider_admin(self, provider):
user = AuthUserFactory()
user.groups.add(GroupHelper(provider).get_group('admin'))
return user
@pytest.fixture()
def provider_moderator(self, provider):
user = AuthUserFactory()
user.groups.add(GroupHelper(provider).get_group('moderator'))
return user
@pytest.fixture()
def node_admin(self, preprint):
user = AuthUserFactory()
preprint.node.add_contributor(
user,
permissions=[
osf_permissions.READ,
osf_permissions.WRITE,
osf_permissions.ADMIN])
return user
def test_comment_settings(
self, app, url, provider, actions, provider_admin,
provider_moderator, node_admin):
expected_ids = set([l._id for l in actions])
for anonymous in [True, False]:
for private in [True, False]:
provider.reviews_comments_anonymous = anonymous
provider.reviews_comments_private = private
provider.save()
# admin always sees comment/creator
res = app.get(url, auth=provider_admin.auth)
self.__assert_fields(res, expected_ids, False, False)
# moderator always sees comment/creator
res = app.get(url, auth=provider_moderator.auth)
self.__assert_fields(res, expected_ids, False, False)
# node admin sees what the settings allow
res = app.get(url, auth=node_admin.auth)
self.__assert_fields(res, expected_ids, anonymous, private)
def __assert_fields(
self, res, expected_ids, hidden_creator, hidden_comment):
data = res.json['data']
actual_ids = set([l['id'] for l in data])
        assert expected_ids == actual_ids, (expected_ids, actual_ids)
for action in data:
if hidden_creator:
assert 'creator' not in action['relationships']
else:
assert 'creator' in action['relationships']
if hidden_comment:
assert 'comment' not in action['attributes']
else:
assert 'comment' in action['attributes']
| 32.861702 | 75 | 0.620913 | ["Apache-2.0"] | listinc/osf.io | api_tests/reviews/mixins/comment_settings.py | 3,089 | Python |
import unittest
from streamlink.buffers import Buffer, RingBuffer
class TestBuffer(unittest.TestCase):
def setUp(self):
self.buffer = Buffer()
def test_write(self):
self.buffer.write(b"1" * 8192)
self.buffer.write(b"2" * 4096)
self.assertEqual(self.buffer.length, 8192 + 4096)
def test_read(self):
self.buffer.write(b"1" * 8192)
self.buffer.write(b"2" * 4096)
self.assertEqual(self.buffer.length, 8192 + 4096)
self.assertEqual(self.buffer.read(4096), b"1" * 4096)
self.assertEqual(self.buffer.read(4096), b"1" * 4096)
self.assertEqual(self.buffer.read(), b"2" * 4096)
self.assertEqual(self.buffer.read(4096), b"")
self.assertEqual(self.buffer.read(), b"")
self.assertEqual(self.buffer.length, 0)
def test_readwrite(self):
self.buffer.write(b"1" * 8192)
self.assertEqual(self.buffer.length, 8192)
self.assertEqual(self.buffer.read(4096), b"1" * 4096)
self.assertEqual(self.buffer.length, 4096)
self.buffer.write(b"2" * 4096)
self.assertEqual(self.buffer.length, 8192)
self.assertEqual(self.buffer.read(1), b"1")
self.assertEqual(self.buffer.read(4095), b"1" * 4095)
self.assertEqual(self.buffer.read(8192), b"2" * 4096)
self.assertEqual(self.buffer.read(8192), b"")
self.assertEqual(self.buffer.read(), b"")
self.assertEqual(self.buffer.length, 0)
def test_close(self):
self.buffer.write(b"1" * 8192)
self.assertEqual(self.buffer.length, 8192)
self.buffer.close()
self.buffer.write(b"2" * 8192)
self.assertEqual(self.buffer.length, 8192)
def test_reuse_input(self):
"""Objects should be reusable after write()"""
original = b"original"
tests = [bytearray(original), memoryview(bytearray(original))]
for data in tests:
self.buffer.write(data)
data[:] = b"reused!!"
self.assertEqual(self.buffer.read(), original)
def test_read_empty(self):
self.assertRaises(
StopIteration,
lambda: next(self.buffer._iterate_chunks(10)))
class TestRingBuffer(unittest.TestCase):
BUFFER_SIZE = 8192 * 4
def setUp(self):
self.buffer = RingBuffer(size=self.BUFFER_SIZE)
def test_write(self):
self.buffer.write(b"1" * 8192)
self.buffer.write(b"2" * 4096)
self.assertEqual(self.buffer.length, 8192 + 4096)
def test_read(self):
self.buffer.write(b"1" * 8192)
self.buffer.write(b"2" * 4096)
self.assertEqual(self.buffer.length, 8192 + 4096)
self.assertEqual(self.buffer.read(4096), b"1" * 4096)
self.assertEqual(self.buffer.read(4096), b"1" * 4096)
self.assertEqual(self.buffer.read(), b"2" * 4096)
self.assertEqual(self.buffer.length, 0)
def test_read_timeout(self):
self.assertRaises(
IOError,
self.buffer.read, timeout=0.1)
def test_write_after_close(self):
self.buffer.close()
self.buffer.write(b"1" * 8192)
self.assertEqual(self.buffer.length, 0)
self.assertTrue(self.buffer.closed)
def test_resize(self):
self.assertEqual(self.buffer.buffer_size, self.BUFFER_SIZE)
self.buffer.resize(self.BUFFER_SIZE * 2)
self.assertEqual(self.buffer.buffer_size, self.BUFFER_SIZE * 2)
def test_free(self):
self.assertEqual(self.buffer.free, self.BUFFER_SIZE)
self.buffer.write(b'1' * 100)
self.assertEqual(self.buffer.free, self.BUFFER_SIZE - 100)
| 33.072727 | 71 | 0.628642 | ["BSD-2-Clause"] | 18928172992817182/streamlink | tests/test_buffer.py | 3,638 | Python |
import os
import rasterio
import mercantile
import numpy as np
import pytest
from tempfile import NamedTemporaryFile, TemporaryDirectory
from affine import Affine
from unittest import TestCase
from unittest.mock import patch
from datetime import datetime
from shapely.geometry import Polygon
from rasterio.enums import Resampling
from rasterio.windows import Window
from rasterio.crs import CRS
from telluric import GeoRaster2, GeoVector
from telluric.constants import WEB_MERCATOR_CRS, WGS84_CRS
from telluric.georaster import MERCATOR_RESOLUTION_MAPPING, GeoRaster2Error, GeoRaster2IOError
from telluric.util.general import convert_resolution_from_meters_to_deg
import sys
import logging
import tempfile
log = logging.getLogger('rasterio._gdal')
log.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(message)s')
ch.setFormatter(formatter)
log.addHandler(ch)
manualtest = pytest.mark.skipif("TEST_MANUAL" not in os.environ, reason="skip on auto testing")
window_data = pytest.mark.skip('pending decission of consistency in results between rasterio read and reproject')
framing = pytest.mark.skip('witing for framing and get_window with boundless false')
tiles = {
10: (579, 394, 10),
11: (1159, 789, 11),
12: (2319, 1578, 12),
14: (9277, 6312, 14),
15: (18554, 12624, 15),
17: (74216, 50496, 17),
18: (148433, 100994, 18)
}
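# Illustrative example: each (x, y, zoom) triple above is a web-mercator tile id; the tests expand
# it into a bounding GeoVector with mercantile, e.g.:
#
#     coords = mercantile.xy_bounds(*tiles[15])            # (left, bottom, right, top) in meters
#     roi = GeoVector(Polygon.from_bounds(*coords), WEB_MERCATOR_CRS)
#
# which is the shape later passed to GeoRaster2.crop / get_tile in the assertions below.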
class GeoRaster2TilesTestGeneral(TestCase):
"""GeoRaster2 Tiles general tests."""
def test_raise_exception_on_bad_file_path(self):
vr = GeoRaster2.open('stam')
with self.assertRaises(GeoRaster2IOError):
vr.get_tile(1, 2, 3)
def test_raise_exception_on_bad_raster_url(self):
vr = GeoRaster2.open('http://stam')
with self.assertRaises(GeoRaster2IOError):
vr.get_tile(1, 2, 3)
def test_raise_exception_on_bad_file_path_save_cog(self):
vr = GeoRaster2.open('stam')
with self.assertRaises(GeoRaster2IOError):
vr.save_cloud_optimized('dest_file')
def test_raise_exception_on_bad_raster_url_save_cog(self):
vr = GeoRaster2.open('http://stam')
with self.assertRaises(GeoRaster2IOError):
vr.save_cloud_optimized('dest_file')
class BaseGeoRasterTestCase(TestCase):
@classmethod
def setUpClass(cls):
path = "./tests/data/raster/raster_for_test.tif"
cls.read_only_vgr = GeoRaster2.open(path)
path = "./tests/data/raster/raster_wgs84.tif"
cls.read_only_vgr_wgs84 = GeoRaster2.open(path)
def read_only_virtual_geo_raster(self):
return self.read_only_vgr
def read_only_virtual_geo_raster_wgs84(self):
return self.read_only_vgr_wgs84
class GeoRaster2TestGetTile(BaseGeoRasterTestCase):
"""GeoRaster2 get tile tests."""
def test_geo_bounding_tile(self):
gr = self.read_only_virtual_geo_raster()
gv = gr.footprint().reproject(WGS84_CRS)
bounding_tile = mercantile.bounding_tile(*gv.get_shape(gv.crs).bounds)
self.assertEqual(bounding_tile, (37108, 25248, 16))
@patch.object(GeoRaster2, 'crop')
def test_fails_with_empty_raster_for_tile_out_of_raster_area(self, mock__crop):
for raster in [self.read_only_virtual_geo_raster(), self.read_only_virtual_geo_raster_wgs84()]:
r = raster.get_tile(16384, 16383, 15)
self.assertTrue((r.image.data == 0).all())
self.assertTrue((r.image.mask).all())
self.assertEqual(r.image.shape, (3, 256, 256))
self.assertEqual(r.crs, WEB_MERCATOR_CRS)
mock__crop.assert_not_called()
def test_get_all_raster_in_a_single_tile(self):
for raster in [self.read_only_virtual_geo_raster(), self.read_only_virtual_geo_raster_wgs84()]:
p = raster.footprint().reproject(WGS84_CRS).centroid
r = raster.get_tile(*mercantile.tile(lng=p.x, lat=p.y, zoom=11))
self.assertFalse((r.image.data == 0).all())
self.assertFalse((r.image.mask).all())
self.assertEqual(r.image.shape, (3, 256, 256))
self.assertEqual(r.crs, WEB_MERCATOR_CRS)
def test_get_tile_for_different_zoom_levels(self):
for raster in [self.read_only_virtual_geo_raster(), self.read_only_virtual_geo_raster_wgs84()]:
for zoom in tiles:
r = raster.get_tile(*tiles[zoom])
self.assertFalse((r.image.data == 0).all())
self.assertFalse((r.image.mask).all())
self.assertEqual(r.image.shape, (3, 256, 256))
def test_get_tile_from_different_crs_tile_is_not_tilted(self):
raster = self.read_only_virtual_geo_raster_wgs84()
r = raster.get_tile(*tiles[18])
self.assertEqual(1, len(np.unique(r.image.mask)))
def test_get_tile_from_different_crs_tile_is_not_tilted_with_different_buffer(self):
raster = self.read_only_virtual_geo_raster_wgs84()
os.environ["TELLURIC_GET_TILE_BUFFER"] = "0"
try:
r = raster.get_tile(*tiles[18])
        finally:
del os.environ["TELLURIC_GET_TILE_BUFFER"]
self.assertEqual(2, len(np.unique(r.image.mask)))
def test_get_entire_all_raster(self):
vr = self.read_only_virtual_geo_raster()
roi = GeoVector.from_xyz(37108, 25248, 16)
r = vr.crop(roi)
self.assertFalse((r.image.data == 0).all())
self.assertFalse((r.image.mask).all())
self.assertEqual(r.shape, (3, 612, 612))
def test_fails_with_empty_raster_for_tile_out_of_raster_area_with_no_tile_size(self):
vr = self.read_only_virtual_geo_raster()
roi = GeoVector.from_xyz(16384, 16383, 15)
r = vr.crop(roi)
self.assertTrue((r.image.data == 0).all())
self.assertTrue((r.image.mask).all())
self.assertEqual(r.image.shape, (3, 1223, 1223))
def test_get_window_of_full_resolution(self):
vr = self.read_only_virtual_geo_raster()
win = Window(0, 0, 300, 300)
r = vr.get_window(win)
self.assertFalse((r.image.data == 0).all())
self.assertFalse((r.image.mask).all())
self.assertEqual(r.image.shape, (3, 300, 300))
def test_get_window_resize_to_256(self):
vr = self.read_only_virtual_geo_raster()
win = Window(0, 0, 300, 300)
r = vr.get_window(win, xsize=256, ysize=256)
self.assertFalse((r.image.data == 0).all())
self.assertFalse((r.image.mask).all())
self.assertEqual(r.image.shape, (3, 256, 256))
def test_get_window_of_non_square_resize_to_256(self):
vr = self.read_only_virtual_geo_raster()
win = Window(0, 0, 300, 400)
r = vr.get_window(win, xsize=256, ysize=256)
self.assertFalse((r.image.data == 0).all())
self.assertFalse((r.image.mask).all())
self.assertEqual(r.image.shape, (3, 256, 256))
def test_get_window_of_non_square_keeps_size_proportions_for_give_xsize(self):
vr = self.read_only_virtual_geo_raster()
win = Window(0, 0, 300, 400)
r = vr.get_window(win, xsize=150)
self.assertFalse((r.image.data == 0).all())
self.assertFalse((r.image.mask).all())
self.assertEqual(r.image.shape, (3, 200, 150))
def test_get_window_of_non_square_keeps_size_proportions_for_give_ysize(self):
vr = self.read_only_virtual_geo_raster()
win = Window(0, 0, 300, 400)
r = vr.get_window(win, ysize=200)
self.assertFalse((r.image.data == 0).all())
self.assertFalse((r.image.mask).all())
self.assertEqual(r.image.shape, (3, 200, 150))
def test_get_window_width_height_correctness(self):
# See https://publicgitlab.satellogic.com/telluric/telluric/issues/58
vr = self.read_only_virtual_geo_raster()
expected_height = 200
win = Window(0, vr.height - expected_height, 1, expected_height)
r = vr.get_window(win)
self.assertEqual(r.image.shape, (3, expected_height, 1))
class GeoRasterCropTest(BaseGeoRasterTestCase):
metric_affine = Affine(1, 0.0, 2653750, 0.0, -1, 4594461)
def test_crop_in_memory_and_off_memory_without_resizing_are_the_same(self):
coords = mercantile.xy_bounds(*tiles[18])
shape = GeoVector(Polygon.from_bounds(*coords), WEB_MERCATOR_CRS)
raster = self.read_only_virtual_geo_raster()
with NamedTemporaryFile(mode='w+b', suffix=".tif") as rf:
raster.save(rf.name)
raster2 = GeoRaster2.open(rf.name)
off_memory_crop = raster2.crop(shape)
# load the image data
raster2.image
in_memory_crop = raster2.crop(shape)
self.assertEqual(off_memory_crop, in_memory_crop)
@window_data
def test_crop_and_get_tile_do_the_same(self):
coords = mercantile.xy_bounds(*tiles[15])
shape = GeoVector(Polygon.from_bounds(*coords), WEB_MERCATOR_CRS)
raster = self.read_only_virtual_geo_raster()
with NamedTemporaryFile(mode='w+b', suffix=".tif") as rf:
raster.save(rf.name)
raster2 = GeoRaster2.open(rf.name)
tile15 = raster2.get_tile(*tiles[15])
# load the image data
raster2.image
cropped15 = raster2.crop(shape, MERCATOR_RESOLUTION_MAPPING[15])
self.assertEqual(tile15, cropped15)
@window_data
def test_crop_and_get_tile_do_the_same_when_image_is_populated(self):
coords = mercantile.xy_bounds(*tiles[15])
shape = GeoVector(Polygon.from_bounds(*coords), WEB_MERCATOR_CRS)
raster = self.read_only_virtual_geo_raster()
with NamedTemporaryFile(mode='w+b', suffix=".tif") as rf:
raster.save(rf.name)
raster = GeoRaster2.open(rf.name)
tile15 = raster.get_tile(*tiles[15])
raster._populate_from_rasterio_object(read_image=True)
cropped_15 = raster.crop(shape, MERCATOR_RESOLUTION_MAPPING[15])
self.assertEqual(tile15, cropped_15)
@window_data
def test_crop_image_from_and_get_win_do_the_same_with_resize(self):
bounds = (2, 3, 4, 5)
win = rasterio.windows.Window(bounds[0], bounds[1], bounds[2] - bounds[0], bounds[3] - bounds[1])
xsize = round((bounds[2] - bounds[0]) / 2)
ysize = round((bounds[3] - bounds[1]) / 2)
raster = self.read_only_virtual_geo_raster()
with NamedTemporaryFile(mode='w+b', suffix=".tif") as rf:
raster.save(rf.name)
raster.save('area.tif', tags={'AREA_OR_POINT': 'Area'})
raster.save('point.tif', tags={'AREA_OR_POINT': 'Point'})
saved_raster = GeoRaster2.open(rf.name)
cropped_win = saved_raster.get_window(win, xsize=xsize, ysize=ysize)
saved_raster_area = GeoRaster2.open('area.tif')
cropped_win_area = saved_raster_area.get_window(win, xsize=xsize, ysize=ysize)
saved_raster_point = GeoRaster2.open('point.tif')
cropped_win_point = saved_raster_point.get_window(win, xsize=xsize, ysize=ysize)
cropped_image = raster._crop(bounds, xsize=xsize, ysize=ysize)
print('cropped_win_area pixels\n', cropped_win_area.image)
print('cropped_win_point pixels\n', cropped_win_point.image)
print('cropped_win pixels\n', cropped_win.image)
print('cropped_image pixels\n', cropped_image.image)
if (cropped_win_point == cropped_win_area):
print('point == area')
if (cropped_image == cropped_win_area):
print('image == area')
if (cropped_image == cropped_win_point):
print('image == point')
if (cropped_win == cropped_win_area):
print('win == area')
if (cropped_win == cropped_win_point):
print('win == point')
self.assertEqual(cropped_image, cropped_win)
@framing
def test_crop_and_get_tile_do_the_same_when_image_is_populated_first_high_zoom(self):
coords = mercantile.xy_bounds(*tiles[17])
shape = GeoVector(Polygon.from_bounds(*coords), WEB_MERCATOR_CRS)
raster = self.read_only_virtual_geo_raster()
with NamedTemporaryFile(mode='w+b', suffix=".tif") as rf:
raster.save(rf.name)
raster = GeoRaster2.open(rf.name)
raster._populate_from_rasterio_object(read_image=True)
tile17 = raster.get_tile(*tiles[17])
cropped_17 = raster.crop(shape, MERCATOR_RESOLUTION_MAPPING[17])
self.assertEqual(tile17, cropped_17)
@framing
def test_crop_and_get_tile_do_the_same_when_image_is_populated_first_mid_zoom(self):
coords = mercantile.xy_bounds(*tiles[15])
shape = GeoVector(Polygon.from_bounds(*coords), WEB_MERCATOR_CRS)
raster = self.read_only_virtual_geo_raster()
with NamedTemporaryFile(mode='w+b', suffix=".tif") as rf:
raster.save(rf.name)
raster = GeoRaster2.open(rf.name)
raster._populate_from_rasterio_object(read_image=True)
tile15 = raster.get_tile(*tiles[15])
cropped_15 = raster.crop(shape, MERCATOR_RESOLUTION_MAPPING[15])
self.assertEqual(tile15, cropped_15)
@framing
def test_crop_and_get_tile_do_the_same_when_image_is_populated_first_for_low_zoom(self):
coords = mercantile.xy_bounds(*tiles[11])
shape = GeoVector(Polygon.from_bounds(*coords), WEB_MERCATOR_CRS)
raster = self.read_only_virtual_geo_raster()
with NamedTemporaryFile(mode='w+b', suffix=".tif") as rf:
raster.save(rf.name)
raster = GeoRaster2.open(rf.name)
raster._populate_from_rasterio_object(read_image=True)
tile11 = raster.get_tile(*tiles[11])
cropped_11 = raster.crop(shape, MERCATOR_RESOLUTION_MAPPING[11])
self.assertEqual(tile11, cropped_11)
def test_crop_image_from_and_get_win_do_the_same_full_resolution(self):
bounds = (20, 13, 40, 15)
win = rasterio.windows.Window(bounds[0], bounds[1], bounds[2] - bounds[0], bounds[3] - bounds[1])
raster = self.read_only_virtual_geo_raster()
with NamedTemporaryFile(mode='w+b', suffix=".tif") as rf:
raster.save(rf.name)
saved_raster = GeoRaster2.open(rf.name)
cropped_win = saved_raster.get_window(win)
cropped_image = raster._crop(bounds)
self.assertEqual(cropped_image, cropped_win)
@patch.object(GeoRaster2, '_crop')
def test_crop_use_crop_image_for_a_loaded_image(self, mock__crop):
coords = mercantile.xy_bounds(*tiles[15])
shape = GeoVector(Polygon.from_bounds(*coords), WEB_MERCATOR_CRS)
raster = self.read_only_virtual_geo_raster()
raster.crop(shape, MERCATOR_RESOLUTION_MAPPING[15])
assert mock__crop.called_once
@patch.object(GeoRaster2, 'get_window')
def test_crop_use_get_window_for_a_not_loaded_image(self, mock_get_window):
coords = mercantile.xy_bounds(*tiles[15])
shape = GeoVector(Polygon.from_bounds(*coords), WEB_MERCATOR_CRS)
raster = self.read_only_virtual_geo_raster()
with NamedTemporaryFile(mode='w+b', suffix=".tif") as rf:
raster.save(rf.name)
raster = GeoRaster2.open(rf.name)
raster.crop(shape, MERCATOR_RESOLUTION_MAPPING[15])
assert mock_get_window.called_once
def test_crop_returns_full_resolution_as_default(self):
coords = mercantile.xy_bounds(*tiles[17])
shape = GeoVector(Polygon.from_bounds(*coords), WEB_MERCATOR_CRS)
raster = self.read_only_virtual_geo_raster()
_, win = raster._vector_to_raster_bounds(shape)
cropped = raster.crop(shape)
self.assertEqual(cropped.shape, (raster.num_bands, round(win.height), round(win.width)))
self.assertEqual(cropped.affine[0], raster.affine[0])
def test_memory_crop_returns_resized_resolution(self):
coords = mercantile.xy_bounds(*tiles[18])
shape = GeoVector(Polygon.from_bounds(*coords), WEB_MERCATOR_CRS)
raster = self.read_only_virtual_geo_raster()
cropped = raster.crop(shape, MERCATOR_RESOLUTION_MAPPING[18])
self.assertEqual(cropped.shape, (raster.num_bands, 256, 256))
self.assertAlmostEqual(cropped.affine[0], MERCATOR_RESOLUTION_MAPPING[18], 2)
def test_geographic_crop(self):
raster = self.read_only_virtual_geo_raster_wgs84()
rhombus_on_image = Polygon([[0, 2], [1, 1], [2, 2], [1, 3]]) # in pixels
rhombus_world = raster.to_world(rhombus_on_image)
cropped = raster.crop(rhombus_world)
r = raster[0:2, 1:3]
assert cropped == r
def test_geographic_crop_with_resize(self):
coords = mercantile.xy_bounds(*tiles[17])
raster = self.read_only_virtual_geo_raster_wgs84()
vector = GeoVector(Polygon.from_bounds(*coords), crs=WEB_MERCATOR_CRS)
x_ex_res, y_ex_res = convert_resolution_from_meters_to_deg(
self.metric_affine[6], MERCATOR_RESOLUTION_MAPPING[17])
cropped = raster.crop(vector, (x_ex_res, y_ex_res))
self.assertAlmostEqual(cropped.affine[0], x_ex_res)
self.assertAlmostEqual(abs(cropped.affine[4]), y_ex_res, 6)
def test_crop_raises_error_for_impossible_transformation(self):
raster = self.read_only_virtual_geo_raster()
vector = GeoVector(Polygon.from_bounds(-180, -90, 180, 90), crs=WGS84_CRS)
with self.assertRaises(GeoRaster2Error):
raster.crop(vector)
def test_crop_of_rasters_with_opposite_affine_and_data_return_the_same(self):
array = np.arange(0, 20).reshape(1, 4, 5)
array2 = np.arange(19, -1, -1).reshape(1, 4, 5)
array2.sort()
image1 = np.ma.array(array, mask=False)
image2 = np.ma.array(array2, mask=False)
aff2 = Affine.translation(0, -8) * Affine.scale(2, 2)
aff = Affine.scale(2, -2)
r1 = GeoRaster2(image=image1, affine=aff, crs=WEB_MERCATOR_CRS)
r2 = GeoRaster2(image=image2, affine=aff2, crs=WEB_MERCATOR_CRS)
# r1 == r2 # doesn't work, see https://github.com/satellogic/telluric/issues/79
roi = GeoVector(Polygon.from_bounds(0, 0, 3, -3), crs=WEB_MERCATOR_CRS)
r1c = r1.crop(roi)
r2c = r2.crop(roi)
# r1c == r2c # doesn't work, see https://github.com/satellogic/telluric/issues/79
# currently this is the only way to test the result is the same
assert np.all(np.flip(r1c.image, axis=1) == r2c.image)
class GeoRasterMaskedTest(TestCase):
@classmethod
def setUpClass(cls):
cls.dir = TemporaryDirectory()
path = os.path.join(cls.dir.name, 'test_masked_raster.tif')
cls.masked_raster().save(path)
cls.read_only_vgr = GeoRaster2.open(path)
@classmethod
def tearDownClass(cls):
cls.dir.cleanup()
@classmethod
def masked_raster(cls):
data = np.array([
[0, 1, 1, 1],
[0, 2, 0, 2],
[0, 3, 3, 3],
], dtype=np.uint8)
mask = np.array([
[True, False, False, False],
[True, False, False, False],
[True, False, False, False],
], dtype=bool)
image = np.ma.array(
np.repeat(data[np.newaxis, :, :], 3, 0),
mask=np.repeat(mask[np.newaxis, :, :], 3, 0)
)
# Don't use exactly -1.0 for the affine for rasterio < 1.0a13, see
# https://github.com/mapbox/rasterio/issues/1272
affine = Affine.scale(1, -1.0001) * Affine.translation(0, -3)
crs = WGS84_CRS
return GeoRaster2(
image, affine=affine, crs=crs,
)
def read_only_virtual_geo_raster(self):
return self.read_only_vgr
def test_get_smaller_window_respects_mask(self):
window = Window(1, 0, 3, 3)
raster = self.read_only_virtual_geo_raster()
cropped = raster.get_window(window, masked=True)
assert (~cropped.image.mask).all()
def test_get_bigger_window_respects_mask(self):
window = Window(1, 0, 4, 3)
raster = self.read_only_virtual_geo_raster()
cropped = raster.get_window(window, masked=True)
assert cropped.image[:, :, -1].mask.all() # This line of pixels is masked
assert (~cropped.image[:, :, :-1].mask).all() # The rest is not masked
def test_small_read_only_virtual_geo_raster_wgs84_crop():
# See https://github.com/satellogic/telluric/issues/61
roi = GeoVector.from_bounds(xmin=0, ymin=0, xmax=2, ymax=2, crs=WGS84_CRS)
resolution = 1.0 # deg / px
raster = GeoRaster2.empty_from_roi(roi, resolution)
assert raster.crop(roi) == raster.crop(roi, raster.resolution())
@manualtest
class GeoRaster2ManualTest(TestCase):
"""manual testing To be run manually only."""
files = {
'original': 'original2.tif',
'cloudoptimized aligned': 'original2_aligned_cloudoptimized-2.tif',
'mrf aligned': 'original2_aligned.mrf',
'cloudoptimized': 'original2_cloudoptimized-2.tif',
'mrf': 'original2.mrf',
'not aligned cloudoptimized': 'not_aligned_cloudoptimized_2.tif',
'not aligned mrf': 'not_aligned.mrf',
'not aligned mrf split': 'not_aligned_split.mrf',
'aligned mrf split': 'original2_aligned_split.mrf',
'original mrf split': 'original2_split.mrf',
}
resamplings = {
        # 'average': Resampling.average,
# 'nearest': Resampling.nearest,
# 'bilinear': Resampling.bilinear,
'cubic': Resampling.cubic
}
def random_string(self):
import hashlib
now = '%s' % datetime.now()
return hashlib.md5(now.encode('utf-8')).hexdigest()
def run_test_on_real_rasters(self, zoom, resampling, local):
results_arr = np.empty(shape=(len(self.files)), dtype=object)
# with rasterio.Env(CPL_DEBUG=True, GDAL_CACHEMAX=0):
# with rasterio.Env(CPL_DEBUG=False):
print('*' * 80)
print(zoom)
print('*' * 80)
print('#' * 80)
print(resampling.name)
print('#' * 80)
for i, (file_type, file_url) in enumerate(self.files.items()):
if local or 'split' in file_type:
base_url = './notebooks/'
else:
base_url = 'https://ariel.blob.core.windows.net/rastersfortest/'
file_url = base_url + file_url
if local and 'mrf' not in file_type:
new_file = file_url + self.random_string()
os.system("cp %s %s" % (file_url, new_file))
else:
new_file = file_url
print('file type: %s' % file_type)
print('-' * 80)
print('file_url: %s' % file_url)
print('new_file: %s' % new_file)
print('-' * 80)
vr = GeoRaster2.open(new_file)
start = datetime.now()
rasterio_ops = {
'CPL_DEBUG': True,
'GDAL_DISABLE_READDIR_ON_OPEN': 'YES'
}
if 'mrf' not in file_type:
rasterio_ops['CPL_VSIL_CURL_ALLOWED_EXTENSIONS'] = '.tif'
with rasterio.Env(**rasterio_ops):
vr.get_tile(*tiles[zoom], resampling=resampling)
end = datetime.now()
tt = (end - start).total_seconds() * 1000
print("stars time : %s end time: %s total: %s ms" % (start, end, tt))
results_arr[i] = "type: %s, zoom: %i, resampling: %s time: %s msec" % (file_type, zoom,
resampling.name, tt)
if local and 'mrf' not in file_type:
os.system("rm -f %s" % (new_file))
print('=' * 80)
print(results_arr)
def test_zoom_remote_11_resampling_cubic(self):
self.run_test_on_real_rasters(11, Resampling.cubic, False)
def test_zoom_remote_12_resampling_cubic(self):
self.run_test_on_real_rasters(12, Resampling.cubic, False)
def test_zoom_remote_14_resampling_cubic(self):
self.run_test_on_real_rasters(14, Resampling.cubic, False)
def test_zoom_remote_15_resampling_cubic(self):
self.run_test_on_real_rasters(15, Resampling.cubic, False)
def test_zoom_remote_17_resampling_cubic(self):
self.run_test_on_real_rasters(17, Resampling.cubic, False)
def test_zoom_remote_18_resampling_cubic(self):
self.run_test_on_real_rasters(18, Resampling.cubic, False)
| 41.684746 | 113 | 0.654591 | [
"MIT"
] | AmitAronovitch/telluric | tests/test_georaster_tiling.py | 24,594 | Python |
from plotly.basedatatypes import BaseTraceType
import copy
class Splom(BaseTraceType):
# customdata
# ----------
@property
def customdata(self):
"""
        Assigns extra data to each datum. This may be useful when
        listening to hover, click and selection events. Note that
        "scatter" traces also append customdata items in the markers'
        DOM elements.
The 'customdata' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self['customdata']
@customdata.setter
def customdata(self, val):
self['customdata'] = val
# customdatasrc
# -------------
@property
def customdatasrc(self):
"""
Sets the source reference on plot.ly for customdata .
The 'customdatasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['customdatasrc']
@customdatasrc.setter
def customdatasrc(self, val):
self['customdatasrc'] = val
# diagonal
# --------
@property
def diagonal(self):
"""
The 'diagonal' property is an instance of Diagonal
that may be specified as:
- An instance of plotly.graph_objs.splom.Diagonal
- A dict of string/value properties that will be passed
to the Diagonal constructor
Supported dict properties:
visible
Determines whether or not subplots on the
diagonal are displayed.
Returns
-------
plotly.graph_objs.splom.Diagonal
"""
return self['diagonal']
@diagonal.setter
def diagonal(self, val):
self['diagonal'] = val
# dimensions
# ----------
@property
def dimensions(self):
"""
The 'dimensions' property is a tuple of instances of
Dimension that may be specified as:
- A list or tuple of instances of plotly.graph_objs.splom.Dimension
- A list or tuple of dicts of string/value properties that
will be passed to the Dimension constructor
Supported dict properties:
axis
plotly.graph_objs.splom.dimension.Axis instance
or dict with compatible properties
label
Sets the label corresponding to this splom
dimension.
name
When used in a template, named items are
created in the output figure in addition to any
items the figure already has in this array. You
can modify these items in the output figure by
making your own item with `templateitemname`
matching this `name` alongside your
modifications (including `visible: false` or
`enabled: false` to hide it). Has no effect
outside of a template.
templateitemname
Used to refer to a named item in this array in
the template. Named items from the template
will be created even without a matching item in
the input figure, but you can modify one by
making an item with `templateitemname` matching
its `name`, alongside your modifications
(including `visible: false` or `enabled: false`
to hide it). If there is no template or no
matching item, this item will be hidden unless
you explicitly show it with `visible: true`.
values
Sets the dimension values to be plotted.
valuessrc
Sets the source reference on plot.ly for
values .
visible
Determines whether or not this dimension is
shown on the graph. Note that even visible
false dimension contribute to the default grid
generate by this splom trace.
Returns
-------
tuple[plotly.graph_objs.splom.Dimension]
"""
return self['dimensions']
@dimensions.setter
def dimensions(self, val):
self['dimensions'] = val
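    # A minimal sketch of a typical `dimensions` value -- one dict per
    # numeric variable; the labels and values below are hypothetical:
    #
    #     dimensions=[
    #         dict(label='height', values=[150, 160, 175]),
    #         dict(label='weight', values=[55, 62, 80]),
    #         dict(label='age', values=[23, 31, 47]),
    #     ]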
# dimensiondefaults
# -----------------
@property
def dimensiondefaults(self):
"""
When used in a template (as
layout.template.data.splom.dimensiondefaults), sets the default
property values to use for elements of splom.dimensions
The 'dimensiondefaults' property is an instance of Dimension
that may be specified as:
- An instance of plotly.graph_objs.splom.Dimension
- A dict of string/value properties that will be passed
to the Dimension constructor
Supported dict properties:
Returns
-------
plotly.graph_objs.splom.Dimension
"""
return self['dimensiondefaults']
@dimensiondefaults.setter
def dimensiondefaults(self, val):
self['dimensiondefaults'] = val
# hoverinfo
# ---------
@property
def hoverinfo(self):
"""
Determines which trace information appear on hover. If `none`
or `skip` are set, no information is displayed upon hovering.
But, if `none` is set, click and hover events are still fired.
The 'hoverinfo' property is a flaglist and may be specified
as a string containing:
- Any combination of ['x', 'y', 'z', 'text', 'name'] joined with '+' characters
(e.g. 'x+y')
OR exactly one of ['all', 'none', 'skip'] (e.g. 'skip')
- A list or array of the above
Returns
-------
Any|numpy.ndarray
"""
return self['hoverinfo']
@hoverinfo.setter
def hoverinfo(self, val):
self['hoverinfo'] = val
# hoverinfosrc
# ------------
@property
def hoverinfosrc(self):
"""
Sets the source reference on plot.ly for hoverinfo .
The 'hoverinfosrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['hoverinfosrc']
@hoverinfosrc.setter
def hoverinfosrc(self, val):
self['hoverinfosrc'] = val
# hoverlabel
# ----------
@property
def hoverlabel(self):
"""
The 'hoverlabel' property is an instance of Hoverlabel
that may be specified as:
- An instance of plotly.graph_objs.splom.Hoverlabel
- A dict of string/value properties that will be passed
to the Hoverlabel constructor
Supported dict properties:
bgcolor
Sets the background color of the hover labels
for this trace
bgcolorsrc
Sets the source reference on plot.ly for
bgcolor .
bordercolor
Sets the border color of the hover labels for
this trace.
bordercolorsrc
Sets the source reference on plot.ly for
bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the length (in number of characters) of
the trace name in the hover labels for this
trace. -1 shows the whole name regardless of
length. 0-3 shows the first 0-3 characters, and
an integer >3 will show the whole name if it is
less than that many characters, but if it is
longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on plot.ly for
namelength .
Returns
-------
plotly.graph_objs.splom.Hoverlabel
"""
return self['hoverlabel']
@hoverlabel.setter
def hoverlabel(self, val):
self['hoverlabel'] = val
# hovertemplate
# -------------
@property
def hovertemplate(self):
"""
Template string used for rendering the information that appear
on hover box. Note that this will override `hoverinfo`.
Variables are inserted using %{variable}, for example "y:
%{y}". Numbers are formatted using d3-format's syntax
%{variable:d3-format}, for example "Price: %{y:$.2f}". See http
s://github.com/d3/d3-format/blob/master/README.md#locale_format
for details on the formatting syntax. The variables available
in `hovertemplate` are the ones emitted as event data described
at this link https://plot.ly/javascript/plotlyjs-events/#event-
data. Additionally, every attributes that can be specified per-
point (the ones that are `arrayOk: true`) are available.
Anything contained in tag `<extra>` is displayed in the
secondary box, for example "<extra>{fullData.name}</extra>".
The 'hovertemplate' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self['hovertemplate']
@hovertemplate.setter
def hovertemplate(self, val):
self['hovertemplate'] = val
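    # A minimal sketch of a hovertemplate string using the %{variable} and
    # d3-format syntax described above (the field names are hypothetical):
    #
    #     hovertemplate="x: %{x}<br>y: %{y:.2f}<extra>my trace</extra>"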
# hovertemplatesrc
# ----------------
@property
def hovertemplatesrc(self):
"""
Sets the source reference on plot.ly for hovertemplate .
The 'hovertemplatesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['hovertemplatesrc']
@hovertemplatesrc.setter
def hovertemplatesrc(self, val):
self['hovertemplatesrc'] = val
# hovertext
# ---------
@property
def hovertext(self):
"""
Same as `text`.
The 'hovertext' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self['hovertext']
@hovertext.setter
def hovertext(self, val):
self['hovertext'] = val
# hovertextsrc
# ------------
@property
def hovertextsrc(self):
"""
Sets the source reference on plot.ly for hovertext .
The 'hovertextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['hovertextsrc']
@hovertextsrc.setter
def hovertextsrc(self, val):
self['hovertextsrc'] = val
# ids
# ---
@property
def ids(self):
"""
        Assigns id labels to each datum. These ids are used for object
        constancy of data points during animation. Should be an array of
        strings, not numbers or any other type.
The 'ids' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self['ids']
@ids.setter
def ids(self, val):
self['ids'] = val
# idssrc
# ------
@property
def idssrc(self):
"""
Sets the source reference on plot.ly for ids .
The 'idssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['idssrc']
@idssrc.setter
def idssrc(self, val):
self['idssrc'] = val
# legendgroup
# -----------
@property
def legendgroup(self):
"""
Sets the legend group for this trace. Traces part of the same
legend group hide/show at the same time when toggling legend
items.
The 'legendgroup' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self['legendgroup']
@legendgroup.setter
def legendgroup(self, val):
self['legendgroup'] = val
# marker
# ------
@property
def marker(self):
"""
The 'marker' property is an instance of Marker
that may be specified as:
- An instance of plotly.graph_objs.splom.Marker
- A dict of string/value properties that will be passed
to the Marker constructor
Supported dict properties:
autocolorscale
Determines whether the colorscale is a default
palette (`autocolorscale: true`) or the palette
determined by `marker.colorscale`. Has an
effect only if in `marker.color`is set to a
numerical array. In case `colorscale` is
unspecified or `autocolorscale` is true, the
default palette will be chosen according to
whether numbers in the `color` array are all
positive, all negative or mixed.
cauto
Determines whether or not the color domain is
computed with respect to the input data (here
in `marker.color`) or the bounds set in
`marker.cmin` and `marker.cmax` Has an effect
only if in `marker.color`is set to a numerical
array. Defaults to `false` when `marker.cmin`
and `marker.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has
an effect only if in `marker.color`is set to a
numerical array. Value should have the same
units as in `marker.color` and if set,
`marker.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by
scaling `marker.cmin` and/or `marker.cmax` to
be equidistant to this point. Has an effect
only if in `marker.color`is set to a numerical
array. Value should have the same units as in
`marker.color`. Has no effect when
`marker.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has
an effect only if in `marker.color`is set to a
numerical array. Value should have the same
units as in `marker.color` and if set,
`marker.cmax` must be set as well.
color
                Sets the marker color. It accepts either a
specific color or an array of numbers that are
mapped to the colorscale relative to the max
and min values of the array or relative to
`marker.cmin` and `marker.cmax` if set.
colorbar
plotly.graph_objs.splom.marker.ColorBar
instance or dict with compatible properties
colorscale
Sets the colorscale. Has an effect only if in
`marker.color`is set to a numerical array. The
colorscale must be an array containing arrays
mapping a normalized value to an rgb, rgba,
hex, hsl, hsv, or named color string. At
minimum, a mapping for the lowest (0) and
highest (1) values are required. For example,
                `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`. To
control the bounds of the colorscale in color
space, use`marker.cmin` and `marker.cmax`.
Alternatively, `colorscale` may be a palette
name string of the following list: Greys,YlGnBu
,Greens,YlOrRd,Bluered,RdBu,Reds,Blues,Picnic,R
ainbow,Portland,Jet,Hot,Blackbody,Earth,Electri
c,Viridis,Cividis.
colorsrc
Sets the source reference on plot.ly for color
.
line
plotly.graph_objs.splom.marker.Line instance or
dict with compatible properties
opacity
Sets the marker opacity.
opacitysrc
Sets the source reference on plot.ly for
opacity .
reversescale
Reverses the color mapping if true. Has an
effect only if in `marker.color`is set to a
numerical array. If true, `marker.cmin` will
correspond to the last color in the array and
`marker.cmax` will correspond to the first
color.
showscale
Determines whether or not a colorbar is
displayed for this trace. Has an effect only if
in `marker.color`is set to a numerical array.
size
Sets the marker size (in px).
sizemin
Has an effect only if `marker.size` is set to a
numerical array. Sets the minimum size (in px)
of the rendered marker points.
sizemode
Has an effect only if `marker.size` is set to a
numerical array. Sets the rule for which the
data in `size` is converted to pixels.
sizeref
Has an effect only if `marker.size` is set to a
numerical array. Sets the scale factor used to
determine the rendered size of marker points.
Use with `sizemin` and `sizemode`.
sizesrc
Sets the source reference on plot.ly for size
.
symbol
Sets the marker symbol type. Adding 100 is
equivalent to appending "-open" to a symbol
name. Adding 200 is equivalent to appending
"-dot" to a symbol name. Adding 300 is
equivalent to appending "-open-dot" or "dot-
open" to a symbol name.
symbolsrc
Sets the source reference on plot.ly for
symbol .
Returns
-------
plotly.graph_objs.splom.Marker
"""
return self['marker']
@marker.setter
def marker(self, val):
self['marker'] = val
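    # A minimal sketch of a `marker` value that maps a numeric array onto a
    # colorscale (the numbers shown are hypothetical):
    #
    #     marker=dict(
    #         size=5,
    #         color=[0.1, 0.5, 0.9],   # numeric array -> colorscale
    #         colorscale='Viridis',
    #         showscale=True,
    #     )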
# name
# ----
@property
def name(self):
"""
        Sets the trace name. The trace name appears as the legend item
and on hover.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self['name']
@name.setter
def name(self, val):
self['name'] = val
# opacity
# -------
@property
def opacity(self):
"""
Sets the opacity of the trace.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self['opacity']
@opacity.setter
def opacity(self, val):
self['opacity'] = val
# selected
# --------
@property
def selected(self):
"""
The 'selected' property is an instance of Selected
that may be specified as:
- An instance of plotly.graph_objs.splom.Selected
- A dict of string/value properties that will be passed
to the Selected constructor
Supported dict properties:
marker
plotly.graph_objs.splom.selected.Marker
instance or dict with compatible properties
Returns
-------
plotly.graph_objs.splom.Selected
"""
return self['selected']
@selected.setter
def selected(self, val):
self['selected'] = val
# selectedpoints
# --------------
@property
def selectedpoints(self):
"""
Array containing integer indices of selected points. Has an
effect only for traces that support selections. Note that an
        empty array means an empty selection where the `unselected` styles
        are turned on for all points, whereas any other non-array value
        means no selection at all, in which case the `selected` and
        `unselected` styles have no effect.
The 'selectedpoints' property accepts values of any type
Returns
-------
Any
"""
return self['selectedpoints']
@selectedpoints.setter
def selectedpoints(self, val):
self['selectedpoints'] = val
# showlegend
# ----------
@property
def showlegend(self):
"""
Determines whether or not an item corresponding to this trace
is shown in the legend.
The 'showlegend' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self['showlegend']
@showlegend.setter
def showlegend(self, val):
self['showlegend'] = val
# showlowerhalf
# -------------
@property
def showlowerhalf(self):
"""
Determines whether or not subplots on the lower half from the
diagonal are displayed.
The 'showlowerhalf' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self['showlowerhalf']
@showlowerhalf.setter
def showlowerhalf(self, val):
self['showlowerhalf'] = val
# showupperhalf
# -------------
@property
def showupperhalf(self):
"""
Determines whether or not subplots on the upper half from the
diagonal are displayed.
The 'showupperhalf' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self['showupperhalf']
@showupperhalf.setter
def showupperhalf(self, val):
self['showupperhalf'] = val
# stream
# ------
@property
def stream(self):
"""
The 'stream' property is an instance of Stream
that may be specified as:
- An instance of plotly.graph_objs.splom.Stream
- A dict of string/value properties that will be passed
to the Stream constructor
Supported dict properties:
maxpoints
Sets the maximum number of points to keep on
the plots from an incoming stream. If
`maxpoints` is set to 50, only the newest 50
points will be displayed on the plot.
token
The stream id number links a data trace on a
plot with a stream. See
https://plot.ly/settings for more details.
Returns
-------
plotly.graph_objs.splom.Stream
"""
return self['stream']
@stream.setter
def stream(self, val):
self['stream'] = val
# text
# ----
@property
def text(self):
"""
Sets text elements associated with each (x,y) pair to appear on
hover. If a single string, the same string appears over all the
        data points. If an array of strings, the items are mapped in
        order to this trace's (x,y) coordinates.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self['text']
@text.setter
def text(self, val):
self['text'] = val
# textsrc
# -------
@property
def textsrc(self):
"""
Sets the source reference on plot.ly for text .
The 'textsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['textsrc']
@textsrc.setter
def textsrc(self, val):
self['textsrc'] = val
# uid
# ---
@property
def uid(self):
"""
        Assigns an id to this trace. Use this to provide object
constancy between traces during animations and transitions.
The 'uid' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self['uid']
@uid.setter
def uid(self, val):
self['uid'] = val
# uirevision
# ----------
@property
def uirevision(self):
"""
Controls persistence of some user-driven changes to the trace:
`constraintrange` in `parcoords` traces, as well as some
`editable: true` modifications such as `name` and
`colorbar.title`. Defaults to `layout.uirevision`. Note that
other user-driven trace attribute changes are controlled by
`layout` attributes: `trace.visible` is controlled by
`layout.legend.uirevision`, `selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)` (accessible
with `config: {editable: true}`) is controlled by
`layout.editrevision`. Trace changes are tracked by `uid`,
which only falls back on trace index if no `uid` is provided.
So if your app can add/remove traces before the end of the
`data` array, such that the same trace has a different index,
you can still preserve user-driven changes if you give each
trace a `uid` that stays with it as it moves.
The 'uirevision' property accepts values of any type
Returns
-------
Any
"""
return self['uirevision']
@uirevision.setter
def uirevision(self, val):
self['uirevision'] = val
# unselected
# ----------
@property
def unselected(self):
"""
The 'unselected' property is an instance of Unselected
that may be specified as:
- An instance of plotly.graph_objs.splom.Unselected
- A dict of string/value properties that will be passed
to the Unselected constructor
Supported dict properties:
marker
plotly.graph_objs.splom.unselected.Marker
instance or dict with compatible properties
Returns
-------
plotly.graph_objs.splom.Unselected
"""
return self['unselected']
@unselected.setter
def unselected(self, val):
self['unselected'] = val
# visible
# -------
@property
def visible(self):
"""
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as a
legend item (provided that the legend itself is visible).
The 'visible' property is an enumeration that may be specified as:
- One of the following enumeration values:
[True, False, 'legendonly']
Returns
-------
Any
"""
return self['visible']
@visible.setter
def visible(self, val):
self['visible'] = val
# xaxes
# -----
@property
def xaxes(self):
"""
Sets the list of x axes corresponding to dimensions of this
splom trace. By default, a splom will match the first N xaxes
where N is the number of input dimensions. Note that, in case
where `diagonal.visible` is false and `showupperhalf` or
`showlowerhalf` is false, this splom trace will generate one
less x-axis and one less y-axis.
The 'xaxes' property is an info array that may be specified as:
* a list of elements where:
The 'xaxes[i]' property is an identifier of a particular
subplot, of type 'x', that may be specified as the string 'x'
optionally followed by an integer >= 1
(e.g. 'x', 'x1', 'x2', 'x3', etc.)
Returns
-------
list
"""
return self['xaxes']
@xaxes.setter
def xaxes(self, val):
self['xaxes'] = val
# yaxes
# -----
@property
def yaxes(self):
"""
Sets the list of y axes corresponding to dimensions of this
splom trace. By default, a splom will match the first N yaxes
where N is the number of input dimensions. Note that, in case
where `diagonal.visible` is false and `showupperhalf` or
`showlowerhalf` is false, this splom trace will generate one
less x-axis and one less y-axis.
The 'yaxes' property is an info array that may be specified as:
* a list of elements where:
The 'yaxes[i]' property is an identifier of a particular
subplot, of type 'y', that may be specified as the string 'y'
optionally followed by an integer >= 1
(e.g. 'y', 'y1', 'y2', 'y3', etc.)
Returns
-------
list
"""
return self['yaxes']
@yaxes.setter
def yaxes(self, val):
self['yaxes'] = val
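    # A minimal sketch of explicit axis assignment for a three-dimension
    # splom, using the 'x'/'x2'/... and 'y'/'y2'/... identifiers described
    # above:
    #
    #     xaxes=['x', 'x2', 'x3'],
    #     yaxes=['y', 'y2', 'y3']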
# type
# ----
@property
def type(self):
return self._props['type']
# property parent name
# --------------------
@property
def _parent_path_str(self):
return ''
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
customdata
            Assigns extra data to each datum. This may be useful when
            listening to hover, click and selection events. Note
            that "scatter" traces also append customdata items in
            the markers' DOM elements.
customdatasrc
Sets the source reference on plot.ly for customdata .
diagonal
plotly.graph_objs.splom.Diagonal instance or dict with
compatible properties
dimensions
plotly.graph_objs.splom.Dimension instance or dict with
compatible properties
dimensiondefaults
When used in a template (as
layout.template.data.splom.dimensiondefaults), sets the
default property values to use for elements of
splom.dimensions
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on plot.ly for hoverinfo .
hoverlabel
plotly.graph_objs.splom.Hoverlabel instance or dict
with compatible properties
hovertemplate
Template string used for rendering the information that
appear on hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}". Numbers are formatted using
d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}". See https://github.com/d3/d3-format
/blob/master/README.md#locale_format for details on the
formatting syntax. The variables available in
`hovertemplate` are the ones emitted as event data
described at this link
https://plot.ly/javascript/plotlyjs-events/#event-data.
Additionally, every attributes that can be specified
per-point (the ones that are `arrayOk: true`) are
available. Anything contained in tag `<extra>` is
displayed in the secondary box, for example
"<extra>{fullData.name}</extra>".
hovertemplatesrc
Sets the source reference on plot.ly for hovertemplate
.
hovertext
Same as `text`.
hovertextsrc
Sets the source reference on plot.ly for hovertext .
ids
            Assigns id labels to each datum. These ids are used for object
            constancy of data points during animation. Should be an
            array of strings, not numbers or any other type.
idssrc
Sets the source reference on plot.ly for ids .
legendgroup
Sets the legend group for this trace. Traces part of
the same legend group hide/show at the same time when
toggling legend items.
marker
plotly.graph_objs.splom.Marker instance or dict with
compatible properties
name
            Sets the trace name. The trace name appears as the
legend item and on hover.
opacity
Sets the opacity of the trace.
selected
plotly.graph_objs.splom.Selected instance or dict with
compatible properties
selectedpoints
Array containing integer indices of selected points.
Has an effect only for traces that support selections.
Note that an empty array means an empty selection where
            the `unselected` styles are turned on for all points, whereas
            any other non-array value means no selection at all, in which
            case the `selected` and `unselected` styles have no effect.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
showlowerhalf
Determines whether or not subplots on the lower half
from the diagonal are displayed.
showupperhalf
Determines whether or not subplots on the upper half
from the diagonal are displayed.
stream
plotly.graph_objs.splom.Stream instance or dict with
compatible properties
text
Sets text elements associated with each (x,y) pair to
appear on hover. If a single string, the same string
appears over all the data points. If an array of
            strings, the items are mapped in order to this
            trace's (x,y) coordinates.
textsrc
Sets the source reference on plot.ly for text .
uid
            Assigns an id to this trace. Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
unselected
plotly.graph_objs.splom.Unselected instance or dict
with compatible properties
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
xaxes
Sets the list of x axes corresponding to dimensions of
this splom trace. By default, a splom will match the
first N xaxes where N is the number of input
dimensions. Note that, in case where `diagonal.visible`
is false and `showupperhalf` or `showlowerhalf` is
false, this splom trace will generate one less x-axis
and one less y-axis.
yaxes
Sets the list of y axes corresponding to dimensions of
this splom trace. By default, a splom will match the
first N yaxes where N is the number of input
dimensions. Note that, in case where `diagonal.visible`
is false and `showupperhalf` or `showlowerhalf` is
false, this splom trace will generate one less x-axis
and one less y-axis.
"""
def __init__(
self,
arg=None,
customdata=None,
customdatasrc=None,
diagonal=None,
dimensions=None,
dimensiondefaults=None,
hoverinfo=None,
hoverinfosrc=None,
hoverlabel=None,
hovertemplate=None,
hovertemplatesrc=None,
hovertext=None,
hovertextsrc=None,
ids=None,
idssrc=None,
legendgroup=None,
marker=None,
name=None,
opacity=None,
selected=None,
selectedpoints=None,
showlegend=None,
showlowerhalf=None,
showupperhalf=None,
stream=None,
text=None,
textsrc=None,
uid=None,
uirevision=None,
unselected=None,
visible=None,
xaxes=None,
yaxes=None,
**kwargs
):
"""
Construct a new Splom object
Splom traces generate scatter plot matrix visualizations. Each
splom `dimensions` items correspond to a generated axis. Values
for each of those dimensions are set in `dimensions[i].values`.
Splom traces support all `scattergl` marker style attributes.
Specify `layout.grid` attributes and/or layout x-axis and
y-axis attributes for more control over the axis positioning
and style.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.Splom
customdata
            Assigns extra data to each datum. This may be useful when
            listening to hover, click and selection events. Note
            that "scatter" traces also append customdata items in
            the markers' DOM elements.
customdatasrc
Sets the source reference on plot.ly for customdata .
diagonal
plotly.graph_objs.splom.Diagonal instance or dict with
compatible properties
dimensions
plotly.graph_objs.splom.Dimension instance or dict with
compatible properties
dimensiondefaults
When used in a template (as
layout.template.data.splom.dimensiondefaults), sets the
default property values to use for elements of
splom.dimensions
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on plot.ly for hoverinfo .
hoverlabel
plotly.graph_objs.splom.Hoverlabel instance or dict
with compatible properties
hovertemplate
Template string used for rendering the information that
appear on hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}". Numbers are formatted using
d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}". See https://github.com/d3/d3-format
/blob/master/README.md#locale_format for details on the
formatting syntax. The variables available in
`hovertemplate` are the ones emitted as event data
described at this link
https://plot.ly/javascript/plotlyjs-events/#event-data.
Additionally, every attributes that can be specified
per-point (the ones that are `arrayOk: true`) are
available. Anything contained in tag `<extra>` is
displayed in the secondary box, for example
"<extra>{fullData.name}</extra>".
hovertemplatesrc
Sets the source reference on plot.ly for hovertemplate
.
hovertext
Same as `text`.
hovertextsrc
Sets the source reference on plot.ly for hovertext .
ids
            Assigns id labels to each datum. These ids are used for object
            constancy of data points during animation. Should be an
            array of strings, not numbers or any other type.
idssrc
Sets the source reference on plot.ly for ids .
legendgroup
Sets the legend group for this trace. Traces part of
the same legend group hide/show at the same time when
toggling legend items.
marker
plotly.graph_objs.splom.Marker instance or dict with
compatible properties
name
            Sets the trace name. The trace name appears as the
legend item and on hover.
opacity
Sets the opacity of the trace.
selected
plotly.graph_objs.splom.Selected instance or dict with
compatible properties
selectedpoints
Array containing integer indices of selected points.
Has an effect only for traces that support selections.
Note that an empty array means an empty selection where
            the `unselected` styles are turned on for all points, whereas
            any other non-array value means no selection at all, in which
            case the `selected` and `unselected` styles have no effect.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
showlowerhalf
Determines whether or not subplots on the lower half
from the diagonal are displayed.
showupperhalf
Determines whether or not subplots on the upper half
from the diagonal are displayed.
stream
plotly.graph_objs.splom.Stream instance or dict with
compatible properties
text
Sets text elements associated with each (x,y) pair to
appear on hover. If a single string, the same string
appears over all the data points. If an array of
            strings, the items are mapped in order to this
            trace's (x,y) coordinates.
textsrc
Sets the source reference on plot.ly for text .
uid
            Assigns an id to this trace. Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
unselected
plotly.graph_objs.splom.Unselected instance or dict
with compatible properties
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
xaxes
Sets the list of x axes corresponding to dimensions of
this splom trace. By default, a splom will match the
first N xaxes where N is the number of input
dimensions. Note that, in case where `diagonal.visible`
is false and `showupperhalf` or `showlowerhalf` is
false, this splom trace will generate one less x-axis
and one less y-axis.
yaxes
Sets the list of y axes corresponding to dimensions of
this splom trace. By default, a splom will match the
first N yaxes where N is the number of input
dimensions. Note that, in case where `diagonal.visible`
is false and `showupperhalf` or `showlowerhalf` is
false, this splom trace will generate one less x-axis
and one less y-axis.
Returns
-------
Splom
"""
super(Splom, self).__init__('splom')
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.Splom
constructor must be a dict or
an instance of plotly.graph_objs.Splom"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop('skip_invalid', False)
# Import validators
# -----------------
from plotly.validators import (splom as v_splom)
# Initialize validators
# ---------------------
self._validators['customdata'] = v_splom.CustomdataValidator()
self._validators['customdatasrc'] = v_splom.CustomdatasrcValidator()
self._validators['diagonal'] = v_splom.DiagonalValidator()
self._validators['dimensions'] = v_splom.DimensionsValidator()
self._validators['dimensiondefaults'] = v_splom.DimensionValidator()
self._validators['hoverinfo'] = v_splom.HoverinfoValidator()
self._validators['hoverinfosrc'] = v_splom.HoverinfosrcValidator()
self._validators['hoverlabel'] = v_splom.HoverlabelValidator()
self._validators['hovertemplate'] = v_splom.HovertemplateValidator()
self._validators['hovertemplatesrc'
] = v_splom.HovertemplatesrcValidator()
self._validators['hovertext'] = v_splom.HovertextValidator()
self._validators['hovertextsrc'] = v_splom.HovertextsrcValidator()
self._validators['ids'] = v_splom.IdsValidator()
self._validators['idssrc'] = v_splom.IdssrcValidator()
self._validators['legendgroup'] = v_splom.LegendgroupValidator()
self._validators['marker'] = v_splom.MarkerValidator()
self._validators['name'] = v_splom.NameValidator()
self._validators['opacity'] = v_splom.OpacityValidator()
self._validators['selected'] = v_splom.SelectedValidator()
self._validators['selectedpoints'] = v_splom.SelectedpointsValidator()
self._validators['showlegend'] = v_splom.ShowlegendValidator()
self._validators['showlowerhalf'] = v_splom.ShowlowerhalfValidator()
self._validators['showupperhalf'] = v_splom.ShowupperhalfValidator()
self._validators['stream'] = v_splom.StreamValidator()
self._validators['text'] = v_splom.TextValidator()
self._validators['textsrc'] = v_splom.TextsrcValidator()
self._validators['uid'] = v_splom.UidValidator()
self._validators['uirevision'] = v_splom.UirevisionValidator()
self._validators['unselected'] = v_splom.UnselectedValidator()
self._validators['visible'] = v_splom.VisibleValidator()
self._validators['xaxes'] = v_splom.XaxesValidator()
self._validators['yaxes'] = v_splom.YaxesValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop('customdata', None)
self['customdata'] = customdata if customdata is not None else _v
_v = arg.pop('customdatasrc', None)
self['customdatasrc'
] = customdatasrc if customdatasrc is not None else _v
_v = arg.pop('diagonal', None)
self['diagonal'] = diagonal if diagonal is not None else _v
_v = arg.pop('dimensions', None)
self['dimensions'] = dimensions if dimensions is not None else _v
_v = arg.pop('dimensiondefaults', None)
self['dimensiondefaults'
] = dimensiondefaults if dimensiondefaults is not None else _v
_v = arg.pop('hoverinfo', None)
self['hoverinfo'] = hoverinfo if hoverinfo is not None else _v
_v = arg.pop('hoverinfosrc', None)
self['hoverinfosrc'] = hoverinfosrc if hoverinfosrc is not None else _v
_v = arg.pop('hoverlabel', None)
self['hoverlabel'] = hoverlabel if hoverlabel is not None else _v
_v = arg.pop('hovertemplate', None)
self['hovertemplate'
] = hovertemplate if hovertemplate is not None else _v
_v = arg.pop('hovertemplatesrc', None)
self['hovertemplatesrc'
] = hovertemplatesrc if hovertemplatesrc is not None else _v
_v = arg.pop('hovertext', None)
self['hovertext'] = hovertext if hovertext is not None else _v
_v = arg.pop('hovertextsrc', None)
self['hovertextsrc'] = hovertextsrc if hovertextsrc is not None else _v
_v = arg.pop('ids', None)
self['ids'] = ids if ids is not None else _v
_v = arg.pop('idssrc', None)
self['idssrc'] = idssrc if idssrc is not None else _v
_v = arg.pop('legendgroup', None)
self['legendgroup'] = legendgroup if legendgroup is not None else _v
_v = arg.pop('marker', None)
self['marker'] = marker if marker is not None else _v
_v = arg.pop('name', None)
self['name'] = name if name is not None else _v
_v = arg.pop('opacity', None)
self['opacity'] = opacity if opacity is not None else _v
_v = arg.pop('selected', None)
self['selected'] = selected if selected is not None else _v
_v = arg.pop('selectedpoints', None)
self['selectedpoints'
] = selectedpoints if selectedpoints is not None else _v
_v = arg.pop('showlegend', None)
self['showlegend'] = showlegend if showlegend is not None else _v
_v = arg.pop('showlowerhalf', None)
self['showlowerhalf'
] = showlowerhalf if showlowerhalf is not None else _v
_v = arg.pop('showupperhalf', None)
self['showupperhalf'
] = showupperhalf if showupperhalf is not None else _v
_v = arg.pop('stream', None)
self['stream'] = stream if stream is not None else _v
_v = arg.pop('text', None)
self['text'] = text if text is not None else _v
_v = arg.pop('textsrc', None)
self['textsrc'] = textsrc if textsrc is not None else _v
_v = arg.pop('uid', None)
self['uid'] = uid if uid is not None else _v
_v = arg.pop('uirevision', None)
self['uirevision'] = uirevision if uirevision is not None else _v
_v = arg.pop('unselected', None)
self['unselected'] = unselected if unselected is not None else _v
_v = arg.pop('visible', None)
self['visible'] = visible if visible is not None else _v
_v = arg.pop('xaxes', None)
self['xaxes'] = xaxes if xaxes is not None else _v
_v = arg.pop('yaxes', None)
self['yaxes'] = yaxes if yaxes is not None else _v
# Read-only literals
# ------------------
from _plotly_utils.basevalidators import LiteralValidator
self._props['type'] = 'splom'
self._validators['type'] = LiteralValidator(
plotly_name='type', parent_name='splom', val='splom'
)
arg.pop('type', None)
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
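# A minimal usage sketch (hypothetical data; how the figure is rendered
# depends on the plotly version in use):
#
#     import plotly.graph_objs as go
#     fig = go.Figure(data=go.Splom(
#         dimensions=[dict(label='a', values=[1, 2, 3]),
#                     dict(label='b', values=[4, 5, 6])],
#         marker=dict(size=4),
#     ))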
| 36.773866 | 89 | 0.572273 | [
"MIT"
] | 180Studios/LoginApp | venv/lib/python3.7/site-packages/plotly/graph_objs/_splom.py | 54,315 | Python |
import time
from bs4 import BeautifulSoup
import requests
import json
from datetime import datetime, timedelta
import psycopg2
import smtplib
import os
DATABASE = os.environ["DATABASE"]
USER = os.environ["USER"]
PASSWORD = os.environ["PASSWORD"]
HOST = os.environ["HOST"]
def send_email(message: str) -> None:
"""
Sends an email to target email with given message.
Args:
message (str): message you're sending
"""
with open("../creds.json", "r") as f:
        creds = json.load(f)  # json.load() reads from a file object
gmail_user = creds["user"]
gmail_pass = creds["pass"]
try:
server = smtplib.SMTP("smtp.gmail.com", 587)
server.starttls()
server.login(gmail_user, gmail_pass)
server.sendmail(gmail_user, creds["target"], message)
    except Exception:
        print("Email didn't work...")
def get_data() -> None:
"""
    Runs an infinite loop that polls the Vilnius vaccination center roughly
    every 10 minutes, stores the available vaccine counts in a PostgreSQL
    database, and sends an email if a Pfizer vaccine is available.
"""
while True:
sql_connection = psycopg2.connect(
database=DATABASE, user=USER, password=PASSWORD, host=HOST
)
# Connect to DB
cur = sql_connection.cursor()
headers = {
"Connection": "keep-alive",
"Cache-Control": "max-age=0",
"sec-ch-ua": "^\\^",
"sec-ch-ua-mobile": "?0",
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.93 Safari/537.36",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"Sec-Fetch-Site": "cross-site",
"Sec-Fetch-Mode": "navigate",
"Sec-Fetch-User": "?1",
"Sec-Fetch-Dest": "document",
"Accept-Language": "en-US,en;q=0.9",
}
page = requests.get(
"https://vilnius-vac.myhybridlab.com/selfregister/vaccine", headers=headers
)
soup = BeautifulSoup(page.content, "html.parser")
vaccines = soup.find("vaccine-rooms", class_=None)[":vaccine-rooms"]
json_object = json.loads(vaccines)
# Time
time_raw = soup.find("small", class_="text-muted").get_text().split()
time_str = time_raw[2] + " " + time_raw[3]
dt = datetime.fromisoformat(time_str)
now = datetime.now().replace(microsecond=0)
eet_dt = now + timedelta(hours=3)
diff_secs = (eet_dt - dt).seconds
        # Sleep until roughly 10 minutes (602 s) after the page's own timestamp,
        # clamped at zero so time.sleep() never receives a negative value.
        total_sleep = max(0, 602 - diff_secs)
moderna = json_object[0]["free_total"]
pfizer = json_object[1]["free_total"]
astra = json_object[2]["free_total"]
janssen = json_object[3]["free_total"]
cur.execute(
f"INSERT INTO vilnius_vakcinos (time, moderna, pfizer, astra_zeneca, janssen) VALUES ('{time_str}', {moderna}, {pfizer}, {astra}, {janssen});"
)
sql_connection.commit()
sql_connection.close()
if pfizer > 0:
send_email(
"Pfizer count: {pfizer}, link to register: https://vilnius-vac.myhybridlab.com/selfregister/vaccine"
)
time.sleep(total_sleep)
if __name__ == "__main__":
get_data()
| 32.46729 | 161 | 0.582614 | [
"MIT"
] | Karalius/get-vaccine-vilnius | vaccines.py | 3,474 | Python |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._vaults_operations_async import VaultsOperations
from ._operations_async import Operations
__all__ = [
'VaultsOperations',
'Operations',
]
| 38.875 | 94 | 0.588424 | [
"MIT"
] | 00Kai0/azure-sdk-for-python | sdk/keyvault/azure-mgmt-keyvault/azure/mgmt/keyvault/v2016_10_01/aio/operations_async/__init__.py | 622 | Python |
# Lint as: python3
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API for Tensorflow Model Analysis."""
# TODO(b/149126671): Put ValidationResultsWriter in a separate file.
from __future__ import absolute_import
from __future__ import division
# Standard __future__ imports
from __future__ import print_function
import os
import pickle
import tempfile
from typing import Any, Dict, List, NamedTuple, Optional, Text, Tuple, Union
import apache_beam as beam
import six
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tensorflow_model_analysis import config
from tensorflow_model_analysis import constants
from tensorflow_model_analysis import model_util
from tensorflow_model_analysis import types
from tensorflow_model_analysis import version as tfma_version
from tensorflow_model_analysis.eval_saved_model import constants as eval_constants
from tensorflow_model_analysis.evaluators import evaluator
from tensorflow_model_analysis.evaluators import metrics_and_plots_evaluator
from tensorflow_model_analysis.evaluators import metrics_and_plots_evaluator_v2
from tensorflow_model_analysis.extractors import extractor
from tensorflow_model_analysis.extractors import input_extractor
from tensorflow_model_analysis.extractors import predict_extractor
from tensorflow_model_analysis.extractors import predict_extractor_v2
from tensorflow_model_analysis.extractors import slice_key_extractor
from tensorflow_model_analysis.extractors import tflite_predict_extractor
from tensorflow_model_analysis.post_export_metrics import post_export_metrics
from tensorflow_model_analysis.proto import config_pb2
from tensorflow_model_analysis.proto import validation_result_pb2
from tensorflow_model_analysis.slicer import slicer_lib as slicer
from tensorflow_model_analysis.validators import validator
from tensorflow_model_analysis.writers import metrics_and_plots_serialization
from tensorflow_model_analysis.writers import metrics_plots_and_validations_writer
from tensorflow_model_analysis.writers import writer
from google.protobuf import json_format
_EVAL_CONFIG_FILE = 'eval_config.json'
def _assert_tensorflow_version():
"""Check that we're using a compatible TF version."""
# Fail with a clear error in case we are not using a compatible TF version.
major, minor, _ = tf.version.VERSION.split('.')
if (int(major) not in (1, 2)) or (int(major) == 1 and int(minor) < 15):
raise RuntimeError(
'Tensorflow version >= 1.15, < 3 is required. Found (%s). Please '
'install the latest 1.x or 2.x version from '
'https://github.com/tensorflow/tensorflow. ' % tf.version.VERSION)
if int(major) == 2:
tf.compat.v1.logging.warning(
'Tensorflow version (%s) found. Note that TFMA support for TF 2.0 '
'is currently in beta' % tf.version.VERSION)
def _check_version(version: Text, path: Text):
if not version:
raise ValueError(
'could not find TFMA version in raw deserialized dictionary for '
'file at %s' % path)
# We don't actually do any checking for now, since we don't have any
# compatibility issues.
def _is_legacy_eval(eval_shared_model: Optional[types.EvalSharedModel],
eval_config: Optional[config.EvalConfig]):
"""Returns True if legacy evaluation is being used."""
  # A legacy evaluation is an evaluation that uses only a single EvalSharedModel,
# has no tags (or uses "eval" as its tag), and does not specify an eval_config
# (or specifies an eval_config with no metrics). The legacy evaluation is
# based on using add_metrics_callbacks to create a modified version of the
# graph saved with an EvalSavedModel. The newer version of evaluation supports
# both add_metrics_callbacks as well as metrics defined in MetricsSpecs inside
# of EvalConfig. The newer version works with both "eval" and serving models
# and also supports multi-model evaluation. This function is used by code to
# support backwards compatibility for callers that have not updated to use the
# new EvalConfig.
return (eval_shared_model and not isinstance(eval_shared_model, dict) and
((not eval_shared_model.model_loader.tags or
eval_constants.EVAL_TAG in eval_shared_model.model_loader.tags) and
(not eval_config or not eval_config.metrics_specs)))
def _serialize_eval_run(eval_config: config.EvalConfig, data_location: Text,
file_format: Text, model_locations: Dict[Text,
Text]) -> Text:
return json_format.MessageToJson(
config_pb2.EvalRun(
eval_config=eval_config,
version=tfma_version.VERSION_STRING,
data_location=data_location,
file_format=file_format,
model_locations=model_locations))
def _load_eval_run(
output_path: Text
) -> Tuple[config.EvalConfig, Text, Text, Dict[Text, Text]]:
"""Returns eval config, data location, file format, and model locations."""
path = os.path.join(output_path, _EVAL_CONFIG_FILE)
if tf.io.gfile.exists(path):
with tf.io.gfile.GFile(path, 'r') as f:
pb = json_format.Parse(f.read(), config_pb2.EvalRun())
_check_version(pb.version, output_path)
return (pb.eval_config, pb.data_location, pb.file_format,
pb.model_locations)
else:
# Legacy suppport (to be removed in future).
# The previous version did not include file extension.
path = os.path.splitext(path)[0]
serialized_record = six.next(
tf.compat.v1.python_io.tf_record_iterator(path))
final_dict = pickle.loads(serialized_record)
_check_version(final_dict, output_path)
old_config = final_dict['eval_config']
slicing_specs = None
if old_config.slice_spec:
slicing_specs = [s.to_proto() for s in old_config.slice_spec]
options = config.Options()
options.compute_confidence_intervals.value = (
old_config.compute_confidence_intervals)
options.k_anonymization_count.value = old_config.k_anonymization_count
return (config.EvalConfig(slicing_specs=slicing_specs,
options=options), old_config.data_location, '', {
'': old_config.model_location
})
# The field slicing_metrics is a nested dictionary representing metrics for
# different configurations as defined by MetricKey in metrics_for_slice.proto.
# The levels correspond to output name, class id, metric name and metric value
# in this order. Note MetricValue uses oneof so metric values will always
# contain only a single key representing the type in the oneof and the actual
# metric value is in the value.
EvalResult = NamedTuple( # pylint: disable=invalid-name
'EvalResult',
[('slicing_metrics',
List[Tuple[slicer.SliceKeyType,
Dict[Text, Dict[Text, Dict[Text, Dict[Text, Dict[Text,
Any]]]]]]]),
('plots', List[Tuple[slicer.SliceKeyType, Dict[Text, Any]]]),
('config', config.EvalConfig), ('data_location', Text),
('file_format', Text), ('model_location', Text)])
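# A minimal sketch of one slicing_metrics entry following the nesting above
# (slice key, then output name -> class id -> metric name -> metric value;
# all names and numbers are hypothetical):
#
#   ((('gender', 'female'),),
#    {'': {'': {'accuracy': {'doubleValue': 0.93}}}})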
# Define types here to avoid type errors between OSS and internal code.
ValidationResult = validation_result_pb2.ValidationResult
def load_validation_result(
validations_file: Text) -> Optional[ValidationResult]:
"""Read and deserialize the ValidationResult."""
validation_records = []
for record in tf.compat.v1.python_io.tf_record_iterator(validations_file):
validation_records.append(ValidationResult.FromString(record))
if validation_records:
assert len(validation_records) == 1
return validation_records[0]
class EvalResults(object):
"""Class for results from multiple model analysis run."""
def __init__(self,
results: List[EvalResult],
mode: Text = constants.UNKNOWN_EVAL_MODE):
supported_modes = [
constants.DATA_CENTRIC_MODE,
constants.MODEL_CENTRIC_MODE,
]
if mode not in supported_modes:
raise ValueError('Mode ' + mode + ' must be one of ' +
Text(supported_modes))
self._results = results
self._mode = mode
def get_results(self) -> List[EvalResult]:
return self._results
def get_mode(self) -> Text:
return self._mode
def make_eval_results(results: List[EvalResult], mode: Text) -> EvalResults:
"""Run model analysis for a single model on multiple data sets.
Args:
results: A list of TFMA evaluation results.
mode: The mode of the evaluation. Currently, tfma.DATA_CENTRIC_MODE and
tfma.MODEL_CENTRIC_MODE are supported.
Returns:
An EvalResults containing all evaluation results. This can be used to
construct a time series view.
"""
return EvalResults(results, mode)
def load_eval_results(output_paths: List[Text],
mode: Text,
model_name: Optional[Text] = None) -> EvalResults:
"""Run model analysis for a single model on multiple data sets.
Args:
output_paths: A list of output paths of completed tfma runs.
mode: The mode of the evaluation. Currently, tfma.DATA_CENTRIC_MODE and
tfma.MODEL_CENTRIC_MODE are supported.
model_name: The name of the model if multiple models are evaluated together.
Returns:
An EvalResults containing the evaluation results serialized at output_paths.
This can be used to construct a time series view.
"""
results = [
load_eval_result(output_path, model_name=model_name)
for output_path in output_paths
]
return make_eval_results(results, mode)
def load_eval_result(output_path: Text,
model_name: Optional[Text] = None) -> EvalResult:
"""Creates an EvalResult object for use with the visualization functions."""
eval_config, data_location, file_format, model_locations = (
_load_eval_run(output_path))
metrics_proto_list = (
metrics_and_plots_serialization.load_and_deserialize_metrics(
path=os.path.join(output_path, constants.METRICS_KEY),
model_name=model_name))
plots_proto_list = (
metrics_and_plots_serialization.load_and_deserialize_plots(
path=os.path.join(output_path, constants.PLOTS_KEY)))
if model_name is None:
model_location = list(model_locations.values())[0]
else:
model_location = model_locations[model_name]
return EvalResult(
slicing_metrics=metrics_proto_list,
plots=plots_proto_list,
config=eval_config,
data_location=data_location,
file_format=file_format,
model_location=model_location)
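# Usage sketch (illustrative; the output path is a placeholder for a directory
# written by a previous TFMA run):
#
#   result = load_eval_result('/path/to/tfma_output')
#   for slice_key, metrics in result.slicing_metrics:
#     print(slice_key, metrics)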
def default_eval_shared_model(
eval_saved_model_path: Text,
add_metrics_callbacks: Optional[List[types.AddMetricsCallbackType]] = None,
include_default_metrics: Optional[bool] = True,
example_weight_key: Optional[Union[Text, Dict[Text, Text]]] = None,
additional_fetches: Optional[List[Text]] = None,
blacklist_feature_fetches: Optional[List[Text]] = None,
tags: Optional[List[Text]] = None,
eval_config: Optional[config.EvalConfig] = None) -> types.EvalSharedModel:
"""Returns default EvalSharedModel.
Args:
eval_saved_model_path: Path to EvalSavedModel.
add_metrics_callbacks: Optional list of callbacks for adding additional
metrics to the graph (see EvalSharedModel for more information on how to
configure additional metrics). Metrics for example count and example
weights will be added automatically.
include_default_metrics: True to include the default metrics that are part
of the saved model graph during evaluation. Note that
eval_config.options.include_default_metrics must also be true.
example_weight_key: Example weight key (single-output model) or dict of
example weight keys (multi-output model) keyed by output name.
additional_fetches: Prefixes of additional tensors stored in
signature_def.inputs that should be fetched at prediction time. The
"features" and "labels" tensors are handled automatically and should not
be included.
blacklist_feature_fetches: List of tensor names in the features dictionary
which should be excluded from the fetches request. This is useful in
scenarios where features are large (e.g. images) and can lead to excessive
memory use if stored.
tags: Model tags (e.g. 'serve' for serving or 'eval' for EvalSavedModel).
eval_config: Eval config. Only used for setting default tags.
"""
if tags is None:
if eval_config:
# Default to serving unless all the signature_names are eval. We do not
# support running with a mixture of eval and non-eval tags.
signatures = [s.signature_name for s in eval_config.model_specs]
if eval_constants.EVAL_TAG in signatures:
if not all(s == eval_constants.EVAL_TAG for s in signatures):
tf.compat.v1.logging.warning(
'mixture of eval and non-eval signatures used: '
'eval_config={}'.format(eval_config))
tags = [eval_constants.EVAL_TAG]
else:
tags = [tf.saved_model.SERVING]
else:
tags = [eval_constants.EVAL_TAG]
# Backwards compatibility for legacy add_metrics_callbacks implementation.
if tags == [eval_constants.EVAL_TAG]:
# PyType doesn't know about the magic exports we do in post_export_metrics.
# Additionally, the lines seem to get reordered in compilation, so we can't
# just put the disable-attr on the add_metrics_callbacks lines.
# pytype: disable=module-attr
if not add_metrics_callbacks:
add_metrics_callbacks = []
# Always compute example weight and example count.
example_count_callback = post_export_metrics.example_count()
add_metrics_callbacks.append(example_count_callback)
if example_weight_key:
if isinstance(example_weight_key, dict):
for output_name, key in example_weight_key.items():
example_weight_callback = post_export_metrics.example_weight(
key, metric_tag=output_name)
add_metrics_callbacks.append(example_weight_callback)
else:
example_weight_callback = post_export_metrics.example_weight(
example_weight_key)
add_metrics_callbacks.append(example_weight_callback)
# pytype: enable=module-attr
return types.EvalSharedModel(
model_path=eval_saved_model_path,
add_metrics_callbacks=add_metrics_callbacks,
include_default_metrics=include_default_metrics,
example_weight_key=example_weight_key,
additional_fetches=additional_fetches,
model_loader=types.ModelLoader(
tags=tags,
construct_fn=model_util.model_construct_fn(
eval_saved_model_path=eval_saved_model_path,
add_metrics_callbacks=add_metrics_callbacks,
include_default_metrics=include_default_metrics,
additional_fetches=additional_fetches,
blacklist_feature_fetches=blacklist_feature_fetches,
tags=tags)))
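# Usage sketch (illustrative; the model path and example weight key below are
# placeholders):
#
#   eval_shared_model = default_eval_shared_model(
#       eval_saved_model_path='/path/to/eval_saved_model',
#       example_weight_key='example_weight')
#
# The returned EvalSharedModel is then passed to default_extractors,
# default_evaluators or ExtractEvaluateAndWriteResults below.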
def default_extractors( # pylint: disable=invalid-name
eval_shared_model: Union[types.EvalSharedModel,
Dict[Text, types.EvalSharedModel]] = None,
eval_config: config.EvalConfig = None,
slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,
desired_batch_size: Optional[int] = None,
materialize: Optional[bool] = True) -> List[extractor.Extractor]:
"""Returns the default extractors for use in ExtractAndEvaluate.
Args:
eval_shared_model: Shared model (single-model evaluation) or dict of shared
models keyed by model name (multi-model evaluation). Required unless the
predictions are provided alongside of the features (i.e. model-agnostic
evaluations).
eval_config: Eval config.
slice_spec: Deprecated (use EvalConfig).
desired_batch_size: Optional batch size for batching in Predict.
materialize: True to have extractors create materialized output.
Raises:
NotImplementedError: If eval_config contains mixed serving and eval models.
"""
if eval_config is not None:
eval_config = config.update_eval_config_with_defaults(eval_config)
slice_spec = [
slicer.SingleSliceSpec(spec=spec) for spec in eval_config.slicing_specs
]
if _is_legacy_eval(eval_shared_model, eval_config):
# Backwards compatibility for previous add_metrics_callbacks implementation.
return [
predict_extractor.PredictExtractor(
eval_shared_model, desired_batch_size, materialize=materialize),
slice_key_extractor.SliceKeyExtractor(
slice_spec, materialize=materialize)
]
elif eval_shared_model:
model_types = model_util.get_model_types(eval_config)
if not model_types.issubset(constants.VALID_MODEL_TYPES):
raise NotImplementedError(
'model type must be one of: {}. evalconfig={}'.format(
str(constants.VALID_MODEL_TYPES), eval_config))
if model_types == set([constants.TF_LITE]):
return [
input_extractor.InputExtractor(eval_config=eval_config),
tflite_predict_extractor.TFLitePredictExtractor(
eval_config=eval_config,
eval_shared_model=eval_shared_model,
desired_batch_size=desired_batch_size),
slice_key_extractor.SliceKeyExtractor(
slice_spec, materialize=materialize)
]
elif constants.TF_LITE in model_types:
raise NotImplementedError(
'support for mixing tf_lite and non-tf_lite models is not '
'implemented: eval_config={}'.format(eval_config))
elif (eval_config and all(s.signature_name == eval_constants.EVAL_TAG
for s in eval_config.model_specs)):
return [
predict_extractor.PredictExtractor(
eval_shared_model,
desired_batch_size,
materialize=materialize,
eval_config=eval_config),
slice_key_extractor.SliceKeyExtractor(
slice_spec, materialize=materialize)
]
elif (eval_config and any(s.signature_name == eval_constants.EVAL_TAG
for s in eval_config.model_specs)):
raise NotImplementedError(
'support for mixing eval and non-eval models is not implemented: '
'eval_config={}'.format(eval_config))
else:
return [
input_extractor.InputExtractor(eval_config=eval_config),
predict_extractor_v2.PredictExtractor(
eval_config=eval_config,
eval_shared_model=eval_shared_model,
desired_batch_size=desired_batch_size),
slice_key_extractor.SliceKeyExtractor(
slice_spec, materialize=materialize)
]
else:
return [
input_extractor.InputExtractor(eval_config=eval_config),
slice_key_extractor.SliceKeyExtractor(
slice_spec, materialize=materialize)
]
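# Sketch (illustrative): the returned list is typically fed, unchanged or with
# custom extractors appended, into ExtractAndEvaluate /
# ExtractEvaluateAndWriteResults; evaluator dependencies are checked later via
# evaluator.verify_evaluator.
#
#   extractors = default_extractors(
#       eval_shared_model=eval_shared_model, eval_config=eval_config)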
def default_evaluators( # pylint: disable=invalid-name
eval_shared_model: Optional[Union[types.EvalSharedModel,
Dict[Text,
types.EvalSharedModel]]] = None,
eval_config: config.EvalConfig = None,
compute_confidence_intervals: Optional[bool] = False,
k_anonymization_count: int = 1,
desired_batch_size: Optional[int] = None,
serialize: bool = False,
random_seed_for_testing: Optional[int] = None) -> List[evaluator.Evaluator]:
"""Returns the default evaluators for use in ExtractAndEvaluate.
Args:
eval_shared_model: Optional shared model (single-model evaluation) or dict
of shared models keyed by model name (multi-model evaluation). Only
required if there are metrics to be computed in-graph using the model.
eval_config: Eval config.
compute_confidence_intervals: Deprecated (use eval_config).
k_anonymization_count: Deprecated (use eval_config).
desired_batch_size: Optional batch size for batching in combiner.
serialize: Deprecated.
random_seed_for_testing: Provide for deterministic tests only.
"""
disabled_outputs = []
if eval_config:
eval_config = config.update_eval_config_with_defaults(eval_config)
disabled_outputs = eval_config.options.disabled_outputs.values
if model_util.get_model_types(eval_config) == set([constants.TF_LITE]):
# no in-graph metrics present when tflite is used.
if eval_shared_model:
if isinstance(eval_shared_model, dict):
eval_shared_model = {
k: v._replace(include_default_metrics=False)
for k, v in eval_shared_model.items()
}
else:
eval_shared_model = eval_shared_model._replace(
include_default_metrics=False)
if (constants.METRICS_KEY in disabled_outputs and
constants.PLOTS_KEY in disabled_outputs):
return []
if _is_legacy_eval(eval_shared_model, eval_config):
# Backwards compatibility for previous add_metrics_callbacks implementation.
if eval_config is not None:
if eval_config.options.HasField('compute_confidence_intervals'):
compute_confidence_intervals = (
eval_config.options.compute_confidence_intervals.value)
if eval_config.options.HasField('k_anonymization_count'):
k_anonymization_count = eval_config.options.k_anonymization_count.value
return [
metrics_and_plots_evaluator.MetricsAndPlotsEvaluator(
eval_shared_model,
compute_confidence_intervals=compute_confidence_intervals,
k_anonymization_count=k_anonymization_count,
desired_batch_size=desired_batch_size,
serialize=serialize,
random_seed_for_testing=random_seed_for_testing)
]
else:
return [
metrics_and_plots_evaluator_v2.MetricsAndPlotsEvaluator(
eval_config=eval_config, eval_shared_model=eval_shared_model)
]
def default_writers(
output_path: Optional[Text],
eval_shared_model: Optional[Union[types.EvalSharedModel,
Dict[Text, types.EvalSharedModel]]] = None
) -> List[writer.Writer]: # pylint: disable=invalid-name
"""Returns the default writers for use in WriteResults.
Args:
output_path: Output path.
eval_shared_model: Optional shared model (single-model evaluation) or dict
of shared models keyed by model name (multi-model evaluation). Only
required if legacy add_metrics_callbacks are used.
"""
add_metric_callbacks = []
# The add_metric_callbacks are used in the metrics and plots serialization
# code to post process the metric data by calling populate_stats_and_pop.
# While both the legacy (V1) and new (V2) evaluation implementations support
# EvalSavedModels using add_metric_callbacks, this particular code is only
# required for the legacy evaluation based on the MetricsAndPlotsEvaluator.
# The V2 MetricsAndPlotsEvaluator output requires no additional processing.
# Since the V1 code only supports a single EvalSharedModel, we only set the
# add_metrics_callbacks if a dict is not passed.
if eval_shared_model and not isinstance(eval_shared_model, dict):
add_metric_callbacks = eval_shared_model.add_metrics_callbacks
output_paths = {
constants.METRICS_KEY:
os.path.join(output_path, constants.METRICS_KEY),
constants.PLOTS_KEY:
os.path.join(output_path, constants.PLOTS_KEY),
constants.VALIDATIONS_KEY:
os.path.join(output_path, constants.VALIDATIONS_KEY)
}
return [
metrics_plots_and_validations_writer.MetricsPlotsAndValidationsWriter(
output_paths=output_paths,
add_metrics_callbacks=add_metric_callbacks),
]
@beam.ptransform_fn
@beam.typehints.with_input_types(bytes)
@beam.typehints.with_output_types(types.Extracts)
def InputsToExtracts( # pylint: disable=invalid-name
inputs: beam.pvalue.PCollection):
"""Converts serialized inputs (e.g. examples) to Extracts."""
return inputs | beam.Map(lambda x: {constants.INPUT_KEY: x})
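# Sketch (illustrative): serialized records read from disk are wrapped into
# Extracts keyed by constants.INPUT_KEY before extraction, e.g.
#
#   extracts = raw_records | 'InputsToExtracts' >> InputsToExtracts()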
@beam.ptransform_fn
@beam.typehints.with_input_types(types.Extracts)
@beam.typehints.with_output_types(evaluator.Evaluation)
def ExtractAndEvaluate( # pylint: disable=invalid-name
extracts: beam.pvalue.PCollection, extractors: List[extractor.Extractor],
evaluators: List[evaluator.Evaluator]):
"""Performs Extractions and Evaluations in provided order."""
# evaluation[k] = list of values for k
evaluation = {}
def update(evaluation: Dict[Text, Any], new_evaluation: Dict[Text, Any]):
for k, v in new_evaluation.items():
if k not in evaluation:
evaluation[k] = []
evaluation[k].append(v)
return evaluation
# Run evaluators that run before extraction (i.e. that only require
# the incoming input extract added by ReadInputs)
for v in evaluators:
if not v.run_after:
update(evaluation, extracts | v.stage_name >> v.ptransform)
for x in extractors:
extracts = (extracts | x.stage_name >> x.ptransform)
for v in evaluators:
if v.run_after == x.stage_name:
update(evaluation, extracts | v.stage_name >> v.ptransform)
for v in evaluators:
if v.run_after == extractor.LAST_EXTRACTOR_STAGE_NAME:
update(evaluation, extracts | v.stage_name >> v.ptransform)
# Merge multi-valued keys if necessary.
result = {}
for k, v in evaluation.items():
if len(v) == 1:
result[k] = v[0]
continue
# Note that we assume that if a key is multivalued, its values are
# dictionaries with disjoint keys. The combined value will simply be the
# disjoint union of all the dictionaries.
result[k] = (
v
| 'FlattenEvaluationOutput(%s)' % k >> beam.Flatten()
| 'CombineEvaluationOutput(%s)' % k >> beam.CombinePerKey(
_CombineEvaluationDictionariesFn()))
return result
class _CombineEvaluationDictionariesFn(beam.CombineFn):
"""CombineFn to combine dictionaries generated by different evaluators."""
def create_accumulator(self) -> Dict[Text, Any]:
return {}
def _merge(self, accumulator: Dict[Text, Any],
output_dict: Dict[Text, Any]) -> None:
intersection = set(accumulator) & set(output_dict)
if intersection:
raise ValueError(
'Dictionaries generated by different evaluators should have '
'different keys, but keys %s appeared in the output of multiple '
'evaluators' % intersection)
accumulator.update(output_dict)
def add_input(self, accumulator: Dict[Text, Any],
output_dict: Dict[Text, Any]) -> Dict[Text, Any]:
if not isinstance(output_dict, dict):
raise TypeError(
'for outputs written to by multiple evaluators, the outputs must all '
'be dictionaries, but got output of type %s, value %s' %
(type(output_dict), str(output_dict)))
self._merge(accumulator, output_dict)
return accumulator
def merge_accumulators(
self, accumulators: List[Dict[Text, Any]]) -> Dict[Text, Any]:
result = self.create_accumulator()
for acc in accumulators:
self._merge(result, acc)
return result
def extract_output(self, accumulator: Dict[Text, Any]) -> Dict[Text, Any]:
return accumulator
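# Illustrative behaviour of the combiner above (placeholder values): outputs
# written by different evaluators under the same key are merged into one
# dictionary, and overlapping keys raise ValueError in _merge.
#
#   fn = _CombineEvaluationDictionariesFn()
#   acc = fn.create_accumulator()
#   acc = fn.add_input(acc, {'metrics': {...}})
#   acc = fn.add_input(acc, {'plots': {...}})
#   fn.extract_output(acc)  # -> {'metrics': {...}, 'plots': {...}}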
@beam.ptransform_fn
@beam.typehints.with_input_types(Union[evaluator.Evaluation,
validator.Validation])
@beam.typehints.with_output_types(beam.pvalue.PDone)
def WriteResults( # pylint: disable=invalid-name
evaluation_or_validation: Union[evaluator.Evaluation, validator.Validation],
writers: List[writer.Writer]):
"""Writes Evaluation or Validation results using given writers.
Args:
evaluation_or_validation: Evaluation or Validation output.
    writers: Writers to use for writing out the output.
Raises:
ValueError: If Evaluation or Validation is empty.
Returns:
beam.pvalue.PDone.
"""
if not evaluation_or_validation:
raise ValueError('Evaluations and Validations cannot be empty')
for w in writers:
_ = evaluation_or_validation | w.stage_name >> w.ptransform
return beam.pvalue.PDone(list(evaluation_or_validation.values())[0].pipeline)
@beam.ptransform_fn
@beam.typehints.with_input_types(beam.Pipeline)
@beam.typehints.with_output_types(beam.pvalue.PDone)
def WriteEvalConfig( # pylint: disable=invalid-name
pipeline: beam.Pipeline,
eval_config: config.EvalConfig,
output_path: Text,
data_location: Optional[Text] = '',
file_format: Optional[Text] = '',
model_locations: Optional[Dict[Text, Text]] = None):
"""Writes EvalConfig to file.
Args:
pipeline: Beam pipeline.
eval_config: EvalConfig.
output_path: Output path.
data_location: Optional location for data used with config.
file_format: Optional format for data used with config.
model_locations: Optional location(s) for model(s) used with config.
Returns:
beam.pvalue.PDone.
"""
return (
pipeline
| 'CreateEvalConfig' >> beam.Create([
_serialize_eval_run(eval_config, data_location, file_format,
model_locations)
])
| 'WriteEvalConfig' >> beam.io.WriteToText(
os.path.join(output_path, _EVAL_CONFIG_FILE), shard_name_template=''))
@beam.ptransform_fn
@beam.typehints.with_output_types(beam.pvalue.PDone)
def ExtractEvaluateAndWriteResults( # pylint: disable=invalid-name
examples: beam.pvalue.PCollection,
eval_shared_model: Optional[Union[types.EvalSharedModel,
Dict[Text,
types.EvalSharedModel]]] = None,
eval_config: config.EvalConfig = None,
extractors: Optional[List[extractor.Extractor]] = None,
evaluators: Optional[List[evaluator.Evaluator]] = None,
writers: Optional[List[writer.Writer]] = None,
output_path: Optional[Text] = None,
display_only_data_location: Optional[Text] = None,
display_only_file_format: Optional[Text] = None,
slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,
write_config: Optional[bool] = True,
compute_confidence_intervals: Optional[bool] = False,
k_anonymization_count: int = 1,
desired_batch_size: Optional[int] = None,
random_seed_for_testing: Optional[int] = None) -> beam.pvalue.PDone:
"""PTransform for performing extraction, evaluation, and writing results.
Users who want to construct their own Beam pipelines instead of using the
lightweight run_model_analysis functions should use this PTransform.
Example usage:
eval_config = tfma.EvalConfig(slicing_specs=[...], metrics_specs=[...])
eval_shared_model = tfma.default_eval_shared_model(
eval_saved_model_path=model_location, eval_config=eval_config)
with beam.Pipeline(runner=...) as p:
_ = (p
| 'ReadData' >> beam.io.ReadFromTFRecord(data_location)
| 'ExtractEvaluateAndWriteResults' >>
tfma.ExtractEvaluateAndWriteResults(
eval_shared_model=eval_shared_model,
eval_config=eval_config,
...))
result = tfma.load_eval_result(output_path=output_path)
tfma.view.render_slicing_metrics(result)
Note that the exact serialization format is an internal implementation detail
and subject to change. Users should only use the TFMA functions to write and
read the results.
Args:
examples: PCollection of input examples. Can be any format the model accepts
(e.g. string containing CSV row, TensorFlow.Example, etc).
eval_shared_model: Optional shared model (single-model evaluation) or dict
of shared models keyed by model name (multi-model evaluation). Only
required if needed by default extractors, evaluators, or writers and for
display purposes of the model path.
eval_config: Eval config.
extractors: Optional list of Extractors to apply to Extracts. Typically
these will be added by calling the default_extractors function. If no
extractors are provided, default_extractors (non-materialized) will be
used.
evaluators: Optional list of Evaluators for evaluating Extracts. Typically
these will be added by calling the default_evaluators function. If no
evaluators are provided, default_evaluators will be used.
writers: Optional list of Writers for writing Evaluation output. Typically
these will be added by calling the default_writers function. If no writers
are provided, default_writers will be used.
output_path: Path to output metrics and plots results.
display_only_data_location: Optional path indicating where the examples were
read from. This is used only for display purposes - data will not actually
be read from this path.
display_only_file_format: Optional format of the examples. This is used only
for display purposes.
slice_spec: Deprecated (use EvalConfig).
write_config: Deprecated (use EvalConfig).
compute_confidence_intervals: Deprecated (use EvalConfig).
k_anonymization_count: Deprecated (use EvalConfig).
desired_batch_size: Optional batch size for batching in Predict.
random_seed_for_testing: Provide for deterministic tests only.
Raises:
ValueError: If EvalConfig invalid or matching Extractor not found for an
Evaluator.
Returns:
PDone.
"""
eval_shared_models = eval_shared_model
if not isinstance(eval_shared_model, dict):
eval_shared_models = {'': eval_shared_model}
if eval_config is None:
model_specs = []
for model_name, shared_model in eval_shared_models.items():
example_weight_key = shared_model.example_weight_key
example_weight_keys = {}
if example_weight_key and isinstance(example_weight_key, dict):
example_weight_keys = example_weight_key
example_weight_key = ''
model_specs.append(
config.ModelSpec(
name=model_name,
example_weight_key=example_weight_key,
example_weight_keys=example_weight_keys))
slicing_specs = None
if slice_spec:
slicing_specs = [s.to_proto() for s in slice_spec]
options = config.Options()
options.compute_confidence_intervals.value = compute_confidence_intervals
options.k_anonymization_count.value = k_anonymization_count
if not write_config:
options.disabled_outputs.values.append(_EVAL_CONFIG_FILE)
eval_config = config.EvalConfig(
model_specs=model_specs, slicing_specs=slicing_specs, options=options)
else:
eval_config = config.update_eval_config_with_defaults(eval_config)
config.verify_eval_config(eval_config)
if not extractors:
extractors = default_extractors(
eval_config=eval_config,
eval_shared_model=eval_shared_model,
materialize=False,
desired_batch_size=desired_batch_size)
if not evaluators:
evaluators = default_evaluators(
eval_config=eval_config,
eval_shared_model=eval_shared_model,
random_seed_for_testing=random_seed_for_testing)
for v in evaluators:
evaluator.verify_evaluator(v, extractors)
if not writers:
writers = default_writers(
output_path=output_path, eval_shared_model=eval_shared_model)
# pylint: disable=no-value-for-parameter
_ = (
examples
| 'InputsToExtracts' >> InputsToExtracts()
| 'ExtractAndEvaluate' >> ExtractAndEvaluate(
extractors=extractors, evaluators=evaluators)
| 'WriteResults' >> WriteResults(writers=writers))
if _EVAL_CONFIG_FILE not in eval_config.options.disabled_outputs.values:
data_location = '<user provided PCollection>'
if display_only_data_location is not None:
data_location = display_only_data_location
file_format = '<unknown>'
if display_only_file_format is not None:
file_format = display_only_file_format
model_locations = {}
for k, v in eval_shared_models.items():
model_locations[k] = ('<unknown>' if v is None or v.model_path is None
else v.model_path)
_ = (
examples.pipeline
| WriteEvalConfig(eval_config, output_path, data_location, file_format,
model_locations))
# pylint: enable=no-value-for-parameter
return beam.pvalue.PDone(examples.pipeline)
def run_model_analysis(
eval_shared_model: Optional[Union[types.EvalSharedModel,
Dict[Text,
types.EvalSharedModel]]] = None,
eval_config: config.EvalConfig = None,
data_location: Text = '',
file_format: Text = 'tfrecords',
output_path: Optional[Text] = None,
extractors: Optional[List[extractor.Extractor]] = None,
evaluators: Optional[List[evaluator.Evaluator]] = None,
writers: Optional[List[writer.Writer]] = None,
pipeline_options: Optional[Any] = None,
slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,
write_config: Optional[bool] = True,
compute_confidence_intervals: Optional[bool] = False,
k_anonymization_count: int = 1,
desired_batch_size: Optional[int] = None,
random_seed_for_testing: Optional[int] = None
) -> Union[EvalResult, EvalResults]:
"""Runs TensorFlow model analysis.
It runs a Beam pipeline to compute the slicing metrics exported in TensorFlow
Eval SavedModel and returns the results.
This is a simplified API for users who want to quickly get something running
locally. Users who wish to create their own Beam pipelines can use the
Evaluate PTransform instead.
Args:
eval_shared_model: Optional shared model (single-model evaluation) or dict
of shared models keyed by model name (multi-model evaluation). Only
required if needed by default extractors, evaluators, or writers.
eval_config: Eval config.
data_location: The location of the data files.
file_format: The file format of the data, can be either 'text' or
'tfrecords' for now. By default, 'tfrecords' will be used.
output_path: The directory to output metrics and results to. If None, we use
a temporary directory.
extractors: Optional list of Extractors to apply to Extracts. Typically
these will be added by calling the default_extractors function. If no
extractors are provided, default_extractors (non-materialized) will be
used.
evaluators: Optional list of Evaluators for evaluating Extracts. Typically
these will be added by calling the default_evaluators function. If no
evaluators are provided, default_evaluators will be used.
writers: Optional list of Writers for writing Evaluation output. Typically
these will be added by calling the default_writers function. If no writers
are provided, default_writers will be used.
pipeline_options: Optional arguments to run the Pipeline, for instance
whether to run directly.
slice_spec: Deprecated (use EvalConfig).
write_config: Deprecated (use EvalConfig).
compute_confidence_intervals: Deprecated (use EvalConfig).
k_anonymization_count: Deprecated (use EvalConfig).
desired_batch_size: Optional batch size for batching in Predict.
random_seed_for_testing: Provide for deterministic tests only.
Returns:
An EvalResult that can be used with the TFMA visualization functions.
Raises:
ValueError: If the file_format is unknown to us.
"""
_assert_tensorflow_version()
if output_path is None:
output_path = tempfile.mkdtemp()
if not tf.io.gfile.exists(output_path):
tf.io.gfile.makedirs(output_path)
if eval_config is None:
model_specs = []
eval_shared_models = eval_shared_model
if not isinstance(eval_shared_model, dict):
eval_shared_models = {'': eval_shared_model}
for model_name, shared_model in eval_shared_models.items():
example_weight_key = shared_model.example_weight_key
example_weight_keys = {}
if example_weight_key and isinstance(example_weight_key, dict):
example_weight_keys = example_weight_key
example_weight_key = ''
model_specs.append(
config.ModelSpec(
name=model_name,
example_weight_key=example_weight_key,
example_weight_keys=example_weight_keys))
slicing_specs = None
if slice_spec:
slicing_specs = [s.to_proto() for s in slice_spec]
options = config.Options()
options.compute_confidence_intervals.value = compute_confidence_intervals
options.k_anonymization_count.value = k_anonymization_count
if not write_config:
options.disabled_outputs.values.append(_EVAL_CONFIG_FILE)
eval_config = config.EvalConfig(
model_specs=model_specs, slicing_specs=slicing_specs, options=options)
with beam.Pipeline(options=pipeline_options) as p:
if file_format == 'tfrecords':
data = p | 'ReadFromTFRecord' >> beam.io.ReadFromTFRecord(
file_pattern=data_location,
compression_type=beam.io.filesystem.CompressionTypes.AUTO)
elif file_format == 'text':
data = p | 'ReadFromText' >> beam.io.textio.ReadFromText(data_location)
else:
raise ValueError('unknown file_format: {}'.format(file_format))
# pylint: disable=no-value-for-parameter
_ = (
data
| 'ExtractEvaluateAndWriteResults' >> ExtractEvaluateAndWriteResults(
eval_config=eval_config,
eval_shared_model=eval_shared_model,
display_only_data_location=data_location,
display_only_file_format=file_format,
output_path=output_path,
extractors=extractors,
evaluators=evaluators,
writers=writers,
desired_batch_size=desired_batch_size,
random_seed_for_testing=random_seed_for_testing))
# pylint: enable=no-value-for-parameter
if len(eval_config.model_specs) <= 1:
return load_eval_result(output_path)
else:
results = []
for spec in eval_config.model_specs:
results.append(load_eval_result(output_path, model_name=spec.name))
return EvalResults(results, constants.MODEL_CENTRIC_MODE)
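# End-to-end usage sketch (illustrative; all paths are placeholders):
#
#   eval_shared_model = default_eval_shared_model(
#       eval_saved_model_path='/path/to/eval_saved_model')
#   result = run_model_analysis(
#       eval_shared_model=eval_shared_model,
#       data_location='/path/to/examples-*.tfrecord',
#       output_path='/tmp/tfma_output')
#
# The returned EvalResult can then be rendered with the TFMA view helpers.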
def single_model_analysis(
model_location: Text,
data_location: Text,
output_path: Text = None,
slice_spec: Optional[List[slicer.SingleSliceSpec]] = None) -> EvalResult:
"""Run model analysis for a single model on a single data set.
This is a convenience wrapper around run_model_analysis for a single model
with a single data set. For more complex use cases, use
tfma.run_model_analysis.
Args:
model_location: Path to the export eval saved model.
data_location: The location of the data files.
output_path: The directory to output metrics and results to. If None, we use
a temporary directory.
slice_spec: A list of tfma.slicer.SingleSliceSpec.
Returns:
An EvalResult that can be used with the TFMA visualization functions.
"""
# Get working_dir ready.
if output_path is None:
output_path = tempfile.mkdtemp()
if not tf.io.gfile.exists(output_path):
tf.io.gfile.makedirs(output_path)
eval_config = config.EvalConfig(
slicing_specs=[s.to_proto() for s in slice_spec])
return run_model_analysis(
eval_config=eval_config,
eval_shared_model=default_eval_shared_model(
eval_saved_model_path=model_location),
data_location=data_location,
output_path=output_path) # pytype: disable=bad-return-type
def multiple_model_analysis(model_locations: List[Text], data_location: Text,
**kwargs) -> EvalResults:
"""Run model analysis for multiple models on the same data set.
Args:
model_locations: A list of paths to the export eval saved model.
data_location: The location of the data files.
**kwargs: The args used for evaluation. See tfma.single_model_analysis() for
details.
Returns:
A tfma.EvalResults containing all the evaluation results with the same order
as model_locations.
"""
results = []
for m in model_locations:
results.append(single_model_analysis(m, data_location, **kwargs))
return EvalResults(results, constants.MODEL_CENTRIC_MODE)
def multiple_data_analysis(model_location: Text, data_locations: List[Text],
**kwargs) -> EvalResults:
"""Run model analysis for a single model on multiple data sets.
Args:
model_location: The location of the exported eval saved model.
data_locations: A list of data set locations.
**kwargs: The args used for evaluation. See tfma.run_model_analysis() for
details.
Returns:
A tfma.EvalResults containing all the evaluation results with the same order
as data_locations.
"""
results = []
for d in data_locations:
results.append(single_model_analysis(model_location, d, **kwargs))
return EvalResults(results, constants.DATA_CENTRIC_MODE)
| 42.138347 | 82 | 0.721019 | [
"Apache-2.0"
] | Bobgy/model-analysis | tensorflow_model_analysis/api/model_eval_lib.py | 45,383 | Python |
#coding:utf-8
#
# id: bugs.core_5275
# title:        CORE-5275: Expression index may become inconsistent if CREATE INDEX was interrupted after b-tree creation but before committing
# decription:
# This test (and CORE- ticket) has been created after wrong initial implementation of test for CORE-1746.
# Scenario:
#                  1. ISQL_1 is launched as a child async process, inserts 1000 rows and then pauses (delays) for ~10 seconds;
#                  2. ISQL_2 is launched as a child async process in Tx = WAIT and tries to create an index on the table that is held
#                     by ISQL_1, so it immediately blocks while waiting for the table lock.
#                  3. ISQL_3 is launched in SYNC mode and does 'DELETE FROM MON$ATTACHMENTS', thus forcing the other attachments to be
#                     closed with SQLSTATE 08003/connection shutdown.
#                  4. Repeat step 1. On WI-T4.0.0.258 this step led to:
#                        "invalid SEND request (167), file: JrdStatement.cpp line: 325", 100% reproducible.
#
# Checked on WI-V2.5.6.27017 (SC), WI-V3.0.1.32539 (SS/SC/CS), WI-T4.0.0.262 (SS) - works fine.
#
#                  Besides the above-mentioned steps, we also:
# 1) compare content of old/new firebird.log (difference): it should NOT contain line "consistency check";
# 2) run database online validation: it should NOT report any error in the database.
#
# :::::::::::::::::::::::::::::::::::::::: NB ::::::::::::::::::::::::::::::::::::
# 18.08.2020. FB 4.x has incompatible behaviour with all previous versions since build 4.0.0.2131 (06-aug-2020):
# statement 'alter sequence <seq_name> restart with 0' changes rdb$generators.rdb$initial_value to -1 thus next call
# gen_id(<seq_name>,1) will return 0 (ZERO!) rather than 1.
# See also CORE-6084 and its fix: https://github.com/FirebirdSQL/firebird/commit/23dc0c6297825b2e9006f4d5a2c488702091033d
# ::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# This is considered as *expected* and is noted in doc/README.incompatibilities.3to4.txt
#
# Because of this, it was decided to replace 'alter sequence restart...' with subtraction of two gen values:
# c = gen_id(<g>, -gen_id(<g>, 0)) -- see procedure sp_restart_sequences.
#
# Checked on:
# 4.0.0.2164 SS: 15.932s.
# 4.0.0.2119 SS: 16.141s.
# 4.0.0.2164 CS: 17.549s.
# 3.0.7.33356 SS: 17.446s.
# 3.0.7.33356 CS: 18.321s.
# 2.5.9.27150 SC: 13.768s.
#
# tracker_id: CORE-5275
# min_versions: ['2.5.6']
# versions: 2.5.6
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 2.5.6
# resources: None
substitutions_1 = [('0: CREATE INDEX LOG: RDB_EXPR_BLOB.*', '0: CREATE INDEX LOG: RDB_EXPR_BLOB'), ('BULK_INSERT_START.*', 'BULK_INSERT_START'), ('.*KILLED BY DATABASE ADMINISTRATOR.*', ''), ('BULK_INSERT_FINISH.*', 'BULK_INSERT_FINISH'), ('CREATE_INDX_START.*', 'CREATE_INDX_START'), ('AFTER LINE.*', 'AFTER LINE'), ('RECORDS AFFECTED:.*', 'RECORDS AFFECTED:'), ('[0-9][0-9]:[0-9][0-9]:[0-9][0-9].[0-9][0-9]', ''), ('RELATION [0-9]{3,4}', 'RELATION')]
init_script_1 = """"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1
#---
# import os
# import time
# import difflib
# import subprocess
#
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password
# db_file=db_conn.database_name
# engine =str(db_conn.engine_version)
#
# db_conn.close()
#
# #--------------------------------------------
#
# def flush_and_close(file_handle):
# # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os
#
# file_handle.flush()
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
# os.fsync(file_handle.fileno())
# file_handle.close()
#
# #--------------------------------------------
#
# def cleanup( f_names_list ):
# global os
# for i in range(len( f_names_list )):
# if type(f_names_list[i]) == file:
# del_name = f_names_list[i].name
# elif type(f_names_list[i]) == str:
# del_name = f_names_list[i]
# else:
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
# del_name = None
#
# if del_name and os.path.isfile( del_name ):
# os.remove( del_name )
#
# #--------------------------------------------
#
# def svc_get_fb_log( engine, f_fb_log ):
#
# import subprocess
#
# if engine.startswith('2.5'):
# get_firebird_log_key='action_get_ib_log'
# else:
# get_firebird_log_key='action_get_fb_log'
#
#     # C:\MIX\firebird\oldfb251\bin\fbsvcmgr localhost:service_mgr -user sysdba -password masterkey action_get_ib_log
# subprocess.call([ context['fbsvcmgr_path'],
# "localhost:service_mgr",
# get_firebird_log_key
# ],
# stdout=f_fb_log, stderr=subprocess.STDOUT
# )
#
# return
#
# sql_ddl='''
# create or alter procedure sp_ins(n int) as begin end;
#
# recreate table test(x int unique using index test_x, s varchar(10) default 'qwerty' );
#
# set term ^;
# execute block as
# begin
# execute statement 'drop sequence g';
# when any do begin end
# end
# ^
# set term ;^
# commit;
# create sequence g;
# commit;
#
# set term ^;
# create or alter procedure sp_ins(n int) as
# begin
# while (n>0) do
# begin
# insert into test( x ) values( gen_id(g,1) );
# n = n - 1;
# end
# end
# ^
# set term ;^
# commit;
# '''
# runProgram('isql',[dsn],sql_ddl)
#
# f_fblog_before=open( os.path.join(context['temp_directory'],'tmp_5275_fblog_before.txt'), 'w')
# svc_get_fb_log( engine, f_fblog_before )
# flush_and_close( f_fblog_before )
#
# #########################################################
#
# rows_to_add=1000
#
# sql_bulk_insert=''' set bail on;
# set list on;
#
# -- DISABLED 19.08.2020: alter sequence g restart with 0;
#
# delete from test;
# commit;
# set transaction lock timeout 10; -- THIS LOCK TIMEOUT SERVES ONLY FOR DELAY, see below auton Tx start.
#
# select current_timestamp as bulk_insert_start from rdb$database;
# set term ^;
# execute block as
# declare i int;
# begin
# i = gen_id(g, -gen_id(g, 0)); -- restart sequence, since 19.08.2020
# execute procedure sp_ins( %(rows_to_add)s );
# begin
# -- #########################################################
# -- ####################### D E L A Y #####################
# -- #########################################################
# in autonomous transaction do
# insert into test( x ) values( %(rows_to_add)s ); -- this will cause delay because of duplicate in index
# when any do
# begin
# i = gen_id(g,1);
# end
# end
# end
# ^
# set term ;^
# commit;
# select current_timestamp as bulk_insert_finish from rdb$database;
# '''
#
# sql_create_indx=''' set bail on;
# set list on;
# set blob all;
# select
# iif( gen_id(g,0) > 0 and gen_id(g,0) < 1 + %(rows_to_add)s,
# 'OK, IS RUNNING',
# iif( gen_id(g,0) <=0,
# 'WRONG: not yet started, current gen_id='||gen_id(g,0),
# 'WRONG: already finished, rows_to_add='||%(rows_to_add)s ||', current gen_id='||gen_id(g,0)
# )
# ) as inserts_state,
# current_timestamp as create_indx_start
# from rdb$database;
# set autoddl off;
# commit;
#
# set echo on;
# set transaction %(tx_decl)s;
#
# create index test_%(idx_name)s on test computed by( %(idx_expr)s );
# set echo off;
# commit;
#
# select
# iif( gen_id(g,0) >= 1 + %(rows_to_add)s,
# 'OK, FINISHED',
# 'SOMETHING WRONG: current gen_id=' || gen_id(g,0)||', rows_to_add='||%(rows_to_add)s
# ) as inserts_state
# from rdb$database;
#
# set count on;
# select
# rdb$index_name
# ,coalesce(rdb$unique_flag,0) as rdb$unique_flag
# ,coalesce(rdb$index_inactive,0) as rdb$index_inactive
# ,rdb$expression_source as rdb_expr_blob
# from rdb$indices ri
# where ri.rdb$index_name = upper( 'test_%(idx_name)s' )
# ;
# set count off;
# set echo on;
# set plan on;
# select 1 from test where %(idx_expr)s > '' rows 0;
# set plan off;
# set echo off;
# commit;
# drop index test_%(idx_name)s;
# commit;
# '''
#
# sql_kill_att=''' set count on;
# set list on;
# commit;
# delete from mon$attachments where mon$attachment_id<>current_connection;
# '''
#
# f_kill_att_sql = open( os.path.join(context['temp_directory'],'tmp_5275_kill_att.sql' ), 'w')
# f_kill_att_sql.write( sql_kill_att )
# flush_and_close( f_kill_att_sql )
#
# tx_param=['WAIT','WAIT']
#
# for i in range(len(tx_param)):
#
# f_bulk_insert_sql = open( os.path.join(context['temp_directory'],'tmp_5275_ins.sql'), 'w')
# f_bulk_insert_sql.write(sql_bulk_insert % locals() )
# flush_and_close( f_bulk_insert_sql )
#
# tx_decl=tx_param[i]
# idx_name=tx_decl.replace(' ','_')
# idx_expr="'"+idx_name+"'|| s"
#
# f_create_indx_sql = open( os.path.join(context['temp_directory'],'tmp_5275_idx_%s.sql' % str(i) ), 'w')
# f_create_indx_sql.write( sql_create_indx % locals() )
# flush_and_close( f_create_indx_sql )
#
# f_bulk_insert_log = open( os.path.join(context['temp_directory'],'tmp_5275_ins_%s.log' % str(i) ), 'w')
# f_create_indx_log = open( os.path.join(context['temp_directory'],'tmp_5275_idx_%s.log' % str(i) ), 'w')
#
# p_bulk_insert=subprocess.Popen( [context['isql_path'], dsn, "-q", "-i", f_bulk_insert_sql.name ],
# stdout = f_bulk_insert_log,
# stderr = subprocess.STDOUT
# )
#
#     # 3.0 Classic: it seems to require at least 2 seconds for ISQL to be loaded into memory.
# time.sleep(2)
#
# p_create_indx=subprocess.Popen( [context['isql_path'], dsn, "-q", "-i", f_create_indx_sql.name ],
# stdout = f_create_indx_log,
# stderr = subprocess.STDOUT
# )
# time.sleep(2)
#
# f_kill_att_log = open( os.path.join(context['temp_directory'],'tmp_5275_kill_att.log' ), 'w')
#
# subprocess.call( [context['isql_path'], dsn, "-q", "-i", f_kill_att_sql.name ],
# stdout = f_kill_att_log,
# stderr = subprocess.STDOUT
# )
# flush_and_close( f_kill_att_log )
#
# # 11.05.2017, FB 4.0 only!
# # Following messages can appear after 'connection shutdown'
# # (letter from dimitr, 08-may-2017 20:41):
# # isc_att_shut_killed: Killed by database administrator
# # isc_att_shut_idle: Idle timeout expired
# # isc_att_shut_db_down: Database is shutdown
# # isc_att_shut_engine: Engine is shutdown
#
# # do NOT remove this delay, otherwise ISQL logs in 2.5.x will contain NO text with error message
# # STATEMENT FAILED, SQLSTATE = 08003 / CONNECTION SHUTDOWN:
# time.sleep(1)
#
# p_create_indx.terminate()
# p_bulk_insert.terminate()
#
# flush_and_close( f_bulk_insert_log )
# flush_and_close( f_create_indx_log )
#
# with open( f_bulk_insert_log.name,'r') as f:
# for line in f:
# if line.split():
# print( str(i)+': BULK INSERTS LOG: '+line.strip().upper() )
#
# with open( f_create_indx_log.name,'r') as f:
# for line in f:
# if line.split():
# print( str(i)+': CREATE INDEX LOG: '+line.strip().upper() )
#
# with open( f_kill_att_log.name,'r') as f:
# for line in f:
# if line.split():
# print( str(i)+': KILL ATTACH LOG: '+line.upper() )
#
#     # cleanup (intermediate):
# #########
# time.sleep(1)
# cleanup( (f_bulk_insert_sql, f_create_indx_sql, f_bulk_insert_log, f_create_indx_log, f_kill_att_log) )
#
# # ------------------------------------------------------------------------------------------
#
# f_fblog_after=open( os.path.join(context['temp_directory'],'tmp_5275_fblog_after.txt'), 'w')
# svc_get_fb_log( engine, f_fblog_after )
# flush_and_close( f_fblog_after )
#
# # Now we can compare two versions of firebird.log and check their difference.
# #################################
#
# oldfb=open(f_fblog_before.name, 'r')
# newfb=open(f_fblog_after.name, 'r')
#
# difftext = ''.join(difflib.unified_diff(
# oldfb.readlines(),
# newfb.readlines()
# ))
# oldfb.close()
# newfb.close()
#
# f_diff_txt=open( os.path.join(context['temp_directory'],'tmp_5275_diff.txt'), 'w')
# f_diff_txt.write(difftext)
# flush_and_close( f_diff_txt )
#
# # This should be empty:
# #######################
# with open( f_diff_txt.name,'r') as f:
# for line in f:
# # internal Firebird consistency check (invalid SEND request (167), file: JrdStatement.cpp line: 325)
# if 'consistency check' in line:
# print('NEW IN FIREBIRD.LOG: '+line.upper())
#
#
# #--------------------------------------------------------------------------------------------
#
# f_validate_log=open( os.path.join(context['temp_directory'],'tmp_5275_validate.log'), "w")
# f_validate_err=open( os.path.join(context['temp_directory'],'tmp_5275_validate.err'), "w")
#
# subprocess.call([context['fbsvcmgr_path'],"localhost:service_mgr",
# "action_validate",
# "dbname", "$(DATABASE_LOCATION)bugs.core_5275.fdb"
# ],
# stdout=f_validate_log,
# stderr=f_validate_err)
# flush_and_close( f_validate_log )
# flush_and_close( f_validate_err )
#
# with open( f_validate_log.name,'r') as f:
# for line in f:
# if line.split():
# print( 'VALIDATION STDOUT: '+line.upper() )
#
# with open( f_validate_err.name,'r') as f:
# for line in f:
# if line.split():
# print( 'VALIDATION STDERR: '+line.upper() )
#
# # cleanup
# #########
# time.sleep(1)
# cleanup( (f_validate_log, f_validate_err, f_kill_att_sql, f_fblog_before, f_fblog_after, f_diff_txt) )
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
0: BULK INSERTS LOG: BULK_INSERT_START
0: BULK INSERTS LOG: STATEMENT FAILED, SQLSTATE = 08003
0: BULK INSERTS LOG: CONNECTION SHUTDOWN
0: BULK INSERTS LOG: AFTER LINE
0: CREATE INDEX LOG: INSERTS_STATE OK, IS RUNNING
0: CREATE INDEX LOG: CREATE_INDX_START
0: CREATE INDEX LOG: SET TRANSACTION WAIT;
0: CREATE INDEX LOG: CREATE INDEX TEST_WAIT ON TEST COMPUTED BY( 'WAIT'|| S );
0: CREATE INDEX LOG: SET ECHO OFF;
0: CREATE INDEX LOG: STATEMENT FAILED, SQLSTATE = 08003
0: CREATE INDEX LOG: CONNECTION SHUTDOWN
0: CREATE INDEX LOG: AFTER LINE
0: KILL ATTACH LOG: RECORDS AFFECTED:
1: BULK INSERTS LOG: BULK_INSERT_START
1: BULK INSERTS LOG: STATEMENT FAILED, SQLSTATE = 08003
1: BULK INSERTS LOG: CONNECTION SHUTDOWN
1: BULK INSERTS LOG: AFTER LINE
1: CREATE INDEX LOG: INSERTS_STATE OK, IS RUNNING
1: CREATE INDEX LOG: CREATE_INDX_START
1: CREATE INDEX LOG: SET TRANSACTION WAIT;
1: CREATE INDEX LOG: CREATE INDEX TEST_WAIT ON TEST COMPUTED BY( 'WAIT'|| S );
1: CREATE INDEX LOG: SET ECHO OFF;
1: CREATE INDEX LOG: STATEMENT FAILED, SQLSTATE = 08003
1: CREATE INDEX LOG: CONNECTION SHUTDOWN
1: CREATE INDEX LOG: AFTER LINE
1: KILL ATTACH LOG: RECORDS AFFECTED:
VALIDATION STDOUT: 20:05:26.86 VALIDATION STARTED
VALIDATION STDOUT: 20:05:26.86 RELATION 128 (TEST)
VALIDATION STDOUT: 20:05:26.86 PROCESS POINTER PAGE 0 OF 1
VALIDATION STDOUT: 20:05:26.86 INDEX 1 (TEST_X)
VALIDATION STDOUT: 20:05:26.86 RELATION 128 (TEST) IS OK
VALIDATION STDOUT: 20:05:26.86 VALIDATION FINISHED
"""
@pytest.mark.version('>=2.5.6')
@pytest.mark.xfail
def test_1(db_1):
pytest.fail("Test not IMPLEMENTED")
| 39.711712 | 452 | 0.556148 | [
"MIT"
] | artyom-smirnov/firebird-qa | tests/bugs/core_5275_test.py | 17,632 | Python |
from odroid_go import GO
from .Block import Block
from .Snake import Snake
SNAKE_COLOR = GO.lcd.colors.GREEN
BACKGROUND_COLOR = GO.lcd.colors.BLACK
FOOD_COLOR = GO.lcd.colors.RED
BORDER_COLOR = GO.lcd.colors.WHITE
SCREEN_WIDTH = 320
SCREEN_HEIGHT = 240
BLOCK_SIZE = 10
#Where borders are drawn
INIT_X = 0
INIT_Y = 20
#Initial position of snake; relative to borders
SNAKEINIT_X = 40
SNAKEINIT_Y = 10
#Initial direction of snake
INITIALDIRECTION = 4
#Directions
#1: Forward
#2: Backward
#3: Left
#4: Right
def FillRectangle(block,color):
GO.lcd.fill_rectangle(block.x,block.y,block.width,block.length,color)
def FillRectangles(blocks,color):
for block in blocks:
GO.lcd.fill_rectangle(block.x,block.y,block.width,block.length,color)
def DrawSnake(snake):
FillRectangle(snake.Head,SNAKE_COLOR)
if not snake.BlockBehindTail == None:
FillRectangle(snake.BlockBehindTail,BACKGROUND_COLOR)
def LostTone():
GO.speaker.tone(300,0.2)
def FoodTone():
GO.speaker.tone(400,0.2)
def WinTone():
GO.speaker.tone(500,0.2)
CAR_BODY = GO.lcd.colors.WHITE
CAR_TIRES = GO.lcd.colors.GREEN
CAR_FRONT_LIGHTS =GO.lcd.colors.GREEN
INDICATION = GO.lcd.colors.RED
ALPHABET = ["A","B","C","D","E","F","G","H","I","J","K","L","M","N","O","P","Q","R","S","T","U","V","W","X","Y","Z"]
ALPHABET_COUNT = len(ALPHABET) | 23.155172 | 116 | 0.711095 | [
"MIT"
] | willemserruys/Snake | src/snake/Entities/Globals.py | 1,343 | Python |
"""
Tests for Reactions
"""
from src.common import constants as cn
from src.common.simple_sbml import SimpleSBML
from src.common import simple_sbml
from src.common.function_definition import FunctionDefinition
from tests.common import helpers
import copy
import libsbml
import numpy as np
import unittest
IGNORE_TEST = False
IS_PLOT = False
#############################
# Tests
#############################
class TestFunctionDefinition(unittest.TestCase):
def setUp(self):
self.simple = helpers.getSimple_BIOMD56()
self.function_definition = FunctionDefinition(
self.simple.model.getFunctionDefinition(0))
def testConstructor(self):
if IGNORE_TEST:
return
self.assertEqual(len(self.function_definition.argument_names), 4)
if __name__ == '__main__':
unittest.main()
| 21.368421 | 69 | 0.716749 | [
"MIT"
] | ModelEngineering/kinetics_validator | tests/common/test_function_definition.py | 812 | Python |
from typing import FrozenSet
from collections.abc import Iterable
from math import log, ceil
from mathsat import msat_term, msat_env
from mathsat import msat_make_constant, msat_declare_function
from mathsat import msat_get_integer_type, msat_get_rational_type, msat_get_bool_type
from mathsat import msat_make_and, msat_make_not, msat_make_or, msat_make_iff
from mathsat import msat_make_leq, msat_make_equal, msat_make_true
from mathsat import msat_make_number, msat_make_plus, msat_make_times
from pysmt.environment import Environment as PysmtEnv
import pysmt.typing as types
from ltl.ltl import TermMap, LTLEncoder
from utils import name_next, symb_to_next
from hint import Hint, Location
delta_name = "delta"
def decl_consts(menv: msat_env, name: str, c_type) -> tuple:
assert not name.startswith("_"), name
s = msat_declare_function(menv, name, c_type)
s = msat_make_constant(menv, s)
x_s = msat_declare_function(menv, name_next(name), c_type)
x_s = msat_make_constant(menv, x_s)
return s, x_s
def make_enum(menv, v_name: str, enum_size: int):
bool_type = msat_get_bool_type(menv)
num_bits = ceil(log(enum_size, 2))
b_vars = []
for idx in range(num_bits):
c_name = "{}{}".format(v_name, idx)
b_vars.append(tuple(decl_consts(menv, c_name, bool_type)))
vals = []
x_vals = []
for enum_val in range(enum_size):
bit_val = format(enum_val, '0{}b'.format(num_bits))
assert len(bit_val) == num_bits
assert all(c in {'0', '1'} for c in bit_val)
assign = [b_vars[idx] if c == '1' else
(msat_make_not(menv, b_vars[idx][0]),
msat_make_not(menv, b_vars[idx][1]))
for idx, c in enumerate(reversed(bit_val))]
pred = assign[0][0]
x_pred = assign[0][1]
for it in assign[1:]:
pred = msat_make_and(menv, pred, it[0])
x_pred = msat_make_and(menv, x_pred, it[1])
vals.append(pred)
x_vals.append(x_pred)
assert len(vals) == enum_size
assert len(x_vals) == enum_size
return b_vars, vals, x_vals
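# Worked example of the boolean encoding above (hypothetical variable names):
# with enum_size = 3, num_bits = ceil(log(3, 2)) = 2, so two boolean constants
# b0, b1 (plus their primed copies) are created and the value predicates are:
#   value 0 -> (not b0) & (not b1)
#   value 1 ->      b0  & (not b1)
#   value 2 -> (not b0) &      b1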
def msat_make_minus(menv: msat_env, arg0: msat_term, arg1: msat_term):
m_one = msat_make_number(menv, "-1")
arg1 = msat_make_times(menv, arg1, m_one)
return msat_make_plus(menv, arg0, arg1)
def msat_make_lt(menv: msat_env, arg0: msat_term, arg1: msat_term):
geq = msat_make_geq(menv, arg0, arg1)
return msat_make_not(menv, geq)
def msat_make_geq(menv: msat_env, arg0: msat_term, arg1: msat_term):
return msat_make_leq(menv, arg1, arg0)
def msat_make_gt(menv: msat_env, arg0: msat_term, arg1: msat_term):
leq = msat_make_leq(menv, arg0, arg1)
return msat_make_not(menv, leq)
def msat_make_impl(menv: msat_env, arg0: msat_term, arg1: msat_term):
n_arg0 = msat_make_not(menv, arg0)
return msat_make_or(menv, n_arg0, arg1)
def diverging_symbs(menv: msat_env) -> frozenset:
real_type = msat_get_rational_type(menv)
delta = msat_declare_function(menv, delta_name, real_type)
delta = msat_make_constant(menv, delta)
return frozenset([delta])
def check_ltl(menv: msat_env, enc: LTLEncoder) -> (Iterable, msat_term,
msat_term, msat_term):
assert menv
assert isinstance(menv, msat_env)
assert enc
assert isinstance(enc, LTLEncoder)
int_type = msat_get_integer_type(menv)
real_type = msat_get_rational_type(menv)
r2s, x_r2s = decl_consts(menv, "r2s", int_type)
s2r, x_s2r = decl_consts(menv, "s2r", int_type)
delta, x_delta = decl_consts(menv, delta_name, real_type)
sender = Sender("s", menv, enc, r2s, x_r2s, s2r, x_s2r, delta)
receiver = Receiver("r", menv, enc, s2r, x_s2r, r2s, x_r2s, delta)
curr2next = {r2s: x_r2s, s2r: x_s2r, delta: x_delta}
for comp in [sender, receiver]:
for s, x_s in comp.symb2next.items():
curr2next[s] = x_s
zero = msat_make_number(menv, "0")
init = msat_make_and(menv, receiver.init, sender.init)
trans = msat_make_and(menv, receiver.trans, sender.trans)
# invar delta >= 0
init = msat_make_and(menv, init,
msat_make_geq(menv, delta, zero))
trans = msat_make_and(menv, trans,
msat_make_geq(menv, x_delta, zero))
# delta > 0 -> (r2s' = r2s & s2r' = s2r)
lhs = msat_make_gt(menv, delta, zero)
rhs = msat_make_and(menv,
msat_make_equal(menv, x_r2s, r2s),
msat_make_equal(menv, x_s2r, s2r))
trans = msat_make_and(menv, trans,
msat_make_impl(menv, lhs, rhs))
# (G F !s.stutter) -> G (s.wait_ack -> F s.send)
lhs = enc.make_G(enc.make_F(msat_make_not(menv, sender.stutter)))
rhs = enc.make_G(msat_make_impl(menv, sender.wait_ack,
enc.make_F(sender.send)))
ltl = msat_make_impl(menv, lhs, rhs)
return TermMap(curr2next), init, trans, ltl
class Module:
def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
*args, **kwargs):
self.name = name
self.menv = menv
self.enc = enc
self.symb2next = {}
true = msat_make_true(menv)
self.init = true
self.trans = true
def _symb(self, v_name, v_type):
v_name = "{}_{}".format(self.name, v_name)
return decl_consts(self.menv, v_name, v_type)
def _enum(self, v_name: str, enum_size: int):
c_name = "{}_{}".format(self.name, v_name)
return make_enum(self.menv, c_name, enum_size)
class Sender(Module):
def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
in_c, x_in_c, out_c, x_out_c, delta):
super().__init__(name, menv, enc)
bool_type = msat_get_bool_type(menv)
int_type = msat_get_integer_type(menv)
real_type = msat_get_rational_type(menv)
loc, x_loc = self._symb("l", bool_type)
evt, x_evt = self._symb("evt", bool_type)
msg_id, x_msg_id = self._symb("msg_id", int_type)
timeout, x_timeout = self._symb("timeout", real_type)
c, x_c = self._symb("c", real_type)
self.move = evt
self.stutter = msat_make_not(menv, evt)
self.x_move = x_evt
self.x_stutter = msat_make_not(menv, x_evt)
self.send = loc
self.wait_ack = msat_make_not(menv, loc)
self.x_send = x_loc
self.x_wait_ack = msat_make_not(menv, x_loc)
self.symb2next = {loc: x_loc, evt: x_evt, msg_id: x_msg_id,
timeout: x_timeout, c: x_c}
zero = msat_make_number(menv, "0")
one = msat_make_number(menv, "1")
base_timeout = one
# send & c = 0 & msg_id = 0
self.init = msat_make_and(menv,
msat_make_and(menv, self.send,
msat_make_equal(menv, c,
zero)),
msat_make_equal(menv, msg_id, zero))
# invar: wait_ack -> c <= timeout
self.init = msat_make_and(
menv, self.init,
msat_make_impl(menv, self.wait_ack,
msat_make_leq(menv, c, timeout)))
self.trans = msat_make_impl(menv, self.x_wait_ack,
msat_make_leq(menv, x_c, x_timeout))
# delta > 0 | stutter -> l' = l & msg_id' = msg_id & timeout' = timeout &
# c' = c + delta & out_c' = out_c
lhs = msat_make_or(menv, msat_make_gt(menv, delta, zero), self.stutter)
rhs = msat_make_and(
menv,
msat_make_and(menv,
msat_make_iff(menv, x_loc, loc),
msat_make_equal(menv, x_msg_id, msg_id)),
msat_make_and(menv,
msat_make_equal(menv, x_timeout, timeout),
msat_make_equal(menv, x_c,
msat_make_plus(menv, c, delta))))
rhs = msat_make_and(menv, rhs,
msat_make_equal(menv, x_out_c, out_c))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
disc_t = msat_make_and(menv, self.move,
msat_make_equal(menv, delta, zero))
# (send & send') ->
# (msg_id' = msg_id & timeout' = base_timeout & c' = 0 & out_c' = out_c)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.send, self.x_send))
rhs = msat_make_and(
menv,
msat_make_and(menv,
msat_make_equal(menv, x_msg_id, msg_id),
msat_make_equal(menv, x_timeout, base_timeout)),
msat_make_and(menv,
msat_make_equal(menv, x_c, zero),
msat_make_equal(menv, x_out_c, out_c)))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (send & wait_ack') ->
# (msg_id' = msg_id + 1 & timeout' = base_timeout & c' = 0 & out_c' = out_c)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.send, self.x_wait_ack))
rhs = msat_make_and(
menv,
msat_make_and(menv,
msat_make_equal(menv, x_msg_id,
msat_make_plus(menv, msg_id, one)),
msat_make_equal(menv, x_timeout, base_timeout)),
msat_make_and(menv,
msat_make_equal(menv, x_c, zero),
msat_make_equal(menv, x_out_c, out_c)))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (wait_ack) -> (c' = 0 & out_c' = out_c &
        #                (wait_ack' <-> (in_c != msg_id & c >= timeout))
lhs = msat_make_and(menv, disc_t, self.wait_ack)
rhs_iff = msat_make_and(menv,
msat_make_not(menv,
msat_make_equal(menv, in_c,
msg_id)),
msat_make_geq(menv, c, timeout))
rhs_iff = msat_make_iff(menv, self.x_wait_ack, rhs_iff)
rhs = msat_make_and(menv,
msat_make_and(menv,
msat_make_equal(menv, x_c, zero),
msat_make_equal(menv, x_out_c,
out_c)),
rhs_iff)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (wait_ack & wait_ack') -> (timeout' > timeout)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.wait_ack,
self.x_wait_ack))
rhs = msat_make_gt(menv, x_timeout, timeout)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (wait_ack) -> (send' <-> (in_c = msg_id & c < timeout))
lhs = msat_make_and(menv, disc_t, self.wait_ack)
rhs = msat_make_iff(menv, self.x_send,
msat_make_and(menv,
msat_make_equal(menv, in_c, msg_id),
msat_make_lt(menv, c, timeout)))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (wait_ack & send') -> (timeout' = base_timeout)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.wait_ack, self.x_send))
rhs = msat_make_equal(menv, x_timeout, base_timeout)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
class Receiver(Module):
def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
in_c, x_in_c, out_c, x_out_c, delta):
super().__init__(name, menv, enc)
bool_type = msat_get_bool_type(menv)
loc, x_loc = self._symb("l", bool_type)
self.wait = loc
self.work = msat_make_not(menv, loc)
self.x_wait = x_loc
self.x_work = msat_make_not(menv, x_loc)
self.symb2next = {loc: x_loc}
zero = msat_make_number(menv, "0")
# wait
self.init = self.wait
# delta > 0 -> loc' = loc & out_c' = out_c
lhs = msat_make_gt(menv, delta, zero)
rhs = msat_make_and(menv,
msat_make_iff(menv, x_loc, loc),
msat_make_equal(menv, x_out_c, out_c))
self.trans = msat_make_impl(menv, lhs, rhs)
disc_t = msat_make_equal(menv, delta, zero)
# wait -> (wait' <-> in_c = out_c)
lhs = msat_make_and(menv, disc_t, self.wait)
rhs = msat_make_iff(menv, self.x_wait,
msat_make_equal(menv, in_c, out_c))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (wait & wait') -> (out_c' = out_c)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.wait, self.x_wait))
rhs = msat_make_equal(menv, x_out_c, out_c)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (wait & work') -> out_c' = in_c
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.wait, self.x_work))
rhs = msat_make_equal(menv, x_out_c, in_c)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# work -> out_c' = out_c
lhs = msat_make_and(menv, disc_t, self.work)
rhs = msat_make_equal(menv, x_out_c, out_c)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
delta = mgr.Symbol(delta_name, types.REAL)
r2s = mgr.Symbol("r2s", types.INT)
    s2r = mgr.Symbol("s2r", types.INT)
s_l = mgr.Symbol("s_l", types.BOOL)
s_evt = mgr.Symbol("s_evt", types.BOOL)
s_msg_id = mgr.Symbol("s_msg_id", types.INT)
s_timeout = mgr.Symbol("s_timeout", types.REAL)
s_c = mgr.Symbol("s_c", types.REAL)
r_l = mgr.Symbol("r_l", types.BOOL)
symbs = frozenset([delta, r2s, s2r, s_l, s_evt, s_msg_id, s_timeout, s_c,
r_l])
x_delta = symb_to_next(mgr, delta)
x_r2s = symb_to_next(mgr, r2s)
x_s2r = symb_to_next(mgr, s2r)
x_s_l = symb_to_next(mgr, s_l)
x_s_evt = symb_to_next(mgr, s_evt)
x_s_msg_id = symb_to_next(mgr, s_msg_id)
x_s_timeout = symb_to_next(mgr, s_timeout)
x_s_c = symb_to_next(mgr, s_c)
x_r_l = symb_to_next(mgr, r_l)
res = []
r0 = mgr.Real(0)
r1 = mgr.Real(1)
i0 = mgr.Int(0)
i1 = mgr.Int(1)
loc0 = Location(env, mgr.Equals(delta, r0))
loc0.set_progress(0, mgr.Equals(x_delta, r0))
hint = Hint("h_delta0", env, frozenset([delta]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.Equals(s2r, i0))
loc0.set_progress(0, mgr.Equals(x_s2r, i0))
hint = Hint("h_s2r0", env, frozenset([s2r]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, s_evt)
loc0.set_progress(0, x_s_evt)
hint = Hint("h_s_evt0", env, frozenset([s_evt]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.Equals(s_msg_id, i0))
loc0.set_progress(0, mgr.Equals(x_s_msg_id, i0))
hint = Hint("h_s_msg_id0", env, frozenset([s_msg_id]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.Equals(s_c, r0))
loc0.set_progress(0, mgr.Equals(x_s_c, r0))
hint = Hint("h_s_c0", env, frozenset([s_c]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, r_l)
loc0.set_progress(0, x_r_l)
hint = Hint("h_r_l0", env, frozenset([r_l]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.GE(s2r, i0))
loc0.set_progress(0, mgr.Equals(x_s2r, i1))
hint = Hint("h_s2r1", env, frozenset([s2r]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.GE(r2s, i0))
loc0.set_progress(0, mgr.Equals(x_r2s, i1))
hint = Hint("h_r2s1", env, frozenset([r2s]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, s_l)
loc0.set_progress(1, mgr.Not(x_s_l))
loc1 = Location(env, mgr.Not(s_l))
loc1.set_progress(0, x_s_l)
hint = Hint("h_s_l1", env, frozenset([s_l]), symbs)
hint.set_locs([loc0, loc1])
res.append(hint)
loc0 = Location(env, s_evt)
loc0.set_progress(1, mgr.Not(x_s_evt))
loc1 = Location(env, mgr.Not(s_evt))
loc1.set_progress(0, x_s_evt)
hint = Hint("h_s_evt1", env, frozenset([s_evt]), symbs)
hint.set_locs([loc0, loc1])
res.append(hint)
loc0 = Location(env, mgr.GE(s_msg_id, i0))
loc0.set_progress(0, mgr.Equals(x_s_msg_id, mgr.Plus(s_msg_id, i1)))
hint = Hint("h_s_msg_id1", env, frozenset([s_msg_id]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.GE(s_timeout, r0))
loc0.set_progress(0, mgr.Equals(x_s_timeout, mgr.Plus(s_timeout, r1)))
hint = Hint("h_s_timeout1", env, frozenset([s_timeout]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, r_l)
loc0.set_progress(1, mgr.Not(x_r_l))
loc1 = Location(env, mgr.Not(r_l))
loc1.set_progress(0, x_r_l)
hint = Hint("h_r_l1", env, frozenset([r_l]), symbs)
hint.set_locs([loc0, loc1])
res.append(hint)
loc0 = Location(env, mgr.GE(delta, r0))
loc0.set_progress(0, mgr.Equals(x_delta, mgr.Plus(delta, r1)))
hint = Hint("h_delta2", env, frozenset([delta]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.GE(s2r, i0))
loc0.set_progress(0, mgr.Equals(x_s2r, mgr.Plus(s2r, i1)))
hint = Hint("h_s2r2", env, frozenset([s2r]), symbs)
hint.set_locs([loc0])
res.append(hint)
return frozenset(res)
| 38.388889 | 89 | 0.577049 | [
"MIT"
] | EnricoMagnago/F3 | benchmarks/f3_wrong_hints/scaling_ltl_timed_transition_system/15-sender_receiver_10.py | 18,657 | Python |
'''
Deals with the actual detection of signals in multichannel audio files.
There are two problems that need to solved while detecting a signal of interest.
#. within-channel signal detection
#. across-channel correspondence matching
Within-channel signal detection
-------------------------------
This task involves `locally` checking if there are any signals of interest in one channel at a time. The exact methods used for
the within-channel can be set by the user, though the simplest is of course a basic threshold-type detector. Whenever the
signal goes beyond a particular threshold, a signal is considered to be in that region.
Built-in detection routines
---------------------------
The detection module has a few simple detection routines. More advanced routines
are unlikely to form a core part of the package, and need to be written by the
user.
#. dBrms_detector : Calculates the moving dB rms profile of an audio clip. The
User needs to define the size of the moving window and the threshold in dB rms.
#. envelope_detector : Generates the Hilbert envelop of the audio clip. Regions above
the set threshold in dB peak amplitude are defined as detections. This method is faster
than the dBrms_detector.
'''
import matplotlib.pyplot as plt
plt.rcParams['agg.path.chunksize']=10000
import numpy as np
import scipy.signal as signal
import scipy.io.wavfile as wav
import scipy.ndimage as ndimage
import tqdm
from batracker.common_dsp.sigproc import *
def cross_channel_threshold_detector(multichannel, fs, **kwargs):
'''
Parameters
----------
multichannel : np.array
Msamples x Nchannels audio data
fs : float >0
detector_function : function, optional
The function used to detect the start and end of a signal.
Any custom detector function can be given, the compulsory inputs
are audio np.array, sample rate and the function should accept keyword
arguments (even if it doesn't use them.)
Defaults to dBrms_detector.
Returns
-------
all_detections : list
A list with sublists containing start-stop times of the detections
in each channel. Each sublist contains the detections in one channel.
Notes
-----
For further keyword arguments see the `threshold_detector` function
See Also
--------
dBrms_detector
'''
samples, channels = multichannel.shape
detector_function = kwargs.get('detector_function', dBrms_detector)
print(channels, samples)
all_detections = []
for each in tqdm.tqdm(range(channels)):
all_detections.append(detector_function(multichannel[:,each], fs, **kwargs))
return all_detections
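# Minimal usage sketch (the file name and threshold values below are assumptions,
# not part of this module):
# fs, audio = wav.read('multichannel_recording.wav')  # audio: Msamples x Nchannels
# detections = cross_channel_threshold_detector(audio, fs,
#                                               detector_function=dBrms_detector,
#                                               dbrms_threshold=-40, dbrms_window=0.001)
# detections[0] is then a list of (start, stop) time pairs, in seconds, for channel 0.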
def dBrms_detector(one_channel, fs, **kwargs):
'''
    Calculates the dB rms profile of the input audio and
    selects regions where the profile is above the threshold.
Parameters
----------
one_channel
fs
dbrms_threshold: float, optional
Defaults to -50 dB rms
dbrms_window: float, optional
The window which is used to calculate the dB rms profile
in seconds. Defaults to 0.001 seconds.
Returns
-------
detections : list with tuples
Each tuple corresponds to a candidate signal region
'''
if one_channel.ndim > 1:
raise IndexError(f'Input audio must be flattened, and have only 1 dimension. \
Current audio has {one_channel.ndim} dimensions')
dbrms_window = kwargs.get('dbrms_window',0.001) # seconds
dbrms_threshold = kwargs.get('dbrms_threshold', -50)
window_samples = int(fs*dbrms_window)
dBrms_profile = dB(moving_rms(one_channel, window_size=window_samples))
labelled, num_regions = ndimage.label(dBrms_profile>dbrms_threshold)
if num_regions==0:
print (f'No regions above threshold: {dbrms_threshold} dBrms found in this channel!')
regions_above = ndimage.find_objects(labelled.flatten())
regions_above_timestamps = [get_start_stop_times(each, fs) for each in regions_above]
return regions_above_timestamps
def envelope_detector(audio, fs, **kwargs):
'''
Generates the Hilbert envelope of the audio. Signals are detected
wherever the envelope goes beyond a user-defined threshold value.
Two main options are to segment loud signals with reference to dB peak or
with reference dB above floor level.
Parameters
----------
audio
fs
Keyword Arguments
-----------------
threshold_db_floor: float, optional
The threshold for signal detection in dB above the floor level. The 5%ile level of the whole envelope is chosen as
the floor level. If not specified, then threshold_dbpeak is used to segment signals.
threshold_dbpeak : float, optional
The value beyond which a signal is considered to start.
Used only if relative_to_baseline is True.
lowpass_durn: float, optional
The highest time-resolution of envelope fluctuation to keep.
This effectively performs a low-pass at 1/lowpass_durn Hz on the raw envelope
signal.
Returns
-------
regions_above_timestamps
'''
envelope = np.abs(signal.hilbert(audio))
if not kwargs.get('lowpass_durn') is None:
lowpass_durn = kwargs['lowpass_durn'] # seconds
freq = 1.0/lowpass_durn
b,a = signal.butter(1, freq/(fs*0.5),'lowpass')
envelope = signal.filtfilt(b,a,envelope)
if not kwargs.get('threshold_db_floor', None) is None:
floor_level = np.percentile(20*np.log10(envelope),5)
threshold_db = floor_level + kwargs['threshold_db_floor']
else:
# get regions above the threshold
threshold_db = kwargs['threshold_dbpeak']
linear_threshold = 10**(threshold_db/20)
labelled, num_detections = ndimage.label(envelope>=linear_threshold)
regions_above = ndimage.find_objects(labelled.flatten())
regions_above_timestamps = [get_start_stop_times(each, fs ) for each in regions_above]
return regions_above_timestamps
def get_start_stop_times(findobjects_tuple, fs):
'''
'''
only_tuple = findobjects_tuple[0]
start, stop = only_tuple.start/fs, only_tuple.stop/fs
return start, stop
def moving_rms(X, **kwargs):
'''Calculates moving rms of a signal with given window size.
    Outputs np.array of *same* size as X. The last few samples,
    which are <= window_size away from the end and have no full
    window available, are set to NaN.
Parameters
----------
X : np.array
Signal of interest.
window_size : int, optional
Defaults to 125 samples.
Returns
-------
all_rms : np.array
Moving rms of the signal.
'''
window_size = kwargs.get('window_size', 125)
starts = np.arange(0, X.size)
stops = starts+window_size
valid = stops<X.size
valid_starts = np.int32(starts[valid])
valid_stops = np.int32(stops[valid])
all_rms = np.ones(X.size).reshape(-1,1)*999
for i, (start, stop) in enumerate(zip(valid_starts, valid_stops)):
rms_value = rms(X[start:stop])
all_rms[i] = rms_value
    # mark all unassigned trailing samples (no full window available) as NaN
all_rms[all_rms==999] = np.nan
return all_rms
#
#if __name__ == '__main__':
# import scipy.signal as signal
# # trying out the hilbert envelope method:
# fs = 250000
# background = -60 # dB rms
# audio = np.random.normal(0, 10**(background/20), fs)
# duration = 0.005
# sound_start = 0.05
# t = np.linspace(0, duration, int(fs*duration))
# bat_call = signal.chirp(t,90000, 25000, t[-1])
# bat_call *= 0.5
# sound_stop = sound_start+duration
#
# start, end = np.int32(np.array([sound_start,
# sound_stop])*fs)
# audio[start:end] += bat_call
#
# envelope = np.abs(signal.hilbert(audio))
#
# dets = envelope_detector(audio, fs, threshold_dbpeak=-20)
# print(dets)
##
| 33.912134 | 128 | 0.672424 | [
"MIT"
] | thejasvibr/batracker | batracker/signal_detection/detection.py | 8,105 | Python |
import unittest
from ansiblelint.rules import RulesCollection
from ansiblelint.runner import Runner
from ansiblelints.stage.IncompleteConditional import IncompleteConditional
class TestIncompleteConditional(unittest.TestCase):
collection = RulesCollection()
def setUp(self):
self.collection.register(IncompleteConditional())
def test_file(self):
file_name = 'testResources/ansible-smell/hardcodepassword5.yml'
good_runner = Runner(file_name, rules=self.collection)
print(good_runner.run())
| 27.05 | 74 | 0.770795 | [
"Apache-2.0"
] | SODALITE-EU/defect-prediction | ansible/tests/TestIncompleteConditional.py | 541 | Python |
# coding: utf-8
"""
Run the tests.
$ pip install nose (optional)
$ cd swagger_client-python
$ nosetests -v
"""
import os
import sys
import time
import unittest
import swagger_client
from swagger_client.rest import ApiException
class ApiExceptionTests(unittest.TestCase):
def setUp(self):
self.api_client = swagger_client.ApiClient()
self.pet_api = swagger_client.PetApi(self.api_client)
self.setUpModels()
def setUpModels(self):
self.category = swagger_client.Category()
self.category.id = int(time.time())
self.category.name = "dog"
self.tag = swagger_client.Tag()
self.tag.id = int(time.time())
self.tag.name = "blank"
self.pet = swagger_client.Pet()
self.pet.id = int(time.time())
self.pet.name = "hello kity"
self.pet.photo_urls = ["http://foo.bar.com/1", "http://foo.bar.com/2"]
self.pet.status = "sold"
self.pet.category = self.category
self.pet.tags = [self.tag]
def tearDown(self):
time.sleep(1)
def test_404_error(self):
self.pet_api.add_pet(body=self.pet)
self.pet_api.delete_pet(pet_id=self.pet.id)
with self.checkRaiseRegex(ApiException, "Pet not found"):
self.pet_api.get_pet_by_id(pet_id=self.pet.id)
try:
self.pet_api.get_pet_by_id(pet_id=self.pet.id)
except ApiException as e:
self.assertEqual(e.status, 404)
self.assertEqual(e.reason, "Not Found")
self.checkRegex(e.body, "Pet not found")
def test_500_error(self):
self.pet_api.add_pet(body=self.pet)
with self.checkRaiseRegex(ApiException, "Internal Server Error"):
self.pet_api.upload_file(
pet_id=self.pet.id,
additional_metadata="special",
file=None
)
try:
self.pet_api.upload_file(
pet_id=self.pet.id,
additional_metadata="special",
file=None
)
except ApiException as e:
self.assertEqual(e.status, 500)
self.assertEqual(e.reason, "Internal Server Error")
self.checkRegex(e.body, "Error 500 Internal Server Error")
def checkRaiseRegex(self, expected_exception, expected_regex):
if sys.version_info < (3, 0):
return self.assertRaisesRegexp(expected_exception, expected_regex)
return self.assertRaisesRegex(expected_exception, expected_regex)
def checkRegex(self, text, expected_regex):
if sys.version_info < (3, 0):
return self.assertRegexpMatches(text, expected_regex)
return self.assertRegex(text, expected_regex)
| 30.164835 | 78 | 0.622222 | [
"Apache-2.0"
] | DigitalInnovation/swagger-codegen | samples/client/petstore/python/tests/test_api_exception.py | 2,745 | Python |
# reimplementation of https://github.com/guillaumegenthial/tf_ner/blob/master/models/lstm_crf/main.py
import functools
import json
import logging
from pathlib import Path
import sys
import numpy as np
import tensorflow as tf
# tf.enable_eager_execution()
from tf_metrics import precision, recall, f1
DATADIR = "../../../data/conll/"
# Setup Logging
Path('results').mkdir(exist_ok=True)
tf.logging.set_verbosity(logging.INFO)
handlers = [ logging.FileHandler('results/main.log'), logging.StreamHandler(sys.stdout)]
logging.getLogger('tensorflow').handlers = handlers
# Data Pipeline
def parse_fn(line_words, line_tags):
"""Encodes words into bytes for tensor
:param line_words: one line with words (aka sentences) with space between each word/token
:param line_tags: one line of tags (one tag per word in line_words)
:return: (list of encoded words, len(words)), list of encoded tags
"""
words = [w.encode() for w in line_words.strip().split()]
tags = [t.encode() for t in line_tags.strip().split()]
    assert len(words) == len(tags), "Number of words {} and number of tags {} must be the same".format(len(words), len(tags))
return (words, len(words)), tags
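# Illustrative example (the sentence and tags are assumptions, not taken from the data files):
#   parse_fn('John lives in London', 'B-PER O O B-LOC')
#   -> (([b'John', b'lives', b'in', b'London'], 4), [b'B-PER', b'O', b'O', b'B-LOC'])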
def generator_fn(words_file, tags_file):
"""Enumerator to enumerate through words_file and associated tags_file one line at a time
:param words_file: file path of the words file (one sentence per line)
:param tags_file: file path of tags file (tags corresponding to words file)
:return enumerator that enumerates over the format (words, len(words)), tags one line at a time from input files.
"""
with Path(words_file).open('r') as f_words, Path(tags_file).open('r') as f_tags:
for line_words, line_tags in zip(f_words, f_tags):
yield parse_fn(line_words, line_tags)
def input_fn(words_file, tags_file, params = None, shuffle_and_repeat = False):
"""Creates tensorflow dataset using the generator_fn
:param words_file: file path of the words file (one sentence per line)
:param tags_file: file path of tags file (tags corresponding to words file)
:param params: if not None then model hyperparameters expected - 'buffer' (as in buffer size) and 'epochs'
:param shuffle_and_repeat: if the input is to be shuffled and repeat-delivered (say per epoch)
:return: instance of tf.data.Dataset
"""
params = params if params is not None else {}
# shapes are analogous to (list of encoded words, len(words)), list of encoded tags
shapes = (([None], ()), [None])
types = ((tf.string, tf.int32), tf.string)
defaults = (('<pad>', 0), 'O')
generator = functools.partial(generator_fn, words_file, tags_file)
dataset = tf.data.Dataset.from_generator(generator, output_shapes = shapes, output_types = types)
if shuffle_and_repeat:
dataset = dataset.shuffle(params['buffer']).repeat(params['epochs'])
    dataset = dataset.padded_batch(params.get('batch_size', 20), shapes, defaults).prefetch(1)
    return dataset
def model_fn(features, labels, mode, params):
"""
:param features: words from sentence and number of words per sentence
:param labels: One tag per word
:param mode: tf.estimator.ModeKeys.TRAIN or tf.estimator.ModeKeys.PREDICT or tf.estimator.ModeKeys.EVAL
:param params: dictionary of hyper parameters for the model
:return:
"""
# For serving, features are a bit different
if isinstance(features, dict):
features = features['words'], features['nwords']
# Read vocab_words_file, vocab_tags_file, features
words, nwords = features
training = (mode == tf.estimator.ModeKeys.TRAIN)
vocab_words = tf.contrib.lookup.index_table_from_file(params['vocab_words_file'], num_oov_buckets = params['num_oov_buckets'])
'''
If the file contains the following:
B-LOC
B-PER
O
I-LOC
then indices = [0, 1, 3] and num_tags = 4
Open Question: The special treatment of tag indices is probably needed for microavg metrics. Why though?
'''
with Path(params['vocab_tags_file']).open('r') as f:
indices = [idx for idx, tag in enumerate(f) if tag.strip() != 'O']
num_tags = len(indices) + 1
# Word Embeddings
# remember - as per the parse function "words" is a python list of
word_ids = vocab_words.lookup(words)
glove = np.load(params['glove'])['embeddings']
glove = np.vstack([glove, [[0.]*params['dim']]])
variable = tf.Variable(glove, dtype=tf.float32, trainable=False)
embeddings = tf.nn.embedding_lookup(variable, word_ids)
dropout = params['dropout']
embeddings = tf.layers.dropout(embeddings, rate = dropout, training = training)
# LSTM CRF
time_major = tf.transpose(embeddings, perm = [1, 0, 2])
lstm_cell_fw = tf.contrib.rnn.LSTMBlockFusedCell(params['lstm_size'])
lstm_cell_bw = tf.contrib.rnn.LSTMBlockFusedCell(params['lstm_size'])
lstm_cell_bw = tf.contrib.rnn.TimeReversedFusedRNN(lstm_cell_bw)
"""
Any LSTM Cell returns two things: Cell Output (h) and Cell State (c)
Following this, lstm_fw or lstm_bw each return a pair containing:
Cell Output: A 3-D tensor of shape [time_len, batch_size, output_size]
Final state: a tuple (cell_state, output) produced by the last LSTM Cell in the sequence.
"""
output_fw,_ = lstm_cell_fw(time_major, dtype = tf.float32, sequence_length = nwords)
output_bw,_ = lstm_cell_bw(time_major, dtype = tf.float32, sequence_length = nwords)
output = tf.concat([output_fw, output_bw], axis=-1)
output = tf.transpose(output, perm=[1, 0, 2])
output = tf.layers.dropout(output, rate=dropout, training=training)
# CRf
logits = tf.layers.dense(output, num_tags)
crf_params = tf.get_variable('crf', shape = [num_tags, num_tags], dtype = tf.float32)
pred_ids, _ = tf.contrib.crf.crf_decode(logits, crf_params, nwords) # pred_ids = A [batch_size, max_seq_len] matrix, with dtype tf.int32.
# Prediction mode
if mode == tf.estimator.ModeKeys.PREDICT:
reverse_vocab_tags = tf.contrib.lookup.index_to_string_table_from_file(params['vocab_tags_file'])
pred_strings = reverse_vocab_tags.lookup(tf.to_int64(pred_ids))
predictions = {'pred_ids': pred_ids, 'tags': pred_strings}
return tf.estimator.EstimatorSpec(mode, predictions=predictions)
# Loss
vocab_tags = tf.contrib.lookup.index_table_from_file(params['vocab_tags_file'])
label_ids = vocab_tags.lookup(labels)
"""
logits are the same thing as unary potentials,
checkout https://guillaumegenthial.github.io/sequence-tagging-with-tensorflow.html look for scores s[i]
"""
log_likelihood, _ = tf.contrib.crf.crf_log_likelihood(logits, label_ids, nwords, crf_params)
loss = tf.reduce_mean(-log_likelihood)
# metrics
weights = tf.sequence_mask(nwords)
metrics = {
'acc': tf.metrics.accuracy(label_ids, pred_ids, weights),
'precision': precision(label_ids, pred_ids, num_tags, indices, weights), # indices indicate non-null classes
'recall': recall(label_ids, pred_ids, num_tags, indices, weights),
'f1': f1(label_ids, pred_ids, num_tags, indices, weights),
}
for metric_name, op in metrics.items():
tf.summary.scalar(metric_name, op[1])
# Evaluation Mode or training mode
if mode == tf.estimator.ModeKeys.EVAL:
return tf.estimator.EstimatorSpec(mode, loss = loss, eval_metric_ops = metrics )
elif mode == tf.estimator.ModeKeys.TRAIN:
train_op = tf.train.AdamOptimizer().minimize(loss, global_step=tf.train.get_or_create_global_step())
return tf.estimator.EstimatorSpec(mode, loss = loss, train_op = train_op)
def fwords(name):
return str(Path(DATADIR, '{}.words.txt'.format(name)))
def ftags(name):
return str(Path(DATADIR, '{}.tags.txt'.format(name)))
# Write predictions to file
def write_predictions(name, estimator):
Path('results/score').mkdir(parents=True, exist_ok=True)
with Path('results/score/{}.preds.txt'.format(name)).open('wb') as f:
test_inpf = functools.partial(input_fn, fwords(name), ftags(name))
golds_gen = generator_fn(fwords(name), ftags(name))
preds_gen = estimator.predict(test_inpf)
for golds, preds in zip(golds_gen, preds_gen):
((words, _), tags) = golds
for word, tag, tag_pred in zip(words, tags, preds['tags']):
f.write(b' '.join([word, tag, tag_pred]) + b'\n')
f.write(b'\n')
if __name__ == '__main__':
# Params
params = {
'dim': 300,
'dropout': 0.5,
'num_oov_buckets': 1,
'epochs': 25,
'batch_size': 20,
'buffer': 15000,
'lstm_size': 100,
'vocab_words_file': str(Path(DATADIR, 'vocab.words.txt')),
'vocab_chars_file': str(Path(DATADIR, 'vocab.chars.txt')),
'vocab_tags_file': str(Path(DATADIR, 'vocab.tags.txt')),
'glove': str(Path(DATADIR, 'glove.npz'))
}
with Path('results/params.json').open('w') as f:
json.dump(params, f, indent=4, sort_keys=True)
print('Done writing params to disk')
# Run configuration and estimator
cfg = tf.estimator.RunConfig(save_checkpoints_secs=120)
estimator = tf.estimator.Estimator(model_fn, 'results/model', cfg, params)
print('Done creating estimator spec')
# Defining our input functions
train_inpf = functools.partial(input_fn, fwords('train'), ftags('train'), params, shuffle_and_repeat=True)
eval_inpf = functools.partial(input_fn, fwords('testa'), ftags('testa'))
# Create an early stopping hook
Path(estimator.eval_dir()).mkdir(parents=True, exist_ok=True)
"""
Ref: https://stackoverflow.com/questions/47137061/early-stopping-with-tf-estimator-how
The parameters for stop_if_no_decrease_hook are as follows:
tf.contrib.estimator.stop_if_no_decrease_hook(
estimator,
metric_name='loss',
max_steps_without_decrease=1000,
min_steps=100)
"""
hook = tf.contrib.estimator.stop_if_no_increase_hook(estimator, 'f1', 500, min_steps=8000, run_every_secs=120)
train_spec = tf.estimator.TrainSpec(input_fn = train_inpf, hooks = [hook])
eval_spec = tf.estimator.EvalSpec(input_fn = eval_inpf, throttle_secs = 120) # Evaluate every 120 seconds
print('Done creating train and eval spec')
# Train with early stopping
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
print('Done training and evaluation')
for name in ['train', 'testa', 'testb']:
write_predictions(name, estimator)
| 38.647273 | 142 | 0.691664 | [
"Apache-2.0"
] | vikasbahirwani/SequenceTagging | src/model/lstm_crf/main.py | 10,628 | Python |
# -*- coding: utf-8 -*-
"""Assignment Day :13
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1hCwbVUHmWUKYdN7xNNeZcze9aGEmlGKz
"""
# Q1.
#Remove the hardcoded part from the code with the help of configparser
import os
from configparser import ConfigParser
config=ConfigParser()
config.read("D://Py Ex/Advance-py//ex_config.ini")
path=config.get("Exten", "path")
old_text=config.get("Exten","OE")
New_text=config.get("Exten","NE")
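# Assumed layout of ex_config.ini (illustrative values only, not from the original file):
# [Exten]
# path = D:\Py Ex\Advance-py\test_folder
# OE = txt
# NE = csv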
os.chdir(path)
os.getcwd()
for file in os.listdir():
if file.endswith(old_text):
first_name=file.rsplit(".",1)[0]
new_name=first_name+"."+New_text
print(new_name)
os.rename(file,new_name)
#Q2
#The question has been asked in an interview
# Please write the code in such a way that it gives all paths of a file (same name) that is present in multiple locations.
import os
resp =os.walk("C:\\company\\names")
d1= {}
for r,d,f in resp:
for file in f:
d1.setdefault(file,[]).append(r)
print(d1)
file_name = input("Enter the file name ")
for k,v in d1.items():
if file_name.lower() in k.lower() :
print (k,":", v)
for find_file in v:
      print(find_file)
| 25.22449 | 132 | 0.68123 | [
"Apache-2.0"
] | anjali0503/Letsupgrade_Advance_Python_Django | assignment_day_13.py | 1,236 | Python |
import invoke
from pathlib import Path
PACKAGE = "src"
REQUIRED_COVERAGE = 90
BASE_DIR = Path(__file__).resolve().parent
@invoke.task(name="format")
def format_(arg):
autoflake = "autoflake -i --recursive --remove-all-unused-imports --remove-duplicate-keys --remove-unused-variables"
arg.run(f"{autoflake} {PACKAGE}", echo=True)
arg.run(f"isort {PACKAGE}", echo=True)
arg.run(f"black {PACKAGE}", echo=True)
@invoke.task(
help={
"style": "Check style with flake8, isort, and black",
"typing": "Check typing with mypy",
}
)
def check(arg, style=True, typing=True):
if style:
arg.run(f"flake8 {PACKAGE}", echo=True)
arg.run(f"isort --diff {PACKAGE} --check-only", echo=True)
arg.run(f"black --diff {PACKAGE} --check", echo=True)
if typing:
arg.run(f"mypy --no-incremental --cache-dir=/dev/null {PACKAGE}", echo=True)
@invoke.task
def test(arg):
arg.run(
f"pytest",
pty=True,
echo=True,
)
@invoke.task
def makemigrations(arg, message):
arg.run(f"cd {BASE_DIR} && alembic revision --autogenerate -m '{message}'", echo=True, pty=True)
@invoke.task
def migrate(arg):
arg.run(f"cd {BASE_DIR} && alembic upgrade head", echo=True)
@invoke.task
def hooks(arg):
invoke_path = Path(arg.run("which invoke", hide=True).stdout[:-1])
for src_path in Path(".hooks").iterdir():
dst_path = Path(".git/hooks") / src_path.name
print(f"Installing: {dst_path}")
with open(str(src_path), "r") as f:
src_data = f.read()
with open(str(dst_path), "w") as f:
f.write(src_data.format(invoke_path=invoke_path.parent))
arg.run(f"chmod +x {dst_path}")
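# Example invocations (assuming these tasks are collected by invoke's default task loader):
#   invoke format
#   invoke check
#   invoke test
#   invoke hooks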
| 26.569231 | 120 | 0.62652 | [
"Apache-2.0"
] | brainfukk/fiuread | tasks.py | 1,727 | Python |
# -*- coding: utf-8 -*-
"""
pygments.lexers.smalltalk
~~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for Smalltalk and related languages.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, include, bygroups, default
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
__all__ = ['SmalltalkLexer', 'NewspeakLexer']
class SmalltalkLexer(RegexLexer):
"""
For `Smalltalk <http://www.smalltalk.org/>`_ syntax.
Contributed by Stefan Matthias Aust.
Rewritten by Nils Winter.
.. versionadded:: 0.10
"""
name = 'Smalltalk'
filenames = ['*.st']
aliases = ['smalltalk', 'squeak', 'st']
mimetypes = ['text/x-smalltalk']
tokens = {
'root': [
(r'(<)(\w+:)(.*?)(>)', bygroups(Text, Keyword, Text, Text)),
include('squeak fileout'),
include('whitespaces'),
include('method definition'),
(r'(\|)([\w\s]*)(\|)', bygroups(Operator, Name.Variable, Operator)),
include('objects'),
(r'\^|\:=|\_', Operator),
# temporaries
(r'[\]({}.;!]', Text),
],
'method definition': [
            # Not perfect: can't allow whitespace at the beginning
            # without breaking everything
(r'([a-zA-Z]+\w*:)(\s*)(\w+)',
bygroups(Name.Function, Text, Name.Variable)),
(r'^(\b[a-zA-Z]+\w*\b)(\s*)$', bygroups(Name.Function, Text)),
(r'^([-+*/\\~<>=|&!?,@%]+)(\s*)(\w+)(\s*)$',
bygroups(Name.Function, Text, Name.Variable, Text)),
],
'blockvariables': [
include('whitespaces'),
(r'(:)(\s*)(\w+)',
bygroups(Operator, Text, Name.Variable)),
(r'\|', Operator, '#pop'),
default('#pop'), # else pop
],
'literals': [
(r"'(''|[^'])*'", String, 'afterobject'),
(r'\$.', String.Char, 'afterobject'),
(r'#\(', String.Symbol, 'parenth'),
(r'\)', Text, 'afterobject'),
(r'(\d+r)?-?\d+(\.\d+)?(e-?\d+)?', Number, 'afterobject'),
],
'_parenth_helper': [
include('whitespaces'),
(r'(\d+r)?-?\d+(\.\d+)?(e-?\d+)?', Number),
(r'[-+*/\\~<>=|&#!?,@%\w:]+', String.Symbol),
# literals
(r"'(''|[^'])*'", String),
(r'\$.', String.Char),
(r'#*\(', String.Symbol, 'inner_parenth'),
],
'parenth': [
# This state is a bit tricky since
# we can't just pop this state
(r'\)', String.Symbol, ('root', 'afterobject')),
include('_parenth_helper'),
],
'inner_parenth': [
(r'\)', String.Symbol, '#pop'),
include('_parenth_helper'),
],
'whitespaces': [
# skip whitespace and comments
(r'\s+', Text),
(r'"(""|[^"])*"', Comment),
],
'objects': [
(r'\[', Text, 'blockvariables'),
(r'\]', Text, 'afterobject'),
(r'\b(self|super|true|false|nil|thisContext)\b',
Name.Builtin.Pseudo, 'afterobject'),
(r'\b[A-Z]\w*(?!:)\b', Name.Class, 'afterobject'),
(r'\b[a-z]\w*(?!:)\b', Name.Variable, 'afterobject'),
(r'#("(""|[^"])*"|[-+*/\\~<>=|&!?,@%]+|[\w:]+)',
String.Symbol, 'afterobject'),
include('literals'),
],
'afterobject': [
(r'! !$', Keyword, '#pop'), # squeak chunk delimiter
include('whitespaces'),
(r'\b(ifTrue:|ifFalse:|whileTrue:|whileFalse:|timesRepeat:)',
Name.Builtin, '#pop'),
(r'\b(new\b(?!:))', Name.Builtin),
(r'\:=|\_', Operator, '#pop'),
(r'\b[a-zA-Z]+\w*:', Name.Function, '#pop'),
(r'\b[a-zA-Z]+\w*', Name.Function),
(r'\w+:?|[-+*/\\~<>=|&!?,@%]+', Name.Function, '#pop'),
(r'\.', Punctuation, '#pop'),
(r';', Punctuation),
(r'[\])}]', Text),
(r'[\[({]', Text, '#pop'),
],
'squeak fileout': [
# Squeak fileout format (optional)
(r'^"(""|[^"])*"!', Keyword),
(r"^'(''|[^'])*'!", Keyword),
(r'^(!)(\w+)( commentStamp: )(.*?)( prior: .*?!\n)(.*?)(!)',
bygroups(Keyword, Name.Class, Keyword, String, Keyword, Text, Keyword)),
(r"^(!)(\w+(?: class)?)( methodsFor: )('(?:''|[^'])*')(.*?!)",
bygroups(Keyword, Name.Class, Keyword, String, Keyword)),
(r'^(\w+)( subclass: )(#\w+)'
r'(\s+instanceVariableNames: )(.*?)'
r'(\s+classVariableNames: )(.*?)'
r'(\s+poolDictionaries: )(.*?)'
r'(\s+category: )(.*?)(!)',
bygroups(Name.Class, Keyword, String.Symbol, Keyword, String, Keyword,
String, Keyword, String, Keyword, String, Keyword)),
(r'^(\w+(?: class)?)(\s+instanceVariableNames: )(.*?)(!)',
bygroups(Name.Class, Keyword, String, Keyword)),
(r'(!\n)(\].*)(! !)$', bygroups(Keyword, Text, Keyword)),
(r'! !$', Keyword),
],
}
class NewspeakLexer(RegexLexer):
"""
For `Newspeak <http://newspeaklanguage.org/>` syntax.
.. versionadded:: 1.1
"""
name = 'Newspeak'
filenames = ['*.ns2']
aliases = ['newspeak', ]
mimetypes = ['text/x-newspeak']
tokens = {
'root': [
(r'\b(Newsqueak2)\b', Keyword.Declaration),
(r"'[^']*'", String),
(r'\b(class)(\s+)(\w+)(\s*)',
bygroups(Keyword.Declaration, Text, Name.Class, Text)),
(r'\b(mixin|self|super|private|public|protected|nil|true|false)\b',
Keyword),
(r'(\w+\:)(\s*)([a-zA-Z_]\w+)',
bygroups(Name.Function, Text, Name.Variable)),
(r'(\w+)(\s*)(=)',
bygroups(Name.Attribute, Text, Operator)),
(r'<\w+>', Comment.Special),
include('expressionstat'),
include('whitespace')
],
'expressionstat': [
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'\d+', Number.Integer),
(r':\w+', Name.Variable),
(r'(\w+)(::)', bygroups(Name.Variable, Operator)),
(r'\w+:', Name.Function),
(r'\w+', Name.Variable),
(r'\(|\)', Punctuation),
(r'\[|\]', Punctuation),
(r'\{|\}', Punctuation),
(r'(\^|\+|\/|~|\*|<|>|=|@|%|\||&|\?|!|,|-|:)', Operator),
(r'\.|;', Punctuation),
include('whitespace'),
include('literals'),
],
'literals': [
(r'\$.', String),
(r"'[^']*'", String),
(r"#'[^']*'", String.Symbol),
(r"#\w+:?", String.Symbol),
(r"#(\+|\/|~|\*|<|>|=|@|%|\||&|\?|!|,|-)+", String.Symbol)
],
'whitespace': [
(r'\s+', Text),
(r'"[^"]*"', Comment)
],
}
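# Minimal usage sketch (an assumed snippet; relies on the standard pygments highlight API):
# from pygments import highlight
# from pygments.formatters import TerminalFormatter
# print(highlight("Transcript showCr: 'hello'.", SmalltalkLexer(), TerminalFormatter()))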
| 36.811224 | 88 | 0.427859 | [
"MIT"
] | 0x008800/Sandbox | Python/Django/rest_framework/1_serialization/env/lib/python2.7/site-packages/pygments/lexers/smalltalk.py | 7,215 | Python |
import optparse
import Utils
import gensim
def main():
parser = optparse.OptionParser()
parser.add_option('-d', '--dataset', default='sample')
parser.add_option('--size', default=300, type='int', help='vectors dimension. Default: %default')
parser.add_option('--window', default=5, type='int', help='window size. Default: %default')
parser.add_option('--min_count', default=5, type='int', help='Min count. Default: %default')
options, args = parser.parse_args()
documents = list(Utils.read_json('%s-tokenized.json' % options.dataset))
model = gensim.models.word2vec.Word2Vec(documents, size=options.size, window=options.window, min_count=options.min_count, workers=4)
model.save('%s-word-vector-model' % options.dataset)
main()
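# Assumed follow-up usage (not part of this script): load and query the saved model.
# model = gensim.models.word2vec.Word2Vec.load('sample-word-vector-model')
# print(model.wv.most_similar('word'))  # attribute layout may differ across gensim versions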
| 38.65 | 136 | 0.702458 | [
"MIT"
] | mathieu-lacage/sophiaconf2018 | do-word-vector-model.py | 773 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import csv
import time
import numpy as np
import math
import os
# gene[n] = [function type, connection 1, ..., connection max_in_num] (connections are node IDs)
class Individual(object):
def __init__(self, net_info, init):
self.net_info = net_info
self.gene = np.zeros((self.net_info.node_num + self.net_info.out_num, self.net_info.max_in_num + 1)).astype(int)
self.is_active = np.empty(self.net_info.node_num + self.net_info.out_num).astype(bool)
self.is_pool = np.empty(self.net_info.node_num + self.net_info.out_num).astype(bool)
self.eval = None
self.size = None
if init:
print('init with specific architectures')
self.init_gene_with_conv() # In the case of starting only convolution
else:
self.init_gene() # generate initial individual randomly
def init_gene_with_conv(self):
# initial architecture
arch = ['S_ConvBlock_64_3']
input_layer_num = int(self.net_info.input_num / self.net_info.rows) + 1
output_layer_num = int(self.net_info.out_num / self.net_info.rows) + 1
layer_ids = [((self.net_info.cols - 1 - input_layer_num - output_layer_num) + i) // (len(arch)) for i in
range(len(arch))]
prev_id = 0 # i.e. input layer
current_layer = input_layer_num
block_ids = [] # *do not connect with these ids
# building convolution net
for i, idx in enumerate(layer_ids):
current_layer += idx
n = current_layer * self.net_info.rows + np.random.randint(self.net_info.rows)
block_ids.append(n)
self.gene[n][0] = self.net_info.func_type.index(arch[i])
col = np.min((int(n / self.net_info.rows), self.net_info.cols))
max_connect_id = col * self.net_info.rows + self.net_info.input_num
min_connect_id = (col - self.net_info.level_back) * self.net_info.rows + self.net_info.input_num \
if col - self.net_info.level_back >= 0 else 0
self.gene[n][1] = prev_id
for j in range(1, self.net_info.max_in_num):
self.gene[n][j + 1] = min_connect_id + np.random.randint(max_connect_id - min_connect_id)
prev_id = n + self.net_info.input_num
# output layer
n = self.net_info.node_num
type_num = self.net_info.func_type_num if n < self.net_info.node_num else self.net_info.out_type_num
self.gene[n][0] = np.random.randint(type_num)
col = np.min((int(n / self.net_info.rows), self.net_info.cols))
max_connect_id = col * self.net_info.rows + self.net_info.input_num
min_connect_id = (col - self.net_info.level_back) * self.net_info.rows + self.net_info.input_num \
if col - self.net_info.level_back >= 0 else 0
self.gene[n][1] = prev_id
for i in range(1, self.net_info.max_in_num):
self.gene[n][i + 1] = min_connect_id + np.random.randint(max_connect_id - min_connect_id)
block_ids.append(n)
# intermediate node
for n in range(self.net_info.node_num + self.net_info.out_num):
if n in block_ids:
continue
# type gene
type_num = self.net_info.func_type_num if n < self.net_info.node_num else self.net_info.out_type_num
self.gene[n][0] = np.random.randint(type_num)
# connection gene
col = np.min((int(n / self.net_info.rows), self.net_info.cols))
max_connect_id = col * self.net_info.rows + self.net_info.input_num
min_connect_id = (col - self.net_info.level_back) * self.net_info.rows + self.net_info.input_num \
if col - self.net_info.level_back >= 0 else 0
for i in range(self.net_info.max_in_num):
self.gene[n][i + 1] = min_connect_id + np.random.randint(max_connect_id - min_connect_id)
self.check_active()
def init_gene(self):
# intermediate node
for n in range(self.net_info.node_num + self.net_info.out_num):
# type gene
type_num = self.net_info.func_type_num if n < self.net_info.node_num else self.net_info.out_type_num
self.gene[n][0] = np.random.randint(type_num)
# connection gene
col = np.min((int(n / self.net_info.rows), self.net_info.cols))
max_connect_id = col * self.net_info.rows + self.net_info.input_num
min_connect_id = (col - self.net_info.level_back) * self.net_info.rows + self.net_info.input_num \
if col - self.net_info.level_back >= 0 else 0
for i in range(self.net_info.max_in_num):
self.gene[n][i + 1] = min_connect_id + np.random.randint(max_connect_id - min_connect_id)
self.check_active()
def __check_course_to_out(self, n):
if not self.is_active[n]:
self.is_active[n] = True
t = self.gene[n][0]
if n >= self.net_info.node_num: # output node
in_num = self.net_info.out_in_num[t]
else: # intermediate node
in_num = self.net_info.func_in_num[t]
for i in range(in_num):
if self.gene[n][i + 1] >= self.net_info.input_num:
self.__check_course_to_out(self.gene[n][i + 1] - self.net_info.input_num)
def check_active(self):
# clear
self.is_active[:] = False
# start from output nodes
for n in range(self.net_info.out_num):
self.__check_course_to_out(self.net_info.node_num + n)
def check_pool(self):
is_pool = True
pool_num = 0
for n in range(self.net_info.node_num + self.net_info.out_num):
if self.is_active[n]:
if self.gene[n][0] > 19:
is_pool = False
pool_num += 1
return is_pool, pool_num
def __mutate(self, current, min_int, max_int):
mutated_gene = current
while current == mutated_gene:
mutated_gene = min_int + np.random.randint(max_int - min_int)
return mutated_gene
def mutation(self, mutation_rate=0.01):
active_check = False
for n in range(self.net_info.node_num + self.net_info.out_num):
t = self.gene[n][0]
# mutation for type gene
type_num = self.net_info.func_type_num if n < self.net_info.node_num else self.net_info.out_type_num
if np.random.rand() < mutation_rate and type_num > 1:
self.gene[n][0] = self.__mutate(self.gene[n][0], 0, type_num)
if self.is_active[n]:
active_check = True
# mutation for connection gene
col = np.min((int(n / self.net_info.rows), self.net_info.cols))
max_connect_id = col * self.net_info.rows + self.net_info.input_num
min_connect_id = (col - self.net_info.level_back) * self.net_info.rows + self.net_info.input_num \
if col - self.net_info.level_back >= 0 else 0
in_num = self.net_info.func_in_num[t] if n < self.net_info.node_num else self.net_info.out_in_num[t]
for i in range(self.net_info.max_in_num):
if np.random.rand() < mutation_rate and max_connect_id - min_connect_id > 1:
self.gene[n][i + 1] = self.__mutate(self.gene[n][i + 1], min_connect_id, max_connect_id)
if self.is_active[n] and i < in_num:
active_check = True
self.check_active()
return active_check
def neutral_mutation(self, mutation_rate=0.01):
for n in range(self.net_info.node_num + self.net_info.out_num):
t = self.gene[n][0]
# mutation for type gene
type_num = self.net_info.func_type_num if n < self.net_info.node_num else self.net_info.out_type_num
if not self.is_active[n] and np.random.rand() < mutation_rate and type_num > 1:
self.gene[n][0] = self.__mutate(self.gene[n][0], 0, type_num)
# mutation for connection gene
col = np.min((int(n / self.net_info.rows), self.net_info.cols))
max_connect_id = col * self.net_info.rows + self.net_info.input_num
min_connect_id = (col - self.net_info.level_back) * self.net_info.rows + self.net_info.input_num \
if col - self.net_info.level_back >= 0 else 0
in_num = self.net_info.func_in_num[t] if n < self.net_info.node_num else self.net_info.out_in_num[t]
for i in range(self.net_info.max_in_num):
if (not self.is_active[n] or i >= in_num) and np.random.rand() < mutation_rate \
and max_connect_id - min_connect_id > 1:
self.gene[n][i + 1] = self.__mutate(self.gene[n][i + 1], min_connect_id, max_connect_id)
self.check_active()
return False
def count_active_node(self):
return self.is_active.sum()
def copy(self, source):
self.net_info = source.net_info
self.gene = source.gene.copy()
self.is_active = source.is_active.copy()
self.eval = source.eval
self.size = source.size
def active_net_list(self):
net_list = [["input", 0, 0]]
active_cnt = np.arange(self.net_info.input_num + self.net_info.node_num + self.net_info.out_num)
active_cnt[self.net_info.input_num:] = np.cumsum(self.is_active)
for n, is_a in enumerate(self.is_active):
if is_a:
t = self.gene[n][0]
if n < self.net_info.node_num: # intermediate node
type_str = self.net_info.func_type[t]
else: # output node
type_str = self.net_info.out_type[t]
connections = [active_cnt[self.gene[n][i + 1]] for i in range(self.net_info.max_in_num)]
net_list.append([type_str] + connections)
return net_list
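    # Illustrative shape of the list returned above (the node type name is an assumption
    # drawn from net_info.func_type, not fixed by this file):
    #   [['input', 0, 0], ['S_ConvBlock_64_3', 0, 0], ..., [<output type>, <in id>, <in id>]]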
# CGP with (1 + \lambda)-ES
class CGP(object):
def __init__(self, net_info, eval_func, lam=4, imgSize=32, init=False, bias=0):
self.lam = lam
self.pop = [Individual(net_info, init) for _ in range(1 + self.lam)]
self.eval_func = eval_func
self.num_gen = 0
self.num_eval = 0
self.max_pool_num = int(math.log2(imgSize) - 2)
self.init = init
self.bias = bias
def _evaluation(self, pop, eval_flag):
# create network list
net_lists = []
active_index = np.where(eval_flag)[0]
for i in active_index:
net_lists.append(pop[i].active_net_list())
# evaluation
fp = self.eval_func(net_lists)
for i, j in enumerate(active_index):
if isinstance(fp[i], tuple):
pop[j].eval = fp[i][0]
pop[j].size = fp[i][1]
else:
pop[j].eval = fp[i]
pop[j].size = np.inf
evaluations_acc = np.zeros(len(pop))
evaluations_size = np.zeros(len(pop))
for i in range(len(pop)):
evaluations_acc[i] = pop[i].eval
evaluations_size[i] = pop[i].size
self.num_eval += len(net_lists)
return evaluations_acc, evaluations_size
def _log_data(self, net_info_type='active_only', start_time=0):
log_list = [self.num_gen, self.num_eval, time.time() - start_time, self.pop[0].eval,
self.pop[0].size, self.pop[0].count_active_node()]
if net_info_type == 'active_only':
log_list.append(self.pop[0].active_net_list())
elif net_info_type == 'full':
log_list += self.pop[0].gene.flatten().tolist()
else:
pass
return log_list
def _log_data_children(self, net_info_type='active_only', start_time=0, pop=None):
log_list = [self.num_gen, self.num_eval, time.time() - start_time, pop.eval, pop.size, pop.count_active_node()]
if net_info_type == 'active_only':
log_list.append(pop.active_net_list())
elif net_info_type == 'full':
log_list += pop.gene.flatten().tolist()
else:
pass
return log_list
def load_log(self, log_data):
self.num_gen = int(log_data[0])
self.num_eval = int(log_data[1])
net_info = self.pop[0].net_info
self.pop[0].eval = log_data[3]
self.pop[0].size = log_data[4]
print("Loaded Accuracy:", self.pop[0].eval)
self.pop[0].gene = np.int64(np.array(log_data[6:])).reshape(
(net_info.node_num + net_info.out_num, net_info.max_in_num + 1))
self.pop[0].check_active()
# Evolution CGP:
# At each iteration:
# - Generate lambda individuals in which at least one active node changes (i.e., forced mutation)
# - Mutate the best individual with neutral mutation (unchanging the active nodes)
# if the best individual is not updated.
def modified_evolution(self, max_eval=100, mutation_rate=0.01, log_path='./'):
with open(os.path.join(log_path, 'child.txt'), 'a') as fw_c:
writer_c = csv.writer(fw_c, lineterminator='\n')
start_time = time.time()
eval_flag = np.empty(self.lam)
active_num = self.pop[0].count_active_node()
_, pool_num = self.pop[0].check_pool()
if self.init:
pass
else: # in the case of not using an init indiviudal
while active_num < self.pop[0].net_info.min_active_num or pool_num > self.max_pool_num:
self.pop[0].mutation(1.0)
active_num = self.pop[0].count_active_node()
_, pool_num = self.pop[0].check_pool()
if self.pop[0].eval is None:
self._evaluation([self.pop[0]], np.array([True]))
print(self._log_data(net_info_type='active_only', start_time=start_time))
while self.num_gen < max_eval:
self.num_gen += 1
# reproduction
for i in range(self.lam):
eval_flag[i] = False
self.pop[i + 1].copy(self.pop[0]) # copy a parent
active_num = self.pop[i + 1].count_active_node()
_, pool_num = self.pop[i + 1].check_pool()
# mutation (forced mutation)
while not eval_flag[i] or active_num < self.pop[
i + 1].net_info.min_active_num or pool_num > self.max_pool_num:
self.pop[i + 1].copy(self.pop[0]) # copy a parent
eval_flag[i] = self.pop[i + 1].mutation(mutation_rate) # mutation
active_num = self.pop[i + 1].count_active_node()
_, pool_num = self.pop[i + 1].check_pool()
# evaluation and selection
evaluations_acc, evaluations_size = self._evaluation(self.pop[1:], eval_flag=eval_flag)
evaluations_argsort = np.argsort(-evaluations_acc)
print(evaluations_acc, evaluations_argsort)
best_arg = evaluations_argsort[0]
# save
f = open(os.path.join(log_path, 'arch_child.txt'), 'a')
writer_f = csv.writer(f, lineterminator='\n')
for c in range(1 + self.lam):
writer_c.writerow(
self._log_data_children(net_info_type='full', start_time=start_time, pop=self.pop[c]))
writer_f.writerow(
self._log_data_children(net_info_type='active_only', start_time=start_time, pop=self.pop[c]))
f.close()
# replace the parent by the best individual
print("Comparing children with parent...")
print(f"Best Child's Accuracy {evaluations_acc[best_arg]}, Parent Accuracy: {self.pop[0].eval}")
if evaluations_acc[best_arg] > self.pop[0].eval:
self.pop[0].copy(self.pop[best_arg + 1])
print("Replacing parent with best child")
elif self.bias > 0:
found = False
print(f"Parent: Accuracy: {self.pop[0].eval}, Size: {self.pop[0].size}")
for i, idx in enumerate(evaluations_argsort):
print(f"Child {i + 1}: Accuracy: {evaluations_acc[idx]}, Size: {evaluations_size[idx]}")
if evaluations_acc[idx] > (self.pop[0].eval - self.bias) and \
evaluations_size[idx] < self.pop[0].size:
print("Replacing parent with child")
self.pop[0].copy(self.pop[idx + 1])
found = True
break
if not found:
self.pop[0].neutral_mutation(mutation_rate) # modify the parent (neutral mutation)
else:
self.pop[0].neutral_mutation(mutation_rate) # modify the parent (neutral mutation)
# display and save log
print(self._log_data(net_info_type='active_only', start_time=start_time))
fw = open(os.path.join(log_path, 'log_cgp.txt'), 'a')
writer = csv.writer(fw, lineterminator='\n')
writer.writerow(self._log_data(net_info_type='full', start_time=start_time))
fa = open(os.path.join(log_path, 'arch.txt'), 'a')
writer_a = csv.writer(fa, lineterminator='\n')
writer_a.writerow(self._log_data(net_info_type='active_only', start_time=start_time))
fw.close()
fa.close()
| 47.862534 | 120 | 0.584108 | [
"MIT"
] | Pavan-Samtani/CGP-CNN-v2 | cgp.py | 17,757 | Python |
import graphene
from ..translations.mutations import ShopSettingsTranslate
from .mutations import (
AuthorizationKeyAdd,
AuthorizationKeyDelete,
HomepageCollectionUpdate,
ShopAddressUpdate,
ShopDomainUpdate,
ShopFetchTaxRates,
ShopSettingsUpdate,
StaffNotificationRecipientCreate,
StaffNotificationRecipientDelete,
StaffNotificationRecipientUpdate,
)
from .types import Shop
class ShopQueries(graphene.ObjectType):
shop = graphene.Field(Shop, description="Return information about the shop.")
def resolve_shop(self, _info):
return Shop()
class ShopMutations(graphene.ObjectType):
authorization_key_add = AuthorizationKeyAdd.Field()
authorization_key_delete = AuthorizationKeyDelete.Field()
staff_notification_recipient_create = StaffNotificationRecipientCreate.Field()
staff_notification_recipient_update = StaffNotificationRecipientUpdate.Field()
staff_notification_recipient_delete = StaffNotificationRecipientDelete.Field()
homepage_collection_update = HomepageCollectionUpdate.Field()
shop_domain_update = ShopDomainUpdate.Field()
shop_settings_update = ShopSettingsUpdate.Field()
shop_fetch_tax_rates = ShopFetchTaxRates.Field()
shop_settings_translate = ShopSettingsTranslate.Field()
shop_address_update = ShopAddressUpdate.Field()
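# Assumed wiring (illustrative, not from this file): these classes are typically merged into
# the project-wide schema, e.g.
#   schema = graphene.Schema(query=ShopQueries, mutation=ShopMutations)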
| 33.65 | 82 | 0.801634 | [
"BSD-3-Clause"
] | croolicjah/saleor-platform | saleor/saleor/graphql/shop/schema.py | 1,346 | Python |
# -*- coding: utf-8 -*-
r'''
Manage the Windows registry
===========================
Many python developers think of registry keys as if they were python keys in a
dictionary which is not the case. The windows registry is broken down into the
following components:
-----
Hives
-----
This is the top level of the registry. They all begin with HKEY.
- HKEY_CLASSES_ROOT (HKCR)
- HKEY_CURRENT_USER(HKCU)
- HKEY_LOCAL MACHINE (HKLM)
- HKEY_USER (HKU)
- HKEY_CURRENT_CONFIG
----
Keys
----
Hives contain keys. These are basically the folders beneath the hives. They can
contain any number of subkeys.
-----------------
Values or Entries
-----------------
Values or Entries are the name/data pairs beneath the keys and subkeys. All keys
have a default name/data pair. It is usually "(Default)"="(value not set)". The
actual value for the name and the data is Null. The registry editor will display
"(Default)" and "(value not set)".
-------
Example
-------
The following example is taken from the windows startup portion of the registry:
```
[HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows\CurrentVersion\Run]
"RTHDVCPL"="\"C:\\Program Files\\Realtek\\Audio\\HDA\\RtkNGUI64.exe\" -s"
"NvBackend"="\"C:\\Program Files (x86)\\NVIDIA Corporation\\Update Core\\NvBackend.exe\""
"BTMTrayAgent"="rundll32.exe \"C:\\Program Files (x86)\\Intel\\Bluetooth\\btmshellex.dll\",TrayApp"
```
In this example these are the values for each:
Hive: `HKEY_LOCAL_MACHINE`
Key and subkeys: `SOFTWARE\Microsoft\Windows\CurrentVersion\Run`
Value:
- There are 3 value names: `RTHDVCPL`, `NvBackend`, and `BTMTrayAgent`
- Each value name has a corresponding value
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import logging
import salt.utils.stringutils
log = logging.getLogger(__name__)
def __virtual__():
'''
Load this state if the reg module exists
'''
if 'reg.read_value' not in __utils__:
return (False, 'reg state module failed to load: '
'missing module function: reg.read_value')
if 'reg.set_value' not in __utils__:
return (False, 'reg state module failed to load: '
'missing module function: reg.set_value')
if 'reg.delete_value' not in __utils__:
return (False, 'reg state module failed to load: '
'missing module function: reg.delete_value')
if 'reg.delete_key_recursive' not in __utils__:
return (False, 'reg state module failed to load: '
'missing module function: reg.delete_key_recursive')
return 'reg'
def _parse_key(key):
'''
split the hive from the key
'''
splt = key.split("\\")
hive = splt.pop(0)
key = '\\'.join(splt)
return hive, key
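# For example, _parse_key(r'HKEY_LOCAL_MACHINE\SOFTWARE\Salt') returns
# ('HKEY_LOCAL_MACHINE', 'SOFTWARE\\Salt').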
def present(name,
vname=None,
vdata=None,
vtype='REG_SZ',
use_32bit_registry=False):
'''
Ensure a registry key or value is present.
:param str name: A string value representing the full path of the key to
include the HIVE, Key, and all Subkeys. For example:
``HKEY_LOCAL_MACHINE\\SOFTWARE\\Salt``
Valid hive values include:
- HKEY_CURRENT_USER or HKCU
- HKEY_LOCAL_MACHINE or HKLM
- HKEY_USERS or HKU
:param str vname: The name of the value you'd like to create beneath the
Key. If this parameter is not passed it will assume you want to set the
(Default) value
:param str vdata: The value you'd like to set. If a value name (vname) is
passed, this will be the data for that value name. If not, this will be the
(Default) value for the key.
The type for the (Default) value is always REG_SZ and cannot be changed.
This parameter is optional. If not passed, the Key will be created with no
associated item/value pairs.
:param str vtype: The value type for the data you wish to store in the
registry. Valid values are:
- REG_BINARY
- REG_DWORD
- REG_EXPAND_SZ
- REG_MULTI_SZ
- REG_SZ (Default)
:param bool use_32bit_registry: Use the 32bit portion of the registry.
Applies only to 64bit windows. 32bit Windows will ignore this parameter.
Default is False.
:return: Returns a dictionary showing the results of the registry operation.
:rtype: dict
The following example will set the ``(Default)`` value for the
``SOFTWARE\\Salt`` key in the ``HKEY_CURRENT_USER`` hive to ``2016.3.1``:
Example:
.. code-block:: yaml
HKEY_CURRENT_USER\\SOFTWARE\\Salt:
reg.present:
- vdata: 2016.3.1
The following example will set the value for the ``version`` entry under the
``SOFTWARE\\Salt`` key in the ``HKEY_CURRENT_USER`` hive to ``2016.3.1``. The
value will be reflected in ``Wow6432Node``:
Example:
.. code-block:: yaml
HKEY_CURRENT_USER\\SOFTWARE\\Salt:
reg.present:
- vname: version
- vdata: 2016.3.1
In the above example the path is interpreted as follows:
- ``HKEY_CURRENT_USER`` is the hive
- ``SOFTWARE\\Salt`` is the key
- ``vname`` is the value name ('version') that will be created under the key
- ``vdata`` is the data that will be assigned to 'version'
'''
ret = {'name': name,
'result': True,
'changes': {},
'comment': ''}
hive, key = _parse_key(name)
# Determine what to do
reg_current = __utils__['reg.read_value'](hive=hive,
key=key,
vname=vname,
use_32bit_registry=use_32bit_registry)
if vdata == reg_current['vdata'] and reg_current['success']:
ret['comment'] = '{0} in {1} is already configured' \
''.format(salt.utils.stringutils.to_unicode(vname, 'utf-8') if vname else '(Default)',
salt.utils.stringutils.to_unicode(name, 'utf-8'))
return ret
vdata_decoded = __utils__['reg.cast_vdata'](vdata=vdata, vtype=vtype)
add_change = {'Key': r'{0}\{1}'.format(hive, key),
'Entry': '{0}'.format(salt.utils.stringutils.to_unicode(vname, 'utf-8') if vname else '(Default)'),
'Value': vdata_decoded}
# Check for test option
if __opts__['test']:
ret['result'] = None
ret['changes'] = {'reg': {'Will add': add_change}}
return ret
# Configure the value
ret['result'] = __utils__['reg.set_value'](hive=hive,
key=key,
vname=vname,
vdata=vdata,
vtype=vtype,
use_32bit_registry=use_32bit_registry)
if not ret['result']:
ret['changes'] = {}
ret['comment'] = r'Failed to add {0} to {1}\{2}'.format(name, hive, key)
else:
ret['changes'] = {'reg': {'Added': add_change}}
ret['comment'] = r'Added {0} to {1}\{2}'.format(name, hive, key)
return ret
def absent(name, vname=None, use_32bit_registry=False):
'''
Ensure a registry value is removed. To remove a key use key_absent.
:param str name: A string value representing the full path of the key to
include the HIVE, Key, and all Subkeys. For example:
``HKEY_LOCAL_MACHINE\\SOFTWARE\\Salt``
Valid hive values include:
- HKEY_CURRENT_USER or HKCU
- HKEY_LOCAL_MACHINE or HKLM
- HKEY_USERS or HKU
    :param str vname: The name of the value you'd like to remove beneath the
    key. If this parameter is not passed, the (Default) value will be removed.
    :param bool use_32bit_registry: Use the 32-bit portion of the registry.
    Applies only to 64-bit Windows. 32-bit Windows will ignore this parameter.
Default is False.
:return: Returns a dictionary showing the results of the registry operation.
:rtype: dict
    Example:
.. code-block:: yaml
'HKEY_CURRENT_USER\\SOFTWARE\\Salt':
            reg.absent:
- vname: version
In the above example the value named ``version`` will be removed from
the SOFTWARE\\Salt key in the HKEY_CURRENT_USER hive. If ``vname`` was not
passed, the (Default) value would be deleted.
'''
ret = {'name': name,
'result': True,
'changes': {},
'comment': ''}
hive, key = _parse_key(name)
# Determine what to do
reg_check = __utils__['reg.read_value'](hive=hive,
key=key,
vname=vname,
use_32bit_registry=use_32bit_registry)
if not reg_check['success'] or reg_check['vdata'] == '(value not set)':
ret['comment'] = '{0} is already absent'.format(name)
return ret
remove_change = {'Key': r'{0}\{1}'.format(hive, key),
'Entry': '{0}'.format(vname if vname else '(Default)')}
# Check for test option
if __opts__['test']:
ret['result'] = None
ret['changes'] = {'reg': {'Will remove': remove_change}}
return ret
# Delete the value
ret['result'] = __utils__['reg.delete_value'](hive=hive,
key=key,
vname=vname,
use_32bit_registry=use_32bit_registry)
if not ret['result']:
ret['changes'] = {}
ret['comment'] = r'Failed to remove {0} from {1}'.format(key, hive)
else:
ret['changes'] = {'reg': {'Removed': remove_change}}
ret['comment'] = r'Removed {0} from {1}'.format(key, hive)
return ret
def key_absent(name, use_32bit_registry=False):
r'''
.. versionadded:: 2015.5.4
Ensure a registry key is removed. This will remove a key and all value
entries it contains. It will fail if the key contains subkeys.
:param str name: A string representing the full path to the key to be
removed to include the hive and the keypath. The hive can be any of the
following:
- HKEY_LOCAL_MACHINE or HKLM
- HKEY_CURRENT_USER or HKCU
    - HKEY_USERS or HKU
    :param bool use_32bit_registry: Use the 32-bit portion of the registry.
    Applies only to 64-bit Windows. 32-bit Windows will ignore this parameter.
Default is False.
:return: Returns a dictionary showing the results of the registry operation.
:rtype: dict
The following example will delete the ``SOFTWARE\Salt`` key and all subkeys
under the ``HKEY_CURRENT_USER`` hive.
Example:
.. code-block:: yaml
'HKEY_CURRENT_USER\SOFTWARE\Salt':
          reg.key_absent
In the above example the path is interpreted as follows:
- ``HKEY_CURRENT_USER`` is the hive
- ``SOFTWARE\Salt`` is the key
'''
ret = {'name': name,
'result': True,
'changes': {},
'comment': ''}
hive, key = _parse_key(name)
# Determine what to do
if not __utils__['reg.read_value'](hive=hive,
key=key,
use_32bit_registry=use_32bit_registry)['success']:
ret['comment'] = '{0} is already absent'.format(name)
return ret
ret['changes'] = {'reg': {
'Removed': {
'Key': r'{0}\{1}'.format(hive, key)
}}}
# Check for test option
if __opts__['test']:
ret['result'] = None
return ret
# Delete the value
__utils__['reg.delete_key_recursive'](hive=hive,
key=key,
use_32bit_registry=use_32bit_registry)
if __utils__['reg.read_value'](hive=hive,
key=key,
use_32bit_registry=use_32bit_registry)['success']:
ret['result'] = False
ret['changes'] = {}
ret['comment'] = 'Failed to remove registry key {0}'.format(name)
return ret
| 32.427056 | 117 | 0.594438 | ["Apache-2.0"] | Feeeenng/salt | salt/states/reg.py | 12225 | Python |
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from recipe import views
router = DefaultRouter()
router.register('tags', views.TagViewSet)
router.register('ingredients', views.IngredientViewSet)
router.register('recipe', views.RecipeViewSet)
app_name = 'recipe'
urlpatterns = [
path('', include(router.urls))
]
| 23.866667 | 55 | 0.77933 | ["MIT"] | Webins/recipe-app-api | app/recipe/urls.py | 358 | Python |
import os
import click
import numpy as np
from tqdm import tqdm
from models.model_loader import load_model
from torchvision.transforms import Compose
from dataset.data_transform import Resize, Rotation, ElasticAndSine, ColorGradGausNoise, AddWidth, Normalize, ToGray, OnlyElastic, OnlySine, ColorGrad, ColorGausNoise
from dataset.text_data import TextDataset, TextDatasetRandomFont
from dataset.collate_fn import text_collate
from utils.data_visualization import TbSummary
from lr_policy import StepLR, DannLR
import pickle as pkl
import glob
import torch
from torch import nn
from torch import optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
from warpctc_pytorch import CTCLoss
from test import test
from models.new_vat import VATLoss, VATLossSign, LabeledATLoss, LabeledAtAndUnlabeledTestVatLoss, VATonRnnSign, VATonRnnCnnSign, VATonCnnSign
from dataset.dataset_metadata import SynthDataInfo
@click.command()
@click.option('--base-data-dir', type=str,
default=os.path.expandvars ('../Data/'),
help='Path to base data directory (all other data paths are relative to this one).')
@click.option('--train-data-path', type=str,
default=os.path.expandvars ('Synthetic/Prepared/data_train.txt'),
help='Path to training dataset (image path to line text) text file (relative to base-data-dir)')
@click.option('--train-base-dir', type=str,
default=os.path.expandvars(
'Synthetic/Prepared/Images'),
help='Path to directory containing training images (relative to base-data-dir)')
@click.option('--orig-eval-data-path', type=str,
default=os.path.expandvars(
'Test/Prepared/im2line.txt'),
help='Path to original test dataset (image path to line text) text file (relative to base-data-dir)')
@click.option('--orig-eval-base-dir', type=str,
default=os.path.expandvars(
'Test/Prepared/LineImages'),
help='Path to directory containing original test images (relative to base-data-dir)')
@click.option('--synth-eval-data-path', type=str,
default=os.path.expandvars ('Synthetic/Prepared/data_val.txt'),
help='Path to synthetic evaluation dataset (image path to line text) text file (relative to base-data-dir)')
@click.option('--synth-eval-base-dir', type=str,
default=os.path.expandvars(
'Synthetic/Prepared/Images'),
help='Path to directory containing synthetic evaluation images (relative to base-data-dir)')
@click.option('--lexicon-path', type=str,
default=os.path.expandvars('char_to_class.pkl'),
help='Path to alphabet lexicon (letter to id), relative to base-data-dir.')
@click.option('--seq-proj', type=str, default="10x20", help='Projection of sequence')
@click.option('--backend', type=str, default="resnet18", help='Backend network to use (default is resnet18)')
@click.option('--snapshot', type=str, default=None, help='Path to pre-trained weights')
@click.option('--input-height', type=int, default=64, help='Height of input images to network')
@click.option('--base-lr', type=float, default=1e-4, help='Base learning rate.') # was e-3
#@click.option('--lr-decay', type=float, default=1e-4, help='Base learning rate') # was 0.0001
@click.option('--elastic-alpha', type=float, default=34, help='Elastic augmentation parameter alpha.')
@click.option('--elastic-sigma', type=float, default=3, help='Elastic augmentation parameter sigma.')
@click.option('--step-size', type=int, default=500, help='Step size for step lr change.')
@click.option('--max-iter', type=int, default=6000, help='Max iterations for training')
@click.option('--batch-size', type=int, default=8, help='Batch size for training')
@click.option('--output-dir', type=str,
default='../Output/exp1',
help='Path to save output snapshot')
@click.option('--test-iter', type=int, default=1000, help='Number of iterations between test evaluation.')
@click.option('--show-iter', type=int, default=1000, help='Number of iterations between showing images in tensorboard.')
@click.option('--test-init', type=bool, default=False, help='Whether to test right after network initialization')
@click.option('--use-gpu', type=bool, default=True, help='Whether to use the gpu')
@click.option('--use-no-font-repeat-data', type=bool, default=True, help='Parameter to remove (always true) - whether to use random training data.')
@click.option('--do-vat', type=bool, default=False, help='Whether to do VAT on synthetic training data')
@click.option('--do-at', type=bool, default=False, help='Whether to do AT on synthetic training data')
@click.option('--vat-ratio', type=float, default=1, help='Ratio of vat on train data loss vs base loss')
@click.option('--test-vat-ratio', type=float, default=1, help='Ratio on vat on test data loss vs base loss')
@click.option('--vat-epsilon', type=float, default=2.5, help='VAT on train hyperparameter - epsilon')
@click.option('--vat-ip', type=int, default=1, help='VAT on train hyperparameter - number of power iterations')
@click.option('--vat-xi', type=float, default=10., help='VAT on train hyperparameter - xi')
@click.option('--vat-sign', type=bool, default=False, help='VAT on train hyperparameter - whether to do sign on vat loss')
@click.option('--do-remove-augs', type=bool, default=False, help='Whether to remove some of the augmentations (for ablation study)')
@click.option('--aug-to-remove', type=str,
default='',
help="with augmentation to remover out of ['elastic', 'sine', 'sine_rotate', 'rotation', 'color_aug', 'color_gaus', 'color_sine']")
@click.option('--do-beam-search', type=bool, default=False, help='whether to do beam search inference in evaluation')
@click.option('--dropout-conv', type=bool, default=False, help='Whether to do dropout between convolution and rnn.')
@click.option('--dropout-rnn', type=bool, default=False, help='Whether to do dropout in rnn.')
@click.option('--dropout-output', type=bool, default=False, help='Whether to do dropout after rnn.')
@click.option('--do-ema', type=bool, default=False, help='Whether to do exponential moving average on weights')
@click.option('--do-gray', type=bool, default=False, help='Whether to use grayscale instead of RGB')
@click.option('--do-test-vat', type=bool, default=False, help='Whether to do VAT loss on original test data')
@click.option('--do-test-entropy', type=bool, default=False, help='Whether to do entropy loss on original test data')
@click.option('--do-test-vat-cnn', type=bool, default=False, help='Whether to do VAT loss on original test data only for cnn part')
@click.option('--do-test-vat-rnn', type=bool, default=False, help='Whether to do VAT loss on original test data only for rnn part')
@click.option('--ada-after-rnn', type=bool, default=False, help='Whether to do adversarial domain adaptation on the rnn part')
@click.option('--ada-before-rnn', type=bool, default=False, help='Whether to do adversarial domain adaptation on the cnn part')
@click.option('--do-ada-lr', type=bool, default=False, help='Whether to use the lr rule suitable for adversarial domain adaptation (from the article)')
@click.option('--ada-ratio', type=float, default=1, help='Ratio of ADA loss vs base loss')
@click.option('--rnn-hidden-size', type=int, default=128, help='Size of rnn hidden layer')
@click.option('--do-lr-step', type=bool, default=False, help='Whether to use a step learning rate schedule')
@click.option('--dataset-name', type=str, default='tibetan', help='Dataset name, currently wiener or tibetan')
def main(base_data_dir, train_data_path, train_base_dir,
orig_eval_data_path, orig_eval_base_dir,
synth_eval_data_path, synth_eval_base_dir,
lexicon_path, seq_proj, backend, snapshot, input_height, base_lr, elastic_alpha, elastic_sigma,
step_size, max_iter,
batch_size, output_dir, test_iter, show_iter, test_init, use_gpu, use_no_font_repeat_data,
do_vat, do_at, vat_ratio, test_vat_ratio, vat_epsilon, vat_ip, vat_xi, vat_sign,
do_remove_augs, aug_to_remove, do_beam_search,
dropout_conv, dropout_rnn, dropout_output, do_ema, do_gray, do_test_vat, do_test_entropy, do_test_vat_cnn,
do_test_vat_rnn,
ada_after_rnn, ada_before_rnn, do_ada_lr, ada_ratio, rnn_hidden_size,
do_lr_step,
dataset_name
):
if not do_lr_step and not do_ada_lr:
raise NotImplementedError('learning rate should be either step or ada.')
train_data_path = os.path.join(base_data_dir, train_data_path)
train_base_dir = os.path.join(base_data_dir, train_base_dir)
synth_eval_data_path = os.path.join(base_data_dir, synth_eval_data_path)
synth_eval_base_dir = os.path.join(base_data_dir, synth_eval_base_dir)
orig_eval_data_path = os.path.join(base_data_dir, orig_eval_data_path)
orig_eval_base_dir = os.path.join(base_data_dir, orig_eval_base_dir)
lexicon_path = os.path.join(base_data_dir, lexicon_path)
all_parameters = locals()
cuda = use_gpu
#print(train_base_dir)
if output_dir is not None:
os.makedirs(output_dir, exist_ok=True)
tb_writer = TbSummary(output_dir)
output_dir = os.path.join(output_dir, 'model')
os.makedirs(output_dir, exist_ok=True)
with open(lexicon_path, 'rb') as f:
lexicon = pkl.load(f)
#print(sorted(lexicon.items(), key=operator.itemgetter(1)))
with open(os.path.join(output_dir, 'params.txt'),'w') as f:
f.writelines(str(all_parameters))
print(all_parameters)
print('new vat')
sin_magnitude = 4
rotate_max_angle = 2
dataset_info = SynthDataInfo(None, None, None, dataset_name.lower())
train_fonts = dataset_info.font_names
all_args = locals()
allowed_removals = ['elastic', 'sine', 'sine_rotate', 'rotation', 'color_aug', 'color_gaus', 'color_sine']
if do_remove_augs and aug_to_remove not in allowed_removals:
raise Exception('augmentation removal value is not allowed.')
if do_remove_augs:
rand_trans = []
if aug_to_remove == 'elastic':
print('doing sine transform :)')
rand_trans.append(OnlySine(sin_magnitude=sin_magnitude))
elif aug_to_remove in ['sine', 'sine_rotate']:
print('doing elastic transform :)')
rand_trans.append(OnlyElastic(elastic_alpha=elastic_alpha, elastic_sigma=elastic_sigma))
if aug_to_remove not in ['elastic', 'sine', 'sine_rotate']:
print('doing elastic transform :)')
print('doing sine transform :)')
rand_trans.append(ElasticAndSine(elastic_alpha=elastic_alpha, elastic_sigma=elastic_sigma, sin_magnitude=sin_magnitude))
if aug_to_remove not in ['rotation', 'sine_rotate']:
print('doing rotation transform :)')
rand_trans.append(Rotation(angle=rotate_max_angle, fill_value=255))
if aug_to_remove not in ['color_aug', 'color_gaus', 'color_sine']:
print('doing color_aug transform :)')
rand_trans.append(ColorGradGausNoise())
elif aug_to_remove == 'color_gaus':
print('doing color_sine transform :)')
rand_trans.append(ColorGrad())
elif aug_to_remove == 'color_sine':
print('doing color_gaus transform :)')
rand_trans.append(ColorGausNoise())
else:
print('doing all transforms :)')
rand_trans = [
ElasticAndSine(elastic_alpha=elastic_alpha, elastic_sigma=elastic_sigma, sin_magnitude=sin_magnitude),
Rotation(angle=rotate_max_angle, fill_value=255),
ColorGradGausNoise()]
if do_gray:
rand_trans = rand_trans + [Resize(hight=input_height),
AddWidth(),
ToGray(),
Normalize()]
else:
rand_trans = rand_trans + [Resize(hight=input_height),
AddWidth(),
Normalize()]
transform_random = Compose(rand_trans)
if do_gray:
transform_simple = Compose([
Resize(hight=input_height),
AddWidth(),
ToGray(),
Normalize()
])
else:
transform_simple = Compose([
Resize(hight=input_height),
AddWidth(),
Normalize()
])
if use_no_font_repeat_data:
print('creating dataset')
train_data = TextDatasetRandomFont(data_path=train_data_path, lexicon=lexicon,
base_path=train_base_dir, transform=transform_random, fonts=train_fonts)
print('finished creating dataset')
else:
print('train data path:\n{}'.format(train_data_path))
print('train_base_dir:\n{}'.format(train_base_dir))
train_data = TextDataset(data_path=train_data_path, lexicon=lexicon,
base_path=train_base_dir, transform=transform_random, fonts=train_fonts)
synth_eval_data = TextDataset(data_path=synth_eval_data_path, lexicon=lexicon,
base_path=synth_eval_base_dir, transform=transform_random, fonts=train_fonts)
orig_eval_data = TextDataset(data_path=orig_eval_data_path, lexicon=lexicon,
base_path=orig_eval_base_dir, transform=transform_simple, fonts=None)
if do_test_vat or do_test_vat_rnn or do_test_vat_cnn:
orig_vat_data = TextDataset(data_path=orig_eval_data_path, lexicon=lexicon,
base_path=orig_eval_base_dir, transform=transform_simple, fonts=None)
if ada_after_rnn or ada_before_rnn:
orig_ada_data = TextDataset(data_path=orig_eval_data_path, lexicon=lexicon,
base_path=orig_eval_base_dir, transform=transform_simple, fonts=None)
#else:
# train_data = TestDataset(transform=transform, abc=abc).set_mode("train")
# synth_eval_data = TestDataset(transform=transform, abc=abc).set_mode("test")
# orig_eval_data = TestDataset(transform=transform, abc=abc).set_mode("test")
seq_proj = [int(x) for x in seq_proj.split('x')]
net = load_model(lexicon=train_data.get_lexicon(), seq_proj=seq_proj, backend=backend,
snapshot=snapshot, cuda=cuda, do_beam_search=do_beam_search,
dropout_conv=dropout_conv,
dropout_rnn=dropout_rnn,
dropout_output=dropout_output,
do_ema=do_ema,
ada_after_rnn=ada_after_rnn, ada_before_rnn=ada_before_rnn,
rnn_hidden_size=rnn_hidden_size
)
optimizer = optim.Adam(net.parameters(), lr = base_lr, weight_decay=0.0001)
if do_ada_lr:
print('using ada lr')
lr_scheduler = DannLR(optimizer, max_iter=max_iter)
elif do_lr_step:
print('using step lr')
lr_scheduler = StepLR(optimizer, step_size=step_size, max_iter=max_iter)
loss_function = CTCLoss()
synth_avg_ed_best = float("inf")
orig_avg_ed_best = float("inf")
epoch_count = 0
if do_test_vat or do_test_vat_rnn or do_test_vat_cnn:
collate_vat = lambda x: text_collate(x, do_mask=True)
vat_load = DataLoader(orig_vat_data, batch_size=batch_size, num_workers=4, shuffle=True, collate_fn=collate_vat)
vat_len = len(vat_load)
cur_vat = 0
vat_iter = iter(vat_load)
if ada_after_rnn or ada_before_rnn:
collate_ada = lambda x: text_collate(x, do_mask=True)
ada_load = DataLoader(orig_ada_data, batch_size=batch_size, num_workers=4, shuffle=True, collate_fn=collate_ada)
ada_len = len(ada_load)
cur_ada = 0
ada_iter = iter(ada_load)
loss_domain = torch.nn.NLLLoss()
while True:
collate = lambda x: text_collate(x, do_mask=(do_vat or ada_before_rnn or ada_after_rnn))
data_loader = DataLoader(train_data, batch_size=batch_size, num_workers=4, shuffle=True, collate_fn=collate)
loss_mean_ctc = []
loss_mean_vat = []
loss_mean_at = []
loss_mean_comp = []
loss_mean_total = []
loss_mean_test_vat = []
loss_mean_test_pseudo = []
loss_mean_test_rand = []
loss_mean_ada_rnn_s = []
loss_mean_ada_rnn_t = []
loss_mean_ada_cnn_s = []
loss_mean_ada_cnn_t = []
iterator = tqdm(data_loader)
iter_count = 0
for iter_num, sample in enumerate(iterator):
total_iter = (epoch_count * len(data_loader)) + iter_num
if ((total_iter > 1) and total_iter % test_iter == 0) or (test_init and total_iter == 0):
# epoch_count != 0 and
print("Test phase")
net = net.eval()
if do_ema:
net.start_test()
synth_acc, synth_avg_ed, synth_avg_no_stop_ed, synth_avg_loss = test(net, synth_eval_data,
synth_eval_data.get_lexicon(),
cuda, visualize=False,
dataset_info=dataset_info,
batch_size=batch_size,
tb_writer=tb_writer,
n_iter=total_iter,
initial_title='val_synth',
loss_function=loss_function,
output_path=os.path.join(
output_dir, 'results'),
do_beam_search=False)
orig_acc, orig_avg_ed, orig_avg_no_stop_ed, orig_avg_loss = test(net, orig_eval_data,
orig_eval_data.get_lexicon(), cuda,
visualize=False,
dataset_info=dataset_info,
batch_size=batch_size,
tb_writer=tb_writer, n_iter=total_iter,
initial_title='test_orig',
loss_function=loss_function,
output_path=os.path.join(output_dir,
'results'),
do_beam_search=do_beam_search)
net = net.train()
#save periodic
if output_dir is not None and total_iter // 30000:
periodic_save = os.path.join(output_dir, 'periodic_save')
os.makedirs(periodic_save, exist_ok=True)
old_save = glob.glob(os.path.join(periodic_save,'*'))
torch.save(net.state_dict(), os.path.join(output_dir, "crnn_" + backend + "_" + str(total_iter)))
if orig_avg_no_stop_ed < orig_avg_ed_best:
orig_avg_ed_best = orig_avg_no_stop_ed
if output_dir is not None:
torch.save(net.state_dict(), os.path.join(output_dir, "crnn_" + backend + "_best"))
if synth_avg_no_stop_ed < synth_avg_ed_best:
synth_avg_ed_best = synth_avg_no_stop_ed
if do_ema:
net.end_test()
print("synth: avg_ed_best: {}\t avg_ed: {}; avg_nostop_ed: {}; acc: {}".format(synth_avg_ed_best,
synth_avg_ed,
synth_avg_no_stop_ed,
synth_acc))
print("orig: avg_ed_best: {}\t avg_ed: {}; avg_nostop_ed: {}; acc: {}".format(orig_avg_ed_best,
orig_avg_ed,
orig_avg_no_stop_ed,
orig_acc))
tb_writer.get_writer().add_scalars('data/test',
{'synth_ed_total': synth_avg_ed,
'synth_ed_no_stop': synth_avg_no_stop_ed,
'synth_avg_loss': synth_avg_loss,
'orig_ed_total': orig_avg_ed,
'orig_ed_no_stop': orig_avg_no_stop_ed,
'orig_avg_loss': orig_avg_loss
}, total_iter)
if len(loss_mean_ctc) > 0:
train_dict = {'mean_ctc_loss': np.mean(loss_mean_ctc)}
if do_vat:
train_dict = {**train_dict, **{'mean_vat_loss':np.mean(loss_mean_vat)}}
if do_at:
train_dict = {**train_dict, **{'mean_at_loss':np.mean(loss_mean_at)}}
if do_test_vat:
train_dict = {**train_dict, **{'mean_test_vat_loss': np.mean(loss_mean_test_vat)}}
if do_test_vat_rnn and do_test_vat_cnn:
train_dict = {**train_dict, **{'mean_test_vat_crnn_loss': np.mean(loss_mean_test_vat)}}
elif do_test_vat_rnn:
train_dict = {**train_dict, **{'mean_test_vat_rnn_loss': np.mean(loss_mean_test_vat)}}
elif do_test_vat_cnn:
train_dict = {**train_dict, **{'mean_test_vat_cnn_loss': np.mean(loss_mean_test_vat)}}
if ada_after_rnn:
train_dict = {**train_dict,
**{'mean_ada_rnn_s_loss': np.mean(loss_mean_ada_rnn_s),
'mean_ada_rnn_t_loss': np.mean(loss_mean_ada_rnn_t)}}
if ada_before_rnn:
train_dict = {**train_dict,
**{'mean_ada_cnn_s_loss': np.mean(loss_mean_ada_cnn_s),
'mean_ada_cnn_t_loss': np.mean(loss_mean_ada_cnn_t)}}
print(train_dict)
tb_writer.get_writer().add_scalars('data/train',
train_dict,
total_iter)
'''
# for multi-gpu support
if sample["img"].size(0) % len(gpu.split(',')) != 0:
continue
'''
optimizer.zero_grad()
imgs = Variable(sample["img"])
#print("images sizes are:")
#print(sample["img"].shape)
if do_vat or ada_after_rnn or ada_before_rnn:
mask = sample['mask']
labels_flatten = Variable(sample["seq"]).view(-1)
label_lens = Variable(sample["seq_len"].int())
#print("image sequence length is:")
#print(sample["im_seq_len"])
#print("label sequence length is:")
#print(sample["seq_len"].view(1,-1))
img_seq_lens = sample["im_seq_len"]
if cuda:
imgs = imgs.cuda()
if do_vat or ada_after_rnn or ada_before_rnn:
mask = mask.cuda()
if do_ada_lr:
ada_p = float(iter_count) / max_iter
lr_scheduler.update(ada_p)
if ada_before_rnn or ada_after_rnn:
if not do_ada_lr:
ada_p = float(iter_count) / max_iter
ada_alpha = 2. / (1. + np.exp(-10. * ada_p)) - 1
if cur_ada >= ada_len:
ada_load = DataLoader(orig_ada_data, batch_size=batch_size, num_workers=4, shuffle=True,
collate_fn=collate_ada)
ada_len = len(ada_load)
cur_ada = 0
ada_iter = iter(ada_load)
ada_batch = next(ada_iter)
cur_ada += 1
ada_imgs = Variable(ada_batch["img"])
ada_img_seq_lens = ada_batch["im_seq_len"]
ada_mask = ada_batch['mask'].byte()
if cuda:
ada_imgs = ada_imgs.cuda()
_, ada_cnn, ada_rnn = net(ada_imgs, ada_img_seq_lens,
ada_alpha=ada_alpha, mask=ada_mask)
if ada_before_rnn:
ada_num_features = ada_cnn.size(0)
else:
ada_num_features = ada_rnn.size(0)
domain_label = torch.zeros(ada_num_features)
domain_label = domain_label.long()
if cuda:
domain_label = domain_label.cuda()
domain_label = Variable(domain_label)
if ada_before_rnn:
err_ada_cnn_t = loss_domain(ada_cnn, domain_label)
if ada_after_rnn:
err_ada_rnn_t = loss_domain(ada_rnn, domain_label)
if do_test_vat and do_at:
# test part!
if cur_vat >= vat_len:
vat_load = DataLoader(orig_vat_data, batch_size=batch_size, num_workers=4, shuffle=True,
collate_fn=collate_vat)
vat_len = len(vat_load)
cur_vat = 0
vat_iter = iter(vat_load)
test_vat_batch = next(vat_iter)
cur_vat += 1
test_vat_mask = test_vat_batch['mask']
test_vat_imgs = Variable(test_vat_batch["img"])
test_vat_img_seq_lens = test_vat_batch["im_seq_len"]
if cuda:
test_vat_imgs = test_vat_imgs.cuda()
test_vat_mask = test_vat_mask.cuda()
# train part
at_test_vat_loss = LabeledAtAndUnlabeledTestVatLoss(xi=vat_xi, eps=vat_epsilon, ip=vat_ip)
at_loss, test_vat_loss = at_test_vat_loss(model=net, train_x=imgs, train_labels_flatten=labels_flatten,
train_img_seq_lens=img_seq_lens, train_label_lens=label_lens, batch_size=batch_size,
test_x=test_vat_imgs, test_seq_len=test_vat_img_seq_lens, test_mask=test_vat_mask)
elif do_test_vat or do_test_vat_rnn or do_test_vat_cnn:
if cur_vat >= vat_len:
vat_load = DataLoader(orig_vat_data, batch_size=batch_size, num_workers=4, shuffle=True,
collate_fn=collate_vat)
vat_len = len(vat_load)
cur_vat = 0
vat_iter = iter(vat_load)
vat_batch = next(vat_iter)
cur_vat += 1
vat_mask = vat_batch['mask']
vat_imgs = Variable(vat_batch["img"])
vat_img_seq_lens = vat_batch["im_seq_len"]
if cuda:
vat_imgs = vat_imgs.cuda()
vat_mask = vat_mask.cuda()
if do_test_vat:
if do_test_vat_rnn or do_test_vat_cnn:
raise "can only do one of do_test_vat | (do_test_vat_rnn, do_test_vat_cnn)"
if vat_sign == True:
test_vat_loss = VATLossSign(do_test_entropy=do_test_entropy, xi=vat_xi, eps=vat_epsilon, ip=vat_ip)
else:
test_vat_loss = VATLoss(xi=vat_xi, eps=vat_epsilon, ip=vat_ip)
elif do_test_vat_rnn and do_test_vat_cnn:
test_vat_loss = VATonRnnCnnSign(xi=vat_xi, eps=vat_epsilon, ip=vat_ip)
elif do_test_vat_rnn:
test_vat_loss = VATonRnnSign(xi=vat_xi, eps=vat_epsilon, ip=vat_ip)
elif do_test_vat_cnn:
test_vat_loss = VATonCnnSign(xi=vat_xi, eps=vat_epsilon, ip=vat_ip)
if do_test_vat_cnn and do_test_vat_rnn:
test_vat_loss, cnn_lds, rnn_lds = test_vat_loss(net, vat_imgs, vat_img_seq_lens, vat_mask)
elif do_test_vat:
test_vat_loss = test_vat_loss(net, vat_imgs, vat_img_seq_lens, vat_mask)
elif do_vat:
vat_loss = VATLoss(xi=vat_xi, eps=vat_epsilon, ip=vat_ip)
vat_loss = vat_loss(net, imgs, img_seq_lens, mask)
elif do_at:
at_loss = LabeledATLoss(xi=vat_xi, eps=vat_epsilon, ip=vat_ip)
at_loss = at_loss(net, imgs, labels_flatten, img_seq_lens, label_lens, batch_size)
if ada_after_rnn or ada_before_rnn:
preds, ada_cnn, ada_rnn = net(imgs, img_seq_lens, ada_alpha=ada_alpha, mask=mask)
if ada_before_rnn:
ada_num_features = ada_cnn.size(0)
else:
ada_num_features = ada_rnn.size(0)
domain_label = torch.ones(ada_num_features)
domain_label = domain_label.long()
if cuda:
domain_label = domain_label.cuda()
domain_label = Variable(domain_label)
if ada_before_rnn:
err_ada_cnn_s = loss_domain(ada_cnn, domain_label)
if ada_after_rnn:
err_ada_rnn_s = loss_domain(ada_rnn, domain_label)
else:
preds = net(imgs, img_seq_lens)
'''
if output_dir is not None:
if (show_iter is not None and iter_num != 0 and iter_num % show_iter == 0):
print_data_visuals(net, tb_writer, train_data.get_lexicon(), sample["img"], labels_flatten, label_lens,
preds, ((epoch_count * len(data_loader)) + iter_num))
'''
loss_ctc = loss_function(preds, labels_flatten,
Variable(torch.IntTensor(np.array(img_seq_lens))), label_lens) / batch_size
if loss_ctc.data[0] in [float("inf"), -float("inf")]:
print("warnning: loss should not be inf.")
continue
total_loss = loss_ctc
if do_vat:
#mask = sample['mask']
#if cuda:
# mask = mask.cuda()
#vat_loss = virtual_adversarial_loss(net, imgs, img_seq_lens, mask, is_training=True, do_entropy=False, epsilon=vat_epsilon, num_power_iterations=1,
# xi=1e-6, average_loss=True)
total_loss = total_loss + vat_ratio * vat_loss.cpu()
if do_test_vat or do_test_vat_rnn or do_test_vat_cnn:
total_loss = total_loss + test_vat_ratio * test_vat_loss.cpu()
if ada_before_rnn:
total_loss = total_loss + ada_ratio * err_ada_cnn_s.cpu() + ada_ratio * err_ada_cnn_t.cpu()
if ada_after_rnn:
total_loss = total_loss + ada_ratio * err_ada_rnn_s.cpu() + ada_ratio * err_ada_rnn_t.cpu()
total_loss.backward()
nn.utils.clip_grad_norm(net.parameters(), 10.0)
if -400 < loss_ctc.data[0] < 400:
loss_mean_ctc.append(loss_ctc.data[0])
if -1000 < total_loss.data[0] < 1000:
loss_mean_total.append(total_loss.data[0])
if len(loss_mean_total) > 100:
loss_mean_total = loss_mean_total[-100:]
status = "epoch: {0:5d}; iter_num: {1:5d}; lr: {2:.2E}; loss_mean: {3:.3f}; loss: {4:.3f}".format(epoch_count,
lr_scheduler.last_iter,
lr_scheduler.get_lr(),
np.mean(loss_mean_total),
loss_ctc.data[0])
if ada_after_rnn:
loss_mean_ada_rnn_s.append(err_ada_rnn_s.data[0])
loss_mean_ada_rnn_t.append(err_ada_rnn_t.data[0])
status += "; ladatrnns: {0:.3f}; ladatrnnt: {1:.3f}".format(
err_ada_rnn_s.data[0], err_ada_rnn_t.data[0]
)
if ada_before_rnn:
loss_mean_ada_cnn_s.append(err_ada_cnn_s.data[0])
loss_mean_ada_cnn_t.append(err_ada_cnn_t.data[0])
status += "; ladatcnns: {0:.3f}; ladatcnnt: {1:.3f}".format(
err_ada_cnn_s.data[0], err_ada_cnn_t.data[0]
)
if do_vat:
loss_mean_vat.append(vat_loss.data[0])
status += "; lvat: {0:.3f}".format(
vat_loss.data[0]
)
if do_at:
loss_mean_at.append(at_loss.data[0])
status += "; lat: {0:.3f}".format(
at_loss.data[0]
)
if do_test_vat:
loss_mean_test_vat.append(test_vat_loss.data[0])
status += "; l_tvat: {0:.3f}".format(
test_vat_loss.data[0]
)
if do_test_vat_rnn or do_test_vat_cnn:
loss_mean_test_vat.append(test_vat_loss.data[0])
if do_test_vat_rnn and do_test_vat_cnn:
status += "; l_tvatc: {}".format(
cnn_lds.data[0]
)
status += "; l_tvatr: {}".format(
rnn_lds.data[0]
)
else:
status += "; l_tvat: {}".format(
test_vat_loss.data[0]
)
iterator.set_description(status)
optimizer.step()
if do_lr_step:
lr_scheduler.step()
if do_ema:
net.udate_ema()
iter_count += 1
if output_dir is not None:
torch.save(net.state_dict(), os.path.join(output_dir, "crnn_" + backend + "_last"))
epoch_count += 1
return
if __name__ == '__main__':
main()
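# Example invocation (a sketch; the paths and hyper-parameter values below are
# placeholders, not recommendations shipped with this repository):
#
#   python train.py \
#       --base-data-dir ../Data/ \
#       --output-dir ../Output/exp1 \
#       --batch-size 8 --max-iter 6000 \
#       --do-lr-step True --step-size 500 \
#       --do-vat True --vat-ratio 1
#
# main() requires at least one of --do-lr-step / --do-ada-lr to be True.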
| 55.190031 | 166 | 0.561526 | ["MIT"] | alexeypechorin/tibetan-transductive | train.py | 35432 | Python |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
# pylint: disable=too-many-statements
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=unused-argument
from knack.log import get_logger
from azure.cli.core.profiles import ResourceType
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.azclierror import RequiredArgumentMissingError
from azure.cli.core.util import sdk_no_wait
from ._client_factory import _compute_client_factory
logger = get_logger(__name__)
def sig_community_image_definition_list(client, location, public_gallery_name, marker=None, show_next_marker=None):
generator = client.list(location=location, public_gallery_name=public_gallery_name)
return get_page_result(generator, marker, show_next_marker)
def sig_community_image_version_list(client, location, public_gallery_name, gallery_image_name, marker=None,
show_next_marker=None):
generator = client.list(location=location, public_gallery_name=public_gallery_name,
gallery_image_name=gallery_image_name)
return get_page_result(generator, marker, show_next_marker)
def get_page_result(generator, marker, show_next_marker=None):
pages = generator.by_page(continuation_token=marker) # ContainerPropertiesPaged
result = list_generator(pages=pages)
if show_next_marker:
next_marker = {"nextMarker": pages.continuation_token}
result.append(next_marker)
else:
if pages.continuation_token:
logger.warning('Next Marker:')
logger.warning(pages.continuation_token)
return result
# The REST service takes 50 items as a page by default
def list_generator(pages, num_results=50):
result = []
# get first page items
page = list(next(pages))
result += page
while True:
if not pages.continuation_token:
break
# handle num results
if num_results is not None:
if num_results == len(result):
break
page = list(next(pages))
result += page
return result
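# Sketch of the resume pattern the helpers above enable; the client, location
# and gallery name are placeholders rather than values defined in this module:
#
#   first = sig_community_image_definition_list(
#       client, "westus2", "myPublicGallery", show_next_marker=True)
#   marker = first[-1]["nextMarker"]  # appended only when show_next_marker is set
#   second = sig_community_image_definition_list(
#       client, "westus2", "myPublicGallery", marker=marker)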
def _get_resource_group_location(cli_ctx, resource_group_name):
client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
# pylint: disable=no-member
return client.resource_groups.get(resource_group_name).location
def create_image_gallery(cmd, resource_group_name, gallery_name, description=None,
location=None, no_wait=False, tags=None, permissions=None, soft_delete=None,
publisher_uri=None, publisher_contact=None, eula=None, public_name_prefix=None):
from .vendored_sdks.azure_mgmt_compute.models._models_py3 import Gallery
location = location or _get_resource_group_location(cmd.cli_ctx, resource_group_name)
gallery = Gallery(description=description, location=location, tags=(tags or {}))
if soft_delete is not None:
gallery.soft_delete_policy = {'is_soft_delete_enabled': soft_delete}
client = _compute_client_factory(cmd.cli_ctx)
if permissions:
from .vendored_sdks.azure_mgmt_compute.models._models_py3 import SharingProfile
gallery.sharing_profile = SharingProfile(permissions=permissions)
if permissions == 'Community':
if publisher_uri is None or publisher_contact is None or eula is None or public_name_prefix is None:
raise RequiredArgumentMissingError('If you want to share to the community, '
'you need to fill in all the following parameters:'
' --publisher-uri, --publisher-email, --eula, --public-name-prefix.')
from .vendored_sdks.azure_mgmt_compute.models._models_py3 import CommunityGalleryInfo
gallery.sharing_profile.community_gallery_info = CommunityGalleryInfo(publisher_uri=publisher_uri,
publisher_contact=publisher_contact,
eula=eula,
public_name_prefix=public_name_prefix)
return sdk_no_wait(no_wait, client.galleries.begin_create_or_update, resource_group_name, gallery_name, gallery)
def sig_share_update(cmd, client, resource_group_name, gallery_name, subscription_ids=None, tenant_ids=None,
op_type=None):
from .vendored_sdks.azure_mgmt_compute.models._models_py3 import SharingProfileGroup, SharingUpdate, SharingProfileGroupTypes
if op_type != 'EnableCommunity':
if subscription_ids is None and tenant_ids is None:
raise RequiredArgumentMissingError('At least one of subscription ids or tenant ids must be provided')
groups = []
if subscription_ids:
groups.append(SharingProfileGroup(type=SharingProfileGroupTypes.SUBSCRIPTIONS, ids=subscription_ids))
if tenant_ids:
groups.append(SharingProfileGroup(type=SharingProfileGroupTypes.AAD_TENANTS, ids=tenant_ids))
sharing_update = SharingUpdate(operation_type=op_type, groups=groups)
return client.begin_update(resource_group_name=resource_group_name,
gallery_name=gallery_name,
sharing_update=sharing_update)
| 49.333333 | 129 | 0.675676 | ["MIT"] | Arkanayan/azure-cli-extensions | src/image-gallery/azext_image_gallery/custom.py | 5772 | Python |
from unittest.mock import MagicMock
from django.core.exceptions import ValidationError
from users.backends import DakaraModelBackend
from users.tests.base_test import UsersAPITestCase, config_email_disabled
class DakaraModelBackendTestCase(UsersAPITestCase):
"""Test the authentication backend."""
def setUp(self):
# create a user without any rights
self.user = self.create_user("TestUser", email="[email protected]", password="pass")
def test_authenticate_username_superuser(self):
"""Test to authenticate as superuser."""
self.user.is_superuser = True
self.user.validated_by_email = False
self.user.validated_by_manager = False
self.user.save()
backend = DakaraModelBackend()
self.assertEqual(
backend.authenticate(MagicMock(), username="TestUser", password="pass"),
self.user,
)
def test_authenticate_username_not_active(self):
"""Test to authenticate an inactive user."""
self.user.is_active = False
self.user.save()
backend = DakaraModelBackend()
self.assertIsNone(
backend.authenticate(MagicMock(), username="TestUser", password="pass"),
)
def test_authenticate_username_not_validated_by_email(self):
"""Test to authenticate when not validated by email."""
self.user.validated_by_email = False
self.user.validated_by_manager = True
self.user.save()
backend = DakaraModelBackend()
with self.assertRaisesRegex(
ValidationError, "This user email has not been validated"
):
backend.authenticate(MagicMock(), username="TestUser", password="pass")
@config_email_disabled
def test_authenticate_username_not_validated_by_email_no_email(self):
"""Test to authenticate when not validated by email and emails disabled."""
self.user.validated_by_email = False
self.user.validated_by_manager = True
self.user.save()
backend = DakaraModelBackend()
self.assertEqual(
backend.authenticate(MagicMock(), username="TestUser", password="pass"),
self.user,
)
def test_authenticate_username_not_validated_by_manager(self):
"""Test to authenticate when not validated by manager."""
self.user.validated_by_email = True
self.user.validated_by_manager = False
self.user.save()
backend = DakaraModelBackend()
with self.assertRaisesRegex(
ValidationError, "This user account has not been validated by a manager"
):
backend.authenticate(MagicMock(), username="TestUser", password="pass")
def test_authenticate_username_ok(self):
"""Test to authenticate."""
self.user.validated_by_email = True
self.user.validated_by_manager = True
self.user.save()
backend = DakaraModelBackend()
self.assertEqual(
backend.authenticate(MagicMock(), username="TestUser", password="pass"),
self.user,
)
| 35.551724 | 88 | 0.66602 | ["MIT"] | DakaraProject/dakara-server | dakara_server/users/tests/test_backends.py | 3093 | Python |
#!/usr/bin/env python3
# Macaw
#
# Testing file open and string concatenation.
import random
import pkgutil
def main():
# This dictionary of words is for testing only and should *not* be considered secure.
# Courtesy of https://gist.github.com/deekayen/4148741
#f = open('dictionary.txt')
f = pkgutil.get_data("macaw","dictionary.txt").decode("utf8")
wordList = f.split()
password = generatePassword(wordList)
speakPassword(password)
def speakPassword(str):
print(r"""
,,,___
,' _ \__ ___________________________________________
/ { O / `\ / \
,\ } /---./ .-' """+str+"""
/\ `-.__- `--' `-. |
/ `._ : | \___________________________________________/
/\_; -' : ;
/ \_; / /
/| \ \_/..-'
________|_\___/_\\\_\\\________
----------------;;-;;--------
\/ `-'/
|\_|_/|
\/ \/
\_/
""")
def generatePassword(wordList):
tempPass = ''
for i in range(0, 5):
word = wordList[random.randint(0,999)] # grab a random word from the dictionary file.
tempPass = tempPass + word #concat that word to the end of the password.
return tempPass
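# A sketch of a variant built on the standard-library secrets module; it is not
# part of the original tool, but illustrates the caveat in main() that the
# random-based generator above is for testing only.
def generateSecurePassword(wordList, numWords=5):
    import secrets
    # secrets.choice draws from an OS-level CSPRNG, unlike random.randint.
    return ''.join(secrets.choice(wordList) for _ in range(numWords))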
| 30.913043 | 93 | 0.466245 | ["MIT"] | dcchambers/macaw | macaw/macaw.py | 1422 | Python |
"""
Django settings for hotelrooms project.
Generated by 'django-admin startproject' using Django 3.0.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '^b7=e99!2(t7csio=(lospr6ebgbp-2(*n^il4vt8dotctorm*'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.postgres',
'booking',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'hotelrooms.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, "hotelrooms", "templates"),
os.path.join(BASE_DIR, "booking", "templates"),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'hotelrooms.wsgi.application'
PROJECT_DIR = os.path.dirname(__file__)
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'hotelrooms',
'PORT': 5433,
'HOST': os.getenv("DB_HOST", "localhost"),
'USER': 'django',
'PASSWORD': 'hotelrooms',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
DATE_INPUT_FORMATS = [
'%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
'%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
'%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
'%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
'%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
]
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_ROOT = os.path.join(PROJECT_DIR, 'static/')
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
| 27.751724 | 91 | 0.65333 | ["MIT"] | atombrella/hotel-room-reservation | hotelrooms/hotelrooms/settings.py | 4024 | Python |
import time
print("What shall I remind you about?")
text = input()
print("In how many minutes?")
local_time = float(input())
local_time = local_time * 60
time.sleep(local_time)
print(text)
| 21.666667 | 39 | 0.728205 | ["MIT"] | Awesome12-arch/Python-Reminder-Application | Code.py | 195 | Python |
from math import ceil
import datetime
from altair import Chart # type: ignore
import pandas as pd # type: ignore
import numpy as np
from .parameters import Parameters
from .utils import add_date_column
from .presentation import DATE_FORMAT
def new_admissions_chart(
alt, projection_admits: pd.DataFrame, parameters: Parameters
) -> Chart:
"""docstring"""
plot_projection_days = parameters.n_days - 10
max_y_axis = parameters.max_y_axis
as_date = parameters.as_date
y_scale = alt.Scale()
if max_y_axis is not None:
y_scale.domain = (0, max_y_axis)
tooltip_dict = {False: "day", True: "date:T"}
if as_date:
projection_admits = add_date_column(projection_admits)
x_kwargs = {"shorthand": "date:T", "title": "Date", "axis": alt.Axis(format=(DATE_FORMAT))}
else:
x_kwargs = {"shorthand": "day", "title": "Days from today"}
# TODO fix the fold to allow any number of dispositions
ceiled_admits = projection_admits.copy()
ceiled_admits.hospitalized = np.ceil(ceiled_admits.hospitalized)
ceiled_admits.icu = np.ceil(ceiled_admits.icu)
ceiled_admits.ventilated = np.ceil(ceiled_admits.ventilated)
return (
alt.Chart(ceiled_admits.head(plot_projection_days))
.transform_fold(fold=["hospitalized", "icu", "ventilated"])
.mark_line(point=True)
.encode(
x=alt.X(**x_kwargs),
y=alt.Y("value:Q", title="Daily admissions", scale=y_scale),
color="key:N",
tooltip=[
tooltip_dict[as_date],
alt.Tooltip("value:Q", format=".0f", title="Admissions"),
"key:N",
],
)
.interactive()
)
def admitted_patients_chart(
alt, census: pd.DataFrame, parameters: Parameters
) -> Chart:
"""docstring"""
plot_projection_days = parameters.n_days - 10
max_y_axis = parameters.max_y_axis
as_date = parameters.as_date
if as_date:
census = add_date_column(census)
x_kwargs = {"shorthand": "date:T", "title": "Date", "axis": alt.Axis(format=(DATE_FORMAT))}
idx = "date:T"
else:
x_kwargs = {"shorthand": "day", "title": "Days from today"}
idx = "day"
y_scale = alt.Scale()
if max_y_axis:
y_scale.domain = (0, max_y_axis)
# TODO fix the fold to allow any number of dispositions
return (
alt.Chart(census.head(plot_projection_days))
.transform_fold(fold=["hospitalized", "icu", "ventilated"])
.mark_line(point=True)
.encode(
x=alt.X(**x_kwargs),
y=alt.Y("value:Q", title="Census", scale=y_scale),
color="key:N",
tooltip=[
idx,
alt.Tooltip("value:Q", format=".0f", title="Census"),
"key:N",
],
)
.interactive()
)
def additional_projections_chart(
alt, model, parameters
) -> Chart:
# TODO use subselect of df_raw instead of creating a new df
raw_df = model.raw_df
dat = pd.DataFrame({
"infected": raw_df.infected,
"recovered": raw_df.recovered
})
dat["day"] = dat.index
as_date = parameters.as_date
max_y_axis = parameters.max_y_axis
if as_date:
dat = add_date_column(dat)
x_kwargs = {"shorthand": "date:T", "title": "Date", "axis": alt.Axis(format=(DATE_FORMAT))}
else:
x_kwargs = {"shorthand": "day", "title": "Days from today"}
y_scale = alt.Scale()
if max_y_axis is not None:
y_scale.domain = (0, max_y_axis)
return (
alt.Chart(dat)
.transform_fold(fold=["infected", "recovered"])
.mark_line()
.encode(
x=alt.X(**x_kwargs),
y=alt.Y("value:Q", title="Case Volume", scale=y_scale),
tooltip=["key:N", "value:Q"],
color="key:N",
)
.interactive()
)
def chart_descriptions(chart: Chart, labels, suffix: str = ""):
"""
:param chart: Chart: The alt chart to be used in finding max points
:param suffix: str: The assumption is that the charts have similar column names.
The census chart adds " Census" to the column names.
Make sure to include a space or underscore as appropriate
:return: str: Returns a multi-line string description of the results
"""
messages = []
cols = ["hospitalized", "icu", "ventilated"]
asterisk = False
day = "date" if "date" in chart.data.columns else "day"
for col in cols:
if chart.data[col].idxmax() + 1 == len(chart.data):
asterisk = True
on = chart.data[day][chart.data[col].idxmax()]
if day == "date":
on = datetime.datetime.strftime(on, "%b %d") # todo: bring this to an optional arg / i18n
else:
on += 1 # 0 index issue
messages.append(
"{}{} peaks at {:,} on day {}{}".format(
labels[col],
suffix,
ceil(chart.data[col].max()),
on,
"*" if asterisk else "",
)
)
if asterisk:
messages.append("_* The max is at the upper bound of the data, and therefore may not be the actual max_")
return "\n\n".join(messages)
| 29.893258 | 113 | 0.584852 | ["MIT"] | degerli/chime-1 | src/penn_chime/charts.py | 5321 | Python |
import shlex
import string
import sys
from contextlib import contextmanager
from typing import Any, Callable, Generic, List, Optional, Tuple, Type, TypeVar, cast
import pytest
import simple_parsing
from simple_parsing import ConflictResolution, DashVariant, ParsingError
from simple_parsing.utils import camel_case
from simple_parsing.wrappers.field_wrapper import ArgumentGenerationMode, NestedMode
xfail = pytest.mark.xfail
parametrize = pytest.mark.parametrize
def xfail_param(*args, reason: str):
if len(args) == 1 and isinstance(args[0], tuple):
args = args[0]
return pytest.param(*args, marks=pytest.mark.xfail(reason=reason))
Dataclass = TypeVar("Dataclass")
@contextmanager
def raises(exception=ParsingError, match=None, code: int = None):
with pytest.raises(exception, match=match):
yield
from io import StringIO
from contextlib import redirect_stderr
@contextmanager
def exits_and_writes_to_stderr(match: str = ""):
s = StringIO()
with redirect_stderr(s), raises(SystemExit):
yield
s.seek(0)
err_string = s.read()
if match:
assert match in err_string, err_string
else:
assert err_string, err_string
@contextmanager
def raises_missing_required_arg():
with exits_and_writes_to_stderr("the following arguments are required"):
yield
@contextmanager
def raises_expected_n_args(n: int):
with exits_and_writes_to_stderr(f"expected {n} arguments"):
yield
@contextmanager
def raises_unrecognized_args(*args: str):
with exits_and_writes_to_stderr("unrecognized arguments: " + " ".join(args or [])):
yield
def assert_help_output_equals(actual: str, expected: str) -> None:
# Replace the start with `prog`, since the test runner might not always be
# `pytest`, could also be __main__ when debugging with VSCode
prog = sys.argv[0].split("/")[-1]
if prog != "pytest":
expected = expected.replace("usage: pytest", f"usage: {prog}")
remove = string.punctuation + string.whitespace
if "optional arguments" in expected and sys.version_info[:2] >= (3, 10):
expected = expected.replace("optional arguments", "options")
actual_str = "".join(actual.split())
actual_str = actual.translate(str.maketrans("", "", remove))
expected_str = expected.translate(str.maketrans("", "", remove))
assert actual_str == expected_str, "\n" + "\n".join([actual_str, expected_str])
T = TypeVar("T")
class TestParser(simple_parsing.ArgumentParser, Generic[T]):
__test__ = False
""" A parser subclass just used for testing.
Makes the retrieval of the arguments a bit easier to read.
"""
def __init__(self, *args, **kwargs):
self._current_dest = None
self._current_dataclass = None
super().__init__(*args, **kwargs)
def add_arguments(self, dataclass: Type, dest, prefix="", default=None):
if self._current_dest == dest and self._current_dataclass == dataclass:
return # already added arguments for that dataclass.
self._current_dest = dest
self._current_dataclass = dataclass
return super().add_arguments(dataclass, dest, prefix=prefix, default=default)
def __call__(self, args: str) -> T:
namespace = self.parse_args(shlex.split(args))
value = getattr(namespace, self._current_dest)
value = cast(T, value)
return value
class TestSetup:
@classmethod
def setup(
cls: Type[Dataclass],
arguments: Optional[str] = "",
dest: Optional[str] = None,
default: Optional[Dataclass] = None,
conflict_resolution_mode: ConflictResolution = ConflictResolution.AUTO,
add_option_string_dash_variants: DashVariant = DashVariant.AUTO,
parse_known_args: bool = False,
attempt_to_reorder: bool = False,
*,
argument_generation_mode: ArgumentGenerationMode = ArgumentGenerationMode.FLAT,
nested_mode: NestedMode = NestedMode.DEFAULT,
) -> Dataclass:
"""Basic setup for a test.
Keyword Arguments:
arguments {Optional[str]} -- The arguments to pass to the parser (default: {""})
dest {Optional[str]} -- the attribute where the argument should be stored. (default: {None})
Returns:
            {Dataclass} -- an instance of `cls` built from the parsed arguments.
"""
parser = simple_parsing.ArgumentParser(
conflict_resolution=conflict_resolution_mode,
add_option_string_dash_variants=add_option_string_dash_variants,
argument_generation_mode=argument_generation_mode,
nested_mode=nested_mode,
)
if dest is None:
dest = camel_case(cls.__name__)
parser.add_arguments(cls, dest=dest, default=default)
if arguments is None:
if parse_known_args:
args = parser.parse_known_args(attempt_to_reorder=attempt_to_reorder)
else:
args = parser.parse_args()
else:
splits = shlex.split(arguments)
if parse_known_args:
args, unknown_args = parser.parse_known_args(
splits, attempt_to_reorder=attempt_to_reorder
)
else:
args = parser.parse_args(splits)
assert hasattr(args, dest), f"attribute '{dest}' not found in args {args}"
instance: Dataclass = getattr(args, dest) # type: ignore
delattr(args, dest)
# If there are subgroups, we can allow an extra "subgroups" attribute, otherwise we don't
# expect any other arguments.
args_dict = vars(args).copy()
args_dict.pop("subgroups", None)
assert not args_dict, f"Namespace has leftover garbage values (besides subgroups): {args}"
instance = cast(Dataclass, instance)
return instance
@classmethod
def setup_multiple(
cls: Type[Dataclass], num_to_parse: int, arguments: Optional[str] = ""
) -> Tuple[Dataclass, ...]:
conflict_resolution_mode: ConflictResolution = ConflictResolution.ALWAYS_MERGE
parser = simple_parsing.ArgumentParser(conflict_resolution=conflict_resolution_mode)
class_name = camel_case(cls.__name__)
for i in range(num_to_parse):
parser.add_arguments(cls, f"{class_name}_{i}")
if arguments is None:
args = parser.parse_args()
else:
splits = shlex.split(arguments)
args = parser.parse_args(splits)
return tuple(getattr(args, f"{class_name}_{i}") for i in range(num_to_parse))
@classmethod
def get_help_text(
cls,
argv: Optional[str] = None,
multiple=False,
conflict_resolution_mode: ConflictResolution = ConflictResolution.AUTO,
add_option_string_dash_variants=DashVariant.AUTO,
**parser_kwargs,
) -> str:
import contextlib
from io import StringIO
f = StringIO()
if argv is None:
argv = "--help"
elif not argv.endswith("--help"):
argv = argv + " --help"
with contextlib.suppress(SystemExit), contextlib.redirect_stdout(f):
_ = cls.setup(
argv,
conflict_resolution_mode=conflict_resolution_mode,
add_option_string_dash_variants=add_option_string_dash_variants,
**parser_kwargs,
)
s = f.getvalue()
return s
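# Minimal sketch of the intended TestSetup workflow; ExampleConfig is a
# hypothetical dataclass used only for illustration, not part of the suite.
def _example_testsetup_usage() -> None:
    from dataclasses import dataclass
    @dataclass
    class ExampleConfig(TestSetup):
        lr: float = 3e-4
        epochs: int = 10
    config = ExampleConfig.setup("--lr 0.01 --epochs 5")
    assert config.lr == 0.01 and config.epochs == 5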
ListFormattingFunction = Callable[[List[Any]], str]
ListOfListsFormattingFunction = Callable[[List[List[Any]]], str]
def format_list_using_spaces(value_list: List[Any]) -> str:
return " ".join(str(p) for p in value_list)
def format_list_using_brackets(value_list: List[Any]) -> str:
return f"[{','.join(str(p) for p in value_list)}]"
def format_list_using_single_quotes(value_list: List[Any]) -> str:
return f"'{format_list_using_spaces(value_list)}'"
def format_list_using_double_quotes(value_list: List[Any]) -> str:
return f'"{format_list_using_spaces(value_list)}"'
def format_lists_using_brackets(list_of_lists: List[List[Any]]) -> str:
return " ".join(format_list_using_brackets(value_list) for value_list in list_of_lists)
def format_lists_using_double_quotes(list_of_lists: List[List[Any]]) -> str:
return " ".join(format_list_using_double_quotes(value_list) for value_list in list_of_lists)
def format_lists_using_single_quotes(list_of_lists: List[List[Any]]) -> str:
return " ".join(format_list_using_single_quotes(value_list) for value_list in list_of_lists)
| 33.767717 | 104 | 0.670164 | ["MIT"] | idoby/SimpleParsing | test/testutils.py | 8577 | Python |
from __future__ import annotations
import copy
import logging
from collections import defaultdict
from pathlib import Path
from rasa.nlu.featurizers.featurizer import Featurizer
import numpy as np
import scipy.sparse
import tensorflow as tf
from typing import Any, Dict, List, Optional, Text, Tuple, Union, Type
from rasa.engine.graph import ExecutionContext, GraphComponent
from rasa.engine.recipes.default_recipe import DefaultV1Recipe
from rasa.engine.storage.resource import Resource
from rasa.engine.storage.storage import ModelStorage
from rasa.nlu.extractors.extractor import EntityExtractorMixin
from rasa.nlu.classifiers.classifier import IntentClassifier
import rasa.shared.utils.io
import rasa.utils.io as io_utils
import rasa.nlu.utils.bilou_utils as bilou_utils
from rasa.shared.constants import DIAGNOSTIC_DATA
from rasa.nlu.extractors.extractor import EntityTagSpec
from rasa.nlu.classifiers import LABEL_RANKING_LENGTH
from rasa.utils import train_utils
from rasa.utils.tensorflow import rasa_layers
from rasa.utils.tensorflow.models import RasaModel, TransformerRasaModel
from rasa.utils.tensorflow.model_data import (
RasaModelData,
FeatureSignature,
FeatureArray,
)
from rasa.nlu.constants import TOKENS_NAMES, DEFAULT_TRANSFORMER_SIZE
from rasa.shared.nlu.constants import (
SPLIT_ENTITIES_BY_COMMA_DEFAULT_VALUE,
TEXT,
INTENT,
INTENT_RESPONSE_KEY,
ENTITIES,
ENTITY_ATTRIBUTE_TYPE,
ENTITY_ATTRIBUTE_GROUP,
ENTITY_ATTRIBUTE_ROLE,
NO_ENTITY_TAG,
SPLIT_ENTITIES_BY_COMMA,
)
from rasa.shared.exceptions import InvalidConfigException
from rasa.shared.nlu.training_data.training_data import TrainingData
from rasa.shared.nlu.training_data.message import Message
from rasa.utils.tensorflow.constants import (
LABEL,
IDS,
HIDDEN_LAYERS_SIZES,
RENORMALIZE_CONFIDENCES,
SHARE_HIDDEN_LAYERS,
TRANSFORMER_SIZE,
NUM_TRANSFORMER_LAYERS,
NUM_HEADS,
BATCH_SIZES,
BATCH_STRATEGY,
EPOCHS,
RANDOM_SEED,
LEARNING_RATE,
RANKING_LENGTH,
LOSS_TYPE,
SIMILARITY_TYPE,
NUM_NEG,
SPARSE_INPUT_DROPOUT,
DENSE_INPUT_DROPOUT,
MASKED_LM,
ENTITY_RECOGNITION,
TENSORBOARD_LOG_DIR,
INTENT_CLASSIFICATION,
EVAL_NUM_EXAMPLES,
EVAL_NUM_EPOCHS,
UNIDIRECTIONAL_ENCODER,
DROP_RATE,
DROP_RATE_ATTENTION,
CONNECTION_DENSITY,
NEGATIVE_MARGIN_SCALE,
REGULARIZATION_CONSTANT,
SCALE_LOSS,
USE_MAX_NEG_SIM,
MAX_NEG_SIM,
MAX_POS_SIM,
EMBEDDING_DIMENSION,
BILOU_FLAG,
KEY_RELATIVE_ATTENTION,
VALUE_RELATIVE_ATTENTION,
MAX_RELATIVE_POSITION,
AUTO,
BALANCED,
CROSS_ENTROPY,
TENSORBOARD_LOG_LEVEL,
CONCAT_DIMENSION,
FEATURIZERS,
CHECKPOINT_MODEL,
SEQUENCE,
SENTENCE,
SEQUENCE_LENGTH,
DENSE_DIMENSION,
MASK,
CONSTRAIN_SIMILARITIES,
MODEL_CONFIDENCE,
SOFTMAX,
)
logger = logging.getLogger(__name__)
SPARSE = "sparse"
DENSE = "dense"
LABEL_KEY = LABEL
LABEL_SUB_KEY = IDS
POSSIBLE_TAGS = [ENTITY_ATTRIBUTE_TYPE, ENTITY_ATTRIBUTE_ROLE, ENTITY_ATTRIBUTE_GROUP]
@DefaultV1Recipe.register(
[
DefaultV1Recipe.ComponentType.INTENT_CLASSIFIER,
DefaultV1Recipe.ComponentType.ENTITY_EXTRACTOR,
],
is_trainable=True,
)
class DIETClassifier(GraphComponent, IntentClassifier, EntityExtractorMixin):
"""A multi-task model for intent classification and entity extraction.
DIET is Dual Intent and Entity Transformer.
The architecture is based on a transformer which is shared for both tasks.
A sequence of entity labels is predicted through a Conditional Random Field (CRF)
tagging layer on top of the transformer output sequence corresponding to the
input sequence of tokens. The transformer output for the ``__CLS__`` token and
intent labels are embedded into a single semantic vector space. We use the
dot-product loss to maximize the similarity with the target label and minimize
similarities with negative samples.
"""
@classmethod
def required_components(cls) -> List[Type]:
"""Components that should be included in the pipeline before this component."""
return [Featurizer]
@staticmethod
def get_default_config() -> Dict[Text, Any]:
"""The component's default config (see parent class for full docstring)."""
# please make sure to update the docs when changing a default parameter
return {
# ## Architecture of the used neural network
# Hidden layer sizes for layers before the embedding layers for user message
# and labels.
# The number of hidden layers is equal to the length of the corresponding
# list.
HIDDEN_LAYERS_SIZES: {TEXT: [], LABEL: []},
# Whether to share the hidden layer weights between user message and labels.
SHARE_HIDDEN_LAYERS: False,
# Number of units in transformer
TRANSFORMER_SIZE: DEFAULT_TRANSFORMER_SIZE,
# Number of transformer layers
NUM_TRANSFORMER_LAYERS: 2,
# Number of attention heads in transformer
NUM_HEADS: 4,
# If 'True' use key relative embeddings in attention
KEY_RELATIVE_ATTENTION: False,
# If 'True' use value relative embeddings in attention
VALUE_RELATIVE_ATTENTION: False,
# Max position for relative embeddings. Only in effect if key- or value
# relative attention are turned on
MAX_RELATIVE_POSITION: 5,
# Use a unidirectional or bidirectional encoder.
UNIDIRECTIONAL_ENCODER: False,
# ## Training parameters
# Initial and final batch sizes:
# Batch size will be linearly increased for each epoch.
BATCH_SIZES: [64, 256],
# Strategy used when creating batches.
# Can be either 'sequence' or 'balanced'.
BATCH_STRATEGY: BALANCED,
# Number of epochs to train
EPOCHS: 300,
# Set random seed to any 'int' to get reproducible results
RANDOM_SEED: None,
# Initial learning rate for the optimizer
LEARNING_RATE: 0.001,
# ## Parameters for embeddings
# Dimension size of embedding vectors
EMBEDDING_DIMENSION: 20,
# Dense dimension to use for sparse features.
DENSE_DIMENSION: {TEXT: 128, LABEL: 20},
# Default dimension to use for concatenating sequence and sentence features.
CONCAT_DIMENSION: {TEXT: 128, LABEL: 20},
# The number of incorrect labels. The algorithm will minimize
# their similarity to the user input during training.
NUM_NEG: 20,
# Type of similarity measure to use, either 'auto' or 'cosine' or 'inner'.
SIMILARITY_TYPE: AUTO,
# The type of the loss function, either 'cross_entropy' or 'margin'.
LOSS_TYPE: CROSS_ENTROPY,
# Number of top intents for which confidences should be reported.
# Set to 0 if confidences for all intents should be reported.
RANKING_LENGTH: LABEL_RANKING_LENGTH,
# Indicates how similar the algorithm should try to make embedding vectors
# for correct labels.
# Should be 0.0 < ... < 1.0 for 'cosine' similarity type.
MAX_POS_SIM: 0.8,
# Maximum negative similarity for incorrect labels.
# Should be -1.0 < ... < 1.0 for 'cosine' similarity type.
MAX_NEG_SIM: -0.4,
# If 'True' the algorithm only minimizes maximum similarity over
# incorrect intent labels, used only if 'loss_type' is set to 'margin'.
USE_MAX_NEG_SIM: True,
# If 'True' scale loss inverse proportionally to the confidence
# of the correct prediction
SCALE_LOSS: False,
# ## Regularization parameters
# The scale of regularization
REGULARIZATION_CONSTANT: 0.002,
# The scale of how important is to minimize the maximum similarity
# between embeddings of different labels,
# used only if 'loss_type' is set to 'margin'.
NEGATIVE_MARGIN_SCALE: 0.8,
# Dropout rate for encoder
DROP_RATE: 0.2,
# Dropout rate for attention
DROP_RATE_ATTENTION: 0,
# Fraction of trainable weights in internal layers.
CONNECTION_DENSITY: 0.2,
# If 'True' apply dropout to sparse input tensors
SPARSE_INPUT_DROPOUT: True,
# If 'True' apply dropout to dense input tensors
DENSE_INPUT_DROPOUT: True,
# ## Evaluation parameters
# How often calculate validation accuracy.
# Small values may hurt performance.
EVAL_NUM_EPOCHS: 20,
# How many examples to use for hold out validation set
# Large values may hurt performance, e.g. model accuracy.
# Set to 0 for no validation.
EVAL_NUM_EXAMPLES: 0,
# ## Model config
# If 'True' intent classification is trained and intent predicted.
INTENT_CLASSIFICATION: True,
# If 'True' named entity recognition is trained and entities predicted.
ENTITY_RECOGNITION: True,
# If 'True' random tokens of the input message will be masked and the model
# should predict those tokens.
MASKED_LM: False,
# 'BILOU_flag' determines whether to use BILOU tagging or not.
# If set to 'True' labelling is more rigorous, however more
# examples per entity are required.
# Rule of thumb: you should have more than 100 examples per entity.
BILOU_FLAG: True,
# If you want to use tensorboard to visualize training and validation
# metrics, set this option to a valid output directory.
TENSORBOARD_LOG_DIR: None,
# Define when training metrics for tensorboard should be logged.
# Either after every epoch or for every training step.
# Valid values: 'epoch' and 'batch'
TENSORBOARD_LOG_LEVEL: "epoch",
# Perform model checkpointing
CHECKPOINT_MODEL: False,
# Specify what features to use as sequence and sentence features
# By default all features in the pipeline are used.
FEATURIZERS: [],
# Split entities by comma, this makes sense e.g. for a list of ingredients
            # in a recipe, but it doesn't make sense for the parts of an address
SPLIT_ENTITIES_BY_COMMA: True,
# If 'True' applies sigmoid on all similarity terms and adds
# it to the loss function to ensure that similarity values are
# approximately bounded. Used inside cross-entropy loss only.
CONSTRAIN_SIMILARITIES: False,
# Model confidence to be returned during inference. Currently, the only
# possible value is `softmax`.
MODEL_CONFIDENCE: SOFTMAX,
# Determines whether the confidences of the chosen top intents should be
# renormalized so that they sum up to 1. By default, we do not renormalize
# and return the confidences for the top intents as is.
# Note that renormalization only makes sense if confidences are generated
# via `softmax`.
RENORMALIZE_CONFIDENCES: False,
}
def __init__(
self,
config: Dict[Text, Any],
model_storage: ModelStorage,
resource: Resource,
execution_context: ExecutionContext,
index_label_id_mapping: Optional[Dict[int, Text]] = None,
entity_tag_specs: Optional[List[EntityTagSpec]] = None,
model: Optional[RasaModel] = None,
sparse_feature_sizes: Optional[Dict[Text, Dict[Text, List[int]]]] = None,
) -> None:
"""Declare instance variables with default values."""
if EPOCHS not in config:
rasa.shared.utils.io.raise_warning(
f"Please configure the number of '{EPOCHS}' in your configuration file."
f" We will change the default value of '{EPOCHS}' in the future to 1. "
)
self.component_config = config
self._model_storage = model_storage
self._resource = resource
self._execution_context = execution_context
self._check_config_parameters()
# transform numbers to labels
self.index_label_id_mapping = index_label_id_mapping or {}
self._entity_tag_specs = entity_tag_specs
self.model = model
self.tmp_checkpoint_dir = None
if self.component_config[CHECKPOINT_MODEL]:
self.tmp_checkpoint_dir = Path(rasa.utils.io.create_temporary_directory())
self._label_data: Optional[RasaModelData] = None
self._data_example: Optional[Dict[Text, Dict[Text, List[FeatureArray]]]] = None
self.split_entities_config = rasa.utils.train_utils.init_split_entities(
self.component_config[SPLIT_ENTITIES_BY_COMMA],
SPLIT_ENTITIES_BY_COMMA_DEFAULT_VALUE,
)
self.finetune_mode = self._execution_context.is_finetuning
self._sparse_feature_sizes = sparse_feature_sizes
# init helpers
def _check_masked_lm(self) -> None:
if (
self.component_config[MASKED_LM]
and self.component_config[NUM_TRANSFORMER_LAYERS] == 0
):
raise ValueError(
f"If number of transformer layers is 0, "
f"'{MASKED_LM}' option should be 'False'."
)
def _check_share_hidden_layers_sizes(self) -> None:
if self.component_config.get(SHARE_HIDDEN_LAYERS):
first_hidden_layer_sizes = next(
iter(self.component_config[HIDDEN_LAYERS_SIZES].values())
)
# check that all hidden layer sizes are the same
identical_hidden_layer_sizes = all(
current_hidden_layer_sizes == first_hidden_layer_sizes
for current_hidden_layer_sizes in self.component_config[
HIDDEN_LAYERS_SIZES
].values()
)
if not identical_hidden_layer_sizes:
raise ValueError(
f"If hidden layer weights are shared, "
f"{HIDDEN_LAYERS_SIZES} must coincide."
)
def _check_config_parameters(self) -> None:
self.component_config = train_utils.check_deprecated_options(
self.component_config
)
self._check_masked_lm()
self._check_share_hidden_layers_sizes()
self.component_config = train_utils.update_confidence_type(
self.component_config
)
train_utils.validate_configuration_settings(self.component_config)
self.component_config = train_utils.update_similarity_type(
self.component_config
)
self.component_config = train_utils.update_evaluation_parameters(
self.component_config
)
@classmethod
def create(
cls,
config: Dict[Text, Any],
model_storage: ModelStorage,
resource: Resource,
execution_context: ExecutionContext,
) -> DIETClassifier:
"""Creates a new untrained component (see parent class for full docstring)."""
return cls(config, model_storage, resource, execution_context)
@property
def label_key(self) -> Optional[Text]:
"""Return key if intent classification is activated."""
return LABEL_KEY if self.component_config[INTENT_CLASSIFICATION] else None
@property
def label_sub_key(self) -> Optional[Text]:
"""Return sub key if intent classification is activated."""
return LABEL_SUB_KEY if self.component_config[INTENT_CLASSIFICATION] else None
@staticmethod
def model_class() -> Type[RasaModel]:
return DIET
# training data helpers:
@staticmethod
def _label_id_index_mapping(
training_data: TrainingData, attribute: Text
) -> Dict[Text, int]:
"""Create label_id dictionary."""
distinct_label_ids = {
example.get(attribute) for example in training_data.intent_examples
} - {None}
return {
label_id: idx for idx, label_id in enumerate(sorted(distinct_label_ids))
}
@staticmethod
def _invert_mapping(mapping: Dict) -> Dict:
return {value: key for key, value in mapping.items()}
def _create_entity_tag_specs(
self, training_data: TrainingData
) -> List[EntityTagSpec]:
"""Create entity tag specifications with their respective tag id mappings."""
_tag_specs = []
for tag_name in POSSIBLE_TAGS:
if self.component_config[BILOU_FLAG]:
tag_id_index_mapping = bilou_utils.build_tag_id_dict(
training_data, tag_name
)
else:
tag_id_index_mapping = self._tag_id_index_mapping_for(
tag_name, training_data
)
if tag_id_index_mapping:
_tag_specs.append(
EntityTagSpec(
tag_name=tag_name,
tags_to_ids=tag_id_index_mapping,
ids_to_tags=self._invert_mapping(tag_id_index_mapping),
num_tags=len(tag_id_index_mapping),
)
)
return _tag_specs
@staticmethod
def _tag_id_index_mapping_for(
tag_name: Text, training_data: TrainingData
) -> Optional[Dict[Text, int]]:
"""Create mapping from tag name to id."""
if tag_name == ENTITY_ATTRIBUTE_ROLE:
distinct_tags = training_data.entity_roles
elif tag_name == ENTITY_ATTRIBUTE_GROUP:
distinct_tags = training_data.entity_groups
else:
distinct_tags = training_data.entities
distinct_tags = distinct_tags - {NO_ENTITY_TAG} - {None}
if not distinct_tags:
return None
tag_id_dict = {
tag_id: idx for idx, tag_id in enumerate(sorted(distinct_tags), 1)
}
# NO_ENTITY_TAG corresponds to non-entity which should correspond to 0 index
# needed for correct prediction for padding
tag_id_dict[NO_ENTITY_TAG] = 0
return tag_id_dict
@staticmethod
def _find_example_for_label(
label: Text, examples: List[Message], attribute: Text
) -> Optional[Message]:
for ex in examples:
if ex.get(attribute) == label:
return ex
return None
def _check_labels_features_exist(
self, labels_example: List[Message], attribute: Text
) -> bool:
"""Checks if all labels have features set."""
return all(
label_example.features_present(
attribute, self.component_config[FEATURIZERS]
)
for label_example in labels_example
)
def _extract_features(
self, message: Message, attribute: Text
) -> Dict[Text, Union[scipy.sparse.spmatrix, np.ndarray]]:
(
sparse_sequence_features,
sparse_sentence_features,
) = message.get_sparse_features(attribute, self.component_config[FEATURIZERS])
dense_sequence_features, dense_sentence_features = message.get_dense_features(
attribute, self.component_config[FEATURIZERS]
)
if dense_sequence_features is not None and sparse_sequence_features is not None:
if (
dense_sequence_features.features.shape[0]
!= sparse_sequence_features.features.shape[0]
):
raise ValueError(
f"Sequence dimensions for sparse and dense sequence features "
f"don't coincide in '{message.get(TEXT)}'"
f"for attribute '{attribute}'."
)
if dense_sentence_features is not None and sparse_sentence_features is not None:
if (
dense_sentence_features.features.shape[0]
!= sparse_sentence_features.features.shape[0]
):
raise ValueError(
f"Sequence dimensions for sparse and dense sentence features "
f"don't coincide in '{message.get(TEXT)}'"
f"for attribute '{attribute}'."
)
# If we don't use the transformer and we don't want to do entity recognition,
# to speed up training take only the sentence features as feature vector.
# We would not make use of the sequence anyway in this setup. Carrying over
# those features to the actual training process takes quite some time.
if (
self.component_config[NUM_TRANSFORMER_LAYERS] == 0
and not self.component_config[ENTITY_RECOGNITION]
and attribute not in [INTENT, INTENT_RESPONSE_KEY]
):
sparse_sequence_features = None
dense_sequence_features = None
out = {}
if sparse_sentence_features is not None:
out[f"{SPARSE}_{SENTENCE}"] = sparse_sentence_features.features
if sparse_sequence_features is not None:
out[f"{SPARSE}_{SEQUENCE}"] = sparse_sequence_features.features
if dense_sentence_features is not None:
out[f"{DENSE}_{SENTENCE}"] = dense_sentence_features.features
if dense_sequence_features is not None:
out[f"{DENSE}_{SEQUENCE}"] = dense_sequence_features.features
return out
def _check_input_dimension_consistency(self, model_data: RasaModelData) -> None:
"""Checks if features have same dimensionality if hidden layers are shared."""
if self.component_config.get(SHARE_HIDDEN_LAYERS):
num_text_sentence_features = model_data.number_of_units(TEXT, SENTENCE)
num_label_sentence_features = model_data.number_of_units(LABEL, SENTENCE)
num_text_sequence_features = model_data.number_of_units(TEXT, SEQUENCE)
num_label_sequence_features = model_data.number_of_units(LABEL, SEQUENCE)
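            # The chained comparisons below are true only if both counts are
            # non-zero and differ from each other, i.e. text and label both
            # provide this feature type but with mismatching dimensionality.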
if (0 < num_text_sentence_features != num_label_sentence_features > 0) or (
0 < num_text_sequence_features != num_label_sequence_features > 0
):
raise ValueError(
"If embeddings are shared text features and label features "
"must coincide. Check the output dimensions of previous components."
)
def _extract_labels_precomputed_features(
self, label_examples: List[Message], attribute: Text = INTENT
) -> Tuple[List[FeatureArray], List[FeatureArray]]:
"""Collects precomputed encodings."""
features = defaultdict(list)
for e in label_examples:
label_features = self._extract_features(e, attribute)
for feature_key, feature_value in label_features.items():
features[feature_key].append(feature_value)
sequence_features = []
sentence_features = []
for feature_name, feature_value in features.items():
if SEQUENCE in feature_name:
sequence_features.append(
FeatureArray(np.array(feature_value), number_of_dimensions=3)
)
else:
sentence_features.append(
FeatureArray(np.array(feature_value), number_of_dimensions=3)
)
return sequence_features, sentence_features
@staticmethod
def _compute_default_label_features(
labels_example: List[Message],
) -> List[FeatureArray]:
"""Computes one-hot representation for the labels."""
logger.debug("No label features found. Computing default label features.")
eye_matrix = np.eye(len(labels_example), dtype=np.float32)
# add sequence dimension to one-hot labels
return [
FeatureArray(
np.array([np.expand_dims(a, 0) for a in eye_matrix]),
number_of_dimensions=3,
)
]
def _create_label_data(
self,
training_data: TrainingData,
label_id_dict: Dict[Text, int],
attribute: Text,
) -> RasaModelData:
"""Create matrix with label_ids encoded in rows as bag of words.
Find a training example for each label and get the encoded features
from the corresponding Message object.
If the features are already computed, fetch them from the message object
else compute a one hot encoding for the label as the feature vector.
"""
# Collect one example for each label
labels_idx_examples = []
for label_name, idx in label_id_dict.items():
label_example = self._find_example_for_label(
label_name, training_data.intent_examples, attribute
)
labels_idx_examples.append((idx, label_example))
# Sort the list of tuples based on label_idx
labels_idx_examples = sorted(labels_idx_examples, key=lambda x: x[0])
labels_example = [example for (_, example) in labels_idx_examples]
# Collect features, precomputed if they exist, else compute on the fly
if self._check_labels_features_exist(labels_example, attribute):
(
sequence_features,
sentence_features,
) = self._extract_labels_precomputed_features(labels_example, attribute)
else:
sequence_features = None
sentence_features = self._compute_default_label_features(labels_example)
label_data = RasaModelData()
label_data.add_features(LABEL, SEQUENCE, sequence_features)
label_data.add_features(LABEL, SENTENCE, sentence_features)
if label_data.does_feature_not_exist(
LABEL, SENTENCE
) and label_data.does_feature_not_exist(LABEL, SEQUENCE):
raise ValueError(
"No label features are present. Please check your configuration file."
)
label_ids = np.array([idx for (idx, _) in labels_idx_examples])
# explicitly add last dimension to label_ids
# to track correctly dynamic sequences
label_data.add_features(
LABEL_KEY,
LABEL_SUB_KEY,
[FeatureArray(np.expand_dims(label_ids, -1), number_of_dimensions=2)],
)
label_data.add_lengths(LABEL, SEQUENCE_LENGTH, LABEL, SEQUENCE)
return label_data
def _use_default_label_features(self, label_ids: np.ndarray) -> List[FeatureArray]:
feature_arrays: List[FeatureArray] = self._label_data.get(LABEL, SENTENCE)
all_label_features = feature_arrays[0]
return [
FeatureArray(
np.array([all_label_features[label_id] for label_id in label_ids]),
number_of_dimensions=all_label_features.number_of_dimensions,
)
]
def _create_model_data(
self,
training_data: List[Message],
label_id_dict: Optional[Dict[Text, int]] = None,
label_attribute: Optional[Text] = None,
training: bool = True,
) -> RasaModelData:
"""Prepare data for training and create a RasaModelData object."""
from rasa.utils.tensorflow import model_data_utils
attributes_to_consider = [TEXT]
if training and self.component_config[INTENT_CLASSIFICATION]:
# we don't have any intent labels during prediction, just add them during
# training
attributes_to_consider.append(label_attribute)
if (
training
and self.component_config[ENTITY_RECOGNITION]
and self._entity_tag_specs
):
# Add entities as labels only during training and only if there was
# training data added for entities with DIET configured to predict entities.
attributes_to_consider.append(ENTITIES)
if training and label_attribute is not None:
# only use those training examples that have the label_attribute set
# during training
training_data = [
example for example in training_data if label_attribute in example.data
]
training_data = [
message
for message in training_data
if message.features_present(
attribute=TEXT, featurizers=self.component_config.get(FEATURIZERS)
)
]
if not training_data:
# no training data are present to train
return RasaModelData()
(
features_for_examples,
sparse_feature_sizes,
) = model_data_utils.featurize_training_examples(
training_data,
attributes_to_consider,
entity_tag_specs=self._entity_tag_specs,
featurizers=self.component_config[FEATURIZERS],
bilou_tagging=self.component_config[BILOU_FLAG],
)
attribute_data, _ = model_data_utils.convert_to_data_format(
features_for_examples, consider_dialogue_dimension=False
)
model_data = RasaModelData(
label_key=self.label_key, label_sub_key=self.label_sub_key
)
model_data.add_data(attribute_data)
model_data.add_lengths(TEXT, SEQUENCE_LENGTH, TEXT, SEQUENCE)
# Current implementation doesn't yet account for updating sparse
# feature sizes of label attributes. That's why we remove them.
sparse_feature_sizes = self._remove_label_sparse_feature_sizes(
sparse_feature_sizes=sparse_feature_sizes, label_attribute=label_attribute
)
model_data.add_sparse_feature_sizes(sparse_feature_sizes)
self._add_label_features(
model_data, training_data, label_attribute, label_id_dict, training
)
# make sure all keys are in the same order during training and prediction
# as we rely on the order of key and sub-key when constructing the actual
# tensors from the model data
model_data.sort()
return model_data
@staticmethod
def _remove_label_sparse_feature_sizes(
sparse_feature_sizes: Dict[Text, Dict[Text, List[int]]],
label_attribute: Optional[Text] = None,
) -> Dict[Text, Dict[Text, List[int]]]:
if label_attribute in sparse_feature_sizes:
del sparse_feature_sizes[label_attribute]
return sparse_feature_sizes
def _add_label_features(
self,
model_data: RasaModelData,
training_data: List[Message],
label_attribute: Text,
label_id_dict: Dict[Text, int],
training: bool = True,
) -> None:
label_ids = []
if training and self.component_config[INTENT_CLASSIFICATION]:
for example in training_data:
if example.get(label_attribute):
label_ids.append(label_id_dict[example.get(label_attribute)])
# explicitly add last dimension to label_ids
# to track correctly dynamic sequences
model_data.add_features(
LABEL_KEY,
LABEL_SUB_KEY,
[FeatureArray(np.expand_dims(label_ids, -1), number_of_dimensions=2)],
)
if (
label_attribute
and model_data.does_feature_not_exist(label_attribute, SENTENCE)
and model_data.does_feature_not_exist(label_attribute, SEQUENCE)
):
# no label features are present, get default features from _label_data
model_data.add_features(
LABEL, SENTENCE, self._use_default_label_features(np.array(label_ids))
)
# as label_attribute can have different values, e.g. INTENT or RESPONSE,
# copy over the features to the LABEL key to make
# it easier to access the label features inside the model itself
model_data.update_key(label_attribute, SENTENCE, LABEL, SENTENCE)
model_data.update_key(label_attribute, SEQUENCE, LABEL, SEQUENCE)
model_data.update_key(label_attribute, MASK, LABEL, MASK)
model_data.add_lengths(LABEL, SEQUENCE_LENGTH, LABEL, SEQUENCE)
# train helpers
def preprocess_train_data(self, training_data: TrainingData) -> RasaModelData:
"""Prepares data for training.
Performs sanity checks on training data, extracts encodings for labels.
"""
if self.component_config[BILOU_FLAG]:
bilou_utils.apply_bilou_schema(training_data)
label_id_index_mapping = self._label_id_index_mapping(
training_data, attribute=INTENT
)
if not label_id_index_mapping:
# no labels are present to train
return RasaModelData()
self.index_label_id_mapping = self._invert_mapping(label_id_index_mapping)
self._label_data = self._create_label_data(
training_data, label_id_index_mapping, attribute=INTENT
)
self._entity_tag_specs = self._create_entity_tag_specs(training_data)
label_attribute = (
INTENT if self.component_config[INTENT_CLASSIFICATION] else None
)
model_data = self._create_model_data(
training_data.nlu_examples,
label_id_index_mapping,
label_attribute=label_attribute,
)
self._check_input_dimension_consistency(model_data)
return model_data
@staticmethod
def _check_enough_labels(model_data: RasaModelData) -> bool:
return len(np.unique(model_data.get(LABEL_KEY, LABEL_SUB_KEY))) >= 2
def train(self, training_data: TrainingData) -> Resource:
"""Train the embedding intent classifier on a data set."""
model_data = self.preprocess_train_data(training_data)
if model_data.is_empty():
logger.debug(
f"Cannot train '{self.__class__.__name__}'. No data was provided. "
f"Skipping training of the classifier."
)
return self._resource
if not self.model and self.finetune_mode:
raise rasa.shared.exceptions.InvalidParameterException(
f"{self.__class__.__name__} was instantiated "
f"with `model=None` and `finetune_mode=True`. "
f"This is not a valid combination as the component "
f"needs an already instantiated and trained model "
f"to continue training in finetune mode."
)
if self.component_config.get(INTENT_CLASSIFICATION):
if not self._check_enough_labels(model_data):
logger.error(
f"Cannot train '{self.__class__.__name__}'. "
f"Need at least 2 different intent classes. "
f"Skipping training of classifier."
)
return self._resource
if self.component_config.get(ENTITY_RECOGNITION):
self.check_correct_entity_annotations(training_data)
# keep one example for persisting and loading
self._data_example = model_data.first_data_example()
if not self.finetune_mode:
# No pre-trained model to load from. Create a new instance of the model.
self.model = self._instantiate_model_class(model_data)
self.model.compile(
optimizer=tf.keras.optimizers.Adam(self.component_config[LEARNING_RATE])
)
else:
self.model.adjust_for_incremental_training(
data_example=self._data_example,
new_sparse_feature_sizes=model_data.get_sparse_feature_sizes(),
old_sparse_feature_sizes=self._sparse_feature_sizes,
)
self._sparse_feature_sizes = model_data.get_sparse_feature_sizes()
data_generator, validation_data_generator = train_utils.create_data_generators(
model_data,
self.component_config[BATCH_SIZES],
self.component_config[EPOCHS],
self.component_config[BATCH_STRATEGY],
self.component_config[EVAL_NUM_EXAMPLES],
self.component_config[RANDOM_SEED],
)
callbacks = train_utils.create_common_callbacks(
self.component_config[EPOCHS],
self.component_config[TENSORBOARD_LOG_DIR],
self.component_config[TENSORBOARD_LOG_LEVEL],
self.tmp_checkpoint_dir,
)
self.model.fit(
data_generator,
epochs=self.component_config[EPOCHS],
validation_data=validation_data_generator,
validation_freq=self.component_config[EVAL_NUM_EPOCHS],
callbacks=callbacks,
verbose=False,
shuffle=False, # we use custom shuffle inside data generator
)
self.persist()
return self._resource
# process helpers
def _predict(
self, message: Message
) -> Optional[Dict[Text, Union[tf.Tensor, Dict[Text, tf.Tensor]]]]:
if self.model is None:
logger.debug(
f"There is no trained model for '{self.__class__.__name__}': The "
f"component is either not trained or didn't receive enough training "
f"data."
)
return None
# create session data from message and convert it into a batch of 1
model_data = self._create_model_data([message], training=False)
if model_data.is_empty():
return None
return self.model.run_inference(model_data)
def _predict_label(
self, predict_out: Optional[Dict[Text, tf.Tensor]]
) -> Tuple[Dict[Text, Any], List[Dict[Text, Any]]]:
"""Predicts the intent of the provided message."""
label: Dict[Text, Any] = {"name": None, "confidence": 0.0}
label_ranking = []
if predict_out is None:
return label, label_ranking
message_sim = predict_out["i_scores"]
message_sim = message_sim.flatten() # sim is a matrix
        # if no similarity scores were computed, return the empty default label
if message_sim.size == 0:
return label, label_ranking
# rank the confidences
ranking_length = self.component_config[RANKING_LENGTH]
renormalize = (
self.component_config[RENORMALIZE_CONFIDENCES]
and self.component_config[MODEL_CONFIDENCE] == SOFTMAX
)
ranked_label_indices, message_sim = train_utils.rank_and_mask(
message_sim, ranking_length=ranking_length, renormalize=renormalize
)
# construct the label and ranking
casted_message_sim: List[float] = message_sim.tolist() # np.float to float
top_label_idx = ranked_label_indices[0]
label = {
"name": self.index_label_id_mapping[top_label_idx],
"confidence": casted_message_sim[top_label_idx],
}
ranking = [(idx, casted_message_sim[idx]) for idx in ranked_label_indices]
label_ranking = [
{"name": self.index_label_id_mapping[label_idx], "confidence": score}
for label_idx, score in ranking
]
return label, label_ranking
def _predict_entities(
self, predict_out: Optional[Dict[Text, tf.Tensor]], message: Message
) -> List[Dict]:
if predict_out is None:
return []
predicted_tags, confidence_values = train_utils.entity_label_to_tags(
predict_out, self._entity_tag_specs, self.component_config[BILOU_FLAG]
)
entities = self.convert_predictions_into_entities(
message.get(TEXT),
message.get(TOKENS_NAMES[TEXT], []),
predicted_tags,
self.split_entities_config,
confidence_values,
)
entities = self.add_extractor_name(entities)
entities = message.get(ENTITIES, []) + entities
return entities
def process(self, messages: List[Message]) -> List[Message]:
"""Augments the message with intents, entities, and diagnostic data."""
for message in messages:
out = self._predict(message)
if self.component_config[INTENT_CLASSIFICATION]:
label, label_ranking = self._predict_label(out)
message.set(INTENT, label, add_to_output=True)
message.set("intent_ranking", label_ranking, add_to_output=True)
if self.component_config[ENTITY_RECOGNITION]:
entities = self._predict_entities(out, message)
message.set(ENTITIES, entities, add_to_output=True)
if out and self._execution_context.should_add_diagnostic_data:
message.add_diagnostic_data(
self._execution_context.node_name, out.get(DIAGNOSTIC_DATA)
)
return messages
def persist(self) -> None:
"""Persist this model into the passed directory."""
if self.model is None:
return None
with self._model_storage.write_to(self._resource) as model_path:
file_name = self.__class__.__name__
tf_model_file = model_path / f"{file_name}.tf_model"
rasa.shared.utils.io.create_directory_for_file(tf_model_file)
if self.component_config[CHECKPOINT_MODEL] and self.tmp_checkpoint_dir:
self.model.load_weights(self.tmp_checkpoint_dir / "checkpoint.tf_model")
# Save an empty file to flag that this model has been
# produced using checkpointing
checkpoint_marker = model_path / f"{file_name}.from_checkpoint.pkl"
checkpoint_marker.touch()
self.model.save(str(tf_model_file))
io_utils.pickle_dump(
model_path / f"{file_name}.data_example.pkl", self._data_example
)
io_utils.pickle_dump(
model_path / f"{file_name}.sparse_feature_sizes.pkl",
self._sparse_feature_sizes,
)
io_utils.pickle_dump(
model_path / f"{file_name}.label_data.pkl", dict(self._label_data.data)
)
io_utils.json_pickle(
model_path / f"{file_name}.index_label_id_mapping.json",
self.index_label_id_mapping,
)
entity_tag_specs = (
[tag_spec._asdict() for tag_spec in self._entity_tag_specs]
if self._entity_tag_specs
else []
)
rasa.shared.utils.io.dump_obj_as_json_to_file(
model_path / f"{file_name}.entity_tag_specs.json", entity_tag_specs
)
@classmethod
def load(
cls,
config: Dict[Text, Any],
model_storage: ModelStorage,
resource: Resource,
execution_context: ExecutionContext,
**kwargs: Any,
) -> DIETClassifier:
"""Loads a policy from the storage (see parent class for full docstring)."""
try:
with model_storage.read_from(resource) as model_path:
return cls._load(
model_path, config, model_storage, resource, execution_context
)
except ValueError:
logger.debug(
f"Failed to load {cls.__class__.__name__} from model storage. Resource "
f"'{resource.name}' doesn't exist."
)
return cls(config, model_storage, resource, execution_context)
@classmethod
def _load(
cls,
model_path: Path,
config: Dict[Text, Any],
model_storage: ModelStorage,
resource: Resource,
execution_context: ExecutionContext,
) -> "DIETClassifier":
"""Loads the trained model from the provided directory."""
(
index_label_id_mapping,
entity_tag_specs,
label_data,
data_example,
sparse_feature_sizes,
) = cls._load_from_files(model_path)
config = train_utils.update_confidence_type(config)
config = train_utils.update_similarity_type(config)
model = cls._load_model(
entity_tag_specs,
label_data,
config,
data_example,
model_path,
finetune_mode=execution_context.is_finetuning,
)
return cls(
config=config,
model_storage=model_storage,
resource=resource,
execution_context=execution_context,
index_label_id_mapping=index_label_id_mapping,
entity_tag_specs=entity_tag_specs,
model=model,
sparse_feature_sizes=sparse_feature_sizes,
)
@classmethod
def _load_from_files(
cls, model_path: Path
) -> Tuple[
Dict[int, Text],
List[EntityTagSpec],
RasaModelData,
Dict[Text, Dict[Text, List[FeatureArray]]],
Dict[Text, Dict[Text, List[int]]],
]:
file_name = cls.__name__
data_example = io_utils.pickle_load(
model_path / f"{file_name}.data_example.pkl"
)
label_data = io_utils.pickle_load(model_path / f"{file_name}.label_data.pkl")
label_data = RasaModelData(data=label_data)
sparse_feature_sizes = io_utils.pickle_load(
model_path / f"{file_name}.sparse_feature_sizes.pkl"
)
index_label_id_mapping = io_utils.json_unpickle(
model_path / f"{file_name}.index_label_id_mapping.json"
)
entity_tag_specs = rasa.shared.utils.io.read_json_file(
model_path / f"{file_name}.entity_tag_specs.json"
)
entity_tag_specs = [
EntityTagSpec(
tag_name=tag_spec["tag_name"],
ids_to_tags={
int(key): value for key, value in tag_spec["ids_to_tags"].items()
},
tags_to_ids={
key: int(value) for key, value in tag_spec["tags_to_ids"].items()
},
num_tags=tag_spec["num_tags"],
)
for tag_spec in entity_tag_specs
]
# jsonpickle converts dictionary keys to strings
index_label_id_mapping = {
int(key): value for key, value in index_label_id_mapping.items()
}
return (
index_label_id_mapping,
entity_tag_specs,
label_data,
data_example,
sparse_feature_sizes,
)
@classmethod
def _load_model(
cls,
entity_tag_specs: List[EntityTagSpec],
label_data: RasaModelData,
config: Dict[Text, Any],
data_example: Dict[Text, Dict[Text, List[FeatureArray]]],
model_path: Path,
finetune_mode: bool = False,
) -> "RasaModel":
file_name = cls.__name__
tf_model_file = model_path / f"{file_name}.tf_model"
label_key = LABEL_KEY if config[INTENT_CLASSIFICATION] else None
label_sub_key = LABEL_SUB_KEY if config[INTENT_CLASSIFICATION] else None
model_data_example = RasaModelData(
label_key=label_key, label_sub_key=label_sub_key, data=data_example
)
model = cls._load_model_class(
tf_model_file,
model_data_example,
label_data,
entity_tag_specs,
config,
finetune_mode=finetune_mode,
)
return model
@classmethod
def _load_model_class(
cls,
tf_model_file: Text,
model_data_example: RasaModelData,
label_data: RasaModelData,
entity_tag_specs: List[EntityTagSpec],
config: Dict[Text, Any],
finetune_mode: bool,
) -> "RasaModel":
predict_data_example = RasaModelData(
label_key=model_data_example.label_key,
data={
feature_name: features
for feature_name, features in model_data_example.items()
if TEXT in feature_name
},
)
return cls.model_class().load(
tf_model_file,
model_data_example,
predict_data_example,
data_signature=model_data_example.get_signature(),
label_data=label_data,
entity_tag_specs=entity_tag_specs,
config=copy.deepcopy(config),
finetune_mode=finetune_mode,
)
def _instantiate_model_class(self, model_data: RasaModelData) -> "RasaModel":
return self.model_class()(
data_signature=model_data.get_signature(),
label_data=self._label_data,
entity_tag_specs=self._entity_tag_specs,
config=self.component_config,
)
class DIET(TransformerRasaModel):
def __init__(
self,
data_signature: Dict[Text, Dict[Text, List[FeatureSignature]]],
label_data: RasaModelData,
entity_tag_specs: Optional[List[EntityTagSpec]],
config: Dict[Text, Any],
) -> None:
# create entity tag spec before calling super otherwise building the model
# will fail
super().__init__("DIET", config, data_signature, label_data)
self._entity_tag_specs = self._ordered_tag_specs(entity_tag_specs)
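        # at prediction time only the text features are fed to the model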
self.predict_data_signature = {
feature_name: features
for feature_name, features in data_signature.items()
if TEXT in feature_name
}
# tf training
self._create_metrics()
self._update_metrics_to_log()
# needed for efficient prediction
self.all_labels_embed: Optional[tf.Tensor] = None
self._prepare_layers()
@staticmethod
def _ordered_tag_specs(
entity_tag_specs: Optional[List[EntityTagSpec]],
) -> List[EntityTagSpec]:
"""Ensure that order of entity tag specs matches CRF layer order."""
if entity_tag_specs is None:
return []
crf_order = [
ENTITY_ATTRIBUTE_TYPE,
ENTITY_ATTRIBUTE_ROLE,
ENTITY_ATTRIBUTE_GROUP,
]
ordered_tag_spec = []
for tag_name in crf_order:
for tag_spec in entity_tag_specs:
if tag_name == tag_spec.tag_name:
ordered_tag_spec.append(tag_spec)
return ordered_tag_spec
def _check_data(self) -> None:
if TEXT not in self.data_signature:
raise InvalidConfigException(
f"No text features specified. "
f"Cannot train '{self.__class__.__name__}' model."
)
if self.config[INTENT_CLASSIFICATION]:
if LABEL not in self.data_signature:
raise InvalidConfigException(
f"No label features specified. "
f"Cannot train '{self.__class__.__name__}' model."
)
if self.config[SHARE_HIDDEN_LAYERS]:
different_sentence_signatures = False
different_sequence_signatures = False
if (
SENTENCE in self.data_signature[TEXT]
and SENTENCE in self.data_signature[LABEL]
):
different_sentence_signatures = (
self.data_signature[TEXT][SENTENCE]
!= self.data_signature[LABEL][SENTENCE]
)
if (
SEQUENCE in self.data_signature[TEXT]
and SEQUENCE in self.data_signature[LABEL]
):
different_sequence_signatures = (
self.data_signature[TEXT][SEQUENCE]
!= self.data_signature[LABEL][SEQUENCE]
)
if different_sentence_signatures or different_sequence_signatures:
raise ValueError(
"If hidden layer weights are shared, data signatures "
"for text_features and label_features must coincide."
)
if self.config[ENTITY_RECOGNITION] and (
ENTITIES not in self.data_signature
or ENTITY_ATTRIBUTE_TYPE not in self.data_signature[ENTITIES]
):
logger.debug(
f"You specified '{self.__class__.__name__}' to train entities, but "
f"no entities are present in the training data. Skipping training of "
f"entities."
)
self.config[ENTITY_RECOGNITION] = False
def _create_metrics(self) -> None:
# self.metrics will have the same order as they are created
# so create loss metrics first to output losses first
self.mask_loss = tf.keras.metrics.Mean(name="m_loss")
self.intent_loss = tf.keras.metrics.Mean(name="i_loss")
self.entity_loss = tf.keras.metrics.Mean(name="e_loss")
self.entity_group_loss = tf.keras.metrics.Mean(name="g_loss")
self.entity_role_loss = tf.keras.metrics.Mean(name="r_loss")
# create accuracy metrics second to output accuracies second
self.mask_acc = tf.keras.metrics.Mean(name="m_acc")
self.intent_acc = tf.keras.metrics.Mean(name="i_acc")
self.entity_f1 = tf.keras.metrics.Mean(name="e_f1")
self.entity_group_f1 = tf.keras.metrics.Mean(name="g_f1")
self.entity_role_f1 = tf.keras.metrics.Mean(name="r_f1")
def _update_metrics_to_log(self) -> None:
debug_log_level = logging.getLogger("rasa").level == logging.DEBUG
if self.config[MASKED_LM]:
self.metrics_to_log.append("m_acc")
if debug_log_level:
self.metrics_to_log.append("m_loss")
if self.config[INTENT_CLASSIFICATION]:
self.metrics_to_log.append("i_acc")
if debug_log_level:
self.metrics_to_log.append("i_loss")
if self.config[ENTITY_RECOGNITION]:
for tag_spec in self._entity_tag_specs:
if tag_spec.num_tags != 0:
name = tag_spec.tag_name
self.metrics_to_log.append(f"{name[0]}_f1")
if debug_log_level:
self.metrics_to_log.append(f"{name[0]}_loss")
self._log_metric_info()
def _log_metric_info(self) -> None:
metric_name = {
"t": "total",
"i": "intent",
"e": "entity",
"m": "mask",
"r": "role",
"g": "group",
}
logger.debug("Following metrics will be logged during training: ")
for metric in self.metrics_to_log:
parts = metric.split("_")
name = f"{metric_name[parts[0]]} {parts[1]}"
logger.debug(f" {metric} ({name})")
def _prepare_layers(self) -> None:
# For user text, prepare layers that combine different feature types, embed
# everything using a transformer and optionally also do masked language
# modeling.
self.text_name = TEXT
self._tf_layers[
f"sequence_layer.{self.text_name}"
] = rasa_layers.RasaSequenceLayer(
self.text_name, self.data_signature[self.text_name], self.config
)
if self.config[MASKED_LM]:
self._prepare_mask_lm_loss(self.text_name)
# Intent labels are treated similarly to user text but without the transformer,
# without masked language modelling, and with no dropout applied to the
# individual features, only to the overall label embedding after all label
# features have been combined.
if self.config[INTENT_CLASSIFICATION]:
self.label_name = TEXT if self.config[SHARE_HIDDEN_LAYERS] else LABEL
# disable input dropout applied to sparse and dense label features
label_config = self.config.copy()
label_config.update(
{SPARSE_INPUT_DROPOUT: False, DENSE_INPUT_DROPOUT: False}
)
self._tf_layers[
f"feature_combining_layer.{self.label_name}"
] = rasa_layers.RasaFeatureCombiningLayer(
self.label_name, self.label_signature[self.label_name], label_config
)
self._prepare_ffnn_layer(
self.label_name,
self.config[HIDDEN_LAYERS_SIZES][self.label_name],
self.config[DROP_RATE],
)
self._prepare_label_classification_layers(predictor_attribute=TEXT)
if self.config[ENTITY_RECOGNITION]:
self._prepare_entity_recognition_layers()
def _prepare_mask_lm_loss(self, name: Text) -> None:
# for embedding predicted tokens at masked positions
self._prepare_embed_layers(f"{name}_lm_mask")
# for embedding the true tokens that got masked
self._prepare_embed_layers(f"{name}_golden_token")
# mask loss is additional loss
# set scaling to False, so that it doesn't overpower other losses
self._prepare_dot_product_loss(f"{name}_mask", scale_loss=False)
def _create_bow(
self,
sequence_features: List[Union[tf.Tensor, tf.SparseTensor]],
sentence_features: List[Union[tf.Tensor, tf.SparseTensor]],
sequence_feature_lengths: tf.Tensor,
name: Text,
) -> tf.Tensor:
x, _ = self._tf_layers[f"feature_combining_layer.{name}"](
(sequence_features, sentence_features, sequence_feature_lengths),
training=self._training,
)
# convert to bag-of-words by summing along the sequence dimension
x = tf.reduce_sum(x, axis=1)
return self._tf_layers[f"ffnn.{name}"](x, self._training)
def _create_all_labels(self) -> Tuple[tf.Tensor, tf.Tensor]:
all_label_ids = self.tf_label_data[LABEL_KEY][LABEL_SUB_KEY][0]
sequence_feature_lengths = self._get_sequence_feature_lengths(
self.tf_label_data, LABEL
)
x = self._create_bow(
self.tf_label_data[LABEL][SEQUENCE],
self.tf_label_data[LABEL][SENTENCE],
sequence_feature_lengths,
self.label_name,
)
all_labels_embed = self._tf_layers[f"embed.{LABEL}"](x)
return all_label_ids, all_labels_embed
def _mask_loss(
self,
outputs: tf.Tensor,
inputs: tf.Tensor,
seq_ids: tf.Tensor,
mlm_mask_boolean: tf.Tensor,
name: Text,
) -> tf.Tensor:
# make sure there is at least one element in the mask
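        # (if nothing was masked in this batch, mark position [0, 0, 0] so the
        # boolean_mask calls below never produce empty tensors)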
mlm_mask_boolean = tf.cond(
tf.reduce_any(mlm_mask_boolean),
lambda: mlm_mask_boolean,
lambda: tf.scatter_nd([[0, 0, 0]], [True], tf.shape(mlm_mask_boolean)),
)
mlm_mask_boolean = tf.squeeze(mlm_mask_boolean, -1)
# Pick elements that were masked, throwing away the batch & sequence dimension
# and effectively switching from shape (batch_size, sequence_length, units) to
# (num_masked_elements, units).
outputs = tf.boolean_mask(outputs, mlm_mask_boolean)
inputs = tf.boolean_mask(inputs, mlm_mask_boolean)
ids = tf.boolean_mask(seq_ids, mlm_mask_boolean)
tokens_predicted_embed = self._tf_layers[f"embed.{name}_lm_mask"](outputs)
tokens_true_embed = self._tf_layers[f"embed.{name}_golden_token"](inputs)
# To limit the otherwise computationally expensive loss calculation, we
# constrain the label space in MLM (i.e. token space) to only those tokens that
# were masked in this batch. Hence the reduced list of token embeddings
# (tokens_true_embed) and the reduced list of labels (ids) are passed as
# all_labels_embed and all_labels, respectively. In the future, we could be less
# restrictive and construct a slightly bigger label space which could include
# tokens not masked in the current batch too.
return self._tf_layers[f"loss.{name}_mask"](
inputs_embed=tokens_predicted_embed,
labels_embed=tokens_true_embed,
labels=ids,
all_labels_embed=tokens_true_embed,
all_labels=ids,
)
def _calculate_label_loss(
self, text_features: tf.Tensor, label_features: tf.Tensor, label_ids: tf.Tensor
) -> tf.Tensor:
all_label_ids, all_labels_embed = self._create_all_labels()
text_embed = self._tf_layers[f"embed.{TEXT}"](text_features)
label_embed = self._tf_layers[f"embed.{LABEL}"](label_features)
return self._tf_layers[f"loss.{LABEL}"](
text_embed, label_embed, label_ids, all_labels_embed, all_label_ids
)
def batch_loss(
self, batch_in: Union[Tuple[tf.Tensor], Tuple[np.ndarray]]
) -> tf.Tensor:
"""Calculates the loss for the given batch.
Args:
batch_in: The batch.
Returns:
The loss of the given batch.
"""
tf_batch_data = self.batch_to_model_data_format(batch_in, self.data_signature)
sequence_feature_lengths = self._get_sequence_feature_lengths(
tf_batch_data, TEXT
)
(
text_transformed,
text_in,
mask_combined_sequence_sentence,
text_seq_ids,
mlm_mask_boolean_text,
_,
) = self._tf_layers[f"sequence_layer.{self.text_name}"](
(
tf_batch_data[TEXT][SEQUENCE],
tf_batch_data[TEXT][SENTENCE],
sequence_feature_lengths,
),
training=self._training,
)
losses = []
# Lengths of sequences in case of sentence-level features are always 1, but they
# can effectively be 0 if sentence-level features aren't present.
sentence_feature_lengths = self._get_sentence_feature_lengths(
tf_batch_data, TEXT
)
combined_sequence_sentence_feature_lengths = (
sequence_feature_lengths + sentence_feature_lengths
)
if self.config[MASKED_LM]:
loss, acc = self._mask_loss(
text_transformed, text_in, text_seq_ids, mlm_mask_boolean_text, TEXT
)
self.mask_loss.update_state(loss)
self.mask_acc.update_state(acc)
losses.append(loss)
if self.config[INTENT_CLASSIFICATION]:
loss = self._batch_loss_intent(
combined_sequence_sentence_feature_lengths,
text_transformed,
tf_batch_data,
)
losses.append(loss)
if self.config[ENTITY_RECOGNITION]:
losses += self._batch_loss_entities(
mask_combined_sequence_sentence,
sequence_feature_lengths,
text_transformed,
tf_batch_data,
)
return tf.math.add_n(losses)
def _batch_loss_intent(
self,
combined_sequence_sentence_feature_lengths_text: tf.Tensor,
text_transformed: tf.Tensor,
tf_batch_data: Dict[Text, Dict[Text, List[tf.Tensor]]],
) -> tf.Tensor:
# get sentence features vector for intent classification
sentence_vector = self._last_token(
text_transformed, combined_sequence_sentence_feature_lengths_text
)
sequence_feature_lengths_label = self._get_sequence_feature_lengths(
tf_batch_data, LABEL
)
label_ids = tf_batch_data[LABEL_KEY][LABEL_SUB_KEY][0]
label = self._create_bow(
tf_batch_data[LABEL][SEQUENCE],
tf_batch_data[LABEL][SENTENCE],
sequence_feature_lengths_label,
self.label_name,
)
loss, acc = self._calculate_label_loss(sentence_vector, label, label_ids)
self._update_label_metrics(loss, acc)
return loss
def _update_label_metrics(self, loss: tf.Tensor, acc: tf.Tensor) -> None:
self.intent_loss.update_state(loss)
self.intent_acc.update_state(acc)
def _batch_loss_entities(
self,
mask_combined_sequence_sentence: tf.Tensor,
sequence_feature_lengths: tf.Tensor,
text_transformed: tf.Tensor,
tf_batch_data: Dict[Text, Dict[Text, List[tf.Tensor]]],
) -> List[tf.Tensor]:
losses = []
entity_tags = None
for tag_spec in self._entity_tag_specs:
if tag_spec.num_tags == 0:
continue
tag_ids = tf_batch_data[ENTITIES][tag_spec.tag_name][0]
# add a zero (no entity) for the sentence features to match the shape of
# inputs
tag_ids = tf.pad(tag_ids, [[0, 0], [0, 1], [0, 0]])
loss, f1, _logits = self._calculate_entity_loss(
text_transformed,
tag_ids,
mask_combined_sequence_sentence,
sequence_feature_lengths,
tag_spec.tag_name,
entity_tags,
)
if tag_spec.tag_name == ENTITY_ATTRIBUTE_TYPE:
# use the entity tags as additional input for the role
# and group CRF
entity_tags = tf.one_hot(
tf.cast(tag_ids[:, :, 0], tf.int32), depth=tag_spec.num_tags
)
self._update_entity_metrics(loss, f1, tag_spec.tag_name)
losses.append(loss)
return losses
def _update_entity_metrics(
self, loss: tf.Tensor, f1: tf.Tensor, tag_name: Text
) -> None:
if tag_name == ENTITY_ATTRIBUTE_TYPE:
self.entity_loss.update_state(loss)
self.entity_f1.update_state(f1)
elif tag_name == ENTITY_ATTRIBUTE_GROUP:
self.entity_group_loss.update_state(loss)
self.entity_group_f1.update_state(f1)
elif tag_name == ENTITY_ATTRIBUTE_ROLE:
self.entity_role_loss.update_state(loss)
self.entity_role_f1.update_state(f1)
def prepare_for_predict(self) -> None:
"""Prepares the model for prediction."""
if self.config[INTENT_CLASSIFICATION]:
_, self.all_labels_embed = self._create_all_labels()
def batch_predict(
self, batch_in: Union[Tuple[tf.Tensor], Tuple[np.ndarray]]
) -> Dict[Text, tf.Tensor]:
"""Predicts the output of the given batch.
Args:
batch_in: The batch.
Returns:
The output to predict.
"""
tf_batch_data = self.batch_to_model_data_format(
batch_in, self.predict_data_signature
)
sequence_feature_lengths = self._get_sequence_feature_lengths(
tf_batch_data, TEXT
)
sentence_feature_lengths = self._get_sentence_feature_lengths(
tf_batch_data, TEXT
)
text_transformed, _, _, _, _, attention_weights = self._tf_layers[
f"sequence_layer.{self.text_name}"
](
(
tf_batch_data[TEXT][SEQUENCE],
tf_batch_data[TEXT][SENTENCE],
sequence_feature_lengths,
),
training=self._training,
)
predictions = {
DIAGNOSTIC_DATA: {
"attention_weights": attention_weights,
"text_transformed": text_transformed,
}
}
if self.config[INTENT_CLASSIFICATION]:
predictions.update(
self._batch_predict_intents(
sequence_feature_lengths + sentence_feature_lengths,
text_transformed,
)
)
if self.config[ENTITY_RECOGNITION]:
predictions.update(
self._batch_predict_entities(sequence_feature_lengths, text_transformed)
)
return predictions
def _batch_predict_entities(
self, sequence_feature_lengths: tf.Tensor, text_transformed: tf.Tensor
) -> Dict[Text, tf.Tensor]:
predictions: Dict[Text, tf.Tensor] = {}
entity_tags = None
for tag_spec in self._entity_tag_specs:
# skip crf layer if it was not trained
if tag_spec.num_tags == 0:
continue
name = tag_spec.tag_name
_input = text_transformed
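            # for the role and group CRFs, append the embedded entity-type
            # predictions from the first iteration to the transformer output
            # before computing logits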
if entity_tags is not None:
_tags = self._tf_layers[f"embed.{name}.tags"](entity_tags)
_input = tf.concat([_input, _tags], axis=-1)
_logits = self._tf_layers[f"embed.{name}.logits"](_input)
pred_ids, confidences = self._tf_layers[f"crf.{name}"](
_logits, sequence_feature_lengths
)
predictions[f"e_{name}_ids"] = pred_ids
predictions[f"e_{name}_scores"] = confidences
if name == ENTITY_ATTRIBUTE_TYPE:
# use the entity tags as additional input for the role
# and group CRF
entity_tags = tf.one_hot(
tf.cast(pred_ids, tf.int32), depth=tag_spec.num_tags
)
return predictions
def _batch_predict_intents(
self,
combined_sequence_sentence_feature_lengths: tf.Tensor,
text_transformed: tf.Tensor,
) -> Dict[Text, tf.Tensor]:
if self.all_labels_embed is None:
raise ValueError(
"The model was not prepared for prediction. "
"Call `prepare_for_predict` first."
)
# get sentence feature vector for intent classification
sentence_vector = self._last_token(
text_transformed, combined_sequence_sentence_feature_lengths
)
sentence_vector_embed = self._tf_layers[f"embed.{TEXT}"](sentence_vector)
_, scores = self._tf_layers[
f"loss.{LABEL}"
].get_similarities_and_confidences_from_embeddings(
sentence_vector_embed[:, tf.newaxis, :],
self.all_labels_embed[tf.newaxis, :, :],
)
return {"i_scores": scores}
| 38.309951 | 88 | 0.625901 | [
"Apache-2.0"
] | Adarshsng/rasa | rasa/nlu/classifiers/diet_classifier.py | 70,452 | Python |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Entry',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=200)),
('content', models.TextField()),
],
options={
},
bases=(models.Model,),
),
]
| 24.12 | 114 | 0.538972 | [
"MIT"
] | bwarren2/django-basic-blog | blog/migrations/0001_initial.py | 603 | Python |
# Copyright 2020 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
# pylint: disable=protected-access
import tensorflow.compat.v1 as tf
from tensorflow.contrib import graph_editor as ge
from fedlearner.trainer import embedding
from fedlearner.trainer import estimator
from fedlearner.trainer import feature
from fedlearner.trainer import operator
from fedlearner.trainer import utils
class ConfigRunError(Exception):
pass
class SparseFLModel(estimator.FLModel):
def __init__(self, role, bridge, example_ids, exporting=False,
config_run=True,
bias_tensor=None, vec_tensor=None,
bias_embedding=None, vec_embedding=None,
feature_columns=None):
super(SparseFLModel, self).__init__(role,
bridge, example_ids, exporting)
self._config_run = config_run
self._num_shards = 1
if config_run:
self._bias_tensor = tf.placeholder(tf.float32, shape=[None, None])
self._vec_tensor = tf.placeholder(tf.float32, shape=[None, None])
else:
self._bias_tensor = bias_tensor
self._vec_tensor = vec_tensor
self._bias_embedding = bias_embedding
self._vec_embedding = vec_embedding
self._feature_columns = feature_columns
self._frozen = False
self._slot_ids = []
self._feature_slots = {}
self._feature_column_v1s = {}
self._use_fid_v2 = False
self._num_embedding_groups = 3
def add_feature_slot(self, *args, **kwargs):
assert not self._frozen, "Cannot modify model after finalization"
fs = feature.FeatureSlot(*args, **kwargs)
if self._use_fid_v2:
assert 0 <= fs.slot_id < utils.MAX_SLOTS_v2, \
"Invalid slot id %d"%fs.slot_id
else:
assert 0 <= fs.slot_id < utils.MAX_SLOTS, \
"Invalid slot id %d"%fs.slot_id
self._slot_ids.append(fs.slot_id)
self._feature_slots[fs.slot_id] = fs
return fs
def add_feature_column(self, *args, **kwargs):
assert not self._frozen, "Cannot modify model after finalization"
fc = feature.FeatureColumnV1(*args, **kwargs)
slot_id = fc.feature_slot.slot_id
assert slot_id in self._feature_slots and \
self._feature_slots[slot_id] is fc.feature_slot, \
"FeatureSlot with id %d must be added to Model first"%slot_id
assert slot_id not in self._feature_column_v1s, \
"Only one FeatureColumnV1 can be created for each slot"
self._feature_column_v1s[slot_id] = fc
return fc
def set_use_fid_v2(self, use_fid_v2):
self._use_fid_v2 = use_fid_v2
def get_bias(self):
return self._bias_tensor
def get_vec(self):
return self._vec_tensor
def _get_bias_slot_configs(self):
if not self._config_run:
return self._bias_embedding.config if self._bias_embedding else None
slot_list = []
fs_map = {}
for slot_id in self._slot_ids:
fs = self._feature_slots[slot_id]
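            # slots that share the same initializer and optimizer objects end up
            # in the same weight group; the objects' ids serve as the group key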
key = (id(fs._bias_initializer), id(fs._bias_optimizer))
fs_map[key] = fs
slot_list.append((fs.slot_id, 1, fs.hash_table_size, key))
if not slot_list:
return None
bias_config = utils._compute_slot_config(slot_list, 1,
self._use_fid_v2)
bias_config['name'] = 'bias'
bias_config['slot_list'] = slot_list
bias_config['initializers'] = [fs_map[i]._bias_initializer
for i in bias_config['weight_group_keys']]
bias_config['optimizers'] = [fs_map[i]._bias_optimizer
for i in bias_config['weight_group_keys']]
bias_config['use_fid_v2'] = self._use_fid_v2
return bias_config
def _get_vec_slot_configs(self):
if not self._config_run:
return self._vec_embedding.config if self._vec_embedding else None
slot_list = []
fs_map = {}
for slot_id in self._slot_ids:
if slot_id not in self._feature_column_v1s:
continue
fc = self._feature_column_v1s[slot_id]
fs = fc.feature_slot
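            # one dimension per slot is covered by the bias embedding, so the
            # vec embedding only needs the remaining (dim - 1) dimensions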
if fc.feature_slot.dim > 1:
key = (id(fs._vec_initializer), id(fs._vec_optimizer))
fs_map[key] = fs
slot_list.append((slot_id, fs.dim - 1, fs.hash_table_size, key))
if not slot_list:
return None
vec_config = utils._compute_slot_config(slot_list,
self._num_embedding_groups,
self._use_fid_v2)
vec_config['name'] = 'vec'
vec_config['slot_list'] = slot_list
vec_config['initializers'] = [fs_map[i]._vec_initializer
for i in vec_config['weight_group_keys']]
vec_config['optimizers'] = [fs_map[i]._vec_optimizer
for i in vec_config['weight_group_keys']]
vec_config['use_fid_v2'] = self._use_fid_v2
return vec_config
def get_feature_columns(self):
return self._feature_column_v1s
def freeze_slots(self, features):
assert not self._frozen, "Already finalized"
if self._config_run:
raise ConfigRunError()
self._sparse_v2opt = {}
bias_config = self._get_bias_slot_configs()
if bias_config:
bias_weights = self._bias_embedding.weights
for i, opt in enumerate(bias_config['optimizers']):
for j in range(self._num_shards):
self._sparse_v2opt[bias_weights[i][j]] = opt
vec_config = self._get_vec_slot_configs()
if vec_config:
vec_weights = self._vec_embedding.weights
for i, opt in enumerate(vec_config['optimizers']):
for j in range(self._num_shards):
self._sparse_v2opt[vec_weights[i][j]] = opt
placeholders = []
dims = []
for slot_id, _, _, _ in vec_config['slot_list']:
fc = self._feature_column_v1s[slot_id]
for sslice in fc.feature_slot.feature_slices:
dims.append(sslice.len)
placeholders.append(fc.get_vector(sslice))
vec_split = tf.split(self._vec_tensor, dims, axis=1)
ge.swap_ts(vec_split, placeholders)
for slot in self._feature_slots.values():
slot._frozen = True
self._frozen = True
class SparseFLEstimator(estimator.FLEstimator):
def __init__(self,
cluster_server,
trainer_master,
bridge,
role,
model_fn,
is_chief=False):
super(SparseFLEstimator, self).__init__(
cluster_server, trainer_master, bridge, role, model_fn, is_chief)
self._bias_slot_configs = None
self._vec_slot_configs = None
self._slot_configs = None
try:
ps_indices = cluster_server.cluster_spec.task_indices('ps')
except ValueError:
ps_indices = None
finally:
self._embedding_devices = [None,] if not ps_indices else \
['/job:ps/task:%d'%i for i in ps_indices]
self._num_shards = len(self._embedding_devices)
def _preprocess_fids(self, fids, configs):
if fids.indices.shape.rank == 2:
fids = tf.IndexedSlices(indices=fids.indices[:, 0],
values=fids.values,
dense_shape=fids.dense_shape)
features = {}
for config in configs:
features.update(operator._multidevice_preprocess_fids(
fids, config, num_shards=self._num_shards))
return features
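    # _set_model_configs runs model_fn once on a throwaway graph in config-run
    # mode; freeze_slots raises ConfigRunError there, which is caught solely to
    # collect the bias/vec slot configs and feature columns declared by the model.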
def _set_model_configs(self, mode): #features, labels, mode):
with tf.Graph().as_default() as g:
M = SparseFLModel(self._role,
self._bridge,
None, #features['example_id'],
config_run=True)
try:
self._model_fn(M, None, None, mode) # features, labels, mode)
except ConfigRunError as e:
self._bias_slot_configs = M._get_bias_slot_configs()
self._vec_slot_configs = M._get_vec_slot_configs()
self._feature_columns = M.get_feature_columns()
self._slot_configs = [self._bias_slot_configs,
self._vec_slot_configs]
return self._slot_configs
raise UserWarning("Failed to get model config. Did you forget to call \
freeze_slots in model_fn?")
def _get_features_and_labels_from_input_fn(self, input_fn, mode):
slot_configs = self._set_model_configs(mode) # features, labels, mode)
def input_fn_wrapper(*args, **kwargs):
dataset = input_fn(self._bridge, self._trainer_master)
def mapper(features, *args):
features.update(self._preprocess_fids(features.pop('fids'),
slot_configs))
return (features,) + args if args else features
dataset = dataset.map(
mapper, num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.prefetch(2)
return dataset
return super(SparseFLEstimator, self
)._get_features_and_labels_from_input_fn(input_fn_wrapper, mode)
def _get_model_spec(self, features, labels, mode):
features = features.copy()
if mode == tf.estimator.ModeKeys.PREDICT:
fids = tf.IndexedSlices(
indices=features.pop('fids_indices'),
values=features.pop('fids_values'),
dense_shape=features.pop('fids_dense_shape'))
features.update(self._preprocess_fids(
fids, self._slot_configs))
bias_embedding = embedding.Embedding(self._bias_slot_configs,
devices=self._embedding_devices)
bias_tensor = bias_embedding.lookup(features)
if self._vec_slot_configs is not None:
vec_embedding = embedding.Embedding(self._vec_slot_configs,
devices=self._embedding_devices)
vec_tensor = vec_embedding.lookup(features)
else:
vec_embedding = None
vec_tensor = None
model = SparseFLModel(self._role, self._bridge,
features.get('example_id', None),
config_run=False,
bias_tensor=bias_tensor,
bias_embedding=bias_embedding,
vec_tensor=vec_tensor,
vec_embedding=vec_embedding,
feature_columns=self._feature_columns)
spec = self._model_fn(model, features, labels, mode)
assert model._frozen, "Please finalize model in model_fn"
return spec, model
| 40.559028 | 80 | 0.606712 | [
"Apache-2.0"
] | 0400H/fedlearner | fedlearner/trainer/sparse_estimator.py | 11,681 | Python |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division, print_function, unicode_literals
import math
from warnings import warn
try:
import numpy
except ImportError:
numpy = None
try:
from numpy.linalg import svd as singular_value_decomposition
except ImportError:
singular_value_decomposition = None
from ._summarizer import AbstractSummarizer
class LsaSummarizer(AbstractSummarizer):
MIN_DIMENSIONS = 3
REDUCTION_RATIO = 1/1
_stop_words = frozenset()
@property
def stop_words(self):
return self._stop_words
@stop_words.setter
def stop_words(self, words):
self._stop_words = frozenset(map(self.normalize_word, words))
def __call__(self, document, sentences_count):
        self._ensure_dependencies_installed()
dictionary = self._create_dictionary(document)
# empty document
if not dictionary:
return ()
matrix = self._create_matrix(document, dictionary)
matrix = self._compute_term_frequency(matrix)
u, sigma, v = singular_value_decomposition(matrix, full_matrices=False)
ranks = iter(self._compute_ranks(sigma, v))
return self._get_best_sentences(document.sentences, sentences_count,
lambda s: next(ranks))
    def _ensure_dependencies_installed(self):
if numpy is None:
raise ValueError("LSA summarizer requires NumPy. Please, install it by command 'pip install numpy'.")
def _create_dictionary(self, document):
"""Creates mapping key = word, value = row index"""
# print(document.words)
words = map(self.normalize_word, document.words)
unique_words = frozenset(self.stem_word(w) for w in words if w not in self._stop_words)
return dict((w, i) for i, w in enumerate(unique_words))
def _create_matrix(self, document, dictionary):
"""
        Creates a matrix of shape |unique words|×|sentences| where each cell
        contains the number of occurrences of a word (row) in a sentence (col).
"""
sentences = document.sentences
words_count = len(dictionary)
sentences_count = len(sentences)
if words_count < sentences_count:
message = (
"Number of words (%d) is lower than number of sentences (%d). "
"LSA algorithm may not work properly."
)
warn(message % (words_count, sentences_count))
# create matrix |unique words|×|sentences| filled with zeroes
matrix = numpy.zeros((words_count, sentences_count))
for col, sentence in enumerate(sentences):
for word in map(self.stem_word, sentence.words):
                # only valid words are counted (not stop-words, ...)
if word in dictionary:
row = dictionary[word]
matrix[row, col] += 1
return matrix
def _compute_term_frequency(self, matrix, smooth=0.4):
"""
Computes TF metrics for each sentence (column) in the given matrix.
You can read more about smoothing parameter at URL below:
http://nlp.stanford.edu/IR-book/html/htmledition/maximum-tf-normalization-1.html
"""
assert 0.0 <= smooth < 1.0
max_word_frequencies = numpy.max(matrix, axis=0)
rows, cols = matrix.shape
for row in range(rows):
for col in range(cols):
max_word_frequency = max_word_frequencies[col]
if max_word_frequency != 0:
frequency = matrix[row, col]/max_word_frequency
matrix[row, col] = smooth + (1.0 - smooth)*frequency
return matrix
def _compute_ranks(self, sigma, v_matrix):
assert len(sigma) == v_matrix.shape[0], "Matrices should be multiplicable"
dimensions = max(LsaSummarizer.MIN_DIMENSIONS,
int(len(sigma)*LsaSummarizer.REDUCTION_RATIO))
powered_sigma = tuple(s**2 if i < dimensions else 0.0
for i, s in enumerate(sigma))
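        # Each sentence (a column of V^T) is scored by the length of its
        # projection onto the retained latent dimensions:
        #     rank_j = sqrt(sum_i sigma_i**2 * v_ij**2)  for i < dimensions
        # e.g. sigma = (3, 2, 1) and a column (0.5, 0.5, 0.0) keep all three
        # dimensions (MIN_DIMENSIONS = 3) and give sqrt(2.25 + 1.0 + 0.0), about 1.80.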
ranks = []
# iterate over columns of matrix (rows of transposed matrix)
for column_vector in v_matrix.T:
rank = sum(s*v**2 for s, v in zip(powered_sigma, column_vector))
ranks.append(math.sqrt(rank))
return ranks
| 34.983871 | 113 | 0.639696 | [
"MIT"
] | Sohone-Guo/Pointer-Generator | util_common/nlp/Sumy/summarizers/lsa.py | 4,340 | Python |
#!/usr/bin/env python
# Copyright (c) 2015 Freescale Semiconductor, Inc.
# Copyright 2016-2017 NXP
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import struct
from .codec import (MessageType, MessageInfo, Codec, CodecError)
class BasicCodec(Codec):
## Version of this codec.
BASIC_CODEC_VERSION = 1
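    # Wire layout of the 32-bit header built below (before little-endian packing):
    #   bits 31-24  codec version (BASIC_CODEC_VERSION)
    #   bits 23-16  service id
    #   bits 15-8   request id
    #   bits  7-0   message type
    # e.g. version=1, service=2, request=5, type=0 packs to 0x01020500.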
def start_write_message(self, msgInfo):
header = (self.BASIC_CODEC_VERSION << 24) \
| ((msgInfo.service & 0xff) << 16) \
| ((msgInfo.request & 0xff) << 8) \
| (msgInfo.type.value & 0xff)
self.write_uint32(header)
self.write_uint32(msgInfo.sequence)
def _write(self, fmt, value):
self._buffer += struct.pack(fmt, value)
self._cursor += struct.calcsize(fmt)
def write_bool(self, value):
self._write('<?', value)
def write_int8(self, value):
self._write('<b', value)
def write_int16(self, value):
self._write('<h', value)
def write_int32(self, value):
self._write('<i', value)
def write_int64(self, value):
self._write('<q', value)
def write_uint8(self, value):
self._write('<B', value)
def write_uint16(self, value):
self._write('<H', value)
def write_uint32(self, value):
self._write('<I', value)
def write_uint64(self, value):
self._write('<Q', value)
def write_float(self, value):
self._write('<f', value)
def write_double(self, value):
self._write('<d', value)
def write_string(self, value):
self.write_binary(value.encode())
def write_binary(self, value):
self.write_uint32(len(value))
self._buffer += value
def start_write_list(self, length):
self.write_uint32(length)
def start_write_union(self, discriminator):
self.write_uint32(discriminator)
def write_null_flag(self, flag):
self.write_uint8(1 if flag else 0)
##
    # @return MessageInfo carrying msgType, service, request, sequence.
def start_read_message(self):
header = self.read_uint32()
sequence = self.read_uint32()
version = header >> 24
if version != self.BASIC_CODEC_VERSION:
raise CodecError("unsupported codec version %d" % version)
service = (header >> 16) & 0xff
request = (header >> 8) & 0xff
msgType = MessageType(header & 0xff)
return MessageInfo(type=msgType, service=service, request=request, sequence=sequence)
def _read(self, fmt):
result = struct.unpack_from(fmt, self._buffer, self._cursor)
self._cursor += struct.calcsize(fmt)
return result[0]
def read_bool(self):
return self._read('<?')
def read_int8(self):
return self._read('<b')
def read_int16(self):
return self._read('<h')
def read_int32(self):
return self._read('<i')
def read_int64(self):
return self._read('<q')
def read_uint8(self):
return self._read('<B')
def read_uint16(self):
return self._read('<H')
def read_uint32(self):
return self._read('<I')
def read_uint64(self):
return self._read('<Q')
def read_float(self):
return self._read('<f')
def read_double(self):
return self._read('<d')
def read_string(self):
return self.read_binary().decode()
def read_binary(self):
length = self.read_uint32()
data = self._buffer[self._cursor:self._cursor+length]
self._cursor += length
return data
##
# @return Int of list length.
def start_read_list(self):
return self.read_uint32()
##
# @return Int of union discriminator.
def start_read_union(self):
return self.read_int32()
def read_null_flag(self):
return self.read_uint8()
| 25.559211 | 93 | 0.60592 | [
"MIT"
] | Sir-Branch/k64f-starter-template | sdk_k64f/middleware/multicore/erpc/erpc_python/erpc/basic_codec.py | 3,885 | Python |
class Employee:
def __init__(self, fname, lname):
self.fname = fname
self.lname = lname
# self.email = f"{fname}.{lname}@sandy.com"
def explain(self):
return f"This employee is {self.fname} {self.lname}"
def email(self):
return f"{self.fname}.{self.lname} @parker.com"
obj1 = Employee("Peter", "Parkar")
print(obj1.email())
obj1.fname = "Spider"
print(obj1.email()) # email() must be called again to see the updated fname
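# Illustrative sketch (not part of the original lesson file): the same idea with
# a @property, so email reads like an attribute and still follows fname changes.
class EmployeeWithProperty:
    def __init__(self, fname, lname):
        self.fname = fname
        self.lname = lname
    @property
    def email(self):
        return f"{self.fname}.{self.lname}@parker.com"
# obj2 = EmployeeWithProperty("Spider", "Parkar")
# print(obj2.email)  # no parentheses needed; reflects the current fname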
| 24.842105 | 68 | 0.616525 | [
"MIT"
] | codewithsandy/Python-Basic-Exp | 47 Setters_Property Decorators/main1.py | 472 | Python |
import os
import subprocess
import jinja2
import json
import openchemistry as oc
def run_calculation(geometry_file, output_file, params, scratch_dir):
# Read in the geometry from the geometry file
# This container expects the geometry file to be in .xyz format
with open(geometry_file) as f:
xyz_structure = f.read()
# remove the first two lines in the xyz file
# (i.e. number of atom and optional comment)
xyz_structure = xyz_structure.split('\n')[2:]
xyz_structure = '\n '.join(xyz_structure)
# Read the input parameters
theory = params.get('theory', 'hf')
task = params.get('task', 'energy')
basis = params.get('basis', 'cc-pvdz')
functional = params.get('functional', 'b3lyp')
charge = params.get('charge', 0)
multiplicity = params.get('multiplicity', 1)
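    # A typical `params` payload for the keys read above (values are illustrative):
    #   {'theory': 'dft', 'task': 'optimize', 'basis': '6-31g',
    #    'functional': 'b3lyp', 'charge': 0, 'multiplicity': 1}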
theory = theory.lower()
if theory == 'hf':
_theory = 'scf'
# We update the multiplicity key when using scf. SCF accept names and
# not numbers.
multiplicities = {'1': 'singlet', '2': 'doublet', '3': 'triplet'}
_multiplicity = multiplicities.get(str(multiplicity), 'singlet')
else:
_theory = theory
_multiplicity = multiplicity
task = task.lower()
if task == 'frequencies':
_task = 'task {0} {1}\ntask {0} {2}'.format(_theory, 'optimize', 'freq')
elif task == 'optimize':
_task = 'task {0} {1}'.format(_theory, 'optimize')
else: # single point energy
_task = 'task {0}'.format(_theory)
context = {
'task': _task,
'theory': _theory,
'functional': functional,
'charge': charge,
'multiplicity': _multiplicity,
'basis': basis,
}
# Combine the input parameters and geometry into a concrete input file
# that can be executed by the simulation code
template_path = os.path.dirname(__file__)
jinja2_env = \
jinja2.Environment(loader=jinja2.FileSystemLoader(template_path),
trim_blocks=True)
os.makedirs(scratch_dir, exist_ok=True)
os.chdir(scratch_dir)
raw_input_file = os.path.join(scratch_dir, 'raw.in')
raw_output_file = os.path.join(scratch_dir, 'raw.json')
with open(raw_input_file, 'wb') as f:
if _theory == 'dft':
jinja2_env.get_template('nwchem.in.j2').stream(**context, xyz_structure=xyz_structure).dump(f, encoding='utf8')
else:
jinja2_env.get_template('nwchem.sfc.in.j2').stream(**context, xyz_structure=xyz_structure).dump(f, encoding='utf8')
# Execute the code and write to output
cpus = 4
subprocess.run(['mpirun', '-np', str(cpus), "/opt/nwchem/bin/LINUX64/nwchem",
raw_input_file, raw_output_file])
# Convert the raw output file generated by the code execution, into the
# output format declared in the container description (cjson)
with open(raw_output_file) as f:
cjson = oc.NWChemJsonReader(f).read()
# Save the calculation parameters in the cjson output for future reference
cjson['inputParameters'] = params
with open(output_file, 'w') as f:
json.dump(cjson, f)
| 36.068182 | 127 | 0.640832 | [
"BSD-3-Clause"
] | OpenChemistry/mongochemdeploy | docker/nwchem/src/run.py | 3,174 | Python |
# coding: utf-8
import sys, os
sys.path.append(os.pardir) # setting to import files from the parent directory
import numpy as np
import matplotlib.pyplot as plt
from ch08.deep_convnet import DeepConvNet
from dataset.mnist import load_mnist
(x_train, t_train), (x_test, t_test) = load_mnist(flatten=False)
network = DeepConvNet()
network.load_params("deep_convnet_params.pkl")
sampled = 10000 # use a subset for speed
x_test = x_test[:sampled]
t_test = t_test[:sampled]
print("caluculate accuracy (float64) ... ")
print(network.accuracy(x_test, t_test))
# Convert to float16
x_test = x_test.astype(np.float16)
for param in network.params.values():
param[...] = param.astype(np.float16)
print("caluculate accuracy (float16) ... ")
print(network.accuracy(x_test, t_test))
| 25.310345 | 64 | 0.757493 | [
"MIT"
] | gangigammo/deep-learning-1 | ch08/half_float_network.py | 804 | Python |
# Generated by Django 2.1.7 on 2019-03-16 10:46
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('accounts', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='user',
name='is_donor',
),
]
| 17.666667 | 47 | 0.578616 | [
"MIT"
] | paceite/Seelife---An-NGO-Website | src/accounts/migrations/0002_remove_user_is_donor.py | 318 | Python |
"""
A validator for a frontend failure model. The model contains all
the failing web frontends and their status, as well as the virtual
machines they run on.
"""
from voluptuous import Schema
schema = Schema({
'web_frontends_failures'
})
| 21.727273 | 66 | 0.769874 | [
"MIT"
] | lmontrieux/MAPE-validators | frontend-failure-model/frontend_failure.py | 239 | Python |
"""
Test SBProcess APIs, including ReadMemory(), WriteMemory(), and others.
"""
from __future__ import print_function
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test.lldbutil import get_stopped_thread, state_type_to_str
class ProcessAPITestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line number to break inside main().
self.line = line_number(
"main.cpp",
"// Set break point at this line and check variable 'my_char'.")
@skipIfReproducer # SBProcess::ReadMemory is not instrumented.
def test_read_memory(self):
"""Test Python SBProcess.ReadMemory() API."""
self.build()
exe = self.getBuildArtifact("a.out")
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
breakpoint = target.BreakpointCreateByLocation("main.cpp", self.line)
self.assertTrue(breakpoint, VALID_BREAKPOINT)
# Launch the process, and do not stop at the entry point.
process = target.LaunchSimple(
None, None, self.get_process_working_directory())
thread = get_stopped_thread(process, lldb.eStopReasonBreakpoint)
self.assertTrue(
thread.IsValid(),
"There should be a thread stopped due to breakpoint")
frame = thread.GetFrameAtIndex(0)
# Get the SBValue for the global variable 'my_char'.
val = frame.FindValue("my_char", lldb.eValueTypeVariableGlobal)
self.DebugSBValue(val)
# Due to the typemap magic (see lldb.swig), we pass in 1 to ReadMemory and
# expect to get a Python string as the result object!
error = lldb.SBError()
self.assertFalse(val.TypeIsPointerType())
content = process.ReadMemory(
val.AddressOf().GetValueAsUnsigned(), 1, error)
if not error.Success():
self.fail("SBProcess.ReadMemory() failed")
if self.TraceOn():
print("memory content:", content)
self.expect(
content,
"Result from SBProcess.ReadMemory() matches our expected output: 'x'",
exe=False,
startstr=b'x')
# Read (char *)my_char_ptr.
val = frame.FindValue("my_char_ptr", lldb.eValueTypeVariableGlobal)
self.DebugSBValue(val)
cstring = process.ReadCStringFromMemory(
val.GetValueAsUnsigned(), 256, error)
if not error.Success():
self.fail("SBProcess.ReadCStringFromMemory() failed")
if self.TraceOn():
print("cstring read is:", cstring)
self.expect(
cstring,
"Result from SBProcess.ReadCStringFromMemory() matches our expected output",
exe=False,
startstr='Does it work?')
# Get the SBValue for the global variable 'my_cstring'.
val = frame.FindValue("my_cstring", lldb.eValueTypeVariableGlobal)
self.DebugSBValue(val)
# Due to the typemap magic (see lldb.swig), we pass in 256 to read at most 256 bytes
# from the address, and expect to get a Python string as the result
# object!
self.assertFalse(val.TypeIsPointerType())
cstring = process.ReadCStringFromMemory(
val.AddressOf().GetValueAsUnsigned(), 256, error)
if not error.Success():
self.fail("SBProcess.ReadCStringFromMemory() failed")
if self.TraceOn():
print("cstring read is:", cstring)
self.expect(
cstring,
"Result from SBProcess.ReadCStringFromMemory() matches our expected output",
exe=False,
startstr='lldb.SBProcess.ReadCStringFromMemory() works!')
# Get the SBValue for the global variable 'my_uint32'.
val = frame.FindValue("my_uint32", lldb.eValueTypeVariableGlobal)
self.DebugSBValue(val)
# Due to the typemap magic (see lldb.swig), we pass in 4 to read 4 bytes
# from the address, and expect to get an int as the result!
self.assertFalse(val.TypeIsPointerType())
my_uint32 = process.ReadUnsignedFromMemory(
val.AddressOf().GetValueAsUnsigned(), 4, error)
if not error.Success():
self.fail("SBProcess.ReadCStringFromMemory() failed")
if self.TraceOn():
print("uint32 read is:", my_uint32)
if my_uint32 != 12345:
self.fail(
"Result from SBProcess.ReadUnsignedFromMemory() does not match our expected output")
@skipIfReproducer # SBProcess::WriteMemory is not instrumented.
def test_write_memory(self):
"""Test Python SBProcess.WriteMemory() API."""
self.build()
exe = self.getBuildArtifact("a.out")
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
breakpoint = target.BreakpointCreateByLocation("main.cpp", self.line)
self.assertTrue(breakpoint, VALID_BREAKPOINT)
# Launch the process, and do not stop at the entry point.
process = target.LaunchSimple(
None, None, self.get_process_working_directory())
thread = get_stopped_thread(process, lldb.eStopReasonBreakpoint)
self.assertTrue(
thread.IsValid(),
"There should be a thread stopped due to breakpoint")
frame = thread.GetFrameAtIndex(0)
# Get the SBValue for the global variable 'my_char'.
val = frame.FindValue("my_char", lldb.eValueTypeVariableGlobal)
self.DebugSBValue(val)
# If the variable does not have a load address, there's no sense
# continuing.
if not val.GetLocation().startswith("0x"):
return
# OK, let's get the hex location of the variable.
location = int(val.GetLocation(), 16)
        # The program logic gives the 'my_char' variable the memory content 'x',
        # but we want to use the WriteMemory() API to assign 'a' to the variable.
# Now use WriteMemory() API to write 'a' into the global variable.
error = lldb.SBError()
result = process.WriteMemory(location, 'a', error)
if not error.Success() or result != 1:
self.fail("SBProcess.WriteMemory() failed")
# Read from the memory location. This time it should be 'a'.
# Due to the typemap magic (see lldb.swig), we pass in 1 to ReadMemory and
# expect to get a Python string as the result object!
content = process.ReadMemory(location, 1, error)
if not error.Success():
self.fail("SBProcess.ReadMemory() failed")
if self.TraceOn():
print("memory content:", content)
self.expect(
content,
"Result from SBProcess.ReadMemory() matches our expected output: 'a'",
exe=False,
startstr=b'a')
@skipIfReproducer # SBProcess::WriteMemory is not instrumented.
def test_access_my_int(self):
"""Test access 'my_int' using Python SBProcess.GetByteOrder() and other APIs."""
self.build()
exe = self.getBuildArtifact("a.out")
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
breakpoint = target.BreakpointCreateByLocation("main.cpp", self.line)
self.assertTrue(breakpoint, VALID_BREAKPOINT)
# Launch the process, and do not stop at the entry point.
process = target.LaunchSimple(
None, None, self.get_process_working_directory())
thread = get_stopped_thread(process, lldb.eStopReasonBreakpoint)
self.assertTrue(
thread.IsValid(),
"There should be a thread stopped due to breakpoint")
frame = thread.GetFrameAtIndex(0)
# Get the SBValue for the global variable 'my_int'.
val = frame.FindValue("my_int", lldb.eValueTypeVariableGlobal)
self.DebugSBValue(val)
# If the variable does not have a load address, there's no sense
# continuing.
if not val.GetLocation().startswith("0x"):
return
# OK, let's get the hex location of the variable.
location = int(val.GetLocation(), 16)
        # Note that the canonical form of the bytearray is little endian.
from lldbsuite.test.lldbutil import int_to_bytearray, bytearray_to_int
byteSize = val.GetByteSize()
bytes = int_to_bytearray(256, byteSize)
byteOrder = process.GetByteOrder()
if byteOrder == lldb.eByteOrderBig:
bytes.reverse()
elif byteOrder == lldb.eByteOrderLittle:
pass
else:
# Neither big endian nor little endian? Return for now.
# Add more logic here if we want to handle other types.
return
        # The program logic gives the 'my_int' variable int type and a value of 0,
        # but we want to use the WriteMemory() API to assign 256 to the variable.
# Now use WriteMemory() API to write 256 into the global variable.
error = lldb.SBError()
result = process.WriteMemory(location, bytes, error)
if not error.Success() or result != byteSize:
self.fail("SBProcess.WriteMemory() failed")
# Make sure that the val we got originally updates itself to notice the
# change:
self.expect(
val.GetValue(),
"SBProcess.ReadMemory() successfully writes (int)256 to the memory location for 'my_int'",
exe=False,
startstr='256')
# And for grins, get the SBValue for the global variable 'my_int'
# again, to make sure that also tracks the new value:
val = frame.FindValue("my_int", lldb.eValueTypeVariableGlobal)
self.expect(
val.GetValue(),
"SBProcess.ReadMemory() successfully writes (int)256 to the memory location for 'my_int'",
exe=False,
startstr='256')
# Now read the memory content. The bytearray should have (byte)1 as
# the second element.
content = process.ReadMemory(location, byteSize, error)
if not error.Success():
self.fail("SBProcess.ReadMemory() failed")
# The bytearray_to_int utility function expects a little endian
# bytearray.
if byteOrder == lldb.eByteOrderBig:
content = bytearray(content, 'ascii')
content.reverse()
new_value = bytearray_to_int(content, byteSize)
if new_value != 256:
self.fail("Memory content read from 'my_int' does not match (int)256")
# Dump the memory content....
if self.TraceOn():
for i in content:
print("byte:", i)
def test_remote_launch(self):
"""Test SBProcess.RemoteLaunch() API with a process not in eStateConnected, and it should fail."""
self.build()
exe = self.getBuildArtifact("a.out")
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
# Launch the process, and do not stop at the entry point.
process = target.LaunchSimple(
None, None, self.get_process_working_directory())
if self.TraceOn():
print("process state:", state_type_to_str(process.GetState()))
self.assertTrue(process.GetState() != lldb.eStateConnected)
error = lldb.SBError()
success = process.RemoteLaunch(
None, None, None, None, None, None, 0, False, error)
self.assertTrue(
not success,
"RemoteLaunch() should fail for process state != eStateConnected")
def test_get_num_supported_hardware_watchpoints(self):
"""Test SBProcess.GetNumSupportedHardwareWatchpoints() API with a process."""
self.build()
exe = self.getBuildArtifact("a.out")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
breakpoint = target.BreakpointCreateByLocation("main.cpp", self.line)
self.assertTrue(breakpoint, VALID_BREAKPOINT)
# Launch the process, and do not stop at the entry point.
process = target.LaunchSimple(
None, None, self.get_process_working_directory())
error = lldb.SBError()
num = process.GetNumSupportedHardwareWatchpoints(error)
if self.TraceOn() and error.Success():
print("Number of supported hardware watchpoints: %d" % num)
@no_debug_info_test
def test_get_process_info(self):
"""Test SBProcess::GetProcessInfo() API with a locally launched process."""
self.build()
exe = self.getBuildArtifact("a.out")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
# Launch the process and stop at the entry point.
launch_info = target.GetLaunchInfo()
launch_info.SetWorkingDirectory(self.get_process_working_directory())
launch_flags = launch_info.GetLaunchFlags()
launch_flags |= lldb.eLaunchFlagStopAtEntry
launch_info.SetLaunchFlags(launch_flags)
error = lldb.SBError()
process = target.Launch(launch_info, error)
if not error.Success():
self.fail("Failed to launch process")
# Verify basic process info can be retrieved successfully
process_info = process.GetProcessInfo()
self.assertTrue(process_info.IsValid())
file_spec = process_info.GetExecutableFile()
self.assertTrue(file_spec.IsValid())
process_name = process_info.GetName()
self.assertIsNotNone(process_name, "Process has a name")
self.assertGreater(len(process_name), 0, "Process name isn't blank")
self.assertEqual(file_spec.GetFilename(), "a.out")
self.assertNotEqual(
process_info.GetProcessID(), lldb.LLDB_INVALID_PROCESS_ID,
"Process ID is valid")
triple = process_info.GetTriple()
self.assertIsNotNone(triple, "Process has a triple")
# Additional process info varies by platform, so just check that
# whatever info was retrieved is consistent and nothing blows up.
if process_info.UserIDIsValid():
self.assertNotEqual(
process_info.GetUserID(), lldb.UINT32_MAX,
"Process user ID is valid")
else:
self.assertEqual(
process_info.GetUserID(), lldb.UINT32_MAX,
"Process user ID is invalid")
if process_info.GroupIDIsValid():
self.assertNotEqual(
process_info.GetGroupID(), lldb.UINT32_MAX,
"Process group ID is valid")
else:
self.assertEqual(
process_info.GetGroupID(), lldb.UINT32_MAX,
"Process group ID is invalid")
if process_info.EffectiveUserIDIsValid():
self.assertNotEqual(
process_info.GetEffectiveUserID(), lldb.UINT32_MAX,
"Process effective user ID is valid")
else:
self.assertEqual(
process_info.GetEffectiveUserID(), lldb.UINT32_MAX,
"Process effective user ID is invalid")
if process_info.EffectiveGroupIDIsValid():
self.assertNotEqual(
process_info.GetEffectiveGroupID(), lldb.UINT32_MAX,
"Process effective group ID is valid")
else:
self.assertEqual(
process_info.GetEffectiveGroupID(), lldb.UINT32_MAX,
"Process effective group ID is invalid")
process_info.GetParentProcessID()
def test_allocate_deallocate_memory(self):
"""Test Python SBProcess.AllocateMemory() and SBProcess.DeallocateMemory() APIs."""
self.build()
(target, process, main_thread, main_breakpoint) = lldbutil.run_to_source_breakpoint(
self, "// Set break point at this line", lldb.SBFileSpec("main.cpp"))
# Allocate a block of memory in the target process
error = lldb.SBError()
addr = process.AllocateMemory(16384, lldb.ePermissionsReadable, error)
if not error.Success() or addr == lldb.LLDB_INVALID_ADDRESS:
self.fail("SBProcess.AllocateMemory() failed")
# Now use WriteMemory() API to write 'a' into the allocated
# memory. Note that the debugger can do this even though the
# block is not set writable.
result = process.WriteMemory(addr, 'a', error)
if not error.Success() or result != 1:
self.fail("SBProcess.WriteMemory() failed")
# Read from the memory location. This time it should be 'a'.
# Due to the typemap magic (see lldb.swig), we pass in 1 to ReadMemory and
# expect to get a Python string as the result object!
content = process.ReadMemory(addr, 1, error)
if not error.Success():
self.fail("SBProcess.ReadMemory() failed")
if self.TraceOn():
print("memory content:", content)
self.expect(
content,
"Result from SBProcess.ReadMemory() matches our expected output: 'a'",
exe=False,
startstr=b'a')
# Verify that the process itself can read the allocated memory
frame = main_thread.GetFrameAtIndex(0)
val = frame.EvaluateExpression(
"test_read(reinterpret_cast<char *>({:#x}))".format(addr))
self.expect(val.GetValue(),
"Result of test_read() matches expected output 'a'",
exe=False,
startstr="'a'")
# Verify that the process cannot write into the block
val = frame.EvaluateExpression(
"test_write(reinterpret_cast<char *>({:#x}), 'b')".format(addr))
if val.GetError().Success():
self.fail(
"test_write() to allocated memory without write permission unexpectedly succeeded")
# Deallocate the memory
error = process.DeallocateMemory(addr)
if not error.Success():
self.fail("SBProcess.DeallocateMemory() failed")
| 40.22807 | 106 | 0.629089 | [
"Apache-2.0"
] | AaronBallman/llvm | lldb/test/API/python_api/process/TestProcessAPI.py | 18,344 | Python |
import uvicorn
from fastapi import (FastAPI, File, UploadFile)
from starlette.responses import RedirectResponse
from tensorflow.python.keras.preprocessing import image as imgx
import requests
from PIL import Image
from application.components import predict, read_imagefile
from application.schema import Symptom
from application.components.prediction import symptom_check
from googletrans import Translator, constants
from pprint import pprint
app_desc = """<h2>Try this app by uploading any image with `predict/image`</h2>
<h2>Analyze photos</h2>
<br>Template by Aniket Maurya, new version by Joaquin Egocheaga"""
app = FastAPI(title='Comparizy , Tensorflow FastAPI ', description=app_desc)
translator = Translator()
@app.get("/", include_in_schema=False)
async def index():
return RedirectResponse(url="/docs")
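# predict_api below: checks the upload extension, runs the classifier on the
# image, takes the top prediction's class name, replaces underscores with spaces
# and returns the Spanish translation of that label.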
@app.post("/predict/image")
async def predict_api(file: UploadFile = File(...)):
extension = file.filename.split(".")[-1] in ("jpg", "jpeg", "png")
print(file.filename)
print(extension)
if not extension:
return "Image must be jpg or png format!"
image = read_imagefile(await file.read())
prediction = predict(image)
clase=prediction[0]['class']
clase=clase.replace("_", " ")
print(clase)
print("X")
translation = translator.translate(clase, "es")
translation=translation.text
print(translation)
return translation
@app.post("/api/covid-symptom-check")
def check_risk(symptom: Symptom):
return symptom_check.get_risk_level(symptom)
if __name__ == "__main__":
uvicorn.run(app, debug=True)
| 28.821429 | 79 | 0.724907 | [
"MIT"
] | EgoPro1/InceptionV2 | application/server/main.py | 1,614 | Python |
import os
import sys
path = os.getcwd()
package_path = (os.path.abspath(os.path.join(path, os.pardir))).replace('\\', '/')+'/'
sys.path.insert(1, package_path)
from config.config import *
##############################################Scrape-1###################################################
def contains(text, subtext):
    return subtext in text
def get_scrape_url(url):
encoding = "html.parser"
resp = requests.get(url)
http_encoding = resp.encoding if 'charset' in resp.headers.get('content-type', '').lower() else None
html_encoding = EncodingDetector.find_declared_encoding(resp.content, is_html=True)
encoding = html_encoding or http_encoding
soup = BeautifulSoup(resp.content, from_encoding=encoding)
for link in soup.find_all('a', href=True):
scrape_url = str(link['href'])
if(contains(scrape_url , "s3.amazonaws.com") and contains(scrape_url , ".zip")):
break
file_name = scrape_url.split("/Kickstarter/")[1]
return scrape_url, file_name
def download(scrape_url , output_directory):
try:
wget.download(scrape_url, out=output_directory)
except:
raise Exception("Failed in downloading the data file")
return output_directory
def unzip_data(input_file_path , output_directory):
try:
with zipfile.ZipFile(input_file_path, 'r') as zip_ref:
zip_ref.extractall(output_directory)
except Exception as e:
raise Exception("Failed to unzip the data folder !+....{}",format(e))
os.remove(input_file_path)
return True
###################################scrape-1ends############################################################
| 34.12 | 107 | 0.611958 | [
"MIT"
] | sai-krishna-msk/KickAssist | scripts/processing_scripts/raw_scrape_processing.py | 1,706 | Python |
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
from obspy.core.stream import Stream
def plot_gll(x, y, z):
""" Plots values on 2D unstructured GLL mesh
"""
r = (max(x) - min(x))/(max(y) - min(y))
rx = r/np.sqrt(1 + r**2)
ry = 1/np.sqrt(1 + r**2)
f = plt.figure(figsize=(10*rx, 10*ry))
p = plt.tricontourf(x, y, z, 125)
plt.axis('image')
return f, p
def plot_vector(t, v, xlabel='', ylabel='', title=''):
""" Plots a vector or time series.
Parameters
----------
v: ndarray, ndims = 1/2
Vector or time series to plot
xlabel: str
x axis label
ylabel: str
y axis label
title: str
plot title
Raises
------
ValueError
If dimensions of v are greater than 2
"""
# check input dimension
if v.ndim > 2:
raise ValueError('v must be a vector or a time series')
if v.ndim == 1:
x = list(range(len(v)))
y = v
else:
x = v[:, 0]
y = v[:, 1]
# plot
plt.plot(t, v)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(title)
plt.show()
def plot_section(stream, ax=None, cmap='seismic', clip=100, title='', x_interval=1.0, y_interval=1.0):
""" Plots a seismic section from an obspy stream.
Parameters
----------
stream: Obspy stream object
Obspy stream object created from a SU data file
ax: Matplotlib Axes object
Optional axis object
cmap: str
Matplotlib colormap option.
clip: float
Percentage value (0-100) for amplitude clipping
title: str
plot title
x_interval: float
Offset axis tick interval in km
y_interval: float
        Time axis tick interval in s
Raises
------
NotImplementedError
If stream object does not have SU format
"""
# check format of stream
if stream[0].stats._format != 'SU':
        raise NotImplementedError('plot_section currently only supports streams for SU data files.')
# get dimensions
nr = len(stream)
nt = len(stream[0].data)
dt = stream[0].stats.delta
d_aspect = nr / float(nt)
# convert stream to image array
data = _convert_to_array(stream)
# default values
fsize = 6
scale_factor = 1.5
if ax is None:
fig, ax = plt.subplots(figsize=(fsize, scale_factor*fsize))
im = ax.imshow(data, aspect=scale_factor*d_aspect, clim=_cscale(data, clip=clip))
im.set_cmap(cmap)
# labels
ax.set_title(title)
ax.set_xlabel('Offset [km]')
ax.set_ylabel('Time [s]')
#set ticks
t = _get_time(stream)
yticks, ytick_labels = get_regular_ticks(t, y_interval)
ax.set_yticks(yticks)
ax.set_yticklabels(ytick_labels)
offsets =_get_offsets(stream)
xticks, xtick_labels = get_regular_ticks(offsets, x_interval)
ax.set_xticks(xticks)
ax.set_xticklabels(xtick_labels)
return ax
def _convert_to_array(stream):
""" Extracts trace data from an obspy stream and returns a 2D array.
Parameters
----------
stream: Obspy stream object
Stream storing trace data
Returns
-------
output: ndarray, ndim=2
Returns an (nt*nr) array. nt and nr are the number of sample points
and number of traces respectively. Assumes trace lengths are equal
for all traces.
Raises
------
TypeError
If stream is not an obspy stream
"""
if not isinstance(stream, Stream):
raise TypeError('Input object should be an obspy stream.')
nt = len(stream.traces[0].data)
nr = len(stream)
output = np.zeros((nt, nr))
for i, trace in enumerate(stream):
output[:, i] = trace.data[:]
return output
def _cscale(v, clip=100):
""" Return limits for colormap.
"""
perc = clip / 100.
return -perc * abs(v).max(), perc * abs(v).max()
def _get_time(stream):
""" Get fixed time vector for stream object.
"""
dt = stream[0].stats.delta
nt = len(stream[0].data)
return np.arange(0, nt*dt, dt)
def _get_offsets(stream):
""" Return offsets.
"""
nr = len(stream)
offsets = np.zeros(nr)
scalco = stream[0].stats.su.trace_header.scalar_to_be_applied_to_all_coordinates
# set scale to km
if scalco == 0:
scalco = 1e-3 # assume coords are in m
else:
scalco = 1.0e-3 / scalco
for i, tr in enumerate(stream):
offsets[i] = (tr.stats.su.trace_header.group_coordinate_x -
tr.stats.su.trace_header.source_coordinate_x) * scalco
return offsets
def get_regular_ticks(v, interval):
""" Returns regular tick intervals.
"""
f = interp1d(v, list(range(len(v))))
begin = int(v[0] / interval) * interval
end = v[-1]
tick_labels = np.arange(begin, end, interval)
ticks = f(tick_labels)
return ticks, tick_labels
| 23.878641 | 102 | 0.61049 | [
"BSD-2-Clause"
] | fanwu8/SeisFlowsQ | seisflows/tools/graphics.py | 4,919 | Python |
# Generated by Django 3.0.6 on 2020-05-24 13:43
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('grid', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Image',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('img_name', models.CharField(max_length=30)),
('img_description', models.TextField()),
('photo', models.ImageField(default='', upload_to='images/')),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='grid.Category')),
('editor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='grid.Editor')),
('location', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='grid.Location')),
],
),
]
| 37.37037 | 114 | 0.60555 | [
"MIT"
] | greatdaniels/gallery-app | grid/migrations/0002_image.py | 1,009 | Python |
a = []
# append element at the end.
a.append(2)
a.append(3)
print(a)
# insert at a specific location.
a.insert(0, 5)
a.insert(10, 5)
print(a)
# when the specified position is beyond the end of the list, it inserts at the end.
a.insert(100, 6)
print(a)
# Deleting elements from a list.
a.remove(5) # removes the first occurrence of the value passed
print(a, len(a))
del a[0]
print(a, len(a))
# access the last element
print(a[-1])
# Printing a list
print(len(a))
for item in range(len(a)): # the len is not inclusive
print("(", item, ", ", a[item], ")")
print("-" * 30)
for item in range(0, len(a), 1): # the len is not inclusive
print("(", item, ", ", a[item], ")")
print("-" * 30)
# Reverse printing a list
for item in range(len(a) - 1, -1, -1): # the len is not inclusive
print("(", item, ", ", a[item], ")")
print("-" * 30)
# Jump a certain number of times.
for item in range(0, len(a), 2): # the len is not inclusive
print("(", item, ", ", a[item], ")")
print("-" * 30)
| 22.227273 | 66 | 0.604294 | [
"MIT"
] | archeranimesh/fantastic-waffle | SRC/December-Batch/02_class/01_list.py | 978 | Python |
import os
import sys
import yaml
import argparse
from kubernetes import client, config
import urllib3
from jinja2 import FileSystemLoader, Environment
urllib3.disable_warnings()
KERNEL_POD_TEMPLATE_PATH = '/kernel-pod.yaml.j2'
def generate_kernel_pod_yaml(keywords):
"""Return the kubernetes pod spec as a yaml string.
- load jinja2 template from this file directory.
- substitute template variables with keywords items.
"""
j_env = Environment(loader=FileSystemLoader(os.path.dirname(__file__)), trim_blocks=True, lstrip_blocks=True)
    # The jinja2 template substitutes template variables with None even when keywords has no corresponding item.
    # Therefore, there is no need to check whether any are left unsubstituted. The Kubernetes API server will validate the pod spec instead.
k8s_yaml = j_env.get_template(KERNEL_POD_TEMPLATE_PATH).render(**keywords)
return k8s_yaml
def launch_kubernetes_kernel(kernel_id, port_range, response_addr, spark_context_init_mode):
# Launches a containerized kernel as a kubernetes pod.
config.load_incluster_config()
# Capture keywords and their values.
keywords = dict()
# Factory values...
# Since jupyter lower cases the kernel directory as the kernel-name, we need to capture its case-sensitive
# value since this is used to locate the kernel launch script within the image.
keywords['eg_port_range'] = port_range
keywords['eg_response_address'] = response_addr
keywords['kernel_id'] = kernel_id
keywords['kernel_name'] = os.path.basename(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
keywords['kernel_spark_context_init_mode'] = spark_context_init_mode
# Walk env variables looking for names prefixed with KERNEL_. When found, set corresponding keyword value
# with name in lower case.
for name, value in os.environ.items():
if name.startswith('KERNEL_'):
keywords[name.lower()] = yaml.safe_load(value)
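    # At this point `keywords` holds the factory values set above plus every
    # KERNEL_* environment variable lower-cased, e.g. (illustrative values only):
    #   {'eg_port_range': '40000..40500', 'eg_response_address': '10.0.0.5:8877',
    #    'kernel_id': 'abc-123', 'kernel_name': 'python_kubernetes',
    #    'kernel_namespace': 'default', 'kernel_spark_context_init_mode': 'none'}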
# Substitute all template variable (wrapped with {{ }}) and generate `yaml` string.
k8s_yaml = generate_kernel_pod_yaml(keywords)
# For each k8s object (kind), call the appropriate API method. Too bad there isn't a method
# that can take a set of objects.
#
# Creation for additional kinds of k8s objects can be added below. Refer to
# https://github.com/kubernetes-client/python for API signatures. Other examples can be found in
# https://github.com/jupyter-incubator/enterprise_gateway/blob/master/enterprise_gateway/services/processproxies/k8s.py
#
kernel_namespace = keywords['kernel_namespace']
k8s_objs = yaml.safe_load_all(k8s_yaml)
for k8s_obj in k8s_objs:
if k8s_obj.get('kind'):
if k8s_obj['kind'] == 'Pod':
#print("{}".format(k8s_obj)) # useful for debug
client.CoreV1Api(client.ApiClient()).create_namespaced_pod(body=k8s_obj, namespace=kernel_namespace)
elif k8s_obj['kind'] == 'Secret':
client.CoreV1Api(client.ApiClient()).create_namespaced_secret(body=k8s_obj, namespace=kernel_namespace)
elif k8s_obj['kind'] == 'PersistentVolumeClaim':
client.CoreV1Api(client.ApiClient()).create_namespaced_persistent_volume_claim(
body=k8s_obj, namespace=kernel_namespace)
elif k8s_obj['kind'] == 'PersistentVolume':
client.CoreV1Api(client.ApiClient()).create_persistent_volume(body=k8s_obj)
else:
sys.exit("ERROR - Unhandled Kubernetes object kind '{}' found in yaml file - kernel launch terminating!".
format(k8s_obj['kind']))
else:
sys.exit("ERROR - Unknown Kubernetes object '{}' found in yaml file - kernel launch terminating!".
format(k8s_obj))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--RemoteProcessProxy.kernel-id', dest='kernel_id', nargs='?',
help='Indicates the id associated with the launched kernel.')
parser.add_argument('--RemoteProcessProxy.port-range', dest='port_range', nargs='?',
metavar='<lowerPort>..<upperPort>', help='Port range to impose for kernel ports')
parser.add_argument('--RemoteProcessProxy.response-address', dest='response_address', nargs='?',
metavar='<ip>:<port>', help='Connection address (<ip>:<port>) for returning connection file')
parser.add_argument('--RemoteProcessProxy.spark-context-initialization-mode', dest='spark_context_init_mode',
nargs='?', help='Indicates whether or how a spark context should be created',
default='none')
arguments = vars(parser.parse_args())
kernel_id = arguments['kernel_id']
port_range = arguments['port_range']
response_addr = arguments['response_address']
spark_context_init_mode = arguments['spark_context_init_mode']
launch_kubernetes_kernel(kernel_id, port_range, response_addr, spark_context_init_mode)
| 49.242718 | 123 | 0.699921 | [
"Apache-2.0"
] | spotinst/wave-operator | tools/kernelspecs/kernels/R_kubernetes/scripts/launch_kubernetes.py | 5,072 | Python |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.cloud.datacatalog.v1beta1",
manifest={
"TableSourceType",
"BigQueryTableSpec",
"ViewSpec",
"TableSpec",
"BigQueryDateShardedSpec",
},
)
class TableSourceType(proto.Enum):
r"""Table source type."""
TABLE_SOURCE_TYPE_UNSPECIFIED = 0
BIGQUERY_VIEW = 2
BIGQUERY_TABLE = 5
class BigQueryTableSpec(proto.Message):
r"""Describes a BigQuery table.
Attributes:
table_source_type (google.cloud.datacatalog_v1beta1.types.TableSourceType):
Output only. The table source type.
view_spec (google.cloud.datacatalog_v1beta1.types.ViewSpec):
Table view specification. This field should only be
populated if ``table_source_type`` is ``BIGQUERY_VIEW``.
table_spec (google.cloud.datacatalog_v1beta1.types.TableSpec):
Spec of a BigQuery table. This field should only be
populated if ``table_source_type`` is ``BIGQUERY_TABLE``.
"""
table_source_type = proto.Field(proto.ENUM, number=1, enum="TableSourceType",)
view_spec = proto.Field(
proto.MESSAGE, number=2, oneof="type_spec", message="ViewSpec",
)
table_spec = proto.Field(
proto.MESSAGE, number=3, oneof="type_spec", message="TableSpec",
)
class ViewSpec(proto.Message):
r"""Table view specification.
Attributes:
view_query (str):
Output only. The query that defines the table
view.
"""
view_query = proto.Field(proto.STRING, number=1,)
class TableSpec(proto.Message):
r"""Normal BigQuery table spec.
Attributes:
grouped_entry (str):
Output only. If the table is a dated shard, i.e., with name
pattern ``[prefix]YYYYMMDD``, ``grouped_entry`` is the Data
Catalog resource name of the date sharded grouped entry, for
example,
``projects/{project_id}/locations/{location}/entrygroups/{entry_group_id}/entries/{entry_id}``.
Otherwise, ``grouped_entry`` is empty.
"""
grouped_entry = proto.Field(proto.STRING, number=1,)
class BigQueryDateShardedSpec(proto.Message):
r"""Spec for a group of BigQuery tables with name pattern
``[prefix]YYYYMMDD``. Context:
https://cloud.google.com/bigquery/docs/partitioned-tables#partitioning_versus_sharding
Attributes:
dataset (str):
Output only. The Data Catalog resource name of the dataset
entry the current table belongs to, for example,
``projects/{project_id}/locations/{location}/entrygroups/{entry_group_id}/entries/{entry_id}``.
table_prefix (str):
Output only. The table name prefix of the shards. The name
of any given shard is ``[table_prefix]YYYYMMDD``, for
example, for shard ``MyTable20180101``, the ``table_prefix``
is ``MyTable``.
shard_count (int):
Output only. Total number of shards.
"""
dataset = proto.Field(proto.STRING, number=1,)
table_prefix = proto.Field(proto.STRING, number=2,)
shard_count = proto.Field(proto.INT64, number=3,)
__all__ = tuple(sorted(__protobuf__.manifest))
| 34.756757 | 107 | 0.667963 | [
"Apache-2.0"
] | steffnay/python-datacatalog | google/cloud/datacatalog_v1beta1/types/table_spec.py | 3,858 | Python |
# coding=utf-8
from __future__ import print_function
import json
from data_packer import err, DataPacker, container
g_src = {
'a': 1,
'b': 'hello',
'c': ['a', 'b', 'c'],
'd': {
'1': 1,
'2': 2,
},
'e': {
'1': ['a', 'b'],
'2': {
'a': 'a',
'b': 'b'
}
},
'f': '0x123',
'g': 'longlonglonglonglong',
'h': 2,
}
def valid_container(c):
if isinstance(c, dict):
c = container.DictContainer(c)
else:
        raise TypeError('dst must be dict or DictContainer')
return c
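# demo_run wraps the plain-dict src/dst in DictContainer, runs the given fields
# through DataPacker against the module-level g_src sample, prints any
# DataPackerError instead of propagating it, and dumps dst as indented JSON.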
def demo_run(fields, msg, dst=None, src=None):
print('')
print(msg)
if src is None:
src = g_src
if dst is None:
dst = {}
src = valid_container(src)
dst = valid_container(dst)
dp = DataPacker(fields)
try:
dp.run(src, dst)
except err.DataPackerError as e:
        print('An exception was raised: ', type(e), e)
print(json.dumps(dst.raw_data(), indent=4))
return dst
| 17.118644 | 60 | 0.50297 | [
"MIT"
] | ideascf/data-packer | example/demo/common.py | 1,020 | Python |
"""
Django settings for workstation project.
Generated by 'django-admin startproject' using Django 4.0.1.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/4.0/ref/settings/
"""
from dotenv import load_dotenv
from pathlib import Path
import os
load_dotenv()
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/4.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = str(os.getenv("SECRET_KEY"))
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"rest_framework",
"account",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
REST_FRAMEWORK = {
"DEFAULT_AUTHENTICATION_CLASSES": (
"rest_framework_simplejwt.authentication.JWTAuthentication",
),
"DEFAULT_RENDERER_CLASSES": ("rest_framework.renderers.JSONRenderer",),
}
ROOT_URLCONF = "workstation.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "workstation.wsgi.application"
# Database
# https://docs.djangoproject.com/en/4.0/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": BASE_DIR / "db.sqlite3",
}
}
# Password validation
# https://docs.djangoproject.com/en/4.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/4.0/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/4.0/howto/static-files/
STATIC_URL = "static/"
# Default primary key field type
# https://docs.djangoproject.com/en/4.0/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"
| 25.75 | 91 | 0.705026 | [
"MIT"
] | cindy21td/WorkStation | workstation-backend/workstation/settings.py | 3,502 | Python |
from __future__ import annotations
import re
import pkg_resources
VERSION_FORMAT = re.compile('([0-9]+)\\.([0-9]+)\\.([0-9]+)')
class Version(object):
def __init__(self, major: int, minor: int, revision: int):
self.major = major
self.minor = minor
self.revision = revision
def __str__(self) -> str:
return f'{self.major}.{self.minor}.{self.revision}'
def is_compatible(self):
if self.minor < 3:
return False
return True
@staticmethod
def current():
return Version.parse(version_string())
@staticmethod
def parse(version: str) -> Version:
if re.match(VERSION_FORMAT, version) is None:
raise ValueError(f'Illegal version string {version}')
major, minor, rev = map(lambda v: int(v), version.split('.'))
return Version(major, minor, rev)
def version_string():
return pkg_resources.require("cowait")[0].version
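# Usage sketch (illustrative): parse and inspect a version string.
#   v = Version.parse("0.4.12")
#   print(v)                  # "0.4.12"
#   print(v.is_compatible())  # True, since minor >= 3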
| 26.416667 | 69 | 0.624606 | [
"Apache-2.0"
] | ProgHaj/cowait | cowait/utils/version.py | 951 | Python |
"""
Django settings for BlogProject project.
Generated by 'django-admin startproject' using Django 1.11.20.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import sys
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Add the apps directory to the import path
sys.path.insert(0,os.path.join(BASE_DIR, 'apps'))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'l-r2=6&(#p$f1qn$xzk6vce99ojk1nit&x7l_hqi9%&u$f#am&'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'apps.base',
'apps.article',
'apps.comment'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'BlogProject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
                'base.context_processors.site_info' # site basic info context processor
],
},
},
]
WSGI_APPLICATION = 'BlogProject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = False
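# With USE_TZ = False, datetimes are stored as naive local times in the Asia/Shanghai zone.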
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static')]
# (Development) Media files: directory where uploaded images are saved
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
| 25.911111 | 91 | 0.69697 | [
"MIT"
] | lwpdzq/BlogProject | BlogProject/settings.py | 3,558 | Python |
import numpy as np
import time
import matplotlib.pyplot as plt
from MuellerBrownPotential import MuellerBrownPotential
from LogExpOfHarmonicWellsPotential import LogExpOfHarmonicWellsPotential
from MonteCarloSimulator import MonteCarloSimulator
from MetadynamicsBias import MetadynamicsBias
T = 1.0
NumMCmoves = 10000
kB = 1.0
kBT = kB * T
initialHeight = (kBT/2.0)
sigma = [ 0.2,0.2 ]
pace = 1
biasfactor = 10.0
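# Metadynamics bias settings (descriptive note): Gaussian hills of width `sigma` and
# initial height kBT/2 are deposited every `pace` MC moves; a biasfactor > 1 selects
# well-tempered metadynamics, where the hill height decays as bias accumulates.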
# potential = LogExpOfHarmonicWellsPotential()
potential = MuellerBrownPotential()
MetaD = MetadynamicsBias(
Temperature = T,
Sigma = sigma,
InitialHeight = initialHeight,
Pace = pace,
Biasfactor = biasfactor
)
MCsim = MonteCarloSimulator(
potentialClass=potential,
Temperature = T,
externalBiasClass = MetaD
)
MCsim.resetRun()
MCsim.setPosition( potential.getMinima()[0] )
MCsim.runMC(NumMCmoves)
print(' ')
MCsim.printAverageAcceptence()
print(' ')
MCsim.printTrajectoryMeanAndStddev()
print(' ')
MCsim.plotPotentialAndTrajectory()
MCsim.plotTrajectoryTimeSeries()
MCsim.plotTrajectoryHistogramAndFES()
| 23.791667 | 73 | 0.718914 | [
"MIT"
] | valsson/MD-MC-Codes-2016 | Particle-On-Potential-MC-sampling/Run_MuellerBrownPotential-WithMetaD.py | 1,142 | Python |
"""
Test command line commands.
"""
from pathlib import Path
from subprocess import PIPE, Popen
__author__ = "Sergey Vartanov"
__email__ = "[email protected]"
from xml.etree import ElementTree
from xml.etree.ElementTree import Element
from map_machine.ui.cli import COMMAND_LINES
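# Expected stderr output shared by the rendering commands under test.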
LOG: bytes = (
b"INFO Constructing ways...\n"
b"INFO Constructing nodes...\n"
b"INFO Drawing ways...\n"
b"INFO Drawing main icons...\n"
b"INFO Drawing extra icons...\n"
b"INFO Drawing texts...\n"
)
def error_run(arguments: list[str], message: bytes) -> None:
"""Run command that should fail and check error message."""
with Popen(["map-machine"] + arguments, stderr=PIPE) as pipe:
_, error = pipe.communicate()
assert pipe.returncode != 0
assert error == message
def run(arguments: list[str], message: bytes) -> None:
    """Run command that should succeed and check its stderr output."""
with Popen(["map-machine"] + arguments, stderr=PIPE) as pipe:
_, error = pipe.communicate()
assert pipe.returncode == 0
assert error == message
def test_wrong_render_arguments() -> None:
"""Test `render` command with wrong arguments."""
error_run(
["render", "-z", "17"],
b"CRITICAL Specify either --input, or --boundary-box, or --coordinates "
b"and --size.\n",
)
def test_render() -> None:
"""Test `render` command."""
run(
COMMAND_LINES["render"] + ["--cache", "tests/data"],
LOG + b"INFO Writing output SVG to out/map.svg...\n",
)
with Path("out/map.svg").open(encoding="utf-8") as output_file:
root: Element = ElementTree.parse(output_file).getroot()
    # 8 expected elements: `defs`, `rect` (background), `g` (outline),
    # `g` (icon), and 4 `text` elements (credits).
assert len(root) == 8
assert len(root[3][0]) == 0
assert root.get("width") == "186.0"
assert root.get("height") == "198.0"
def test_render_with_tooltips() -> None:
"""Test `render` command."""
run(
COMMAND_LINES["render_with_tooltips"] + ["--cache", "tests/data"],
LOG + b"INFO Writing output SVG to out/map.svg...\n",
)
with Path("out/map.svg").open(encoding="utf-8") as output_file:
root: Element = ElementTree.parse(output_file).getroot()
    # 8 expected elements: `defs`, `rect` (background), `g` (outline),
    # `g` (icon), and 4 `text` elements (credits).
assert len(root) == 8
assert len(root[3][0]) == 1
assert root[3][0][0].text == "natural: tree"
assert root.get("width") == "186.0"
assert root.get("height") == "198.0"
def test_icons() -> None:
"""Test `icons` command."""
run(
COMMAND_LINES["icons"],
b"INFO Icons are written to out/icons_by_name and out/icons_by_id.\n"
b"INFO Icon grid is written to out/icon_grid.svg.\n"
b"INFO Icon grid is written to doc/grid.svg.\n",
)
assert (Path("out") / "icon_grid.svg").is_file()
assert (Path("out") / "icons_by_name").is_dir()
assert (Path("out") / "icons_by_id").is_dir()
assert (Path("out") / "icons_by_name" / "Röntgen apple.svg").is_file()
assert (Path("out") / "icons_by_id" / "apple.svg").is_file()
def test_mapcss() -> None:
"""Test `mapcss` command."""
run(
COMMAND_LINES["mapcss"],
b"INFO MapCSS 0.2 scheme is written to out/map_machine_mapcss.\n",
)
assert (Path("out") / "map_machine_mapcss").is_dir()
assert (Path("out") / "map_machine_mapcss" / "icons").is_dir()
assert (
Path("out") / "map_machine_mapcss" / "icons" / "apple.svg"
).is_file()
assert (Path("out") / "map_machine_mapcss" / "map_machine.mapcss").is_file()
def test_element() -> None:
"""Test `element` command."""
run(
COMMAND_LINES["element"],
b"INFO Element is written to out/element.svg.\n",
)
assert (Path("out") / "element.svg").is_file()
def test_tile() -> None:
"""Test `tile` command."""
run(
COMMAND_LINES["tile"] + ["--cache", "tests/data"],
LOG + b"INFO Tile is drawn to out/tiles/tile_18_160199_88904.svg.\n"
b"INFO SVG file is rasterized to out/tiles/tile_18_160199_88904.png.\n",
)
assert (Path("out") / "tiles" / "tile_18_160199_88904.svg").is_file()
assert (Path("out") / "tiles" / "tile_18_160199_88904.png").is_file()
| 32.274074 | 80 | 0.614184 | [
"MIT"
] | LaoshuBaby/map-machine | tests/test_command_line.py | 4,358 | Python |
from colab_ssh.utils.packages.installer import create_deb_installer
from colab_ssh.utils.ui.render_html import render_template
from subprocess import Popen, PIPE
import shlex
from colab_ssh._command import run_command, run_with_pipe
import os
import time
from colab_ssh.get_tunnel_config import get_argo_tunnel_config
from .utils.expose_env_variable import expose_env_variable
import importlib
import sys
import signal
deb_install = create_deb_installer()
def launch_ssh_cloudflared(
password="",
verbose=False,
prevent_interrupt=False,
kill_other_processes=False):
# Kill any cloudflared process if running
if kill_other_processes:
os.system("kill -9 $(ps aux | grep 'cloudflared' | awk '{print $2}')")
# Download cloudflared
if not os.path.isfile("cloudflared"):
run_command(
"wget -q -nc https://bin.equinox.io/c/VdrWdbjqyF/cloudflared-stable-linux-amd64.tgz")
run_command("tar zxf cloudflared-stable-linux-amd64.tgz")
else:
if verbose:
print("DEBUG: Skipping cloudflared installation")
# Install the openssh server
deb_install("openssh-server", verbose=verbose)
# Set the password
run_with_pipe("echo root:{} | chpasswd".format(password))
# Configure the openSSH server
run_command("mkdir -p /var/run/sshd")
os.system("echo 'PermitRootLogin yes' >> /etc/ssh/sshd_config")
if password:
os.system('echo "PasswordAuthentication yes" >> /etc/ssh/sshd_config')
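    # Expose Colab runtime environment variables (GPU/TPU settings, credentials address) to SSH sessions.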
expose_env_variable("LD_LIBRARY_PATH")
expose_env_variable("COLAB_TPU_ADDR")
expose_env_variable("COLAB_GPU")
expose_env_variable("TBE_CREDS_ADDR")
expose_env_variable("TF_FORCE_GPU_ALLOW_GROWTH")
expose_env_variable("TPU_NAME")
expose_env_variable("XRT_TPU_CONFIG")
os.system('service ssh start')
extra_params = []
info = None
# Prepare the cloudflared command
popen_command = f'./cloudflared tunnel --url ssh://localhost:22 --logfile ./cloudflared.log --metrics localhost:45678 {" ".join(extra_params)}'
preexec_fn = None
if prevent_interrupt:
popen_command = 'nohup ' + popen_command
preexec_fn = os.setpgrp
popen_command = shlex.split(popen_command)
# Initial sleep time
sleep_time = 2.0
# Create tunnel and retry if failed
for i in range(10):
proc = Popen(popen_command, stdout=PIPE, preexec_fn=preexec_fn)
if verbose:
print(f"DEBUG: Cloudflared process: PID={proc.pid}")
time.sleep(sleep_time)
try:
info = get_argo_tunnel_config()
break
except Exception as e:
os.kill(proc.pid, signal.SIGKILL)
if verbose:
print(f"DEBUG: Exception: {e.args[0]}")
print(f"DEBUG: Killing {proc.pid}. Retrying...")
# Increase the sleep time and try again
sleep_time *= 1.5
if verbose:
print("DEBUG:", info)
if info:
return info
else:
print(proc.stdout.readlines())
raise Exception(
"It looks like something went wrong, please make sure your token is valid")
proc.stdout.close()
| 32.806122 | 147 | 0.670295 | [
"MIT"
] | CharleoY/colab-ssh | colab_ssh/launch_ssh_cloudflared.py | 3,215 | Python |
import logging
from typing import Optional
from openstack_internal.nova.hypervisor_details import OSHypervisor
from topology.link import Link
from topology.node import Node
LOG = logging.getLogger(__name__)
class Server(Node):
def __init__(self, int_id: int, hypervisor: OSHypervisor):
super().__init__(int_id=int_id, _id=hypervisor.get_id(), name=hypervisor.get_name(), is_switch=False)
print(f"Server Name: {self.name}")
self.cpu = hypervisor.get_available_vcpus()
self.hdd = hypervisor.get_available_disk_gb()
self.ram = hypervisor.get_available_ram_mb()
        self.in_links: Optional[Link] = None
        self.out_links: Optional[Link] = None
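        # Note: despite the plural names, each server currently stores at most one incoming and one outgoing link.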
def add_in_link(self, link: Link):
self.in_links = link
def add_out_link(self, link: Link):
self.out_links = link
| 29.888889 | 109 | 0.705081 | [
"Apache-2.0"
] | kukkalli/orchestrator | app/topology/server.py | 807 | Python |
import json
from girder.constants import AccessType
from girder_client import HttpError
import pytest
from .conftest import getClient, getTestFolder, localDataRoot, users, wait_for_jobs
@pytest.mark.integration
@pytest.mark.parametrize("user", users.values())
@pytest.mark.run(order=3)
def test_reset_integration_env(user: dict):
client = getClient(user['login'])
privateFolder = getTestFolder(client)
client.delete(f"folder/{privateFolder['_id']}")
@pytest.mark.integration
@pytest.mark.parametrize("user", users.values())
@pytest.mark.run(order=4)
def test_upload_user_data(user: dict):
client = getClient(user['login'])
createdDatasets = []
for dataset in user['data']:
dsPath = localDataRoot / str(dataset['path'])
privateFolder = getTestFolder(client)
newDatasetFolder = client.createFolder(
privateFolder['_id'],
dataset['name'],
metadata={
'fps': dataset['fps'],
'type': dataset['type'],
},
)
createdDatasets.append(newDatasetFolder)
# Validate the fileset
filenames = [file.name for file in dsPath.iterdir()]
valid = client.post('dive_dataset/validate_files', json=filenames)
assert valid['ok'], 'File validation failed'
for file in dsPath.iterdir():
if file.is_file():
client.uploadFileToFolder(newDatasetFolder['_id'], str(file))
client.post(f'dive_rpc/postprocess/{newDatasetFolder["_id"]}')
if dataset.get('sharedWith', False):
me = client.get('user/me')
otherClient = getClient(dataset['sharedWith'])
otherUser = otherClient.get('user/me')
with pytest.raises(HttpError):
otherClient.get(f'dive_dataset/{newDatasetFolder["_id"]}')
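            # Grant the other user read-only access so the shared dataset becomes visible to them.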
client.put(
f'folder/{newDatasetFolder["_id"]}/access',
data={
'public': False,
'recurse': False,
'progress': False,
'access': json.dumps(
{
'users': [
{'id': me['_id'], 'level': AccessType.ADMIN, 'flags': []},
{'id': otherUser['_id'], 'level': AccessType.READ, 'flags': []},
],
'groups': [],
}
),
},
)
assert (
otherClient.get(
f'dive_dataset/{newDatasetFolder["_id"]}', jsonResp=False
).status_code
== 200
)
wait_for_jobs(client)
# Confirm that the new dataset looks like it should.
for created, expected in zip(createdDatasets, user['data']):
created = client.get(f'dive_dataset/{created["_id"]}')
if expected['type'] == 'video':
assert created['fps'] == expected['originalFps'] or created['fps'] == expected['fps']
assert created['annotate']
assert created['originalFps'] == expected['originalFps']
| 38.180723 | 97 | 0.546229 | [
"Apache-2.0"
] | maxpark/dive | server/tests/integration/test_dataset_upload.py | 3,169 | Python |
import Celula
class Labirinto:
def __init__(self, num_rows, num_columns, order_to_check):
        # Order in which the neighbors will be checked
        self.order_to_check = order_to_check
        # Number of rows in the grid
        self.num_rows = num_rows
        # Number of columns in the grid
        self.num_columns = num_columns
        self.grid = []
        # Fill the grid with one distinct empty cell per position
        for i in range(self.num_columns):
            self.grid.append([Celula.Celula(0) for x in range(self.num_rows)])
    # Print the grid
def __str__(self):
grid_as_string = ""
for i in range(self.num_columns):
for j in range(self.num_rows):
grid_as_string += f"{self.grid[i][j].get_value()} "
grid_as_string += "\n"
return grid_as_string
    # Insert a cell with value cell_value at position [pos_y][pos_x]
def insert(self, cell_value, pos_y, pos_x):
self.grid[pos_y][pos_x] = Celula.Celula(cell_value)
        # Quick way of handling IndexError since I don't want to spend too much time on this code
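        # Note: negative indices (e.g. pos_y-1 == -1 on the top border) wrap around in Python instead of raising IndexError, so the top and left edges are not fully guarded here.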
try:
            # Check whether there is a cell above
if self.grid[pos_y-1][pos_x].get_value() != 0:
self.grid[pos_y][pos_x].set_up(self.grid[pos_y-1][pos_x])
self.grid[pos_y-1][pos_x].set_down(self.grid[pos_y][pos_x])
except IndexError:
pass
try:
            # Check whether there is a cell below
if self.grid[pos_y+1][pos_x].get_value() != 0:
self.grid[pos_y][pos_x].set_down(self.grid[pos_y+1][pos_x])
self.grid[pos_y+1][pos_x].set_up(self.grid[pos_y][pos_x])
except IndexError:
pass
try:
            # Check whether there is a cell to the left
            if self.grid[pos_y][pos_x-1].get_value() != 0:
                self.grid[pos_y][pos_x].set_left(self.grid[pos_y][pos_x-1])
                self.grid[pos_y][pos_x-1].set_right(self.grid[pos_y][pos_x])
except IndexError:
pass
try:
            # Check whether there is a cell to the right
            if self.grid[pos_y][pos_x+1].get_value() != 0:
                self.grid[pos_y][pos_x].set_right(self.grid[pos_y][pos_x+1])
                self.grid[pos_y][pos_x+1].set_left(self.grid[pos_y][pos_x])
except IndexError:
pass
def find_path(self, pos_x, pos_y):
self.grid[pos_y][pos_x].visited = True
        # If this is the exit, print its position!
        if self.grid[pos_y][pos_x].value == 2:
            print(f"Exit found at position [{pos_x}][{pos_y}]!")
        # Check the neighbors in the order given to the constructor
for i in self.order_to_check:
            # If there is a cell above and it is neither None nor zero,
            # recurse into that position, since it is a path!
if i == "up" and self.grid[pos_y][pos_x].up != None and self.grid[pos_y][pos_x].up != 0:
if not self.grid[pos_y][pos_x].up.visited:
self.find_path(pos_x, pos_y-1)
            # If there is a cell to the left and it is neither None nor zero,
            # recurse into that position, since it is a path!
if i == "left" and self.grid[pos_y][pos_x].left != None and self.grid[pos_y][pos_x].left != 0:
if not self.grid[pos_y][pos_x].left.visited:
self.find_path(pos_x-1, pos_y)
            # If there is a cell below and it is neither None nor zero,
            # recurse into that position, since it is a path!
if i == "down" and self.grid[pos_y][pos_x].down != None and self.grid[pos_y][pos_x].down != 0:
if not self.grid[pos_y][pos_x].down.visited:
self.find_path(pos_x, pos_y+1)
            # If there is a cell to the right and it is neither None nor zero,
            # recurse into that position, since it is a path!
if i == "right" and self.grid[pos_y][pos_x].right != None and self.grid[pos_y][pos_x].right != 0:
if not self.grid[pos_y][pos_x].right.visited:
self.find_path(pos_x+1, pos_y)
| 38.339286 | 126 | 0.581742 | [
"MIT"
] | RafaelAmauri/Projeto-e-Analise-de-Algoritmos | Trabalho 02/Resolucao/code/backtracking/Labirinto.py | 4,294 | Python |
import json
import logging
import os
from typing import Optional, List
from checkov.common.checks_infra.registry import get_graph_checks_registry
from checkov.common.graph.graph_builder.graph_components.attribute_names import CustomAttributes
from checkov.common.output.record import Record
from checkov.common.output.report import Report, CheckType
from checkov.common.runners.base_runner import filter_ignored_paths
from checkov.runner_filter import RunnerFilter
from checkov.terraform.checks.resource.registry import resource_registry
from checkov.terraform.context_parsers.registry import parser_registry
from checkov.terraform.plan_parser import parse_tf_plan
from checkov.terraform.runner import Runner as TerraformRunner, merge_reports
class Runner(TerraformRunner):
check_type = CheckType.TERRAFORM_PLAN
def __init__(self):
super().__init__()
self.template_lines = {}
self.graph_registry = get_graph_checks_registry(super().check_type)
block_type_registries = {
'resource': resource_registry,
}
def run(
self,
root_folder: Optional[str] = None,
external_checks_dir: Optional[List[str]] = None,
files: Optional[List[str]] = None,
runner_filter: RunnerFilter = RunnerFilter(),
collect_skip_comments: bool = True
) -> Report:
report = Report(self.check_type)
self.tf_definitions = {}
parsing_errors = {}
if external_checks_dir:
for directory in external_checks_dir:
resource_registry.load_external_checks(directory)
self.graph_registry.load_external_checks(directory)
if root_folder:
files = [] if not files else files
for root, d_names, f_names in os.walk(root_folder):
filter_ignored_paths(root, d_names, runner_filter.excluded_paths)
filter_ignored_paths(root, f_names, runner_filter.excluded_paths)
for file in f_names:
file_ending = os.path.splitext(file)[1]
if file_ending == '.json':
try:
with open(f'{root}/{file}') as f:
content = json.load(f)
if isinstance(content, dict) and content.get('terraform_version'):
files.append(os.path.join(root, file))
except Exception as e:
logging.debug(f'Failed to load json file {root}/{file}, skipping')
logging.debug('Failure message:')
logging.debug(e, stack_info=True)
if files:
files = [os.path.abspath(file) for file in files]
for file in files:
if file.endswith(".json"):
tf_definitions, template_lines = parse_tf_plan(file)
if not tf_definitions:
continue
self.tf_definitions = tf_definitions
self.template_lines = template_lines
self.check_tf_definition(report, runner_filter)
else:
logging.debug(f'Failed to load {file} as is not a .json file, skipping')
report.add_parsing_errors(parsing_errors.keys())
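        # After the per-resource scan, build the graph representation of the plan and run the graph-based checks on it.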
if self.tf_definitions:
graph = self.graph_manager.build_graph_from_definitions(self.tf_definitions, render_variables=False)
self.graph_manager.save_graph(graph)
graph_report = self.get_graph_checks_report(root_folder, runner_filter)
merge_reports(report, graph_report)
return report
def get_entity_context_and_evaluations(self, entity):
raw_context = self.get_entity_context(entity[CustomAttributes.BLOCK_NAME].split("."), entity[CustomAttributes.FILE_PATH])
raw_context['definition_path'] = entity[CustomAttributes.BLOCK_NAME].split('.')
return raw_context, None
def check_tf_definition(self, report, runner_filter):
for full_file_path, definition in self.tf_definitions.items():
scanned_file = f"/{os.path.relpath(full_file_path)}"
logging.debug(f"Scanning file: {scanned_file}")
for block_type in definition.keys():
if block_type in self.block_type_registries.keys():
self.run_block(definition[block_type], full_file_path, report, scanned_file,
block_type, runner_filter)
def run_block(self, entities, full_file_path, report, scanned_file, block_type, runner_filter=None):
registry = self.block_type_registries[block_type]
if registry:
for entity in entities:
context_parser = parser_registry.context_parsers[block_type]
definition_path = context_parser.get_entity_context_path(entity)
entity_id = ".".join(definition_path)
# Entity can exist only once per dir, for file as well
entity_context = self.get_entity_context(definition_path, full_file_path)
entity_lines_range = [entity_context.get('start_line'), entity_context.get('end_line')]
entity_code_lines = entity_context.get('code_lines')
entity_address = entity_context.get('address')
results = registry.scan(scanned_file, entity, [], runner_filter)
for check, check_result in results.items():
record = Record(check_id=check.id, bc_check_id=check.bc_id, check_name=check.name, check_result=check_result,
code_block=entity_code_lines, file_path=scanned_file,
file_line_range=entity_lines_range,
resource=entity_id, resource_address=entity_address, evaluations=None,
check_class=check.__class__.__module__, file_abs_path=full_file_path)
record.set_guideline(check.guideline)
report.add_record(record=record)
def get_entity_context(self, definition_path, full_file_path):
entity_context = {}
if full_file_path not in self.tf_definitions:
logging.debug(f'Tried to look up file {full_file_path} in TF plan entity definitions, but it does not exist')
return entity_context
for resource in self.tf_definitions.get(full_file_path, {}).get('resource', []):
resource_type = definition_path[0]
if resource_type in resource.keys():
resource_name = definition_path[1]
if resource_name in resource[resource_type].keys():
                    resource_definition = resource[resource_type][resource_name]
                    entity_context['start_line'] = resource_definition['start_line'][0]
                    entity_context['end_line'] = resource_definition['end_line'][0]
                    entity_context['code_lines'] = self.template_lines[
                        entity_context['start_line']:entity_context['end_line']]
                    entity_context['address'] = resource_definition['__address__']
return entity_context
return entity_context
| 50.116438 | 129 | 0.632636 | [
"Apache-2.0"
] | BenjaDiaz/checkov | checkov/terraform/plan_runner.py | 7,317 | Python |
import os
import logging
import json
from typing import Union, Dict, List
from documentstore_migracao.utils.isis2json import isis2json
logger = logging.getLogger(__name__)
class OutputContainer:
    """File-like object that mimics writing to a file by appending to a list
    structure. Each line written to the "file" becomes one entry in the list."""
def __init__(self):
self._lines = []
def write(self, string: str) -> None:
try:
_string = json.loads(string)
except Exception:
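            # Ignore lines that are not valid JSON; only parseable records are collected.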
pass
else:
self._lines.append(_string)
def close(self):
pass
@property
def lines(self):
return self._lines
def create_output_dir(path):
output_dir = "/".join(path.split("/")[:-1])
if not os.path.exists(output_dir):
logger.debug("Creating folder: %s", output_dir)
os.makedirs(output_dir)
def run(path: str, output_file: str = "", mongo=False) -> Union[None, List[dict]]:
    """Invoke the `isis2json` utility with parameters adapted for reading
    MST files, following the default settings used by the `isis2json`
    tool's __main__.
    The output can either be written directly to a file on disk or be
    returned as a list containing the lines that could be converted to
    JSON.
    Example:
    >>> run("file.mst")
    >>> [{"mfn": 1}, {"mfn": 2}]
    >>> run("file.mst", output_file="/tmp/output.json")
    >>> None
    """
if not os.path.exists(path):
        raise FileNotFoundError("File '%s' does not exist." % path)
if len(output_file) > 0:
output_file = open(output_file, "wb")
else:
output_file = OutputContainer()
isis2json.writeJsonArray(
iterRecords=isis2json.iterMstRecords,
file_name=path,
output=output_file,
qty=isis2json.DEFAULT_QTY,
skip=0,
id_tag=0,
gen_uuid=False,
mongo=mongo,
mfn=True,
isis_json_type=3,
prefix="v",
constant="",
)
output_file.close()
if isinstance(output_file, OutputContainer):
return output_file.lines
| 25.104651 | 82 | 0.633164 | [
"BSD-2-Clause"
] | patymori/document-store-migracao | documentstore_migracao/utils/extract_isis.py | 2,168 | Python |
#/usr/bin/env python
from paraview.simple import *
import sys
wavelet1 = Wavelet()
wavelet2 = Wavelet()
pythonCalculator1 = PythonCalculator(Input=wavelet2)
pythonCalculator1.ArrayName = 'RTData'
pythonCalculator1.Expression = 'RTData+200'
pythonCalculator1.CopyArrays = 0
# this one should be ignored in the output since it has a different
# number of points and cells than the first one
sphereSource = Sphere()
appendAttributes1 = AppendAttributes(Input=[wavelet1, sphereSource, pythonCalculator1])
appendAttributes1.UpdatePipeline()
if appendAttributes1.PointData.GetNumberOfArrays() != 2:
    # should have RTData and RTData_input_2
print("ERROR: wrong number of arrays ", appendAttributes1.PointData.GetNumberOfArrays())
sys.exit(1)
arrayRange = appendAttributes1.PointData['RTData'].GetRange()
if arrayRange[0] < 37 or arrayRange[0] > 38 or arrayRange[1] < 276 or arrayRange[1] > 277:
print("ERROR: RTData has wrong array range ", arrayRange)
sys.exit(1)
arrayRange = appendAttributes1.PointData['RTData_input_2'].GetRange()
if arrayRange[0] < 237 or arrayRange[0] > 238 or arrayRange[1] < 476 or arrayRange[1] > 477:
print("ERROR: RTData_input_2 has wrong array range ", arrayRange)
sys.exit(1)
# now try with the can.ex2 exodus file for multiblock testing
for i, arg in enumerate(sys.argv):
if arg == "-D" and i+1 < len(sys.argv):
dataFile = sys.argv[i+1] + '/Testing/Data/can.ex2'
canex2 = ExodusIIReader(FileName=[dataFile])
canex2.ElementVariables = ['EQPS']
canex2.PointVariables = ['DISPL', 'VEL', 'ACCL']
canex2.GlobalVariables = ['KE', 'XMOM', 'YMOM', 'ZMOM', 'NSTEPS', 'TMSTEP']
calculator1 = Calculator(Input=canex2)
calculator1.AttributeType = 'Point Data'
calculator1.CoordinateResults = 0
calculator1.ResultNormals = 0
calculator1.ResultTCoords = 0
calculator1.ReplaceInvalidResults = 1
calculator1.ReplacementValue = 0.0
calculator1.ResultArrayName = 'VEL_X'
calculator1.Function = 'VEL_X+100'
appendAttributes2 = AppendAttributes(Input=[canex2, calculator1])
appendAttributes2.UpdatePipeline()
print("success")
| 35.862069 | 92 | 0.755288 | [
"Apache-2.0",
"BSD-3-Clause"
] | ChristianWitzler/ParaView | Applications/ParaView/Testing/Python/AppendAttributes.py | 2,080 | Python |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['HostGroupAccountUserGroupAttachmentArgs', 'HostGroupAccountUserGroupAttachment']
@pulumi.input_type
class HostGroupAccountUserGroupAttachmentArgs:
def __init__(__self__, *,
host_account_names: pulumi.Input[Sequence[pulumi.Input[str]]],
host_group_id: pulumi.Input[str],
instance_id: pulumi.Input[str],
user_group_id: pulumi.Input[str]):
"""
The set of arguments for constructing a HostGroupAccountUserGroupAttachment resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] host_account_names: A list names of the host account.
:param pulumi.Input[str] host_group_id: The ID of the host group.
:param pulumi.Input[str] instance_id: The ID of the Bastionhost instance where you want to authorize the user to manage the specified hosts and host accounts.
:param pulumi.Input[str] user_group_id: The ID of the user group that you want to authorize to manage the specified hosts and host accounts.
"""
pulumi.set(__self__, "host_account_names", host_account_names)
pulumi.set(__self__, "host_group_id", host_group_id)
pulumi.set(__self__, "instance_id", instance_id)
pulumi.set(__self__, "user_group_id", user_group_id)
@property
@pulumi.getter(name="hostAccountNames")
def host_account_names(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
"""
A list names of the host account.
"""
return pulumi.get(self, "host_account_names")
@host_account_names.setter
def host_account_names(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "host_account_names", value)
@property
@pulumi.getter(name="hostGroupId")
def host_group_id(self) -> pulumi.Input[str]:
"""
The ID of the host group.
"""
return pulumi.get(self, "host_group_id")
@host_group_id.setter
def host_group_id(self, value: pulumi.Input[str]):
pulumi.set(self, "host_group_id", value)
@property
@pulumi.getter(name="instanceId")
def instance_id(self) -> pulumi.Input[str]:
"""
The ID of the Bastionhost instance where you want to authorize the user to manage the specified hosts and host accounts.
"""
return pulumi.get(self, "instance_id")
@instance_id.setter
def instance_id(self, value: pulumi.Input[str]):
pulumi.set(self, "instance_id", value)
@property
@pulumi.getter(name="userGroupId")
def user_group_id(self) -> pulumi.Input[str]:
"""
The ID of the user group that you want to authorize to manage the specified hosts and host accounts.
"""
return pulumi.get(self, "user_group_id")
@user_group_id.setter
def user_group_id(self, value: pulumi.Input[str]):
pulumi.set(self, "user_group_id", value)
@pulumi.input_type
class _HostGroupAccountUserGroupAttachmentState:
def __init__(__self__, *,
host_account_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
host_group_id: Optional[pulumi.Input[str]] = None,
instance_id: Optional[pulumi.Input[str]] = None,
user_group_id: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering HostGroupAccountUserGroupAttachment resources.
:param pulumi.Input[Sequence[pulumi.Input[str]]] host_account_names: A list names of the host account.
:param pulumi.Input[str] host_group_id: The ID of the host group.
:param pulumi.Input[str] instance_id: The ID of the Bastionhost instance where you want to authorize the user to manage the specified hosts and host accounts.
:param pulumi.Input[str] user_group_id: The ID of the user group that you want to authorize to manage the specified hosts and host accounts.
"""
if host_account_names is not None:
pulumi.set(__self__, "host_account_names", host_account_names)
if host_group_id is not None:
pulumi.set(__self__, "host_group_id", host_group_id)
if instance_id is not None:
pulumi.set(__self__, "instance_id", instance_id)
if user_group_id is not None:
pulumi.set(__self__, "user_group_id", user_group_id)
@property
@pulumi.getter(name="hostAccountNames")
def host_account_names(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list names of the host account.
"""
return pulumi.get(self, "host_account_names")
@host_account_names.setter
def host_account_names(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "host_account_names", value)
@property
@pulumi.getter(name="hostGroupId")
def host_group_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the host group.
"""
return pulumi.get(self, "host_group_id")
@host_group_id.setter
def host_group_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host_group_id", value)
@property
@pulumi.getter(name="instanceId")
def instance_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the Bastionhost instance where you want to authorize the user to manage the specified hosts and host accounts.
"""
return pulumi.get(self, "instance_id")
@instance_id.setter
def instance_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "instance_id", value)
@property
@pulumi.getter(name="userGroupId")
def user_group_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the user group that you want to authorize to manage the specified hosts and host accounts.
"""
return pulumi.get(self, "user_group_id")
@user_group_id.setter
def user_group_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "user_group_id", value)
class HostGroupAccountUserGroupAttachment(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
host_account_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
host_group_id: Optional[pulumi.Input[str]] = None,
instance_id: Optional[pulumi.Input[str]] = None,
user_group_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
        Provides a Bastion Host Host Account Attachment resource to add a list of host accounts into one user group and one host group.
> **NOTE:** Available in v1.135.0+.
## Example Usage
Basic Usage
```python
import pulumi
import pulumi_alicloud as alicloud
default_host = alicloud.bastionhost.Host("defaultHost",
instance_id="bastionhost-cn-tl3xxxxxxx",
host_name=var["name"],
active_address_type="Private",
host_private_address="172.16.0.10",
os_type="Linux",
source="Local")
default_host_account = []
for range in [{"value": i} for i in range(0, 3)]:
default_host_account.append(alicloud.bastionhost.HostAccount(f"defaultHostAccount-{range['value']}",
instance_id=default_host.instance_id,
host_account_name=f"example_value-{range['value']}",
host_id=default_host.host_id,
protocol_name="SSH",
password="YourPassword12345"))
default_user_group = alicloud.bastionhost.UserGroup("defaultUserGroup",
instance_id=default_host.instance_id,
user_group_name="my-local-user")
default_host_group = alicloud.bastionhost.HostGroup("defaultHostGroup",
host_group_name="example_value",
instance_id="bastionhost-cn-tl3xxxxxxx")
default_host_group_account_user_group_attachment = alicloud.bastionhost.HostGroupAccountUserGroupAttachment("defaultHostGroupAccountUserGroupAttachment",
instance_id=default_host.instance_id,
user_group_id=default_user_group.user_group_id,
host_group_id=default_host_group.host_group_id,
host_account_names=[__item.host_account_name for __item in default_host_account])
```
## Import
Bastion Host Host Account can be imported using the id, e.g.
```sh
$ pulumi import alicloud:bastionhost/hostGroupAccountUserGroupAttachment:HostGroupAccountUserGroupAttachment example <instance_id>:<user_group_id>:<host_group_id>
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] host_account_names: A list names of the host account.
:param pulumi.Input[str] host_group_id: The ID of the host group.
:param pulumi.Input[str] instance_id: The ID of the Bastionhost instance where you want to authorize the user to manage the specified hosts and host accounts.
:param pulumi.Input[str] user_group_id: The ID of the user group that you want to authorize to manage the specified hosts and host accounts.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: HostGroupAccountUserGroupAttachmentArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
        Provides a Bastion Host Host Account Attachment resource to add a list of host accounts into one user group and one host group.
> **NOTE:** Available in v1.135.0+.
## Example Usage
Basic Usage
```python
import pulumi
import pulumi_alicloud as alicloud
default_host = alicloud.bastionhost.Host("defaultHost",
instance_id="bastionhost-cn-tl3xxxxxxx",
host_name=var["name"],
active_address_type="Private",
host_private_address="172.16.0.10",
os_type="Linux",
source="Local")
default_host_account = []
for range in [{"value": i} for i in range(0, 3)]:
default_host_account.append(alicloud.bastionhost.HostAccount(f"defaultHostAccount-{range['value']}",
instance_id=default_host.instance_id,
host_account_name=f"example_value-{range['value']}",
host_id=default_host.host_id,
protocol_name="SSH",
password="YourPassword12345"))
default_user_group = alicloud.bastionhost.UserGroup("defaultUserGroup",
instance_id=default_host.instance_id,
user_group_name="my-local-user")
default_host_group = alicloud.bastionhost.HostGroup("defaultHostGroup",
host_group_name="example_value",
instance_id="bastionhost-cn-tl3xxxxxxx")
default_host_group_account_user_group_attachment = alicloud.bastionhost.HostGroupAccountUserGroupAttachment("defaultHostGroupAccountUserGroupAttachment",
instance_id=default_host.instance_id,
user_group_id=default_user_group.user_group_id,
host_group_id=default_host_group.host_group_id,
host_account_names=[__item.host_account_name for __item in default_host_account])
```
## Import
Bastion Host Host Account can be imported using the id, e.g.
```sh
$ pulumi import alicloud:bastionhost/hostGroupAccountUserGroupAttachment:HostGroupAccountUserGroupAttachment example <instance_id>:<user_group_id>:<host_group_id>
```
:param str resource_name: The name of the resource.
:param HostGroupAccountUserGroupAttachmentArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(HostGroupAccountUserGroupAttachmentArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
host_account_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
host_group_id: Optional[pulumi.Input[str]] = None,
instance_id: Optional[pulumi.Input[str]] = None,
user_group_id: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = HostGroupAccountUserGroupAttachmentArgs.__new__(HostGroupAccountUserGroupAttachmentArgs)
if host_account_names is None and not opts.urn:
raise TypeError("Missing required property 'host_account_names'")
__props__.__dict__["host_account_names"] = host_account_names
if host_group_id is None and not opts.urn:
raise TypeError("Missing required property 'host_group_id'")
__props__.__dict__["host_group_id"] = host_group_id
if instance_id is None and not opts.urn:
raise TypeError("Missing required property 'instance_id'")
__props__.__dict__["instance_id"] = instance_id
if user_group_id is None and not opts.urn:
raise TypeError("Missing required property 'user_group_id'")
__props__.__dict__["user_group_id"] = user_group_id
super(HostGroupAccountUserGroupAttachment, __self__).__init__(
'alicloud:bastionhost/hostGroupAccountUserGroupAttachment:HostGroupAccountUserGroupAttachment',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
host_account_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
host_group_id: Optional[pulumi.Input[str]] = None,
instance_id: Optional[pulumi.Input[str]] = None,
user_group_id: Optional[pulumi.Input[str]] = None) -> 'HostGroupAccountUserGroupAttachment':
"""
Get an existing HostGroupAccountUserGroupAttachment resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] host_account_names: A list names of the host account.
:param pulumi.Input[str] host_group_id: The ID of the host group.
:param pulumi.Input[str] instance_id: The ID of the Bastionhost instance where you want to authorize the user to manage the specified hosts and host accounts.
:param pulumi.Input[str] user_group_id: The ID of the user group that you want to authorize to manage the specified hosts and host accounts.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _HostGroupAccountUserGroupAttachmentState.__new__(_HostGroupAccountUserGroupAttachmentState)
__props__.__dict__["host_account_names"] = host_account_names
__props__.__dict__["host_group_id"] = host_group_id
__props__.__dict__["instance_id"] = instance_id
__props__.__dict__["user_group_id"] = user_group_id
return HostGroupAccountUserGroupAttachment(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="hostAccountNames")
def host_account_names(self) -> pulumi.Output[Sequence[str]]:
"""
A list names of the host account.
"""
return pulumi.get(self, "host_account_names")
@property
@pulumi.getter(name="hostGroupId")
def host_group_id(self) -> pulumi.Output[str]:
"""
The ID of the host group.
"""
return pulumi.get(self, "host_group_id")
@property
@pulumi.getter(name="instanceId")
def instance_id(self) -> pulumi.Output[str]:
"""
The ID of the Bastionhost instance where you want to authorize the user to manage the specified hosts and host accounts.
"""
return pulumi.get(self, "instance_id")
@property
@pulumi.getter(name="userGroupId")
def user_group_id(self) -> pulumi.Output[str]:
"""
The ID of the user group that you want to authorize to manage the specified hosts and host accounts.
"""
return pulumi.get(self, "user_group_id")
| 46.496104 | 171 | 0.669572 | [
"ECL-2.0",
"Apache-2.0"
] | pulumi/pulumi-alicloud | sdk/python/pulumi_alicloud/bastionhost/host_group_account_user_group_attachment.py | 17,901 | Python |
# [email protected]
import numpy as np
# Return the angles in the plane of the sky given angles with respect
# to the vertical for observations on the limb (in degrees!)
def absolute_to_sky(thetaB, chiB):
thetaB = np.deg2rad(thetaB)
chiB = np.deg2rad(chiB)
t1 = np.sin(thetaB) * np.sin(chiB)
t2 = -np.cos(thetaB)
t3 = np.sin(thetaB) * np.cos(chiB)
thetaSky = np.arccos(t3)
sinthSky = np.sqrt(1.e0 - t3**2)
sinChiSky = t1 / sinthSky
cosChiSky = t2 / sinthSky
# Test for the quadrant
chiSky_preliminary = np.arccos(cosChiSky)
if (np.sign(sinChiSky) > 0.e0):
chiSky = chiSky_preliminary
else:
chiSky = -chiSky_preliminary
return [np.rad2deg(thetaSky), np.rad2deg(chiSky)]
# Return the angles in the vertical system given angles in the
# plane of the sky for observations on the limb (in degrees!)
def sky_to_absolute(thetaSky, chiSky):
thetaSky = np.deg2rad(thetaSky)
chiSky = np.deg2rad(chiSky)
t1 = np.sin(thetaSky) * np.sin(chiSky)
t2 = np.cos(thetaSky)
t3 = -np.sin(thetaSky) * np.cos(chiSky)
thetaB = np.arccos(t3)
sinthB = np.sqrt(1.e0 - t3**2)
sinChiB = t1 / sinthB
cosChiB = t2 / sinthB
# Test for the quadrant
chiB_preliminary = np.arccos(cosChiB)
if (np.sign(sinChiB) > 0.e0):
chiB = chiB_preliminary
else:
chiB = -chiB_preliminary
return [np.rad2deg(thetaB), np.rad2deg(chiB)]
# Return the angles in the plane of the sky given angles with respect
# to the vertical for observations at angle theta (in degrees!)
def absolute_to_sky_general(theta, thetaB, chiB):
theta = np.deg2rad(theta)
thetaB = np.deg2rad(thetaB)
chiB = np.deg2rad(chiB)
cosThetaSky = np.cos(theta) * np.cos(thetaB) + \
np.sin(theta) * np.sin(thetaB) * np.cos(chiB)
sinThetaSky = np.sqrt(1.e0 - cosThetaSky**2)
thetaSky = np.arccos(cosThetaSky)
cosChiSky = (np.cos(theta) * np.sin(thetaB) * np.cos(chiB) -
np.cos(thetaB) * np.sin(theta)) / sinThetaSky
sinChiSky = (np.sin(thetaB) * np.sin(chiB)) / sinThetaSky
# Test for the quadrant
chiSky_preliminary = np.arccos(cosChiSky)
if (np.sign(sinChiSky) > 0.e0):
chiSky = chiSky_preliminary
else:
chiSky = -chiSky_preliminary
return [np.rad2deg(thetaSky), np.rad2deg(chiSky)]
# Return the angles in the vertical system given angles in the
# plane of the sky for observations at angle theta (in degrees!)
def sky_to_absolute_general(theta, thetaSky, chiSky):
theta = np.deg2rad(theta)
thetaSky = np.deg2rad(thetaSky)
chiSky = np.deg2rad(chiSky)
cosThetaB = np.cos(theta) * np.cos(thetaSky) - \
np.sin(theta) * np.sin(thetaSky) * np.cos(chiSky)
sinThetaB = np.sqrt(1.e0 - cosThetaB**2)
thetaB = np.arccos(cosThetaB)
cosChiB = (np.cos(theta) * np.sin(thetaSky) * np.cos(chiSky) +
np.cos(thetaSky) * np.sin(theta)) / sinThetaB
sinChiB = (np.sin(thetaSky) * np.sin(chiSky)) / sinThetaB
# Test for the quadrant
chiB_preliminary = np.arccos(cosChiB)
if (np.sign(sinChiB) > 0.e0):
chiB = chiB_preliminary
else:
chiB = -chiB_preliminary
return [np.rad2deg(thetaB), np.rad2deg(chiB)]
if __name__ == '__main__':
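    # Illustrative round-trip check (added example, not part of the original
    # script): converting vertical-frame angles to the sky frame and back
    # should recover the original angles.
    theta_obs, thetaB, chiB = 60.0, 30.0, 40.0
    thetaSky, chiSky = absolute_to_sky_general(theta_obs, thetaB, chiB)
    print(sky_to_absolute_general(theta_obs, thetaSky, chiSky), [thetaB, chiB])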
pass
| 28.754386 | 69 | 0.655583 | [
"MIT"
] | aasensio/hazel | pyRoutines/angle_transformation.py | 3,278 | Python |
# encoding: utf-8
import os
import os.path
from pkg_resources import parse_version
# Avoid problem releasing to pypi from vagrant
if os.environ.get('USER', '') == 'vagrant':
del os.link
try:
from setuptools import (setup, find_packages,
__version__ as setuptools_version)
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import (setup, find_packages,
__version__ as setuptools_version)
from ckan import (__version__, __description__, __long_description__,
__license__)
#
# Check setuptools version
#
HERE = os.path.dirname(__file__)
with open(os.path.join(HERE, 'requirement-setuptools.txt')) as f:
setuptools_requirement = f.read().strip()
min_setuptools_version = parse_version(setuptools_requirement.split('==')[1])
if parse_version(setuptools_version) < min_setuptools_version:
raise AssertionError(
'setuptools version error\n'
'You need a newer version of setuptools.\n'
'Install the recommended version:\n'
' pip install -r requirement-setuptools.txt\n'
'and then try again to install ckan into your python environment.'
)
entry_points = {
'paste.app_factory': [
'main = ckan.config.middleware:make_app',
],
'paste.app_install': [
'main = ckan.config.install:CKANInstaller',
],
'console_scripts': [
'ckan = ckan.cli.cli:ckan',
],
'ckan.click_command': [
'datastore = ckanext.datastore.cli:datastore',
'datapusher = ckanext.datapusher.cli:datapusher',
],
'paste.paster_create_template': [
'ckanext = ckan.pastertemplates:CkanextTemplate',
],
'ckan.forms': [
'standard = ckan.forms.package:get_standard_fieldset',
'package = ckan.forms.package:get_standard_fieldset',
'group = ckan.forms.group:get_group_fieldset',
'package_group = ckan.forms.group:get_package_group_fieldset',
],
'ckan.search': [
'sql = ckan.lib.search.sql:SqlSearchBackend',
'solr = ckan.lib.search.solr_backend:SolrSearchBackend',
],
'ckan.plugins': [
'synchronous_search = ckan.lib.search:SynchronousSearchPlugin',
'stats = ckanext.stats.plugin:StatsPlugin',
'publisher_form = ckanext.publisher_form.forms:PublisherForm',
'publisher_dataset_form = ckanext.publisher_form.forms:PublisherDatasetForm',
'multilingual_dataset = ckanext.multilingual.plugin:MultilingualDataset',
'multilingual_group = ckanext.multilingual.plugin:MultilingualGroup',
'multilingual_tag = ckanext.multilingual.plugin:MultilingualTag',
'multilingual_resource = ckanext.multilingual.plugin:MultilingualResource',
'organizations = ckanext.organizations.forms:OrganizationForm',
'organizations_dataset = ckanext.organizations.forms:OrganizationDatasetForm',
'expire_api_token = ckanext.expire_api_token.plugin:ExpireApiTokenPlugin',
'chained_functions = ckanext.chained_functions.plugin:ChainedFunctionsPlugin',
'datastore = ckanext.datastore.plugin:DatastorePlugin',
'datapusher=ckanext.datapusher.plugin:DatapusherPlugin',
'test_tag_vocab_plugin = ckanext.test_tag_vocab_plugin:MockVocabTagsPlugin',
'resource_proxy = ckanext.resourceproxy.plugin:ResourceProxy',
'text_view = ckanext.textview.plugin:TextView',
'recline_view = ckanext.reclineview.plugin:ReclineView',
'recline_grid_view = ckanext.reclineview.plugin:ReclineGridView',
'recline_graph_view = ckanext.reclineview.plugin:ReclineGraphView',
'recline_map_view = ckanext.reclineview.plugin:ReclineMapView',
'datatables_view = ckanext.datatablesview.plugin:DataTablesView',
'image_view = ckanext.imageview.plugin:ImageView',
'audio_view = ckanext.audioview.plugin:AudioView',
'video_view = ckanext.videoview.plugin:VideoView',
'webpage_view = ckanext.webpageview.plugin:WebPageView',
# FIXME: Remove deprecated resource previews below. You should use the
# versions as *_view instead.
'text_preview = ckanext.textview.plugin:TextView',
'recline_preview = ckanext.reclineview.plugin:ReclineView',
'recline_grid = ckanext.reclineview.plugin:ReclineGridView',
'recline_graph = ckanext.reclineview.plugin:ReclineGraphView',
'recline_map = ckanext.reclineview.plugin:ReclineMapView',
# End of deprecated previews
'example_itemplatehelpers = ckanext.example_itemplatehelpers.plugin:ExampleITemplateHelpersPlugin',
'example_idatasetform = ckanext.example_idatasetform.plugin:ExampleIDatasetFormPlugin',
'example_idatasetform_v1 = ckanext.example_idatasetform.plugin_v1:ExampleIDatasetFormPlugin',
'example_idatasetform_v2 = ckanext.example_idatasetform.plugin_v2:ExampleIDatasetFormPlugin',
'example_idatasetform_v3 = ckanext.example_idatasetform.plugin_v3:ExampleIDatasetFormPlugin',
'example_idatasetform_v4 = ckanext.example_idatasetform.plugin_v4:ExampleIDatasetFormPlugin',
'example_idatasetform_v5 = ckanext.example_idatasetform.plugin_v5:ExampleIDatasetFormPlugin',
'example_idatasetform_v6 = ckanext.example_idatasetform.plugin_v6:ExampleIDatasetFormPlugin',
'example_idatasetform_v7 = ckanext.example_idatasetform.plugin_v7:ExampleIDatasetFormPlugin',
'example_igroupform = ckanext.example_igroupform.plugin:ExampleIGroupFormPlugin',
'example_igroupform_v2 = ckanext.example_igroupform.plugin_v2:ExampleIGroupFormPlugin',
'example_igroupform_default_group_type = ckanext.example_igroupform.plugin:ExampleIGroupFormPlugin_DefaultGroupType',
'example_igroupform_organization = ckanext.example_igroupform.plugin:ExampleIGroupFormOrganizationPlugin',
'example_iauthfunctions_v1 = ckanext.example_iauthfunctions.plugin_v1:ExampleIAuthFunctionsPlugin',
'example_iauthfunctions_v2 = ckanext.example_iauthfunctions.plugin_v2:ExampleIAuthFunctionsPlugin',
'example_iauthfunctions_v3 = ckanext.example_iauthfunctions.plugin_v3:ExampleIAuthFunctionsPlugin',
'example_iauthfunctions_v4 = ckanext.example_iauthfunctions.plugin_v4:ExampleIAuthFunctionsPlugin',
'example_iauthfunctions_v5_custom_config_setting = ckanext.example_iauthfunctions.plugin_v5_custom_config_setting:ExampleIAuthFunctionsPlugin',
'example_iauthfunctions_v6_parent_auth_functions = ckanext.example_iauthfunctions.plugin_v6_parent_auth_functions:ExampleIAuthFunctionsPlugin',
'example_theme_v01_empty_extension = ckanext.example_theme_docs.v01_empty_extension.plugin:ExampleThemePlugin',
'example_theme_v02_empty_template = ckanext.example_theme_docs.v02_empty_template.plugin:ExampleThemePlugin',
'example_theme_v03_jinja = ckanext.example_theme_docs.v03_jinja.plugin:ExampleThemePlugin',
'example_theme_v04_ckan_extends = ckanext.example_theme_docs.v04_ckan_extends.plugin:ExampleThemePlugin',
'example_theme_v05_block = ckanext.example_theme_docs.v05_block.plugin:ExampleThemePlugin',
'example_theme_v06_super = ckanext.example_theme_docs.v06_super.plugin:ExampleThemePlugin',
'example_theme_v07_helper_function = ckanext.example_theme_docs.v07_helper_function.plugin:ExampleThemePlugin',
'example_theme_v08_custom_helper_function = ckanext.example_theme_docs.v08_custom_helper_function.plugin:ExampleThemePlugin',
'example_theme_v09_snippet = ckanext.example_theme_docs.v09_snippet.plugin:ExampleThemePlugin',
'example_theme_v10_custom_snippet = ckanext.example_theme_docs.v10_custom_snippet.plugin:ExampleThemePlugin',
'example_theme_v11_HTML_and_CSS = ckanext.example_theme_docs.v11_HTML_and_CSS.plugin:ExampleThemePlugin',
'example_theme_v12_extra_public_dir = ckanext.example_theme_docs.v12_extra_public_dir.plugin:ExampleThemePlugin',
'example_theme_v13_custom_css = ckanext.example_theme_docs.v13_custom_css.plugin:ExampleThemePlugin',
'example_theme_v14_more_custom_css = ckanext.example_theme_docs.v14_more_custom_css.plugin:ExampleThemePlugin',
'example_theme_v15_fanstatic = ckanext.example_theme_docs.v15_fanstatic.plugin:ExampleThemePlugin',
'example_theme_v16_initialize_a_javascript_module = ckanext.example_theme_docs.v16_initialize_a_javascript_module.plugin:ExampleThemePlugin',
'example_theme_v17_popover = ckanext.example_theme_docs.v17_popover.plugin:ExampleThemePlugin',
'example_theme_v18_snippet_api = ckanext.example_theme_docs.v18_snippet_api.plugin:ExampleThemePlugin',
'example_theme_v19_01_error = ckanext.example_theme_docs.v19_01_error.plugin:ExampleThemePlugin',
'example_theme_v19_02_error_handling = ckanext.example_theme_docs.v19_02_error_handling.plugin:ExampleThemePlugin',
'example_theme_v20_pubsub = ckanext.example_theme_docs.v20_pubsub.plugin:ExampleThemePlugin',
'example_theme_v21_custom_jquery_plugin = ckanext.example_theme_docs.v21_custom_jquery_plugin.plugin:ExampleThemePlugin',
'example_theme_v22_fanstatic_and_webassets = ckanext.example_theme_docs.v22_fanstatic_and_webassets.plugin:ExampleThemePlugin',
'example_theme_custom_config_setting = ckanext.example_theme_docs.custom_config_setting.plugin:ExampleThemePlugin',
'example_theme_custom_emails = ckanext.example_theme_docs.custom_emails.plugin:ExampleCustomEmailsPlugin',
'example_iresourcecontroller = ckanext.example_iresourcecontroller.plugin:ExampleIResourceControllerPlugin',
'example_ivalidators = ckanext.example_ivalidators.plugin:ExampleIValidatorsPlugin',
'example_iconfigurer = ckanext.example_iconfigurer.plugin:ExampleIConfigurerPlugin',
'example_itranslation = ckanext.example_itranslation.plugin:ExampleITranslationPlugin',
'example_iconfigurer_v1 = ckanext.example_iconfigurer.plugin_v1:ExampleIConfigurerPlugin',
'example_iconfigurer_v2 = ckanext.example_iconfigurer.plugin_v2:ExampleIConfigurerPlugin',
'example_flask_iblueprint = ckanext.example_flask_iblueprint.plugin:ExampleFlaskIBlueprintPlugin',
'example_flask_streaming = ckanext.example_flask_streaming.plugin:ExampleFlaskStreamingPlugin',
'example_iuploader = ckanext.example_iuploader.plugin:ExampleIUploader',
'example_idatastorebackend = ckanext.example_idatastorebackend.plugin:ExampleIDatastoreBackendPlugin',
'example_ipermissionlabels = ckanext.example_ipermissionlabels.plugin:ExampleIPermissionLabelsPlugin',
'example_iapitoken = ckanext.example_iapitoken.plugin:ExampleIApiTokenPlugin',
'example_iclick = ckanext.example_iclick.plugin:ExampleIClickPlugin',
'example_iauthenticator = ckanext.example_iauthenticator.plugin:ExampleIAuthenticatorPlugin',
],
'ckan.system_plugins': [
'domain_object_mods = ckan.model.modification:DomainObjectModificationExtension',
],
'ckan.test_plugins': [
'routes_plugin = tests.legacy.ckantestplugins:RoutesPlugin',
'mapper_plugin = tests.legacy.ckantestplugins:MapperPlugin',
'session_plugin = tests.legacy.ckantestplugins:SessionPlugin',
'mapper_plugin2 = tests.legacy.ckantestplugins:MapperPlugin2',
'authorizer_plugin = tests.legacy.ckantestplugins:AuthorizerPlugin',
'test_observer_plugin = tests.legacy.ckantestplugins:PluginObserverPlugin',
'action_plugin = tests.legacy.ckantestplugins:ActionPlugin',
'auth_plugin = tests.legacy.ckantestplugins:AuthPlugin',
'test_group_plugin = tests.legacy.ckantestplugins:MockGroupControllerPlugin',
'test_package_controller_plugin = tests.legacy.ckantestplugins:MockPackageControllerPlugin',
'test_resource_preview = tests.legacy.ckantestplugins:MockResourcePreviewExtension',
'test_json_resource_preview = tests.legacy.ckantestplugins:JsonMockResourcePreviewExtension',
'sample_datastore_plugin = ckanext.datastore.tests.sample_datastore_plugin:SampleDataStorePlugin',
'example_datastore_deleted_with_count_plugin = ckanext.datastore.tests.test_chained_action:ExampleDataStoreDeletedWithCountPlugin',
'example_data_store_search_sql_plugin = ckanext.datastore.tests.test_chained_auth_functions:ExampleDataStoreSearchSQLPlugin',
'example_external_provider_plugin = ckanext.datastore.tests.test_chained_auth_functions:ExampleExternalProviderPlugin',
'test_datastore_view = ckan.tests.lib.test_datapreview:MockDatastoreBasedResourceView',
'test_datapusher_plugin = ckanext.datapusher.tests.test_interfaces:FakeDataPusherPlugin',
'test_routing_plugin = ckan.tests.config.test_middleware:MockRoutingPlugin',
'test_flash_plugin = ckan.tests.config.test_sessions:FlashMessagePlugin',
'test_helpers_plugin = ckan.tests.lib.test_helpers:TestHelpersPlugin',
'test_feed_plugin = ckan.tests.controllers.test_feed:MockFeedPlugin',
'test_js_translations_plugin = ckan.tests.lib.test_i18n:TestJSTranslationsPlugin',
'legacy_mock_search_plugin = ckan.tests.legacy.logic.test_action:MockPackageSearchPlugin',
],
'babel.extractors': [
'ckan = ckan.lib.extract:extract_ckan',
],
}
extras_require = {}
_extras_groups = [
('requirements', 'requirements.txt'), ('requirements-py2', 'requirements-py2.txt'),
('setuptools', 'requirement-setuptools.txt'), ('dev', 'dev-requirements.txt'),
]
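# Read each requirements file into an extras_require group so the pinned dependencies can be installed as pip extras (e.g. "ckan[dev]").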
for group, filepath in _extras_groups:
with open(os.path.join(HERE, filepath), 'r') as f:
extras_require[group] = f.readlines()
setup(
name='ckan',
version=__version__,
author='https://github.com/ckan/ckan/graphs/contributors',
author_email='[email protected]',
license=__license__,
url='http://ckan.org/',
description=__description__,
keywords='data packaging component tool server',
long_description=__long_description__,
zip_safe=False,
include_package_data=True,
packages=find_packages(exclude=['ez_setup']),
# namespace_packages=['ckanext', 'ckanext.stats'],
message_extractors={
'ckan': [
('templates/importer/**', 'ignore', None),
('templates/**.html', 'ckan', None),
('templates/**.txt', 'ckan', None),
('templates_legacy/**.html', 'ckan', None),
('public/base/test/**', 'ignore', None),
('**.py', 'python', None),
('**.js', 'javascript', None),
],
'ckanext': [
('**.py', 'python', None),
('**.js', 'javascript', None),
('**.html', 'ckan', None),
('multilingual/solr/*.txt', 'ignore', None),
]
},
entry_points=entry_points,
# setup.py test command needs a TestSuite so does not work with py.test
# tests_require=[ 'py >= 0.8.0-alpha2' ]
extras_require=extras_require,
classifiers=[
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
)
| 60.603922 | 151 | 0.752103 | [
"Apache-2.0"
] | OCHA-DAP/hdx-ckan | setup.py | 15,454 | Python |
from __future__ import print_function
import random
import logging
import argparse
import grpc
import object_detection_pb2
import object_detection_pb2_grpc
BLOCK_SIZE = 40000
class ImageDataBlockRequestIterable(object):
def __init__(self, img_data):
self.data = img_data
self.pos = 0
def __iter__(self):
return self
def __next__(self):
data_block = self.data[self.pos:self.pos+BLOCK_SIZE]
if data_block:
request = object_detection_pb2.UploadImageRequest(
data_block = data_block
)
self.pos += BLOCK_SIZE
return request
else:
raise StopIteration
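# Illustrative sizing note (a sketch, not part of the protocol): with BLOCK_SIZE = 40000,
# a 100 kB image is streamed as three UploadImageRequest messages of 40000, 40000 and
# roughly 20000 bytes before the iterator raises StopIteration.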
class gRPCClient():
def __init__(self, server_address):
logging.basicConfig()
channel = grpc.insecure_channel(server_address)
self.stub = object_detection_pb2_grpc.ObjectDetectionStub(channel)
def detect(self, img_data):
if img_data:
data_block_iterable = ImageDataBlockRequestIterable(img_data)
try:
response = self.stub.detect(data_block_iterable)
return response
except grpc.RpcError as err:
print(err.details()) #pylint: disable=no-member
#print('{}, {}'.format(err.code().name, err.code().value())) #pylint: disable=no-member
else:
print('image data is none.')
def read_image(filename):
img_data = None
with open(filename, 'rb') as f:
img_data = f.read()
return img_data
# python darknet_model_client.py -a 127.0.0.1:7713 -f ../darknet/model-zoo/platen-switch/test/IMG_9256.JPG
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--server_address', type=str, help='server address 127.0.0.1:7713', default='[::]:7713')
parser.add_argument('-f', '--image_file', type=str, help='image file path.')
args = parser.parse_args()
if args.server_address and args.image_file:
img_data = read_image(args.image_file)
client = gRPCClient(args.server_address)
response = client.detect(img_data)
print(response)
else:
print("argument isn't none.")
| 27.62963 | 118 | 0.637623 | [
"MIT"
] | gouchicao/darknet-serving | darknet_model_client.py | 2,238 | Python |
# coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""dataset for bert."""
import collections
import math
import numpy as np
import oneflow as flow
from libai.data.data_utils import SentenceIndexedDataset
from libai.data.structures import DistTensorData, Instance
MaskedLmInstance = collections.namedtuple("MaskedLmInstance", ["index", "label"])
def is_start_piece(piece):
"""Check if the current word piece is the starting piece (BERT)."""
# When a word has been split into
    # WordPieces, the first token does not have any marker and any subsequent
# tokens are prefixed with ##. So whenever we see the ## token, we
# append it to the previous set of word indexes.
return not piece.startswith("##")
class BertDataset(flow.utils.data.Dataset):
"""Dataset containing sentence pairs for BERT training.
Each index corresponds to a randomly generated sentence pair.
Args:
tokenizer: Tokenizer to use.
data_prefix: Path to the training dataset.
indexed_dataset: Indexed dataset to use.
max_seq_length: Maximum length of the sequence. All values are padded to
this length. Defaults to 512.
mask_lm_prob: Probability to mask tokens. Defaults to 0.15.
short_seq_prob: Probability of producing a short sequence. Defaults to 0.0.
max_preds_per_seq: Maximum number of mask tokens in each sentence. Defaults to None.
seed: Seed for random number generator for reproducibility. Defaults to 1234.
binary_head: Specifies whether the underlying dataset
generates a pair of blocks along with a sentence_target or not.
Setting it to True assumes that the underlying dataset generates a
label for the pair of sentences which is surfaced as
sentence_target. Defaults to True.
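
    Example (an illustrative sketch only; ``my_tokenizer``, ``my_prefix`` and
    ``my_indexed_dataset`` are assumed placeholders, not objects provided by this module)::

        dataset = BertDataset(my_tokenizer, my_prefix, my_indexed_dataset,
                              max_seq_length=512, mask_lm_prob=0.15)
        sample = dataset[0]  # an Instance carrying input_ids, attention_mask,
                             # tokentype_ids, ns_labels, lm_labels and loss_mask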
"""
def __init__(
self,
tokenizer,
data_prefix,
indexed_dataset,
max_seq_length=512,
mask_lm_prob=0.15,
short_seq_prob=0.0,
max_preds_per_seq=None,
seed=1234,
binary_head=True,
):
self.seed = seed
self.mask_lm_prob = mask_lm_prob
self.max_seq_length = max_seq_length
self.short_seq_prob = short_seq_prob
self.binary_head = binary_head
if max_preds_per_seq is None:
max_preds_per_seq = math.ceil(max_seq_length * mask_lm_prob / 10) * 10
self.max_preds_per_seq = max_preds_per_seq
self.dataset = SentenceIndexedDataset(
data_prefix,
indexed_dataset,
max_seq_length=self.max_seq_length - 3,
short_seq_prob=self.short_seq_prob,
binary_head=self.binary_head,
)
self.tokenizer = tokenizer
self.vocab_id_list = list(tokenizer.get_vocab().values())
self.cls_id = tokenizer.cls_token_id
self.sep_id = tokenizer.sep_token_id
self.mask_id = tokenizer.mask_token_id
self.pad_id = tokenizer.pad_token_id
def __len__(self):
return len(self.dataset)
def __getitem__(self, idx):
# Note that this rng state should be numpy and not python since
# python randint is inclusive whereas the numpy one is exclusive.
np_rng = np.random.RandomState(seed=(self.seed + idx))
sents = self.dataset[idx]
if self.binary_head:
tokens_a, tokens_b, is_next_random = self.create_random_sentence_pair(sents, np_rng)
else:
tokens_a = []
for j in range(len(sents)):
tokens_a.extend(sents[j])
tokens_b = []
is_next_random = False
tokens_a, tokens_b = self.truncate_seq_pair(
tokens_a, tokens_b, self.max_seq_length - 3, np_rng
)
tokens, token_types = self.create_tokens_and_token_types(tokens_a, tokens_b)
tokens, masked_positions, masked_labels = self.create_masked_lm_predictions(tokens, np_rng)
(
tokens,
token_types,
labels,
padding_mask,
loss_mask,
) = self.pad_and_convert_to_tensor(tokens, token_types, masked_positions, masked_labels)
sample = Instance(
input_ids=DistTensorData(tokens),
attention_mask=DistTensorData(padding_mask),
tokentype_ids=DistTensorData(token_types),
ns_labels=DistTensorData(
flow.tensor(int(is_next_random), dtype=flow.long), placement_idx=-1
),
lm_labels=DistTensorData(labels, placement_idx=-1),
loss_mask=DistTensorData(loss_mask, placement_idx=-1),
)
return sample
def create_random_sentence_pair(self, sample, np_rng):
num_sentences = len(sample)
assert num_sentences > 1, "make sure each sample has at least two sentences."
a_end = 1
if num_sentences >= 3:
a_end = np_rng.randint(1, num_sentences)
tokens_a = []
for j in range(a_end):
tokens_a.extend(sample[j])
tokens_b = []
for j in range(a_end, num_sentences):
tokens_b.extend(sample[j])
is_next_random = False
if np_rng.random() < 0.5:
is_next_random = True
tokens_a, tokens_b = tokens_b, tokens_a
return tokens_a, tokens_b, is_next_random
def truncate_seq_pair(self, tokens_a, tokens_b, max_num_tokens, np_rng):
"""Truncate sequence pair to a maximum sequence length."""
len_a, len_b = len(tokens_a), len(tokens_b)
while True:
total_length = len_a + len_b
if total_length <= max_num_tokens:
break
if len_a > len_b:
trunc_tokens = tokens_a
len_a -= 1
else:
trunc_tokens = tokens_b
len_b -= 1
if np_rng.random() < 0.5:
trunc_tokens.pop(0) # remove the first element
else:
trunc_tokens.pop() # remove the last element
return tokens_a, tokens_b
def create_tokens_and_token_types(self, tokens_a, tokens_b):
"""Merge segments A and B, add [CLS] and [SEP] and build token types."""
tokens = [self.cls_id] + tokens_a + [self.sep_id]
token_types = [0] * (len(tokens_a) + 2)
if len(tokens_b) > 0:
tokens = tokens + tokens_b + [self.sep_id]
token_types = token_types + [1] * (len(tokens_b) + 1)
return tokens, token_types
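    # For instance (a sketch): with tokens_a = [a1, a2] and tokens_b = [b1], this yields
    # tokens = [cls_id, a1, a2, sep_id, b1, sep_id] and token_types = [0, 0, 0, 0, 1, 1].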
def mask_token(self, idx, tokens, np_rng):
"""
Helper function to mask `idx` token from `tokens` according to
section 3.3.1 of https://arxiv.org/pdf/1810.04805.pdf
"""
label = tokens[idx]
if np_rng.random() < 0.8:
new_label = self.mask_id
else:
if np_rng.random() < 0.5:
new_label = label
else:
new_label = np_rng.choice(self.vocab_id_list)
tokens[idx] = new_label
return label
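    # Worked breakdown of the rule implemented above (a sketch of the expected behaviour):
    #   probability 0.8             -> the token is replaced by [MASK]
    #   probability 0.2 * 0.5 = 0.1 -> the token is left unchanged
    #   probability 0.2 * 0.5 = 0.1 -> the token is replaced by a random vocab id
    # i.e. roughly the 80% / 10% / 10% split from section 3.3.1 of the BERT paper.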
def create_masked_lm_predictions(
self,
tokens,
np_rng,
max_ngrams=3,
do_whole_word_mask=True,
favor_longer_ngram=False,
geometric_dist=False,
):
"""Creates the predictions for the masked LM objective.
Note: Tokens here are vocab ids and not text tokens."""
cand_indexes = []
token_boundary = [0] * len(tokens)
new_tokens = []
for (i, token) in enumerate(tokens):
new_tokens.append(token % len(self.tokenizer))
if token == self.cls_id or token == self.sep_id:
token_boundary[i] = 1
continue
            # Whole Word Masking means that if we mask any wordpiece of an
            # original word, we mask all of the wordpieces corresponding to it.
#
# Note that Whole Word Masking does *not* change the training code
# at all -- we still predict each WordPiece independently, softmaxed
# over the entire vocabulary.
if (
do_whole_word_mask
and len(cand_indexes) >= 1
and not is_start_piece(self.tokenizer._convert_id_to_token(token))
):
cand_indexes[-1].append(i)
else:
cand_indexes.append([i])
if is_start_piece(self.tokenizer._convert_id_to_token(token)):
token_boundary[i] = 1
tokens = new_tokens
masked_positions = []
masked_labels = []
output_tokens = list(tokens)
if self.mask_lm_prob == 0:
return output_tokens, masked_positions, masked_labels
cand_indexes = []
for (i, token) in enumerate(tokens):
if token == self.cls_id or token == self.sep_id:
continue
            # Whole Word Masking means that if we mask any wordpiece of an
            # original word, we mask all of the wordpieces corresponding to it.
#
# Note that Whole Word Masking does *not* change the training code
# at all -- we still predict each WordPiece independently, softmaxed
# over the entire vocabulary.
if do_whole_word_mask and len(cand_indexes) >= 1 and token_boundary[i] == 0:
cand_indexes[-1].append(i)
else:
cand_indexes.append([i])
num_to_predict = min(
self.max_preds_per_seq, max(1, int(round(len(tokens) * self.mask_lm_prob)))
)
ngrams = np.arange(1, max_ngrams + 1, dtype=np.int64)
if not geometric_dist:
            # By default, we set the probabilities to favor shorter ngram sequences.
pvals = 1.0 / np.arange(1, max_ngrams + 1)
pvals /= pvals.sum(keepdims=True)
if favor_longer_ngram:
pvals = pvals[::-1]
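            # For example (a sketch), with max_ngrams=3 this gives pvals proportional to
            # [1, 1/2, 1/3], i.e. roughly [0.55, 0.27, 0.18] after normalisation
            # (reversed when favor_longer_ngram is True).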
ngram_indexes = []
for idx in range(len(cand_indexes)):
ngram_index = []
for n in ngrams:
ngram_index.append(cand_indexes[idx : idx + n])
ngram_indexes.append(ngram_index)
np_rng.shuffle(ngram_indexes)
masked_lms = []
covered_indexes = set()
for cand_index_set in ngram_indexes:
if len(masked_lms) >= num_to_predict:
break
if not cand_index_set:
continue
            # Skip the current pieces if they are covered by LM masking or previous ngrams.
for index_set in cand_index_set[0]:
for index in index_set:
if index in covered_indexes:
continue
if not geometric_dist:
n = np_rng.choice(
ngrams[: len(cand_index_set)],
p=pvals[: len(cand_index_set)]
/ pvals[: len(cand_index_set)].sum(keepdims=True),
)
else:
# Sampling "n" from the geometric distribution and clipping it to
# the max_ngrams. Using p=0.2 default from the SpanBERT paper
# https://arxiv.org/pdf/1907.10529.pdf (Sec 3.1)
n = min(np_rng.geometric(0.2), max_ngrams)
index_set = sum(cand_index_set[n - 1], [])
n -= 1
            # Repeatedly look for a candidate that does not exceed the
            # maximum number of predictions by trying shorter ngrams.
while len(masked_lms) + len(index_set) > num_to_predict:
if n == 0:
break
index_set = sum(cand_index_set[n - 1], [])
n -= 1
# If adding a whole-word mask would exceed the maximum number of
# predictions, then just skip this candidate.
if len(masked_lms) + len(index_set) > num_to_predict:
continue
is_any_index_covered = False
for index in index_set:
if index in covered_indexes:
is_any_index_covered = True
break
if is_any_index_covered:
continue
for index in index_set:
covered_indexes.add(index)
label = self.mask_token(index, output_tokens, np_rng)
masked_lms.append(MaskedLmInstance(index=index, label=label))
masked_lms = sorted(masked_lms, key=lambda x: x.index)
for p in masked_lms:
masked_positions.append(p.index)
masked_labels.append(p.label)
return output_tokens, masked_positions, masked_labels
def pad_and_convert_to_tensor(self, tokens, token_types, masked_positions, masked_labels):
"""Pad sequences and convert them to tensor."""
# check
num_tokens = len(tokens)
num_pad = self.max_seq_length - num_tokens
assert num_pad >= 0
assert len(token_types) == num_tokens
assert len(masked_positions) == len(masked_labels)
# tokens and token types
filler = [self.pad_id] * num_pad
tokens = flow.tensor(tokens + filler, dtype=flow.long)
token_types = flow.tensor(token_types + filler, dtype=flow.long)
# padding mask
padding_mask = flow.tensor([1] * num_tokens + [0] * num_pad, dtype=flow.long)
# labels and loss mask
labels = [-1] * self.max_seq_length
loss_mask = [0] * self.max_seq_length
for idx, label in zip(masked_positions, masked_labels):
assert idx < num_tokens
labels[idx] = label
loss_mask[idx] = 1
labels = flow.tensor(labels, dtype=flow.long)
loss_mask = flow.tensor(loss_mask, dtype=flow.long)
return tokens, token_types, labels, padding_mask, loss_mask
@property
def supports_prefetch(self):
return self.dataset.supports_prefetch
def prefetch(self, indices):
self.dataset.prefetch(indices)
| 36.804569 | 99 | 0.603614 | [
"Apache-2.0"
] | Oneflow-Inc/libai | libai/data/datasets/bert_dataset.py | 14,501 | Python |
from django.conf.urls import url, include
from django.contrib import admin
from .views import *
urlpatterns = [
url(r'^index$',index,name='index'),
url(r'^langs$',langs,name='langs'),
url(r'^newindex$',new_index),
url(r'^myindex/(\d+)$',
myindex_with_param,
name='myindex_with_param'),
url(r'^v1_index/(?P<p2>\d+)$',
myindex_with_param_v1,
name='myindex_with_param_v1'),
url(r'new_reverse',
new_reverse,
name = 'new_reverse'),
url(r'^home$',home,name='home')
] | 27.85 | 42 | 0.59246 | [
"Apache-2.0"
] | General-Coder/Django-Introduction | day04/app04/urls.py | 557 | Python |
from setuptools import setup
setup(
name='pusher',
version='0.8',
description='A Python library for sending messages to Pusher',
author='Pusher',
author_email='[email protected]',
url='http://pusher.com',
packages=['pusher'],
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Topic :: Internet :: WWW/HTTP",
],
keywords='pusher rest realtime websockets service',
license='MIT',
)
| 27.428571 | 66 | 0.616319 | [
"MIT"
] | tkhieu/pusher_client_python | setup.py | 576 | Python |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import TestCase
from mock import patch, call, Mock
from contextlib import nested
from managesf.tests import dummy_conf
from managesf.model.yamlbkd.resources.storyboard import StoryboardOps
class StoryboardOpsTest(TestCase):
def test_is_activated(self):
conf = dummy_conf()
s = StoryboardOps(conf, None)
project = {'issue-tracker': 'SFStoryboard'}
self.assertTrue(s.is_activated(**project))
project = {'issue-tracker': ''}
self.assertFalse(s.is_activated(**project))
conf.services.remove('SFStoryboard')
project = {'issue-tracker': 'SFStoryboard'}
self.assertFalse(s.is_activated(**project))
def test_extra_validation(self):
conf = dummy_conf()
s = StoryboardOps(conf, None)
project = {
'name': 'project1',
'source-repositories': ['repo1', 'repo2']
}
logs = s.extra_validations(**project)
self.assertTrue(len(logs) == 0)
project = {
'name': 'project2',
'source-repositories': ['repo', '-hjook']
}
logs = s.extra_validations(**project)
self.assertTrue('Minimal len is 5' in logs[0])
self.assertTrue('should match the RE' in logs[1])
def test_update_project(self):
class FakeItem(object):
def __init__(self, name, id):
self.name = name
self.id = id
conf = dummy_conf()
s = StoryboardOps(conf, None)
patches = [
patch('storyboardclient.v1.projects.ProjectsManager.get_all'),
patch('storyboardclient.v1.projects.ProjectsManager.update'),
patch('storyboardclient.v1.projects.ProjectsManager.create')]
with nested(*patches) as (get_all, update, create):
get_all.return_value = [FakeItem('project1', 1)]
s.update_project('project1', 'A desc')
self.assertTrue(get_all.called)
self.assertTrue(update.called)
self.assertFalse(create.called)
with nested(*patches) as (get_all, update, create):
get_all.return_value = [FakeItem('project1', 1)]
s.update_project('project2', 'A desc')
self.assertTrue(get_all.called)
self.assertFalse(update.called)
self.assertTrue(create.called)
def test_update_project_group(self):
class FakeItem(object):
def __init__(self, name, id):
self.name = name
self.id = id
conf = dummy_conf()
patches = [
patch('storyboardclient.v1.project_groups.'
'ProjectGroupsManager.get_all'),
patch('storyboardclient.v1.project_groups.'
'ProjectGroupsManager.create'),
patch.object(StoryboardOps, 'update_project'),
patch('storyboardclient.v1.project_groups.'
'ProjectGroupsManager.get'),
patch('storyboardclient.v1.project_groups.'
'ProjectGroupsManager.update'),
patch('storyboardclient.v1.projects.'
'ProjectsManager.get_all')]
with nested(*patches) as (get_all, create, update_project,
get, update, p_get_all):
new = {
'resources': {
'repos': {
'project1': {'description': 'A desc'},
'project2': {'description': 'A desc'}
}
}
}
s = StoryboardOps(conf, new)
get_all.return_value = [FakeItem('pg1', 1)]
fake_subprojects = [
FakeItem('project1', 1),
FakeItem('project2', 2)]
mput = Mock()
mdelete = Mock()
class fprojects():
def get_all(self):
return fake_subprojects
def put(self, id):
mput(id)
def delete(self, id):
mdelete(id)
class NestedProjects():
def __init__(self):
self.projects = fprojects()
get.return_value = NestedProjects()
update.return_value = NestedProjects()
p_get_all.return_value = fake_subprojects
# Here projects are already included in the project
# group so nothing will be added/removed in the project
# group. Just projects will be updated.
s.update_project_groups(
**{'name': 'pg1',
'source-repositories': ['project1', 'project2']})
self.assertFalse(mput.called)
self.assertFalse(mdelete.called)
self.assertTrue(len(update_project.mock_calls), 2)
# Here project1 and project2 are already included but
            # the resources project description only defines the
# project2 to be included. So we make sure the delete
# is called with id 1.
mput.reset_mock()
mdelete.reset_mock()
update_project.reset_mock()
s.update_project_groups(
**{'name': 'pg1',
'source-repositories': ['project2']})
self.assertFalse(mput.called)
self.assertTrue(mdelete.called)
self.assertListEqual(mdelete.call_args_list, [call(1)])
self.assertTrue(len(update_project.mock_calls), 1)
# Here only project1 is already included but
            # the resources project description defines the
# project1 and project2 to be included. So we make sure
# the put is called with id 2.
mput.reset_mock()
mdelete.reset_mock()
update_project.reset_mock()
fake_subprojects = [
FakeItem('project1', 1)]
s.update_project_groups(
**{'name': 'pg1',
'source-repositories': ['project1', 'project2']})
self.assertTrue(mput.called)
self.assertListEqual(mput.call_args_list, [call(2)])
self.assertFalse(mdelete.called)
self.assertTrue(len(update_project.mock_calls), 1)
# Here the project group does not exist. So we verify
            # it is created and provisioned with two projects
# included.
get_all.return_value = []
p_get_all.return_value = [
FakeItem('project1', 1),
FakeItem('project2', 2)]
fake_subprojects = []
get.return_value = NestedProjects()
update.return_value = NestedProjects()
mput.reset_mock()
mdelete.reset_mock()
update_project.reset_mock()
s.update_project_groups(
**{'name': 'pg1',
'source-repositories': ['project1', 'project2']})
self.assertTrue(create.called)
self.assertTrue(len(update_project.mock_calls), 2)
self.assertTrue(len(mput.mock_calls), 2)
self.assertFalse(mdelete.called)
def test_delete_project_group(self):
class FakeItem(object):
def __init__(self, name, id):
self.name = name
self.id = id
conf = dummy_conf()
patches = [
patch('storyboardclient.v1.project_groups.'
'ProjectGroupsManager.get_all'),
patch('storyboardclient.v1.project_groups.'
'ProjectGroupsManager.get'),
patch('storyboardclient.v1.project_groups.'
'ProjectGroupsManager.update'),
patch('storyboardclient.v1.project_groups.'
'ProjectGroupsManager.delete')]
with nested(*patches) as (get_all, get, update, delete):
s = StoryboardOps(conf, None)
get_all.return_value = [FakeItem('pg1', 3)]
mdelete = Mock()
fake_subprojects = [
FakeItem('project1', 1),
FakeItem('project2', 2)]
class fprojects():
def get_all(self):
return fake_subprojects
def delete(self, id):
mdelete(id)
class NestedProjects():
def __init__(self):
self.projects = fprojects()
get.return_value = NestedProjects()
update.return_value = NestedProjects()
s.delete_project_groups(**{'name': 'pg1'})
self.assertEqual(len(mdelete.call_args_list), 2)
self.assertIn(call(1), mdelete.call_args_list)
self.assertIn(call(2), mdelete.call_args_list)
self.assertListEqual(delete.call_args_list, [call(id=3)])
| 39.220833 | 75 | 0.564432 | [
"Apache-2.0"
] | enovance/managesf | managesf/tests/test_resources_storyboard.py | 9,413 | Python |
from __future__ import unicode_literals
default_app_config = 'mayan.apps.dependencies.apps.DependenciesApp'
| 27.25 | 67 | 0.862385 | [
"Apache-2.0"
] | BajacDev/Mayan-EDMS | mayan/apps/dependencies/__init__.py | 109 | Python |
import os.path
import time
from moler.config import load_config
from moler.device.device import DeviceFactory
from moler.util.moler_test import MolerTest
def outage_callback(device_name, ping_times):
MolerTest.info("Network outage on {}".format(device_name))
ping_times["lost_connection_time"] = time.time()
def ping_is_on_callback(ping_times):
MolerTest.info("Ping works")
if ping_times["lost_connection_time"] > 0: # ping operable AFTER any net loss
if ping_times["reconnection_time"] == 0:
ping_times["reconnection_time"] = time.time()
outage_time = ping_times["reconnection_time"] - ping_times["lost_connection_time"]
MolerTest.info("Network outage time is {}".format(outage_time))
if outage_time > 3:
MolerTest.error("Network outage duration exceeded threshold")
else:
MolerTest.info("Network outage duration is acceptable")
def test_network_outage():
load_config(config=os.path.abspath('config/my_devices.yml'))
unix1 = DeviceFactory.get_device(name='MyMachine1')
unix2 = DeviceFactory.get_device(name='MyMachine2')
#######################################################
# TEST GOAL: network outage should not exceed 3 seconds
#######################################################
# test setup
ping_times = {"lost_connection_time": 0,
"reconnection_time": 0}
# ensure network is up before running test
net_up = unix2.get_cmd(cmd_name="ifconfig", cmd_params={"options": "lo up"})
sudo_ensure_net_up = unix2.get_cmd(cmd_name="sudo", cmd_params={"password": "moler", "cmd_object": net_up})
sudo_ensure_net_up()
# run event observing "network down/up"
no_ping = unix1.get_event(event_name="ping_no_response", event_params={"till_occurs_times": 1})
no_ping.add_event_occurred_callback(callback=outage_callback,
callback_params={'device_name': 'MyMachine1',
'ping_times': ping_times})
no_ping.start()
ping_is_on = unix1.get_event(event_name="ping_response")
ping_is_on.add_event_occurred_callback(callback=ping_is_on_callback,
callback_params={'ping_times': ping_times})
ping_is_on.start()
# run test
ping = unix1.get_cmd(cmd_name="ping", cmd_params={"destination": "localhost", "options": "-O"})
ping.start(timeout=120)
time.sleep(3)
ifconfig_down = unix2.get_cmd(cmd_name="ifconfig", cmd_params={"options": "lo down"})
sudo_ifconfig_down = unix2.get_cmd(cmd_name="sudo", cmd_params={"password": "moler", "cmd_object": ifconfig_down})
sudo_ifconfig_down()
time.sleep(5)
ifconfig_up = unix2.get_cmd(cmd_name="ifconfig", cmd_params={"options": "lo up"})
sudo_ifconfig_up = unix2.get_cmd(cmd_name="sudo", cmd_params={"password": "moler", "cmd_object": ifconfig_up})
sudo_ifconfig_up()
time.sleep(3)
# test teardown
ping.cancel()
no_ping.cancel()
if __name__ == '__main__':
test_network_outage()
"""
copy this file into workshop1/network_outage.py
*** validate/assert network outage time - MolerTest.error() usage ***
1. run it
2. see logs - look for "Network outage duration"
3. note that the error shows up in the logs, yet the test still passes
   (we would expect an exception to make it fail)
4. try to decorate the test function with @MolerTest.raise_background_exceptions()
"""
| 38.6 | 118 | 0.658031 | [
"BSD-3-Clause"
] | AdamKlekowski/moler | trainings/workshop1/step13/network_outage.py | 3,474 | Python |
# -*- coding: utf-8 -*-
#@+leo-ver=5-thin
#@+node:ekr.20181028052650.1: * @file leowapp.py
#@@first
'''
This file is deprecated/obsolete. It may be removed soon.
leoflexx.py implements LeoWapp using flexx.
'''
#@+<< imports >>
#@+node:ekr.20181028052650.3: ** << imports >>
import leo.core.leoGlobals as g
import leo.core.leoFrame as leoFrame
import leo.core.leoGui as leoGui
import sys
try:
import websockets
assert websockets
except ImportError:
websockets = None
print('leowapp.py requires websockets')
print('>pip install websockets')
import xml.sax.saxutils as saxutils
#@-<< imports >>
#@+<< config >>
#@+node:ekr.20181029070405.1: ** << config >>
class Config:
# ip = g.app.config.getString("leowapp-ip") or '127.0.0.1'
# port = g.app.config.getInt("leowapp-port") or 8100
# timeout = g.app.config.getInt("leowapp-timeout") or 0
# if timeout > 0: timeout = timeout / 1000.0
ip = '127.0.0.1'
port = 5678
# port = 8100
timeout = 0
# Create a singleton instance.
# The initial values probably should not be changed.
config = Config()
#@-<< config >>
# browser_encoding = 'utf-8'
# To do: query browser: var x = document.characterSet;
#@+others
#@+node:ekr.20181030103048.2: ** escape
def escape(s):
'''
Do the standard xml escapes, and replace newlines and tabs.
'''
return saxutils.escape(s, {
'\n': '<br />',
'\t': ' ',
})
#@+node:ekr.20181028052650.5: ** init (leowapp.py)
def init():
'''Return True if the plugin has loaded successfully.'''
if not websockets:
return False
# ws_server hangs Leo!
# ws_server()
g.plugin_signon(__name__)
return True
#@+node:ekr.20181031162039.1: ** class BrowserGui (leoGui.LeoGui)
class BrowserGui(leoGui.NullGui):
#@+others
#@+node:ekr.20181031160042.1: *3* bg.__getattr__
def __getattr__ (self, attr):
        '''Handle a missing attribute.'''
if attr in (
'frameFactory',
'set_minibuffer_label',
):
# These are optional ivars.
raise AttributeError
return self.message(attr)
#@+node:ekr.20181031162620.1: *3* bg.__init__
def __init__(self):
g.trace('===== (BrowserGui)')
leoGui.NullGui.__init__(self, guiName='browser')
self.styleSheetManagerClass = g.NullObject
self.log = leoFrame.NullLog()
#@+node:ekr.20181101034427.1: *3* bg.createLeoFrame
def createLeoFrame(self, c, title):
return leoFrame.NullFrame(c, title='NullFrame', gui=self)
#@+node:ekr.20181101025053.1: *3* bg.message
def message (self, func):
'''
Send a message to the framework.
'''
g.trace('=====', func, g.callers())
#@+node:ekr.20181031162454.1: *3* bg.runMainLoop
def runMainLoop(self, fileName=None):
'''The main loop for the browser gui.'''
# pylint: disable=arguments-differ
if fileName:
print('LeoWapp running: %s...' % g.shortFileName(fileName))
else:
print('LeoWapp running...')
if 0: # Run all unit tests.
path = g.os_path_finalize_join(
g.app.loadDir, '..', 'test', 'unittest.leo')
c = g.openWithFileName(path, gui=self)
c.findCommands.ftm = g.NullObject()
# A hack. Maybe the NullGui should do this.
c.debugCommands.runAllUnitTestsLocally()
print('calling sys.exit(0)')
sys.exit(0)
#@-others
#@-others
#@@language python
#@@tabwidth -4
#@-leo
| 30.700855 | 71 | 0.609131 | [
"MIT"
] | Anu082000/leo-editor | leo/plugins/leowapp.py | 3,592 | Python |
from __future__ import absolute_import
from __future__ import unicode_literals
from dateutil.relativedelta import relativedelta
from custom.icds_reports.const import AGG_CHILD_HEALTH_THR_TABLE
from custom.icds_reports.utils.aggregation_helpers import month_formatter
from custom.icds_reports.utils.aggregation_helpers.distributed.base import BaseICDSAggregationDistributedHelper
class THRFormsChildHealthAggregationDistributedHelper(BaseICDSAggregationDistributedHelper):
helper_key = 'thr-forms-child-health'
ucr_data_source_id = 'static-dashboard_thr_forms'
tablename = AGG_CHILD_HEALTH_THR_TABLE
def aggregate(self, cursor):
drop_query, drop_params = self.drop_table_query()
agg_query, agg_params = self.aggregation_query()
cursor.execute(drop_query, drop_params)
cursor.execute(agg_query, agg_params)
def drop_table_query(self):
return (
'DELETE FROM "{}" WHERE month=%(month)s AND state_id = %(state)s'.format(self.tablename),
{'month': month_formatter(self.month), 'state': self.state_id}
)
def aggregation_query(self):
month = self.month.replace(day=1)
current_month_start = month_formatter(self.month)
next_month_start = month_formatter(self.month + relativedelta(months=1))
query_params = {
"month": month_formatter(month),
"state_id": self.state_id,
"current_month_start": current_month_start,
"next_month_start": next_month_start,
}
return """
INSERT INTO "{tablename}" (
state_id, supervisor_id, month, case_id, latest_time_end_processed, days_ration_given_child
) (
SELECT DISTINCT ON (child_health_case_id)
%(state_id)s AS state_id,
LAST_VALUE(supervisor_id) over w AS supervisor_id,
%(month)s AS month,
child_health_case_id AS case_id,
MAX(timeend) over w AS latest_time_end_processed,
SUM(days_ration_given_child) over w AS days_ration_given_child
FROM "{ucr_tablename}"
WHERE state_id = %(state_id)s AND
timeend >= %(current_month_start)s AND timeend < %(next_month_start)s AND
child_health_case_id IS NOT NULL
WINDOW w AS (PARTITION BY supervisor_id, child_health_case_id)
)
""".format(
ucr_tablename=self.ucr_tablename,
tablename=self.tablename
), query_params
| 40.967213 | 111 | 0.687875 | [
"BSD-3-Clause"
] | MaciejChoromanski/commcare-hq | custom/icds_reports/utils/aggregation_helpers/distributed/thr_forms_child_health.py | 2,499 | Python |
# Generated by Django 2.1.5 on 2019-02-08 20:35
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='post',
name='image',
field=models.ImageField(blank=True, null=True, upload_to='post/%Y/%m/%d'),
),
]
| 21.315789 | 86 | 0.582716 | [
"MIT"
] | muntakim1/mblog | blog/migrations/0002_auto_20190209_0235.py | 405 | Python |
#!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'hospital.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| 33.75 | 73 | 0.687037 | [
"Apache-2.0"
] | thirdgroup/Hospital | hospital/manage.py | 540 | Python |
# Natural Language Toolkit - Range
# Represents a range of numbers, not an immutable object and can be modified by include
# Capable of performing operations on ranges
#
# Author: Sumukh Ghodke <sumukh dot ghodke at gmail dot com>
#
# URL: <http://nltk.sf.net>
# This software is distributed under GPL, for license information see LICENSE.TXT
from nltk_contrib.classifier.exceptions import systemerror as se
DELTA = 0.000001
class Range:
def __init__(self, lower = 0, upper = 0, upper_includes_max=False):
"""
        any number within this range should be greater than or equal to self.lower and
        less than (or less than or equal to, depending on whether it includes the max) self.upper
"""
self.__delta_added = False
if upper < lower:
raise se.SystemError('Lower limit ' + str(lower) + ' cannot be greater than the Upper limit ' + str(upper) + ' in a range')
self.__uninitialized = False
if upper == lower == 0:
self.__uninitialized = True
self.lower, self.upper, self.__delta_added = lower, upper, False
if upper_includes_max:
self.upper += DELTA
self.__delta_added = True
def include(self, number):
if self.__uninitialized:
self.lower, self.upper = number, number
self.__uninitialized = False
if number >= self.upper:
self.__delta_added = True
self.upper = number + DELTA
elif number < self.lower:
self.lower = number
def includes(self, number):
return self.lower <= number and self.upper > number
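    # Example (a sketch): Range(0, 10).includes(10) is False, while
    # Range(0, 10, upper_includes_max=True).includes(10) is True;
    # Range(0, 10).split(2) gives [Range(0, 5), Range(5, 10)].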
def split(self, parts):
if self.lower == self.upper: return None
size = self.upper - self.lower
max_limit = self.upper
if self.__delta_added:
size -= DELTA
max_limit -= DELTA
each = size / parts
if each < DELTA:
raise se.SystemError('Splitting of range resulted in elements smaller than delta ' + str(DELTA) + '.')
lower, ranges = self.lower, []
for i in range(parts - 1):
ranges.append(Range(lower, lower + each))
lower += each
ranges.append(Range(lower, self.upper))
return ranges
def __eq__(self, other):
if other is None: return False
if self.__class__ != other.__class__ : return False
if self.lower == other.lower and self.upper == other.upper: return True
return False
def __hash__(self):
return hash(self.lower) + hash(self.upper)
def __str__(self):
return '[' + str(self.lower) + ',' + str(self.upper) + ']'
| 38.328571 | 135 | 0.611256 | [
"Apache-2.0"
] | hectormartinez/rougexstem | taln2016/icsisumm-primary-sys34_v1/nltk/nltk-0.9.2/nltk_contrib/classifier/numrange.py | 2,683 | Python |
"""
Django settings for locallibrary project.
Generated by 'django-admin startproject' using Django 3.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import pytz
import os # needed by code below
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-@*cqyd6l)4*=yg7r19zmp#y32mpus(a2d-)ny&hstt^kq!13jk'
import os
#SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', 'cg#p$g+j9tax!#a3cup@1$8obt2_+&k3q+pmu)5%asj6yjpkag')
#with open('/etc/secret_key.txt') as f:
# SECRET_KEY = f.read().strip()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['obscure-plateau-04602.herokuapp.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'catalog.apps.CatalogConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'locallibrary.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'locallibrary.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Kolkata'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
# Redirect to home URL after login (Default redirects to /accounts/profile/)
LOGIN_REDIRECT_URL = '/'
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# to view it in the console; never add this in an actual site
# Heroku: Update database configuration from $DATABASE_URL.
import dj_database_url
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
# The absolute path to the directory where collectstatic will collect static files for deployment.
STATIC_ROOT = BASE_DIR / 'staticfiles'
# The URL to use when referring to static files (where they will be served from)
STATIC_URL = '/static/'
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage' | 29.070064 | 103 | 0.724803 | [
"Unlicense"
] | TheRedemp7ion/DjangoLocalLibrary | locallibrary/settings.py | 4,564 | Python |
"""
SAM CLI version
"""
__version__ = "1.23.0"
| 8 | 22 | 0.583333 | [
"BSD-2-Clause",
"Apache-2.0"
] | HiteshMah-Jan/aws-sam-cli | samcli/__init__.py | 48 | Python |
from bokeh.plotting import figure, show
# prepare some data
x = [1, 2, 3, 4, 5]
y = [4, 5, 5, 7, 2]
# create a plot
p = figure(
title="Background colors example",
sizing_mode="stretch_width",
max_width=500,
height=250,
)
# add a renderer
p.line(x, y, line_color="green", line_width=2)
# change the fill colors
p.background_fill_color = (204, 255, 255)
p.border_fill_color = (102, 204, 255)
p.outline_line_color = (0, 0, 255)
# show the results
show(p)
| 18.92 | 46 | 0.668076 | [
"BSD-3-Clause"
] | ABODFTW/bokeh | sphinx/source/docs/first_steps/examples/first_steps_4_background.py | 473 | Python |
import sys
class Solution:
# Write your code here
def __init__(self):
self.stack = []
self.queue = []
def popCharacter(self):
return self.stack.pop()
def pushCharacter(self, char):
self.stack.append(char)
def dequeueCharacter(self):
char = self.queue[0]
self.queue = self.queue[1:]
return char
def enqueueCharacter(self, char):
self.queue.append(char)
# read the string s
s=input()
#Create the Solution class object
obj=Solution()
l=len(s)
# push/enqueue all the characters of string s to stack
for i in range(l):
obj.pushCharacter(s[i])
obj.enqueueCharacter(s[i])
isPalindrome=True
'''
pop the top character from stack
dequeue the first character from queue
compare both the characters
'''
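# e.g. (a sketch) for s = "aba": the stack pops 'a' while the queue yields 'a',
# so after the single l // 2 comparison the word is reported as a palindrome.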
for i in range(l // 2):
if obj.popCharacter()!=obj.dequeueCharacter():
isPalindrome=False
break
#finally print whether string s is palindrome or not.
if isPalindrome:
print("The word, "+s+", is a palindrome.")
else:
print("The word, "+s+", is not a palindrome.")
| 22.854167 | 54 | 0.646308 | [
"MIT"
] | SayanBan/HackerRank-30-Days-of-code | Day 18/Queue and stacks.py | 1,097 | Python |
###############################################################################
##
## Copyright (C) 2013-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from __future__ import absolute_import
__all__ = ('Message',
'Hello',
'Welcome',
'Abort',
'Challenge',
'Authenticate',
'Goodbye',
'Heartbeat',
'Error',
'Publish',
'Published',
'Subscribe',
'Subscribed',
'Unsubscribe',
'Unsubscribed',
'Event',
'Call',
'Cancel',
'Result',
'Register',
'Registered',
'Unregister',
'Unregistered',
'Invocation',
'Interrupt',
'Yield')
import re
import six
import autobahn
from autobahn import util
from autobahn.wamp.exception import ProtocolError
from autobahn.wamp.interfaces import IMessage
from autobahn.wamp.role import ROLE_NAME_TO_CLASS
## strict URI check allowing empty URI components
_URI_PAT_STRICT = re.compile(r"^(([0-9a-z_]{2,}\.)|\.)*([0-9a-z_]{2,})?$")
## loose URI check allowing empty URI components
_URI_PAT_LOOSE = re.compile(r"^(([^\s\.#]+\.)|\.)*([^\s\.#]+)?$")
## strict URI check disallowing empty URI components
_URI_PAT_STRICT_NON_EMPTY = re.compile(r"^([0-9a-z_]{2,}\.)*([0-9a-z_]{2,})?$")
## loose URI check disallowing empty URI components
_URI_PAT_LOOSE_NON_EMPTY = re.compile(r"^([^\s\.#]+\.)*([^\s\.#]+)?$")
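## e.g. (illustration only): u"com.myapp.topic1" matches all four patterns above,
## while a URI containing whitespace such as u"com.myapp my topic" matches none of them.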
def check_or_raise_uri(value, message):
if type(value) != six.text_type:
raise ProtocolError("{0}: invalid type {1} for URI".format(message, type(value)))
if not _URI_PAT_LOOSE.match(value):
raise ProtocolError("{0}: invalid value '{1}' for URI".format(message, value))
return value
def check_or_raise_id(value, message):
if type(value) not in six.integer_types:
raise ProtocolError("{0}: invalid type {1} for ID".format(message, type(value)))
if value < 0 or value > 9007199254740992: # 2**53
raise ProtocolError("{0}: invalid value {1} for ID".format(message, value))
return value
def check_or_raise_extra(value, message):
if type(value) != dict:
raise ProtocolError("{0}: invalid type {1}".format(message, type(value)))
for k in value.keys():
if type(k) != six.text_type:
raise ProtocolError("{0}: invalid type {1} for key '{2}'".format(message, type(k), k))
return value
class Message(util.EqualityMixin):
"""
WAMP message base class. Implements :class:`autobahn.wamp.interfaces.IMessage`.
.. note:: This is not supposed to be instantiated.
"""
def __init__(self):
## serialization cache: mapping from ISerializer instances to serialized bytes
self._serialized = {}
def uncache(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.uncache`
"""
self._serialized = {}
def serialize(self, serializer):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.serialize`
"""
## only serialize if not cached ..
if not serializer in self._serialized:
self._serialized[serializer] = serializer.serialize(self.marshal())
return self._serialized[serializer]
IMessage.register(Message)
class Hello(Message):
"""
A WAMP ``HELLO`` message.
Format: ``[HELLO, Realm|uri, Details|dict]``
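
   For example (an illustrative message only), a client announcing just the
   ``subscriber`` role might send::

      [1, "com.example.realm", {"roles": {"subscriber": {}}}]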
"""
MESSAGE_TYPE = 1
"""
The WAMP message code for this type of message.
"""
def __init__(self, realm, roles, authmethods = None, authid = None):
"""
:param realm: The URI of the WAMP realm to join.
:type realm: unicode
:param roles: The WAMP roles to announce.
:type roles: list of :class:`autobahn.wamp.role.RoleFeatures`
:param authmethods: The authentication methods to announce.
:type authmethods: list of unicode or None
:param authid: The authentication ID to announce.
:type authid: unicode or None
"""
assert(type(realm) == six.text_type)
assert(type(roles) == list)
for role in roles:
assert(isinstance(role, autobahn.wamp.role.RoleFeatures))
if authmethods:
assert(type(authmethods) == list)
for authmethod in authmethods:
assert(type(authmethod) == six.text_type)
assert(authid is None or type(authid) == six.text_type)
Message.__init__(self)
self.realm = realm
self.roles = roles
self.authmethods = authmethods
self.authid = authid
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Hello.MESSAGE_TYPE)
if len(wmsg) != 3:
raise ProtocolError("invalid message length {0} for HELLO".format(len(wmsg)))
realm = check_or_raise_uri(wmsg[1], "'realm' in HELLO")
details = check_or_raise_extra(wmsg[2], "'details' in HELLO")
roles = []
if not u'roles' in details:
raise ProtocolError("missing mandatory roles attribute in options in HELLO")
details_roles = check_or_raise_extra(details[u'roles'], "'roles' in 'details' in HELLO")
if len(details_roles) == 0:
raise ProtocolError("empty 'roles' in 'details' in HELLO")
for role in details_roles:
if role not in ROLE_NAME_TO_CLASS:
raise ProtocolError("invalid role '{0}' in 'roles' in 'details' in HELLO".format(role))
role_cls = ROLE_NAME_TO_CLASS[role]
details_role = check_or_raise_extra(details_roles[role], "role '{0}' in 'roles' in 'details' in HELLO".format(role))
if u'features' in details_role:
check_or_raise_extra(details_role[u'features'], "'features' in role '{0}' in 'roles' in 'details' in HELLO".format(role))
## FIXME: skip unknown attributes
role_features = role_cls(**details_role[u'features'])
else:
role_features = role_cls()
roles.append(role_features)
authmethods = None
if u'authmethods' in details:
details_authmethods = details[u'authmethods']
if type(details_authmethods) != list:
raise ProtocolError("invalid type {0} for 'authmethods' detail in HELLO".format(type(details_authmethods)))
for auth_method in details_authmethods:
if type(auth_method) != six.text_type:
raise ProtocolError("invalid type {0} for item in 'authmethods' detail in HELLO".format(type(auth_method)))
authmethods = details_authmethods
authid = None
if u'authid' in details:
details_authid = details[u'authid']
if type(details_authid) != six.text_type:
raise ProtocolError("invalid type {0} for 'authid' detail in HELLO".format(type(details_authid)))
authid = details_authid
obj = Hello(realm, roles, authmethods, authid)
return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
details = {u'roles': {}}
for role in self.roles:
details[u'roles'][role.ROLE] = {}
for feature in role.__dict__:
if not feature.startswith('_') and feature != 'ROLE' and getattr(role, feature) is not None:
if not u'features' in details[u'roles'][role.ROLE]:
details[u'roles'][role.ROLE] = {u'features': {}}
details[u'roles'][role.ROLE][u'features'][six.u(feature)] = getattr(role, feature)
if self.authmethods:
details[u'authmethods'] = self.authmethods
if self.authid:
details[u'authid'] = self.authid
return [Hello.MESSAGE_TYPE, self.realm, details]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP HELLO Message (realm = {0}, roles = {1}, authmethods = {2}, authid = {3})".format(self.realm, self.roles, self.authmethods, self.authid)
class Welcome(Message):
"""
A WAMP ``WELCOME`` message.
Format: ``[WELCOME, Session|id, Details|dict]``
"""
MESSAGE_TYPE = 2
"""
The WAMP message code for this type of message.
"""
def __init__(self, session, roles, authid = None, authrole = None, authmethod = None, authprovider = None):
"""
:param session: The WAMP session ID the other peer is assigned.
:type session: int
:param roles: The WAMP roles to announce.
:type roles: list of :class:`autobahn.wamp.role.RoleFeatures`
:param authid: The authentication ID assigned.
:type authid: unicode or None
:param authrole: The authentication role assigned.
:type authrole: unicode or None
:param authmethod: The authentication method in use.
:type authmethod: unicode or None
:param authprovider: The authentication method in use.
:type authprovider: unicode or None
"""
assert(type(session) in six.integer_types)
assert(type(roles) == list)
for role in roles:
assert(isinstance(role, autobahn.wamp.role.RoleFeatures))
assert(authid is None or type(authid) == six.text_type)
assert(authrole is None or type(authrole) == six.text_type)
assert(authmethod is None or type(authmethod) == six.text_type)
assert(authprovider is None or type(authprovider) == six.text_type)
Message.__init__(self)
self.session = session
self.roles = roles
self.authid = authid
self.authrole = authrole
self.authmethod = authmethod
self.authprovider = authprovider
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Welcome.MESSAGE_TYPE)
if len(wmsg) != 3:
raise ProtocolError("invalid message length {0} for WELCOME".format(len(wmsg)))
session = check_or_raise_id(wmsg[1], "'session' in WELCOME")
details = check_or_raise_extra(wmsg[2], "'details' in WELCOME")
authid = details.get(u'authid', None)
authrole = details.get(u'authrole', None)
authmethod = details.get(u'authmethod', None)
authprovider = details.get(u'authprovider', None)
roles = []
if not u'roles' in details:
raise ProtocolError("missing mandatory roles attribute in options in WELCOME")
details_roles = check_or_raise_extra(details['roles'], "'roles' in 'details' in WELCOME")
if len(details_roles) == 0:
raise ProtocolError("empty 'roles' in 'details' in WELCOME")
for role in details_roles:
if role not in ROLE_NAME_TO_CLASS:
raise ProtocolError("invalid role '{0}' in 'roles' in 'details' in WELCOME".format(role))
role_cls = ROLE_NAME_TO_CLASS[role]
if u'features' in details_roles[role]:
check_or_raise_extra(details_roles[role][u'features'], "'features' in role '{0}' in 'roles' in 'details' in WELCOME".format(role))
## FIXME: skip unknown attributes
role_features = role_cls(**details_roles[role][u'features'])
else:
role_features = role_cls()
roles.append(role_features)
obj = Welcome(session, roles, authid, authrole, authmethod, authprovider)
return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
details = {
u'roles': {}
}
if self.authid:
details[u'authid'] = self.authid
if self.authrole:
details[u'authrole'] = self.authrole
if self.authrole:
details[u'authmethod'] = self.authmethod
if self.authprovider:
details[u'authprovider'] = self.authprovider
for role in self.roles:
details[u'roles'][role.ROLE] = {}
for feature in role.__dict__:
if not feature.startswith('_') and feature != 'ROLE' and getattr(role, feature) is not None:
if not u'features' in details[u'roles'][role.ROLE]:
details[u'roles'][role.ROLE] = {u'features': {}}
details[u'roles'][role.ROLE][u'features'][six.u(feature)] = getattr(role, feature)
return [Welcome.MESSAGE_TYPE, self.session, details]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP WELCOME Message (session = {0}, roles = {1}, authid = {2}, authrole = {3}, authmethod = {4}, authprovider = {5})".format(self.session, self.roles, self.authid, self.authrole, self.authmethod, self.authprovider)
class Abort(Message):
"""
A WAMP ``ABORT`` message.
Format: ``[ABORT, Details|dict, Reason|uri]``
"""
MESSAGE_TYPE = 3
"""
The WAMP message code for this type of message.
"""
def __init__(self, reason, message = None):
"""
:param reason: WAMP or application error URI for aborting reason.
:type reason: unicode
:param message: Optional human-readable closing message, e.g. for logging purposes.
:type message: unicode or None
"""
assert(type(reason) == six.text_type)
assert(message is None or type(message) == six.text_type)
Message.__init__(self)
self.reason = reason
self.message = message
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Abort.MESSAGE_TYPE)
if len(wmsg) != 3:
raise ProtocolError("invalid message length {0} for ABORT".format(len(wmsg)))
details = check_or_raise_extra(wmsg[1], "'details' in ABORT")
reason = check_or_raise_uri(wmsg[2], "'reason' in ABORT")
message = None
if u'message' in details:
details_message = details[u'message']
if type(details_message) != six.text_type:
raise ProtocolError("invalid type {0} for 'message' detail in ABORT".format(type(details_message)))
message = details_message
obj = Abort(reason, message)
return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
details = {}
if self.message:
details[u'message'] = self.message
return [Abort.MESSAGE_TYPE, details, self.reason]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP ABORT Message (message = {0}, reason = {1})".format(self.message, self.reason)
class Challenge(Message):
"""
A WAMP ``CHALLENGE`` message.
Format: ``[CHALLENGE, Method|string, Extra|dict]``
"""
MESSAGE_TYPE = 4
"""
The WAMP message code for this type of message.
"""
def __init__(self, method, extra = None):
"""
:param method: The authentication method.
:type method: unicode
:param extra: Authentication method specific information.
:type extra: dict or None
"""
assert(type(method) == six.text_type)
assert(extra is None or type(extra) == dict)
Message.__init__(self)
self.method = method
self.extra = extra or {}
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Challenge.MESSAGE_TYPE)
if len(wmsg) != 3:
raise ProtocolError("invalid message length {0} for CHALLENGE".format(len(wmsg)))
method = wmsg[1]
if type(method) != six.text_type:
raise ProtocolError("invalid type {0} for 'method' in CHALLENGE".format(type(method)))
extra = check_or_raise_extra(wmsg[2], "'extra' in CHALLENGE")
obj = Challenge(method, extra)
return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
return [Challenge.MESSAGE_TYPE, self.method, self.extra]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP CHALLENGE Message (method = {0}, extra = {1})".format(self.method, self.extra)
class Authenticate(Message):
"""
A WAMP ``AUTHENTICATE`` message.
Format: ``[AUTHENTICATE, Signature|string, Extra|dict]``
"""
MESSAGE_TYPE = 5
"""
The WAMP message code for this type of message.
"""
def __init__(self, signature, extra = None):
"""
:param signature: The signature for the authentication challenge.
:type signature: unicode
:param extra: Authentication method specific information.
:type extra: dict or None
"""
assert(type(signature) == six.text_type)
assert(extra is None or type(extra) == dict)
Message.__init__(self)
self.signature = signature
self.extra = extra or {}
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Authenticate.MESSAGE_TYPE)
if len(wmsg) != 3:
raise ProtocolError("invalid message length {0} for AUTHENTICATE".format(len(wmsg)))
signature = wmsg[1]
if type(signature) != six.text_type:
raise ProtocolError("invalid type {0} for 'signature' in AUTHENTICATE".format(type(signature)))
extra = check_or_raise_extra(wmsg[2], "'extra' in AUTHENTICATE")
obj = Authenticate(signature, extra)
return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
return [Authenticate.MESSAGE_TYPE, self.signature, self.extra]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP AUTHENTICATE Message (signature = {0}, extra = {1})".format(self.signature, self.extra)
class Goodbye(Message):
"""
A WAMP ``GOODBYE`` message.
Format: ``[GOODBYE, Details|dict, Reason|uri]``
"""
MESSAGE_TYPE = 6
"""
The WAMP message code for this type of message.
"""
DEFAULT_REASON = u"wamp.goodbye.normal"
"""
Default WAMP closing reason.
"""
def __init__(self, reason = DEFAULT_REASON, message = None):
"""
:param reason: Optional WAMP or application error URI for closing reason.
:type reason: unicode
:param message: Optional human-readable closing message, e.g. for logging purposes.
:type message: unicode or None
"""
assert(type(reason) == six.text_type)
assert(message is None or type(message) == six.text_type)
Message.__init__(self)
self.reason = reason
self.message = message
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Goodbye.MESSAGE_TYPE)
if len(wmsg) != 3:
raise ProtocolError("invalid message length {0} for GOODBYE".format(len(wmsg)))
details = check_or_raise_extra(wmsg[1], "'details' in GOODBYE")
reason = check_or_raise_uri(wmsg[2], "'reason' in GOODBYE")
message = None
if u'message' in details:
details_message = details[u'message']
if type(details_message) != six.text_type:
raise ProtocolError("invalid type {0} for 'message' detail in GOODBYE".format(type(details_message)))
message = details_message
obj = Goodbye(reason, message)
return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
details = {}
if self.message:
details[u'message'] = self.message
return [Goodbye.MESSAGE_TYPE, details, self.reason]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP GOODBYE Message (message = {0}, reason = {1})".format(self.message, self.reason)
class Heartbeat(Message):
"""
A WAMP ``HEARTBEAT`` message.
Formats:
* ``[HEARTBEAT, Incoming|integer, Outgoing|integer]``
* ``[HEARTBEAT, Incoming|integer, Outgoing|integer, Discard|string]``
"""
MESSAGE_TYPE = 7
"""
The WAMP message code for this type of message.
"""
def __init__(self, incoming, outgoing, discard = None):
"""
:param incoming: Last incoming heartbeat processed from peer.
:type incoming: int
:param outgoing: Outgoing heartbeat.
:type outgoing: int
:param discard: Optional data that is discarded by peer.
:type discard: unicode or None
"""
assert(type(incoming) in six.integer_types)
assert(type(outgoing) in six.integer_types)
assert(discard is None or type(discard) == six.text_type)
Message.__init__(self)
self.incoming = incoming
self.outgoing = outgoing
self.discard = discard
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Heartbeat.MESSAGE_TYPE)
if len(wmsg) not in [3, 4]:
raise ProtocolError("invalid message length {0} for HEARTBEAT".format(len(wmsg)))
incoming = wmsg[1]
if type(incoming) not in six.integer_types:
raise ProtocolError("invalid type {0} for 'incoming' in HEARTBEAT".format(type(incoming)))
if incoming < 0: # must be non-negative
raise ProtocolError("invalid value {0} for 'incoming' in HEARTBEAT".format(incoming))
outgoing = wmsg[2]
if type(outgoing) not in six.integer_types:
raise ProtocolError("invalid type {0} for 'outgoing' in HEARTBEAT".format(type(outgoing)))
if outgoing <= 0: # must be positive
raise ProtocolError("invalid value {0} for 'outgoing' in HEARTBEAT".format(outgoing))
discard = None
if len(wmsg) > 3:
discard = wmsg[3]
if type(discard) != six.text_type:
raise ProtocolError("invalid type {0} for 'discard' in HEARTBEAT".format(type(discard)))
obj = Heartbeat(incoming, outgoing, discard = discard)
return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
if self.discard:
return [Heartbeat.MESSAGE_TYPE, self.incoming, self.outgoing, self.discard]
else:
return [Heartbeat.MESSAGE_TYPE, self.incoming, self.outgoing]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP HEARTBEAT Message (incoming {0}, outgoing = {1}, len(discard) = {2})".format(self.incoming, self.outgoing, len(self.discard) if self.discard else None)
class Error(Message):
"""
A WAMP ``ERROR`` message.
Formats:
* ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri]``
* ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri, Arguments|list]``
* ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri, Arguments|list, ArgumentsKw|dict]``
"""
MESSAGE_TYPE = 8
"""
The WAMP message code for this type of message.
"""
def __init__(self, request_type, request, error, args = None, kwargs = None):
"""
:param request_type: The WAMP message type code for the original request.
:type request_type: int
:param request: The WAMP request ID of the original request (`Call`, `Subscribe`, ...) this error occurred for.
:type request: int
:param error: The WAMP or application error URI for the error that occurred.
:type error: unicode
:param args: Positional values for application-defined exception.
Must be serializable using any serializers in use.
:type args: list or None
:param kwargs: Keyword values for application-defined exception.
Must be serializable using any serializers in use.
:type kwargs: dict or None
"""
assert(type(request_type) in six.integer_types)
assert(type(request) in six.integer_types)
assert(type(error) == six.text_type)
assert(args is None or type(args) in [list, tuple])
assert(kwargs is None or type(kwargs) == dict)
Message.__init__(self)
self.request_type = request_type
self.request = request
self.error = error
self.args = args
self.kwargs = kwargs
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Error.MESSAGE_TYPE)
if len(wmsg) not in (5, 6, 7):
raise ProtocolError("invalid message length {0} for ERROR".format(len(wmsg)))
request_type = wmsg[1]
if type(request_type) not in six.integer_types:
raise ProtocolError("invalid type {0} for 'request_type' in ERROR".format(request_type))
if request_type not in [Subscribe.MESSAGE_TYPE,
Unsubscribe.MESSAGE_TYPE,
Publish.MESSAGE_TYPE,
Register.MESSAGE_TYPE,
Unregister.MESSAGE_TYPE,
Call.MESSAGE_TYPE,
Invocation.MESSAGE_TYPE]:
raise ProtocolError("invalid value {0} for 'request_type' in ERROR".format(request_type))
request = check_or_raise_id(wmsg[2], "'request' in ERROR")
_ = check_or_raise_extra(wmsg[3], "'details' in ERROR")
error = check_or_raise_uri(wmsg[4], "'error' in ERROR")
args = None
if len(wmsg) > 5:
args = wmsg[5]
if type(args) != list:
raise ProtocolError("invalid type {0} for 'args' in ERROR".format(type(args)))
kwargs = None
if len(wmsg) > 6:
kwargs = wmsg[6]
if type(kwargs) != dict:
raise ProtocolError("invalid type {0} for 'kwargs' in ERROR".format(type(kwargs)))
obj = Error(request_type, request, error, args = args, kwargs = kwargs)
return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
details = {}
if self.kwargs:
return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error, self.args, self.kwargs]
elif self.args:
return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error, self.args]
else:
return [self.MESSAGE_TYPE, self.request_type, self.request, details, self.error]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP Error Message (request_type = {0}, request = {1}, error = {2}, args = {3}, kwargs = {4})".format(self.request_type, self.request, self.error, self.args, self.kwargs)
class Publish(Message):
"""
A WAMP ``PUBLISH`` message.
Formats:
* ``[PUBLISH, Request|id, Options|dict, Topic|uri]``
* ``[PUBLISH, Request|id, Options|dict, Topic|uri, Arguments|list]``
* ``[PUBLISH, Request|id, Options|dict, Topic|uri, Arguments|list, ArgumentsKw|dict]``
"""
MESSAGE_TYPE = 16
"""
The WAMP message code for this type of message.
"""
def __init__(self,
request,
topic,
args = None,
kwargs = None,
acknowledge = None,
excludeMe = None,
exclude = None,
eligible = None,
discloseMe = None):
"""
:param request: The WAMP request ID of this request.
:type request: int
:param topic: The WAMP or application URI of the PubSub topic the event should
be published to.
:type topic: unicode
:param args: Positional values for application-defined event payload.
Must be serializable using any serializers in use.
:type args: list or tuple or None
:param kwargs: Keyword values for application-defined event payload.
Must be serializable using any serializers in use.
:type kwargs: dict or None
:param acknowledge: If True, acknowledge the publication with a success or
error response.
:type acknowledge: bool or None
:param excludeMe: If ``True``, exclude the publisher from receiving the event, even
if he is subscribed (and eligible).
:type excludeMe: bool or None
:param exclude: List of WAMP session IDs to exclude from receiving this event.
:type exclude: list of int or None
:param eligible: List of WAMP session IDs eligible to receive this event.
:type eligible: list of int or None
:param discloseMe: If True, request to disclose the publisher of this event
to subscribers.
:type discloseMe: bool or None
"""
assert(type(request) in six.integer_types)
assert(type(topic) == six.text_type)
assert(args is None or type(args) in [list, tuple])
assert(kwargs is None or type(kwargs) == dict)
assert(acknowledge is None or type(acknowledge) == bool)
assert(excludeMe is None or type(excludeMe) == bool)
assert(exclude is None or type(exclude) == list)
assert(eligible is None or type(eligible) == list)
assert(discloseMe is None or type(discloseMe) == bool)
Message.__init__(self)
self.request = request
self.topic = topic
self.args = args
self.kwargs = kwargs
self.acknowledge = acknowledge
self.excludeMe = excludeMe
self.exclude = exclude
self.eligible = eligible
self.discloseMe = discloseMe
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Publish.MESSAGE_TYPE)
if len(wmsg) not in (4, 5, 6):
raise ProtocolError("invalid message length {0} for PUBLISH".format(len(wmsg)))
request = check_or_raise_id(wmsg[1], "'request' in PUBLISH")
options = check_or_raise_extra(wmsg[2], "'options' in PUBLISH")
topic = check_or_raise_uri(wmsg[3], "'topic' in PUBLISH")
args = None
if len(wmsg) > 4:
args = wmsg[4]
if type(args) != list:
raise ProtocolError("invalid type {0} for 'args' in PUBLISH".format(type(args)))
kwargs = None
if len(wmsg) > 5:
kwargs = wmsg[5]
if type(kwargs) != dict:
raise ProtocolError("invalid type {0} for 'kwargs' in PUBLISH".format(type(kwargs)))
acknowledge = None
excludeMe = None
exclude = None
eligible = None
discloseMe = None
if u'acknowledge' in options:
option_acknowledge = options[u'acknowledge']
if type(option_acknowledge) != bool:
raise ProtocolError("invalid type {0} for 'acknowledge' option in PUBLISH".format(type(option_acknowledge)))
acknowledge = option_acknowledge
if u'exclude_me' in options:
option_excludeMe = options[u'exclude_me']
if type(option_excludeMe) != bool:
raise ProtocolError("invalid type {0} for 'exclude_me' option in PUBLISH".format(type(option_excludeMe)))
excludeMe = option_excludeMe
if u'exclude' in options:
option_exclude = options[u'exclude']
if type(option_exclude) != list:
raise ProtocolError("invalid type {0} for 'exclude' option in PUBLISH".format(type(option_exclude)))
for sessionId in option_exclude:
if type(sessionId) not in six.integer_types:
raise ProtocolError("invalid type {0} for value in 'exclude' option in PUBLISH".format(type(sessionId)))
exclude = option_exclude
if u'eligible' in options:
option_eligible = options[u'eligible']
if type(option_eligible) != list:
raise ProtocolError("invalid type {0} for 'eligible' option in PUBLISH".format(type(option_eligible)))
for sessionId in option_eligible:
if type(sessionId) not in six.integer_types:
raise ProtocolError("invalid type {0} for value in 'eligible' option in PUBLISH".format(type(sessionId)))
eligible = option_eligible
if u'disclose_me' in options:
option_discloseMe = options[u'disclose_me']
if type(option_discloseMe) != bool:
raise ProtocolError("invalid type {0} for 'disclose_me' option in PUBLISH".format(type(option_discloseMe)))
discloseMe = option_discloseMe
obj = Publish(request,
topic,
args = args,
kwargs = kwargs,
acknowledge = acknowledge,
excludeMe = excludeMe,
exclude = exclude,
eligible = eligible,
discloseMe = discloseMe)
return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
options = {}
if self.acknowledge is not None:
options[u'acknowledge'] = self.acknowledge
if self.excludeMe is not None:
options[u'exclude_me'] = self.excludeMe
if self.exclude is not None:
options[u'exclude'] = self.exclude
if self.eligible is not None:
options[u'eligible'] = self.eligible
if self.discloseMe is not None:
options[u'disclose_me'] = self.discloseMe
if self.kwargs:
return [Publish.MESSAGE_TYPE, self.request, options, self.topic, self.args, self.kwargs]
elif self.args:
return [Publish.MESSAGE_TYPE, self.request, options, self.topic, self.args]
else:
return [Publish.MESSAGE_TYPE, self.request, options, self.topic]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP PUBLISH Message (request = {0}, topic = {1}, args = {2}, kwargs = {3}, acknowledge = {4}, excludeMe = {5}, exclude = {6}, eligible = {7}, discloseMe = {8})".format(self.request, self.topic, self.args, self.kwargs, self.acknowledge, self.excludeMe, self.exclude, self.eligible, self.discloseMe)
class Published(Message):
"""
A WAMP ``PUBLISHED`` message.
Format: ``[PUBLISHED, PUBLISH.Request|id, Publication|id]``
"""
MESSAGE_TYPE = 17
"""
The WAMP message code for this type of message.
"""
def __init__(self, request, publication):
"""
:param request: The request ID of the original `PUBLISH` request.
:type request: int
:param publication: The publication ID for the published event.
:type publication: int
"""
assert(type(request) in six.integer_types)
assert(type(publication) in six.integer_types)
Message.__init__(self)
self.request = request
self.publication = publication
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Published.MESSAGE_TYPE)
if len(wmsg) != 3:
raise ProtocolError("invalid message length {0} for PUBLISHED".format(len(wmsg)))
request = check_or_raise_id(wmsg[1], "'request' in PUBLISHED")
publication = check_or_raise_id(wmsg[2], "'publication' in PUBLISHED")
obj = Published(request, publication)
return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
return [Published.MESSAGE_TYPE, self.request, self.publication]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP PUBLISHED Message (request = {0}, publication = {1})".format(self.request, self.publication)
class Subscribe(Message):
"""
A WAMP ``SUBSCRIBE`` message.
Format: ``[SUBSCRIBE, Request|id, Options|dict, Topic|uri]``
"""
MESSAGE_TYPE = 32
"""
The WAMP message code for this type of message.
"""
MATCH_EXACT = u'exact'
MATCH_PREFIX = u'prefix'
MATCH_WILDCARD = u'wildcard'
def __init__(self, request, topic, match = MATCH_EXACT):
"""
:param request: The WAMP request ID of this request.
:type request: int
:param topic: The WAMP or application URI of the PubSub topic to subscribe to.
:type topic: unicode
:param match: The topic matching method to be used for the subscription.
:type match: unicode
"""
assert(type(request) in six.integer_types)
assert(type(topic) == six.text_type)
assert(match is None or type(match) == six.text_type)
assert(match is None or match in [self.MATCH_EXACT, self.MATCH_PREFIX, self.MATCH_WILDCARD])
Message.__init__(self)
self.request = request
self.topic = topic
self.match = match
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Subscribe.MESSAGE_TYPE)
if len(wmsg) != 4:
raise ProtocolError("invalid message length {0} for SUBSCRIBE".format(len(wmsg)))
request = check_or_raise_id(wmsg[1], "'request' in SUBSCRIBE")
options = check_or_raise_extra(wmsg[2], "'options' in SUBSCRIBE")
topic = check_or_raise_uri(wmsg[3], "'topic' in SUBSCRIBE")
match = Subscribe.MATCH_EXACT
if u'match' in options:
option_match = options[u'match']
if type(option_match) != six.text_type:
raise ProtocolError("invalid type {0} for 'match' option in SUBSCRIBE".format(type(option_match)))
if option_match not in [Subscribe.MATCH_EXACT, Subscribe.MATCH_PREFIX, Subscribe.MATCH_WILDCARD]:
raise ProtocolError("invalid value {0} for 'match' option in SUBSCRIBE".format(option_match))
match = option_match
obj = Subscribe(request, topic, match)
return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
options = {}
if self.match and self.match != Subscribe.MATCH_EXACT:
options[u'match'] = self.match
return [Subscribe.MESSAGE_TYPE, self.request, options, self.topic]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP SUBSCRIBE Message (request = {0}, topic = {1}, match = {2})".format(self.request, self.topic, self.match)
class Subscribed(Message):
"""
A WAMP ``SUBSCRIBED`` message.
Format: ``[SUBSCRIBED, SUBSCRIBE.Request|id, Subscription|id]``
"""
MESSAGE_TYPE = 33
"""
The WAMP message code for this type of message.
"""
def __init__(self, request, subscription):
"""
:param request: The request ID of the original ``SUBSCRIBE`` request.
:type request: int
:param subscription: The subscription ID for the subscribed topic (or topic pattern).
:type subscription: int
"""
assert(type(request) in six.integer_types)
assert(type(subscription) in six.integer_types)
Message.__init__(self)
self.request = request
self.subscription = subscription
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Subscribed.MESSAGE_TYPE)
if len(wmsg) != 3:
raise ProtocolError("invalid message length {0} for SUBSCRIBED".format(len(wmsg)))
request = check_or_raise_id(wmsg[1], "'request' in SUBSCRIBED")
subscription = check_or_raise_id(wmsg[2], "'subscription' in SUBSCRIBED")
obj = Subscribed(request, subscription)
return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
return [Subscribed.MESSAGE_TYPE, self.request, self.subscription]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP SUBSCRIBED Message (request = {0}, subscription = {1})".format(self.request, self.subscription)
class Unsubscribe(Message):
"""
A WAMP ``UNSUBSCRIBE`` message.
Format: ``[UNSUBSCRIBE, Request|id, SUBSCRIBED.Subscription|id]``
"""
MESSAGE_TYPE = 34
"""
The WAMP message code for this type of message.
"""
def __init__(self, request, subscription):
"""
:param request: The WAMP request ID of this request.
:type request: int
:param subscription: The subscription ID for the subscription to unsubscribe from.
:type subscription: int
"""
assert(type(request) in six.integer_types)
assert(type(subscription) in six.integer_types)
Message.__init__(self)
self.request = request
self.subscription = subscription
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Unsubscribe.MESSAGE_TYPE)
if len(wmsg) != 3:
raise ProtocolError("invalid message length {0} for WAMP UNSUBSCRIBE".format(len(wmsg)))
request = check_or_raise_id(wmsg[1], "'request' in UNSUBSCRIBE")
subscription = check_or_raise_id(wmsg[2], "'subscription' in UNSUBSCRIBE")
obj = Unsubscribe(request, subscription)
return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
return [Unsubscribe.MESSAGE_TYPE, self.request, self.subscription]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP UNSUBSCRIBE Message (request = {0}, subscription = {1})".format(self.request, self.subscription)
class Unsubscribed(Message):
"""
A WAMP ``UNSUBSCRIBED`` message.
Format: ``[UNSUBSCRIBED, UNSUBSCRIBE.Request|id]``
"""
MESSAGE_TYPE = 35
"""
The WAMP message code for this type of message.
"""
def __init__(self, request):
"""
:param request: The request ID of the original ``UNSUBSCRIBE`` request.
:type request: int
"""
assert(type(request) in six.integer_types)
Message.__init__(self)
self.request = request
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Unsubscribed.MESSAGE_TYPE)
if len(wmsg) != 2:
raise ProtocolError("invalid message length {0} for UNSUBSCRIBED".format(len(wmsg)))
request = check_or_raise_id(wmsg[1], "'request' in UNSUBSCRIBED")
obj = Unsubscribed(request)
return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
return [Unsubscribed.MESSAGE_TYPE, self.request]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP UNSUBSCRIBED Message (request = {0})".format(self.request)
class Event(Message):
"""
A WAMP ``EVENT`` message.
Formats:
* ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict]``
* ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict, PUBLISH.Arguments|list]``
* ``[EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict, PUBLISH.Arguments|list, PUBLISH.ArgumentsKw|dict]``
"""
MESSAGE_TYPE = 36
"""
The WAMP message code for this type of message.
"""
def __init__(self, subscription, publication, args = None, kwargs = None, publisher = None):
"""
:param subscription: The subscription ID this event is dispatched under.
:type subscription: int
:param publication: The publication ID of the dispatched event.
:type publication: int
:param args: Positional values for application-defined exception.
Must be serializable using any serializers in use.
:type args: list or tuple or None
:param kwargs: Keyword values for application-defined exception.
Must be serializable using any serializers in use.
:type kwargs: dict or None
:param publisher: If present, the WAMP session ID of the publisher of this event.
:type publisher: int or None
"""
assert(type(subscription) in six.integer_types)
assert(type(publication) in six.integer_types)
assert(args is None or type(args) in [list, tuple])
assert(kwargs is None or type(kwargs) == dict)
assert(publisher is None or type(publisher) in six.integer_types)
Message.__init__(self)
self.subscription = subscription
self.publication = publication
self.args = args
self.kwargs = kwargs
self.publisher = publisher
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Event.MESSAGE_TYPE)
if len(wmsg) not in (4, 5, 6):
raise ProtocolError("invalid message length {0} for EVENT".format(len(wmsg)))
subscription = check_or_raise_id(wmsg[1], "'subscription' in EVENT")
publication = check_or_raise_id(wmsg[2], "'publication' in EVENT")
details = check_or_raise_extra(wmsg[3], "'details' in EVENT")
args = None
if len(wmsg) > 4:
args = wmsg[4]
if type(args) != list:
raise ProtocolError("invalid type {0} for 'args' in EVENT".format(type(args)))
kwargs = None
if len(wmsg) > 5:
kwargs = wmsg[5]
if type(kwargs) != dict:
raise ProtocolError("invalid type {0} for 'kwargs' in EVENT".format(type(kwargs)))
publisher = None
if u'publisher' in details:
detail_publisher = details[u'publisher']
if type(detail_publisher) not in six.integer_types:
raise ProtocolError("invalid type {0} for 'publisher' detail in EVENT".format(type(detail_publisher)))
publisher = detail_publisher
obj = Event(subscription,
publication,
args = args,
kwargs = kwargs,
publisher = publisher)
return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
details = {}
if self.publisher is not None:
details[u'publisher'] = self.publisher
if self.kwargs:
return [Event.MESSAGE_TYPE, self.subscription, self.publication, details, self.args, self.kwargs]
elif self.args:
return [Event.MESSAGE_TYPE, self.subscription, self.publication, details, self.args]
else:
return [Event.MESSAGE_TYPE, self.subscription, self.publication, details]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP EVENT Message (subscription = {0}, publication = {1}, args = {2}, kwargs = {3}, publisher = {4})".format(self.subscription, self.publication, self.args, self.kwargs, self.publisher)
class Call(Message):
"""
A WAMP ``CALL`` message.
Formats:
* ``[CALL, Request|id, Options|dict, Procedure|uri]``
* ``[CALL, Request|id, Options|dict, Procedure|uri, Arguments|list]``
* ``[CALL, Request|id, Options|dict, Procedure|uri, Arguments|list, ArgumentsKw|dict]``
"""
MESSAGE_TYPE = 48
"""
The WAMP message code for this type of message.
"""
def __init__(self,
request,
procedure,
args = None,
kwargs = None,
timeout = None,
receive_progress = None,
discloseMe = None):
"""
:param request: The WAMP request ID of this request.
:type request: int
:param procedure: The WAMP or application URI of the procedure which should be called.
:type procedure: unicode
:param args: Positional values for application-defined call arguments.
Must be serializable using any serializers in use.
:type args: list or tuple or None
:param kwargs: Keyword values for application-defined call arguments.
Must be serializable using any serializers in use.
:type kwargs: dict or None
:param timeout: If present, let the callee automatically cancel
the call after this ms.
:type timeout: int or None
:param receive_progress: If ``True``, indicates that the caller wants to receive
progressive call results.
:type receive_progress: bool or None
:param discloseMe: If ``True``, the caller requests to disclose itself to the callee.
:type discloseMe: bool or None
"""
assert(type(request) in six.integer_types)
assert(type(procedure) == six.text_type)
assert(args is None or type(args) in [list, tuple])
assert(kwargs is None or type(kwargs) == dict)
assert(timeout is None or type(timeout) in six.integer_types)
assert(receive_progress is None or type(receive_progress) == bool)
assert(discloseMe is None or type(discloseMe) == bool)
Message.__init__(self)
self.request = request
self.procedure = procedure
self.args = args
self.kwargs = kwargs
self.timeout = timeout
self.receive_progress = receive_progress
self.discloseMe = discloseMe
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Call.MESSAGE_TYPE)
if len(wmsg) not in (4, 5, 6):
raise ProtocolError("invalid message length {0} for CALL".format(len(wmsg)))
request = check_or_raise_id(wmsg[1], "'request' in CALL")
options = check_or_raise_extra(wmsg[2], "'options' in CALL")
procedure = check_or_raise_uri(wmsg[3], "'procedure' in CALL")
args = None
if len(wmsg) > 4:
args = wmsg[4]
if type(args) != list:
raise ProtocolError("invalid type {0} for 'args' in CALL".format(type(args)))
kwargs = None
if len(wmsg) > 5:
kwargs = wmsg[5]
if type(kwargs) != dict:
raise ProtocolError("invalid type {0} for 'kwargs' in CALL".format(type(kwargs)))
timeout = None
if u'timeout' in options:
option_timeout = options[u'timeout']
if type(option_timeout) not in six.integer_types:
raise ProtocolError("invalid type {0} for 'timeout' option in CALL".format(type(option_timeout)))
if option_timeout < 0:
raise ProtocolError("invalid value {0} for 'timeout' option in CALL".format(option_timeout))
timeout = option_timeout
receive_progress = None
if u'receive_progress' in options:
option_receive_progress = options[u'receive_progress']
if type(option_receive_progress) != bool:
raise ProtocolError("invalid type {0} for 'receive_progress' option in CALL".format(type(option_receive_progress)))
receive_progress = option_receive_progress
discloseMe = None
if u'disclose_me' in options:
option_discloseMe = options[u'disclose_me']
if type(option_discloseMe) != bool:
raise ProtocolError("invalid type {0} for 'disclose_me' option in CALL".format(type(option_discloseMe)))
discloseMe = option_discloseMe
obj = Call(request,
procedure,
args = args,
kwargs = kwargs,
timeout = timeout,
receive_progress = receive_progress,
discloseMe = discloseMe)
return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
options = {}
if self.timeout is not None:
options[u'timeout'] = self.timeout
if self.receive_progress is not None:
options[u'receive_progress'] = self.receive_progress
if self.discloseMe is not None:
options[u'disclose_me'] = self.discloseMe
if self.kwargs:
return [Call.MESSAGE_TYPE, self.request, options, self.procedure, self.args, self.kwargs]
elif self.args:
return [Call.MESSAGE_TYPE, self.request, options, self.procedure, self.args]
else:
return [Call.MESSAGE_TYPE, self.request, options, self.procedure]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP CALL Message (request = {0}, procedure = {1}, args = {2}, kwargs = {3}, timeout = {4}, receive_progress = {5}, discloseMe = {6})".format(self.request, self.procedure, self.args, self.kwargs, self.timeout, self.receive_progress, self.discloseMe)
class Cancel(Message):
"""
A WAMP ``CANCEL`` message.
Format: ``[CANCEL, CALL.Request|id, Options|dict]``
"""
MESSAGE_TYPE = 49
"""
The WAMP message code for this type of message.
"""
SKIP = u'skip'
ABORT = u'abort'
KILL = u'kill'
def __init__(self, request, mode = None):
"""
:param request: The WAMP request ID of the original `CALL` to cancel.
:type request: int
:param mode: Specifies how to cancel the call (``"skip"``, ``"abort"`` or ``"kill"``).
:type mode: unicode or None
"""
assert(type(request) in six.integer_types)
assert(mode is None or type(mode) == six.text_type)
assert(mode in [None, self.SKIP, self.ABORT, self.KILL])
Message.__init__(self)
self.request = request
self.mode = mode
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Cancel.MESSAGE_TYPE)
if len(wmsg) != 3:
raise ProtocolError("invalid message length {0} for CANCEL".format(len(wmsg)))
request = check_or_raise_id(wmsg[1], "'request' in CANCEL")
options = check_or_raise_extra(wmsg[2], "'options' in CANCEL")
## options
##
mode = None
if u'mode' in options:
option_mode = options[u'mode']
if type(option_mode) != six.text_type:
raise ProtocolError("invalid type {0} for 'mode' option in CANCEL".format(type(option_mode)))
if option_mode not in [Cancel.SKIP, Cancel.ABORT, Cancel.KILL]:
raise ProtocolError("invalid value '{0}' for 'mode' option in CANCEL".format(option_mode))
mode = option_mode
obj = Cancel(request, mode = mode)
return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
options = {}
if self.mode is not None:
options[u'mode'] = self.mode
return [Cancel.MESSAGE_TYPE, self.request, options]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP CANCEL Message (request = {0}, mode = '{1}'')".format(self.request, self.mode)
class Result(Message):
"""
A WAMP ``RESULT`` message.
Formats:
* ``[RESULT, CALL.Request|id, Details|dict]``
* ``[RESULT, CALL.Request|id, Details|dict, YIELD.Arguments|list]``
* ``[RESULT, CALL.Request|id, Details|dict, YIELD.Arguments|list, YIELD.ArgumentsKw|dict]``
"""
MESSAGE_TYPE = 50
"""
The WAMP message code for this type of message.
"""
def __init__(self, request, args = None, kwargs = None, progress = None):
"""
:param request: The request ID of the original `CALL` request.
:type request: int
:param args: Positional values for application-defined event payload.
Must be serializable using any serializers in use.
:type args: list or tuple or None
:param kwargs: Keyword values for application-defined event payload.
Must be serializable using any serializers in use.
:type kwargs: dict or None
:param progress: If ``True``, this result is a progressive call result, and subsequent
results (or a final error) will follow.
:type progress: bool or None
"""
assert(type(request) in six.integer_types)
assert(args is None or type(args) in [list, tuple])
assert(kwargs is None or type(kwargs) == dict)
assert(progress is None or type(progress) == bool)
Message.__init__(self)
self.request = request
self.args = args
self.kwargs = kwargs
self.progress = progress
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Result.MESSAGE_TYPE)
if len(wmsg) not in (3, 4, 5):
raise ProtocolError("invalid message length {0} for RESULT".format(len(wmsg)))
request = check_or_raise_id(wmsg[1], "'request' in RESULT")
details = check_or_raise_extra(wmsg[2], "'details' in RESULT")
args = None
if len(wmsg) > 3:
args = wmsg[3]
if type(args) != list:
raise ProtocolError("invalid type {0} for 'args' in RESULT".format(type(args)))
kwargs = None
if len(wmsg) > 4:
kwargs = wmsg[4]
if type(kwargs) != dict:
raise ProtocolError("invalid type {0} for 'kwargs' in RESULT".format(type(kwargs)))
progress = None
if u'progress' in details:
detail_progress = details[u'progress']
if type(detail_progress) != bool:
raise ProtocolError("invalid type {0} for 'progress' option in RESULT".format(type(detail_progress)))
progress = detail_progress
obj = Result(request, args = args, kwargs = kwargs, progress = progress)
return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
details = {}
if self.progress is not None:
details[u'progress'] = self.progress
if self.kwargs:
return [Result.MESSAGE_TYPE, self.request, details, self.args, self.kwargs]
elif self.args:
return [Result.MESSAGE_TYPE, self.request, details, self.args]
else:
return [Result.MESSAGE_TYPE, self.request, details]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP RESULT Message (request = {0}, args = {1}, kwargs = {2}, progress = {3})".format(self.request, self.args, self.kwargs, self.progress)
class Register(Message):
"""
A WAMP ``REGISTER`` message.
Format: ``[REGISTER, Request|id, Options|dict, Procedure|uri]``
"""
MESSAGE_TYPE = 64
"""
The WAMP message code for this type of message.
"""
def __init__(self, request, procedure, pkeys = None, discloseCaller = None):
"""
:param request: The WAMP request ID of this request.
:type request: int
:param procedure: The WAMP or application URI of the RPC endpoint provided.
:type procedure: unicode
:param pkeys: The endpoint can work for this list of application partition keys.
:type pkeys: list of int or None
:param discloseCaller: If ``True``, the (registering) callee requests to disclose
the identity of callers whenever called.
:type discloseCaller: bool or None
"""
assert(type(request) in six.integer_types)
assert(type(procedure) == six.text_type)
assert(pkeys is None or type(pkeys) == list)
if pkeys:
for k in pkeys:
assert(type(k) in six.integer_types)
assert(discloseCaller is None or type(discloseCaller) == bool)
Message.__init__(self)
self.request = request
self.procedure = procedure
self.pkeys = pkeys
self.discloseCaller = discloseCaller
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Register.MESSAGE_TYPE)
if len(wmsg) != 4:
raise ProtocolError("invalid message length {0} for REGISTER".format(len(wmsg)))
request = check_or_raise_id(wmsg[1], "'request' in REGISTER")
options = check_or_raise_extra(wmsg[2], "'options' in REGISTER")
procedure = check_or_raise_uri(wmsg[3], "'procedure' in REGISTER")
pkeys = None
discloseCaller = None
if u'pkeys' in options:
option_pkeys = options[u'pkeys']
if type(option_pkeys) != list:
raise ProtocolError("invalid type {0} for 'pkeys' option in REGISTER".format(type(option_pkeys)))
for pk in option_pkeys:
if type(pk) not in six.integer_types:
raise ProtocolError("invalid type for value '{0}' in 'pkeys' option in REGISTER".format(type(pk)))
pkeys = option_pkeys
if u'disclose_caller' in options:
option_discloseCaller = options[u'disclose_caller']
if type(option_discloseCaller) != bool:
raise ProtocolError("invalid type {0} for 'disclose_caller' option in REGISTER".format(type(option_discloseCaller)))
discloseCaller = option_discloseCaller
obj = Register(request, procedure, pkeys = pkeys, discloseCaller = discloseCaller)
return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
options = {}
if self.pkeys is not None:
options[u'pkeys'] = self.pkeys
if self.discloseCaller is not None:
options[u'disclose_caller'] = self.discloseCaller
return [Register.MESSAGE_TYPE, self.request, options, self.procedure]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP REGISTER Message (request = {0}, procedure = {1}, pkeys = {2}, discloseCaller = {3})".format(self.request, self.procedure, self.pkeys, self.discloseCaller)
class Registered(Message):
"""
A WAMP ``REGISTERED`` message.
Format: ``[REGISTERED, REGISTER.Request|id, Registration|id]``
"""
MESSAGE_TYPE = 65
"""
The WAMP message code for this type of message.
"""
def __init__(self, request, registration):
"""
:param request: The request ID of the original ``REGISTER`` request.
:type request: int
:param registration: The registration ID for the registered procedure (or procedure pattern).
:type registration: int
"""
assert(type(request) in six.integer_types)
assert(type(registration) in six.integer_types)
Message.__init__(self)
self.request = request
self.registration = registration
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Registered.MESSAGE_TYPE)
if len(wmsg) != 3:
raise ProtocolError("invalid message length {0} for REGISTERED".format(len(wmsg)))
request = check_or_raise_id(wmsg[1], "'request' in REGISTERED")
registration = check_or_raise_id(wmsg[2], "'registration' in REGISTERED")
obj = Registered(request, registration)
return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
return [Registered.MESSAGE_TYPE, self.request, self.registration]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP REGISTERED Message (request = {0}, registration = {1})".format(self.request, self.registration)
class Unregister(Message):
"""
A WAMP `UNREGISTER` message.
Format: ``[UNREGISTER, Request|id, REGISTERED.Registration|id]``
"""
MESSAGE_TYPE = 66
"""
The WAMP message code for this type of message.
"""
def __init__(self, request, registration):
"""
:param request: The WAMP request ID of this request.
:type request: int
:param registration: The registration ID for the registration to unregister.
:type registration: int
"""
assert(type(request) in six.integer_types)
assert(type(registration) in six.integer_types)
Message.__init__(self)
self.request = request
self.registration = registration
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Unregister.MESSAGE_TYPE)
if len(wmsg) != 3:
raise ProtocolError("invalid message length {0} for WAMP UNREGISTER".format(len(wmsg)))
request = check_or_raise_id(wmsg[1], "'request' in UNREGISTER")
registration = check_or_raise_id(wmsg[2], "'registration' in UNREGISTER")
obj = Unregister(request, registration)
return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
return [Unregister.MESSAGE_TYPE, self.request, self.registration]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP UNREGISTER Message (request = {0}, registration = {1})".format(self.request, self.registration)
class Unregistered(Message):
"""
A WAMP ``UNREGISTERED`` message.
Format: ``[UNREGISTERED, UNREGISTER.Request|id]``
"""
MESSAGE_TYPE = 67
"""
The WAMP message code for this type of message.
"""
def __init__(self, request):
"""
:param request: The request ID of the original ``UNREGISTER`` request.
:type request: int
"""
assert(type(request) in six.integer_types)
Message.__init__(self)
self.request = request
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Unregistered.MESSAGE_TYPE)
if len(wmsg) != 2:
raise ProtocolError("invalid message length {0} for UNREGISTER".format(len(wmsg)))
request = check_or_raise_id(wmsg[1], "'request' in UNREGISTER")
obj = Unregistered(request)
return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
return [Unregistered.MESSAGE_TYPE, self.request]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP UNREGISTER Message (request = {0})".format(self.request)
class Invocation(Message):
"""
A WAMP ``INVOCATION`` message.
Formats:
* ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict]``
* ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict, CALL.Arguments|list]``
* ``[INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict, CALL.Arguments|list, CALL.ArgumentsKw|dict]``
"""
MESSAGE_TYPE = 68
"""
The WAMP message code for this type of message.
"""
def __init__(self,
request,
registration,
args = None,
kwargs = None,
timeout = None,
receive_progress = None,
caller = None,
authid = None,
authrole = None,
authmethod = None):
"""
:param request: The WAMP request ID of this request.
:type request: int
:param registration: The registration ID of the endpoint to be invoked.
:type registration: int
:param args: Positional values for application-defined event payload.
Must be serializable using any serializers in use.
:type args: list or tuple or None
:param kwargs: Keyword values for application-defined event payload.
Must be serializable using any serializers in use.
:type kwargs: dict or None
      :param timeout: If present, let the callee automatically cancel
the invocation after this ms.
:type timeout: int or None
:param receive_progress: Indicates if the callee should produce progressive results.
:type receive_progress: bool or None
:param caller: The WAMP session ID of the caller.
:type caller: int or None
:param authid: The authentication ID of the caller.
:type authid: unicode or None
:param authrole: The authentication role of the caller.
:type authrole: unicode or None
:param authmethod: The authentication method under which the caller was authenticated.
:type authmethod: unicode or None
"""
assert(type(request) in six.integer_types)
assert(type(registration) in six.integer_types)
assert(args is None or type(args) in [list, tuple])
assert(kwargs is None or type(kwargs) == dict)
assert(timeout is None or type(timeout) in six.integer_types)
assert(receive_progress is None or type(receive_progress) == bool)
assert(caller is None or type(caller) in six.integer_types)
assert(authid is None or type(authid) == six.text_type)
assert(authrole is None or type(authrole) == six.text_type)
assert(authmethod is None or type(authmethod) == six.text_type)
Message.__init__(self)
self.request = request
self.registration = registration
self.args = args
self.kwargs = kwargs
self.timeout = timeout
self.receive_progress = receive_progress
self.caller = caller
self.authid = authid
self.authrole = authrole
self.authmethod = authmethod
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Invocation.MESSAGE_TYPE)
if len(wmsg) not in (4, 5, 6):
raise ProtocolError("invalid message length {0} for INVOCATION".format(len(wmsg)))
request = check_or_raise_id(wmsg[1], "'request' in INVOCATION")
registration = check_or_raise_id(wmsg[2], "'registration' in INVOCATION")
details = check_or_raise_extra(wmsg[3], "'details' in INVOCATION")
args = None
if len(wmsg) > 4:
args = wmsg[4]
if type(args) != list:
raise ProtocolError("invalid type {0} for 'args' in INVOCATION".format(type(args)))
kwargs = None
if len(wmsg) > 5:
kwargs = wmsg[5]
if type(kwargs) != dict:
raise ProtocolError("invalid type {0} for 'kwargs' in INVOCATION".format(type(kwargs)))
timeout = None
if u'timeout' in details:
detail_timeout = details[u'timeout']
if type(detail_timeout) not in six.integer_types:
raise ProtocolError("invalid type {0} for 'timeout' detail in INVOCATION".format(type(detail_timeout)))
if detail_timeout < 0:
raise ProtocolError("invalid value {0} for 'timeout' detail in INVOCATION".format(detail_timeout))
timeout = detail_timeout
receive_progress = None
if u'receive_progress' in details:
detail_receive_progress = details[u'receive_progress']
if type(detail_receive_progress) != bool:
raise ProtocolError("invalid type {0} for 'receive_progress' detail in INVOCATION".format(type(detail_receive_progress)))
receive_progress = detail_receive_progress
caller = None
if u'caller' in details:
detail_caller = details[u'caller']
if type(detail_caller) not in six.integer_types:
raise ProtocolError("invalid type {0} for 'caller' detail in INVOCATION".format(type(detail_caller)))
caller = detail_caller
authid = None
if u'authid' in details:
detail_authid = details[u'authid']
if type(detail_authid) != six.text_type:
raise ProtocolError("invalid type {0} for 'authid' detail in INVOCATION".format(type(detail_authid)))
authid = detail_authid
authrole = None
if u'authrole' in details:
detail_authrole = details[u'authrole']
if type(detail_authrole) != six.text_type:
raise ProtocolError("invalid type {0} for 'authrole' detail in INVOCATION".format(type(detail_authrole)))
authrole = detail_authrole
authmethod = None
if u'authmethod' in details:
detail_authmethod = details[u'authmethod']
if type(detail_authmethod) != six.text_type:
raise ProtocolError("invalid type {0} for 'authmethod' detail in INVOCATION".format(type(detail_authmethod)))
authmethod = detail_authmethod
obj = Invocation(request,
registration,
args = args,
kwargs = kwargs,
timeout = timeout,
receive_progress = receive_progress,
caller = caller,
authid = authid,
authrole = authrole,
authmethod = authmethod)
return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
options = {}
if self.timeout is not None:
options[u'timeout'] = self.timeout
if self.receive_progress is not None:
options[u'receive_progress'] = self.receive_progress
if self.caller is not None:
options[u'caller'] = self.caller
if self.authid is not None:
options[u'authid'] = self.authid
if self.authrole is not None:
options[u'authrole'] = self.authrole
if self.authmethod is not None:
options[u'authmethod'] = self.authmethod
if self.kwargs:
return [Invocation.MESSAGE_TYPE, self.request, self.registration, options, self.args, self.kwargs]
elif self.args:
return [Invocation.MESSAGE_TYPE, self.request, self.registration, options, self.args]
else:
return [Invocation.MESSAGE_TYPE, self.request, self.registration, options]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP INVOCATION Message (request = {0}, registration = {1}, args = {2}, kwargs = {3}, timeout = {4}, receive_progress = {5}, caller = {6}, authid = {7}, authrole = {8}, authmethod = {9})".format(self.request, self.registration, self.args, self.kwargs, self.timeout, self.receive_progress, self.caller, self.authid, self.authrole, self.authmethod)
class Interrupt(Message):
"""
A WAMP ``INTERRUPT`` message.
Format: ``[INTERRUPT, INVOCATION.Request|id, Options|dict]``
"""
MESSAGE_TYPE = 69
"""
The WAMP message code for this type of message.
"""
ABORT = u'abort'
KILL = u'kill'
def __init__(self, request, mode = None):
"""
:param request: The WAMP request ID of the original ``INVOCATION`` to interrupt.
:type request: int
:param mode: Specifies how to interrupt the invocation (``"abort"`` or ``"kill"``).
:type mode: unicode or None
"""
assert(type(request) in six.integer_types)
assert(mode is None or type(mode) == six.text_type)
assert(mode is None or mode in [self.ABORT, self.KILL])
Message.__init__(self)
self.request = request
self.mode = mode
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Interrupt.MESSAGE_TYPE)
if len(wmsg) != 3:
raise ProtocolError("invalid message length {0} for INTERRUPT".format(len(wmsg)))
request = check_or_raise_id(wmsg[1], "'request' in INTERRUPT")
options = check_or_raise_extra(wmsg[2], "'options' in INTERRUPT")
## options
##
mode = None
if u'mode' in options:
option_mode = options[u'mode']
if type(option_mode) != six.text_type:
raise ProtocolError("invalid type {0} for 'mode' option in INTERRUPT".format(type(option_mode)))
if option_mode not in [Interrupt.ABORT, Interrupt.KILL]:
raise ProtocolError("invalid value '{0}' for 'mode' option in INTERRUPT".format(option_mode))
mode = option_mode
obj = Interrupt(request, mode = mode)
return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
options = {}
if self.mode is not None:
options[u'mode'] = self.mode
return [Interrupt.MESSAGE_TYPE, self.request, options]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP INTERRUPT Message (request = {0}, mode = '{1}')".format(self.request, self.mode)
class Yield(Message):
"""
A WAMP ``YIELD`` message.
Formats:
* ``[YIELD, INVOCATION.Request|id, Options|dict]``
* ``[YIELD, INVOCATION.Request|id, Options|dict, Arguments|list]``
* ``[YIELD, INVOCATION.Request|id, Options|dict, Arguments|list, ArgumentsKw|dict]``
"""
MESSAGE_TYPE = 70
"""
The WAMP message code for this type of message.
"""
def __init__(self, request, args = None, kwargs = None, progress = None):
"""
:param request: The WAMP request ID of the original call.
:type request: int
:param args: Positional values for application-defined event payload.
Must be serializable using any serializers in use.
:type args: list or tuple or None
:param kwargs: Keyword values for application-defined event payload.
Must be serializable using any serializers in use.
:type kwargs: dict or None
:param progress: If ``True``, this result is a progressive invocation result, and subsequent
results (or a final error) will follow.
:type progress: bool or None
"""
assert(type(request) in six.integer_types)
assert(args is None or type(args) in [list, tuple])
assert(kwargs is None or type(kwargs) == dict)
assert(progress is None or type(progress) == bool)
Message.__init__(self)
self.request = request
self.args = args
self.kwargs = kwargs
self.progress = progress
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
## this should already be verified by WampSerializer.unserialize
##
assert(len(wmsg) > 0 and wmsg[0] == Yield.MESSAGE_TYPE)
if len(wmsg) not in (3, 4, 5):
raise ProtocolError("invalid message length {0} for YIELD".format(len(wmsg)))
request = check_or_raise_id(wmsg[1], "'request' in YIELD")
options = check_or_raise_extra(wmsg[2], "'options' in YIELD")
args = None
if len(wmsg) > 3:
args = wmsg[3]
if type(args) != list:
raise ProtocolError("invalid type {0} for 'args' in YIELD".format(type(args)))
kwargs = None
if len(wmsg) > 4:
kwargs = wmsg[4]
if type(kwargs) != dict:
raise ProtocolError("invalid type {0} for 'kwargs' in YIELD".format(type(kwargs)))
progress = None
if u'progress' in options:
option_progress = options[u'progress']
if type(option_progress) != bool:
raise ProtocolError("invalid type {0} for 'progress' option in YIELD".format(type(option_progress)))
progress = option_progress
obj = Yield(request, args = args, kwargs = kwargs, progress = progress)
return obj
def marshal(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.marshal`
"""
options = {}
if self.progress is not None:
options[u'progress'] = self.progress
if self.kwargs:
return [Yield.MESSAGE_TYPE, self.request, options, self.args, self.kwargs]
elif self.args:
return [Yield.MESSAGE_TYPE, self.request, options, self.args]
else:
return [Yield.MESSAGE_TYPE, self.request, options]
def __str__(self):
"""
Implements :func:`autobahn.wamp.interfaces.IMessage.__str__`
"""
return "WAMP YIELD Message (request = {0}, args = {1}, kwargs = {2}, progress = {3})".format(self.request, self.args, self.kwargs, self.progress)
| 31.212635 | 360 | 0.633132 | [
"BSD-3-Clause"
] | Crimson-MITK-ThirdParty/VTK-7.0.0 | ThirdParty/AutobahnPython/autobahn/wamp/message.py | 86,459 | Python |
# parse list of objects
import csv
file = "basic_objects.txt"
objects = []
with open(file) as f:
for line in f:
        if line.startswith('//') or not line.strip(): # skip comments and empty lines
pass
else:
obj = line.rstrip() # strip Newlines
obj = obj.capitalize()
o = [o.capitalize() for o in obj.split(' ')] # Capitalize every word
obj = ' '.join(o)
objects.append(obj)
nice_objects = sorted(set(objects))
print(nice_objects[0:10], "............", nice_objects[-10:-1])
print(len(nice_objects))
def print_dupes(objs): # test for dupes
last = None
    for n in objs:
if n == last:
print(n)
last = n
# Write out txt list
# with open("cleaned_basic_objects.txt", 'wb') as csvfile:
# writer = csv.writer(csvfile)
# writer.writerows(n.split(',') for n in nice_objects) # idk why you need the split
| 30 | 91 | 0.523529 | [
"MIT"
] | rckmnt/How-Big-Is-A-Thing- | py/parse_sort_objects.py | 1,020 | Python |