ext (stringclasses, 9 values) | sha (stringlengths, 40 to 40) | content (stringlengths, 3 to 1.04M) |
---|---|---|
py | 1a3d26bca3a5343a2950d0c053acd117a7910146 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Test HTTP Driver.
# Author: Frank Warmerdam <[email protected]>
#
###############################################################################
# Copyright (c) 2007, Frank Warmerdam <[email protected]>
# Copyright (c) 2009-2012, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import os
import sys
from osgeo import gdal
from osgeo import ogr
sys.path.append( '../pymod' )
import gdaltest
###############################################################################
# Verify we have the driver.
def http_1():
gdaltest.dods_drv = None
try:
drv = gdal.GetDriverByName( 'HTTP' )
except:
drv = None
if drv is None:
return 'skip'
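    # The DODS driver can also claim http:// URLs, so it is deregistered here to
    # keep it out of the way; http_cleanup() re-registers it at the end of the run.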
try:
gdaltest.dods_drv = gdal.GetDriverByName( 'DODS' )
if gdaltest.dods_drv is not None:
gdaltest.dods_drv.Deregister()
except:
gdaltest.dods_drv = None
tst = gdaltest.GDALTest( 'PNG','http://gdal.org/gdalicon.png',
1, 7617, filename_absolute = 1 )
ret = tst.testOpen()
if ret == 'fail':
conn = gdaltest.gdalurlopen('http://gdal.org/gdalicon.png')
if conn is None:
print('cannot open URL')
return 'skip'
conn.close()
return ret
###############################################################################
# Verify /vsicurl (subversion file listing)
def http_2():
try:
drv = gdal.GetDriverByName( 'HTTP' )
except:
drv = None
if drv is None:
return 'skip'
tst = gdaltest.GDALTest( 'GTiff','/vsicurl/http://svn.osgeo.org/gdal/trunk/autotest/gcore/data/byte.tif',
1, 4672, filename_absolute = 1 )
ret = tst.testOpen()
if ret == 'fail':
conn = gdaltest.gdalurlopen('http://svn.osgeo.org/gdal/trunk/autotest/gcore/data/byte.tif')
if conn is None:
print('cannot open URL')
return 'skip'
conn.close()
return ret
###############################################################################
# Verify /vsicurl (apache file listing)
def http_3():
try:
drv = gdal.GetDriverByName( 'HTTP' )
except:
drv = None
if drv is None:
return 'skip'
gdal.SetConfigOption('GDAL_HTTP_TIMEOUT', '5')
ds = gdal.Open('/vsicurl/http://download.osgeo.org/gdal/data/ehdr/elggll.bil')
gdal.SetConfigOption('GDAL_HTTP_TIMEOUT', None)
if ds is None:
conn = gdaltest.gdalurlopen('http://download.osgeo.org/gdal/data/ehdr/elggll.bil')
if conn is None:
print('cannot open URL')
return 'skip'
conn.close()
return 'fail'
return 'success'
###############################################################################
# Verify /vsicurl (ftp)
def http_4_old():
try:
drv = gdal.GetDriverByName( 'HTTP' )
except:
drv = None
if drv is None:
return 'skip'
ds = gdal.Open('/vsicurl/ftp://ftp2.cits.rncan.gc.ca/pub/cantopo/250k_tif/MCR2010_01.tif')
if ds is None:
        # Workaround for an unexplained failure on Tamas' test machine; the test
        # works fine with his builds on other machines.
        # This heuristic might be fragile!
if "GDAL_DATA" in os.environ and os.environ["GDAL_DATA"].find("E:\\builds\\..\\sdk\\") == 0:
return 'skip'
conn = gdaltest.gdalurlopen('ftp://ftp2.cits.rncan.gc.ca/pub/cantopo/250k_tif/MCR2010_01.tif')
if conn is None:
print('cannot open URL')
return 'skip'
conn.close()
return 'fail'
filelist = ds.GetFileList()
if filelist[0] != '/vsicurl/ftp://ftp2.cits.rncan.gc.ca/pub/cantopo/250k_tif/MCR2010_01.tif':
print(filelist)
return 'fail'
return 'success'
###############################################################################
# Verify /vsicurl (ftp)
def http_4():
try:
drv = gdal.GetDriverByName( 'HTTP' )
except:
drv = None
if drv is None:
return 'skip'
ds = gdal.Open('/vsicurl/ftp://download.osgeo.org/gdal/data/gtiff/utm.tif')
if ds is None:
conn = gdaltest.gdalurlopen('ftp://download.osgeo.org/gdal/data/gtiff/utm.tif', timeout = 4)
if conn is None:
print('cannot open URL')
return 'skip'
try:
conn.read()
except:
print('cannot read')
return 'skip'
conn.close()
if sys.platform == 'darwin' and gdal.GetConfigOption('TRAVIS', None) is not None:
print("Fails on MacOSX Travis sometimes. Not sure why.")
return 'skip'
gdaltest.post_reason('fail')
return 'fail'
filelist = ds.GetFileList()
if '/vsicurl/ftp://download.osgeo.org/gdal/data/gtiff/utm.tif' not in filelist:
print(filelist)
gdaltest.post_reason('fail')
return 'fail'
return 'success'
###############################################################################
# Test HTTP driver with non VSIL driver
def http_5():
try:
drv = gdal.GetDriverByName( 'HTTP' )
except:
drv = None
if drv is None:
return 'skip'
ds = gdal.Open('http://svn.osgeo.org/gdal/trunk/autotest/gdrivers/data/s4103.blx')
if ds is None:
conn = gdaltest.gdalurlopen('http://svn.osgeo.org/gdal/trunk/autotest/gdrivers/data/s4103.blx')
if conn is None:
print('cannot open URL')
return 'skip'
try:
conn.read()
except:
print('cannot read')
return 'skip'
conn.close()
gdaltest.post_reason('fail')
return 'fail'
filename = ds.GetDescription()
ds = None
try:
os.stat(filename)
gdaltest.post_reason('file %s should have been removed' % filename)
return 'fail'
except:
pass
return 'success'
###############################################################################
# Test HTTP driver with OGR driver
def http_6():
try:
drv = gdal.GetDriverByName( 'HTTP' )
except:
drv = None
if drv is None:
return 'skip'
ds = ogr.Open('http://svn.osgeo.org/gdal/trunk/autotest/ogr/data/test.jml')
if ds is None:
conn = gdaltest.gdalurlopen('http://svn.osgeo.org/gdal/trunk/autotest/ogr/data/test.jml')
if conn is None:
print('cannot open URL')
return 'skip'
try:
conn.read()
except:
print('cannot read')
return 'skip'
conn.close()
gdaltest.post_reason('fail')
return 'fail'
ds = None
return 'success'
###############################################################################
#
def http_cleanup():
if gdaltest.dods_drv is not None:
gdaltest.dods_drv.Register()
gdaltest.dods_drv = None
return 'success'
gdaltest_list = [ http_1,
http_2,
http_3,
#http_4_old,
http_4,
http_5,
http_6,
http_cleanup ]
if __name__ == '__main__':
gdaltest.setup_run( 'http' )
gdaltest.run_tests( gdaltest_list )
gdaltest.summarize()
|
py | 1a3d26f460fcdbbfa9dbcec488d4c6aba8e1b78c | # terrascript/systemd/__init__.py
# Automatically generated by tools/makecode.py ()
import warnings
warnings.warn(
"using the 'legacy layout' is deprecated", DeprecationWarning, stacklevel=2
)
import terrascript
class systemd(terrascript.Provider):
pass
|
py | 1a3d287eeb6c2cb3070f1aa7157b006e9aa857f5 | # Copyright (c) Facebook, Inc. and its affiliates.
import torch
from torch.nn import functional as F
from detectron2.structures import Instances, ROIMasks
# perhaps should rename to "resize_instance"
def detector_postprocess(
results: Instances, output_height: int, output_width: int, mask_threshold: float = 0.5
):
"""
Resize the output instances.
The input images are often resized when entering an object detector.
As a result, we often need the outputs of the detector in a different
resolution from its inputs.
This function will resize the raw outputs of an R-CNN detector
to produce outputs according to the desired output resolution.
Args:
results (Instances): the raw outputs from the detector.
`results.image_size` contains the input image resolution the detector sees.
This object might be modified in-place.
output_height, output_width: the desired output resolution.
Returns:
Instances: the resized output from the model, based on the output resolution
"""
# Change to 'if is_tracing' after PT1.7
if isinstance(output_height, torch.Tensor):
# Converts integer tensors to float temporaries to ensure true
# division is performed when computing scale_x and scale_y.
output_width_tmp = output_width.float()
output_height_tmp = output_height.float()
new_size = torch.stack([output_height, output_width])
else:
new_size = (output_height, output_width)
output_width_tmp = output_width
output_height_tmp = output_height
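    # Scale factors map detector-input coordinates (results.image_size) to the
    # requested output resolution.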
scale_x, scale_y = (
output_width_tmp / results.image_size[1],
output_height_tmp / results.image_size[0],
)
results = Instances(new_size, **results.get_fields())
if results.has("pred_boxes"):
output_boxes = results.pred_boxes
elif results.has("proposal_boxes"):
output_boxes = results.proposal_boxes
else:
output_boxes = None
assert output_boxes is not None, "Predictions must contain boxes!"
output_boxes.scale(scale_x, scale_y)
output_boxes.clip(results.image_size)
results = results[output_boxes.nonempty()]
if results.has("pred_masks"):
if isinstance(results.pred_masks, ROIMasks):
roi_masks = results.pred_masks
else:
# pred_masks is a tensor of shape (N, 1, M, M)
roi_masks = ROIMasks(results.pred_masks[:, 0, :, :])
results.pred_masks = roi_masks.to_bitmasks(
results.pred_boxes, output_height, output_width, mask_threshold
).tensor # TODO return ROIMasks/BitMask object in the future
if results.has("pred_keypoints"):
results.pred_keypoints[:, :, 0] *= scale_x
results.pred_keypoints[:, :, 1] *= scale_y
return results
def sem_seg_postprocess(result, img_size, output_height, output_width):
"""
Return semantic segmentation predictions in the original resolution.
    The input images are often resized when entering the semantic segmentor. Moreover, in some
    cases, they are also padded inside the segmentor to be divisible by the maximum network stride.
As a result, we often need the predictions of the segmentor in a different
resolution from its inputs.
Args:
result (Tensor): semantic segmentation prediction logits. A tensor of shape (C, H, W),
where C is the number of classes, and H, W are the height and width of the prediction.
img_size (tuple): image size that segmentor is taking as input.
output_height, output_width: the desired output resolution.
Returns:
semantic segmentation prediction (Tensor): A tensor of the shape
(C, output_height, output_width) that contains per-pixel soft predictions.
"""
result = result[:, : img_size[0], : img_size[1]].expand(1, -1, -1, -1)
result = F.interpolate(
result, size=(output_height, output_width), mode="bilinear", align_corners=False
)[0]
return result
|
py | 1a3d28abe4609b2d418f65cb5c8a72e40e333d4a | """Platform to present any Tuya DP as a binary sensor."""
import logging
from functools import partial
import voluptuous as vol
from homeassistant.components.binary_sensor import (
DEVICE_CLASSES_SCHEMA,
DOMAIN,
BinarySensorEntity,
)
from homeassistant.const import CONF_DEVICE_CLASS
from .common import LocalTuyaEntity, async_setup_entry
_LOGGER = logging.getLogger(__name__)
CONF_STATE_ON = "state_on"
CONF_STATE_OFF = "state_off"
def flow_schema(dps):
"""Return schema used in config flow."""
return {
vol.Required(CONF_STATE_ON, default="True"): str,
vol.Required(CONF_STATE_OFF, default="False"): str,
vol.Optional(CONF_DEVICE_CLASS): DEVICE_CLASSES_SCHEMA,
}
class LocaltuyaBinarySensor(LocalTuyaEntity, BinarySensorEntity):
"""Representation of a Tuya binary sensor."""
def __init__(
self,
device,
config_entry,
sensorid,
**kwargs,
):
"""Initialize the Tuya binary sensor."""
super().__init__(device, config_entry, sensorid, _LOGGER, **kwargs)
self._is_on = False
@property
def is_on(self):
"""Return sensor state."""
return self._is_on
@property
def device_class(self):
"""Return the class of this device."""
return self._config.get(CONF_DEVICE_CLASS)
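    # Compare the raw datapoint value (as a lowercased string) against the
    # configured on/off patterns; anything else is logged as an unmatched state.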
def status_updated(self):
"""Device status was updated."""
state = str(self.dps(self._dp_id)).lower()
if state == self._config[CONF_STATE_ON].lower():
self._is_on = True
elif state == self._config[CONF_STATE_OFF].lower():
self._is_on = False
else:
self.warning(
"State for entity %s did not match state patterns", self.entity_id
)
async_setup_entry = partial(
async_setup_entry, DOMAIN, LocaltuyaBinarySensor, flow_schema
)
|
py | 1a3d2996febf453676b0c4741fe0c0d3c8d23d71 | # -*- coding: utf-8 -*-
import argparse
import os
from pprint import pprint
import subprocess
import sys
# input
parser = argparse.ArgumentParser()
parser.add_argument('-in', dest="INPUT_FILES", default="path/to/*.mp4", help="Input media file pattern")
parser.add_argument('-width', dest="TARGET_WIDTH", default=640, type=int, help="Target width")
parser.add_argument('-height', dest="TARGET_HEIGHT", default=360, type=int, help="Target height")
parser.add_argument('-out', dest="OUTPUT_FILE", default="output/%s.mp4", help="Media output file pattern")
a = parser.parse_args()
from lib.io_utils import *
makeDirectories([a.OUTPUT_FILE])
filenames = getFilenames(a.INPUT_FILES)
for infile in filenames:
basefn = getBasename(infile)
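    # Build an ffmpeg command: -y overwrites existing output, and the scale
    # filter resizes each clip to the requested width/height.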
command = ['ffmpeg',
'-y',
'-i', infile,
'-vf', 'scale=%s:%s' % (a.TARGET_WIDTH, a.TARGET_HEIGHT),
a.OUTPUT_FILE % basefn]
print(" ".join(command))
finished = subprocess.check_call(command)
print("Done.")
|
py | 1a3d2acdaf5fc7a7ca8897e2ad18779290cad59f | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova.scheduler.filters import aggregate_instance_extra_specs as agg_specs
from nova import test
from nova.tests.unit.scheduler import fakes
@mock.patch('nova.scheduler.filters.utils.aggregate_metadata_get_by_host')
class TestAggregateInstanceExtraSpecsFilter(test.NoDBTestCase):
def setUp(self):
super(TestAggregateInstanceExtraSpecsFilter, self).setUp()
self.filt_cls = agg_specs.AggregateInstanceExtraSpecsFilter()
def test_aggregate_filter_passes_no_extra_specs(self, agg_mock):
capabilities = {'opt1': 1, 'opt2': 2}
filter_properties = {'context': mock.sentinel.ctx, 'instance_type':
{'memory_mb': 1024}}
host = fakes.FakeHostState('host1', 'node1', capabilities)
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
self.assertFalse(agg_mock.called)
def _do_test_aggregate_filter_extra_specs(self, especs, passes):
filter_properties = {'context': mock.sentinel.ctx,
'instance_type': {'memory_mb': 1024, 'extra_specs': especs}}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024})
assertion = self.assertTrue if passes else self.assertFalse
assertion(self.filt_cls.host_passes(host, filter_properties))
def test_aggregate_filter_passes_extra_specs_simple(self, agg_mock):
agg_mock.return_value = {'opt1': '1', 'opt2': '2'}
especs = {
# Un-scoped extra spec
'opt1': '1',
# Scoped extra spec that applies to this filter
'aggregate_instance_extra_specs:opt2': '2',
# Scoped extra spec that does not apply to this filter
'trust:trusted_host': 'true',
}
self._do_test_aggregate_filter_extra_specs(especs, passes=True)
def test_aggregate_filter_passes_with_key_same_as_scope(self, agg_mock):
agg_mock.return_value = {'aggregate_instance_extra_specs': '1'}
especs = {
# Un-scoped extra spec, make sure we don't blow up if it
# happens to match our scope.
'aggregate_instance_extra_specs': '1',
}
self._do_test_aggregate_filter_extra_specs(especs, passes=True)
def test_aggregate_filter_fails_extra_specs_simple(self, agg_mock):
agg_mock.return_value = {'opt1': '1', 'opt2': '2'}
especs = {
'opt1': '1',
'opt2': '222',
'trust:trusted_host': 'true'
}
self._do_test_aggregate_filter_extra_specs(especs, passes=False)
|
py | 1a3d2b61236e2af14f95cd093a700d23bd37cee5 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
VERSION = "21.1.0b1"
|
py | 1a3d2bbc94dabdbabe8233411d4fd6899bde1904 | from typing import Dict, Callable
from optimade.models import (
DataType,
ErrorResponse,
StructureResource,
ReferenceResource,
)
from optimade.server.exceptions import POSSIBLE_ERRORS
__all__ = ("ENTRY_INFO_SCHEMAS", "ERROR_RESPONSES", "retrieve_queryable_properties")
ENTRY_INFO_SCHEMAS: Dict[str, Callable[[None], Dict]] = {
"structures": StructureResource.schema,
"references": ReferenceResource.schema,
}
"""This dictionary is used to define the `/info/<entry_type>` endpoints."""
ERROR_RESPONSES: Dict[int, Dict] = {
err.status_code: {"model": ErrorResponse, "description": err.title}
for err in POSSIBLE_ERRORS
}
def retrieve_queryable_properties(
schema: dict,
queryable_properties: list = None,
entry_type: str = None,
) -> dict:
"""Recursively loops through the schema of a pydantic model and
resolves all references, returning a dictionary of all the
OPTIMADE-queryable properties of that model.
Parameters:
schema: The schema of the pydantic model.
queryable_properties: The list of properties to find in the schema.
entry_type: An optional entry type for the model. Will be used to
lookup schemas for any config-defined fields.
Returns:
A flat dictionary with properties as keys, containing the field
description, unit, sortability, support level, queryability
and type, where provided.
"""
properties = {}
for name, value in schema["properties"].items():
if not queryable_properties or name in queryable_properties:
if "$ref" in value:
path = value["$ref"].split("/")[1:]
sub_schema = schema.copy()
while path:
next_key = path.pop(0)
sub_schema = sub_schema[next_key]
sub_queryable_properties = sub_schema["properties"].keys()
properties.update(
retrieve_queryable_properties(sub_schema, sub_queryable_properties)
)
else:
properties[name] = {"description": value.get("description", "")}
# Update schema with extension keys provided they are not None
for key in [_ for _ in ("unit", "queryable", "support") if _ in value]:
properties[name][key] = value[key]
# All properties are sortable with the MongoDB backend.
# While the result for sorting lists may not be as expected, they are still sorted.
properties[name]["sortable"] = value.get("sortable", True)
# Try to get OpenAPI-specific "format" if possible, else get "type"; a mandatory OpenAPI key.
properties[name]["type"] = DataType.from_json_type(
value.get("format", value.get("type"))
)
# If specified, check the config for any additional well-described provider fields
if entry_type:
from optimade.server.config import CONFIG
described_provider_fields = [
field
for field in CONFIG.provider_fields.get(entry_type, {})
if isinstance(field, dict)
]
for field in described_provider_fields:
name = f"_{CONFIG.provider.prefix}_{field['name']}"
properties[name] = {k: field[k] for k in field if k != "name"}
properties[name]["sortable"] = field.get("sortable", True)
return properties
|
py | 1a3d2e7c0e81e381e7b09bc7f05a93f586b62d3c | from orbitkit import util
extension = util.get_content_type_v1('pdf')
print(extension)
|
py | 1a3d2e9e0a05353f35efdc2676a339f220413c78 | import sys
sys.path.insert(0, '../')
import tornado_dynamodb
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
]
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
# General information about the project.
project = 'tornado-dynamodb'
copyright = '2015, Gavin M. Roy'
author = 'Gavin M. Roy'
release = tornado_dynamodb.__version__
version = '.'.join(release.split('.')[0:1])
language = None
exclude_patterns = ['_build']
pygments_style = 'sphinx'
todo_include_todos = True
html_static_path = ['_static']
htmlhelp_basename = 'tornado-dynamodb'
intersphinx_mapping = {'https://docs.python.org/': None,
'http://www.tornadoweb.org/en/stable/': None}
|
py | 1a3d301f6aae04f573292bf992f8f8e105aa6bf6 | k=int(input())
res=""
for i in range(k):
n,m = map(int, input().split())
res+=(str(1+m*(n-m))+"\n")
print(res,end="")
|
py | 1a3d309c69978f9535c54b84ec0d32aaecb80194 | """empty message
Revision ID: aa989b9b2862
Revises: 7223a3ac4f30
Create Date: 2021-03-29 19:41:56.312406
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'aa989b9b2862'
down_revision = '7223a3ac4f30'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('campaigns',
sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False),
sa.Column('updated_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=50), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('events',
sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False),
sa.Column('updated_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=50), nullable=True),
sa.Column('type', sa.String(length=50), nullable=True),
sa.Column('campaign_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['campaign_id'], ['campaigns.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('events_users',
sa.Column('event_id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['event_id'], ['events.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('event_id', 'user_id')
)
op.create_table('phone_bank_events',
sa.Column('id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['id'], ['events.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('phone_bank_tenants',
sa.Column('phone_bank_event_id', sa.Integer(), nullable=False),
sa.Column('defendant_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['defendant_id'], ['defendants.id'], ),
sa.ForeignKeyConstraint(['phone_bank_event_id'], ['phone_bank_events.id'], ),
sa.PrimaryKeyConstraint('phone_bank_event_id', 'defendant_id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('phone_bank_tenants')
op.drop_table('phone_bank_events')
op.drop_table('events_users')
op.drop_table('events')
op.drop_table('campaigns')
# ### end Alembic commands ###
|
py | 1a3d31642fc373633e5ce70413c2ec0f2a46a9c6 | from setuptools import setup
install_requires = [
r.strip() for r in open('requirements.txt')
if r.strip() and not r.strip().startswith('#')
]
setup(
name="aiokafka_rpc",
version="1.3.1.3",
author='Kostiantyn Andrusenko',
author_email='[email protected]',
description=("RPC over Apache Kafka for Python using asyncio"),
license="Apache Software License",
keywords="aiokafka_rpc",
url="https://github.com/fabregas/aiokafka_rpc",
packages=["aiokafka_rpc"],
install_requires=install_requires,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Topic :: Software Development :: Libraries",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3"
],
)
|
py | 1a3d323009e2c7937aba2071620a813e662cfec4 | import os
from pprint import pprint
import torch
import torch.optim as optim
from torch import nn
import passport_generator
from dataset import prepare_dataset, prepare_wm
from experiments.base import Experiment
from experiments.trainer import Trainer, Tester
from experiments.trainer_private import TesterPrivate
from experiments.utils import construct_passport_kwargs, load_passport_model_to_normal_model, \
load_normal_model_to_passport_model, load_normal_model_to_normal_model
from models.alexnet_normal import AlexNetNormal
from models.alexnet_passport import AlexNetPassport
from models.resnet_normal import ResNet18, ResNet9
from models.resnet_passport import ResNet18Passport, ResNet9Passport
class ClassificationExperiment(Experiment):
def __init__(self, args):
super().__init__(args)
self.in_channels = 1 if self.dataset == 'mnist' else 3
self.num_classes = {
'cifar10': 10,
'cifar100': 100,
'caltech-101': 101,
'caltech-256': 256,
'imagenet1000': 1000
}[self.dataset]
self.train_data, self.valid_data = prepare_dataset(self.args)
self.wm_data = None
if self.use_trigger_as_passport:
self.passport_data = prepare_wm('data/trigger_set/pics', crop=self.imgcrop)
else:
self.passport_data = self.valid_data
if self.train_backdoor:
self.wm_data = prepare_wm('data/trigger_set/pics', crop=self.imgcrop)
self.construct_model()
optimizer = optim.SGD(self.model.parameters(),
lr=self.lr,
momentum=0.9,
weight_decay=0.0001)
if len(self.lr_config[self.lr_config['type']]) != 0: # if no specify steps, then scheduler = None
scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
self.lr_config[self.lr_config['type']],
self.lr_config['gamma'])
else:
scheduler = None
self.trainer = Trainer(self.model, optimizer, scheduler, self.device)
if self.is_tl:
self.finetune_load()
else:
self.makedirs_or_load()
def construct_model(self):
print('Construct Model')
def setup_keys():
if self.key_type != 'random':
pretrained_from_torch = self.pretrained_path is None
if self.arch == 'alexnet':
norm_type = 'none' if pretrained_from_torch else self.norm_type
pretrained_model = AlexNetNormal(self.in_channels,
self.num_classes,
norm_type=norm_type,
pretrained=pretrained_from_torch)
else:
ResNetClass = ResNet18 if self.arch == 'resnet' else ResNet9
norm_type = 'bn' if pretrained_from_torch else self.norm_type
pretrained_model = ResNetClass(num_classes=self.num_classes,
norm_type=norm_type,
pretrained=pretrained_from_torch)
if not pretrained_from_torch:
print('Loading pretrained from self-trained model')
pretrained_model.load_state_dict(torch.load(self.pretrained_path))
else:
print('Loading pretrained from torch-pretrained model')
pretrained_model = pretrained_model.to(self.device)
self.setup_keys(pretrained_model)
def load_pretrained():
if self.pretrained_path is not None:
sd = torch.load(self.pretrained_path)
model.load_state_dict(sd)
if self.train_passport:
passport_kwargs, plkeys = construct_passport_kwargs(self, True)
self.passport_kwargs = passport_kwargs
self.plkeys = plkeys
self.is_baseline = False
print('Loading arch: ' + self.arch)
if self.arch == 'alexnet':
model = AlexNetPassport(self.in_channels, self.num_classes, passport_kwargs)
else:
ResNetPassportClass = ResNet18Passport if self.arch == 'resnet' else ResNet9Passport
model = ResNetPassportClass(num_classes=self.num_classes,
passport_kwargs=passport_kwargs)
self.model = model.to(self.device)
setup_keys()
else: # train normally or train backdoor
print('Loading arch: ' + self.arch)
self.is_baseline = True
if self.arch == 'alexnet':
model = AlexNetNormal(self.in_channels, self.num_classes, self.norm_type)
else:
ResNetClass = ResNet18 if self.arch == 'resnet' else ResNet9
model = ResNetClass(num_classes=self.num_classes, norm_type=self.norm_type)
load_pretrained()
self.model = model.to(self.device)
pprint(self.model)
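    # Sample key images from the passport dataset and register them on the
    # passport model through passport_generator.set_key.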
def setup_keys(self, pretrained_model):
if self.key_type != 'random':
n = 1 if self.key_type == 'image' else 20 # any number will do
key_x, x_inds = passport_generator.get_key(self.passport_data, n)
key_x = key_x.to(self.device)
key_y, y_inds = passport_generator.get_key(self.passport_data, n)
key_y = key_y.to(self.device)
passport_generator.set_key(pretrained_model, self.model,
key_x, key_y)
def transfer_learning(self):
if not self.is_tl:
raise Exception('Please run with --transfer-learning')
is_imagenet = self.num_classes == 1000
self.num_classes = {
'cifar10': 10,
'cifar100': 100,
'caltech-101': 101,
'caltech-256': 256,
'imagenet1000': 1000
}[self.tl_dataset]
##### load clone model #####
print('Loading clone model')
if self.arch == 'alexnet':
tl_model = AlexNetNormal(self.in_channels,
self.num_classes,
self.norm_type,
imagenet=is_imagenet)
else:
tl_model = ResNet18(num_classes=self.num_classes,
norm_type=self.norm_type,
imagenet=is_imagenet)
##### load / reset weights of passport layers for clone model #####
tl_model.to(self.device)
if self.is_baseline: # baseline
load_normal_model_to_normal_model(self.arch, tl_model, self.model)
else:
load_passport_model_to_normal_model(self.arch, self.plkeys, self.model, tl_model)
print(tl_model)
print('Loaded clone model')
##### dataset is created at constructor #####
##### tl scheme setup #####
if self.tl_scheme == 'rtal':
# rtal = reset last layer + train all layer
# ftal = train all layer
try:
if isinstance(tl_model.classifier, nn.Sequential):
tl_model.classifier[-1].reset_parameters()
else:
tl_model.classifier.reset_parameters()
except:
tl_model.linear.reset_parameters()
##### optimizer setup #####
optimizer = optim.SGD(tl_model.parameters(),
lr=self.lr,
momentum=0.9,
weight_decay=0.0005)
if len(self.lr_config[self.lr_config['type']]) != 0: # if no specify steps, then scheduler = None
scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
self.lr_config[self.lr_config['type']],
self.lr_config['gamma'])
else:
scheduler = None
##### training is on finetune model
self.trainer = Trainer(tl_model,
optimizer,
scheduler,
self.device)
##### tester is on original model
tester = Tester(self.model,
self.device)
tester_passport = TesterPrivate(self.model,
self.device)
history_file = os.path.join(self.logdir, 'history.csv')
first = True
best_acc = 0
for ep in range(1, self.epochs + 1):
##### transfer learning on new tasks #####
train_metrics = self.trainer.train(ep, self.train_data)
valid_metrics = self.trainer.test(self.valid_data)
##### load transfer learning weights from clone model #####
if self.is_baseline:
load_normal_model_to_normal_model(self.arch, self.model, tl_model)
else:
load_normal_model_to_passport_model(self.arch, self.plkeys, self.model, tl_model)
tl_model.to(self.device)
self.model.to(self.device)
##### check if using weight of finetuned model is still able to detect trigger set watermark #####
wm_metrics = {}
if self.train_backdoor:
wm_metrics = tester.test(self.wm_data, 'WM Result')
            ##### check if the weights of the finetuned model can still extract the signature correctly #####
if not self.is_baseline and self.train_passport:
res = tester_passport.test_signature()
for key in res: wm_metrics['passport_' + key] = res[key]
##### store results #####
metrics = {}
for key in train_metrics: metrics[f'train_{key}'] = train_metrics[key]
for key in valid_metrics: metrics[f'valid_{key}'] = valid_metrics[key]
for key in wm_metrics: metrics[f'old_wm_{key}'] = wm_metrics[key]
self.append_history(history_file, metrics, first)
first = False
if self.save_interval and ep % self.save_interval == 0:
self.save_model(f'epoch-{ep}.pth')
self.save_model(f'tl-epoch-{ep}.pth', tl_model)
if best_acc < metrics['valid_acc']:
print(f'Found best at epoch {ep}\n')
best_acc = metrics['valid_acc']
self.save_model('best.pth')
self.save_model('tl-best.pth', tl_model)
self.save_last_model()
def training(self):
best_acc = float('-inf')
history_file = os.path.join(self.logdir, 'history.csv')
first = True
if self.save_interval > 0:
self.save_model('epoch-0.pth')
print('Start training')
for ep in range(1, self.epochs + 1):
train_metrics = self.trainer.train(ep, self.train_data, self.wm_data)
print(f'Sign Detection Accuracy: {train_metrics["sign_acc"] * 100:6.4f}')
valid_metrics = self.trainer.test(self.valid_data, 'Testing Result')
wm_metrics = {}
if self.train_backdoor:
wm_metrics = self.trainer.test(self.wm_data, 'WM Result')
metrics = {}
for key in train_metrics: metrics[f'train_{key}'] = train_metrics[key]
for key in valid_metrics: metrics[f'valid_{key}'] = valid_metrics[key]
for key in wm_metrics: metrics[f'wm_{key}'] = wm_metrics[key]
self.append_history(history_file, metrics, first)
first = False
if self.save_interval and ep % self.save_interval == 0:
self.save_model(f'epoch-{ep}.pth')
if best_acc < metrics['valid_acc']:
print(f'Found best at epoch {ep}\n')
best_acc = metrics['valid_acc']
self.save_model('best.pth')
self.save_last_model()
def evaluate(self):
self.trainer.test(self.valid_data)
|
py | 1a3d329c996084e794b30e69053cedd414e5211d | # -*- coding: utf-8 -*-
# Copyright (c) 2018, SELCO Foundation and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
class TestWho(unittest.TestCase):
pass
|
py | 1a3d32e256c16e744520c1a50665416f5d288f93 | import base64
import datetime
import json
import logging
import os
import time
from functools import reduce
import cv2
import gevent
import numpy as np
from flask import (Blueprint, Flask, Response, current_app, jsonify,
make_response, request)
from flask_sockets import Sockets
from peewee import SqliteDatabase, operator, fn, DoesNotExist
from playhouse.shortcuts import model_to_dict
from frigate.const import CLIPS_DIR, RECORD_DIR
from frigate.models import Event
from frigate.stats import stats_snapshot
from frigate.util import calculate_region
from frigate.version import VERSION
logger = logging.getLogger(__name__)
bp = Blueprint('frigate', __name__)
ws = Blueprint('ws', __name__)
class MqttBackend():
"""Interface for registering and updating WebSocket clients."""
def __init__(self, mqtt_client, topic_prefix):
self.clients = list()
self.mqtt_client = mqtt_client
self.topic_prefix = topic_prefix
def register(self, client):
"""Register a WebSocket connection for Mqtt updates."""
self.clients.append(client)
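    # Relay a JSON message received from a websocket client to MQTT, prefixing
    # its topic with the configured topic prefix.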
def publish(self, message):
try:
json_message = json.loads(message)
json_message = {
'topic': f"{self.topic_prefix}/{json_message['topic']}",
'payload': json_message['payload'],
'retain': json_message.get('retain', False)
}
except:
logger.warning("Unable to parse websocket message as valid json.")
return
logger.debug(f"Publishing mqtt message from websockets at {json_message['topic']}.")
self.mqtt_client.publish(json_message['topic'], json_message['payload'], retain=json_message['retain'])
def run(self):
def send(client, userdata, message):
"""Sends mqtt messages to clients."""
try:
logger.debug(f"Received mqtt message on {message.topic}.")
ws_message = json.dumps({
'topic': message.topic.replace(f"{self.topic_prefix}/",""),
'payload': message.payload.decode()
})
except:
# if the payload can't be decoded don't relay to clients
logger.debug(f"MQTT payload for {message.topic} wasn't text. Skipping...")
return
for client in self.clients:
try:
client.send(ws_message)
except:
logger.debug("Removing websocket client due to a closed connection.")
self.clients.remove(client)
self.mqtt_client.message_callback_add(f"{self.topic_prefix}/#", send)
def start(self):
"""Maintains mqtt subscription in the background."""
gevent.spawn(self.run)
def create_app(frigate_config, database: SqliteDatabase, stats_tracking, detected_frames_processor, mqtt_client):
app = Flask(__name__)
sockets = Sockets(app)
@app.before_request
def _db_connect():
database.connect()
@app.teardown_request
def _db_close(exc):
if not database.is_closed():
database.close()
app.frigate_config = frigate_config
app.stats_tracking = stats_tracking
app.detected_frames_processor = detected_frames_processor
app.register_blueprint(bp)
sockets.register_blueprint(ws)
app.mqtt_backend = MqttBackend(mqtt_client, frigate_config.mqtt.topic_prefix)
app.mqtt_backend.start()
return app
@bp.route('/')
def is_healthy():
return "Frigate is running. Alive and healthy!"
@bp.route('/events/summary')
def events_summary():
has_clip = request.args.get('has_clip', type=int)
has_snapshot = request.args.get('has_snapshot', type=int)
clauses = []
if not has_clip is None:
clauses.append((Event.has_clip == has_clip))
if not has_snapshot is None:
clauses.append((Event.has_snapshot == has_snapshot))
if len(clauses) == 0:
clauses.append((1 == 1))
groups = (
Event
.select(
Event.camera,
Event.label,
fn.strftime('%Y-%m-%d', fn.datetime(Event.start_time, 'unixepoch', 'localtime')).alias('day'),
Event.zones,
fn.COUNT(Event.id).alias('count')
)
.where(reduce(operator.and_, clauses))
.group_by(
Event.camera,
Event.label,
fn.strftime('%Y-%m-%d', fn.datetime(Event.start_time, 'unixepoch', 'localtime')),
Event.zones
)
)
return jsonify([e for e in groups.dicts()])
@bp.route('/events/<id>')
def event(id):
try:
return model_to_dict(Event.get(Event.id == id))
except DoesNotExist:
return "Event not found", 404
@bp.route('/events/<id>/thumbnail.jpg')
def event_thumbnail(id):
format = request.args.get('format', 'ios')
thumbnail_bytes = None
try:
event = Event.get(Event.id == id)
thumbnail_bytes = base64.b64decode(event.thumbnail)
except DoesNotExist:
# see if the object is currently being tracked
try:
for camera_state in current_app.detected_frames_processor.camera_states.values():
if id in camera_state.tracked_objects:
tracked_obj = camera_state.tracked_objects.get(id)
if not tracked_obj is None:
thumbnail_bytes = tracked_obj.get_thumbnail()
except:
return "Event not found", 404
if thumbnail_bytes is None:
return "Event not found", 404
# android notifications prefer a 2:1 ratio
if format == 'android':
jpg_as_np = np.frombuffer(thumbnail_bytes, dtype=np.uint8)
img = cv2.imdecode(jpg_as_np, flags=1)
thumbnail = cv2.copyMakeBorder(img, 0, 0, int(img.shape[1]*0.5), int(img.shape[1]*0.5), cv2.BORDER_CONSTANT, (0,0,0))
ret, jpg = cv2.imencode('.jpg', thumbnail, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
thumbnail_bytes = jpg.tobytes()
response = make_response(thumbnail_bytes)
response.headers['Content-Type'] = 'image/jpg'
return response
@bp.route('/events/<id>/snapshot.jpg')
def event_snapshot(id):
jpg_bytes = None
try:
event = Event.get(Event.id == id)
if not event.has_snapshot:
return "Snapshot not available", 404
# read snapshot from disk
with open(os.path.join(CLIPS_DIR, f"{event.camera}-{id}.jpg"), 'rb') as image_file:
jpg_bytes = image_file.read()
except DoesNotExist:
# see if the object is currently being tracked
try:
for camera_state in current_app.detected_frames_processor.camera_states.values():
if id in camera_state.tracked_objects:
tracked_obj = camera_state.tracked_objects.get(id)
if not tracked_obj is None:
jpg_bytes = tracked_obj.get_jpg_bytes(
timestamp=request.args.get('timestamp', type=int),
bounding_box=request.args.get('bbox', type=int),
crop=request.args.get('crop', type=int),
height=request.args.get('h', type=int)
)
except:
return "Event not found", 404
except:
return "Event not found", 404
response = make_response(jpg_bytes)
response.headers['Content-Type'] = 'image/jpg'
return response
@bp.route('/events')
def events():
limit = request.args.get('limit', 100)
camera = request.args.get('camera')
label = request.args.get('label')
zone = request.args.get('zone')
after = request.args.get('after', type=float)
before = request.args.get('before', type=float)
has_clip = request.args.get('has_clip', type=int)
has_snapshot = request.args.get('has_snapshot', type=int)
include_thumbnails = request.args.get('include_thumbnails', default=1, type=int)
clauses = []
excluded_fields = []
if camera:
clauses.append((Event.camera == camera))
if label:
clauses.append((Event.label == label))
if zone:
clauses.append((Event.zones.cast('text') % f"*\"{zone}\"*"))
if after:
clauses.append((Event.start_time >= after))
if before:
clauses.append((Event.start_time <= before))
if not has_clip is None:
clauses.append((Event.has_clip == has_clip))
if not has_snapshot is None:
clauses.append((Event.has_snapshot == has_snapshot))
if not include_thumbnails:
excluded_fields.append(Event.thumbnail)
if len(clauses) == 0:
clauses.append((1 == 1))
events = (Event.select()
.where(reduce(operator.and_, clauses))
.order_by(Event.start_time.desc())
.limit(limit))
return jsonify([model_to_dict(e, exclude=excluded_fields) for e in events])
@bp.route('/config')
def config():
return jsonify(current_app.frigate_config.to_dict())
@bp.route('/version')
def version():
return VERSION
@bp.route('/stats')
def stats():
stats = stats_snapshot(current_app.stats_tracking)
return jsonify(stats)
@bp.route('/<camera_name>/<label>/best.jpg')
def best(camera_name, label):
if camera_name in current_app.frigate_config.cameras:
best_object = current_app.detected_frames_processor.get_best(camera_name, label)
best_frame = best_object.get('frame')
if best_frame is None:
best_frame = np.zeros((720,1280,3), np.uint8)
else:
best_frame = cv2.cvtColor(best_frame, cv2.COLOR_YUV2BGR_I420)
crop = bool(request.args.get('crop', 0, type=int))
if crop:
box = best_object.get('box', (0,0,300,300))
region = calculate_region(best_frame.shape, box[0], box[1], box[2], box[3], 1.1)
best_frame = best_frame[region[1]:region[3], region[0]:region[2]]
height = int(request.args.get('h', str(best_frame.shape[0])))
width = int(height*best_frame.shape[1]/best_frame.shape[0])
best_frame = cv2.resize(best_frame, dsize=(width, height), interpolation=cv2.INTER_AREA)
ret, jpg = cv2.imencode('.jpg', best_frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
response = make_response(jpg.tobytes())
response.headers['Content-Type'] = 'image/jpg'
return response
else:
return "Camera named {} not found".format(camera_name), 404
@bp.route('/<camera_name>')
def mjpeg_feed(camera_name):
fps = int(request.args.get('fps', '3'))
height = int(request.args.get('h', '360'))
draw_options = {
'bounding_boxes': request.args.get('bbox', type=int),
'timestamp': request.args.get('timestamp', type=int),
'zones': request.args.get('zones', type=int),
'mask': request.args.get('mask', type=int),
'motion_boxes': request.args.get('motion', type=int),
'regions': request.args.get('regions', type=int),
}
if camera_name in current_app.frigate_config.cameras:
# return a multipart response
return Response(imagestream(current_app.detected_frames_processor, camera_name, fps, height, draw_options),
mimetype='multipart/x-mixed-replace; boundary=frame')
else:
return "Camera named {} not found".format(camera_name), 404
@bp.route('/<camera_name>/latest.jpg')
def latest_frame(camera_name):
draw_options = {
'bounding_boxes': request.args.get('bbox', type=int),
'timestamp': request.args.get('timestamp', type=int),
'zones': request.args.get('zones', type=int),
'mask': request.args.get('mask', type=int),
'motion_boxes': request.args.get('motion', type=int),
'regions': request.args.get('regions', type=int),
}
if camera_name in current_app.frigate_config.cameras:
# max out at specified FPS
frame = current_app.detected_frames_processor.get_current_frame(camera_name, draw_options)
if frame is None:
frame = np.zeros((720,1280,3), np.uint8)
height = int(request.args.get('h', str(frame.shape[0])))
width = int(height*frame.shape[1]/frame.shape[0])
frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_AREA)
ret, jpg = cv2.imencode('.jpg', frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
response = make_response(jpg.tobytes())
response.headers['Content-Type'] = 'image/jpg'
return response
else:
return "Camera named {} not found".format(camera_name), 404
@bp.route('/recordings/<camera_name>')
def list_recordings(camera_name):
levels = 3
outFiles = []
outDirs = []
for subdir, dirs, files, curLevel in dirwalklevel(RECORD_DIR, level=levels):
if curLevel == levels:
for file in files:
if pathBelongsToCamera(camera_name, subdir, file):
outFiles.append(sanitiseRecordingPath(subdir, file))
for dir in dirs:
if pathBelongsToCamera(camera_name, subdir, dir):
outDirs.append(sanitiseRecordingPath(subdir, dir))
return jsonify({
"files": outFiles,
"directories": outDirs
})
@bp.route('/recordings/<year_month>/<day>/<hour>/<camera_name>')
def list_recording_files(year_month, day, hour, camera_name):
outFiles = []
try:
levels = 1
pathParts = [RECORD_DIR, year_month, day, hour]
recPath = os.path.join(*pathParts)
for subdir, dirs, files, curLevel in dirwalklevel(recPath, level=levels):
if curLevel == levels:
for file in files:
if pathBelongsToCamera(camera_name, subdir, file):
outFiles.append(sanitiseRecordingPath(subdir, file))
except Exception as e:
print(e)
return jsonify({
"files": outFiles
})
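# Depth-limited variant of os.walk: yields (root, dirs, files, depth) and stops
# descending once the requested level is reached.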
def dirwalklevel(some_dir, level=1):
some_dir = some_dir.rstrip(os.path.sep)
assert os.path.isdir(some_dir)
num_sep = some_dir.count(os.path.sep)
for root, dirs, files in os.walk(some_dir):
num_sep_this = root.count(os.path.sep)
curLevel = (num_sep_this - num_sep)
yield root, dirs, files, curLevel
if num_sep + level <= num_sep_this:
del dirs[:]
def pathBelongsToCamera(camera_name, subdir, file):
return camera_name == "all" or camera_name in file or camera_name in subdir
def sanitiseRecordingPath(subdir, file):
return os.path.join(subdir, file).replace(RECORD_DIR, "")
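# MJPEG generator: yields JPEG-encoded frames for a multipart/x-mixed-replace
# response, throttled to the requested frames per second.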
def imagestream(detected_frames_processor, camera_name, fps, height, draw_options):
while True:
# max out at specified FPS
gevent.sleep(1/fps)
frame = detected_frames_processor.get_current_frame(camera_name, draw_options)
if frame is None:
frame = np.zeros((height,int(height*16/9),3), np.uint8)
width = int(height*frame.shape[1]/frame.shape[0])
frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_LINEAR)
ret, jpg = cv2.imencode('.jpg', frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + jpg.tobytes() + b'\r\n\r\n')
@ws.route('/ws')
def echo_socket(socket):
current_app.mqtt_backend.register(socket)
while not socket.closed:
# Sleep to prevent *constant* context-switches.
gevent.sleep(0.1)
message = socket.receive()
if message:
current_app.mqtt_backend.publish(message)
|
py | 1a3d33dbfaefe663788d3cc1f7ed257dc54a2b82 | from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from engine import Engine
from Entities.entity import Entity
from Map.game_map import GameMap
class BaseComponent:
parent: Entity # Owning entity instance.
@property
def gamemap(self) -> GameMap:
return self.parent.gamemap
@property
def engine(self) -> Engine:
return self.gamemap.engine |
py | 1a3d340a27b597e441d73376af769b2472db157f | # Copyright (c) 2017-2021, Mudita Sp. z.o.o. All rights reserved.
# For licensing, see https://github.com/mudita/MuditaOS/LICENSE.md
import time
import pytest
from harness.interface.defs import key_codes
@pytest.mark.rt1051
@pytest.mark.usefixtures("phone_unlocked")
def test_search_sms(harness, sms_text, phone_number):
body = {"category": "message", "messageBody": sms_text, "phoneNumber": str(phone_number)}
messages = harness.endpoint_request("messages", "get", body)["body"]
assert len(messages) != 0
|
py | 1a3d344907d2d42b74b864820e8e129d3655769b | import pyredner
import torch
# Test the sample pixel center flag
pyredner.set_use_gpu(torch.cuda.is_available())
objects = pyredner.load_obj('scenes/teapot.obj', return_objects=True)
camera = pyredner.automatic_camera_placement(objects, resolution=(128, 128))
scene = pyredner.Scene(camera = camera, objects = objects)
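# sample_pixel_center=True takes every sample at the pixel center (no antialiasing),
# while False jitters samples within each pixel, hence the _no_aa / _with_aa output names.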
img = pyredner.render_albedo(scene, sample_pixel_center = True)
pyredner.imwrite(img.cpu(), 'results/test_sample_pixel_center/img_no_aa.exr')
img = pyredner.render_albedo(scene, sample_pixel_center = False)
pyredner.imwrite(img.cpu(), 'results/test_sample_pixel_center/img_with_aa.exr') |
py | 1a3d34c49f6e120d061ed09f7df651c3989a97de | # -*- coding: utf-8 -*-
"""
Installs and configures puppet
"""
import sys
import logging
import os
import platform
import time
from packstack.installer import utils
from packstack.installer import basedefs, output_messages
from packstack.installer.exceptions import ScriptRuntimeError, PuppetError
from packstack.modules.common import filtered_hosts
from packstack.modules.ospluginutils import manifestfiles
from packstack.modules.puppet import scan_logfile, validate_logfile
#------------------ oVirt installer initialization ------------------
PLUGIN_NAME = "Puppet"
PLUGIN_NAME_COLORED = utils.color_text(PLUGIN_NAME, 'blue')
PUPPET_DIR = os.environ.get('PACKSTACK_PUPPETDIR',
'/usr/share/openstack-puppet/')
MODULE_DIR = os.path.join(PUPPET_DIR, 'modules')
def initConfig(controller):
group = {"GROUP_NAME": "PUPPET",
"DESCRIPTION": "Puppet Config parameters",
"PRE_CONDITION": lambda x: 'yes',
"PRE_CONDITION_MATCH": "yes",
"POST_CONDITION": False,
"POST_CONDITION_MATCH": True}
controller.addGroup(group, [])
def initSequences(controller):
puppetpresteps = [
{'title': 'Clean Up', 'functions': [run_cleanup]},
]
controller.insertSequence("Clean Up", [], [], puppetpresteps, index=0)
puppetsteps = [
{'title': 'Installing Dependencies',
'functions': [install_deps]},
{'title': 'Copying Puppet modules and manifests',
'functions': [copy_puppet_modules]},
{'title': 'Applying Puppet manifests',
'functions': [apply_puppet_manifest]},
{'title': 'Finalizing',
'functions': [finalize]}
]
controller.addSequence("Puppet", [], [], puppetsteps)
#------------------------- helper functions -------------------------
def wait_for_puppet(currently_running, messages):
log_len = 0
twirl = ["-", "\\", "|", "/"]
while currently_running:
for hostname, finished_logfile in currently_running:
log_file = os.path.splitext(os.path.basename(finished_logfile))[0]
space_len = basedefs.SPACE_LEN - len(log_file)
if len(log_file) > log_len:
log_len = len(log_file)
if hasattr(sys.stdout, "isatty") and sys.stdout.isatty():
twirl = twirl[-1:] + twirl[:-1]
sys.stdout.write(("\rTesting if puppet apply is finished: %s"
% log_file).ljust(40 + log_len))
sys.stdout.write("[ %s ]" % twirl[0])
sys.stdout.flush()
try:
# Once a remote puppet run has finished, we retrieve the log
# file and check it for errors
local_server = utils.ScriptRunner()
log = os.path.join(basedefs.PUPPET_MANIFEST_DIR,
os.path.basename(finished_logfile))
log = log.replace(".finished", ".log")
local_server.append('scp -o StrictHostKeyChecking=no '
'-o UserKnownHostsFile=/dev/null '
'root@%s:%s %s'
% (hostname, finished_logfile, log))
                # To avoid polluting the logs, we turn off logging of command execution
local_server.execute(log=False)
# If we got to this point the puppet apply has finished
currently_running.remove((hostname, finished_logfile))
# clean off the last "testing apply" msg
if hasattr(sys.stdout, "isatty") and sys.stdout.isatty():
sys.stdout.write(("\r").ljust(45 + log_len))
except ScriptRuntimeError:
# the test raises an exception if the file doesn't exist yet
# TO-DO: We need to start testing 'e' for unexpected exceptions
time.sleep(3)
continue
# check log file for relevant notices
messages.extend(scan_logfile(log))
# check the log file for errors
sys.stdout.write('\r')
try:
validate_logfile(log)
state = utils.state_message('%s:' % log_file, 'DONE', 'green')
sys.stdout.write('%s\n' % state)
sys.stdout.flush()
except PuppetError:
state = utils.state_message('%s:' % log_file, 'ERROR', 'red')
sys.stdout.write('%s\n' % state)
sys.stdout.flush()
raise
#-------------------------- step functions --------------------------
def run_cleanup(config, messages):
localserver = utils.ScriptRunner()
localserver.append("rm -rf %s/*pp" % basedefs.PUPPET_MANIFEST_DIR)
localserver.execute()
def install_deps(config, messages):
deps = ["puppet", "openssh-clients", "tar", "nc"]
modules_pkg = 'openstack-puppet-modules'
local = utils.ScriptRunner()
local.append('rpm -q --requires %s | egrep -v "^(rpmlib|\/|perl)"'
% modules_pkg)
# This can fail if there are no dependencies other than those
# filtered out by the egrep expression.
rc, modules_deps = local.execute(can_fail=False)
# Modules package might not be installed if we are running from source.
# In this case we assume user knows what (s)he's doing and we don't
# install modules dependencies
if ('%s is not installed' % modules_pkg) not in modules_deps:
modules_deps = [i.strip() for i in modules_deps.split() if i.strip()]
deps.extend(modules_deps)
for hostname in filtered_hosts(config):
server = utils.ScriptRunner(hostname)
packages = ' '.join(deps)
server.append("yum install -y %s"
% packages)
server.append("yum update -y %s"
% packages)
# yum does not fail if one of the packages is missing
for package in deps:
server.append("rpm -q --whatprovides %s" % (package))
server.execute()
def copy_puppet_modules(config, messages):
os_modules = ' '.join(('apache', 'ceilometer', 'certmonger', 'cinder',
'concat', 'firewall', 'glance', 'heat', 'horizon',
'inifile', 'keystone', 'memcached', 'mongodb',
'mysql', 'neutron', 'nova', 'nssdb', 'openstack',
'packstack', 'qpid', 'rabbitmq', 'remote', 'rsync',
'ssh', 'stdlib', 'swift', 'sysctl', 'tempest',
'vcsrepo', 'vlan', 'vswitch', 'xinetd',
'openstacklib'))
# write puppet manifest to disk
manifestfiles.writeManifests()
server = utils.ScriptRunner()
for hostname in filtered_hosts(config):
host_dir = config['HOST_DETAILS'][hostname]['tmpdir']
# copy Packstack manifests
server.append("cd %s/puppet" % basedefs.DIR_PROJECT_DIR)
server.append("cd %s" % basedefs.PUPPET_MANIFEST_DIR)
server.append("tar --dereference -cpzf - ../manifests | "
"ssh -o StrictHostKeyChecking=no "
"-o UserKnownHostsFile=/dev/null "
"root@%s tar -C %s -xpzf -" % (hostname, host_dir))
# copy resources
resources = config.get('RESOURCES', {})
for path, localname in resources.get(hostname, []):
server.append("scp -o StrictHostKeyChecking=no "
"-o UserKnownHostsFile=/dev/null "
"%s root@%s:%s/resources/%s" %
(path, hostname, host_dir, localname))
# copy Puppet modules required by Packstack
server.append("cd %s" % MODULE_DIR)
server.append("tar --dereference -cpzf - %s | "
"ssh -o StrictHostKeyChecking=no "
"-o UserKnownHostsFile=/dev/null "
"root@%s tar -C %s -xpzf -" %
(os_modules, hostname,
os.path.join(host_dir, 'modules')))
server.execute()
def apply_puppet_manifest(config, messages):
if config.get("DRY_RUN"):
return
currently_running = []
lastmarker = None
loglevel = ''
logcmd = False
if logging.root.level <= logging.DEBUG:
loglevel = '--debug'
logcmd = True
for manifest, marker in manifestfiles.getFiles():
# if the marker has changed then we don't want to proceed until
# all of the previous puppet runs have finished
if lastmarker is not None and lastmarker != marker:
wait_for_puppet(currently_running, messages)
lastmarker = marker
for hostname in filtered_hosts(config):
if "%s_" % hostname not in manifest:
continue
host_dir = config['HOST_DETAILS'][hostname]['tmpdir']
print "Applying %s" % manifest
server = utils.ScriptRunner(hostname)
man_path = os.path.join(config['HOST_DETAILS'][hostname]['tmpdir'],
basedefs.PUPPET_MANIFEST_RELATIVE,
manifest)
running_logfile = "%s.running" % man_path
finished_logfile = "%s.finished" % man_path
currently_running.append((hostname, finished_logfile))
server.append("touch %s" % running_logfile)
server.append("chmod 600 %s" % running_logfile)
server.append("export PACKSTACK_VAR_DIR=%s" % host_dir)
cmd = ("( flock %s/ps.lock "
"puppet apply %s --modulepath %s/modules %s > %s "
"2>&1 < /dev/null ; "
"mv %s %s ) > /dev/null 2>&1 < /dev/null &"
% (host_dir, loglevel, host_dir, man_path, running_logfile,
running_logfile, finished_logfile))
server.append(cmd)
server.execute(log=logcmd)
    # wait for outstanding puppet runs before exiting
wait_for_puppet(currently_running, messages)
def finalize(config, messages):
for hostname in filtered_hosts(config):
server = utils.ScriptRunner(hostname)
server.append("installed=$(rpm -q kernel --last | head -n1 | "
"sed 's/kernel-\([a-z0-9\.\_\-]*\).*/\\1/g')")
server.append("loaded=$(uname -r | head -n1)")
server.append('[ "$loaded" == "$installed" ]')
try:
rc, out = server.execute()
except ScriptRuntimeError:
messages.append('Because of the kernel update the host %s '
'requires reboot.' % hostname)
|
py | 1a3d34ca6ebee9c05ab9d6c104495d5579af1bec | import unittest
import json
import yaml
import src.macro
from mock import MagicMock
class TestVPCBuilderTransitGatewaySetup(unittest.TestCase):
identifier = "TEST"
def setUp(self):
self.maxDiff = None
class TestVPCBuilderTransitGateway(TestVPCBuilderTransitGatewaySetup):
def test_base_transitgw_object(self):
resources = {}
outputs = {}
properties = yaml.load("""\
CIDR: 172.16.0.0/20
Details: {VPCName: PRIVATEEGRESSVPC, VPCDesc: Private Egress VPC, Region: ap-southeast-2, IPv6: True}
Tags: {Template: VPC for private endpoints egress only, "info:environment": Staging, "info:owner": Versent}
DHCP: {Name: DhcpOptions, DNSServers: 172.16.0.2, NTPServers: 169.254.169.123, NTBType: 2}
TransitGateways:
Test1:
TransitGatewayId: tgw-01234567890123456
Tags: {Name: PRIVATE-EGRESS-VPC-TGW1, Purpose: Gateway Attach 1}
Subnets:
- Internal1
- Internal2
- Internal3
Test2:
TransitGatewayId: tgw-98765432109876543
Tags: {Name: PRIVATE-EGRESS-VPC-TGW2, Purpose: Gateway Attach 2}
Subnets:
- Internal1
- Internal2
- Internal3
RouteTables:
InternalRT1:
- RouteName: Internal1
RouteCIDR: 10.0.0.0/8
- RouteName: Internal2
RouteCIDR: 192.168.0.0/16
InternalRT2:
- RouteName: Internal1
RouteCIDR: 10.0.0.0/8
- RouteName: Internal2
RouteCIDR: 192.168.0.0/16
InternalRT3:
- RouteName: Internal1
RouteCIDR: 10.0.0.0/8
- RouteName: Internal2
RouteCIDR: 192.168.0.0/16
""", Loader=yaml.FullLoader)
expected = {
'Test2TransitGWAttach': {
'Type': 'AWS::EC2::TransitGatewayAttachment',
'Properties': {
'SubnetIds': [{
'Ref': 'Internal1'
}, {
'Ref': 'Internal2'
}, {
'Ref': 'Internal3'
}],
'VpcId': {
'Ref': 'PRIVATEEGRESSVPC'
},
'TransitGatewayId': 'tgw-98765432109876543',
'Tags': [
{
'Value': 'PRIVATE-EGRESS-VPC-TGW2',
'Key': 'Name'
},
{
'Value': 'Gateway Attach 2',
'Key': 'Purpose'
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
},
{
"Key": "info:environment",
"Value": "Staging"
},
{
"Key": "info:owner",
"Value": "Versent"
}
]
}
},
"Test2InternalRT1Internal1": {
"Type": "AWS::EC2::Route",
"Properties": {
"DestinationCidrBlock": "10.0.0.0/8",
"TransitGatewayId": "tgw-98765432109876543",
"RouteTableId": {
"Ref": "InternalRT1"
}
},
"DependsOn": [
"Test2TransitGWAttach"
]
},
"Test2InternalRT1Internal2": {
"Type": "AWS::EC2::Route",
"Properties": {
"DestinationCidrBlock": "192.168.0.0/16",
"TransitGatewayId": "tgw-98765432109876543",
"RouteTableId": {
"Ref": "InternalRT1"
}
},
"DependsOn": [
"Test2TransitGWAttach"
]
},
"Test2InternalRT2Internal1": {
"Type": "AWS::EC2::Route",
"Properties": {
"DestinationCidrBlock": "10.0.0.0/8",
"TransitGatewayId": "tgw-98765432109876543",
"RouteTableId": {
"Ref": "InternalRT2"
}
},
"DependsOn": [
"Test2TransitGWAttach"
]
},
"Test2InternalRT2Internal2": {
"Type": "AWS::EC2::Route",
"Properties": {
"DestinationCidrBlock": "192.168.0.0/16",
"TransitGatewayId": "tgw-98765432109876543",
"RouteTableId": {
"Ref": "InternalRT2"
}
},
"DependsOn": [
"Test2TransitGWAttach"
]
},
"Test2InternalRT3Internal1": {
"Type": "AWS::EC2::Route",
"Properties": {
"DestinationCidrBlock": "10.0.0.0/8",
"TransitGatewayId": "tgw-98765432109876543",
"RouteTableId": {
"Ref": "InternalRT3"
}
},
"DependsOn": [
"Test2TransitGWAttach"
]
},
"Test2InternalRT3Internal2": {
"Type": "AWS::EC2::Route",
"Properties": {
"DestinationCidrBlock": "192.168.0.0/16",
"TransitGatewayId": "tgw-98765432109876543",
"RouteTableId": {
"Ref": "InternalRT3"
}
},
"DependsOn": [
"Test2TransitGWAttach"
]
},
'Test1TransitGWAttach': {
'Type': 'AWS::EC2::TransitGatewayAttachment',
'Properties': {
'SubnetIds': [{
'Ref': 'Internal1'
}, {
'Ref': 'Internal2'
}, {
'Ref': 'Internal3'
}],
'VpcId': {
'Ref': 'PRIVATEEGRESSVPC'
},
'TransitGatewayId': 'tgw-01234567890123456',
'Tags': [
{
'Value': 'PRIVATE-EGRESS-VPC-TGW1',
'Key': 'Name'
},
{
'Value': 'Gateway Attach 1',
'Key': 'Purpose'
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
},
{
"Key": "info:environment",
"Value": "Staging"
},
{
"Key": "info:owner",
"Value": "Versent"
}
]
}
}
}
actual, outputs = src.macro.buildTransitGateways(properties, resources, outputs, parameters={})
print(json.dumps(actual))
print(json.dumps(expected))
        self.assertEqual(expected, actual)
|
py | 1a3d351e86f4fd17a417ee415db39c64183d34cd | """sync
Revision ID: bb8e91317dc
Revises: 292886e35ea7
Create Date: 2015-10-07 08:47:01.661978
"""
# revision identifiers, used by Alembic.
revision = 'bb8e91317dc'
down_revision = '292886e35ea7'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_index('date_code_ix', table_name='committee_question')
op.create_unique_constraint('date_code_ix', 'committee_question', ['date', 'code'])
op.drop_index('date_deputy_president_number_ix', table_name='committee_question')
op.create_unique_constraint('date_deputy_president_number_ix', 'committee_question', ['date', 'house_id', 'deputy_president_number'])
op.drop_index('date_oral_number_ix', table_name='committee_question')
op.create_unique_constraint('date_oral_number_ix', 'committee_question', ['date', 'house_id', 'oral_number'])
op.drop_index('date_president_number_ix', table_name='committee_question')
op.create_unique_constraint('date_president_number_ix', 'committee_question', ['date', 'house_id', 'president_number'])
op.drop_index('date_written_number_ix', table_name='committee_question')
op.create_unique_constraint('date_written_number_ix', 'committee_question', ['date', 'house_id', 'written_number'])
op.drop_constraint(u'committee_question_minister_id_fkey', 'committee_question', type_='foreignkey')
op.create_foreign_key(None, 'committee_question', 'minister', ['minister_id'], ['id'], ondelete='SET NULL')
op.drop_constraint(u'daily_schedule_file_join_file_id_fkey', 'daily_schedule_file_join', type_='foreignkey')
op.drop_constraint(u'daily_schedule_file_join_daily_schedule_id_fkey', 'daily_schedule_file_join', type_='foreignkey')
op.create_foreign_key(None, 'daily_schedule_file_join', 'daily_schedule', ['daily_schedule_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key(None, 'daily_schedule_file_join', 'file', ['file_id'], ['id'], ondelete='CASCADE')
op.drop_constraint(u'gazette_file_join_gazette_id_fkey', 'gazette_file_join', type_='foreignkey')
op.drop_constraint(u'gazette_file_join_file_id_fkey', 'gazette_file_join', type_='foreignkey')
op.create_foreign_key(None, 'gazette_file_join', 'file', ['file_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key(None, 'gazette_file_join', 'gazette', ['gazette_id'], ['id'], ondelete='CASCADE')
op.drop_constraint(u'policy_document_file_join_policy_document_id_fkey', 'policy_document_file_join', type_='foreignkey')
op.drop_constraint(u'policy_document_file_join_file_id_fkey', 'policy_document_file_join', type_='foreignkey')
op.create_foreign_key(None, 'policy_document_file_join', 'file', ['file_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key(None, 'policy_document_file_join', 'policy_document', ['policy_document_id'], ['id'], ondelete='CASCADE')
op.drop_constraint(u'question_reply_minister_id_fkey', 'question_reply', type_='foreignkey')
op.create_foreign_key(None, 'question_reply', 'minister', ['minister_id'], ['id'], ondelete='SET NULL')
op.drop_constraint(u'tabled_committee_report_file_join_file_id_fkey', 'tabled_committee_report_file_join', type_='foreignkey')
op.drop_constraint(u'tabled_committee_report_file_jo_tabled_committee_report_id_fkey', 'tabled_committee_report_file_join', type_='foreignkey')
op.create_foreign_key(None, 'tabled_committee_report_file_join', 'file', ['file_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key(None, 'tabled_committee_report_file_join', 'tabled_committee_report', ['tabled_committee_report_id'], ['id'], ondelete='CASCADE')
op.drop_constraint(u'user_email_key', 'user', type_='unique')
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_unique_constraint(u'user_email_key', 'user', ['email'])
op.drop_constraint(None, 'tabled_committee_report_file_join', type_='foreignkey')
op.drop_constraint(None, 'tabled_committee_report_file_join', type_='foreignkey')
op.create_foreign_key(u'tabled_committee_report_file_jo_tabled_committee_report_id_fkey', 'tabled_committee_report_file_join', 'tabled_committee_report', ['tabled_committee_report_id'], ['id'])
op.create_foreign_key(u'tabled_committee_report_file_join_file_id_fkey', 'tabled_committee_report_file_join', 'file', ['file_id'], ['id'])
op.drop_constraint(None, 'question_reply', type_='foreignkey')
op.create_foreign_key(u'question_reply_minister_id_fkey', 'question_reply', 'minister', ['minister_id'], ['id'])
op.drop_constraint(None, 'policy_document_file_join', type_='foreignkey')
op.drop_constraint(None, 'policy_document_file_join', type_='foreignkey')
op.create_foreign_key(u'policy_document_file_join_file_id_fkey', 'policy_document_file_join', 'file', ['file_id'], ['id'])
op.create_foreign_key(u'policy_document_file_join_policy_document_id_fkey', 'policy_document_file_join', 'policy_document', ['policy_document_id'], ['id'])
op.drop_constraint(None, 'gazette_file_join', type_='foreignkey')
op.drop_constraint(None, 'gazette_file_join', type_='foreignkey')
op.create_foreign_key(u'gazette_file_join_file_id_fkey', 'gazette_file_join', 'file', ['file_id'], ['id'])
op.create_foreign_key(u'gazette_file_join_gazette_id_fkey', 'gazette_file_join', 'gazette', ['gazette_id'], ['id'])
op.drop_constraint(None, 'daily_schedule_file_join', type_='foreignkey')
op.drop_constraint(None, 'daily_schedule_file_join', type_='foreignkey')
op.create_foreign_key(u'daily_schedule_file_join_daily_schedule_id_fkey', 'daily_schedule_file_join', 'daily_schedule', ['daily_schedule_id'], ['id'])
op.create_foreign_key(u'daily_schedule_file_join_file_id_fkey', 'daily_schedule_file_join', 'file', ['file_id'], ['id'])
op.drop_constraint(None, 'committee_question', type_='foreignkey')
op.create_foreign_key(u'committee_question_minister_id_fkey', 'committee_question', 'minister', ['minister_id'], ['id'])
op.drop_constraint('date_written_number_ix', 'committee_question', type_='unique')
op.create_index('date_written_number_ix', 'committee_question', ['date', 'house_id', 'written_number'], unique=True)
op.drop_constraint('date_president_number_ix', 'committee_question', type_='unique')
op.create_index('date_president_number_ix', 'committee_question', ['date', 'house_id', 'president_number'], unique=True)
op.drop_constraint('date_oral_number_ix', 'committee_question', type_='unique')
op.create_index('date_oral_number_ix', 'committee_question', ['date', 'house_id', 'oral_number'], unique=True)
op.drop_constraint('date_deputy_president_number_ix', 'committee_question', type_='unique')
op.create_index('date_deputy_president_number_ix', 'committee_question', ['date', 'house_id', 'deputy_president_number'], unique=True)
op.drop_constraint('date_code_ix', 'committee_question', type_='unique')
op.create_index('date_code_ix', 'committee_question', ['date', 'code'], unique=True)
### end Alembic commands ###
|
py | 1a3d35612d9dcaa2f8a39d952dfb7b8dcb420e76 | # pylint: skip-file
def main():
'''
ansible git module for committing
'''
module = AnsibleModule(
argument_spec=dict(
state=dict(default='present', type='str', choices=['present']),
msg=dict(default=None, required=True, type='str'),
path=dict(default=None, required=True, type='str'),
author=dict(default=None, required=False, type='str'),
commit_files=dict(default=None, required=False, type='list'),
),
supports_check_mode=False,
)
git = GitCommit(module.params['msg'],
module.params['path'],
module.params['commit_files'],
module.params['author'],
)
state = module.params['state']
if state == 'present':
results = git.commit()
if results['returncode'] != 0:
module.fail_json(msg=results)
if results.has_key('no_commits'):
module.exit_json(changed=False, results=results, state="present")
module.exit_json(changed=True, results=results, state="present")
module.exit_json(failed=True,
changed=False,
results='Unknown state passed. %s' % state,
state="unknown")
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# import module snippets. This are required
if __name__ == '__main__':
from ansible.module_utils.basic import *
main()
|
py | 1a3d35f0f066d34db8b98ca9d903427a5cddf7c7 | from __future__ import absolute_import
from fobi.base import form_element_plugin_registry
from .base import ContentTextPlugin
__title__ = 'fobi.contrib.plugins.form_elements.content.content_text.' \
'fobi_form_elements'
__author__ = 'Artur Barseghyan <[email protected]>'
__copyright__ = '2014-2017 Artur Barseghyan'
__license__ = 'GPL 2.0/LGPL 2.1'
__all__ = ('ContentTextPlugin',)
form_element_plugin_registry.register(ContentTextPlugin)
|
py | 1a3d36a21d4b5875c632c273b52df6e1a8777d5b | import spacy
from spacy.lemmatizer import Lemmatizer
from spacy.lang.en import LEMMA_INDEX, LEMMA_EXC, LEMMA_RULES
import random
import swda
import string
class feature_extractor(object):
def __init__(self):
self.nlp = spacy.load('en_core_web_sm', disable = ['ner', 'textcat'])
self.lemmatizer = spacy.lemmatizer.Lemmatizer(LEMMA_INDEX, LEMMA_EXC, LEMMA_RULES)
def find_features(self, utterance, n = 9, is_previous_speaker = None, previous_da = None):
features = {}
doc = self.nlp(utterance)
lemmatized = [self.lemmatizer(token.text, token.pos_)[0] for token in doc]
features['is_question'] = ('?' in [token.text for token in doc]) #question mark
for i in range(n): #first n tokens
try:
features['word'+str(i+1)] = lemmatized[i]
except IndexError:
features['word'+str(i+1)] = ''
for i in range(n): #first n pos-tags
try:
features['word'+str(i+1)+'_pos_tag'] = doc[i].pos_
except IndexError:
features['word'+str(i+1)+'_pos_tag'] = ''
if is_previous_speaker != None: #previous speaker
features['is_previous_speaker'] = is_previous_speaker
else:
features['is_previous_speaker'] = ''
if previous_da: #previous dialogue act
features['previous_da'] = previous_da
else:
features['previous_da'] = ''
try: #predicate verb
predicate, predicate_pos = [(token.text, token.pos_) for token in doc if token.dep_ == 'ROOT'][0]
features['predicate'] = self.lemmatizer(predicate, predicate_pos)[0]
except IndexError:
features['predicate'] = ''
try: #subject
subject, subject_pos = [(token.text, token.pos_) for token in doc if (token.dep_ == 'nsubj') or (token.dep_ == 'nsubjpass') or (token.dep_ == 'csubj')][0]
features['subject'] = self.lemmatizer(subject, subject_pos)[0]
except IndexError:
features['subject'] = ''
try: #object
object, object_pos = [(token.text, token.pos_) for token in doc if (token.dep_ == 'iobj') or (token.dep_ == 'obj') or (token.dep_ == 'dobj') or (token.dep_ == 'pobj')][0]
features['object'] = self.lemmatizer(object, object_pos)[0]
except IndexError:
features['object'] = ''
return features
def create_featuresets(self, max_transcripts = 20, n = 9):
corpus = swda.CorpusReader('swda')
utterances = []
i = 1
for trans in corpus.iter_transcripts(display_progress = True):
if i > max_transcripts:
break
previous_tag = None
previous_caller = None
for utt in trans.utterances:
if utt.act_tag not in ('x', 't3', '%', '+'): #discard non-verbal, uninterpretable and third-party talk da:s as well as continued sentences
try:
previous_tag = utterances[-1][1]
previous_caller = utterances[-1][2]
except IndexError:
pass
utterances.append((self.clean_utterance(utt.text), utt.act_tag, utt.caller, previous_tag, previous_caller))
i += 1
print('\nProcessing {} utterances... this will take some time.'.format(str(len(utterances))))
random.shuffle(utterances)
featuresets = [(self.find_features(text, n, is_previous_speaker = (caller == previous_caller), previous_da = previous_tag), tag) for (text, tag, caller, previous_tag, previous_caller) in utterances]
return featuresets
def clean_utterance(self, utterance):
ttable = dict((ord(char), None) for char in string.punctuation)
for key in '?!.,':
if ord(key) in ttable: del ttable[ord(key)]
for key in 'CDEFG':
ttable[ord(key)] = None
utterance = (utterance.translate(ttable)).replace(' ', ' ')
if utterance[0] == ' ':
utterance = utterance[1:]
return utterance
|
py | 1a3d36c39da8aae9a072302d46ab107bbf09198c | import unittest
from unittest.mock import Mock
from rastervision.augmentor import (Augmentor, AugmentorConfig,
AugmentorConfigBuilder)
from rastervision.protos.augmentor_pb2 import AugmentorConfig as AugmentorConfigMsg
from tests.mock import SupressDeepCopyMixin
MOCK_AUGMENTOR = 'MOCK_AUGMENTOR'
class MockAugmentor(Augmentor):
def __init__(self):
self.mock = Mock()
self.mock.process.return_value = None
def process(self, training_data, tmp_dir):
result = self.mock.process(training_data, tmp_dir)
if result is None:
return training_data
else:
return result
class MockAugmentorConfig(SupressDeepCopyMixin, AugmentorConfig):
def __init__(self):
super().__init__(MOCK_AUGMENTOR)
self.mock = Mock()
self.mock.to_proto.return_value = None
self.mock.create_augmentor.return_value = None
self.mock.update_for_command.return_value = None
def to_proto(self):
result = self.mock.to_proto()
if result is None:
return AugmentorConfigMsg(
augmentor_type=self.augmentor_type, custom_config={})
else:
return result
def create_augmentor(self):
result = self.mock.create_augmentor()
if result is None:
return MockAugmentor()
else:
return result
def update_for_command(self, command_type, experiment_config,
context=None):
super().update_for_command(command_type, experiment_config, context)
self.mock.update_for_command(command_type, experiment_config, context)
def report_io(self, command_type, io_def):
self.mock.report_io(command_type, io_def)
class MockAugmentorConfigBuilder(SupressDeepCopyMixin, AugmentorConfigBuilder):
def __init__(self, prev=None):
super().__init__(MockAugmentorConfig, {})
self.mock = Mock()
        self.mock.from_proto.return_value = None
def from_proto(self, msg):
result = self.mock.from_proto(msg)
if result is None:
return self
else:
return result
if __name__ == '__main__':
unittest.main()
|
py | 1a3d36e577cefc8319b1114227c8c3312f6fb6bd | import cPickle as pickle
import os
from config.config import config
from dic import parse, Info
from query import topK_query, wildcard_query, load_and_calc
if __name__ == "__main__":
if not os.path.exists(config.TIERED_INDEX_FILE) or not os.path.exists(config.ID_HTML_FILE):
info, id_html = parse()
tiered_index_file = open(config.TIERED_INDEX_FILE, 'w')
pickle.dump(info, tiered_index_file, config.PICKLE_PROTOCOL)
id_html_file = open(config.ID_HTML_FILE, 'w')
pickle.dump(id_html, id_html_file, config.PICKLE_PROTOCOL)
pkl_file = open(config.TIERED_INDEX_FILE, 'r')
info = pickle.load(pkl_file)
index, voc, entries = load_and_calc(info)
pkl_file = open(config.ID_HTML_FILE, 'r')
id_html = pickle.load(pkl_file)
print '------------------------- top k --------------------------------'
result = topK_query(index, voc, entries, 'board beverage operations')
html = []
for item in result:
print item, id_html[item[0]]
print '------------------------- top k --------------------------------'
print '------------------------- wild --------------------------------'
result = wildcard_query(index, voc, entries, 'board beverage opera*')
html = []
for item in result:
print item, id_html[item[0]]
print '------------------------- wild --------------------------------'
|
py | 1a3d370dd53ad563491d5299db34f1cf8fc29a6d | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from django import http
from django.utils.encoding import force_text
from django.utils.safestring import mark_safe
from horizon import messages
from horizon import middleware
from horizon.test import helpers as test
class MessageTests(test.TestCase):
def test_middleware_header(self):
req = self.request
string = "Giant ants are attacking San Francisco!"
expected = ["error", force_text(string), ""]
self.assertIn("async_messages", req.horizon)
self.assertItemsEqual(req.horizon['async_messages'], [])
req.META['HTTP_X_REQUESTED_WITH'] = 'XMLHttpRequest'
messages.error(req, string)
self.assertItemsEqual(req.horizon['async_messages'], [expected])
res = http.HttpResponse()
res = middleware.HorizonMiddleware('dummy_get_response') \
._process_response(req, res)
self.assertEqual(json.dumps([expected]),
res['X-Horizon-Messages'])
def test_error_message(self):
req = self.request
string = mark_safe("We are now safe from ants! Go <a>here</a>!")
expected = ["error", force_text(string), " safe"]
self.assertIn("async_messages", req.horizon)
self.assertItemsEqual(req.horizon['async_messages'], [])
req.META['HTTP_X_REQUESTED_WITH'] = 'XMLHttpRequest'
messages.error(req, string)
self.assertItemsEqual(req.horizon['async_messages'], [expected])
res = http.HttpResponse()
res = middleware.HorizonMiddleware('dummy_get_response') \
._process_response(req, res)
self.assertEqual(json.dumps([expected]),
res['X-Horizon-Messages'])
|
py | 1a3d3735c83fc250b1058e179cfdc16408ef886a | import logging
import pytest
from dvc.exceptions import (
NoMetricsFoundError,
NoMetricsParsedError,
OverlappingOutputPathsError,
)
from dvc.path_info import PathInfo
from dvc.utils.fs import remove
from dvc.utils.serialize import dump_yaml, modify_yaml
from tests.func.metrics.utils import _write_json
@pytest.mark.parametrize(
"diff, metric_value",
(
(
lambda repo, target, rev: repo.metrics.diff(
targets=[target], a_rev=rev
),
{"m": 1},
),
(
lambda repo, target, rev: repo.plots.diff(
targets=[target], revs=[rev]
),
[{"m": 1}, {"m": 2}],
),
),
)
def test_diff_no_file_on_target_rev(
tmp_dir, scm, dvc, caplog, diff, metric_value
):
with tmp_dir.branch("new_branch", new=True):
_write_json(tmp_dir, metric_value, "metric.json")
with caplog.at_level(logging.WARNING, "dvc"):
diff(dvc, "metric.json", "master")
assert "'metric.json' was not found at: 'master'." in caplog.text
@pytest.mark.parametrize(
"show, malformed_metric",
(
(lambda repo, target: repo.metrics.show(targets=[target]), '{"m": 1'),
(
lambda repo, target: repo.plots.show(targets=[target]),
'[{"m": 1}, {"m": 2}',
),
),
)
def test_show_malformed_metric(
tmp_dir, scm, dvc, caplog, show, malformed_metric
):
tmp_dir.gen("metric.json", malformed_metric)
with pytest.raises(NoMetricsParsedError):
show(dvc, "metric.json")
@pytest.mark.parametrize(
"show",
(lambda repo: repo.metrics.show(), lambda repo: repo.plots.show(),),
)
def test_show_no_metrics_files(tmp_dir, dvc, caplog, show):
with pytest.raises(NoMetricsFoundError):
show(dvc)
@pytest.mark.parametrize("clear_before_run", [True, False])
@pytest.mark.parametrize("typ", ["metrics", "plots"])
def test_metrics_show_overlap(
tmp_dir, dvc, run_copy_metrics, clear_before_run, typ
):
data_dir = PathInfo("data")
(tmp_dir / data_dir).mkdir()
outs = {typ: [str(data_dir / "m1.yaml")]}
dump_yaml(data_dir / "m1_temp.yaml", {"a": {"b": {"c": 2, "d": 1}}})
run_copy_metrics(
str(data_dir / "m1_temp.yaml"),
str(data_dir / "m1.yaml"),
single_stage=False,
commit=f"add m1 {typ}",
name="cp-m1",
**outs,
)
with modify_yaml("dvc.yaml") as d:
# trying to make an output overlaps error
d["stages"]["corrupted-stage"] = {
"cmd": "mkdir data",
"outs": ["data"],
}
# running by clearing and not clearing stuffs
# so as it works even for optimized cases
if clear_before_run:
remove(data_dir)
remove(dvc.cache.local.cache_dir)
dvc._reset()
show = dvc.metrics.show if typ == "metrics" else dvc.plots.show
with pytest.raises(OverlappingOutputPathsError):
show()
|
py | 1a3d3790196706b7b2a5f7e941a83c5f83c3e3a8 | """Tests for lr_scheduler.py"""
from distutils.version import LooseVersion
from unittest.mock import Mock
import numpy as np
import pytest
import torch
from sklearn.base import clone
from torch.optim import SGD
from torch.optim.lr_scheduler import CosineAnnealingLR
from torch.optim.lr_scheduler import ExponentialLR
from torch.optim.lr_scheduler import LambdaLR
from torch.optim.lr_scheduler import MultiStepLR
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.optim.lr_scheduler import StepLR
from torch.optim.lr_scheduler import CyclicLR as TorchCyclicLR
from skorch import NeuralNetClassifier
from skorch.callbacks.lr_scheduler import WarmRestartLR, LRScheduler
@pytest.mark.filterwarnings("ignore::DeprecationWarning")
class TestLRCallbacks:
@pytest.mark.parametrize('policy', [StepLR, 'StepLR'])
def test_simulate_lrs_epoch_step(self, policy):
lr_sch = LRScheduler(policy, step_size=2)
lrs = lr_sch.simulate(6, 1)
expected = np.array([1.0, 1.0, 0.1, 0.1, 0.01, 0.01])
assert np.allclose(expected, lrs)
@pytest.mark.parametrize('policy', [TorchCyclicLR])
def test_simulate_lrs_batch_step(self, policy):
lr_sch = LRScheduler(
policy, base_lr=1, max_lr=5, step_size_up=4, step_every='batch')
lrs = lr_sch.simulate(11, 1)
expected = np.array([1, 2, 3, 4, 5, 4, 3, 2, 1, 2, 3])
assert np.allclose(expected, lrs)
@pytest.mark.parametrize('policy, instance, kwargs', [
('LambdaLR', LambdaLR, {'lr_lambda': (lambda x: 1e-1)}),
('StepLR', StepLR, {'step_size': 30}),
('MultiStepLR', MultiStepLR, {'milestones': [30, 90]}),
('ExponentialLR', ExponentialLR, {'gamma': 0.1}),
('ReduceLROnPlateau', ReduceLROnPlateau, {}),
('WarmRestartLR', WarmRestartLR, {}),
('CosineAnnealingLR', CosineAnnealingLR, {'T_max': 5, 'eta_min': 1e-3}),
(WarmRestartLR, WarmRestartLR, {}),
])
def test_lr_callback_init_policies(
self,
classifier_module,
classifier_data,
policy,
instance,
kwargs,
):
X, y = classifier_data
lr_policy = LRScheduler(policy, **kwargs)
net = NeuralNetClassifier(
classifier_module, max_epochs=2, callbacks=[lr_policy]
)
net.fit(X, y)
assert any(list(map(
lambda x: isinstance(
getattr(x[1], 'lr_scheduler_', None), instance),
net.callbacks_
)))
@pytest.mark.parametrize('policy, kwargs', [
('LambdaLR', {'lr_lambda': (lambda x: 1e-1)}),
('StepLR', {'step_size': 30}),
('MultiStepLR', {'milestones': [30, 90]}),
('ExponentialLR', {'gamma': 0.1}),
('ReduceLROnPlateau', {}),
('WarmRestartLR', {}),
('CosineAnnealingLR', {'T_max': 3}),
])
def test_lr_callback_steps_correctly(
self,
classifier_module,
classifier_data,
policy,
kwargs,
):
max_epochs = 2
X, y = classifier_data
lr_policy = LRScheduler(policy, **kwargs)
net = NeuralNetClassifier(
classifier_module(),
max_epochs=max_epochs,
batch_size=16,
callbacks=[lr_policy],
)
net.fit(X, y)
# pylint: disable=protected-access
assert lr_policy.lr_scheduler_.last_epoch == max_epochs
@pytest.mark.parametrize('policy, kwargs', [
(TorchCyclicLR, {'base_lr': 1e-3, 'max_lr': 6e-3, 'step_every': 'batch'}),
])
def test_lr_callback_batch_steps_correctly(
self,
classifier_module,
classifier_data,
policy,
kwargs,
):
batch_size = 100
max_epochs = 2
X, y = classifier_data
num_examples = len(X)
lr_policy = LRScheduler(policy, **kwargs)
net = NeuralNetClassifier(classifier_module(), max_epochs=max_epochs,
batch_size=batch_size, callbacks=[lr_policy])
net.fit(X, y)
total_iterations_per_epoch = num_examples / batch_size
# 80% of sample used for training by default
total_training_iterations_per_epoch = 0.8 * total_iterations_per_epoch
expected = int(total_training_iterations_per_epoch * max_epochs)
# pylint: disable=protected-access
assert lr_policy.batch_idx_ == expected
@pytest.mark.parametrize('policy, kwargs', [
(TorchCyclicLR, {'base_lr': 1e-3, 'max_lr': 6e-3, 'step_every': 'batch'}),
])
def test_lr_callback_batch_steps_correctly_fallback(
self,
classifier_module,
classifier_data,
policy,
kwargs,
):
batch_size = 100
max_epochs = 2
X, y = classifier_data
num_examples = len(X)
lr_policy = LRScheduler(policy, **kwargs)
net = NeuralNetClassifier(classifier_module(), max_epochs=max_epochs,
batch_size=batch_size, callbacks=[lr_policy])
net.fit(X, y)
# Removes batch count information in the last two epochs
for i in range(max_epochs):
del net.history[i]["train_batch_count"]
del net.history[i]["valid_batch_count"]
net.partial_fit(X, y)
total_iterations_per_epoch = num_examples / batch_size
# batch_counts were removed thus the total iterations of the last
# epoch is used
total_iterations_fit_run = total_iterations_per_epoch * max_epochs
# 80% of sample used for training by default
total_iterations_partial_fit_run = (
0.8 * total_iterations_per_epoch * max_epochs)
# called fit AND partial_fit
total_iterations = (total_iterations_fit_run +
total_iterations_partial_fit_run)
# Failback to using both valid and training batches counts on
# second run
expected = int(total_iterations)
# pylint: disable=protected-access
assert lr_policy.batch_idx_ == expected
def test_lr_scheduler_cloneable(self):
# reproduces bug #271
scheduler = LRScheduler(WarmRestartLR, base_lr=123)
clone(scheduler) # does not raise
def test_lr_scheduler_set_params(self, classifier_module, classifier_data):
scheduler = LRScheduler(
TorchCyclicLR, base_lr=123, max_lr=999, step_every='batch')
net = NeuralNetClassifier(
classifier_module,
max_epochs=0,
callbacks=[('scheduler', scheduler)],
)
net.set_params(callbacks__scheduler__base_lr=456)
net.fit(*classifier_data) # we need to trigger on_train_begin
assert net.callbacks[0][1].lr_scheduler_.base_lrs[0] == 456
@pytest.mark.parametrize('policy,kwargs', [
(StepLR, {'gamma': 0.9, 'step_size': 1})
])
@pytest.mark.skipif(
LooseVersion(torch.__version__) < '1.4',
reason="Feature isn't supported with this torch version."
)
def test_lr_scheduler_record_epoch_step(self,
classifier_module,
classifier_data,
policy,
kwargs):
epochs = 3
scheduler = LRScheduler(policy, **kwargs)
lrs = scheduler.simulate(epochs, initial_lr=123.)
net = NeuralNetClassifier(
classifier_module,
max_epochs=epochs,
lr=123.,
callbacks=[('scheduler', scheduler)]
)
net.fit(*classifier_data)
assert np.all(net.history[:, 'event_lr'] == lrs)
@pytest.mark.skipif(
LooseVersion(torch.__version__) < '1.4',
reason="Feature isn't supported with this torch version."
)
def test_lr_scheduler_record_batch_step(self, classifier_module, classifier_data):
X, y = classifier_data
batch_size = 128
scheduler = LRScheduler(
TorchCyclicLR,
base_lr=1,
max_lr=5,
step_size_up=4,
step_every='batch'
)
net = NeuralNetClassifier(
classifier_module,
max_epochs=1,
lr=123.,
batch_size=batch_size,
callbacks=[('scheduler', scheduler)]
)
net.fit(X, y)
new_lrs = scheduler.simulate(
net.history[-1, 'train_batch_count'],
initial_lr=123.,
)
assert np.all(net.history[-1, 'batches', :, 'event_lr'] == new_lrs)
def test_cyclic_lr_with_epoch_step_warning(self,
classifier_module,
classifier_data):
msg = ("The LRScheduler now makes a step every epoch by default. "
"To have the cyclic lr scheduler update "
"every batch set step_every='batch'")
with pytest.warns(FutureWarning, match=msg) as record:
scheduler = LRScheduler(
TorchCyclicLR, base_lr=123, max_lr=999)
net = NeuralNetClassifier(
classifier_module,
max_epochs=0,
callbacks=[('scheduler', scheduler)],
)
net.initialize()
assert len(record) == 1
class TestReduceLROnPlateau:
def get_net_with_mock(
self, classifier_data, classifier_module, monitor='train_loss'):
"""Returns a net with a mocked lr policy that allows to check what
it's step method was called with.
"""
X, y = classifier_data
net = NeuralNetClassifier(
classifier_module,
callbacks=[
('scheduler', LRScheduler(ReduceLROnPlateau, monitor=monitor)),
],
max_epochs=1,
).fit(X, y)
# mock the policy
policy = dict(net.callbacks_)['scheduler'].lr_scheduler_
mock_step = Mock(side_effect=policy.step)
policy.step = mock_step
# make sure that mocked policy is set
scheduler = dict(net.callbacks_)['scheduler']
# pylint: disable=protected-access
scheduler._get_scheduler = lambda *args, **kwargs: policy
net.partial_fit(X, y)
return net, mock_step
@pytest.mark.parametrize('monitor', ['train_loss', 'valid_loss', 'epoch'])
def test_reduce_lr_monitor_with_string(
self, monitor, classifier_data, classifier_module):
# step should be called with the 2nd to last value from that
# history entry
net, mock_step = self.get_net_with_mock(
classifier_data, classifier_module, monitor=monitor)
score = mock_step.call_args_list[0][0][0]
np.isclose(score, net.history[-2, monitor])
def test_reduce_lr_monitor_with_callable(
self, classifier_data, classifier_module):
# step should always be called with the return value from the
# callable, 55
_, mock_step = self.get_net_with_mock(
classifier_data, classifier_module, monitor=lambda x: 55)
score = mock_step.call_args_list[0][0][0]
assert score == 55
@pytest.mark.parametrize('mode,score', [
('min', np.inf),
('max', -np.inf)
])
def test_reduce_lr_monitor_max(
self, classifier_data, classifier_module, mode, score):
X, y = classifier_data
net = NeuralNetClassifier(
classifier_module,
callbacks=[
('scheduler', LRScheduler(
ReduceLROnPlateau, monitor='train_loss', mode=mode)),
],
max_epochs=1,
)
net.fit(X, y)
policy = dict(net.callbacks_)['scheduler'].lr_scheduler_
assert policy.best == score
class TestWarmRestartLR():
def assert_lr_correct(
self, optimizer, targets, epochs, min_lr, max_lr, base_period,
period_mult):
"""Test that learning rate was set correctly."""
targets = [targets] if len(optimizer.param_groups) == 1 else targets
scheduler = WarmRestartLR(
optimizer, min_lr, max_lr, base_period, period_mult
)
for epoch in range(epochs):
optimizer.step() # suppress warning about .step call order
scheduler.step(epoch)
for param_group, target in zip(optimizer.param_groups, targets):
assert param_group['lr'] == pytest.approx(target[epoch])
def _single_period_targets(self, epochs, min_lr, max_lr, period):
targets = 1 + np.cos(np.arange(epochs) * np.pi / period)
targets = min_lr + 0.5 * (max_lr - min_lr) * targets
return targets.tolist()
# pylint: disable=missing-docstring
def _multi_period_targets(
self, epochs, min_lr, max_lr, base_period, period_mult):
remaining_epochs = epochs
current_period = base_period
targets = list()
while remaining_epochs > 0:
period_epochs = min(remaining_epochs, current_period + 1)
remaining_epochs -= period_epochs
targets += self._single_period_targets(
period_epochs, min_lr, max_lr, current_period
)
current_period = current_period * period_mult
return targets
@pytest.fixture()
def init_optimizer(self, classifier_module):
return SGD(classifier_module().parameters(), lr=0.05)
def test_raise_incompatible_len_on_min_lr_err(self, init_optimizer):
with pytest.raises(ValueError) as excinfo:
WarmRestartLR(init_optimizer, min_lr=[1e-1, 1e-2])
assert 'min_lr' in str(excinfo.value)
def test_raise_incompatible_len_on_max_lr_err(self, init_optimizer):
with pytest.raises(ValueError) as excinfo:
WarmRestartLR(init_optimizer, max_lr=[1e-1, 1e-2])
assert 'max_lr' in str(excinfo.value)
def test_single_period(self, init_optimizer):
optimizer = init_optimizer
epochs = 3
min_lr = 5e-5
max_lr = 5e-2
base_period = 3
period_mult = 1
targets = self._single_period_targets(
epochs, min_lr, max_lr, base_period)
self.assert_lr_correct(
optimizer,
targets,
epochs,
min_lr,
max_lr,
base_period,
period_mult
)
def test_multi_period_with_restart(self, init_optimizer):
optimizer = init_optimizer
epochs = 9
min_lr = 5e-5
max_lr = 5e-2
base_period = 2
period_mult = 2
targets = self._multi_period_targets(
epochs, min_lr, max_lr, base_period, period_mult
)
self.assert_lr_correct(
optimizer,
targets,
epochs,
min_lr,
max_lr,
base_period,
period_mult
)
def test_restarts_with_multiple_groups(self, classifier_module):
classifier = classifier_module()
optimizer = SGD(
[
{'params': classifier.sequential[0].parameters(), 'lr': 1e-3},
{'params': classifier.sequential[1].parameters(), 'lr': 1e-2},
{'params': classifier.sequential[2].parameters(), 'lr': 1e-1},
]
)
epochs = 9
min_lr_group = [1e-5, 1e-4, 1e-3]
max_lr_group = [1e-3, 1e-2, 1e-1]
base_period = 2
period_mult = 2
targets = list()
for min_lr, max_lr in zip(min_lr_group, max_lr_group):
targets.append(
self._multi_period_targets(
epochs, min_lr, max_lr, base_period, period_mult
)
)
self.assert_lr_correct(
optimizer,
targets,
epochs,
min_lr_group,
max_lr_group,
base_period,
period_mult
)
|
py | 1a3d381f77b67ffb06deb1be2b3a402e1ad3af9e | import os
import sys
import re
def terminal(cmd):
return os.popen(cmd).read()
def run(clauses, literals, num_vars):
terminal(f'python3 gen_random_SAT.py {clauses} {literals} {num_vars}')
output = terminal('./kissat_gb/build/kissat random_SAT.cnf | grep process-time:')
    match = re.match(r'c process-time:\s+[^\s]+\s+([0-9\.]+)', output)
t1 = float(match.group(1))
t2 = 1000
return (t1, t2)
def header():
line = 'Clauses,'
line += 'Literals per clause,'
line += 'Variables,'
line += 'KISSAT_GB Time (s),'
line += 'BRUTE_FORCE Time (s),'
return line
def log(clauses, literals, num_vars, t1, t2):
line = str(clauses) + ','
line += str(literals) + ','
line += str(num_vars) + ','
line += str(t1) + ','
line += str(t2) + ','
return line
output = open('experiment_output.csv', 'w')
output.write(header() + '\n')
total_clauses = 1000
total_literals = 1000
total_variables = 1000
step = 100
count = 0
num_samples = (total_clauses / step) * (total_literals / step) * (total_variables / step)
for clauses in range(step, total_clauses, step):
for literals in range(step, total_literals, step):
for num_vars in range(step, total_variables, step):
if(count % 10 == 0): print(f'Progress: {count / num_samples}')
count += 1
(t1, t2) = run(clauses, literals, num_vars)
output.write(log(clauses, literals, num_vars, t1, t2) + '\n')
output.close()
print('SUCCESS!') |
py | 1a3d3888354fdc55f2332b14c7e49bc3f837bc81 | """
To understand why this file is here, please read:
http://cookiecutter-django.readthedocs.io/en/latest/faq.html#why-is-there-a-django-contrib-sites-directory-in-cookiecutter-django
"""
from django.conf import settings
from django.db import migrations
def update_site_forward(apps, schema_editor):
"""Set site domain and name."""
Site = apps.get_model("sites", "Site")
Site.objects.update_or_create(
id=settings.SITE_ID,
defaults={
"domain": "apros.com",
"name": "V2X_Solution",
},
)
def update_site_backward(apps, schema_editor):
"""Revert site domain and name to default."""
Site = apps.get_model("sites", "Site")
Site.objects.update_or_create(
id=settings.SITE_ID, defaults={"domain": "example.com", "name": "example.com"}
)
class Migration(migrations.Migration):
dependencies = [("sites", "0002_alter_domain_unique")]
operations = [migrations.RunPython(update_site_forward, update_site_backward)]
|
py | 1a3d38fd49dc33bead1b0c4e8d91741254ef4a2f | # -*- coding: utf-8 -*-
class Singleton(type):
def __init__(cls, name, bases, attrs, **kwargs):
super().__init__(name, bases, attrs)
cls._instance = None
def __call__(cls, *args, **kwargs):
if cls._instance is None:
cls._instance = super().__call__(*args, **kwargs)
return cls._instance
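# Illustrative usage sketch (added as an example; "AppConfig" is a made-up
# class name): any class built with this metaclass hands back the same
# instance on every call, and __init__ only runs for the first one.
if __name__ == "__main__":
    class AppConfig(metaclass=Singleton):
        def __init__(self, env="prod"):
            self.env = env
    first = AppConfig()
    second = AppConfig(env="dev")  # ignored: the cached instance is returned
    assert first is second
    assert first.env == "prod"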
|
py | 1a3d39a53f9fef4d400f714095f57b08c4040890 | """
Leetcode 111. Minimum Depth of Binary Tree
Here things are a bit more complicated than max depth.
For a node that has both a left and a right child, the same recursion as
max depth (just taking min instead of max) still works.
But consider skewed cases like these, where a node is missing one child:
        2             2
       /             / \
      3             3   4
     /             /
    4             5
   /             /
  5             6
My idea: the real base case is not "node is None". The correct one is:
root.left and root.right are both None - that's the leaf node.
So the base cases change accordingly.
"""
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
from typing import Optional
class Solution:
def minDepth(self, root: Optional[TreeNode]) -> int:
def dfs(root):
if root.left is None and root.right is None:
return 1
elif root.left is None:
return 1 + dfs(root.right)
elif root.right is None:
return 1 + dfs(root.left)
else:
return 1 + min(dfs(root.left), dfs(root.right))
if root is None:
return 0
min_depth = dfs(root)
        return min_depth
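# Quick illustrative check (added as an example, not part of the original
# solution): the left-skewed chain 2 -> 3 -> 4 -> 5 from the docstring has
# minimum depth 4, whereas a naive 1 + min(dfs(left), dfs(right)) with
# dfs(None) == 0 would return 1 at the root.
if __name__ == "__main__":
    root = TreeNode(2, left=TreeNode(3, left=TreeNode(4, left=TreeNode(5))))
    assert Solution().minDepth(root) == 4
|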
py | 1a3d3ad763df2ddb6a0aed80ac681a04ca517bc4 | """This is complete MIME handling package, provides fast parser
and models for handling mime.
Rationale
--------
* Standard python parser is slow at parsing big messages; it takes ~1 second
and a couple of million ops to parse an 11MB message.
* Not very memory efficient, as it splits the message into an array of lines,
and joins them after the parsing.
* Does not preserve the original encodings when altering the message.
The new parser is:
* Fast, it takes ~50 milliseconds and ~2K operations to parse an 11 MB message.
* Memory efficient, as it stores the message in one string.
* Tracks changes and returns unchanged parts unchanged upon serialization.
* Converts headers to unicode, detects and preserves encodings when possible.
Parser drawbacks:
* Parser is strict, when the MIME is broken, raises MimeError and does
not attempt to fix anything except simple errors (like mistyped charsets)
Alternatives:
If you still need to process the broken MIME, use
flanker.mime.fallback.FallbackMessage that relies on python parser in terms
of fixing the broken MIME and forces broken encodings in bodies and headers,
but beware that it can lose some information because of broken
or unknown encodings.
Examples
-------
>> from flanker import mime
>> msg = mime.from_string(message_string)
# unicode multi-value dictionary with headers
msg.headers
# useful content_type member with predicates:
msg.content_type.is_multipart()
msg.content_type.is_singlepart()
msg.content_type.is_message_container()
#decoded body of the message
if msg.content_type.is_singlepart():
msg.body
# parts if message is multipart
if msg.content_type.is_multipart():
msg.parts
# enclosed message
if msg.content_type.is_message_container():
msg.enclosed
read more in package details.
"""
from flanker.mime.message.errors import DecodingError, EncodingError, MimeError
from flanker.mime import create
from flanker.mime.create import from_string
from flanker.mime.message.fallback.create import from_string as recover
from flanker.mime.message.utils import python_message_to_string
from flanker.mime.message.headers.parametrized import fix_content_type
|
py | 1a3d3c72bac9f50df90aed592b5176e61a8fcb49 | import unittest
from typing import Any, List
from hstest.check_result import CheckResult
from hstest.stage_test import StageTest
from hstest.test_case import TestCase
class TestCustomChecker(StageTest):
def generate(self) -> List[TestCase]:
return [
TestCase(
attach='4\n-in\n123\nout\n234\n',
args=['-in', '123', 'out', '234'],
check_function=self.custom_check
),
]
def custom_check(self, reply: str, attach: Any) -> CheckResult:
return CheckResult(reply == attach, '')
class Test(unittest.TestCase):
def test(self):
status, feedback = TestCustomChecker(
'tests.outcomes.test_custom_checker.program'
).run_tests()
self.assertEqual('test OK', feedback)
self.assertEqual(status, 0)
|
py | 1a3d3c7dbb1356b622805d730ce1a6e431857679 | name = 'abaqus2dyna'
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
|
py | 1a3d3ccc1a04dfd264aba446740b7a32b3ff5c84 | #!/usr/bin/env python3
#
# This script is used to run the yaml test cases for morphology & morphophonology
# tests.
#
# License: CC0 (see LICENSE)
from subprocess import Popen, PIPE
from argparse import ArgumentParser
from io import StringIO
from collections import OrderedDict, namedtuple
import os
import os.path
import re
import shlex
import shutil
import sys
import yaml
TestCase = namedtuple("TestCase", ['input', 'outputs'])
# SUPPORT FUNCTIONS
def string_to_list(data):
if isinstance(data, bytes): return [data.decode('utf-8')]
elif isinstance(data, str): return [data]
else: return data
def invert_dict(data):
tmp = OrderedDict()
for key, val in data.items():
for v in string_to_list(val):
d = tmp.setdefault(v, [])
if key not in d:
d.append(key)
return tmp
COLORS = {
"red": "\033[1;31m",
"green": "\033[0;32m",
"orange": "\033[0;33m",
"yellow": "\033[1;33m",
"blue": "\033[0;34m",
"light_blue": "\033[0;36m",
"reset": "\033[m"
}
def colourise(string, *args, **kwargs):
kwargs.update(COLORS)
return string.format(*args, **kwargs)
def check_path_exists(program):
out = shutil.which(program)
if out is None:
raise EnvironmentError("Cannot find `%s`. Check $PATH." % program)
return out
# SUPPORT CLASSES
class LookupError(Exception):
pass
# Courtesy of https://gist.github.com/844388. Thanks!
class _OrderedDictYAMLLoader(yaml.Loader):
"""A YAML loader that loads mappings into ordered dictionaries."""
def __init__(self, *args, **kwargs):
yaml.Loader.__init__(self, *args, **kwargs)
self.add_constructor('tag:yaml.org,2002:map', type(self).construct_yaml_map)
self.add_constructor('tag:yaml.org,2002:omap', type(self).construct_yaml_map)
def construct_yaml_map(self, node):
data = OrderedDict()
yield data
value = self.construct_mapping(node)
data.update(value)
def construct_mapping(self, node, deep=False):
if isinstance(node, yaml.MappingNode):
self.flatten_mapping(node)
else:
raise yaml.constructor.ConstructorError(None, None,
'expected a mapping node, but found %s' % node.id, node.start_mark)
mapping = OrderedDict()
for key_node, value_node in node.value:
key = self.construct_object(key_node, deep=deep)
try:
hash(key)
except TypeError as exc:
raise yaml.constructor.ConstructorError('while constructing a mapping',
node.start_mark, 'found unacceptable key (%s)' % exc, key_node.start_mark)
value = self.construct_object(value_node, deep=deep)
mapping[key] = value
return mapping
def yaml_load_ordered(f):
return yaml.load(f, _OrderedDictYAMLLoader)
class TestFile:
def __init__(self, data, system="hfst"):
self.data = data
self._system = system
@property
def surface_tests(self):
tests = OrderedDict()
for title, cases in self.data['Tests'].items():
new_cases = []
for surface, lexical in cases.items():
new_cases.append(TestCase(input=surface, outputs=string_to_list(lexical)))
tests[title] = new_cases
return tests
@property
def lexical_tests(self):
tests = OrderedDict()
for title, cases in self.data['Tests'].items():
new_cases = []
for lexical, surface in invert_dict(cases).items():
new_cases.append(TestCase(input=lexical, outputs=string_to_list(surface)))
tests[title] = new_cases
return tests
@property
def gen(self):
return self.data.get("Config", {}).get(self._system, {}).get("Gen", None)
@property
def morph(self):
return self.data.get("Config", {}).get(self._system, {}).get("Morph", None)
@property
def app(self):
a = self.data.get("Config", {}).get(self._system, {}).get("App", None)
if a is None:
if self._system == "hfst":
return ['hfst-lookup']
elif self._system == "xerox":
return ["lookup", "-flags", "mbTT"]
else:
raise Exception("Unknown system: '%s'" % self._system)
return a
class MorphTest:
class AllOutput():
def __init__(self, args):
self._io = StringIO()
self.args = args
def __str__(self):
return self._io.getvalue()
def write(self, data):
self._io.write(data)
def info(self, data):
self.write(data)
def title(self, *args): pass
def success(self, *args): pass
def failure(self, *args): pass
def result(self, *args): pass
def final_result(self, hfst):
self.write(colourise("Total passes: {green}{passes}{reset}, " +
"Total fails: {red}{fails}{reset}, " +
"Total: {light_blue}{total}{reset}\n",
passes=hfst.passes,
fails=hfst.fails,
total=hfst.fails+hfst.passes
))
class NormalOutput(AllOutput):
def title(self, text):
self.write(colourise("{light_blue}-" * len(text) + '\n'))
self.write(text + '\n')
self.write(colourise("-" * len(text) + '{reset}\n'))
def success(self, case, total, left, right):
x = colourise(("[{light_blue}{case:>%d}/{total}{reset}][{green}PASS{reset}] " +
"{left} {blue}=>{reset} {right}\n") % len(str(total)),
left=left, right=right, case=case, total=total)
self.write(x)
def failure(self, case, total, left, right, errlist):
x = colourise(("[{light_blue}{case:>%d}/{total}{reset}][{red}FAIL{reset}] " +
"{left} {blue}=>{reset} {right}: {errlist}\n") % len(str(total)),
left=left, right=right, case=case, total=total,
errlist=", ".join(errlist))
self.write(x)
def result(self, title, test, counts):
p = counts["Pass"]
f = counts["Fail"]
text = colourise("\nTest {n} - Passes: {green}{passes}{reset}, " +
"Fails: {red}{fails}{reset}, " +
"Total: {light_blue}{total}{reset}\n",
n=test, passes=p, fails=f, total=p+f)
self.write(text)
class CompactOutput(AllOutput):
def result(self, title, test, counts):
p = counts["Pass"]
f = counts["Fail"]
out = "%s %d/%d/%d" % (title, p, f, p+f)
if counts["Fail"] > 0:
if not self.args.hide_fail:
self.write(colourise("[{red}FAIL{reset}] {}\n", out))
elif not self.args.hide_pass:
self.write(colourise("[{green}PASS{reset}] {}\n", out))
class TerseOutput(AllOutput):
def success(self, case, total, l, r):
self.write(colourise("{green}.{reset}"))
def failure(self, case, total, form, err, errlist):
self.write(colourise("{red}!{reset}"))
def result(self, title, test, counts):
self.write('\n')
def final_result(self, counts):
if counts.fails > 0:
self.write(colourise("{red}FAIL{reset}\n"))
else:
self.write(colourise("{green}PASS{reset}\n"))
class FinalOutput(AllOutput):
def final_result(self, counts):
p = counts.passes
f = counts.fails
self.write("%d/%d/%d " % (p, f, p+f))
class NoOutput(AllOutput):
def final_result(self, *args):
pass
def __init__(self, args):
self.args = args
# TODO: check for null case
self.fails = 0
self.passes = 0
self.count = OrderedDict()
self.load_config(self.args.test_file)
def run(self):
#timing_begin = time.time()
self.run_tests(self.args.test)
#self.timer = time.time() - timing_begin
if self.fails > 0:
return 1
else:
return 0
def load_config(self, fn):
args = self.args
if fn.endswith('lexc'):
self.config = TestFile(parse_lexc_trans(open(fn),
args.gen,
args.morph,
args.app,
args.transducer,
args.section), args.section)
else:
self.config = TestFile(yaml_load_ordered(open(fn)), args.section)
d = os.path.dirname(fn)
if d:
os.chdir(os.path.dirname(fn))
# we've loaded the test file, now let all paths be
# relative to that file
config = self.config
app = args.app or config.app
if isinstance(app, str):
app = app.split(" ")
self.program = string_to_list(app)
check_path_exists(self.program[0])
self.gen = args.gen or config.gen
self.morph = args.morph or config.morph
if args.surface:
self.gen = None
if args.lexical:
self.morph = None
if self.gen == self.morph == None:
raise AttributeError("One of Gen or Morph must be configured.")
for i in (self.gen, self.morph):
if i and not os.path.isfile(i):
raise IOError("File %s does not exist." % i)
if args.silent:
self.out = MorphTest.NoOutput(args)
else:
self.out = {
"normal": MorphTest.NormalOutput,
"terse": MorphTest.TerseOutput,
"compact": MorphTest.CompactOutput,
"silent": MorphTest.NoOutput,
"final": MorphTest.FinalOutput
}.get(args.output, lambda x: None)(args)
if self.out is None:
raise AttributeError("Invalid output mode supplied: %s" % args.output)
if args.verbose:
self.out.info("`%s` will be used for parsing dictionaries.\n" % self.program[0])
if not args.colour:
for key in list(COLORS.keys()):
COLORS[key] = ""
def run_tests(self, single_test=None):
args = self.args
config = self.config
if args.surface == args.lexical == False:
args.surface = args.lexical = True
if single_test is not None:
self.parse_fsts(single_test)
if args.lexical: self.run_test(single_test, True)
if args.surface: self.run_test(single_test, False)
else:
self.parse_fsts()
if args.lexical:
for t in config.lexical_tests:
self.run_test(t, True)
if args.surface:
for t in config.surface_tests:
self.run_test(t, False)
self.out.final_result(self)
def parse_fsts(self, key=None):
args = self.args
self.results = {"gen": {}, "morph": {}}
def parser(self, d, f, tests):
# TODO: handle ~ in file parser
if key is not None:
keys = [x.lstrip("~") for x in tests[key]]
else:
keys = [x[0].lstrip("~") for vals in tests.values() for x in vals]
app = Popen(self.program + [f], stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
args = '\n'.join(keys) + '\n'
res, err = app.communicate(args.encode('utf-8'))
res = res.decode('utf-8').split('\n\n')
err = err.decode('utf-8').strip()
if app.returncode != 0:
self.results['err'] = "\n".join(
[i for i in [res[0], err, "(Error code: %s)" % app.returncode] if i != '']
)
else:
self.results[d] = self.parse_fst_output(res)
if args.lexical:
parser(self, "gen", self.gen, self.config.surface_tests)
if self.args.verbose:
self.out.info("Generating...\n")
if args.surface:
parser(self, "morph", self.morph, self.config.lexical_tests)
if self.args.verbose:
self.out.info("Morphing...\n")
if self.args.verbose:
self.out.info("Done!\n")
def get_forms(self, test, forms):
if test.startswith('~'):
test = test.lstrip("~")
detested = set()
expected = set()
for i in forms:
if i.startswith('~'):
expected.add(i.lstrip('~'))
else:
detested.add(i)
else:
detested = set([i.lstrip('~') for i in forms if i.startswith('~')])
expected = set([i.lstrip('~') for i in forms if not i.startswith('~')])
return test, detested, expected
def run_test(self, data, is_lexical):
if is_lexical:
desc = "Lexical/Generation"
f = "gen"
tests = self.config.surface_tests[data]
else: #surface
desc = "Surface/Analysis"
f = "morph"
tests = self.config.lexical_tests[data]
res = self.results[f]
if self.results.get('err'):
raise LookupError('`%s` had an error:\n%s' % (self.program, self.results['err']))
c = len(self.count)
d = "%s (%s)" % (data, desc)
title = "Test %d: %s" % (c, d)
self.out.title(title)
self.count[d] = {"Pass": 0, "Fail": 0}
caseslen = len(tests)
for n, testcase in enumerate(tests):
n += 1 # off by one annoyance
test = testcase.input
forms = testcase.outputs
actual_results = set(res[test.lstrip("~")])
test, detested_results, expected_results = self.get_forms(test, forms)
missing = set()
invalid = set()
success = set()
detested = set()
missing_detested = set()
for form in expected_results:
if not form in actual_results:
missing.add(form)
for form in detested_results:
if form in actual_results:
detested.add(form)
actual_results.remove(form)
else:
missing_detested.add(form)
for form in actual_results:
if not form in expected_results:
invalid.add(form)
if len(expected_results) > 0:
for form in actual_results:
if not form in (missing | invalid | detested):
passed = True
success.add(form)
self.count[d]["Pass"] += 1
if not self.args.hide_pass:
self.out.success(n, caseslen, test, form)
for form in missing_detested:
success.add(form)
self.count[d]["Pass"] += 1
if not self.args.hide_pass:
self.out.success(n, caseslen, test, "<No '%s' %s>" % (form, desc.lower()))
else:
if len(invalid) == 1 and list(invalid)[0].endswith("+?"):
invalid = set()
self.count[d]["Pass"] += 1
if not self.args.hide_pass:
self.out.success(n, caseslen, test, "<No %s>" % desc.lower())
if len(missing) > 0:
if not self.args.hide_fail:
self.out.failure(n, caseslen, test, "Missing results", missing)
#self.count[d]["Fail"] += len(missing)
if len(invalid) > 0:
if not is_lexical and self.args.ignore_analyses:
invalid = set() # hide this for the final check
elif not self.args.hide_fail:
self.out.failure(n, caseslen, test, "Unexpected results", invalid)
#self.count[d]["Fail"] += len(invalid)
if len(detested) > 0:
if self.args.colour:
msg = colourise("{red}BROKEN!{reset}")
else:
msg = "BROKEN!"
if not self.args.hide_fail:
self.out.failure(n, caseslen, test, msg + " Negative results", detested)
#self.count[d]["Fail"] += len(detested)
if len(detested) + len(missing) + len(invalid) > 0:
self.count[d]["Fail"] += 1
self.out.result(title, c, self.count[d])
self.passes += self.count[d]["Pass"]
self.fails += self.count[d]["Fail"]
def parse_fst_output(self, fst):
parsed = {}
for item in fst:
res = item.replace('\r\n','\n').replace('\r','\n').split('\n')
for i in res:
if i.strip() != '':
results = re.split(r'\t+', i)
key = results[0].strip()
if not key in parsed:
parsed[key] = set()
# This test is needed because xfst's lookup
# sometimes output strings like
# bearkoe\tbearkoe\t+N+Sg+Nom, instead of the expected
# bearkoe\tbearkoe+N+Sg+Nom
if len(results) > 2 and results[2][0] == '+':
parsed[key].add(results[1].strip() + results[2].strip())
else:
parsed[key].add(results[1].strip())
return parsed
def __str__(self):
return str(self.out)
# Debug regex at: https://debuggex.com
# Visualisation of the TEST_RE regex:
# https://debuggex.com/i/kURzt7XS3t83-dvT.png
# Link to debuggex page with this regex:
# https://debuggex.com/r/kURzt7XS3t83-dvT
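# Illustrative lines these patterns are written to match (reconstructed
# examples, not taken from a real lexc file):
#   HEADER_RE:  !!€gt-norm: Noun - gåetie       -> transducer tag + test title
#   TEST_RE:    !!€ gåetie+N+Sg+Nom: gåetie     -> expected ("€") pair
#               !!$ gåetie+N+Sg+Nom: gåetiide   -> negative ("$") pair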
def parse_lexc(f, fallback=None):
HEADER_RE = re.compile(r'^\!\!€([^\s.:]+)(?:.[^\s:]+)?:\s*([^#]+)\s*#?')
TEST_RE = re.compile(r'^\!\!([€\$])\s+(\S.*):\s+(\S+|\S.*\S)(\s*$|\s+[#!])')
POS = "€"
NEG = "$"
output = {}
trans = None
test = None
if isinstance(f, str):
f = StringIO(f)
lines = f.readlines()
for line in lines:
if line.startswith("LEXICON"):
test = line.split(" ", 1)[-1]
if fallback is not None:
trans = fallback
elif line.startswith("!!"):
match = HEADER_RE.match(line)
if match:
trans = match.group(1)
test = match.group(2).strip()
if output.get(trans) is None:
output[trans] = OrderedDict()
if output[trans].get(test) is None:
output[trans][test] = OrderedDict()
continue
match = TEST_RE.match(line)
if test is None or trans is None:
continue
if TEST_RE.match(line):
test_type = match.group(1).strip()
left = match.group(3).strip()
right = match.group(2).strip()
if test_type == NEG:
right = "~" + right
if output[trans][test].get(left) is None:
output[trans][test][left] = []
output[trans][test][left].append(right)
return dict(output)
def parse_lexc_trans(f, gen=None, morph=None, app=None, fallback=None, lookup="hfst"):
trans = None
if gen is not None:
trans = gen.rsplit('.', 1)[0].split('-', 1)[1]
elif morph is not None:
trans = morph.rsplit('.', 1)[0].split('-', 1)[1]
elif fallback is not None:
trans = fallback
if trans is None or trans == "":
raise AttributeError("Could not guess which transducer to use.")
lexc = parse_lexc(f, fallback)[trans]
if app is None:
app = ["hfst-lookup"] if lookup == "hfst" else ["lookup", "-flags", "mbTT"]
config = {lookup: {"Gen": gen, "Morph": morph, "App": string_to_list(app)}}
return {"Config": config, "Tests": lexc}
def lexc_to_yaml_string(data):
out = StringIO()
out.write("Tests:\n")
for trans, tests in data.items():
for test, lines in tests.items():
out.write(" %s:\n" % test)
for left, rights in lines.items():
if len(rights) == 1:
out.write(" %s: %s\n" % (left, rights[0]))
elif len(rights) > 1:
out.write(" %s: [%s]\n" % (left, ", ".join(rights)))
return out.getvalue()
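# Example of the YAML produced above (illustrative data):
#
#   Tests:
#     Noun - gåetie:
#       gåetie+N+Sg+Nom: gåetie
#       gåetie+N+Sg+Ill: [gåatan, gåetan]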
class UI(ArgumentParser):
def __init__(self):
ArgumentParser.__init__(self)
self.description="""Test morphological transducers for consistency."""
self.epilog="Will run all tests in the test_file by default."
self.add_argument("-c", "--colour", dest="colour",
action="store_true", help="Colours the output")
self.add_argument("-o", "--output",
dest="output", default="normal",
help="Desired output style: normal, compact, terse, final (Default: normal)")
self.add_argument("-q", "--silent",
dest="silent", action="store_true",
help="Hide all output; exit code only")
self.add_argument("-i", "--ignore-extra-analyses",
dest="ignore_analyses", action="store_true",
help="""Ignore extra analyses when there are more than expected,
will PASS if the expected one is found.""")
self.add_argument("-s", "--surface",
dest="surface", action="store_true",
help="Surface input/analysis tests only")
self.add_argument("-l", "--lexical",
dest="lexical", action="store_true",
help="Lexical input/generation tests only")
self.add_argument("-f", "--hide-fails",
dest="hide_fail", action="store_true",
help="Suppresses fails to make finding passes easier")
self.add_argument("-p", "--hide-passes",
dest="hide_pass", action="store_true",
help="Suppresses passes to make finding fails easier")
self.add_argument("-S", "--section", default="hfst",
dest="section", nargs='?', required=False,
help="The section to be used for testing (default is `hfst`)")
self.add_argument("-t", "--test",
dest="test", nargs='?', required=False,
help="""Which test to run (Default: all). TEST = test ID, e.g.
'Noun - g\u00E5etie' (remember quotes if the ID contains spaces)""")
self.add_argument("-F", "--fallback",
dest="transducer", nargs='?', required=False,
help="""Which fallback transducer to use (ignored, use --gen and --morph).""")
self.add_argument("-v", "--verbose",
dest="verbose", action="store_true",
help="More verbose output.")
self.add_argument("--app", dest="app", nargs='?', required=False,
help="Override application used for test")
self.add_argument("--gen", dest="gen", nargs='?', required=False,
help="Override generation transducer used for test")
self.add_argument("--morph", dest="morph", nargs='?', required=False,
help="Override morph transducer used for test")
self.add_argument("test_file",
help="YAML file with test rules")
self.test = MorphTest(self.parse_args())
def start(self):
ret = self.test.run()
sys.stdout.write(str(self.test))
sys.exit(ret)
def main():
try:
ui = UI()
ui.start()
except KeyboardInterrupt:
sys.exit(130)
#except Exception as e:
# print("Error: %r" % e)
# sys.exit(1)
if __name__ == "__main__":
main()
|
py | 1a3d3da2b92472919c825f58389fda91ab32e3f0 | # Copyright 2015 Google, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import backoff
from google.cloud import datastore
import pytest
import snippets
PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"]
class CleanupClient(datastore.Client):
def __init__(self, *args, **kwargs):
super(CleanupClient, self).__init__(*args, **kwargs)
self.entities_to_delete = []
self.keys_to_delete = []
def cleanup(self):
with self.batch():
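            # delete_multi() adds the deletes to the active batch, which is
            # committed in a single request when the context manager exits.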
self.delete_multi(
list(set([x.key for x in self.entities_to_delete]))
+ list(set(self.keys_to_delete))
)
@pytest.yield_fixture
def client():
client = CleanupClient(PROJECT)
yield client
client.cleanup()
@pytest.mark.flaky
class TestDatastoreSnippets:
# These tests mostly just test the absence of exceptions.
def test_incomplete_key(self, client):
assert snippets.incomplete_key(client)
def test_named_key(self, client):
assert snippets.named_key(client)
def test_key_with_parent(self, client):
assert snippets.key_with_parent(client)
def test_key_with_multilevel_parent(self, client):
assert snippets.key_with_multilevel_parent(client)
def test_basic_entity(self, client):
assert snippets.basic_entity(client)
def test_entity_with_parent(self, client):
assert snippets.entity_with_parent(client)
def test_properties(self, client):
assert snippets.properties(client)
def test_array_value(self, client):
assert snippets.array_value(client)
def test_upsert(self, client):
task = snippets.upsert(client)
client.entities_to_delete.append(task)
assert task
def test_insert(self, client):
task = snippets.insert(client)
client.entities_to_delete.append(task)
assert task
def test_update(self, client):
task = snippets.insert(client)
client.entities_to_delete.append(task)
assert task
def test_lookup(self, client):
task = snippets.lookup(client)
client.entities_to_delete.append(task)
assert task
def test_delete(self, client):
snippets.delete(client)
def test_batch_upsert(self, client):
tasks = snippets.batch_upsert(client)
client.entities_to_delete.extend(tasks)
assert tasks
def test_batch_lookup(self, client):
tasks = snippets.batch_lookup(client)
client.entities_to_delete.extend(tasks)
assert tasks
def test_batch_delete(self, client):
snippets.batch_delete(client)
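    # The @backoff-decorated query tests retry on AssertionError because
    # Datastore indexes are eventually consistent: freshly written entities may
    # not show up in query results immediately.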
@backoff.on_exception(backoff.expo, AssertionError, max_time=240)
def test_unindexed_property_query(self, client):
tasks = snippets.unindexed_property_query(client)
client.entities_to_delete.extend(tasks)
assert tasks
@backoff.on_exception(backoff.expo, AssertionError, max_time=240)
def test_basic_query(self, client):
tasks = snippets.basic_query(client)
client.entities_to_delete.extend(tasks)
assert tasks
@backoff.on_exception(backoff.expo, AssertionError, max_time=240)
def test_projection_query(self, client):
priorities, percents = snippets.projection_query(client)
client.entities_to_delete.extend(client.query(kind="Task").fetch())
assert priorities
assert percents
def test_ancestor_query(self, client):
tasks = snippets.ancestor_query(client)
client.entities_to_delete.extend(tasks)
assert tasks
def test_run_query(self, client):
snippets.run_query(client)
def test_cursor_paging(self, client):
for n in range(6):
client.entities_to_delete.append(snippets.insert(client))
@backoff.on_exception(backoff.expo, AssertionError, max_time=240)
def run_sample():
results = snippets.cursor_paging(client)
page_one, cursor_one, page_two, cursor_two = results
assert len(page_one) == 5
assert len(page_two)
assert cursor_one
run_sample()
@backoff.on_exception(backoff.expo, AssertionError, max_time=240)
def test_property_filter(self, client):
tasks = snippets.property_filter(client)
client.entities_to_delete.extend(tasks)
assert tasks
@backoff.on_exception(backoff.expo, AssertionError, max_time=240)
def test_composite_filter(self, client):
tasks = snippets.composite_filter(client)
client.entities_to_delete.extend(tasks)
assert tasks
@backoff.on_exception(backoff.expo, AssertionError, max_time=240)
def test_key_filter(self, client):
tasks = snippets.key_filter(client)
client.entities_to_delete.extend(tasks)
assert tasks
@backoff.on_exception(backoff.expo, AssertionError, max_time=240)
def test_ascending_sort(self, client):
tasks = snippets.ascending_sort(client)
client.entities_to_delete.extend(tasks)
assert tasks
@backoff.on_exception(backoff.expo, AssertionError, max_time=240)
def test_descending_sort(self, client):
tasks = snippets.descending_sort(client)
client.entities_to_delete.extend(tasks)
assert tasks
@backoff.on_exception(backoff.expo, AssertionError, max_time=240)
def test_multi_sort(self, client):
tasks = snippets.multi_sort(client)
client.entities_to_delete.extend(tasks)
assert tasks
@backoff.on_exception(backoff.expo, AssertionError, max_time=240)
def test_keys_only_query(self, client):
keys = snippets.keys_only_query(client)
client.entities_to_delete.extend(client.query(kind="Task").fetch())
assert keys
@backoff.on_exception(backoff.expo, AssertionError, max_time=240)
def test_distinct_on_query(self, client):
tasks = snippets.distinct_on_query(client)
client.entities_to_delete.extend(tasks)
assert tasks
def test_kindless_query(self, client):
tasks = snippets.kindless_query(client)
assert tasks
def test_inequality_range(self, client):
snippets.inequality_range(client)
def test_inequality_invalid(self, client):
snippets.inequality_invalid(client)
def test_equal_and_inequality_range(self, client):
snippets.equal_and_inequality_range(client)
def test_inequality_sort(self, client):
snippets.inequality_sort(client)
def test_inequality_sort_invalid_not_same(self, client):
snippets.inequality_sort_invalid_not_same(client)
def test_inequality_sort_invalid_not_first(self, client):
snippets.inequality_sort_invalid_not_first(client)
def test_array_value_inequality_range(self, client):
snippets.array_value_inequality_range(client)
def test_array_value_equality(self, client):
snippets.array_value_equality(client)
def test_exploding_properties(self, client):
task = snippets.exploding_properties(client)
assert task
def test_transactional_update(self, client):
keys = snippets.transactional_update(client)
client.keys_to_delete.extend(keys)
def test_transactional_get_or_create(self, client):
task = snippets.transactional_get_or_create(client)
client.entities_to_delete.append(task)
assert task
def transactional_single_entity_group_read_only(self, client):
task_list, tasks_in_list = snippets.transactional_single_entity_group_read_only(
client
)
client.entities_to_delete.append(task_list)
client.entities_to_delete.extend(tasks_in_list)
assert task_list
assert tasks_in_list
@backoff.on_exception(backoff.expo, AssertionError, max_time=240)
def test_namespace_run_query(self, client):
all_namespaces, filtered_namespaces = snippets.namespace_run_query(client)
assert all_namespaces
assert filtered_namespaces
assert "google" in filtered_namespaces
@backoff.on_exception(backoff.expo, AssertionError, max_time=240)
def test_kind_run_query(self, client):
kinds = snippets.kind_run_query(client)
client.entities_to_delete.extend(client.query(kind="Task").fetch())
assert kinds
assert "Task" in kinds
@backoff.on_exception(backoff.expo, AssertionError, max_time=240)
def test_property_run_query(self, client):
kinds = snippets.property_run_query(client)
client.entities_to_delete.extend(client.query(kind="Task").fetch())
assert kinds
assert "Task" in kinds
@backoff.on_exception(backoff.expo, AssertionError, max_time=240)
def test_property_by_kind_run_query(self, client):
reprs = snippets.property_by_kind_run_query(client)
client.entities_to_delete.extend(client.query(kind="Task").fetch())
assert reprs
@backoff.on_exception(backoff.expo, AssertionError, max_time=240)
def test_index_merge_queries(self, client):
snippets.index_merge_queries(client)
|
py | 1a3d3f0be186330179eda0b20a86f22c484d60d3 | from warnings import warn
from django.conf import settings
from wagtail.utils.deprecation import RemovedInWagtail50Warning
def get_admin_base_url(context=None):
"""
Gets the base URL for the wagtail admin site. This is set in `settings.WAGTAILADMIN_BASE_URL`,
which was previously `settings.BASE_URL`.
    If the setting is omitted and this is called in a request context, it falls back to
    `request.site.root_url` or, failing that, the host name of the request.
"""
admin_base_url = getattr(settings, "WAGTAILADMIN_BASE_URL", None)
if admin_base_url is None and hasattr(settings, "BASE_URL"):
warn(
"settings.BASE_URL has been renamed to settings.WAGTAILADMIN_BASE_URL",
category=RemovedInWagtail50Warning,
)
admin_base_url = settings.BASE_URL
if admin_base_url is None and context is not None:
request = context["request"]
admin_base_url = getattr(request.site, "root_url", None)
if admin_base_url is None:
admin_base_url = request.get_host()
secure_prefix = "http"
if request.is_secure():
secure_prefix = "https"
admin_base_url = secure_prefix + "://" + admin_base_url
return admin_base_url
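# Illustrative configuration (assumed project settings):
#
#   # settings.py
#   WAGTAILADMIN_BASE_URL = "https://admin.example.com"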
|
py | 1a3d3f89a9dd9ee81df8718ff222bb64af9245c0 | # User-User Similarity computation on pySpark
import sys
from itertools import combinations
import numpy as np
import pdb
from pyspark import SparkContext
def parseVector(line):
'''
Parse each line of the specified data file, assuming a "|" delimiter.
Converts each rating to a float
'''
line = line.split("|")
return line[1],(line[0],float(line[2]))
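# Illustrative input/output, assuming a "user|item|rating" line format:
#   parseVector("u1|i9|3.5")  ->  ("i9", ("u1", 3.5))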
def keyOnUserPair(item_id,user_and_rating_pair):
'''
Convert each item and co_rating user pairs to a new vector
keyed on the user pair ids, with the co_ratings as their value.
'''
(user1_with_rating,user2_with_rating) = user_and_rating_pair
user1_id,user2_id = user1_with_rating[0],user2_with_rating[0]
user1_rating,user2_rating = user1_with_rating[1],user2_with_rating[1]
return (user1_id,user2_id),(user1_rating,user2_rating)
def calcSim(user_pair,rating_pairs):
'''
For each user-user pair, return the specified similarity measure,
along with co_raters_count.
'''
sum_xx, sum_xy, sum_yy, sum_x, sum_y, n = (0.0, 0.0, 0.0, 0.0, 0.0, 0)
for rating_pair in rating_pairs:
sum_xx += np.float(rating_pair[0]) * np.float(rating_pair[0])
sum_yy += np.float(rating_pair[1]) * np.float(rating_pair[1])
sum_xy += np.float(rating_pair[0]) * np.float(rating_pair[1])
# sum_y += rt[1]
# sum_x += rt[0]
n += 1
cos_sim = cosine(sum_xy,np.sqrt(sum_xx),np.sqrt(sum_yy))
return user_pair, (cos_sim,n)
def cosine(dot_product,rating_norm_squared,rating2_norm_squared):
'''
The cosine between two vectors A, B
dotProduct(A, B) / (norm(A) * norm(B))
'''
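    # Note: calcSim passes the plain norms here (np.sqrt of the squared sums),
    # despite the parameter names. Worked example (illustrative): for co-rating
    # pairs (4, 2) and (5, 1), dot = 13, norms = sqrt(41) and sqrt(5), so the
    # cosine is 13 / (6.403 * 2.236) ~= 0.908.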
numerator = dot_product
denominator = rating_norm_squared * rating2_norm_squared
return (numerator / (float(denominator))) if denominator else 0.0
if __name__ == "__main__":
if len(sys.argv) < 3:
print >> sys.stderr, \
"Usage: PythonUserCF <master> <file>"
exit(-1)
sc = SparkContext(sys.argv[1], "PythonUserCF")
lines = sc.textFile(sys.argv[2])
'''
Parse the vector with item_id as the key:
item_id -> (user_id,rating)
'''
item_user = lines.map(parseVector).cache()
'''
Get co_rating users by joining on item_id:
item_id -> ((user_1,rating),(user2,rating))
'''
item_user_pairs = item_user.join(item_user)
'''
Key each item_user_pair on the user_pair and get rid of non-unique
user pairs, then aggregate all co-rating pairs:
(user1_id,user2_id) -> [(rating1,rating2),
(rating1,rating2),
(rating1,rating2),
...]
'''
user_item_rating_pairs = item_user_pairs.map(
lambda p: keyOnUserPair(p[0],p[1])).filter(
lambda p: p[0][0] != p[0][1]).groupByKey()
'''
Calculate the cosine similarity for each user pair:
(user1,user2) -> (similarity,co_raters_count)
'''
user_pair_sims = user_item_rating_pairs.map(
lambda p: calcSim(p[0],p[1]))
for p in user_pair_sims.collect():
print p |
py | 1a3d3faa01eab6ad675070a227c223ad3d753b6d | # -*- coding: utf-8 -*-
"""
Class definition of YOLO_v3 style detection model on image and video
"""
import colorsys
import os
from timeit import default_timer as timer
import numpy as np
from keras import backend as K
from keras.models import load_model
from keras.layers import Input
from PIL import Image, ImageFont, ImageDraw
from yolo3.model import yolo_eval, yolo_body, tiny_yolo_body
from yolo3.utils import letterbox_image
from keras.utils import multi_gpu_model
class YOLO(object):
_defaults = {
"model_path": 'model_data/trained_weights_stage_1.h5',
"anchors_path": 'model_data/yolo_anchors.txt',
"classes_path": 'model_data/AND_classes.txt',
"score" : 0.3,
"iou" : 0.45,
"model_image_size" : (416, 416),
"gpu_num" : 1,
}
@classmethod
def get_defaults(cls, n):
if n in cls._defaults:
return cls._defaults[n]
else:
return "Unrecognized attribute name '" + n + "'"
def __init__(self, **kwargs):
self.__dict__.update(self._defaults) # set up default values
self.__dict__.update(kwargs) # and update with user overrides
self.class_names = self._get_class()
self.anchors = self._get_anchors()
self.sess = K.get_session()
self.boxes, self.scores, self.classes = self.generate()
def _get_class(self):
classes_path = os.path.expanduser(self.classes_path)
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
return class_names
def _get_anchors(self):
anchors_path = os.path.expanduser(self.anchors_path)
with open(anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
return np.array(anchors).reshape(-1, 2)
def generate(self):
model_path = os.path.expanduser(self.model_path)
assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'
# Load model, or construct model and load weights.
num_anchors = len(self.anchors)
num_classes = len(self.class_names)
is_tiny_version = num_anchors==6 # default setting
try:
self.yolo_model = load_model(model_path, compile=False)
except:
self.yolo_model = tiny_yolo_body(Input(shape=(None,None,3)), num_anchors//2, num_classes) \
if is_tiny_version else yolo_body(Input(shape=(None,None,3)), num_anchors//3, num_classes)
self.yolo_model.load_weights(self.model_path) # make sure model, anchors and classes match
else:
assert self.yolo_model.layers[-1].output_shape[-1] == \
num_anchors/len(self.yolo_model.output) * (num_classes + 5), \
'Mismatch between model and given anchor and class sizes'
print('{} model, anchors, and classes loaded.'.format(model_path))
# Generate colors for drawing bounding boxes.
hsv_tuples = [(x / len(self.class_names), 1., 1.)
for x in range(len(self.class_names))]
self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
self.colors = list(
map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
self.colors))
np.random.seed(10101) # Fixed seed for consistent colors across runs.
np.random.shuffle(self.colors) # Shuffle colors to decorrelate adjacent classes.
np.random.seed(None) # Reset seed to default.
# Generate output tensor targets for filtered bounding boxes.
self.input_image_shape = K.placeholder(shape=(2, ))
if self.gpu_num>=2:
self.yolo_model = multi_gpu_model(self.yolo_model, gpus=self.gpu_num)
boxes, scores, classes = yolo_eval(self.yolo_model.output, self.anchors,
len(self.class_names), self.input_image_shape,
score_threshold=self.score, iou_threshold=self.iou)
return boxes, scores, classes
def detect_image(self, image):
start = timer()
if self.model_image_size != (None, None):
assert self.model_image_size[0]%32 == 0, 'Multiples of 32 required'
assert self.model_image_size[1]%32 == 0, 'Multiples of 32 required'
boxed_image = letterbox_image(image, tuple(reversed(self.model_image_size)))
else:
new_image_size = (image.width - (image.width % 32),
image.height - (image.height % 32))
boxed_image = letterbox_image(image, new_image_size)
image_data = np.array(boxed_image, dtype='float32')
print(image_data.shape)
image_data /= 255.
image_data = np.expand_dims(image_data, 0) # Add batch dimension.
out_boxes, out_scores, out_classes = self.sess.run(
[self.boxes, self.scores, self.classes],
feed_dict={
self.yolo_model.input: image_data,
self.input_image_shape: [image.size[1], image.size[0]],
K.learning_phase(): 0
})
print('Found {} boxes for {}'.format(len(out_boxes), 'img'))
font = ImageFont.truetype(font='font/FiraMono-Medium.otf',
size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))
thickness = (image.size[0] + image.size[1]) // 300
for i, c in reversed(list(enumerate(out_classes))):
predicted_class = self.class_names[c]
box = out_boxes[i]
score = out_scores[i]
label = '{} {:.2f}'.format(predicted_class, score)
draw = ImageDraw.Draw(image)
label_size = draw.textsize(label, font)
top, left, bottom, right = box
top = max(0, np.floor(top + 0.5).astype('int32'))
left = max(0, np.floor(left + 0.5).astype('int32'))
bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
right = min(image.size[0], np.floor(right + 0.5).astype('int32'))
print(label, (left, top), (right, bottom))
if top - label_size[1] >= 0:
text_origin = np.array([left, top - label_size[1]])
else:
text_origin = np.array([left, top + 1])
# My kingdom for a good redistributable image drawing library.
for i in range(thickness):
draw.rectangle(
[left + i, top + i, right - i, bottom - i],
outline=(255))
draw.rectangle(
[tuple(text_origin), tuple(text_origin + label_size)],
fill=None)
draw.text(text_origin, label, fill=(0), font=font)
del draw
end = timer()
print(end - start)
return image
def close_session(self):
self.sess.close()
def detect_video(yolo, video_path, output_path=""):
import cv2
vid = cv2.VideoCapture(video_path)
if not vid.isOpened():
raise IOError("Couldn't open webcam or video")
video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))
video_fps = vid.get(cv2.CAP_PROP_FPS)
video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
isOutput = True if output_path != "" else False
if isOutput:
print("!!! TYPE:", type(output_path), type(video_FourCC), type(video_fps), type(video_size))
out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)
accum_time = 0
curr_fps = 0
fps = "FPS: ??"
prev_time = timer()
while True:
        return_value, frame = vid.read()
        if not return_value:
            # vid.read() returns (False, None) at end of stream;
            # Image.fromarray(None) would raise, so stop cleanly.
            break
        image = Image.fromarray(frame)
image = yolo.detect_image(image)
result = np.asarray(image)
curr_time = timer()
exec_time = curr_time - prev_time
prev_time = curr_time
accum_time = accum_time + exec_time
curr_fps = curr_fps + 1
if accum_time > 1:
accum_time = accum_time - 1
fps = "FPS: " + str(curr_fps)
curr_fps = 0
cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=0.50, color=(255, 0, 0), thickness=2)
cv2.namedWindow("result", cv2.WINDOW_NORMAL)
cv2.imshow("result", result)
if isOutput:
out.write(result)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
yolo.close_session()
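# Usage sketch (paths come from _defaults and the image file is illustrative;
# both must exist locally):
#
#   yolo = YOLO(score=0.4)              # keyword overrides update _defaults
#   img = Image.open("sample.jpg")
#   annotated = yolo.detect_image(img)
#   annotated.show()
#   yolo.close_session()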
|
py | 1a3d3fb16fdd3d66937bc1dc1757f531ece91d3d | from typing import (
TYPE_CHECKING,
Any,
Callable,
Collection,
Type,
)
from requests.exceptions import (
ConnectionError,
HTTPError,
Timeout,
TooManyRedirects,
)
from web3.types import (
RPCEndpoint,
RPCResponse,
)
if TYPE_CHECKING:
from web3 import Web3 # noqa: F401
whitelist = [
'admin',
'shh',
'miner',
'net',
    'txpool',
    'testing',
'evm',
'eth_protocolVersion',
'eth_syncing',
'eth_coinbase',
'eth_mining',
'eth_hashrate',
'eth_gasPrice',
'eth_accounts',
'eth_blockNumber',
'eth_getBalance',
'eth_getStorageAt',
'eth_getProof',
'eth_getCode',
'eth_getBlockByNumber',
'eth_getBlockByHash',
'eth_getBlockTransactionCountByNumber',
'eth_getBlockTransactionCountByHash',
'eth_getUncleCountByBlockNumber',
'eth_getUncleCountByBlockHash',
'eth_getTransactionByHash',
'eth_getTransactionByBlockHashAndIndex',
'eth_getTransactionByBlockNumberAndIndex',
'eth_getTransactionReceipt',
'eth_getTransactionCount',
'eth_call',
'eth_estimateGas',
'eth_newBlockFilter',
'eth_newPendingTransactionFilter',
'eth_newFilter',
'eth_getFilterChanges',
'eth_getFilterLogs',
'eth_getLogs',
'eth_uninstallFilter',
'eth_getCompilers',
'eth_getWork',
'eth_sign',
'eth_signTypedData',
'eth_sendRawTransaction',
'personal_importRawKey',
'personal_newAccount',
'personal_listAccounts',
'personal_lockAccount',
'personal_unlockAccount',
'personal_ecRecover',
'personal_sign',
'personal_signTypedData',
]
def check_if_retry_on_failure(method: RPCEndpoint) -> bool:
root = method.split('_')[0]
if root in whitelist:
return True
elif method in whitelist:
return True
else:
return False
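# Illustrative behaviour: check_if_retry_on_failure('eth_getBalance') is True
# (whitelisted read-only call), while 'eth_sendTransaction' is not whitelisted,
# since retrying a state-changing call is not safe.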
def exception_retry_middleware(
make_request: Callable[[RPCEndpoint, Any], RPCResponse],
web3: "Web3",
errors: Collection[Type[BaseException]],
retries: int=5,
) -> Callable[[RPCEndpoint, Any], RPCResponse]:
"""
Creates middleware that retries failed HTTP requests. Is a default
middleware for HTTPProvider.
"""
def middleware(method: RPCEndpoint, params: Any) -> RPCResponse:
if check_if_retry_on_failure(method):
for i in range(retries):
try:
return make_request(method, params)
# https://github.com/python/mypy/issues/5349
except errors: # type: ignore
if i < retries - 1:
continue
else:
raise
return None
else:
return make_request(method, params)
return middleware
def http_retry_request_middleware(
make_request: Callable[[RPCEndpoint, Any], Any], web3: "Web3"
) -> Callable[[RPCEndpoint, Any], Any]:
return exception_retry_middleware(
make_request,
web3,
(ConnectionError, HTTPError, Timeout, TooManyRedirects)
)
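# Usage sketch (endpoint URL is illustrative; per the docstring above, this
# middleware is already a default for HTTPProvider, so adding it manually is
# only needed for custom middleware stacks):
#
#   from web3 import Web3
#   w3 = Web3(Web3.HTTPProvider("http://127.0.0.1:8545"))
#   w3.middleware_onion.add(http_retry_request_middleware)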
|
py | 1a3d40262073396a0fea22a497f4de1ee7ed0217 | def hexal_to_decimal(s):
""" s in form 0X< hexal digits>
returns int in decimal"""
s = s[2:]
s = s[::-1]
s = list(s)
for i, e in enumerate(s):
if s[i] == "A": s[i] = "10"
if s[i] == "B": s[i] = "11"
if s[i] == "C": s[i] = "12"
if s[i] == "D": s[i] = "13"
if s[i] == "E": s[i] = "14"
if s[i] == "F": s[i] = "15"
sum = 0
for i, e in enumerate(s):
sum += (16 ** i) * int(e)
return sum
def octal_to_decimal(s):
"""s in form 0X<octal digits>
returns int in decimal"""
s = s[1:]
s = s[::-1] # reverse
sum = 0
for i, e in enumerate(s):
sum += (8 ** i) * int(e)
return sum
print(hexal_to_decimal("0XCC"))
print(octal_to_decimal("010"))
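# Sanity check (illustrative): Python's built-in int() agrees with the helpers:
#   int("0XCC", 16) == 204 == hexal_to_decimal("0XCC")
#   int("010", 8)   == 8   == octal_to_decimal("010")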
|
py | 1a3d44621b6c37af173d96dc8cf51e718108a865 | from random import randint
print('{} CHALLENGE 28 {}'.format('='*10, '='*10))
aleat = randint(0, 5)
n = int(input("""Hey, hey, hey! To win my game, guess which number from 0 to 5 I picked!"""))
if n == aleat:
    print('YOU GOT IT! YOU WIN!')
else:
    print('WRONG! IT WAS {}!'.format(aleat)) |
py | 1a3d449a1a1be812ff70408da740ca0bc9898c60 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class NetworkWatchersOperations(object):
"""NetworkWatchersOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def create_or_update(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "models.NetworkWatcher"
**kwargs # type: Any
):
# type: (...) -> "models.NetworkWatcher"
"""Creates or updates a network watcher in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param parameters: Parameters that define the network watcher resource.
:type parameters: ~azure.mgmt.network.v2020_03_01.models.NetworkWatcher
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkWatcher, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_03_01.models.NetworkWatcher
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.NetworkWatcher"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'NetworkWatcher')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('NetworkWatcher', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('NetworkWatcher', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}'} # type: ignore
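    # Usage sketch (resource names/location are illustrative; assumes
    # azure-identity and a NetworkManagementClient targeting this API version):
    #
    #   from azure.identity import DefaultAzureCredential
    #   from azure.mgmt.network import NetworkManagementClient
    #
    #   client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
    #   watcher = client.network_watchers.create_or_update(
    #       "my-rg", "my-watcher", {"location": "westeurope"}
    #   )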
def get(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.NetworkWatcher"
"""Gets the specified network watcher by resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkWatcher, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_03_01.models.NetworkWatcher
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.NetworkWatcher"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkWatcher', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified network watcher resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}'} # type: ignore
def update_tags(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "models.NetworkWatcher"
"""Updates a network watcher tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param parameters: Parameters supplied to update network watcher tags.
:type parameters: ~azure.mgmt.network.v2020_03_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkWatcher, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_03_01.models.NetworkWatcher
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.NetworkWatcher"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkWatcher', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}'} # type: ignore
def list(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["models.NetworkWatcherListResult"]
"""Gets all network watchers by resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkWatcherListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_03_01.models.NetworkWatcherListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.NetworkWatcherListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkWatcherListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers'} # type: ignore
def list_all(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["models.NetworkWatcherListResult"]
"""Gets all network watchers by subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkWatcherListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_03_01.models.NetworkWatcherListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.NetworkWatcherListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkWatcherListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkWatchers'} # type: ignore
def get_topology(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "models.TopologyParameters"
**kwargs # type: Any
):
# type: (...) -> "models.Topology"
"""Gets the current network topology by resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param parameters: Parameters that define the representation of topology.
:type parameters: ~azure.mgmt.network.v2020_03_01.models.TopologyParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Topology, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_03_01.models.Topology
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.Topology"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.get_topology.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TopologyParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Topology', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_topology.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/topology'} # type: ignore
def _verify_ip_flow_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "models.VerificationIPFlowParameters"
**kwargs # type: Any
):
# type: (...) -> "models.VerificationIPFlowResult"
cls = kwargs.pop('cls', None) # type: ClsType["models.VerificationIPFlowResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._verify_ip_flow_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'VerificationIPFlowParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VerificationIPFlowResult', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('VerificationIPFlowResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_verify_ip_flow_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/ipFlowVerify'} # type: ignore
def begin_verify_ip_flow(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "models.VerificationIPFlowParameters"
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.VerificationIPFlowResult"]
"""Verify IP flow from the specified VM to a location given the currently configured NSG rules.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param parameters: Parameters that define the IP flow to be verified.
:type parameters: ~azure.mgmt.network.v2020_03_01.models.VerificationIPFlowParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either VerificationIPFlowResult or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_03_01.models.VerificationIPFlowResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.VerificationIPFlowResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._verify_ip_flow_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VerificationIPFlowResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_verify_ip_flow.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/ipFlowVerify'} # type: ignore
def _get_next_hop_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "models.NextHopParameters"
**kwargs # type: Any
):
# type: (...) -> "models.NextHopResult"
cls = kwargs.pop('cls', None) # type: ClsType["models.NextHopResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._get_next_hop_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'NextHopParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('NextHopResult', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('NextHopResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_get_next_hop_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/nextHop'} # type: ignore
def begin_get_next_hop(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "models.NextHopParameters"
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.NextHopResult"]
"""Gets the next hop from the specified VM.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param parameters: Parameters that define the source and destination endpoint.
:type parameters: ~azure.mgmt.network.v2020_03_01.models.NextHopParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either NextHopResult or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_03_01.models.NextHopResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.NextHopResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._get_next_hop_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('NextHopResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_next_hop.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/nextHop'} # type: ignore
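    # Usage sketch (illustrative, not generated code): assumes a NetworkManagementClient
    # instance named ``client`` that exposes these operations as ``network_watchers`` and
    # placeholder resource names; the NextHopParameters fields follow the 2020-03-01 models.
    #
    #   poller = client.network_watchers.begin_get_next_hop(
    #       resource_group_name="example-rg",
    #       network_watcher_name="example-watcher",
    #       parameters=models.NextHopParameters(
    #           target_resource_id="<vm-resource-id>",
    #           source_ip_address="10.0.0.4",
    #           destination_ip_address="10.0.0.10"))
    #   next_hop = poller.result()  # blocks until the LRO completes, returns NextHopResult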
def _get_vm_security_rules_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "models.SecurityGroupViewParameters"
**kwargs # type: Any
):
# type: (...) -> "models.SecurityGroupViewResult"
cls = kwargs.pop('cls', None) # type: ClsType["models.SecurityGroupViewResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._get_vm_security_rules_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'SecurityGroupViewParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('SecurityGroupViewResult', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('SecurityGroupViewResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_get_vm_security_rules_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/securityGroupView'} # type: ignore
def begin_get_vm_security_rules(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "models.SecurityGroupViewParameters"
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.SecurityGroupViewResult"]
"""Gets the configured and effective security group rules on the specified VM.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param parameters: Parameters that define the VM to check security groups for.
:type parameters: ~azure.mgmt.network.v2020_03_01.models.SecurityGroupViewParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either SecurityGroupViewResult or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_03_01.models.SecurityGroupViewResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.SecurityGroupViewResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._get_vm_security_rules_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('SecurityGroupViewResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_vm_security_rules.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/securityGroupView'} # type: ignore
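    # Continuation-token sketch (illustrative; ``client`` and resource names are assumptions):
    # the token produced by one poller can be fed back through the ``continuation_token``
    # keyword, which the cont_token branch above resolves via LROPoller.from_continuation_token.
    #
    #   poller = client.network_watchers.begin_get_vm_security_rules(
    #       "example-rg", "example-watcher",
    #       models.SecurityGroupViewParameters(target_resource_id="<vm-resource-id>"))
    #   token = poller.continuation_token()
    #   resumed = client.network_watchers.begin_get_vm_security_rules(
    #       "example-rg", "example-watcher",
    #       models.SecurityGroupViewParameters(target_resource_id="<vm-resource-id>"),
    #       continuation_token=token)
    #   view = resumed.result()  # SecurityGroupViewResult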
def _get_troubleshooting_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "models.TroubleshootingParameters"
**kwargs # type: Any
):
# type: (...) -> "models.TroubleshootingResult"
cls = kwargs.pop('cls', None) # type: ClsType["models.TroubleshootingResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._get_troubleshooting_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TroubleshootingParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('TroubleshootingResult', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('TroubleshootingResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_get_troubleshooting_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/troubleshoot'} # type: ignore
def begin_get_troubleshooting(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "models.TroubleshootingParameters"
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.TroubleshootingResult"]
"""Initiate troubleshooting on a specified resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher resource.
:type network_watcher_name: str
:param parameters: Parameters that define the resource to troubleshoot.
:type parameters: ~azure.mgmt.network.v2020_03_01.models.TroubleshootingParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either TroubleshootingResult or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_03_01.models.TroubleshootingResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.TroubleshootingResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._get_troubleshooting_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('TroubleshootingResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_troubleshooting.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/troubleshoot'} # type: ignore
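    # Polling-control sketch (illustrative; ``client`` and the ids are assumptions):
    # ``polling_interval`` overrides the ARMPolling delay used when no Retry-After header is
    # returned, and ``polling=False`` would select the NoPolling strategy handled above.
    #
    #   poller = client.network_watchers.begin_get_troubleshooting(
    #       "example-rg", "example-watcher",
    #       models.TroubleshootingParameters(
    #           target_resource_id="<vpn-gateway-or-connection-id>",
    #           storage_id="<storage-account-id>",
    #           storage_path="https://<account>.blob.core.windows.net/troubleshooting"),
    #       polling_interval=10)
    #   result = poller.result()  # TroubleshootingResult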
def _get_troubleshooting_result_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "models.QueryTroubleshootingParameters"
**kwargs # type: Any
):
# type: (...) -> "models.TroubleshootingResult"
cls = kwargs.pop('cls', None) # type: ClsType["models.TroubleshootingResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._get_troubleshooting_result_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'QueryTroubleshootingParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('TroubleshootingResult', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('TroubleshootingResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_get_troubleshooting_result_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/queryTroubleshootResult'} # type: ignore
def begin_get_troubleshooting_result(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "models.QueryTroubleshootingParameters"
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.TroubleshootingResult"]
"""Get the last completed troubleshooting result on a specified resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher resource.
:type network_watcher_name: str
:param parameters: Parameters that define the resource to query the troubleshooting result.
:type parameters: ~azure.mgmt.network.v2020_03_01.models.QueryTroubleshootingParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either TroubleshootingResult or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_03_01.models.TroubleshootingResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.TroubleshootingResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._get_troubleshooting_result_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('TroubleshootingResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_troubleshooting_result.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/queryTroubleshootResult'} # type: ignore
def _set_flow_log_configuration_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "models.FlowLogInformation"
**kwargs # type: Any
):
# type: (...) -> "models.FlowLogInformation"
cls = kwargs.pop('cls', None) # type: ClsType["models.FlowLogInformation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._set_flow_log_configuration_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'FlowLogInformation')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('FlowLogInformation', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('FlowLogInformation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_set_flow_log_configuration_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/configureFlowLog'} # type: ignore
def begin_set_flow_log_configuration(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "models.FlowLogInformation"
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.FlowLogInformation"]
"""Configures flow log and traffic analytics (optional) on a specified resource.
:param resource_group_name: The name of the network watcher resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher resource.
:type network_watcher_name: str
:param parameters: Parameters that define the configuration of flow log.
:type parameters: ~azure.mgmt.network.v2020_03_01.models.FlowLogInformation
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either FlowLogInformation or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_03_01.models.FlowLogInformation]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.FlowLogInformation"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._set_flow_log_configuration_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('FlowLogInformation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_set_flow_log_configuration.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/configureFlowLog'} # type: ignore
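    # Payload sketch for flow log configuration (illustrative; the ids are placeholders and
    # ``client`` is assumed). target_resource_id, storage_id and enabled appear to be the
    # required FlowLogInformation fields in the 2020-03-01 models.
    #
    #   flow_log = models.FlowLogInformation(
    #       target_resource_id="<nsg-resource-id>",
    #       storage_id="<storage-account-id>",
    #       enabled=True)
    #   poller = client.network_watchers.begin_set_flow_log_configuration(
    #       "example-rg", "example-watcher", flow_log)
    #   configured = poller.result()  # FlowLogInformation echoed back by the service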
def _get_flow_log_status_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "models.FlowLogStatusParameters"
**kwargs # type: Any
):
# type: (...) -> "models.FlowLogInformation"
cls = kwargs.pop('cls', None) # type: ClsType["models.FlowLogInformation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._get_flow_log_status_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'FlowLogStatusParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('FlowLogInformation', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('FlowLogInformation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_get_flow_log_status_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/queryFlowLogStatus'} # type: ignore
def begin_get_flow_log_status(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "models.FlowLogStatusParameters"
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.FlowLogInformation"]
"""Queries status of flow log and traffic analytics (optional) on a specified resource.
:param resource_group_name: The name of the network watcher resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher resource.
:type network_watcher_name: str
:param parameters: Parameters that define a resource to query flow log and traffic analytics
(optional) status.
:type parameters: ~azure.mgmt.network.v2020_03_01.models.FlowLogStatusParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either FlowLogInformation or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_03_01.models.FlowLogInformation]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.FlowLogInformation"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._get_flow_log_status_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('FlowLogInformation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_flow_log_status.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/queryFlowLogStatus'} # type: ignore
def _check_connectivity_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "models.ConnectivityParameters"
**kwargs # type: Any
):
# type: (...) -> "models.ConnectivityInformation"
cls = kwargs.pop('cls', None) # type: ClsType["models.ConnectivityInformation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._check_connectivity_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ConnectivityParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ConnectivityInformation', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('ConnectivityInformation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_check_connectivity_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectivityCheck'} # type: ignore
def begin_check_connectivity(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "models.ConnectivityParameters"
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.ConnectivityInformation"]
"""Verifies the possibility of establishing a direct TCP connection from a virtual machine to a
given endpoint including another VM or an arbitrary remote server.
:param resource_group_name: The name of the network watcher resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher resource.
:type network_watcher_name: str
:param parameters: Parameters that determine how the connectivity check will be performed.
:type parameters: ~azure.mgmt.network.v2020_03_01.models.ConnectivityParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ConnectivityInformation or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_03_01.models.ConnectivityInformation]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.ConnectivityInformation"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._check_connectivity_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ConnectivityInformation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_check_connectivity.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectivityCheck'} # type: ignore
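    # Connectivity-check sketch (illustrative; ``client`` and the endpoint values are
    # assumptions): the source references a VM by resource id, the destination can be an
    # address plus port.
    #
    #   params = models.ConnectivityParameters(
    #       source=models.ConnectivitySource(resource_id="<vm-resource-id>"),
    #       destination=models.ConnectivityDestination(address="www.example.com", port=443))
    #   poller = client.network_watchers.begin_check_connectivity(
    #       "example-rg", "example-watcher", params)
    #   info = poller.result()  # ConnectivityInformation with hops, latency and status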
def _get_azure_reachability_report_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "models.AzureReachabilityReportParameters"
**kwargs # type: Any
):
# type: (...) -> "models.AzureReachabilityReport"
cls = kwargs.pop('cls', None) # type: ClsType["models.AzureReachabilityReport"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._get_azure_reachability_report_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'AzureReachabilityReportParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('AzureReachabilityReport', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('AzureReachabilityReport', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_get_azure_reachability_report_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/azureReachabilityReport'} # type: ignore
def begin_get_azure_reachability_report(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "models.AzureReachabilityReportParameters"
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.AzureReachabilityReport"]
"""NOTE: This feature is currently in preview and still being tested for stability. Gets the
relative latency score for internet service providers from a specified location to Azure
regions.
:param resource_group_name: The name of the network watcher resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher resource.
:type network_watcher_name: str
:param parameters: Parameters that determine Azure reachability report configuration.
:type parameters: ~azure.mgmt.network.v2020_03_01.models.AzureReachabilityReportParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either AzureReachabilityReport or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_03_01.models.AzureReachabilityReport]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.AzureReachabilityReport"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._get_azure_reachability_report_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('AzureReachabilityReport', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_azure_reachability_report.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/azureReachabilityReport'} # type: ignore
def _list_available_providers_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "models.AvailableProvidersListParameters"
**kwargs # type: Any
):
# type: (...) -> "models.AvailableProvidersList"
cls = kwargs.pop('cls', None) # type: ClsType["models.AvailableProvidersList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._list_available_providers_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'AvailableProvidersListParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('AvailableProvidersList', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('AvailableProvidersList', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_list_available_providers_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/availableProvidersList'} # type: ignore
def begin_list_available_providers(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "models.AvailableProvidersListParameters"
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.AvailableProvidersList"]
"""NOTE: This feature is currently in preview and still being tested for stability. Lists all
available internet service providers for a specified Azure region.
:param resource_group_name: The name of the network watcher resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher resource.
:type network_watcher_name: str
:param parameters: Parameters that scope the list of available providers.
:type parameters: ~azure.mgmt.network.v2020_03_01.models.AvailableProvidersListParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either AvailableProvidersList or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_03_01.models.AvailableProvidersList]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.AvailableProvidersList"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._list_available_providers_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('AvailableProvidersList', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_list_available_providers.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/availableProvidersList'} # type: ignore
def _get_network_configuration_diagnostic_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "models.NetworkConfigurationDiagnosticParameters"
**kwargs # type: Any
):
# type: (...) -> "models.NetworkConfigurationDiagnosticResponse"
cls = kwargs.pop('cls', None) # type: ClsType["models.NetworkConfigurationDiagnosticResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._get_network_configuration_diagnostic_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'NetworkConfigurationDiagnosticParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('NetworkConfigurationDiagnosticResponse', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('NetworkConfigurationDiagnosticResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_get_network_configuration_diagnostic_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/networkConfigurationDiagnostic'} # type: ignore
def begin_get_network_configuration_diagnostic(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
parameters, # type: "models.NetworkConfigurationDiagnosticParameters"
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.NetworkConfigurationDiagnosticResponse"]
"""Gets Network Configuration Diagnostic data to help customers understand and debug network
behavior. It provides detailed information on what security rules were applied to a specified
traffic flow and the result of evaluating these rules. Customers must provide details of a flow
like source, destination, protocol, etc. The API returns whether traffic was allowed or denied,
the rules evaluated for the specified flow and the evaluation results.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param parameters: Parameters to get network configuration diagnostic.
:type parameters: ~azure.mgmt.network.v2020_03_01.models.NetworkConfigurationDiagnosticParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either NetworkConfigurationDiagnosticResponse or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_03_01.models.NetworkConfigurationDiagnosticResponse]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.NetworkConfigurationDiagnosticResponse"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._get_network_configuration_diagnostic_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('NetworkConfigurationDiagnosticResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_network_configuration_diagnostic.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/networkConfigurationDiagnostic'} # type: ignore
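    # Diagnostic-profile sketch (illustrative; ``client`` and the ids are assumptions): each
    # profile describes one flow, and the response reports whether that flow is allowed and
    # which security rules were evaluated.
    #
    #   params = models.NetworkConfigurationDiagnosticParameters(
    #       target_resource_id="<vm-resource-id>",
    #       profiles=[models.NetworkConfigurationDiagnosticProfile(
    #           direction="Inbound", protocol="TCP", source="10.1.0.4",
    #           destination="10.1.0.10", destination_port="3389")])
    #   poller = client.network_watchers.begin_get_network_configuration_diagnostic(
    #       "example-rg", "example-watcher", params)
    #   diagnostic = poller.result()  # NetworkConfigurationDiagnosticResponse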
|
py | 1a3d45332984821fb926ea89faaecddbb17fd92e | '''
Created on Aug 7, 2017
@author: duncan
'''
import sqlite3 as lite
from utils.DBUtils import dbConnection
class Keyword():
'''
Keyword class
'''
def __init__(self):
'''
Constructor
'''
def fetch_all_key_words(self):
dbUtil = dbConnection()
cur = dbUtil.get_cursor()
cur.execute("select keyword_id,keyword from keyword")
key_word_list = {}
keywords = cur.fetchall()
for keyword in keywords:
key_word_list[keyword[0]] = keyword[1]
dbUtil.close_db_connection()
return key_word_list
def update_keyword(self, keyword, keyword_id):
dbUtil = dbConnection()
cur = dbUtil.get_cursor()
try:
cur.execute("update keyword set keyword=? where keyword_id=?", (keyword, keyword_id))
dbUtil.commit()
except lite.IntegrityError:
return ("Keyword already exists", 501)
return "Keyword Updated Successfully"
def delete_keyword(self, keyword_id):
dbUtil = dbConnection()
cur = dbUtil.get_cursor()
cur.execute("delete from keyword where keyword_id=?", (keyword_id))
dbUtil.commit()
return "Keyword Deleted Successfully"
def add_keyword(self, keyword):
dbUtil = dbConnection()
cur = dbUtil.get_cursor()
try:
cur.execute("insert into keyword(keyword) values(?)", (keyword,))
dbUtil.commit()
except lite.IntegrityError:
return ("Keyword already exists", 501)
return "Keyword created Successfully"
class Subscription():
def __init__(self):
'''
Constructor
'''
def add_subscription(self, subscription):
sql_isert_user_mail = "insert into subscriber(email) values(?)"
sql_fetch_email_id = "select subscriber_id from subscriber where email=?"
sql_increment_group_id="insert into group_id_sequence values(null)"
sql_insert_subscription = '''insert into subscription(subscriber_id,site_id,keyword_id,page_limit,minimum_alert,subscription_group_id)
values(?,?,?,1,1,?)
'''
dbUtil = dbConnection()
cur = dbUtil.get_cursor()
email = subscription.subscriber.subscriber_email
generated_group_id=None
try:
try:
                cur.execute(sql_insert_user_mail, (email,))
            except Exception:
                print("Email already exists")
cur.execute(sql_increment_group_id)
dbUtil.commit()
generated_group_id=cur.lastrowid
cur.execute(sql_fetch_email_id, (email,))
mailId = cur.fetchone()
s=None
k=None
for site in subscription.site:
s=site
for keyword in subscription.keyword:
k=keyword
cur.execute(sql_insert_subscription, (mailId[0], site, keyword,generated_group_id))
dbUtil.commit()
return ("Subscription saved successfully")
except lite.IntegrityError:
dbUtil.rollback()
sql_fetch_site = "select name from site where site_id=?"
cur.execute(sql_fetch_site, (s,))
st = cur.fetchone()
sqlFetchKeyword = "select keyword from keyword where keyword_id=?"
cur.execute(sqlFetchKeyword, (k,))
ky = cur.fetchone()
return ("The entry, site: %s - Keyword: %s, exists" %(st[0],ky[0]), 501)
'''
Fetches all Subscribers in the database and returns a dictionary of Subscibers
'''
def fetch_all_subscribers(self):
dbUtil = dbConnection()
cur = dbUtil.get_cursor()
subscriber_dict = {}
cur.execute("select * from subscriber")
subscibers = cur.fetchall()
cur.execute("select * from site_keyword")
subscriptions = cur.fetchall()
for subsc in subscibers: # Create a map of each subscriber and their subscription details(stats)
subscriber = {}
subscriber['email'] = subsc[1]
siteKeywordMap = {}
sitesId = []
unique_sites_id = set()
key_words_id = []
for sub in subscriptions: # Get all sites and keywords subscriber is subscribed to
if sub[1] == subsc[0]:
sitesId.append(sub[1])
                    if sub[1] is not None and sub[1] != "":
                        unique_sites_id.add(sub[1])
                    if sub[2] is not None and sub[2] != "":
                        key_words_id.append(sub[2])
for x in unique_sites_id: # map keywords per site the subscriber is subscribed to
kWords = []
for y in range(0, len(key_words_id)):
if sitesId[y] == x:
kWords.append(key_words_id[y])
siteKeywordMap[x] = kWords
subscriber['subs'] = siteKeywordMap
subscriber['totalSites'] = len(unique_sites_id)
subscriber['totalKeywords'] = len(key_words_id)
subscriber_dict[subsc[0]] = subscriber
dbUtil.close_db_connection()
return subscriber_dict
def fetch_raw_subscriptions(self):
dbUtil = dbConnection()
cur = dbUtil.get_cursor()
cur.execute('''select * from subscription s inner join keyword k on k.keyword_id= s.keyword_id
inner join site site on site.site_id=s.site_id
inner join subscriber subsc on subsc.subscriber_id=s.subscriber_id''')
subscriptions = cur.fetchall()
return subscriptions
def fetch_all_subscriptions(self):
subscriptions_list=[]
subscriber_map={}
site_id_name={}
subscriber_id_mail={}
keyword_id_name={}
dbUtil = dbConnection()
cur = dbUtil.get_cursor()
cur.execute('''select * from subscription s inner join keyword k on k.keyword_id= s.keyword_id
inner join site site on site.site_id=s.site_id
inner join subscriber subsc on subsc.subscriber_id=s.subscriber_id''')
subscriptions = cur.fetchall()
for subsc in subscriptions:
try:
subscriber_group_map=subscriber_map[subsc[0]]
try:
site_map=subscriber_group_map[subsc[5]]
try:
keyword_list=site_map[subsc[1]]
keyword_list.append(subsc[2])
                    except KeyError:
site_map[subsc[1]]=[]
keyword_list=site_map[subsc[1]]
keyword_list.append(subsc[2])
                except KeyError:
subscriber_group_map[subsc[5]]={}
site_map=subscriber_group_map[subsc[5]]
site_map[subsc[1]]=[]
keyword_list=site_map[subsc[1]]
keyword_list.append(subsc[2])
            except KeyError:
subscriber_map[subsc[0]]={}
subscriber_group_map=subscriber_map[subsc[0]]
subscriber_group_map[subsc[5]]={}
site_map=subscriber_group_map[subsc[5]]
site_map[subsc[1]]=[]
keyword_list=site_map[subsc[1]]
keyword_list.append(subsc[2])
site_id_name[subsc[8]]=subsc[9]
keyword_id_name[subsc[6]]=subsc[7]
subscriber_id_mail[subsc[14]]=subsc[15]
subscriptions_list.append(subscriber_map)
subscriptions_list.append(site_id_name)
subscriptions_list.append(keyword_id_name)
subscriptions_list.append(subscriber_id_mail)
print(subscriptions_list)
return subscriptions_list
def delete_subscription(self, subscriber_group_id,subscriber_id=None, group_id=None):
if(subscriber_id==None or group_id==None):
subscriber_groupid=subscriber_group_id.split("-")
subscriber_id=subscriber_groupid[0]
group_id=subscriber_groupid[1]
try:
dbUtil = dbConnection()
cur = dbUtil.get_cursor()
cur.execute("delete from subscription where subscriber_id=? and subscription_group_id=?", (subscriber_id,group_id))
dbUtil.commit()
except lite.IntegrityError:
dbUtil.rollback()
return ("Failed to delete subscription", 501)
return "Keyword Deleted Successfully"
def update_subscription(self, subscription):
sql_select_subscription="Select * from subscription where subscriber_id=? and site_id=? and keyword_id=? and subscription_group_id is not ?"
sql_fetch_email_id = "select subscriber_id from subscriber where email=?"
email = subscription.subscriber.subscriber_email
dbUtil = dbConnection()
cur = dbUtil.get_cursor()
cur.execute(sql_fetch_email_id, (email,))
mailId = cur.fetchone()
for site in subscription.site:
s=site
for keyword in subscription.keyword:
k=keyword
cur.execute(sql_select_subscription, (mailId[0], site, keyword, subscription.subscription_group_id))
subscript = cur.fetchone()
if(subscript!=None):
sqlFetchSite = "select name from site where site_id=?"
cur.execute(sqlFetchSite, (s,))
st = cur.fetchone()
sqlFetchKeyword = "select keyword from keyword where keyword_id=?"
cur.execute(sqlFetchKeyword, (k,))
ky = cur.fetchone()
return ("The entry, site: %s - Keyword: %s, exists" %(st[0],ky[0]), 501)
self.delete_subscription(subscriber_group_id=None,subscriber_id=subscription.subscriber.subscriber_id,group_id=subscription.subscription_group_id)
self.add_subscription(subscription)
return "Subscription updated Successfully"
class Site():
def __init__(self):
'''
Constructor
'''
def fetch_all_sites(self):
dbUtil = dbConnection()
cur = dbUtil.get_cursor()
cur.execute("select site_id,name,alias from site")
site_list = {}
sites = cur.fetchall()
for site in sites:
            if site[2] is not None and len(str(site[2]).strip()) != 0:
site_list[site[0]] = site[2]
else:
site_list[site[0]] = site[1]
if(len(site_list) == 0):
site_list[0] = "There are no sites being scrapped"
dbUtil.close_db_connection()
return site_list
def update_site(self, new_name=None, site_id=None):
if new_name == None or site_id == None:
return ("No keyword selected", 501)
else:
dbUtil = dbConnection()
cur = dbUtil.get_cursor()
try:
cur.execute("update site set alias=? where site_id=?", (new_name, site_id))
dbUtil.commit()
except lite.IntegrityError:
return ("Name already exists", 501)
return "Name Updated Successfully"
class Settings():
def fetch_all_settings(self):
dbUtil = dbConnection()
cur = dbUtil.get_cursor()
cur.execute('''select * from subscription
inner join site on subscription.site_id=site.site_id
inner join keyword on keyword.keyword_id=subscription.keyword_id
inner join subscriber on subscription.subscriber_id = subscriber.subscriber_id;''')
setting_list = []
settings = cur.fetchall()
for setting in settings:
setting_list.append(setting)
dbUtil.close_db_connection()
return setting_list
def update_setting(self,subscription):
sql_setting="update subscription set page_limit=?,minimum_alert=? where subscriber_id=? and site_id=? and keyword_id=?"
dbUtil = dbConnection()
cur = dbUtil.get_cursor()
try:
cur.execute(sql_setting, (subscription.page_limit,subscription.minimum_alert,subscription.subscriber,subscription.site,subscription.keyword))
dbUtil.commit()
        except Exception:
return ("Failed to update setting", 501)
return self.fetch_all_settings()
class Job:
def __init__(self):
'''pass'''
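# A minimal usage sketch (not part of the original module). It assumes the
# sqlite schema referenced above (keyword, subscriber, subscription and site
# tables) and the utils.DBUtils.dbConnection helper are available.
if __name__ == '__main__':
    kw = Keyword()
    print(kw.add_keyword('python'))   # "Keyword created Successfully"
    print(kw.fetch_all_key_words())   # {keyword_id: keyword, ...}
    print(Site().fetch_all_sites())   # {site_id: alias or name, ...}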
|
py | 1a3d4616ff9d328f70d202bcda66cceffa90ee92 | from six import string_types
import numpy as np
import os
import h5py
from bmtk.simulator.core.io_tools import io
from .simulation_config import SimulationConfig
from bmtk.simulator.core.node_sets import NodeSet, NodeSetAll
from bmtk.simulator.core import sonata_reader
class SimNetwork(object):
def __init__(self):
self._components = {}
self._io = io
self._node_adaptors = {}
self._edge_adaptors = {}
self._register_adaptors()
self._node_populations = {}
self._node_sets = {}
self._edge_populations = []
self._gap_juncs = {}
@property
def io(self):
return self._io
@property
def node_populations(self):
return self._node_populations.values()
@property
def recurrent_edges(self):
return [ep for ep in self._edge_populations if ep.recurrent_connections]
@property
def py_function_caches(self):
return None
def _register_adaptors(self):
self._node_adaptors['sonata'] = sonata_reader.NodeAdaptor
self._edge_adaptors['sonata'] = sonata_reader.EdgeAdaptor
def get_node_adaptor(self, name):
return self._node_adaptors[name]
def get_edge_adaptor(self, name):
return self._edge_adaptors[name]
def add_component(self, name, path):
self._components[name] = path
def get_component(self, name):
if name not in self._components:
self.io.log_exception('No network component set with name {}'.format(name))
else:
return self._components[name]
def has_component(self, name):
return name in self._components
def get_node_population(self, name):
return self._node_populations[name]
def get_node_populations(self):
return self._node_populations.values()
def add_node_set(self, name, node_set):
self._node_sets[name] = node_set
def get_node_set(self, node_set):
if isinstance(node_set, string_types) and node_set in self._node_sets:
return self._node_sets[node_set]
elif isinstance(node_set, (dict, list)):
return NodeSet(node_set, self)
else:
self.io.log_exception('Unable to load or find node_set "{}"'.format(node_set))
def add_nodes(self, node_population):
pop_name = node_population.name
if pop_name in self._node_populations:
            # Make sure there aren't any collisions
self.io.log_exception('There are multiple node populations with name {}.'.format(pop_name))
node_population.initialize(self)
self._node_populations[pop_name] = node_population
if node_population.mixed_nodes:
# We'll allow a population to have virtual and non-virtual nodes but it is not ideal
self.io.log_warning(('Node population {} contains both virtual and non-virtual nodes which can cause ' +
                                 'memory and build-time inefficiency. Consider separating virtual nodes into their ' +
'own population').format(pop_name))
# Used in inputs/reports when needed to get all gids belonging to a node population
self._node_sets[pop_name] = NodeSet({'population': pop_name}, self)
def node_properties(self, populations=None):
if populations is None:
selected_pops = self.node_populations
elif isinstance(populations, string_types):
selected_pops = [pop for pop in self.node_populations if pop.name == populations]
else:
selected_pops = [pop for pop in self.node_populations if pop.name in populations]
all_nodes_df = None
for node_pop in selected_pops:
node_pop_df = node_pop.nodes_df()
if 'population' not in node_pop_df:
node_pop_df['population'] = node_pop.name
node_pop_df = node_pop_df.set_index(['population', node_pop_df.index.astype(dtype=np.uint64)])
if all_nodes_df is None:
all_nodes_df = node_pop_df
else:
all_nodes_df = all_nodes_df.append(node_pop_df)
return all_nodes_df
def get_node_groups(self, populations=None):
if populations is None:
selected_pops = self.node_populations
elif isinstance(populations, string_types):
selected_pops = [pop for pop in self.node_populations if pop.name == populations]
else:
selected_pops = [pop for pop in self.node_populations if pop.name in populations]
all_nodes_df = None
for node_pop in selected_pops:
node_pop_df = node_pop.nodes_df(index_by_id=False)
if 'population' not in node_pop_df:
node_pop_df['population'] = node_pop.name
if all_nodes_df is None:
all_nodes_df = node_pop_df
else:
all_nodes_df = all_nodes_df.append(node_pop_df, sort=False)
return all_nodes_df
def get_node_sets(self, populations=None, groupby=None, **filterby):
selected_nodes_df = self.node_properties(populations)
        for k, v in filterby.items():
            if isinstance(v, (np.ndarray, list, tuple)):
                selected_nodes_df = selected_nodes_df[selected_nodes_df[k].isin(v)]
            else:
                selected_nodes_df = selected_nodes_df[selected_nodes_df[k] == v]
if groupby is not None:
return {k: v.tolist() for k, v in selected_nodes_df.groupby(groupby).groups.items()}
else:
return selected_nodes_df.index.tolist()
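    # Usage sketch (the column names below are hypothetical): scalar filter
    # values are matched by equality and sequences via isin(), e.g.
    #   net.get_node_sets(populations='cortex', groupby='model_type', ek='e')
    # returns {model_type: [(population, node_id), ...]} built from the
    # MultiIndex produced by node_properties().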
def add_edges(self, edge_population):
edge_population.initialize(self)
pop_name = edge_population.name
# Check that source_population exists
src_pop_name = edge_population.source_nodes
if src_pop_name not in self._node_populations:
self.io.log_exception('Source node population {} not found. Please update {} edges'.format(src_pop_name,
pop_name))
# Check that the target population exists and contains non-virtual nodes (we cannot synapse onto virt nodes)
trg_pop_name = edge_population.target_nodes
if trg_pop_name not in self._node_populations or self._node_populations[trg_pop_name].virtual_nodes_only:
            self.io.log_exception(('Node population {} does not exist (or consists of only virtual nodes). ' +
'{} edges cannot create connections.').format(trg_pop_name, pop_name))
edge_population.set_connection_type(src_pop=self._node_populations[src_pop_name],
                                            trg_pop=self._node_populations[trg_pop_name])
self._edge_populations.append(edge_population)
def load_gap_junc_files(self, gj_dic):
for p in gj_dic:
path = p['gap_juncs_file']
f_name = os.path.basename(path)
network = f_name[:f_name.find("_gap_juncs.h5")]
self._gap_juncs[network] = {}
with h5py.File(path, 'r') as f:
for key in ['source_ids', 'target_ids', 'src_gap_ids', 'trg_gap_ids']:
self._gap_juncs[network][key] = f[key][()]
def build(self):
self.build_nodes()
self.build_recurrent_edges()
def build_nodes(self):
raise NotImplementedError()
def build_recurrent_edges(self, **opts):
raise NotImplementedError()
def build_virtual_connections(self):
raise NotImplementedError()
@classmethod
def from_config(cls, conf, **properties):
"""Generates a graph structure from a json config file or dictionary.
:param conf: name of json config file, or a dictionary with config parameters
:param properties: optional properties.
:return: A graph object of type cls
"""
network = cls(**properties)
# The simulation run script should create a config-dict since it's likely to vary based on the simulator engine,
# however in the case the user doesn't we will try a generic conversion from dict/json to ConfigDict
if isinstance(conf, SimulationConfig):
config = conf
else:
try:
config = SimulationConfig.load(conf)
except Exception as e:
network.io.log_exception('Could not convert {} (type "{}") to json.'.format(conf, type(conf)))
if not config.with_networks:
network.io.log_exception('Could not find any network files. Unable to build network.')
# TODO: These are simulator specific
network.spike_threshold = config.spike_threshold
network.dL = config.dL
# load components
for name, value in config.components.items():
network.add_component(name, value)
# load nodes
gid_map = config.gid_mappings
node_adaptor = network.get_node_adaptor('sonata')
for node_dict in config.nodes:
nodes = sonata_reader.load_nodes(node_dict['nodes_file'], node_dict['node_types_file'], gid_map,
adaptor=node_adaptor)
for node_pop in nodes:
network.add_nodes(node_pop)
# TODO: Raise a warning if more than one internal population and no gids (node_id collision)
# load edges
edge_adaptor = network.get_edge_adaptor('sonata')
for edge_dict in config.edges:
if not edge_dict.get('enabled', True):
continue
edges = sonata_reader.load_edges(edge_dict['edges_file'], edge_dict['edge_types_file'],
adaptor=edge_adaptor)
for edge_pop in edges:
network.add_edges(edge_pop)
network.load_gap_junc_files(config.gap_juncs)
# Add nodeset section
network.add_node_set('all', NodeSetAll(network))
for ns_name, ns_filter in config.node_sets.items():
network.add_node_set(ns_name, NodeSet(ns_filter, network))
return network
@classmethod
def from_manifest(cls, manifest_json):
# TODO: Add adaptors to build a simulation network from model files downloaded celltypes.brain-map.org
raise NotImplementedError()
@classmethod
def from_builder(cls, network):
# TODO: Add adaptors to build a simulation network from a bmtk.builder Network object
raise NotImplementedError()
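# A minimal illustration (not from the original module) of how a simulator
# backend is expected to specialize SimNetwork: subclasses override the
# build_* hooks while from_config() drives loading of components, nodes,
# edges, gap junctions and node sets. The prints stand in for real
# cell/synapse instantiation, and the config path below is hypothetical.
class _LoggingNetwork(SimNetwork):
    def build_nodes(self):
        for node_pop in self.node_populations:
            print('would instantiate nodes for population', node_pop.name)

    def build_recurrent_edges(self, **opts):
        for edge_pop in self.recurrent_edges:
            print('would instantiate edges for population', edge_pop.name)

# net = _LoggingNetwork.from_config('simulation_config.json')
# net.build()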
|
py | 1a3d481c9db946c2fd1ec4e877129de9244cd240 | import re
from pygments import highlight
from pygments.formatters.html import HtmlFormatter
from pygments.lexers import data
from yapf.yapflib.yapf_api import FormatCode
from ..core import format_json
from ..model.app_data import ExchangeRequest, ExchangeResponse, ApiCall, HttpExchange
internal_var_selector = re.compile(r"\$\{(\w+)\}")
def highlight_format_json(plain_text, formatter=HtmlFormatter()):
if not plain_text:
return ""
return highlight(format_json(plain_text), data.JsonLexer(), formatter)
def api_request_body_highlighted(api_call: ApiCall):
return highlight_format_json(api_call.http_request_body)
def request_body_highlighted(http_request: ExchangeRequest):
return highlight_format_json(http_request.request_body)
def response_body_highlighted(http_response: ExchangeResponse):
return highlight_format_json(http_response.response_body)
def encode_json_string(json_string):
return json_string.replace('"', '\\"')
def get_base_url(api_call: ApiCall):
return api_call.http_url
def get_function_name(api_call: ApiCall):
norm_title = api_call.title.lower().strip()
rgx = r"[^a-zA-Z]"
return re.sub(rgx, "", norm_title)
def dict_formatter(dict_items, form, splitter=","):
return splitter.join([form.format(**locals()) for k, v in dict_items])
def extract_uri(url, servers):
matched_server = next(
(server for server in servers if url.startswith(server)), None
)
if matched_server:
return url.replace(matched_server, "")
return url
def to_curl(api_call: ApiCall, exchange: HttpExchange, compressed=False, verify=True):
if api_call:
http_method = api_call.http_method
http_url = api_call.http_url
req_headers = api_call.enabled_headers()
req_qp = api_call.enabled_query_params()
req_body = api_call.request_body_without_comments()
if exchange.response.http_status_code != 0:
http_method = exchange.request.http_method
http_url = exchange.request.http_url
req_qp = exchange.request.query_params
req_headers = exchange.request.headers
req_body = exchange.request.request_body
elif not api_call:
raise ValueError(
"Unable to make curl request as api_call is null and exchange response is {}".format(
exchange.response
)
)
if req_qp:
http_url = http_url + "?" + "&".join([f"{k}={v}" for k, v in req_qp.items()])
parts = [("curl", None), ("-X", http_method)]
for k, v in sorted(req_headers.items()):
parts += [("-H", "{0}: {1}".format(k, v))]
if req_body:
body = req_body
if isinstance(body, bytes):
body = body.decode("utf-8")
parts += [("-d", body)]
if compressed:
parts += [("--compressed", None)]
if not verify:
parts += [("--insecure", None)]
parts += [(None, http_url)]
flat_parts = []
for k, v in parts:
if k:
flat_parts.append(k)
if v:
flat_parts.append("'{0}'".format(v))
return " ".join(flat_parts)
def format_python_code(unformatted_code):
return FormatCode(unformatted_code, style_config="pep8")
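# Illustrative usage (not part of the original module; the relative imports
# above mean these helpers normally run inside their package):
#   formatted, _ = format_python_code("def f( a,b ):\n  return a+b\n")
#   # yapf's FormatCode returns a (formatted_source, changed) tuple.
#   html = highlight_format_json('{"name": "curl-demo", "ok": true}')
#   # -> pygments-highlighted HTML for the pretty-printed JSON.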
|
py | 1a3d490e7079434fcefe77357e7dd4139e303a23 | # Copyright 2021 Chuwei Chen [email protected]
# Copyright 2021 Zhaozhong Qi [email protected]
# ===========START OF STUDENT'S CODE================
"2021FALL EC602 HW5"
def left_rotate(string, num):
"left rotate a string by num (CounterClockwise)"
return string[num:] + string[:num]
def right_rotate(string, num):
"right rotate a string by num (Clockwise)"
return string[-num:] + string[:-num]
def linear(orig: str, modified: str) -> bool:
"Check if one arrangement is linear, i.e. barriers at the ends"
orig = list(orig)
modified = list(modified)
for i in orig:
if abs(orig.index(i) - modified.index(i)) > 1:
return False
return True
def valid(orig: str, modified: str) -> bool:
"Check if one arrangement is valid, i.e. follow the Wedding seating rules"
orig = list(orig)
modified = list(modified)
for i in orig:
index_diff = abs(orig.index(i) - modified.index(i))
if index_diff > 1 and index_diff != (len(orig) - 1):
return False
return True
def find_linears(guests: str) -> list:
"Find all the linear arranges for the given str"
# If only one or empty guests, return it or None
if len(guests) == 1:
return [guests[0]]
if len(guests) == 0:
return None
# Initialize some data structures
linear_list = [guests[0]]
# From left to right, iterate through them cumulatively,
# i.e. 'a ab abc abcd'
for i in range(1, len(guests)):
buffer = []
orig = guests[0:i] + guests[i]
# Linear_list contains all prev. linear arranges
for j in linear_list:
new_arranges = add_person(j, guests[i])
# Get rid of non-linear arranges.
for k in new_arranges:
if linear(orig, k):
buffer.append(k)
linear_list = list(set(buffer))
return sorted(linear_list)
def add_person(orig: str, adder: str) -> list:
"Based on the original arrangement, \
return all new arranges when a new person seats in"
# 1. stay
one = orig + adder
# 2. swap w/ tail
two = list(one)
two[len(two)-1], two[len(two)-2] = two[len(two)-2], two[len(two)-1]
two = "".join(two)
# 3. swap w/ head
thr = list(one)
thr[len(thr)-1], thr[0] = thr[0], thr[len(thr)-1]
thr = "".join(thr)
# 4. swap w/ head & cw rotate
four = list(one)
four = right_rotate(four, 1)
four = "".join(four)
# 5. swap w/ tail & ccw rotate
five = list(one)
five = left_rotate(five, 1)
five = "".join(five)
ans = [one, two, thr, four, five]
return ans
def divide_str(guests: str, bars: list) -> list:
"Divide a string up between barriers"
divided = []
for i, val in enumerate(bars):
if i != len(bars) - 1:
divided.append(guests[val:bars[i+1]])
else:
divided.append(guests[val:]+guests[:bars[0]])
return divided
def countem(upper_limit: list, values: list):
"Return all the permutations"
current = [0] * len(upper_limit)
while True:
temp_string = ""
for i, val in enumerate(current):
temp_string = temp_string + values[i][val]
yield temp_string
j = 0
current[j] += 1
while current[j] == upper_limit[j]:
current[j] = 0
j += 1
if j == len(upper_limit):
return
current[j] += 1
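# Worked example for countem (illustrative, not in the original submission):
# it steps through the index combinations like a mixed-radix odometer, least
# significant position first, joining one pick from each sub-list:
#   list(countem([2, 3], [['a', 'b'], ['x', 'y', 'z']]))
#   -> ['ax', 'bx', 'ay', 'by', 'az', 'bz']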
class Wedding:
"The assignment: wedding class"
def __init__(self):
pass
def shuffle(self, guests: str) -> list:
"Return all possible seating arrangements"
# If only one or empty guests, return it or None
if len(guests) == 1:
return [guests[0]]
if len(guests) == 0:
return None
arranges = []
# Find prev. linear arranges
linear_list = find_linears(guests[0:len(guests)-1])
# For each prev. linear arranges, add the last person
for j in linear_list:
new_arranges = add_person(j, guests[len(guests)-1])
# Get rid of invalid arranges.
for k in new_arranges:
if valid(guests, k):
arranges.append(k)
return sorted(list(set(arranges)))
def barriers(self, guests: str, bars: list) -> list:
"Return all possible seating arrangements w/ barriers"
# Initialize some data structures
arranges = []
divided_linear = []
permutations = []
upper_limit = []
# Divide guests up and find their linears
divided = divide_str(guests, bars)
for i in divided:
divided_linear.append(find_linears(i))
# Find upper limit (len of each element) of divided_linear
for i in divided_linear:
upper_limit.append(len(i))
# Find permutations in divided_linear
for i in countem(upper_limit, divided_linear):
permutations.append(i)
# Format adjusting
for i in permutations:
i = right_rotate(i, bars[0])
offset = 0
for j in bars:
i = i[:j+offset] + '|' + i[j+offset:]
offset += 1
arranges.append(i)
return arranges
# ===========END OF STUDENT'S CODE================
def show_result(v, partial=False, ind=None):
v.sort()
if not partial:
print("", len(v), "\n".join(v), sep="\n")
else:
print("", len(v), v[ind], sep="\n")
def standard_tests():
standard = Wedding()
res = standard.shuffle("abc")
show_result(res)
res = standard.shuffle("WXYZ")
show_result(res)
res = standard.barriers("xyz", [0])
show_result(res)
res = standard.shuffle("abc")
show_result(res)
res = standard.shuffle("abcdefXY")
show_result(res)
res = standard.barriers("abcDEFxyz", [2, 5, 7])
show_result(res)
res = standard.barriers("ABCDef", [4])
show_result(res)
res = standard.barriers("bgywqa", [0, 1, 2, 4, 5])
show_result(res)
res = standard.barriers("n", [0])
show_result(res)
res = standard.shuffle("hi")
show_result(res)
def main():
print("""Type quit to exit.
Commands:
tests
s guests
b guests n barriers
sp guests ind
bp guests n barriers ind
""")
w = Wedding()
while True:
asktype = input().split()
if asktype[0] == "quit":
break
elif asktype[0] == "tests":
standard_tests()
elif asktype[0] == "s":
guests = asktype[1]
r = w.shuffle(guests)
show_result(r)
elif asktype[0] == "b":
guests, nbar, bars = asktype[1], asktype[2], asktype[3:]
r = w.barriers(guests, [int(x) for x in bars])
show_result(r)
elif asktype[0] == "sp":
guests, ind = asktype[1:]
r = w.shuffle(guests)
show_result(r, True, int(ind))
elif asktype[0] == "bp":
guests, nbar, bars, ind = asktype[1], \
asktype[2], asktype[3:-1], asktype[-1]
r = w.barriers(guests, [int(x) for x in bars])
show_result(r, True, int(ind))
if __name__ == '__main__':
main()
|
py | 1a3d494859357e7c1b217ba92bf80e72cf545a41 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import pytest
from backend.dashboard.examples.utils import load_demo_manifest
from backend.tests.conftest import TEST_NAMESPACE
from backend.tests.dashboard.conftest import DASHBOARD_API_URL_COMMON_PREFIX as DAU_PREFIX
from backend.utils.basic import getitems
pytestmark = pytest.mark.django_db
class TestIngress:
""" 测试 Ingress 相关接口 """
manifest = load_demo_manifest('networks/simple_ingress')
create_url = f'{DAU_PREFIX}/networks/ingresses/'
list_url = f'{DAU_PREFIX}/namespaces/{TEST_NAMESPACE}/networks/ingresses/'
inst_url = f"{list_url}{getitems(manifest, 'metadata.name')}/"
def test_create(self, api_client):
""" 测试创建资源接口 """
response = api_client.post(self.create_url, data={'manifest': self.manifest})
assert response.json()['code'] == 0
def test_list(self, api_client):
""" 测试获取资源列表接口 """
response = api_client.get(self.list_url)
assert response.json()['code'] == 0
assert response.data['manifest']['kind'] == 'IngressList'
def test_update(self, api_client):
""" 测试更新资源接口 """
self.manifest['metadata']['annotations'] = {'t_key': 't_val'}
response = api_client.put(self.inst_url, data={'manifest': self.manifest})
assert response.json()['code'] == 0
def test_retrieve(self, api_client):
""" 测试获取单个资源接口 """
response = api_client.get(self.inst_url)
assert response.json()['code'] == 0
assert response.data['manifest']['kind'] == 'Ingress'
assert getitems(response.data, 'manifest.metadata.annotations.t_key') == 't_val'
def test_destroy(self, api_client):
""" 测试删除单个资源 """
response = api_client.delete(self.inst_url)
assert response.json()['code'] == 0
|
py | 1a3d49a20328928d951ee98ece97e26798d977b4 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TPU Strategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import atexit
import collections
import contextlib
import copy
import weakref
from absl import logging
import numpy as np
from tensorflow.compiler.xla.experimental.xla_sharding import xla_sharding
from tensorflow.python.autograph.core import ag_ctx as autograph_ctx
from tensorflow.python.autograph.impl import api as autograph
from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import distribute_utils
from tensorflow.python.distribute import input_lib
from tensorflow.python.distribute import numpy_dataset
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import tpu_values
from tensorflow.python.distribute import values
from tensorflow.python.distribute.cluster_resolver import TPUClusterResolver
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device_spec
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.tpu import device_assignment as device_assignment_lib # pylint: disable=unused-import
from tensorflow.python.tpu import tpu
from tensorflow.python.tpu import tpu_strategy_util
from tensorflow.python.tpu import training_loop
from tensorflow.python.tpu.ops import tpu_ops
from tensorflow.python.util import deprecation
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
_XLA_OP_BY_OP_INPUTS_LIMIT = 200
@contextlib.contextmanager
def maybe_init_scope():
if ops.executing_eagerly_outside_functions():
yield
else:
with ops.init_scope():
yield
def validate_run_function(fn):
"""Validate the function passed into strategy.run."""
# We allow three types of functions/objects passed into TPUStrategy
# run in eager mode:
# 1. a user annotated tf.function
# 2. a ConcreteFunction, this is mostly what you get from loading a saved
# model.
# 3. a callable object and the `__call__` method itself is a tf.function.
#
# Otherwise we return an error, because we don't support eagerly running
# run in TPUStrategy.
if context.executing_eagerly() \
and not isinstance(fn, def_function.Function) \
and not isinstance(fn, function.ConcreteFunction) \
and not (callable(fn) and isinstance(fn.__call__, def_function.Function)):
raise NotImplementedError(
"TPUStrategy.run(fn, ...) does not support pure eager "
"execution. please make sure the function passed into "
"`strategy.run` is a `tf.function` or "
"`strategy.run` is called inside a `tf.function` if "
"eager behavior is enabled.")
@tf_export("distribute.TPUStrategy", v1=[])
class TPUStrategyV2(distribute_lib.Strategy):
"""Synchronous training on TPUs and TPU Pods.
To construct a TPUStrategy object, you need to run the
initialization code as below:
>>> resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='')
>>> tf.config.experimental_connect_to_cluster(resolver)
>>> tf.tpu.experimental.initialize_tpu_system(resolver)
>>> strategy = tf.distribute.TPUStrategy(resolver)
While using distribution strategies, the variables created within the
strategy's scope will be replicated across all the replicas and can be kept in
sync using all-reduce algorithms.
To run TF2 programs on TPUs, you can either use `.compile` and
`.fit` APIs in `tf.keras` with TPUStrategy, or write your own customized
training loop by calling `strategy.run` directly. Note that
TPUStrategy doesn't support pure eager execution, so please make sure the
function passed into `strategy.run` is a `tf.function` or
`strategy.run` is called inside a `tf.function` if eager
behavior is enabled. See more details in https://www.tensorflow.org/guide/tpu.
`distribute_datasets_from_function` and
`experimental_distribute_dataset` APIs can be used to distribute the dataset
across the TPU workers when writing your own training loop. If you are using
`fit` and `compile` methods available in `tf.keras.Model`, then Keras will
handle the distribution for you.
An example of writing customized training loop on TPUs:
>>> with strategy.scope():
... model = tf.keras.Sequential([
... tf.keras.layers.Dense(2, input_shape=(5,)),
... ])
... optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
>>> def dataset_fn(ctx):
... x = np.random.random((2, 5)).astype(np.float32)
... y = np.random.randint(2, size=(2, 1))
... dataset = tf.data.Dataset.from_tensor_slices((x, y))
... return dataset.repeat().batch(1, drop_remainder=True)
>>> dist_dataset = strategy.distribute_datasets_from_function(
... dataset_fn)
>>> iterator = iter(dist_dataset)
>>> @tf.function()
... def train_step(iterator):
...
... def step_fn(inputs):
... features, labels = inputs
... with tf.GradientTape() as tape:
... logits = model(features, training=True)
... loss = tf.keras.losses.sparse_categorical_crossentropy(
... labels, logits)
...
... grads = tape.gradient(loss, model.trainable_variables)
... optimizer.apply_gradients(zip(grads, model.trainable_variables))
...
... strategy.run(step_fn, args=(next(iterator),))
>>> train_step(iterator)
For the advanced use cases like model parallelism, you can set
`experimental_device_assignment` argument when creating TPUStrategy to specify
number of replicas and number of logical devices. Below is an example to
initialize TPU system with 2 logical devices and 1 replica.
>>> resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='')
>>> tf.config.experimental_connect_to_cluster(resolver)
>>> topology = tf.tpu.experimental.initialize_tpu_system(resolver)
>>> device_assignment = tf.tpu.experimental.DeviceAssignment.build(
... topology,
... computation_shape=[1, 1, 1, 2],
... num_replicas=1)
>>> strategy = tf.distribute.TPUStrategy(
... resolver, experimental_device_assignment=device_assignment)
Then you can run a `tf.add` operation only on logical device 0.
>>> @tf.function()
... def step_fn(inputs):
... features, _ = inputs
... output = tf.add(features, features)
...
... # Add operation will be executed on logical device 0.
... output = strategy.experimental_assign_to_logical_device(output, 0)
... return output
>>> dist_dataset = strategy.distribute_datasets_from_function(
... dataset_fn)
>>> iterator = iter(dist_dataset)
>>> strategy.run(step_fn, args=(next(iterator),))
"""
def __init__(self,
tpu_cluster_resolver=None,
experimental_device_assignment=None):
"""Synchronous training in TPU donuts or Pods.
Args:
tpu_cluster_resolver: A tf.distribute.cluster_resolver.TPUClusterResolver,
which provides information about the TPU cluster. If None, it will
assume running on a local TPU worker.
experimental_device_assignment: Optional
`tf.tpu.experimental.DeviceAssignment` to specify the placement of
replicas on the TPU cluster.
"""
super(TPUStrategyV2, self).__init__(TPUExtended(
self, tpu_cluster_resolver,
device_assignment=experimental_device_assignment))
distribute_lib.distribution_strategy_gauge.get_cell("V2").set("TPUStrategy")
distribute_lib.distribution_strategy_replica_gauge.get_cell(
"num_workers").set(self.extended.num_hosts)
distribute_lib.distribution_strategy_replica_gauge.get_cell(
"num_replicas_per_worker").set(self.extended.num_replicas_per_host)
# Packed variable is used to reduce the overhead of function execution.
# For a DistributedVariable, only one variable handle is captured into a
# function graph. It's only supported in eager mode.
self._enable_packed_variable_in_eager_mode = True
def run(self, fn, args=(), kwargs=None, options=None):
"""Run the computation defined by `fn` on each TPU replica.
Executes ops specified by `fn` on each replica. If `args` or `kwargs` have
`tf.distribute.DistributedValues`, such as those produced by a
`tf.distribute.DistributedDataset` from
`tf.distribute.Strategy.experimental_distribute_dataset` or
`tf.distribute.Strategy.distribute_datasets_from_function`,
when `fn` is executed on a particular replica, it will be executed with the
component of `tf.distribute.DistributedValues` that correspond to that
replica.
`fn` may call `tf.distribute.get_replica_context()` to access members such
as `all_reduce`.
All arguments in `args` or `kwargs` should either be nest of tensors or
`tf.distribute.DistributedValues` containing tensors or composite tensors.
Example usage:
>>> resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='')
>>> tf.config.experimental_connect_to_cluster(resolver)
>>> tf.tpu.experimental.initialize_tpu_system(resolver)
>>> strategy = tf.distribute.TPUStrategy(resolver)
>>> @tf.function
... def run():
... def value_fn(value_context):
... return value_context.num_replicas_in_sync
... distributed_values = (
... strategy.experimental_distribute_values_from_function(value_fn))
... def replica_fn(input):
... return input * 2
... return strategy.run(replica_fn, args=(distributed_values,))
>>> result = run()
Args:
fn: The function to run. The output must be a `tf.nest` of `Tensor`s.
args: (Optional) Positional arguments to `fn`.
kwargs: (Optional) Keyword arguments to `fn`.
options: (Optional) An instance of `tf.distribute.RunOptions` specifying
the options to run `fn`.
Returns:
Merged return value of `fn` across replicas. The structure of the return
value is the same as the return value from `fn`. Each element in the
structure can either be `tf.distribute.DistributedValues`, `Tensor`
objects, or `Tensor`s (for example, if running on a single replica).
"""
validate_run_function(fn)
# Note: the target function is converted to graph even when in Eager mode,
# so autograph is on by default here.
fn = autograph.tf_convert(fn, autograph_ctx.control_status_ctx())
options = options or distribute_lib.RunOptions()
return self.extended.tpu_run(fn, args, kwargs, options)
def experimental_assign_to_logical_device(self, tensor, logical_device_id):
"""Adds annotation that `tensor` will be assigned to a logical device.
This adds an annotation to `tensor` specifying that operations on
`tensor` will be invoked on logical core device id `logical_device_id`.
When model parallelism is used, the default behavior is that all ops
are placed on zero-th logical device.
```python
# Initializing TPU system with 2 logical devices and 4 replicas.
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='')
tf.config.experimental_connect_to_cluster(resolver)
topology = tf.tpu.experimental.initialize_tpu_system(resolver)
device_assignment = tf.tpu.experimental.DeviceAssignment.build(
topology,
computation_shape=[1, 1, 1, 2],
num_replicas=4)
strategy = tf.distribute.TPUStrategy(
resolver, experimental_device_assignment=device_assignment)
iterator = iter(inputs)
@tf.function()
def step_fn(inputs):
output = tf.add(inputs, inputs)
# Add operation will be executed on logical device 0.
output = strategy.experimental_assign_to_logical_device(output, 0)
return output
strategy.run(step_fn, args=(next(iterator),))
```
Args:
tensor: Input tensor to annotate.
logical_device_id: Id of the logical core to which the tensor will be
assigned.
Raises:
ValueError: The logical device id presented is not consistent with total
number of partitions specified by the device assignment.
Returns:
Annotated tensor with identical value as `tensor`.
"""
num_logical_devices_per_replica = self.extended._tpu_devices.shape[1] # pylint: disable=protected-access
if (logical_device_id < 0 or
logical_device_id >= num_logical_devices_per_replica):
raise ValueError("`logical_core_id` to assign must be lower then total "
"number of logical devices per replica. Received "
"logical device id {} but there are only total of {} "
"logical devices in replica.".format(
logical_device_id, num_logical_devices_per_replica))
return xla_sharding.assign_device(
tensor, logical_device_id, use_sharding_op=True)
def experimental_split_to_logical_devices(self, tensor, partition_dimensions):
"""Adds annotation that `tensor` will be split across logical devices.
This adds an annotation to tensor `tensor` specifying that operations on
    `tensor` will be split among multiple logical devices. Tensor `tensor`
will be split across dimensions specified by `partition_dimensions`.
The dimensions of `tensor` must be divisible by corresponding value in
`partition_dimensions`.
For example, for system with 8 logical devices, if `tensor` is an image
tensor with shape (batch_size, width, height, channel) and
    `partition_dimensions` is [1, 2, 4, 1], then `tensor` will be split
    2 ways in the width dimension and 4 ways in the height dimension, and the
    split tensor values will be fed into 8 logical devices.
```python
# Initializing TPU system with 8 logical devices and 1 replica.
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='')
tf.config.experimental_connect_to_cluster(resolver)
topology = tf.tpu.experimental.initialize_tpu_system(resolver)
device_assignment = tf.tpu.experimental.DeviceAssignment.build(
topology,
computation_shape=[1, 2, 2, 2],
num_replicas=1)
strategy = tf.distribute.TPUStrategy(
resolver, experimental_device_assignment=device_assignment)
iterator = iter(inputs)
@tf.function()
def step_fn(inputs):
inputs = strategy.experimental_split_to_logical_devices(
inputs, [1, 2, 4, 1])
# model() function will be executed on 8 logical devices with `inputs`
# split 2 * 4 ways.
output = model(inputs)
return output
strategy.run(step_fn, args=(next(iterator),))
```
Args:
tensor: Input tensor to annotate.
partition_dimensions: An unnested list of integers with the size equal to
rank of `tensor` specifying how `tensor` will be partitioned. The
product of all elements in `partition_dimensions` must be equal to the
total number of logical devices per replica.
Raises:
ValueError: 1) If the size of partition_dimensions does not equal to rank
of `tensor` or 2) if product of elements of `partition_dimensions` does
not match the number of logical devices per replica defined by the
implementing DistributionStrategy's device specification or
3) if a known size of `tensor` is not divisible by corresponding
value in `partition_dimensions`.
Returns:
Annotated tensor with identical value as `tensor`.
"""
num_logical_devices_per_replica = self.extended._tpu_devices.shape[1] # pylint: disable=protected-access
num_partition_splits = np.prod(partition_dimensions)
input_shape = tensor.shape
tensor_rank = len(input_shape)
if tensor_rank != len(partition_dimensions):
raise ValueError("Length of `partition_dimensions` ({}) must be "
"equal to the rank of `x` ({}).".format(
len(partition_dimensions), tensor_rank))
for dim_index, dim_size in enumerate(input_shape):
if dim_size is None:
continue
split_size = partition_dimensions[dim_index]
if dim_size % split_size != 0:
raise ValueError("Tensor shape at dimension ({}) must be "
"divisible by corresponding value specified "
"by `partition_dimensions` ({}).".format(
dim_index, split_size))
if num_partition_splits != num_logical_devices_per_replica:
raise ValueError("Number of logical devices ({}) does not match the "
"number of partition splits specified ({}).".format(
num_logical_devices_per_replica,
num_partition_splits))
tile_assignment = np.arange(num_partition_splits).reshape(
partition_dimensions)
return xla_sharding.tile(tensor, tile_assignment, use_sharding_op=True)
def experimental_replicate_to_logical_devices(self, tensor):
"""Adds annotation that `tensor` will be replicated to all logical devices.
This adds an annotation to tensor `tensor` specifying that operations on
`tensor` will be invoked on all logical devices.
```python
# Initializing TPU system with 2 logical devices and 4 replicas.
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='')
tf.config.experimental_connect_to_cluster(resolver)
topology = tf.tpu.experimental.initialize_tpu_system(resolver)
device_assignment = tf.tpu.experimental.DeviceAssignment.build(
topology,
computation_shape=[1, 1, 1, 2],
num_replicas=4)
strategy = tf.distribute.TPUStrategy(
resolver, experimental_device_assignment=device_assignment)
iterator = iter(inputs)
@tf.function()
def step_fn(inputs):
images, labels = inputs
images = strategy.experimental_split_to_logical_devices(
inputs, [1, 2, 4, 1])
# model() function will be executed on 8 logical devices with `inputs`
# split 2 * 4 ways.
output = model(inputs)
# For loss calculation, all logical devices share the same logits
# and labels.
labels = strategy.experimental_replicate_to_logical_devices(labels)
output = strategy.experimental_replicate_to_logical_devices(output)
loss = loss_fn(labels, output)
return loss
strategy.run(step_fn, args=(next(iterator),))
```
Args:
tensor: Input tensor to annotate.
Returns:
Annotated tensor with identical value as `tensor`.
"""
return xla_sharding.replicate(tensor, use_sharding_op=True)
@tf_export("distribute.experimental.TPUStrategy", v1=[])
@deprecation.deprecated_endpoints("distribute.experimental.TPUStrategy")
class TPUStrategy(distribute_lib.Strategy):
"""Synchronous training on TPUs and TPU Pods.
To construct a TPUStrategy object, you need to run the
initialization code as below:
>>> resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='')
>>> tf.config.experimental_connect_to_cluster(resolver)
>>> tf.tpu.experimental.initialize_tpu_system(resolver)
>>> strategy = tf.distribute.experimental.TPUStrategy(resolver)
While using distribution strategies, the variables created within the
strategy's scope will be replicated across all the replicas and can be kept in
sync using all-reduce algorithms.
To run TF2 programs on TPUs, you can either use `.compile` and
`.fit` APIs in `tf.keras` with TPUStrategy, or write your own customized
training loop by calling `strategy.run` directly. Note that
TPUStrategy doesn't support pure eager execution, so please make sure the
function passed into `strategy.run` is a `tf.function` or
`strategy.run` is called inside a `tf.function` if eager
behavior is enabled.
"""
def __init__(self,
tpu_cluster_resolver=None,
device_assignment=None):
"""Synchronous training in TPU donuts or Pods.
Args:
tpu_cluster_resolver: A tf.distribute.cluster_resolver.TPUClusterResolver,
which provides information about the TPU cluster.
device_assignment: Optional `tf.tpu.experimental.DeviceAssignment` to
specify the placement of replicas on the TPU cluster.
"""
logging.warning(
"`tf.distribute.experimental.TPUStrategy` is deprecated, please use "
" the non experimental symbol `tf.distribute.TPUStrategy` instead.")
super(TPUStrategy, self).__init__(TPUExtended(
self, tpu_cluster_resolver, device_assignment=device_assignment))
distribute_lib.distribution_strategy_gauge.get_cell("V2").set("TPUStrategy")
distribute_lib.distribution_strategy_replica_gauge.get_cell(
"num_workers").set(self.extended.num_hosts)
distribute_lib.distribution_strategy_replica_gauge.get_cell(
"num_replicas_per_worker").set(self.extended.num_replicas_per_host)
# Packed variable is used to reduce the overhead of function execution.
# For a DistributedVariable, only one variable handle is captured into a
# function graph. It's only supported in eager mode.
self._enable_packed_variable_in_eager_mode = True
# TODO(cjfj): Modify `_call_for_each_replica` in `TPUExtended` such that this
# can use the default implementation.
# This implementation runs a single step. It does not use infeed or outfeed.
def run(self, fn, args=(), kwargs=None, options=None):
"""See base class."""
validate_run_function(fn)
# Note: the target function is converted to graph even when in Eager mode,
# so autograph is on by default here.
fn = autograph.tf_convert(fn, autograph_ctx.control_status_ctx())
options = options or distribute_lib.RunOptions()
return self.extended.tpu_run(fn, args, kwargs, options)
@property
def cluster_resolver(self):
"""Returns the cluster resolver associated with this strategy.
`tf.distribute.experimental.TPUStrategy` provides the
associated `tf.distribute.cluster_resolver.ClusterResolver`. If the user
provides one in `__init__`, that instance is returned; if the user does
not, a default
`tf.distribute.cluster_resolver.TPUClusterResolver` is provided.
"""
return self.extended._tpu_cluster_resolver # pylint: disable=protected-access
@tf_export(v1=["distribute.experimental.TPUStrategy"])
class TPUStrategyV1(distribute_lib.StrategyV1):
"""TPU distribution strategy implementation."""
def __init__(self,
tpu_cluster_resolver=None,
steps_per_run=None,
device_assignment=None):
"""Initializes the TPUStrategy object.
Args:
tpu_cluster_resolver: A tf.distribute.cluster_resolver.TPUClusterResolver,
which provides information about the TPU cluster.
steps_per_run: Number of steps to run on device before returning to the
host. Note that this can have side-effects on performance, hooks,
metrics, summaries etc.
This parameter is only used when Distribution Strategy is used with
estimator or keras.
device_assignment: Optional `tf.tpu.experimental.DeviceAssignment` to
specify the placement of replicas on the TPU cluster. Currently only
supports the usecase of using a single core within a TPU cluster.
"""
super(TPUStrategyV1, self).__init__(TPUExtended(
self, tpu_cluster_resolver, steps_per_run, device_assignment))
distribute_lib.distribution_strategy_gauge.get_cell("V1").set("TPUStrategy")
distribute_lib.distribution_strategy_replica_gauge.get_cell(
"num_workers").set(self.extended.num_hosts)
distribute_lib.distribution_strategy_replica_gauge.get_cell(
"num_replicas_per_worker").set(self.extended.num_replicas_per_host)
# Packed variable is used to reduce the overhead of function execution.
# For a DistributedVariable, only one variable handle is captured into a
# function graph. It's only supported in eager mode.
self._enable_packed_variable_in_eager_mode = True
@property
def steps_per_run(self):
"""DEPRECATED: use .extended.steps_per_run instead."""
return self._extended.steps_per_run
# TODO(cjfj): Modify `_call_for_each_replica` in `TPUExtended` such that this
# can use the default implementation.
# This implementation runs a single step. It does not use infeed or outfeed.
def run(self, fn, args=(), kwargs=None, options=None):
"""Run `fn` on each replica, with the given arguments.
Executes ops specified by `fn` on each replica. If `args` or `kwargs` have
"per-replica" values, such as those produced by a "distributed `Dataset`",
when `fn` is executed on a particular replica, it will be executed with the
component of those "per-replica" values that correspond to that replica.
`fn` may call `tf.distribute.get_replica_context()` to access members such
as `all_reduce`.
All arguments in `args` or `kwargs` should either be nest of tensors or
per-replica objects containing tensors or composite tensors.
Users can pass strategy specific options to `options` argument. An example
to enable bucketizing dynamic shapes in `TPUStrategy.run`
is:
>>> resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='')
>>> tf.config.experimental_connect_to_cluster(resolver)
>>> tf.tpu.experimental.initialize_tpu_system(resolver)
>>> strategy = tf.distribute.experimental.TPUStrategy(resolver)
>>> options = tf.distribute.RunOptions(
... experimental_bucketizing_dynamic_shape=True)
>>> dataset = tf.data.Dataset.range(
... strategy.num_replicas_in_sync, output_type=dtypes.float32).batch(
... strategy.num_replicas_in_sync, drop_remainder=True)
>>> input_iterator = iter(strategy.experimental_distribute_dataset(dataset))
>>> @tf.function()
... def step_fn(inputs):
... output = tf.reduce_sum(inputs)
... return output
>>> strategy.run(step_fn, args=(next(input_iterator),), options=options)
Args:
fn: The function to run. The output must be a `tf.nest` of `Tensor`s.
args: (Optional) Positional arguments to `fn`.
kwargs: (Optional) Keyword arguments to `fn`.
options: (Optional) An instance of `tf.distribute.RunOptions` specifying
the options to run `fn`.
Returns:
Merged return value of `fn` across replicas. The structure of the return
value is the same as the return value from `fn`. Each element in the
structure can either be "per-replica" `Tensor` objects or `Tensor`s
(for example, if running on a single replica).
"""
validate_run_function(fn)
fn = autograph.tf_convert(fn, autograph_ctx.control_status_ctx())
options = options or distribute_lib.RunOptions()
return self.extended.tpu_run(fn, args, kwargs, options)
# TODO(josh11b): Switch to V2 when we no longer need to support tf.compat.v1.
class TPUExtended(distribute_lib.StrategyExtendedV1):
"""Implementation of TPUStrategy."""
def __init__(self,
container_strategy,
tpu_cluster_resolver=None,
steps_per_run=None,
device_assignment=None):
super(TPUExtended, self).__init__(container_strategy)
if tpu_cluster_resolver is None:
tpu_cluster_resolver = TPUClusterResolver("")
if steps_per_run is None:
# TODO(frankchn): Warn when we are being used by DS/Keras and this is
# not specified.
steps_per_run = 1
# `self._tpu_function_cache` is a dict of `tf.function`s, thus if a
# `tf.function` is passed into `strategy.run` in eager mode, the
# `tf.function` won't get retraced.
self._tpu_function_cache = weakref.WeakKeyDictionary()
self._tpu_cluster_resolver = tpu_cluster_resolver
self._tpu_metadata = self._tpu_cluster_resolver.get_tpu_system_metadata()
self._device_assignment = device_assignment
tpu_devices_flat = [
d.name for d in self._tpu_metadata.devices if "device:TPU:" in d.name]
# `self._tpu_devices` is a two-dimensional NumPy array of strings. It is
# indexed using `[replica_id][logical_device_id]`.
if device_assignment is None:
self._tpu_devices = np.array(
[[d] for d in tpu_devices_flat], dtype=object)
else:
job_name = device_spec.DeviceSpecV2.from_string(tpu_devices_flat[0]).job
tpu_devices = []
for replica_id in range(device_assignment.num_replicas):
replica_devices = []
for logical_core in range(device_assignment.num_cores_per_replica):
replica_devices.append(
device_util.canonicalize(
device_assignment.tpu_device(
replica=replica_id,
logical_core=logical_core,
job=job_name)))
tpu_devices.append(replica_devices)
self._tpu_devices = np.array(tpu_devices, dtype=object)
self._host_device = device_util.get_host_for_device(self._tpu_devices[0][0])
# Preload the data onto the TPUs. Currently we always preload onto logical
# device 0 for each replica.
# TODO(cjfj): Create `InputWorkers` lazily, allowing users to place the
# input onto a different logical device?
self._device_input_worker_devices = collections.OrderedDict()
self._host_input_worker_devices = collections.OrderedDict()
for tpu_device in self._tpu_devices[:, 0]:
host_device = device_util.get_host_for_device(tpu_device)
self._device_input_worker_devices.setdefault(host_device, [])
self._device_input_worker_devices[host_device].append(tpu_device)
self._host_input_worker_devices.setdefault(host_device, [])
self._host_input_worker_devices[host_device].append(host_device)
# TODO(sourabhbajaj): Remove this once performance of running one step
# at a time is comparable to multiple steps.
self.steps_per_run = steps_per_run
self._require_static_shapes = True
self.experimental_enable_get_next_as_optional = True
self._logical_device_stack = [0]
if context.executing_eagerly():
# In async remote eager, we want to sync the executors before exiting the
# program.
def async_wait():
if context.context()._context_handle is not None: # pylint: disable=protected-access
context.async_wait()
atexit.register(async_wait)
# Flag to turn on VariablePolicy
self._use_var_policy = False
def _validate_colocate_with_variable(self, colocate_with_variable):
    distribute_utils.validate_colocate(colocate_with_variable, self)
def _make_dataset_iterator(self, dataset):
"""Make iterators for each of the TPU hosts."""
input_workers = input_lib.InputWorkers(
tuple(self._device_input_worker_devices.items()))
return input_lib.DatasetIterator(
dataset,
input_workers,
self._container_strategy(),
split_batch_by=self._num_replicas_in_sync)
def _make_input_fn_iterator(
self,
input_fn,
replication_mode=distribute_lib.InputReplicationMode.PER_WORKER):
input_contexts = []
input_workers = input_lib.InputWorkers(
tuple(self._device_input_worker_devices.items()))
num_workers = input_workers.num_workers
for i in range(num_workers):
input_contexts.append(distribute_lib.InputContext(
num_input_pipelines=num_workers,
input_pipeline_id=i,
num_replicas_in_sync=self._num_replicas_in_sync))
return input_lib.InputFunctionIterator(
input_fn,
input_workers,
input_contexts,
self._container_strategy())
def _experimental_make_numpy_dataset(self, numpy_input, session):
return numpy_dataset.one_host_numpy_dataset(
numpy_input, numpy_dataset.SingleDevice(self._host_device),
session)
def _get_input_workers(self, options):
if not options or options.experimental_prefetch_to_device:
return input_lib.InputWorkers(
tuple(self._device_input_worker_devices.items()))
else:
return input_lib.InputWorkers(
tuple(self._host_input_worker_devices.items()))
def _check_spec(self, element_spec):
if isinstance(element_spec, values.PerReplicaSpec):
element_spec = element_spec._component_specs # pylint: disable=protected-access
specs = nest.flatten_with_joined_string_paths(element_spec)
for path, spec in specs:
if isinstance(spec, (sparse_tensor.SparseTensorSpec,
ragged_tensor.RaggedTensorSpec)):
raise ValueError(
"Found tensor {} with spec {}. TPUStrategy does not support "
"distributed datasets with device prefetch when using sparse or "
"ragged tensors. If you indend to use sparse or ragged tensors, "
"please pass a tf.distribute.InputOptions object with "
"experimental_prefetch_to_device set to False to your dataset "
"distribution function.".format(path, type(spec)))
def _experimental_distribute_dataset(self, dataset, options):
if options is None or options.experimental_prefetch_to_device:
self._check_spec(dataset.element_spec)
return input_lib.get_distributed_dataset(
dataset,
self._get_input_workers(options),
self._container_strategy(),
split_batch_by=self._num_replicas_in_sync)
def _distribute_datasets_from_function(self, dataset_fn, options):
input_workers = self._get_input_workers(options)
input_contexts = []
num_workers = input_workers.num_workers
for i in range(num_workers):
input_contexts.append(distribute_lib.InputContext(
num_input_pipelines=num_workers,
input_pipeline_id=i,
num_replicas_in_sync=self._num_replicas_in_sync))
distributed_dataset = input_lib.get_distributed_datasets_from_function(
dataset_fn,
input_workers,
input_contexts,
self._container_strategy())
# We can only check after the dataset_fn is called.
if options is None or options.experimental_prefetch_to_device:
self._check_spec(distributed_dataset.element_spec)
return distributed_dataset
def _experimental_distribute_values_from_function(self, value_fn):
per_replica_values = []
for replica_id in range(self._num_replicas_in_sync):
per_replica_values.append(
value_fn(distribute_lib.ValueContext(replica_id,
self._num_replicas_in_sync)))
return distribute_utils.regroup(per_replica_values, always_wrap=True)
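# Hedged example of a `value_fn` accepted above: each replica receives a value
# derived from its ValueContext, e.g.
#   def value_fn(ctx):
#     return constant_op.constant(float(ctx.replica_id_in_sync_group))
# which regroups into a single PerReplica value across replicas.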
# TODO(priyag): Deal with OutOfRange errors once b/111349762 is fixed.
# TODO(sourabhbajaj): Remove the initial_loop_values parameter when we have
# a mechanism to infer the outputs of `fn`. Pending b/110550782.
def _experimental_run_steps_on_iterator(
self, fn, multi_worker_iterator, iterations, initial_loop_values=None):
# Wrap `fn` for repeat.
if initial_loop_values is None:
initial_loop_values = {}
initial_loop_values = nest.flatten(initial_loop_values)
ctx = input_lib.MultiStepContext()
def run_fn(inputs):
"""Single step on the TPU device."""
fn_result = fn(ctx, inputs)
flat_last_step_outputs = nest.flatten(ctx.last_step_outputs)
if flat_last_step_outputs:
with ops.control_dependencies([fn_result]):
return [array_ops.identity(f) for f in flat_last_step_outputs]
else:
return fn_result
# We capture the control_flow_context at this point, before we run `fn`
# inside a while_loop and TPU replicate context. This is useful in cases
# where we might need to exit these contexts and get back to the outer
# context to do some things, e.g. create an op which should be
# evaluated only once at the end of the loop on the host. One such usage
# is in creating metrics' value op.
self._outer_control_flow_context = (
ops.get_default_graph()._get_control_flow_context()) # pylint: disable=protected-access
def rewrite_fn(*args):
"""The rewritten step fn running on TPU."""
del args
per_replica_inputs = multi_worker_iterator.get_next()
replicate_inputs = []
for replica_id in range(self._num_replicas_in_sync):
select_replica = lambda x: distribute_utils.select_replica( # pylint: disable=g-long-lambda
replica_id, x) # pylint: disable=cell-var-from-loop
replicate_inputs.append((nest.map_structure(
select_replica, per_replica_inputs),))
replicate_outputs = tpu.replicate(
run_fn,
replicate_inputs,
device_assignment=self._device_assignment,
xla_options=tpu.XLAOptions(use_spmd_for_xla_partitioning=False))
# If run_fn has tensor outputs, tpu.replicate returns a list of lists. We
# will flatten it in this case. If run_fn has no tensor outputs,
# tpu.replicate returns a list of no_ops, we will keep the output as it
# is.
if isinstance(replicate_outputs[0], list):
replicate_outputs = nest.flatten(replicate_outputs)
return replicate_outputs
# TODO(sourabhbajaj): The input to while loop should be based on the
# output type of the step_fn
assert isinstance(initial_loop_values, list)
initial_loop_values = initial_loop_values * self._num_replicas_in_sync
# Put the while loop op on TPU host 0.
with ops.device(self._host_device):
if self.steps_per_run == 1:
replicate_outputs = rewrite_fn()
else:
replicate_outputs = training_loop.repeat(iterations, rewrite_fn,
initial_loop_values)
del self._outer_control_flow_context
ctx.run_op = control_flow_ops.group(replicate_outputs)
if isinstance(replicate_outputs, list):
# Filter out any ops from the outputs, typically this would be the case
# when there were no tensor outputs.
last_step_tensor_outputs = [
x for x in replicate_outputs if not isinstance(x, ops.Operation)
]
# Outputs are currently of the structure (flattened)
# [output0_device0, output1_device0, output2_device0,
# output0_device1, output1_device1, output2_device1,
# ...]
# Convert this to the following structure instead: (grouped by output)
# [[output0_device0, output0_device1],
# [output1_device0, output1_device1],
# [output2_device0, output2_device1]]
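# Worked example (2 replicas, 3 outputs): the flattened list
# [o0_r0, o1_r0, o2_r0, o0_r1, o1_r1, o2_r1] has output_num == 3, and the
# slices [0::3], [1::3], [2::3] below give
# [[o0_r0, o0_r1], [o1_r0, o1_r1], [o2_r0, o2_r1]].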
output_num = len(last_step_tensor_outputs) // self._num_replicas_in_sync
last_step_tensor_outputs = [
last_step_tensor_outputs[i::output_num] for i in range(output_num)
]
else:
# no tensors returned.
last_step_tensor_outputs = []
_set_last_step_outputs(ctx, last_step_tensor_outputs)
return ctx
def _call_for_each_replica(self, fn, args, kwargs):
# TODO(jhseu): Consider making it so call_for_each_replica implies that
# we're in a tpu.rewrite(), and update TPUMirroredVariable accordingly.
with _TPUReplicaContext(self._container_strategy()):
return fn(*args, **kwargs)
@contextlib.contextmanager
def experimental_logical_device(self, logical_device_id):
"""Places variables and ops on the specified logical device."""
num_logical_devices_per_replica = self._tpu_devices.shape[1]
if logical_device_id >= num_logical_devices_per_replica:
raise ValueError(
"`logical_device_id` not in range (was {}, but there are only {} "
"logical devices per replica).".format(
logical_device_id, num_logical_devices_per_replica))
self._logical_device_stack.append(logical_device_id)
try:
if tpu_values.enclosing_tpu_context() is None:
yield
else:
with ops.device(tpu.core(logical_device_id)):
yield
finally:
self._logical_device_stack.pop()
def _experimental_initialize_system(self):
"""Experimental method added to be used by Estimator.
This is a private method only to be used by Estimator. Other frameworks
should directly be calling `tf.tpu.experimental.initialize_tpu_system`
"""
tpu_strategy_util.initialize_tpu_system(self._tpu_cluster_resolver)
def _create_variable(self, next_creator, **kwargs):
"""Create a TPUMirroredVariable. See `DistributionStrategy.scope`."""
if kwargs.pop("skip_mirrored_creator", False):
return next_creator(**kwargs)
colocate_with = kwargs.pop("colocate_with", None)
if colocate_with is None:
devices = self._tpu_devices[:, self._logical_device_stack[-1]]
elif isinstance(colocate_with, numpy_dataset.SingleDevice):
with ops.device(colocate_with.device):
return next_creator(**kwargs)
else:
devices = colocate_with._devices # pylint: disable=protected-access
def _real_mirrored_creator(**kwargs): # pylint: disable=g-missing-docstring
initial_value = None
value_list = []
for i, d in enumerate(devices):
with ops.device(d):
if i == 0:
initial_value = kwargs["initial_value"]
# Note: some v1 code expects variable initializer creation to happen
# inside an init_scope.
with maybe_init_scope():
initial_value = initial_value() if callable(
initial_value) else initial_value
if i > 0:
# Give replicas meaningful distinct names:
var0name = value_list[0].name.split(":")[0]
# We append a / to variable names created on replicas with id > 0 to
# ensure that we ignore the name scope and instead use the given
# name as the absolute name of the variable.
kwargs["name"] = "%s/replica_%d/" % (var0name, i)
kwargs["initial_value"] = initial_value
with context.device_policy(context.DEVICE_PLACEMENT_SILENT):
v = next_creator(**kwargs)
assert not isinstance(v, tpu_values.TPUMirroredVariable)
value_list.append(v)
return value_list
return distribute_utils.create_mirrored_variable(
self._container_strategy(), _real_mirrored_creator,
distribute_utils.TPU_VARIABLE_CLASS_MAPPING,
distribute_utils.TPU_VARIABLE_POLICY_MAPPING, **kwargs)
def _reduce_to(self, reduce_op, value, destinations, experimental_hints):
if (isinstance(value, values.DistributedValues) or
tensor_util.is_tensor(value)
) and tpu_values.enclosing_tpu_context() is not None:
if reduce_op == reduce_util.ReduceOp.MEAN:
# TODO(jhseu): Revisit once we support model-parallelism.
value *= (1. / self._num_replicas_in_sync)
elif reduce_op != reduce_util.ReduceOp.SUM:
raise NotImplementedError(
"Currently only support sum & mean in TPUStrategy.")
return tpu_ops.cross_replica_sum(value)
if not isinstance(value, values.DistributedValues):
# This function handles reducing values that are not PerReplica or
# Mirrored values. For example, the same value could be present on all
# replicas in which case `value` would be a single value or value could
# be 0.
return cross_device_ops_lib.reduce_non_distributed_value(
reduce_op, value, destinations, self._num_replicas_in_sync)
value_list = value.values
# pylint: disable=protected-access
if isinstance(
value,
values.DistributedVariable) and value._packed_variable is not None:
value_list = tuple(
value._packed_variable.on_device(d)
for d in value._packed_variable.devices)
# pylint: enable=protected-access
# Currently XLA op by op mode has a limit for the number of inputs for a
# single op, thus we break one `add_n` op into a group of `add_n` ops to
# work around the constraint.
# TODO(cjfj): Detect when it is possible to use `cross_replica_sum`.
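# Worked example: if _XLA_OP_BY_OP_INPUTS_LIMIT were 100 and there were 250
# replica values, the loop below would accumulate add_n over the slices
# [0:100], [100:200] and [200:250] rather than issuing one 250-input add_n.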
if len(value.values) <= _XLA_OP_BY_OP_INPUTS_LIMIT:
output = math_ops.add_n(value_list)
else:
output = array_ops.zeros_like(value_list[0], dtype=value_list[0].dtype)
for i in range(0, len(value_list), _XLA_OP_BY_OP_INPUTS_LIMIT):
output += math_ops.add_n(value_list[i:i + _XLA_OP_BY_OP_INPUTS_LIMIT])
if reduce_op == reduce_util.ReduceOp.MEAN:
output *= (1. / len(value_list))
devices = cross_device_ops_lib.get_devices_from(destinations)
if len(devices) == 1:
# If necessary, copy to requested destination.
dest_canonical = device_util.canonicalize(devices[0])
host_canonical = device_util.canonicalize(self._host_device)
if dest_canonical != host_canonical:
with ops.device(dest_canonical):
output = array_ops.identity(output)
else:
output = cross_device_ops_lib.simple_broadcast(output, destinations)
return output
def _update(self, var, fn, args, kwargs, group):
assert isinstance(var, tpu_values.TPUVariableMixin) or isinstance(
var, resource_variable_ops.BaseResourceVariable)
if tpu_values.enclosing_tpu_context() is not None:
if group:
return fn(var, *args, **kwargs)
else:
return (fn(var, *args, **kwargs),)
# Otherwise, we revert to MirroredStrategy behavior and update the variable
# on each replica directly.
updates = []
values_and_devices = []
packed_var = var._packed_variable # pylint: disable=protected-access
if packed_var is not None:
for device in packed_var.devices:
values_and_devices.append((packed_var, device))
else:
for value in var.values:
values_and_devices.append((value, value.device))
for i, value_and_device in enumerate(values_and_devices):
value = value_and_device[0]
device = value_and_device[1]
name = "update_%d" % i
with ops.device(device), \
distribute_lib.UpdateContext(i), \
ops.name_scope(name):
# If args and kwargs are not mirrored, the value is returned as is.
updates.append(
fn(value, *distribute_utils.select_replica_mirrored(i, args),
**distribute_utils.select_replica_mirrored(i, kwargs)))
return distribute_utils.update_regroup(self, updates, group)
def read_var(self, var):
assert isinstance(var, tpu_values.TPUVariableMixin) or isinstance(
var, resource_variable_ops.BaseResourceVariable)
return var.read_value()
def _local_results(self, val):
if isinstance(val, values.DistributedValues):
return val.values
return (val,)
def value_container(self, value):
return value
def _broadcast_to(self, tensor, destinations):
del destinations
# This is both a fast path for Python constants, and a way to delay
# converting Python values to a tensor until we know what type it
# should be converted to. Otherwise we have trouble with:
# global_step.assign_add(1)
# since the `1` gets broadcast as an int32 but global_step is int64.
if isinstance(tensor, (float, int)):
return tensor
if tpu_values.enclosing_tpu_context() is not None:
broadcast_tensor = [tensor for _ in range(self._num_replicas_in_sync)]
result = tpu_ops.all_to_all(
broadcast_tensor,
concat_dimension=0,
split_dimension=0,
split_count=self._num_replicas_in_sync)
# This uses the broadcasted value from the first replica because the only
# caller of this is for ONLY_FIRST_REPLICA variables aggregation.
return result[0]
return tensor
@property
def num_hosts(self):
if self._device_assignment is None:
return self._tpu_metadata.num_hosts
return len(set([self._device_assignment.host_device(r)
for r in range(self._device_assignment.num_replicas)]))
@property
def num_replicas_per_host(self):
if self._device_assignment is None:
return self._tpu_metadata.num_of_cores_per_host
# TODO(sourabhbajaj): Remove this method once we use inputs and remove infeed,
# as the computation of num_replicas_per_host is not a constant
# when using device_assignment. This is a temporary workaround to support
# StatefulRNN as everything is 1 in that case.
# This method needs to take host_id as input for correct computation.
max_models_per_host = (self._tpu_metadata.num_of_cores_per_host //
self._device_assignment.num_cores_per_replica)
return min(self._device_assignment.num_replicas, max_models_per_host)
@property
def _num_replicas_in_sync(self):
if self._device_assignment is None:
return self._tpu_metadata.num_cores
return self._device_assignment.num_replicas
@property
def experimental_between_graph(self):
return False
@property
def experimental_should_init(self):
return True
@property
def should_checkpoint(self):
return True
@property
def should_save_summary(self):
return True
@property
def worker_devices(self):
return tuple(self._tpu_devices[:, self._logical_device_stack[-1]])
@property
def parameter_devices(self):
return self.worker_devices
def non_slot_devices(self, var_list):
return self._host_device
def _update_non_slot(self, colocate_with, fn, args, kwargs, group):
del colocate_with
with ops.device(self._host_device), distribute_lib.UpdateContext(None):
result = fn(*args, **kwargs)
if group:
return result
else:
return nest.map_structure(self._local_results, result)
def _configure(self,
session_config=None,
cluster_spec=None,
task_type=None,
task_id=None):
del cluster_spec, task_type, task_id
if session_config:
session_config.CopyFrom(self._update_config_proto(session_config))
def _update_config_proto(self, config_proto):
updated_config = copy.deepcopy(config_proto)
updated_config.isolate_session_state = True
cluster_spec = self._tpu_cluster_resolver.cluster_spec()
if cluster_spec:
updated_config.cluster_def.CopyFrom(cluster_spec.as_cluster_def())
return updated_config
# TODO(priyag): Delete this once all strategies use global batch size.
@property
def _global_batch_size(self):
"""`make_dataset_iterator` and `make_numpy_iterator` use global batch size.
`make_input_fn_iterator` assumes per-replica batching.
Returns:
Boolean.
"""
return True
def tpu_run(self, fn, args, kwargs, options=None):
func = self._tpu_function_creator(fn, options)
return func(args, kwargs)
def _tpu_function_creator(self, fn, options):
if context.executing_eagerly() and fn in self._tpu_function_cache:
return self._tpu_function_cache[fn]
strategy = self._container_strategy()
def tpu_function(args, kwargs):
"""TF Function used to replicate the user computation."""
if kwargs is None:
kwargs = {}
# Remove None at the end of args as they are not replicatable
# If there are None in the middle we can't do anything about it
# so let those cases fail.
# For example when Keras model predict is used they pass the targets as
# None. We want to handle it here so all client libraries don't have to
# do this as other strategies can handle None values better.
while args and args[-1] is None:
args = args[:-1]
# Used to re-structure flattened output tensors from `tpu.replicate()`
# into a structured format.
result = [[]]
def replicated_fn(replica_id, replica_args, replica_kwargs):
"""Wraps user function to provide replica ID and `Tensor` inputs."""
with _TPUReplicaContext(strategy, replica_id_in_sync_group=replica_id):
result[0] = fn(*replica_args, **replica_kwargs)
return result[0]
replicate_inputs = [] # By replica.
for i in range(strategy.num_replicas_in_sync):
replicate_inputs.append(
[constant_op.constant(i, dtype=dtypes.int32),
distribute_utils.select_replica(i, args),
distribute_utils.select_replica(i, kwargs)])
# Construct and pass `maximum_shapes` so that we could support dynamic
# shapes using dynamic padder.
if options.experimental_enable_dynamic_batch_size and replicate_inputs:
maximum_shapes = []
flattened_list = nest.flatten(replicate_inputs[0])
for input_tensor in flattened_list:
if tensor_util.is_tensor(input_tensor):
rank = input_tensor.shape.rank
else:
rank = np.ndim(input_tensor)
maximum_shape = tensor_shape.TensorShape([None] * rank)
maximum_shapes.append(maximum_shape)
maximum_shapes = nest.pack_sequence_as(replicate_inputs[0],
maximum_shapes)
else:
maximum_shapes = None
if options.experimental_bucketizing_dynamic_shape:
padding_spec = tpu.PaddingSpec.POWER_OF_TWO
else:
padding_spec = None
with strategy.scope():
replicate_outputs = tpu.replicate(
replicated_fn,
replicate_inputs,
device_assignment=self._device_assignment,
maximum_shapes=maximum_shapes,
padding_spec=padding_spec,
xla_options=tpu.XLAOptions(use_spmd_for_xla_partitioning=False))
# Remove all no-ops that may have been added during 'tpu.replicate()'.
if isinstance(result[0], list):
result[0] = [
output for output in result[0] if not isinstance(
output, ops.Operation)
]
# Workaround for `tpu.replicate` behaviour when single `Tensor` returned.
if result[0] is None or isinstance(result[0], ops.Operation):
replicate_outputs = [None] * len(replicate_outputs)
else:
replicate_outputs = [
nest.pack_sequence_as(result[0], nest.flatten(replica_output))
for replica_output in replicate_outputs
]
return distribute_utils.regroup(replicate_outputs)
if context.executing_eagerly():
tpu_function = def_function.function(tpu_function)
self._tpu_function_cache[fn] = tpu_function
return tpu_function
def _in_multi_worker_mode(self):
"""Whether this strategy indicates working in multi-worker settings."""
# TPUStrategy has a different distributed training structure in that the whole
# cluster should be treated as a single worker from a higher-level (e.g. Keras)
# library's point of view.
# TODO(rchao): Revisit this as we design a fault-tolerance solution for
# TPUStrategy.
return False
class _TPUReplicaContext(distribute_lib.ReplicaContext):
"""Replication Context class for TPU Strategy."""
# TODO(sourabhbajaj): Call for each replica should be updating this.
# TODO(b/118385803): Always properly initialize replica_id.
def __init__(self, strategy, replica_id_in_sync_group=0):
distribute_lib.ReplicaContext.__init__(
self, strategy, replica_id_in_sync_group=replica_id_in_sync_group)
@property
def devices(self):
distribute_lib.require_replica_context(self)
ds = self._strategy
replica_id = tensor_util.constant_value(self.replica_id_in_sync_group)
if replica_id is None: # Non-constant `Tensor` inside `tpu.replicate`.
# TODO(cjfj): Return other devices when model parallelism is supported.
return (tpu.core(0),)
else:
return (ds.extended.worker_devices[replica_id],)
def experimental_logical_device(self, logical_device_id):
"""Places variables and ops on the specified logical device."""
return self.strategy.extended.experimental_logical_device(logical_device_id)
def _set_last_step_outputs(ctx, last_step_tensor_outputs):
"""Sets the last step outputs on the given context."""
# Convert replicate_outputs to the original dict structure of
# last_step_outputs.
last_step_tensor_outputs_dict = nest.pack_sequence_as(
ctx.last_step_outputs, last_step_tensor_outputs)
for name, reduce_op in ctx._last_step_outputs_reduce_ops.items(): # pylint: disable=protected-access
output = last_step_tensor_outputs_dict[name]
# For outputs that aren't reduced, return a PerReplica of all values. Else
# take the first value from the list as each value should be the same.
if reduce_op is None:
last_step_tensor_outputs_dict[name] = values.PerReplica(output)
else:
# TODO(priyag): Should this return the element or a list with 1 element
last_step_tensor_outputs_dict[name] = output[0]
ctx._set_last_step_outputs(last_step_tensor_outputs_dict) # pylint: disable=protected-access
|
py | 1a3d4a3ab6f500cf043bcde9dae0348772d388b1 | config = {
"interfaces": {
"google.ads.googleads.v1.services.FeedPlaceholderViewService": {
"retry_codes": {
"idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"],
"non_idempotent": []
},
"retry_params": {
"default": {
"initial_retry_delay_millis": 100,
"retry_delay_multiplier": 1.3,
"max_retry_delay_millis": 60000,
"initial_rpc_timeout_millis": 20000,
"rpc_timeout_multiplier": 1.0,
"max_rpc_timeout_millis": 20000,
"total_timeout_millis": 600000
}
},
"methods": {
"GetFeedPlaceholderView": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default"
}
}
}
}
}
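# Hedged illustrative sketch (not part of the generated client config): how the
# "default" retry_params above translate into a delay schedule, assuming simple
# exponential backoff with the listed multiplier and cap.
def _example_retry_delays(params, attempts=5):
    """Return the first `attempts` retry delays (in milliseconds) implied by `params`."""
    delay_ms = params["initial_retry_delay_millis"]
    schedule = []
    for _ in range(attempts):
        schedule.append(delay_ms)
        delay_ms = min(delay_ms * params["retry_delay_multiplier"],
                       params["max_retry_delay_millis"])
    return schedule
# Example: passing config["interfaces"][
#     "google.ads.googleads.v1.services.FeedPlaceholderViewService"][
#     "retry_params"]["default"] yields approximately [100, 130, 169, 219.7, 285.6].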
|
py | 1a3d4a670a0861e236d7253c60e2b6f0cf71e6cb |
import os
import io
import numpy as np
import librosa
import soundfile as sf
import tensorflow as tf
from scipy.signal import butter, lfilter
from scipy import signal
import copy
def read_raw_audio(audio, sample_rate=16000):
if isinstance(audio, str):
wave, _ = librosa.load(os.path.expanduser(audio), sr=sample_rate)
elif isinstance(audio, bytes):
wave, sr = sf.read(io.BytesIO(audio))
wave = np.asfortranarray(wave)
if sr != sample_rate:
wave = librosa.resample(wave, sr, sample_rate)
elif isinstance(audio, np.ndarray):
return audio
else:
raise ValueError("input audio must be either a path or bytes")
return wave
def normalize_audio_feature(audio_feature: np.ndarray, per_feature=False):
""" Mean and variance normalization """
axis = 0 if per_feature else None
mean = np.mean(audio_feature, axis=axis)
std_dev = np.std(audio_feature, axis=axis) + 1e-9
normalized = (audio_feature - mean) / std_dev
return normalized
def normalize_signal(signal: np.ndarray):
""" Normailize signal to [-1, 1] range """
gain = 1.0 / (np.max(np.abs(signal)) + 1e-9)
return signal * gain
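# Hedged usage sketch for the two helpers above (shapes and values are
# illustrative assumptions, and the function below is not used elsewhere).
def _example_normalization():
    """Show normalize_signal / normalize_audio_feature on toy data."""
    wav = normalize_signal(np.array([0.1, -0.4, 0.2], dtype=np.float32))
    # Peak amplitude of `wav` is now ~1.0 (up to the 1e-9 stabiliser).
    feats = normalize_audio_feature(np.random.rand(100, 80), per_feature=True)
    # Each of the 80 feature columns now has ~zero mean and ~unit variance.
    return wav, feats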
class SpeechFeaturizer:
def __init__(self, speech_config: dict):
# Samples
self.speech_config=speech_config
self.sample_rate = speech_config["sample_rate"]
self.hop_size = int(self.sample_rate * (speech_config["hop_size"]))
self.win_size = int(self.sample_rate * (speech_config["win_size"]))
# Features
self.num_mels = speech_config["num_mels"]
self.preemphasis = speech_config["preemphasis"]
# Normalization
def smooth_energe(self, wav, sr):
"""Flatten the waveform's energy by dividing out its low-pass envelope."""
factor = 5
cutoff = 20
nyq = 0.5 * sr
order = 3 # set low-pass filter order
normal_cutoff = cutoff / nyq
b, a = butter(order, normal_cutoff, btype='low', analog=False)
envelop = lfilter(b, a, abs(wav)) # filter low frequency part as signal's envelop
envelop = envelop / np.abs(envelop).max()
envelop = envelop * factor + 1
wav = np.divide(wav, envelop)
wav /= np.abs(wav).max()
return wav
def load_wav(self,path):
wav=read_raw_audio(path,self.sample_rate)
wav=librosa.effects.preemphasis(wav)
wav=self.smooth_energe(wav,self.sample_rate)
wav=librosa.effects.trim(wav,top_db=20)[0]
return wav
def pad_signal(self,wavs,max_length):
wavs = tf.keras.preprocessing.sequence.pad_sequences(wavs, max_length, 'float32', 'post', 'post')
return wavs
def melspectrogram(self,wav):
D = librosa.stft(y=wav, n_fft=self.speech_config['n_fft'], hop_length=self.hop_size,
win_length=self.win_size)
assert self.speech_config['fmax'] <= self.sample_rate // 2
mel_basis= librosa.filters.mel(self.sample_rate, self.speech_config['n_fft'], n_mels=self.num_mels
, fmin=self.speech_config['fmin'], fmax=self.speech_config['fmax'])
D= np.dot(mel_basis, np.abs(D))
min_level = np.exp(self.speech_config['min_level_db'] / 20 * np.log(10))
D= 20 * np.log10(np.maximum(min_level, D))
S = D - self.speech_config['ref_level_db']
S=np.clip((2 * self.speech_config['max_abs_value']) * (
(S - self.speech_config['min_level_db']) / (-self.speech_config['min_level_db'])) - self.speech_config['max_abs_value'],
-self.speech_config['max_abs_value'], self.speech_config['max_abs_value'])
return S.T
def preemphasis(self,wav):
return np.append(wav[0], wav[1:] - 0.97 * wav[:-1])
def inv_preemphasis(self,wav):
return signal.lfilter([1], [1, -0.97], wav)
def inv_mel_spectrogram(self,mel_spectrogram):
'''Converts mel spectrogram to waveform using librosa'''
mel_spectrogram *= self.speech_config['power']
D=(((np.clip(mel_spectrogram, -self.speech_config['max_abs_value'],
self.speech_config['max_abs_value']) + self.speech_config['max_abs_value']) * -self.speech_config['min_level_db'] / (
2 * self.speech_config['max_abs_value']))+ self.speech_config['min_level_db'])
D=np.power(10.0, (D) * 0.05)
mel_basis = librosa.filters.mel(self.sample_rate, self.speech_config['n_fft'],
n_mels=self.num_mels
, fmin=self.speech_config['fmin'], fmax=self.speech_config['fmax'])
_inv_mel_basis = np.linalg.pinv(mel_basis)
S= np.maximum(1e-10, np.dot(_inv_mel_basis, D))
spectro = copy.deepcopy(S)
for i in range(self.speech_config['griffin_lim_iters']):
estimated_wav = librosa.istft(spectro, hop_length=self.hop_size, win_length=self.win_size)
est_stft = librosa.stft(y=estimated_wav, n_fft=self.speech_config['n_fft'], hop_length=self.hop_size,
win_length=self.win_size)
phase = est_stft / np.maximum(1e-8, np.abs(est_stft))
spectro = S * phase
estimated_wav = librosa.istft(spectro, hop_length=self.hop_size, win_length=self.win_size)
result = np.real(estimated_wav)
return self.inv_preemphasis(result)
def _compute_pitch_feature(self, signal: np.ndarray) -> np.ndarray:
pitches, _ = librosa.core.piptrack(
y=signal, sr=self.sample_rate,
n_fft=self.speech_config['n_fft'], hop_length=self.hop_size,
fmin=0, fmax=int(self.sample_rate / 2), win_length=self.win_size, center=True
)
pitches = pitches.T
assert self.num_mels <= self.speech_config['n_fft'] // 2 + 1, \
"num_features for spectrogram should \
be <= (n_fft // 2 + 1)"
return pitches[:, :self.num_mels]
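# Hedged end-to-end sketch (every config value below is an assumption, not a
# project default, and "example.wav" is a hypothetical path):
#   example_config = {
#       "sample_rate": 16000, "hop_size": 0.0125, "win_size": 0.05,
#       "num_mels": 80, "preemphasis": 0.97, "n_fft": 1024,
#       "fmin": 0, "fmax": 8000, "min_level_db": -100, "ref_level_db": 20,
#       "max_abs_value": 4.0, "power": 1.5, "griffin_lim_iters": 60,
#   }
#   featurizer = SpeechFeaturizer(example_config)
#   wav = featurizer.load_wav("example.wav")
#   mel = featurizer.melspectrogram(wav)           # shape: (frames, num_mels)
#   recon = featurizer.inv_mel_spectrogram(mel.T)  # Griffin-Lim reconstruction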
|
py | 1a3d4ac8f4476868e0b79d99126c4d53324fb088 | import cv2 as cv
import os
i = 1
def capture(file, interval=450):
cap = cv.VideoCapture(file)
length = int(cap.get(cv.CAP_PROP_FRAME_COUNT))
global i
j = 0
while (cap.isOpened() and j < length):
cap.set(1, j)
ret, frame = cap.read()
if ret == False:
break
cv.imwrite("./trainingDataPreprocessing/img_y/" + "{:04d}".format(i) + ".png", frame)
i += 1
j += interval
cap.release()
cv.destroyAllWindows()
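# Note on `interval`: frames are selected by index, so interval=450 keeps one
# frame every 450 frames (roughly one every 15 s for 30 fps footage); for other
# frame rates a hedged rule of thumb is interval = int(fps * seconds_between_samples).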
for dirpath, dirnames, files in os.walk('./trainingDataPreprocessing/vid', topdown=False):
for file_name in files:
capture("./trainingDataPreprocessing/vid/" + file_name)
|
py | 1a3d4d8d05fffbb77b22a5058c547324d964387a | from chatnoir_api import Index
DEFAULT_START = 0
DEFAULT_SIZE = 10
DEFAULT_SLOP = 0
DEFAULT_INDEX = {
Index.ClueWeb09,
Index.ClueWeb12,
Index.CommonCrawl1511,
}
DEFAULT_MINIMAL = False
DEFAULT_EXPLAIN = False
DEFAULT_RETRIES = 5
DEFAULT_BACKOFF_SECONDS = 1
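# Hedged sketch (illustrative only, not part of chatnoir_api): one plausible way
# the retry defaults above could drive an exponential backoff schedule.
def _example_backoff_schedule(retries=DEFAULT_RETRIES,
                              base_seconds=DEFAULT_BACKOFF_SECONDS):
    """Return per-attempt sleep times in seconds, doubling after each failure."""
    return [base_seconds * (2 ** attempt) for attempt in range(retries)]
# _example_backoff_schedule() == [1, 2, 4, 8, 16]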
|
py | 1a3d4e2e455afad0ca70fa25ffae383005d145d6 | # coding: utf-8
"""Constants used by Home Assistant components."""
MAJOR_VERSION = 0
MINOR_VERSION = 88
PATCH_VERSION = '0.dev0'
__short_version__ = '{}.{}'.format(MAJOR_VERSION, MINOR_VERSION)
__version__ = '{}.{}'.format(__short_version__, PATCH_VERSION)
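# With the numbers above, __short_version__ evaluates to '0.88' and
# __version__ to '0.88.0.dev0'.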
REQUIRED_PYTHON_VER = (3, 5, 3)
# Format for platform files
PLATFORM_FORMAT = '{platform}.{domain}'
# Can be used to specify a catch all when registering state or event listeners.
MATCH_ALL = '*'
# Entity target all constant
ENTITY_MATCH_ALL = 'all'
# If no name is specified
DEVICE_DEFAULT_NAME = 'Unnamed Device'
# Sun events
SUN_EVENT_SUNSET = 'sunset'
SUN_EVENT_SUNRISE = 'sunrise'
# #### CONFIG ####
CONF_ABOVE = 'above'
CONF_ACCESS_TOKEN = 'access_token'
CONF_ADDRESS = 'address'
CONF_AFTER = 'after'
CONF_ALIAS = 'alias'
CONF_API_KEY = 'api_key'
CONF_API_VERSION = 'api_version'
CONF_AT = 'at'
CONF_AUTHENTICATION = 'authentication'
CONF_AUTH_MFA_MODULES = 'auth_mfa_modules'
CONF_AUTH_PROVIDERS = 'auth_providers'
CONF_BASE = 'base'
CONF_BEFORE = 'before'
CONF_BELOW = 'below'
CONF_BINARY_SENSORS = 'binary_sensors'
CONF_BLACKLIST = 'blacklist'
CONF_BRIGHTNESS = 'brightness'
CONF_CODE = 'code'
CONF_COLOR_TEMP = 'color_temp'
CONF_COMMAND = 'command'
CONF_COMMAND_CLOSE = 'command_close'
CONF_COMMAND_OFF = 'command_off'
CONF_COMMAND_ON = 'command_on'
CONF_COMMAND_OPEN = 'command_open'
CONF_COMMAND_STATE = 'command_state'
CONF_COMMAND_STOP = 'command_stop'
CONF_CONDITION = 'condition'
CONF_COVERS = 'covers'
CONF_CURRENCY = 'currency'
CONF_CUSTOMIZE = 'customize'
CONF_CUSTOMIZE_DOMAIN = 'customize_domain'
CONF_CUSTOMIZE_GLOB = 'customize_glob'
CONF_DELAY_TIME = 'delay_time'
CONF_DEVICE = 'device'
CONF_DEVICE_CLASS = 'device_class'
CONF_DEVICES = 'devices'
CONF_DISARM_AFTER_TRIGGER = 'disarm_after_trigger'
CONF_DISCOVERY = 'discovery'
CONF_DISKS = 'disks'
CONF_DISPLAY_CURRENCY = 'display_currency'
CONF_DISPLAY_OPTIONS = 'display_options'
CONF_DOMAIN = 'domain'
CONF_DOMAINS = 'domains'
CONF_EFFECT = 'effect'
CONF_ELEVATION = 'elevation'
CONF_EMAIL = 'email'
CONF_ENTITIES = 'entities'
CONF_ENTITY_ID = 'entity_id'
CONF_ENTITY_NAMESPACE = 'entity_namespace'
CONF_ENTITY_PICTURE_TEMPLATE = 'entity_picture_template'
CONF_EVENT = 'event'
CONF_EXCLUDE = 'exclude'
CONF_FILE_PATH = 'file_path'
CONF_FILENAME = 'filename'
CONF_FOR = 'for'
CONF_FORCE_UPDATE = 'force_update'
CONF_FRIENDLY_NAME = 'friendly_name'
CONF_FRIENDLY_NAME_TEMPLATE = 'friendly_name_template'
CONF_HEADERS = 'headers'
CONF_HOST = 'host'
CONF_HOSTS = 'hosts'
CONF_HS = 'hs'
CONF_ICON = 'icon'
CONF_ICON_TEMPLATE = 'icon_template'
CONF_INCLUDE = 'include'
CONF_ID = 'id'
CONF_IP_ADDRESS = 'ip_address'
CONF_LATITUDE = 'latitude'
CONF_LONGITUDE = 'longitude'
CONF_LIGHTS = 'lights'
CONF_MAC = 'mac'
CONF_METHOD = 'method'
CONF_MAXIMUM = 'maximum'
CONF_MINIMUM = 'minimum'
CONF_MODE = 'mode'
CONF_MONITORED_CONDITIONS = 'monitored_conditions'
CONF_MONITORED_VARIABLES = 'monitored_variables'
CONF_NAME = 'name'
CONF_OFFSET = 'offset'
CONF_OPTIMISTIC = 'optimistic'
CONF_PACKAGES = 'packages'
CONF_PASSWORD = 'password'
CONF_PATH = 'path'
CONF_PAYLOAD = 'payload'
CONF_PAYLOAD_OFF = 'payload_off'
CONF_PAYLOAD_ON = 'payload_on'
CONF_PENDING_TIME = 'pending_time'
CONF_PIN = 'pin'
CONF_PLATFORM = 'platform'
CONF_PORT = 'port'
CONF_PREFIX = 'prefix'
CONF_PROFILE_NAME = 'profile_name'
CONF_PROTOCOL = 'protocol'
CONF_PROXY_SSL = 'proxy_ssl'
CONF_QUOTE = 'quote'
CONF_RADIUS = 'radius'
CONF_RECIPIENT = 'recipient'
CONF_REGION = 'region'
CONF_RESOURCE = 'resource'
CONF_RESOURCES = 'resources'
CONF_RGB = 'rgb'
CONF_ROOM = 'room'
CONF_SCAN_INTERVAL = 'scan_interval'
CONF_SENDER = 'sender'
CONF_SENSOR_TYPE = 'sensor_type'
CONF_SENSORS = 'sensors'
CONF_SHOW_ON_MAP = 'show_on_map'
CONF_SLAVE = 'slave'
CONF_SOURCE = 'source'
CONF_SSL = 'ssl'
CONF_STATE = 'state'
CONF_STATE_TEMPLATE = 'state_template'
CONF_STRUCTURE = 'structure'
CONF_SWITCHES = 'switches'
CONF_TEMPERATURE_UNIT = 'temperature_unit'
CONF_TIME_ZONE = 'time_zone'
CONF_TIMEOUT = 'timeout'
CONF_TOKEN = 'token'
CONF_TRIGGER_TIME = 'trigger_time'
CONF_TTL = 'ttl'
CONF_TYPE = 'type'
CONF_UNIT_OF_MEASUREMENT = 'unit_of_measurement'
CONF_UNIT_SYSTEM = 'unit_system'
CONF_UPDATE_INTERVAL = 'update_interval'
CONF_URL = 'url'
CONF_USERNAME = 'username'
CONF_VALUE_TEMPLATE = 'value_template'
CONF_VERIFY_SSL = 'verify_ssl'
CONF_WEBHOOK_ID = 'webhook_id'
CONF_WEEKDAY = 'weekday'
CONF_WHITELIST = 'whitelist'
CONF_WHITELIST_EXTERNAL_DIRS = 'whitelist_external_dirs'
CONF_WHITE_VALUE = 'white_value'
CONF_XY = 'xy'
CONF_ZONE = 'zone'
# #### EVENTS ####
EVENT_HOMEASSISTANT_START = 'homeassistant_start'
EVENT_HOMEASSISTANT_STOP = 'homeassistant_stop'
EVENT_HOMEASSISTANT_CLOSE = 'homeassistant_close'
EVENT_STATE_CHANGED = 'state_changed'
EVENT_TIME_CHANGED = 'time_changed'
EVENT_CALL_SERVICE = 'call_service'
EVENT_PLATFORM_DISCOVERED = 'platform_discovered'
EVENT_COMPONENT_LOADED = 'component_loaded'
EVENT_SERVICE_REGISTERED = 'service_registered'
EVENT_SERVICE_REMOVED = 'service_removed'
EVENT_LOGBOOK_ENTRY = 'logbook_entry'
EVENT_THEMES_UPDATED = 'themes_updated'
EVENT_TIMER_OUT_OF_SYNC = 'timer_out_of_sync'
EVENT_AUTOMATION_TRIGGERED = 'automation_triggered'
EVENT_SCRIPT_STARTED = 'script_started'
# #### DEVICE CLASSES ####
DEVICE_CLASS_BATTERY = 'battery'
DEVICE_CLASS_HUMIDITY = 'humidity'
DEVICE_CLASS_ILLUMINANCE = 'illuminance'
DEVICE_CLASS_TEMPERATURE = 'temperature'
DEVICE_CLASS_TIMESTAMP = 'timestamp'
DEVICE_CLASS_PRESSURE = 'pressure'
# #### STATES ####
STATE_ON = 'on'
STATE_OFF = 'off'
STATE_HOME = 'home'
STATE_NOT_HOME = 'not_home'
STATE_UNKNOWN = 'unknown'
STATE_OPEN = 'open'
STATE_OPENING = 'opening'
STATE_CLOSED = 'closed'
STATE_CLOSING = 'closing'
STATE_PLAYING = 'playing'
STATE_PAUSED = 'paused'
STATE_IDLE = 'idle'
STATE_STANDBY = 'standby'
STATE_ALARM_DISARMED = 'disarmed'
STATE_ALARM_ARMED_HOME = 'armed_home'
STATE_ALARM_ARMED_AWAY = 'armed_away'
STATE_ALARM_ARMED_NIGHT = 'armed_night'
STATE_ALARM_ARMED_CUSTOM_BYPASS = 'armed_custom_bypass'
STATE_ALARM_PENDING = 'pending'
STATE_ALARM_ARMING = 'arming'
STATE_ALARM_DISARMING = 'disarming'
STATE_ALARM_TRIGGERED = 'triggered'
STATE_LOCKED = 'locked'
STATE_UNLOCKED = 'unlocked'
STATE_UNAVAILABLE = 'unavailable'
STATE_OK = 'ok'
STATE_PROBLEM = 'problem'
# #### STATE AND EVENT ATTRIBUTES ####
# Attribution
ATTR_ATTRIBUTION = 'attribution'
# Credentials
ATTR_CREDENTIALS = 'credentials'
# Contains time-related attributes
ATTR_NOW = 'now'
ATTR_DATE = 'date'
ATTR_TIME = 'time'
ATTR_SECONDS = 'seconds'
# Contains domain, service for a SERVICE_CALL event
ATTR_DOMAIN = 'domain'
ATTR_SERVICE = 'service'
ATTR_SERVICE_DATA = 'service_data'
# IDs
ATTR_ID = 'id'
# Name
ATTR_NAME = 'name'
# Contains one string or a list of strings, each being an entity id
ATTR_ENTITY_ID = 'entity_id'
# String with a friendly name for the entity
ATTR_FRIENDLY_NAME = 'friendly_name'
# A picture to represent entity
ATTR_ENTITY_PICTURE = 'entity_picture'
# Icon to use in the frontend
ATTR_ICON = 'icon'
# The unit of measurement if applicable
ATTR_UNIT_OF_MEASUREMENT = 'unit_of_measurement'
CONF_UNIT_SYSTEM_METRIC = 'metric' # type: str
CONF_UNIT_SYSTEM_IMPERIAL = 'imperial' # type: str
# Electrical attributes
ATTR_VOLTAGE = 'voltage'
# Contains the information that is discovered
ATTR_DISCOVERED = 'discovered'
# Location of the device/sensor
ATTR_LOCATION = 'location'
ATTR_BATTERY_CHARGING = 'battery_charging'
ATTR_BATTERY_LEVEL = 'battery_level'
ATTR_WAKEUP = 'wake_up_interval'
# For devices which support a code attribute
ATTR_CODE = 'code'
ATTR_CODE_FORMAT = 'code_format'
# For calling a device specific command
ATTR_COMMAND = 'command'
# For devices which support an armed state
ATTR_ARMED = 'device_armed'
# For devices which support a locked state
ATTR_LOCKED = 'locked'
# For sensors that support 'tripping', e.g. motion and door sensors
ATTR_TRIPPED = 'device_tripped'
# For sensors that support 'tripping' this holds the most recent
# time the device was tripped
ATTR_LAST_TRIP_TIME = 'last_tripped_time'
# For all entities, this holds whether or not the entity should be hidden
ATTR_HIDDEN = 'hidden'
# Location of the entity
ATTR_LATITUDE = 'latitude'
ATTR_LONGITUDE = 'longitude'
# Accuracy of location in meters
ATTR_GPS_ACCURACY = 'gps_accuracy'
# If state is assumed
ATTR_ASSUMED_STATE = 'assumed_state'
ATTR_STATE = 'state'
ATTR_OPTION = 'option'
# Bitfield of supported component features for the entity
ATTR_SUPPORTED_FEATURES = 'supported_features'
# Class of device within its domain
ATTR_DEVICE_CLASS = 'device_class'
# Temperature attribute
ATTR_TEMPERATURE = 'temperature'
# #### UNITS OF MEASUREMENT ####
# Temperature units
TEMP_CELSIUS = '°C'
TEMP_FAHRENHEIT = '°F'
# Length units
LENGTH_CENTIMETERS = 'cm' # type: str
LENGTH_METERS = 'm' # type: str
LENGTH_KILOMETERS = 'km' # type: str
LENGTH_INCHES = 'in' # type: str
LENGTH_FEET = 'ft' # type: str
LENGTH_YARD = 'yd' # type: str
LENGTH_MILES = 'mi' # type: str
# Volume units
VOLUME_LITERS = 'L' # type: str
VOLUME_MILLILITERS = 'mL' # type: str
VOLUME_GALLONS = 'gal' # type: str
VOLUME_FLUID_OUNCE = 'fl. oz.' # type: str
# Mass units
MASS_GRAMS = 'g' # type: str
MASS_KILOGRAMS = 'kg' # type: str
MASS_OUNCES = 'oz' # type: str
MASS_POUNDS = 'lb' # type: str
# UV Index units
UNIT_UV_INDEX = 'UV index' # type: str
# #### SERVICES ####
SERVICE_HOMEASSISTANT_STOP = 'stop'
SERVICE_HOMEASSISTANT_RESTART = 'restart'
SERVICE_TURN_ON = 'turn_on'
SERVICE_TURN_OFF = 'turn_off'
SERVICE_TOGGLE = 'toggle'
SERVICE_RELOAD = 'reload'
SERVICE_VOLUME_UP = 'volume_up'
SERVICE_VOLUME_DOWN = 'volume_down'
SERVICE_VOLUME_MUTE = 'volume_mute'
SERVICE_VOLUME_SET = 'volume_set'
SERVICE_MEDIA_PLAY_PAUSE = 'media_play_pause'
SERVICE_MEDIA_PLAY = 'media_play'
SERVICE_MEDIA_PAUSE = 'media_pause'
SERVICE_MEDIA_STOP = 'media_stop'
SERVICE_MEDIA_NEXT_TRACK = 'media_next_track'
SERVICE_MEDIA_PREVIOUS_TRACK = 'media_previous_track'
SERVICE_MEDIA_SEEK = 'media_seek'
SERVICE_SHUFFLE_SET = 'shuffle_set'
SERVICE_ALARM_DISARM = 'alarm_disarm'
SERVICE_ALARM_ARM_HOME = 'alarm_arm_home'
SERVICE_ALARM_ARM_AWAY = 'alarm_arm_away'
SERVICE_ALARM_ARM_NIGHT = 'alarm_arm_night'
SERVICE_ALARM_ARM_CUSTOM_BYPASS = 'alarm_arm_custom_bypass'
SERVICE_ALARM_TRIGGER = 'alarm_trigger'
SERVICE_LOCK = 'lock'
SERVICE_UNLOCK = 'unlock'
SERVICE_OPEN = 'open'
SERVICE_CLOSE = 'close'
SERVICE_CLOSE_COVER = 'close_cover'
SERVICE_CLOSE_COVER_TILT = 'close_cover_tilt'
SERVICE_OPEN_COVER = 'open_cover'
SERVICE_OPEN_COVER_TILT = 'open_cover_tilt'
SERVICE_SET_COVER_POSITION = 'set_cover_position'
SERVICE_SET_COVER_TILT_POSITION = 'set_cover_tilt_position'
SERVICE_STOP_COVER = 'stop_cover'
SERVICE_STOP_COVER_TILT = 'stop_cover_tilt'
SERVICE_SELECT_OPTION = 'select_option'
# #### API / REMOTE ####
SERVER_PORT = 8123
URL_ROOT = '/'
URL_API = '/api/'
URL_API_STREAM = '/api/stream'
URL_API_CONFIG = '/api/config'
URL_API_DISCOVERY_INFO = '/api/discovery_info'
URL_API_STATES = '/api/states'
URL_API_STATES_ENTITY = '/api/states/{}'
URL_API_EVENTS = '/api/events'
URL_API_EVENTS_EVENT = '/api/events/{}'
URL_API_SERVICES = '/api/services'
URL_API_SERVICES_SERVICE = '/api/services/{}/{}'
URL_API_COMPONENTS = '/api/components'
URL_API_ERROR_LOG = '/api/error_log'
URL_API_LOG_OUT = '/api/log_out'
URL_API_TEMPLATE = '/api/template'
HTTP_OK = 200
HTTP_CREATED = 201
HTTP_MOVED_PERMANENTLY = 301
HTTP_BAD_REQUEST = 400
HTTP_UNAUTHORIZED = 401
HTTP_NOT_FOUND = 404
HTTP_METHOD_NOT_ALLOWED = 405
HTTP_UNPROCESSABLE_ENTITY = 422
HTTP_TOO_MANY_REQUESTS = 429
HTTP_INTERNAL_SERVER_ERROR = 500
HTTP_SERVICE_UNAVAILABLE = 503
HTTP_BASIC_AUTHENTICATION = 'basic'
HTTP_DIGEST_AUTHENTICATION = 'digest'
HTTP_HEADER_HA_AUTH = 'X-HA-access'
HTTP_HEADER_X_REQUESTED_WITH = 'X-Requested-With'
CONTENT_TYPE_JSON = 'application/json'
CONTENT_TYPE_MULTIPART = 'multipart/x-mixed-replace; boundary={}'
CONTENT_TYPE_TEXT_PLAIN = 'text/plain'
# The exit code to send to request a restart
RESTART_EXIT_CODE = 100
UNIT_NOT_RECOGNIZED_TEMPLATE = '{} is not a recognized {} unit.' # type: str
LENGTH = 'length' # type: str
MASS = 'mass' # type: str
VOLUME = 'volume' # type: str
TEMPERATURE = 'temperature' # type: str
SPEED_MS = 'speed_ms' # type: str
ILLUMINANCE = 'illuminance' # type: str
WEEKDAYS = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']
# The degree of precision for platforms
PRECISION_WHOLE = 1
PRECISION_HALVES = 0.5
PRECISION_TENTHS = 0.1
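# Hedged usage sketch (a common rounding pattern, not the only consumer of
# these constants): snap a reading to a platform's precision, e.g.
#   round(23.4 / PRECISION_HALVES) * PRECISION_HALVES == 23.5
#   round(23.44 / PRECISION_TENTHS) * PRECISION_TENTHS ~= 23.4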
# Static list of entities that will never be exposed to
# cloud, alexa, or google_home components
CLOUD_NEVER_EXPOSED_ENTITIES = ['group.all_locks']
|
py | 1a3d4ec23be43724cbb4574ad05cbe47849b4530 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras index lookup preprocessing layer."""
# pylint: disable=g-classes-have-attributes
# pylint: disable=g-direct-tensorflow-import
import collections
from keras import backend
from keras.engine import base_layer_utils
from keras.engine import base_preprocessing_layer
from keras.layers.preprocessing import preprocessing_utils as utils
from keras.saving.saved_model import layer_serialization
from keras.utils import layer_utils
from keras.utils import tf_utils
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow.python.platform import tf_logging as logging
INT = utils.INT
MULTI_HOT = utils.MULTI_HOT
ONE_HOT = utils.ONE_HOT
COUNT = utils.COUNT
TF_IDF = utils.TF_IDF
_VOCAB_NAME = "vocab"
_IDF_WEIGHTS_NAME = "idf_weights"
class NullInitializer(tf.lookup.KeyValueTensorInitializer):
"""A placeholder initializer for restoring this layer from a SavedModel."""
def __init__(self, key_dtype, value_dtype):
"""Construct a table initializer object.
Args:
key_dtype: Type of the table keys.
value_dtype: Type of the table values.
"""
self._key_dtype = key_dtype
self._value_dtype = value_dtype
@property
def key_dtype(self):
"""The expected table key dtype."""
return self._key_dtype
@property
def value_dtype(self):
"""The expected table value dtype."""
return self._value_dtype
def initialize(self, table):
"""Returns the table initialization op."""
pass
class VocabWeightHandler(base_layer_utils.TrackableWeightHandler):
"""Adds the vocabulary as a layer weight during serialization."""
def __init__(self, lookup_layer):
self._layer = lookup_layer
self._dtype = lookup_layer.vocabulary_dtype
self._distribute_strategy = tf.distribute.get_strategy()
@property
def num_tensors(self):
return 1
def set_weights(self, weights):
tokens = tf.convert_to_tensor(weights[0], self._dtype)
self._layer.lookup_table = self._layer._lookup_table_from_tokens(tokens) # pylint: disable=protected-access
def get_tensors(self):
# Just save the non-config part of the vocab (no special tokens).
tokens = self._layer.get_vocabulary(include_special_tokens=False)
tokens = tf.convert_to_tensor(tokens, self._dtype)
return [tokens]
class IndexLookup(base_preprocessing_layer.PreprocessingLayer):
"""Maps values from a vocabulary to integer indices.
This layer translates a set of arbitrary hashables into an integer output via
a table-based lookup, with optional out-of-vocabulary handling. This is the
basis layer for both IntegerLookup and StringLookup; it holds the common
logic but is not intended to be exported as part of the Keras API.
Args:
max_tokens: The maximum size of the vocabulary for this layer. If None,
there is no cap on the size of the vocabulary. Note that this size
includes the OOV and mask tokens.
num_oov_indices: The number of out-of-vocabulary tokens to use. If this
value is more than 1, OOV inputs are hashed to determine their OOV value.
If this value is 0, OOV inputs will cause an error when calling the layer.
mask_token: A token that represents masked inputs. When `output_mode` is
`"int"`, the token is included in vocabulary and mapped to index 0. In
other output modes, the token will not appear in the vocabulary and
instances of the mask token in the input will be dropped. If set to None,
no mask term will be added.
oov_token: Only used when `invert` is True. The token to return for OOV
indices.
vocabulary: Optional. Either an array or a string path to a text file. If
passing an array, can pass a tuple, list, 1D numpy array, or 1D tensor
containing the vocabulary terms. If passing a file path, the file should
contain one line per term in the vocabulary. If this argument is set,
there is no need to `adapt` the layer.
vocabulary_dtype: The dtype of the vocabulary terms. For example, `"int64"`
or `"string"`.
idf_weights: Only valid when `output_mode` is `"tf_idf"`. A tuple, list, 1D
numpy array, or 1D tensor of the same length as the vocabulary, containing
the floating point inverse document frequency weights, which will be
multiplied by per sample term counts for the final `tf_idf` weight. If the
`vocabulary` argument is set, and `output_mode` is `"tf_idf"`, this
argument must be supplied.
invert: Only valid when `output_mode` is `"int"`. If True, this layer will
map indices to vocabulary items instead of mapping vocabulary items to
indices. Defaults to False.
output_mode: Specification for the output of the layer. Defaults to `"int"`.
Values can be `"int"`, `"one_hot"`, `"multi_hot"`, `"count"`, or
`"tf_idf"` configuring the layer as follows:
- `"int"`: Return the raw integer indices of the input tokens.
- `"one_hot"`: Encodes each individual element in the input into an
array the same size as the vocabulary, containing a 1 at the element
index. If the last dimension is size 1, will encode on that dimension.
If the last dimension is not size 1, will append a new dimension for
the encoded output.
- `"multi_hot"`: Encodes each sample in the input into a single array
the same size as the vocabulary, containing a 1 for each vocabulary
term present in the sample. Treats the last dimension as the sample
dimension, if input shape is (..., sample_length), output shape will
be (..., num_tokens).
- `"count"`: As `"multi_hot"`, but the int array contains a count of the
number of times the token at that index appeared in the sample.
- `"tf_idf"`: As `"multi_hot"`, but the TF-IDF algorithm is applied to
find the value in each token slot.
pad_to_max_tokens: Only valid when `output_mode` is `"multi_hot"`,
`"count"`, or `"tf_idf"`. If True, the output will have its feature axis
padded to `max_tokens` even if the number of unique tokens in the
vocabulary is less than max_tokens, resulting in a tensor of shape
[batch_size, max_tokens] regardless of vocabulary size. Defaults to False.
sparse: Boolean. Only applicable to `"one_hot"`, `"multi_hot"`, `"count"`
and `"tf-idf"` output modes. If True, returns a `SparseTensor` instead of
a dense `Tensor`. Defaults to False.
"""
def __init__(self,
max_tokens,
num_oov_indices,
mask_token,
oov_token,
vocabulary_dtype,
vocabulary=None,
idf_weights=None,
invert=False,
output_mode="int",
sparse=False,
pad_to_max_tokens=False,
**kwargs):
# If max_tokens is set, the value must be greater than 1 - otherwise we
# are creating a 0-element vocab, which doesn't make sense.
if max_tokens is not None and max_tokens <= 1:
raise ValueError(f"If set, `max_tokens` must be greater than 1. "
f"Received: max_tokens={max_tokens}")
if pad_to_max_tokens and max_tokens is None:
raise ValueError(f"If pad_to_max_tokens is True, must set `max_tokens`. "
f"Received: max_tokens={max_tokens}")
if num_oov_indices < 0:
raise ValueError(f"`num_oov_indices` must be greater than or equal to 0. "
f"Received: num_oov_indices={num_oov_indices}")
# Support deprecated names for output_modes.
if output_mode == "binary":
output_mode = MULTI_HOT
if output_mode == "tf-idf":
output_mode = TF_IDF
# 'output_mode' must be one of (INT, ONE_HOT, MULTI_HOT, COUNT, TF_IDF)
layer_utils.validate_string_arg(
output_mode,
allowable_strings=(INT, ONE_HOT, MULTI_HOT, COUNT, TF_IDF),
layer_name=self.__class__.__name__,
arg_name="output_mode")
if invert and output_mode != INT:
raise ValueError(f"`output_mode` must be `'int'` when `invert` is true. "
f"Received: output_mode={output_mode}")
if sparse and output_mode == INT:
raise ValueError(f"`sparse` may only be true if `output_mode` is "
f"`'one_hot'`, `'multi_hot'`, `'count'` or `'tf_idf'`. "
f"Received: sparse={sparse} and "
f"output_mode={output_mode}")
if idf_weights is not None and output_mode != TF_IDF:
raise ValueError(f"`idf_weights` should only be set if `output_mode` is "
f"`'tf_idf'`. Received: idf_weights={idf_weights} and "
f"output_mode={output_mode}")
self.invert = invert
self.max_tokens = max_tokens
self.num_oov_indices = num_oov_indices
self.mask_token = mask_token
self.oov_token = oov_token
self.output_mode = output_mode
self.sparse = sparse
self.pad_to_max_tokens = pad_to_max_tokens
self.vocabulary_dtype = vocabulary_dtype
self._frozen_vocab_size = None
self.input_vocabulary = vocabulary
self.input_idf_weights = idf_weights
# VocabularySavedModelSaver will clear the config vocabulary to restore the
# lookup table ops directly. We persist this hidden option to record the
# fact that we have a non-adaptable layer with a manually set vocab.
self._has_input_vocabulary = kwargs.pop("has_input_vocabulary",
(vocabulary is not None))
# Drop deprecated config options.
kwargs.pop("vocabulary_size", None)
kwargs.pop("has_static_table", None)
# By default, output int64 when output_mode='int' and floats otherwise.
if "dtype" not in kwargs:
kwargs["dtype"] = tf.int64 if output_mode == INT else backend.floatx()
super().__init__(**kwargs)
# Check dtype only after base layer parses it; dtype parsing is complex.
if output_mode == INT and not tf.as_dtype(self.compute_dtype).is_integer:
input_dtype = kwargs["dtype"]
raise ValueError("When `output_mode='int'`, `dtype` should be an integer "
f"type. Received: dtype={input_dtype}")
if invert:
self._key_dtype = self.dtype if output_mode == INT else tf.int64
self._value_dtype = tf.as_dtype(self.vocabulary_dtype)
mask_key = 0
mask_value = mask_token
self._default_value = self.oov_token
else:
self._key_dtype = tf.as_dtype(self.vocabulary_dtype)
self._value_dtype = self.dtype if output_mode == INT else tf.int64
mask_key = mask_token
# Masks should map to 0 for int output and be dropped otherwise. Max ints
# will be dropped from the bincount op.
mask_value = 0 if self.output_mode == INT else self._value_dtype.max
if self.num_oov_indices == 0:
# If there are no OOV indices, we map OOV tokens to -1 and error out
# during call if we find a negative index.
self._default_value = -1
elif self.num_oov_indices == 1:
# If there is only one OOV index, we can set that index as the default
# value of the index_lookup table.
self._default_value = self._oov_start_index()
else:
# If we have multiple OOV values, we need to do a further hashing step;
# to make this easier, we set the OOV value to -1. (This lets us do a
# vectorized add and cast to boolean to determine locations where we
# need to do extra hashing.)
self._default_value = -1
if self.mask_token is not None:
self._mask_key = tf.convert_to_tensor(mask_key, self._key_dtype)
self._mask_value = tf.convert_to_tensor(mask_value, self._value_dtype)
if self.output_mode == TF_IDF:
self.idf_weights = tf.Variable(
[0] * self._token_start_index(),
shape=(None,),
dtype=self.compute_dtype,
trainable=False)
self.idf_weights_const = self.idf_weights.value()
if vocabulary is not None:
self.set_vocabulary(vocabulary, idf_weights)
else:
# When restoring from a keras SavedModel, the loading code will expect to
# find and restore a lookup_table attribute on the layer. This table needs
# to be uninitialized as a StaticHashTable cannot be initialized twice.
self.lookup_table = self._uninitialized_lookup_table()
# Only set up adapt state if we did not receive a vocab on construction.
if not self._has_input_vocabulary:
# Add a custom weight handler to return the layer's vocab as its weight.
self._add_trackable(VocabWeightHandler(self), False)
# Set adapt state.
self.token_counts = tf.lookup.experimental.MutableHashTable(
key_dtype=vocabulary_dtype, value_dtype=tf.int64, default_value=0)
if self.output_mode == TF_IDF:
self.token_document_counts = tf.lookup.experimental.MutableHashTable(
key_dtype=vocabulary_dtype, value_dtype=tf.int64, default_value=0)
self.num_documents = tf.Variable(0, dtype=tf.int64, trainable=False)
def compute_output_shape(self, input_shape):
if self.output_mode == INT:
return input_shape
depth = (
self.max_tokens if self.pad_to_max_tokens else self._frozen_vocab_size)
return tf.TensorShape([input_shape[0], depth])
def compute_output_signature(self, input_spec):
output_shape = self.compute_output_shape(input_spec.shape.as_list())
output_dtype = self.vocabulary_dtype if self.invert else self.compute_dtype
return tf.TensorSpec(shape=output_shape, dtype=output_dtype)
def get_vocabulary(self, include_special_tokens=True):
"""Returns the current vocabulary of the layer.
Args:
include_special_tokens: If True, the returned vocabulary will include mask
and OOV tokens, and a term's index in the vocabulary will equal the
term's index when calling the layer. If False, the returned vocabulary
will not include any mask or OOV tokens.
"""
# The lookup table data will not be sorted, so we will create an inverted
# lookup here, and use that to lookup a range of indices [0, vocab_size).
if self.lookup_table.size() == 0:
vocab, indices = [], []
else:
keys, values = self.lookup_table.export()
vocab, indices = (values, keys) if self.invert else (keys, values)
vocab, indices = (self._tensor_vocab_to_numpy(vocab), indices.numpy())
lookup = collections.defaultdict(lambda: self.oov_token,
zip(indices, vocab))
vocab = [lookup[x] for x in range(self.vocabulary_size())]
if self.mask_token is not None and self.output_mode == INT:
vocab[0] = self.mask_token
if not include_special_tokens:
vocab = vocab[self._token_start_index():]
return vocab
def vocabulary_size(self):
"""Gets the current size of the layer's vocabulary.
Returns:
The integer size of the vocabulary, including optional mask and OOV indices.
"""
return int(self.lookup_table.size().numpy()) + self._token_start_index()
def vocab_size(self):
logging.warning("vocab_size is deprecated, please use vocabulary_size.")
return self.vocabulary_size()
def get_config(self):
config = {
"invert": self.invert,
"max_tokens": self.max_tokens,
"num_oov_indices": self.num_oov_indices,
"oov_token": self.oov_token,
"mask_token": self.mask_token,
"output_mode": self.output_mode,
"pad_to_max_tokens": self.pad_to_max_tokens,
"vocabulary": utils.listify_tensors(self.input_vocabulary),
"vocabulary_dtype": self.vocabulary_dtype,
"idf_weights": utils.listify_tensors(self.input_idf_weights),
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def set_vocabulary(self, vocabulary, idf_weights=None):
"""Sets vocabulary (and optionally document frequency) data for this layer.
This method sets the vocabulary and idf weights for this layer directly,
instead of analyzing a dataset through `adapt`. It should be used whenever
the vocab (and optionally document frequency) information is already known.
If vocabulary data is already present in the layer, this method will replace
it.
Args:
vocabulary: Either an array or a string path to a text file. If passing an
array, can pass a tuple, list, 1D numpy array, or 1D tensor containing
the vocabulary terms. If passing a file path, the file should contain one
line per term in the vocabulary.
idf_weights: A tuple, list, 1D numpy array, or 1D tensor of inverse
document frequency weights with equal length to vocabulary. Must be set
if `output_mode` is `"tf_idf"`. Should not be set otherwise.
Raises:
ValueError: If there are too many inputs, the inputs do not match, or
input data is missing.
RuntimeError: If the vocabulary cannot be set when this function is
called. This happens in `"multi_hot"`, `"count"`, and `"tf_idf"`
modes, if `pad_to_max_tokens` is False and the layer itself has already
been called.
RuntimeError: If a tensor vocabulary is passed outside of eager execution.
"""
if self.output_mode != TF_IDF and idf_weights is not None:
raise ValueError(f"`idf_weights` should only be set if output_mode is "
f"`'tf_idf'`. Received: output_mode={self.output_mode} "
f"and idf_weights={idf_weights}")
if isinstance(vocabulary, str):
if not tf.io.gfile.exists(vocabulary):
raise ValueError(
"Vocabulary file {} does not exist.".format(vocabulary))
if self.output_mode == TF_IDF:
raise ValueError("output_mode `'tf_idf'` does not support loading a "
"vocabulary from file.")
self.lookup_table = self._lookup_table_from_file(vocabulary)
return
if not tf.executing_eagerly() and (tf.is_tensor(vocabulary) or
tf.is_tensor(idf_weights)):
raise RuntimeError(
"Cannot set a tensor vocabulary on {} layer {} when not executing "
"eagerly. Create this layer or call `set_vocabulary` outside of "
"any `tf.function`s and with eager execution enabled.".format(
self.__class__.__name__, self.name))
# TODO(mattdangerw): for better performance we should rewrite this entire
# function to operate on tensors and convert vocabulary to a tensor here.
if tf.is_tensor(vocabulary):
vocabulary = self._tensor_vocab_to_numpy(vocabulary)
elif isinstance(vocabulary, (list, tuple)):
vocabulary = np.array(vocabulary)
if tf.is_tensor(idf_weights):
idf_weights = idf_weights.numpy()
elif isinstance(idf_weights, (list, tuple)):
idf_weights = np.array(idf_weights)
if vocabulary.size == 0:
raise ValueError(
"Cannot set an empty vocabulary, you passed {}.".format(vocabulary))
oov_start = self._oov_start_index()
token_start = self._token_start_index()
special_tokens = (
[self.mask_token] * oov_start + [self.oov_token] * self.num_oov_indices)
found_special_tokens = np.array_equal(
special_tokens, vocabulary[:token_start])
if found_special_tokens:
tokens = vocabulary[token_start:]
else:
tokens = vocabulary
repeated_tokens = self._find_repeated_tokens(tokens)
if repeated_tokens:
raise ValueError("The passed vocabulary has at least one repeated "
"term. Please uniquify your dataset. The repeated terms "
"are {}".format(repeated_tokens))
if self.mask_token is not None and self.mask_token in tokens:
mask_index = np.argwhere(vocabulary == self.mask_token)[-1]
raise ValueError(
"Found reserved mask token at unexpected location in `vocabulary`. "
"Note that passed `vocabulary` does not need to include the OOV and "
"mask tokens. Either remove all mask and OOV tokens, or include them "
"only at the start of the vocabulary in precisely this order: "
f"{special_tokens}. Received: mask_token={self.mask_token} at "
f"vocabulary index {mask_index}")
# Only error out for oov_token when invert=True. When invert=False,
# oov_token is unused during lookup.
if self.oov_token is not None and self.invert and self.oov_token in tokens:
oov_index = np.argwhere(vocabulary == self.oov_token)[-1]
raise ValueError(
"Found reserved OOV token at unexpected location in `vocabulary`. "
"Note that passed `vocabulary` does not need to include the OOV and "
"mask tokens. Either remove all mask and OOV tokens, or include them "
"only at the start of the vocabulary in precisely this order: "
f"{special_tokens}. Received: oov_token={self.oov_token} at "
f"vocabulary index {oov_index}")
new_vocab_size = token_start + len(tokens)
if self.max_tokens is not None and (new_vocab_size > self.max_tokens):
raise ValueError(
"Attempted to set a vocabulary larger than the maximum vocab size. "
"Passed vocab size is {}, max vocab size is {}.".format(
new_vocab_size, self.max_tokens))
self.lookup_table = self._lookup_table_from_tokens(tokens)
if self.output_mode == TF_IDF:
if idf_weights is None:
raise ValueError("`idf_weights` must be set if output_mode is TF_IDF")
if len(vocabulary) != len(idf_weights):
raise ValueError("`idf_weights` must be the same length as vocabulary. "
"len(idf_weights) is {}, len(vocabulary) is {}".format(
len(vocabulary), len(idf_weights)))
idf_weights = self._convert_to_ndarray(idf_weights)
if idf_weights.ndim != 1:
raise ValueError(
"TF-IDF data must be a 1-index array, but received {}".format(
type(idf_weights)))
# If the passed vocabulary has no special tokens, we need to pad the front
# of idf_weights. We don't have real document frequencies for these tokens
# so we will use an average of all idf_weights passed in as a reasonable
# default.
if found_special_tokens:
front_padding = 0
front_padding_value = 0
else:
front_padding = token_start
front_padding_value = np.average(idf_weights)
# If pad_to_max_tokens is true, and max_tokens is greater than our total
# vocab size, we need to pad the back of idf_weights with zeros as well.
back_padding_value = 0
if self.pad_to_max_tokens and self.max_tokens is not None:
back_padding = self.max_tokens - front_padding - len(idf_weights)
else:
back_padding = 0
weights = np.pad(
idf_weights, (front_padding, back_padding),
"constant",
constant_values=(front_padding_value, back_padding_value))
weights = tf.convert_to_tensor(weights, dtype=backend.floatx())
self.idf_weights.assign(weights)
self.idf_weights_const = self.idf_weights.value()
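# Illustrative usage of `set_vocabulary` (a hedged sketch; the subclass name and
# the token/weight values below are hypothetical, chosen only to show the call
# shape for the IntegerLookup/StringLookup subclasses mentioned near the end of
# this class):
#   layer = StringLookup(output_mode="tf_idf")
#   layer.set_vocabulary(["earth", "wind", "fire"], idf_weights=[0.8, 0.6, 0.4])
# As documented above, `idf_weights` may only be passed when output_mode is
# "tf_idf", and a vocabulary file path cannot be used together with "tf_idf".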
def update_state(self, data):
if self._has_input_vocabulary:
raise ValueError(
"Cannot adapt {} layer after setting a static vocabulary via init "
"argument or `set_vocabulary`.".format(self.__class__.__name__))
data = self._standardize_inputs(data, self.vocabulary_dtype)
if data.shape.rank == 0:
data = tf.expand_dims(data, -1)
if data.shape.rank == 1:
data = tf.expand_dims(data, -1)
tokens, counts = self._num_tokens(data)
self.token_counts.insert(tokens, counts + self.token_counts.lookup(tokens))
if self.output_mode == TF_IDF:
# Dedupe each row of our dataset.
deduped_doc_data = tf.map_fn(lambda x: tf.unique(x)[0], data)
# Flatten and count tokens.
tokens, doc_counts = self._num_tokens(deduped_doc_data)
self.token_document_counts.insert(
tokens, doc_counts + self.token_document_counts.lookup(tokens))
if tf_utils.is_ragged(data):
self.num_documents.assign_add(data.nrows())
else:
self.num_documents.assign_add(tf.shape(data, out_type=tf.int64)[0])
def finalize_state(self):
if self._has_input_vocabulary or tf.equal(self.token_counts.size(), 0):
# Finalize idf_weights to a const for call even if we don't need to
# compute a new vocabulary.
if self.output_mode == TF_IDF:
self.idf_weights_const = self.idf_weights.value()
return
# Remove special tokens from our counts.
if self.mask_token is not None:
self.token_counts.remove(
tf.convert_to_tensor([self.mask_token], self.vocabulary_dtype))
if self.oov_token is not None:
self.token_counts.remove(
tf.convert_to_tensor([self.oov_token], self.vocabulary_dtype))
tokens, counts = self.token_counts.export()
# To keep vocabs deterministic, we sort our tokens by count and break ties
# by sorting the tokens themselves. Tensorflow has no ops for sorting
# strings, so we need to use numpy for the sort.
sorted_indices = np.lexsort((tokens.numpy(), counts.numpy()))[::-1]
token_start = self._token_start_index()
if self.max_tokens:
max_learned_tokens = self.max_tokens - token_start
sorted_indices = sorted_indices[:max_learned_tokens]
tokens = tf.gather(tokens, sorted_indices)
self.lookup_table = self._lookup_table_from_tokens(tokens)
if self.output_mode == TF_IDF:
token_document_counts = self.token_document_counts.lookup(tokens)
idf_weights = self._inverse_document_frequency(token_document_counts,
self.num_documents)
idf_weights = tf.cast(idf_weights, backend.floatx())
# Pad the front of idf_weights with the average idf weight for OOV tokens.
# We cannot compute the real idf weight of OOV in a single pass.
idf_weights = tf.pad(
idf_weights, [[self._token_start_index(), 0]],
constant_values=tf.reduce_mean(idf_weights))
self.idf_weights.assign(idf_weights)
self.idf_weights_const = self.idf_weights.value()
# We call this here to save memory, now that we've built our vocabulary, we
# don't want to keep every token we've seen in separate lookup tables.
self.reset_state()
def reset_state(self): # pylint: disable=method-hidden
if self._has_input_vocabulary:
return
self.token_counts.remove(self.token_counts.export()[0])
if self.output_mode == TF_IDF:
self.token_document_counts.remove(self.token_document_counts.export()[0])
self.num_documents.assign(0)
def call(self, inputs):
self._maybe_freeze_vocab_size()
inputs = self._standardize_inputs(inputs, self._key_dtype)
original_shape = inputs.shape
# Some ops will not handle scalar input, so uprank to rank 1.
if inputs.shape.rank == 0:
inputs = self._expand_dims(inputs, -1)
if tf_utils.is_sparse(inputs):
lookups = tf.SparseTensor(inputs.indices,
self._lookup_dense(inputs.values),
inputs.dense_shape)
elif tf_utils.is_ragged(inputs):
lookups = tf.ragged.map_flat_values(self._lookup_dense, inputs)
else:
lookups = self._lookup_dense(inputs)
if self.output_mode == INT:
# If we received a scalar input, downrank back to a scalar.
if original_shape.rank == 0:
lookups = tf.squeeze(lookups, -1)
return lookups
depth = (
self.max_tokens if self.pad_to_max_tokens else self._frozen_vocab_size)
idf_weights = self.idf_weights_const if self.output_mode == TF_IDF else None
return utils.encode_categorical_inputs(
lookups,
output_mode=self.output_mode,
depth=depth,
dtype=self.compute_dtype,
sparse=self.sparse,
idf_weights=idf_weights)
def _lookup_dense(self, inputs):
"""Lookup table values for a dense Tensor, handling masking and OOV."""
# When executing eagerly and tracing keras.Inputs, do not call lookup. This
# is critical for restoring SavedModel, which will first trace layer.call
# and then attempt to restore the table. We need the table to be uninitialized
# for the restore to work, but calling the table while uninitialized would error.
if tf.executing_eagerly() and backend.is_keras_tensor(inputs):
lookups = tf.zeros_like(inputs, dtype=self._value_dtype)
else:
lookups = self.lookup_table.lookup(inputs)
if self.mask_token is not None:
mask_locations = tf.equal(inputs, self._mask_key)
lookups = tf.where(mask_locations, self._mask_value, lookups)
if self.invert:
return lookups
lookup_checks = []
if self.num_oov_indices == 0:
# If we have zero oov indices, we need to check for oov inputs.
oov_indices = tf.where(tf.equal(lookups, -1))
oov_inputs = tf.gather_nd(inputs, oov_indices)
msg = tf.strings.format(
"When `num_oov_indices=0` all inputs should be in vocabulary, "
"found OOV values {}, consider setting `num_oov_indices=1`.",
(oov_inputs,))
assertion = tf.Assert(tf.equal(tf.size(oov_indices), 0), [msg])
lookup_checks.append(assertion)
elif self.num_oov_indices > 1:
# If we have multiple oov indices, we need a further hashing step.
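# For example (illustrative): with num_oov_indices=2, an out-of-vocabulary
# string is hashed into bucket 0 or 1 (integer keys use floormod instead) and
# shifted by _oov_start_index(), so OOV inputs are spread across the reserved
# OOV slots rather than all colliding on a single index.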
if self._key_dtype.is_integer:
oov_indices = tf.math.floormod(inputs, self.num_oov_indices)
else:
oov_indices = tf.strings.to_hash_bucket_fast(
inputs, num_buckets=self.num_oov_indices)
oov_indices = oov_indices + self._oov_start_index()
oov_locations = tf.equal(lookups, self._default_value)
lookups = tf.where(oov_locations, oov_indices, lookups)
with tf.control_dependencies(lookup_checks):
return tf.identity(lookups)
def _uninitialized_lookup_table(self):
with tf.init_scope():
initializer = NullInitializer(self._key_dtype, self._value_dtype)
return tf.lookup.StaticHashTable(initializer, self._default_value)
def _lookup_table_from_tokens(self, tokens):
with tf.init_scope():
token_start = self._token_start_index()
token_end = token_start + tf.size(tokens)
indices_dtype = self._key_dtype if self.invert else self._value_dtype
indices = tf.range(token_start, token_end, dtype=indices_dtype)
keys, values = (indices, tokens) if self.invert else (tokens, indices)
initializer = tf.lookup.KeyValueTensorInitializer(keys, values,
self._key_dtype,
self._value_dtype)
return tf.lookup.StaticHashTable(initializer, self._default_value)
def _lookup_table_from_file(self, filename):
if self.invert:
key_index = tf.lookup.TextFileIndex.LINE_NUMBER
value_index = tf.lookup.TextFileIndex.WHOLE_LINE
else:
key_index = tf.lookup.TextFileIndex.WHOLE_LINE
value_index = tf.lookup.TextFileIndex.LINE_NUMBER
with tf.init_scope():
initializer = tf.lookup.TextFileInitializer(
filename=filename,
key_dtype=self._key_dtype,
key_index=key_index,
value_dtype=self._value_dtype,
value_index=value_index,
value_index_offset=self._token_start_index())
return tf.lookup.StaticHashTable(initializer, self._default_value)
def _standardize_inputs(self, inputs, dtype):
if not isinstance(inputs, (tf.Tensor, tf.RaggedTensor, tf.SparseTensor)):
inputs = tf.convert_to_tensor(inputs, dtype)
elif inputs.dtype != dtype:
inputs = tf.cast(inputs, dtype)
return inputs
def _convert_to_ndarray(self, x):
return np.array(x) if isinstance(x, (list, tuple)) else x
def _expand_dims(self, inputs, axis):
if tf_utils.is_sparse(inputs):
return tf.sparse.expand_dims(inputs, axis)
else:
return tf.expand_dims(inputs, axis)
def _oov_start_index(self):
return 1 if self.mask_token is not None and self.output_mode == INT else 0
def _token_start_index(self):
return self._oov_start_index() + self.num_oov_indices
def _maybe_freeze_vocab_size(self):
if self.output_mode == INT or self.pad_to_max_tokens:
return
with tf.init_scope():
if not tf.executing_eagerly():
raise RuntimeError(
"When using `output_mode={}` eager execution must be enabled."
.format(self.output_mode))
new_vocab_size = self.vocabulary_size()
if new_vocab_size == self._token_start_index():
raise RuntimeError(
"When using `output_mode={}` and `pad_to_max_tokens=False`, you "
"must set the layer's vocabulary before calling it. Either pass "
"a `vocabulary` argument to the layer, or call `adapt` with some "
"sample data.".format(self.output_mode))
elif (self._frozen_vocab_size is not None and
new_vocab_size != self._frozen_vocab_size):
raise RuntimeError(
"When using `output_mode={}` and `pad_to_max_tokens=False`, the "
"vocabulary size cannot be changed after the layer is called. "
"Vocab size is {}, new vocab size is {}".format(
self.output_mode, self._frozen_vocab_size, new_vocab_size))
self._frozen_vocab_size = new_vocab_size
def _find_repeated_tokens(self, vocabulary):
"""Return all repeated tokens in a vocabulary."""
vocabulary_set = set(vocabulary)
if len(vocabulary) != len(vocabulary_set):
return [
item for item, count in collections.Counter(vocabulary).items()
if count > 1
]
else:
return []
def _num_tokens(self, data):
"""Count the number of tokens in a ragged, sparse or dense tensor."""
if tf_utils.is_sparse(data):
flat_values = data.values
elif tf_utils.is_ragged(data):
flat_values = data.flat_values
else:
flat_values = tf.reshape(data, [-1])
tokens, _, counts = tf.unique_with_counts(flat_values, out_idx=tf.int64)
return tokens, counts
def _inverse_document_frequency(self, token_document_counts, num_documents):
"""Computes the inverse-document-frequency (IDF) component of "tf_idf".
Uses the default weighting scheme described in
https://en.wikipedia.org/wiki/Tf%E2%80%93idf.
Args:
token_document_counts: An array of the # of documents each token appears
in.
num_documents: An int representing the total number of documents
Returns:
An array of "inverse document frequency" weights.
"""
return tf.math.log(1 + num_documents / (1 + token_document_counts))
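# Worked example (hypothetical counts): with num_documents=100 and a token that
# appears in 9 documents, the weight is log(1 + 100 / (1 + 9)) = log(11), about
# 2.398 (natural log); rarer tokens therefore receive larger weights.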
@property
def _trackable_saved_model_saver(self):
return layer_serialization.VocabularySavedModelSaver(self)
# Override points for IntegerLookup and StringLookup.
def _tensor_vocab_to_numpy(self, vocabulary):
"""Converts a tensor vocabulary to a numpy vocabulary."""
return vocabulary.numpy()
|
py | 1a3d4f9aad1a7e2bfd4ab27154bc12cb5fe83230 | import argparse
import getpass
import os
import sys
from .api import notebook_to_page
def main(argv=None):
"""Command line interface."""
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
description='Converts Jupyter Notebooks to Atlassian Confluence pages using nbconvert',
epilog="Collects credentials from the following locations:\n"
"1. CONFLUENCE_USERNAME and CONFLUENCE_PASSWORD environment variables\n"
"2. ~/.nbconflux file in the format username:password\n"
"3. User prompts")
parser.add_argument('notebook', type=str, help='Path to local notebook (ipynb)')
parser.add_argument('url', type=str, help='URL of Confluence page to update')
parser.add_argument('--exclude-toc', action='store_true', help='Do not generate a table of contents')
parser.add_argument('--exclude-ipynb', action='store_true', help='Do not attach the notebook to the page')
parser.add_argument('--exclude-style', action='store_true', help='Do not include the Jupyter base stylesheet')
parser.add_argument('--include-mathjax', action='store_true', help='Enable MathJax on the page')
parser.add_argument('--extra-labels', nargs='+', type=str, help='Additional labels to add to the page')
args = parser.parse_args(argv or sys.argv[1:])
username = os.getenv('CONFLUENCE_USERNAME')
password = os.getenv('CONFLUENCE_PASSWORD')
cfg = os.path.expanduser('~/.nbconflux')
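# Per the epilog above, this fallback file holds a single "username:password"
# line, e.g. (hypothetical credentials): jane.doe:s3cret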
# Prefer credentials in environment variables
if username and password:
print('Using credentials for {} from environment variables'.format(username))
elif os.path.isfile(cfg):
# Fallback on credentials in a well known file location
with open(cfg) as f:
segs = f.read().strip().split(':', 1)
if len(segs) == 2:
username = segs[0]
password = segs[1]
print('Using credentials for {} from configuration file'.format(username))
# Prompt the user for missing credentials
if username is None:
current = getpass.getuser()
current = current[2:] if current.startswith('p-') else current
username = input('Confluence username ({}): '.format(current))
# Use the current username if the user doesn't enter anything
if not username.strip():
username = current
if password is None:
password = getpass.getpass('Confluence password: ')
notebook_path_css = os.getenv('NBCONFLUX_NOTEBOOK_CSS', None)
notebook_to_page(args.notebook, args.url, username, password,
generate_toc=not args.exclude_toc, attach_ipynb=not args.exclude_ipynb,
enable_style=not args.exclude_style, enable_mathjax=args.include_mathjax,
extra_labels=args.extra_labels, notebook_css=notebook_path_css)
if __name__ == '__main__':
main() |
py | 1a3d4fa983137e14c85d3689ccb98a3cfa4a10cc | import unittest
import os
import sys
import json
from lib.commander import find_process, kill_process
from lib.multipass import _aliases, patch_compose
from lib.config import _reset, HOME
from tests.test_helpers.common import backup_config, restore_config
import subprocess
from sarge import run as sarge_run, Capture
currentdir = os.path.dirname(os.path.realpath(__file__))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
vm_name = "feature-test"
multipass_bin_path = f"{HOME}/Library/Application Support/multipass/bin"
docker_cmd = f"{multipass_bin_path}/docker"
docker_compose_cmd = f"{multipass_bin_path}/docker-compose"
def run(cmd):
process = sarge_run(" ".join(cmd), shell=True,
stdout=Capture(), stderr=Capture())
return process.stdout.read().decode("utf-8"), process.stderr.read().decode("utf-8")
def list_vm():
vm, err = run(["multipass", "list", "--format", "json"])
return json.loads(vm)
def find_vm(name):
list = list_vm()["list"]
for vm in list:
if name == vm["name"]:
return True
def vm_info():
vm, err = run(["multipass", "info", vm_name, "--format", "json"])
return json.loads(vm)
def kill_background_listen():
pids = find_process("background listen")
for pid in pids:
kill_process(int(pid))
def restore_alias():
name = "dockipass"
if find_vm(name):
for alias in _aliases:
run(["multipass", "alias", f"{name}:{alias}", alias])
patch_compose()
def remove_alias():
name = "dockipass"
if find_vm(name):
for alias in _aliases:
run(["multipass", "unalias", alias])
class Feature_Test_Dockipass(unittest.TestCase):
@classmethod
def tearDownClass(self):
restore_config()
kill_background_listen()
run(["multipass", "delete", vm_name])
run(["multipass", "purge"])
restore_alias()
subprocess.run(["./dockipass.py", "listen", "start"])
@classmethod
def setUpClass(self):
backup_config()
remove_alias()
_reset()
def test_1launch(self):
launch_cmd = ["./dockipass.py", "launch", vm_name]
run(launch_cmd)
info = vm_info()
self.assertEqual(len(info["errors"]), 0)
self.assertIn(vm_name, info["info"])
info = info["info"][vm_name]
self.assertEqual(info["state"], "Running")
self.assertEqual(info["image_release"], "20.04 LTS")
self.assertIn("/Users", info["mounts"])
# Aliases should have been setup for docker and docker-compose
aliases = os.listdir(multipass_bin_path)
self.assertListEqual(aliases, ["docker", "docker-compose"])
# Docker compose alias should have file added to it
with open(docker_compose_cmd, "r") as file:
output = file.read()
self.assertIn(
'"/Library/Application Support/com.canonical.multipass/bin/multipass" docker-compose -- $arguments', output.split("\n"))
# Check for bind
pids = find_process("background listen")
self.assertEqual(len(pids), 1)
def test_2stop(self):
run(["./dockipass.py", "stop"])
# The container has been stopped
info = vm_info()["info"][vm_name]
self.assertEqual(info["state"], "Stopped")
# Check for bind removed
pids = find_process("background listen")
self.assertEqual(len(pids), 0)
def test_3start(self):
run(["./dockipass.py", "start"])
# The container is running
info = vm_info()["info"][vm_name]
self.assertEqual(info["state"], "Running")
# Check for bind
pids = find_process("background listen")
self.assertEqual(len(pids), 1)
def test_4restart(self):
run(["./dockipass.py", "restart"])
# The container is running
info = vm_info()["info"][vm_name]
self.assertEqual(info["state"], "Running")
# Check for bind
pids = find_process("background listen")
self.assertEqual(len(pids), 1)
def test_5listen(self):
kill_background_listen()
pids = find_process("background listen")
self.assertEqual(len(pids), 0)
subprocess.run([docker_cmd, "run", "--name", "testcontainer", "-p",
"8081:80", "-d", "nginxdemos/hello"], stdout=subprocess.PIPE)
subprocess.run(["./dockipass.py", "listen", "start"])
pids = find_process("socat")
self.assertEqual(len(pids), 1)
def test_6dockercompose(self):
process = subprocess.run(
[docker_compose_cmd, "ps"], cwd=f"{parentdir}/data", stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.assertEqual(process.stderr.decode("utf-8"), "")
self.assertNotEqual(process.stdout.decode("utf-8"), "")
def test_7dockerbuildx(self):
process = subprocess.run(
[docker_cmd, "buildx"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.assertEqual(process.stderr.decode("utf-8"), "")
self.assertIn("Usage: docker buildx [OPTIONS] COMMAND", process.stdout.decode(
"utf-8").split("\n"))
def test_8status(self):
status, err = run(["./dockipass.py", "status"])
self.assertEqual(err, "")
self.assertIn("VM Name:\t feature-test\n", status)
self.assertIn("State:\t\t Running\n", status)
self.assertIn("Mounts setup:\t True\n", status)
self.assertIn("Aliases setup:\t True\n", status)
self.assertIn("Listening:\t True\n", status)
self.assertIn("Forwarded:\t 8081\n", status)
def test_9delete(self):
run(["./dockipass.py", "delete"])
# Stopped background process
pids = find_process("background listen")
self.assertEqual(len(pids), 0)
# Stopped all socat processes
pids = find_process("socat")
self.assertEqual(len(pids), 0)
# VM no longer exists
list = list_vm()["list"]
for vm in list:
self.assertNotIn(vm_name, vm["name"])
|
py | 1a3d5069179633101b8852969fba83bf0187268c | """
1249. Minimum Remove to Make Valid Parentheses
Given a string s of '(' , ')' and lowercase English characters.
Your task is to remove the minimum number of parentheses ( '(' or ')', in any positions ) so that the resulting parentheses string is valid and return any valid string.
Formally, a parentheses string is valid if and only if:
It is the empty string, contains only lowercase characters, or
It can be written as AB (A concatenated with B), where A and B are valid strings, or
It can be written as (A), where A is a valid string.
Example 1:
Input: s = "lee(t(c)o)de)"
Output: "lee(t(c)o)de"
Explanation: "lee(t(co)de)" , "lee(t(c)ode)" would also be accepted.
Example 2:
Input: s = "a)b(c)d"
Output: "ab(c)d"
Example 3:
Input: s = "))(("
Output: ""
Explanation: An empty string is also valid.
Example 4:
Input: s = "(a(b(c)d)"
Output: "a(b(c)d)"
Constraints:
1 <= s.length <= 10^5
s[i] is one of '(' , ')' and lowercase English letters.
"""
class Solution:
def minRemoveToMakeValid(self, s: str) -> str:
stack, i, invalids = [], 0, set()
while i < len(s):
if s[i] in ['(', ')']:
invalids.add(i)
if s[i] == '(':
stack.append(i)
else:
if stack:
invalids.remove(stack.pop())
invalids.remove(i)
i += 1
res = []
for i in range(len(s)):
if i not in invalids:
res.append(s[i])
return ''.join(res)
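# Quick sanity check (applies to whichever `Solution` definition is in scope;
# expected outputs follow the examples in the problem statement above, and any
# minimal valid removal is accepted):
#   Solution().minRemoveToMakeValid("lee(t(c)o)de)")  # -> "lee(t(c)o)de"
#   Solution().minRemoveToMakeValid("))((")           # -> ""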
class Solution:
def minRemoveToMakeValid(self, s: str) -> str:
right = s.count(')')
left = 0
result = ""
for item in s:
if item == '(':
if right == 0:
continue
else:
left += 1
right -= 1
if item == ')':
if left == 0:
right -= 1
continue
else:
left -= 1
result += item
return result |
py | 1a3d52b64c0878dac1f403a055685558499ce084 | #!/usr/bin/env python
import sys, time
import numpy as np
import pickle as pickle
from pandas import DataFrame
from pandas import read_pickle
from pandas import get_dummies
import statsmodels.formula.api as sm
from sklearn.externals import joblib
from djeval import *
def fix_colname(cn):
return cn.translate(None, ' ()[],')
msg("Hi, reading yy_df.")
yy_df = read_pickle(sys.argv[1])
colnames = list(yy_df.columns.values)
colnames = [fix_colname(cn) for cn in colnames]
yy_df.columns = colnames
msg("Getting subset ready.")
# TODO save the dummies along with yy_df
categorical_features = ['opening_feature']
dummies = get_dummies(yy_df[categorical_features])
elorange_cols = [x for x in list(yy_df.columns.values) if x.startswith('elochunk_')]
elorange_cols.extend([x for x in list(yy_df.columns.values) if x.startswith('opponent_elochunk_')])
# TODO save the moveelo_features along with yy_df
moveelo_features = [("moveelo_" + x) for x in ['mean', 'median', '25', '10', 'min', 'max', 'stdev']]
new_depth_cols = ['mean_num_bestmoves', 'mean_num_bestmove_changes', 'mean_bestmove_depths_agreeing', 'mean_deepest_change', 'mean_deepest_change_ratio']
stdev_cols = ['stdeverror', 'opponent_stdeverror', 'stdevpos']
train = yy_df[yy_df.meanerror.notnull() & yy_df.elo.notnull()]
# let's look at the in-sample fit first
validating = False
if validating:
train = train[train['gamenum'] % 2 == 0]
formula_rhs = "side + nmerror + gameoutcome + drawn_game + gamelength + meanecho"
formula_rhs = formula_rhs + " + opponent_nmerror + opponent_noblunders"
formula_rhs = formula_rhs + " + min_nmerror + early_lead"
formula_rhs = formula_rhs + " + q_error_one + q_error_two"
formula_rhs = formula_rhs + " + opponent_q_error_one"
formula_rhs = formula_rhs + " + mean_depth_clipped + mean_seldepth"
formula_rhs = formula_rhs + " + mean_depths_ar + mean_deepest_ar"
formula_rhs = formula_rhs + " + opponent_mean_depths_ar + opponent_mean_deepest_ar"
formula_rhs = formula_rhs + " + pct_sanemoves"
formula_rhs = formula_rhs + " + " + " + ".join(dummies.columns.values)
formula_rhs = formula_rhs + " + moveelo_weighted"
formula_rhs = formula_rhs + " + " + " + ".join(new_depth_cols)
formula_rhs = formula_rhs + " + " + " + ".join(stdev_cols)
# do these really not help?!
formula_rhs = formula_rhs + " + " + " + ".join(elorange_cols)
# Never mind these, they didn't help much
#formula_rhs = formula_rhs + " + " + " + ".join(moveelo_features)
formula = "elo ~ " + formula_rhs
msg("Fitting!")
ols = sm.ols(formula=formula, data=train).fit()
print(ols.summary())
msg("Making predictions for all playergames")
yy_df['ols_prediction'] = ols.predict(yy_df)
yy_df['ols_error'] = (yy_df['ols_prediction'] - yy_df['elo']).abs()
yy_df['training'] = (yy_df['gamenum'] % 2 == 0)
insample_scores = yy_df.groupby('training')['ols_error'].agg({'mean' : np.mean, 'median' : np.median, 'stdev': np.std})
print(insample_scores)
msg("Writing yy_df back out with ols predictions inside")
yy_df.to_pickle(sys.argv[1])
|
py | 1a3d5344c57e9c94f4f4976c3d6db8b8bf0cac69 | print('='*30)
print(' ANALYZING VALUES')
print('='*30)
op = 0
while op != 5:
n1 = float(input('Enter the 1st value: '))
n2 = float(input('Enter the 2nd value: '))
op = int(input('''[1] ADD \n[2] MULTIPLY \n[3] LARGEST \n[4] NEW NUMBERS \n[5] EXIT \nDesired option: '''))
if op == 4:
while op == 4:
n1 = float(input('Enter the 1st value: '))
n2 = float(input('Enter the 2nd value: '))
op = int(input('Choose a new option: '))
print('')
while op < 1 or op > 5:
op = int(input('Choose a valid option: '))
if op == 1:
print(f'The sum of {n1} and {n2} is {n1+n2}')
if op == 2:
print(f'The product of {n1} and {n2} is {n1 * n2}')
if op == 3:
if n1 == n2:
print('The numbers entered are equal')
else:
if n1 > n2:
print(f'The larger number is {n1}')
else:
print(f'The larger number is {n2}')
print('')
|
py | 1a3d53742f5b1634209ae17a12b33609e92b7359 | class BuyProductException(Exception):
""" Signal the problem with buying product """
pass
class WrongProductIdException(BuyProductException):
""" Signal that choosen product id is wrong """
pass
class NoProductIdException(BuyProductException):
""" Signal that product id was not choosen """
pass
class ProductNotAvailableException(BuyProductException):
""" Signal the product is not available """
pass
class NotEnoughMoneyException(BuyProductException):
""" Signal that there is not enough money added to buy product """
pass
class CantGiveTheChangeException(Exception):
""" Signal that automat cant give the change """
pass
class WrongNominalException(Exception):
""" Signal that you cant create coin with such nominal """
pass |
py | 1a3d53a701ec0fa2d89959f885bb788a87582a87 | from .base_entity import BaseEntity
from psutil import net_io_counters, net_connections
class Network(BaseEntity):
""" A simple object to return network usage """
@property
def get_usage(self):
return self.__get_net_usage()
def __get_net_usage(self):
n = net_io_counters()
nc = net_connections(kind='all')
ret = {
"bytes_sent": n.bytes_sent,
"bytes_recv": n.bytes_recv,
"packets_sent": n.packets_sent,
"packets_recv": n.packets_recv,
"dropin": n.dropin,
"dropout": n.dropout,
"errin": n.errin,
"errout": n.errout,
"num_connections": len(nc),
}
return ret
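# Example of the returned mapping (values are illustrative; how BaseEntity is
# constructed is not shown in this file):
#   Network().get_usage
#   -> {"bytes_sent": 123456, "bytes_recv": 654321, "packets_sent": 1000,
#       "packets_recv": 1200, "dropin": 0, "dropout": 0, "errin": 0,
#       "errout": 0, "num_connections": 42}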
|
py | 1a3d53e8bad019648d5e5932dc351876c08d9eff | from django.db import models
from core import models as core_models
class Review(core_models.TimeStampedModel):
""" Review Model Defination """
review = models.TextField()
accuracy = models.IntegerField()
communication = models.IntegerField()
location = models.IntegerField()
cleanliness = models.IntegerField()
check_in = models.IntegerField()
value = models.IntegerField()
user = models.ForeignKey("users.User", on_delete=models.CASCADE)
room = models.ForeignKey("rooms.Room", on_delete=models.CASCADE)
def __str__(self):
# return self.room.name
return self.review
|
py | 1a3d53fa4c76ae58abb1322dab6da19d18b4ba98 | from datadog import initialize, api
from datadog.api.constants import CheckStatus
options = {'api_key': '<YOUR_API_KEY>',
'app_key': '<YOUR_APP_KEY>'}
initialize(**options)
check = 'app.ok'
host = 'app1'
status = CheckStatus.OK # equals 0
api.ServiceCheck.check(check=check, host_name=host, status=status,
message='Response: 200 OK')
|
py | 1a3d553c7de78101f351842b62e9c85a295e2f6f | #!/usr/bin/env python
#
# tournament.py -- implementation of a Swiss-system tournament
#
# Allows recording of tied matches.
# Matches opponents of relative standings.
# Pairs players in unique matches.
#
# TODO: implement match byes
# TODO: implement pairing for odd number of players
# TODO: implement Opponent match win tie breaker algorithm
# TODO: implement tournament tracking
#
import psycopg2
def connect():
"""Connect to the PostgreSQL database. Returns a database connection."""
return psycopg2.connect("dbname=tournament")
def deleteMatches():
"""Remove all the match records from the database."""
dbh = connect()
sth = dbh.cursor()
sth.execute("TRUNCATE TABLE matches")
dbh.commit()
dbh.close()
def deletePlayers():
"""Remove all the player records from the database."""
dbh = connect()
sth = dbh.cursor()
sth.execute("TRUNCATE TABLE players CASCADE")
dbh.commit()
dbh.close()
def countPlayers():
"""Returns the number of players currently registered."""
dbh = connect()
sth = dbh.cursor()
sth.execute("SELECT count(players) FROM players")
result = sth.fetchone()
dbh.commit()
dbh.close()
return result[0]
def registerPlayer(name):
"""Adds a player to the tournament database.
The database assigns a unique serial id number for the player. (This
should be handled by your SQL database schema, not in your Python code.)
Args:
name: the player's full name (need not be unique).
"""
dbh = connect()
sth = dbh.cursor()
query = "INSERT INTO players (name) VALUES (%s)"
values = [name]
sth.execute(query, values)
dbh.commit()
dbh.close()
def playerStandings():
"""Returns a list of the players and their win records, sorted by wins.
The first entry in the list should be the player in first place, or a player
tied for first place if there is currently a tie.
Returns:
A list of tuples, each of which contains (id, name, wins, matches):
id: the player's unique id (assigned by the database)
name: the player's full name (as registered)
wins: the number of matches the player has won
matches: the number of matches the player has played
"""
dbh = connect()
sth = dbh.cursor()
query = '''
SELECT * FROM standings
'''
sth.execute(query)
result = sth.fetchall()
dbh.commit()
dbh.close()
return result
def reportMatch(winner, challenger, tied=None):
"""Records the outcome of a single match between two players.
Args:
winner: the id number of the player who won
challenger: the id number of the player who lost
"""
dbh = connect()
sth = dbh.cursor()
query = "INSERT INTO matches (winner_id, challenger_id, tie) VALUES (%s, %s, %s)"
values = [winner, challenger, tied]
sth.execute(query, values)
dbh.commit()
dbh.close()
def getPlayerOpponents():
"""Returns list of opponents for all players
Returns:
A list of tuples, each of which contains (id, list)
id: player's unique id
list: list of opponent id
"""
dbh = connect()
sth = dbh.cursor()
query = '''
SELECT
opponents.id,
array_agg(challenger_id) AS challenger_id_list
FROM opponents
GROUP BY opponents.id
'''
sth.execute(query)
result = sth.fetchall()
dbh.commit()
dbh.close()
return result
def getStandingGroups():
"""Returns a list of standings grouped by win, tie, loss
Assuming standings are provided ordered by (win, match, tie), each standings
group contains players with equivalent standings
Returns:
A list of sets of tuples, each of which contains (id, name)
id: player's unique ID
name: player's name
"""
standings = playerStandings()
standings_groups = []
group = set()
# set initial standings
(win, match, tie) = standings[0][2:5]
for player in standings:
# test if player standings does not match current standings
if ((win, match, tie) != player[2:5]):
# append current player group to the standings group
standings_groups.append(group.copy())
# set new standings
(win, match, tie) = player[2:5]
# reset group
group.clear()
# add (player id, player name) to group of players
group.add(player[0:2])
# add last group to standings_groups
standings_groups.append(group.copy())
return standings_groups
def swissPairings():
"""Returns a list of pairs of players for the next round of a match.
Assuming that there are an even number of players registered, each player
appears exactly once in the pairings. Each player is paired with another
player with an equal or nearly-equal win record, that is, a player adjacent
to him or her in the standings.
Returns:
A list of tuples, each of which contains (id1, name1, id2, name2)
id1: the first player's unique id
name1: the first player's name
id2: the second player's unique id
name2: the second player's name
"""
# reduce opponents to a dictionary of player_id and the set of their
# previously played opponent_id
opponents = {}
for (id, cid_list) in getPlayerOpponents():
opponents[id] = set(cid_list)
standings_groups = getStandingGroups()
pending_players = set()
pending_players.update(set(standings_groups.pop(0)))
pairs = []
player = None
challenger = None
while len(pending_players) > 0:
player = pending_players.pop()
# if no more pending players add players from next group
if len(pending_players) == 0 and len(standings_groups) > 0:
pending_players.update(set(standings_groups.pop(0)))
challenger = pending_players.pop()
if len(pending_players) == 0 and len(standings_groups) > 0:
pending_players.update(set(standings_groups.pop(0)))
if challenger[0] in opponents[player[0]]:
new_challenger = pending_players.pop()
pending_players.add(challenger)
challenger = new_challenger
pairs.append((player[0], player[1], challenger[0], challenger[1]))
return pairs
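# Illustrative result (hypothetical ids and names): with four registered players
# and no matches recorded yet, swissPairings() could return
#   [(1, 'Ada', 2, 'Grace'), (3, 'Alan', 4, 'Edsger')]
# pairing players adjacent in the standings, as described in the docstring.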
|
py | 1a3d5563f0435b7a32fb0a7215dac8794470c63a | #
# ARCADIA Mocks
#
# Copyright (C) 2017 SINTEF Digital
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the MIT license. See the LICENSE file for details.
#
from requests import Request, Session
from requests.exceptions import ConnectionError
from time import sleep
from arcadiamock.adapters import XMLParser, XMLPrinter
class Client(object):
DEFAULT_HOST_NAME = "localhost"
DEFAULT_PORT = 5000
def __init__(self, hostname=None, port=None):
self._hostname = hostname or self.DEFAULT_HOST_NAME
self._port = port or self.DEFAULT_PORT
self._headers = {
"accept": "application/xml"
}
self._parse = XMLParser()
self._formatter = XMLPrinter()
def register_service_graph(self, service_graph):
xml = service_graph.accept(self._formatter)
response = self._fetch(resource=self._url_of("/register"),
method="POST",
payload=xml.as_text())
response.raise_for_status()
def component_with_CNID(self, cnid):
resource = "/components/{0}".format(cnid)
url = self._url_of(resource)
response = self._fetch(url)
return self._parse.component_from(response.text)
def service_graphs(self):
response = self._fetch(self._url_of("/service_graphs"))
return self._parse.service_graphs_from(response.text)
def components(self):
response = self._fetch(self._url_of("/components"))
return self._parse.components_from(response.text)
def register_component(self, component):
xml = component.accept(self._formatter)
response = self._fetch(resource=self._url_of("/register_component"),
method="POST",
payload=xml.as_text())
response.raise_for_status()
def about(self):
response = self._fetch(self._url_of("/about"))
return self._parse.about_from(response.text)
def _url_of(self, page):
return self._base_url + page
@property
def _base_url(self):
URL = "http://{hostname}:{port}"
return URL.format(
hostname=self._hostname,
port=self._port)
def _fetch(self, resource, method="GET", payload=None):
attempt = self.MAX_ATTEMPTS
while attempt >= 0:
try:
attempt -= 1
request = Request(method, resource, headers=self._headers, data=payload)
return Session().send(request.prepare())
except ConnectionError:
sleep(self.DELAY)
message = self.ERROR_CANNOT_GET_PAGE.format(page=resource,
attempts=self.MAX_ATTEMPTS)
raise RuntimeError(message)
MAX_ATTEMPTS = 3
DELAY = 5
ERROR_CANNOT_GET_PAGE = "Cannot access '{page}' ({attempts} attempts)."
|
py | 1a3d559adcc51b7949394d94bf035c4219239054 | """
The MIT License (MIT)
Copyright (c) 2020-Current Skelmis
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from antispam.abc import Cache
class RedisCache(Cache):
"""Not implemented lol"""
def __init__(self, handler): # pragma: no cover
self.handler = handler
raise NotImplementedError
|
py | 1a3d55fae997e8de04e5017155c134b7b3415897 | # ---------------------------
# Alexander Camuto, Matthew Willetts -- 2019
# The University of Oxford, The Alan Turing Institute
# contact: [email protected], [email protected]
# ---------------------------
"""Functions to preprocess SVHN data
"""
import numpy as np
import tensorflow as tf
import os
import sys
import shutil
import zipfile
import scipy.misc
import scipy.io as sio
import pickle as Pkl
import gzip, tarfile
import re, string, fnmatch
import urllib.request
def data_generator_train(x, y, batch_size):
"""
Generates an infinite sequence of data
Args:
x: training data
y: training labels
batch_size: batch size to yield
Yields:
tuples of x,y pairs each of size batch_size
"""
num = x.shape[0]
while True:
# --- Randomly select batch_size elements from the training set
idx = np.random.choice(list(range(num)), batch_size, replace=False)
# idx = np.random.randint(0, num, batch_size)
x_batch = x[idx]
y_batch = y[idx]
# --- Now yield
yield (x_batch, y_batch)
def data_generator_eval(x, y, batch_size):
"""
Generates an infinite sequence of test data
Args:
x: test data
y: test labels
batch_size: batch size to yield
Yields:
tuples of x,y pairs each of size batch_size
"""
num = x.shape[0]
n_batches = int(num / batch_size)
for i in range(n_batches):
idx = list(range(i * batch_size, (i + 1) * batch_size))
x_batch = x[idx]
y_batch = y[idx]
yield (x_batch, y_batch)
def build_input_fns(params, extra=False):
"""Builds an Iterator switching between train and heldout data."""
x_train, y_train, x_test, y_test = load_svhn(dataset=params["data_dir"],
extra=extra)
#
# x_train, y_train = x_train[:params["B"]], y_train[:params[
# "batch_size"]]
def gen_train():
return data_generator_train(x_train, y_train, params["B"])
def gen_eval():
return data_generator_eval(x_test, y_test, params["B"])
def train_input_fn():
# Build an iterator over training batches.
dataset = tf.data.Dataset.from_generator(
gen_train, (tf.float32, tf.int32),
(tf.TensorShape([params["B"], 32, 32, 3
]), tf.TensorShape([params["B"], 10])))
dataset = dataset.prefetch(1)
return dataset.make_one_shot_iterator().get_next()
def eval_input_fn():
# Build an iterator over training batches.
dataset = tf.data.Dataset.from_generator(
gen_eval, (tf.float32, tf.int32),
(tf.TensorShape([params["B"], 32, 32, 3
]), tf.TensorShape([params["B"], 10])))
dataset = dataset.prefetch(1)
return dataset.make_one_shot_iterator().get_next()
# Build an iterator over the heldout set.
return train_input_fn, eval_input_fn, x_train.shape[0]
def _get_datafolder_path():
full_path = os.path.abspath('.')
path = full_path + '/data'
return path
def _unpickle(f):
fo = open(f, 'rb')
d = Pkl.load(fo)
fo.close()
return d
def load_svhn(dataset=_get_datafolder_path() + '/svhn/',
normalize=True,
dequantify=True,
extra=False):
'''
:param dataset:
:param normalize:
:param dequantify: Add uniform noise to dequantify the data following
Uria et al. 2013
"RNADE: The real-valued neural autoregressive density-estimator"
:param extra: include extra svhn samples
:return:
'''
if not os.path.isfile(dataset + 'svhn_train.pkl'):
datasetfolder = os.path.dirname(dataset + 'svhn_train.pkl')
if not os.path.exists(datasetfolder):
os.makedirs(datasetfolder)
_download_svhn(dataset, extra=False)
with open(dataset + 'svhn_train.pkl', 'rb') as f:
train_x, train_y = Pkl.load(f)
with open(dataset + 'svhn_test.pkl', 'rb') as f:
test_x, test_y = Pkl.load(f)
if extra:
if not os.path.isfile(dataset + 'svhn_extra.pkl'):
datasetfolder = os.path.dirname(dataset + 'svhn_train.pkl')
if not os.path.exists(datasetfolder):
os.makedirs(datasetfolder)
_download_svhn(dataset, extra=True)
with open(dataset + 'svhn_extra.pkl', 'rb') as f:
extra_x, extra_y = Pkl.load(f)
train_x = np.concatenate([train_x, extra_x])
train_y = np.concatenate([train_y, extra_y])
train_x = train_x.astype('float32')
test_x = test_x.astype('float32')
train_y = tf.keras.utils.to_categorical(train_y.astype('int32'), 10)
test_y = tf.keras.utils.to_categorical(test_y.astype('int32'), 10)
if dequantify:
train_x += np.random.uniform(0, 1,
size=train_x.shape).astype('float32')
test_x += np.random.uniform(0, 1, size=test_x.shape).astype('float32')
if normalize:
normalizer = train_x.max().astype('float32')
train_x = train_x / normalizer
test_x = test_x / normalizer
return train_x, train_y, test_x, test_y
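# Sketch of typical usage (the dataset path is illustrative):
#   x_tr, y_tr, x_te, y_te = load_svhn(dataset='./data/svhn/', extra=False)
# With the defaults above, images come back as float32 arrays of shape
# (N, 32, 32, 3) scaled to roughly [0, 1] (after dequantify/normalize) and
# labels as one-hot vectors of length 10.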
def _download_svhn(dataset, extra):
"""
Download the SVHN dataset
"""
from scipy.io import loadmat
print('Downloading data from http://ufldl.stanford.edu/housenumbers/, ' \
'this may take a while...')
if extra:
print("Downloading extra data...")
urllib.request.urlretrieve(
'http://ufldl.stanford.edu/housenumbers/extra_32x32.mat',
dataset + 'extra_32x32.mat')
extra = loadmat(dataset + 'extra_32x32.mat')
extra_x = extra['X'].swapaxes(2, 3).swapaxes(1, 2).swapaxes(0, 1)
extra_y = extra['y'].reshape((-1)) - 1
print("Saving extra data")
with open(dataset + 'svhn_extra.pkl', 'wb') as f:
Pkl.dump([extra_x, extra_y], f, protocol=Pkl.HIGHEST_PROTOCOL)
os.remove(dataset + 'extra_32x32.mat')
else:
print("Downloading train data...")
urllib.request.urlretrieve(
'http://ufldl.stanford.edu/housenumbers/train_32x32.mat',
dataset + 'train_32x32.mat')
print("Downloading test data...")
urllib.request.urlretrieve(
'http://ufldl.stanford.edu/housenumbers/test_32x32.mat',
dataset + 'test_32x32.mat')
train = loadmat(dataset + 'train_32x32.mat')
train_x = train['X'].swapaxes(2, 3).swapaxes(1, 2).swapaxes(0, 1)
train_y = train['y'].reshape((-1)) - 1
test = loadmat(dataset + 'test_32x32.mat')
test_x = test['X'].swapaxes(2, 3).swapaxes(1, 2).swapaxes(0, 1)
test_y = test['y'].reshape((-1)) - 1
print("Saving train data")
with open(dataset + 'svhn_train.pkl', 'wb') as f:
Pkl.dump([train_x, train_y], f, protocol=Pkl.HIGHEST_PROTOCOL)
print("Saving test data")
with open(dataset + 'svhn_test.pkl', 'wb') as f:
Pkl.dump([test_x, test_y], f, protocol=Pkl.HIGHEST_PROTOCOL)
os.remove(dataset + 'train_32x32.mat')
os.remove(dataset + 'test_32x32.mat')
|
py | 1a3d5671ddc0210598d9b55007d4db885c864af7 |
# Write results to this file
OUTFILE = 'runs/10KB/src2-tgt1/seq-nobro-iter06000.result.csv'
# Source computers for the requests
SOURCE = ['10.0.0.1', '10.0.0.3']
# Should Bro be enabled on the source machines?
SOURCE_BRO = [False, False]
# Target machines for the requests (aka server)
TARGET = ['10.0.0.2']
# Should Bro be enabled on the target machines?
TARGET_BRO = [False]
# Connection mode (par = parallel, seq = sequential)
MODE = 'seq'
# Number of evaluation repetitions to run
EPOCHS = 100
# Number of iterations to be run in each evaluation repetition
ITER = 6000
# Size of the file to be downloaded from target (in Bytes * 10^SIZE)
SIZE = 4
|
py | 1a3d56aa32f2fbc735f9eda7a097dcc9169800ab | """
sentry.search_indexes
~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import haystack
from haystack.indexes import *
from haystack.sites import SearchSite
from sentry.conf import settings
from sentry.utils import to_unicode
from sentry.models import GroupedMessage
if settings.SEARCH_ENGINE:
# Ensure we stop here if we haven't configured Sentry to work under Haystack
backend = haystack.load_backend(settings.SEARCH_ENGINE)
class SentrySearchSite(SearchSite): pass
site = SentrySearchSite()
site.backend = backend.SearchBackend(site, **settings.SEARCH_OPTIONS)
class GroupedMessageIndex(RealTimeSearchIndex):
text = CharField(document=True, stored=False)
status = IntegerField(model_attr='status', stored=False, null=True)
level = IntegerField(model_attr='level', stored=False, null=True)
logger = CharField(model_attr='logger', stored=False, null=True)
server = MultiValueField(stored=False, null=True)
url = MultiValueField(stored=False, null=True)
site = MultiValueField(stored=False, null=True)
first_seen = DateTimeField(model_attr='first_seen', stored=False)
last_seen = DateTimeField(model_attr='last_seen', stored=False)
# def get_queryset(self):
# """Used when the entire index for model is updated."""
# return GroupedMessage.objects.all()
def get_updated_field(self):
return 'last_seen'
def get_content_field(self):
return 'text'
def prepare_text(self, instance):
chunks = [instance.message, instance.class_name, instance.traceback, instance.view]
chunks.extend(self.prepare_url(instance))
return '\n'.join(map(to_unicode, filter(None, chunks)))
def prepare_server(self, instance):
return [to_unicode(s[0]) for s in instance.unique_servers]
def prepare_site(self, instance):
return [to_unicode(s[0]) for s in instance.unique_sites]
def prepare_url(self, instance):
return [to_unicode(s[0]) for s in instance.unique_urls]
site.register(GroupedMessage, GroupedMessageIndex)
|
py | 1a3d574244ceda857be031c6a0105df0e1c40e90 | from typing import List
from unified_message_relay.Core.UMRType import ChatAttribute
from unified_message_relay.Core.UMRCommand import register_command, quick_reply
@register_command(cmd='echo', description='reply every word you sent')
async def command(chat_attrs: ChatAttribute, args: List):
"""
Prototype of command
:param chat_attrs:
:param args:
:return:
"""
if not args: # args should not be empty
return
await quick_reply(chat_attrs, ' '.join(args))
|
py | 1a3d57bf140ddd12966e187e02a65ac89b2741e9 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
from .common import DistributedOperatorImplContainer
from .common import DistributedOperatorImpl
from .common import register_distributed_operator_impl_container
from .common import register_distributed_operator_impl, is_parameter_related
from ..utils import is_dim_shard
from ..utils import is_dim_replicate
from ..utils import is_valid_list_index
from ..utils import compute_compatible_dim_mapping
from ..utils import compute_compatible_dims_mapping
from ..utils import compute_compatible_and_update_dim_mapping
from ..utils import set_dist_op_desc_original_id
from ..dist_attribute import OperatorDistributedAttribute
from paddle.fluid import core, unique_name
from paddle.fluid.framework import in_dygraph_mode
from paddle.fluid.framework import Program, Parameter, Variable, program_guard
from paddle.fluid.data_feeder import check_variable_and_dtype, check_dtype
from paddle.distributed.fleet.meta_optimizers.common import OpRole, OP_ROLE_KEY, OP_ROLE_VAR_KEY
from ..process_group import new_process_group
from ..utils import _get_comm_group, _get_corresponding_rank
class DistributedDefault(DistributedOperatorImplContainer):
def __init__(self, name):
super(DistributedDefault, self).__init__()
self._name = name
register_distributed_operator_impl_container("default",
DistributedDefault("default"))
# Replicated Default
class DistributedDefaultImpl0(DistributedOperatorImpl):
def __init__(self, name):
super(DistributedDefaultImpl0, self).__init__()
self._name = name
self._forward_implemented = True
self._backward_implemented = True
def is_input_compatible(self, dist_op):
raise NotImplementedError("Please Implement this method.")
def is_output_compatible(self, dist_op):
raise NotImplementedError("Please Implement this method.")
def update_dims_mapping(self, dist_op):
raise NotImplementedError("Please Implement this method.")
@staticmethod
def forward(ctx, *args, **kwargs):
dist_op_context = ctx.dist_op_context
main_block = dist_op_context.get_dst_main_program().global_block()
startup_block = dist_op_context.get_dst_startup_program().global_block()
src_op = dist_op_context.get_cur_src_op()
rank_id = dist_op_context.get_rank_id()
# check validation of inputs / outputs
for input_name in src_op.desc.input_names():
assert input_name in kwargs, "input [{}] is not given".format(
input_name)
assert len(kwargs[input_name]) == len(
src_op.desc.input(input_name)
), "number of tensor for input [{}] is not match".format(input_name)
for output_name in src_op.desc.output_names():
assert output_name in kwargs, "input [{}] is not given".format(
output_name)
assert len(kwargs[output_name]) == len(
src_op.desc.output(output_name)
), "number of tensor for input [{}] is not match".format(
output_name)
# replicate op in dist program
dist_op_desc = main_block.desc.append_op()
dist_op_desc.copy_from(src_op.desc)
set_dist_op_desc_original_id(dist_op_desc, src_op.desc, ctx)
for input_name in src_op.desc.input_names():
dist_op_desc.set_input(input_name, kwargs[input_name])
for output_name in src_op.desc.output_names():
dist_op_desc.set_output(output_name, kwargs[output_name])
main_block._sync_with_cpp()
# param initialization sync
for varname in dist_op_desc.input_arg_names():
if startup_block.has_var(varname) and startup_block.var(
varname
).is_parameter and varname not in dist_op_context.already_init_sync_vars:
dist_op_context.already_init_sync_vars.add(varname)
param = startup_block.var(varname)
param_dist_attr = ctx.get_tensor_dist_attr_for_program(param)
process_mesh = param_dist_attr.process_mesh
dims_mapping = param_dist_attr.dims_mapping
# FIXME (JZ-LIANG) Remove this hack to support any op mesh group for Pipeline Parallelism
if rank_id not in process_mesh.processes:
rank_id = _get_corresponding_rank(ctx, process_mesh,
rank_id)
# NOTE: all non-split axes should be present in the mesh
for axis, size in enumerate(process_mesh.topology):
if size <= 1 or axis in dims_mapping:
pass
else:
group_ranks = _get_comm_group(process_mesh.processes,
process_mesh.topology,
axis, rank_id)
sync_group = new_process_group(group_ranks)
new_op = startup_block.append_op(
type='c_broadcast',
inputs={'X': param},
outputs={'Out': param},
attrs={
'ring_id': sync_group.id,
'root': 0,
'use_calc_stream': True,
OP_ROLE_KEY: OpRole.Forward
})
# set distributed attribute
op_attr = OperatorDistributedAttribute()
op_attr.process_mesh = process_mesh
op_attr.set_output_dims_mapping(param.name,
dims_mapping)
op_attr.set_input_dims_mapping(param.name, dims_mapping)
ctx.set_op_dist_attr_for_program(new_op, op_attr)
startup_block._sync_with_cpp()
@staticmethod
def backward(ctx, *args, **kwargs):
# by now the backward function only insert the gradient allreduce for dist op itself
dist_op_context = ctx.dist_op_context
main_block = dist_op_context.get_dst_main_program().global_block()
backward_op = dist_op_context.get_cur_src_op()
dist_attr = ctx.get_op_dist_attr_for_program(backward_op)
assert dist_attr is not None, "backward op [{}] don't have dist attribute !".format(
str(backward_op))
rank_id = dist_op_context.get_rank_id()
# check validation of inputs / outputs
for input_name in backward_op.desc.input_names():
assert input_name in kwargs, "input [{}] is not given".format(
input_name)
assert len(kwargs[input_name]) == len(
backward_op.desc.input(input_name)
), "number of tensor for input [{}] is not match".format(input_name)
for output_name in backward_op.desc.output_names():
assert output_name in kwargs, "input [{}] is not given".format(
output_name)
assert len(kwargs[output_name]) == len(
backward_op.desc.output(output_name)
), "number of tensor for input [{}] is not match".format(
output_name)
# replicate op in dist program
dist_op_desc = main_block.desc.append_op()
dist_op_desc.copy_from(backward_op.desc)
# Refer to the related dist op
set_dist_op_desc_original_id(dist_op_desc, backward_op.desc, ctx)
for input_name in backward_op.desc.input_names():
dist_op_desc.set_input(input_name, kwargs[input_name])
for output_name in backward_op.desc.output_names():
dist_op_desc.set_output(output_name, kwargs[output_name])
main_block._sync_with_cpp()
# check if need gradient allreduce
# if there is a non-gradient & non-parameter input and its batch dimension is splited,
# we need insert gradient allreduce for the gradient of parameter in its output
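# For example (illustrative): if the process mesh topology is [4, 2] and the
# batch dimension of such an input is mapped to the first mesh axis, the
# parameter gradients produced by this grad op are summed with c_allreduce_sum
# across those 4 data-parallel ranks and then scaled by 1/4 by the scale op
# appended below.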
need_gradient_allreduce = False
for input_name in backward_op.desc.input_names():
for varname in backward_op.desc.input(input_name):
if "@GRAD" not in varname and not is_parameter_related(
varname, main_block):
# NOTE input var's dim_mapping of backward op should be the same with input var instead of corresponding varname of forward op
process_mesh = dist_attr.process_mesh
var_dim_mapping = dist_attr.get_input_dims_mapping(varname)
# FIXME (JZ-LIANG) Remove this hack to support any op mesh group for Pipeline Parallelism
if rank_id not in process_mesh.processes:
rank_id = _get_corresponding_rank(ctx, process_mesh,
rank_id)
mesh_shape = process_mesh.topology
batch_size_axis = var_dim_mapping[0]
if batch_size_axis > -1 and mesh_shape[batch_size_axis] > 1:
need_gradient_allreduce = True
group_ranks = _get_comm_group(process_mesh.processes,
process_mesh.topology,
batch_size_axis, rank_id)
dp_degree = len(group_ranks)
dp_group = new_process_group(group_ranks)
break
if need_gradient_allreduce:
allreduce_vars = []
for input_name in backward_op.desc.input_names():
for varname in backward_op.desc.input(input_name):
if "@GRAD" not in varname and is_parameter_related(
varname, main_block):
assert len(
backward_op.desc.input(input_name)
) == 1, "parameter input to grad op should be length 1, but got [{}]".format(
backward_op.desc.input(input_name))
assert varname + "@GRAD" in backward_op.desc.output_arg_names(
), "parameter's grad [{}] not found in the grad op's output".format(
varname + "@GRAD")
assert len(
backward_op.desc.output(input_name + "@GRAD")
) == 1, "parameter grad of grad op should be length 1, but got [{}]".format(
backward_op.desc.output(input_name + "@GRAD"))
allreduce_vars.append(
backward_op.desc.output(input_name + "@GRAD")[0])
if len(allreduce_vars) > 0:
for varname in allreduce_vars:
grad_var = main_block.var(varname)
allreduce_op = main_block.append_op(
type='c_allreduce_sum',
inputs={'X': [grad_var]},
outputs={'Out': [grad_var]},
attrs={
'ring_id': dp_group.id,
'use_calc_stream': True,
OP_ROLE_KEY: OpRole.Backward
})
scale_op = main_block.append_op(
type='scale',
inputs={'X': grad_var},
outputs={'Out': grad_var},
attrs={
'scale': 1.0 / dp_degree,
OP_ROLE_KEY: OpRole.Backward
})
dims_mapping = ctx.get_tensor_dist_attr_for_program(
grad_var).dims_mapping
process_mesh = dist_attr.process_mesh
for op in [allreduce_op, scale_op]:
op_attr = OperatorDistributedAttribute()
op_attr.process_mesh = process_mesh
op_attr.set_output_dims_mapping(grad_var.name,
dims_mapping)
op_attr.set_input_dims_mapping(grad_var.name,
dims_mapping)
ctx.set_op_dist_attr_for_program(op, op_attr)
main_block._sync_with_cpp()
register_distributed_operator_impl(
"default", DistributedDefaultImpl0("replicate_parallel"))
|
py | 1a3d58f84d4056a1a8d51ee22a6bc740357a6f05 | # coding=utf-8
# Copyright 2020 The Trax Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tokenization class for model Reformer."""
import os
from shutil import copyfile
from typing import Optional, Tuple
from ...file_utils import is_sentencepiece_available
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if is_sentencepiece_available():
from .tokenization_reformer import ReformerTokenizer
else:
ReformerTokenizer = None
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
####################################################
# Mapping from the keyword arguments names of Tokenizer `__init__`
# to file names for serializing Tokenizer instances
####################################################
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
####################################################
# Mapping from the keyword arguments names of Tokenizer `__init__`
# to pretrained vocabulary URL for all the model ids.
####################################################
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/reformer-crime-and-punishment": "https://cdn.huggingface.co/google/reformer-crime-and-punishment/spiece.model"
},
"tokenizer_file": {
"google/reformer-crime-and-punishment": "https://cdn.huggingface.co/google/reformer-crime-and-punishment/tokenizer.json"
},
}
####################################################
# Mapping from model ids to max length of inputs
####################################################
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/reformer-crime-and-punishment": 524288,
}
class ReformerTokenizerFast(PreTrainedTokenizerFast):
"""
Construct a "fast" Reformer tokenizer (backed by HuggingFace's `tokenizers` library). Based on `SentencePiece
<https://github.com/google/sentencepiece>`__ .
This tokenizer inherits from :class:`~transformers.PreTrainedTokenizerFast` which contains most of the main
methods. Users should refer to this superclass for more information regarding those methods.
Args:
vocab_file (:obj:`str`):
`SentencePiece <https://github.com/google/sentencepiece>`__ file (generally has a `.spm` extension) that
contains the vocabulary necessary to instantiate a tokenizer.
eos_token (:obj:`str`, `optional`, defaults to :obj:`"</s>"`):
The end of sequence token.
.. note::
When building a sequence using special tokens, this is not the token that is used for the end of
sequence. The token used is the :obj:`sep_token`.
unk_token (:obj:`str`, `optional`, defaults to :obj:`"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (:obj:`str`, `optional`, defaults to :obj:`"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
additional_special_tokens (:obj:`List[str]`, `optional`):
Additional special tokens used by the tokenizer.
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ["attention_mask"]
slow_tokenizer_class = ReformerTokenizer
def __init__(
self,
vocab_file,
tokenizer_file=None,
eos_token="</s>",
unk_token="<unk>",
additional_special_tokens=[],
**kwargs
):
super().__init__(
vocab_file,
tokenizer_file=tokenizer_file,
eos_token=eos_token,
unk_token=unk_token,
additional_special_tokens=additional_special_tokens,
**kwargs,
)
self.vocab_file = vocab_file
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
if not os.path.isdir(save_directory):
logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
return
out_vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
)
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
copyfile(self.vocab_file, out_vocab_file)
return (out_vocab_file,)
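# A minimal usage sketch, assuming the `transformers` package is installed, the
# pretrained files referenced above are reachable, and torch is available for
# `return_tensors="pt"`; the sample text is arbitrary.
def _example_encode(text="Crime and Punishment"):
    tokenizer = ReformerTokenizerFast.from_pretrained(
        "google/reformer-crime-and-punishment"
    )
    # Returns the token ids as a tensor of shape (1, sequence_length)
    return tokenizer(text, return_tensors="pt").input_ids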
|
py | 1a3d5b6cc1c528b07b113484d101546fc98f05ed | import signal
class TimeoutException(Exception):
pass
def timeout(signum, frame):
raise TimeoutException
def register_signal():
signal.signal(signal.SIGALRM, timeout)
def signal_ttl(allowed_time_to_process):
signal.alarm(allowed_time_to_process)
def reset_signal():
signal.alarm(0)
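# A usage sketch for the helpers above, assuming a POSIX platform (SIGALRM is
# unavailable on Windows); the wrapper name and the 5-second budget are
# illustrative, not part of the original module.
def _example_guarded_call(func, allowed_time_to_process=5):
    register_signal()
    signal_ttl(allowed_time_to_process)
    try:
        # Run the callable; a pending SIGALRM raises TimeoutException
        return func()
    except TimeoutException:
        return None
    finally:
        # Always cancel the alarm so it cannot fire later
        reset_signal()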
|
py | 1a3d5e24f3ebcc2cc2e7505f2ef554b522193d39 | import numpy as np
import scipy.io as sio
import xarray as xr
import pkg_resources
from .nortek import read_nortek
from .nortek2 import read_signature
from .rdi import read_rdi
from .base import _create_dataset
from ..rotate.base import _set_coords
from ..time import epoch2date, date2epoch, date2matlab, matlab2date
# time variables stored as data variables (as opposed to coordinates)
t_additional = ['hdwtime_gps', ]
def read(fname, userdata=True, nens=None):
"""Read a binary Nortek (e.g., .VEC, .wpr, .ad2cp, etc.) or RDI
(.000, .PD0, .ENX, etc.) data file.
Parameters
----------
    fname : string
Filename of instrument file to read.
userdata : True, False, or string of userdata.json filename (default ``True``)
Whether to read the '<base-filename>.userdata.json' file.
nens : None (default: read entire file), int, or 2-element tuple (start, stop)
Number of pings or ensembles to read from the file
Returns
-------
ds : xarray.Dataset
An xarray dataset from instrument datafile.
"""
# Loop over binary readers until we find one that works.
for func in [read_nortek, read_signature, read_rdi]:
try:
ds = func(fname, userdata=userdata, nens=nens)
        except Exception:
continue
else:
return ds
raise Exception(
"Unable to find a suitable reader for file {}.".format(fname))
def read_example(name, **kwargs):
"""Read an ADCP or ADV datafile from the examples directory.
Parameters
----------
name : str
A few available files:
AWAC_test01.wpr
BenchFile01.ad2cp
RDI_test01.000
burst_mode01.VEC
vector_data01.VEC
vector_data_imu01.VEC
winriver01.PD0
winriver02.PD0
Returns
-------
ds : xarray.Dataset
An xarray dataset from the binary instrument data.
"""
filename = pkg_resources.resource_filename(
'dolfyn',
'example_data/' + name)
return read(filename, **kwargs)
def save(dataset, filename):
"""Save xarray dataset as netCDF (.nc).
Parameters
----------
dataset : xarray.Dataset
filename : str
Filename and/or path with the '.nc' extension
Notes
-----
    Drops any 'config' attributes, since netCDF cannot store them.
"""
if '.' in filename:
assert filename.endswith('nc'), 'File extension must be of the type nc'
else:
filename += '.nc'
# Dropping the detailed configuration stats because netcdf can't save it
for key in list(dataset.attrs.keys()):
if 'config' in key:
dataset.attrs.pop(key)
# Handling complex values for netCDF4
dataset.attrs['complex_vars'] = []
for var in dataset.data_vars:
if np.iscomplexobj(dataset[var]):
dataset[var+'_real'] = dataset[var].real
dataset[var+'_imag'] = dataset[var].imag
dataset = dataset.drop(var)
dataset.attrs['complex_vars'].append(var)
# Keeping time in raw file's time instance, unaware of timezone
t_list = [t for t in dataset.coords if 'time' in t]
for ky in t_list:
dt = epoch2date(dataset[ky])
dataset = dataset.assign_coords({ky: dt})
t_data = [t for t in dataset.data_vars if t in t_additional]
for ky in t_data:
dt = epoch2date(dataset[ky])
dataset = dataset.drop_vars(ky) # must do b/c of netcdf encoding error
dataset[ky] = xr.DataArray(dt, coords={'time_gps': dataset.time_gps})
dataset.to_netcdf(filename, format='NETCDF4', engine='netcdf4')
def load(filename):
"""Load xarray dataset from netCDF (.nc)
Parameters
----------
filename : str
Filename and/or path with the '.nc' extension
Returns
-------
ds : xarray.Dataset
An xarray dataset from the binary instrument data.
"""
if '.' in filename:
assert filename.endswith('nc'), 'File extension must be of the type nc'
else:
filename += '.nc'
ds = xr.load_dataset(filename, engine='netcdf4')
# Single item lists were saved as 'int' or 'str'
if hasattr(ds, 'rotate_vars') and len(ds.rotate_vars[0]) == 1:
ds.attrs['rotate_vars'] = [ds.rotate_vars]
# Python lists were saved as numpy arrays
if hasattr(ds, 'rotate_vars') and type(ds.rotate_vars) is not list:
ds.attrs['rotate_vars'] = list(ds.rotate_vars)
# Rejoin complex numbers
if hasattr(ds, 'complex_vars') and len(ds.complex_vars):
if len(ds.complex_vars[0]) == 1:
ds.attrs['complex_vars'] = [ds.complex_vars]
for var in ds.complex_vars:
ds[var] = ds[var+'_real'] + ds[var+'_imag'] * 1j
ds = ds.drop_vars([var+'_real', var+'_imag'])
ds.attrs.pop('complex_vars')
# Reload raw file's time instance since the timezone is unknown
t_list = [t for t in ds.coords if 'time' in t]
for ky in t_list:
dt = ds[ky].values.astype('datetime64[us]').tolist()
ds = ds.assign_coords({ky: date2epoch(dt)})
ds[ky].attrs['description'] = 'seconds since 1970-01-01 00:00:00'
# Time data variables
t_data = [t for t in ds.data_vars if t in t_additional]
for ky in t_data:
dt = ds[ky].values.astype('datetime64[us]').tolist()
ds[ky].data = date2epoch(dt)
ds[ky].attrs['description'] = 'seconds since 1970-01-01 00:00:00'
return ds
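# A round-trip sketch built from the functions above; it assumes the bundled
# 'vector_data01.VEC' example and the netcdf4 engine are available, and the
# output filename is arbitrary.
def _example_netcdf_roundtrip(outfile='vector_data01.nc'):
    ds = read_example('vector_data01.VEC')  # read the bundled ADV example
    save(ds, outfile)  # write it out as netCDF
    return load(outfile)  # and read it back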
def save_mat(dataset, filename, datenum=True):
"""Save xarray dataset as a MATLAB (.mat) file
Parameters
----------
dataset : xarray.Dataset
Data to save
filename : str
Filename and/or path with the '.mat' extension
datenum : bool
Converts epoch time into MATLAB datenum
Notes
-----
The xarray data format is saved as a MATLAB structure with the fields
'vars, coords, config, units'
See Also
--------
scipy.io.savemat()
"""
if '.' in filename:
assert filename.endswith(
'mat'), 'File extension must be of the type mat'
else:
filename += '.mat'
# Convert from epoch time to datenum
if datenum:
t_list = [t for t in dataset.coords if 'time' in t]
for ky in t_list:
dt = date2matlab(epoch2date(dataset[ky]))
dataset = dataset.assign_coords({ky: dt})
t_data = [t for t in dataset.data_vars if t in t_additional]
for ky in t_data:
dt = date2matlab(epoch2date(dataset[ky]))
dataset[ky].data = dt
# Save xarray structure with more descriptive structure names
matfile = {'vars': {}, 'coords': {}, 'config': {}, 'units': {}}
for key in dataset.data_vars:
matfile['vars'][key] = dataset[key].values
if hasattr(dataset[key], 'units'):
matfile['units'][key] = dataset[key].units
for key in dataset.coords:
matfile['coords'][key] = dataset[key].values
matfile['config'] = dataset.attrs
sio.savemat(filename, matfile)
def load_mat(filename, datenum=True):
"""Load xarray dataset from MATLAB (.mat) file, complimentary to `save_mat()`
A .mat file must contain the fields: {vars, coords, config, units},
where 'coords' contain the dimensions of all variables in 'vars'.
Parameters
----------
filename : str
Filename and/or path with the '.mat' extension
datenum : bool
Converts MATLAB datenum into epoch time
Returns
-------
ds : xarray.Dataset
An xarray dataset from the binary instrument data.
See Also
--------
scipy.io.loadmat()
"""
if '.' in filename:
assert filename.endswith(
'mat'), 'File extension must be of the type mat'
else:
filename += '.mat'
data = sio.loadmat(filename, struct_as_record=False, squeeze_me=True)
ds_dict = {'vars': {}, 'coords': {}, 'config': {}, 'units': {}}
for nm in ds_dict:
key_list = data[nm]._fieldnames
for ky in key_list:
ds_dict[nm][ky] = getattr(data[nm], ky)
ds_dict['data_vars'] = ds_dict.pop('vars')
ds_dict['attrs'] = ds_dict.pop('config')
# Recreate dataset
ds = _create_dataset(ds_dict)
ds = _set_coords(ds, ds.coord_sys)
# Convert datenum time back into epoch time
if datenum:
t_list = [t for t in ds.coords if 'time' in t]
for ky in t_list:
dt = date2epoch(matlab2date(ds[ky].values))
ds = ds.assign_coords({ky: dt})
ds[ky].attrs['description'] = 'seconds since 1970-01-01 00:00:00'
t_data = [t for t in ds.data_vars if t in t_additional]
for ky in t_data:
dt = date2epoch(matlab2date(ds[ky].values))
ds[ky].data = dt
ds[ky].attrs['description'] = 'seconds since 1970-01-01 00:00:00'
# Restore 'rotate vars" to a proper list
if hasattr(ds, 'rotate_vars') and len(ds.rotate_vars[0]) == 1:
ds.attrs['rotate_vars'] = [ds.rotate_vars]
else:
ds.attrs['rotate_vars'] = [x.strip(' ') for x in list(ds.rotate_vars)]
return ds
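# The same round trip through MATLAB files, under the same assumptions as the
# netCDF sketch above (plus scipy); datenum conversion is left at its default.
def _example_mat_roundtrip(outfile='vector_data01.mat'):
    ds = read_example('vector_data01.VEC')
    save_mat(ds, outfile)
    return load_mat(outfile)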
|
py | 1a3d5eadc53297bccb691e57da773b5a0b1a325a | from .BaseTestCase import BaseTestCase
from .CliTest import CliTest
from . import Crawler
from . import Template
from . import Task
from . import TaskReporter
from . import TaskWrapper
from . import TaskHolder
from . import examples
|
py | 1a3d5f5205557fece433e03717453b5e4d1240f6 | # -*- coding: utf-8 -*-
"""
Helpers for downloading data
The :func:`download` function access the network and requests the content at a
specific url using :mod:`urllib` or :mod:`urllib2`. You can either specify
where the data goes or download it to the default location in ubelt cache.
Either way this function returns the location of the downloaded data. You can
also specify the expected hash in order to check the validity of the data. By
default downloading is verbose.
The :func:`grabdata` function is almost identical to :func:`download`, but it
checks if the data already exists in the download location, and only downloads
if it needs to.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from os.path import basename, join, exists
import six
import os
__all__ = ['download', 'grabdata']
def download(url, fpath=None, hash_prefix=None, hasher='sha512',
chunksize=8192, verbose=1):
"""
Downloads a url to a file on disk.
If unspecified the location and name of the file is chosen automatically.
A hash_prefix can be specified to verify the integrity of the downloaded
    data. This function will download the data every time it's called. For
cached downloading see `grabdata`.
Args:
url (str):
The url to download.
        fpath (PathLike | io.BytesIO):
            The path to download to. Defaults to basename of url and ubelt's
            application cache. If this is an io.BytesIO object then information
is directly written to this object (note this prevents the use of
temporary files).
hash_prefix (None | str):
If specified, download will retry / error if the file hash
does not match this value. Defaults to None.
hasher (str | Hasher):
If hash_prefix is specified, this indicates the hashing
algorithm to apply to the file. Defaults to sha512.
chunksize (int, default=2 ** 13):
Download chunksize.
verbose (int, default=1):
Verbosity level 0 or 1.
Returns:
PathLike: fpath - path to the downloaded file.
Raises:
URLError - if there is problem downloading the url
RuntimeError - if the hash does not match the hash_prefix
Notes:
Based largely on code in pytorch [4]_ with modifications influenced by
other resources [1]_ [2]_ [3]_.
References:
.. [1] http://blog.moleculea.com/2012/10/04/urlretrieve-progres-indicator/
.. [2] http://stackoverflow.com/questions/15644964/python-progress-bar-and-downloads
.. [3] http://stackoverflow.com/questions/16694907/how-to-download-large-file-in-python-with-requests-py
.. [4] https://github.com/pytorch/pytorch/blob/2787f1d8edbd4aadd4a8680d204341a1d7112e2d/torch/hub.py#L347
TODO:
- [ ] fine-grained control of progress
Example:
>>> # xdoctest: +REQUIRES(--network)
>>> from ubelt.util_download import * # NOQA
>>> url = 'http://i.imgur.com/rqwaDag.png'
>>> fpath = download(url)
>>> print(basename(fpath))
rqwaDag.png
Example:
>>> # xdoctest: +REQUIRES(--network)
>>> import ubelt as ub
>>> import io
>>> url = 'http://i.imgur.com/rqwaDag.png'
>>> file = io.BytesIO()
>>> fpath = download(url, file)
>>> file.seek(0)
>>> data = file.read()
>>> assert ub.hash_data(data, hasher='sha1').startswith('f79ea24571')
Example:
>>> # xdoctest: +REQUIRES(--network)
>>> url = 'http://i.imgur.com/rqwaDag.png'
>>> fpath = download(url, hasher='sha1', hash_prefix='f79ea24571da6ddd2ba12e3d57b515249ecb8a35')
Downloading url='http://i.imgur.com/rqwaDag.png' to fpath=...rqwaDag.png
...
...1233/1233... rate=... Hz, eta=..., total=..., wall=...
Example:
>>> # xdoctest: +REQUIRES(--network)
>>> # test download from girder
>>> import pytest
>>> import ubelt as ub
>>> url = 'https://data.kitware.com/api/v1/item/5b4039308d777f2e6225994c/download'
>>> ub.download(url, hasher='sha512', hash_prefix='c98a46cb31205cf')
>>> with pytest.raises(RuntimeError):
>>> ub.download(url, hasher='sha512', hash_prefix='BAD_HASH')
"""
from ubelt import ProgIter as Progress
from ubelt import ensure_app_cache_dir
import shutil
import tempfile
import hashlib
if six.PY2: # nocover
from urllib2 import urlopen # NOQA
else:
from urllib.request import urlopen # NOQA
if fpath is None:
dpath = ensure_app_cache_dir('ubelt')
fname = basename(url)
fpath = join(dpath, fname)
_dst_is_io_object = hasattr(fpath, 'write')
if verbose:
if _dst_is_io_object:
print('Downloading url=%r to IO object' % (url,))
else:
print('Downloading url=%r to fpath=%r' % (url, fpath))
urldata = urlopen(url)
meta = urldata.info()
try:
if hasattr(meta, 'getheaders'): # nocover
file_size = int(meta.getheaders("Content-Length")[0])
else:
file_size = int(meta.get_all("Content-Length")[0])
except Exception: # nocover
# sometimes the url does not contain content length metadata
# TODO: find a public URL that exemplifies this or figure out how to
# mock it locally.
file_size = None
if hash_prefix:
if isinstance(hasher, six.string_types):
if hasher == 'sha1':
hasher = hashlib.sha1()
elif hasher == 'sha512':
hasher = hashlib.sha512()
else:
raise KeyError(hasher)
if _dst_is_io_object:
_file_write = fpath.write
else:
tmp = tempfile.NamedTemporaryFile(delete=False)
_file_write = tmp.write
# possible optimization (have not tested or timed)
_urldata_read = urldata.read
try:
with Progress(total=file_size, disable=not verbose) as pbar:
_pbar_update = pbar.update
def _critical_loop():
# Initialize the buffer to a non-empty object
buffer = ' '
if hash_prefix:
_hasher_update = hasher.update
while buffer:
buffer = _urldata_read(chunksize)
_file_write(buffer)
_hasher_update(buffer)
_pbar_update(len(buffer))
else:
# Same code as above, just without the hasher update.
# (tight loop optimization: remove in-loop conditional)
while buffer:
buffer = _urldata_read(chunksize)
_file_write(buffer)
_pbar_update(len(buffer))
_critical_loop()
if not _dst_is_io_object:
tmp.close()
# We keep a potentially corrupted file if the hash doesn't match.
# It could be the case that the user simply specified the wrong
# hash_prefix.
shutil.move(tmp.name, fpath)
if hash_prefix:
got = hasher.hexdigest()
if got[:len(hash_prefix)] != hash_prefix:
print('hash_prefix = {!r}'.format(hash_prefix))
print('got = {!r}'.format(got))
if _dst_is_io_object:
raise RuntimeError(
'invalid hash value '
'(expected "{}", got "{}")'.format(hash_prefix, got))
else:
raise RuntimeError(
'invalid hash value for fpath={!r} '
'(expected "{}", got "{}")'.format(
fpath, hash_prefix, got))
finally:
if not _dst_is_io_object: # nocover
tmp.close()
# If for some reason the move failed, delete the temporary file
if exists(tmp.name):
os.remove(tmp.name)
return fpath
def grabdata(url, fpath=None, dpath=None, fname=None, redo=False,
verbose=1, appname=None, hash_prefix=None, hasher='sha512',
**download_kw):
"""
Downloads a file, caches it, and returns its local path.
If unspecified the location and name of the file is chosen automatically.
A hash_prefix can be specified to verify the integrity of the downloaded
data.
Args:
url (str): url to the file to download
fpath (PathLike): The full path to download the file to. If
unspecified, the arguments `dpath` and `fname` are used to
determine this.
dpath (PathLike): where to download the file. If unspecified `appname`
is used to determine this. Mutually exclusive with fpath.
fname (str): What to name the downloaded file. Defaults to the url
basename. Mutually exclusive with fpath.
redo (bool, default=False): if True forces redownload of the file
verbose (bool, default=True): verbosity flag
appname (str): set dpath to `ub.get_app_cache_dir(appname)`.
Mutually exclusive with dpath and fpath.
hash_prefix (None | str):
If specified, grabdata verifies that this matches the hash of the
file, and then saves the hash in a adjacent file to certify that
the download was successful. Defaults to None.
hasher (str | Hasher):
If hash_prefix is specified, this indicates the hashing
algorithm to apply to the file. Defaults to sha512.
**download_kw: additional kwargs to pass to ub.download
Returns:
PathLike: fpath - path to downloaded or cached file.
Example:
>>> # xdoctest: +REQUIRES(--network)
>>> import ubelt as ub
>>> url = 'http://i.imgur.com/rqwaDag.png'
>>> fpath = ub.grabdata(url, fname='mario.png')
>>> result = basename(fpath)
>>> print(result)
mario.png
Example:
>>> # xdoctest: +REQUIRES(--network)
>>> import ubelt as ub
>>> fname = 'foo.bar'
>>> url = 'http://i.imgur.com/rqwaDag.png'
>>> prefix1 = '944389a39dfb8fa9'
>>> fpath = ub.grabdata(url, fname=fname, hash_prefix=prefix1)
>>> stamp_fpath = fpath + '.hash'
>>> assert ub.readfrom(stamp_fpath) == prefix1
>>> # Check that the download doesn't happen again
>>> fpath = ub.grabdata(url, fname=fname, hash_prefix=prefix1)
>>> # todo: check file timestamps have not changed
>>> #
>>> # Check redo works with hash
>>> fpath = ub.grabdata(url, fname=fname, hash_prefix=prefix1, redo=True)
>>> # todo: check file timestamps have changed
>>> #
>>> # Check that a redownload occurs when the stamp is changed
>>> open(stamp_fpath, 'w').write('corrupt-stamp')
>>> fpath = ub.grabdata(url, fname=fname, hash_prefix=prefix1)
>>> assert ub.readfrom(stamp_fpath) == prefix1
>>> #
>>> # Check that a redownload occurs when the stamp is removed
>>> ub.delete(stamp_fpath)
>>> open(fpath, 'w').write('corrupt-data')
>>> assert not ub.hash_file(fpath, base='hex', hasher='sha512').startswith(prefix1)
>>> fpath = ub.grabdata(url, fname=fname, hash_prefix=prefix1)
>>> assert ub.hash_file(fpath, base='hex', hasher='sha512').startswith(prefix1)
>>> #
>>> # Check that requesting new data causes redownload
>>> url2 = 'https://data.kitware.com/api/v1/item/5b4039308d777f2e6225994c/download'
>>> prefix2 = 'c98a46cb31205cf'
>>> fpath = ub.grabdata(url2, fname=fname, hash_prefix=prefix2)
>>> assert ub.readfrom(stamp_fpath) == prefix2
"""
from ubelt.util_platform import ensure_app_cache_dir
if appname and dpath:
raise ValueError('Cannot specify appname with dpath')
if fpath and (dpath or fname or appname):
raise ValueError('Cannot specify fpath with dpath or fname')
if fpath is None:
if dpath is None:
appname = appname or 'ubelt'
dpath = ensure_app_cache_dir(appname)
if fname is None:
fname = basename(url)
fpath = join(dpath, fname)
# note that needs_download is never set to false after it becomes true
# this is the key to working through the logic of the following checks
needs_download = redo
if not exists(fpath):
# always download if we are missing the file
needs_download = True
if hash_prefix:
stamp_fpath, needs_download = _check_hash_stamp(
fpath, hash_prefix, hasher, verbose, needs_download)
if needs_download:
fpath = download(url, fpath, verbose=verbose,
hash_prefix=hash_prefix, hasher=hasher,
**download_kw)
if hash_prefix:
# If the file successfully downloaded then the hashes match.
# write out the expected prefix so we can check it later
with open(stamp_fpath, 'w') as file:
file.write(hash_prefix)
else:
if verbose >= 2:
print('Already have file %s' % fpath)
return fpath
def _check_hash_stamp(fpath, hash_prefix, hasher, verbose, needs_download=False):
stamp_fpath = fpath + '.hash'
# Force a re-download if the hash file does not exist or it does
# not match the expected hash
if exists(stamp_fpath):
with open(stamp_fpath, 'r') as file:
hashstr = file.read()
if not hashstr.startswith(hash_prefix):
if verbose: # pragma: nobranch
print('invalid hash value (expected "{}", got "{}")'.format(
hash_prefix, hashstr))
needs_download = True
elif exists(fpath):
        # If the file exists, but the hash doesn't exist, simply compute the
# hash of the existing file instead of redownloading it.
# Redownload if this fails.
from ubelt import util_hash
hashstr = util_hash.hash_file(fpath, hasher=hasher)
if hashstr.startswith(hash_prefix):
# Write the missing stamp file if it matches
with open(stamp_fpath, 'w') as file:
file.write(hash_prefix)
else:
if verbose: # pragma: nobranch
print('invalid hash value (expected "{}", got "{}")'.format(
hash_prefix, hashstr))
needs_download = True
else:
needs_download = True
return stamp_fpath, needs_download
|
py | 1a3d5fc6db2f2707aacb1a4e432cc60e40f9b584 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import copy
import importlib
import logging
import re
import six
from saml2_tophat import saml
from saml2_tophat import xmlenc
from saml2_tophat.attribute_converter import from_local, ac_factory
from saml2_tophat.attribute_converter import get_local_name
from saml2_tophat.s_utils import assertion_factory
from saml2_tophat.s_utils import factory
from saml2_tophat.s_utils import sid
from saml2_tophat.s_utils import MissingValue
from saml2_tophat.saml import NAME_FORMAT_URI
from saml2_tophat.time_util import instant
from saml2_tophat.time_util import in_a_while
logger = logging.getLogger(__name__)
def _filter_values(vals, vlist=None, must=False):
""" Removes values from *vals* that does not appear in vlist
:param vals: The values that are to be filtered
:param vlist: required or optional value
:param must: Whether the allowed values must appear
:return: The set of values after filtering
"""
if not vlist: # No value specified equals any value
return vals
if isinstance(vlist, six.string_types):
vlist = [vlist]
res = []
for val in vlist:
if val in vals:
res.append(val)
if must:
if res:
return res
else:
raise MissingValue("Required attribute value missing")
else:
return res
def _match(attr, ava):
if attr in ava:
return attr
_la = attr.lower()
if _la in ava:
return _la
for _at in ava.keys():
if _at.lower() == _la:
return _at
return None
def filter_on_attributes(ava, required=None, optional=None, acs=None,
fail_on_unfulfilled_requirements=True):
""" Filter
:param ava: An attribute value assertion as a dictionary
:param required: list of RequestedAttribute instances defined to be
required
:param optional: list of RequestedAttribute instances defined to be
optional
    :param fail_on_unfulfilled_requirements: Whether to fail if required
        attributes are missing.
:return: The modified attribute value assertion
"""
def _match_attr_name(attr, ava):
local_name = None
for a in ['name_format', 'friendly_name']:
_val = attr.get(a)
if _val:
if a == 'name_format':
local_name = get_local_name(acs, attr['name'], _val)
else:
local_name = _val
break
if local_name:
_fn = _match(local_name, ava)
else:
_fn = None
if not _fn: # In the unlikely case that someone has provided us with
# URIs as attribute names
_fn = _match(attr["name"], ava)
return _fn
def _apply_attr_value_restrictions(attr, res, must=False):
try:
values = [av["text"] for av in attr["attribute_value"]]
except KeyError:
values = []
try:
res[_fn].extend(_filter_values(ava[_fn], values))
except KeyError:
res[_fn] = _filter_values(ava[_fn], values)
return _filter_values(ava[_fn], values, must)
res = {}
if required is None:
required = []
for attr in required:
_fn = _match_attr_name(attr, ava)
if _fn:
_apply_attr_value_restrictions(attr, res, True)
elif fail_on_unfulfilled_requirements:
desc = "Required attribute missing: '%s'" % (attr["name"])
raise MissingValue(desc)
if optional is None:
optional = []
for attr in optional:
_fn = _match_attr_name(attr, ava)
if _fn:
_apply_attr_value_restrictions(attr, res, False)
return res
def filter_on_demands(ava, required=None, optional=None):
""" Never return more than is needed. Filters out everything
the server is prepared to return but the receiver doesn't ask for
:param ava: Attribute value assertion as a dictionary
:param required: Required attributes
:param optional: Optional attributes
:return: The possibly reduced assertion
"""
    # Check that everything required is there:
if required is None:
required = {}
lava = dict([(k.lower(), k) for k in ava.keys()])
for attr, vals in required.items():
attr = attr.lower()
if attr in lava:
if vals:
for val in vals:
if val not in ava[lava[attr]]:
raise MissingValue(
"Required attribute value missing: %s,%s" % (attr,
val))
else:
raise MissingValue("Required attribute missing: %s" % (attr,))
if optional is None:
optional = {}
oka = [k.lower() for k in required.keys()]
oka.extend([k.lower() for k in optional.keys()])
    # OK, so I can imagine releasing values that are not absolutely necessary
# but not attributes that are not asked for.
for attr in lava.keys():
if attr not in oka:
del ava[lava[attr]]
return ava
def filter_on_wire_representation(ava, acs, required=None, optional=None):
"""
:param ava: A dictionary with attributes and values
:param acs: List of tuples (Attribute Converter name,
Attribute Converter instance)
:param required: A list of saml.Attributes
:param optional: A list of saml.Attributes
:return: Dictionary of expected/wanted attributes and values
"""
acsdic = dict([(ac.name_format, ac) for ac in acs])
if required is None:
required = []
if optional is None:
optional = []
res = {}
for attr, val in ava.items():
done = False
for req in required:
try:
_name = acsdic[req.name_format]._to[attr]
if _name == req.name:
res[attr] = val
done = True
except KeyError:
pass
if done:
continue
for opt in optional:
try:
_name = acsdic[opt.name_format]._to[attr]
if _name == opt.name:
res[attr] = val
break
except KeyError:
pass
return res
def filter_attribute_value_assertions(ava, attribute_restrictions=None):
""" Will weed out attribute values and values according to the
rules defined in the attribute restrictions. If filtering results in
an attribute without values, then the attribute is removed from the
assertion.
:param ava: The incoming attribute value assertion (dictionary)
:param attribute_restrictions: The rules that govern which attributes
and values that are allowed. (dictionary)
:return: The modified attribute value assertion
"""
if not attribute_restrictions:
return ava
for attr, vals in list(ava.items()):
_attr = attr.lower()
try:
_rests = attribute_restrictions[_attr]
except KeyError:
del ava[attr]
else:
if _rests is None:
continue
if isinstance(vals, six.string_types):
vals = [vals]
rvals = []
for restr in _rests:
for val in vals:
if restr.match(val):
rvals.append(val)
if rvals:
ava[attr] = list(set(rvals))
else:
del ava[attr]
return ava
def restriction_from_attribute_spec(attributes):
restr = {}
for attribute in attributes:
restr[attribute.name] = {}
for val in attribute.attribute_value:
if not val.text:
restr[attribute.name] = None
break
else:
restr[attribute.name] = re.compile(val.text)
return restr
def post_entity_categories(maps, **kwargs):
restrictions = {}
try:
required = [d['friendly_name'].lower() for d in kwargs['required']]
except (KeyError, TypeError):
required = []
if kwargs["mds"]:
if "sp_entity_id" in kwargs:
ecs = kwargs["mds"].entity_categories(kwargs["sp_entity_id"])
for ec_map in maps:
for key, (atlist, only_required) in ec_map.items():
if key == "": # always released
attrs = atlist
elif isinstance(key, tuple):
if only_required:
attrs = [a for a in atlist if a in required]
else:
attrs = atlist
for _key in key:
try:
assert _key in ecs
except AssertionError:
attrs = []
break
elif key in ecs:
if only_required:
attrs = [a for a in atlist if a in required]
else:
attrs = atlist
else:
attrs = []
for attr in attrs:
restrictions[attr] = None
else:
for ec_map in maps:
for attr in ec_map[""]:
restrictions[attr] = None
return restrictions
class Policy(object):
""" handles restrictions on assertions """
def __init__(self, restrictions=None):
if restrictions:
self.compile(restrictions)
else:
self._restrictions = None
self.acs = []
def compile(self, restrictions):
""" This is only for IdPs or AAs, and it's about limiting what
is returned to the SP.
        In the configuration file, restrictions on which values
can be returned are specified with the help of regular expressions.
This function goes through and pre-compiles the regular expressions.
:param restrictions:
:return: The assertion with the string specification replaced with
a compiled regular expression.
"""
self._restrictions = copy.deepcopy(restrictions)
for who, spec in self._restrictions.items():
if spec is None:
continue
try:
items = spec["entity_categories"]
except KeyError:
pass
else:
ecs = []
for cat in items:
_mod = importlib.import_module(
"saml2_tophat.entity_category.%s" % cat)
_ec = {}
for key, items in _mod.RELEASE.items():
alist = [k.lower() for k in items]
try:
_only_required = _mod.ONLY_REQUIRED[key]
except (AttributeError, KeyError):
_only_required = False
_ec[key] = (alist, _only_required)
ecs.append(_ec)
spec["entity_categories"] = ecs
try:
restr = spec["attribute_restrictions"]
except KeyError:
continue
if restr is None:
continue
_are = {}
for key, values in restr.items():
if not values:
_are[key.lower()] = None
continue
_are[key.lower()] = [re.compile(value) for value in values]
spec["attribute_restrictions"] = _are
logger.debug("policy restrictions: %s", self._restrictions)
return self._restrictions
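    # A hedged illustration of the mapping compile() expects: one entry per SP
    # entity id plus an optional "default" entry. An attribute mapped to None
    # is released unfiltered; a list of strings is treated as regular
    # expressions over the attribute values. The entity id and attribute names
    # below are examples, not taken from the original source.
    _example_restrictions = {
        "default": {
            "lifetime": {"minutes": 15},
            "attribute_restrictions": None,  # release everything
            "name_form": NAME_FORMAT_URI,
        },
        "https://sp.example.org/sp.xml": {
            "attribute_restrictions": {
                "givenname": None,
                "mail": [".*@example\\.org$"],
            },
        },
    }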
def get(self, attribute, sp_entity_id, default=None, post_func=None,
**kwargs):
"""
:param attribute:
:param sp_entity_id:
:param default:
:param post_func:
:return:
"""
if not self._restrictions:
return default
try:
try:
val = self._restrictions[sp_entity_id][attribute]
except KeyError:
try:
val = self._restrictions["default"][attribute]
except KeyError:
val = None
except KeyError:
val = None
if val is None:
return default
elif post_func:
return post_func(val, sp_entity_id=sp_entity_id, **kwargs)
else:
return val
def get_nameid_format(self, sp_entity_id):
""" Get the NameIDFormat to used for the entity id
:param: The SP entity ID
:retur: The format
"""
return self.get("nameid_format", sp_entity_id,
saml.NAMEID_FORMAT_TRANSIENT)
def get_name_form(self, sp_entity_id):
""" Get the NameFormat to used for the entity id
:param: The SP entity ID
:retur: The format
"""
return self.get("name_form", sp_entity_id, NAME_FORMAT_URI)
def get_lifetime(self, sp_entity_id):
""" The lifetime of the assertion
:param sp_entity_id: The SP entity ID
        :return: The lifetime as a dictionary
"""
        # default is an hour
return self.get("lifetime", sp_entity_id, {"hours": 1})
def get_attribute_restrictions(self, sp_entity_id):
""" Return the attribute restriction for SP that want the information
:param sp_entity_id: The SP entity ID
:return: The restrictions
"""
return self.get("attribute_restrictions", sp_entity_id)
def get_fail_on_missing_requested(self, sp_entity_id):
""" Return the whether the IdP should should fail if the SPs
requested attributes could not be found.
:param sp_entity_id: The SP entity ID
:return: The restrictions
"""
return self.get("fail_on_missing_requested", sp_entity_id, True)
def entity_category_attributes(self, ec):
if not self._restrictions:
return None
ec_maps = self._restrictions["default"]["entity_categories"]
for ec_map in ec_maps:
try:
return ec_map[ec]
except KeyError:
pass
return []
def get_entity_categories(self, sp_entity_id, mds, required):
"""
:param sp_entity_id:
:param mds: MetadataStore instance
:return: A dictionary with restrictions
"""
kwargs = {"mds": mds, 'required': required}
return self.get("entity_categories", sp_entity_id, default={},
post_func=post_entity_categories, **kwargs)
def not_on_or_after(self, sp_entity_id):
""" When the assertion stops being valid, should not be
used after this time.
:param sp_entity_id: The SP entity ID
:return: String representation of the time
"""
return in_a_while(**self.get_lifetime(sp_entity_id))
def filter(self, ava, sp_entity_id, mdstore, required=None, optional=None):
""" What attribute and attribute values returns depends on what
the SP has said it wants in the request or in the metadata file and
what the IdP/AA wants to release. An assumption is that what the SP
asks for overrides whatever is in the metadata. But of course the
IdP never releases anything it doesn't want to.
:param ava: The information about the subject as a dictionary
:param sp_entity_id: The entity ID of the SP
:param mdstore: A Metadata store
:param required: Attributes that the SP requires in the assertion
:param optional: Attributes that the SP regards as optional
:return: A possibly modified AVA
"""
_ava = None
if not self.acs: # acs MUST have a value, fall back to default.
self.acs = ac_factory()
_rest = self.get_entity_categories(sp_entity_id, mdstore, required)
if _rest:
_ava = filter_attribute_value_assertions(ava.copy(), _rest)
elif required or optional:
logger.debug("required: %s, optional: %s", required, optional)
_ava = filter_on_attributes(
ava.copy(), required, optional, self.acs,
self.get_fail_on_missing_requested(sp_entity_id))
_rest = self.get_attribute_restrictions(sp_entity_id)
if _rest:
if _ava is None:
_ava = ava.copy()
_ava = filter_attribute_value_assertions(_ava, _rest)
elif _ava is None:
_ava = ava.copy()
if _ava is None:
return {}
else:
return _ava
def restrict(self, ava, sp_entity_id, metadata=None):
""" Identity attribute names are expected to be expressed in
the local lingo (== friendlyName)
:return: A filtered ava according to the IdPs/AAs rules and
the list of required/optional attributes according to the SP.
If the requirements can't be met an exception is raised.
"""
if metadata:
spec = metadata.attribute_requirement(sp_entity_id)
if spec:
return self.filter(ava, sp_entity_id, metadata,
spec["required"], spec["optional"])
return self.filter(ava, sp_entity_id, metadata, [], [])
def conditions(self, sp_entity_id):
""" Return a saml.Condition instance
:param sp_entity_id: The SP entity ID
:return: A saml.Condition instance
"""
return factory(saml.Conditions,
not_before=instant(),
# How long might depend on who's getting it
not_on_or_after=self.not_on_or_after(sp_entity_id),
audience_restriction=[factory(
saml.AudienceRestriction,
audience=[factory(saml.Audience,
text=sp_entity_id)])])
def get_sign(self, sp_entity_id):
"""
Possible choices
"sign": ["response", "assertion", "on_demand"]
:param sp_entity_id:
:return:
"""
return self.get("sign", sp_entity_id, [])
class EntityCategories(object):
pass
def _authn_context_class_ref(authn_class, authn_auth=None):
"""
    Construct the authn context with an authn context class reference
:param authn_class: The authn context class reference
:param authn_auth: Authenticating Authority
:return: An AuthnContext instance
"""
cntx_class = factory(saml.AuthnContextClassRef, text=authn_class)
if authn_auth:
return factory(saml.AuthnContext,
authn_context_class_ref=cntx_class,
authenticating_authority=factory(
saml.AuthenticatingAuthority, text=authn_auth))
else:
return factory(saml.AuthnContext,
authn_context_class_ref=cntx_class)
def _authn_context_decl(decl, authn_auth=None):
"""
    Construct the authn context with an authn context declaration
:param decl: The authn context declaration
:param authn_auth: Authenticating Authority
:return: An AuthnContext instance
"""
return factory(saml.AuthnContext,
authn_context_decl=decl,
authenticating_authority=factory(
saml.AuthenticatingAuthority, text=authn_auth))
def _authn_context_decl_ref(decl_ref, authn_auth=None):
"""
    Construct the authn context with an authn context declaration reference
:param decl_ref: The authn context declaration reference
:param authn_auth: Authenticating Authority
:return: An AuthnContext instance
"""
return factory(saml.AuthnContext,
authn_context_decl_ref=decl_ref,
authenticating_authority=factory(
saml.AuthenticatingAuthority, text=authn_auth))
def authn_statement(authn_class=None, authn_auth=None,
authn_decl=None, authn_decl_ref=None, authn_instant="",
subject_locality="", session_not_on_or_after=None):
"""
Construct the AuthnStatement
:param authn_class: Authentication Context Class reference
:param authn_auth: Authenticating Authority
:param authn_decl: Authentication Context Declaration
:param authn_decl_ref: Authentication Context Declaration reference
:param authn_instant: When the Authentication was performed.
Assumed to be seconds since the Epoch.
:param subject_locality: Specifies the DNS domain name and IP address
for the system from which the assertion subject was apparently
authenticated.
:return: An AuthnContext instance
"""
if authn_instant:
_instant = instant(time_stamp=authn_instant)
else:
_instant = instant()
if authn_class:
res = factory(
saml.AuthnStatement,
authn_instant=_instant,
session_index=sid(),
session_not_on_or_after=session_not_on_or_after,
authn_context=_authn_context_class_ref(
authn_class, authn_auth))
elif authn_decl:
res = factory(
saml.AuthnStatement,
authn_instant=_instant,
session_index=sid(),
session_not_on_or_after=session_not_on_or_after,
authn_context=_authn_context_decl(authn_decl, authn_auth))
elif authn_decl_ref:
res = factory(
saml.AuthnStatement,
authn_instant=_instant,
session_index=sid(),
session_not_on_or_after=session_not_on_or_after,
authn_context=_authn_context_decl_ref(authn_decl_ref,
authn_auth))
else:
res = factory(
saml.AuthnStatement,
authn_instant=_instant,
session_index=sid(),
session_not_on_or_after=session_not_on_or_after)
if subject_locality:
res.subject_locality = saml.SubjectLocality(text=subject_locality)
return res
def do_subject_confirmation(policy, sp_entity_id, key_info=None, **treeargs):
"""
:param policy: Policy instance
:param sp_entity_id: The entityid of the SP
:param subject_confirmation_method: How was the subject confirmed
:param address: The network address/location from which an attesting entity
can present the assertion.
:param key_info: Information of the key used to confirm the subject
:param in_response_to: The ID of a SAML protocol message in response to
which an attesting entity can present the assertion.
:param recipient: A URI specifying the entity or location to which an
attesting entity can present the assertion.
:param not_before: A time instant before which the subject cannot be
confirmed. The time value MUST be encoded in UTC.
:return:
"""
_sc = factory(saml.SubjectConfirmation, **treeargs)
_scd = _sc.subject_confirmation_data
_scd.not_on_or_after = policy.not_on_or_after(sp_entity_id)
if _sc.method == saml.SCM_HOLDER_OF_KEY:
_scd.add_extension_element(key_info)
return _sc
def do_subject(policy, sp_entity_id, name_id, **farg):
#
specs = farg['subject_confirmation']
if isinstance(specs, list):
res = [do_subject_confirmation(policy, sp_entity_id, **s) for s in
specs]
else:
res = [do_subject_confirmation(policy, sp_entity_id, **specs)]
return factory(saml.Subject, name_id=name_id, subject_confirmation=res)
class Assertion(dict):
""" Handles assertions about subjects """
def __init__(self, dic=None):
dict.__init__(self, dic)
self.acs = []
def construct(self, sp_entity_id, attrconvs, policy, issuer, farg,
authn_class=None, authn_auth=None, authn_decl=None,
encrypt=None, sec_context=None, authn_decl_ref=None,
authn_instant="", subject_locality="", authn_statem=None,
name_id=None, session_not_on_or_after=None):
""" Construct the Assertion
:param sp_entity_id: The entityid of the SP
:param in_response_to: An identifier of the message, this message is
a response to
:param name_id: An NameID instance
:param attrconvs: AttributeConverters
:param policy: The policy that should be adhered to when replying
:param issuer: Who is issuing the statement
:param authn_class: The authentication class
:param authn_auth: The authentication instance
:param authn_decl: An Authentication Context declaration
:param encrypt: Whether to encrypt parts or all of the Assertion
:param sec_context: The security context used when encrypting
:param authn_decl_ref: An Authentication Context declaration reference
:param authn_instant: When the Authentication was performed
:param subject_locality: Specifies the DNS domain name and IP address
for the system from which the assertion subject was apparently
authenticated.
:param authn_statem: A AuthnStatement instance
:return: An Assertion instance
"""
if policy:
_name_format = policy.get_name_form(sp_entity_id)
else:
_name_format = NAME_FORMAT_URI
attr_statement = saml.AttributeStatement(attribute=from_local(
attrconvs, self, _name_format))
if encrypt == "attributes":
for attr in attr_statement.attribute:
enc = sec_context.encrypt(text="%s" % attr)
encd = xmlenc.encrypted_data_from_string(enc)
encattr = saml.EncryptedAttribute(encrypted_data=encd)
attr_statement.encrypted_attribute.append(encattr)
attr_statement.attribute = []
# start using now and for some time
conds = policy.conditions(sp_entity_id)
if authn_statem:
_authn_statement = authn_statem
elif authn_auth or authn_class or authn_decl or authn_decl_ref:
_authn_statement = authn_statement(authn_class, authn_auth,
authn_decl, authn_decl_ref,
authn_instant,
subject_locality,
session_not_on_or_after=session_not_on_or_after)
else:
_authn_statement = None
subject = do_subject(policy, sp_entity_id, name_id,
**farg['subject'])
_ass = assertion_factory(issuer=issuer, conditions=conds,
subject=subject)
if _authn_statement:
_ass.authn_statement = [_authn_statement]
if not attr_statement.empty():
_ass.attribute_statement = [attr_statement]
return _ass
def apply_policy(self, sp_entity_id, policy, metadata=None):
""" Apply policy to the assertion I'm representing
:param sp_entity_id: The SP entity ID
:param policy: The policy
:param metadata: Metadata to use
:return: The resulting AVA after the policy is applied
"""
policy.acs = self.acs
ava = policy.restrict(self, sp_entity_id, metadata)
for key, val in list(self.items()):
if key in ava:
self[key] = ava[key]
else:
del self[key]
return ava
|
gyp | 1a3d6008b734e3a754339cfc726576fe125a6d22 | {
"targets": [
{
"target_name": "nodespotify",
"sources": [
"src/node-spotify.cc", "src/audio/audio.c",
"src/callbacks/PlaylistCallbacksHolder.cc",
"src/callbacks/SessionCallbacks.cc",
"src/callbacks/SearchCallbacks.cc", "src/callbacks/AlbumBrowseCallbacks.cc",
"src/callbacks/ArtistBrowseCallbacks.cc", "src/callbacks/PlaylistContainerCallbacksHolder.cc",
"src/utils/ImageUtils.cc", "src/utils/V8Utils.cc",
"src/objects/spotify/Track.cc", "src/objects/spotify/Artist.cc",
"src/objects/spotify/Playlist.cc", "src/objects/spotify/PlaylistContainer.cc",
"src/objects/spotify/Album.cc", "src/objects/spotify/Search.cc",
"src/objects/spotify/Spotify.cc", "src/objects/spotify/Player.cc",
"src/objects/spotify/PlaylistFolder.cc", "src/objects/spotify/User.cc",
"src/objects/spotify/TrackExtended.cc",
"src/objects/node/NodeTrack.cc", "src/objects/node/NodeArtist.cc",
"src/objects/node/NodePlaylist.cc", "src/objects/node/NodeAlbum.cc",
"src/objects/node/NodePlayer.cc", "src/objects/node/NodeSearch.cc",
"src/objects/node/NodeSpotify.cc", "src/objects/node/NodePlaylistFolder.cc",
"src/objects/node/NodePlaylistContainer.cc", "src/objects/node/NodeUser.cc",
"src/objects/node/NodeTrackExtended.cc"
],
"link_settings" : {
"libraries": ["-framework OpenAL -framework libspotify"]
},
"copies": [ {
"destination": "<(PRODUCT_DIR)",
"files": ["src/spotify.js", "src/metadataUpdater.js"]
}
],
"conditions": [
["OS=='mac'", {
"xcode_settings": {
"OTHER_CPLUSPLUSFLAGS" : ["-std=c++11", "-stdlib=libc++"],
"GCC_ENABLE_CPP_EXCEPTIONS": 'YES',
"MACOSX_DEPLOYMENT_TARGET" : "10.8"
},
"sources": ["src/audio/openal-audio.c"],
"defines": ["OS_OSX"],
"link_settings" : { "libraries" : ["-framework", "OpenAL"] }
}],
["OS=='linux'", {
"sources": ["src/audio/alsa-audio.c"],
"cflags": ["-I/usr/include/alsa"],
"cflags_cc": [
"-std=c++11",
"-fexceptions"
],
"defines": ["OS_LINUX"],
"link_settings" : { "libraries" : ["-lasound"] }
}]
]
}
]
}
|
py | 1a3d6216a762a4e5b2e0262f2efdc05e795d1c0f | from __future__ import absolute_import
import unittest
import yaml
from attrdict import AttrDict
from pyswitch.device import Device
class InterfaceISISTestCase(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(InterfaceISISTestCase, self).__init__(*args, **kwargs)
with open('config.yaml') as fileobj:
cfg = AttrDict(yaml.safe_load(fileobj))
switch = cfg.InterfaceISISTestCase.switch
self.switch_ip = switch.ip
self.switch_username = switch.username
        self.switch_password = switch.password
self.intf_name = str(switch.intf_name)
self.intf_type = str(switch.intf_type)
self.conn = (self.switch_ip, '22')
        self.auth = (self.switch_username, self.switch_password)
def setUp(self):
with Device(conn=self.conn, auth=self.auth) as dev:
dev.services.isis()
op = dev.services.isis(get=True)
            self.assertIsNotNone(op)  # single-argument assertEqual is invalid; just check the call returned something
dev.isis.address_family_ipv4_unicast()
op = dev.isis.address_family_ipv4_unicast(get=True)
            self.assertIsNotNone(op)
dev.isis.log_adjacency()
op = dev.isis.log_adjacency(get=True)
            self.assertIsNotNone(op)
dev.isis.net_address(net='49.0001.0100.1001.0006.00')
op = dev.isis.net_address(get=True)
            self.assertIsNotNone(op)
def tearDown(self):
with Device(conn=self.conn, auth=self.auth) as dev:
dev.services.isis(enable=False)
dev.isis.address_family_ipv4_unicast(delete=True)
dev.isis.log_adjacency(delete=True)
dev.isis.net_address(delete=True)
def test_enable_isis_on_intf(self):
with Device(conn=self.conn, auth=self.auth) as dev:
dev.interface.ip_router_isis(
intf_type='loopback',
intf_name='11')
op = dev.interface.ip_ospf(
intf_type='loopback',
intf_name='11',
get=True)
            self.assertIsNotNone(op)
dev.interface.ip_ospf(
intf_type='loopback',
intf_name='11',
delete=True)
|
py | 1a3d63d580c0f77b49f6abb364954eedf12d97df | # -*- coding: utf-8 -*-
'''
=====================================================================================
Copyright (c) 2016 Université de Lorraine & Luleå tekniska universitet
Author: Luca Di Stasio <[email protected]>
<[email protected]>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
=====================================================================================
DESCRIPTION
Tested with Python 2.7 Anaconda 2.4.1 (64-bit) distribution
in Windows 10.
'''
from numpy import *
E1 = 3e4
E2 = 3e4
E3 = 5e5
nu12 =0.45
nu13 =(E1/E3)*0.2
nu23 =(E2/E3)*0.2
'''
E1 = 134.6287e3
E2 = 9.4757e3
E3 = 9.4757e3
nu12 =0.2883
nu13 =0.2883
nu23 =0.3969
'''
'''
E1 = 5e5
E2 = 3e4
E3 = 3e4
nu12 =0.2
nu13 =0.2
nu23 =0.45
'''
if abs(nu12)<sqrt(E1/E2):
print 'TEST 1 --- abs(nu12)<sqrt(E1/E2) --- PASSED'
else:
print 'TEST 1 --- abs(nu12)<sqrt(E1/E2) --- FAILED'
if abs(nu13)<sqrt(E1/E3):
print 'TEST 2 --- abs(nu13)<sqrt(E1/E3) --- PASSED'
else:
print 'TEST 2 --- abs(nu13)<sqrt(E1/E3) --- FAILED'
if abs(nu23)<sqrt(E2/E3):
print 'TEST 3 --- abs(nu23)<sqrt(E2/E3) --- PASSED'
else:
print 'TEST 3 --- abs(nu23)<sqrt(E2/E3) --- FAILED'
if 1-(E2/E1)*pow(nu12,2)-(E3/E2)*pow(nu23,2)-(E3/E1)*pow(nu13,2)-2*nu13*(E3/E2)*nu23*(E2/E1)*nu12>0:
print 'TEST 4 --- 1-nu12nu21-nu23nu32-nu31nu13-nu21nu32nu13>0 --- PASSED'
else:
print 'TEST 4 --- 1-nu12nu21-nu23nu32-nu31nu13-nu21nu32nu13>0 --- FAILED'
print ' LHS = ' + str(1-(E2/E1)*pow(nu12,2)-(E3/E2)*pow(nu23,2)-(E3/E1)*pow(nu13,2)-2*nu13*(E3/E2)*nu23*(E2/E1)*nu12)
|
py | 1a3d640d7f6d2ee3faab75944d8c557787ff7677 | #!/usr/bin/env python
# encoding: utf8
#
# Copyright © BJ Cardon <bj dot car dot don at gmail dot com>,
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the owner nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from django.conf.urls.defaults import patterns, include, url
urlpatterns = patterns('',
url(r'^hello_world/','core.views.hello_world_service'),
)
|
py | 1a3d64313bec36c4c68799df1179be18f0e79633 | # Generated by Django 2.2.10 on 2020-09-14 08:41
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("career", "0020_auto_20200905_2352"),
]
operations = [
migrations.AddField(
model_name="joblisting",
name="is_summerjob_marathon",
field=models.BooleanField(default=False, verbose_name="sommerjobbmaraton"),
),
migrations.AddField(
model_name="joblisting",
name="video_url",
field=models.URLField(blank=True, null=True, verbose_name="video"),
),
]
|
py | 1a3d64cc304734bfe1c2e0dcbaa7584875128f51 | from django.db import models
from academic.models import Department
from administration.models import Designation
from address.models import District, Upazilla, Union
class AddressInfo(models.Model):
district = models.ForeignKey(District, on_delete=models.CASCADE, null=True)
upazilla = models.ForeignKey(Upazilla, on_delete=models.CASCADE, null=True)
union = models.ForeignKey(Union, on_delete=models.CASCADE, null=True)
"""class EducationInfo(models.Model):
name_of_exam = models.CharField(max_length=100)
institute = models.CharField(max_length=255)
group = models.CharField(max_length=100)
grade = models.CharField(max_length=45)
board = models.CharField(max_length=45)
passing_year = models.IntegerField()
def __str__(self):
return self.name_of_exam
class TrainingInfo(models.Model):
training_name = models.CharField(max_length=100)
year = models.IntegerField()
duration = models.IntegerField()
place = models.CharField(max_length=100)
def __str__(self):
return self.training_name
class JobInfo(models.Model):
category_choice = (
('bcs', 'BCS'),
('nationalized', 'Nationalized'),
('10% quota', '10% quota'),
('non govt.', 'Non Govt.')
)
category = models.CharField(choices=category_choice, max_length=45)
joning_date = models.DateField()
institute_name = models.CharField(max_length=100)
job_designation = models.ForeignKey(Designation, on_delete=models.CASCADE)
department = models.ForeignKey(Department, on_delete=models.CASCADE)
scale = models.IntegerField()
grade_of_post = models.CharField(max_length=45)
first_time_scale_due_year = models.IntegerField()
second_time_scale_due_year = models.IntegerField()
promotion_due_year = models.IntegerField()
recreation_leave_due_year = models.IntegerField()
expected_retirement_year = models.IntegerField()
def __str__(self):
return self.institute_name
class ExperienceInfo(models.Model):
institute_name = models.CharField(max_length=100)
designation = models.CharField(max_length=45)
trainer = models.CharField(max_length=45)
def __str__(self):
return self.institute_name"""
class PersonalInfo(models.Model):
name = models.CharField(max_length=45)
photo = models.ImageField()
#start date
date_of_birth = models.DateField()
    place_of_birth = models.CharField(max_length=100)  # assumed to be a text field; the original DateField looks like a copy-paste slip
nationality_choice = (
('Bangladeshi', 'Bangladeshi'),
('Others', 'Others')
)
nationality = models.CharField(max_length=45, choices=nationality_choice)
religion_choice = (
('Islam', 'Islam'),
('Hinduism', 'Hinduism'),
('Buddhism', 'Buddhism'),
('Christianity', 'Christianity'),
('Others', 'Others')
)
religion = models.CharField(max_length=45, choices=religion_choice)
gender_choice = (
('male', 'Male'),
('female', 'Female'),
('other', 'Other')
)
gender = models.CharField(choices=gender_choice, max_length=10)
blood_group_choice = (
('a+', 'A+'),
('o+', 'O+'),
('b+', 'B+'),
('ab+', 'AB+'),
('a-', 'A-'),
('o-', 'O-'),
('b-', 'B-'),
('ab-', 'AB-')
)
blood_group = models.CharField(choices=blood_group_choice, max_length=5)
e_tin = models.IntegerField(unique=True)
nid = models.IntegerField(unique=True)
driving_license_passport = models.IntegerField(unique=True)
phone_no = models.CharField(max_length=11, unique=True)
email = models.CharField(max_length=255, unique=True)
father_name = models.CharField(max_length=45)
mother_name = models.CharField(max_length=45)
marital_status_choice = (
('married', 'Married'),
('widowed', 'Widowed'),
('separated', 'Separated'),
('divorced', 'Divorced'),
('single', 'Single')
)
marital_status = models.CharField(choices=marital_status_choice, max_length=10)
address = models.ForeignKey(AddressInfo, on_delete=models.CASCADE, null=True)
#education = models.ForeignKey(EducationInfo, on_delete=models.CASCADE, null=True)
#training = models.ForeignKey(TrainingInfo, on_delete=models.CASCADE, null=True)
#job = models.ForeignKey(JobInfo, on_delete=models.CASCADE, null=True)
#experience = models.ForeignKey(ExperienceInfo, on_delete=models.CASCADE, null=True)
is_delete = models.BooleanField(default=False)
date = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.name
|
py | 1a3d654bfb71f7ec121816de489887153093da60 | # Copyright (c) 2017 Niklas Rosenstein
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from datetime import datetime, timezone, timedelta
from collections import namedtuple
import errno
import os
import time
import sys
if 'require' in globals():
git = require('./git')
else:
from . import git
BRANCH = 'worklog'
now = datetime.now
time_fmt = '%d/%b/%Y:%H:%M:%S %z'
CheckinData = namedtuple('CheckinData', 'name time')
CheckoutData = namedtuple('CheckoutData', 'name begin end interval message')
Log = namedtuple('Log', 'begin end message')
def makedirs(path):
if not os.path.isdir(path):
os.makedirs(path)
def now():
tz = timezone(timedelta(hours=-time.timezone/3600))
return datetime.now().replace(tzinfo=tz)
def strftime(time, fmt=None):
return time.strftime(fmt or time_fmt)
def strptime(value, fmt=None):
return datetime.strptime(value, fmt or time_fmt)
def splittimedelta(tdelta, components='DHMS'):
l = {'D': 86400, 'H': 3600, 'M': 60, 'S': 1}
r = []
rem = int(tdelta.total_seconds())
for k in components:
d, rem = divmod(rem, l[k])
r.append(d)
return r
def strftimedelta(tdelta, components='DHMS'):
parts = []
for i, val in enumerate(splittimedelta(tdelta, components)):
if val > 0:
parts.append('{}{}'.format(val, components[i].lower()))
return ', '.join(parts)
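# Illustrative sketch, not part of the original module: with the D/H/M/S component
# table above, a delta of 1 day, 2 hours and 5 seconds would come out roughly as
#   splittimedelta(timedelta(days=1, hours=2, seconds=5))   # -> [1, 2, 0, 5]
#   strftimedelta(timedelta(days=1, hours=2, seconds=5))    # -> '1d, 2h, 5s'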
def parse_time(value, dt=None):
"""
Parses a time string in multiple possible variants and otherwise applies
the defaults from *dt*. If *dt* is not specified, the result of #now() is
used.
"""
# Intentionally leaving out microseconds.
fields = ['year', 'month', 'day', 'hour', 'minute', 'second', 'tzinfo']
formats = [
(time_fmt, fields),
('%H:%M', ['hour', 'minute']),
('%H:%M:%S', ['hour', 'minute', 'second']),
('%H-%M', ['hour', 'minute']),
('%H-%M-%S', ['hour', 'minute', 'second']),
('%d/%H:%M', ['day', 'hour', 'minute']),
('%d/%H:%M:%S', ['day', 'hour', 'minute', 'second']),
('%d', ['day', '#0daytime']),
('%d/%b', ['day', 'month', '#0daytime']),
('%m/%d/%H:%M', ['month', 'day', 'hour', 'minute']),
('%m/%d/%H:%M:%S', ['month', 'day', 'hour', 'minute', 'second']),
]
for fmt, filled_fields in formats:
try:
result = datetime.strptime(value, fmt)
break
except ValueError:
pass
else:
raise ValueError('invalid time string: {!r}'.format(value))
# Update the values that haven't been parsed.
if dt is None:
dt = now()
kwargs = {k: getattr(dt, k) for k in fields if k not in filled_fields}
if '#0daytime' in filled_fields:
kwargs['hour'] = 0
kwargs['minute'] = 0
kwargs['second'] = 0
return result.replace(**kwargs)
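# Hedged examples of the formats table above (missing pieces come from now()):
#   parse_time('14:30')      # today at 14:30, seconds/date taken from now()
#   parse_time('03/14:30')   # the 3rd of the current month at 14:30
#   parse_time('5')          # the 5th at 00:00:00 via the '#0daytime' marker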
def parse_sheet(data):
"""
Parses a timetable sheet and returns a list of #Log entries.
"""
result = []
for line in data.split('\n'):
cols = line.split('\t', 3)
cols[0] = strptime(cols[0])
cols[1] = strptime(cols[1])
result.append(Log(*cols))
return result
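# Sheet format sketch (inferred from add_checkout() below): one tab-separated line
# per checkout, "<begin>\t<end>\t<message>", with both times in time_fmt, e.g.
#   "01/Jan/2017:09:00:00 +0000\t01/Jan/2017:12:30:00 +0000\tCheckout 3:30:00"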
class NoCheckinAvailable(Exception):
pass
def get_checkin_file(fatal=True):
return os.path.join(git.dir(fatal=fatal), 'worklog', 'checkin')
def get_commit_repo_and_branch():
# Check if we should check-in to a different repository.
target_repo = git.config('worklog.repository')
if target_repo:
if not os.path.isdir(target_repo):
print('fatal: worklog.repository={}'.format(target_repo), file=sys.stderr)
print(' the specified directory does not exist.', file=sys.stderr)
sys.exit(128)
target_branch = git.config('worklog.project')
if not target_branch:
print('fatal: worklog.repository is set but worklog.project is not', file=sys.stderr)
print(' please do `git config worklog.project <projectname>` first', file=sys.stderr)
sys.exit(128)
else:
target_branch = git.config('worklog.branch') or BRANCH
return target_repo or None, target_branch
def set_checkin(name, time=None):
time = time or now()
filename = get_checkin_file()
makedirs(os.path.dirname(filename))
with open(filename, 'w') as fp:
fp.write('{}\n{}\n'.format(name, strftime(time)))
return CheckinData(name, time)
def get_checkin():
filename = get_checkin_file()
if not os.path.isfile(filename):
raise NoCheckinAvailable(filename)
with open(filename) as fp:
name = fp.readline().rstrip()
time = fp.readline().rstrip()
time = strptime(time)
if not name or fp.read().strip():
raise ValueError('invalid check-in file at {!r}'.format(filename))
return CheckinData(name, time)
def rem_checkin():
filename = get_checkin_file()
try:
os.remove(filename)
except OSError as exc:
if exc.errno != errno.ENOENT:
raise
def add_checkout(name, begin, end, message=None):
interval = end - begin
if not message:
message = 'Checkout ' + str(interval)
repo, branch = get_commit_repo_and_branch()
# Read the contents of the timetable file for this user.
filename = name + '.tsv'
try:
contents = git.show('{}:{}'.format(branch, filename), cwd=repo)
except git.DoesNotExist:
contents = ''
# Add an entry to the file.
if not contents.endswith('\n'):
contents += '\n'
contents += '{}\t{}\t{}\n'.format(strftime(begin), strftime(end), message or '')
# Create a commit to add the line to the timetable.
commit = git.Commit()
commit.head(branch, message)
commit.add_file_contents(contents, filename)
git.fast_import(commit.getvalue(), date_format='raw', quiet=True, cwd=repo)
return CheckoutData(name, begin, end, interval, message)
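# Rough end-to-end sketch (assumes it runs inside a git repository, since git.dir()
# and git.fast_import() are used under the hood; names are illustrative):
#   set_checkin('alice')                     # writes <git dir>/worklog/checkin
#   ci = get_checkin()                       # -> CheckinData('alice', <start time>)
#   add_checkout(ci.name, ci.time, now())    # commits a line to alice.tsv on the
#   rem_checkin()                            # 'worklog' branch, then clears state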
|
py | 1a3d65a5bb45ed53b128bd8f2886bcdd5093097d | import datetime
import time
import pytz
import pandas as pd
import json
import urllib.request
import requests
from tzwhere import tzwhere
from darksky import forecast
import numpy as np
from helpers import okta_to_percent, granularity_to_freq
def get_temperature_cloudcover(start_time=None, end_time=None,
granularity=None,latitude=None, longitude=None, source='weather_underground', timezone='US/Eastern', darksky_api_key=None):
if source == 'weather_underground' or darksky_api_key is None:
# create a pandas datetimeindex
df = pd.date_range(start_time - datetime.timedelta(days=1), end_time , freq='D')
print(df)
# convert it into a simple dataframe and rename the column
df = df.to_frame(index=False)
df.columns = ['time']
# convert it into required format for weather underground
df['time'] = df['time'].dt.strftime('%Y%m%d')
temp_cloud_df = pd.DataFrame()
for _ , row in df.iterrows():
# print(row['time'])
try:
url = "https://api.weather.com/v1/geocode/{}/{}/observations/historical.json?apiKey=6532d6454b8aa370768e63d6ba5a832e&startDate={}&endDate={}&units=e".format(latitude, longitude, row['time'], row['time'])
data = urllib.request.urlopen(url).read()
output = json.loads(data)
output= pd.DataFrame(output['observations'])
output = output[['valid_time_gmt', 'temp', 'clds', 'wx_phrase']]
output.columns = ['time', 'temperature', 'clds', 'wx_phrase']
temp_cloud_df = temp_cloud_df.append(output, ignore_index=True)
except urllib.error.HTTPError as e:
# print(e)
pass
# time.sleep(0.01)
# convert to datetime and set the correct timezone
temp_cloud_df['time_s'] = temp_cloud_df['time']
temp_cloud_df['time'] = pd.to_datetime(temp_cloud_df['time'],unit='s').dt.tz_localize('utc').dt.tz_convert(timezone)
# temp_cloud_df['time'] = temp_cloud_df['time'].dt.round("H")
# resample the data to desired granularity
temp_cloud_df = temp_cloud_df.set_index(temp_cloud_df['time'])
temp_cloud_df = temp_cloud_df.resample(granularity_to_freq(granularity)).ffill()
temp_cloud_df = temp_cloud_df[['temperature', 'clds']]
temp_cloud_df = temp_cloud_df.reset_index()
# change from Fahrenheit to Celsius
temp_cloud_df['temperature'] = (temp_cloud_df['temperature'] - 32) * 5/9
# cloud okta code to percent
temp_cloud_df['clouds'] = pd.to_numeric(temp_cloud_df['clds'].apply(lambda x: okta_to_percent(x)))
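# (okta is the 0-8 "eighths of the sky covered" cloud-cover scale; okta_to_percent
# is assumed to map the WU 'clds' condition codes onto that scale and then to 0-100%.)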
# keep only relevant columns
temp_cloud_df = temp_cloud_df[['time', 'temperature', 'clouds', 'clds']]
######################### future release ############################
# # create a pandas datetimeindex
# df = pd.date_range(start_time, end_time, freq=granularity_to_freq(granularity), tz=timezone)
# # convert it into a simple dataframe and rename the column
# df = df.to_frame(index=False)
# df.columns = ['time']
# # combine both df and temperature_df
# temp_cloud_df = df.join(temp_cloud_df.set_index('time'), on='time')
####################################################################
# temp_cloud_df['time'] = temp_cloud_df['time'].dt.tz_localize('utc').dt.tz_convert(timezone)
temp_cloud_df['time'] = temp_cloud_df['time'].dt.tz_localize(None)
# print(temp_cloud_df)
elif source == 'darksky' and darksky_api_key is not None:
time = []
temperature = []
cloudcover = []
summary = []
# localizing the datetime based on the timezone (accept either a tz name string,
# as in the default 'US/Eastern', or an already-constructed pytz timezone)
tz = pytz.timezone(timezone) if isinstance(timezone, str) else timezone
start: datetime.datetime = tz.localize(start_time)
end: datetime.datetime = tz.localize(end_time)
while start <= end:
day = int(start.timestamp())
start = start + datetime.timedelta(days=1)
response = urllib.request.urlopen('https://api.darksky.net/forecast/{}/{},{},{}?exclude=currently,daily,flags'.format(darksky_api_key, latitude, longitude, day)).read()
output = json.loads(response)['hourly']['data']
for item in output:
time.append(item['time'])
temperature.append(item['temperature'])
cloudcover.append(item['cloudCover'])
summary.append(item['summary'])
temp_cloud_df = pd.DataFrame({'time':time, 'temperature':temperature,'clouds':cloudcover,'clds':summary})
temp_cloud_df['time'] = pd.to_datetime(temp_cloud_df['time'], unit='s').dt.tz_localize('utc').dt.tz_convert(timezone).dt.tz_localize(None)
temp_cloud_df['temperature'] = (temp_cloud_df['temperature'] - 32) * 5/9
else:
print('Sorry, {} source has not been implemented yet.'.format(source))
return temp_cloud_df
|
py | 1a3d66e7812eb1f9cd22d2825f89eda7d92aa7e6 | # ========================================================================
# Copyright (C) 2019 The MITRE Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
# This file contains the implementation for Grover's algorithm.
# It's an intriguing approach at "reversing a function" - essentially
# if you have a function f(x) = y, Grover's algorithm figures out
# x given f and y. If you're interested in trying all possible inputs of a
# fixed search space (like, say, finding the password for an encrypted
# file or something), Grover can do it in O(√N) steps instead of brute
# force searching the entire space (which is O(N)).
from projectq import MainEngine
from projectq.ops import *
from projectq.meta import Dagger, Control
from utility import reset
from oracle_utility import run_flip_marker_as_phase_marker
import oracles
def grover_iteration(oracle, qubits, oracle_args):
"""
Runs a single iteration of the main loop in Grover's algorithm,
which is the oracle followed by the diffusion operator.
Parameters:
oracle (function): The oracle that flags the correct answer to
the problem being solved (essentially, this should just
implement the function as a quantum program)
qubits (Qureg): The register to run the oracle on
oracle_args (anything): An oracle-specific argument object to pass to the
oracle during execution
"""
# Run the oracle on the input to see if it was a correct result
run_flip_marker_as_phase_marker(oracle, qubits, oracle_args)
# Run the diffusion operator
All(H) | qubits
run_flip_marker_as_phase_marker(oracles.check_if_all_zeros, qubits, None)
All(H) | qubits
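# The three lines above are the standard diffusion (inversion-about-the-mean)
# operator: H on every qubit, a phase flip on the all-zeros state, then H again.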
def grover_search(oracle, qubits, oracle_args):
"""
Runs Grover's algorithm on the provided oracle, turning the input into
a superposition where the correct answer's state has a very large amplitude
relative to all of the other states.
Parameters:
oracle (function): The oracle that flags the correct answer to
the problem being solved (essentially, this should just
implement the function as a quantum program)
qubits (Qureg): The register to run the oracle on
oracle_args (anything): An oracle-specific argument object to pass to the
oracle during execution
"""
# Run the algorithm for √N iterations.
All(H) | qubits
iterations = round(2 ** (len(qubits) / 2))
for i in range(0, iterations):
grover_iteration(oracle, qubits, oracle_args)
def run_grover_search(number_of_qubits, oracle, oracle_args):
"""
Uses Grover's quantum search to find the single answer to a problem
with high probability.
Parameters:
number_of_qubits (int): The number of qubits that the oracle expects
(the number of qubits that the answer will contain)
oracle (function): The oracle that flags the correct answer to
the problem being solved (essentially, this should just
implement the function as a quantum program)
oracle_args (anything): An oracle-specific argument object to pass to the
oracle during execution
Returns:
A list[int] that represents the discovered answer as a bit string
"""
# Build the engine and run the search
engine = MainEngine()
qubits = engine.allocate_qureg(number_of_qubits)
grover_search(oracle, qubits, oracle_args)
# Measure the potential solution and return it
solution = []
for qubit in qubits:
Measure | qubit
solution += [int(qubit)]
return solution
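# Hedged usage sketch (not from the original file): check_if_all_zeros is the only
# oracle referenced above, so a minimal self-test could look like
#   if __name__ == '__main__':
#       print(run_grover_search(4, oracles.check_if_all_zeros, None))
# which should print [0, 0, 0, 0] with high probability; any other oracle/argument
# pair is an assumption about the surrounding project.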
|
py | 1a3d67b371f4e45a4ce8a27db7770b73b1c1f751 | import pathlib
import pygubu
import threading
import globals
from book_generator import BookMetaData
PROJECT_PATH = pathlib.Path(__file__).parent
PROJECT_UI = PROJECT_PATH / "main_gui.ui"
class MainWindow:
def __init__(self, master=None):
# 1: Create a builder and setup resources path (if you have images)
self.builder = builder = pygubu.Builder()
builder.add_resource_path(PROJECT_PATH)
# 2: Load an ui file
builder.add_from_file(PROJECT_UI)
# 3: Create the mainwindow
self.mainwindow = builder.get_object('main_gui', master)
# 4: Connect callbacks
builder.connect_callbacks(self)
self.languageDict = globals.LANGUAGE_ALIASES
self.setDefaults()
self.outputMessage = self.builder.get_variable('outputMessage').get()
def run(self):
self.mainwindow.mainloop()
def setGenerateButtonAction(self, action):
self.generateAction = action
def generate(self):
actionThread = threading.Thread(target=self.generateAction)
actionThread.start()
def getFilePathA(self):
return self.builder.get_variable('filePathA').get()
def getFilePathB(self):
return self.builder.get_variable('filePathB').get()
def getTestRunEnabled(self):
return self.builder.get_variable('testCheckbox').get()
def getOutputFilePath(self):
return self.builder.get_variable('outputFilepath').get()
def getOutputMetaData(self):
title = self.builder.get_object('outputTitle').get()
author = self.builder.get_object('outputAuthor').get()
return BookMetaData(title=title, author=author, language=self.getLanguageA())
def getLanguageA(self):
return self.languageDict[self.builder.get_variable('languageA').get()]
def getLanguageB(self):
return self.languageDict[self.builder.get_variable('languageB').get()]
def getPairingThreshold(self):
return self.builder.get_variable('pairingThreshold').get()
def addOutputMessage(self, message):
currentMessage = self.builder.get_variable('outputMessage').get()
newMessage = currentMessage + "\n" + message
self.outputMessage = newMessage
return self.builder.get_variable('outputMessage').set(newMessage)
def printProgress(self, currentIndex, maxIndex):
newMessage = self.outputMessage + f"\n({currentIndex}/{maxIndex})"
return self.builder.get_variable('outputMessage').set(newMessage)
def setDefaults(self):
self.builder.get_variable('languageA').set("English")
self.builder.get_variable('languageB').set("German")
self.builder.get_variable('filePathA').set("example_books/german/siddhartha_hesse.txt")
self.builder.get_variable('filePathB').set("example_books/english/siddhartha_hesse.txt") # example_books/hungarian/amok_zweig.rtf
# self.builder.get_variable('languageA').set("German")
# self.builder.get_variable('languageB').set("Hungarian(Experimental)")
# self.builder.get_variable('filePathA').set(
# "example_books/german/amok_zweig.txt") # example_books/german/siddhartha_hesse.txt
# self.builder.get_variable('filePathB').set(
# "example_books/hungarian/amok_zweig.rtf") # self.builder.get_variable('filePathB').set("") # example_books/hungarian/amok_zweig.rtf
self.builder.get_variable('outputExtension').set("EPUB")
self.builder.get_variable('outputFilepath').set("test.epub")
|
py | 1a3d68585b6a6844369b3622e28fa30faff122b9 | import fnmatch
import functools
from collections import OrderedDict
from datetime import datetime
import pytest
from django.db.models import F
from django.utils import timezone
from pontoon.base.models import TranslatedResource, Translation
from pontoon.tags.models import Tag
from .site import _factory
def tag_factory():
def instance_attrs(instance, i):
if not instance.slug:
instance.slug = "factorytag%s" % i
if not instance.name:
instance.name = "Factory Tag %s" % i
return functools.partial(
_factory, Model=Tag, instance_attrs=instance_attrs
)
def _assert_tags(expected, data):
assert len(expected) == len(data)
results = dict((d['slug'], d) for d in data)
attrs = [
"pk",
"name",
# "last_change",
"total_strings",
"approved_strings",
"unreviewed_strings",
"fuzzy_strings",
]
for slug, stats in results.items():
_exp = expected[slug]
for attr in attrs:
assert _exp[attr] == stats[attr]
@pytest.fixture
def assert_tags():
"""This fixture provides a function for comparing calculated
tag stats against those provided by the tags tool
"""
return _assert_tags
def _calculate_resource_tags(**kwargs):
# returns the tags associated with a given resource, filters
# on priority if given
priority = kwargs.get("priority", None)
resource_tags = {}
tags_through = Tag.resources.through.objects.values_list(
"resource",
"tag",
"tag__slug",
"tag__name",
)
if priority is not None:
if priority is True:
tags_through = tags_through.exclude(tag__priority__isnull=True)
elif priority is False:
tags_through = tags_through.exclude(tag__priority__isnull=False)
else:
tags_through = tags_through.filter(tag__priority=priority)
for resource, tag, _slug, name in tags_through.iterator():
resource_tags[resource] = (
resource_tags.get(resource, []) + [(tag, _slug, name)]
)
return resource_tags
def _tag_iterator(things, **kwargs):
# for given qs.values() (`things`) and **kwargs to filter on, this will
# find and iterate matching tags.
# `things` can be either translations or translated_resources, but
# they must have their `locale`, `project`, `resource` and
# `resource.path` denormalized where required.
locales = list(l.id for l in kwargs.get("locales", []))
projects = list(p.id for p in kwargs.get("projects", []))
slug = kwargs.get("slug", None)
path = kwargs.get("path", None)
resource_tags = _calculate_resource_tags(**kwargs)
for thing in things.iterator():
if locales and thing["locale"] not in locales:
continue
if projects and thing["project"] not in projects:
continue
if path and not fnmatch.fnmatch(thing['path'], path):
continue
for tag in resource_tags.get(thing['resource'], []):
__, _slug, __ = tag
if slug and not fnmatch.fnmatch(_slug, slug):
continue
yield thing, tag
def _calculate_tags(**kwargs):
# calculate what the stats per-tag with given **kwargs should be
# the long-hand way
trs = TranslatedResource.objects.all()
attrs = [
"approved_strings",
"unreviewed_strings",
"fuzzy_strings",
]
totals = {}
resource_attrs = [
"resource",
"locale",
"latest_translation__date",
]
annotations = dict(
total_strings=F('resource__total_strings'),
project=F('resource__project'),
path=F('resource__path'),
)
# this is a `values` of translated resources, with the project, path
# and total_strings denormalized to project/path/total_strings.
qs = trs.values(*resource_attrs + attrs).annotate(**annotations)
translated_resource_tags = _tag_iterator(qs, **kwargs)
attrs = ['total_strings'] + attrs
# iterate through associated tags for all matching translated resources
for tr, (_pk, _slug, _name) in translated_resource_tags:
if kwargs.get('groupby'):
key = tr[kwargs['groupby']]
else:
key = _slug
if key not in totals:
# create a totals[tag] with zeros for this tag
totals[key] = dict((attr, 0) for attr in attrs)
totals[key].update(dict(name=_name, pk=_pk, last_change=None))
for attr in attrs:
# add the total for this translated resource to the tags total
totals[key][attr] += tr[attr]
return totals
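# Shape sketch of the return value (numbers are purely illustrative):
#   {'factorytag3': {'pk': 3, 'name': 'Factory Tag 3', 'last_change': None,
#                    'total_strings': 120, 'approved_strings': 40,
#                    'unreviewed_strings': 10, 'fuzzy_strings': 5}, ...}
# keyed by the groupby value instead of the slug when kwargs['groupby'] is set.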
@pytest.fixture
def calculate_tags():
"""This fixture provides a function for calculating the tags and their
expected stats etc currently in the database, after filtering for
provided **kwargs
"""
return _calculate_tags
def _calculate_tags_latest(**kwargs):
# calculate what the latest events per-tag with given **kwargs should be
# the long-hand way
translations = Translation.objects.all()
latest_dates = {}
translation_attrs = [
"pk",
"date",
"locale",
]
annotations = dict(
resource=F('entity__resource'),
path=F('entity__resource__path'),
project=F('entity__resource__project'),
)
# this is a `values` of translations, with the resource, path and project
# denormalized to resource/path/project.
qs = translations.values(*translation_attrs).annotate(**annotations)
translation_tags = _tag_iterator(qs, **kwargs)
# iterate through associated tags for all matching translations
for translation, (tag, __, __) in translation_tags:
if kwargs.get('groupby'):
key = translation[kwargs['groupby']]
else:
key = tag
# get the current latest for this tag
_pk, _date = latest_dates.get(
key, (None, timezone.make_aware(datetime.min))
)
if translation['date'] > _date:
# set this translation if its newer than the current latest
# for this tag
latest_dates[key] = (translation['pk'], translation['date'])
return latest_dates
@pytest.fixture
def calculate_tags_latest():
"""This fixture provides a function for calculating the tags and their
expected latest changes currently in the database, after filtering for
provided **kwargs
"""
return _calculate_tags_latest
@pytest.fixture
def tag_matrix(site_matrix):
"""This provides the `site_matrix` fixture but with added tags.
This fixture can be used in conjunction with the `calculate_tags`
fixture to test for tags using kwargs from the parametrized
`tag_test_kwargs` fixture
"""
factories = site_matrix['factories']
factories['tag'] = tag_factory()
# this creates 113 tags
# every 20th tag gets no priority
# the others get between 0-5
tags = factories['tag'](
args=[
{
'priority': (
None
if not i or not (i % 20)
else int(i / 20)
)
}
for i in range(0, 113)
]
)
# associate tags with resources
for i, resource in enumerate(site_matrix['resources']):
x = 0
indeces = []
# this distributes the tags amongst resources in
# a fairly arbitrary fashion
# every resource gets the tag with index of 0
while True:
idx = x * (i + 1)
if idx >= len(tags):
break
indeces.append(idx)
x = x + 1
# add tags to the resource's tag_set
resource.tag_set.add(*[tags[n] for n in indeces])
site_matrix['tags'] = tags
return site_matrix
_tag_kwargs = OrderedDict(
(('empty', dict()),
('project0_match', dict(projects=[0])),
('project1_match', dict(projects=[1])),
('locale_match', dict(locales=[0])),
('locale_and_project_match', dict(locales=[0], projects=[0])),
('locales_and_projects_match', dict(projects=[1, 2], locales=[0, 1])),
('priority_match', dict(priority=3)),
('priority_true_match', dict(priority=True)),
('priority_false_match', dict(priority=False)),
('path_no_match', dict(path="NOPATHSHERE")),
('path_match', dict(path=11)),
('path_glob', dict(path="resource[5-9]*")),
('slug_no_match', dict(slug="NOSLUGSHERE")),
('slug_exact', dict(slug=23)),
('slug_glob', dict(slug="factory*7")),
('party_glob',
dict(path="resource[1]*",
projects=[0, 2],
locales=[1, 2],
slug="factory*[2-5]"))))
@pytest.fixture(params=_tag_kwargs)
def tag_test_kwargs(request, tag_matrix):
"""This is a parametrized fixture that provides a range of possible
**kwargs for testing the TagsTool against tags in the `tag_matrix`
fixture.
If the parameter value for `path` is an `int`, it is mangled to the
`path` of the `resource` (in the site_matrix) with the corresponding
index.
If a `slug` is an `int`, it is likewise mangled to the `tag`.`slug` with
a corresponding index.
`projects` and `locales` are similarly mangled to the corresponding
projects/locales in the site_matrix.
Parameters that are suffixed with `_match` expect at least 1 result
to be returned.
Parameters suffixed with `_exact` expect exactly 1 result.
Parameters suffixed with `_glob` expect more than 1 result.
Finally, parameters suffixed with `_no_match` expect 0 results.
"""
kwargs = _tag_kwargs.get(request.param).copy()
if kwargs.get("path"):
if isinstance(kwargs['path'], int):
kwargs['path'] = tag_matrix['resources'][kwargs['path']].path
if kwargs.get("slug"):
if isinstance(kwargs['slug'], int):
kwargs["slug"] = tag_matrix['tags'][kwargs['slug']].slug
for k in ['projects', 'locales']:
if kwargs.get(k):
kwargs[k] = [
tag_matrix[k][i]
for i
in kwargs[k]]
return request.param, kwargs
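# For example, the 'slug_exact' parameters {'slug': 23} come out of this fixture as
# {'slug': tag_matrix['tags'][23].slug}, and 'party_glob' additionally swaps its
# project/locale indices for the matching objects in tag_matrix.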
_tag_data_init_kwargs = OrderedDict(
(('no_args',
dict(annotations=None,
groupby=None,
locales=None,
path=None,
priority=None,
projects=None,
slug=None)),
('args',
dict(annotations=1,
groupby=2,
locales=3,
path=4,
priority=5,
projects=6,
slug=7))))
@pytest.fixture(params=_tag_data_init_kwargs)
def tag_data_init_kwargs(request):
"""This is a parametrized fixture that provides 2 sets
of possible **kwargs to instantiate the TagsDataTools with
The first set of kwargs, are all set to `None` and the
second contain numeric values for testing against
"""
return _tag_data_init_kwargs.get(request.param).copy()
_tag_init_kwargs = OrderedDict(
(('no_args',
dict(locales=None,
path=None,
priority=None,
projects=None,
slug=None)),
('args',
dict(locales=1,
path=2,
priority=3,
projects=4,
slug=5))))
@pytest.fixture(params=_tag_init_kwargs)
def tag_init_kwargs(request):
"""This is a parametrized fixture that provides 2 sets
of possible **kwargs to instantiate the TagsTool with
The first set of kwargs, are all set to `None` and the
second contain numeric values for testing against
"""
return _tag_init_kwargs.get(request.param).copy()
|
py | 1a3d68cbe0c3a19ef72afe7fa8204eb613b6f864 | from threading import Thread
from time import sleep
from tkinter import *
def main():
global left_timer, right_timer
while True:
sleep(1)
if flag:
left_timer = left_timer - 1
m = int(left_timer / 60)
s = left_timer % 60
left['text'] = '{:02d}:{:02d}'.format(m, s)
else:
right_timer = right_timer - 1
m = int(right_timer / 60)
s = right_timer % 60
right['text'] = '{:02d}:{:02d}'.format(m, s)
def s():
t = Thread(target=main)
t.start()
def stopl():
global flag
flag = False
def stopr():
global flag
flag = True
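# flag semantics: True means the left clock is counting down, False the right one;
# each "Stop" button hands the turn to the other player, chess-clock style.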
root = Tk()
# variables ##########################
left_timer = 1200
right_timer = 1200
flag = True
######################################
# Label ##############################
l1 = Label(root, text='Left Player',
font=('times', 20, 'italic'))
l1.grid(row=0, column=0)
l2 = Label(root, text='Right Player',
font=('times', 20, 'italic'))
l2.grid(row=0, column=1)
######################################
# Timer Label ########################
left = Label(root, text='20:00',
font=('courier', 20))
left.grid(row=1, column=0)
right = Label(root, text='20:00',
font=('courier', 20))
right.grid(row=1, column=1)
######################################
# Button #############################
b1 = Button(root,
text='Stop',
command=stopl,
font=('courier', 20))
b1.grid(row=2, column=0)
b2 = Button(root,
command=stopr,
text='Stop',
font=('courier', 20))
b2.grid(row=2, column=1)
b3 = Button(root,
text='Start',
command=s,
font=('courier', 20))
b3.grid(row=3, column=0)
######################################
root.mainloop()
|
py | 1a3d6a130a3e4951df4aeade2a3680a191621269 | from builtins import range
from builtins import object
import numpy as np
from past.builtins import xrange
class KNearestNeighbor(object):
""" a kNN classifier with L2 distance """
def __init__(self):
pass
def train(self, X, y):
"""
Train the classifier. For k-nearest neighbors this is just
memorizing the training data.
Inputs:
- X: A numpy array of shape (num_train, D) containing the training data
consisting of num_train samples each of dimension D.
- y: A numpy array of shape (N,) containing the training labels, where
y[i] is the label for X[i].
"""
self.X_train = X
self.y_train = y
def predict(self, X, k=1, num_loops=0):
"""
Predict labels for test data using this classifier.
Inputs:
- X: A numpy array of shape (num_test, D) containing test data consisting
of num_test samples each of dimension D.
- k: The number of nearest neighbors that vote for the predicted labels.
- num_loops: Determines which implementation to use to compute distances
between training points and testing points.
Returns:
- y: A numpy array of shape (num_test,) containing predicted labels for the
test data, where y[i] is the predicted label for the test point X[i].
"""
if num_loops == 0:
dists = self.compute_distances_no_loops(X)
elif num_loops == 1:
dists = self.compute_distances_one_loop(X)
elif num_loops == 2:
dists = self.compute_distances_two_loops(X)
else:
raise ValueError('Invalid value %d for num_loops' % num_loops)
return self.predict_labels(dists, k=k)
def compute_distances_two_loops(self, X):
"""
Compute the distance between each test point in X and each training point
in self.X_train using a nested loop over both the training data and the
test data.
Inputs:
- X: A numpy array of shape (num_test, D) containing test data.
Returns:
- dists: A numpy array of shape (num_test, num_train) where dists[i, j]
is the Euclidean distance between the ith test point and the jth training
point.
"""
num_test = X.shape[0]
num_train = self.X_train.shape[0]
dists = np.zeros((num_test, num_train))
for i in range(num_test):
for j in range(num_train):
#####################################################################
# TODO: #
# Compute the l2 distance between the ith test point and the jth #
# training point, and store the result in dists[i, j]. You should #
# not use a loop over dimension, nor use np.linalg.norm(). #
#####################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
dists[i, j] = np.sqrt(np.sum(np.square(self.X_train[j, :] - X[i, :])))
pass
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
return dists
def compute_distances_one_loop(self, X):
"""
Compute the distance between each test point in X and each training point
in self.X_train using a single loop over the test data.
Input / Output: Same as compute_distances_two_loops
"""
num_test = X.shape[0]
num_train = self.X_train.shape[0]
dists = np.zeros((num_test, num_train))
for i in range(num_test):
#######################################################################
# TODO: #
# Compute the l2 distance between the ith test point and all training #
# points, and store the result in dists[i, :]. #
# Do not use np.linalg.norm(). #
#######################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
dists[i, :] = np.sqrt(np.sum(np.square(self.X_train - X[i, :]), axis=1))
pass
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
return dists
def compute_distances_no_loops(self, X):
"""
Compute the distance between each test point in X and each training point
in self.X_train using no explicit loops.
Input / Output: Same as compute_distances_two_loops
"""
num_test = X.shape[0]
num_train = self.X_train.shape[0]
dists = np.zeros((num_test, num_train))
#########################################################################
# TODO: #
# Compute the l2 distance between all test points and all training #
# points without using any explicit loops, and store the result in #
# dists. #
# #
# You should implement this function using only basic array operations; #
# in particular you should not use functions from scipy, #
# nor use np.linalg.norm(). #
# #
# HINT: Try to formulate the l2 distance using matrix multiplication #
# and two broadcast sums. #
#########################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
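# Vectorized form of the hint: ||a - b||^2 = ||a||^2 - 2 a.b + ||b||^2, evaluated
# for every (train, test) pair at once through broadcasting, then transposed so
# rows index test points and columns index training points.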
ab = np.dot(self.X_train,X.T)
a2 = np.sum(np.square(self.X_train), axis=1)
a2 = a2.reshape(a2.size,1)
b2 = np.sum(np.square(X), axis=1)
b2 = b2.reshape(1, b2.size)
dists = np.sqrt((-2*ab+a2+b2).T)
pass
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
return dists
def predict_labels(self, dists, k=1):
"""
Given a matrix of distances between test points and training points,
predict a label for each test point.
Inputs:
- dists: A numpy array of shape (num_test, num_train) where dists[i, j]
gives the distance between the ith test point and the jth training point.
Returns:
- y: A numpy array of shape (num_test,) containing predicted labels for the
test data, where y[i] is the predicted label for the test point X[i].
"""
num_test = dists.shape[0]
y_pred = np.zeros(num_test)
for i in range(num_test):
# A list of length k storing the labels of the k nearest neighbors to
# the ith test point.
closest_y = []
#########################################################################
# TODO: #
# Use the distance matrix to find the k nearest neighbors of the ith #
# testing point, and use self.y_train to find the labels of these #
# neighbors. Store these labels in closest_y. #
# Hint: Look up the function numpy.argsort. #
#########################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
sortArray=np.argsort(dists[i, :])
for j in range(k):
closest_y.append(self.y_train[sortArray[j]])
pass
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
#########################################################################
# TODO: #
# Now that you have found the labels of the k nearest neighbors, you #
# need to find the most common label in the list closest_y of labels. #
# Store this label in y_pred[i]. Break ties by choosing the smaller #
# label. #
#########################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
y_pred[i]= np.argmax(np.bincount(closest_y))
pass
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
return y_pred
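# Minimal smoke-test sketch (not part of the cs231n assignment); the tiny arrays
# below are made up purely to show the intended call sequence.
if __name__ == "__main__":
    X_tr = np.array([[0.0, 0.0], [1.0, 1.0], [5.0, 5.0], [6.0, 6.0]])
    y_tr = np.array([0, 0, 1, 1])
    knn = KNearestNeighbor()
    knn.train(X_tr, y_tr)
    # expected output: [0. 1.]
    print(knn.predict(np.array([[0.5, 0.5], [5.5, 5.5]]), k=1, num_loops=0))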
|
py | 1a3d6b0be4bda8a3c7a1198000e38492b255e862 | """combine_tables
Revision ID: 2195e3e2a9ce
Revises: ed6021c67b9f
Create Date: 2019-10-31 10:40:36.829434
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from geoalchemy2.types import Geometry
# revision identifiers, used by Alembic.
revision = '2195e3e2a9ce'
down_revision = 'ed6021c67b9f'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('text_with_suggestions',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('map_search_id', sa.Integer(), nullable=True),
sa.Column('text_typed', sa.Text(), nullable=False),
sa.Column('suggestion_choosen', sa.Text(), nullable=True),
sa.ForeignKeyConstraint(['map_search_id'], ['map_search.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.drop_constraint('click_bbox_id_fkey', 'click', type_='foreignkey')
op.drop_constraint('pan_new_bbox_id_fkey', 'pan', type_='foreignkey')
op.drop_constraint('pan_old_bbox_id_fkey', 'pan', type_='foreignkey')
op.drop_constraint('zoom_in_out_new_bbox_id_fkey', 'zoom_in_out', type_='foreignkey')
op.drop_constraint('zoom_in_out_old_bbox_id_fkey', 'zoom_in_out', type_='foreignkey')
op.drop_constraint('suggestions_text_with_suggestion_id_fkey', 'suggestions', type_='foreignkey')
op.drop_constraint('zoom_in_out_map_interaction_type_id_fkey', 'zoom_in_out', type_='foreignkey')
op.drop_constraint('spatial_bookmark_geocoord_id_fkey', 'spatial_bookmark', type_='foreignkey')
op.drop_constraint('user_position_geocoord_id_fkey', 'user_position', type_='foreignkey')
# op.drop_constraint('map_interaction_geocoord_id_fkey', 'map_interaction', type_='foreignkey')
op.drop_constraint('map_search_bbox_id_fkey', 'map_search', type_='foreignkey')
op.drop_column('map_search', 'bbox_id')
# op.add_column('routing', sa.Column('end_routing_interface_send_request_or_interface_closed_time_stamp', sa.DateTime(), nullable=False))
op.drop_constraint('routing_origin_text_box_history_id_fkey', 'routing', type_='foreignkey')
op.drop_constraint('routing_destination_text_box_history_id_fkey', 'routing', type_='foreignkey')
# op.create_foreign_key('origin_text_bbox_fk', 'routing', 'text_with_suggestions', ['origin_text_box_history_id'], ['id'])
# op.create_foreign_key('dest_text_bbox_fk', 'routing', 'text_with_suggestions', ['destination_text_box_history_id'], ['id'])
# op.drop_column('routing', 'end_routing_interface_send_request_or_interface_closed_time_sta')
op.drop_table('text_with_suggestion')
op.drop_table('bbox')
op.drop_table('pan')
op.drop_table('click')
op.drop_table('zoom_in_out')
op.drop_constraint('map_interaction_geocoord_id_fkey', 'map_interaction', type_='foreignkey')
op.drop_column('map_interaction', 'geocoord_id')
op.drop_index('idx_geocoordinate_with_time_stamp_geom', table_name='geocoordinate_with_time_stamp')
op.drop_table('geocoordinate_with_time_stamp')
op.drop_table('map_interaction_type')
op.add_column('map_interaction', sa.Column('click_time_stamp', sa.DateTime()))
op.add_column('map_interaction', sa.Column('geom', Geometry(geometry_type='POINT', srid=4326), nullable=False))
op.add_column('map_interaction', sa.Column('is_click_interaction', sa.Boolean(), nullable=True))
op.add_column('map_interaction', sa.Column('is_pan_interaction', sa.Boolean(), nullable=True))
op.add_column('map_interaction', sa.Column('is_zoom_in_interaction', sa.Boolean(), nullable=True))
op.add_column('map_interaction', sa.Column('is_zoom_out_interaction', sa.Boolean(), nullable=True))
op.add_column('map_interaction', sa.Column('new_bbox_geom', Geometry(geometry_type='POLYGON', srid=4326)))
op.add_column('map_interaction', sa.Column('new_bbox_time_stamp', sa.DateTime()))
op.add_column('map_interaction', sa.Column('new_zoom_level', sa.Integer()))
op.add_column('map_interaction', sa.Column('old_bbox_geom', Geometry(geometry_type='POLYGON', srid=4326)))
op.add_column('map_interaction', sa.Column('old_bbox_time_stamp', sa.DateTime()))
op.add_column('map_interaction', sa.Column('old_zoom_level', sa.Integer()))
op.add_column('map_interaction', sa.Column('time_stamp', sa.DateTime(), nullable=False))
op.add_column('map_interaction', sa.Column('where_clicked_geom', Geometry(geometry_type='POINT', srid=4326)))
op.add_column('map_search', sa.Column('bbox_geom', Geometry(geometry_type='POLYGON', srid=4326), nullable=False))
op.add_column('map_search', sa.Column('bbox_time_stamp', sa.DateTime(), nullable=False))
op.add_column('spatial_bookmark', sa.Column('geom', Geometry(geometry_type='POINT', srid=4326), nullable=False))
op.add_column('spatial_bookmark', sa.Column('time_stamp', sa.DateTime(), nullable=False))
# op.drop_constraint('spatial_bookmark_geocoord_id_fkey', 'spatial_bookmark', type_='foreignkey')
op.drop_column('spatial_bookmark', 'geocoord_id')
op.add_column('suggestions', sa.Column('text_with_suggestions_id', sa.Integer(), nullable=True))
# op.drop_constraint('suggestions_text_with_suggestion_id_fkey', 'suggestions', type_='foreignkey')
op.create_foreign_key('text_with_suggestion_fk', 'suggestions', 'text_with_suggestions', ['text_with_suggestions_id'], ['id'])
op.drop_column('suggestions', 'text_with_suggestion_id')
op.add_column('user_position', sa.Column('geom', Geometry(geometry_type='POINT', srid=4326), nullable=False))
op.add_column('user_position', sa.Column('time_stamp', sa.DateTime(), nullable=False))
# op.drop_constraint('user_position_geocoord_id_fkey', 'user_position', type_='foreignkey')
op.drop_column('user_position', 'geocoord_id')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('geocoordinate_with_time_stamp',
sa.Column('id', sa.INTEGER(), server_default=sa.text("nextval('geocoordinate_with_time_stamp_id_seq'::regclass)"), autoincrement=True, nullable=False),
sa.Column('time_stamp', postgresql.TIMESTAMP(), autoincrement=False, nullable=False),
sa.Column('geom', Geometry(geometry_type='POINT', srid=4326), autoincrement=False, nullable=False),
sa.PrimaryKeyConstraint('id', name='geocoordinate_with_time_stamp_pkey'),
postgresql_ignore_search_path=False
)
op.create_table('bbox',
sa.Column('id', sa.INTEGER(), autoincrement=True, nullable=False),
sa.Column('upper_left_coordinate_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('lower_right_coordinate_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['lower_right_coordinate_id'], ['geocoordinate_with_time_stamp.id'], name='bbox_lower_right_coordinate_id_fkey'),
sa.ForeignKeyConstraint(['upper_left_coordinate_id'], ['geocoordinate_with_time_stamp.id'], name='bbox_upper_left_coordinate_id_fkey'),
sa.PrimaryKeyConstraint('id', name='bbox_pkey')
)
op.create_table('map_interaction_type',
sa.Column('id', sa.INTEGER(), server_default=sa.text("nextval('map_interaction_type_id_seq'::regclass)"), autoincrement=True, nullable=False),
sa.Column('map_interaction_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['map_interaction_id'], ['map_interaction.id'], name='map_interaction_type_map_interaction_id_fkey'),
sa.PrimaryKeyConstraint('id', name='map_interaction_type_pkey'),
postgresql_ignore_search_path=False
)
op.create_table('zoom_in_out',
sa.Column('id', sa.INTEGER(), autoincrement=True, nullable=False),
sa.Column('map_interaction_type_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('old_zoom_level', sa.INTEGER(), autoincrement=False, nullable=False),
sa.Column('new_zoom_level', sa.INTEGER(), autoincrement=False, nullable=False),
sa.Column('old_bbox_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('new_bbox_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['map_interaction_type_id'], ['map_interaction_type.id'], name='zoom_in_out_map_interaction_type_id_fkey'),
sa.ForeignKeyConstraint(['new_bbox_id'], ['bbox.id'], name='zoom_in_out_new_bbox_id_fkey'),
sa.ForeignKeyConstraint(['old_bbox_id'], ['bbox.id'], name='zoom_in_out_old_bbox_id_fkey'),
sa.PrimaryKeyConstraint('id', name='zoom_in_out_pkey')
)
op.create_table('text_with_suggestion',
sa.Column('id', sa.INTEGER(), autoincrement=True, nullable=False),
sa.Column('map_search_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('text_typed', sa.TEXT(), autoincrement=False, nullable=False),
sa.Column('suggestion_choosen', sa.TEXT(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['map_search_id'], ['map_search.id'], name='text_with_suggestion_map_search_id_fkey'),
sa.PrimaryKeyConstraint('id', name='text_with_suggestion_pkey')
)
op.create_table('click',
sa.Column('id', sa.INTEGER(), autoincrement=True, nullable=False),
sa.Column('map_interaction_type_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('where_clicked_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('bbox_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['bbox_id'], ['bbox.id'], name='click_bbox_id_fkey'),
sa.ForeignKeyConstraint(['map_interaction_type_id'], ['map_interaction_type.id'], name='click_map_interaction_type_id_fkey'),
sa.ForeignKeyConstraint(['where_clicked_id'], ['geocoordinate_with_time_stamp.id'], name='click_where_clicked_id_fkey'),
sa.PrimaryKeyConstraint('id', name='click_pkey')
)
op.create_table('pan',
sa.Column('id', sa.INTEGER(), autoincrement=True, nullable=False),
sa.Column('map_interaction_type_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('old_bbox_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('new_bbox_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['map_interaction_type_id'], ['map_interaction_type.id'], name='pan_map_interaction_type_id_fkey'),
sa.ForeignKeyConstraint(['new_bbox_id'], ['bbox.id'], name='pan_new_bbox_id_fkey'),
sa.ForeignKeyConstraint(['old_bbox_id'], ['bbox.id'], name='pan_old_bbox_id_fkey'),
sa.PrimaryKeyConstraint('id', name='pan_pkey')
)
op.drop_constraint('text_with_suggestion_fk', 'suggestions', type_='foreignkey')
op.drop_table('text_with_suggestions')
op.add_column('user_position', sa.Column('geocoord_id', sa.INTEGER(), autoincrement=False, nullable=True))
op.create_foreign_key('user_position_geocoord_id_fkey', 'user_position', 'geocoordinate_with_time_stamp', ['geocoord_id'], ['id'])
op.drop_column('user_position', 'time_stamp')
op.drop_column('user_position', 'geom')
op.add_column('suggestions', sa.Column('text_with_suggestion_id', sa.INTEGER(), autoincrement=False, nullable=True))
op.create_foreign_key('suggestions_text_with_suggestion_id_fkey', 'suggestions', 'text_with_suggestion', ['text_with_suggestion_id'], ['id'])
op.drop_column('suggestions', 'text_with_suggestions_id')
op.add_column('spatial_bookmark', sa.Column('geocoord_id', sa.INTEGER(), autoincrement=False, nullable=True))
op.create_foreign_key('spatial_bookmark_geocoord_id_fkey', 'spatial_bookmark', 'geocoordinate_with_time_stamp', ['geocoord_id'], ['id'])
op.drop_column('spatial_bookmark', 'time_stamp')
op.drop_column('spatial_bookmark', 'geom')
# op.drop_constraint('origin_text_bbox_fk', 'routing', type_='foreignkey')
# op.drop_constraint('dest_text_bbox_fk', 'routing', type_='foreignkey')
op.create_foreign_key('routing_origin_text_box_history_id_fkey', 'routing', 'text_with_suggestion', ['origin_text_box_history_id'], ['id'])
op.create_foreign_key('routing_destination_text_box_history_id_fkey', 'routing', 'text_with_suggestion', ['destination_text_box_history_id'], ['id'])
op.add_column('map_search', sa.Column('bbox_id', sa.INTEGER(), autoincrement=False, nullable=True))
op.create_foreign_key('map_search_bbox_id_fkey', 'map_search', 'bbox', ['bbox_id'], ['id'])
op.drop_column('map_search', 'bbox_time_stamp')
op.drop_column('map_search', 'bbox_geom')
op.add_column('map_interaction', sa.Column('geocoord_id', sa.INTEGER(), autoincrement=False, nullable=True))
op.create_foreign_key('map_interaction_geocoord_id_fkey', 'map_interaction', 'geocoordinate_with_time_stamp', ['geocoord_id'], ['id'])
op.drop_column('map_interaction', 'where_clicked_geom')
op.drop_column('map_interaction', 'time_stamp')
op.drop_column('map_interaction', 'old_zoom_level')
op.drop_column('map_interaction', 'old_bbox_time_stamp')
op.drop_column('map_interaction', 'old_bbox_geom')
op.drop_column('map_interaction', 'new_zoom_level')
op.drop_column('map_interaction', 'new_bbox_time_stamp')
op.drop_column('map_interaction', 'new_bbox_geom')
op.drop_column('map_interaction', 'is_zoom_out_interaction')
op.drop_column('map_interaction', 'is_zoom_in_interaction')
op.drop_column('map_interaction', 'is_pan_interaction')
op.drop_column('map_interaction', 'is_click_interaction')
op.drop_column('map_interaction', 'geom')
op.drop_column('map_interaction', 'click_time_stamp')
# op.create_index('idx_geocoordinate_with_time_stamp_geom', 'geocoordinate_with_time_stamp', ['geom'], unique=False)
# ### end Alembic commands ###
|
py | 1a3d6b3401716c7d8e9520ab0d20ba900d03bbb5 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
"""Unit Tests for internal methods."""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import unittest
from collections import namedtuple
import graphviz as gv
import numpy as np
from onnx import TensorProto
from onnx import helper, numpy_helper
import tensorflow as tf
from tf2onnx import utils
from tf2onnx.graph_matcher import OpTypePattern, GraphMatcher
from tf2onnx.graph import GraphUtil
from common import unittest_main
# pylint: disable=missing-docstring
def onnx_to_graphviz(g):
"""Onnx graph as dot string."""
g2 = gv.Digraph()
for node in g.get_nodes():
kwarg = {}
attr = node.attr
if "shape" in attr:
kwarg["shape"] = str(attr["shape"].ints)
if "broadcast" in attr:
kwarg["broadcast"] = str(attr["broadcast"].i)
g2.node(node.name, op_type=node.type, **kwarg)
for node in g.get_nodes():
for i in node.input:
if i:
g2.edge(i, node.name)
return " ".join(g2.source.split())
def onnx_pretty(g, args=None):
"""Onnx graph pretty print."""
graph_proto = g.make_model("converted from {}".format(args.input))
return helper.printable_graph(graph_proto.graph)
class Tf2OnnxInternalTests(unittest.TestCase):
def setUp(self):
"""Setup test."""
# suppress tensorflow's info logging so that the test output is easier to read
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
tf.logging.set_verbosity(tf.logging.WARN)
utils.INTERNAL_NAME = 1
arg = namedtuple("Arg", "input inputs outputs verbose")
self._args0 = arg(input="test", inputs=[], outputs=["output:0"], verbose=False)
self._args1 = arg(input="test", inputs=["input:0"], outputs=["output:0"], verbose=False)
self._args2 = arg(input="test", inputs=["input1:0", "input2:0"], outputs=["output:0"], verbose=False)
self._args3 = arg(input="test", inputs=["input1:0", "input2:0", "prob:0"], outputs=["output:0"], verbose=False)
self._args4 = arg(input="test", inputs=["input1:0", "input2:0"], outputs=["output1:0", "output2:0"],
verbose=False)
@staticmethod
def sample_net():
n1 = helper.make_node("Abs", ["input"], ["n1:0"], name="n1")
n2 = helper.make_node("Abs", ["n1:0"], ["n2:0"], name="n2")
n3 = helper.make_node("Abs", ["n1:0"], ["n3:0"], name="n3")
n4 = helper.make_node("Add", ["n2:0", "n3:0"], ["n4:0"], name="n4")
n5 = helper.make_node("Abs", ["n4:0"], ["n5:0"], name="n5")
n6 = helper.make_node("Identity", ["n5:0"], ["n6:0"], name="n6")
graph_proto = helper.make_graph(
nodes=[n1, n2, n3, n4, n5, n6],
name="test",
inputs=[helper.make_tensor_value_info("input", TensorProto.FLOAT, [2, 2])],
outputs=[helper.make_tensor_value_info("n5:0", TensorProto.FLOAT, [2, 2])],
initializer=[]
)
return graph_proto
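# Topology of sample_net, for reference when reading the expected dot strings below:
#   input -> n1 -> n2 --+
#             \--> n3 --+--> n4 (Add) -> n5 -> n6 (Identity)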
def test_insert_node1(self):
graph_proto = self.sample_net()
g = GraphUtil.create_graph_from_onnx_graph(graph_proto)
n2 = g.get_node_by_name("n2")
g.insert_new_node_on_input(n2, "Abs", "n1:0", name="n7")
ops = g.get_nodes()
g.topological_sort(ops)
result = onnx_to_graphviz(g)
expected = 'digraph { Placeholder__4 [op_type=Placeholder] ' \
'n1 [op_type=Abs] n7 [op_type=Abs] n2 [op_type=Abs] n3 [op_type=Abs] ' \
'n4 [op_type=Add] n5 [op_type=Abs] n5_graph_outputs_Identity__3 [op_type=Identity] ' \
'n6 [op_type=Identity] input -> n1 n1:0 -> n7 n7:0 -> n2 n1:0 -> n3 ' \
'n2:0 -> n4 n3:0 -> n4 n4:0 -> n5 n5_raw_output___2:0 -> n5_graph_outputs_Identity__3 ' \
'n5_raw_output___2:0 -> n6 }'
self.assertEqual(expected, result)
def test_insert_node2(self):
graph_proto = self.sample_net()
g = GraphUtil.create_graph_from_onnx_graph(graph_proto)
g.insert_new_node_on_output("Abs", "n1:0", name="n7")
ops = g.get_nodes()
g.topological_sort(ops)
result = onnx_to_graphviz(g)
expected = 'digraph { Placeholder__4 [op_type=Placeholder] n1 [op_type=Abs] n7 [op_type=Abs] ' \
'n3 [op_type=Abs] n2 [op_type=Abs] n4 [op_type=Add] n5 [op_type=Abs] ' \
'n5_graph_outputs_Identity__3 [op_type=Identity] n6 [op_type=Identity] ' \
'input -> n1 n1:0 -> n7 n7:0 -> n3 n7:0 -> n2 n2:0 -> n4 n3:0 -> n4 ' \
'n4:0 -> n5 n5_raw_output___2:0 -> n5_graph_outputs_Identity__3 n5_raw_output___2:0 -> n6 }'
self.assertEqual(expected, result)
def test_remove_input(self):
graph_proto = self.sample_net()
g = GraphUtil.create_graph_from_onnx_graph(graph_proto)
n4 = g.get_node_by_name("n4")
g.remove_input(n4, n4.input[1])
ops = g.get_nodes()
g.topological_sort(ops)
result = onnx_to_graphviz(g)
expected = 'digraph { Placeholder__4 [op_type=Placeholder] n1 [op_type=Abs] n3 [op_type=Abs] ' \
'n2 [op_type=Abs] n4 [op_type=Add] n5 [op_type=Abs] ' \
'n5_graph_outputs_Identity__3 [op_type=Identity] n6 [op_type=Identity] ' \
'input -> n1 n1:0 -> n3 n1:0 -> n2 n2:0 -> n4 n4:0 -> n5 ' \
'n5_raw_output___2:0 -> n5_graph_outputs_Identity__3 n5_raw_output___2:0 -> n6 }'
self.assertEqual(expected, result)
def test_rewrite_subgraph(self):
graph_proto = self.sample_net()
g = GraphUtil.create_graph_from_onnx_graph(graph_proto)
pattern = \
OpTypePattern('Abs', name='output', inputs=[
OpTypePattern('Add', name='input')
])
ops = g.get_nodes()
matcher = GraphMatcher(pattern)
match_results = list(matcher.match_ops(ops))
for match in match_results:
input_node = match.get_op('input')
output_node = match.get_op('output')
op_name = utils.make_name("ReplacedOp")
out_name = utils.port_name(op_name)
new_node = g.make_node("Sub", inputs=input_node.input, outputs=[out_name], name=op_name)
g.replace_all_inputs(ops, output_node.output[0], new_node.output[0])
for n in set(match.get_nodes()):
g.remove_node(n.name)
g.topological_sort(ops)
result = onnx_to_graphviz(g)
expected = 'digraph { Placeholder__4 [op_type=Placeholder] n1 [op_type=Abs] ' \
'n3 [op_type=Abs] n2 [op_type=Abs] ReplacedOp__5 [op_type=Sub] ' \
'n5_graph_outputs_Identity__3 [op_type=Identity] n6 [op_type=Identity] ' \
'input -> n1 n1:0 -> n3 n1:0 -> n2 n2:0 -> ReplacedOp__5 ' \
'n3:0 -> ReplacedOp__5 ReplacedOp__5:0 -> n5_graph_outputs_Identity__3 ' \
'ReplacedOp__5:0 -> n6 }'
self.assertEqual(expected, result)
def test_match_flipped(self):
n1 = helper.make_node("Sub", ["i1", "i1"], ["n1:0"], name="n1")
n2 = helper.make_node("Add", ["i2", "i2"], ["n2:0"], name="n2")
n3 = helper.make_node("Mul", ["n1:0", "n2:0"], ["n3:0"], name="n3")
graph_proto = helper.make_graph(
nodes=[n1, n2, n3],
name="test",
inputs=[helper.make_tensor_value_info("i1", TensorProto.FLOAT, [2, 2]),
helper.make_tensor_value_info("i2", TensorProto.FLOAT, [2, 2])],
outputs=[helper.make_tensor_value_info("n2:0", TensorProto.FLOAT, [2, 2])],
initializer=[]
)
g = GraphUtil.create_graph_from_onnx_graph(graph_proto)
pattern = OpTypePattern('Mul', inputs=[
OpTypePattern('Add'),
OpTypePattern('Sub')
])
ops = g.get_nodes()
matcher = GraphMatcher(pattern, allow_reorder=True)
match_results = list(matcher.match_ops(ops))
self.assertEqual(1, len(match_results))
def test_cmdarg_parse(self):
arg = "input/V-1_2:0,input/X:0[1,2,3],Y:1[4,5],Z:3,A:1,B"
expected_inputs = ['input/V-1_2:0', 'input/X:0', 'Y:1', 'Z:3', 'A:1', 'B']
expected_shape = {'Y:1': [4, 5], 'input/X:0': [1, 2, 3]}
inputs, shape_override = utils.split_nodename_and_shape(arg)
self.assertEqual(expected_inputs, inputs)
self.assertEqual(expected_shape, shape_override)
def test_shape_utils(self):
self.assertEqual(utils.merge_shapes(None, None), None)
self.assertEqual(utils.merge_shapes([], None), [])
self.assertEqual(utils.merge_shapes(None, [1, 2, 3]), [1, 2, 3])
self.assertEqual(utils.merge_shapes([1, 3], [None, 3]), [1, 3])
self.assertEqual(utils.merge_shapes([1, None, 3], (-1, 2, "unk")), [1, 2, 3])
self.assertTrue(utils.are_shapes_compatible(None, []))
self.assertTrue(utils.are_shapes_compatible([1, None, 3], (-1, 2, "unk")))
self.assertFalse(utils.are_shapes_compatible([1, 2, 3], (2, 3)))
self.assertFalse(utils.are_shapes_compatible([1, 2, 3], (4, 5, 6)))
self.assertTrue(utils.are_shapes_equal(None, None))
self.assertFalse(utils.are_shapes_equal(None, []))
self.assertTrue(utils.are_shapes_equal([1, 2, 3], (1, 2, 3)))
def test_data_format(self):
n1 = helper.make_node("Conv", ["X", "W"], ["Y"], name="n1", data_format="NHWC")
graph_proto = helper.make_graph(
nodes=[n1],
name="test",
inputs=[helper.make_tensor_value_info("X", TensorProto.FLOAT, [2, 2]),
helper.make_tensor_value_info("W", TensorProto.FLOAT, [2, 2])],
outputs=[helper.make_tensor_value_info("Y", TensorProto.FLOAT, [2, 2])],
initializer=[]
)
g = GraphUtil.create_graph_from_onnx_graph(graph_proto)
n = g.get_node_by_name("n1")
self.assertEqual(n.data_format, "NHWC")
self.assertTrue(n.is_nhwc())
def test_node_attr_onnx(self):
n1 = helper.make_node("Conv", ["X", "W"], ["Y"], name="n1", my_attr="my_attr")
graph_proto = helper.make_graph(
nodes=[n1],
name="test",
inputs=[helper.make_tensor_value_info("X", TensorProto.FLOAT, [2, 2]),
helper.make_tensor_value_info("W", TensorProto.FLOAT, [2, 2])],
outputs=[helper.make_tensor_value_info("Y", TensorProto.FLOAT, [2, 2])],
initializer=[]
)
g = GraphUtil.create_graph_from_onnx_graph(graph_proto)
n1 = g.get_node_by_name("n1")
self.assertTrue("my_attr" in n1.attr)
self.assertTrue("my_attr" not in n1.attr_onnx)
n1 = helper.make_node("Conv", ["X", "W"], ["Y"], name="n1", domain="my_domain", my_attr="my_attr")
graph_proto = helper.make_graph(
nodes=[n1],
name="test",
inputs=[helper.make_tensor_value_info("X", TensorProto.FLOAT, [2, 2]),
helper.make_tensor_value_info("W", TensorProto.FLOAT, [2, 2])],
outputs=[helper.make_tensor_value_info("Y", TensorProto.FLOAT, [2, 2])],
initializer=[]
)
g = GraphUtil.create_graph_from_onnx_graph(graph_proto)
n1 = g.get_node_by_name("n1")
self.assertTrue("my_attr" in n1.attr)
self.assertTrue("my_attr" in n1.attr_onnx)
def test_tensor_data(self):
tensors = {
"empty_tensor": np.array([], dtype=np.float32),
"multi_dim_empty_tensor": np.array([[], []], dtype=np.float32),
"scalar": np.array(1., dtype=np.float32),
"one_item_array": np.array([1.], dtype=np.float32),
"normal_array": np.array([[1., 2.], [2., 3.]], dtype=np.float32)
}
tf.reset_default_graph()
with tf.Session() as sess:
for n, data in tensors.items():
tf.constant(data, dtype=tf.float32, name=n)
for tf_node in sess.graph.get_operations():
name = tf_node.name
self.assertTrue(name in tensors.keys())
self.assertTrue("value" in tf_node.node_def.attr)
# convert to onnx tensor value
tensor_value = utils.tf_to_onnx_tensor(
utils.get_tf_node_attr(tf_node, "value"),
name=utils.port_name(tf_node.name)
)
attr = helper.make_attribute("value", tensor_value)
# same as node.get_tensor_value(is_list=False)
actual = numpy_helper.to_array(helper.get_attribute_value(attr))
expected = tensors[name]
self.assertTrue(np.array_equal(expected, actual))
if __name__ == '__main__':
unittest_main()
|
py | 1a3d6b959e9b74e9eb25246b883d3a1c51408408 | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import io
import json
import sys
import time
import typing
from pickle import dumps as p_dumps, loads as p_loads
import pika
# noinspection PyPackageRequirements
from pyspark import SparkContext, RDD
from fate_arch.common import conf_utils, file_utils
from fate_arch.abc import FederationABC, GarbageCollectionABC
from fate_arch.common import Party
from fate_arch.common.log import getLogger
from fate_arch.computing.spark import get_storage_level, Table
from fate_arch.computing.spark._materialize import materialize
from fate_arch.federation.rabbitmq._mq_channel import MQChannel
from fate_arch.federation.rabbitmq._rabbit_manager import RabbitManager
LOGGER = getLogger()
# default message max size in bytes = 1MB
DEFAULT_MESSAGE_MAX_SIZE = 1048576
NAME_DTYPE_TAG = "<dtype>"
_SPLIT_ = "^"
# Datastream is a wrapper of StringIO; it receives kv pairs and dumps them to a json string
class Datastream(object):
def __init__(self):
self._string = io.StringIO()
self._string.write("[")
def get_size(self):
return sys.getsizeof(self._string.getvalue())
def get_data(self):
self._string.write("]")
return self._string.getvalue()
def append(self, kv: dict):
# add ',' if not the first element
if self._string.getvalue() != "[":
self._string.write(",")
json.dump(kv, self._string)
def clear(self):
self._string.close()
self.__init__()
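# Illustrative usage sketch of Datastream (added for clarity; not part of the
# original module). It mirrors the batching pattern used by _partition_send
# further below: keep appending elements until get_size() approaches a limit,
# flush the accumulated JSON array with get_data(), then clear() and continue.
# The 256-byte limit and the payload contents are arbitrary placeholders.
def _datastream_usage_example(max_size=256):
    stream = Datastream()
    batches = []
    for i in range(50):
        el = {"k": str(i), "v": str(i * i)}
        # flush the current batch before it would grow past max_size
        if (
            stream.get_size() + sys.getsizeof(el["k"]) + sys.getsizeof(el["v"])
            >= max_size
        ):
            batches.append(json.loads(stream.get_data()))
            stream.clear()
        stream.append(el)
    # flush whatever is left in the stream
    batches.append(json.loads(stream.get_data()))
    return batches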
class FederationDataType(object):
OBJECT = "obj"
TABLE = "Table"
class MQ(object):
def __init__(self, host, port, union_name, policy_id, route_table):
self.host = host
self.port = port
self.union_name = union_name
self.policy_id = policy_id
self.route_table = route_table
def __str__(self):
return (
f"MQ(host={self.host}, port={self.port}, union_name={self.union_name}, "
f"policy_id={self.policy_id}, route_table={self.route_table})"
)
def __repr__(self):
return self.__str__()
class _QueueNames(object):
def __init__(self, vhost, send, receive):
self.vhost = vhost
# self.union = union
self.send = send
self.receive = receive
_remote_history = set()
def _remote_tag_not_duplicate(name, tag, parties):
for party in parties:
if (name, tag, party) in _remote_history:
return False
_remote_history.add((name, tag, party))
return True
_get_history = set()
def _get_tag_not_duplicate(name, tag, party):
if (name, tag, party) in _get_history:
return False
_get_history.add((name, tag, party))
return True
class Federation(FederationABC):
@staticmethod
def from_conf(
federation_session_id: str,
party: Party,
runtime_conf: dict,
rabbitmq_config: dict,
):
LOGGER.debug(f"rabbitmq_config: {rabbitmq_config}")
host = rabbitmq_config.get("host")
port = rabbitmq_config.get("port")
mng_port = rabbitmq_config.get("mng_port")
base_user = rabbitmq_config.get("user")
base_password = rabbitmq_config.get("password")
federation_info = runtime_conf.get("job_parameters", {}).get(
"federation_info", {}
)
union_name = federation_info.get("union_name")
policy_id = federation_info.get("policy_id")
rabbitmq_run = runtime_conf.get("job_parameters", {}).get("rabbitmq_run", {})
LOGGER.debug(f"rabbitmq_run: {rabbitmq_run}")
max_message_size = rabbitmq_run.get(
"max_message_size", DEFAULT_MESSAGE_MAX_SIZE
)
LOGGER.debug(f"set max message size to {max_message_size} Bytes")
rabbit_manager = RabbitManager(
base_user, base_password, f"{host}:{mng_port}", rabbitmq_run
)
rabbit_manager.create_user(union_name, policy_id)
route_table_path = rabbitmq_config.get("route_table")
if route_table_path is None:
route_table_path = "conf/rabbitmq_route_table.yaml"
route_table = file_utils.load_yaml_conf(conf_path=route_table_path)
mq = MQ(host, port, union_name, policy_id, route_table)
return Federation(
federation_session_id, party, mq, rabbit_manager, max_message_size
)
def __init__(
self,
session_id,
party: Party,
mq: MQ,
rabbit_manager: RabbitManager,
max_message_size,
):
self._session_id = session_id
self._party = party
self._mq = mq
self._rabbit_manager = rabbit_manager
        self._queue_map: typing.MutableMapping[str, _QueueNames] = {}
        self._channels_map: typing.MutableMapping[str, MQChannel] = {}
self._vhost_set = set()
self._name_dtype_map = {}
self._message_cache = {}
self._max_message_size = max_message_size
def __getstate__(self):
pass
def get(
self, name: str, tag: str, parties: typing.List[Party], gc: GarbageCollectionABC
) -> typing.List:
log_str = f"[rabbitmq.get](name={name}, tag={tag}, parties={parties})"
LOGGER.debug(f"[{log_str}]start to get")
# for party in parties:
# if not _get_tag_not_duplicate(name, tag, party):
# raise ValueError(f"[{log_str}]get from {party} with duplicate tag")
_name_dtype_keys = [
_SPLIT_.join([party.role, party.party_id, name, tag, "get"])
for party in parties
]
if _name_dtype_keys[0] not in self._name_dtype_map:
mq_names = self._get_mq_names(parties, dtype=NAME_DTYPE_TAG)
channel_infos = self._get_channels(mq_names=mq_names)
rtn_dtype = []
for i, info in enumerate(channel_infos):
obj = self._receive_obj(
info, name, tag=_SPLIT_.join([tag, NAME_DTYPE_TAG])
)
rtn_dtype.append(obj)
LOGGER.debug(
f"[rabbitmq.get] _name_dtype_keys: {_name_dtype_keys}, dtype: {obj}"
)
for k in _name_dtype_keys:
if k not in self._name_dtype_map:
self._name_dtype_map[k] = rtn_dtype[0]
rtn_dtype = self._name_dtype_map[_name_dtype_keys[0]]
rtn = []
dtype = rtn_dtype.get("dtype", None)
partitions = rtn_dtype.get("partitions", None)
if dtype == FederationDataType.TABLE:
mq_names = self._get_mq_names(parties, name, partitions=partitions)
for i in range(len(mq_names)):
party = parties[i]
role = party.role
party_id = party.party_id
party_mq_names = mq_names[i]
receive_func = self._get_partition_receive_func(
name,
tag,
party_id,
role,
party_mq_names,
mq=self._mq,
connection_conf=self._rabbit_manager.runtime_config.get(
"connection", {}
),
)
sc = SparkContext.getOrCreate()
rdd = sc.parallelize(range(partitions), partitions)
rdd = rdd.mapPartitionsWithIndex(receive_func)
rdd = materialize(rdd)
table = Table(rdd)
rtn.append(table)
# add gc
gc.add_gc_action(tag, table, "__del__", {})
LOGGER.debug(
f"[{log_str}]received rdd({i + 1}/{len(parties)}), party: {parties[i]} "
)
else:
mq_names = self._get_mq_names(parties, name)
channel_infos = self._get_channels(mq_names=mq_names)
for i, info in enumerate(channel_infos):
obj = self._receive_obj(info, name, tag)
LOGGER.debug(
f"[{log_str}]received obj({i + 1}/{len(parties)}), party: {parties[i]} "
)
rtn.append(obj)
LOGGER.debug(f"[{log_str}]finish to get")
return rtn
def remote(
self,
v,
name: str,
tag: str,
parties: typing.List[Party],
gc: GarbageCollectionABC,
) -> typing.NoReturn:
log_str = f"[rabbitmq.remote](name={name}, tag={tag}, parties={parties})"
# if not _remote_tag_not_duplicate(name, tag, parties):
# raise ValueError(f"[{log_str}]remote to {parties} with duplicate tag")
_name_dtype_keys = [
_SPLIT_.join([party.role, party.party_id, name, tag, "remote"])
for party in parties
]
if _name_dtype_keys[0] not in self._name_dtype_map:
mq_names = self._get_mq_names(parties, dtype=NAME_DTYPE_TAG)
channel_infos = self._get_channels(mq_names=mq_names)
if isinstance(v, Table):
body = {"dtype": FederationDataType.TABLE, "partitions": v.partitions}
else:
body = {"dtype": FederationDataType.OBJECT}
LOGGER.debug(
f"[rabbitmq.remote] _name_dtype_keys: {_name_dtype_keys}, dtype: {body}"
)
self._send_obj(
name=name,
tag=_SPLIT_.join([tag, NAME_DTYPE_TAG]),
data=p_dumps(body),
channel_infos=channel_infos,
)
for k in _name_dtype_keys:
if k not in self._name_dtype_map:
self._name_dtype_map[k] = body
if isinstance(v, Table):
total_size = v.count()
partitions = v.partitions
LOGGER.debug(
f"[{log_str}]start to remote RDD, total_size={total_size}, partitions={partitions}"
)
mq_names = self._get_mq_names(parties, name, partitions=partitions)
# add gc
gc.add_gc_action(tag, v, "__del__", {})
send_func = self._get_partition_send_func(
name,
tag,
partitions,
mq_names,
mq=self._mq,
                maximum_message_size=self._max_message_size,
connection_conf=self._rabbit_manager.runtime_config.get(
"connection", {}
),
)
# noinspection PyProtectedMember
v._rdd.mapPartitionsWithIndex(send_func).count()
else:
LOGGER.debug(f"[{log_str}]start to remote obj")
mq_names = self._get_mq_names(parties, name)
channel_infos = self._get_channels(mq_names=mq_names)
self._send_obj(
name=name, tag=tag, data=p_dumps(v), channel_infos=channel_infos
)
LOGGER.debug(f"[{log_str}]finish to remote")
def cleanup(self, parties):
LOGGER.debug("[rabbitmq.cleanup]start to cleanup...")
for party in parties:
vhost = self._get_vhost(party)
LOGGER.debug(f"[rabbitmq.cleanup]start to cleanup vhost {vhost}...")
self._rabbit_manager.clean(vhost)
LOGGER.debug(f"[rabbitmq.cleanup]cleanup vhost {vhost} done")
if self._mq.union_name:
LOGGER.debug(f"[rabbitmq.cleanup]clean user {self._mq.union_name}.")
self._rabbit_manager.delete_user(user=self._mq.union_name)
def _get_vhost(self, party):
low, high = (
(self._party, party) if self._party < party else (party, self._party)
)
vhost = (
f"{self._session_id}-{low.role}-{low.party_id}-{high.role}-{high.party_id}"
)
return vhost
def _get_mq_names(
self, parties: typing.List[Party], name=None, partitions=None, dtype=None
) -> typing.List:
mq_names = [
self._get_or_create_queue(party, name, partitions, dtype)
for party in parties
]
return mq_names
def _get_or_create_queue(
self, party: Party, name=None, partitions=None, dtype=None
) -> typing.Tuple:
queue_key_list = []
queue_infos = []
if dtype is not None:
queue_key = _SPLIT_.join([party.role, party.party_id, dtype, dtype])
queue_key_list.append(queue_key)
else:
if partitions is not None:
for i in range(partitions):
queue_key = _SPLIT_.join([party.role, party.party_id, name, str(i)])
queue_key_list.append(queue_key)
elif name is not None:
queue_key = _SPLIT_.join([party.role, party.party_id, name])
queue_key_list.append(queue_key)
else:
queue_key = _SPLIT_.join([party.role, party.party_id])
queue_key_list.append(queue_key)
for queue_key in queue_key_list:
if queue_key not in self._queue_map:
LOGGER.debug(
f"[rabbitmq.get_or_create_queue]queue: {queue_key} for party:{party} not found, start to create"
)
# gen names
vhost_name = self._get_vhost(party)
queue_key_splits = queue_key.split(_SPLIT_)
queue_suffix = "-".join(queue_key_splits[2:])
send_queue_name = f"send-{self._session_id}-{self._party.role}-{self._party.party_id}-{party.role}-{party.party_id}-{queue_suffix}"
receive_queue_name = f"receive-{self._session_id}-{party.role}-{party.party_id}-{self._party.role}-{self._party.party_id}-{queue_suffix}"
queue_names = _QueueNames(
vhost_name, send_queue_name, receive_queue_name
)
                # initialize the vhost
if queue_names.vhost not in self._vhost_set:
self._rabbit_manager.create_vhost(queue_names.vhost)
self._rabbit_manager.add_user_to_vhost(
self._mq.union_name, queue_names.vhost
)
self._vhost_set.add(queue_names.vhost)
                # initialize the send queue; its name is prefixed with "send-"
self._rabbit_manager.create_queue(queue_names.vhost, queue_names.send)
                # initialize the receive queue; its name is prefixed with "receive-"
self._rabbit_manager.create_queue(
queue_names.vhost, queue_names.receive
)
upstream_uri = self._upstream_uri(party_id=party.party_id)
self._rabbit_manager.federate_queue(
upstream_host=upstream_uri,
vhost=queue_names.vhost,
send_queue_name=queue_names.send,
receive_queue_name=queue_names.receive,
)
self._queue_map[queue_key] = queue_names
# TODO: check federated queue status
LOGGER.debug(
f"[rabbitmq.get_or_create_queue]queue for queue_key: {queue_key}, party:{party} created"
)
queue_names = self._queue_map[queue_key]
queue_infos.append((queue_key, queue_names))
return queue_infos
def _upstream_uri(self, party_id):
host = self._mq.route_table.get(int(party_id)).get("host")
port = self._mq.route_table.get(int(party_id)).get("port")
upstream_uri = (
f"amqp://{self._mq.union_name}:{self._mq.policy_id}@{host}:{port}"
)
return upstream_uri
def _get_channel(
self, mq, queue_names: _QueueNames, party_id, role, connection_conf: dict
):
return MQChannel(
host=mq.host,
port=mq.port,
user=mq.union_name,
password=mq.policy_id,
vhost=queue_names.vhost,
send_queue_name=queue_names.send,
receive_queue_name=queue_names.receive,
party_id=party_id,
role=role,
extra_args=connection_conf,
)
def _get_channels(self, mq_names):
channel_infos = []
for e in mq_names:
for queue_key, queue_names in e:
queue_key_splits = queue_key.split(_SPLIT_)
role = queue_key_splits[0]
party_id = queue_key_splits[1]
info = self._channels_map.get(queue_key)
if info is None:
info = self._get_channel(
self._mq,
queue_names,
party_id=party_id,
role=role,
connection_conf=self._rabbit_manager.runtime_config.get(
"connection", {}
),
)
self._channels_map[queue_key] = info
channel_infos.append(info)
return channel_infos
# can't pickle _thread.lock objects
def _get_channels_index(self, index, mq_names, mq, connection_conf: dict):
channel_infos = []
for e in mq_names:
queue_key, queue_names = e[index]
queue_key_splits = queue_key.split(_SPLIT_)
role = queue_key_splits[0]
party_id = queue_key_splits[1]
info = self._get_channel(
mq,
queue_names,
party_id=party_id,
role=role,
connection_conf=connection_conf,
)
channel_infos.append(info)
return channel_infos
def _send_obj(self, name, tag, data, channel_infos):
for info in channel_infos:
properties = pika.BasicProperties(
content_type="text/plain",
app_id=info.party_id,
message_id=name,
correlation_id=tag,
delivery_mode=1,
)
LOGGER.debug(f"[rabbitmq._send_obj]properties:{properties}.")
info.basic_publish(body=data, properties=properties)
def _get_message_cache_key(self, name, tag, party_id, role):
cache_key = _SPLIT_.join([name, tag, str(party_id), role])
return cache_key
def _receive_obj(self, channel_info, name, tag):
party_id = channel_info._party_id
role = channel_info._role
wish_cache_key = self._get_message_cache_key(name, tag, party_id, role)
if wish_cache_key in self._message_cache:
return self._message_cache[wish_cache_key]
for method, properties, body in channel_info.consume():
LOGGER.debug(
f"[rabbitmq._receive_obj] method: {method}, properties: {properties}."
)
if properties.message_id != name or properties.correlation_id != tag:
# todo: fix this
LOGGER.warning(
f"[rabbitmq._receive_obj] require {name}.{tag}, got {properties.message_id}.{properties.correlation_id}"
)
cache_key = self._get_message_cache_key(
properties.message_id, properties.correlation_id, party_id, role
)
# object
if properties.content_type == "text/plain":
self._message_cache[cache_key] = p_loads(body)
channel_info.basic_ack(delivery_tag=method.delivery_tag)
if cache_key == wish_cache_key:
channel_info.cancel()
LOGGER.debug(
f"[rabbitmq._receive_obj] cache_key: {cache_key}, obj: {self._message_cache[cache_key]}"
)
return self._message_cache[cache_key]
else:
raise ValueError(
f"[rabbitmq._receive_obj] properties.content_type is {properties.content_type}, but must be text/plain"
)
def _send_kv(
self, name, tag, data, channel_infos, partition_size, partitions, message_key
):
headers = {
"partition_size": partition_size,
"partitions": partitions,
"message_key": message_key,
}
for info in channel_infos:
properties = pika.BasicProperties(
content_type="application/json",
app_id=info.party_id,
message_id=name,
correlation_id=tag,
headers=headers,
delivery_mode=1,
)
print(f"[rabbitmq._send_kv]info: {info}, properties: {properties}.")
info.basic_publish(body=data, properties=properties)
def _get_partition_send_func(
self,
name,
tag,
partitions,
mq_names,
mq,
        maximum_message_size,
connection_conf: dict,
):
def _fn(index, kvs):
return self._partition_send(
index,
kvs,
name,
tag,
partitions,
mq_names,
mq,
                maximum_message_size,
connection_conf,
)
return _fn
def _partition_send(
self,
index,
kvs,
name,
tag,
partitions,
mq_names,
mq,
        maximum_message_size,
connection_conf: dict,
):
channel_infos = self._get_channels_index(
index=index, mq_names=mq_names, mq=mq, connection_conf=connection_conf
)
datastream = Datastream()
base_message_key = str(index)
message_key_idx = 0
count = 0
for k, v in kvs:
count += 1
el = {"k": p_dumps(k).hex(), "v": p_dumps(v).hex()}
            # roughly calculate the size of the package to avoid serialization ;)
if (
datastream.get_size() + sys.getsizeof(el["k"]) + sys.getsizeof(el["v"])
                >= maximum_message_size
):
print(
f"[rabbitmq._partition_send]The size of message is: {datastream.get_size()}"
)
message_key_idx += 1
message_key = base_message_key + "_" + str(message_key_idx)
self._send_kv(
name=name,
tag=tag,
data=datastream.get_data(),
channel_infos=channel_infos,
partition_size=-1,
partitions=partitions,
message_key=message_key,
)
datastream.clear()
datastream.append(el)
message_key_idx += 1
message_key = _SPLIT_.join([base_message_key, str(message_key_idx)])
self._send_kv(
name=name,
tag=tag,
data=datastream.get_data(),
channel_infos=channel_infos,
partition_size=count,
partitions=partitions,
message_key=message_key,
)
return [1]
def _get_partition_receive_func(
self, name, tag, party_id, role, party_mq_names, mq, connection_conf: dict
):
def _fn(index, kvs):
return self._partition_receive(
index,
kvs,
name,
tag,
party_id,
role,
party_mq_names,
mq,
connection_conf,
)
return _fn
def _partition_receive(
self,
index,
kvs,
name,
tag,
party_id,
role,
party_mq_names,
mq,
connection_conf: dict,
):
queue_names = party_mq_names[index][1]
channel_info = self._get_channel(
mq, queue_names, party_id, role, connection_conf
)
message_key_cache = set()
count = 0
partition_size = -1
all_data = []
for method, properties, body in channel_info.consume():
print(
f"[rabbitmq._partition_receive] method: {method}, properties: {properties}."
)
if properties.message_id != name or properties.correlation_id != tag:
# todo: fix this
channel_info.basic_ack(delivery_tag=method.delivery_tag)
print(
f"[rabbitmq._partition_receive]: require {name}.{tag}, got {properties.message_id}.{properties.correlation_id}"
)
continue
if properties.content_type == "application/json":
message_key = properties.headers["message_key"]
if message_key in message_key_cache:
print(
f"[rabbitmq._partition_receive] message_key : {message_key} is duplicated"
)
channel_info.basic_ack(delivery_tag=method.delivery_tag)
continue
message_key_cache.add(message_key)
if properties.headers["partition_size"] >= 0:
partition_size = properties.headers["partition_size"]
data = json.loads(body)
data_iter = (
(p_loads(bytes.fromhex(el["k"])), p_loads(bytes.fromhex(el["v"])))
for el in data
)
count += len(data)
print(f"[rabbitmq._partition_receive] count: {count}")
all_data.extend(data_iter)
channel_info.basic_ack(delivery_tag=method.delivery_tag)
if count == partition_size:
channel_info.cancel()
return all_data
else:
                raise ValueError(
f"[rabbitmq._partition_receive]properties.content_type is {properties.content_type}, but must be application/json"
)
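# Assumed shape of the route table consumed by _upstream_uri above (an
# illustrative sketch inferred from the .get(...) lookups, not taken verbatim
# from the FATE documentation): the YAML referenced by "route_table" loads
# into a dict keyed by party_id, each entry carrying that party's RabbitMQ
# host and port. Hosts and ports below are placeholders.
_EXAMPLE_ROUTE_TABLE = {
    9999: {"host": "127.0.0.1", "port": 5672},
    10000: {"host": "127.0.0.2", "port": 5672},
}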
|
py | 1a3d6bf2f30de43719b93e0a326c9f8caadcae55 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import h5py
import json
import os
import scipy.misc
import sys
import cityscapesscripts.evaluation.instances2dict_with_polygons as cs
import utils.segms as segms_util
import utils.boxes as bboxs_util
def parse_args():
parser = argparse.ArgumentParser(description='Convert dataset')
parser.add_argument(
        '--dataset', help="cocostuff, cityscapes_instance_only", default=None, type=str)
parser.add_argument(
'--outdir', help="output dir for json files", default=None, type=str)
parser.add_argument(
'--datadir', help="data dir for annotations to be converted",
default=None, type=str)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
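# Example invocation (an illustrative sketch; the script filename and the
# directory paths are placeholders, not taken from the original source):
#
#   python convert_dataset.py \
#       --dataset cityscapes_instance_only \
#       --datadir /path/to/cityscapes \
#       --outdir /path/to/output/annotations
#
# Passing --dataset cocostuff routes to convert_coco_stuff_mat() instead.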
def convert_coco_stuff_mat(data_dir, out_dir):
"""Convert to png and save json with path. This currently only contains
the segmentation labels for objects+stuff in cocostuff - if we need to
combine with other labels from original COCO that will be a TODO."""
sets = ['train', 'val']
categories = []
json_name = 'coco_stuff_%s.json'
ann_dict = {}
for data_set in sets:
file_list = os.path.join(data_dir, '%s.txt')
images = []
with open(file_list % data_set) as f:
for img_id, img_name in enumerate(f):
img_name = img_name.replace('coco', 'COCO').strip('\n')
image = {}
mat_file = os.path.join(
data_dir, 'annotations/%s.mat' % img_name)
data = h5py.File(mat_file, 'r')
labelMap = data.get('S')
if len(categories) == 0:
labelNames = data.get('names')
for idx, n in enumerate(labelNames):
categories.append(
{"id": idx, "name": ''.join(chr(i) for i in data[
n[0]])})
ann_dict['categories'] = categories
scipy.misc.imsave(
os.path.join(data_dir, img_name + '.png'), labelMap)
image['width'] = labelMap.shape[0]
image['height'] = labelMap.shape[1]
image['file_name'] = img_name
image['seg_file_name'] = img_name
image['id'] = img_id
images.append(image)
ann_dict['images'] = images
print("Num images: %s" % len(images))
        with open(os.path.join(out_dir, json_name % data_set), 'w') as outfile:
outfile.write(json.dumps(ann_dict))
# for Cityscapes
def getLabelID(instID):
if (instID < 1000):
return instID
else:
return int(instID / 1000)
def convert_cityscapes_instance_only(
data_dir, out_dir):
"""Convert from cityscapes format to COCO instance seg format - polygons"""
sets = [
'gtFine_val',
# 'gtFine_train',
# 'gtFine_test',
# 'gtCoarse_train',
# 'gtCoarse_val',
# 'gtCoarse_train_extra'
]
ann_dirs = [
'gtFine_trainvaltest/gtFine/val',
# 'gtFine_trainvaltest/gtFine/train',
# 'gtFine_trainvaltest/gtFine/test',
# 'gtCoarse/train',
# 'gtCoarse/train_extra',
# 'gtCoarse/val'
]
json_name = 'instancesonly_filtered_%s.json'
ends_in = '%s_polygons.json'
img_id = 0
ann_id = 0
cat_id = 1
category_dict = {}
category_instancesonly = [
'person',
'rider',
'car',
'truck',
'bus',
'train',
'motorcycle',
'bicycle',
]
for data_set, ann_dir in zip(sets, ann_dirs):
print('Starting %s' % data_set)
ann_dict = {}
images = []
annotations = []
ann_dir = os.path.join(data_dir, ann_dir)
for root, _, files in os.walk(ann_dir):
for filename in files:
if filename.endswith(ends_in % data_set.split('_')[0]):
if len(images) % 50 == 0:
print("Processed %s images, %s annotations" % (
len(images), len(annotations)))
json_ann = json.load(open(os.path.join(root, filename)))
image = {}
image['id'] = img_id
img_id += 1
image['width'] = json_ann['imgWidth']
image['height'] = json_ann['imgHeight']
image['file_name'] = filename[:-len(
ends_in % data_set.split('_')[0])] + 'leftImg8bit.png'
image['seg_file_name'] = filename[:-len(
ends_in % data_set.split('_')[0])] + \
'%s_instanceIds.png' % data_set.split('_')[0]
images.append(image)
fullname = os.path.join(root, image['seg_file_name'])
objects = cs.instances2dict_with_polygons(
[fullname], verbose=False)[fullname]
for object_cls in objects:
if object_cls not in category_instancesonly:
continue # skip non-instance categories
for obj in objects[object_cls]:
if obj['contours'] == []:
print('Warning: empty contours.')
                                continue # skip objects with empty contours
len_p = [len(p) for p in obj['contours']]
if min(len_p) <= 4:
print('Warning: invalid contours.')
                                continue # skip objects with invalid contours
ann = {}
ann['id'] = ann_id
ann_id += 1
ann['image_id'] = image['id']
ann['segmentation'] = obj['contours']
if object_cls not in category_dict:
category_dict[object_cls] = cat_id
cat_id += 1
ann['category_id'] = category_dict[object_cls]
ann['iscrowd'] = 0
ann['area'] = obj['pixelCount']
ann['bbox'] = bboxs_util.xyxy_to_xywh(
segms_util.polys_to_boxes(
[ann['segmentation']])).tolist()[0]
annotations.append(ann)
ann_dict['images'] = images
categories = [{"id": category_dict[name], "name": name} for name in
category_dict]
ann_dict['categories'] = categories
ann_dict['annotations'] = annotations
print("Num categories: %s" % len(categories))
print("Num images: %s" % len(images))
print("Num annotations: %s" % len(annotations))
        with open(os.path.join(out_dir, json_name % data_set), 'w') as outfile:
outfile.write(json.dumps(ann_dict))
if __name__ == '__main__':
args = parse_args()
if args.dataset == "cityscapes_instance_only":
convert_cityscapes_instance_only(args.datadir, args.outdir)
elif args.dataset == "cocostuff":
convert_coco_stuff_mat(args.datadir, args.outdir)
else:
print("Dataset not supported: %s" % args.dataset)
|
py | 1a3d6c9219145d89b33541ab96fb25a0dce41047 | import json
from web3 import Web3
from config import NUM_TRANSACTIONS
from config import DEADBEEF
from config import SHARD_IDS
web3 = Web3()
alice_key = '0x4c0883a69102937d6231471b5dbb6204fe5129617082792ae468d01a3f362318'
alice_address = web3.eth.account.privateKeyToAccount(alice_key).address.lower()[2:]
abi = json.loads('[{"constant":false,"inputs":[{"name":"_shard_ID","type":"uint256"},{"name":"_sendGas","type":"uint256"},{"name":"_sendToAddress","type":"address"},{"name":"_data","type":"bytes"}],"name":"send","outputs":[],"payable":true,"stateMutability":"payable","type":"function"},{"anonymous":false,"inputs":[{"indexed":true,"name":"shard_ID","type":"uint256"},{"indexed":false,"name":"sendGas","type":"uint256"},{"indexed":false,"name":"sendFromAddress","type":"address"},{"indexed":true,"name":"sendToAddress","type":"address"},{"indexed":false,"name":"value","type":"uint256"},{"indexed":false,"name":"data","type":"bytes"},{"indexed":true,"name":"base","type":"uint256"},{"indexed":false,"name":"TTL","type":"uint256"}],"name":"SentMessage","type":"event"}]')
contract = web3.eth.contract(address='0x000000000000000000000000000000000000002A', abi=abi)
def format_transaction(tx, signed):
if isinstance(tx["data"], bytes):
data = tx["data"].hex()
else:
data = tx["data"]
return {
"gas": hex(tx["gas"]),
"gasPrice": tx["gasPrice"],
"hash": signed["hash"].hex(),
"input": data,
"nonce": tx["nonce"],
"r": hex(signed["r"]),
"s": hex(signed["s"]),
"v": hex(signed["v"]),
"to": tx["to"],
"value": hex(tx["value"]),
}
# Alice sends cross shard transactions
def gen_cross_shard_tx(nonce, shard_ID):
cross_shard_tx = contract.functions.send(shard_ID, 300000, DEADBEEF, bytes(0)).buildTransaction({ "gas": 3000000, "gasPrice": "0x2", "nonce": hex(nonce), "value": 1})
cross_shard_tx_signed = web3.eth.account.signTransaction(cross_shard_tx, alice_key)
cross_shard_tx_formatted = format_transaction(cross_shard_tx, cross_shard_tx_signed)
return cross_shard_tx_formatted
'''
# Bob sends simple transfers between account in the same shard
def gen_in_shard_tx(nonce):
private_key_bob = '0x5c0883a69102937d6231471b5dbb6204fe5129617082792ae468d01a3f362318'
address_bob = web3.eth.account.privateKeyToAccount(private_key_bob).address.lower()[2:]
in_shard_tx = {
"gas": 3000000,
"gasPrice": "0x2",
"nonce": "0x0", # we will need to overwrite this by getting the nonce from the state
"to": "0x000000000000000000000000000000000000002F",
"value": 20,
"data": "0x",
}
in_shard_tx_signed = web3.eth.account.signTransaction(in_shard_tx, private_key_bob)
in_shard_tx_formatted = format_transaction(in_shard_tx, in_shard_tx_signed)
return in_shard_tx_formatted
def gen_payloads():
private_key_alice = '0x4c0883a69102937d6231471b5dbb6204fe5129617082792ae468d01a3f362318'
address_alice = web3.eth.account.privateKeyToAccount(private_key_alice).address.lower()[2:]
payloadA = {
"fromAddress": address_alice,
"toAddress": "0x000000000000000000000000000000000000002A",
"value": 100,
"data": cross_shard_tx["data"]
}
# MessagePayload(address_alice, "0x000000000000000000000000000000000000002A", 100, cross_shard_tx["data"])
tx = []
for x in range(0, 100):
tx.append(payloadA)
return tx
'''
def gen_alice_and_bob_tx(dest_shards=None):
tx = []
if dest_shards is None:
for x in range(0, NUM_TRANSACTIONS):
tx.append(gen_cross_shard_tx(x, SHARD_IDS[x%len(SHARD_IDS)]))
else:
for x in range(0, NUM_TRANSACTIONS):
tx.append(gen_cross_shard_tx(x, dest_shards[x % len(dest_shards)]))
return tx
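# Minimal usage sketch (added for illustration; not part of the original
# script): build the default batch of NUM_TRANSACTIONS cross-shard
# transactions, round-robined over SHARD_IDS, and inspect the first one.
def _example_usage():
    txs = gen_alice_and_bob_tx()
    first = txs[0]
    # each entry is the dict produced by format_transaction(); e.g. the signed
    # transaction hash is available under the "hash" key
    return len(txs), first["hash"]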
|
py | 1a3d6cab3ab6c0b7620671b2953656007fb21caf | #!/usr/bin/env python
import os
import sys
import django
from django.conf import settings
from django.test.utils import get_runner
from django_mfa import totp
if __name__ == "__main__":
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
settings.configure(
DATABASES={
'default': {
'ENGINE': 'django.db.backends.sqlite3',
}
},
INSTALLED_APPS=(
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.staticfiles',
'django_mfa',
),
MIDDLEWARE_CLASSES=(
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
),
ROOT_URLCONF='django_mfa.urls',
STATIC_URL='/static/',
TEMPLATES=[
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'django_mfa/templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
],
)
django.setup()
TestRunner = get_runner(settings)
test_runner = TestRunner()
failures = test_runner.run_tests(["django_mfa"])
sys.exit(bool(failures)) |