ext | sha | content |
---|---|---|
py | 7df9096b9492233450b5c182a6490a95993016bd | import praw
reddit = praw.Reddit(client_id='CLIENT_ID',
client_secret='CLIENT_SECRET',
user_agent='USER_AGENT')
def redditScraper(subReddit, amountOfPosts=None, topOfWhat='week'):
listOfPosts = []
for submission in reddit.subreddit(subReddit).top(topOfWhat, limit=amountOfPosts):
urlAndTitle = {}
urlAndTitle["url"] = submission.url
urlAndTitle["title"] = submission.title
listOfPosts.append(urlAndTitle)
print("Grabbing " + str(len(listOfPosts)) + " posts from r/" + subReddit)
print("")
return listOfPosts |
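For context, a minimal usage sketch of the scraper above, assuming real credentials have replaced the CLIENT_ID/CLIENT_SECRET/USER_AGENT placeholders; the subreddit name and post limit below are illustrative, not taken from the source:

```python
# Hypothetical driver for redditScraper(); subreddit name and limit are placeholders.
posts = redditScraper("learnpython", amountOfPosts=5, topOfWhat="week")
for post in posts:
    print(post["title"], "->", post["url"])
```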
py | 7df90a1efae3485b4e79eec22070e54f4d6200bf | import mimetypes
from django.contrib.contenttypes.models import ContentType
from django.templatetags.static import static
from django.http.response import HttpResponse
from django.urls import reverse
from django.utils.html import format_html, mark_safe
from django.utils.translation import gettext_lazy as _
from wagtail.core import hooks
from wagtail.core.models import UserPagePermissionsProxy, get_page_models
from wagtailcache.cache import clear_cache
from coderedcms.wagtail_flexible_forms.wagtail_hooks import FormAdmin, SubmissionAdmin
@hooks.register('insert_global_admin_css')
def global_admin_css():
return format_html(
'<link rel="stylesheet" type="text/css" href="{}">',
static('coderedcms/css/codered-admin.css')
)
@hooks.register('insert_editor_css')
def editor_css():
return format_html(
'<link rel="stylesheet" type="text/css" href="{}">',
static('coderedcms/css/codered-editor.css')
)
@hooks.register('insert_editor_js')
def collapsible_js():
return format_html('<script src="{}"></script>', static('coderedcms/js/codered-editor.js'))
@hooks.register('after_create_page')
@hooks.register('after_edit_page')
def clear_wagtailcache(request, page):
if page.live:
clear_cache()
@hooks.register('filter_form_submissions_for_user')
def codered_forms(user, editable_forms):
"""
Add our own CoderedFormPage to editable_forms, since wagtail is unaware
of its existence. Essentially this is a fork of wagtail.contrib.forms.get_forms_for_user()
and wagtail.contrib.forms.get_form_types()
"""
from coderedcms.models import CoderedFormMixin
form_models = [
model for model in get_page_models()
if issubclass(model, CoderedFormMixin)
]
form_types = list(
ContentType.objects.get_for_models(*form_models).values()
)
editable_forms = UserPagePermissionsProxy(user).editable_pages()
editable_forms = editable_forms.filter(content_type__in=form_types)
return editable_forms
@hooks.register('before_serve_document')
def serve_document_directly(document, request):
"""
This hook prevents documents from being downloaded unless
specified by an <a> tag with the download attribute.
"""
content_type, content_encoding = mimetypes.guess_type(document.filename)
response = HttpResponse(document.file.read(), content_type=content_type)
response['Content-Disposition'] = 'inline;filename="{0}"'.format(document.filename)
response['Content-Encoding'] = content_encoding
return response
class CoderedSubmissionAdmin(SubmissionAdmin):
def __init__(self, parent=None):
from coderedcms.models import CoderedSessionFormSubmission
self.model = CoderedSessionFormSubmission
super().__init__(parent=parent)
class CoderedFormAdmin(FormAdmin):
list_display = ('title', 'action_links')
def all_submissions_link(self, obj, label=_('See all submissions'),
url_suffix=''):
return '<a href="%s?page_id=%s%s">%s</a>' % (
reverse(CoderedSubmissionAdmin().url_helper.get_action_url_name('index')),
obj.pk, url_suffix, label)
all_submissions_link.short_description = ''
all_submissions_link.allow_tags = True
def action_links(self, obj):
from coderedcms.models import CoderedFormPage, CoderedStreamFormPage
actions = []
if issubclass(type(obj.specific), CoderedFormPage):
actions.append(
'<a href="{0}">{1}</a>'.format(reverse(
'wagtailforms:list_submissions',
args=(obj.pk,)),
_('See all Submissions')
)
)
actions.append(
'<a href="{0}">{1}</a>'.format(
reverse("wagtailadmin_pages:edit", args=(obj.pk,)), _("Edit this form page")
)
)
elif issubclass(type(obj.specific), CoderedStreamFormPage):
actions.append(self.unprocessed_submissions_link(obj))
actions.append(self.all_submissions_link(obj))
actions.append(self.edit_link(obj))
return mark_safe("<br />".join(actions))
# modeladmin_register(CoderedFormAdmin)
# modeladmin_register(CoderedSubmissionAdmin)
|
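The two registrations at the end of the file are intentionally commented out. If they were enabled, the wiring would look roughly like the sketch below, assuming wagtail.contrib.modeladmin is available in the project; this is not part of the file above:

```python
# Sketch only: mirrors the commented-out modeladmin_register calls above.
from wagtail.contrib.modeladmin.options import modeladmin_register

modeladmin_register(CoderedFormAdmin)
modeladmin_register(CoderedSubmissionAdmin)
```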
py | 7df90a8806c9cba8ab0a806ad501866c53624f02 | from requests import get, exceptions as requests_exceptions
from os import mkdir,path,remove
from tqdm import tqdm
from hashlib import md5
import colorama as col
"""
freshlybuiltimagebol library model downloader
status_code for checking the execution status
code meaning
1000 - model already exists
1001 - model name incorrect
1002 - download interrupted
1003 - successful download
1004 - http error
1005 - connection error
1006 - timeout error
1007 - miscellaneous error
1008 - building models directory
1009 - signature mismatch
"""
class imagebol_model_downloader:
status_code=0000
def __init__(self,model_name,status_code=0000):
self.download_model(model_name,status_code)
def download_model(self,model_name,status_code):
dir_path = path.dirname(path.realpath(__file__))
available_models={
"F_est":["frozen_east_text_detection","94.4MB","8a9b7f2ebd9bcf8212bfa856b065e6f0"]
}
if not path.isdir(dir_path+"/models/"):
try:
mkdir(dir_path+"/models")
except:
print("models directory found")
if model_name in available_models:
model_name=available_models[model_name]
if path.isfile(dir_path+"/models/"+model_name[0]+".pb")==False:
try:
print('starting model download')
print("don't quit until downloading completes")
print('download can take time depending upon your internet connection')
print(col.Fore.BLUE+model_name[0]+" is of "+model_name[1])
choice=input(col.Fore.YELLOW+"do you wish to download? type 'y': ")
if (choice=='y'):
return self.start_downloading(model_name,dir_path,status_code)
else:
print('download canceled')
status_code=0000
return status_code
print('model download successful')
self.status_code=1003
return status_code
except:
print('download interrupted')
try:
remove(dir_path+"/models/"+model_name[0] +".pb")
self.status_code=1002
return status_code
except:
self.status_code=1002
return status_code
else:
print('model found')
print('checking encryption signature')
self.hash_signature_match(model_name,available_models,dir_path,status_code)
if self.status_code==1000:
return self.status_code
else:
print(self.status_code)
print(col.Fore.CYAN+model_name[0]+" is of "+model_name[1])
re_choice=input(col.Fore.YELLOW+"press 'y' to start re-downloading: ")
if re_choice =='y':
remove(dir_path+"/models/"+model_name[0] +".pb")
self.start_downloading(model_name,dir_path,status_code)
self.hash_signature_match(model_name,available_models,dir_path,status_code)
return self.status_code
else:
print("no reference found for "+model_name)
self.status_code=1001
return status_code
def start_downloading(self,model_name,dir_path,status_code):
model_url= "https://raw.githubusercontent.com/FreshlyBuilt/freshlybuiltimagebol/master/freshlybuiltimagebol/models/"
try:
response = get(model_url+model_name[0]+".pb", stream=True)
response.raise_for_status()
except requests_exceptions.HTTPError as errh:
print ("Http Error:",errh)
self.status_code=1004
return self.status_code
except requests_exceptions.ConnectionError as errc:
print ("Error Connecting:",errc)
self.status_code=1005
return self.status_code
except requests_exceptions.Timeout as errt:
print ("Timeout Error:",errt)
self.status_code=1006
return self.status_code
except requests_exceptions.RequestException as err:
print ("OOps: Something Else",err)
self.status_code=1007
return self.status_code
with open(dir_path+"/models/"+model_name[0] +".pb", "wb") as f:
total_length = response.headers.get('content-length')
if total_length is None:
f.write(response.content)
else:
total_length = int(total_length)
chunk_size = 1024
for data in tqdm(iterable=response.iter_content(chunk_size), total=total_length/chunk_size, unit='KB'):
try:
f.write(data)
except:
pass
def hash_signature_match(self,model_name,available_models,dir_path,status_code):
model_checksum=dir_path+"/models/"+model_name[0]+".pb"
md5_hash=md5()
model_handler=open(model_checksum,"rb").read()
md5_hash.update(model_handler)
hash_code=md5_hash.hexdigest()
if hash_code == model_name[2]:
col.init(autoreset=True)
print(col.Fore.GREEN+"signature matched")
col.deinit()
self.status_code=1000
return status_code
else:
col.init(autoreset=True)
print(col.Fore.RED+"warning: signature mismatch, model may not work properly")
col.deinit()
self.status_code=1009
return status_code
"""downloader_debugger"""
#model_name=input("model name: ")
#print(imagebol_model_downloader(model_name).status_code)
|
py | 7df90ad0d47813956bdf7b797af3e3b63345004d | """users table
Revision ID: 2dd5da4ccf72
Revises:
Create Date: 2020-06-29 12:57:15.566032
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '2dd5da4ccf72'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=128), nullable=True),
sa.Column('email', sa.String(length=128), nullable=True),
sa.Column('password', sa.String(length=128), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_user_email'), 'user', ['email'], unique=True)
op.create_index(op.f('ix_user_username'), 'user', ['username'], unique=True)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_user_username'), table_name='user')
op.drop_index(op.f('ix_user_email'), table_name='user')
op.drop_table('user')
# ### end Alembic commands ###
|
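A migration like this one is normally applied with the Alembic CLI (`alembic upgrade head`). The equivalent programmatic call, assuming a standard alembic.ini sits next to the migrations directory (that path is an assumption, not something shown above), is roughly:

```python
# Sketch: apply all pending migrations up to "head"; the alembic.ini path is an assumption.
from alembic import command
from alembic.config import Config

command.upgrade(Config("alembic.ini"), "head")
```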
py | 7df90ad72fa42bee8766f5861e4af07bc6e0f099 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helpers to manipulate a tensor graph in python.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import re
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.platform import tf_logging as logging
_VARIABLE_OPS = {
"Assign",
"AssignAdd",
"AssignSub",
"Queue",
"ScatterAdd",
"ScatterSub",
"ScatterUpdate",
"TruncatedNormal",
"Variable",
"VariableV2",
}
def _is_variable_op(op):
"""Returns true if 'op' refers to a Variable node."""
return op in _VARIABLE_OPS
def must_run_on_cpu(node, pin_variables_on_cpu=False):
"""Returns True if the given node_def must run on CPU, otherwise False.
Args:
node: The node to be assigned to a device. Could be either an ops.Operation
or NodeDef.
pin_variables_on_cpu: If True, this function will return False if node_def
represents a variable-related op.
Returns:
True if the given node must run on CPU, otherwise False.
"""
if isinstance(node, ops.Operation):
node_def = node.node_def
else:
assert isinstance(node, node_def_pb2.NodeDef)
node_def = node
# If the op is a variable-related op, should we pin it on CPU?
if pin_variables_on_cpu and _is_variable_op(node_def.op):
return True
# Constant operations producing a string or int32 must run on CPU.
if node_def.op == "Const":
# Get the value of the 'dtype' attr
dtype = node_def.attr["dtype"].type
if dtype == dtypes.string or dtype == dtypes.int32:
return True
if node_def.op in ["DynamicStitch", "ParallelDynamicStitch"]:
dtype = node_def.attr["T"].type
if dtype == dtypes.int32:
# DynamicStitch on GPU only works for int32 values.
return True
if node_def.op in ["Cast"]:
dtype = node_def.attr["SrcT"].type
if dtype == dtypes.int32:
# Cast on GPU does not work for int32 values.
return True
return False
################################################################################
#
# device functions for use in with g.device(...)
#
################################################################################
def _node_name(n):
if n.startswith("^"):
return n[1:]
else:
return n.split(":")[0]
def extract_sub_graph(graph_def, dest_nodes):
"""Extract the subgraph that can reach any of the nodes in 'dest_nodes'.
Args:
graph_def: A graph_pb2.GraphDef proto.
dest_nodes: A list of strings specifying the destination node names.
Returns:
The GraphDef of the sub-graph.
Raises:
TypeError: If 'graph_def' is not a graph_pb2.GraphDef proto.
"""
if not isinstance(graph_def, graph_pb2.GraphDef):
raise TypeError("graph_def must be a graph_pb2.GraphDef proto.")
edges = {} # Keyed by the dest node name.
name_to_node_map = {} # Keyed by node name.
# Keeps track of node sequences. It is important to still output the
# operations in the original order.
node_seq = {} # Keyed by node name.
seq = 0
for node in graph_def.node:
n = _node_name(node.name)
name_to_node_map[n] = node
edges[n] = [_node_name(x) for x in node.input]
node_seq[n] = seq
seq += 1
for d in dest_nodes:
assert d in name_to_node_map, "%s is not in graph" % d
nodes_to_keep = set()
# Breadth first search to find all the nodes that we should keep.
next_to_visit = dest_nodes[:]
while next_to_visit:
n = next_to_visit[0]
del next_to_visit[0]
if n in nodes_to_keep:
# Already visited this node.
continue
nodes_to_keep.add(n)
next_to_visit += edges[n]
nodes_to_keep_list = sorted(list(nodes_to_keep), key=lambda n: node_seq[n])
# Now construct the output GraphDef
out = graph_pb2.GraphDef()
for n in nodes_to_keep_list:
out.node.extend([copy.deepcopy(name_to_node_map[n])])
out.library.CopyFrom(graph_def.library)
out.versions.CopyFrom(graph_def.versions)
return out
def tensor_shape_from_node_def_name(graph, input_name):
"""Convenience function to get a shape from a NodeDef's input string."""
# To get a tensor, the name must be in the form <input>:<port>, for example
# 'Mul:0'. The GraphDef input strings don't always have the port specified
# though, so if there isn't a colon we need to add a default ':0' to the end.
if ":" not in input_name:
canonical_name = input_name + ":0"
else:
canonical_name = input_name
tensor = graph.get_tensor_by_name(canonical_name)
shape = tensor.get_shape()
return shape
def convert_variables_to_constants(sess, input_graph_def, output_node_names,
variable_names_whitelist=None,
variable_names_blacklist=None):
"""Replaces all the variables in a graph with constants of the same values.
If you have a trained graph containing Variable ops, it can be convenient to
convert them all to Const ops holding the same values. This makes it possible
to describe the network fully with a single GraphDef file, and allows the
removal of a lot of ops related to loading and saving the variables.
Args:
sess: Active TensorFlow session containing the variables.
input_graph_def: GraphDef object holding the network.
output_node_names: List of name strings for the result nodes of the graph.
variable_names_whitelist: The set of variable names to convert (by default,
all variables are converted).
variable_names_blacklist: The set of variable names to omit converting
to constants.
Returns:
GraphDef containing a simplified version of the original.
"""
# This graph only includes the nodes needed to evaluate the output nodes, and
# removes unneeded nodes like those involved in saving and assignment.
inference_graph = extract_sub_graph(input_graph_def, output_node_names)
found_variables = {}
variable_names = []
variable_dict_names = []
for node in inference_graph.node:
if node.op in ["Variable", "VariableV2"]:
variable_name = node.name
if ((variable_names_whitelist is not None and
variable_name not in variable_names_whitelist) or
(variable_names_blacklist is not None and
variable_name in variable_names_blacklist)):
continue
variable_dict_names.append(variable_name)
variable_names.append(variable_name + ":0")
if variable_names:
returned_variables = sess.run(variable_names)
else:
returned_variables = []
found_variables = dict(zip(variable_dict_names, returned_variables))
logging.info("Froze %d variables.", len(returned_variables))
output_graph_def = graph_pb2.GraphDef()
how_many_converted = 0
for input_node in inference_graph.node:
output_node = node_def_pb2.NodeDef()
if input_node.name in found_variables:
output_node.op = "Const"
output_node.name = input_node.name
dtype = input_node.attr["dtype"]
data = found_variables[input_node.name]
output_node.attr["dtype"].CopyFrom(dtype)
output_node.attr["value"].CopyFrom(attr_value_pb2.AttrValue(
tensor=tensor_util.make_tensor_proto(data,
dtype=dtype.type,
shape=data.shape)))
how_many_converted += 1
else:
output_node.CopyFrom(input_node)
output_graph_def.node.extend([output_node])
output_graph_def.library.CopyFrom(inference_graph.library)
print("Converted %d variables to const ops." % how_many_converted)
return output_graph_def
def remove_training_nodes(input_graph, protected_nodes=None):
"""Prunes out nodes that aren't needed for inference.
There are nodes like Identity and CheckNumerics that are only useful
during training, and can be removed in graphs that will be used for
nothing but inference. Here we identify and remove them, returning an
equivalent graph. To be specific, CheckNumerics nodes are always removed, and
Identity nodes that aren't involved in control edges are spliced out so that
their input and outputs are directly connected.
Args:
input_graph: Model to analyze and prune.
protected_nodes: An optional list of names of nodes to be kept
unconditionally. This is for example useful to preserve Identity output
nodes.
Returns:
A list of nodes with the unnecessary ones removed.
"""
if not protected_nodes:
protected_nodes = []
types_to_remove = {"CheckNumerics": True}
input_nodes = input_graph.node
names_to_remove = {}
for node in input_nodes:
if node.op in types_to_remove and node.name not in protected_nodes:
names_to_remove[node.name] = True
nodes_after_removal = []
for node in input_nodes:
if node.name in names_to_remove:
continue
new_node = node_def_pb2.NodeDef()
new_node.CopyFrom(node)
input_before_removal = node.input
del new_node.input[:]
for full_input_name in input_before_removal:
input_name = re.sub(r"^\^", "", full_input_name)
if input_name in names_to_remove:
continue
new_node.input.append(full_input_name)
nodes_after_removal.append(new_node)
types_to_splice = {"Identity": True}
names_to_splice = {}
for node in nodes_after_removal:
if node.op in types_to_splice and node.name not in protected_nodes:
# We don't want to remove nodes that have control edge inputs, because
# they might be involved in subtle dependency issues that removing them
# will jeopardize.
has_control_edge = False
for input_name in node.input:
if re.match(r"^\^", input_name):
has_control_edge = True
if not has_control_edge:
names_to_splice[node.name] = node.input[0]
nodes_after_splicing = []
for node in nodes_after_removal:
if node.name in names_to_splice:
continue
new_node = node_def_pb2.NodeDef()
new_node.CopyFrom(node)
input_before_removal = node.input
del new_node.input[:]
for full_input_name in input_before_removal:
input_name = re.sub(r"^\^", "", full_input_name)
while input_name in names_to_splice:
full_input_name = names_to_splice[input_name]
input_name = re.sub(r"^\^", "", full_input_name)
new_node.input.append(full_input_name)
nodes_after_splicing.append(new_node)
output_graph = graph_pb2.GraphDef()
output_graph.node.extend(nodes_after_splicing)
return output_graph
|
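To show how convert_variables_to_constants() above is typically driven, here is a hedged TensorFlow 1.x-style sketch; the tiny graph and the node name "output" are placeholders, not anything defined in the file above:

```python
# Sketch only: freeze a trivial graph; assumes a TF 1.x API (tf.compat.v1 under TF 2.x).
import tensorflow as tf
from tensorflow.python.framework import graph_util

with tf.Session() as sess:
    w = tf.Variable(3.0, name="w")
    x = tf.placeholder(tf.float32, name="x")
    y = tf.multiply(w, x, name="output")
    sess.run(tf.global_variables_initializer())
    # Variables reachable from "output" are baked in as Const nodes.
    frozen = graph_util.convert_variables_to_constants(
        sess, sess.graph_def, output_node_names=["output"])
```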
py | 7df90b29899fae1725345b81c781e24018ab7bfc | import pytest
from amortization.amount import calculate_amortization_amount
from amortization.schedule import amortization_schedule
def test_amortization_amount() -> None:
principal = 150000
period = 36
interest_rate = 0.1
amortization = calculate_amortization_amount(principal, interest_rate, period)
assert pytest.approx(4840.08, 0.002) == amortization
def test_amortization_schedule() -> None:
principal: float = 150000
period = 36
interest_rate = 0.1
expected = (
(1, 4840.08, 1250.00, 3590.08, 146409.92),
(2, 4840.08, 1220.08, 3620.00, 142789.92),
(3, 4840.08, 1189.92, 3650.16, 139139.76),
(4, 4840.08, 1159.50, 3680.58, 135459.18),
(5, 4840.08, 1128.83, 3711.25, 131747.93),
(6, 4840.08, 1097.90, 3742.18, 128005.75),
(7, 4840.08, 1066.71, 3773.37, 124232.38),
(8, 4840.08, 1035.27, 3804.81, 120427.57),
(9, 4840.08, 1003.56, 3836.52, 116591.05),
(10, 4840.08, 971.59, 3868.49, 112722.56),
(11, 4840.08, 939.35, 3900.73, 108821.83),
(12, 4840.08, 906.85, 3933.23, 104888.60),
(13, 4840.08, 874.07, 3966.01, 100922.59),
(14, 4840.08, 841.02, 3999.06, 96923.53),
(15, 4840.08, 807.70, 4032.38, 92891.15),
(16, 4840.08, 774.09, 4065.99, 88825.16),
(17, 4840.08, 740.21, 4099.87, 84725.29),
(18, 4840.08, 706.04, 4134.04, 80591.25),
(19, 4840.08, 671.59, 4168.49, 76422.76),
(20, 4840.08, 636.86, 4203.22, 72219.54),
(21, 4840.08, 601.83, 4238.25, 67981.29),
(22, 4840.08, 566.51, 4273.57, 63707.72),
(23, 4840.08, 530.90, 4309.18, 59398.54),
(24, 4840.08, 494.99, 4345.09, 55053.45),
(25, 4840.08, 458.78, 4381.30, 50672.15),
(26, 4840.08, 422.27, 4417.81, 46254.34),
(27, 4840.08, 385.45, 4454.63, 41799.71),
(28, 4840.08, 348.33, 4491.75, 37307.96),
(29, 4840.08, 310.90, 4529.18, 32778.78),
(30, 4840.08, 273.16, 4566.92, 28211.86),
(31, 4840.08, 235.10, 4604.98, 23606.88),
(32, 4840.08, 196.72, 4643.36, 18963.52),
(33, 4840.08, 158.03, 4682.05, 14281.47),
(34, 4840.08, 119.01, 4721.07, 9560.40),
(35, 4840.08, 79.67, 4760.41, 4799.99),
(36, 4839.99, 40.00, 4799.99, 0.00),
)
result = amortization_schedule(principal, interest_rate, period)
for e, r in zip(expected, result):
assert pytest.approx(e) == r
|
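The expected figures in this test are consistent with the standard annuity payment formula A = P·r·(1+r)^n / ((1+r)^n − 1) evaluated with a monthly rate r = interest_rate/12. A short sketch of that calculation, independent of the library under test (the monthly-compounding assumption is inferred from the expected values, not stated in the test):

```python
# Sketch: reproduce the expected monthly payment from the annuity formula.
principal, annual_rate, periods = 150000, 0.10, 36
r = annual_rate / 12                      # assumed monthly compounding
payment = principal * r * (1 + r) ** periods / ((1 + r) ** periods - 1)
print(round(payment, 2))                  # 4840.08, matching the first row above
```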
py | 7df90bd6c9a955fb698c7011250fdf8b6f7f9d93 | #
# Copyright (C) 2014-2015 UAVCAN Development Team <uavcan.org>
#
# This software is distributed under the terms of the MIT License.
#
# Author: Pavel Kirienko <[email protected]>
# Ben Dyer <[email protected]>
#
from __future__ import division, absolute_import, print_function, unicode_literals
import os
import re
from logging import getLogger
from io import StringIO
from .signature import Signature, compute_signature
from .common import DsdlException, pretty_filename, bytes_from_crc64
from .type_limits import get_unsigned_integer_range, get_signed_integer_range, get_float_range
# Python 2.7 compatibility
try:
# noinspection PyUnresolvedReferences,PyShadowingBuiltins
str = unicode # @ReservedAssignment @UndefinedVariable
except NameError:
pass
try:
# noinspection PyUnresolvedReferences,PyUnboundLocalVariable
long(1) # @UndefinedVariable
except NameError:
long = int # @ReservedAssignment
MAX_FULL_TYPE_NAME_LEN = 80
SERVICE_DATA_TYPE_ID_MAX = 255
MESSAGE_DATA_TYPE_ID_MAX = 65535
logger = getLogger(__name__)
class Type:
"""
Common type description. The specialized type description classes inherit from this one.
Fields:
full_name Full type name string, e.g. "uavcan.protocol.NodeStatus"
category Any CATEGORY_*
"""
CATEGORY_PRIMITIVE = 0
CATEGORY_ARRAY = 1
CATEGORY_COMPOUND = 2
CATEGORY_VOID = 3
def __init__(self, full_name, category):
self.full_name = str(full_name)
self.category = category
def __str__(self):
return self.get_normalized_definition()
def get_data_type_signature(self):
return None
def get_normalized_definition(self):
raise NotImplementedError('Pure virtual method')
def get_max_bitlen(self):
raise NotImplementedError('Pure virtual method')
def get_min_bitlen(self):
raise NotImplementedError('Pure virtual method')
__repr__ = __str__
class PrimitiveType(Type):
"""
Primitive type description, e.g. bool or float16.
Fields:
kind Any KIND_*
bitlen Bit length, 1 to 64
cast_mode Any CAST_MODE_*
value_range Tuple containing min and max values: (min, max)
"""
KIND_BOOLEAN = 0
KIND_UNSIGNED_INT = 1
KIND_SIGNED_INT = 2
KIND_FLOAT = 3
CAST_MODE_SATURATED = 0
CAST_MODE_TRUNCATED = 1
def __init__(self, kind, bitlen, cast_mode):
self.kind = kind
self.bitlen = bitlen
self.cast_mode = cast_mode
Type.__init__(self, self.get_normalized_definition(), Type.CATEGORY_PRIMITIVE)
self.value_range = {
PrimitiveType.KIND_BOOLEAN: get_unsigned_integer_range,
PrimitiveType.KIND_UNSIGNED_INT: get_unsigned_integer_range,
PrimitiveType.KIND_SIGNED_INT: get_signed_integer_range,
PrimitiveType.KIND_FLOAT: get_float_range
}[self.kind](bitlen)
def get_normalized_definition(self):
"""Please refer to the specification for details about normalized definitions."""
cast_mode = 'saturated' if self.cast_mode == PrimitiveType.CAST_MODE_SATURATED else 'truncated'
primary_type = {
PrimitiveType.KIND_BOOLEAN: 'bool',
PrimitiveType.KIND_UNSIGNED_INT: 'uint' + str(self.bitlen),
PrimitiveType.KIND_SIGNED_INT: 'int' + str(self.bitlen),
PrimitiveType.KIND_FLOAT: 'float' + str(self.bitlen)
}[self.kind]
return cast_mode + ' ' + primary_type
def validate_value_range(self, value):
"""
Args:
value: Throws DsdlException if this value cannot be represented by this type.
"""
low, high = self.value_range
if not low <= value <= high:
error('Value [%s] is out of range %s', value, self.value_range)
def get_max_bitlen(self):
"""Returns type bit length."""
return self.bitlen
def get_min_bitlen(self):
"""Returns type bit length."""
return self.bitlen
class ArrayType(Type):
"""
Array type description, e.g. float32[8], uint12[<34].
Fields:
value_type Description of the array value type; the type of this field inherits Type, e.g. PrimitiveType
mode Any MODE_*
max_size Maximum number of elements in the array
"""
MODE_STATIC = 0
MODE_DYNAMIC = 1
def __init__(self, value_type, mode, max_size):
self.value_type = value_type
self.mode = mode
self.max_size = max_size
Type.__init__(self, self.get_normalized_definition(), Type.CATEGORY_ARRAY)
def get_normalized_definition(self):
"""Please refer to the specification for details about normalized definitions."""
typedef = self.value_type.get_normalized_definition()
return ('%s[<=%d]' if self.mode == ArrayType.MODE_DYNAMIC else '%s[%d]') % (typedef, self.max_size)
def get_max_bitlen(self):
"""Returns total maximum bit length of the array, including length field if applicable."""
payload_max_bitlen = self.max_size * self.value_type.get_max_bitlen()
return {
self.MODE_DYNAMIC: payload_max_bitlen + self.max_size.bit_length(),
self.MODE_STATIC: payload_max_bitlen
}[self.mode]
def get_min_bitlen(self):
if self.mode == self.MODE_STATIC:
return self.value_type.get_min_bitlen() * self.max_size
else:
return 0 # Considering TAO
def get_data_type_signature(self):
return self.value_type.get_data_type_signature()
@property
def is_string_like(self):
return self.mode == self.MODE_DYNAMIC and \
self.value_type.category == Type.CATEGORY_PRIMITIVE and \
self.value_type.bitlen == 8
# noinspection PyAbstractClass
class CompoundType(Type):
"""
Compound type description, e.g. uavcan.protocol.NodeStatus.
Fields:
source_file Path to the DSDL definition file for this type
default_dtid Default Data Type ID, if specified, None otherwise
version The version number of the dsdl definition as a tuple (e.g. (1,7))
kind Any KIND_*
source_text Raw DSDL definition text (as is, with comments and the original formatting)
Fields if kind == KIND_SERVICE:
request_fields Request struct field list, the type of each element is Field
response_fields Response struct field list
request_constants Request struct constant list, the type of each element is Constant
response_constants Response struct constant list
request_union Boolean indicating whether the request struct is a union
response_union Boolean indicating whether the response struct is a union
Fields if kind == KIND_MESSAGE:
fields Field list, the type of each element is Field
constants Constant list, the type of each element is Constant
union Boolean indicating whether the message struct is a union
Extra methods if kind == KIND_SERVICE:
get_max_bitlen_request() Returns maximum total bit length of the serialized request struct
get_max_bitlen_response() Same for the response struct
get_min_bitlen_request() Returns minimum total bit length of the serialized request struct
get_min_bitlen_response() Same for the response struct
Extra methods if kind == KIND_MESSAGE:
get_max_bitlen() Returns maximum total bit length of the serialized struct
get_min_bitlen() Returns minimum total bit length of the serialized struct
"""
KIND_SERVICE = 0
KIND_MESSAGE = 1
def __init__(self, full_name, kind, source_file, default_dtid, version, source_text):
Type.__init__(self, full_name, Type.CATEGORY_COMPOUND)
self.source_file = source_file
self.default_dtid = default_dtid
self.version = version
self.kind = kind
self.source_text = source_text
self._data_type_signature = None
def compute_max_bitlen(flds, union):
if len(flds) == 0:
return 0
lens = [x.type.get_max_bitlen() for x in flds]
if union:
return max(lens) + max(len(flds) - 1, 1).bit_length()
else:
return sum(lens)
def compute_min_bitlen(flds, union):
if len(flds) == 0:
return 0
lens = [x.type.get_min_bitlen() for x in flds]
if union:
return min(lens) + max(len(flds) - 1, 1).bit_length()
else:
return sum(lens)
if kind == CompoundType.KIND_SERVICE:
self.request_fields = []
self.response_fields = []
self.request_constants = []
self.response_constants = []
self.get_max_bitlen_request = lambda: compute_max_bitlen(self.request_fields, self.request_union)
self.get_max_bitlen_response = lambda: compute_max_bitlen(self.response_fields, self.response_union)
self.get_min_bitlen_request = lambda: compute_min_bitlen(self.request_fields, self.request_union)
self.get_min_bitlen_response = lambda: compute_min_bitlen(self.response_fields, self.response_union)
self.request_union = False
self.response_union = False
elif kind == CompoundType.KIND_MESSAGE:
self.fields = []
self.constants = []
self.get_max_bitlen = lambda: compute_max_bitlen(self.fields, self.union)
self.get_min_bitlen = lambda: compute_min_bitlen(self.fields, self.union)
self.union = False
else:
error('Compound type of unknown kind [%s]', kind)
def _instantiate(self, *args, **kwargs):
# This is a stub
pass
def __call__(self, *args, **kwargs):
return self._instantiate(*args, **kwargs)
def get_dsdl_signature_source_definition(self):
"""
Returns normalized DSDL definition text.
Please refer to the specification for details about normalized DSDL definitions.
"""
txt = StringIO()
txt.write(self.full_name + '\n')
def adjoin(attrs):
return txt.write('\n'.join(x.get_normalized_definition() for x in attrs) + '\n')
if self.kind == CompoundType.KIND_SERVICE:
if self.request_union:
txt.write('\n@union\n')
adjoin(self.request_fields)
txt.write('\n---\n')
if self.response_union:
txt.write('\n@union\n')
adjoin(self.response_fields)
elif self.kind == CompoundType.KIND_MESSAGE:
if self.union:
txt.write('\n@union\n')
adjoin(self.fields)
else:
error('Compound type of unknown kind [%s]', self.kind)
return txt.getvalue().strip().replace('\n\n\n', '\n').replace('\n\n', '\n')
def get_dsdl_signature(self):
"""
Computes DSDL signature of this type.
Please refer to the specification for details about signatures.
"""
return compute_signature(self.get_dsdl_signature_source_definition())
def get_normalized_definition(self):
"""Returns full type name string, e.g. 'uavcan.protocol.NodeStatus'"""
return self.full_name
def get_data_type_signature(self):
"""
Computes data type signature of this type. The data type signature is
guaranteed to match only if all nested data structures are compatible.
Please refer to the specification for details about signatures.
"""
if self._data_type_signature is None:
sig = Signature(self.get_dsdl_signature())
fields = self.request_fields + self.response_fields if self.kind == CompoundType.KIND_SERVICE else self.fields
for field in fields:
field_sig = field.type.get_data_type_signature()
if field_sig is not None:
sig_value = sig.get_value()
sig.add(bytes_from_crc64(field_sig))
sig.add(bytes_from_crc64(sig_value))
self._data_type_signature = sig.get_value()
return self._data_type_signature
class VoidType(Type):
"""
Void type description, e.g. void2.
Fields:
bitlen Bit length, 1 to 64
"""
def __init__(self, bitlen):
self.bitlen = bitlen
Type.__init__(self, self.get_normalized_definition(), Type.CATEGORY_VOID)
def get_normalized_definition(self):
"""Please refer to the specification for details about normalized definitions."""
return 'void' + str(self.bitlen)
def get_max_bitlen(self):
"""Returns type bit length."""
return self.bitlen
def get_min_bitlen(self):
"""Returns type bit length."""
return self.bitlen
class Attribute:
"""
Base class of an attribute description.
Fields:
type Attribute type description, the type of this field inherits the class Type, e.g. PrimitiveType
name Attribute name string
"""
# noinspection PyShadowingBuiltins
def __init__(self, type, name): # @ReservedAssignment
self.type = type
self.name = name
def __str__(self):
return self.get_normalized_definition()
def get_normalized_definition(self):
raise NotImplementedError('Pure virtual method')
__repr__ = __str__
class Field(Attribute):
"""
Field description.
Does not add new fields to Attribute.
If type is void, the name will be None.
"""
def get_normalized_definition(self):
if self.type.category == self.type.CATEGORY_VOID:
return self.type.get_normalized_definition()
else:
return '%s %s' % (self.type.get_normalized_definition(), self.name)
class Constant(Attribute):
"""
Constant description.
Fields:
init_expression Constant initialization expression string, e.g. "2+2" or "'\x66'"
value Computed result of the initialization expression in the final type (e.g. int, float)
string_value Computed result of the initialization expression as string
"""
# noinspection PyShadowingBuiltins
def __init__(self, type, name, init_expression, value): # @ReservedAssignment
Attribute.__init__(self, type, name)
self.init_expression = init_expression
self.value = value
self.string_value = repr(value)
if isinstance(value, long):
self.string_value = self.string_value.replace('L', '')
def get_normalized_definition(self):
return '%s %s = %s' % (self.type.get_normalized_definition(), self.name, self.init_expression)
class Parser:
"""
DSDL parser logic. Do not use this class directly; use the helper function instead.
"""
def __init__(self, search_dirs):
self.search_dirs = validate_search_directories(search_dirs)
def _namespace_from_filename(self, filename):
search_dirs = sorted(map(os.path.abspath, self.search_dirs)) # Nested last
filename = os.path.abspath(filename)
for dirname in search_dirs:
root_ns = dirname.split(os.path.sep)[-1]
if filename.startswith(dirname):
dir_len = len(dirname)
basename_len = len(os.path.basename(filename))
ns = filename[dir_len:-basename_len]
ns = (root_ns + '.' + ns.replace(os.path.sep, '.').strip('.')).strip('.')
validate_namespace_name(ns)
return ns
error('File [%s] was not found in search directories', filename)
def _full_typename_version_and_dtid_from_filename(self, filename):
basename = os.path.basename(filename)
items = basename.split('.')
if (len(items) != 2 and len(items) != 3 and len(items) != 4 and len(items) != 5) or items[-1] != 'uavcan':
error('Invalid file name [%s]; expected pattern: [<default-dtid>.]<short-type-name>.[<major-version>.<minor-version>.]uavcan', basename)
if len(items) == 2 or len(items) == 4:
default_dtid, name = None, items[0]
else:
default_dtid, name = items[0], items[1]
try:
default_dtid = int(default_dtid)
except ValueError:
error('Invalid default data type ID [%s]', default_dtid)
if len(items) == 2 or len(items) == 3:
version = None
else:
major_version, minor_version = items[-3], items[-2]
try:
version = (int(major_version), int(minor_version))
except ValueError:
error('Invalid version number [%s.%s]', major_version, minor_version)
full_name = self._namespace_from_filename(filename) + '.' + name
validate_compound_type_full_name(full_name)
return full_name, version, default_dtid
def _locate_compound_type_definition(self, referencing_filename, typename):
def locate_namespace_directories(ns):
namespace_dirs = []
namespace_components = ns.split('.')
root_namespace, sub_namespace_components = namespace_components[0], namespace_components[1:]
for d in self.search_dirs:
if d.split(os.path.sep)[-1] == root_namespace:
namespace_dirs.append(os.path.join(d, *sub_namespace_components))
if len(namespace_dirs) == 0:
error('Unknown namespace [%s]', ns)
return namespace_dirs
if '.' not in typename:
current_namespace = self._namespace_from_filename(referencing_filename)
full_typename = current_namespace + '.' + typename
else:
full_typename = typename
namespace = '.'.join(full_typename.split('.')[:-1])
directories = locate_namespace_directories(namespace)
for directory in directories:
logger.debug('Searching for [%s] in [%s]', full_typename, directory)
if not os.path.isdir(directory):
continue
for fn in os.listdir(directory):
fn = os.path.join(directory, fn)
if os.path.isfile(fn):
try:
fn_full_typename, _version, _dtid = self._full_typename_version_and_dtid_from_filename(fn)
if full_typename == fn_full_typename:
return fn
except Exception as ex:
logger.debug('Unknown file [%s], skipping... [%s]', pretty_filename(fn), ex)
error('Type definition not found [%s]', typename)
# noinspection PyUnusedLocal
@staticmethod
def _parse_void_type(filename, bitlen):
enforce(1 <= bitlen <= 64, 'Invalid void bit length [%d]', bitlen)
return VoidType(bitlen)
def _parse_array_type(self, filename, value_typedef, size_spec, cast_mode):
logger.debug('Parsing the array value type [%s]...', value_typedef)
value_type = self._parse_type(filename, value_typedef, cast_mode)
enforce(value_type.category != value_type.CATEGORY_ARRAY,
'Multidimensional arrays are not allowed (protip: use nested types)')
try:
if size_spec.startswith('<='):
max_size = int(size_spec[2:], 0)
mode = ArrayType.MODE_DYNAMIC
elif size_spec.startswith('<'):
max_size = int(size_spec[1:], 0) - 1
mode = ArrayType.MODE_DYNAMIC
else:
max_size = int(size_spec, 0)
mode = ArrayType.MODE_STATIC
except ValueError:
error('Invalid array size specifier [%s] (valid patterns: [<=X], [<X], [X])', size_spec)
else:
enforce(max_size > 0, 'Array size must be positive, not %d', max_size)
return ArrayType(value_type, mode, max_size)
# noinspection PyUnusedLocal
@staticmethod
def _parse_primitive_type(filename, base_name, bitlen, cast_mode):
if cast_mode is None or cast_mode == 'saturated':
cast_mode = PrimitiveType.CAST_MODE_SATURATED
elif cast_mode == 'truncated':
cast_mode = PrimitiveType.CAST_MODE_TRUNCATED
else:
error('Invalid cast mode [%s]', cast_mode)
if base_name == 'bool':
return PrimitiveType(PrimitiveType.KIND_BOOLEAN, 1, cast_mode)
try:
kind = {
'uint': PrimitiveType.KIND_UNSIGNED_INT,
'int': PrimitiveType.KIND_SIGNED_INT,
'float': PrimitiveType.KIND_FLOAT,
}[base_name]
except KeyError:
error('Unknown primitive type (note: compound types should be in CamelCase)')
# noinspection PyUnboundLocalVariable
if kind == PrimitiveType.KIND_FLOAT:
enforce(bitlen in (16, 32, 64), 'Invalid bit length for float type [%d]', bitlen)
else:
enforce(2 <= bitlen <= 64, 'Invalid bit length [%d] (note: use bool instead of uint1)', bitlen)
return PrimitiveType(kind, bitlen, cast_mode)
def _parse_compound_type(self, filename, typedef):
definition_filename = self._locate_compound_type_definition(filename, typedef)
logger.debug('Nested type [%s] is defined in [%s], parsing...', typedef, pretty_filename(definition_filename))
t = self.parse(definition_filename)
if t.kind == t.KIND_SERVICE:
error('A service type can not be nested into another compound type')
return t
def _parse_type(self, filename, typedef, cast_mode):
typedef = typedef.strip()
void_match = re.match(r'void(\d{1,2})$', typedef)
array_match = re.match(r'(.+?)\[([^\]]*)\]$', typedef)
primitive_match = re.match(r'([a-z]+)(\d{1,2})$|(bool)$', typedef)
if void_match:
size_spec = void_match.group(1).strip()
return self._parse_void_type(filename, int(size_spec))
elif array_match:
assert not primitive_match
value_typedef = array_match.group(1).strip()
size_spec = array_match.group(2).strip()
return self._parse_array_type(filename, value_typedef, size_spec, cast_mode)
elif primitive_match:
if primitive_match.group(0) == 'bool':
return self._parse_primitive_type(filename, 'bool', 1, cast_mode)
else:
base_name = primitive_match.group(1)
bitlen = int(primitive_match.group(2))
return self._parse_primitive_type(filename, base_name, bitlen, cast_mode)
else:
enforce(cast_mode is None, 'Cast mode specifier is not applicable for compound types [%s]', cast_mode)
return self._parse_compound_type(filename, typedef)
@staticmethod
def _make_constant(attrtype, name, init_expression):
enforce(attrtype.category == attrtype.CATEGORY_PRIMITIVE, 'Invalid type for constant [%d]', attrtype.category)
init_expression = ''.join(init_expression.split()) # Remove spaces
value = evaluate_expression(init_expression)
if isinstance(value, str) and len(value) == 1: # ASCII character
value = ord(value)
elif isinstance(value, (float, int, bool, long)): # Numeric literal
value = {
attrtype.KIND_UNSIGNED_INT: long,
attrtype.KIND_SIGNED_INT: long,
attrtype.KIND_BOOLEAN: int, # Not bool because we need to check range
attrtype.KIND_FLOAT: float
}[attrtype.kind](value)
else:
error('Invalid type of constant initialization expression [%s]', type(value).__name__)
logger.debug('Constant initialization expression evaluated as: [%s] --> %s', init_expression, repr(value))
attrtype.validate_value_range(value)
return Constant(attrtype, name, init_expression, value)
def _parse_line(self, filename, tokens):
cast_mode = None
if tokens[0] == 'saturated' or tokens[0] == 'truncated':
cast_mode, tokens = tokens[0], tokens[1:]
if len(tokens) < 2 and not tokens[0].startswith('void'):
error('Invalid attribute definition')
if len(tokens) == 1:
typename, attrname, tokens = tokens[0], None, []
else:
typename, attrname, tokens = tokens[0], tokens[1], tokens[2:]
validate_attribute_name(attrname)
attrtype = self._parse_type(filename, typename, cast_mode)
if len(tokens) > 0:
if len(tokens) < 2 or tokens[0] != '=':
error('Constant assignment expected')
expression = ' '.join(tokens[1:])
return self._make_constant(attrtype, attrname, expression)
else:
return Field(attrtype, attrname)
@staticmethod
def _tokenize(text):
for idx, line in enumerate(text.splitlines()):
line = re.sub('#.*', '', line).strip() # Remove comments and leading/trailing whitespaces
if line:
tokens = [tk for tk in line.split() if tk]
yield idx + 1, tokens
def parse_source(self, filename, source_text):
try:
full_typename, version, default_dtid = self._full_typename_version_and_dtid_from_filename(filename)
numbered_lines = list(self._tokenize(source_text))
all_attributes_names = set()
fields, constants, resp_fields, resp_constants = [], [], [], []
union, resp_union = False, False
response_part = False
for num, tokens in numbered_lines:
try:
if tokens == ['---']:
enforce(not response_part, 'Duplicate response mark')
response_part = True
all_attributes_names = set()
continue
if tokens == ['@union']:
if response_part:
enforce(not resp_union, 'Response data structure has already been declared as union')
resp_union = True
else:
enforce(not union, 'Data structure has already been declared as union')
union = True
continue
attr = self._parse_line(filename, tokens)
if attr.name and attr.name in all_attributes_names:
error('Duplicated attribute name [%s]', attr.name)
all_attributes_names.add(attr.name)
if isinstance(attr, Constant):
(resp_constants if response_part else constants).append(attr)
elif isinstance(attr, Field):
(resp_fields if response_part else fields).append(attr)
else:
error('Unknown attribute type - internal error')
except DsdlException as ex:
if not ex.line:
ex.line = num
raise ex
except Exception as ex:
logger.error('Internal error', exc_info=True)
raise DsdlException('Internal error: %s' % str(ex), line=num)
if response_part:
t = CompoundType(full_typename, CompoundType.KIND_SERVICE, filename, default_dtid, version, source_text)
t.request_fields = fields
t.request_constants = constants
t.response_fields = resp_fields
t.response_constants = resp_constants
t.request_union = union
t.response_union = resp_union
max_bitlen = t.get_max_bitlen_request(), t.get_max_bitlen_response()
max_bytelen = tuple(map(bitlen_to_bytelen, max_bitlen))
else:
t = CompoundType(full_typename, CompoundType.KIND_MESSAGE, filename, default_dtid, version, source_text)
t.fields = fields
t.constants = constants
t.union = union
max_bitlen = t.get_max_bitlen()
max_bytelen = bitlen_to_bytelen(max_bitlen)
validate_union(t)
validate_data_type_id(t)
logger.debug('Type [%s], default DTID: %s, signature: %08x, maxbits: %s, maxbytes: %s, DSSD:',
full_typename, default_dtid, t.get_dsdl_signature(), max_bitlen, max_bytelen)
for ln in t.get_dsdl_signature_source_definition().splitlines():
logger.debug(' %s', ln)
return t
except DsdlException as ex:
if not ex.file:
ex.file = filename
raise ex
def parse(self, filename):
try:
filename = os.path.abspath(filename)
with open(filename) as f:
source_text = f.read()
return self.parse_source(filename, source_text)
except IOError as ex:
raise DsdlException('IO error: %s' % str(ex), file=filename)
except Exception as ex:
logger.error('Internal error', exc_info=True)
raise DsdlException('Internal error: %s' % str(ex), file=filename)
def error(fmt, *args):
raise DsdlException(fmt % args)
def enforce(cond, fmt, *args):
if not cond:
error(fmt, *args)
def bitlen_to_bytelen(x):
return int((x + 7) / 8)
def evaluate_expression(expression):
try:
env = {
'locals': None,
'globals': None,
'__builtins__': None,
'true': 1,
'false': 0
}
return eval(expression, env)
except Exception as ex:
error('Cannot evaluate expression: %s', str(ex))
def validate_search_directories(dirnames):
dirnames = set(dirnames)
dirnames = list(map(os.path.abspath, dirnames))
for d1 in dirnames:
for d2 in dirnames:
if d1 == d2:
continue
enforce(not d1.startswith(d2), 'Nested search directories are not allowed [%s] [%s]', d1, d2)
return dirnames
def validate_namespace_name(name):
for component in name.split('.'):
enforce(re.match(r'[a-z][a-z0-9_]*$', component), 'Invalid namespace name [%s]', name)
enforce(len(name) <= MAX_FULL_TYPE_NAME_LEN, 'Namespace name is too long [%s]', name)
def validate_compound_type_full_name(name):
enforce('.' in name, 'Full type name must explicitly specify its namespace [%s]', name)
short_name = name.split('.')[-1]
namespace = '.'.join(name.split('.')[:-1])
validate_namespace_name(namespace)
enforce(re.match(r'[A-Z][A-Za-z0-9_]*$', short_name), 'Invalid type name [%s]', name)
enforce(len(name) <= MAX_FULL_TYPE_NAME_LEN, 'Type name is too long [%s]', name)
def validate_attribute_name(name):
enforce(re.match(r'[a-zA-Z][a-zA-Z0-9_]*$', name), 'Invalid attribute name [%s]', name)
def validate_data_type_id(t):
if t.default_dtid is None:
return
if t.kind == t.KIND_MESSAGE:
enforce(0 <= t.default_dtid <= MESSAGE_DATA_TYPE_ID_MAX,
'Invalid data type ID for message [%s]', t.default_dtid)
elif t.kind == t.KIND_SERVICE:
enforce(0 <= t.default_dtid <= SERVICE_DATA_TYPE_ID_MAX,
'Invalid data type ID for service [%s]', t.default_dtid)
else:
error('Invalid kind: %s', t.kind)
def validate_union(t):
def check_fields(fields):
enforce(len(fields) > 1, 'Union contains less than 2 fields')
enforce(not any(_.type.category == _.type.CATEGORY_VOID for _ in fields), 'Union must not contain void fields')
if t.kind == t.KIND_MESSAGE:
if t.union:
check_fields(t.fields)
elif t.kind == t.KIND_SERVICE:
if t.request_union:
check_fields(t.request_fields)
if t.response_union:
check_fields(t.response_fields)
else:
error('Invalid kind: %s', t.kind)
def parse_namespaces(source_dirs, search_dirs=None):
"""
Use only this function to parse DSDL definitions.
This function takes a list of root namespace directories (containing DSDL definition files to parse) and an
optional list of search directories (containing DSDL definition files that can be referenced from the types
that are going to be parsed).
Returns the list of parsed type definitions, where type of each element is CompoundType.
Args:
source_dirs: List of root namespace directories to parse.
search_dirs: List of root namespace directories with referenced types (optional). This list is
automatically extended with source_dirs.
Example:
>>> import uavcan
>>> a = uavcan.dsdl.parse_namespaces(['../dsdl/uavcan'])
>>> len(a)
77
>>> a[0]
uavcan.Timestamp
>>> a[0].fields
[truncated uint48 husec]
>>> a[0].constants
[saturated uint48 UNKNOWN = 0, saturated uint48 USEC_PER_LSB = 100]
"""
# noinspection PyShadowingNames
def walk():
import fnmatch
from functools import partial
def on_walk_error(directory, ex):
raise DsdlException('OS error in [%s]: %s' % (directory, str(ex)))
for source_dir in source_dirs:
walker = os.walk(source_dir, onerror=partial(on_walk_error, source_dir), followlinks=True)
for root, _dirnames, filenames in walker:
for filename in fnmatch.filter(filenames, '*.uavcan'):
filename = os.path.join(root, filename)
yield filename
all_default_dtid = {} # (kind, dtid) : filename
# noinspection PyShadowingNames
def ensure_unique_dtid(t, filename):
if t.default_dtid is None:
return
key = t.kind, t.default_dtid
if key in all_default_dtid:
value = all_default_dtid[key]
first = pretty_filename(value[0])
second = pretty_filename(filename)
if t.get_dsdl_signature() != value[1].get_dsdl_signature():
error('Redefinition of data type ID: [%s] [%s]', first, second)
else:
logger.debug('ignoring duplicate definition of %s', t.full_name)
return
all_default_dtid[key] = (filename, t)
parser = Parser(source_dirs + (search_dirs or []))
output_types = []
for filename in walk():
t = parser.parse(filename)
ensure_unique_dtid(t, filename)
output_types.append(t)
return output_types
|
py | 7df90ca370832e17c229dbcc7e7994c3c2fa269f | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: networking/v1alpha3/virtual_service.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2
from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2
from networking.v1alpha3 import service_dependency_pb2 as networking_dot_v1alpha3_dot_service__dependency__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='networking/v1alpha3/virtual_service.proto',
package='istio.networking.v1alpha3',
syntax='proto3',
serialized_pb=_b('\n)networking/v1alpha3/virtual_service.proto\x12\x19istio.networking.v1alpha3\x1a\x1egoogle/protobuf/duration.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a,networking/v1alpha3/service_dependency.proto\"\x87\x02\n\x0eVirtualService\x12\r\n\x05hosts\x18\x01 \x03(\t\x12\x10\n\x08gateways\x18\x02 \x03(\t\x12\x32\n\x04http\x18\x03 \x03(\x0b\x32$.istio.networking.v1alpha3.HTTPRoute\x12\x30\n\x03tls\x18\x05 \x03(\x0b\x32#.istio.networking.v1alpha3.TLSRoute\x12\x30\n\x03tcp\x18\x04 \x03(\x0b\x32#.istio.networking.v1alpha3.TCPRoute\x12<\n\x0c\x63onfig_scope\x18\x06 \x01(\x0e\x32&.istio.networking.v1alpha3.ConfigScope\"b\n\x0b\x44\x65stination\x12\x0c\n\x04host\x18\x01 \x01(\t\x12\x0e\n\x06subset\x18\x02 \x01(\t\x12\x35\n\x04port\x18\x03 \x01(\x0b\x32\'.istio.networking.v1alpha3.PortSelector\"\xf9\x08\n\tHTTPRoute\x12:\n\x05match\x18\x01 \x03(\x0b\x32+.istio.networking.v1alpha3.HTTPMatchRequest\x12>\n\x05route\x18\x02 \x03(\x0b\x32/.istio.networking.v1alpha3.HTTPRouteDestination\x12\x39\n\x08redirect\x18\x03 \x01(\x0b\x32\'.istio.networking.v1alpha3.HTTPRedirect\x12\x37\n\x07rewrite\x18\x04 \x01(\x0b\x32&.istio.networking.v1alpha3.HTTPRewrite\x12\x19\n\x11websocket_upgrade\x18\x05 \x01(\x08\x12*\n\x07timeout\x18\x06 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x35\n\x07retries\x18\x07 \x01(\x0b\x32$.istio.networking.v1alpha3.HTTPRetry\x12<\n\x05\x66\x61ult\x18\x08 \x01(\x0b\x32-.istio.networking.v1alpha3.HTTPFaultInjection\x12\x36\n\x06mirror\x18\t \x01(\x0b\x32&.istio.networking.v1alpha3.Destination\x12:\n\x0b\x63ors_policy\x18\n \x01(\x0b\x32%.istio.networking.v1alpha3.CorsPolicy\x12S\n\x0e\x61ppend_headers\x18\x0b \x03(\x0b\x32\x37.istio.networking.v1alpha3.HTTPRoute.AppendHeadersEntryB\x02\x18\x01\x12#\n\x17remove_response_headers\x18\x0c \x03(\tB\x02\x18\x01\x12\x64\n\x17\x61ppend_response_headers\x18\r \x03(\x0b\x32?.istio.networking.v1alpha3.HTTPRoute.AppendResponseHeadersEntryB\x02\x18\x01\x12\"\n\x16remove_request_headers\x18\x0e \x03(\tB\x02\x18\x01\x12\x62\n\x16\x61ppend_request_headers\x18\x0f \x03(\x0b\x32>.istio.networking.v1alpha3.HTTPRoute.AppendRequestHeadersEntryB\x02\x18\x01\x12\x33\n\x07headers\x18\x10 \x01(\x0b\x32\".istio.networking.v1alpha3.Headers\x1a\x34\n\x12\x41ppendHeadersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a<\n\x1a\x41ppendResponseHeadersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a;\n\x19\x41ppendRequestHeadersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xa9\x03\n\x07Headers\x12\x44\n\x07request\x18\x01 \x01(\x0b\x32\x33.istio.networking.v1alpha3.Headers.HeaderOperations\x12\x45\n\x08response\x18\x02 \x01(\x0b\x32\x33.istio.networking.v1alpha3.Headers.HeaderOperations\x1a\x90\x02\n\x10HeaderOperations\x12I\n\x03set\x18\x01 \x03(\x0b\x32<.istio.networking.v1alpha3.Headers.HeaderOperations.SetEntry\x12I\n\x03\x61\x64\x64\x18\x02 \x03(\x0b\x32<.istio.networking.v1alpha3.Headers.HeaderOperations.AddEntry\x12\x0e\n\x06remove\x18\x03 \x03(\t\x1a*\n\x08SetEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a*\n\x08\x41\x64\x64\x45ntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x84\x01\n\x08TLSRoute\x12<\n\x05match\x18\x01 \x03(\x0b\x32-.istio.networking.v1alpha3.TLSMatchAttributes\x12:\n\x05route\x18\x02 \x03(\x0b\x32+.istio.networking.v1alpha3.RouteDestination\"\x83\x01\n\x08TCPRoute\x12;\n\x05match\x18\x01 
\x03(\x0b\x32,.istio.networking.v1alpha3.L4MatchAttributes\x12:\n\x05route\x18\x02 \x03(\x0b\x32+.istio.networking.v1alpha3.RouteDestination\"\xc0\x04\n\x10HTTPMatchRequest\x12\x33\n\x03uri\x18\x01 \x01(\x0b\x32&.istio.networking.v1alpha3.StringMatch\x12\x36\n\x06scheme\x18\x02 \x01(\x0b\x32&.istio.networking.v1alpha3.StringMatch\x12\x36\n\x06method\x18\x03 \x01(\x0b\x32&.istio.networking.v1alpha3.StringMatch\x12\x39\n\tauthority\x18\x04 \x01(\x0b\x32&.istio.networking.v1alpha3.StringMatch\x12I\n\x07headers\x18\x05 \x03(\x0b\x32\x38.istio.networking.v1alpha3.HTTPMatchRequest.HeadersEntry\x12\x0c\n\x04port\x18\x06 \x01(\r\x12T\n\rsource_labels\x18\x07 \x03(\x0b\x32=.istio.networking.v1alpha3.HTTPMatchRequest.SourceLabelsEntry\x12\x10\n\x08gateways\x18\x08 \x03(\t\x1aV\n\x0cHeadersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x35\n\x05value\x18\x02 \x01(\x0b\x32&.istio.networking.v1alpha3.StringMatch:\x02\x38\x01\x1a\x33\n\x11SourceLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xbc\x04\n\x14HTTPRouteDestination\x12;\n\x0b\x64\x65stination\x18\x01 \x01(\x0b\x32&.istio.networking.v1alpha3.Destination\x12\x0e\n\x06weight\x18\x02 \x01(\x05\x12#\n\x17remove_response_headers\x18\x03 \x03(\tB\x02\x18\x01\x12o\n\x17\x61ppend_response_headers\x18\x04 \x03(\x0b\x32J.istio.networking.v1alpha3.HTTPRouteDestination.AppendResponseHeadersEntryB\x02\x18\x01\x12\"\n\x16remove_request_headers\x18\x05 \x03(\tB\x02\x18\x01\x12m\n\x16\x61ppend_request_headers\x18\x06 \x03(\x0b\x32I.istio.networking.v1alpha3.HTTPRouteDestination.AppendRequestHeadersEntryB\x02\x18\x01\x12\x33\n\x07headers\x18\x07 \x01(\x0b\x32\".istio.networking.v1alpha3.Headers\x1a<\n\x1a\x41ppendResponseHeadersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a;\n\x19\x41ppendRequestHeadersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"_\n\x10RouteDestination\x12;\n\x0b\x64\x65stination\x18\x01 \x01(\x0b\x32&.istio.networking.v1alpha3.Destination\x12\x0e\n\x06weight\x18\x02 \x01(\x05\"\xf3\x01\n\x11L4MatchAttributes\x12\x1b\n\x13\x64\x65stination_subnets\x18\x01 \x03(\t\x12\x0c\n\x04port\x18\x02 \x01(\r\x12\x15\n\rsource_subnet\x18\x03 \x01(\t\x12U\n\rsource_labels\x18\x04 \x03(\x0b\x32>.istio.networking.v1alpha3.L4MatchAttributes.SourceLabelsEntry\x12\x10\n\x08gateways\x18\x05 \x03(\t\x1a\x33\n\x11SourceLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x88\x02\n\x12TLSMatchAttributes\x12\x11\n\tsni_hosts\x18\x01 \x03(\t\x12\x1b\n\x13\x64\x65stination_subnets\x18\x02 \x03(\t\x12\x0c\n\x04port\x18\x03 \x01(\r\x12\x15\n\rsource_subnet\x18\x04 \x01(\t\x12V\n\rsource_labels\x18\x05 \x03(\x0b\x32?.istio.networking.v1alpha3.TLSMatchAttributes.SourceLabelsEntry\x12\x10\n\x08gateways\x18\x06 \x03(\t\x1a\x33\n\x11SourceLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\".\n\x0cHTTPRedirect\x12\x0b\n\x03uri\x18\x01 \x01(\t\x12\x11\n\tauthority\x18\x02 \x01(\t\"-\n\x0bHTTPRewrite\x12\x0b\n\x03uri\x18\x01 \x01(\t\x12\x11\n\tauthority\x18\x02 \x01(\t\"O\n\x0bStringMatch\x12\x0f\n\x05\x65xact\x18\x01 \x01(\tH\x00\x12\x10\n\x06prefix\x18\x02 \x01(\tH\x00\x12\x0f\n\x05regex\x18\x03 \x01(\tH\x00\x42\x0c\n\nmatch_type\"c\n\tHTTPRetry\x12\x10\n\x08\x61ttempts\x18\x01 \x01(\x05\x12\x32\n\x0fper_try_timeout\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x10\n\x08retry_on\x18\x03 \x01(\t\"\xcb\x01\n\nCorsPolicy\x12\x14\n\x0c\x61llow_origin\x18\x01 
\x03(\t\x12\x15\n\rallow_methods\x18\x02 \x03(\t\x12\x15\n\rallow_headers\x18\x03 \x03(\t\x12\x16\n\x0e\x65xpose_headers\x18\x04 \x03(\t\x12*\n\x07max_age\x18\x05 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x35\n\x11\x61llow_credentials\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\"\x9a\x04\n\x12HTTPFaultInjection\x12\x42\n\x05\x64\x65lay\x18\x01 \x01(\x0b\x32\x33.istio.networking.v1alpha3.HTTPFaultInjection.Delay\x12\x42\n\x05\x61\x62ort\x18\x02 \x01(\x0b\x32\x33.istio.networking.v1alpha3.HTTPFaultInjection.Abort\x1a\xd1\x01\n\x05\x44\x65lay\x12\x13\n\x07percent\x18\x01 \x01(\x05\x42\x02\x18\x01\x12\x30\n\x0b\x66ixed_delay\x18\x02 \x01(\x0b\x32\x19.google.protobuf.DurationH\x00\x12\x36\n\x11\x65xponential_delay\x18\x03 \x01(\x0b\x32\x19.google.protobuf.DurationH\x00\x12\x36\n\npercentage\x18\x05 \x01(\x0b\x32\".istio.networking.v1alpha3.PercentB\x11\n\x0fhttp_delay_type\x1a\xa7\x01\n\x05\x41\x62ort\x12\x13\n\x07percent\x18\x01 \x01(\x05\x42\x02\x18\x01\x12\x15\n\x0bhttp_status\x18\x02 \x01(\x05H\x00\x12\x15\n\x0bgrpc_status\x18\x03 \x01(\tH\x00\x12\x15\n\x0bhttp2_error\x18\x04 \x01(\tH\x00\x12\x36\n\npercentage\x18\x05 \x01(\x0b\x32\".istio.networking.v1alpha3.PercentB\x0c\n\nerror_type\"8\n\x0cPortSelector\x12\x10\n\x06number\x18\x01 \x01(\rH\x00\x12\x0e\n\x04name\x18\x02 \x01(\tH\x00\x42\x06\n\x04port\"\x18\n\x07Percent\x12\r\n\x05value\x18\x01 \x01(\x01\x42\"Z istio.io/api/networking/v1alpha3b\x06proto3')
,
dependencies=[google_dot_protobuf_dot_duration__pb2.DESCRIPTOR,google_dot_protobuf_dot_wrappers__pb2.DESCRIPTOR,networking_dot_v1alpha3_dot_service__dependency__pb2.DESCRIPTOR,])
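# The Descriptor objects below are the protoc-generated mirrors of the messages
# declared in networking/v1alpha3/virtual_service.proto (VirtualService,
# Destination, HTTPRoute, Headers, TLSRoute, TCPRoute, the match/route/redirect
# helpers, fault injection, PortSelector, Percent). Cross-references between
# them (message_type, enum_type, containing_type, containing_oneof) are left as
# None at construction time and are resolved in the wiring pass that follows
# the last Descriptor definition.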
_VIRTUALSERVICE = _descriptor.Descriptor(
name='VirtualService',
full_name='istio.networking.v1alpha3.VirtualService',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='hosts', full_name='istio.networking.v1alpha3.VirtualService.hosts', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='gateways', full_name='istio.networking.v1alpha3.VirtualService.gateways', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='http', full_name='istio.networking.v1alpha3.VirtualService.http', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tls', full_name='istio.networking.v1alpha3.VirtualService.tls', index=3,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tcp', full_name='istio.networking.v1alpha3.VirtualService.tcp', index=4,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='config_scope', full_name='istio.networking.v1alpha3.VirtualService.config_scope', index=5,
number=6, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=183,
serialized_end=446,
)
_DESTINATION = _descriptor.Descriptor(
name='Destination',
full_name='istio.networking.v1alpha3.Destination',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='host', full_name='istio.networking.v1alpha3.Destination.host', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='subset', full_name='istio.networking.v1alpha3.Destination.subset', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='port', full_name='istio.networking.v1alpha3.Destination.port', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=448,
serialized_end=546,
)
_HTTPROUTE_APPENDHEADERSENTRY = _descriptor.Descriptor(
name='AppendHeadersEntry',
full_name='istio.networking.v1alpha3.HTTPRoute.AppendHeadersEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='istio.networking.v1alpha3.HTTPRoute.AppendHeadersEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='istio.networking.v1alpha3.HTTPRoute.AppendHeadersEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1519,
serialized_end=1571,
)
_HTTPROUTE_APPENDRESPONSEHEADERSENTRY = _descriptor.Descriptor(
name='AppendResponseHeadersEntry',
full_name='istio.networking.v1alpha3.HTTPRoute.AppendResponseHeadersEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='istio.networking.v1alpha3.HTTPRoute.AppendResponseHeadersEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='istio.networking.v1alpha3.HTTPRoute.AppendResponseHeadersEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1573,
serialized_end=1633,
)
_HTTPROUTE_APPENDREQUESTHEADERSENTRY = _descriptor.Descriptor(
name='AppendRequestHeadersEntry',
full_name='istio.networking.v1alpha3.HTTPRoute.AppendRequestHeadersEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='istio.networking.v1alpha3.HTTPRoute.AppendRequestHeadersEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='istio.networking.v1alpha3.HTTPRoute.AppendRequestHeadersEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1635,
serialized_end=1694,
)
_HTTPROUTE = _descriptor.Descriptor(
name='HTTPRoute',
full_name='istio.networking.v1alpha3.HTTPRoute',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='match', full_name='istio.networking.v1alpha3.HTTPRoute.match', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='route', full_name='istio.networking.v1alpha3.HTTPRoute.route', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='redirect', full_name='istio.networking.v1alpha3.HTTPRoute.redirect', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rewrite', full_name='istio.networking.v1alpha3.HTTPRoute.rewrite', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='websocket_upgrade', full_name='istio.networking.v1alpha3.HTTPRoute.websocket_upgrade', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='timeout', full_name='istio.networking.v1alpha3.HTTPRoute.timeout', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='retries', full_name='istio.networking.v1alpha3.HTTPRoute.retries', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='fault', full_name='istio.networking.v1alpha3.HTTPRoute.fault', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mirror', full_name='istio.networking.v1alpha3.HTTPRoute.mirror', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cors_policy', full_name='istio.networking.v1alpha3.HTTPRoute.cors_policy', index=9,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='append_headers', full_name='istio.networking.v1alpha3.HTTPRoute.append_headers', index=10,
number=11, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\030\001')), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='remove_response_headers', full_name='istio.networking.v1alpha3.HTTPRoute.remove_response_headers', index=11,
number=12, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\030\001')), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='append_response_headers', full_name='istio.networking.v1alpha3.HTTPRoute.append_response_headers', index=12,
number=13, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\030\001')), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='remove_request_headers', full_name='istio.networking.v1alpha3.HTTPRoute.remove_request_headers', index=13,
number=14, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\030\001')), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='append_request_headers', full_name='istio.networking.v1alpha3.HTTPRoute.append_request_headers', index=14,
number=15, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\030\001')), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='headers', full_name='istio.networking.v1alpha3.HTTPRoute.headers', index=15,
number=16, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_HTTPROUTE_APPENDHEADERSENTRY, _HTTPROUTE_APPENDRESPONSEHEADERSENTRY, _HTTPROUTE_APPENDREQUESTHEADERSENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=549,
serialized_end=1694,
)
_HEADERS_HEADEROPERATIONS_SETENTRY = _descriptor.Descriptor(
name='SetEntry',
full_name='istio.networking.v1alpha3.Headers.HeaderOperations.SetEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='istio.networking.v1alpha3.Headers.HeaderOperations.SetEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='istio.networking.v1alpha3.Headers.HeaderOperations.SetEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2036,
serialized_end=2078,
)
_HEADERS_HEADEROPERATIONS_ADDENTRY = _descriptor.Descriptor(
name='AddEntry',
full_name='istio.networking.v1alpha3.Headers.HeaderOperations.AddEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='istio.networking.v1alpha3.Headers.HeaderOperations.AddEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='istio.networking.v1alpha3.Headers.HeaderOperations.AddEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2080,
serialized_end=2122,
)
_HEADERS_HEADEROPERATIONS = _descriptor.Descriptor(
name='HeaderOperations',
full_name='istio.networking.v1alpha3.Headers.HeaderOperations',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='set', full_name='istio.networking.v1alpha3.Headers.HeaderOperations.set', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='add', full_name='istio.networking.v1alpha3.Headers.HeaderOperations.add', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='remove', full_name='istio.networking.v1alpha3.Headers.HeaderOperations.remove', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_HEADERS_HEADEROPERATIONS_SETENTRY, _HEADERS_HEADEROPERATIONS_ADDENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1850,
serialized_end=2122,
)
_HEADERS = _descriptor.Descriptor(
name='Headers',
full_name='istio.networking.v1alpha3.Headers',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='request', full_name='istio.networking.v1alpha3.Headers.request', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='response', full_name='istio.networking.v1alpha3.Headers.response', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_HEADERS_HEADEROPERATIONS, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1697,
serialized_end=2122,
)
_TLSROUTE = _descriptor.Descriptor(
name='TLSRoute',
full_name='istio.networking.v1alpha3.TLSRoute',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='match', full_name='istio.networking.v1alpha3.TLSRoute.match', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='route', full_name='istio.networking.v1alpha3.TLSRoute.route', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2125,
serialized_end=2257,
)
_TCPROUTE = _descriptor.Descriptor(
name='TCPRoute',
full_name='istio.networking.v1alpha3.TCPRoute',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='match', full_name='istio.networking.v1alpha3.TCPRoute.match', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='route', full_name='istio.networking.v1alpha3.TCPRoute.route', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2260,
serialized_end=2391,
)
_HTTPMATCHREQUEST_HEADERSENTRY = _descriptor.Descriptor(
name='HeadersEntry',
full_name='istio.networking.v1alpha3.HTTPMatchRequest.HeadersEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='istio.networking.v1alpha3.HTTPMatchRequest.HeadersEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='istio.networking.v1alpha3.HTTPMatchRequest.HeadersEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2831,
serialized_end=2917,
)
_HTTPMATCHREQUEST_SOURCELABELSENTRY = _descriptor.Descriptor(
name='SourceLabelsEntry',
full_name='istio.networking.v1alpha3.HTTPMatchRequest.SourceLabelsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='istio.networking.v1alpha3.HTTPMatchRequest.SourceLabelsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='istio.networking.v1alpha3.HTTPMatchRequest.SourceLabelsEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2919,
serialized_end=2970,
)
_HTTPMATCHREQUEST = _descriptor.Descriptor(
name='HTTPMatchRequest',
full_name='istio.networking.v1alpha3.HTTPMatchRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='uri', full_name='istio.networking.v1alpha3.HTTPMatchRequest.uri', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='scheme', full_name='istio.networking.v1alpha3.HTTPMatchRequest.scheme', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='method', full_name='istio.networking.v1alpha3.HTTPMatchRequest.method', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='authority', full_name='istio.networking.v1alpha3.HTTPMatchRequest.authority', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='headers', full_name='istio.networking.v1alpha3.HTTPMatchRequest.headers', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='port', full_name='istio.networking.v1alpha3.HTTPMatchRequest.port', index=5,
number=6, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='source_labels', full_name='istio.networking.v1alpha3.HTTPMatchRequest.source_labels', index=6,
number=7, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='gateways', full_name='istio.networking.v1alpha3.HTTPMatchRequest.gateways', index=7,
number=8, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_HTTPMATCHREQUEST_HEADERSENTRY, _HTTPMATCHREQUEST_SOURCELABELSENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2394,
serialized_end=2970,
)
_HTTPROUTEDESTINATION_APPENDRESPONSEHEADERSENTRY = _descriptor.Descriptor(
name='AppendResponseHeadersEntry',
full_name='istio.networking.v1alpha3.HTTPRouteDestination.AppendResponseHeadersEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='istio.networking.v1alpha3.HTTPRouteDestination.AppendResponseHeadersEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='istio.networking.v1alpha3.HTTPRouteDestination.AppendResponseHeadersEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1573,
serialized_end=1633,
)
_HTTPROUTEDESTINATION_APPENDREQUESTHEADERSENTRY = _descriptor.Descriptor(
name='AppendRequestHeadersEntry',
full_name='istio.networking.v1alpha3.HTTPRouteDestination.AppendRequestHeadersEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='istio.networking.v1alpha3.HTTPRouteDestination.AppendRequestHeadersEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='istio.networking.v1alpha3.HTTPRouteDestination.AppendRequestHeadersEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1635,
serialized_end=1694,
)
_HTTPROUTEDESTINATION = _descriptor.Descriptor(
name='HTTPRouteDestination',
full_name='istio.networking.v1alpha3.HTTPRouteDestination',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='destination', full_name='istio.networking.v1alpha3.HTTPRouteDestination.destination', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='weight', full_name='istio.networking.v1alpha3.HTTPRouteDestination.weight', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='remove_response_headers', full_name='istio.networking.v1alpha3.HTTPRouteDestination.remove_response_headers', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\030\001')), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='append_response_headers', full_name='istio.networking.v1alpha3.HTTPRouteDestination.append_response_headers', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\030\001')), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='remove_request_headers', full_name='istio.networking.v1alpha3.HTTPRouteDestination.remove_request_headers', index=4,
number=5, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\030\001')), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='append_request_headers', full_name='istio.networking.v1alpha3.HTTPRouteDestination.append_request_headers', index=5,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\030\001')), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='headers', full_name='istio.networking.v1alpha3.HTTPRouteDestination.headers', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_HTTPROUTEDESTINATION_APPENDRESPONSEHEADERSENTRY, _HTTPROUTEDESTINATION_APPENDREQUESTHEADERSENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2973,
serialized_end=3545,
)
_ROUTEDESTINATION = _descriptor.Descriptor(
name='RouteDestination',
full_name='istio.networking.v1alpha3.RouteDestination',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='destination', full_name='istio.networking.v1alpha3.RouteDestination.destination', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='weight', full_name='istio.networking.v1alpha3.RouteDestination.weight', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3547,
serialized_end=3642,
)
_L4MATCHATTRIBUTES_SOURCELABELSENTRY = _descriptor.Descriptor(
name='SourceLabelsEntry',
full_name='istio.networking.v1alpha3.L4MatchAttributes.SourceLabelsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='istio.networking.v1alpha3.L4MatchAttributes.SourceLabelsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='istio.networking.v1alpha3.L4MatchAttributes.SourceLabelsEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2919,
serialized_end=2970,
)
_L4MATCHATTRIBUTES = _descriptor.Descriptor(
name='L4MatchAttributes',
full_name='istio.networking.v1alpha3.L4MatchAttributes',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='destination_subnets', full_name='istio.networking.v1alpha3.L4MatchAttributes.destination_subnets', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='port', full_name='istio.networking.v1alpha3.L4MatchAttributes.port', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='source_subnet', full_name='istio.networking.v1alpha3.L4MatchAttributes.source_subnet', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='source_labels', full_name='istio.networking.v1alpha3.L4MatchAttributes.source_labels', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='gateways', full_name='istio.networking.v1alpha3.L4MatchAttributes.gateways', index=4,
number=5, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_L4MATCHATTRIBUTES_SOURCELABELSENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3645,
serialized_end=3888,
)
_TLSMATCHATTRIBUTES_SOURCELABELSENTRY = _descriptor.Descriptor(
name='SourceLabelsEntry',
full_name='istio.networking.v1alpha3.TLSMatchAttributes.SourceLabelsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='istio.networking.v1alpha3.TLSMatchAttributes.SourceLabelsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='istio.networking.v1alpha3.TLSMatchAttributes.SourceLabelsEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2919,
serialized_end=2970,
)
_TLSMATCHATTRIBUTES = _descriptor.Descriptor(
name='TLSMatchAttributes',
full_name='istio.networking.v1alpha3.TLSMatchAttributes',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='sni_hosts', full_name='istio.networking.v1alpha3.TLSMatchAttributes.sni_hosts', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='destination_subnets', full_name='istio.networking.v1alpha3.TLSMatchAttributes.destination_subnets', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='port', full_name='istio.networking.v1alpha3.TLSMatchAttributes.port', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='source_subnet', full_name='istio.networking.v1alpha3.TLSMatchAttributes.source_subnet', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='source_labels', full_name='istio.networking.v1alpha3.TLSMatchAttributes.source_labels', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='gateways', full_name='istio.networking.v1alpha3.TLSMatchAttributes.gateways', index=5,
number=6, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_TLSMATCHATTRIBUTES_SOURCELABELSENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3891,
serialized_end=4155,
)
_HTTPREDIRECT = _descriptor.Descriptor(
name='HTTPRedirect',
full_name='istio.networking.v1alpha3.HTTPRedirect',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='uri', full_name='istio.networking.v1alpha3.HTTPRedirect.uri', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='authority', full_name='istio.networking.v1alpha3.HTTPRedirect.authority', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4157,
serialized_end=4203,
)
_HTTPREWRITE = _descriptor.Descriptor(
name='HTTPRewrite',
full_name='istio.networking.v1alpha3.HTTPRewrite',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='uri', full_name='istio.networking.v1alpha3.HTTPRewrite.uri', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='authority', full_name='istio.networking.v1alpha3.HTTPRewrite.authority', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4205,
serialized_end=4250,
)
_STRINGMATCH = _descriptor.Descriptor(
name='StringMatch',
full_name='istio.networking.v1alpha3.StringMatch',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='exact', full_name='istio.networking.v1alpha3.StringMatch.exact', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='prefix', full_name='istio.networking.v1alpha3.StringMatch.prefix', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='regex', full_name='istio.networking.v1alpha3.StringMatch.regex', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='match_type', full_name='istio.networking.v1alpha3.StringMatch.match_type',
index=0, containing_type=None, fields=[]),
],
serialized_start=4252,
serialized_end=4331,
)
_HTTPRETRY = _descriptor.Descriptor(
name='HTTPRetry',
full_name='istio.networking.v1alpha3.HTTPRetry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='attempts', full_name='istio.networking.v1alpha3.HTTPRetry.attempts', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='per_try_timeout', full_name='istio.networking.v1alpha3.HTTPRetry.per_try_timeout', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='retry_on', full_name='istio.networking.v1alpha3.HTTPRetry.retry_on', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4333,
serialized_end=4432,
)
_CORSPOLICY = _descriptor.Descriptor(
name='CorsPolicy',
full_name='istio.networking.v1alpha3.CorsPolicy',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='allow_origin', full_name='istio.networking.v1alpha3.CorsPolicy.allow_origin', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='allow_methods', full_name='istio.networking.v1alpha3.CorsPolicy.allow_methods', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='allow_headers', full_name='istio.networking.v1alpha3.CorsPolicy.allow_headers', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='expose_headers', full_name='istio.networking.v1alpha3.CorsPolicy.expose_headers', index=3,
number=4, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_age', full_name='istio.networking.v1alpha3.CorsPolicy.max_age', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='allow_credentials', full_name='istio.networking.v1alpha3.CorsPolicy.allow_credentials', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4435,
serialized_end=4638,
)
_HTTPFAULTINJECTION_DELAY = _descriptor.Descriptor(
name='Delay',
full_name='istio.networking.v1alpha3.HTTPFaultInjection.Delay',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='percent', full_name='istio.networking.v1alpha3.HTTPFaultInjection.Delay.percent', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\030\001')), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='fixed_delay', full_name='istio.networking.v1alpha3.HTTPFaultInjection.Delay.fixed_delay', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='exponential_delay', full_name='istio.networking.v1alpha3.HTTPFaultInjection.Delay.exponential_delay', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='percentage', full_name='istio.networking.v1alpha3.HTTPFaultInjection.Delay.percentage', index=3,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='http_delay_type', full_name='istio.networking.v1alpha3.HTTPFaultInjection.Delay.http_delay_type',
index=0, containing_type=None, fields=[]),
],
serialized_start=4800,
serialized_end=5009,
)
_HTTPFAULTINJECTION_ABORT = _descriptor.Descriptor(
name='Abort',
full_name='istio.networking.v1alpha3.HTTPFaultInjection.Abort',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='percent', full_name='istio.networking.v1alpha3.HTTPFaultInjection.Abort.percent', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\030\001')), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='http_status', full_name='istio.networking.v1alpha3.HTTPFaultInjection.Abort.http_status', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='grpc_status', full_name='istio.networking.v1alpha3.HTTPFaultInjection.Abort.grpc_status', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='http2_error', full_name='istio.networking.v1alpha3.HTTPFaultInjection.Abort.http2_error', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='percentage', full_name='istio.networking.v1alpha3.HTTPFaultInjection.Abort.percentage', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='error_type', full_name='istio.networking.v1alpha3.HTTPFaultInjection.Abort.error_type',
index=0, containing_type=None, fields=[]),
],
serialized_start=5012,
serialized_end=5179,
)
_HTTPFAULTINJECTION = _descriptor.Descriptor(
name='HTTPFaultInjection',
full_name='istio.networking.v1alpha3.HTTPFaultInjection',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='delay', full_name='istio.networking.v1alpha3.HTTPFaultInjection.delay', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='abort', full_name='istio.networking.v1alpha3.HTTPFaultInjection.abort', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_HTTPFAULTINJECTION_DELAY, _HTTPFAULTINJECTION_ABORT, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4641,
serialized_end=5179,
)
_PORTSELECTOR = _descriptor.Descriptor(
name='PortSelector',
full_name='istio.networking.v1alpha3.PortSelector',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='number', full_name='istio.networking.v1alpha3.PortSelector.number', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='istio.networking.v1alpha3.PortSelector.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='port', full_name='istio.networking.v1alpha3.PortSelector.port',
index=0, containing_type=None, fields=[]),
],
serialized_start=5181,
serialized_end=5237,
)
_PERCENT = _descriptor.Descriptor(
name='Percent',
full_name='istio.networking.v1alpha3.Percent',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='istio.networking.v1alpha3.Percent.value', index=0,
number=1, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5239,
serialized_end=5263,
)
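# Wiring pass: now that every Descriptor exists, message-typed fields receive
# their message_type, map-entry and nested descriptors receive their
# containing_type, and oneof members are appended to their OneofDescriptor.
# This is what lets the runtime resolve, e.g., VirtualService.http to HTTPRoute.
#
# A minimal usage sketch, assuming the message classes built later in this
# generated module are importable as networking.v1alpha3.virtual_service_pb2
# (the exact import path depends on how the generated package is laid out):
#
#   from networking.v1alpha3 import virtual_service_pb2
#
#   vs = virtual_service_pb2.VirtualService(hosts=['reviews.example.com'])
#   route = vs.http.add().route.add()        # HTTPRoute -> HTTPRouteDestination
#   route.destination.host = 'reviews'       # Destination.host
#   route.weight = 100
#   data = vs.SerializeToString()            # wire-format bytes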
_VIRTUALSERVICE.fields_by_name['http'].message_type = _HTTPROUTE
_VIRTUALSERVICE.fields_by_name['tls'].message_type = _TLSROUTE
_VIRTUALSERVICE.fields_by_name['tcp'].message_type = _TCPROUTE
_VIRTUALSERVICE.fields_by_name['config_scope'].enum_type = networking_dot_v1alpha3_dot_service__dependency__pb2._CONFIGSCOPE
_DESTINATION.fields_by_name['port'].message_type = _PORTSELECTOR
_HTTPROUTE_APPENDHEADERSENTRY.containing_type = _HTTPROUTE
_HTTPROUTE_APPENDRESPONSEHEADERSENTRY.containing_type = _HTTPROUTE
_HTTPROUTE_APPENDREQUESTHEADERSENTRY.containing_type = _HTTPROUTE
_HTTPROUTE.fields_by_name['match'].message_type = _HTTPMATCHREQUEST
_HTTPROUTE.fields_by_name['route'].message_type = _HTTPROUTEDESTINATION
_HTTPROUTE.fields_by_name['redirect'].message_type = _HTTPREDIRECT
_HTTPROUTE.fields_by_name['rewrite'].message_type = _HTTPREWRITE
_HTTPROUTE.fields_by_name['timeout'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_HTTPROUTE.fields_by_name['retries'].message_type = _HTTPRETRY
_HTTPROUTE.fields_by_name['fault'].message_type = _HTTPFAULTINJECTION
_HTTPROUTE.fields_by_name['mirror'].message_type = _DESTINATION
_HTTPROUTE.fields_by_name['cors_policy'].message_type = _CORSPOLICY
_HTTPROUTE.fields_by_name['append_headers'].message_type = _HTTPROUTE_APPENDHEADERSENTRY
_HTTPROUTE.fields_by_name['append_response_headers'].message_type = _HTTPROUTE_APPENDRESPONSEHEADERSENTRY
_HTTPROUTE.fields_by_name['append_request_headers'].message_type = _HTTPROUTE_APPENDREQUESTHEADERSENTRY
_HTTPROUTE.fields_by_name['headers'].message_type = _HEADERS
_HEADERS_HEADEROPERATIONS_SETENTRY.containing_type = _HEADERS_HEADEROPERATIONS
_HEADERS_HEADEROPERATIONS_ADDENTRY.containing_type = _HEADERS_HEADEROPERATIONS
_HEADERS_HEADEROPERATIONS.fields_by_name['set'].message_type = _HEADERS_HEADEROPERATIONS_SETENTRY
_HEADERS_HEADEROPERATIONS.fields_by_name['add'].message_type = _HEADERS_HEADEROPERATIONS_ADDENTRY
_HEADERS_HEADEROPERATIONS.containing_type = _HEADERS
_HEADERS.fields_by_name['request'].message_type = _HEADERS_HEADEROPERATIONS
_HEADERS.fields_by_name['response'].message_type = _HEADERS_HEADEROPERATIONS
_TLSROUTE.fields_by_name['match'].message_type = _TLSMATCHATTRIBUTES
_TLSROUTE.fields_by_name['route'].message_type = _ROUTEDESTINATION
_TCPROUTE.fields_by_name['match'].message_type = _L4MATCHATTRIBUTES
_TCPROUTE.fields_by_name['route'].message_type = _ROUTEDESTINATION
_HTTPMATCHREQUEST_HEADERSENTRY.fields_by_name['value'].message_type = _STRINGMATCH
_HTTPMATCHREQUEST_HEADERSENTRY.containing_type = _HTTPMATCHREQUEST
_HTTPMATCHREQUEST_SOURCELABELSENTRY.containing_type = _HTTPMATCHREQUEST
_HTTPMATCHREQUEST.fields_by_name['uri'].message_type = _STRINGMATCH
_HTTPMATCHREQUEST.fields_by_name['scheme'].message_type = _STRINGMATCH
_HTTPMATCHREQUEST.fields_by_name['method'].message_type = _STRINGMATCH
_HTTPMATCHREQUEST.fields_by_name['authority'].message_type = _STRINGMATCH
_HTTPMATCHREQUEST.fields_by_name['headers'].message_type = _HTTPMATCHREQUEST_HEADERSENTRY
_HTTPMATCHREQUEST.fields_by_name['source_labels'].message_type = _HTTPMATCHREQUEST_SOURCELABELSENTRY
_HTTPROUTEDESTINATION_APPENDRESPONSEHEADERSENTRY.containing_type = _HTTPROUTEDESTINATION
_HTTPROUTEDESTINATION_APPENDREQUESTHEADERSENTRY.containing_type = _HTTPROUTEDESTINATION
_HTTPROUTEDESTINATION.fields_by_name['destination'].message_type = _DESTINATION
_HTTPROUTEDESTINATION.fields_by_name['append_response_headers'].message_type = _HTTPROUTEDESTINATION_APPENDRESPONSEHEADERSENTRY
_HTTPROUTEDESTINATION.fields_by_name['append_request_headers'].message_type = _HTTPROUTEDESTINATION_APPENDREQUESTHEADERSENTRY
_HTTPROUTEDESTINATION.fields_by_name['headers'].message_type = _HEADERS
_ROUTEDESTINATION.fields_by_name['destination'].message_type = _DESTINATION
_L4MATCHATTRIBUTES_SOURCELABELSENTRY.containing_type = _L4MATCHATTRIBUTES
_L4MATCHATTRIBUTES.fields_by_name['source_labels'].message_type = _L4MATCHATTRIBUTES_SOURCELABELSENTRY
_TLSMATCHATTRIBUTES_SOURCELABELSENTRY.containing_type = _TLSMATCHATTRIBUTES
_TLSMATCHATTRIBUTES.fields_by_name['source_labels'].message_type = _TLSMATCHATTRIBUTES_SOURCELABELSENTRY
_STRINGMATCH.oneofs_by_name['match_type'].fields.append(
_STRINGMATCH.fields_by_name['exact'])
_STRINGMATCH.fields_by_name['exact'].containing_oneof = _STRINGMATCH.oneofs_by_name['match_type']
_STRINGMATCH.oneofs_by_name['match_type'].fields.append(
_STRINGMATCH.fields_by_name['prefix'])
_STRINGMATCH.fields_by_name['prefix'].containing_oneof = _STRINGMATCH.oneofs_by_name['match_type']
_STRINGMATCH.oneofs_by_name['match_type'].fields.append(
_STRINGMATCH.fields_by_name['regex'])
_STRINGMATCH.fields_by_name['regex'].containing_oneof = _STRINGMATCH.oneofs_by_name['match_type']
_HTTPRETRY.fields_by_name['per_try_timeout'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_CORSPOLICY.fields_by_name['max_age'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_CORSPOLICY.fields_by_name['allow_credentials'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_HTTPFAULTINJECTION_DELAY.fields_by_name['fixed_delay'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_HTTPFAULTINJECTION_DELAY.fields_by_name['exponential_delay'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_HTTPFAULTINJECTION_DELAY.fields_by_name['percentage'].message_type = _PERCENT
_HTTPFAULTINJECTION_DELAY.containing_type = _HTTPFAULTINJECTION
_HTTPFAULTINJECTION_DELAY.oneofs_by_name['http_delay_type'].fields.append(
_HTTPFAULTINJECTION_DELAY.fields_by_name['fixed_delay'])
_HTTPFAULTINJECTION_DELAY.fields_by_name['fixed_delay'].containing_oneof = _HTTPFAULTINJECTION_DELAY.oneofs_by_name['http_delay_type']
_HTTPFAULTINJECTION_DELAY.oneofs_by_name['http_delay_type'].fields.append(
_HTTPFAULTINJECTION_DELAY.fields_by_name['exponential_delay'])
_HTTPFAULTINJECTION_DELAY.fields_by_name['exponential_delay'].containing_oneof = _HTTPFAULTINJECTION_DELAY.oneofs_by_name['http_delay_type']
_HTTPFAULTINJECTION_ABORT.fields_by_name['percentage'].message_type = _PERCENT
_HTTPFAULTINJECTION_ABORT.containing_type = _HTTPFAULTINJECTION
_HTTPFAULTINJECTION_ABORT.oneofs_by_name['error_type'].fields.append(
_HTTPFAULTINJECTION_ABORT.fields_by_name['http_status'])
_HTTPFAULTINJECTION_ABORT.fields_by_name['http_status'].containing_oneof = _HTTPFAULTINJECTION_ABORT.oneofs_by_name['error_type']
_HTTPFAULTINJECTION_ABORT.oneofs_by_name['error_type'].fields.append(
_HTTPFAULTINJECTION_ABORT.fields_by_name['grpc_status'])
_HTTPFAULTINJECTION_ABORT.fields_by_name['grpc_status'].containing_oneof = _HTTPFAULTINJECTION_ABORT.oneofs_by_name['error_type']
_HTTPFAULTINJECTION_ABORT.oneofs_by_name['error_type'].fields.append(
_HTTPFAULTINJECTION_ABORT.fields_by_name['http2_error'])
_HTTPFAULTINJECTION_ABORT.fields_by_name['http2_error'].containing_oneof = _HTTPFAULTINJECTION_ABORT.oneofs_by_name['error_type']
_HTTPFAULTINJECTION.fields_by_name['delay'].message_type = _HTTPFAULTINJECTION_DELAY
_HTTPFAULTINJECTION.fields_by_name['abort'].message_type = _HTTPFAULTINJECTION_ABORT
_PORTSELECTOR.oneofs_by_name['port'].fields.append(
_PORTSELECTOR.fields_by_name['number'])
_PORTSELECTOR.fields_by_name['number'].containing_oneof = _PORTSELECTOR.oneofs_by_name['port']
_PORTSELECTOR.oneofs_by_name['port'].fields.append(
_PORTSELECTOR.fields_by_name['name'])
_PORTSELECTOR.fields_by_name['name'].containing_oneof = _PORTSELECTOR.oneofs_by_name['port']
DESCRIPTOR.message_types_by_name['VirtualService'] = _VIRTUALSERVICE
DESCRIPTOR.message_types_by_name['Destination'] = _DESTINATION
DESCRIPTOR.message_types_by_name['HTTPRoute'] = _HTTPROUTE
DESCRIPTOR.message_types_by_name['Headers'] = _HEADERS
DESCRIPTOR.message_types_by_name['TLSRoute'] = _TLSROUTE
DESCRIPTOR.message_types_by_name['TCPRoute'] = _TCPROUTE
DESCRIPTOR.message_types_by_name['HTTPMatchRequest'] = _HTTPMATCHREQUEST
DESCRIPTOR.message_types_by_name['HTTPRouteDestination'] = _HTTPROUTEDESTINATION
DESCRIPTOR.message_types_by_name['RouteDestination'] = _ROUTEDESTINATION
DESCRIPTOR.message_types_by_name['L4MatchAttributes'] = _L4MATCHATTRIBUTES
DESCRIPTOR.message_types_by_name['TLSMatchAttributes'] = _TLSMATCHATTRIBUTES
DESCRIPTOR.message_types_by_name['HTTPRedirect'] = _HTTPREDIRECT
DESCRIPTOR.message_types_by_name['HTTPRewrite'] = _HTTPREWRITE
DESCRIPTOR.message_types_by_name['StringMatch'] = _STRINGMATCH
DESCRIPTOR.message_types_by_name['HTTPRetry'] = _HTTPRETRY
DESCRIPTOR.message_types_by_name['CorsPolicy'] = _CORSPOLICY
DESCRIPTOR.message_types_by_name['HTTPFaultInjection'] = _HTTPFAULTINJECTION
DESCRIPTOR.message_types_by_name['PortSelector'] = _PORTSELECTOR
DESCRIPTOR.message_types_by_name['Percent'] = _PERCENT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
VirtualService = _reflection.GeneratedProtocolMessageType('VirtualService', (_message.Message,), dict(
DESCRIPTOR = _VIRTUALSERVICE,
__module__ = 'networking.v1alpha3.virtual_service_pb2'
# @@protoc_insertion_point(class_scope:istio.networking.v1alpha3.VirtualService)
))
_sym_db.RegisterMessage(VirtualService)
Destination = _reflection.GeneratedProtocolMessageType('Destination', (_message.Message,), dict(
DESCRIPTOR = _DESTINATION,
__module__ = 'networking.v1alpha3.virtual_service_pb2'
# @@protoc_insertion_point(class_scope:istio.networking.v1alpha3.Destination)
))
_sym_db.RegisterMessage(Destination)
HTTPRoute = _reflection.GeneratedProtocolMessageType('HTTPRoute', (_message.Message,), dict(
AppendHeadersEntry = _reflection.GeneratedProtocolMessageType('AppendHeadersEntry', (_message.Message,), dict(
DESCRIPTOR = _HTTPROUTE_APPENDHEADERSENTRY,
__module__ = 'networking.v1alpha3.virtual_service_pb2'
# @@protoc_insertion_point(class_scope:istio.networking.v1alpha3.HTTPRoute.AppendHeadersEntry)
))
,
AppendResponseHeadersEntry = _reflection.GeneratedProtocolMessageType('AppendResponseHeadersEntry', (_message.Message,), dict(
DESCRIPTOR = _HTTPROUTE_APPENDRESPONSEHEADERSENTRY,
__module__ = 'networking.v1alpha3.virtual_service_pb2'
# @@protoc_insertion_point(class_scope:istio.networking.v1alpha3.HTTPRoute.AppendResponseHeadersEntry)
))
,
AppendRequestHeadersEntry = _reflection.GeneratedProtocolMessageType('AppendRequestHeadersEntry', (_message.Message,), dict(
DESCRIPTOR = _HTTPROUTE_APPENDREQUESTHEADERSENTRY,
__module__ = 'networking.v1alpha3.virtual_service_pb2'
# @@protoc_insertion_point(class_scope:istio.networking.v1alpha3.HTTPRoute.AppendRequestHeadersEntry)
))
,
DESCRIPTOR = _HTTPROUTE,
__module__ = 'networking.v1alpha3.virtual_service_pb2'
# @@protoc_insertion_point(class_scope:istio.networking.v1alpha3.HTTPRoute)
))
_sym_db.RegisterMessage(HTTPRoute)
_sym_db.RegisterMessage(HTTPRoute.AppendHeadersEntry)
_sym_db.RegisterMessage(HTTPRoute.AppendResponseHeadersEntry)
_sym_db.RegisterMessage(HTTPRoute.AppendRequestHeadersEntry)
Headers = _reflection.GeneratedProtocolMessageType('Headers', (_message.Message,), dict(
HeaderOperations = _reflection.GeneratedProtocolMessageType('HeaderOperations', (_message.Message,), dict(
SetEntry = _reflection.GeneratedProtocolMessageType('SetEntry', (_message.Message,), dict(
DESCRIPTOR = _HEADERS_HEADEROPERATIONS_SETENTRY,
__module__ = 'networking.v1alpha3.virtual_service_pb2'
# @@protoc_insertion_point(class_scope:istio.networking.v1alpha3.Headers.HeaderOperations.SetEntry)
))
,
AddEntry = _reflection.GeneratedProtocolMessageType('AddEntry', (_message.Message,), dict(
DESCRIPTOR = _HEADERS_HEADEROPERATIONS_ADDENTRY,
__module__ = 'networking.v1alpha3.virtual_service_pb2'
# @@protoc_insertion_point(class_scope:istio.networking.v1alpha3.Headers.HeaderOperations.AddEntry)
))
,
DESCRIPTOR = _HEADERS_HEADEROPERATIONS,
__module__ = 'networking.v1alpha3.virtual_service_pb2'
# @@protoc_insertion_point(class_scope:istio.networking.v1alpha3.Headers.HeaderOperations)
))
,
DESCRIPTOR = _HEADERS,
__module__ = 'networking.v1alpha3.virtual_service_pb2'
# @@protoc_insertion_point(class_scope:istio.networking.v1alpha3.Headers)
))
_sym_db.RegisterMessage(Headers)
_sym_db.RegisterMessage(Headers.HeaderOperations)
_sym_db.RegisterMessage(Headers.HeaderOperations.SetEntry)
_sym_db.RegisterMessage(Headers.HeaderOperations.AddEntry)
TLSRoute = _reflection.GeneratedProtocolMessageType('TLSRoute', (_message.Message,), dict(
DESCRIPTOR = _TLSROUTE,
__module__ = 'networking.v1alpha3.virtual_service_pb2'
# @@protoc_insertion_point(class_scope:istio.networking.v1alpha3.TLSRoute)
))
_sym_db.RegisterMessage(TLSRoute)
TCPRoute = _reflection.GeneratedProtocolMessageType('TCPRoute', (_message.Message,), dict(
DESCRIPTOR = _TCPROUTE,
__module__ = 'networking.v1alpha3.virtual_service_pb2'
# @@protoc_insertion_point(class_scope:istio.networking.v1alpha3.TCPRoute)
))
_sym_db.RegisterMessage(TCPRoute)
HTTPMatchRequest = _reflection.GeneratedProtocolMessageType('HTTPMatchRequest', (_message.Message,), dict(
HeadersEntry = _reflection.GeneratedProtocolMessageType('HeadersEntry', (_message.Message,), dict(
DESCRIPTOR = _HTTPMATCHREQUEST_HEADERSENTRY,
__module__ = 'networking.v1alpha3.virtual_service_pb2'
# @@protoc_insertion_point(class_scope:istio.networking.v1alpha3.HTTPMatchRequest.HeadersEntry)
))
,
SourceLabelsEntry = _reflection.GeneratedProtocolMessageType('SourceLabelsEntry', (_message.Message,), dict(
DESCRIPTOR = _HTTPMATCHREQUEST_SOURCELABELSENTRY,
__module__ = 'networking.v1alpha3.virtual_service_pb2'
# @@protoc_insertion_point(class_scope:istio.networking.v1alpha3.HTTPMatchRequest.SourceLabelsEntry)
))
,
DESCRIPTOR = _HTTPMATCHREQUEST,
__module__ = 'networking.v1alpha3.virtual_service_pb2'
# @@protoc_insertion_point(class_scope:istio.networking.v1alpha3.HTTPMatchRequest)
))
_sym_db.RegisterMessage(HTTPMatchRequest)
_sym_db.RegisterMessage(HTTPMatchRequest.HeadersEntry)
_sym_db.RegisterMessage(HTTPMatchRequest.SourceLabelsEntry)
HTTPRouteDestination = _reflection.GeneratedProtocolMessageType('HTTPRouteDestination', (_message.Message,), dict(
AppendResponseHeadersEntry = _reflection.GeneratedProtocolMessageType('AppendResponseHeadersEntry', (_message.Message,), dict(
DESCRIPTOR = _HTTPROUTEDESTINATION_APPENDRESPONSEHEADERSENTRY,
__module__ = 'networking.v1alpha3.virtual_service_pb2'
# @@protoc_insertion_point(class_scope:istio.networking.v1alpha3.HTTPRouteDestination.AppendResponseHeadersEntry)
))
,
AppendRequestHeadersEntry = _reflection.GeneratedProtocolMessageType('AppendRequestHeadersEntry', (_message.Message,), dict(
DESCRIPTOR = _HTTPROUTEDESTINATION_APPENDREQUESTHEADERSENTRY,
__module__ = 'networking.v1alpha3.virtual_service_pb2'
# @@protoc_insertion_point(class_scope:istio.networking.v1alpha3.HTTPRouteDestination.AppendRequestHeadersEntry)
))
,
DESCRIPTOR = _HTTPROUTEDESTINATION,
__module__ = 'networking.v1alpha3.virtual_service_pb2'
# @@protoc_insertion_point(class_scope:istio.networking.v1alpha3.HTTPRouteDestination)
))
_sym_db.RegisterMessage(HTTPRouteDestination)
_sym_db.RegisterMessage(HTTPRouteDestination.AppendResponseHeadersEntry)
_sym_db.RegisterMessage(HTTPRouteDestination.AppendRequestHeadersEntry)
RouteDestination = _reflection.GeneratedProtocolMessageType('RouteDestination', (_message.Message,), dict(
DESCRIPTOR = _ROUTEDESTINATION,
__module__ = 'networking.v1alpha3.virtual_service_pb2'
# @@protoc_insertion_point(class_scope:istio.networking.v1alpha3.RouteDestination)
))
_sym_db.RegisterMessage(RouteDestination)
L4MatchAttributes = _reflection.GeneratedProtocolMessageType('L4MatchAttributes', (_message.Message,), dict(
SourceLabelsEntry = _reflection.GeneratedProtocolMessageType('SourceLabelsEntry', (_message.Message,), dict(
DESCRIPTOR = _L4MATCHATTRIBUTES_SOURCELABELSENTRY,
__module__ = 'networking.v1alpha3.virtual_service_pb2'
# @@protoc_insertion_point(class_scope:istio.networking.v1alpha3.L4MatchAttributes.SourceLabelsEntry)
))
,
DESCRIPTOR = _L4MATCHATTRIBUTES,
__module__ = 'networking.v1alpha3.virtual_service_pb2'
# @@protoc_insertion_point(class_scope:istio.networking.v1alpha3.L4MatchAttributes)
))
_sym_db.RegisterMessage(L4MatchAttributes)
_sym_db.RegisterMessage(L4MatchAttributes.SourceLabelsEntry)
TLSMatchAttributes = _reflection.GeneratedProtocolMessageType('TLSMatchAttributes', (_message.Message,), dict(
SourceLabelsEntry = _reflection.GeneratedProtocolMessageType('SourceLabelsEntry', (_message.Message,), dict(
DESCRIPTOR = _TLSMATCHATTRIBUTES_SOURCELABELSENTRY,
__module__ = 'networking.v1alpha3.virtual_service_pb2'
# @@protoc_insertion_point(class_scope:istio.networking.v1alpha3.TLSMatchAttributes.SourceLabelsEntry)
))
,
DESCRIPTOR = _TLSMATCHATTRIBUTES,
__module__ = 'networking.v1alpha3.virtual_service_pb2'
# @@protoc_insertion_point(class_scope:istio.networking.v1alpha3.TLSMatchAttributes)
))
_sym_db.RegisterMessage(TLSMatchAttributes)
_sym_db.RegisterMessage(TLSMatchAttributes.SourceLabelsEntry)
HTTPRedirect = _reflection.GeneratedProtocolMessageType('HTTPRedirect', (_message.Message,), dict(
DESCRIPTOR = _HTTPREDIRECT,
__module__ = 'networking.v1alpha3.virtual_service_pb2'
# @@protoc_insertion_point(class_scope:istio.networking.v1alpha3.HTTPRedirect)
))
_sym_db.RegisterMessage(HTTPRedirect)
HTTPRewrite = _reflection.GeneratedProtocolMessageType('HTTPRewrite', (_message.Message,), dict(
DESCRIPTOR = _HTTPREWRITE,
__module__ = 'networking.v1alpha3.virtual_service_pb2'
# @@protoc_insertion_point(class_scope:istio.networking.v1alpha3.HTTPRewrite)
))
_sym_db.RegisterMessage(HTTPRewrite)
StringMatch = _reflection.GeneratedProtocolMessageType('StringMatch', (_message.Message,), dict(
DESCRIPTOR = _STRINGMATCH,
__module__ = 'networking.v1alpha3.virtual_service_pb2'
# @@protoc_insertion_point(class_scope:istio.networking.v1alpha3.StringMatch)
))
_sym_db.RegisterMessage(StringMatch)
HTTPRetry = _reflection.GeneratedProtocolMessageType('HTTPRetry', (_message.Message,), dict(
DESCRIPTOR = _HTTPRETRY,
__module__ = 'networking.v1alpha3.virtual_service_pb2'
# @@protoc_insertion_point(class_scope:istio.networking.v1alpha3.HTTPRetry)
))
_sym_db.RegisterMessage(HTTPRetry)
CorsPolicy = _reflection.GeneratedProtocolMessageType('CorsPolicy', (_message.Message,), dict(
DESCRIPTOR = _CORSPOLICY,
__module__ = 'networking.v1alpha3.virtual_service_pb2'
# @@protoc_insertion_point(class_scope:istio.networking.v1alpha3.CorsPolicy)
))
_sym_db.RegisterMessage(CorsPolicy)
HTTPFaultInjection = _reflection.GeneratedProtocolMessageType('HTTPFaultInjection', (_message.Message,), dict(
Delay = _reflection.GeneratedProtocolMessageType('Delay', (_message.Message,), dict(
DESCRIPTOR = _HTTPFAULTINJECTION_DELAY,
__module__ = 'networking.v1alpha3.virtual_service_pb2'
# @@protoc_insertion_point(class_scope:istio.networking.v1alpha3.HTTPFaultInjection.Delay)
))
,
Abort = _reflection.GeneratedProtocolMessageType('Abort', (_message.Message,), dict(
DESCRIPTOR = _HTTPFAULTINJECTION_ABORT,
__module__ = 'networking.v1alpha3.virtual_service_pb2'
# @@protoc_insertion_point(class_scope:istio.networking.v1alpha3.HTTPFaultInjection.Abort)
))
,
DESCRIPTOR = _HTTPFAULTINJECTION,
__module__ = 'networking.v1alpha3.virtual_service_pb2'
# @@protoc_insertion_point(class_scope:istio.networking.v1alpha3.HTTPFaultInjection)
))
_sym_db.RegisterMessage(HTTPFaultInjection)
_sym_db.RegisterMessage(HTTPFaultInjection.Delay)
_sym_db.RegisterMessage(HTTPFaultInjection.Abort)
PortSelector = _reflection.GeneratedProtocolMessageType('PortSelector', (_message.Message,), dict(
DESCRIPTOR = _PORTSELECTOR,
__module__ = 'networking.v1alpha3.virtual_service_pb2'
# @@protoc_insertion_point(class_scope:istio.networking.v1alpha3.PortSelector)
))
_sym_db.RegisterMessage(PortSelector)
Percent = _reflection.GeneratedProtocolMessageType('Percent', (_message.Message,), dict(
DESCRIPTOR = _PERCENT,
__module__ = 'networking.v1alpha3.virtual_service_pb2'
# @@protoc_insertion_point(class_scope:istio.networking.v1alpha3.Percent)
))
_sym_db.RegisterMessage(Percent)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('Z istio.io/api/networking/v1alpha3'))
_HTTPROUTE_APPENDHEADERSENTRY.has_options = True
_HTTPROUTE_APPENDHEADERSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_HTTPROUTE_APPENDRESPONSEHEADERSENTRY.has_options = True
_HTTPROUTE_APPENDRESPONSEHEADERSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_HTTPROUTE_APPENDREQUESTHEADERSENTRY.has_options = True
_HTTPROUTE_APPENDREQUESTHEADERSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_HTTPROUTE.fields_by_name['append_headers'].has_options = True
_HTTPROUTE.fields_by_name['append_headers']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\030\001'))
_HTTPROUTE.fields_by_name['remove_response_headers'].has_options = True
_HTTPROUTE.fields_by_name['remove_response_headers']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\030\001'))
_HTTPROUTE.fields_by_name['append_response_headers'].has_options = True
_HTTPROUTE.fields_by_name['append_response_headers']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\030\001'))
_HTTPROUTE.fields_by_name['remove_request_headers'].has_options = True
_HTTPROUTE.fields_by_name['remove_request_headers']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\030\001'))
_HTTPROUTE.fields_by_name['append_request_headers'].has_options = True
_HTTPROUTE.fields_by_name['append_request_headers']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\030\001'))
_HEADERS_HEADEROPERATIONS_SETENTRY.has_options = True
_HEADERS_HEADEROPERATIONS_SETENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_HEADERS_HEADEROPERATIONS_ADDENTRY.has_options = True
_HEADERS_HEADEROPERATIONS_ADDENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_HTTPMATCHREQUEST_HEADERSENTRY.has_options = True
_HTTPMATCHREQUEST_HEADERSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_HTTPMATCHREQUEST_SOURCELABELSENTRY.has_options = True
_HTTPMATCHREQUEST_SOURCELABELSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_HTTPROUTEDESTINATION_APPENDRESPONSEHEADERSENTRY.has_options = True
_HTTPROUTEDESTINATION_APPENDRESPONSEHEADERSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_HTTPROUTEDESTINATION_APPENDREQUESTHEADERSENTRY.has_options = True
_HTTPROUTEDESTINATION_APPENDREQUESTHEADERSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_HTTPROUTEDESTINATION.fields_by_name['remove_response_headers'].has_options = True
_HTTPROUTEDESTINATION.fields_by_name['remove_response_headers']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\030\001'))
_HTTPROUTEDESTINATION.fields_by_name['append_response_headers'].has_options = True
_HTTPROUTEDESTINATION.fields_by_name['append_response_headers']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\030\001'))
_HTTPROUTEDESTINATION.fields_by_name['remove_request_headers'].has_options = True
_HTTPROUTEDESTINATION.fields_by_name['remove_request_headers']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\030\001'))
_HTTPROUTEDESTINATION.fields_by_name['append_request_headers'].has_options = True
_HTTPROUTEDESTINATION.fields_by_name['append_request_headers']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\030\001'))
_L4MATCHATTRIBUTES_SOURCELABELSENTRY.has_options = True
_L4MATCHATTRIBUTES_SOURCELABELSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_TLSMATCHATTRIBUTES_SOURCELABELSENTRY.has_options = True
_TLSMATCHATTRIBUTES_SOURCELABELSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_HTTPFAULTINJECTION_DELAY.fields_by_name['percent'].has_options = True
_HTTPFAULTINJECTION_DELAY.fields_by_name['percent']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\030\001'))
_HTTPFAULTINJECTION_ABORT.fields_by_name['percent'].has_options = True
_HTTPFAULTINJECTION_ABORT.fields_by_name['percent']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\030\001'))
# @@protoc_insertion_point(module_scope)
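# Usage note (added; not produced by protoc): the classes registered above behave like ordinary
# protobuf messages, e.g. using the fully described Percent message:
#   p = Percent(value=0.5)
#   data = p.SerializeToString()
#   assert Percent.FromString(data).value == 0.5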
|
py | 7df90ccd8df8ae0542743bf0e0883331ac5143d0 | import os
is_arch_valid = 1
#python_lt_27 = 1
flags_arch = '-O3'
flags_link = '-pgf90libs'
cc = 'pgcc'
f90 = 'pgf90'
libpath_fortran = ''
libs_fortran = ['gfortran']
flags_prec_single = '-Mr4'
flags_prec_double = '-Mr8'
home = os.environ['HOME']
charm_path = home + '/Charm/charm'
papi_path = home
hdf5_path = os.environ['HDF5HOME']
hdf5_inc = hdf5_path + '/include'
hdf5_lib = hdf5_path + '/lib'
mpi_path = os.environ['MPIHOME']
png_path = '/usr/lib64'
grackle_path = home + '/Grackle/src/clib'
|
py | 7df90cd6e8aac37917b061166e7533f6a2360da2 | # Copyright (c) 2012, Machinalis S.R.L.
#
# Author: Rafael Carrascosa <[email protected]>
#
# This file is part of REfO and is distributed under the Modified BSD License.
# You should have received a copy of license in the LICENSE.txt file.
from .instructions import Atom, Accept, Split, Save
class Pattern(object):
def _compile(self):
raise NotImplementedError
def compile(self):
return self._compile(Accept())
def __or__(self, other):
return Disjunction(self, other)
def __add__(self, other):
xs = []
for item in [self, other]:
if isinstance(item, Concatenation):
xs.extend(item.xs)
else:
xs.append(item)
return Concatenation(*xs)
def __mul__(self, x):
if isinstance(x, int):
mn = x
mx = x
else:
assert isinstance(x, tuple)
mn, mx = x
if mn is None:
mn = 0
return Repetition(self, mn=mn, mx=mx)
def __str__(self):
return str(self.arg)
def __repr__(self):
return "{1}({0!r})".format(self.arg, self.__class__.__name__)
class Predicate(Pattern):
def __init__(self, f):
self.f = f
self.arg = f
def _compile(self, cont):
x = Atom(self.f, succ=cont)
return x
class Any(Predicate):
def __init__(self):
super(Any, self).__init__(lambda x: True)
def __str__(self):
return "Any()"
def __repr__(self):
return "Any()"
class Literal(Predicate):
def __init__(self, x):
super(Literal, self).__init__(lambda y: x == y)
self.x = x
self.arg = x
class Disjunction(Pattern):
def __init__(self, a, b):
self.a = a
self.b = b
def _compile(self, cont):
a = self.a._compile(cont)
b = self.b._compile(cont)
return Split(a, b)
def __str__(self):
return "(" + " | ".join(map(str, [self.a, self.b])) + ")"
def __repr__(self):
return "(" + " | ".join(map(repr, [self.a, self.b])) + ")"
class Concatenation(Pattern):
def __init__(self, *patterns):
self.xs = list(patterns)
assert len(self.xs) != 0
def _compile(self, cont):
code = cont
for x in reversed(self.xs):
code = x._compile(code)
return code
def __str__(self):
return "(" + " + ".join(map(str, self.xs)) + ")"
def __repr__(self):
return "(" + " + ".join(map(repr, self.xs)) + ")"
class Star(Pattern):
def __init__(self, pattern, greedy=True):
self.x = pattern
self.greedy = greedy
self.arg = pattern
def _compile(self, cont):
# In words: split to (`x` and return to split) and `cont`
split = Split()
x = self.x._compile(split)
if self.greedy:
split.succ = x
split.split = cont
else:
split.succ = cont
split.split = x
# `Plus` would return `x`
return split
def __str__(self):
return str(self.x) + "*"
class Plus(Pattern):
def __init__(self, pattern, greedy=True):
self.x = pattern
self.greedy = greedy
self.arg = pattern
def _compile(self, cont):
# In words: take `x` and split to `x` and `cont`
split = Split()
x = self.x._compile(split)
if self.greedy:
split.succ = x
split.split = cont
else:
split.succ = cont
split.split = x
# `Star` would return `split`
return x
def __str__(self):
return str(self.x) + "+"
class Question(Pattern):
def __init__(self, pattern, greedy=True):
self.x = pattern
self.greedy = greedy
self.arg = pattern
def _compile(self, cont):
xcode = self.x._compile(cont)
if self.greedy:
return Split(xcode, cont)
else:
return Split(cont, xcode)
def __str__(self):
return str(self.x) + "?"
class Group(Pattern):
def __init__(self, pattern, key):
self.x = pattern
self.key = key
def _compile(self, cont):
start = Save(_start(self.key))
end = Save(_end(self.key))
code = self.x._compile(end)
start.succ = code
end.succ = cont
return start
def __str__(self):
return "Group({0!s}, {1!s})".format(self.x, self.key)
def __repr__(self):
return "Group({0!r}, {1!r})".format(self.x, self.key)
class Repetition(Pattern):
def __init__(self, pattern, mn=0, mx=None, greedy=True):
        assert mn is None or mx is None or mn <= mx
self.x = pattern
self.mn = mn
self.mx = mx
self.greedy = greedy
def _compile(self, cont):
code = cont
if self.mx is not None:
q = Question(self.x, self.greedy)
for _ in range(self.mx - self.mn):
code = q._compile(code)
else:
code = Star(self.x, greedy=self.greedy)._compile(code)
for _ in range(self.mn):
code = self.x._compile(code)
return code
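    # Added descriptive note on the construction above: the `mn` mandatory copies of `x` come
    # first, and when `mx` is given the remaining `mx - mn` copies are optional, so x * (2, 4)
    # compiles roughly as  x x x? x?  while an unbounded x * (2, None) compiles as  x x x*.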
def __str__(self):
return self._tostring("{0!s}")
def __repr__(self):
return self._tostring("{0!r}")
def _tostring(self, s):
base = "(" + s + ")*"
if self.mn == 0 and self.mx is None:
return base.format(self.x)
if self.mn == self.mx:
return (base + "{1}").format(self.x, self.mn)
return (base + "*({1},{2})").format(self.x, self.mn, self.mx)
def _start(key):
return (key, 0)
def _end(key):
return (key, 1)
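# Minimal usage sketch (added; not part of the original module). It assumes the compiled
# instruction graph is executed by REfO's matching VM (e.g. refo.match) over a token sequence:
#   >>> digits = Plus(Predicate(str.isdigit))
#   >>> pattern = Group(digits, "a") + Literal("-") + Group(digits, "b")
#   >>> program = pattern.compile()   # linked Atom/Split/Save/Accept instructions
# `+` concatenates, `|` builds a Disjunction, and `* n` / `* (mn, mx)` builds a Repetition.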
|
py | 7df90cf7c53354758ac521cd744350805f2572c2 | """Eager mode TF policy built using build_tf_policy().
It supports both traced and non-traced eager execution modes."""
import functools
import logging
import threading
from typing import Dict, List, Optional, Tuple
from ray.util.debug import log_once
from ray.rllib.models.catalog import ModelCatalog
from ray.rllib.models.repeated_values import RepeatedValues
from ray.rllib.policy.policy import Policy, LEARNER_STATS_KEY
from ray.rllib.policy.rnn_sequencing import pad_batch_to_sequences_of_same_size
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils import add_mixins, force_list
from ray.rllib.utils.annotations import override
from ray.rllib.utils.framework import try_import_tf
from ray.rllib.utils.tf_ops import convert_to_non_tf_type
from ray.rllib.utils.threading import with_lock
from ray.rllib.utils.tracking_dict import UsageTrackingDict
from ray.rllib.utils.typing import TensorType
tf1, tf, tfv = try_import_tf()
logger = logging.getLogger(__name__)
def _convert_to_tf(x, dtype=None):
if isinstance(x, SampleBatch):
x = {k: v for k, v in x.items() if k != SampleBatch.INFOS}
return tf.nest.map_structure(_convert_to_tf, x)
elif isinstance(x, Policy):
return x
# Special handling of "Repeated" values.
elif isinstance(x, RepeatedValues):
return RepeatedValues(
tf.nest.map_structure(_convert_to_tf, x.values), x.lengths,
x.max_len)
if x is not None:
d = dtype
x = tf.nest.map_structure(
lambda f: tf.convert_to_tensor(f, d) if f is not None else None, x)
return x
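# Added descriptive note: _convert_to_tf maps an arbitrarily nested structure (lists, dicts,
# numpy arrays) to tf tensors via tf.nest.map_structure, passes Policy objects through
# untouched, strips SampleBatch.INFOS from SampleBatch inputs, and leaves None values as None.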
def _convert_to_numpy(x):
def _map(x):
if isinstance(x, tf.Tensor):
return x.numpy()
return x
try:
return tf.nest.map_structure(_map, x)
except AttributeError:
raise TypeError(
("Object of type {} has no method to convert to numpy.").format(
type(x)))
def convert_eager_inputs(func):
@functools.wraps(func)
def _func(*args, **kwargs):
if tf.executing_eagerly():
args = [_convert_to_tf(x) for x in args]
# TODO: (sven) find a way to remove key-specific hacks.
kwargs = {
k: _convert_to_tf(
v, dtype=tf.int32 if k == "timestep" else None)
for k, v in kwargs.items()
if k not in {"info_batch", "episodes"}
}
return func(*args, **kwargs)
return _func
def convert_eager_outputs(func):
@functools.wraps(func)
def _func(*args, **kwargs):
out = func(*args, **kwargs)
if tf.executing_eagerly():
out = tf.nest.map_structure(_convert_to_numpy, out)
return out
return _func
def _disallow_var_creation(next_creator, **kw):
v = next_creator(**kw)
raise ValueError("Detected a variable being created during an eager "
"forward pass. Variables should only be created during "
"model initialization: {}".format(v.name))
def traced_eager_policy(eager_policy_cls):
"""Wrapper that enables tracing for all eager policy methods.
This is enabled by the --trace / "eager_tracing" config."""
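    # Added note: each method below is wrapped in tf.function on its first call, trading some
    # debuggability for speed. A hedged config sketch enabling it:
    #   {"framework": "tfe", "eager_tracing": True}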
class TracedEagerPolicy(eager_policy_cls):
def __init__(self, *args, **kwargs):
self._traced_learn_on_batch = None
self._traced_compute_actions = None
self._traced_compute_gradients = None
self._traced_apply_gradients = None
super(TracedEagerPolicy, self).__init__(*args, **kwargs)
@override(eager_policy_cls)
@convert_eager_inputs
@convert_eager_outputs
def _learn_on_batch_eager(self, samples):
if self._traced_learn_on_batch is None:
self._traced_learn_on_batch = tf.function(
super(TracedEagerPolicy, self)._learn_on_batch_eager,
autograph=False,
experimental_relax_shapes=True)
return self._traced_learn_on_batch(samples)
@override(Policy)
@convert_eager_inputs
@convert_eager_outputs
def compute_actions(self,
obs_batch,
state_batches=None,
prev_action_batch=None,
prev_reward_batch=None,
info_batch=None,
episodes=None,
explore=None,
timestep=None,
**kwargs):
obs_batch = tf.convert_to_tensor(obs_batch)
state_batches = _convert_to_tf(state_batches)
prev_action_batch = _convert_to_tf(prev_action_batch)
prev_reward_batch = _convert_to_tf(prev_reward_batch)
if self._traced_compute_actions is None:
self._traced_compute_actions = tf.function(
super(TracedEagerPolicy, self).compute_actions,
autograph=False,
experimental_relax_shapes=True)
return self._traced_compute_actions(
obs_batch, state_batches, prev_action_batch, prev_reward_batch,
info_batch, episodes, explore, timestep, **kwargs)
@override(eager_policy_cls)
@convert_eager_inputs
@convert_eager_outputs
def _compute_gradients_eager(self, samples):
if self._traced_compute_gradients is None:
self._traced_compute_gradients = tf.function(
super(TracedEagerPolicy, self).compute_gradients,
autograph=False,
experimental_relax_shapes=True)
return self._traced_compute_gradients(samples)
@override(Policy)
@convert_eager_inputs
@convert_eager_outputs
def apply_gradients(self, grads):
if self._traced_apply_gradients is None:
self._traced_apply_gradients = tf.function(
super(TracedEagerPolicy, self).apply_gradients,
autograph=False,
experimental_relax_shapes=True)
return self._traced_apply_gradients(grads)
TracedEagerPolicy.__name__ = eager_policy_cls.__name__
TracedEagerPolicy.__qualname__ = eager_policy_cls.__qualname__
return TracedEagerPolicy
def build_eager_tf_policy(name,
loss_fn,
get_default_config=None,
postprocess_fn=None,
stats_fn=None,
optimizer_fn=None,
gradients_fn=None,
apply_gradients_fn=None,
grad_stats_fn=None,
extra_learn_fetches_fn=None,
extra_action_fetches_fn=None,
validate_spaces=None,
before_init=None,
before_loss_init=None,
after_init=None,
make_model=None,
action_sampler_fn=None,
action_distribution_fn=None,
mixins=None,
obs_include_prev_action_reward=True,
get_batch_divisibility_req=None):
"""Build an eager TF policy.
An eager policy runs all operations in eager mode, which makes debugging
much simpler, but has lower performance.
You shouldn't need to call this directly. Rather, prefer to build a TF
    graph policy and set {"framework": "tfe"} in the trainer config to have
it automatically be converted to an eager policy.
This has the same signature as build_tf_policy()."""
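    # Added usage sketch (hedged; names like MyTFPolicy/MyTrainer are illustrative, not taken
    # from this file). A graph policy built with build_tf_policy() is converted to this eager
    # class when the trainer config selects the eager framework, e.g.:
    #   MyTFPolicy = build_tf_policy("MyTFPolicy", loss_fn=my_loss_fn)
    #   MyTrainer = build_trainer(name="MyTrainer", default_policy=MyTFPolicy)
    #   trainer = MyTrainer(config={"framework": "tfe", "eager_tracing": True}, env="CartPole-v0")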
base = add_mixins(Policy, mixins)
class eager_policy_cls(base):
def __init__(self, observation_space, action_space, config):
assert tf.executing_eagerly()
self.framework = config.get("framework", "tfe")
Policy.__init__(self, observation_space, action_space, config)
self._is_training = False
self._loss_initialized = False
self._sess = None
self._loss = loss_fn
self.batch_divisibility_req = get_batch_divisibility_req(self) if \
callable(get_batch_divisibility_req) else \
(get_batch_divisibility_req or 1)
self._max_seq_len = config["model"]["max_seq_len"]
if get_default_config:
config = dict(get_default_config(), **config)
if validate_spaces:
validate_spaces(self, observation_space, action_space, config)
if before_init:
before_init(self, observation_space, action_space, config)
self.config = config
self.dist_class = None
if action_sampler_fn or action_distribution_fn:
if not make_model:
raise ValueError(
"`make_model` is required if `action_sampler_fn` OR "
"`action_distribution_fn` is given")
else:
self.dist_class, logit_dim = ModelCatalog.get_action_dist(
action_space, self.config["model"])
if make_model:
self.model = make_model(self, observation_space, action_space,
config)
else:
self.model = ModelCatalog.get_model_v2(
observation_space,
action_space,
logit_dim,
config["model"],
framework=self.framework,
)
# Lock used for locking some methods on the object-level.
# This prevents possible race conditions when calling the model
# first, then its value function (e.g. in a loss function), in
# between of which another model call is made (e.g. to compute an
# action).
self._lock = threading.RLock()
# Auto-update model's inference view requirements, if recurrent.
self._update_model_view_requirements_from_init_state()
self.exploration = self._create_exploration()
self._state_inputs = self.model.get_initial_state()
self._is_recurrent = len(self._state_inputs) > 0
# Combine view_requirements for Model and Policy.
self.view_requirements.update(self.model.view_requirements)
if before_loss_init:
before_loss_init(self, observation_space, action_space, config)
if optimizer_fn:
optimizers = optimizer_fn(self, config)
else:
optimizers = tf.keras.optimizers.Adam(config["lr"])
optimizers = force_list(optimizers)
if getattr(self, "exploration", None):
optimizers = self.exploration.get_exploration_optimizer(
optimizers)
# TODO: (sven) Allow tf policy to have more than 1 optimizer.
# Just like torch Policy does.
self._optimizer = optimizers[0] if optimizers else None
self._initialize_loss_from_dummy_batch(
auto_remove_unneeded_view_reqs=True,
stats_fn=stats_fn,
)
self._loss_initialized = True
if after_init:
after_init(self, observation_space, action_space, config)
# Got to reset global_timestep again after fake run-throughs.
self.global_timestep = 0
@override(Policy)
def postprocess_trajectory(self,
sample_batch,
other_agent_batches=None,
episode=None):
assert tf.executing_eagerly()
# Call super's postprocess_trajectory first.
sample_batch = Policy.postprocess_trajectory(self, sample_batch)
if postprocess_fn:
return postprocess_fn(self, sample_batch, other_agent_batches,
episode)
return sample_batch
@with_lock
@override(Policy)
def learn_on_batch(self, postprocessed_batch):
# Callback handling.
self.callbacks.on_learn_on_batch(
policy=self, train_batch=postprocessed_batch)
pad_batch_to_sequences_of_same_size(
postprocessed_batch,
shuffle=False,
max_seq_len=self._max_seq_len,
batch_divisibility_req=self.batch_divisibility_req,
view_requirements=self.view_requirements,
)
self._is_training = True
postprocessed_batch["is_training"] = True
return self._learn_on_batch_eager(postprocessed_batch)
@convert_eager_inputs
@convert_eager_outputs
def _learn_on_batch_eager(self, samples):
with tf.variable_creator_scope(_disallow_var_creation):
grads_and_vars, stats = self._compute_gradients(samples)
self._apply_gradients(grads_and_vars)
return stats
@override(Policy)
def compute_gradients(self, samples):
pad_batch_to_sequences_of_same_size(
samples,
shuffle=False,
max_seq_len=self._max_seq_len,
batch_divisibility_req=self.batch_divisibility_req)
self._is_training = True
samples["is_training"] = True
return self._compute_gradients_eager(samples)
@convert_eager_inputs
@convert_eager_outputs
def _compute_gradients_eager(self, samples):
with tf.variable_creator_scope(_disallow_var_creation):
grads_and_vars, stats = self._compute_gradients(samples)
grads = [g for g, v in grads_and_vars]
return grads, stats
@override(Policy)
def compute_actions(self,
obs_batch,
state_batches=None,
prev_action_batch=None,
prev_reward_batch=None,
info_batch=None,
episodes=None,
explore=None,
timestep=None,
**kwargs):
self._is_training = False
self._is_recurrent = \
state_batches is not None and state_batches != []
if not tf1.executing_eagerly():
tf1.enable_eager_execution()
input_dict = {
SampleBatch.CUR_OBS: tf.convert_to_tensor(obs_batch),
"is_training": tf.constant(False),
}
if obs_include_prev_action_reward:
if prev_action_batch is not None:
input_dict[SampleBatch.PREV_ACTIONS] = \
tf.convert_to_tensor(prev_action_batch)
if prev_reward_batch is not None:
input_dict[SampleBatch.PREV_REWARDS] = \
tf.convert_to_tensor(prev_reward_batch)
return self._compute_action_helper(input_dict, state_batches,
episodes, explore, timestep)
@override(Policy)
def compute_actions_from_input_dict(
self,
input_dict: Dict[str, TensorType],
explore: bool = None,
timestep: Optional[int] = None,
**kwargs
) -> Tuple[TensorType, List[TensorType], Dict[str, TensorType]]:
if not tf1.executing_eagerly():
tf1.enable_eager_execution()
# Pass lazy (torch) tensor dict to Model as `input_dict`.
input_dict = self._lazy_tensor_dict(input_dict)
# Pack internal state inputs into (separate) list.
state_batches = [
input_dict[k] for k in input_dict.keys() if "state_in" in k[:8]
]
return self._compute_action_helper(input_dict, state_batches, None,
explore, timestep)
@with_lock
@convert_eager_inputs
@convert_eager_outputs
def _compute_action_helper(self, input_dict, state_batches, episodes,
explore, timestep):
explore = explore if explore is not None else \
self.config["explore"]
timestep = timestep if timestep is not None else \
self.global_timestep
if isinstance(timestep, tf.Tensor):
timestep = int(timestep.numpy())
self._is_training = False
self._state_in = state_batches or []
# Calculate RNN sequence lengths.
batch_size = input_dict[SampleBatch.CUR_OBS].shape[0]
seq_lens = tf.ones(batch_size, dtype=tf.int32) if state_batches \
else None
# Use Exploration object.
with tf.variable_creator_scope(_disallow_var_creation):
if action_sampler_fn:
dist_inputs = None
state_out = []
actions, logp = action_sampler_fn(
self,
self.model,
input_dict[SampleBatch.CUR_OBS],
explore=explore,
timestep=timestep,
episodes=episodes)
else:
# Exploration hook before each forward pass.
self.exploration.before_compute_actions(
timestep=timestep, explore=explore)
if action_distribution_fn:
dist_inputs, self.dist_class, state_out = \
action_distribution_fn(
self, self.model,
input_dict[SampleBatch.CUR_OBS],
explore=explore,
timestep=timestep,
is_training=False)
else:
dist_inputs, state_out = self.model(
input_dict, state_batches, seq_lens)
action_dist = self.dist_class(dist_inputs, self.model)
# Get the exploration action from the forward results.
actions, logp = self.exploration.get_exploration_action(
action_distribution=action_dist,
timestep=timestep,
explore=explore)
# Add default and custom fetches.
extra_fetches = {}
# Action-logp and action-prob.
if logp is not None:
extra_fetches[SampleBatch.ACTION_PROB] = tf.exp(logp)
extra_fetches[SampleBatch.ACTION_LOGP] = logp
# Action-dist inputs.
if dist_inputs is not None:
extra_fetches[SampleBatch.ACTION_DIST_INPUTS] = dist_inputs
# Custom extra fetches.
if extra_action_fetches_fn:
extra_fetches.update(extra_action_fetches_fn(self))
# Update our global timestep by the batch size.
self.global_timestep += int(batch_size)
return actions, state_out, extra_fetches
@with_lock
@override(Policy)
def compute_log_likelihoods(self,
actions,
obs_batch,
state_batches=None,
prev_action_batch=None,
prev_reward_batch=None):
if action_sampler_fn and action_distribution_fn is None:
raise ValueError("Cannot compute log-prob/likelihood w/o an "
"`action_distribution_fn` and a provided "
"`action_sampler_fn`!")
seq_lens = tf.ones(len(obs_batch), dtype=tf.int32)
input_dict = {
SampleBatch.CUR_OBS: tf.convert_to_tensor(obs_batch),
"is_training": tf.constant(False),
}
if obs_include_prev_action_reward:
if prev_action_batch is not None:
input_dict[SampleBatch.PREV_ACTIONS] = \
tf.convert_to_tensor(prev_action_batch)
if prev_reward_batch is not None:
input_dict[SampleBatch.PREV_REWARDS] = \
tf.convert_to_tensor(prev_reward_batch)
# Exploration hook before each forward pass.
self.exploration.before_compute_actions(explore=False)
# Action dist class and inputs are generated via custom function.
if action_distribution_fn:
dist_inputs, dist_class, _ = action_distribution_fn(
self,
self.model,
input_dict[SampleBatch.CUR_OBS],
explore=False,
is_training=False)
# Default log-likelihood calculation.
else:
dist_inputs, _ = self.model(input_dict, state_batches,
seq_lens)
dist_class = self.dist_class
action_dist = dist_class(dist_inputs, self.model)
log_likelihoods = action_dist.logp(actions)
return log_likelihoods
@override(Policy)
def apply_gradients(self, gradients):
self._apply_gradients(
zip([(tf.convert_to_tensor(g) if g is not None else None)
for g in gradients], self.model.trainable_variables()))
@override(Policy)
def get_exploration_info(self):
return _convert_to_numpy(self.exploration.get_info())
@override(Policy)
def get_weights(self, as_dict=False):
variables = self.variables()
if as_dict:
return {v.name: v.numpy() for v in variables}
return [v.numpy() for v in variables]
@override(Policy)
def set_weights(self, weights):
variables = self.variables()
assert len(weights) == len(variables), (len(weights),
len(variables))
for v, w in zip(variables, weights):
v.assign(w)
@override(Policy)
def get_state(self):
state = {"_state": super().get_state()}
state["_optimizer_variables"] = self._optimizer.variables()
return state
@override(Policy)
def set_state(self, state):
state = state.copy() # shallow copy
# Set optimizer vars first.
optimizer_vars = state.pop("_optimizer_variables", None)
if optimizer_vars and self._optimizer.variables():
logger.warning(
"Cannot restore an optimizer's state for tf eager! Keras "
"is not able to save the v1.x optimizers (from "
"tf.compat.v1.train) since they aren't compatible with "
"checkpoints.")
for opt_var, value in zip(self._optimizer.variables(),
optimizer_vars):
opt_var.assign(value)
# Then the Policy's (NN) weights.
super().set_state(state["_state"])
def variables(self):
"""Return the list of all savable variables for this policy."""
return self.model.variables()
@override(Policy)
def is_recurrent(self):
return self._is_recurrent
@override(Policy)
def num_state_tensors(self):
return len(self._state_inputs)
@override(Policy)
def get_initial_state(self):
if hasattr(self, "model"):
return self.model.get_initial_state()
return []
def get_session(self):
return None # None implies eager
def get_placeholder(self, ph):
raise ValueError(
"get_placeholder() is not allowed in eager mode. Try using "
"rllib.utils.tf_ops.make_tf_callable() to write "
"functions that work in both graph and eager mode.")
def loss_initialized(self):
return self._loss_initialized
@override(Policy)
def export_model(self, export_dir):
pass
@override(Policy)
def export_checkpoint(self, export_dir):
pass
def _get_is_training_placeholder(self):
return tf.convert_to_tensor(self._is_training)
def _apply_gradients(self, grads_and_vars):
if apply_gradients_fn:
apply_gradients_fn(self, self._optimizer, grads_and_vars)
else:
self._optimizer.apply_gradients(
[(g, v) for g, v in grads_and_vars if g is not None])
@with_lock
def _compute_gradients(self, samples):
"""Computes and returns grads as eager tensors."""
with tf.GradientTape(persistent=gradients_fn is not None) as tape:
loss = loss_fn(self, self.model, self.dist_class, samples)
variables = self.model.trainable_variables()
if gradients_fn:
class OptimizerWrapper:
def __init__(self, tape):
self.tape = tape
def compute_gradients(self, loss, var_list):
return list(
zip(self.tape.gradient(loss, var_list), var_list))
grads_and_vars = gradients_fn(self, OptimizerWrapper(tape),
loss)
else:
grads_and_vars = list(
zip(tape.gradient(loss, variables), variables))
if log_once("grad_vars"):
for _, v in grads_and_vars:
logger.info("Optimizing variable {}".format(v.name))
grads = [g for g, v in grads_and_vars]
stats = self._stats(self, samples, grads)
return grads_and_vars, stats
def _stats(self, outputs, samples, grads):
fetches = {}
if stats_fn:
fetches[LEARNER_STATS_KEY] = {
k: v
for k, v in stats_fn(outputs, samples).items()
}
else:
fetches[LEARNER_STATS_KEY] = {}
if extra_learn_fetches_fn:
fetches.update(
{k: v
for k, v in extra_learn_fetches_fn(self).items()})
if grad_stats_fn:
fetches.update({
k: v
for k, v in grad_stats_fn(self, samples, grads).items()
})
return fetches
def _lazy_tensor_dict(self, postprocessed_batch):
train_batch = UsageTrackingDict(postprocessed_batch)
train_batch.set_get_interceptor(_convert_to_tf)
return train_batch
def _lazy_numpy_dict(self, postprocessed_batch):
train_batch = UsageTrackingDict(postprocessed_batch)
train_batch.set_get_interceptor(convert_to_non_tf_type)
return train_batch
@classmethod
def with_tracing(cls):
return traced_eager_policy(cls)
eager_policy_cls.__name__ = name + "_eager"
eager_policy_cls.__qualname__ = name + "_eager"
return eager_policy_cls
|
py | 7df90d44792ffb2f5cb1ee8000cb70aae65d270b | #!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import sys
import os.path as op
def main():
sys.path.insert(0, op.abspath('.'))
from templateflow.__about__ import __version__
print(__version__)
if __name__ == '__main__':
main()
|
py | 7df90e2d37c02a476004482653eb58d8665afa1f | from dataclasses import dataclass
from typing import Any, Dict, Optional
import pytest
from fastapi import FastAPI
from pydantic import BaseModel
from ray.serve.utils import register_custom_serializers
import ray
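# Added note: these tests only assert that pydantic models, FastAPI apps and closures over them
# survive a ray.put()/ray.get() round trip once register_custom_serializers() has installed the
# custom serializers; `pydantic_module` is a companion test module imported inside the tests.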
@pytest.fixture(scope="session")
def start_ray():
ray.init(ignore_reinit_error=True)
register_custom_serializers()
def test_serialize_cls(start_ray):
class User(BaseModel):
name: str
ray.get(ray.put(User))
def test_serialize_instance(start_ray):
class User(BaseModel):
name: str
ray.get(ray.put(User(name="a")))
def test_serialize_imported_cls(start_ray):
from pydantic_module import User
ray.get(ray.put(User))
def test_serialize_imported_instance(start_ray):
from pydantic_module import user
ray.get(ray.put(user))
def test_serialize_app_no_route(start_ray):
app = FastAPI()
ray.get(ray.put(app))
def test_serialize_app_no_validation(start_ray):
app = FastAPI()
@app.get("/")
def hello() -> str:
return "hi"
ray.get(ray.put(app))
def test_serialize_app_primitive_type(start_ray):
app = FastAPI()
@app.get("/")
def hello(v: str) -> str:
return "hi"
ray.get(ray.put(app))
def test_serialize_app_pydantic_type_imported(start_ray):
from pydantic_module import User
app = FastAPI()
@app.get("/")
def hello(v: str, u: User) -> str:
return "hi"
ray.get(ray.put(app))
def test_serialize_app_pydantic_type_inline(start_ray):
class User(BaseModel):
name: str
app = FastAPI()
@app.get("/")
def hello(v: str, u: User) -> str:
return "hi"
ray.get(ray.put(app))
def test_serialize_app_imported(start_ray):
from pydantic_module import app
ray.get(ray.put(app))
def test_serialize_app_pydantic_type_closure_ref(start_ray):
class User(BaseModel):
name: str
def make():
app = FastAPI()
@app.get("/")
def hello(v: str, u: User) -> str:
return "hi"
return app
ray.get(ray.put(make))
def test_serialize_app_pydantic_type_closure_ref_import(start_ray):
from pydantic_module import User
def make():
app = FastAPI()
@app.get("/")
def hello(v: str, u: User) -> str:
return "hi"
return app
ray.get(ray.put(make))
def test_serialize_app_pydantic_type_closure(start_ray):
def make():
class User(BaseModel):
name: str
app = FastAPI()
@app.get("/")
def hello(v: str, u: User) -> str:
return "hi"
return app
ray.get(ray.put(make))
def test_serialize_app_imported_closure(start_ray):
from pydantic_module import closure
ray.get(ray.put(closure))
def test_serialize_serve_dataclass(start_ray):
@dataclass
class BackendMetadata:
accepts_batches: bool = False
is_blocking: bool = True
autoscaling_config: Optional[Dict[str, Any]] = None
class BackendConfig(BaseModel):
internal_metadata: BackendMetadata = BackendMetadata()
ray.get(ray.put(BackendConfig()))
@ray.remote
def consume(f):
pass
ray.get(consume.remote(BackendConfig()))
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", "-s", __file__]))
|
py | 7df90e528c54ce70a49eb8142102f3e1827144c9 | from ViewportPainter import *
'''
This is an example implementation of a custom interactive tool.
The example creates a context in which the user draws lines on a surface; the lines are then converted into a lofted polygonal mesh.
After initializing an instance of the "Example" class, hold CTRL and press the Left Mouse Button to start drawing curves on the surface.
As soon as you release the Left Mouse Button, a curve is created.
Draw several curves and only then release the CTRL key to get the lofted polyMesh.
'''
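# Hedged usage sketch (the module name `example_tool` is an assumption -- use whatever name this
# file is saved under on Maya's PYTHONPATH). Run inside Maya's Script Editor with a mesh visible:
#   import example_tool
#   example_tool.main()              # or: tool = example_tool.Example()
#   # press Esc to remove the viewport callback installed by ViewportPainter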
class Example(ViewportPainter):
def __init__(self):
self.hitPoint = None
self.hitFace = None
self.hitObject = None
self.hitObjectDag = None
self.editing = True
self.closestX = None
self.closestY = None
self.closestIndex = None
self.structData = [] # DagPath | HitFaceID | [U,V] | HitPoint
self.controlPoints = []
self.curveList = []
super(Example, self).__init__()
def drawRect2D(self, worldPoint, radius = 5):
"""Draws a 2D rectangle based on a 3D point in a space
@param[in] worldPoint which is MPoint
@param[in] radius of the rectangle
"""
point = worldPoint
xPtrInit = OpenMaya.MScriptUtil()
yPtrInit = OpenMaya.MScriptUtil()
xPtr = xPtrInit.asShortPtr()
yPtr = yPtrInit.asShortPtr()
pointFix = OpenMaya.MPoint(worldPoint.x, worldPoint.y, worldPoint.z)
self.view3D.worldToView(pointFix, xPtr, yPtr)
xcoord = OpenMaya.MScriptUtil().getShort(xPtr)
ycoord = OpenMaya.MScriptUtil().getShort(yPtr)
        # Save the GL state and both matrices so the viewport is left exactly as we found it.
        self.glFT.glPushAttrib(OpenMayaRender.MGL_ALL_ATTRIB_BITS)
        self.glFT.glDisable(OpenMayaRender.MGL_DEPTH_TEST)
        # Set up an orthographic projection that maps directly to viewport pixels.
        self.glFT.glMatrixMode(OpenMayaRender.MGL_PROJECTION)
        self.glFT.glPushMatrix()
        self.glFT.glLoadIdentity()
        self.glFT.glOrtho(0.0, float(self.view3D.portWidth()), 0.0, float(self.view3D.portHeight()), -1.0, 1.0)
        self.glFT.glMatrixMode(OpenMayaRender.MGL_MODELVIEW)
        self.glFT.glPushMatrix()
        self.glFT.glLoadIdentity()
        self.glFT.glTranslatef(0.0, 0.375, 0.0)
        self.glFT.glColor4f(1, 0, 0, 1)
        self.glFT.glBegin(OpenMayaRender.MGL_POLYGON)
        self.glFT.glVertex2f(xcoord - radius, ycoord - radius)
        self.glFT.glVertex2f(xcoord - radius, ycoord + radius)
        self.glFT.glVertex2f(xcoord + radius, ycoord + radius)
        self.glFT.glVertex2f(xcoord + radius, ycoord - radius)
        self.glFT.glEnd()
        # Restore both matrices and the previously saved GL state.
        self.glFT.glMatrixMode(OpenMayaRender.MGL_PROJECTION)
        self.glFT.glPopMatrix()
        self.glFT.glMatrixMode(OpenMayaRender.MGL_MODELVIEW)
        self.glFT.glPopMatrix()
        self.glFT.glPopAttrib()
def draw(self):
"""The main draw OpenGL method
"""
self.glFT.glPushAttrib(OpenMayaRender.MGL_ALL_ATTRIB_BITS) #save all stackable states
self.view3D.beginGL() #setup port for drawing native OpenGL calls
self.glFT.glClearDepth(0.0)
self.glFT.glDepthFunc(OpenMayaRender.MGL_ALWAYS)
self.glFT.glEnable( OpenMayaRender.MGL_BLEND )
self.glFT.glBlendFunc( OpenMayaRender.MGL_SRC_ALPHA, OpenMayaRender.MGL_ONE_MINUS_SRC_ALPHA )
fnMesh = None
self.controlPoints = []
#recreate control points for every callback
if len(self.structData) > 0:
for i in self.structData:
fnMesh = OpenMaya.MFnMesh(i[0])
point = OpenMaya.MPoint(0,0,0)
u = i[2][0]
v = i[2][1]
util = OpenMaya.MScriptUtil()
util.createFromList([u, v], 2)
uvFloat2Ptr = util.asFloat2Ptr()
fnMesh.getPointAtUV(i[1], point, uvFloat2Ptr, OpenMaya.MSpace.kWorld)
self.controlPoints.append(point)
if self.controlPoints:
self.glFT.glColor4f(1, 1, 0, 1)
for i in range(len(self.controlPoints) - 1):
p1 = self.controlPoints[i]
p2 = self.controlPoints[i+1]
self.glFT.glBegin(OpenMayaRender.MGL_LINES)
self.glFT.glVertex3f(p1.x, p1.y, p1.z)
self.glFT.glVertex3f(p2.x, p2.y, p2.z)
self.glFT.glEnd()
#Draw Locators by control points
if self.controlPoints:
for i in self.controlPoints:
self.drawRect2D(i, 3)
self.view3D.endGL()#eng openGL drawings
self.glFT.glPopAttrib() #restores values of state variables, changed by glPushAttrib
#Overwritten update method
def update(self, *args):
"""
A Method that is called for every refresh of Maya viewport
"""
if self.userKeyboardEvents.K_Esc:
self.uninitializeCallback()
return
if self.userKeyboardEvents.K_Ctrl:
self.userMouseEvents.editMode = True
'''Mouse press'''
if self.userMouseEvents.M_Button_Left:
self.hitPoint, self.hitFace, self.hitObject = self.getMouseIntersect()
if self.hitObject: #intersection was successfull
#get UV at hitPoint
dagPath = OpenMaya.MDagPath()
selectionList = OpenMaya.MSelectionList()
selectionList.clear()
selectionList.add(self.hitObject)
selectionList.getDagPath(0, dagPath)
self.hitObjectDag = dagPath
fnMesh = OpenMaya.MFnMesh(dagPath)
util = OpenMaya.MScriptUtil()
util.createFromList([0.0, 0.0], 2)
uvPoint = util.asFloat2Ptr()
fnMesh.getUVAtPoint(OpenMaya.MPoint(self.hitPoint.x, self.hitPoint.y, self.hitPoint.z), uvPoint, OpenMaya.MSpace.kWorld, "map1", None)
u = OpenMaya.MScriptUtil.getFloat2ArrayItem(uvPoint, 0, 0)
v = OpenMaya.MScriptUtil.getFloat2ArrayItem(uvPoint, 0, 1)
#fill up StructData with a New Element
uv = []
uv.append(u)
uv.append(v)
data = []
data.append(dagPath)
data.append(self.hitFace)
data.append(uv)
data.append(self.hitPoint)
# if self.editing == False and self.closestIndex == None and not self.userKeyboardEvents.K_Alt:
if self.editing == False:
self.structData.append(data) #we add a new element to StructData
else:
lastPoint = self.structData[-1][3]
vector = OpenMaya.MVector(self.hitPoint.x - lastPoint.x, self.hitPoint.y - lastPoint.y, self.hitPoint.z - lastPoint.z)
if vector.length() > 0.5:
self.structData.append(data) #we add a new element to StructData
self.editing = True
'''Mouse release'''
if not self.userMouseEvents.M_Button_Left:
self.editing = False #stop editing, next time we will add a new element
self.userMouseEvents.editMode = False
if self.structData and self.controlPoints:
curveCV = []
for i in self.controlPoints:
point = []
point.append(i.x)
point.append(i.y)
point.append(i.z)
curveCV.append(point)
if len(curveCV) >= 4:
curve = cmds.curve( p=curveCV )
cmds.rebuildCurve(curve, ch=0, rpo=1, rt=0, end=1, kr=0, kcp=0, kep=1, kt=0, s=6, d=3, tol=0)
self.curveList.append(curve)
self.structData = []
if not self.userKeyboardEvents.K_Ctrl:
if len(self.curveList) >= 2:
surface = cmds.loft(self.curveList, ch=0, u=1, c=0, ar=0, d=3, ss=1, rn=1, po=0, rsn = True)[0]
poly = cmds.nurbsToPoly(surface, mnd=1, ch=0, f=0, pt=1, pc=200, chr=0.9, ft=0.01, mel = 0.001, d=0.1, ut=1, un=3, vt=1, vn=3, uch=0, ucr=0, cht=0.2, es=0, ntr=0, mrt=0, uss=1)
cmds.delete(self.curveList)
cmds.delete(surface)
cmds.polyNormal(poly[0], normalMode = 0, userNormalMode = 0, ch=0)
self.uninitializeCallback()
if len(self.structData) == 0:
return
self.draw()
def main():
instance = Example()
|
py | 7df90eef6a0c4e32f736bc77cccc9492f1368a6f | #!/usr/bin/env python3
#-*- coding: utf-8 -*-
import sys
import math
n = int(sys.argv[1])
def eratosthenes(n):
if (n >= 3):
list1 = []
for i in range(n):
list1.append(( i + 1 ))
a = 2
for x in range(int(math.ceil(math.sqrt(n)))):
            for i in list1[:]:  # iterate over a copy so removals do not skip elements
if ( i != a and ( i % a == 0 ) and i > 1):
list1.remove(i)
a = a + 1
del list1[0]
        print(list1)
else:
print("There are no primes less than your number.")
eratosthenes(n) |
py | 7df90f2d5c0b157ff80b767bdce5e054509be8bc | from setuptools import setup, find_packages
import os
from io import open
import versioneer
packagename = 'ltd-mason'
description = 'LSST the Docs build tool'
author = 'Jonathan Sick'
author_email = '[email protected]'
license = 'MIT'
url = 'https://github.com/lsst-sqre/ltd-mason'
def read(filename):
full_filename = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
filename)
return open(full_filename, mode='r', encoding='utf-8').read()
long_description = read('README.rst')
setup(
name=packagename,
version=versioneer.get_version(),
description=description,
long_description=long_description,
url=url,
author=author,
author_email=author_email,
license=license,
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
],
keywords='lsst',
packages=find_packages(exclude=['docs', 'tests*', 'data']),
cmdclass=versioneer.get_cmdclass(),
install_requires=['ruamel.yaml',
'sh',
'boto3',
'jsonschema',
'requests'],
# package_data={},
entry_points={
'console_scripts': [
'ltd-mason = ltdmason.cli:run_ltd_mason',
'ltd-mason-travis = ltdmason.traviscli:run',
'ltd-mason-make-redirects = ltdmason.redirectdircli:run',
]
}
)
|
py | 7df90fc8584f4c4551f402d14564e2f0195a12a4 | import repo
CREDENTIALS = "credentials"
def getCredentials():
return repo.getRepo(CREDENTIALS)
def add(username, password):
if not username:
print("Username cannot be empty.")
return False
if exists(username):
print("Username", username, "is already taken.")
return False
if not password:
print("Passowrd cannot be empty.")
return False
repo.addOrUpdate(CREDENTIALS, username, password)
return True
def updatePassword(username, password):
if not password:
print("Password cannot be empty.")
return False
    repo.addOrUpdate(CREDENTIALS, username, password)
    return True
def getPassword(username):
return getCredentials()[username]
def exists(username):
return repo.exists(CREDENTIALS, username) |
py | 7df90fe0662f6e3da917f91b1d5e67d2ade62cde | from decimal import Decimal
from django.utils.translation import ugettext_lazy as _
from app.models import DefaultQuerySet, TimestampedModel, models
class PromoCodeQuerySet(DefaultQuerySet):
def active(self):
return self.filter(active=True)
def get_or_nothing(self, name):
try:
return self.active().get(name__iexact=name)
except PromoCode.DoesNotExist:
return None
class PromoCode(TimestampedModel):
objects = PromoCodeQuerySet.as_manager()
name = models.CharField(_('Promo Code'), max_length=32, unique=True, db_index=True)
discount_percent = models.IntegerField(_('Discount percent'))
active = models.BooleanField(_('Active'), default=True)
comment = models.TextField(_('Comment'), blank=True, null=True)
class Meta:
verbose_name = _('Promo Code')
verbose_name_plural = _('Promo Codes')
def apply(self, price: Decimal) -> Decimal:
return Decimal(price * (100 - self.discount_percent) / 100)
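# A hedged usage sketch (illustrative values only, not taken from this project):
# apply() keeps the Decimal arithmetic above, so a 30 percent code turns 1000 into 700.
#
#     promo = PromoCode(name="LAUNCH30", discount_percent=30)
#     promo.apply(Decimal("1000"))   # Decimal("700")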
|
py | 7df910d8548214ae0e3aca22259ad883fa4cc51e |
import mne
from pyriemann.classification import MDM
from pyriemann.estimation import XdawnCovariances, ERPCovariances
from tqdm import tqdm
import matplotlib.pyplot as plt
import numpy as np
from sklearn.externals import joblib
import sys
sys.path.append('.')
from braininvaders2014b.dataset import BrainInvaders2014b
#filename = 'classification_scores.pkl'
#scores = joblib.load(filename)
dataset = BrainInvaders2014b()
# loop on list of subjects
for pair in dataset.pair_list:
print('treating pair', str(pair).zfill(2))
# get the raw object
sessions = dataset._get_single_pair_data(pair=pair)
raw_solo1 = sessions['solo_1']['run_1']
raw_solo2 = sessions['solo_2']['run_1']
raw_colab = sessions['collaborative']['run_1']
chname2idx = {}
for i, chn in enumerate(raw_colab.ch_names):
chname2idx[chn] = i
# filter data and resample
fmin = 1
fmax = 20
raw_solo1.filter(fmin, fmax, verbose=False)
raw_solo2.filter(fmin, fmax, verbose=False)
raw_colab.filter(fmin, fmax, verbose=False)
# detect the events and cut the signal into epochs
event_id = {'NonTarget': 1, 'Target': 2}
events_solo1 = mne.find_events(raw=raw_solo1, shortest_event=1, verbose=False)
epochs_solo1 = mne.Epochs(raw_solo1, events_solo1, event_id, tmin=0.0, tmax=0.8, baseline=None, verbose=False, preload=True)
epochs_solo1.pick_types(eeg=True)
events_solo2 = mne.find_events(raw=raw_solo2, shortest_event=1, verbose=False)
epochs_solo2 = mne.Epochs(raw_solo2, events_solo2, event_id, tmin=0.0, tmax=0.8, baseline=None, verbose=False, preload=True)
epochs_solo2.pick_types(eeg=True)
events_colab = mne.find_events(raw=raw_colab, shortest_event=1, verbose=False)
epochs_colab = mne.Epochs(raw_colab, events_colab, event_id, tmin=0.0, tmax=0.8, baseline=None, verbose=False, preload=True)
epochs_colab.pick_types(eeg=True)
# plot the figure
fig, ax = plt.subplots(facecolor='white', figsize=(17.00, 12.30), nrows=2, ncols=2)
for i, epochs, Cz in zip([0, 1], [epochs_solo1, epochs_solo2], ['Cz_1', 'Cz_2']):
evkTarget = epochs['Target'].average().data[chname2idx[Cz],:]
evkNonTarget = epochs['NonTarget'].average().data[chname2idx[Cz],:]
t = np.arange(len(evkTarget)) / epochs_solo1.info['sfreq']
ax[0][i].plot(t, evkTarget, c='#2166ac', lw=3.0, label='Target (' + str(len(epochs['Target'])) + ' trials)')
ax[0][i].plot(t, evkNonTarget, c='#b2182b', lw=3.0, label='NonTarget (' + str(len(epochs['NonTarget'])) + ' trials)')
ax[0][i].plot([0, 0.8], [0, 0], c='#CDCDCD', lw=2.0, ls='--')
ax[0][i].set_xlim(0, 0.8)
ax[0][i].set_title('subject ' + str(i+1) + ' on solo', fontsize=12)
ax[0][i].set_ylabel(r'amplitude ($\mu$V)', fontsize=10)
ax[0][i].set_xlabel('time after stimulus (s)', fontsize=10)
ax[0][i].legend()
for i, Cz in zip([0, 1], ['Cz_1', 'Cz_2']):
evkTarget = epochs_colab['Target'].average().data[chname2idx[Cz],:]
evkNonTarget = epochs_colab['NonTarget'].average().data[chname2idx[Cz],:]
t = np.arange(len(evkTarget)) / epochs_solo1.info['sfreq']
        ax[1][i].plot(t, evkTarget, c='#2166ac', lw=3.0, label='Target (' + str(len(epochs_colab['Target'])) + ' trials)')
        ax[1][i].plot(t, evkNonTarget, c='#b2182b', lw=3.0, label='NonTarget (' + str(len(epochs_colab['NonTarget'])) + ' trials)')
ax[1][i].plot([0, 0.8], [0, 0], c='#CDCDCD', lw=2.0, ls='--')
ax[1][i].set_xlim(0, 0.8)
ax[1][i].set_title('subject ' + str(i+1) + ' on collaborative', fontsize=12)
ax[1][i].set_ylabel(r'amplitude ($\mu$V)', fontsize=10)
ax[1][i].set_xlabel('time after stimulus (s)', fontsize=10)
ax[1][i].legend()
fig.suptitle('Average evoked potentials at electrode Cz for pair ' + str(pair), fontsize=14)
filename = './evoked_potentials/evoked_potentials_pair_' + str(pair).zfill(2) + '.pdf'
fig.savefig(filename, format='pdf')
|
py | 7df91183cdccff6a02fcca7ad780df08aca0d15d | """
Week 3 class exercises
Warmups, non-graded assignments, and practice
9/29/2020
Andrew Nalundasan
"""
#n = int(input('Enter number: '))
#while n > 0:
# print('hello')
# n -= 1
#x = int(input('Enter number: '))
#while x != 100:
# print(x)
# x += 1
#print('Done')
#num = int(input('enter a positive odd integer: '))
#while not (num > 0 and num % 2 != 0):
# num = int(input('must be positive and odd: '))
#print('enter number to multiply, 1 to end')
#product = 1.0
#num = float(input('enter num: '))
#while num != 1:
# product *= num
# num = float(input('enter num: '))
#print(product)
#x = range(1000, 100, -5)
#for n in x:
# print(n)
#for letter in 'Can\'t say':
# print(letter)
# While Loop - Some String
#s = 'some string'
#i = 0
#while i < len(s):
# letter = s[i]
# print(letter)
# i += 1
# For Loop - Some String
#s = 'some string'
#for letter in s:
# print(letter)
# While Loop - n times
#n = int(input('n: '))
#i = 0
#while i < n:
# print('hello')
# i += 1
# For Loop - n times
#n = int(input('n: '))
#for i in range(n):
# print('hello')
# While Loop - low/high
#lo = int(input('low: '))
#hi = int(input('high: '))
#i = lo
#while i <= hi:
# print(i)
# i += 3
# For Loop - low/high
#lo = int(input('low: '))
#hi = int(input('high: '))
#for i in range(lo,hi + 1, 3):
# print(i)
#r = range(3, 10, 2)
# a - range(3, 10, 2)
# b - 4
# c - 7
# d - 9
# e - range(5, 9, 2)
# f - range(3, 7, 2)
# g - True
# h
#for i in r:
# print(i)
# PQ2
alphabet = 'abcdefg'
k = 3
i = 2
#print(alphabet[0])
#print(alphabet[3])
#print(alphabet[i])
#print(alphabet[(i + k) // 2])
#print(alphabet[2:5])
#print(alphabet[i:i + 3])
#print(alphabet[i:i + k])
#for i in range(5):
# print(i, i + k)
#for i in range(len(alphabet) - k + 1):
# print(i)
for i in range(len(alphabet) - k + 1):
print(alphabet[i:i + k])
|
py | 7df915323c1a1fbf99a6ae24a1fc2021e4fe70f9 | # -*- coding: utf-8 -*-
import pytest
from maestral.config.main import DEFAULTS_CONFIG, CONF_VERSION
from maestral.config.user import UserConfig
@pytest.fixture
def config(tmp_path):
config_path = tmp_path / "test-update-config.ini"
# Create an initial config on disk.
conf = UserConfig(
str(config_path),
defaults=DEFAULTS_CONFIG,
version=CONF_VERSION,
backup=True,
remove_obsolete=True,
)
yield conf
conf.cleanup()
|
py | 7df91543313c468156f9eb02665bb907f802f626 | import logging
import json
from typing import List
from fastapi import Depends, Query
from sqlalchemy import or_, orm, func, desc
from sqlalchemy_filters import apply_pagination, apply_sort, apply_filters
from dispatch.auth.models import DispatchUser
from dispatch.auth.service import get_current_user
from dispatch.search.fulltext.composite_search import CompositeSearch
from dispatch.enums import Visibility
from dispatch.feedback.models import Feedback
from dispatch.task.models import Task
from dispatch.project.models import Project
from dispatch.plugin.models import Plugin, PluginInstance
from dispatch.incident.models import Incident
from dispatch.incident_type.models import IncidentType
from dispatch.individual.models import IndividualContact
from dispatch.participant.models import Participant
from .core import (
Base,
get_class_by_tablename,
get_model_name_by_tablename,
get_db,
)
log = logging.getLogger(__file__)
def restricted_incident_filter(query: orm.Query, current_user: DispatchUser):
"""Adds additional incident filters to query (usually for permissions)."""
query = (
query.join(Participant, Incident.id == Participant.incident_id)
.join(IndividualContact)
.filter(
or_(
Incident.visibility == Visibility.open.value,
IndividualContact.email == current_user.email,
)
)
.distinct()
)
return query
def restricted_incident_type_filter(query: orm.Query, current_user: DispatchUser):
"""Adds additional incident type filters to query (usually for permissions)."""
if current_user:
query = query.filter(IncidentType.visibility == Visibility.open.value)
return query
def apply_model_specific_filters(model: Base, query: orm.Query, current_user: DispatchUser):
"""Applies any model specific filter as it pertains to the given user."""
model_map = {
Incident: [restricted_incident_filter],
# IncidentType: [restricted_incident_type_filter],
}
filters = model_map.get(model, [])
for f in filters:
query = f(query, current_user)
return query
def apply_model_specific_joins(model: Base, query: orm.query):
"""Applies any model specific implicity joins."""
model_map = {
Feedback: [(Incident, False), (Project, False)],
Task: [(Incident, False), (Project, False)],
PluginInstance: [(Plugin, False)],
Incident: [(Incident.tags, True), (Incident.terms, True)],
DispatchUser: [(DispatchUser.organizations, True)],
}
joined_models = model_map.get(model, [])
for model, is_outer in joined_models:
query = query.join(model, isouter=is_outer)
return query
def paginate(query: orm.Query, page: int, items_per_page: int):
# Never pass a negative OFFSET value to SQL.
offset_adj = 0 if page <= 0 else page - 1
items = query.limit(items_per_page).offset(offset_adj * items_per_page).all()
total = query.order_by(None).count()
return items, total
def composite_search(*, db_session, query_str: str, models: List[Base], current_user: DispatchUser):
"""Perform a multi-table search based on the supplied query."""
s = CompositeSearch(db_session, models)
query = s.build_query(query_str, sort=True)
# TODO can we do this with composite filtering?
# for model in models:
# query = apply_model_specific_filters(model, query, current_user)
return s.search(query=query)
def search(*, query_str: str, query: Query, model: str, sort=False):
"""Perform a search based on the query."""
search_model = get_class_by_tablename(model)
if not query_str.strip():
return query
vector = search_model.search_vector
query = query.filter(vector.op("@@")(func.tsq_parse(query_str)))
if sort:
query = query.order_by(desc(func.ts_rank_cd(vector, func.tsq_parse(query_str))))
return query.params(term=query_str)
def create_sort_spec(model, sort_by, descending):
"""Creates sort_spec."""
sort_spec = []
if sort_by and descending:
for field, direction in zip(sort_by, descending):
direction = "desc" if direction else "asc"
# we have a complex field, we may need to join
if "." in field:
complex_model, complex_field = field.split(".")[-2:]
sort_spec.append(
{
"model": get_model_name_by_tablename(complex_model),
"field": complex_field,
"direction": direction,
}
)
else:
sort_spec.append({"model": model, "field": field, "direction": direction})
log.debug(f"Sort Spec: {json.dumps(sort_spec, indent=2)}")
return sort_spec
def get_all(*, db_session, model):
"""Fetches a query object based on the model class name."""
return db_session.query(get_class_by_tablename(model))
def common_parameters(
db_session: orm.Session = Depends(get_db),
page: int = 1,
items_per_page: int = Query(5, alias="itemsPerPage"),
query_str: str = Query(None, alias="q"),
filter_spec: str = Query([], alias="filter"),
sort_by: List[str] = Query([], alias="sortBy[]"),
descending: List[bool] = Query([], alias="descending[]"),
current_user: DispatchUser = Depends(get_current_user),
):
if filter_spec:
filter_spec = json.loads(filter_spec)
return {
"db_session": db_session,
"page": page,
"items_per_page": items_per_page,
"query_str": query_str,
"filter_spec": filter_spec,
"sort_by": sort_by,
"descending": descending,
"current_user": current_user,
}
def search_filter_sort_paginate(
db_session,
model,
query_str: str = None,
filter_spec: List[dict] = None,
page: int = 1,
items_per_page: int = 5,
sort_by: List[str] = None,
descending: List[bool] = None,
current_user: DispatchUser = None,
):
"""Common functionality for searching, filtering, sorting, and pagination."""
model_cls = get_class_by_tablename(model)
sort_spec = create_sort_spec(model, sort_by, descending)
query = db_session.query(model_cls)
query = apply_model_specific_joins(model_cls, query)
if query_str:
sort = False if sort_by else True
query = search(query_str=query_str, query=query, model=model, sort=sort)
query = apply_model_specific_filters(model_cls, query, current_user)
if filter_spec:
query = apply_filters(query, filter_spec)
query = apply_sort(query, sort_spec)
if items_per_page == -1:
items_per_page = None
query, pagination = apply_pagination(query, page_number=page, page_size=items_per_page)
return {
"items": query.all(),
"itemsPerPage": pagination.page_size,
"page": pagination.page_number,
"total": pagination.total_results,
}
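# A hedged usage sketch (the session and user objects are assumed to come from the
# FastAPI dependencies above; the model string and "reported_at" sort field are
# illustrative, not verified against this project's schema):
#
#     page = search_filter_sort_paginate(
#         db_session=db_session,
#         model="Incident",
#         query_str="database outage",
#         page=1,
#         items_per_page=25,
#         sort_by=["reported_at"],
#         descending=[True],
#         current_user=current_user,
#     )
#     page["items"], page["total"]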
|
py | 7df9168f01ed7663300314c06c2577f573c7b7e5 | import os
import sys
from subprocess import call, check_call, PIPE, STDOUT, Popen, CalledProcessError
def run_ppiclf(cwd, rea_file, ifmpi, log_suffix='', n_procs=1, verbose=False):
# Paths to executables, files
test = os.path.join(cwd, 'test.out')
logfile = os.path.join(cwd, '{0}.log.{1}{2}'.format(rea_file, n_procs, log_suffix))
if ifmpi:
command = ['mpiexec', '-np', str(n_procs), test]
else:
command = [test]
print("Running test...")
print(' Using command "{0}"'.format(' '.join(command)))
print(' Using working directory "{0}"'.format(cwd))
    # Any error here is unexpected
try:
if verbose:
with open(logfile, 'w') as f:
                proc = Popen(command, cwd=cwd, stderr=STDOUT, stdout=PIPE)
for line in proc.stdout:
sys.stdout.write(line)
f.write(line)
else:
with open(logfile, 'w') as f:
call(command, cwd=cwd, stdout=f)
except Exception as E:
# TODO: Change to warnings.warn()
print('Could not successfully run test! Caught error: {0}'.format(E))
else:
print('Finished running test!')
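# A minimal usage sketch, assuming a compiled test.out in the current directory;
# the case name 'pendulum' and process count are placeholders:
if __name__ == '__main__':
    run_ppiclf(os.getcwd(), 'pendulum', ifmpi=True, n_procs=4, verbose=False)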
|
py | 7df916bff3e4b9a1b11bed6f0f98dfb6248a774f | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2021, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Models for displaying maps in Bokeh plots.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Bokeh imports
from ..core.enums import MapType
from ..core.has_props import abstract
from ..core.properties import (
JSON,
Base64String,
Bool,
Enum,
Float,
Instance,
Int,
NonNullable,
Override,
String,
)
from ..core.validation import error, warning
from ..core.validation.errors import INCOMPATIBLE_MAP_RANGE_TYPE, MISSING_GOOGLE_API_KEY, REQUIRED_RANGE
from ..core.validation.warnings import MISSING_RENDERERS
from ..model import Model
from ..models.ranges import Range1d
from .plots import Plot
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'GMapOptions',
'GMapPlot',
'MapOptions',
'MapPlot',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
@abstract
class MapOptions(Model):
''' Abstract base class for map options' models.
'''
lat = NonNullable(Float, help="""
The latitude where the map should be centered.
""")
lng = NonNullable(Float, help="""
The longitude where the map should be centered.
""")
zoom = Int(12, help="""
The initial zoom level to use when displaying the map.
""")
@abstract
class MapPlot(Plot):
''' Abstract base class for map plot models.
'''
def __init__(self, *args, **kw) -> None:
from ..models.ranges import Range1d
for r in ('x_range', 'y_range'):
if r in kw and not isinstance(kw.get(r), Range1d):
raise ValueError('Invalid value for %r, MapPlot ranges may only be Range1d, not data ranges' % r)
super().__init__(*args, **kw)
@error(INCOMPATIBLE_MAP_RANGE_TYPE)
def _check_incompatible_map_range_type(self):
from ..models.ranges import Range1d
if self.x_range is not None and not isinstance(self.x_range, Range1d):
return "%s.x_range" % str(self)
if self.y_range is not None and not isinstance(self.y_range, Range1d):
return "%s.y_range" % str(self)
class GMapOptions(MapOptions):
''' Options for ``GMapPlot`` objects.
'''
map_type = Enum(MapType, default="roadmap", help="""
The `map type`_ to use for the ``GMapPlot``.
.. _map type: https://developers.google.com/maps/documentation/javascript/reference#MapTypeId
""")
scale_control = Bool(default=False, help="""
Whether the Google map should display its distance scale control.
""")
styles = NonNullable(JSON, help="""
A JSON array of `map styles`_ to use for the ``GMapPlot``. Many example styles can
`be found here`_.
.. _map styles: https://developers.google.com/maps/documentation/javascript/reference#MapTypeStyle
.. _be found here: https://snazzymaps.com
""")
tilt = Int(default=45, help="""
`Tilt`_ angle of the map. The only allowed values are 0 and 45.
Only has an effect on 'satellite' and 'hybrid' map types.
A value of 0 causes the map to always use a 0 degree overhead view.
A value of 45 causes the tilt angle to switch to 45 imagery if available.
.. _Tilt: https://developers.google.com/maps/documentation/javascript/reference/3/map#MapOptions.tilt
""")
class GMapPlot(MapPlot):
''' A Bokeh Plot with a `Google Map`_ displayed underneath.
Data placed on this plot should be specified in decimal lat/lon coordinates
e.g. ``(37.123, -122.404)``. It will be automatically converted into the
web mercator projection to display properly over google maps tiles.
The ``api_key`` property must be configured with a Google API Key in order
for ``GMapPlot`` to function. The key will be stored in the Bokeh Document
JSON.
Note that Google Maps exert explicit control over aspect ratios at all
times, which imposes some limitations on ``GMapPlot``:
* Only ``Range1d`` ranges are supported. Attempting to use other range
types will result in an error.
* Usage of ``BoxZoomTool`` is incompatible with ``GMapPlot``. Adding a
``BoxZoomTool`` will have no effect.
.. _Google Map: https://www.google.com/maps/
'''
# TODO (bev) map plot might not have these
@error(REQUIRED_RANGE)
def _check_required_range(self):
pass
@warning(MISSING_RENDERERS)
def _check_missing_renderers(self):
pass
@error(MISSING_GOOGLE_API_KEY)
def _check_missing_google_api_key(self):
if self.api_key is None:
return str(self)
map_options = Instance(GMapOptions, help="""
Options for displaying the plot.
""")
border_fill_color = Override(default="#ffffff")
api_key = NonNullable(Base64String, help="""
Google Maps API requires an API key. See https://developers.google.com/maps/documentation/javascript/get-api-key
for more information on how to obtain your own.
""")
api_version = String(default="3.43", help="""
The version of Google Maps API to use. See https://developers.google.com/maps/documentation/javascript/versions
for more information.
.. note::
Changing this value may result in broken map rendering.
""")
x_range = Override(default=lambda: Range1d())
y_range = Override(default=lambda: Range1d())
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
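# A hedged usage sketch (not part of this module): ``GMapPlot`` instances are normally
# built through ``bokeh.plotting.gmap``, which wires the options and API key together.
# The key below is a placeholder.
#
#     from bokeh.plotting import gmap
#     options = GMapOptions(lat=37.7749, lng=-122.4194, map_type="roadmap", zoom=11)
#     p = gmap("GOOGLE_API_KEY", options, title="San Francisco")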
|
py | 7df9177e01eb7b776d7d2f81d300dc5ac82dccad | from __future__ import print_function
import numpy as np
import pickle as pkl
def train_vald_split(y, train, rng, classes=10):
inds = [None]*classes
sinds = [None]*classes
num_train = [None]*classes
for i in range(classes):
inds[i] = np.where(y==i)[0]
sinds[i] = inds[i][rng.permutation(len(inds[i]))]
num_train[i] = int(len(inds[i])* train)
itrain = np.concatenate([ind[:num_train[i]] for i,ind in enumerate(sinds)])
ivalid = np.concatenate([ind[num_train[i]:] for i,ind in enumerate(sinds)])
return itrain, ivalid
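# A small illustrative sketch (synthetic labels, not project data): split 1000
# ten-class labels 90/10 per class while preserving the class balance in both sets.
#
#     rng = np.random.RandomState(0)
#     y = rng.randint(0, 10, size=1000)
#     itrain, ivalid = train_vald_split(y, 0.9, rng, classes=10)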
def confusey(y, noise, rng, classes=10):
ret = np.copy(y)
if noise == 0:
return ret
inds = [None]*classes
sinds = [None]*classes
sels = [None]*classes
for i in range(classes):
inds[i] = np.where(y==i)[0]
sinds[i] = inds[i][rng.permutation(len(inds[i]))]
for i in range(classes):
num = max((int(len(sinds[i])* noise / 100.0), classes-1))
iinds = sinds[i][:num]
chs = np.arange(classes)
chs = np.delete(chs, i)
ret[iinds] = rng.choice(chs, (num,))
return ret
def load(path):
with open(path, 'rb') as ofs:
data= pkl.load(ofs)
return data['trainx'], data['trainy'], data['testx'], data['testy']
def dump(path, trainx, trainy, trainry, validx, validy, validry, testx, testy, ishalf=True):
if ishalf:
trainx = trainx.astype('float16')
validx = validx.astype('float16')
testx = testx.astype('float16')
with open(path, 'wb') as ofs:
pkl.dump({'trainx':trainx, 'trainy':np.reshape(trainy, (-1,)), 'trainry':np.reshape(trainry, (-1,)), 'validx':validx, 'validy':np.reshape(validy, (-1,)), 'validry':np.reshape(validry, (-1,)), 'testx': testx, 'testy': np.reshape(testy, (-1,))}, ofs)
datadir = 'data/'
dataset = 'cifar10_transfer_binary'
exps = 20
trainp = 0.9
c1 = 0 #airplance
c2 = 8 #ship
randstates = np.arange(exps, dtype=np.int)
x_train, y_train, x_test, y_test = load('data/cifar10_transfer.pkl')
inds1 = np.where(y_train==c1)[0]
inds2 = np.where(y_train==c2)[0]
x_train = np.concatenate((x_train[inds1], x_train[inds2]), 0)
y_train = np.concatenate((np.ones((len(inds1),), np.int32), np.zeros((len(inds2),), np.int32)), 0)
inds3 = np.where(y_test==c1)[0]
inds4 = np.where(y_test==c2)[0]
x_test = np.concatenate((x_test[inds3], x_test[inds4]), 0)
y_test = np.concatenate((np.ones((len(inds3),), np.int32), np.zeros((len(inds4),), np.int32)), 0)
print(dataset, ',', np.shape(y_train)[0], ',', np.shape(y_test)[0])
for noise in [0, 5, 10, 15, 20, 25]:
for exp in range(exps):
pklfile = datadir + dataset + '/' + dataset.lower() + '_exp' + str(exp) + '_noise' + str(noise) + '.pkl'
rng = np.random.RandomState(exp)
itrain, ivalid = train_vald_split(y_train, trainp, rng, 2)
rng.shuffle(itrain)
rng.shuffle(ivalid)
trainx = x_train[itrain]
validx = x_train[ivalid]
trainry = y_train[itrain]
validry = y_train[ivalid]
trainny = confusey(trainry, noise, rng, 2)
validny = confusey(validry, noise, rng, 2)
print('train acc=', np.mean(np.equal(trainny, trainry).astype(np.float)))
print('valid acc=', np.mean(np.equal(validny, validry).astype(np.float)))
print(np.shape(trainx))
print(np.shape(trainry))
print(np.shape(trainny))
print(np.shape(validx))
print(np.shape(validry))
print(np.shape(validny))
print(np.shape(x_test))
print(np.shape(y_test))
dump(pklfile, trainx, trainny, trainry, validx, validny, validry, x_test, y_test)
print(pklfile, ' saved') |
py | 7df917a1da8c29ddf1a9e71143b9eaaa382db372 | from django.template import Library
register = Library()
from bs4 import BeautifulSoup
from django.conf import settings
from django.template import defaultfilters
from dolweb.blog.models import BlogSeries
@register.inclusion_tag('blog_chunk_series.html')
def get_recent_blog_series(number=5):
"""Return the most recent visible blog series"""
return {
'recent_series': BlogSeries.objects.filter(visible=True)[:number],
}
@register.filter
def cuthere_excerpt(content):
try:
cut_here = BeautifulSoup(content).find('a', id='cuthere')
return ''.join(map(str, reversed(cut_here.parent.find_previous_siblings())))
except AttributeError:
return defaultfilters.truncatewords(content, 100)
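# A hedged usage sketch in a Django template (the load name must match this
# templatetag module's filename on disk; "blog_tags" is a placeholder):
#
#     {% load blog_tags %}
#     {% get_recent_blog_series 3 %}
#     {{ entry.content|cuthere_excerpt }}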
|
py | 7df9187e18e59ed9353682df8da2a4c1e8efe118 | from search import Problem, breadth_first_graph_search
class MissionariesAndCannibals(Problem):
""""""
def __init__(self, initial, goal=(3, 3, 0, 0, 0)):
""" Define goal state and initialize a problem
(3, 3, 0, 0, 0)
(M, C, M, C, B)
"""
self.goal = goal
Problem.__init__(self, initial, goal)
def actions(self, state):
possible_actions = ['ML', 'MR', 'CL', 'CR', 'MML', 'MMR', 'CCL', 'CCR', 'MCL', 'MCR']
is_right_side = (state[-1] == 1)
if is_right_side:
possible_actions.remove('ML')
possible_actions.remove('CL')
possible_actions.remove('MML')
possible_actions.remove('CCL')
possible_actions.remove('MCL')
# MR
if (state[2] - 1 < 0) or (0 < state[2] - 1 < state[3]) or (0 < state[0] + 1 < state[1]):
possible_actions.remove('MR')
# MMR
if (state[2] - 2 < 0) or (0 < state[2] - 2 < state[3]) or (0 < state[0] + 2 < state[1]):
possible_actions.remove('MMR')
# MCR
if (state[2] - 1 < 0) or (state[3] - 1 < 0) or (0 < state[0] + 1 < state[1] + 1):
possible_actions.remove('MCR')
# CR
if (state[3] - 1 < 0) or (0 < state[0] < state[1] + 1):
possible_actions.remove('CR')
# CCR
if (state[3] - 2 < 0) or (0 < state[0] < state[1] + 2):
possible_actions.remove('CCR')
else:
possible_actions.remove('MR')
possible_actions.remove('CR')
possible_actions.remove('MMR')
possible_actions.remove('CCR')
possible_actions.remove('MCR')
# ML
if (state[0] - 1 < 0) or (0 < state[0] - 1 < state[1]) or (0 < state[2] + 1 < state[3]):
possible_actions.remove('ML')
# MML
if (state[0] - 2 < 0) or (0 < state[0] - 2 < state[1]) or (0 < state[2] + 2 < state[3]):
possible_actions.remove('MML')
# MCL
if (state[0] - 1 < 0) or (state[1] - 1 < 0) or (0 < state[2] + 1 < state[3] + 1):
possible_actions.remove('MCL')
# CL
if (state[1] - 1 < 0) or (0 < state[2] < state[3] + 1):
possible_actions.remove('CL')
# CCL
if (state[1] - 2 < 0) or (0 < state[2] < state[3] + 2):
possible_actions.remove('CCL')
return possible_actions
def result(self, state, action):
new_state = list(state)
if action == 'ML':
new_state[0] = new_state[0] - 1
new_state[2] = new_state[2] + 1
elif action == 'MML':
new_state[0] = new_state[0] - 2
new_state[2] = new_state[2] + 2
elif action == 'MCL':
new_state[0] = new_state[0] - 1
new_state[1] = new_state[1] - 1
new_state[2] = new_state[2] + 1
new_state[3] = new_state[3] + 1
elif action == 'CL':
new_state[1] = new_state[1] - 1
new_state[3] = new_state[3] + 1
elif action == 'CCL':
new_state[1] = new_state[1] - 2
new_state[3] = new_state[3] + 2
elif action == 'MR':
new_state[0] = new_state[0] + 1
new_state[2] = new_state[2] - 1
elif action == 'MMR':
new_state[0] = new_state[0] + 2
new_state[2] = new_state[2] - 2
elif action == 'MCR':
new_state[0] = new_state[0] + 1
new_state[1] = new_state[1] + 1
new_state[2] = new_state[2] - 1
new_state[3] = new_state[3] - 1
elif action == 'CR':
new_state[1] = new_state[1] + 1
new_state[3] = new_state[3] - 1
elif action == 'CCR':
new_state[1] = new_state[1] + 2
new_state[3] = new_state[3] - 2
new_state[4] = 1 - new_state[4]
return tuple(new_state)
def value(self, state):
pass
def goal_test(self, state):
""" Given a state, return True if state is a goal state or False, otherwise """
return state == self.goal
def solve():
missionaries_and_cannibals = MissionariesAndCannibals((0, 0, 3, 3, 1))
return breadth_first_graph_search(missionaries_and_cannibals).solution()
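# A minimal usage sketch: running the module directly prints the list of boat-move
# labels (such as 'MCR' or 'CCL') found by breadth-first graph search.
if __name__ == '__main__':
    print(solve())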
|
py | 7df918b55c1b6fd50cec266ccdd1991f4de7023c | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import os
import random
import shutil
import sys
import tempfile
import time
import unittest
if sys.version >= '3':
unicode = str
from datetime import date, datetime
from decimal import Decimal
from distutils.version import LooseVersion
from pyspark.rdd import PythonEvalType
from pyspark.sql import Column
from pyspark.sql.functions import array, col, expr, lit, sum, struct, udf, pandas_udf
from pyspark.sql.types import Row
from pyspark.sql.types import *
from pyspark.sql.utils import AnalysisException
from pyspark.testing.sqlutils import ReusedSQLTestCase, test_compiled,\
test_not_compiled_message, have_pandas, have_pyarrow, pandas_requirement_message, \
pyarrow_requirement_message
from pyspark.testing.utils import QuietTest
@unittest.skipIf(
not have_pandas or not have_pyarrow,
pandas_requirement_message or pyarrow_requirement_message)
class ScalarPandasUDFTests(ReusedSQLTestCase):
@classmethod
def setUpClass(cls):
ReusedSQLTestCase.setUpClass()
# Synchronize default timezone between Python and Java
cls.tz_prev = os.environ.get("TZ", None) # save current tz if set
tz = "America/Los_Angeles"
os.environ["TZ"] = tz
time.tzset()
cls.sc.environment["TZ"] = tz
cls.spark.conf.set("spark.sql.session.timeZone", tz)
@classmethod
def tearDownClass(cls):
del os.environ["TZ"]
if cls.tz_prev is not None:
os.environ["TZ"] = cls.tz_prev
time.tzset()
ReusedSQLTestCase.tearDownClass()
@property
def nondeterministic_vectorized_udf(self):
import pandas as pd
import numpy as np
@pandas_udf('double')
def random_udf(v):
return pd.Series(np.random.random(len(v)))
random_udf = random_udf.asNondeterministic()
return random_udf
def test_pandas_udf_tokenize(self):
tokenize = pandas_udf(lambda s: s.apply(lambda str: str.split(' ')),
ArrayType(StringType()))
self.assertEqual(tokenize.returnType, ArrayType(StringType()))
df = self.spark.createDataFrame([("hi boo",), ("bye boo",)], ["vals"])
result = df.select(tokenize("vals").alias("hi"))
self.assertEqual([Row(hi=[u'hi', u'boo']), Row(hi=[u'bye', u'boo'])], result.collect())
def test_pandas_udf_nested_arrays(self):
tokenize = pandas_udf(lambda s: s.apply(lambda str: [str.split(' ')]),
ArrayType(ArrayType(StringType())))
self.assertEqual(tokenize.returnType, ArrayType(ArrayType(StringType())))
df = self.spark.createDataFrame([("hi boo",), ("bye boo",)], ["vals"])
result = df.select(tokenize("vals").alias("hi"))
self.assertEqual([Row(hi=[[u'hi', u'boo']]), Row(hi=[[u'bye', u'boo']])], result.collect())
def test_vectorized_udf_basic(self):
df = self.spark.range(10).select(
col('id').cast('string').alias('str'),
col('id').cast('int').alias('int'),
col('id').alias('long'),
col('id').cast('float').alias('float'),
col('id').cast('double').alias('double'),
col('id').cast('decimal').alias('decimal'),
col('id').cast('boolean').alias('bool'),
array(col('id')).alias('array_long'))
f = lambda x: x
str_f = pandas_udf(f, StringType())
int_f = pandas_udf(f, IntegerType())
long_f = pandas_udf(f, LongType())
float_f = pandas_udf(f, FloatType())
double_f = pandas_udf(f, DoubleType())
decimal_f = pandas_udf(f, DecimalType())
bool_f = pandas_udf(f, BooleanType())
array_long_f = pandas_udf(f, ArrayType(LongType()))
res = df.select(str_f(col('str')), int_f(col('int')),
long_f(col('long')), float_f(col('float')),
double_f(col('double')), decimal_f('decimal'),
bool_f(col('bool')), array_long_f('array_long'))
self.assertEquals(df.collect(), res.collect())
def test_register_nondeterministic_vectorized_udf_basic(self):
random_pandas_udf = pandas_udf(
lambda x: random.randint(6, 6) + x, IntegerType()).asNondeterministic()
self.assertEqual(random_pandas_udf.deterministic, False)
self.assertEqual(random_pandas_udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
nondeterministic_pandas_udf = self.spark.catalog.registerFunction(
"randomPandasUDF", random_pandas_udf)
self.assertEqual(nondeterministic_pandas_udf.deterministic, False)
self.assertEqual(nondeterministic_pandas_udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
[row] = self.spark.sql("SELECT randomPandasUDF(1)").collect()
self.assertEqual(row[0], 7)
def test_vectorized_udf_null_boolean(self):
data = [(True,), (True,), (None,), (False,)]
schema = StructType().add("bool", BooleanType())
df = self.spark.createDataFrame(data, schema)
bool_f = pandas_udf(lambda x: x, BooleanType())
res = df.select(bool_f(col('bool')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_byte(self):
data = [(None,), (2,), (3,), (4,)]
schema = StructType().add("byte", ByteType())
df = self.spark.createDataFrame(data, schema)
byte_f = pandas_udf(lambda x: x, ByteType())
res = df.select(byte_f(col('byte')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_short(self):
data = [(None,), (2,), (3,), (4,)]
schema = StructType().add("short", ShortType())
df = self.spark.createDataFrame(data, schema)
short_f = pandas_udf(lambda x: x, ShortType())
res = df.select(short_f(col('short')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_int(self):
data = [(None,), (2,), (3,), (4,)]
schema = StructType().add("int", IntegerType())
df = self.spark.createDataFrame(data, schema)
int_f = pandas_udf(lambda x: x, IntegerType())
res = df.select(int_f(col('int')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_long(self):
data = [(None,), (2,), (3,), (4,)]
schema = StructType().add("long", LongType())
df = self.spark.createDataFrame(data, schema)
long_f = pandas_udf(lambda x: x, LongType())
res = df.select(long_f(col('long')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_float(self):
data = [(3.0,), (5.0,), (-1.0,), (None,)]
schema = StructType().add("float", FloatType())
df = self.spark.createDataFrame(data, schema)
float_f = pandas_udf(lambda x: x, FloatType())
res = df.select(float_f(col('float')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_double(self):
data = [(3.0,), (5.0,), (-1.0,), (None,)]
schema = StructType().add("double", DoubleType())
df = self.spark.createDataFrame(data, schema)
double_f = pandas_udf(lambda x: x, DoubleType())
res = df.select(double_f(col('double')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_decimal(self):
data = [(Decimal(3.0),), (Decimal(5.0),), (Decimal(-1.0),), (None,)]
schema = StructType().add("decimal", DecimalType(38, 18))
df = self.spark.createDataFrame(data, schema)
decimal_f = pandas_udf(lambda x: x, DecimalType(38, 18))
res = df.select(decimal_f(col('decimal')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_string(self):
data = [("foo",), (None,), ("bar",), ("bar",)]
schema = StructType().add("str", StringType())
df = self.spark.createDataFrame(data, schema)
str_f = pandas_udf(lambda x: x, StringType())
res = df.select(str_f(col('str')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_string_in_udf(self):
import pandas as pd
df = self.spark.range(10)
str_f = pandas_udf(lambda x: pd.Series(map(str, x)), StringType())
actual = df.select(str_f(col('id')))
expected = df.select(col('id').cast('string'))
self.assertEquals(expected.collect(), actual.collect())
def test_vectorized_udf_datatype_string(self):
df = self.spark.range(10).select(
col('id').cast('string').alias('str'),
col('id').cast('int').alias('int'),
col('id').alias('long'),
col('id').cast('float').alias('float'),
col('id').cast('double').alias('double'),
col('id').cast('decimal').alias('decimal'),
col('id').cast('boolean').alias('bool'))
f = lambda x: x
str_f = pandas_udf(f, 'string')
int_f = pandas_udf(f, 'integer')
long_f = pandas_udf(f, 'long')
float_f = pandas_udf(f, 'float')
double_f = pandas_udf(f, 'double')
decimal_f = pandas_udf(f, 'decimal(38, 18)')
bool_f = pandas_udf(f, 'boolean')
res = df.select(str_f(col('str')), int_f(col('int')),
long_f(col('long')), float_f(col('float')),
double_f(col('double')), decimal_f('decimal'),
bool_f(col('bool')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_binary(self):
import pyarrow as pa
if LooseVersion(pa.__version__) < LooseVersion("0.10.0"):
with QuietTest(self.sc):
with self.assertRaisesRegexp(
NotImplementedError,
'Invalid returnType.*scalar Pandas UDF.*BinaryType'):
pandas_udf(lambda x: x, BinaryType())
else:
data = [(bytearray(b"a"),), (None,), (bytearray(b"bb"),), (bytearray(b"ccc"),)]
schema = StructType().add("binary", BinaryType())
df = self.spark.createDataFrame(data, schema)
str_f = pandas_udf(lambda x: x, BinaryType())
res = df.select(str_f(col('binary')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_array_type(self):
data = [([1, 2],), ([3, 4],)]
array_schema = StructType([StructField("array", ArrayType(IntegerType()))])
df = self.spark.createDataFrame(data, schema=array_schema)
array_f = pandas_udf(lambda x: x, ArrayType(IntegerType()))
result = df.select(array_f(col('array')))
self.assertEquals(df.collect(), result.collect())
def test_vectorized_udf_null_array(self):
data = [([1, 2],), (None,), (None,), ([3, 4],), (None,)]
array_schema = StructType([StructField("array", ArrayType(IntegerType()))])
df = self.spark.createDataFrame(data, schema=array_schema)
array_f = pandas_udf(lambda x: x, ArrayType(IntegerType()))
result = df.select(array_f(col('array')))
self.assertEquals(df.collect(), result.collect())
def test_vectorized_udf_struct_type(self):
import pandas as pd
import pyarrow as pa
df = self.spark.range(10)
return_type = StructType([
StructField('id', LongType()),
StructField('str', StringType())])
def func(id):
return pd.DataFrame({'id': id, 'str': id.apply(unicode)})
f = pandas_udf(func, returnType=return_type)
expected = df.select(struct(col('id'), col('id').cast('string').alias('str'))
.alias('struct')).collect()
actual = df.select(f(col('id')).alias('struct')).collect()
self.assertEqual(expected, actual)
g = pandas_udf(func, 'id: long, str: string')
actual = df.select(g(col('id')).alias('struct')).collect()
self.assertEqual(expected, actual)
struct_f = pandas_udf(lambda x: x, return_type)
actual = df.select(struct_f(struct(col('id'), col('id').cast('string').alias('str'))))
if LooseVersion(pa.__version__) < LooseVersion("0.10.0"):
with QuietTest(self.sc):
from py4j.protocol import Py4JJavaError
with self.assertRaisesRegexp(
Py4JJavaError,
'Unsupported type in conversion from Arrow'):
self.assertEqual(expected, actual.collect())
else:
self.assertEqual(expected, actual.collect())
def test_vectorized_udf_struct_complex(self):
import pandas as pd
df = self.spark.range(10)
return_type = StructType([
StructField('ts', TimestampType()),
StructField('arr', ArrayType(LongType()))])
@pandas_udf(returnType=return_type)
def f(id):
return pd.DataFrame({'ts': id.apply(lambda i: pd.Timestamp(i)),
'arr': id.apply(lambda i: [i, i + 1])})
actual = df.withColumn('f', f(col('id'))).collect()
for i, row in enumerate(actual):
id, f = row
self.assertEqual(i, id)
self.assertEqual(pd.Timestamp(i).to_pydatetime(), f[0])
self.assertListEqual([i, i + 1], f[1])
def test_vectorized_udf_nested_struct(self):
nested_type = StructType([
StructField('id', IntegerType()),
StructField('nested', StructType([
StructField('foo', StringType()),
StructField('bar', FloatType())
]))
])
with QuietTest(self.sc):
with self.assertRaisesRegexp(
Exception,
'Invalid returnType with scalar Pandas UDFs'):
pandas_udf(lambda x: x, returnType=nested_type)
def test_vectorized_udf_complex(self):
df = self.spark.range(10).select(
col('id').cast('int').alias('a'),
col('id').cast('int').alias('b'),
col('id').cast('double').alias('c'))
add = pandas_udf(lambda x, y: x + y, IntegerType())
power2 = pandas_udf(lambda x: 2 ** x, IntegerType())
mul = pandas_udf(lambda x, y: x * y, DoubleType())
res = df.select(add(col('a'), col('b')), power2(col('a')), mul(col('b'), col('c')))
expected = df.select(expr('a + b'), expr('power(2, a)'), expr('b * c'))
self.assertEquals(expected.collect(), res.collect())
def test_vectorized_udf_exception(self):
df = self.spark.range(10)
raise_exception = pandas_udf(lambda x: x * (1 / 0), LongType())
with QuietTest(self.sc):
with self.assertRaisesRegexp(Exception, 'division( or modulo)? by zero'):
df.select(raise_exception(col('id'))).collect()
def test_vectorized_udf_invalid_length(self):
import pandas as pd
df = self.spark.range(10)
raise_exception = pandas_udf(lambda _: pd.Series(1), LongType())
with QuietTest(self.sc):
with self.assertRaisesRegexp(
Exception,
'Result vector from pandas_udf was not the required length'):
df.select(raise_exception(col('id'))).collect()
def test_vectorized_udf_chained(self):
df = self.spark.range(10)
f = pandas_udf(lambda x: x + 1, LongType())
g = pandas_udf(lambda x: x - 1, LongType())
res = df.select(g(f(col('id'))))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_chained_struct_type(self):
import pandas as pd
df = self.spark.range(10)
return_type = StructType([
StructField('id', LongType()),
StructField('str', StringType())])
@pandas_udf(return_type)
def f(id):
return pd.DataFrame({'id': id, 'str': id.apply(unicode)})
g = pandas_udf(lambda x: x, return_type)
expected = df.select(struct(col('id'), col('id').cast('string').alias('str'))
.alias('struct')).collect()
actual = df.select(g(f(col('id'))).alias('struct')).collect()
self.assertEqual(expected, actual)
def test_vectorized_udf_wrong_return_type(self):
with QuietTest(self.sc):
with self.assertRaisesRegexp(
NotImplementedError,
'Invalid returnType.*scalar Pandas UDF.*MapType'):
pandas_udf(lambda x: x * 1.0, MapType(LongType(), LongType()))
def test_vectorized_udf_return_scalar(self):
df = self.spark.range(10)
f = pandas_udf(lambda x: 1.0, DoubleType())
with QuietTest(self.sc):
with self.assertRaisesRegexp(Exception, 'Return.*type.*Series'):
df.select(f(col('id'))).collect()
def test_vectorized_udf_decorator(self):
df = self.spark.range(10)
@pandas_udf(returnType=LongType())
def identity(x):
return x
res = df.select(identity(col('id')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_empty_partition(self):
df = self.spark.createDataFrame(self.sc.parallelize([Row(id=1)], 2))
f = pandas_udf(lambda x: x, LongType())
res = df.select(f(col('id')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_struct_with_empty_partition(self):
df = self.spark.createDataFrame(self.sc.parallelize([Row(id=1)], 2))\
.withColumn('name', lit('John Doe'))
@pandas_udf("first string, last string")
def split_expand(n):
return n.str.split(expand=True)
result = df.select(split_expand('name')).collect()
self.assertEqual(1, len(result))
row = result[0]
self.assertEqual('John', row[0]['first'])
self.assertEqual('Doe', row[0]['last'])
def test_vectorized_udf_varargs(self):
df = self.spark.createDataFrame(self.sc.parallelize([Row(id=1)], 2))
f = pandas_udf(lambda *v: v[0], LongType())
res = df.select(f(col('id')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_unsupported_types(self):
with QuietTest(self.sc):
with self.assertRaisesRegexp(
NotImplementedError,
'Invalid returnType.*scalar Pandas UDF.*MapType'):
pandas_udf(lambda x: x, MapType(StringType(), IntegerType()))
with self.assertRaisesRegexp(
NotImplementedError,
'Invalid returnType.*scalar Pandas UDF.*ArrayType.StructType'):
pandas_udf(lambda x: x, ArrayType(StructType([StructField('a', IntegerType())])))
def test_vectorized_udf_dates(self):
schema = StructType().add("idx", LongType()).add("date", DateType())
data = [(0, date(1969, 1, 1),),
(1, date(2012, 2, 2),),
(2, None,),
(3, date(2100, 4, 4),),
(4, date(2262, 4, 12),)]
df = self.spark.createDataFrame(data, schema=schema)
date_copy = pandas_udf(lambda t: t, returnType=DateType())
df = df.withColumn("date_copy", date_copy(col("date")))
@pandas_udf(returnType=StringType())
def check_data(idx, date, date_copy):
import pandas as pd
msgs = []
is_equal = date.isnull()
for i in range(len(idx)):
if (is_equal[i] and data[idx[i]][1] is None) or \
date[i] == data[idx[i]][1]:
msgs.append(None)
else:
msgs.append(
"date values are not equal (date='%s': data[%d][1]='%s')"
% (date[i], idx[i], data[idx[i]][1]))
return pd.Series(msgs)
result = df.withColumn("check_data",
check_data(col("idx"), col("date"), col("date_copy"))).collect()
self.assertEquals(len(data), len(result))
for i in range(len(result)):
self.assertEquals(data[i][1], result[i][1]) # "date" col
self.assertEquals(data[i][1], result[i][2]) # "date_copy" col
self.assertIsNone(result[i][3]) # "check_data" col
def test_vectorized_udf_timestamps(self):
schema = StructType([
StructField("idx", LongType(), True),
StructField("timestamp", TimestampType(), True)])
data = [(0, datetime(1969, 1, 1, 1, 1, 1)),
(1, datetime(2012, 2, 2, 2, 2, 2)),
(2, None),
(3, datetime(2100, 3, 3, 3, 3, 3))]
df = self.spark.createDataFrame(data, schema=schema)
# Check that a timestamp passed through a pandas_udf will not be altered by timezone calc
f_timestamp_copy = pandas_udf(lambda t: t, returnType=TimestampType())
df = df.withColumn("timestamp_copy", f_timestamp_copy(col("timestamp")))
@pandas_udf(returnType=StringType())
def check_data(idx, timestamp, timestamp_copy):
import pandas as pd
msgs = []
is_equal = timestamp.isnull() # use this array to check values are equal
for i in range(len(idx)):
# Check that timestamps are as expected in the UDF
if (is_equal[i] and data[idx[i]][1] is None) or \
timestamp[i].to_pydatetime() == data[idx[i]][1]:
msgs.append(None)
else:
msgs.append(
"timestamp values are not equal (timestamp='%s': data[%d][1]='%s')"
% (timestamp[i], idx[i], data[idx[i]][1]))
return pd.Series(msgs)
result = df.withColumn("check_data", check_data(col("idx"), col("timestamp"),
col("timestamp_copy"))).collect()
# Check that collection values are correct
self.assertEquals(len(data), len(result))
for i in range(len(result)):
self.assertEquals(data[i][1], result[i][1]) # "timestamp" col
self.assertEquals(data[i][1], result[i][2]) # "timestamp_copy" col
self.assertIsNone(result[i][3]) # "check_data" col
def test_vectorized_udf_return_timestamp_tz(self):
import pandas as pd
df = self.spark.range(10)
@pandas_udf(returnType=TimestampType())
def gen_timestamps(id):
ts = [pd.Timestamp(i, unit='D', tz='America/Los_Angeles') for i in id]
return pd.Series(ts)
result = df.withColumn("ts", gen_timestamps(col("id"))).collect()
spark_ts_t = TimestampType()
for r in result:
i, ts = r
ts_tz = pd.Timestamp(i, unit='D', tz='America/Los_Angeles').to_pydatetime()
expected = spark_ts_t.fromInternal(spark_ts_t.toInternal(ts_tz))
self.assertEquals(expected, ts)
def test_vectorized_udf_check_config(self):
import pandas as pd
with self.sql_conf({"spark.sql.execution.arrow.maxRecordsPerBatch": 3}):
df = self.spark.range(10, numPartitions=1)
@pandas_udf(returnType=LongType())
def check_records_per_batch(x):
return pd.Series(x.size).repeat(x.size)
result = df.select(check_records_per_batch(col("id"))).collect()
for (r,) in result:
self.assertTrue(r <= 3)
def test_vectorized_udf_timestamps_respect_session_timezone(self):
import pandas as pd
schema = StructType([
StructField("idx", LongType(), True),
StructField("timestamp", TimestampType(), True)])
data = [(1, datetime(1969, 1, 1, 1, 1, 1)),
(2, datetime(2012, 2, 2, 2, 2, 2)),
(3, None),
(4, datetime(2100, 3, 3, 3, 3, 3))]
df = self.spark.createDataFrame(data, schema=schema)
f_timestamp_copy = pandas_udf(lambda ts: ts, TimestampType())
internal_value = pandas_udf(
lambda ts: ts.apply(lambda ts: ts.value if ts is not pd.NaT else None), LongType())
timezone = "America/New_York"
with self.sql_conf({
"spark.sql.execution.pandas.respectSessionTimeZone": False,
"spark.sql.session.timeZone": timezone}):
df_la = df.withColumn("tscopy", f_timestamp_copy(col("timestamp"))) \
.withColumn("internal_value", internal_value(col("timestamp")))
result_la = df_la.select(col("idx"), col("internal_value")).collect()
# Correct result_la by adjusting 3 hours difference between Los Angeles and New York
diff = 3 * 60 * 60 * 1000 * 1000 * 1000
result_la_corrected = \
df_la.select(col("idx"), col("tscopy"), col("internal_value") + diff).collect()
with self.sql_conf({
"spark.sql.execution.pandas.respectSessionTimeZone": True,
"spark.sql.session.timeZone": timezone}):
df_ny = df.withColumn("tscopy", f_timestamp_copy(col("timestamp"))) \
.withColumn("internal_value", internal_value(col("timestamp")))
result_ny = df_ny.select(col("idx"), col("tscopy"), col("internal_value")).collect()
self.assertNotEqual(result_ny, result_la)
self.assertEqual(result_ny, result_la_corrected)
def test_nondeterministic_vectorized_udf(self):
# Test that nondeterministic UDFs are evaluated only once in chained UDF evaluations
@pandas_udf('double')
def plus_ten(v):
return v + 10
random_udf = self.nondeterministic_vectorized_udf
df = self.spark.range(10).withColumn('rand', random_udf(col('id')))
result1 = df.withColumn('plus_ten(rand)', plus_ten(df['rand'])).toPandas()
self.assertEqual(random_udf.deterministic, False)
self.assertTrue(result1['plus_ten(rand)'].equals(result1['rand'] + 10))
def test_nondeterministic_vectorized_udf_in_aggregate(self):
df = self.spark.range(10)
random_udf = self.nondeterministic_vectorized_udf
with QuietTest(self.sc):
with self.assertRaisesRegexp(AnalysisException, 'nondeterministic'):
df.groupby(df.id).agg(sum(random_udf(df.id))).collect()
with self.assertRaisesRegexp(AnalysisException, 'nondeterministic'):
df.agg(sum(random_udf(df.id))).collect()
def test_register_vectorized_udf_basic(self):
df = self.spark.range(10).select(
col('id').cast('int').alias('a'),
col('id').cast('int').alias('b'))
original_add = pandas_udf(lambda x, y: x + y, IntegerType())
self.assertEqual(original_add.deterministic, True)
self.assertEqual(original_add.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
new_add = self.spark.catalog.registerFunction("add1", original_add)
res1 = df.select(new_add(col('a'), col('b')))
res2 = self.spark.sql(
"SELECT add1(t.a, t.b) FROM (SELECT id as a, id as b FROM range(10)) t")
expected = df.select(expr('a + b'))
self.assertEquals(expected.collect(), res1.collect())
self.assertEquals(expected.collect(), res2.collect())
# Regression test for SPARK-23314
def test_timestamp_dst(self):
# Daylight saving time for Los Angeles for 2015 is Sun, Nov 1 at 2:00 am
dt = [datetime(2015, 11, 1, 0, 30),
datetime(2015, 11, 1, 1, 30),
datetime(2015, 11, 1, 2, 30)]
df = self.spark.createDataFrame(dt, 'timestamp').toDF('time')
foo_udf = pandas_udf(lambda x: x, 'timestamp')
result = df.withColumn('time', foo_udf(df.time))
self.assertEquals(df.collect(), result.collect())
@unittest.skipIf(sys.version_info[:2] < (3, 5), "Type hints are supported from Python 3.5.")
def test_type_annotation(self):
from pyspark.sql.functions import pandas_udf
# Regression test to check if type hints can be used. See SPARK-23569.
# Note that it throws an error during compilation in lower Python versions if 'exec'
# is not used. Also, note that we explicitly use another dictionary to avoid modifications
# in the current 'locals()'.
#
# Hyukjin: I think it's an ugly way to test issues about syntax specific in
# higher versions of Python, which we shouldn't encourage. This was the last resort
# I could come up with at that time.
_locals = {}
exec(
"import pandas as pd\ndef noop(col: pd.Series) -> pd.Series: return col",
_locals)
df = self.spark.range(1).select(pandas_udf(f=_locals['noop'], returnType='bigint')('id'))
self.assertEqual(df.first()[0], 0)
def test_mixed_udf(self):
import pandas as pd
df = self.spark.range(0, 1).toDF('v')
# Test mixture of multiple UDFs and Pandas UDFs.
@udf('int')
def f1(x):
assert type(x) == int
return x + 1
@pandas_udf('int')
def f2(x):
assert type(x) == pd.Series
return x + 10
@udf('int')
def f3(x):
assert type(x) == int
return x + 100
@pandas_udf('int')
def f4(x):
assert type(x) == pd.Series
return x + 1000
# Test single expression with chained UDFs
df_chained_1 = df.withColumn('f2_f1', f2(f1(df['v'])))
df_chained_2 = df.withColumn('f3_f2_f1', f3(f2(f1(df['v']))))
df_chained_3 = df.withColumn('f4_f3_f2_f1', f4(f3(f2(f1(df['v'])))))
df_chained_4 = df.withColumn('f4_f2_f1', f4(f2(f1(df['v']))))
df_chained_5 = df.withColumn('f4_f3_f1', f4(f3(f1(df['v']))))
expected_chained_1 = df.withColumn('f2_f1', df['v'] + 11)
expected_chained_2 = df.withColumn('f3_f2_f1', df['v'] + 111)
expected_chained_3 = df.withColumn('f4_f3_f2_f1', df['v'] + 1111)
expected_chained_4 = df.withColumn('f4_f2_f1', df['v'] + 1011)
expected_chained_5 = df.withColumn('f4_f3_f1', df['v'] + 1101)
self.assertEquals(expected_chained_1.collect(), df_chained_1.collect())
self.assertEquals(expected_chained_2.collect(), df_chained_2.collect())
self.assertEquals(expected_chained_3.collect(), df_chained_3.collect())
self.assertEquals(expected_chained_4.collect(), df_chained_4.collect())
self.assertEquals(expected_chained_5.collect(), df_chained_5.collect())
# Test multiple mixed UDF expressions in a single projection
df_multi_1 = df \
.withColumn('f1', f1(col('v'))) \
.withColumn('f2', f2(col('v'))) \
.withColumn('f3', f3(col('v'))) \
.withColumn('f4', f4(col('v'))) \
.withColumn('f2_f1', f2(col('f1'))) \
.withColumn('f3_f1', f3(col('f1'))) \
.withColumn('f4_f1', f4(col('f1'))) \
.withColumn('f3_f2', f3(col('f2'))) \
.withColumn('f4_f2', f4(col('f2'))) \
.withColumn('f4_f3', f4(col('f3'))) \
.withColumn('f3_f2_f1', f3(col('f2_f1'))) \
.withColumn('f4_f2_f1', f4(col('f2_f1'))) \
.withColumn('f4_f3_f1', f4(col('f3_f1'))) \
.withColumn('f4_f3_f2', f4(col('f3_f2'))) \
.withColumn('f4_f3_f2_f1', f4(col('f3_f2_f1')))
# Test mixed udfs in a single expression
df_multi_2 = df \
.withColumn('f1', f1(col('v'))) \
.withColumn('f2', f2(col('v'))) \
.withColumn('f3', f3(col('v'))) \
.withColumn('f4', f4(col('v'))) \
.withColumn('f2_f1', f2(f1(col('v')))) \
.withColumn('f3_f1', f3(f1(col('v')))) \
.withColumn('f4_f1', f4(f1(col('v')))) \
.withColumn('f3_f2', f3(f2(col('v')))) \
.withColumn('f4_f2', f4(f2(col('v')))) \
.withColumn('f4_f3', f4(f3(col('v')))) \
.withColumn('f3_f2_f1', f3(f2(f1(col('v'))))) \
.withColumn('f4_f2_f1', f4(f2(f1(col('v'))))) \
.withColumn('f4_f3_f1', f4(f3(f1(col('v'))))) \
.withColumn('f4_f3_f2', f4(f3(f2(col('v'))))) \
.withColumn('f4_f3_f2_f1', f4(f3(f2(f1(col('v'))))))
expected = df \
.withColumn('f1', df['v'] + 1) \
.withColumn('f2', df['v'] + 10) \
.withColumn('f3', df['v'] + 100) \
.withColumn('f4', df['v'] + 1000) \
.withColumn('f2_f1', df['v'] + 11) \
.withColumn('f3_f1', df['v'] + 101) \
.withColumn('f4_f1', df['v'] + 1001) \
.withColumn('f3_f2', df['v'] + 110) \
.withColumn('f4_f2', df['v'] + 1010) \
.withColumn('f4_f3', df['v'] + 1100) \
.withColumn('f3_f2_f1', df['v'] + 111) \
.withColumn('f4_f2_f1', df['v'] + 1011) \
.withColumn('f4_f3_f1', df['v'] + 1101) \
.withColumn('f4_f3_f2', df['v'] + 1110) \
.withColumn('f4_f3_f2_f1', df['v'] + 1111)
self.assertEquals(expected.collect(), df_multi_1.collect())
self.assertEquals(expected.collect(), df_multi_2.collect())
def test_mixed_udf_and_sql(self):
import pandas as pd
df = self.spark.range(0, 1).toDF('v')
# Test mixture of UDFs, Pandas UDFs and SQL expression.
@udf('int')
def f1(x):
assert type(x) == int
return x + 1
def f2(x):
assert type(x) == Column
return x + 10
@pandas_udf('int')
def f3(x):
assert type(x) == pd.Series
return x + 100
df1 = df.withColumn('f1', f1(df['v'])) \
.withColumn('f2', f2(df['v'])) \
.withColumn('f3', f3(df['v'])) \
.withColumn('f1_f2', f1(f2(df['v']))) \
.withColumn('f1_f3', f1(f3(df['v']))) \
.withColumn('f2_f1', f2(f1(df['v']))) \
.withColumn('f2_f3', f2(f3(df['v']))) \
.withColumn('f3_f1', f3(f1(df['v']))) \
.withColumn('f3_f2', f3(f2(df['v']))) \
.withColumn('f1_f2_f3', f1(f2(f3(df['v'])))) \
.withColumn('f1_f3_f2', f1(f3(f2(df['v'])))) \
.withColumn('f2_f1_f3', f2(f1(f3(df['v'])))) \
.withColumn('f2_f3_f1', f2(f3(f1(df['v'])))) \
.withColumn('f3_f1_f2', f3(f1(f2(df['v'])))) \
.withColumn('f3_f2_f1', f3(f2(f1(df['v']))))
expected = df.withColumn('f1', df['v'] + 1) \
.withColumn('f2', df['v'] + 10) \
.withColumn('f3', df['v'] + 100) \
.withColumn('f1_f2', df['v'] + 11) \
.withColumn('f1_f3', df['v'] + 101) \
.withColumn('f2_f1', df['v'] + 11) \
.withColumn('f2_f3', df['v'] + 110) \
.withColumn('f3_f1', df['v'] + 101) \
.withColumn('f3_f2', df['v'] + 110) \
.withColumn('f1_f2_f3', df['v'] + 111) \
.withColumn('f1_f3_f2', df['v'] + 111) \
.withColumn('f2_f1_f3', df['v'] + 111) \
.withColumn('f2_f3_f1', df['v'] + 111) \
.withColumn('f3_f1_f2', df['v'] + 111) \
.withColumn('f3_f2_f1', df['v'] + 111)
self.assertEquals(expected.collect(), df1.collect())
# SPARK-24721
@unittest.skipIf(not test_compiled, test_not_compiled_message)
def test_datasource_with_udf(self):
# Same as SQLTests.test_datasource_with_udf, but with Pandas UDF
# This needs to a separate test because Arrow dependency is optional
import pandas as pd
import numpy as np
path = tempfile.mkdtemp()
shutil.rmtree(path)
try:
self.spark.range(1).write.mode("overwrite").format('csv').save(path)
filesource_df = self.spark.read.option('inferSchema', True).csv(path).toDF('i')
datasource_df = self.spark.read \
.format("org.apache.spark.sql.sources.SimpleScanSource") \
.option('from', 0).option('to', 1).load().toDF('i')
datasource_v2_df = self.spark.read \
.format("org.apache.spark.sql.sources.v2.SimpleDataSourceV2") \
.load().toDF('i', 'j')
c1 = pandas_udf(lambda x: x + 1, 'int')(lit(1))
c2 = pandas_udf(lambda x: x + 1, 'int')(col('i'))
f1 = pandas_udf(lambda x: pd.Series(np.repeat(False, len(x))), 'boolean')(lit(1))
f2 = pandas_udf(lambda x: pd.Series(np.repeat(False, len(x))), 'boolean')(col('i'))
for df in [filesource_df, datasource_df, datasource_v2_df]:
result = df.withColumn('c', c1)
expected = df.withColumn('c', lit(2))
self.assertEquals(expected.collect(), result.collect())
for df in [filesource_df, datasource_df, datasource_v2_df]:
result = df.withColumn('c', c2)
expected = df.withColumn('c', col('i') + 1)
self.assertEquals(expected.collect(), result.collect())
for df in [filesource_df, datasource_df, datasource_v2_df]:
for f in [f1, f2]:
result = df.filter(f)
self.assertEquals(0, result.count())
finally:
shutil.rmtree(path)
if __name__ == "__main__":
from pyspark.sql.tests.test_pandas_udf_scalar import *
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
py | 7df91b11252856f2fc01a573ce58b37e4ddcce82 | import pytest
from nebullvm.base import DeepLearningFramework
from nebullvm.inference_learners.tvm import TVM_INFERENCE_LEARNERS
from nebullvm.optimizers import ApacheTVMOptimizer
from nebullvm.optimizers.tests.utils import get_onnx_model
@pytest.mark.parametrize("output_library", [DeepLearningFramework.PYTORCH])
def test_tvm(output_library: DeepLearningFramework):
model_path, model_params = get_onnx_model()
optimizer = ApacheTVMOptimizer()
model = optimizer.optimize(model_path, output_library, model_params)
assert isinstance(model, TVM_INFERENCE_LEARNERS[output_library])
res = model.predict(*model.get_inputs_example())
assert res is not None
|
py | 7df91ba65d0feaa96d9b10a242d0c2ccc0982793 | #int(number) : returns only the integer part
# round(number, ndigits): returns number rounded to ndigits after the decimal point. If ndigits is not given, it returns the nearest integer to its input.
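# A quick illustration of the difference (added for clarity, not part of the original script):
# int(3.7) -> 3 and int(-3.7) -> -3 (truncation toward zero), while round(3.7) -> 4
# and round(2.5) -> 2, because Python 3 rounds ties to the nearest even integer.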
def roundOffNumber(number):
integerNumber = int(number)
roundNumber = round(number)
print("Number", number, "can be converted to integer in two ways as ",integerNumber,"and ",roundNumber)
roundNumber = round(number,3)
print("Number",number,"rounded off number after 3 digits is: ",roundNumber)
realNumber = float(input("Enter a real Number: "))
roundOffNumber(realNumber) |
py | 7df91d15ec7f83b9277e8ba2def350f499cfbfd0 | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""process dataset"""
import numpy as np
from PIL import Image, ImageOps
def fill_fix_offset(more_fix_crop, image_w, image_h, crop_w, crop_h):
"""locate crop location"""
w_step = (image_w - crop_w) // 4
h_step = (image_h - crop_h) // 4
ret = list()
ret.append((0, 0)) # upper left
ret.append((4 * w_step, 0)) # upper right
ret.append((0, 4 * h_step)) # lower left
ret.append((4 * w_step, 4 * h_step)) # lower right
ret.append((2 * w_step, 2 * h_step)) # center
if more_fix_crop:
ret.append((0, 2 * h_step)) # center left
ret.append((4 * w_step, 2 * h_step)) # center right
ret.append((2 * w_step, 4 * h_step)) # lower center
ret.append((2 * w_step, 0 * h_step)) # upper center
ret.append((1 * w_step, 1 * h_step)) # upper left quarter
ret.append((3 * w_step, 1 * h_step)) # upper right quarter
ret.append((1 * w_step, 3 * h_step)) # lower left quarter
        ret.append((3 * w_step, 3 * h_step)) # lower right quarter
return ret
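# Worked example (illustrative only, not part of the original file): for a 340x256 frame and a
# 224x224 crop, w_step = (340 - 224) // 4 = 29 and h_step = (256 - 224) // 4 = 8, so
# fill_fix_offset(False, 340, 256, 224, 224) returns
# [(0, 0), (116, 0), (0, 32), (116, 32), (58, 16)],
# i.e. the four corner crop origins plus the center crop origin.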
class GroupCenterCrop:
"""GroupCenterCrop"""
def __init__(self, size):
self.size = size
def __call__(self, img_group):
images = []
for img in img_group:
width, height = img.size
left = (width - self.size)/2
top = (height - self.size)/2
right = (width + self.size)/2
bottom = (height + self.size)/2
images.append(img.crop((left, top, right, bottom)))
return images
class GroupNormalize:
"""GroupNormalize"""
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, tensor):
rep_mean = self.mean * (tensor.shape[0]//len(self.mean))
rep_std = self.std * (tensor.shape[0]//len(self.std))
# TODO: make efficient
for i, _ in enumerate(tensor):
tensor[i] = (tensor[i]-rep_mean[i])/rep_std[i]
return tensor
class GroupScale:
""" Rescales the input PIL.Image to the given 'size'.
'size' will be the size of the smaller edge.
For example, if height > width, then image will be
rescaled to (size * height / width, size)
size: size of the smaller edge
interpolation: Default: PIL.Image.BILINEAR
"""
def __init__(self, size, interpolation=Image.BILINEAR):
self.size = size
self.interpolation = interpolation
def __call__(self, img_group):
images = []
for img in img_group:
w, h = img.size
if w > h:
s = (int(self.size * w / h), self.size)
else:
s = (self.size, int(self.size * h / w))
images.append(img.resize(s, self.interpolation))
return images
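# Illustrative example (not part of the original file): with GroupScale(256), a 640x480
# (width x height) frame has w > h, so it is resized to (int(256 * 640 / 480), 256) = (341, 256);
# the smaller edge becomes 256 while the aspect ratio is preserved.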
class GroupOverSample:
"""GroupOverSample"""
def __init__(self, crop_size, scale_size=None):
self.crop_size = crop_size if not isinstance(crop_size, int) else (crop_size, crop_size)
if scale_size is not None:
self.scale_worker = GroupScale(scale_size)
else:
self.scale_worker = None
def __call__(self, img_group):
if self.scale_worker is not None:
img_group = self.scale_worker(img_group)
image_w, image_h = img_group[0].size
crop_w, crop_h = self.crop_size
offsets = fill_fix_offset(False, image_w, image_h, crop_w, crop_h)
#print(offsets)
oversample_group = list()
for o_w, o_h in offsets:
normal_group = list()
flip_group = list()
for i, img in enumerate(img_group):
crop = img.crop((o_w, o_h, o_w + crop_w, o_h + crop_h))
normal_group.append(crop)
flip_crop = crop.copy().transpose(Image.FLIP_LEFT_RIGHT)
if img.mode == 'L' and i % 2 == 0:
flip_group.append(ImageOps.invert(flip_crop))
else:
flip_group.append(flip_crop)
oversample_group.extend(normal_group)
oversample_group.extend(flip_group)
return oversample_group
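# Note added for clarity (not in the original file): with more_fix_crop=False there are 5 crop
# offsets, and every input frame contributes each crop plus its horizontal flip, so a group of
# N frames yields 10 * N crops in total.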
class Stack:
"""Stack"""
def __init__(self, roll=False):
self.roll = roll
def __call__(self, img_group):
output = []
if img_group[0].mode == 'L':
output = np.concatenate([np.expand_dims(x, 2) for x in img_group], axis=2)
elif img_group[0].mode == 'RGB':
if self.roll:
output = np.concatenate([np.array(x)[:, :, ::-1] for x in img_group], axis=2)
else:
output = np.concatenate(img_group, axis=2)
return output
class ToTorchFormatTensor:
""" Converts a PIL.Image (RGB) or numpy.ndarray (H x W x C) in the range [0, 255]
to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0] """
def __init__(self, div=True):
self.div_sign = div
def __call__(self, pic):
if isinstance(pic, np.ndarray):
# handle numpy array
pic = np.array(pic, np.float32)
pic = np.ascontiguousarray(pic)
img = pic.transpose((2, 0, 1))
        else:
            # handle PIL Image: capture the size and channel count before converting,
            # because a plain numpy array does not expose PIL's .size/.mode attributes
            width, height = pic.size
            num_channels = len(pic.mode)
            pic = np.array(pic, np.float32)
            pic = np.ascontiguousarray(pic)
            img = pic.reshape(height, width, num_channels)
            img = img.transpose((2, 0, 1))
return img/255. if self.div_sign else img
|
py | 7df91e6e4feb61bde3a3f03fb9709be1ff5d7490 | # coding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Samplers. They define how the samples in a dataset will be iterated
(e.g. in the order sorted by length). They can also be used to perform bucketing
for speeding up the processing of variable-length sequences."""
__all__ = ['SortedSampler', 'FixedBucketSampler', 'SortedBucketSampler']
import warnings
import numpy as np
from mxnet.gluon.data import Sampler
def _match_bucket_keys(bucket_keys, seq_lengths):
bucket_key_npy = np.array(bucket_keys, dtype=np.int32)
bucket_sample_ids = [list() for _ in range(len(bucket_keys))]
batch_size = 10000
bucket_key_npy = bucket_key_npy.reshape((1,) + bucket_key_npy.shape)
for begin in range(0, len(seq_lengths), batch_size):
end = min(begin + batch_size, len(seq_lengths))
diff = bucket_key_npy - np.expand_dims(seq_lengths[begin:end], axis=1)
if diff.ndim == 3:
is_valid_bucket = np.prod(diff >= 0, axis=2)
pad_val = np.sum(diff, axis=2)
else:
is_valid_bucket = diff >= 0
pad_val = diff
seq_ids_not_found = np.nonzero(is_valid_bucket.sum(axis=1) == 0)[0].tolist()
masked_pad_val = np.ma.array(pad_val, mask=1 - is_valid_bucket)
batch_bucket_id = masked_pad_val.argmin(axis=1).tolist()
if len(seq_ids_not_found) > 0:
raise ValueError('Find elements in seq_lengths that cannot fit in the '
'given buckets, seq_length=%s, bucket_keys=%s. ' \
'You must increase the bucket size.'
% (str(seq_lengths[seq_ids_not_found]), str(bucket_keys)))
for i, bucket_id in enumerate(batch_bucket_id):
bucket_sample_ids[bucket_id].append(i + begin)
return bucket_sample_ids
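# Small worked example (illustrative only, not part of the original module):
# with bucket_keys = [10, 20, 30] and seq_lengths = np.array([5, 12, 25]),
# a length-5 sample can pad to any bucket (pads [5, 15, 25]), a length-12 sample only fits
# buckets 20 and 30 (pads [8, 18]) and a length-25 sample only fits bucket 30 (pad [5]);
# taking the smallest valid pad gives
# _match_bucket_keys([10, 20, 30], np.array([5, 12, 25])) == [[0], [1], [2]]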
def _bucket_average_lengths(bucket_sample_ids, seq_lengths):
bucket_average_lengths = []
for sample_ids in bucket_sample_ids:
if len(sample_ids) > 0:
bucket_average_lengths.append(np.mean(seq_lengths[sample_ids]))
else:
bucket_average_lengths.append(0)
return bucket_average_lengths
class SortedSampler(Sampler):
r"""Sort the samples based on the sort key and then sample sequentially.
Parameters
----------
sort_keys : list-like object
List of the sort keys.
reverse : bool, default True
Whether to sort by descending order.
"""
def __init__(self, sort_keys, reverse=True):
assert len(sort_keys) > 0
self._sorted_ids = sorted(range(len(sort_keys)),
key=lambda i: sort_keys[i], reverse=reverse)
def __iter__(self):
return iter(self._sorted_ids)
def __len__(self):
return len(self._sorted_ids)
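# Minimal usage sketch (not part of the original module):
# list(SortedSampler([3, 1, 2])) == [0, 2, 1], i.e. sample indices ordered by key,
# descending by default.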
class FixedBucketSampler(Sampler):
r"""Assign each data sample to a fixed bucket based on its length.
The bucket keys are either given or generated from the input sequence lengths.
Parameters
----------
lengths : list of int or list of tuple/list of int
The length of the sequences in the input data sample.
batch_size : int
The batch size of the sampler.
num_buckets : int or None, default 10
The number of buckets. This will not be used if bucket_keys is set.
bucket_keys : None or list of int or list of tuple, default None
The keys that will be used to create the buckets. It should usually be the lengths of the
sequences. If it is None, the bucket_keys will be generated based on the maximum
lengths of the data.
ratio : float, default 0
Ratio to scale up the batch size of smaller buckets.
Assume the :math:`i` th key is :math:`K_i` ,
the default batch size is :math:`B` , the ratio to scale the batch size is
:math:`\alpha` and
the batch size corresponds to the :math:`i` th bucket is :math:`B_i` . We have:
.. math::
B_i = \max(\alpha B \times \frac{\max_j sum(K_j)}{sum(K_i)}, B)
Thus, setting this to a value larger than 0, like 0.5, will scale up the batch size of the
smaller buckets.
shuffle : bool, default False
Whether to shuffle the batches.
use_average_length : bool, default False
False: each batch contains batch_size sequences, number of sequence elements varies.
True: each batch contains batch_size elements, number of sequences varies. In this case,
ratio option is ignored.
Examples
--------
>>> from gluonnlp.data import FixedBucketSampler
>>> import numpy as np
>>> lengths = [np.random.randint(1, 100) for _ in range(1000)]
>>> sampler = FixedBucketSampler(lengths, 8)
>>> print(sampler.stats())
FixedBucketSampler:
sample_num=1000, batch_num=128
key=[9, 19, 29, 39, 49, 59, 69, 79, 89, 99]
cnt=[95, 103, 91, 97, 86, 79, 102, 100, 128, 119]
batch_size=[8, 8, 8, 8, 8, 8, 8, 8, 8, 8]
>>> sampler = FixedBucketSampler(lengths, 8, ratio=0.5)
>>> print(sampler.stats())
FixedBucketSampler:
sample_num=1000, batch_num=104
key=[9, 19, 29, 39, 49, 59, 69, 79, 89, 99]
cnt=[95, 103, 91, 97, 86, 79, 102, 100, 128, 119]
batch_size=[44, 20, 13, 10, 8, 8, 8, 8, 8, 8]
"""
def __init__(self, lengths, batch_size, num_buckets=10, bucket_keys=None,
ratio=0, shuffle=False, use_average_length=False):
assert len(lengths) > 0, 'FixedBucketSampler does not support empty lengths.'
assert batch_size > 0, 'Batch size must be larger than 0.'
assert ratio >= 0, 'batch size scaling ratio cannot be negative.'
self._batch_size = batch_size
self._ratio = ratio
self._lengths = np.array(lengths, dtype=np.int32)
if self._lengths.ndim == 1:
self._single_element = True
attr_num = 1
else:
assert self._lengths.ndim == 2, \
'Elements in lengths must be either int or tuple/list of int. ' \
'Received lengths=%s' % str(lengths)
self._single_element = False
attr_num = self._lengths.shape[1]
self._shuffle = shuffle
max_lengths = self._lengths.max(axis=0)
min_lengths = self._lengths.min(axis=0)
if self._single_element:
assert min_lengths > 0, 'Sequence lengths must all be larger than 0.'
else:
            for ele in min_lengths:
assert ele > 0, 'Sequence lengths must all be larger than 0.'
# Generate the buckets
if bucket_keys is None:
assert num_buckets > 0, 'num_buckets must be set when bucket_keys is None. Received ' \
'num_buckets=%d' % num_buckets
if not self._single_element:
bucket_width_l = [max((max_len - min_len) // num_buckets, 1)
for max_len, min_len in
zip(max_lengths, min_lengths)]
bucket_keys =\
[tuple(max(max_len - i * width, min_len) for max_len, min_len, width in
zip(max_lengths, min_lengths, bucket_width_l))
for i in range(num_buckets)]
else:
bucket_width = max((max_lengths - min_lengths) // num_buckets, 1)
bucket_keys = [max(max_lengths - i * bucket_width, min_lengths)
for i in range(num_buckets)]
else:
if num_buckets is not None:
warnings.warn('num_buckets will not be used if bucket_keys is not None. '
'bucket_keys=%s, num_buckets=%d' % (str(bucket_keys), num_buckets))
assert len(bucket_keys) > 0
if self._single_element:
assert isinstance(bucket_keys[0], int)
else:
assert isinstance(bucket_keys[0], tuple)
assert len(bucket_keys[0]) == attr_num
bucket_keys = sorted(set(bucket_keys))
# Assign instances to buckets
bucket_sample_ids = _match_bucket_keys(bucket_keys, self._lengths)
unused_bucket_keys = [key for key, sample_ids in zip(bucket_keys, bucket_sample_ids)
if len(sample_ids) == 0]
if len(unused_bucket_keys) > 0:
warnings.warn('Some buckets are empty and will be removed. Unused bucket keys=%s' %
str(unused_bucket_keys))
# Remove empty buckets
self._bucket_keys = [key for key, sample_ids in zip(bucket_keys, bucket_sample_ids)
if len(sample_ids) > 0]
self._bucket_sample_ids = [sample_ids for sample_ids in bucket_sample_ids
if len(sample_ids) > 0]
if not use_average_length:
scale_up_keys = [key if self._single_element else sum(key) for key in self._bucket_keys]
max_scale_up_key = max(scale_up_keys)
self._bucket_batch_sizes = [max(int(max_scale_up_key / float(scale_up_key)
* self._ratio * batch_size), batch_size)
for scale_up_key in scale_up_keys]
else:
if ratio > 0.:
warnings.warn('ratio=%f is ignored when use_average_length is True' % self._ratio)
bucket_average_lengths = _bucket_average_lengths(self._bucket_sample_ids, self._lengths)
self._bucket_batch_sizes = [max(int(batch_size / float(average_length)), 1)
for average_length in bucket_average_lengths]
self._batch_infos = []
for bucket_id, sample_ids, bucket_batch_size in\
zip(range(len(self._bucket_keys) - 1, -1, -1),
self._bucket_sample_ids[::-1],
self._bucket_batch_sizes[::-1]):
for i in range(0, len(sample_ids), bucket_batch_size):
self._batch_infos.append((bucket_id, i))
def __iter__(self):
if self._shuffle:
np.random.shuffle(self._batch_infos)
for bucket_id in range(len(self._bucket_keys)):
np.random.shuffle(self._bucket_sample_ids[bucket_id])
for bucket_id, batch_begin in self._batch_infos:
batch_size = self._bucket_batch_sizes[bucket_id]
batch_end = min(batch_begin + batch_size, len(self._bucket_sample_ids[bucket_id]))
yield self._bucket_sample_ids[bucket_id][batch_begin:batch_end]
def __len__(self):
return len(self._batch_infos)
def stats(self):
"""Return a string representing the statistics of the bucketing sampler.
Returns
-------
ret : str
String representing the statistics of the buckets.
"""
ret = '{name}:\n' \
' sample_num={sample_num}, batch_num={batch_num}\n' \
' key={bucket_keys}\n' \
' cnt={bucket_counts}\n' \
' batch_size={bucket_batch_sizes}'\
.format(name=self.__class__.__name__,
sample_num=len(self._lengths),
batch_num=len(self._batch_infos),
bucket_keys=self._bucket_keys,
bucket_counts=[len(sample_ids) for sample_ids in self._bucket_sample_ids],
bucket_batch_sizes=self._bucket_batch_sizes)
return ret
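# Worked example of the batch-size scaling above (illustrative only, not part of the module):
# with bucket keys [10, 40], batch_size=8 and ratio=0.5, the per-bucket batch sizes are
# [max(int(40 / 10 * 0.5 * 8), 8), max(int(40 / 40 * 0.5 * 8), 8)] == [16, 8],
# i.e. the bucket of short sequences is given a proportionally larger batch.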
class SortedBucketSampler(Sampler):
r"""Batches are samled from sorted buckets of data.
First, partition data in buckets of size `batch_size * mult`.
Each bucket contains `batch_size * mult` elements. The samples inside each bucket are sorted
based on sort_key and then batched.
Parameters
----------
sort_keys : list-like object
The keys to sort the samples.
batch_size : int
Batch size of the sampler.
mult : int or float, default 100
The multiplier to determine the bucket size. Each bucket will have size `mult * batch_size`.
reverse : bool, default True
Whether to sort in descending order.
shuffle : bool, default False
Whether to shuffle the data.
Examples
--------
>>> from gluonnlp.data import SortedBucketSampler
>>> import numpy as np
>>> lengths = [np.random.randint(1, 1000) for _ in range(1000)]
>>> sampler = SortedBucketSampler(lengths, 16)
>>> # The sequence lengths within the batch will be sorted
>>> for i, indices in enumerate(sampler):
... if i == 0:
... print([lengths[ele] for ele in indices])
[999, 999, 999, 997, 997, 996, 995, 993, 991, 991, 989, 989, 987, 987, 986, 985]
"""
def __init__(self, sort_keys, batch_size, mult=100, reverse=True, shuffle=False):
assert len(sort_keys) > 0
assert batch_size > 0
assert mult >= 1, 'Bucket size multiplier must be larger than 1'
self._sort_keys = sort_keys
self._batch_size = batch_size
self._mult = mult
self._total_sample_num = len(self._sort_keys)
self._reverse = reverse
self._shuffle = shuffle
def __iter__(self):
if self._shuffle:
sample_ids = np.random.permutation(self._total_sample_num)
else:
sample_ids = list(range(self._total_sample_num))
bucket_size = int(self._mult * self._batch_size)
for bucket_begin in range(0, self._total_sample_num, bucket_size):
bucket_end = min(bucket_begin + bucket_size, self._total_sample_num)
sorted_sample_ids = sorted(sample_ids[bucket_begin:bucket_end],
key=lambda i: self._sort_keys[i], reverse=self._reverse)
batch_begins = list(range(0, len(sorted_sample_ids), self._batch_size))
if self._shuffle:
np.random.shuffle(batch_begins)
for batch_begin in batch_begins:
batch_end = min(batch_begin + self._batch_size, len(sorted_sample_ids))
yield sorted_sample_ids[batch_begin:batch_end]
def __len__(self):
return (len(self._sort_keys) + self._batch_size - 1) // self._batch_size
|
py | 7df91eab100e81530c3af145934cbf4362d4ddc9 | import warnings
import numpy as np
from keras.callbacks import History
from rl.callbacks import TestLogger, TrainEpisodeLogger, TrainIntervalLogger, Visualizer, CallbackList
class Agent(object):
def __init__(self):
self.training = False
self.step = 0
def get_config(self):
return {}
def fit(self, env, nb_steps, action_repetition=1, callbacks=None, verbose=1,
visualize=False, nb_max_start_steps=0, start_step_policy=None, log_interval=10000,
nb_max_episode_steps=None):
if not self.compiled:
            raise RuntimeError('You tried to fit your agent but it hasn\'t been compiled yet. Please call `compile()` before `fit()`.')
if action_repetition < 1:
raise ValueError('action_repetition must be >= 1, is {}'.format(action_repetition))
self.training = True
callbacks = [] if not callbacks else callbacks[:]
if verbose == 1:
callbacks += [TrainIntervalLogger(interval=log_interval)]
elif verbose > 1:
callbacks += [TrainEpisodeLogger()]
if visualize:
callbacks += [Visualizer()]
history = History()
callbacks += [history]
callbacks = CallbackList(callbacks)
callbacks._set_model(self)
callbacks._set_env(env)
callbacks._set_params({
'nb_steps': nb_steps,
})
self._on_train_begin()
callbacks.on_train_begin()
episode = 0
self.step = 0
observation = None
episode_reward = None
episode_step = None
did_abort = False
try:
while self.step < nb_steps:
if observation is None: # start of a new episode
callbacks.on_episode_begin(episode)
episode_step = 0
episode_reward = 0.
# Obtain the initial observation by resetting the environment.
self.reset_states()
observation = env.reset()
assert observation is not None
# Perform random starts at beginning of episode and do not record them into the experience.
# This slightly changes the start position between games.
nb_random_start_steps = 0 if nb_max_start_steps == 0 else np.random.randint(nb_max_start_steps)
for _ in range(nb_random_start_steps):
if start_step_policy is None:
action = env.action_space.sample()
else:
action = start_step_policy(observation)
callbacks.on_action_begin(action)
observation, _, done, _ = env.step(action)
callbacks.on_action_end(action)
if done:
warnings.warn('Env ended before {} random steps could be performed at the start. You should probably lower the `nb_max_start_steps` parameter.'.format(nb_random_start_steps))
observation = env.reset()
break
# At this point, we expect to be fully initialized.
assert episode_reward is not None
assert episode_step is not None
assert observation is not None
# Run a single step.
callbacks.on_step_begin(episode_step)
                # This is where all of the work happens. We first perceive and compute the action
# (forward step) and then use the reward to improve (backward step).
action = self.forward(observation)
reward = 0.
accumulated_info = {}
done = False
for _ in range(action_repetition):
callbacks.on_action_begin(action)
observation, r, done, info = env.step(action)
for key, value in info.items():
if not np.isreal(value):
continue
if key not in accumulated_info:
accumulated_info[key] = np.zeros_like(value)
accumulated_info[key] += value
callbacks.on_action_end(action)
reward += r
if done:
break
if nb_max_episode_steps and episode_step >= nb_max_episode_steps - 1:
# Force a terminal state.
done = True
metrics = self.backward(reward, terminal=done)
episode_reward += reward
step_logs = {
'action': action,
'observation': observation,
'reward': reward,
'metrics': metrics,
'episode': episode,
'info': accumulated_info,
}
callbacks.on_step_end(episode_step, step_logs)
episode_step += 1
self.step += 1
if done:
# We are in a terminal state but the agent hasn't yet seen it. We therefore
# perform one more forward-backward call and simply ignore the action before
# resetting the environment. We need to pass in `terminal=False` here since
# the *next* state, that is the state of the newly reset environment, is
# always non-terminal by convention.
self.forward(observation)
self.backward(0., terminal=False)
# This episode is finished, report and reset.
episode_logs = {
'episode_reward': episode_reward,
'nb_episode_steps': episode_step,
'nb_steps': self.step,
}
callbacks.on_episode_end(episode, episode_logs)
episode += 1
observation = None
episode_step = None
episode_reward = None
except KeyboardInterrupt:
            # We catch keyboard interrupts here so that training can be safely aborted.
# This is so common that we've built this right into this function, which ensures that
# the `on_train_end` method is properly called.
did_abort = True
callbacks.on_train_end(logs={'did_abort': did_abort})
self._on_train_end()
return history
def _on_train_begin(self):
pass
def _on_train_end(self):
pass
def test(self, env, nb_episodes=1, action_repetition=1, callbacks=None, visualize=True,
nb_max_episode_steps=None, nb_max_start_steps=0, start_step_policy=None, verbose=1):
if not self.compiled:
            raise RuntimeError('You tried to test your agent but it hasn\'t been compiled yet. Please call `compile()` before `test()`.')
if action_repetition < 1:
raise ValueError('action_repetition must be >= 1, is {}'.format(action_repetition))
self.training = False
self.step = 0
callbacks = [] if not callbacks else callbacks[:]
if verbose >= 1:
callbacks += [TestLogger()]
if visualize:
callbacks += [Visualizer()]
history = History()
callbacks += [history]
callbacks = CallbackList(callbacks)
callbacks._set_model(self)
callbacks._set_env(env)
callbacks._set_params({
'nb_episodes': nb_episodes,
})
self._on_test_begin()
callbacks.on_train_begin()
for episode in range(nb_episodes):
callbacks.on_episode_begin(episode)
episode_reward = 0.
episode_step = 0
# Obtain the initial observation by resetting the environment.
self.reset_states()
observation = env.reset()
assert observation is not None
# Perform random starts at beginning of episode and do not record them into the experience.
# This slightly changes the start position between games.
nb_random_start_steps = 0 if nb_max_start_steps == 0 else np.random.randint(nb_max_start_steps)
for _ in range(nb_random_start_steps):
if start_step_policy is None:
action = env.action_space.sample()
else:
action = start_step_policy(observation)
callbacks.on_action_begin(action)
observation, _, done, _ = env.step(action)
callbacks.on_action_end(action)
if done:
warnings.warn('Env ended before {} random steps could be performed at the start. You should probably lower the `nb_max_start_steps` parameter.'.format(nb_random_start_steps))
observation = env.reset()
break
# Run the episode until we're done.
done = False
while not done:
callbacks.on_step_begin(episode_step)
action = self.forward(observation)
reward = 0.
accumulated_info = {}
for _ in range(action_repetition):
callbacks.on_action_begin(action)
observation, r, d, info = env.step(action)
callbacks.on_action_end(action)
reward += r
for key, value in info.items():
if not np.isreal(value):
continue
if key not in accumulated_info:
accumulated_info[key] = np.zeros_like(value)
accumulated_info[key] += value
if d:
done = True
break
if nb_max_episode_steps and episode_step >= nb_max_episode_steps - 1:
done = True
self.backward(reward, terminal=done)
episode_reward += reward
step_logs = {
'action': action,
'observation': observation,
'reward': reward,
'episode': episode,
'info': accumulated_info,
}
callbacks.on_step_end(episode_step, step_logs)
episode_step += 1
self.step += 1
# We are in a terminal state but the agent hasn't yet seen it. We therefore
# perform one more forward-backward call and simply ignore the action before
# resetting the environment. We need to pass in `terminal=False` here since
# the *next* state, that is the state of the newly reset environment, is
# always non-terminal by convention.
self.forward(observation)
self.backward(0., terminal=False)
# Report end of episode.
episode_logs = {
'episode_reward': episode_reward,
'nb_steps': episode_step,
}
callbacks.on_episode_end(episode, episode_logs)
callbacks.on_train_end()
self._on_test_end()
return history
def _on_test_begin(self):
pass
def _on_test_end(self):
pass
def reset_states(self):
pass
def forward(self, observation):
raise NotImplementedError()
def backward(self, reward, terminal):
raise NotImplementedError()
def compile(self, optimizer, metrics=[]):
raise NotImplementedError()
def load_weights(self, filepath):
raise NotImplementedError()
def save_weights(self, filepath, overwrite=False):
raise NotImplementedError()
@property
def metrics_names(self):
return []
class Processor(object):
def process_observation(self, observation):
"""Processed observation will be stored in memory
"""
return observation
def process_state_batch(self, batch):
"""Process for input into NN
"""
return batch
def process_action(self, action):
return action
def process_reward(self, reward):
return reward
@property
def metrics_names(self):
return []
@property
def metrics(self):
return []
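# A minimal illustrative subclass (a sketch added for this document, not part of keras-rl):
# it shows how the hooks above can be overridden, here to clip raw environment rewards
# into [-1, 1] before they reach the agent. The class name is hypothetical.
class ClippedRewardProcessor(Processor):
    def process_reward(self, reward):
        # Keep reward magnitudes bounded so gradient updates stay well behaved.
        return np.clip(reward, -1., 1.)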
# Note: the API of the `Env` and `Space` classes are taken from the OpenAI Gym implementation.
# https://github.com/openai/gym/blob/master/gym/core.py
class Env(object):
"""The abstract environment class that is used by all agents. This class has the exact
same API that OpenAI Gym uses so that integrating with it is trivial. In contrast to the
OpenAI Gym implementation, this class only defines the abstract methods without any actual
implementation.
"""
reward_range = (-np.inf, np.inf)
action_space = None
observation_space = None
def step(self, action):
"""Run one timestep of the environment's dynamics. When end of
episode is reached, you are responsible for calling `reset()`
to reset this environment's state.
Accepts an action and returns a tuple (observation, reward, done, info).
Args:
            action (object): an action provided by the agent
Returns:
observation (object): agent's observation of the current environment
reward (float) : amount of reward returned after previous action
done (boolean): whether the episode has ended, in which case further step() calls will return undefined results
info (dict): contains auxiliary diagnostic information (helpful for debugging, and sometimes learning)
"""
raise NotImplementedError()
def reset(self):
"""
Resets the state of the environment and returns an initial observation.
Returns:
observation (object): the initial observation of the space. (Initial reward is assumed to be 0.)
"""
raise NotImplementedError()
def render(self, mode='human', close=False):
"""Renders the environment.
The set of supported modes varies per environment. (And some
environments do not support rendering at all.) By convention,
if mode is:
- human: render to the current display or terminal and
return nothing. Usually for human consumption.
        - rgb_array: Return a numpy.ndarray with shape (x, y, 3),
representing RGB values for an x-by-y pixel image, suitable
for turning into a video.
- ansi: Return a string (str) or StringIO.StringIO containing a
terminal-style text representation. The text can include newlines
and ANSI escape sequences (e.g. for colors).
Note:
Make sure that your class's metadata 'render.modes' key includes
the list of supported modes. It's recommended to call super()
in implementations to use the functionality of this method.
Args:
mode (str): the mode to render with
close (bool): close all open renderings
"""
raise NotImplementedError()
def close(self):
"""Override in your subclass to perform any necessary cleanup.
Environments will automatically close() themselves when
garbage collected or when the program exits.
"""
raise NotImplementedError()
def seed(self, seed=None):
"""Sets the seed for this env's random number generator(s).
Note:
Some environments use multiple pseudorandom number generators.
We want to capture all such seeds used in order to ensure that
there aren't accidental correlations between multiple generators.
Returns:
list<bigint>: Returns the list of seeds used in this env's random
number generators. The first value in the list should be the
"main" seed, or the value which a reproducer should pass to
'seed'. Often, the main seed equals the provided 'seed', but
this won't be true if seed=None, for example.
"""
raise NotImplementedError()
def configure(self, *args, **kwargs):
"""Provides runtime configuration to the environment.
This configuration should consist of data that tells your
environment how to run (such as an address of a remote server,
or path to your ImageNet data). It should not affect the
semantics of the environment.
"""
raise NotImplementedError()
def __del__(self):
self.close()
def __str__(self):
return '<{} instance>'.format(type(self).__name__)
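# A minimal concrete environment (an illustrative sketch only, not part of keras-rl; the
# CorridorEnv name and its dynamics are made up): a 1-D corridor where action 1 moves right,
# any other action moves left, and the episode ends with reward 1.0 at the right end.
class CorridorEnv(Env):
    def __init__(self, length=6):
        self.length = length
        self.position = 0
    def reset(self):
        # Start every episode at the left end of the corridor.
        self.position = 0
        return self.position
    def step(self, action):
        delta = 1 if action == 1 else -1
        self.position = min(max(self.position + delta, 0), self.length - 1)
        done = self.position == self.length - 1
        reward = 1. if done else 0.
        return self.position, reward, done, {}
    def render(self, mode='human', close=False):
        print('position: {}'.format(self.position))
    def close(self):
        pass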
class Space(object):
"""Abstract model for a space that is used for the state and action spaces. This class has the
exact same API that OpenAI Gym uses so that integrating with it is trivial.
"""
def sample(self, seed=None):
"""Uniformly randomly sample a random element of this space.
"""
raise NotImplementedError()
def contains(self, x):
"""Return boolean specifying if x is a valid member of this space
"""
raise NotImplementedError()
|
py | 7df9202589ea4daf7a87b04ab3b6617a58e01eb7 | from .lion import Lion
from .tiger import Tiger
from .cheetah import Cheetah
from .keeper import Keeper
from .caretaker import Caretaker
from .vet import Vet
class Zoo:
    def __init__(self, name, budget, animal_capacity, workers_capacity):
        self.name = name
        self.__budget = budget
        self.__animal_capacity = animal_capacity
self.__workers_capacity = workers_capacity
self.animals = []
self.workers = []
def add_animal(self, animal, price):
if not self.__budget - price < 0 and not self.__animal_capacity - (len(self.animals) + 1) < 0:
self.__budget -= price
self.animals.append(animal)
return f'{animal.name} the {animal.__class__.__name__} added to the zoo'
elif self.__budget - price < 0:
return 'Not enough budget'
return 'Not enough space for animal'
def hire_worker(self, worker):
if self.__workers_capacity - (len(self.workers) + 1) < 0:
return 'Not enough space for worker'
self.workers.append(worker)
return f'{worker.name} the {worker.__class__.__name__} hired successfully'
def fire_worker(self, worker_name):
for worker in self.workers:
if worker.name == worker_name:
self.workers.remove(worker)
return f"{worker_name} fired successfully"
return f'There is no {worker_name} in the zoo'
def pay_workers(self):
salaries = sum([worker.salary for worker in self.workers])
if self.__budget - salaries < 0:
return 'You have no budget to pay your workers. They are unhappy'
self.__budget -= salaries
        return f'You paid your workers. They are happy. Budget left: {self.__budget}'
def tend_animals(self):
animals_needs = sum([animal.get_needs() for animal in self.animals])
if self.__budget - animals_needs < 0:
return 'You have no budget to tend the animals. They are unhappy.'
self.__budget -= animals_needs
return f'You tended all the animals. They are happy. Budget left: {self.__budget}'
def profit(self, amount):
self.__budget += amount
def animals_status(self):
lions = [animal for animal in self.animals if isinstance(animal, Lion)]
tigers = [animal for animal in self.animals if isinstance(animal, Tiger)]
cheetahs = [animal for animal in self.animals if isinstance(animal, Cheetah)]
result = f'You have {len(self.animals)} animals\n'
result += f'----- {len(lions)} Lions:\n'
result += '\n'.join(str(l) for l in lions)
result += f'\n----- {len(tigers)} Tigers:\n'
result += '\n'.join(str(t) for t in tigers)
result += f'\n----- {len(cheetahs)} Cheetahs:\n'
result += '\n'.join(str(c) for c in cheetahs)
return result
def workers_status(self):
caretakers = [str(worker) for worker in self.workers if isinstance(worker, Caretaker)]
vet = [str(worker) for worker in self.workers if isinstance(worker, Vet)]
keeper = [str(worker) for worker in self.workers if isinstance(worker, Keeper)]
result = f'You have {len(self.workers)} workers\n'
result += f'----- {len(keeper)} Keepers:\n'
result += '\n'.join(keeper)
result += f'\n----- {len(caretakers)} Caretakers:\n'
result += '\n'.join(caretakers)
result += f'\n----- {len(vet)} Vets:\n'
result += '\n'.join(vet)
return result
|
py | 7df920399ace7a6356a95b80cb61e3997e4c7d8f | import pygame
from os import listdir, getcwd
from sys import exit
from pygame.constants import KEYDOWN, QUIT
pygame.init()
tela = pygame.display.set_mode((640, 480))
pygame.display.set_caption('Sprites do Cable')
path = f'{getcwd()}/sprites/cable/'
filesList = listdir(path)
filesList.sort()
CurrentSprite = 0
sprite = pygame.image.load(path + filesList[CurrentSprite])
spriteRect = sprite.get_rect()
relogio = pygame.time.Clock()
while True:
relogio.tick(16)
for event in pygame.event.get():
if event.type == QUIT:
exit()
tela.fill((0, 0, 0))
tela.blit(sprite, spriteRect)
CurrentSprite += 1
if CurrentSprite > len(filesList) - 1:
CurrentSprite = 0
sprite = pygame.image.load(path + filesList[CurrentSprite])
# sprite = pygame.transform.scale(sprite, (640*2, 480*2))
spriteRect = sprite.get_rect()
pygame.display.flip()
|
py | 7df922138ea332fd46ebd8dab2e45d747f52e75a | # -*- coding: utf-8 -*-
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Turbinia jobs."""
from turbinia.jobs import grep
from turbinia.jobs import hadoop
from turbinia.jobs import http_access_logs
from turbinia.jobs import jenkins
from turbinia.jobs import hindsight
from turbinia.jobs import plaso
from turbinia.jobs import psort
from turbinia.jobs import sshd
from turbinia.jobs import strings
from turbinia.jobs import tomcat
from turbinia.jobs import volatility
from turbinia.jobs import worker_stat
|
py | 7df923933d4886dbc59d3b29f3c46f35d9a7486b | # Random Forest folder created
pass
|
py | 7df924be6a79226ee6f2ec76ce255f493f5e5f31 | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
# pylint: disable=import-error,no-name-in-module,no-self-use
"""Test object relationships in the database."""
import warnings
import pytest
from sqlalchemy import exc as sa_exc
from aiida.common.links import LinkType
from aiida.common.utils import get_new_uuid
from aiida.manage import get_manager
from aiida.orm import CalculationNode, Data
from aiida.storage.psql_dos.models.node import DbNode
from aiida.storage.psql_dos.models.user import DbUser
@pytest.mark.usefixtures('aiida_profile_clean')
class TestRelationshipsSQLA:
"""Class of tests concerning the schema and the correct
implementation of relationships within the AiiDA ORM
    The general naming convention is the following:
    1) tests on one-to-many relationships: test_<Parent>_<child> (the Parent class is capitalized).
    2) tests on many-to-many relationships: test_<peer>_<peer> (neither is
    capitalized)."""
def test_outputs_children_relationship(self):
"""This test checks that the outputs_q, children_q relationship and the
corresponding properties work as expected."""
n_1 = Data().store()
n_2 = CalculationNode()
n_3 = Data().store()
# Create a link between these 2 nodes
n_2.add_incoming(n_1, link_type=LinkType.INPUT_CALC, link_label='N1')
n_2.store()
n_3.add_incoming(n_2, link_type=LinkType.CREATE, link_label='N2')
# Check that the result of outputs is a list
assert isinstance(n_1.backend_entity.bare_model.outputs, list), 'This is expected to be a list'
# Check that the result of outputs_q is a query
from sqlalchemy.orm.dynamic import AppenderQuery
assert isinstance(
n_1.backend_entity.bare_model.outputs_q, AppenderQuery
), 'This is expected to be an AppenderQuery'
# Check that the result of outputs is correct
out = {_.pk for _ in n_1.backend_entity.bare_model.outputs}
assert out == set([n_2.pk])
def test_inputs_parents_relationship(self):
"""This test checks that the inputs_q, parents_q relationship and the
corresponding properties work as expected."""
n_1 = Data().store()
n_2 = CalculationNode()
n_3 = Data().store()
# Create a link between these 2 nodes
n_2.add_incoming(n_1, link_type=LinkType.INPUT_CALC, link_label='N1')
n_2.store()
n_3.add_incoming(n_2, link_type=LinkType.CREATE, link_label='N2')
# Check that the result of outputs is a list
assert isinstance(n_1.backend_entity.bare_model.inputs, list), 'This is expected to be a list'
# Check that the result of outputs_q is a query
from sqlalchemy.orm.dynamic import AppenderQuery
assert isinstance(
n_1.backend_entity.bare_model.inputs_q, AppenderQuery
), 'This is expected to be an AppenderQuery'
# Check that the result of inputs is correct
out = {_.pk for _ in n_3.backend_entity.bare_model.inputs}
assert out == set([n_2.pk])
def test_user_node_1(self):
"""Test that when a user and a node having that user are created,
storing NODE induces storage of the USER
Assert the correct storage of user and node."""
# Create user
dbu1 = DbUser('test1@schema', 'spam', 'eggs', 'monty')
        # Create node
node_dict = dict(user=dbu1)
dbn_1 = DbNode(**node_dict)
# Check that the two are neither flushed nor committed
assert dbu1.id is None
assert dbn_1.id is None
session = get_manager().get_profile_storage().get_session()
# Add only the node and commit
session.add(dbn_1)
session.commit()
# Check that a pk has been assigned, which means that things have
# been flushed into the database
assert dbn_1.id is not None
assert dbu1.id is not None
def test_user_node_2(self):
"""Test that when a user and a node having that user are created,
storing USER does NOT induce storage of the NODE
Assert the correct storage of user and node."""
# Create user
dbu1 = DbUser('tests2@schema', 'spam', 'eggs', 'monty')
        # Create node
node_dict = dict(user=dbu1)
dbn_1 = DbNode(**node_dict)
# Check that the two are neither flushed nor committed
assert dbu1.id is None
assert dbn_1.id is None
session = get_manager().get_profile_storage().get_session()
# Catch all the SQLAlchemy warnings generated by the following code
with warnings.catch_warnings(): # pylint: disable=no-member
warnings.simplefilter('ignore', category=sa_exc.SAWarning) # pylint: disable=no-member
# Add only the user and commit
session.add(dbu1)
session.commit()
# Check that a pk has been assigned (or not), which means that things
# have been flushed into the database
assert dbu1.id is not None
assert dbn_1.id is None
def test_user_node_3(self):
"""Test that when a user and two nodes having that user are created,
storing only ONE NODE induces storage of that node, of the user but
not of the other node
Assert the correct storage of the user and node. Assert the
non-storage of the other node."""
# Create user
dbu1 = DbUser('tests3@schema', 'spam', 'eggs', 'monty')
        # Create node
node_dict = dict(user=dbu1)
dbn_1 = DbNode(**node_dict)
dbn_2 = DbNode(**node_dict)
# Check that the two are neither flushed nor committed
assert dbu1.id is None
assert dbn_1.id is None
assert dbn_2.id is None
session = get_manager().get_profile_storage().get_session()
# Add only first node and commit
session.add(dbn_1)
with warnings.catch_warnings():
# suppress known SAWarning that we have not added dbn_2
warnings.simplefilter('ignore', category=sa_exc.SAWarning)
session.commit()
# Check for which object a pk has been assigned, which means that
# things have been at least flushed into the database
assert dbu1.id is not None
assert dbn_1.id is not None
assert dbn_2.id is None
def test_user_node_4(self):
"""Test that when several nodes are created with the same user and each
        of them is assigned to the same name, storing the last node object
        associated with that name does not trigger storage of all the objects.
Assert the correct storage of the user and node. Assert the
non-storage of the other nodes."""
# Create user
dbu1 = DbUser('tests4@schema', 'spam', 'eggs', 'monty')
        # Create node objects, assigning them to the same name
# Check https://docs.python.org/2/tutorial/classes.html subsec. 9.1
for _ in range(5):
# It is important to change the uuid each time (or any other
# variable) so that a different objects (with a different pointer)
# is actually created in this scope.
dbn_1 = DbNode(user=dbu1, uuid=get_new_uuid())
# Check that the two are neither flushed nor committed
assert dbu1.id is None
assert dbn_1.id is None
session = get_manager().get_profile_storage().get_session()
# Add only first node and commit
session.add(dbn_1)
with warnings.catch_warnings():
# suppress known SAWarning that we have not add the other nodes
warnings.simplefilter('ignore', category=sa_exc.SAWarning)
session.commit()
# Check for which object a pk has been assigned, which means that
# things have been at least flushed into the database
assert dbu1.id is not None
assert dbn_1.id is not None
|
py | 7df927b9a4a5497270841fe78fab00812e56fe40 | #!/usr/bin/env python
import parmed as pmd
import random
import sys
import os
if not (len(sys.argv) == 5 or len(sys.argv) == 6):
print "Usage: strip_random.py [-O] <prmtop> <inpcrd> <first_strippable_water> <remaining_wats>"
exit(1)
overwrite = False
arg_shift = 0
if sys.argv[1] == "-O":
overwrite = True
arg_shift = 1
prmtop = sys.argv[1+arg_shift]
inpcrd = sys.argv[2+arg_shift]
first_wat = int(sys.argv[3+arg_shift])
remaining_wats = int(sys.argv[4+arg_shift])
p = pmd.load_file(prmtop)
p.load_rst7(inpcrd)
wats = p[":WAT"]
prmtop_watcnt = len(wats.residues)
if prmtop_watcnt <= remaining_wats:
print "Warning: Number of Remaining wates lower than present waters."
exit(1)
wat_list = list()
no_strip = list()
# Get water resids
for wat_idx in range(prmtop_watcnt):
    # Add 1 because parmed internal residue numbering starts at 1
if wats.residues[wat_idx].number+1 >= first_wat:
wat_list.append(wats.residues[wat_idx].number+1)
else:
no_strip.append(wats.residues[wat_idx].number+1)
random.shuffle(wat_list)
random.shuffle(wat_list)
strip_mask = ":"
rm_mask = ""
for strip_count, wat_resid in enumerate(wat_list):
if strip_count < prmtop_watcnt-remaining_wats-1:
strip_mask += "%s," %wat_resid
#Assuming that the final unit is called 'complex'
rm_mask += "remove complex complex.%s \n" %wat_resid
else:
strip_mask += "%s" %wat_resid
rm_mask += "remove complex complex.%s \n" %wat_resid
break
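# Note added for clarity: the resulting strip mask uses the usual AMBER/cpptraj residue-mask
# syntax, e.g. ":812,951,1033" (these residue numbers are made up for illustration); ":"
# selects by residue number and entries are comma separated.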
o = open("strip.cpptraj", "w")
o.write("parm %s \n" %prmtop)
o.write("trajin %s \n" %inpcrd)
o.write("strip %s \n" %strip_mask)
if overwrite:
o.write("trajout %s restart \n" %inpcrd)
o.write("trajout %s pdb \n" %inpcrd.replace("inpcrd", "pdb").replace("incrd", "pdb").replace("rst", "pdb").replace("rst7", "pdb"))
else:
o.write("trajout strip.inpcrd restart \n")
o.write("trajout strip.pdb pdb \n")
o.write("go \n")
o.write("clear all \n")
o.write("parm %s \n" %prmtop)
o.write("parmstrip %s \n" %strip_mask)
if overwrite:
o.write("parmwrite out %s \n" %prmtop)
else:
o.write("parmwrite out strip.prmtop \n")
o.write("go \n")
o.close()
if os.path.exists("strip.cpptraj"):
print "Wrote strip.cpptraj. Please run >> cpptraj -i strip.cpptraj << to complete random water strip."
else:
print "strip.cpptraj not written. Something went wrong."
exit(1)
o = open("strip.leap", "w")
o.write(rm_mask)
if not overwrite:
o.write("saveAmberParm complex strip.prmtop strip.inpcrd \n")
o.write("savePdb complex strip.pdb \n")
o.close()
if os.path.exists("strip.leap"):
print "Wrote strip.leap. Please source strip.lib into your leap file right before you save the prmtop inpcrd files"
print "Note: cpptraj and leap actions should both have the same effect. Use just one of them, NOT both."
exit(0)
else:
print "strip.leap not written. Something went wrong."
exit(1)
|
py | 7df927c490959fb5c795499f6122e01a1e1e8329 | # Copyright 2020 Curtin University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Author: Aniek Roelofs, Richard Hosking, James Diprose
from __future__ import annotations
import gzip
import io
import json
import logging
import os
import pathlib
import random
import shutil
import subprocess
import xml.etree.ElementTree as ET
from typing import Dict, List, Tuple
import jsonlines
import pendulum
import requests
from airflow.exceptions import AirflowException
from airflow.models.taskinstance import TaskInstance
from google.cloud.bigquery import SourceFormat
from observatory.dags.config import schema_path
from observatory.platform.utils.airflow_utils import AirflowVariable as Variable, AirflowVars, check_variables
from observatory.platform.utils.config_utils import (find_schema)
from observatory.platform.utils.gc_utils import (bigquery_partitioned_table_id,
bigquery_table_exists,
create_bigquery_dataset,
load_bigquery_table,
upload_file_to_cloud_storage)
from observatory.platform.utils.proc_utils import wait_for_process
from observatory.platform.utils.template_utils import SubFolder, blob_name, telescope_path, test_data_path
from observatory.platform.utils.url_utils import retry_session
from pendulum import Pendulum
def list_releases(start_date: Pendulum, end_date: Pendulum) -> List[FundrefRelease]:
""" List all available fundref releases
    :param start_date: start of the date range for releases, inclusive.
    :param end_date: end of the date range for releases, exclusive.
:return: list of FundrefRelease instances.
"""
# A selection of headers to prevent 403/forbidden error.
headers_list = [{
'authority': 'gitlab.com',
'upgrade-insecure-requests': '1',
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/84.0.4147.89 Safari/537.36',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,'
'*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'sec-fetch-site': 'none',
'sec-fetch-mode': 'navigate',
'sec-fetch-dest': 'document',
'accept-language': 'en-GB,en-US;q=0.9,en;q=0.8'
},
{
'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:78.0) Gecko/20100101 Firefox/78.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept-Language': 'en-US,en;q=0.5',
'DNT': '1',
'Connection': 'keep-alive',
'Upgrade-Insecure-Requests': '1'
}]
releases_list = []
headers = random.choice(headers_list)
current_page = 1
while True:
# Fetch page
url = f'{FundrefTelescope.TELESCOPE_URL}?per_page=100&page={current_page}'
response = retry_session().get(url, headers=headers)
# Check if correct response code
if response is not None and response.status_code == 200:
# Parse json
num_pages = int(response.headers['X-Total-Pages'])
json_response = json.loads(response.text)
# Parse release information
for release in json_response:
version = float(release['tag_name'].strip('v'))
for source in release['assets']['sources']:
if source['format'] == 'tar.gz':
# Parse release date
if version == 0.1:
release_date = pendulum.datetime(year=2014, month=3, day=1)
elif version < 1.0:
date_string = release['description'].split('\n')[0]
release_date = pendulum.parse('01' + date_string)
else:
release_date = pendulum.parse(release['released_at'])
# Only include release if it is within start and end dates
if start_date <= release_date < end_date:
release = FundrefRelease(source['url'], release_date)
releases_list.append(release)
# Check if we should exit or get the next page
if num_pages <= current_page:
break
current_page += 1
else:
logging.error(f"Error retrieving response")
exit(os.EX_DATAERR)
return releases_list
def download_release(release: FundrefRelease) -> str:
""" Downloads release from url.
:param release: Instance of FundrefRelease class
"""
file_path = release.filepath_download
logging.info(f"Downloading file: {file_path}, url: {release.url}")
# A selection of headers to prevent 403/forbidden error.
headers_list = [{
'authority': 'gitlab.com',
'upgrade-insecure-requests': '1',
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/83.0.4103.116 Safari/537.36',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,'
'*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'sec-fetch-site': 'none',
'sec-fetch-mode': 'navigate',
'sec-fetch-dest': 'document',
'accept-language': 'en-GB,en-US;q=0.9,en;q=0.8'
},
{
'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:78.0) Gecko/20100101 Firefox/78.0',
'Accept': '*/*',
'Accept-Language': 'en-US,en;q=0.5',
'Referer': 'https://gitlab.com/'
}]
# Download release
with requests.get(release.url, headers=random.choice(headers_list), stream=True) as response:
with open(file_path, 'wb') as file:
shutil.copyfileobj(response.raw, file)
return file_path
def extract_release(release: FundrefRelease) -> str:
""" Extract release.
:param release: Instance of FundrefRelease class
"""
logging.info(f"Extracting file: {release.filepath_download}")
# Tar file contains both README.md and registry.rdf, use tar -ztf to get path of 'registry.rdf'
# Use this path to extract only registry.rdf to a new file.
cmd = f"registry_path=$(tar -ztf {release.filepath_download} | grep -m1 '/registry.rdf'); " \
f"tar -xOzf {release.filepath_download} $registry_path > {release.filepath_extract}"
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, executable='/bin/bash')
stdout, stderr = wait_for_process(p)
if stdout:
logging.info(stdout)
if stderr:
raise AirflowException(f"bash command failed for {release.url}: {stderr}")
logging.info(f"File extracted to: {release.filepath_extract}")
return release.filepath_extract
def strip_whitespace(file_path: str):
""" Strip leading white space from the first line of the file.
    This is present in fundref release 2019-06-01. If not removed it will give an XML ParseError.
:param file_path: Path to file from which to trim leading white space.
:return: None.
"""
with open(file_path, 'r') as f_in, open(file_path + '.tmp', 'w') as f_out:
first_line = True
for line in f_in:
if first_line and not line.startswith(' '):
os.remove(file_path + '.tmp')
return
elif first_line and line.startswith(' '):
line = line.lstrip()
f_out.write(line)
first_line = False
os.rename(file_path + '.tmp', file_path)
def transform_release(release: FundrefRelease) -> str:
""" Transform release by parsing the raw rdf file, transforming it into a json file and replacing geoname associated
ids with their geoname name.
:param release: Instance of FundrefRelease class
"""
# Strip leading whitespace from first line if present.
strip_whitespace(release.filepath_extract)
# Parse RDF funders data
funders, funders_by_key = parse_fundref_registry_rdf(release.filepath_extract)
funders = add_funders_relationships(funders, funders_by_key)
# Transform FundRef release into JSON Lines format saving in memory buffer
# Save in memory buffer to gzipped file
with io.BytesIO() as bytes_io:
with gzip.GzipFile(fileobj=bytes_io, mode='w') as gzip_file:
with jsonlines.Writer(gzip_file) as writer:
writer.write_all(funders)
with open(release.filepath_transform, 'wb') as jsonl_gzip_file:
jsonl_gzip_file.write(bytes_io.getvalue())
logging.info(f'Success transforming release: {release.url}')
return release.filepath_transform
class FundrefRelease:
""" Used to store info on a given fundref release """
def __init__(self, url: str, date: Pendulum):
self.url = url
self.date = date
self.filepath_download = self.get_filepath(SubFolder.downloaded)
self.filepath_extract = self.get_filepath(SubFolder.extracted)
self.filepath_transform = self.get_filepath(SubFolder.transformed)
def get_filepath(self, sub_folder: SubFolder) -> str:
""" Gets complete path of file for download/extract/transform directory
:param sub_folder: name of subfolder
:return: path of file.
"""
date_str = self.date.strftime("%Y_%m_%d")
if sub_folder == SubFolder.downloaded:
file_name = f"{FundrefTelescope.DAG_ID}_{date_str}.tar.gz"
elif sub_folder == SubFolder.extracted:
file_name = f"{FundrefTelescope.DAG_ID}_{date_str}.rdf"
else:
file_name = f"{FundrefTelescope.DAG_ID}_{date_str}.jsonl.gz"
file_dir = telescope_path(sub_folder, FundrefTelescope.DAG_ID)
path = os.path.join(file_dir, file_name)
return path
def get_blob_name(self, sub_folder: SubFolder) -> str:
""" Gives blob name that is used to determine path inside storage bucket
:param sub_folder: name of subfolder
:return: blob name
"""
file_name = os.path.basename(self.get_filepath(sub_folder))
blob_name = f'telescopes/{FundrefTelescope.DAG_ID}/{file_name}'
return blob_name
def new_funder_template():
""" Helper Function for creating a new Funder.
:return: a blank funder object.
"""
return {
'funder': None,
'pre_label': None,
'alt_label': [],
'narrower': [],
'broader': [],
'modified': None,
'created': None,
'funding_body_type': None,
'funding_body_sub_type': None,
'region': None,
'country': None,
'country_code': None,
'state': None,
'tax_id': None,
'continuation_of': [],
'renamed_as': [],
'replaces': [],
'affil_with': [],
'merged_with': [],
'incorporated_into': [],
'is_replaced_by': [],
'incorporates': [],
'split_into': [],
'status': None,
'merger_of': [],
'split_from': None,
'formly_known_as': None,
'notation': None
}
def parse_fundref_registry_rdf(registry_file_path: str) -> Tuple[List, Dict]:
""" Helper function to parse a fundref registry rdf file and to return a python list containing each funder.
:param registry_file_path: the filename of the registry.rdf file to be parsed.
:return: funders list containing all the funders parsed from the input rdf and dictionary of funders with their
id as key.
"""
funders = []
funders_by_key = {}
tree = ET.parse(registry_file_path)
root = tree.getroot()
tag_prefix = root.tag.split('}')[0] + '}'
for record in root:
tag = record.tag.split('}')[-1]
if tag == "ConceptScheme":
for nested in record:
tag = nested.tag.split('}')[-1]
if tag == 'hasTopConcept':
funder_id = nested.attrib[tag_prefix + 'resource']
funders_by_key[funder_id] = new_funder_template()
if tag == "Concept":
funder_id = record.attrib[tag_prefix + 'about']
funder = funders_by_key[funder_id]
funder['funder'] = funder_id
for nested in record:
tag = nested.tag.split('}')[-1]
if tag == 'inScheme':
continue
elif tag == 'prefLabel':
funder['pre_label'] = nested[0][0].text
elif tag == 'altLabel':
alt_label = nested[0][0].text
if alt_label is not None:
funder['alt_label'].append(alt_label)
elif tag == 'narrower':
funder['narrower'].append(nested.attrib[tag_prefix + 'resource'])
elif tag == 'broader':
funder['broader'].append(nested.attrib[tag_prefix + 'resource'])
elif tag == 'modified':
funder['modified'] = nested.text
elif tag == 'created':
funder['created'] = nested.text
elif tag == 'fundingBodySubType':
funder['funding_body_type'] = nested.text
elif tag == 'fundingBodyType':
funder['funding_body_sub_type'] = nested.text
elif tag == 'region':
funder['region'] = nested.text
elif tag == 'country':
funder['country'] = nested.text
elif tag == 'state':
funder['state'] = nested.text
elif tag == 'address':
funder['country_code'] = nested[0][0].text
elif tag == 'taxId':
funder['tax_id'] = nested.text
elif tag == 'continuationOf':
funder['continuation_of'].append(nested.attrib[tag_prefix + 'resource'])
elif tag == 'renamedAs':
funder['renamed_as'].append(nested.attrib[tag_prefix + 'resource'])
elif tag == 'replaces':
funder['replaces'].append(nested.attrib[tag_prefix + 'resource'])
elif tag == 'affilWith':
funder['affil_with'].append(nested.attrib[tag_prefix + 'resource'])
elif tag == 'mergedWith':
funder['merged_with'].append(nested.attrib[tag_prefix + 'resource'])
elif tag == 'incorporatedInto':
funder['incorporated_into'].append(nested.attrib[tag_prefix + 'resource'])
elif tag == 'isReplacedBy':
funder['is_replaced_by'].append(nested.attrib[tag_prefix + 'resource'])
elif tag == 'incorporates':
funder['incorporates'].append(nested.attrib[tag_prefix + 'resource'])
elif tag == 'splitInto':
funder['split_into'].append(nested.attrib[tag_prefix + 'resource'])
elif tag == 'status':
funder['status'] = nested.attrib[tag_prefix + 'resource']
elif tag == 'mergerOf':
funder['merger_of'].append(nested.attrib[tag_prefix + 'resource'])
elif tag == 'splitFrom':
funder['split_from'] = nested.attrib[tag_prefix + 'resource']
elif tag == 'formerlyKnownAs':
funder['formly_known_as'] = nested.attrib[tag_prefix + 'resource']
elif tag == 'notation':
funder['notation'] = nested.text
else:
print(f"Unrecognized tag for element: {nested}")
funders.append(funder)
return funders, funders_by_key
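# Hedged sketch (the XML below is a made-up, heavily trimmed imitation of the real registry.rdf,
# not an official sample, and the file path is a placeholder): parse a single funder and read
# back its preferred label.
def _example_parse_registry(tmp_path: str = '/tmp/mini_registry.rdf'):
    rdf = (
        '<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" '
        'xmlns:skos="http://www.w3.org/2004/02/skos/core#" '
        'xmlns:skosxl="http://www.w3.org/2008/05/skos-xl#">'
        '<skos:ConceptScheme>'
        '<skos:hasTopConcept rdf:resource="http://dx.doi.org/10.13039/100000001"/>'
        '</skos:ConceptScheme>'
        '<skos:Concept rdf:about="http://dx.doi.org/10.13039/100000001">'
        '<skosxl:prefLabel><skosxl:Label><skosxl:literalForm>Example Funder</skosxl:literalForm>'
        '</skosxl:Label></skosxl:prefLabel>'
        '</skos:Concept>'
        '</rdf:RDF>'
    )
    with open(tmp_path, 'w') as f:
        f.write(rdf)
    funders, funders_by_key = parse_fundref_registry_rdf(tmp_path)
    # funders[0]['funder'] == 'http://dx.doi.org/10.13039/100000001'
    # funders[0]['pre_label'] == 'Example Funder'
    return funders, funders_by_key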
def add_funders_relationships(funders: List, funders_by_key: Dict) -> List:
""" Adds any children/parent relationships to funder instances in the funders list.
:param funders: List of funders
:param funders_by_key: Dictionary of funders with their id as key.
:return: funders with added relationships.
"""
for funder in funders:
children, returned_depth = recursive_funders(funders_by_key, funder, 0, 'narrower', [])
funder["children"] = children
funder['bottom'] = len(children) > 0
parent, returned_depth = recursive_funders(funders_by_key, funder, 0, 'broader', [])
funder["parents"] = parent
funder['top'] = len(parent) > 0
return funders
def recursive_funders(funders_by_key: Dict, funder: Dict, depth: int, direction: str, parents: List) -> Tuple[
List, int]:
""" Recursively goes through a funder/sub_funder dict. The funder properties can be looked up with the
funders_by_key
dictionary that stores the properties per funder id. Any children/parents for the funder are already given in the
xml element with the 'narrower' and 'broader' tags. For each funder in the list, it will recursively add any
children/parents for those funders in 'narrower'/'broader' and their funder properties.
:param funders_by_key: dictionary with id as key and funders object as value
:param funder: dictionary of a given funder containing 'narrower' and 'broader' info
:param depth: keeping track of nested depth
:param direction: either 'narrower' or 'broader' to get 'children' or 'parents'
:param parents: list to keep track of which funder ids are parents
:return: list of children and current depth
"""
starting_depth = depth
children = []
for funder_id in funder[direction]:
if funder_id in parents:
print(f"funder {funder_id} is it's own parent/child, skipping..")
name = 'NA'
returned = []
returned_depth = depth
else:
try:
sub_funder = funders_by_key[funder_id]
parents.append(sub_funder['funder'])
name = sub_funder['pre_label']
returned, returned_depth = recursive_funders(funders_by_key, sub_funder, starting_depth + 1, direction,
parents)
except KeyError:
print(f'Could not find funder by id: {funder_id}, skipping..')
name = 'NA'
returned = []
returned_depth = depth
if direction == "narrower":
child = {
'funder': funder_id,
'name': name,
'children': returned
}
else:
child = {
'funder': funder_id,
'name': name,
'parent': returned
}
children.append(child)
parents = []
if returned_depth > depth:
depth = returned_depth
return children, depth
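# Tiny worked example (made-up funder ids): funder 'A' lists 'B' under 'narrower', so its
# children come back as a single entry at depth 1; 'B' itself has no children.
def _example_recursive_funders():
    funder_b = new_funder_template()
    funder_b['funder'], funder_b['pre_label'] = 'B', 'Funder B'
    funder_a = new_funder_template()
    funder_a['funder'], funder_a['pre_label'], funder_a['narrower'] = 'A', 'Funder A', ['B']
    funders_by_key = {'A': funder_a, 'B': funder_b}
    children, depth = recursive_funders(funders_by_key, funder_a, 0, 'narrower', [])
    # children == [{'funder': 'B', 'name': 'Funder B', 'children': []}] and depth == 1
    return children, depth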
def pull_releases(ti: TaskInstance):
""" Pull a list of MagRelease instances with xcom.
:param ti: the Apache Airflow task instance.
:return: the list of MagRelease instances.
"""
return ti.xcom_pull(key=FundrefTelescope.RELEASES_TOPIC_NAME, task_ids=FundrefTelescope.TASK_ID_LIST,
include_prior_dates=False)
class FundrefTelescope:
""" A container for holding the constants and static functions for the Fundref telescope. """
DAG_ID = 'fundref'
DESCRIPTION = 'The Funder Registry dataset: https://www.crossref.org/services/funder-registry/'
DATASET_ID = 'crossref'
RELEASES_TOPIC_NAME = "releases"
QUEUE = "default"
TELESCOPE_URL = 'https://gitlab.com/api/v4/projects/crossref%2Fopen_funder_registry/releases'
TELESCOPE_DEBUG_URL = 'debug_fundref_url'
# DEBUG_FILE_PATH = os.path.join(test_data_path(), 'telescopes', 'fundref.tar.gz')
RETRIES = 3
TASK_ID_CHECK_DEPENDENCIES = "check_dependencies"
TASK_ID_LIST = f"list_releases"
TASK_ID_DOWNLOAD = f"download"
TASK_ID_UPLOAD_DOWNLOADED = 'upload_downloaded'
TASK_ID_EXTRACT = f"extract"
TASK_ID_TRANSFORM = f"transform_releases"
TASK_ID_UPLOAD_TRANSFORMED = 'upload_transformed'
TASK_ID_BQ_LOAD = f"bq_load"
TASK_ID_CLEANUP = f"cleanup"
@staticmethod
def check_dependencies(**kwargs):
""" Check that all variables exist that are required to run the DAG.
:param kwargs: the context passed from the PythonOperator. See
https://airflow.apache.org/docs/stable/macros-ref.html
for a list of the keyword arguments that are passed to this argument.
:return: None.
"""
vars_valid = check_variables(AirflowVars.DATA_PATH, AirflowVars.PROJECT_ID, AirflowVars.DATA_LOCATION,
AirflowVars.DOWNLOAD_BUCKET, AirflowVars.TRANSFORM_BUCKET)
if not vars_valid:
raise AirflowException('Required variables are missing')
@staticmethod
def list_releases(**kwargs):
""" Based on a list of all releases, checks which ones were released between this and the next execution date
of the DAG. If the release falls within the time period mentioned above, checks if a bigquery table doesn't
exist yet for the release. A list of releases that passed both checks is passed to the next tasks. If the
list is empty the workflow will stop.
:param kwargs: the context passed from the PythonOperator. See
https://airflow.apache.org/docs/stable/macros-ref.html
for a list of the keyword arguments that are passed to this argument.
:return: None.
"""
# Get variables
project_id = Variable.get(AirflowVars.PROJECT_ID)
# List releases between a start date and an end date
execution_date = kwargs['execution_date']
next_execution_date = kwargs['next_execution_date']
releases_list = list_releases(execution_date, next_execution_date)
logging.info(f'Releases between current ({execution_date}) and next ({next_execution_date}) execution date:')
print(*releases_list, sep='\n')
# Check if the BigQuery table for each release already exists and only process release if the table
# doesn't exist
releases_list_out = []
for release in releases_list:
table_id = bigquery_partitioned_table_id(FundrefTelescope.DAG_ID, release.date)
logging.info('Checking if bigquery table already exists:')
if bigquery_table_exists(project_id, FundrefTelescope.DATASET_ID, table_id):
logging.info(f'Skipping as table exists for {release.url}: '
f'{project_id}.{FundrefTelescope.DATASET_ID}.{table_id}')
else:
logging.info(f"Table doesn't exist yet, processing {release.url} in this workflow")
releases_list_out.append(release)
# If releases_list_out contains items then the DAG will continue (return True) otherwise it will
# stop (return False)
continue_dag = len(releases_list_out)
if continue_dag:
ti: TaskInstance = kwargs['ti']
ti.xcom_push(FundrefTelescope.RELEASES_TOPIC_NAME, releases_list_out, execution_date)
return continue_dag
@staticmethod
def download(**kwargs):
""" Download release to file. If develop environment, copy debug file from this repository to the right
location.
Else download from url.
:param kwargs: the context passed from the PythonOperator. See
https://airflow.apache.org/docs/stable/macros-ref.html
for a list of the keyword arguments that are passed to this argument.
:return: None.
"""
# Pull releases
ti: TaskInstance = kwargs['ti']
releases_list = pull_releases(ti)
# Get variables
environment = Variable.get(AirflowVars.ENVIRONMENT)
# Download each release
for release in releases_list:
if environment == 'test':
                # Copy the bundled debug file (cf. the commented DEBUG_FILE_PATH above) instead of downloading
                shutil.copy(os.path.join(test_data_path(), 'telescopes', 'fundref.tar.gz'), release.filepath_download)
else:
download_release(release)
@staticmethod
def upload_downloaded(**kwargs):
""" Upload the downloaded files to a Google Cloud Storage bucket.
:param kwargs: the context passed from the PythonOperator. See
https://airflow.apache.org/docs/stable/macros-ref.html
for a list of the keyword arguments that are passed to this argument.
:return: None.
"""
# Pull releases
ti: TaskInstance = kwargs['ti']
releases_list = pull_releases(ti)
# Get variables
bucket_name = Variable.get(AirflowVars.DOWNLOAD_BUCKET)
# Upload each release
for release in releases_list:
upload_file_to_cloud_storage(bucket_name, blob_name(release.filepath_download),
file_path=release.filepath_download)
@staticmethod
def extract(**kwargs):
""" Extract release to new file.
:param kwargs: the context passed from the PythonOperator. See
https://airflow.apache.org/docs/stable/macros-ref.html
for a list of the keyword arguments that are passed to this argument.
:return: None.
"""
# Pull releases
ti: TaskInstance = kwargs['ti']
releases_list = pull_releases(ti)
# Extract each release
for release in releases_list:
extract_release(release)
@staticmethod
def transform(**kwargs):
""" Transform release by parsing the raw rdf file, transforming it into a json file and replacing geoname
associated
ids with their geoname name.
:param kwargs: the context passed from the PythonOperator. See
https://airflow.apache.org/docs/stable/macros-ref.html
for a list of the keyword arguments that are passed to this argument.
:return: None.
"""
# Pull releases
ti: TaskInstance = kwargs['ti']
releases_list = pull_releases(ti)
# Transform each release
for release in releases_list:
transform_release(release)
@staticmethod
def upload_transformed(**kwargs):
""" Upload the transformed release to a Google Cloud Storage bucket.
:param kwargs: the context passed from the PythonOperator. See
https://airflow.apache.org/docs/stable/macros-ref.html
for a list of the keyword arguments that are passed to this argument.
:return: None.
"""
# Pull releases
ti: TaskInstance = kwargs['ti']
releases_list = pull_releases(ti)
# Get variables
bucket_name = Variable.get(AirflowVars.TRANSFORM_BUCKET)
# Upload each release
for release in releases_list:
upload_file_to_cloud_storage(bucket_name, blob_name(release.filepath_transform),
file_path=release.filepath_transform)
@staticmethod
def bq_load(**kwargs):
""" Upload transformed release to a bigquery table.
:param kwargs: the context passed from the PythonOperator. See
https://airflow.apache.org/docs/stable/macros-ref.html
for a list of the keyword arguments that are passed to this argument.
:return: None.
"""
# Pull releases
ti: TaskInstance = kwargs['ti']
releases_list = pull_releases(ti)
# Get variables
project_id = Variable.get(AirflowVars.PROJECT_ID)
data_location = Variable.get(AirflowVars.DATA_LOCATION)
bucket_name = Variable.get(AirflowVars.TRANSFORM_BUCKET)
dataset_id = FundrefTelescope.DATASET_ID
# Create dataset
create_bigquery_dataset(project_id, dataset_id, data_location, FundrefTelescope.DESCRIPTION)
# Load each release into BigQuery
for release in releases_list:
table_id = bigquery_partitioned_table_id(FundrefTelescope.DAG_ID, release.date)
# Select schema file based on release date
analysis_schema_path = schema_path()
schema_file_path = find_schema(analysis_schema_path, FundrefTelescope.DAG_ID, release.date)
if schema_file_path is None:
logging.error(f'No schema found with search parameters: analysis_schema_path={analysis_schema_path}, '
f'table_name={FundrefTelescope.DAG_ID}, release_date={release.date}')
exit(os.EX_CONFIG)
# Load BigQuery table
uri = f"gs://{bucket_name}/{blob_name(release.filepath_transform)}"
logging.info(f"URI: {uri}")
load_bigquery_table(uri, dataset_id, data_location, table_id, schema_file_path,
SourceFormat.NEWLINE_DELIMITED_JSON)
@staticmethod
def cleanup(**kwargs):
""" Delete files of downloaded, extracted and transformed releases.
:param kwargs: the context passed from the PythonOperator. See
https://airflow.apache.org/docs/stable/macros-ref.html
for a list of the keyword arguments that are passed to this argument.
:return: None.
"""
# Pull releases
ti: TaskInstance = kwargs['ti']
releases_list = pull_releases(ti)
# Delete files for each release
for release in releases_list:
try:
pathlib.Path(release.filepath_download).unlink()
except FileNotFoundError as e:
logging.warning(f"No such file or directory {release.filepath_download}: {e}")
try:
pathlib.Path(release.filepath_extract).unlink()
except FileNotFoundError as e:
logging.warning(f"No such file or directory {release.filepath_extract}: {e}")
try:
pathlib.Path(release.filepath_transform).unlink()
except FileNotFoundError as e:
logging.warning(f"No such file or directory {release.filepath_transform}: {e}")
|
py | 7df928d19792f13449aa6447f2e80fe52cc40933 | from onadata.apps.logger.models import Project
from rest_framework import serializers
from rest_framework.fields import SkipField
class ProjectRelatedField(serializers.RelatedField):
"""A custom field to represent the content_object generic relationship"""
def get_attribute(self, instance):
# xform is not an attribute of the MetaData object
if instance and isinstance(instance.content_object, Project):
return instance.content_object
raise SkipField()
def to_internal_value(self, data):
try:
return Project.objects.get(pk=data)
except ValueError:
raise Exception("project id should be an integer")
def to_representation(self, instance):
"""Serialize project object"""
return instance.pk
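# Hedged usage sketch (an assumption, not part of onadata itself): declaring the field on a
# serializer whose model has a generic `content_object`, so linked projects serialize as their pk.
class _ExampleMetaDataSerializer(serializers.Serializer):
    project = ProjectRelatedField(queryset=Project.objects.all(), required=False)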
|
py | 7df928f0cf049118348518c9d6a63a69114f6996 | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""CategoricalToDiscrete bijector.
This bijector is hidden from the public API for now because it is only valid for
the Categorical distribution.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.bijectors import bijector
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import tensorshape_util
__all__ = [
'CategoricalToDiscrete',
]
class CategoricalToDiscrete(bijector.Bijector):
"""Bijector which computes `Y = g(X) = values[X]`.
Example Usage:
```python
bijector = CategoricalToDiscrete(values=[0.01, 0.1, 1., 10.])
bijector.forward([1, 3, 2, 1, 0]) = [1., 10., 1., 0.1, 0.01]
bijector.inverse([1., 10., 1., 0.1, 0.01]) = [1, 3, 2, 1, 0]
```
"""
def __init__(self,
map_values,
validate_args=False,
name='categorical_to_discrete'):
"""Instantiates `CategoricalToDiscrete` bijector.
Args:
map_values: 1D numerical tensor of discrete values to map to, sorted in
strictly increasing order.
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
name: Python `str` name given to ops managed by this object.
"""
with tf.name_scope(name):
map_values = tf.convert_to_tensor(map_values, name='map_values')
assertions = _maybe_check_valid_map_values(map_values, validate_args)
if assertions:
with tf.control_dependencies(assertions):
map_values = tf.identity(map_values)
self._map_values = map_values
super(CategoricalToDiscrete, self).__init__(
graph_parents=[map_values],
forward_min_event_ndims=0,
is_constant_jacobian=True,
validate_args=validate_args,
name=name)
def _forward(self, x):
if self.validate_args:
with tf.control_dependencies([
assert_util.assert_equal(
(0 <= x) & (x < tf.size(self.map_values)),
True,
message='indices out of bound')
]):
x = tf.identity(x)
# If we want batch dims in self.map_values, we can (after broadcasting),
# use:
# tf.gather(self.map_values, x, batch_dims=-1, axis=-1)
return tf.gather(self.map_values, indices=x)
def _inverse(self, y):
flat_y = tf.reshape(y, shape=[-1])
# Search for the indices of self.map_values that are closest to flat_y.
# Since self.map_values is strictly increasing, the closest is either the
# first one that is strictly greater than flat_y, or the one before it.
upper_candidates = tf.minimum(
tf.size(self.map_values) - 1,
tf.searchsorted(self.map_values, values=flat_y, side='right'))
lower_candidates = tf.maximum(0, upper_candidates - 1)
candidates = tf.stack([lower_candidates, upper_candidates], axis=-1)
lower_cand_diff = tf.abs(flat_y - self._forward(lower_candidates))
upper_cand_diff = tf.abs(flat_y - self._forward(upper_candidates))
if self.validate_args:
with tf.control_dependencies([
assert_util.assert_near(
tf.minimum(lower_cand_diff, upper_cand_diff),
0,
message='inverse value not found')
]):
candidates = tf.identity(candidates)
candidate_selector = tf.stack([
tf.range(tf.size(flat_y), dtype=tf.int32),
tf.argmin([lower_cand_diff, upper_cand_diff], output_type=tf.int32)
],
axis=-1)
return tf.reshape(
tf.gather_nd(candidates, candidate_selector), shape=y.shape)
def _inverse_log_det_jacobian(self, y):
return tf.constant(0., dtype=y.dtype)
@property
def map_values(self):
return self._map_values
def _maybe_check_valid_map_values(map_values, validate_args):
"""Validate `map_values` if `validate_args`==True."""
assertions = []
message = 'Rank of map_values must be 1.'
if tensorshape_util.rank(map_values.shape) is not None:
if tensorshape_util.rank(map_values.shape) != 1:
raise ValueError(message)
elif validate_args:
assertions.append(assert_util.assert_rank(map_values, 1, message=message))
message = 'Size of map_values must be greater than 0.'
if tensorshape_util.num_elements(map_values.shape) is not None:
if tensorshape_util.num_elements(map_values.shape) == 0:
raise ValueError(message)
elif validate_args:
assertions.append(
assert_util.assert_greater(tf.size(map_values), 0, message=message))
if validate_args:
assertions.append(
assert_util.assert_equal(
tf.math.is_strictly_increasing(map_values),
True,
message='map_values is not strictly increasing.'))
return assertions
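# Illustrative sketch (not part of the library API): the nearest-value lookup that `_inverse`
# relies on, written out eagerly for a concrete `map_values` and a few query points.
def _example_nearest_value_lookup():
  map_values = tf.constant([0.01, 0.1, 1., 10.])
  y = tf.constant([0.09, 9.5, 1.2])
  upper = tf.minimum(tf.size(map_values) - 1,
                     tf.searchsorted(map_values, y, side='right'))
  lower = tf.maximum(0, upper - 1)
  # Pick whichever neighbour is closer to y; exact matches recover the original index.
  pick_upper = tf.abs(tf.gather(map_values, upper) - y) < tf.abs(tf.gather(map_values, lower) - y)
  return tf.where(pick_upper, upper, lower)  # == [1, 3, 2]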
|
py | 7df92910692ef56444d51e9d6b2c5048c8173449 | # Copyright (c) 2017 Microsoft Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================================================================
from __future__ import division
import sys
import time
from collections import namedtuple
from tkinter import ttk, Canvas, W
import numpy as np
from common import visualize_training, Entity, ENV_TARGET_NAMES, ENV_ENTITIES, ENV_AGENT_NAMES, \
ENV_ACTIONS, ENV_CAUGHT_REWARD, ENV_BOARD_SHAPE
from six.moves import range
from malmopy.agent import AStarAgent
from malmopy.agent import QLearnerAgent, BaseAgent, RandomAgent
from malmopy.agent.gui import GuiAgent
P_FOCUSED = .75
CELL_WIDTH = 33
class PigChaseQLearnerAgent(QLearnerAgent):
"""A thin wrapper around QLearnerAgent that normalizes rewards to [-1,1]"""
def act(self, state, reward, done, is_training=False):
reward /= ENV_CAUGHT_REWARD
return super(PigChaseQLearnerAgent, self).act(state, reward, done,
is_training)
class PigChaseChallengeAgent(BaseAgent):
"""Pig Chase challenge agent - behaves focused or random."""
def __init__(self, name, visualizer=None):
nb_actions = len(ENV_ACTIONS)
super(PigChaseChallengeAgent, self).__init__(name, nb_actions,
visualizer = visualizer)
self._agents = []
self._agents.append(FocusedAgent(name, ENV_TARGET_NAMES[0],
visualizer = visualizer))
self._agents.append(RandomAgent(name, nb_actions,
visualizer = visualizer))
self.current_agent_id = 0
self.current_agent = self._select_agent(P_FOCUSED)
def _select_agent(self, p_focused):
self.current_agent_id = np.random.choice(range(len(self._agents)),
p = [p_focused, 1. - p_focused])
return self._agents[self.current_agent_id]
def act(self, new_state, reward, done, is_training=False):
if done:
self.current_agent = self._select_agent(P_FOCUSED)
return self.current_agent.act(new_state, reward, done, is_training)
def save(self, out_dir):
self.current_agent.save(out_dir)
def load(self, out_dir):
        self.current_agent.load(out_dir)
def inject_summaries(self, idx):
self.current_agent.inject_summaries(idx)
class FocusedAgent(AStarAgent):
ACTIONS = ENV_ACTIONS
Neighbour = namedtuple('Neighbour', ['cost', 'x', 'z', 'direction', 'action'])
def __init__(self, name, target, visualizer = None):
super(FocusedAgent, self).__init__(name, len(FocusedAgent.ACTIONS),
visualizer = visualizer)
self._target = str(target)
self._previous_target_pos = None
self._action_list = []
def act(self, state, reward, done, is_training=False):
if done:
self._action_list = []
self._previous_target_pos = None
if state is None:
return np.random.randint(0, self.nb_actions)
entities = state[1]
state = state[0]
me = [(j, i) for i, v in enumerate(state) for j, k in enumerate(v) if self.name in k]
me_details = [e for e in entities if e['name'] == self.name][0]
yaw = int(me_details['yaw'])
direction = ((((yaw - 45) % 360) // 90) - 1) % 4 # convert Minecraft yaw to 0=north, 1=east etc.
target = [(j, i) for i, v in enumerate(state) for j, k in enumerate(v) if self._target in k]
# Get agent and target nodes
me = FocusedAgent.Neighbour(1, me[0][0], me[0][1], direction, "")
target = FocusedAgent.Neighbour(1, target[0][0], target[0][1], 0, "")
# If distance to the pig is one, just turn and wait
if self.heuristic(me, target) == 1:
return FocusedAgent.ACTIONS.index("turn 1") # substitutes for a no-op command
if not self._previous_target_pos == target:
# Target has moved, or this is the first action of a new mission - calculate a new action list
self._previous_target_pos = target
path, costs = self._find_shortest_path(me, target, state=state)
self._action_list = []
for point in path:
self._action_list.append(point.action)
if self._action_list is not None and len(self._action_list) > 0:
action = self._action_list.pop(0)
return FocusedAgent.ACTIONS.index(action)
# reached end of action list - turn on the spot
return FocusedAgent.ACTIONS.index("turn 1") # substitutes for a no-op command
def neighbors(self, pos, state=None):
state_width = state.shape[1]
state_height = state.shape[0]
dir_north, dir_east, dir_south, dir_west = range(4)
neighbors = []
inc_x = lambda x, dir, delta: x + delta if dir == dir_east else x - delta if dir == dir_west else x
inc_z = lambda z, dir, delta: z + delta if dir == dir_south else z - delta if dir == dir_north else z
# add a neighbour for each potential action; prune out the disallowed states afterwards
for action in FocusedAgent.ACTIONS:
if action.startswith("turn"):
neighbors.append(
FocusedAgent.Neighbour(1, pos.x, pos.z, (pos.direction + int(action.split(' ')[1])) % 4, action))
if action.startswith("move "): # note the space to distinguish from movemnorth etc
sign = int(action.split(' ')[1])
weight = 1 if sign == 1 else 1.5
neighbors.append(
FocusedAgent.Neighbour(weight, inc_x(pos.x, pos.direction, sign), inc_z(pos.z, pos.direction, sign),
pos.direction, action))
if action == "movenorth":
neighbors.append(FocusedAgent.Neighbour(1, pos.x, pos.z - 1, pos.direction, action))
elif action == "moveeast":
neighbors.append(FocusedAgent.Neighbour(1, pos.x + 1, pos.z, pos.direction, action))
elif action == "movesouth":
neighbors.append(FocusedAgent.Neighbour(1, pos.x, pos.z + 1, pos.direction, action))
elif action == "movewest":
neighbors.append(FocusedAgent.Neighbour(1, pos.x - 1, pos.z, pos.direction, action))
# now prune:
valid_neighbours = [n for n in neighbors if
n.x >= 0 and n.x < state_width and n.z >= 0 and n.z < state_height and state[
n.z, n.x] != 'sand']
return valid_neighbours
def heuristic(self, a, b, state=None):
(x1, y1) = (a.x, a.z)
(x2, y2) = (b.x, b.z)
return abs(x1 - x2) + abs(y1 - y2)
def matches(self, a, b):
return a.x == b.x and a.z == b.z # don't worry about dir and action
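# Hedged worked example (standalone arithmetic, no mission required): the yaw-to-direction
# conversion used in FocusedAgent.act. Minecraft yaw 0 faces south, 90 west, 180 north and
# 270 east, and the formula maps those onto 0=north, 1=east, 2=south, 3=west.
def _example_yaw_to_direction():
    for yaw, expected in [(180, 0), (270, 1), (0, 2), (90, 3)]:
        direction = ((((yaw - 45) % 360) // 90) - 1) % 4
        assert direction == expected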
class PigChaseHumanAgent(GuiAgent):
def __init__(self, name, environment, keymap, max_episodes, max_actions,
visualizer, quit):
self._max_episodes = max_episodes
self._max_actions = max_actions
self._action_taken = 0
self._episode = 1
self._scores = []
self._rewards = []
self._episode_has_ended = False
self._episode_has_started = False
self._quit_event = quit
super(PigChaseHumanAgent, self).__init__(name, environment, keymap,
visualizer=visualizer)
def _build_layout(self, root):
# Left part of the GUI, first person view
self._first_person_header = ttk.Label(root, text='First Person View', font=(None, 14, 'bold')) \
.grid(row=0, column=0)
self._first_person_view = ttk.Label(root)
self._first_person_view.grid(row=1, column=0, rowspan=10)
# Right part, top
self._first_person_header = ttk.Label(root, text='Symbolic View', font=(None, 14, 'bold')) \
.grid(row=0, column=1)
self._symbolic_view = Canvas(root)
self._symbolic_view.configure(width=ENV_BOARD_SHAPE[0]*CELL_WIDTH,
height=ENV_BOARD_SHAPE[1]*CELL_WIDTH)
self._symbolic_view.grid(row=1, column=1)
# Bottom information
self._information_panel = ttk.Label(root, text='Game stats', font=(None, 14, 'bold'))
self._current_episode_lbl = ttk.Label(root, text='Episode: 0', font=(None, 12))
self._cum_reward_lbl = ttk.Label(root, text='Score: 0', font=(None, 12, 'bold'))
self._last_action_lbl = ttk.Label(root, text='Previous action: None', font=(None, 12))
self._action_done_lbl = ttk.Label(root, text='Actions taken: 0', font=(None, 12))
self._action_remaining_lbl = ttk.Label(root, text='Actions remaining: 0', font=(None, 12))
self._information_panel.grid(row=2, column=1)
self._current_episode_lbl.grid(row=3, column=1, sticky=W, padx=20)
self._cum_reward_lbl.grid(row=4, column=1, sticky=W, padx=20)
self._last_action_lbl.grid(row=5, column=1, sticky=W, padx=20)
self._action_done_lbl.grid(row=6, column=1, sticky=W, padx=20)
self._action_remaining_lbl.grid(row=7, column=1, sticky=W, padx=20)
self._overlay = None
# Main rendering callback
self._pressed_binding = root.bind('<Key>', self._on_key_pressed)
self._user_pressed_enter = False
# UI Update callback
root.after(self._tick, self._poll_frame)
root.after(1000, self._on_episode_start)
root.focus()
def _draw_arrow(self, yaw, x, y, cell_width, colour):
if yaw == 0.:
x1, y1 = (x + .15) * cell_width, (y + .15) * cell_width
x2, y2 = (x + .5) * cell_width, (y + .4) * cell_width
x3, y3 = (x + .85) * cell_width, (y + .85) * cell_width
self._symbolic_view.create_polygon(x1, y1, x2, y3, x3, y1, x2, y2, fill=colour)
elif yaw == 90.:
x1, y1 = (x + .15) * cell_width, (y + .15) * cell_width
x2, y2 = (x + .6) * cell_width, (y + .5) * cell_width
x3, y3 = (x + .85) * cell_width, (y + .85) * cell_width
self._symbolic_view.create_polygon(x1, y2, x3, y1, x2, y2, x3, y3, fill=colour)
elif yaw == 180.:
x1, y1 = (x + .15) * cell_width, (y + .15) * cell_width
x2, y2 = (x + .5) * cell_width, (y + .6) * cell_width
x3, y3 = (x + .85) * cell_width, (y + .85) * cell_width
self._symbolic_view.create_polygon(x1, y3, x2, y1, x3, y3, x2, y2, fill=colour)
else:
x1, y1 = (x + .15) * cell_width, (y + .15) * cell_width
x2, y2 = (x + .4) * cell_width, (y + .5) * cell_width
x3, y3 = (x + .85) * cell_width, (y + .85) * cell_width
self._symbolic_view.create_polygon(x1, y3, x2, y2, x1, y1, x3, y2, fill=colour)
def _poll_frame(self):
"""
Main callback for UI rendering.
Called at regular intervals.
The method will ask the environment to provide a frame if available (not None).
:return:
"""
cell_width = CELL_WIDTH
circle_radius = 10
# are we done?
if self._env.done and not self._episode_has_ended:
self._on_episode_end()
# build symbolic view
board = None
if self._env is not None:
board, _ = self._env._internal_symbolic_builder.build(self._env)
if board is not None:
board = board.T
self._symbolic_view.delete('all') # Remove all previous items from Tkinter tracking
width, height = board.shape
for x in range(width):
for y in range(height):
cell_contents = str.split(str(board[x][y]), '/')
for block in cell_contents:
if block == 'sand':
self._symbolic_view.create_rectangle(x * cell_width, y * cell_width,
(x + 1) * cell_width, (y + 1) * cell_width,
outline="black", fill="orange", tags="square")
elif block == 'grass':
self._symbolic_view.create_rectangle(x * cell_width, y * cell_width,
(x + 1) * cell_width, (y + 1) * cell_width,
outline="black", fill="lawn green", tags="square")
elif block == 'lapis_block':
self._symbolic_view.create_rectangle(x * cell_width, y * cell_width,
(x + 1) * cell_width, (y + 1) * cell_width,
outline="black", fill="black", tags="square")
elif block == ENV_TARGET_NAMES[0]:
self._symbolic_view.create_oval((x + .5) * cell_width - circle_radius,
(y + .5) * cell_width - circle_radius,
(x + .5) * cell_width + circle_radius,
(y + .5) * cell_width + circle_radius,
fill='pink')
elif block == self.name:
yaw = self._env._world_obs['Yaw'] % 360
self._draw_arrow(yaw, x, y, cell_width, 'red')
elif block == ENV_AGENT_NAMES[0]:
# Get yaw of other agent:
entities = self._env._world_obs[ENV_ENTITIES]
other_agent = list(
map(Entity.create, filter(lambda e: e['name'] == ENV_AGENT_NAMES[0], entities)))
if len(other_agent) == 1:
other_agent = other_agent.pop()
yaw = other_agent.yaw % 360
self._draw_arrow(yaw, x, y, cell_width, 'blue')
# display the most recent frame
frame = self._env.frame
if frame is not None:
from PIL import ImageTk
self._first_person_view.image = ImageTk.PhotoImage(image=frame)
self._first_person_view.configure(image=self._first_person_view.image)
self._first_person_view.update()
self._first_person_view.update()
# process game state (e.g., has the episode started?)
if self._episode_has_started and time.time() - self._episode_start_time < 3:
if not hasattr(self, "_init_overlay") or not self._init_overlay:
self._create_overlay()
self._init_overlay.delete("all")
self._init_overlay.create_rectangle(
10, 10, 590, 290, fill="white", outline="red", width="5")
self._init_overlay.create_text(
300, 80, text="Get ready to catch the pig!",
font=('Helvetica', '18'))
self._init_overlay.create_text(
300, 140, text=str(3 - int(time.time() - self._episode_start_time)),
font=('Helvetica', '18'), fill="red")
self._init_overlay.create_text(
300, 220, width=460,
text="How to play: \nUse the left/right arrow keys to turn, "
"forward/back to move. The pig is caught if it is "
"cornered without a free block to escape to.",
font=('Helvetica', '14'), fill="black")
self._root.update()
elif self._episode_has_ended:
if not hasattr(self, "_init_overlay") or not self._init_overlay:
self._create_overlay()
self._init_overlay.delete("all")
self._init_overlay.create_rectangle(
10, 10, 590, 290, fill="white", outline="red", width="5")
self._init_overlay.create_text(
300, 80, text='Finished episode %d of %d' % (self._episode, self._max_episodes),
font=('Helvetica', '18'))
self._init_overlay.create_text(
300, 120, text='Score: %d' % sum(self._rewards),
font=('Helvetica', '18'))
if self._episode > 1:
self._init_overlay.create_text(
300, 160, text='Average over %d episodes: %.2f' % (self._episode, np.mean(self._scores)),
font=('Helvetica', '18'))
self._init_overlay.create_text(
300, 220, width=360,
text="Press RETURN to start the next episode, ESC to exit.",
font=('Helvetica', '14'), fill="black")
self._root.update()
elif hasattr(self, "_init_overlay") and self._init_overlay:
self._destroy_overlay()
# trigger the next update
self._root.after(self._tick, self._poll_frame)
def _create_overlay(self):
self._init_overlay = Canvas(self._root, borderwidth=0, highlightthickness=0, width=600, height=300, bg="gray")
self._init_overlay.place(relx=0.5, rely=0.5, anchor='center')
def _destroy_overlay(self):
self._init_overlay.destroy()
self._init_overlay = None
def _on_key_pressed(self, e):
"""
Main callback for keyboard events
:param e:
:return:
"""
if e.keysym == 'Escape':
self._quit()
if e.keysym == 'Return' and self._episode_has_ended:
if self._episode >= self._max_episodes:
self._quit()
# start the next episode
self._action_taken = 0
self._rewards = []
self._episode += 1
self._env.reset()
self._on_episode_start()
print('Starting episode %d' % self._episode)
if self._episode_has_started and time.time() - self._episode_start_time >= 3:
if e.keysym in self._keymap:
mapped_action = self._keymap.index(e.keysym)
_, reward, done = self._env.do(mapped_action)
self._action_taken += 1
self._rewards.append(reward)
self._on_experiment_updated(mapped_action, reward, done)
def _on_episode_start(self):
self._episode_has_ended = False
self._episode_has_started = True
self._episode_start_time = time.time()
self._on_experiment_updated(None, 0, self._env.done)
def _on_episode_end(self):
# do a turn to ensure we get the final reward and observation
no_op_action = 0
_, reward, done = self._env.do(no_op_action)
self._action_taken += 1
self._rewards.append(reward)
self._on_experiment_updated(no_op_action, reward, done)
# report scores
self._scores.append(sum(self._rewards))
self.visualize(self._episode, 'Reward', sum(self._rewards))
# set flags to start a new episode
self._episode_has_started = False
self._episode_has_ended = True
def _on_experiment_updated(self, action, reward, is_done):
self._current_episode_lbl.config(text='Episode: %d' % self._episode)
self._cum_reward_lbl.config(text='Score: %d' % sum(self._rewards))
self._last_action_lbl.config(text='Previous action: %s' % action)
self._action_done_lbl.config(text='Actions taken: {0}'.format(self._action_taken))
self._action_remaining_lbl.config(text='Actions remaining: %d' % (self._max_actions - self._action_taken))
self._first_person_view.update()
def _quit(self):
self._quit_event.set()
self._root.quit()
sys.exit()
|
py | 7df9293fb0b73dd1d253d6bf7012be34d7488fbd | # -*- coding:utf-8 -*-
import unittest
import mock
import decimal
from ..models import Coupon
class CouponManagerTestCase(unittest.TestCase):
@mock.patch('membership.models.models.Manager.get_queryset')
def test_is_valid_should_call_filter_with_coupon_code_and_claimed_by_as_none_then_call_count_and_return_boolean(
self, get_queryset):
# setup
manager = Coupon.objects
coupon_code = 'acouponcode'
manager_filter = get_queryset.return_value.filter
count = manager_filter.return_value.count
count.return_value = 1
# action
returned_value = manager.is_valid(coupon_code)
# assert
self.assertDictEqual(dict(code=coupon_code, claimed_by__isnull=True),
manager_filter.call_args[1])
self.assertEqual(1, count.call_count)
self.assertTrue(returned_value)
@mock.patch('membership.models.models.Manager.get_queryset')
def test_is_valid_should_return_false_when_coupon_code_is_none(
self, get_queryset):
# setup
manager = Coupon.objects
# action
returned_value = manager.is_valid(None)
# assert
self.assertFalse(returned_value)
@mock.patch('membership.models.models.Manager.get_queryset')
    def test_is_valid_should_return_false_when_coupon_code_is_empty(
            self, get_queryset):
# setup
manager = Coupon.objects
# action
returned_value = manager.is_valid('')
# assert
self.assertFalse(returned_value)
@mock.patch('membership.models.models.Manager.get_queryset')
def test_get_discount_should_return_decimal_discount_from_coupon(
self, get_queryset):
# setup
manager = Coupon.objects
coupon_code = 'acouponcode'
discount = decimal.Decimal(10.24)
get = get_queryset.return_value.get
coupon = get.return_value
coupon.configure_mock(discount=discount)
# action
returned_value = manager.get_discount(coupon_code)
# assert
self.assertDictEqual(dict(code=coupon_code), get.call_args[1])
self.assertEqual(id(discount), id(returned_value))
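# Hedged sketch (an assumption, not the real membership code): a manager shaped like the one
# these tests exercise, to make the mocked get_queryset()/filter()/count() chain easier to follow.
#
# class CouponManager(models.Manager):
#     def is_valid(self, code):
#         if not code:
#             return False
#         return self.get_queryset().filter(code=code, claimed_by__isnull=True).count() > 0
#
#     def get_discount(self, code):
#         return self.get_queryset().get(code=code).discount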
|
py | 7df92a245c82e864413dab4fe6a785d371344d42 | import smtplib
from imap_tools import MailBox, Q
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.application import MIMEApplication
from os.path import basename
import os
class Mail:
def __init__(self, email, password):
self.username = email
self.password = password
def send(self, send_to, subject, text=None, html=None, files=None):
msg = MIMEMultipart()
msg['From'] = self.username
msg['To'] = ', '.join(send_to)
msg['Subject'] = subject
if text is not None:
msg.attach(MIMEText(text + "\n\n\n", 'plain'))
if html is not None:
msg.attach(MIMEText(html + "\n\n\n", 'html'))
for f in files or []:
with open(f, "rb") as fil:
                ext = f.split('.')[-1]
attachedfile = MIMEApplication(fil.read(), _subtype=ext)
attachedfile.add_header(
'content-disposition', 'attachment', filename=basename(f))
msg.attach(attachedfile)
smtp = smtplib.SMTP_SSL('smtp.' + self.username.split("@")[1], 465)
smtp.ehlo()
smtp.login(self.username, self.password)
smtp.sendmail(self.username, send_to, msg.as_string())
smtp.close()
def fetchBox(self, folder=None, Query=None):
if folder is None:
folder = 'INBOX'
if Query is None:
Query = Q(all=True)
server = 'imap.' + self.username.split("@")[1]
mailbox = MailBox(server)
mailbox.login(self.username, self.password, initial_folder=folder) # or mailbox.folder.set instead 3d arg
message_list = []
for mail in mailbox.fetch(Query):
message_list.append(mail)
        mailbox.logout()
        return message_list
@staticmethod
def save_attachments(email, download_folder):
msg = email.obj
att_path = "No attachment found."
for part in msg.walk():
if part.get_content_maintype() == 'multipart':
continue
if part.get('Content-Disposition') is None:
continue
filename = part.get_filename()
att_path = os.path.join(download_folder, filename)
if not os.path.isfile(att_path):
fp = open(att_path, 'wb')
fp.write(part.get_payload(decode=True))
fp.close()
return att_path
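# Hedged usage sketch (the address, password, file name and query below are placeholders):
# send a message with one attachment, then pull unread mail and save any attachments.
def _example_mail_roundtrip():
    mail = Mail('user@example.com', 'app-password')
    mail.send(['friend@example.com'], 'Weekly report', text='See attached.', files=['report.pdf'])
    for message in mail.fetchBox('INBOX', Q(seen=False)):
        Mail.save_attachments(message, download_folder='downloads')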
'''import win32com
import win32com.client
def folders():
outlook = win32com.client.Dispatch("Outlook.Application").GetNamespace('MAPI')
Folders={}
fList = [3,4,5,6,23]
for i in fList:
try:
folder = outlook.GetDefaultFolder(i)
Folders[folder.name] = i
except:
pass
return(Folders)
def accounts():
outlook = win32com.client.Dispatch("Outlook.Application").GetNamespace('MAPI')
for account in outlook.Folders:
return account.name
class outlook:
def __init__ (self):
self.outlook = win32com.client.Dispatch("Outlook.Application").GetNamespace("MAPI")
self.folders = folders()
self.accounts = accounts()
def emails(self,folder):
i= folders()[folder]
Folder= self.outlook.GetDefaultFolder(i)
messages = Folder.Items
MessageList=[]
for message in messages:
MessageList.append(message)
return MessageList
def DownloadAttachments(self, email, folder, extension = None):
Attachments= email.Attachments
AttachmentNum = Attachments.Count
if AttachmentNum > 0:
try:
for i in range(1,int(AttachmentNum)):
fileType = str(Attachments.item(i)).split(".")[1]
fileType = fileType.lower()
if extension == None:
if fileType != "png" and fileType != "jpg" and fileType != "jpeg" and fileType != "gif":
Attachments.Item(i).SaveASFile(folder + str(Attachments.item(i)))
else:
if fileType == extension.replace(".",""):
Attachments.Item(i).SaveASFile(folder + str(Attachments.item(i)))
except:
pass
def SendEmail(self,To,Subject,Body, Attachment= None):
outlook = win32com.client.Dispatch('outlook.application')
mail = outlook.CreateItem(0)
mail.To = To
mail.Subject = Subject
mail.Body = Body
#mail.HTMLBody = '<h2>HTML Message body</h2>' #this field is optional
if Attachment != None:
attachment = Attachment
mail.Attachments.Add(attachment)
mail.Send()
'''
|
py | 7df92a7f6bbbe22962a38bed15d28cdc2ab11153 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'dw_inputs_fields.ui'
#
# Created: Thu Dec 13 17:14:04 2018
# by: pyside-uic 0.2.15 running on PySide 1.2.2
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
class Ui_DockWidget(object):
def setupUi(self, DockWidget):
DockWidget.setObjectName("DockWidget")
DockWidget.resize(703, 557)
self.dockWidgetContents = QtGui.QWidget()
self.dockWidgetContents.setObjectName("dockWidgetContents")
self.gridLayout = QtGui.QGridLayout(self.dockWidgetContents)
self.gridLayout.setObjectName("gridLayout")
self.label = QtGui.QLabel(self.dockWidgetContents)
font = QtGui.QFont()
font.setWeight(75)
font.setBold(True)
self.label.setFont(font)
self.label.setObjectName("label")
self.gridLayout.addWidget(self.label, 0, 1, 1, 1)
self.label_2 = QtGui.QLabel(self.dockWidgetContents)
font = QtGui.QFont()
font.setWeight(75)
font.setBold(True)
self.label_2.setFont(font)
self.label_2.setObjectName("label_2")
self.gridLayout.addWidget(self.label_2, 0, 2, 1, 1)
self.label_12 = QtGui.QLabel(self.dockWidgetContents)
self.label_12.setMinimumSize(QtCore.QSize(0, 0))
self.label_12.setMaximumSize(QtCore.QSize(16777215, 16777215))
font = QtGui.QFont()
font.setWeight(75)
font.setBold(True)
self.label_12.setFont(font)
self.label_12.setObjectName("label_12")
self.gridLayout.addWidget(self.label_12, 1, 0, 1, 1)
self.fontComboBox = QtGui.QFontComboBox(self.dockWidgetContents)
self.fontComboBox.setMinimumSize(QtCore.QSize(0, 0))
self.fontComboBox.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.fontComboBox.setObjectName("fontComboBox")
self.gridLayout.addWidget(self.fontComboBox, 1, 1, 1, 1)
self.fontComboBoxDis = QtGui.QFontComboBox(self.dockWidgetContents)
self.fontComboBoxDis.setEnabled(False)
self.fontComboBoxDis.setMinimumSize(QtCore.QSize(0, 0))
self.fontComboBoxDis.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.fontComboBoxDis.setObjectName("fontComboBoxDis")
self.gridLayout.addWidget(self.fontComboBoxDis, 1, 2, 1, 1)
self.label_3 = QtGui.QLabel(self.dockWidgetContents)
self.label_3.setObjectName("label_3")
self.gridLayout.addWidget(self.label_3, 2, 0, 1, 1)
self.comboBoxEdit = QtGui.QComboBox(self.dockWidgetContents)
self.comboBoxEdit.setEditable(True)
self.comboBoxEdit.setObjectName("comboBoxEdit")
self.comboBoxEdit.addItem("")
self.comboBoxEdit.addItem("")
self.comboBoxEdit.addItem("")
self.comboBoxEdit.setItemText(2, "")
self.gridLayout.addWidget(self.comboBoxEdit, 2, 1, 1, 1)
self.comboBoxEditDis = QtGui.QComboBox(self.dockWidgetContents)
self.comboBoxEditDis.setEnabled(False)
self.comboBoxEditDis.setEditable(True)
self.comboBoxEditDis.setObjectName("comboBoxEditDis")
self.comboBoxEditDis.addItem("")
self.comboBoxEditDis.addItem("")
self.comboBoxEditDis.addItem("")
self.comboBoxEditDis.setItemText(2, "")
self.gridLayout.addWidget(self.comboBoxEditDis, 2, 2, 1, 1)
self.label_13 = QtGui.QLabel(self.dockWidgetContents)
self.label_13.setMinimumSize(QtCore.QSize(0, 0))
self.label_13.setMaximumSize(QtCore.QSize(16777215, 16777215))
font = QtGui.QFont()
font.setWeight(75)
font.setBold(True)
self.label_13.setFont(font)
self.label_13.setObjectName("label_13")
self.gridLayout.addWidget(self.label_13, 3, 0, 1, 1)
self.lineEdit = QtGui.QLineEdit(self.dockWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lineEdit.sizePolicy().hasHeightForWidth())
self.lineEdit.setSizePolicy(sizePolicy)
self.lineEdit.setMinimumSize(QtCore.QSize(0, 0))
self.lineEdit.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.lineEdit.setObjectName("lineEdit")
self.gridLayout.addWidget(self.lineEdit, 3, 1, 1, 1)
self.lineEditDis = QtGui.QLineEdit(self.dockWidgetContents)
self.lineEditDis.setEnabled(False)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lineEditDis.sizePolicy().hasHeightForWidth())
self.lineEditDis.setSizePolicy(sizePolicy)
self.lineEditDis.setMinimumSize(QtCore.QSize(0, 0))
self.lineEditDis.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.lineEditDis.setObjectName("lineEditDis")
self.gridLayout.addWidget(self.lineEditDis, 3, 2, 1, 1)
self.label_14 = QtGui.QLabel(self.dockWidgetContents)
self.label_14.setMinimumSize(QtCore.QSize(0, 0))
self.label_14.setMaximumSize(QtCore.QSize(16777215, 16777215))
font = QtGui.QFont()
font.setWeight(75)
font.setBold(True)
self.label_14.setFont(font)
self.label_14.setObjectName("label_14")
self.gridLayout.addWidget(self.label_14, 4, 0, 1, 1)
self.textEdit = QtGui.QTextEdit(self.dockWidgetContents)
self.textEdit.setMinimumSize(QtCore.QSize(0, 0))
self.textEdit.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.textEdit.setObjectName("textEdit")
self.gridLayout.addWidget(self.textEdit, 4, 1, 1, 1)
self.textEditDis = QtGui.QTextEdit(self.dockWidgetContents)
self.textEditDis.setEnabled(False)
self.textEditDis.setMinimumSize(QtCore.QSize(0, 0))
self.textEditDis.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.textEditDis.setObjectName("textEditDis")
self.gridLayout.addWidget(self.textEditDis, 4, 2, 1, 1)
self.label_15 = QtGui.QLabel(self.dockWidgetContents)
self.label_15.setMinimumSize(QtCore.QSize(0, 0))
self.label_15.setMaximumSize(QtCore.QSize(16777215, 16777215))
font = QtGui.QFont()
font.setWeight(75)
font.setBold(True)
self.label_15.setFont(font)
self.label_15.setObjectName("label_15")
self.gridLayout.addWidget(self.label_15, 5, 0, 1, 1)
self.plainTextEdit = QtGui.QPlainTextEdit(self.dockWidgetContents)
self.plainTextEdit.setMinimumSize(QtCore.QSize(0, 0))
self.plainTextEdit.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.plainTextEdit.setObjectName("plainTextEdit")
self.gridLayout.addWidget(self.plainTextEdit, 5, 1, 1, 1)
self.plainTextEditDis = QtGui.QPlainTextEdit(self.dockWidgetContents)
self.plainTextEditDis.setEnabled(False)
self.plainTextEditDis.setMinimumSize(QtCore.QSize(0, 0))
self.plainTextEditDis.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.plainTextEditDis.setObjectName("plainTextEditDis")
self.gridLayout.addWidget(self.plainTextEditDis, 5, 2, 1, 1)
self.label_16 = QtGui.QLabel(self.dockWidgetContents)
self.label_16.setMinimumSize(QtCore.QSize(0, 0))
self.label_16.setMaximumSize(QtCore.QSize(16777215, 16777215))
font = QtGui.QFont()
font.setWeight(75)
font.setBold(True)
self.label_16.setFont(font)
self.label_16.setObjectName("label_16")
self.gridLayout.addWidget(self.label_16, 6, 0, 1, 1)
self.spinBox = QtGui.QSpinBox(self.dockWidgetContents)
self.spinBox.setMinimumSize(QtCore.QSize(0, 0))
self.spinBox.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.spinBox.setObjectName("spinBox")
self.gridLayout.addWidget(self.spinBox, 6, 1, 1, 1)
self.spinBoxDis = QtGui.QSpinBox(self.dockWidgetContents)
self.spinBoxDis.setEnabled(False)
self.spinBoxDis.setMinimumSize(QtCore.QSize(0, 0))
self.spinBoxDis.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.spinBoxDis.setObjectName("spinBoxDis")
self.gridLayout.addWidget(self.spinBoxDis, 6, 2, 1, 1)
self.label_17 = QtGui.QLabel(self.dockWidgetContents)
self.label_17.setMinimumSize(QtCore.QSize(0, 0))
self.label_17.setMaximumSize(QtCore.QSize(16777215, 16777215))
font = QtGui.QFont()
font.setWeight(75)
font.setBold(True)
self.label_17.setFont(font)
self.label_17.setObjectName("label_17")
self.gridLayout.addWidget(self.label_17, 7, 0, 1, 1)
self.doubleSpinBox = QtGui.QDoubleSpinBox(self.dockWidgetContents)
self.doubleSpinBox.setMinimumSize(QtCore.QSize(0, 0))
self.doubleSpinBox.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.doubleSpinBox.setObjectName("doubleSpinBox")
self.gridLayout.addWidget(self.doubleSpinBox, 7, 1, 1, 1)
self.doubleSpinBoxDis = QtGui.QDoubleSpinBox(self.dockWidgetContents)
self.doubleSpinBoxDis.setEnabled(False)
self.doubleSpinBoxDis.setMinimumSize(QtCore.QSize(0, 0))
self.doubleSpinBoxDis.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.doubleSpinBoxDis.setObjectName("doubleSpinBoxDis")
self.gridLayout.addWidget(self.doubleSpinBoxDis, 7, 2, 1, 1)
self.label_18 = QtGui.QLabel(self.dockWidgetContents)
self.label_18.setMinimumSize(QtCore.QSize(0, 0))
self.label_18.setMaximumSize(QtCore.QSize(16777215, 16777215))
font = QtGui.QFont()
font.setWeight(75)
font.setBold(True)
self.label_18.setFont(font)
self.label_18.setObjectName("label_18")
self.gridLayout.addWidget(self.label_18, 8, 0, 1, 1)
self.timeEdit = QtGui.QTimeEdit(self.dockWidgetContents)
self.timeEdit.setMinimumSize(QtCore.QSize(0, 0))
self.timeEdit.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.timeEdit.setObjectName("timeEdit")
self.gridLayout.addWidget(self.timeEdit, 8, 1, 1, 1)
self.timeEditDis = QtGui.QTimeEdit(self.dockWidgetContents)
self.timeEditDis.setEnabled(False)
self.timeEditDis.setMinimumSize(QtCore.QSize(0, 0))
self.timeEditDis.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.timeEditDis.setObjectName("timeEditDis")
self.gridLayout.addWidget(self.timeEditDis, 8, 2, 1, 1)
self.label_19 = QtGui.QLabel(self.dockWidgetContents)
self.label_19.setMinimumSize(QtCore.QSize(0, 0))
self.label_19.setMaximumSize(QtCore.QSize(16777215, 16777215))
font = QtGui.QFont()
font.setWeight(75)
font.setBold(True)
self.label_19.setFont(font)
self.label_19.setObjectName("label_19")
self.gridLayout.addWidget(self.label_19, 9, 0, 1, 1)
self.dateEdit = QtGui.QDateEdit(self.dockWidgetContents)
self.dateEdit.setMinimumSize(QtCore.QSize(0, 0))
self.dateEdit.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.dateEdit.setObjectName("dateEdit")
self.gridLayout.addWidget(self.dateEdit, 9, 1, 1, 1)
self.dateEditDis = QtGui.QDateEdit(self.dockWidgetContents)
self.dateEditDis.setEnabled(False)
self.dateEditDis.setMinimumSize(QtCore.QSize(0, 0))
self.dateEditDis.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.dateEditDis.setObjectName("dateEditDis")
self.gridLayout.addWidget(self.dateEditDis, 9, 2, 1, 1)
self.label_20 = QtGui.QLabel(self.dockWidgetContents)
self.label_20.setMinimumSize(QtCore.QSize(0, 0))
self.label_20.setMaximumSize(QtCore.QSize(16777215, 16777215))
font = QtGui.QFont()
font.setWeight(75)
font.setBold(True)
self.label_20.setFont(font)
self.label_20.setObjectName("label_20")
self.gridLayout.addWidget(self.label_20, 10, 0, 1, 1)
self.dateTimeEdit = QtGui.QDateTimeEdit(self.dockWidgetContents)
self.dateTimeEdit.setMinimumSize(QtCore.QSize(0, 0))
self.dateTimeEdit.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.dateTimeEdit.setObjectName("dateTimeEdit")
self.gridLayout.addWidget(self.dateTimeEdit, 10, 1, 1, 1)
self.dateTimeEditDis = QtGui.QDateTimeEdit(self.dockWidgetContents)
self.dateTimeEditDis.setEnabled(False)
self.dateTimeEditDis.setMinimumSize(QtCore.QSize(0, 0))
self.dateTimeEditDis.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.dateTimeEditDis.setObjectName("dateTimeEditDis")
self.gridLayout.addWidget(self.dateTimeEditDis, 10, 2, 1, 1)
spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.gridLayout.addItem(spacerItem, 11, 0, 1, 1)
self.label_51 = QtGui.QLabel(self.dockWidgetContents)
self.label_51.setAlignment(QtCore.Qt.AlignCenter)
self.label_51.setObjectName("label_51")
self.gridLayout.addWidget(self.label_51, 12, 0, 1, 3)
DockWidget.setWidget(self.dockWidgetContents)
self.retranslateUi(DockWidget)
QtCore.QObject.connect(self.fontComboBox, QtCore.SIGNAL("editTextChanged(QString)"), self.fontComboBoxDis.setEditText)
QtCore.QObject.connect(self.lineEdit, QtCore.SIGNAL("textEdited(QString)"), self.lineEditDis.setText)
QtCore.QObject.connect(self.spinBox, QtCore.SIGNAL("valueChanged(int)"), self.spinBoxDis.setValue)
QtCore.QObject.connect(self.doubleSpinBox, QtCore.SIGNAL("valueChanged(double)"), self.doubleSpinBoxDis.setValue)
QtCore.QObject.connect(self.timeEdit, QtCore.SIGNAL("timeChanged(QTime)"), self.timeEditDis.setTime)
QtCore.QObject.connect(self.dateEdit, QtCore.SIGNAL("dateTimeChanged(QDateTime)"), self.dateEditDis.setDateTime)
QtCore.QObject.connect(self.dateTimeEdit, QtCore.SIGNAL("dateTimeChanged(QDateTime)"), self.dateTimeEditDis.setDateTime)
QtCore.QMetaObject.connectSlotsByName(DockWidget)
def retranslateUi(self, DockWidget):
DockWidget.setWindowTitle(QtGui.QApplication.translate("DockWidget", "Inputs - Fields", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("DockWidget", "Enabled", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setText(QtGui.QApplication.translate("DockWidget", "Disabled", None, QtGui.QApplication.UnicodeUTF8))
self.label_12.setToolTip(QtGui.QApplication.translate("DockWidget", "This is a tool tip", None, QtGui.QApplication.UnicodeUTF8))
self.label_12.setStatusTip(QtGui.QApplication.translate("DockWidget", "This is a status tip", None, QtGui.QApplication.UnicodeUTF8))
self.label_12.setWhatsThis(QtGui.QApplication.translate("DockWidget", "This is \"what is this\"", None, QtGui.QApplication.UnicodeUTF8))
self.label_12.setText(QtGui.QApplication.translate("DockWidget", "FontComboBox", None, QtGui.QApplication.UnicodeUTF8))
self.fontComboBox.setToolTip(QtGui.QApplication.translate("DockWidget", "This is a tool tip", None, QtGui.QApplication.UnicodeUTF8))
self.fontComboBox.setStatusTip(QtGui.QApplication.translate("DockWidget", "This is a status tip", None, QtGui.QApplication.UnicodeUTF8))
self.fontComboBox.setWhatsThis(QtGui.QApplication.translate("DockWidget", "This is \"what is this\"", None, QtGui.QApplication.UnicodeUTF8))
self.fontComboBoxDis.setToolTip(QtGui.QApplication.translate("DockWidget", "This is a tool tip", None, QtGui.QApplication.UnicodeUTF8))
self.fontComboBoxDis.setStatusTip(QtGui.QApplication.translate("DockWidget", "This is a status tip", None, QtGui.QApplication.UnicodeUTF8))
self.fontComboBoxDis.setWhatsThis(QtGui.QApplication.translate("DockWidget", "This is \"what is this\"", None, QtGui.QApplication.UnicodeUTF8))
self.label_3.setText(QtGui.QApplication.translate("DockWidget", "<html><head/><body><p><span style=\" font-weight:600;\">ComboBox</span></p></body></html>", None, QtGui.QApplication.UnicodeUTF8))
self.comboBoxEdit.setItemText(0, QtGui.QApplication.translate("DockWidget", "ComboBoxEditable", None, QtGui.QApplication.UnicodeUTF8))
self.comboBoxEdit.setItemText(1, QtGui.QApplication.translate("DockWidget", "Second option", None, QtGui.QApplication.UnicodeUTF8))
self.comboBoxEditDis.setItemText(0, QtGui.QApplication.translate("DockWidget", "ComboBoxEditable", None, QtGui.QApplication.UnicodeUTF8))
self.comboBoxEditDis.setItemText(1, QtGui.QApplication.translate("DockWidget", "Second option", None, QtGui.QApplication.UnicodeUTF8))
self.label_13.setToolTip(QtGui.QApplication.translate("DockWidget", "This is a tool tip", None, QtGui.QApplication.UnicodeUTF8))
self.label_13.setStatusTip(QtGui.QApplication.translate("DockWidget", "This is a status tip", None, QtGui.QApplication.UnicodeUTF8))
self.label_13.setWhatsThis(QtGui.QApplication.translate("DockWidget", "This is \"what is this\"", None, QtGui.QApplication.UnicodeUTF8))
self.label_13.setText(QtGui.QApplication.translate("DockWidget", "LineEdit", None, QtGui.QApplication.UnicodeUTF8))
self.lineEdit.setToolTip(QtGui.QApplication.translate("DockWidget", "This is a tool tip", None, QtGui.QApplication.UnicodeUTF8))
self.lineEdit.setStatusTip(QtGui.QApplication.translate("DockWidget", "This is a status tip", None, QtGui.QApplication.UnicodeUTF8))
self.lineEdit.setWhatsThis(QtGui.QApplication.translate("DockWidget", "This is \"what is this\"", None, QtGui.QApplication.UnicodeUTF8))
self.lineEdit.setText(QtGui.QApplication.translate("DockWidget", "LineEdit", None, QtGui.QApplication.UnicodeUTF8))
self.lineEditDis.setToolTip(QtGui.QApplication.translate("DockWidget", "This is a tool tip", None, QtGui.QApplication.UnicodeUTF8))
self.lineEditDis.setStatusTip(QtGui.QApplication.translate("DockWidget", "This is a status tip", None, QtGui.QApplication.UnicodeUTF8))
self.lineEditDis.setWhatsThis(QtGui.QApplication.translate("DockWidget", "This is \"what is this\"", None, QtGui.QApplication.UnicodeUTF8))
self.lineEditDis.setText(QtGui.QApplication.translate("DockWidget", "LineEdit", None, QtGui.QApplication.UnicodeUTF8))
self.label_14.setToolTip(QtGui.QApplication.translate("DockWidget", "This is a tool tip", None, QtGui.QApplication.UnicodeUTF8))
self.label_14.setStatusTip(QtGui.QApplication.translate("DockWidget", "This is a status tip", None, QtGui.QApplication.UnicodeUTF8))
self.label_14.setWhatsThis(QtGui.QApplication.translate("DockWidget", "This is \"what is this\"", None, QtGui.QApplication.UnicodeUTF8))
self.label_14.setText(QtGui.QApplication.translate("DockWidget", "TextEdit", None, QtGui.QApplication.UnicodeUTF8))
self.textEdit.setToolTip(QtGui.QApplication.translate("DockWidget", "This is a tool tip", None, QtGui.QApplication.UnicodeUTF8))
self.textEdit.setStatusTip(QtGui.QApplication.translate("DockWidget", "This is a status tip", None, QtGui.QApplication.UnicodeUTF8))
self.textEdit.setWhatsThis(QtGui.QApplication.translate("DockWidget", "This is \"what is this\"", None, QtGui.QApplication.UnicodeUTF8))
self.textEdit.setHtml(QtGui.QApplication.translate("DockWidget", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Cantarell\'; font-size:11pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">TextEdit</p></body></html>", None, QtGui.QApplication.UnicodeUTF8))
self.textEditDis.setToolTip(QtGui.QApplication.translate("DockWidget", "This is a tool tip", None, QtGui.QApplication.UnicodeUTF8))
self.textEditDis.setStatusTip(QtGui.QApplication.translate("DockWidget", "This is a status tip", None, QtGui.QApplication.UnicodeUTF8))
self.textEditDis.setWhatsThis(QtGui.QApplication.translate("DockWidget", "This is \"what is this\"", None, QtGui.QApplication.UnicodeUTF8))
self.textEditDis.setHtml(QtGui.QApplication.translate("DockWidget", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Cantarell\'; font-size:11pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">TextEdit</p></body></html>", None, QtGui.QApplication.UnicodeUTF8))
self.label_15.setToolTip(QtGui.QApplication.translate("DockWidget", "This is a tool tip", None, QtGui.QApplication.UnicodeUTF8))
self.label_15.setStatusTip(QtGui.QApplication.translate("DockWidget", "This is a status tip", None, QtGui.QApplication.UnicodeUTF8))
self.label_15.setWhatsThis(QtGui.QApplication.translate("DockWidget", "This is \"what is this\"", None, QtGui.QApplication.UnicodeUTF8))
self.label_15.setText(QtGui.QApplication.translate("DockWidget", "PlainTextEdit", None, QtGui.QApplication.UnicodeUTF8))
self.plainTextEdit.setToolTip(QtGui.QApplication.translate("DockWidget", "This is a tool tip", None, QtGui.QApplication.UnicodeUTF8))
self.plainTextEdit.setStatusTip(QtGui.QApplication.translate("DockWidget", "This is a status tip", None, QtGui.QApplication.UnicodeUTF8))
self.plainTextEdit.setWhatsThis(QtGui.QApplication.translate("DockWidget", "This is \"what is this\"", None, QtGui.QApplication.UnicodeUTF8))
self.plainTextEdit.setPlainText(QtGui.QApplication.translate("DockWidget", "PlainTextEdit", None, QtGui.QApplication.UnicodeUTF8))
self.plainTextEditDis.setToolTip(QtGui.QApplication.translate("DockWidget", "This is a tool tip", None, QtGui.QApplication.UnicodeUTF8))
self.plainTextEditDis.setStatusTip(QtGui.QApplication.translate("DockWidget", "This is a status tip", None, QtGui.QApplication.UnicodeUTF8))
self.plainTextEditDis.setWhatsThis(QtGui.QApplication.translate("DockWidget", "This is \"what is this\"", None, QtGui.QApplication.UnicodeUTF8))
self.plainTextEditDis.setPlainText(QtGui.QApplication.translate("DockWidget", "PlainTextEdit", None, QtGui.QApplication.UnicodeUTF8))
self.label_16.setToolTip(QtGui.QApplication.translate("DockWidget", "This is a tool tip", None, QtGui.QApplication.UnicodeUTF8))
self.label_16.setStatusTip(QtGui.QApplication.translate("DockWidget", "This is a status tip", None, QtGui.QApplication.UnicodeUTF8))
self.label_16.setWhatsThis(QtGui.QApplication.translate("DockWidget", "This is \"what is this\"", None, QtGui.QApplication.UnicodeUTF8))
self.label_16.setText(QtGui.QApplication.translate("DockWidget", "SpinBox", None, QtGui.QApplication.UnicodeUTF8))
self.spinBox.setToolTip(QtGui.QApplication.translate("DockWidget", "This is a tool tip", None, QtGui.QApplication.UnicodeUTF8))
self.spinBox.setStatusTip(QtGui.QApplication.translate("DockWidget", "This is a status tip", None, QtGui.QApplication.UnicodeUTF8))
self.spinBox.setWhatsThis(QtGui.QApplication.translate("DockWidget", "This is \"what is this\"", None, QtGui.QApplication.UnicodeUTF8))
self.spinBoxDis.setToolTip(QtGui.QApplication.translate("DockWidget", "This is a tool tip", None, QtGui.QApplication.UnicodeUTF8))
self.spinBoxDis.setStatusTip(QtGui.QApplication.translate("DockWidget", "This is a status tip", None, QtGui.QApplication.UnicodeUTF8))
self.spinBoxDis.setWhatsThis(QtGui.QApplication.translate("DockWidget", "This is \"what is this\"", None, QtGui.QApplication.UnicodeUTF8))
self.label_17.setToolTip(QtGui.QApplication.translate("DockWidget", "This is a tool tip", None, QtGui.QApplication.UnicodeUTF8))
self.label_17.setStatusTip(QtGui.QApplication.translate("DockWidget", "This is a status tip", None, QtGui.QApplication.UnicodeUTF8))
self.label_17.setWhatsThis(QtGui.QApplication.translate("DockWidget", "This is \"what is this\"", None, QtGui.QApplication.UnicodeUTF8))
self.label_17.setText(QtGui.QApplication.translate("DockWidget", "DoubleSpinBox", None, QtGui.QApplication.UnicodeUTF8))
self.doubleSpinBox.setToolTip(QtGui.QApplication.translate("DockWidget", "This is a tool tip", None, QtGui.QApplication.UnicodeUTF8))
self.doubleSpinBox.setStatusTip(QtGui.QApplication.translate("DockWidget", "This is a status tip", None, QtGui.QApplication.UnicodeUTF8))
self.doubleSpinBox.setWhatsThis(QtGui.QApplication.translate("DockWidget", "This is \"what is this\"", None, QtGui.QApplication.UnicodeUTF8))
self.doubleSpinBoxDis.setToolTip(QtGui.QApplication.translate("DockWidget", "This is a tool tip", None, QtGui.QApplication.UnicodeUTF8))
self.doubleSpinBoxDis.setStatusTip(QtGui.QApplication.translate("DockWidget", "This is a status tip", None, QtGui.QApplication.UnicodeUTF8))
self.doubleSpinBoxDis.setWhatsThis(QtGui.QApplication.translate("DockWidget", "This is \"what is this\"", None, QtGui.QApplication.UnicodeUTF8))
self.label_18.setToolTip(QtGui.QApplication.translate("DockWidget", "This is a tool tip", None, QtGui.QApplication.UnicodeUTF8))
self.label_18.setStatusTip(QtGui.QApplication.translate("DockWidget", "This is a status tip", None, QtGui.QApplication.UnicodeUTF8))
self.label_18.setWhatsThis(QtGui.QApplication.translate("DockWidget", "This is \"what is this\"", None, QtGui.QApplication.UnicodeUTF8))
self.label_18.setText(QtGui.QApplication.translate("DockWidget", "TimeEdit", None, QtGui.QApplication.UnicodeUTF8))
self.timeEdit.setToolTip(QtGui.QApplication.translate("DockWidget", "This is a tool tip", None, QtGui.QApplication.UnicodeUTF8))
self.timeEdit.setStatusTip(QtGui.QApplication.translate("DockWidget", "This is a status tip", None, QtGui.QApplication.UnicodeUTF8))
self.timeEdit.setWhatsThis(QtGui.QApplication.translate("DockWidget", "This is \"what is this\"", None, QtGui.QApplication.UnicodeUTF8))
self.timeEditDis.setToolTip(QtGui.QApplication.translate("DockWidget", "This is a tool tip", None, QtGui.QApplication.UnicodeUTF8))
self.timeEditDis.setStatusTip(QtGui.QApplication.translate("DockWidget", "This is a status tip", None, QtGui.QApplication.UnicodeUTF8))
self.timeEditDis.setWhatsThis(QtGui.QApplication.translate("DockWidget", "This is \"what is this\"", None, QtGui.QApplication.UnicodeUTF8))
self.label_19.setToolTip(QtGui.QApplication.translate("DockWidget", "This is a tool tip", None, QtGui.QApplication.UnicodeUTF8))
self.label_19.setStatusTip(QtGui.QApplication.translate("DockWidget", "This is a status tip", None, QtGui.QApplication.UnicodeUTF8))
self.label_19.setWhatsThis(QtGui.QApplication.translate("DockWidget", "This is \"what is this\"", None, QtGui.QApplication.UnicodeUTF8))
self.label_19.setText(QtGui.QApplication.translate("DockWidget", "DateEdit", None, QtGui.QApplication.UnicodeUTF8))
self.dateEdit.setToolTip(QtGui.QApplication.translate("DockWidget", "This is a tool tip", None, QtGui.QApplication.UnicodeUTF8))
self.dateEdit.setStatusTip(QtGui.QApplication.translate("DockWidget", "This is a status tip", None, QtGui.QApplication.UnicodeUTF8))
self.dateEdit.setWhatsThis(QtGui.QApplication.translate("DockWidget", "This is \"what is this\"", None, QtGui.QApplication.UnicodeUTF8))
self.dateEditDis.setToolTip(QtGui.QApplication.translate("DockWidget", "This is a tool tip", None, QtGui.QApplication.UnicodeUTF8))
self.dateEditDis.setStatusTip(QtGui.QApplication.translate("DockWidget", "This is a status tip", None, QtGui.QApplication.UnicodeUTF8))
self.dateEditDis.setWhatsThis(QtGui.QApplication.translate("DockWidget", "This is \"what is this\"", None, QtGui.QApplication.UnicodeUTF8))
self.label_20.setToolTip(QtGui.QApplication.translate("DockWidget", "This is a tool tip", None, QtGui.QApplication.UnicodeUTF8))
self.label_20.setStatusTip(QtGui.QApplication.translate("DockWidget", "This is a status tip", None, QtGui.QApplication.UnicodeUTF8))
self.label_20.setWhatsThis(QtGui.QApplication.translate("DockWidget", "This is \"what is this\"", None, QtGui.QApplication.UnicodeUTF8))
self.label_20.setText(QtGui.QApplication.translate("DockWidget", "TimeDateEdit", None, QtGui.QApplication.UnicodeUTF8))
self.dateTimeEdit.setToolTip(QtGui.QApplication.translate("DockWidget", "This is a tool tip", None, QtGui.QApplication.UnicodeUTF8))
self.dateTimeEdit.setStatusTip(QtGui.QApplication.translate("DockWidget", "This is a status tip", None, QtGui.QApplication.UnicodeUTF8))
self.dateTimeEdit.setWhatsThis(QtGui.QApplication.translate("DockWidget", "This is \"what is this\"", None, QtGui.QApplication.UnicodeUTF8))
self.dateTimeEditDis.setToolTip(QtGui.QApplication.translate("DockWidget", "This is a tool tip", None, QtGui.QApplication.UnicodeUTF8))
self.dateTimeEditDis.setStatusTip(QtGui.QApplication.translate("DockWidget", "This is a status tip", None, QtGui.QApplication.UnicodeUTF8))
self.dateTimeEditDis.setWhatsThis(QtGui.QApplication.translate("DockWidget", "This is \"what is this\"", None, QtGui.QApplication.UnicodeUTF8))
self.label_51.setToolTip(QtGui.QApplication.translate("DockWidget", "This is a tool tip", None, QtGui.QApplication.UnicodeUTF8))
self.label_51.setStatusTip(QtGui.QApplication.translate("DockWidget", "This is a status tip", None, QtGui.QApplication.UnicodeUTF8))
self.label_51.setWhatsThis(QtGui.QApplication.translate("DockWidget", "This is \"what is this\"", None, QtGui.QApplication.UnicodeUTF8))
self.label_51.setText(QtGui.QApplication.translate("DockWidget", "Inside DockWidget", None, QtGui.QApplication.UnicodeUTF8))
|
py | 7df92aa3d2f00e6a51997c00b270ff1cdc167086 | from micropython import const
from machine import Pin
from abutton import Pushbutton
PIN_ANTENNA = const(14)
PIN_SOCKET = const(12)
# A single leading underscore is used here: a double-underscore name would be
# mangled to _Panel__btn inside the Panel class body and raise a NameError.
def _btn(num):
return Pushbutton(
pin=Pin(num, Pin.IN, Pin.PULL_UP),
# Don't call release handler after long press.
suppress=True
)
class Panel:
def __init__(self, client):
self.client = client
# Initialise async handling of buttons.
        self.antenna = _btn(PIN_ANTENNA)
        self.socket = _btn(PIN_SOCKET)
# Bind the button input to controller handlers.
self.antenna.release_func(self.on_antenna_short)
self.socket.release_func(self.on_socket_short)
self.antenna.long_func(self.on_antenna_long)
self.socket.long_func(self.on_socket_long)
async def on_antenna_short(self):
print("Volume up")
await self.client.send('mopidy/c/vol', '+5')
async def on_socket_short(self):
print("Volume down")
await self.client.send('mopidy/c/vol', '-5')
async def on_antenna_long(self):
print("Speakers toggle")
await self.client.send('cmnd/speakers/power', 'toggle')
async def on_socket_long(self):
print("Playback toggle")
await self.client.send('mopidy/c/plb', 'toggle')
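# Hedged usage sketch (comment-only addition): the module assumes an MQTT-style
# client exposing an async ``send(topic, payload)`` coroutine and an uasyncio
# event loop driving the button handlers, e.g.:
#
#     import uasyncio as asyncio
#     panel = Panel(client)          # handlers fire once the loop is running
#     asyncio.get_event_loop().run_forever()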
|
py | 7df92af57a6e16af92c1e9fa7974751239a6122e | from collections import Counter
from heapq import heappush, heappushpop, heappop
from typing import List  # needed for the List[int] annotations below
class Solution:
def topKFrequent(self, nums: List[int], k: int) -> List[int]:
cnt = Counter(nums)
cnt = [(v[1], v[0]) for v in cnt.items()]
pq = []
for v in cnt:
if len(pq) < k:
heappush(pq, v)
else:
heappushpop(pq, v)
result = []
while pq: result.append(heappop(pq)[1])
return result[::-1]
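# Minimal self-check (an addition, not part of the original solution): with k = 2
# the two most frequent values in the sample list are 1 and 2.
if __name__ == "__main__":
    assert sorted(Solution().topKFrequent([1, 1, 1, 2, 2, 3], 2)) == [1, 2]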
|
py | 7df92c32b57b9f19b62a4dd5bba0deb97ef0e3aa | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class YoloLoss(nn.Module):
def __init__(self,S,B,l_coord,l_noobj):
super(YoloLoss,self).__init__()
self.S = S
self.B = B
self.l_coord = l_coord
self.l_noobj = l_noobj
def compute_iou(self, box1, box2):
'''Compute the intersection over union of two set of boxes, each box is [x1,y1,x2,y2].
Args:
box1: (tensor) bounding boxes, sized [N,4].
box2: (tensor) bounding boxes, sized [M,4].
Return:
(tensor) iou, sized [N,M].
'''
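        # Worked example (illustrative addition): for box1 = [[0, 0, 2, 2]] and
        # box2 = [[1, 1, 3, 3]], the overlap is the unit square [1, 1, 2, 2], each
        # box has area 4, so iou = 1 / (4 + 4 - 1) = 1/7 (returned as a 1x1 tensor).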
N = box1.size(0)
M = box2.size(0)
lt = torch.max(
box1[:,:2].unsqueeze(1).expand(N,M,2), # [N,2] -> [N,1,2] -> [N,M,2]
box2[:,:2].unsqueeze(0).expand(N,M,2), # [M,2] -> [1,M,2] -> [N,M,2]
)
rb = torch.min(
box1[:,2:].unsqueeze(1).expand(N,M,2), # [N,2] -> [N,1,2] -> [N,M,2]
box2[:,2:].unsqueeze(0).expand(N,M,2), # [M,2] -> [1,M,2] -> [N,M,2]
)
wh = rb - lt # [N,M,2]
wh[wh<0] = 0 # clip at 0
inter = wh[:,:,0] * wh[:,:,1] # [N,M]
area1 = (box1[:,2]-box1[:,0]) * (box1[:,3]-box1[:,1]) # [N,]
area2 = (box2[:,2]-box2[:,0]) * (box2[:,3]-box2[:,1]) # [M,]
area1 = area1.unsqueeze(1).expand_as(inter) # [N,] -> [N,1] -> [N,M]
area2 = area2.unsqueeze(0).expand_as(inter) # [M,] -> [1,M] -> [N,M]
iou = inter / (area1 + area2 - inter)
return iou
def get_class_prediction_loss(self, classes_pred, classes_target):
"""
Parameters:
classes_pred : (tensor) size (batch_size, S, S, 20)
classes_target : (tensor) size (batch_size, S, S, 20)
Returns:
class_loss : scalar
"""
##### CODE #####
# Measures the Binary Cross Entropy between the target and the output.
class_loss = F.mse_loss(classes_pred,classes_target, reduction = 'sum')
return class_loss
def get_regression_loss(self, box_pred_response, box_target_response):
"""
Parameters:
box_pred_response : (tensor) size (-1, 5)
box_target_response : (tensor) size (-1, 5)
        Note : -1 lets the tensor be ravelled so that the remaining dimension is inferred
See : https://pytorch.org/docs/stable/tensors.html#torch.Tensor.view_as
Returns:
reg_loss : scalar
"""
##### CODE #####
# Calculate Mean square Error for x, y, sqrt(w), sqrt(h)
loss_xy = F.mse_loss(box_pred_response[:, :2], box_target_response[:, :2], reduction = "sum")
loss_wh = F.mse_loss(torch.sqrt(box_pred_response[:, 2:4]), torch.sqrt(box_target_response[:, 2:4]), reduction = "sum")
reg_loss = loss_xy + loss_wh
return reg_loss
def get_contain_conf_loss(self, box_pred_response, box_target_response_iou):
"""
Parameters:
box_pred_response : (tensor) size ( -1 , 5)
box_target_response_iou : (tensor) size ( -1 , 5)
        Note : -1 lets the tensor be ravelled so that the remaining dimension is inferred
See : https://pytorch.org/docs/stable/tensors.html#torch.Tensor.view_as
Returns:
contain_loss : scalar
"""
##### CODE #####
pred = box_pred_response[:, 4]
target = Variable(box_target_response_iou[:, 4].detach())
# target = box_target_response_iou[:, 4]
contain_loss = F.mse_loss(pred, target, reduction = "sum")
return contain_loss
def get_no_object_loss(self, target_tensor, pred_tensor, no_object_mask):
"""
Parameters:
target_tensor : (tensor) size (batch_size, S , S, 30)
pred_tensor : (tensor) size (batch_size, S , S, 30)
no_object_mask : (tensor) size (batch_size, S , S, 30)
Returns:
no_object_loss : scalar
Hints:
        1) Create 2 tensors, no_object_prediction and no_object_target, which only keep the
        values for the grid cells that contain no object.
        2) Create another tensor, no_object_prediction_mask, of the same size whose entries
        are set to 1 at both bounding-box confidence positions (indices 4 and 9).
3) Create 2 tensors which are extracted from no_object_prediction and no_object_target using
the mask created above to find the loss.
"""
##### CODE #####
no_object_prediction = pred_tensor[no_object_mask.bool()].view((-1, 30))
no_object_target = target_tensor[no_object_mask.bool()].view((-1, 30))
no_object_prediction_mask = torch.zeros(no_object_target.size())
no_object_prediction_mask[:, 4] = 1
no_object_prediction_mask[:, 9] = 1
pred = no_object_prediction[no_object_prediction_mask.bool()]
target = no_object_target[no_object_prediction_mask.bool()]
no_object_loss = F.mse_loss(pred, target, reduction = "sum")
return no_object_loss
def find_best_iou_boxes(self, box_target, box_pred):
"""
Parameters:
box_target : (tensor) size (-1, 5)
box_pred : (tensor) size (-1, 5)
        Note : -1 lets the tensor be ravelled so that the remaining dimension is inferred
See : https://pytorch.org/docs/stable/tensors.html#torch.Tensor.view_as
Returns:
box_target_iou: (tensor)
contains_object_response_mask : (tensor)
Hints:
1) Find the iou's of each of the 2 bounding boxes of each grid cell of each image.
2) Set the corresponding contains_object_response_mask of the bounding box with the max iou
of the 2 bounding boxes of each grid cell to 1.
3) For finding iou's use the compute_iou function
4) Before using compute preprocess the bounding box coordinates in such a way that
if for a Box b the coordinates are represented by [x, y, w, h] then
x, y = x/S - 0.5*w, y/S - 0.5*h ; w, h = x/S + 0.5*w, y/S + 0.5*h
Note: Over here initially x, y are the center of the box and w,h are width and height.
        We perform this transformation to convert the center/size representation into corner
        coordinates (a worked numeric example is given in the comment just below this docstring).
5) Set the confidence of the box_target_iou of the bounding box to the maximum iou
"""
##### CODE #####
b1 = torch.zeros(box_pred.size())
b2 = torch.zeros(box_target.size())
b1[:, :2] = box_pred[:, :2] / self.S - 0.5 * box_pred[:, 2:4]
b1[:, 2:4] = box_pred[:, :2] / self.S + 0.5 * box_pred[:, 2:4]
b2[:, :2] = box_target[:, :2] / self.S - 0.5 * box_target[:, 2:4]
b2[:, 2:4] = box_target[:, :2] / self.S + 0.5 * box_target[:, 2:4]
iou = self.compute_iou(b1[:, :4], b2[:, :4])
coo_response_mask = torch.cuda.ByteTensor(box_target.size())
coo_response_mask.zero_()
box_target_iou = torch.zeros(box_target.size(), requires_grad = True).cuda()
for i in range(iou.size(0) // 2):
iou1 = iou[i * 2][i * 2]
iou2 = iou[i * 2 + 1][i * 2 + 1]
if iou1 > iou2:
box_target_iou[i * 2, 4] = Variable(iou1.detach())
coo_response_mask[i * 2] = 1
else:
box_target_iou[i * 2 + 1, 4] = Variable(iou2.detach())
coo_response_mask[i * 2 + 1] = 1
# print(box_target_iou)
return box_target_iou, coo_response_mask
def forward(self, pred_tensor,target_tensor):
'''
pred_tensor: (tensor) size(batchsize,S,S,Bx5+20=30)
where B - number of bounding boxes this grid cell is a part of = 2
5 - number of bounding box values corresponding to [x, y, w, h, c]
where x - x_coord, y - y_coord, w - width, h - height, c - confidence of having an object
20 - number of classes
target_tensor: (tensor) size(batchsize,S,S,30)
Returns:
Total Loss
'''
N = pred_tensor.size()[0]
total_loss = None
# Create 2 tensors contains_object_mask and no_object_mask
# of size (Batch_size, S, S) such that each value corresponds to if the confidence of having
# an object > 0 in the target tensor.
##### CODE #####
# target_tensor = target_tensor.reshape((-1, 30))
# pred_tensor = pred_tensor.reshape((-1, 30))
contains_object_mask = target_tensor[:,:,:,4]
contains_object_mask = contains_object_mask.unsqueeze(-1).expand_as(target_tensor)
no_object_mask = target_tensor[:,:,:,4] == 0
no_object_mask = no_object_mask.unsqueeze(-1).expand_as(target_tensor)
# Create a tensor contains_object_pred that corresponds to
# to all the predictions which seem to confidence > 0 for having an object
# Split this tensor into 2 tensors :
# 1) bounding_box_pred : Contains all the Bounding box predictions of all grid cells of all images
# 2) classes_pred : Contains all the class predictions for each grid cell of each image
# Hint : Use contains_object_mask
##### CODE #####
# pred_tensor = pred_tensor.view((-1, 30))
# target_tensor = target_tensor.view((-1, 30))
contains_object_pred = pred_tensor[contains_object_mask.bool()].reshape(-1,30)
bounding_box_pred = contains_object_pred[:, :10].reshape(-1, 5)
classes_pred = contains_object_pred[:, 10:]
# Similarly as above create 2 tensors bounding_box_target and
# classes_target.
##### CODE #####
contains_object_target = target_tensor[contains_object_mask.bool()].reshape(-1,30)
bounding_box_target = contains_object_target[:, :10].reshape(-1, 5)
classes_target = contains_object_target[:, 10:]
# Compute the No object loss here
##### CODE #####
no_object_loss = self.get_no_object_loss(target_tensor, pred_tensor, no_object_mask)
# Compute the iou's of all bounding boxes and the mask for which bounding box
# of 2 has the maximum iou the bounding boxes for each grid cell of each image.
##### CODE #####
box_target_iou, coo_response_mask = self.find_best_iou_boxes(bounding_box_target, bounding_box_pred)
# Create 3 tensors :
# 1) box_prediction_response - bounding box predictions for each grid cell which has the maximum iou
# 2) box_target_response_iou - bounding box target ious for each grid cell which has the maximum iou
# 3) box_target_response - bounding box targets for each grid cell which has the maximum iou
# Hint : Use contains_object_response_mask
box_prediction_response = bounding_box_pred[coo_response_mask[:, 4].bool()].reshape(-1,5)
box_target_response = bounding_box_target[coo_response_mask[:, 4].bool()].reshape(-1,5)
box_target_response_iou = box_target_iou[box_target_iou[:, 4].bool()].reshape(-1,5)
##### CODE #####
# Find the class_loss, containing object loss and regression loss
##### CODE #####
class_loss = self.get_class_prediction_loss(classes_pred, classes_target)
regression_loss = self.get_regression_loss(box_prediction_response, box_target_response)
contain_object_loss = self.get_contain_conf_loss(box_prediction_response, box_target_response_iou)
total_loss = self.l_coord * regression_loss + self.l_noobj * no_object_loss + contain_object_loss + class_loss
total_loss /= N
return total_loss
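# Hedged usage sketch (comment-only addition): tensor shapes follow the forward()
# docstring above; a CUDA device is assumed because find_best_iou_boxes allocates
# torch.cuda tensors.
#
#     criterion = YoloLoss(S=14, B=2, l_coord=5.0, l_noobj=0.5)
#     pred = torch.rand(8, 14, 14, 30).cuda()
#     target = torch.rand(8, 14, 14, 30).cuda()
#     loss = criterion(pred, target)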
|
py | 7df92c597c20efa3513994b49ee52d8bd6d104fc | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example adds a text ad with ad parameters. To get ad groups, run
get_ad_groups.py. To get keywords, run add_keywords.py.
Tags: AdGroupAdService.mutate, AdParamService.mutate
Api: AdWordsOnly
"""
__author__ = '[email protected] (Kevin Winter)'
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import AdWordsClient
ad_group_id = 'INSERT_AD_GROUP_ID_HERE'
criterion_id = 'INSERT_KEYWORD_CRITERION_ID_HERE'
def main(client, ad_group_id, criterion_id):
# Initialize appropriate service.
ad_group_ad_service = client.GetAdGroupAdService(
'https://adwords-sandbox.google.com', 'v201109_1')
ad_param_service = client.GetAdParamService(
'https://adwords-sandbox.google.com', 'v201109_1')
# Construct operations for adding text ad object and add to an ad group.
operations = [{
'operator': 'ADD',
'operand': {
'xsi_type': 'AdGroupAd',
'adGroupId': ad_group_id,
'ad': {
'xsi_type': 'TextAd',
'url': 'http://www.example.com',
'displayUrl': 'example.com',
'description1': 'Low-gravity fun for {param1:cheap}.',
'description2': 'Only {param2:a few} seats left!',
'headline': 'Luxury Mars Cruises'
},
'status': 'ENABLED'
}
}]
ads = ad_group_ad_service.Mutate(operations)[0]['value']
# Display results.
for ad in ads:
print ('Text ad with id \'%s\' was successfully added to an ad group with '
'id \'%s\'.' % (ad['adGroupId'], ad['ad']['id']))
# Construct operations for setting ad parameters.
operations = [
{
'operator': 'SET',
'operand': {
'adGroupId': ad_group_id,
'criterionId': criterion_id,
'insertionText': '£100',
'paramIndex': '1'
}
},
{
'operator': 'SET',
'operand': {
'adGroupId': ad_group_id,
'criterionId': criterion_id,
'insertionText': '50',
'paramIndex': '2'
}
}
]
ad_params = ad_param_service.Mutate(operations)
# Display results.
for ad_param in ad_params:
print ('Ad parameter with text \'%s\' was successfully set for criterion '
'with id \'%s\' and ad group id \'%s\'.'
% (ad_param['insertionText'], ad_param['criterionId'],
ad_param['adGroupId']))
print
print ('Usage: %s units, %s operations' % (client.GetUnits(),
client.GetOperations()))
if __name__ == '__main__':
# Initialize client object.
client = AdWordsClient(path=os.path.join('..', '..', '..', '..', '..'))
main(client, ad_group_id, criterion_id)
|
py | 7df92c6e0a52c34bdc2aa3d0339a122913d7c3df | #!/usr/bin/env python
"""
simple-nasbench.py
- Reproduce random search results from NASBENCH paper
- https://arxiv.org/pdf/1902.09635.pdf
- Simple NAS algorithm, which appears to strongly outperform algorithms from paper
- Train LinearSVR on small random sample of architectures
- Rank remaining architectures
- Train in order
"""
import numpy as np
import pandas as pd
from tqdm import tqdm, trange
from sklearn.pipeline import make_pipeline
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder
from sklearn.svm import LinearSVR
from nasbench.api import NASBench
from rsub import *
from matplotlib import pyplot as plt
# --
# Helpers
cummin = np.minimum.accumulate
cummax = np.maximum.accumulate
cumsum = np.cumsum
def cumargmax(x):
z = np.arange(x.shape[0], dtype=np.float)
z[x != cummax(x)] = np.nan
z = pd.Series(z).fillna(method='ffill')
return z.values.astype(int)
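# Illustrative comment (an addition): cumargmax(np.array([3, 1, 4, 2, 5]))
# returns array([0, 0, 2, 2, 4]) -- the index of the running maximum at each step.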
def make_edge_sel(num_nodes=7):
edges = []
for idx in range(num_nodes ** 2):
row = idx // num_nodes
col = idx % num_nodes
if row < col:
edges.append(idx)
return np.array(edges)
def sample_one_column(x):
i = np.arange(x.shape[0])
j = np.random.choice(x.shape[1], x.shape[0], replace=True)
return x[(i, j)]
max_nodes = 7
edge_sel = make_edge_sel(num_nodes=max_nodes)
# --
# IO
path = 'data/nasbench_only108.tfrecord'
api = NASBench(path)
# --
# ETL
hashes = np.array(list(api.hash_iterator()))
feats = [None] * len(hashes)
results = [None] * len(hashes)
test_acc = [None] * len(hashes)
valid_acc = [None] * len(hashes)
cost = [None] * len(hashes)
for i, h in tqdm(enumerate(hashes), total=len(hashes)):
spec, result = api.get_metrics_from_hash(h)
# --
# Clean + featurize architecture
num_nodes = spec['module_adjacency'].shape[0]
padding = max_nodes - num_nodes
if padding > 0:
spec['module_adjacency'] = np.pad(spec['module_adjacency'],
((0,padding), (0,padding)), mode='constant')
spec['module_operations'] += (['__noop__'] * padding)
edge_feat = list(np.hstack(spec['module_adjacency'])[edge_sel].astype(np.float64))
node_feat = spec['module_operations']
feats[i] = edge_feat + node_feat
# --
# Store results
result = result[108]
valid_acc[i] = [r['final_validation_accuracy'] for r in result]
test_acc[i] = [r['final_test_accuracy'] for r in result]
cost[i] = [r['final_training_time'] for r in result]
valid_acc = np.vstack(valid_acc)
test_acc = np.vstack(test_acc)
cost = np.vstack(cost)
mean_valid_acc = valid_acc.mean(axis=-1)
mean_test_acc = test_acc.mean(axis=-1)
mean_cost = cost.mean(axis=-1)
# --
# Featurize
num_edges = np.arange(max_nodes).sum()
nominal_indices = np.arange(num_edges, num_edges + max_nodes)  # one one-hot column per node op
featurizer = make_pipeline(
ColumnTransformer(
transformers=[
('nominal', make_pipeline(
OneHotEncoder(handle_unknown='ignore'),
), nominal_indices)
],
remainder='passthrough',
)
)
Xf = featurizer.fit_transform(feats)
Xf = Xf.astype(np.float64) / Xf.shape[1]
# --
# Reproducing random baseline
def run_simple_random(valid_acc, test_acc, cost,
models_per_run=1e4, n_repeats=1000, valid_mode='mean'):
n_models = len(valid_acc)
models_per_run = min(int(models_per_run), n_models)
cum_valid_acc = [None] * n_repeats
cum_test_acc = [None] * n_repeats
cum_costs = [None] * n_repeats
for i in trange(n_repeats):
sel = np.random.choice(n_models, models_per_run, replace=False)
valid_acc_sel = valid_acc[sel]
test_acc_sel = test_acc[sel]
cost_sel = cost[sel]
if valid_mode == 'mean':
valid_acc_sel = valid_acc_sel.mean(axis=-1)
cost_mult = 3
elif valid_mode == 'sample_one':
valid_acc_sel = sample_one_column(valid_acc_sel)
cost_mult = 1
else:
raise Exception
test_acc_sel = test_acc_sel.mean(axis=-1)
cost_sel = cost_mult * cost_sel.mean(axis=-1)
cum_valid_acc[i] = cummax(valid_acc_sel)
cum_test_acc[i] = test_acc_sel[cumargmax(valid_acc_sel)]
cum_costs[i] = cumsum(cost_sel)
return (
np.stack(cum_valid_acc),
np.stack(cum_test_acc),
np.stack(cum_costs),
)
randm_cum_valid_acc, randm_cum_test_acc, randm_cum_costs = run_simple_random(
valid_acc=valid_acc,
test_acc=test_acc,
cost=cost,
valid_mode='mean',
)
mean_randm_cum_valid_acc = randm_cum_valid_acc.mean(axis=0)
mean_randm_cum_test_acc = randm_cum_test_acc.mean(axis=0)
mean_randm_cum_costs = randm_cum_costs.mean(axis=0)
rand1_cum_valid_acc, rand1_cum_test_acc, rand1_cum_costs = run_simple_random(
valid_acc=valid_acc,
test_acc=test_acc,
cost=cost,
valid_mode='sample_one'
)
mean_rand1_cum_valid_acc = rand1_cum_valid_acc.mean(axis=0)
mean_rand1_cum_test_acc = rand1_cum_test_acc.mean(axis=0)
mean_rand1_cum_costs = rand1_cum_costs.mean(axis=0)
# This agrees roughly w/ Fig7 in the paper
_ = plt.plot(mean_randm_cum_costs, mean_test_acc.max() - mean_randm_cum_test_acc, c='red', label='randm')
# for i in range(256):
# _ = plt.plot(randm_cum_costs[i], mean_test_acc.max() - randm_cum_test_acc[i], c='red', alpha=0.01)
_ = plt.plot(mean_rand1_cum_costs, mean_test_acc.max() - mean_rand1_cum_test_acc, c='orange', label='rand1')
# for i in range(256):
# _ = plt.plot(rand1_cum_costs[i], mean_test_acc.max() - rand1_cum_test_acc[i], c='orange', alpha=0.01)
_ = plt.xscale('log')
_ = plt.yscale('log')
_ = plt.ylim(1e-3, 1e-1)
_ = plt.legend()
_ = plt.grid(which='both', alpha=0.5)
show_plot()
# --
# Model baseline
def run_svr(Xf, valid_acc, test_acc, cost,
train_samples=100, n_repeats=32, C=1, valid_mode='mean'):
n_models = len(valid_acc)
cum_valid_acc = [None] * n_repeats
cum_test_acc = [None] * n_repeats
cum_costs = [None] * n_repeats
for i in trange(n_repeats):
perm = np.random.permutation(Xf.shape[0])
train_sel, test_sel = perm[:train_samples], perm[train_samples:]
Xf_train, Xf_test = Xf[train_sel], Xf[test_sel]
valid_acc_train, valid_acc_test = valid_acc[train_sel], valid_acc[test_sel]
test_acc_train, test_acc_test = test_acc[train_sel], test_acc[test_sel]
cost_train, cost_test = cost[train_sel], cost[test_sel]
# >>
if valid_mode == 'mean':
valid_acc_train = valid_acc_train.mean(axis=-1)
valid_acc_test = valid_acc_test.mean(axis=-1)
cost_mult = 3
elif valid_mode == 'sample_one':
valid_acc_train = sample_one_column(valid_acc_train)
valid_acc_test = sample_one_column(valid_acc_test)
cost_mult = 1
# <<
test_acc_train = test_acc_train.mean(axis=-1)
test_acc_test = test_acc_test.mean(axis=-1)
cost_train = cost_train.mean(axis=-1)
cost_test = cost_test.mean(axis=-1)
model = LinearSVR(C=C, max_iter=int(1e5)).fit(Xf_train, valid_acc_train)
pred_test = model.predict(Xf_test)
rank_test = np.argsort(-pred_test)
valid_acc_sel = np.concatenate([valid_acc_train, valid_acc_test[rank_test]])
test_acc_sel = np.concatenate([test_acc_train, test_acc_test[rank_test]])
cost_sel = np.concatenate([cost_train, cost_test[rank_test]])
cum_valid_acc[i] = cummax(valid_acc_sel)
cum_test_acc[i] = test_acc_sel[cumargmax(valid_acc_sel)]
cum_costs[i] = cost_mult * cumsum(cost_sel)
return (
np.stack(cum_valid_acc),
np.stack(cum_test_acc),
np.stack(cum_costs),
)
svr1_cum_valid_acc, svr1_cum_test_acc, svr1_cum_costs = run_svr(
Xf=Xf,
valid_acc=valid_acc,
test_acc=test_acc,
cost=cost,
valid_mode='sample_one',
C=1
)
mean_svr1_cum_valid_acc = svr1_cum_valid_acc.mean(axis=0)
mean_svr1_cum_test_acc = svr1_cum_test_acc.mean(axis=0)
mean_svr1_cum_costs = svr1_cum_costs.mean(axis=0)
svrm_cum_valid_acc, svrm_cum_test_acc, svrm_cum_costs = run_svr(
Xf=Xf,
valid_acc=valid_acc,
test_acc=test_acc,
cost=cost,
valid_mode='mean',
C=1
)
mean_svrm_cum_valid_acc = svrm_cum_valid_acc.mean(axis=0)
mean_svrm_cum_test_acc = svrm_cum_test_acc.mean(axis=0)
mean_svrm_cum_costs = svrm_cum_costs.mean(axis=0)
# plot random (same as above)
_ = plt.plot(mean_randm_cum_costs, mean_test_acc.max() - mean_randm_cum_test_acc,
             c='red', label='randm')
# for i in range(randm_cum_test_acc.shape[0]):
#     _ = plt.plot(randm_cum_costs[i], mean_test_acc.max() - randm_cum_test_acc[i], c='red', alpha=0.01)
# plot svr
_ = plt.plot(mean_svr1_cum_costs, mean_test_acc.max() - mean_svr1_cum_test_acc,
c='blue', label='svr1')
# for i in range(svr_cum_test_acc.shape[0]):
# _ = plt.plot(svr_cum_costs[i], mean_test_acc.max() - svr_cum_test_acc[i], c='blue', alpha=0.1)
_ = plt.plot(mean_svrm_cum_costs, mean_test_acc.max() - mean_svrm_cum_test_acc,
c='green', label='svrm')
_ = plt.xscale('log')
_ = plt.yscale('log')
_ = plt.ylim(5e-4, 1e-1)
_ = plt.legend()
_ = plt.grid(which='both', alpha=0.5)
show_plot()
|
py | 7df92e5218551a0f0516a001bbb2dbcba03dd697 | from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import scoped_session, sessionmaker
from config import config
import os
import sys
sys.path.append(os.getcwd())
config_name = os.getenv('APP_SETTINGS')
database_uri = config.get(config_name).SQLALCHEMY_DATABASE_URI
engine = create_engine(database_uri, convert_unicode=True)
db_session = scoped_session(sessionmaker(autocommit=False,
autoflush=False,
bind=engine))
Base = declarative_base()
Base.query = db_session.query_property()
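# Hedged sketch (comment-only addition): with this scoped-session/declarative-base
# setup an init_db() helper is usually defined here; the ``models`` module name is
# an assumption about this project's layout.
#
# def init_db():
#     import models  # noqa: F401 -- registers model classes on Base.metadata
#     Base.metadata.create_all(bind=engine)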
|
py | 7df92edcc3ddc458aeacdbaafa9b6d384bef988e | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import numpy as np
def test_const():
x = tvm.const(1, "int32")
print(x.dtype)
assert x.dtype == tvm.int32
assert isinstance(x, tvm.expr.IntImm)
def test_scalar_dtype_inference():
for data in [True, np.bool(1), np.uint8(1), np.uint16(1), np.uint32(1), np.uint64(1),
np.int8(1), np.int16(1), np.int32(1), np.int64(1),
np.float16(1), np.float32(1), np.float64(1)]:
assert tvm.const(data).dtype == str(np.array(data).dtype)
assert tvm.const(1).dtype == 'int32'
assert tvm.const(1.0).dtype == 'float32'
for data in [True, np.bool(1), np.uint8(1), np.uint16(1), np.uint32(1), np.uint64(1),
np.int8(1), np.int16(1), np.int32(1), np.int64(1),
np.float16(1), np.float32(1), np.float64(1)]:
assert tvm.convert(data).dtype == str(np.array(data).dtype)
assert tvm.convert(1).dtype == 'int32'
assert tvm.convert(1.0).dtype == 'float32'
def test_make():
x = tvm.const(1, "int32")
y = tvm.var("x")
z = x + y
assert isinstance(tvm.max(x, y), tvm.expr.Max)
assert isinstance(tvm.min(x, y), tvm.expr.Min)
def test_ir():
x = tvm.const(1, "int32")
y = tvm.make.IntImm('int32', 1)
z = x + y
stmt = tvm.make.Evaluate(z)
assert isinstance(stmt, tvm.stmt.Evaluate)
def test_ir2():
x = tvm.var("n")
a = tvm.var("array", tvm.handle)
st = tvm.make.Store(a, x + 1, 1)
assert isinstance(st, tvm.stmt.Store)
assert(st.buffer_var == a)
def test_let():
x = tvm.var('x')
y = tvm.var('y')
stmt = tvm.make.LetStmt(
x, 10, tvm.make.Evaluate(x + 1));
def test_cast():
x = tvm.var('x', dtype="float32")
y = x.astype("int32")
z = x.astype("float32x4")
assert isinstance(y, tvm.expr.Cast)
assert isinstance(z, tvm.expr.Broadcast)
assert z.lanes == 4
def test_attr():
x = tvm.var('x')
y = tvm.var('y')
stmt = tvm.make.AttrStmt(
y, "stride", 10, tvm.make.Evaluate(x + 1));
assert stmt.node == y
a = tvm.convert(1)
assert a.value == 1
try:
a.no_field
assert False
except AttributeError:
pass
def test_basic():
a = tvm.var('a')
b = tvm.var('b')
c = a + b
assert str(c) == '(%s + %s)' % (a.name, b.name)
def test_stmt():
x = tvm.make.Evaluate(0)
tvm.make.For(tvm.var('i'), 0, 1,
tvm.stmt.For.Serial, 0,
x)
def test_dir():
x = tvm.var('x')
dir(x)
def test_dtype():
x = tvm.var('x')
assert x.dtype == 'int32'
y = tvm.var('y')
assert (x > y).dtype == 'bool'
def test_any():
x = tvm.var('x')
y = tvm.var('y')
z = tvm.var('z')
try:
t = x or x
assert False
except ValueError:
pass
try:
tvm.any()
assert False
except ValueError:
pass
assert str(tvm.any(x < y)) == '(%s < %s)' % (x.name, y.name)
assert str(tvm.any(x < y, x > z)) == '((%s < %s) || (%s > %s))' % (
x.name, y.name, x.name, z.name)
assert str(tvm.any(x < y, y > z + 1, x < z * 2)) == \
'(((%s < %s) || (%s > (%s + 1))) || (%s < (%s*2)))' % (
x.name, y.name, y.name, z.name, x.name, z.name)
def test_all():
x = tvm.var('x')
y = tvm.var('y')
z = tvm.var('z')
try:
t = x and x
assert False
except ValueError:
pass
try:
tvm.all()
assert False
except ValueError:
pass
assert str(tvm.all(x < y)) == '(%s < %s)' % (x.name, y.name)
assert str(tvm.all(x < y, x > z)) == '((%s < %s) && (%s > %s))' % (
x.name, y.name, x.name, z.name)
assert str(tvm.all(x < y, y > z + 1, x < z * 2)) == \
'(((%s < %s) && (%s > (%s + 1))) && (%s < (%s*2)))' % (
x.name, y.name, y.name, z.name, x.name, z.name)
def test_bitwise():
x = tvm.var('x')
y = tvm.var('y')
assert str(x << y) == 'shift_left(x, y)'
assert str(x >> y) == 'shift_right(x, y)'
assert str(x & y) == 'bitwise_and(x, y)'
assert str(x | y) == 'bitwise_or(x, y)'
assert str(x ^ y) == 'bitwise_xor(x, y)'
assert str(~x) == 'bitwise_not(x)'
assert(tvm.const(1, "int8x2") >> 1).dtype == "int8x2"
assert(x >> tvm.const(1, "int32x2")).dtype == "int32x2"
assert(tvm.var("z", "int8x2") << tvm.const(1, "int8x2")).dtype == "int8x2"
def test_equality():
a = tvm.var('a')
b = tvm.var('b')
c = (a == b)
assert not c
d = (c != c)
assert not d
def test_equality_string_imm():
x = 'a'
y = tvm.make.StringImm(x)
x == y.value
x == y
if __name__ == "__main__":
test_cast()
test_attr()
test_const()
test_scalar_dtype_inference()
test_make()
test_ir()
test_basic()
test_stmt()
test_let()
test_dir()
test_dtype()
test_any()
test_all()
test_bitwise()
test_equality()
test_equality_string_imm()
|
py | 7df92faac10f2c51d855b2d51f70b7c865f2e9be | '''
Add a single slider
In the previous exercise, you added a single plot to the "current document" of your application. In this exercise, you'll practice adding a layout to your current document.
Your job here is to create a single slider, use it to create a widgetbox layout, and then add this layout to the current document.
The slider you create here cannot be used for much, but in the later exercises, you'll use it to update your plots!
INSTRUCTIONS
100XP
Import curdoc from bokeh.io, widgetbox from bokeh.layouts, and Slider from bokeh.models.
Create a slider called slider by using the Slider() function and specifying the parameters title, start, end, step, and value.
Use the slider to create a widgetbox layout called layout.
Add the layout to the current document using curdoc().add_root(). It needs to be passed in as an argument to add_root().
'''
# Perform the necessary imports
from bokeh.io import curdoc
from bokeh.layouts import widgetbox
from bokeh.models import Slider
# Create a slider: slider
slider = Slider(title='my slider', start=0, end=10, step=0.1, value=2)
# Create a widgetbox layout: layout
layout = widgetbox(slider)
# Add the layout to the current document
curdoc().add_root(layout)
|
py | 7df9318e61748a12c4217f03f35ff298c5addbd3 | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
"""Doc String
"""
__author__ = "[:VIM_EVAL:]jaskeleton_user[:END_EVAL:]"
__email__ = "[:VIM_EVAL:]jaskeleton_email[:END_EVAL:]"
__created__ = "[:VIM_EVAL:]jaskeleton_date[:END_EVAL:]"
__modified__ = "[:VIM_EVAL:]jaskeleton_date[:END_EVAL:]"
if __name__ == '__main__':
pass
|
py | 7df932ce18a754059d84d6b53284042bffb040e0 | class MXL7ApplicationCategories(object):
def __init__(self, session):
super(MXL7ApplicationCategories, self).__init__()
self._session = session
def getNetworkL7FirewallRulesApplicationCategories(self, networkId: str):
"""
**Return the L7 firewall application categories and their associated applications for an MX network**
https://developer.cisco.com/docs/meraki-api-v0/#!get-network-l-7-firewall-rules-application-categories
- networkId (string)
"""
metadata = {
'tags': ['MX L7 application categories'],
'operation': 'getNetworkL7FirewallRulesApplicationCategories',
}
resource = f'/networks/{networkId}/l7FirewallRules/applicationCategories'
return self._session.get(metadata, resource)
|
py | 7df9347cc1cd48b5a1262e169421661e6deacef2 | import os
import re
import shutil
from textwrap import dedent
from traitlets import Bool, List, Unicode
from typing import Optional
from .base import BasePlugin
from ..utils import unzip
class ExtractorPlugin(BasePlugin):
"""Submission archive files extractor plugin for the
:class:`~nbgrader.apps.zipcollectapp.ZipCollectApp`.
Extractor plugin subclasses MUST inherit from this class.
"""
force = Bool(
default_value=False,
help="Force overwrite of existing files."
).tag(config=True)
zip_ext = List(
['.zip', '.gz'],
help=dedent(
"""
List of valid archive (zip) filename extensions to extract. Any
archive (zip) files with an extension not in this list are copied
to the `extracted_directory`.
"""
)
).tag(config=True)
def extract(self, archive_path: str, extracted_path: str) -> None:
"""Extract archive (zip) files and submission files in the
`archive_directory`. Files are extracted to the `extracted_directory`.
Non-archive (zip) files found in the `archive_directory` are copied to
the `extracted_directory`.
This is the main function called by the
:class:`~nbgrader.apps.zipcollectapp.ZipCollectApp` for each archive
file to be extracted.
Arguments
---------
archive_path:
Absolute path to the `archive_directory`.
extracted_path:
Absolute path to the `extracted_directory`.
"""
if not os.listdir(archive_path):
self.log.warning(
"No files found in directory: {}".format(archive_path))
return
for root, _, archive_files in os.walk(archive_path):
if not archive_files:
continue
extract_to = os.path.normpath(os.path.join(
extracted_path,
os.path.relpath(root, archive_path)
))
if not os.path.isdir(extract_to):
os.makedirs(extract_to)
for zfile in archive_files:
zfile = os.path.join(root, zfile)
filename, ext = os.path.splitext(os.path.basename(zfile))
# unzip (tree) each archive file in archive_path
if ext in self.zip_ext:
# double splitext for .tar.gz
fname, ext = os.path.splitext(os.path.basename(filename))
if ext == '.tar':
filename = fname
self.log.info("Extracting from: {}".format(zfile))
self.log.info(" Extracting to: {}".format(
os.path.join(extract_to, filename)))
unzip(
zfile,
extract_to,
zip_ext=self.zip_ext,
create_own_folder=True,
tree=True
)
# move each non-archive file in archive_path
else:
dest = os.path.join(extract_to, os.path.basename(zfile))
self.log.info("Copying from: {}".format(zfile))
self.log.info(" Copying to: {}".format(dest))
shutil.copy(zfile, dest)
class FileNameCollectorPlugin(BasePlugin):
"""Submission filename collector plugin for the
:class:`~nbgrader.apps.zipcollectapp.ZipCollectApp`.
Collect plugin subclasses MUST inherit from this class.
"""
named_regexp = Unicode(
default_value='',
help=dedent(
r"""
This regular expression is applied to each submission filename and
MUST be supplied by the instructor. This regular expression MUST
provide the `(?P<student_id>...)` and `(?P<file_id>...)` named
group expressions. Optionally this regular expression can also
provide the `(?P<first_name>...)`, `(?P<last_name>...)`,
`(?P<email>...)`, and `(?P<timestamp>...)` named group expressions.
For example if the filename is:
`ps1_bitdiddle_attempt_2016-01-30-15-00-00_problem1.ipynb`
then this `named_regexp` could be:
".*_(?P<student_id>\w+)_attempt_(?P<timestamp>[0-9\-]+)_(?P<file_id>\w+)"
For named group regular expression examples see
https://docs.python.org/3/howto/regex.html
"""
)
).tag(config=True)
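    # Illustrative comment (an addition): applying the example pattern above to
    # "ps1_bitdiddle_attempt_2016-01-30-15-00-00_problem1.ipynb" yields the group
    # dict {'student_id': 'bitdiddle', 'timestamp': '2016-01-30-15-00-00',
    #       'file_id': 'problem1'}.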
valid_ext = List(
default_value=['.ipynb'],
help=dedent(
"""
List of valid submission filename extensions to collect. Any
submitted file with an extension not in this list is skipped.
"""
)
).tag(config=True)
def _match(self, filename: str) -> Optional[dict]:
"""Match the named group regular expression to the beginning of the
filename and return the match groupdict or None if no match.
"""
if not self.named_regexp:
self.log.warning(
"Regular expression not provided for plugin. Run with "
"`--help-all` flag for more information."
)
return None
match = re.match(self.named_regexp, filename)
if not match or not match.groups():
self.log.warning(
"Regular expression '{}' did not match anything in: {}"
"".format(self.named_regexp, filename)
)
return None
gd = match.groupdict()
self.log.debug(
"Regular expression '{}' matched\n'{}' in: {}"
"".format(self.named_regexp, gd, filename)
)
return gd
def collect(self, submitted_file: str) -> Optional[dict]:
"""This is the main function called by the
:class:`~nbgrader.apps.zipcollectapp.ZipCollectApp` for each submitted
file. Note this function must also return a dictionary or None for
sub-classed plugins.
Arguments
---------
submitted_file:
Each submitted file in the ``extracted_directory`` (absolute path).
Returns
-------
groupdict:
Collected data from the filename or None if the file should be
skipped. Collected data is a dict of the form::
{
file_id: file_id, # MUST be provided
student_id: student_id, # MUST be provided
timestamp: timestamp # Can optional be provided
}
            Note: ``file_id`` MUST include the relative path to the
assignment if you are collecting files in assignment sub-folders.
"""
_, ext = os.path.splitext(submitted_file)
# Skip any files without the correct extension
if ext not in self.valid_ext:
self.log.debug("Invalid file extension {}: {}".format(ext, submitted_file))
return None
groupdict = self._match(submitted_file)
if not groupdict:
return None
return groupdict
|
py | 7df93494363c789de56f8722fc9a4ba921e27557 | # -*- coding: utf-8 -*-
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import google.api_core.grpc_helpers
from google.ads.google_ads.v6.proto.services import shopping_performance_view_service_pb2_grpc
class ShoppingPerformanceViewServiceGrpcTransport(object):
"""gRPC transport class providing stubs for
google.ads.googleads.v6.services ShoppingPerformanceViewService API.
The transport provides access to the raw gRPC stubs,
which can be used to take advantage of advanced
features of gRPC.
"""
# The scopes needed to make gRPC calls to all of the methods defined
# in this service.
_OAUTH_SCOPES = (
'https://www.googleapis.com/auth/adwords',
)
def __init__(self, channel=None, credentials=None,
address='googleads.googleapis.com:443'):
"""Instantiate the transport class.
Args:
channel (grpc.Channel): A ``Channel`` instance through
which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
address (str): The address where the service is hosted.
"""
# If both `channel` and `credentials` are specified, raise an
# exception (channels come with credentials baked in already).
if channel is not None and credentials is not None:
raise ValueError(
'The `channel` and `credentials` arguments are mutually '
'exclusive.',
)
# Create the channel.
if channel is None:
channel = self.create_channel(
address=address,
credentials=credentials,
options={
'grpc.max_send_message_length': -1,
'grpc.max_receive_message_length': -1,
}.items(),
)
self._channel = channel
# gRPC uses objects called "stubs" that are bound to the
# channel and provide a basic method for each RPC.
self._stubs = {
'shopping_performance_view_service_stub': shopping_performance_view_service_pb2_grpc.ShoppingPerformanceViewServiceStub(channel),
}
@classmethod
def create_channel(
cls,
address='googleads.googleapis.com:443',
credentials=None,
**kwargs):
"""Create and return a gRPC channel object.
Args:
address (str): The host for the channel to use.
credentials (~.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
kwargs (dict): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
"""
return google.api_core.grpc_helpers.create_channel(
address,
credentials=credentials,
scopes=cls._OAUTH_SCOPES,
**kwargs
)
@property
def channel(self):
"""The gRPC channel used by the transport.
Returns:
grpc.Channel: A gRPC channel object.
"""
return self._channel
@property
def get_shopping_performance_view(self):
"""Return the gRPC stub for :meth:`ShoppingPerformanceViewServiceClient.get_shopping_performance_view`.
Returns the requested Shopping performance view in full detail.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['shopping_performance_view_service_stub'].GetShoppingPerformanceView |
py | 7df935de21261e1cc99454dc30d28c4451714690 | import os
from string import Template
import numpy as np
import aesara
from aesara import Apply
from aesara.tensor import as_tensor_variable
from aesara.tensor.sort import TopKOp
from .basic_ops import (
GpuKernelBase,
Kernel,
as_gpuarray_variable,
gpuarray_helper_inc_dir,
infer_context_name,
)
from .opt import op_lifter, register_opt, register_opt2
from .type import GpuArrayType
try:
import pygpu
import pygpu.gpuarray as ga
except ImportError:
# To make sure aesara is importable
pass
# TODO GPU sort / argsort
class GpuTopKOp(GpuKernelBase, TopKOp):
"""Implements TopKOp on gpu
    Currently the output seems sorted, but we do not test it, so as on
    the CPU we only support sorted=False for now.
"""
__props__ = TopKOp.__props__
_f16_ok = True
def __init__(
self,
axis=-1,
sorted=True,
idx_dtype="int64",
return_values=True,
return_indices=True,
):
if sorted:
raise NotImplementedError(
"GpuTopK currently is not sure to give sorted output even if they look sorted.."
)
GpuKernelBase.__init__(self)
TopKOp.__init__(
self,
axis=axis,
sorted=sorted,
idx_dtype=idx_dtype,
return_values=return_values,
return_indices=return_indices,
)
def perform(self, node, inputs, output_storage, params):
raise NotImplementedError()
def c_headers(self):
return ["gpuarray_api.h", "gpuarray_helper.h", "numpy_compat.h"]
def c_header_dirs(self):
return [
os.path.dirname(__file__),
gpuarray_helper_inc_dir(),
pygpu.get_include(),
]
def c_code_cache_version(self):
return (4,)
def gpu_kernels(self, node, nodename):
# load kernel source
device_type = node.inputs[0].type.context.kind
kernel_ext = {b"cuda": ".cu", b"opencl": ".cl"}[device_type]
common_ext = {b"cuda": ".cuh", b"opencl": ".h"}[device_type]
# prepare "$" macros
if device_type == b"cuda":
ndim = node.inputs[0].ndim
dstv_strides_code = "".join(
"ssize_t dstv_strides_%d, " % i for i in range(ndim)
)
dsti_strides_code = "".join(
"ssize_t dsti_strides_%d, " % i for i in range(ndim)
)
src_strides_code = "".join(
"ssize_t src_strides_%d, " % i for i in range(ndim)
)
set_slice_code = """
gidx = gid %% dims_%(i)d;
gid /= dims_%(i)d;
{dstv};
{dsti};
src = ptr_add(src, gidx*src_strides_%(i)d);\n""".format(
dstv="dstv = ptr_add(dstv, gidx*dstv_strides_%(i)d)"
if self.return_values
else "",
dsti="dsti = ptr_add(dsti, gidx*dsti_strides_%(i)d)"
if self.return_indices
else "",
)
set_slice_code = "".join(set_slice_code % dict(i=j) for j in range(1, ndim))
if self.return_values:
set_slice_code += """
dstv = ptr_add(dstv, dstv_offset);
"""
if self.return_indices:
set_slice_code += """
dsti = ptr_add(dsti, dsti_offset);
"""
set_slice_code += """
src = ptr_add(src, src_offset);
"""
flags = Kernel.get_flags(node.inputs[0].dtype)
subs = dict(
inp_t=ga.dtype_to_ctype(node.inputs[0].dtype),
out_t=ga.dtype_to_ctype(self.idx_dtype),
dims="".join("size_t dims_%d, " % i for i in range(1, ndim)),
dstv="INPUT_TYPE *dstv," if self.return_values else "",
dstv_offset="size_t dstv_offset," if self.return_values else "",
dsti="INDEX_TYPE *dsti," if self.return_indices else "",
dsti_offset="size_t dsti_offset," if self.return_indices else "",
dstv_strides=dstv_strides_code if self.return_values else "",
dsti_strides=dsti_strides_code if self.return_indices else "",
src_strides=src_strides_code,
set_slice=set_slice_code,
write_value=int(self.return_values),
write_index=int(self.return_indices),
ndim=str(ndim),
)
elif device_type == b"opencl":
raise NotImplementedError()
# setup parameters
param_types = [ga.SIZE] * (ndim - 1) # dims
for _ in range(self.return_values + self.return_indices):
param_types.append(ga.GpuArray) # dst*
param_types.append(ga.SIZE) # offset
param_types.extend([ga.SSIZE] * ndim) # dst*_strides
param_types.append(ga.SIZE) # k
param_types.append(ga.GpuArray) # src
param_types.append(ga.SIZE) # offset
param_types.extend([ga.SSIZE] * ndim) # src_strides
param_types.append(ga.SIZE) # size
# load and compile kernels
with open(
os.path.join(
os.path.dirname(__file__), "c_code", "topk_common" + common_ext
)
) as f:
common_src = f.read()
kernels = []
def build_kernel(fname, kname, subs):
with open(os.path.join(os.path.dirname(__file__), "c_code", fname)) as f:
kernel_src = f.read()
ker = Kernel(
code=(
"#include <cluda.h>\n"
+ Template(common_src + kernel_src).substitute(**subs)
),
name=kname,
params=param_types,
flags=flags,
objvar=kname + nodename,
)
return ker
subs["count_t"] = "int"
kernels.append(build_kernel("topk_dense" + kernel_ext, "k_topk_dense", subs))
subs["kname"] = "k_topk_dense_large"
kernels.append(
build_kernel("topk_dense_large" + kernel_ext, "k_topk_dense_large", subs)
)
subs["count_t"] = "long long"
subs["kname"] = "k_topk_dense_xlarge"
kernels.append(
build_kernel("topk_dense_large" + kernel_ext, "k_topk_dense_xlarge", subs)
)
return kernels
def c_code(self, node, nodename, inps, outs, sub):
context = node.inputs[0].type.context
if context.kind != b"cuda":
raise NotImplementedError(
"%s: We only have CUDA "
"implementation so far." % self.__class__.__name__
)
x, k = inps
inp_dtc = ga.dtype_to_typecode(node.inputs[0].dtype)
if not self.return_indices:
(yv,) = outs
elif self.return_values:
yv, yi = outs
else:
(yi,) = outs
out_dtype_s = self.idx_dtype
out_dtc = ga.dtype_to_typecode(out_dtype_s)
fail = sub["fail"]
ctx = sub["params"]
k_dtype = node.inputs[1].type.dtype_specs()[1]
# max threads per block
MAX_TPB = context.maxlsize0
# max blocks per grid
MAX_BPG = context.maxgsize0
WARP_SIZE = 32
ndim = node.inputs[0].ndim
reordered_axes = list(range(ndim))
axis = self.axis % ndim
del reordered_axes[axis]
reordered_axes = [axis] + reordered_axes
dims = "".join("dims[%d], " % i for i in reordered_axes[1:])
prep_output = ""
if self.return_values:
def_dvstrides = "const ssize_t *dvstrides = PyGpuArray_STRIDES(%s)" % yv
params_dv = "{}->ga.data, {}->ga.offset,\n".format(yv, yv)
params_dv += "".join("dvstrides[%d], " % i for i in reordered_axes)
prep_output += (
"""
if (0 != aesara_prep_output(
&%(yv)s, %(ndim)d, odims,
%(inp_dtc)s, GA_C_ORDER, %(ctx)s)) {
%(fail)s;
}\n"""
% locals()
)
else:
def_dvstrides = params_dv = ""
if self.return_indices:
def_distrides = "const ssize_t *distrides = PyGpuArray_STRIDES(%s)" % yi
params_di = "{}->ga.data, {}->ga.offset,\n".format(yi, yi)
params_di += "".join("distrides[%d], " % i for i in reordered_axes)
prep_output += (
"""
if (0 != aesara_prep_output(
&%(yi)s, %(ndim)d, odims,
%(out_dtc)s, GA_C_ORDER, %(ctx)s)) {
%(fail)s;
}\n"""
% locals()
)
else:
def_distrides = params_di = ""
sstrides = ", ".join("sstrides[%d]" % i for i in reordered_axes)
code = """
{
const ssize_t k_ = ((%(k_dtype)s*)(PyArray_DATA(%(k)s)))[0];
const size_t *dims = PyGpuArray_DIMS(%(x)s);
size_t odims[%(ndim)d];
for (int i=0; i<%(ndim)d; i++)
odims[i] = dims[i];
odims[%(axis)d] = k_>=0 ? k_ : -k_;
if (0 == odims[%(axis)d]) {
PyErr_SetString(
PyExc_ValueError,
"topk: kth must not be zero");
%(fail)s;
} else if (dims[%(axis)d] < odims[%(axis)d]) {
PyErr_SetString(
PyExc_ValueError,
"topk: kth cannot be larger than the size of specified axis %(axis)d");
%(fail)s;
}
%(prep_output)s
size_t grid_size=1, block_size=1;
for (int i=0; i<%(ndim)d; ++i) {
if (i!=%(axis)d)
grid_size *= dims[i];
else
block_size = dims[i];
}
// round up to multiples of warp size
block_size = ((block_size + %(WARP_SIZE)d - 1) / %(WARP_SIZE)d) * %(WARP_SIZE)d;
if (grid_size > %(MAX_BPG)d) {
PyErr_SetString(
PyExc_ValueError,
"topk: too many slices to work with, expected <= %(MAX_BPG)d");
%(fail)s;
}
%(def_dvstrides)s;
%(def_distrides)s;
const ssize_t *sstrides = PyGpuArray_STRIDES(%(x)s);
int err;
if (dims[%(axis)d] > (1u << 31)) {
block_size = %(MAX_TPB)d;
err = k_topk_dense_xlarge_call(
1, &grid_size, &block_size, 0,
%(dims)s
%(params_dv)s
%(params_di)s
k_,
%(x)s->ga.data,
%(x)s->ga.offset,
%(sstrides)s,
dims[%(axis)d]
);
} else if (block_size > %(MAX_TPB)d) {
block_size = %(MAX_TPB)d;
err = k_topk_dense_large_call(
1, &grid_size, &block_size, 0,
%(dims)s
%(params_dv)s
%(params_di)s
k_,
%(x)s->ga.data,
%(x)s->ga.offset,
%(sstrides)s,
dims[%(axis)d]
);
} else {
err = k_topk_dense_call(
1, &grid_size, &block_size, 0,
%(dims)s
%(params_dv)s
%(params_di)s
k_,
%(x)s->ga.data,
%(x)s->ga.offset,
%(sstrides)s,
dims[%(axis)d]
);
}
if (err != GA_NO_ERROR) {
PyErr_SetString(
PyExc_RuntimeError,
"topk: gpu kernel failed to execute");
%(fail)s;
}
}
"""
return code % locals()
def make_node(self, inp, kth):
ctx_name = infer_context_name(inp)
inp = as_gpuarray_variable(inp, ctx_name)
kth = as_tensor_variable(kth)
bcast = inp.type.broadcastable
outs = []
if self.return_values:
outs.append(inp.type())
if self.return_indices:
outs.append(
GpuArrayType(
dtype=self.idx_dtype, broadcastable=bcast, context_name=ctx_name
)()
)
return Apply(self, [inp, kth], outs)
def get_params(self, node):
return node.inputs[0].type.context
class ValuesEqApproxNoOrder:
"""
We ignore the order of elements on a given axis during the comparison.
"""
def __init__(self, axis):
self.axis = axis
def __call__(self, val1, val2):
v1 = np.sort(val1, axis=self.axis)
v2 = np.sort(val2, axis=self.axis)
ret = aesara.tensor.type.values_eq_approx(v1, v2)
return ret
@register_opt("fast_compile")
@op_lifter([TopKOp], cuda_only=True)
@register_opt2([TopKOp], "fast_compile")
def local_gpua_topkop(op, ctx_name, inputs, outputs):
axis = op.axis
rv = op.return_values
ri = op.return_indices
x, k = inputs
x = as_gpuarray_variable(x, ctx_name)
if op.sorted:
return
gpu_op = GpuTopKOp(
axis=axis,
sorted=op.sorted,
idx_dtype=op.idx_dtype,
return_values=rv,
return_indices=ri,
)
rets = gpu_op(x, k, return_list=True)
c = ValuesEqApproxNoOrder(axis)
for r in rets:
r.tag.values_eq_approx = c
return rets
|
py | 7df93607cfcd46c931b878f23d7a58ab150ae921 | # coding: utf-8
"""
CloudEndure API documentation
© 2017 CloudEndure All rights reserved # General Request authentication in CloudEndure's API is done using session cookies. A session cookie is returned upon successful execution of the \"login\" method. This value must then be provided within the request headers of all subsequent API requests. ## Errors Some errors are not specifically written in every method since they may always return. Those are: 1) 401 (Unauthorized) - for unauthenticated requests. 2) 405 (Method Not Allowed) - for using a method that is not supported (POST instead of GET). 3) 403 (Forbidden) - request is authenticated, but the user is not allowed to access. 4) 422 (Unprocessable Entity) - for invalid input. ## Formats All strings with date-time format are according to RFC3339. All strings with \"duration\" format are according to ISO8601. For example, a full day duration can be specified with \"PNNNND\". # noqa: E501
OpenAPI spec version: 5
Contact: https://bit.ly/2T54hSc
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class CloudEndureError:
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {"message": "str", "code": "str"}
attribute_map = {"message": "message", "code": "code"}
def __init__(self, message=None, code=None): # noqa: E501
"""CloudEndureError - a model defined in Swagger""" # noqa: E501
self._message = None
self._code = None
self.discriminator = None
if message is not None:
self.message = message
if code is not None:
self.code = code
@property
def message(self):
"""Gets the message of this CloudEndureError. # noqa: E501
:return: The message of this CloudEndureError. # noqa: E501
:rtype: str
"""
return self._message
@message.setter
def message(self, message):
"""Sets the message of this CloudEndureError.
:param message: The message of this CloudEndureError. # noqa: E501
:type: str
"""
self._message = message
@property
def code(self):
"""Gets the code of this CloudEndureError. # noqa: E501
:return: The code of this CloudEndureError. # noqa: E501
:rtype: str
"""
return self._code
@code.setter
def code(self, code):
"""Sets the code of this CloudEndureError.
:param code: The code of this CloudEndureError. # noqa: E501
:type: str
"""
self._code = code
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(
map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value)
)
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict")
else item,
value.items(),
)
)
else:
result[attr] = value
if issubclass(CloudEndureError, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CloudEndureError):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
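# Small illustrative round trip of the model above (not part of the generated code):
if __name__ == "__main__":
    example = CloudEndureError(message="Project not found", code="NOT_FOUND")
    print(example.to_dict())  # {'message': 'Project not found', 'code': 'NOT_FOUND'}
    print(example == CloudEndureError(message="Project not found", code="NOT_FOUND"))  # True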
|
py | 7df936ea92372c43878204ef7a92c35dd8dc3f7c | r, g, b = map(int, input().split())
if (100 * r + g * 10 + b) % 4 == 0:
print("YES")
else:
print("NO") |
py | 7df9370f6697b4e7862b8778bd119966d76768d7 | import pytest
from tri_struct import merged
from iommi._db_compat import field_defaults_factory
@pytest.mark.django
def test_field_defaults_factory():
from django.db import models
base = dict(parse_empty_string_as_none=True, required=True, display_name=None)
assert field_defaults_factory(models.CharField(null=False, blank=False)) == merged(
base, dict(parse_empty_string_as_none=False)
)
assert field_defaults_factory(models.CharField(null=False, blank=True)) == merged(
base, dict(parse_empty_string_as_none=False, required=False)
)
assert field_defaults_factory(models.CharField(null=True, blank=False)) == merged(base, dict(required=False))
assert field_defaults_factory(models.CharField(null=True, blank=True)) == merged(base, dict(required=False))
@pytest.mark.django
def test_field_defaults_factory_boolean():
from django.db import models
django_null_default = not models.BooleanField().null
base = dict(parse_empty_string_as_none=django_null_default, display_name=None)
assert field_defaults_factory(models.BooleanField(null=False, blank=False)) == merged(
base, dict(parse_empty_string_as_none=False)
)
assert field_defaults_factory(models.BooleanField(null=False, blank=True)) == merged(
base, dict(parse_empty_string_as_none=False)
)
assert field_defaults_factory(models.BooleanField(null=True, blank=False)) == base
assert field_defaults_factory(models.BooleanField(null=True, blank=True)) == base
|
py | 7df9377ffcc55402d32a700d90d2a635b001ea27 | # coding: utf-8
# -----------------------------------------------------------------------------------
# <copyright company="Aspose Pty Ltd" file="DocxConvertOptions.py">
# Copyright (c) 2003-2021 Aspose Pty Ltd
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# </summary>
# -----------------------------------------------------------------------------------
import pprint
import re # noqa: F401
import six
from groupdocs_conversion_cloud.models import WordProcessingConvertOptions
class DocxConvertOptions(WordProcessingConvertOptions):
"""
Docx convert options
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
}
attribute_map = {
}
def __init__(self, **kwargs): # noqa: E501
"""Initializes new instance of DocxConvertOptions""" # noqa: E501
base = super(DocxConvertOptions, self)
base.__init__(**kwargs)
self.swagger_types.update(base.swagger_types)
self.attribute_map.update(base.attribute_map)
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DocxConvertOptions):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
py | 7df938f3fd4d2242ccfe5f7e693c14a6ada44fa1 | #!/usr/bin/env python
# encoding: utf-8
"""
@author: zhanghe
@software: PyCharm
@file: sale_order.py
@time: 2018-07-24 16:59
"""
from __future__ import unicode_literals
from flask import jsonify, make_response
from flask_restful import Resource, marshal, reqparse
from web_api.yonyou.outputs.sale_order import fields_item_sale_order, fields_item_sale_order_cn
from web_api.yonyou.reqparsers.sale_order import (
structure_key_item,
structure_key_items,
structure_key_item_cn,
structure_key_items_cn,
)
from web_api.commons.exceptions import BadRequest, NotFound
from web_api.yonyou.apis.sale_order import (
get_sale_order_row_by_id,
edit_sale_order,
delete_sale_order,
get_sale_order_limit_rows_by_last_id,
add_sale_order,
get_sale_order_pagination,
)
from web_api.commons.http_token_auth import token_auth
from web_api import app
SUCCESS_MSG = app.config['SUCCESS_MSG']
FAILURE_MSG = app.config['FAILURE_MSG']
class SaleOrderResource(Resource):
"""
SaleOrderResource
"""
decorators = [token_auth.login_required]
def get(self, pk):
"""
Example:
curl http://0.0.0.0:5000/yonyou/sale_order/1
:param pk:
:return:
"""
data = get_sale_order_row_by_id(pk)
if not data:
raise NotFound
result = marshal(data, fields_item_sale_order_cn, envelope=structure_key_item_cn)
return jsonify(result)
def delete(self, pk):
"""
Example:
curl http://0.0.0.0:5000/yonyou/sale_order/1 -X DELETE
:param pk:
:return:
"""
result = delete_sale_order(pk)
if result:
success_msg = SUCCESS_MSG.copy()
return make_response(jsonify(success_msg), 204)
else:
failure_msg = FAILURE_MSG.copy()
return make_response(jsonify(failure_msg), 400)
class SaleOrderListResource(Resource):
"""
SaleOrderListResource
"""
decorators = [token_auth.login_required]
def get(self):
"""
Example:
curl http://0.0.0.0:5000/yonyou/sale_orders
curl http://0.0.0.0:5000/yonyou/sale_orders?last_pk=1000&limit_num=2
:return:
"""
# 条件参数
filter_parser = reqparse.RequestParser(bundle_errors=True)
filter_parser.add_argument('last_pk', type=int, default=0, location='args')
filter_parser.add_argument('limit_num', type=int, default=20, location='args')
filter_parser_args = filter_parser.parse_args()
# data = get_sale_order_rows()
data = get_sale_order_limit_rows_by_last_id(**filter_parser_args)
result = marshal(data, fields_item_sale_order_cn, envelope=structure_key_items_cn)
return jsonify(result)
class SaleOrderPaginationResource(Resource):
"""
SaleOrderPaginationResource
"""
decorators = [token_auth.login_required]
def get(self):
"""
Example:
curl http://0.0.0.0:5000/yonyou/sale_orders/pagination
curl http://0.0.0.0:5000/yonyou/sale_orders/pagination?page=2000&per_page=2
:return:
"""
# 条件参数
filter_parser = reqparse.RequestParser(bundle_errors=True)
filter_parser.add_argument('page', type=int, default=1, location='args')
filter_parser.add_argument('per_page', type=int, default=20, location='args')
filter_parser_args = filter_parser.parse_args()
pagination_obj = get_sale_order_pagination(**filter_parser_args)
result = marshal(pagination_obj.items, fields_item_sale_order, envelope=structure_key_items)
result['total'] = pagination_obj.total
return jsonify(result)
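# Illustrative registration of the resources above with Flask-RESTful; the real routing
# lives elsewhere in this project, and the "api" instance name is an assumption. The URL
# patterns follow the curl examples in the docstrings:
#
#   api.add_resource(SaleOrderResource, '/yonyou/sale_order/<int:pk>')
#   api.add_resource(SaleOrderListResource, '/yonyou/sale_orders')
#   api.add_resource(SaleOrderPaginationResource, '/yonyou/sale_orders/pagination')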
|
py | 7df939a2865f522cc1bad5f90ae56bdc4705ba62 | import ctypes
import platform
import re
import signal
import subprocess
import winreg
from ctypes.wintypes import BYTE, DWORD, WCHAR, WORD
import psutil
import wmi
kernel32 = ctypes.WinDLL(str("kernel32"), use_last_error=True)
def kill_proc(pid):
try:
parent = psutil.Process(pid)
children = parent.children(recursive=True)
children.append(parent)
for p in children:
p.send_signal(signal.SIGTERM)
gone, alive = psutil.wait_procs(children, timeout=20, callback=None)
except:
pass
def enable_rdp():
with winreg.CreateKeyEx(
winreg.HKEY_LOCAL_MACHINE,
"SYSTEM\\CurrentControlSet\\Control\\Terminal Server",
0,
winreg.KEY_ALL_ACCESS,
) as key:
winreg.SetValueEx(key, "fDenyTSConnections", 0, winreg.REG_DWORD, 0)
subprocess.run(
'netsh advfirewall firewall set rule group="remote desktop" new enable=Yes',
capture_output=True,
shell=True,
timeout=15,
)
def disable_sleep_hibernate():
with winreg.CreateKeyEx(
winreg.HKEY_LOCAL_MACHINE,
"SYSTEM\\CurrentControlSet\\Control\\Session Manager\\Power",
0,
winreg.KEY_ALL_ACCESS,
) as key:
winreg.SetValueEx(key, "HiberbootEnabled", 0, winreg.REG_DWORD, 0)
commands = [
lambda x: f"powercfg /set{x}valueindex scheme_current sub_buttons lidaction 0",
lambda x: f"powercfg /x -standby-timeout-{x} 0",
lambda x: f"powercfg /x -hibernate-timeout-{x} 0",
lambda x: f"powercfg /x -disk-timeout-{x} 0",
lambda x: f"powercfg /x -monitor-timeout-{x} 0",
lambda x: f"powercfg /x -standby-timeout-{x} 0",
]
for x in ["ac", "dc"]:
for i in commands:
subprocess.run(i(x), capture_output=True, shell=True)
subprocess.run("powercfg -S SCHEME_CURRENT", capture_output=True, shell=True)
def enable_ping():
subprocess.run(
'netsh advfirewall firewall add rule name="ICMP Allow incoming V4 echo request" protocol=icmpv4:8,any dir=in action=allow',
capture_output=True,
shell=True,
)
def bytes2human(n):
# http://code.activestate.com/recipes/578019
symbols = ("K", "M", "G", "T", "P", "E", "Z", "Y")
prefix = {}
for i, s in enumerate(symbols):
prefix[s] = 1 << (i + 1) * 10
for s in reversed(symbols):
if n >= prefix[s]:
value = float(n) / prefix[s]
return "%.1f%s" % (value, s)
return "%sB" % n
# source: https://github.com/saltstack/salt/blob/master/salt/grains/core.py
def os_version_info_ex():
class OSVersionInfo(ctypes.Structure):
_fields_ = (
("dwOSVersionInfoSize", DWORD),
("dwMajorVersion", DWORD),
("dwMinorVersion", DWORD),
("dwBuildNumber", DWORD),
("dwPlatformId", DWORD),
("szCSDVersion", WCHAR * 128),
)
def __init__(self, *args, **kwds):
super(OSVersionInfo, self).__init__(*args, **kwds)
self.dwOSVersionInfoSize = ctypes.sizeof(self)
kernel32.GetVersionExW(ctypes.byref(self))
class OSVersionInfoEx(OSVersionInfo):
_fields_ = (
("wServicePackMajor", WORD),
("wServicePackMinor", WORD),
("wSuiteMask", WORD),
("wProductType", BYTE),
("wReserved", BYTE),
)
return OSVersionInfoEx()
def get_os_version_info():
info = os_version_info_ex()
c = wmi.WMI()
c_info = c.Win32_OperatingSystem()[0]
ret = {
"MajorVersion": info.dwMajorVersion,
"MinorVersion": info.dwMinorVersion,
"BuildNumber": info.dwBuildNumber,
"PlatformID": info.dwPlatformId,
"ServicePackMajor": info.wServicePackMajor,
"ServicePackMinor": info.wServicePackMinor,
"SuiteMask": info.wSuiteMask,
"ProductType": info.wProductType,
"Caption": c_info.Caption,
"Arch": c_info.OSArchitecture,
"Version": c_info.Version,
}
return ret
# source: https://github.com/saltstack/salt/blob/master/salt/grains/core.py
def get_windows_os_release_grain(caption, product_type):
version = "Unknown"
release = ""
if "Server" in caption:
for item in caption.split(" "):
if re.match(r"\d+", item):
version = item
if re.match(r"^R\d+$", item):
release = item
os_release = f"{version}Server{release}"
else:
for item in caption.split(" "):
if re.match(r"^(\d+(\.\d+)?)|Thin|Vista|XP$", item):
version = item
os_release = version
if os_release in ["Unknown"]:
os_release = platform.release()
server = {
"Vista": "2008Server",
"7": "2008ServerR2",
"8": "2012Server",
"8.1": "2012ServerR2",
"10": "2016Server",
}
# (Product Type 1 is Desktop, Everything else is Server)
if product_type > 1 and os_release in server:
os_release = server[os_release]
return os_release
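# Illustrative use of the two helpers above (actual values depend on the host):
#
#   info = get_os_version_info()
#   # e.g. info["Caption"] == "Microsoft Windows Server 2016 Datacenter", info["ProductType"] == 3
#   get_windows_os_release_grain(info["Caption"], info["ProductType"])  # -> "2016Server"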
|
py | 7df939f8399d5f441d50b3905a77c195e2e5863d | # (C) Datadog, Inc. 2010-2017
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
'''
HDFS NameNode Metrics
---------------------
hdfs.namenode.capacity_total Total disk capacity in bytes
hdfs.namenode.capacity_used Disk usage in bytes
hdfs.namenode.capacity_remaining Remaining disk space left in bytes
hdfs.namenode.total_load Total load on the file system
hdfs.namenode.fs_lock_queue_length Lock queue length
hdfs.namenode.blocks_total Total number of blocks
hdfs.namenode.max_objects Maximum number of files HDFS supports
hdfs.namenode.files_total Total number of files
hdfs.namenode.pending_replication_blocks Number of blocks pending replication
hdfs.namenode.under_replicated_blocks Number of under replicated blocks
hdfs.namenode.scheduled_replication_blocks Number of blocks scheduled for replication
hdfs.namenode.pending_deletion_blocks Number of pending deletion blocks
hdfs.namenode.num_live_data_nodes Total number of live data nodes
hdfs.namenode.num_dead_data_nodes Total number of dead data nodes
hdfs.namenode.num_decom_live_data_nodes Number of decommissioning live data nodes
hdfs.namenode.num_decom_dead_data_nodes Number of decommissioning dead data nodes
hdfs.namenode.volume_failures_total Total volume failures
hdfs.namenode.estimated_capacity_lost_total Estimated capacity lost in bytes
hdfs.namenode.num_decommissioning_data_nodes Number of decommissioning data nodes
hdfs.namenode.num_stale_data_nodes Number of stale data nodes
hdfs.namenode.num_stale_storages Number of stale storages
hdfs.namenode.missing_blocks Number of missing blocks
hdfs.namenode.corrupt_blocks Number of corrupt blocks
'''
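# A minimal illustrative instance configuration for this check (shown here as a comment;
# the exact conf.d file name and layout depend on the Agent installation):
#
#   instances:
#     - hdfs_namenode_jmx_uri: http://localhost:50070
#       disable_ssl_validation: false
#       tags:
#         - cluster:hadoop-prod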
# stdlib
from urlparse import urljoin
# 3rd party
import requests
from requests.exceptions import Timeout, HTTPError, InvalidURL, ConnectionError
from simplejson import JSONDecodeError
# Project
from checks import AgentCheck
# Service check names
JMX_SERVICE_CHECK = 'hdfs.namenode.jmx.can_connect'
# URL Paths
JMX_PATH = 'jmx'
# Namesystem state bean
HDFS_NAME_SYSTEM_STATE_BEAN = 'Hadoop:service=NameNode,name=FSNamesystemState'
# Namesystem bean
HDFS_NAME_SYSTEM_BEAN = 'Hadoop:service=NameNode,name=FSNamesystem'
# Metric types
GAUGE = 'gauge'
# HDFS metrics
HDFS_NAME_SYSTEM_STATE_METRICS = {
'CapacityTotal' : ('hdfs.namenode.capacity_total', GAUGE),
'CapacityUsed' : ('hdfs.namenode.capacity_used', GAUGE),
'CapacityRemaining' : ('hdfs.namenode.capacity_remaining', GAUGE),
'TotalLoad' : ('hdfs.namenode.total_load', GAUGE),
'FsLockQueueLength' : ('hdfs.namenode.fs_lock_queue_length', GAUGE),
'BlocksTotal' : ('hdfs.namenode.blocks_total', GAUGE),
'MaxObjects' : ('hdfs.namenode.max_objects', GAUGE),
'FilesTotal' : ('hdfs.namenode.files_total', GAUGE),
'PendingReplicationBlocks' : ('hdfs.namenode.pending_replication_blocks', GAUGE),
'UnderReplicatedBlocks' : ('hdfs.namenode.under_replicated_blocks', GAUGE),
'ScheduledReplicationBlocks' : ('hdfs.namenode.scheduled_replication_blocks', GAUGE),
'PendingDeletionBlocks' : ('hdfs.namenode.pending_deletion_blocks', GAUGE),
'NumLiveDataNodes' : ('hdfs.namenode.num_live_data_nodes', GAUGE),
'NumDeadDataNodes' : ('hdfs.namenode.num_dead_data_nodes', GAUGE),
'NumDecomLiveDataNodes' : ('hdfs.namenode.num_decom_live_data_nodes', GAUGE),
'NumDecomDeadDataNodes' : ('hdfs.namenode.num_decom_dead_data_nodes', GAUGE),
'VolumeFailuresTotal' : ('hdfs.namenode.volume_failures_total', GAUGE),
'EstimatedCapacityLostTotal' : ('hdfs.namenode.estimated_capacity_lost_total', GAUGE),
'NumDecommissioningDataNodes' : ('hdfs.namenode.num_decommissioning_data_nodes', GAUGE),
'NumStaleDataNodes' : ('hdfs.namenode.num_stale_data_nodes', GAUGE),
'NumStaleStorages' : ('hdfs.namenode.num_stale_storages', GAUGE),
}
HDFS_NAME_SYSTEM_METRICS = {
'MissingBlocks' : ('hdfs.namenode.missing_blocks', GAUGE),
'CorruptBlocks' : ('hdfs.namenode.corrupt_blocks', GAUGE)
}
class HDFSNameNode(AgentCheck):
def check(self, instance):
jmx_address = instance.get('hdfs_namenode_jmx_uri')
disable_ssl_validation = instance.get('disable_ssl_validation', False)
tags = instance.get('tags', [])
if jmx_address is None:
raise Exception('The JMX URL must be specified in the instance configuration')
tags.append('namenode_url:' + jmx_address)
tags = list(set(tags))
# Get metrics from JMX
self._hdfs_namenode_metrics(jmx_address, disable_ssl_validation,
HDFS_NAME_SYSTEM_STATE_BEAN,
HDFS_NAME_SYSTEM_STATE_METRICS, tags)
self._hdfs_namenode_metrics(jmx_address, disable_ssl_validation,
HDFS_NAME_SYSTEM_BEAN,
HDFS_NAME_SYSTEM_METRICS, tags)
self.service_check(JMX_SERVICE_CHECK,
AgentCheck.OK,
tags=tags,
message='Connection to %s was successful' % jmx_address)
def _hdfs_namenode_metrics(self, jmx_uri, disable_ssl_validation, bean_name, metrics, tags):
'''
Get HDFS namenode metrics from JMX
'''
response = self._rest_request_to_json(jmx_uri, disable_ssl_validation,
JMX_PATH,
query_params={'qry':bean_name}, tags=tags)
beans = response.get('beans', [])
if beans:
bean = next(iter(beans))
            actual_bean_name = bean.get('name')
            if actual_bean_name != bean_name:
                raise Exception("Unexpected bean name {0}".format(actual_bean_name))
for metric, (metric_name, metric_type) in metrics.iteritems():
metric_value = bean.get(metric)
if metric_value is not None:
self._set_metric(metric_name, metric_type, metric_value, tags)
if 'CapacityUsed' in bean and 'CapacityTotal' in bean:
self._set_metric('hdfs.namenode.capacity_in_use', GAUGE,
float(bean['CapacityUsed']) / float(bean['CapacityTotal']), tags)
def _set_metric(self, metric_name, metric_type, value, tags=None):
'''
Set a metric
'''
if metric_type == GAUGE:
self.gauge(metric_name, value, tags=tags)
else:
self.log.error('Metric type "%s" unknown' % (metric_type))
def _rest_request_to_json(self, address, disable_ssl_validation, object_path, query_params, tags=None):
'''
Query the given URL and return the JSON response
'''
response_json = None
url = address
if object_path:
url = self._join_url_dir(url, object_path)
# Add query_params as arguments
if query_params:
query = '&'.join(['{0}={1}'.format(key, value) for key, value in query_params.iteritems()])
url = urljoin(url, '?' + query)
self.log.debug('Attempting to connect to "%s"' % url)
try:
response = requests.get(url, timeout=self.default_integration_http_timeout, verify=not disable_ssl_validation)
response.raise_for_status()
response_json = response.json()
except Timeout as e:
self.service_check(JMX_SERVICE_CHECK,
AgentCheck.CRITICAL,
tags=tags,
message="Request timeout: {0}, {1}".format(url, e))
raise
except (HTTPError,
InvalidURL,
ConnectionError) as e:
self.service_check(JMX_SERVICE_CHECK,
AgentCheck.CRITICAL,
tags=tags,
message="Request failed: {0}, {1}".format(url, e))
raise
except JSONDecodeError as e:
self.service_check(JMX_SERVICE_CHECK,
AgentCheck.CRITICAL,
tags=tags,
message='JSON Parse failed: {0}, {1}'.format(url, e))
raise
except ValueError as e:
self.service_check(JMX_SERVICE_CHECK,
AgentCheck.CRITICAL,
tags=tags,
message=str(e))
raise
return response_json
def _join_url_dir(self, url, *args):
'''
Join a URL with multiple directories
'''
for path in args:
url = url.rstrip('/') + '/'
url = urljoin(url, path.lstrip('/'))
return url
|
py | 7df93c773058c12be49249849e792e3f847eb9cd | import numpy as np
from collections import OrderedDict
from bokeh.charts import Area, show
from bokeh.layouts import gridplot
from bokeh.palettes import (Blues9, BrBG9, BuGn9, BuPu9, GnBu9, Greens9,
Greys9, OrRd9, Oranges9, PRGn9, PiYG9, PuBu9,
PuBuGn9, PuOr9, PuRd9, Purples9, RdBu9, RdGy9,
RdPu9, RdYlBu9, RdYlGn9, Reds9, Spectral9, YlGn9,
YlGnBu9, YlOrBr9, YlOrRd9, Inferno9, Magma9,
Plasma9, Viridis9, Accent8, Dark2_8, Paired9,
Pastel1_9, Pastel2_8, Set1_9, Set2_8, Set3_9)
standard_palettes = OrderedDict([("Blues9", Blues9), ("BrBG9", BrBG9),
("BuGn9", BuGn9), ("BuPu9", BuPu9),
("GnBu9", GnBu9), ("Greens9", Greens9),
("Greys9", Greys9), ("OrRd9", OrRd9),
("Oranges9", Oranges9), ("PRGn9", PRGn9),
("PiYG9", PiYG9), ("PuBu9", PuBu9),
("PuBuGn9", PuBuGn9), ("PuOr9", PuOr9),
("PuRd9", PuRd9), ("Purples9", Purples9),
("RdBu9", RdBu9), ("RdGy9", RdGy9),
("RdPu9", RdPu9), ("RdYlBu9", RdYlBu9),
("RdYlGn9", RdYlGn9), ("Reds9", Reds9),
("Spectral9", Spectral9), ("YlGn9", YlGn9),
("YlGnBu9", YlGnBu9), ("YlOrBr9", YlOrBr9),
("YlOrRd9", YlOrRd9), ("Inferno9", Inferno9),
("Magma9", Magma9), ("Plasma9", Plasma9),
("Viridis9", Viridis9), ("Accent8", Accent8),
("Dark2_8", Dark2_8), ("Paired9", Paired9),
("Pastel1_9", Pastel1_9),
("Pastel2_8", Pastel2_8), ("Set1_9", Set1_9),
("Set2_8", Set2_8), ("Set3_9", Set3_9)])
def create_area_chart(data, palette):
return Area(data,
title=palette,
stack=True,
palette=standard_palettes.get(palette),
legend=None,
xlabel='',
ylabel='',
xgrid=False,
ygrid=False,
tools='')
data = np.random.random_integers(low=5, high=13, size=[9, 20])
area_charts = [create_area_chart(data, palette) for palette in standard_palettes.keys()]
grid = gridplot(area_charts, ncols=3, plot_width=300, plot_height=300)
show(grid)
|
py | 7df93cdaf0fddc84933cf0c55657e46c28b7000e | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Fast R-CNN config system.
This file specifies default config options for Fast R-CNN. You should not
change values in this file. Instead, you should write a config file (in yaml)
and use cfg_from_file(yaml_file) to load it and override the default options.
Most tools in $ROOT/tools take a --cfg option to specify an override file.
- See tools/{train,test}_net.py for example code that uses cfg_from_file()
- See experiments/cfgs/*.yml for example YAML config override files
"""
import os
import os.path as osp
import numpy as np
# `pip install easydict` if you don't have it
from easydict import EasyDict as edict
__C = edict()
# Consumers can get config by:
# from fast_rcnn_config import cfg
cfg = __C
#
# Training options
#
__C.TRAIN = edict()
# Scales to use during training (can list multiple scales)
# Each scale is the pixel size of an image's shortest side
__C.TRAIN.SCALES = (576,)
# Max pixel size of the longest side of a scaled input image
__C.TRAIN.MAX_SIZE = 960
# Images to use per minibatch
__C.TRAIN.IMS_PER_BATCH = 2
# Minibatch size (number of regions of interest [ROIs])
__C.TRAIN.BATCH_SIZE = 128
# Fraction of minibatch that is labeled foreground (i.e. class > 0)
__C.TRAIN.FG_FRACTION = 0.25
# Overlap threshold for a ROI to be considered foreground (if >= FG_THRESH)
__C.TRAIN.FG_THRESH = 0.5
# Overlap threshold for a ROI to be considered background (class = 0 if
# overlap in [LO, HI))
__C.TRAIN.BG_THRESH_HI = 0.5
__C.TRAIN.BG_THRESH_LO = 0.1
# Use horizontally-flipped images during training?
__C.TRAIN.USE_FLIPPED = True
# Train bounding-box regressors
__C.TRAIN.BBOX_REG = True
# Overlap required between a ROI and ground-truth box in order for that ROI to
# be used as a bounding-box regression training example
__C.TRAIN.BBOX_THRESH = 0.5
# Iterations between snapshots
__C.TRAIN.SNAPSHOT_ITERS = 10000
# solver.prototxt specifies the snapshot path prefix, this adds an optional
# infix to yield the path: <prefix>[_<infix>]_iters_XYZ.caffemodel
__C.TRAIN.SNAPSHOT_INFIX = ''
# Use a prefetch thread in roi_data_layer.layer
# So far I haven't found this useful; likely more engineering work is required
__C.TRAIN.USE_PREFETCH = False
# Normalize the targets (subtract empirical mean, divide by empirical stddev)
__C.TRAIN.BBOX_NORMALIZE_TARGETS = True
# Deprecated (inside weights)
__C.TRAIN.BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
# Normalize the targets using "precomputed" (or made up) means and stdevs
# (BBOX_NORMALIZE_TARGETS must also be True)
__C.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED = False
__C.TRAIN.BBOX_NORMALIZE_MEANS = (0.0, 0.0, 0.0, 0.0)
__C.TRAIN.BBOX_NORMALIZE_STDS = (0.1, 0.1, 0.2, 0.2)
__C.TRAIN.RPN_NORMALIZE_TARGETS = False
__C.TRAIN.RPN_NORMALIZE_MEANS = (0.0, 0.0, 0.0, 0.0)
__C.TRAIN.RPN_NORMALIZE_STDS = (0.1, 0.1, 0.2, 0.2)
# Train using these proposals
__C.TRAIN.PROPOSAL_METHOD = 'selective_search'
# Make minibatches from images that have similar aspect ratios (i.e. both
# tall and thin or both short and wide) in order to avoid wasting computation
# on zero-padding.
__C.TRAIN.ASPECT_GROUPING = True
# Use RPN to detect objects
__C.TRAIN.HAS_RPN = False
# IOU >= thresh: positive example
__C.TRAIN.RPN_POSITIVE_OVERLAP = 0.7
# IOU < thresh: negative example
__C.TRAIN.RPN_NEGATIVE_OVERLAP = 0.3
# If an anchor statisfied by positive and negative conditions set to negative
__C.TRAIN.RPN_CLOBBER_POSITIVES = False
# Max number of foreground examples
__C.TRAIN.RPN_FG_FRACTION = 0.5
# Total number of examples
__C.TRAIN.RPN_BATCHSIZE = 256
# NMS threshold used on RPN proposals
__C.TRAIN.RPN_NMS_THRESH = 0.7
# Number of top scoring boxes to keep before apply NMS to RPN proposals
__C.TRAIN.RPN_PRE_NMS_TOP_N = 12000
# Number of top scoring boxes to keep after applying NMS to RPN proposals
__C.TRAIN.RPN_POST_NMS_TOP_N = 2000
# Proposal height and width both need to be greater than RPN_MIN_SIZE (at orig image scale)
__C.TRAIN.RPN_MIN_SIZE = 16
# Deprecated (outside weights)
__C.TRAIN.RPN_BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
# Give the positive RPN examples weight of p * 1 / {num positives}
# and give negatives a weight of (1 - p)
# Set to -1.0 to use uniform example weighting
__C.TRAIN.RPN_POSITIVE_WEIGHT = -1.0
# whether use class aware box or not
__C.TRAIN.AGNOSTIC = False
#
# Testing options
#
__C.TEST = edict()
# Scales to use during testing (can list multiple scales)
# Each scale is the pixel size of an image's shortest side
__C.TEST.SCALES = (576,)
# Max pixel size of the longest side of a scaled input image
__C.TEST.MAX_SIZE = 960
# Overlap threshold used for non-maximum suppression (suppress boxes with
# IoU >= this threshold)
__C.TEST.NMS = 0.3
# Experimental: treat the (K+1) units in the cls_score layer as linear
# predictors (trained, eg, with one-vs-rest SVMs).
__C.TEST.SVM = False
# Test using bounding-box regressors
__C.TEST.BBOX_REG = True
# Propose boxes
__C.TEST.HAS_RPN = False
# Test using these proposals
__C.TEST.PROPOSAL_METHOD = 'selective_search'
## NMS threshold used on RPN proposals
__C.TEST.RPN_NMS_THRESH = 0.7
## Number of top scoring boxes to keep before apply NMS to RPN proposals
__C.TEST.RPN_PRE_NMS_TOP_N = 6000
## Number of top scoring boxes to keep after applying NMS to RPN proposals
__C.TEST.RPN_POST_NMS_TOP_N = 300
# Proposal height and width both need to be greater than RPN_MIN_SIZE (at orig image scale)
__C.TEST.RPN_MIN_SIZE = 16
# whether use class aware box or not
__C.TEST.AGNOSTIC = False
#
# MISC
#
# The mapping from image coordinates to feature map coordinates might cause
# some boxes that are distinct in image space to become identical in feature
# coordinates. If DEDUP_BOXES > 0, then DEDUP_BOXES is used as the scale factor
# for identifying duplicate boxes.
# 1/16 is correct for {Alex,Caffe}Net, VGG_CNN_M_1024, and VGG16
__C.DEDUP_BOXES = 1./16.
# Pixel mean values (BGR order) as a (1, 1, 3) array
# We use the same pixel mean for all networks even though it's not exactly what
# they were trained with
__C.PIXEL_MEANS = np.array([[[102.9801, 115.9465, 122.7717]]])
# For reproducibility
__C.RNG_SEED = 3
# A small number that's used many times
__C.EPS = 1e-14
# Root directory of project
__C.ROOT_DIR = osp.abspath(osp.join(osp.dirname(__file__), '..', '..'))
# Data directory
__C.DATA_DIR = osp.abspath(osp.join(__C.ROOT_DIR, 'data'))
# Model directory
__C.MODELS_DIR = osp.abspath(osp.join(__C.ROOT_DIR, 'models', 'pascal_voc'))
# Name (or path to) the matlab executable
__C.MATLAB = 'matlab'
# Place outputs under an experiments directory
__C.EXP_DIR = 'default'
# Use GPU implementation of non-maximum suppression
__C.USE_GPU_NMS = True
# Default GPU device id
__C.GPU_ID = 0
def get_output_dir(imdb, net=None):
"""Return the directory where experimental artifacts are placed.
If the directory does not exist, it is created.
A canonical path is built using the name from an imdb and a network
(if not None).
"""
outdir = osp.abspath(osp.join(__C.ROOT_DIR, 'output', __C.EXP_DIR, imdb.name))
if net is not None:
outdir = osp.join(outdir, net.name)
if not os.path.exists(outdir):
os.makedirs(outdir)
return outdir
def _merge_a_into_b(a, b):
"""Merge config dictionary a into config dictionary b, clobbering the
options in b whenever they are also specified in a.
"""
if type(a) is not edict:
return
for k, v in a.iteritems():
# a must specify keys that are in b
if not b.has_key(k):
raise KeyError('{} is not a valid config key'.format(k))
# the types must match, too
old_type = type(b[k])
if old_type is not type(v):
if isinstance(b[k], np.ndarray):
v = np.array(v, dtype=b[k].dtype)
else:
raise ValueError(('Type mismatch ({} vs. {}) '
'for config key: {}').format(type(b[k]),
type(v), k))
# recursively merge dicts
if type(v) is edict:
try:
_merge_a_into_b(a[k], b[k])
except:
print('Error under config key: {}'.format(k))
raise
else:
b[k] = v
def cfg_from_file(filename):
"""Load a config file and merge it into the default options."""
import yaml
with open(filename, 'r') as f:
yaml_cfg = edict(yaml.load(f))
_merge_a_into_b(yaml_cfg, __C)
def cfg_from_list(cfg_list):
"""Set config keys via list (e.g., from command line)."""
from ast import literal_eval
assert len(cfg_list) % 2 == 0
for k, v in zip(cfg_list[0::2], cfg_list[1::2]):
key_list = k.split('.')
d = __C
for subkey in key_list[:-1]:
assert d.has_key(subkey)
d = d[subkey]
subkey = key_list[-1]
assert d.has_key(subkey)
try:
value = literal_eval(v)
except:
# handle the case when v is a string literal
value = v
assert type(value) == type(d[subkey]), \
'type {} does not match original type {}'.format(
type(value), type(d[subkey]))
d[subkey] = value
|
py | 7df93d13df93f3f3b4be36be90e14fdc0b483b22 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Global max pooling 3D layer."""
# pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
from keras import backend
from keras.layers.pooling.base_global_pooling3d import GlobalPooling3D
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.layers.GlobalMaxPool3D', 'keras.layers.GlobalMaxPooling3D')
class GlobalMaxPooling3D(GlobalPooling3D):
"""Global Max pooling operation for 3D data.
Args:
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
while `channels_first` corresponds to inputs with shape
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
keepdims: A boolean, whether to keep the spatial dimensions or not.
If `keepdims` is `False` (default), the rank of the tensor is reduced
for spatial dimensions.
If `keepdims` is `True`, the spatial dimensions are retained with
length 1.
The behavior is the same as for `tf.reduce_max` or `np.max`.
Input shape:
- If `data_format='channels_last'`:
5D tensor with shape:
`(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
- If `data_format='channels_first'`:
5D tensor with shape:
`(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`
Output shape:
- If `keepdims`=False:
2D tensor with shape `(batch_size, channels)`.
- If `keepdims`=True:
- If `data_format='channels_last'`:
5D tensor with shape `(batch_size, 1, 1, 1, channels)`
- If `data_format='channels_first'`:
5D tensor with shape `(batch_size, channels, 1, 1, 1)`
"""
def call(self, inputs):
if self.data_format == 'channels_last':
return backend.max(inputs, axis=[1, 2, 3], keepdims=self.keepdims)
else:
return backend.max(inputs, axis=[2, 3, 4], keepdims=self.keepdims)
# Alias
GlobalMaxPool3D = GlobalMaxPooling3D
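# Illustrative shapes for the layer above (assumes TensorFlow is installed):
#
#   import tensorflow as tf
#   x = tf.random.normal((2, 4, 5, 6, 3))                        # (batch, d1, d2, d3, channels)
#   tf.keras.layers.GlobalMaxPooling3D()(x).shape                # (2, 3)
#   tf.keras.layers.GlobalMaxPooling3D(keepdims=True)(x).shape   # (2, 1, 1, 1, 3)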
|
py | 7df93d73df599f33b24f4d3e0bc981b9173d1240 | #!/usr/bin/env python3
# _*_ coding: utf-8 _*_
import random
import json
# Django modules and helpers
# HTTP response related functions and classes
from django.shortcuts import render, redirect
from django.http import JsonResponse, HttpResponse
from django.urls import reverse
from django.utils import timezone
# Import Django's user authentication backend module
from django.contrib.auth.backends import ModelBackend
# Password hashing
from django.contrib.auth.hashers import make_password
# User authentication, login and logout helpers
from django.contrib.auth import authenticate, login, logout
# Django's database query object
from django.db.models import Q
# Django's class-based view base class
from django.views.generic.base import View
# Django's messages framework: lets the backend push messages to the frontend, where template tags can pick them up
from django.contrib import messages
# Custom models
from .models import UserProfiles, EmailVerifyCode, RecodeImage
from .forms import LoginForm, RegisterForm, ForgetPasswordForm, ResetPasswordForm
from assist_function.email.email import send_email_verify_record
from food.models import FoodArticle, FoodImage
from operation.models import UserMessage
# Generate two random numbers and fetch the matching captcha image
def create_numbers(request):
a, b = random.randint(1, 9), random.randint(1, 9)
recode_image = RecodeImage.objects.get(recode_number_a=a, recode_number_b=b)
request.session["number_a"] = a
request.session["number_b"] = b
return recode_image
class CustomBackend(ModelBackend):
"""
自定义登录验证方式,实现用户名,邮箱均可登录
实现逻辑:将用户输入的数据进入后台查询,如果查询成功,则认证成功,出现异常或失败,认证失败
"""
def authenticate(self, request, username=None, password=None, **kwargs):
try:
            # Use the Q object to query by username OR email ("union" query)
user = UserProfiles.objects.get(Q(username=username) | Q(email=username))
            # check_password (inherited by UserProfiles) hashes the supplied plaintext password
            # and compares it with the hashed password stored in the database.
            # If they match, verification passes and the user is returned; on failure or any
            # exception, None is returned.
if user.check_password(password):
return user
except Exception as e:
return None
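# For the backend above to be used by Django, the project settings are expected to list it
# in AUTHENTICATION_BACKENDS; the dotted path below is an assumption about this project's
# module layout:
#
#   AUTHENTICATION_BACKENDS = ['user.views.CustomBackend']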
class LoginView(View):
"""用户登录视图类"""
def get(self, request):
"""用户 get 请求登录页面, 给出响应"""
# 获取浏览器请求的前向页面,以便于用户登录完成后,返回登录前的页面
# 将获取到的信息保存到 request 的 session 中, 如果没有获取到,怎返回首页
request.session['login_reference_page'] = request.META.get("HTTP_REFERER", '/')
# 获取验证码图片
recode_image = create_numbers(request)
return render(request, "user/login.html", {"recode_image": recode_image})
def post(self, request):
"""post 方法的表单提交"""
# 这里会的「login_form」实例会自动从「request.POST」获取相应的字段值
# 所以「LoginForm」中定义的「键名」一定要和「html」页面中「form」标签提交的「键名」对应起来,否则会获取失败
login_form = LoginForm(request.POST)
if login_form.is_valid():
            # Read the submitted form fields
username = request.POST.get('username', '')
password = request.POST.get('password', '')
recode = int(request.POST.get("recode", 0))
            # Read the captcha answer stored in the session and check the sum
            if recode != request.session["number_a"] + request.session["number_b"]:
                # Wrong captcha: generate a new image
recode_image = create_numbers(request).to_json()
message = {"status": "fail", "fail_type": "recode", "recode_image": recode_image}
                # This caused a lot of trouble: turning a concrete model instance into JSON.
                # The solution is a custom method on the model that packs field/value pairs into a dict.
                # While building that dict another issue appeared: ImageField values must be passed
                # through str() first. See the RecodeImage model for details.
return JsonResponse(message, safe=False)
            # Username check
verify_user_name = UserProfiles.objects.filter(Q(username=username) | Q(email=username))
            if not verify_user_name:  # the username was not found in the database
recode_image = create_numbers(request).to_json()
message = {"status": "fail", "fail_type": "username", "recode_image": recode_image}
return JsonResponse(message, safe=False)
            # Password check
            user = authenticate(username=username, password=password)  # user is None if authentication fails
            if not user:  # the username and password do not match
recode_image = create_numbers(request).to_json()
message = {"status": "fail", "fail_type": "password", "recode_image": recode_image}
return JsonResponse(message, safe=False)
            # Check whether the account has been activated
            if not user.is_active:  # the user has not been activated yet
send_email_verify_record(user.email)
return JsonResponse({
"status": "fail",
"fail_type": "not_active",
"message": "用户未激活,已重发激活连接至注册邮箱,请前往邮箱激活..."
})
else:
                login(request, user)  # every check passed: log the user in
                # Redirect back to where the user came from
refer_page = request.session.get("login_reference_page", '/')
if refer_page in \
(reverse("user:login"), reverse("user:reset_password")):
return JsonResponse({"status": "success", "url": reverse("home_page")})
else:
return JsonResponse({"status": "success", "url": request.session["login_reference_page"]})
        # The form data failed validation
recode_image = create_numbers(request).to_json()
message = {"status": "fail", "fail_type": "form", "recode_image": recode_image}
return JsonResponse(message, safe=False)
# Log the user out and redirect back to the previous page (falling back to the home page)
def user_logout(request):
logout(request)
return redirect(request.META.get('HTTP_REFERER', '/'))
class RegisterView(View):
"""用户注册视图类"""
def get(self, request):
"""get 请求响应"""
# 获取验证码图片
recode_image = create_numbers(request)
return render(request, "user/register.html", {"recode_image": recode_image})
def post(self, request):
"""post 请求响应"""
# form 验证
register_form = RegisterForm(request.POST)
if register_form.is_valid():
            # Read the user's submitted form fields
username = request.POST.get("username")
email = request.POST.get("email")
password = request.POST.get("password")
recode = int(request.POST.get("recode"))
            # Read the captcha answer stored in the session and check the sum
            if recode != request.session["number_a"] + request.session["number_b"]:
                # Wrong captcha: generate a new image
recode_image = create_numbers(request).to_json()
message = {"status": "fail", "fail_type": "recode", "recode_image": recode_image}
return JsonResponse(message, safe=False)
            # Check whether the username is already registered
            verify_username = UserProfiles.objects.filter(username=username)
            if verify_username:  # the username is already taken
recode_image = create_numbers(request).to_json()
message = {"status": "fail", "fail_type": "username", "recode_image": recode_image}
return JsonResponse(message, safe=False)
            # Check whether the email address is already registered
            verify_email = UserProfiles.objects.filter(email=email)
            if verify_email:  # the email address is already registered
recode_image = create_numbers(request).to_json()
message = {"status": "fail", "fail_type": "email", "recode_image": recode_image}
return JsonResponse(message, safe=False)
            # Automatically send a verification-code email to the user's mailbox
            send_status = send_email_verify_record(email)
            if send_status:
                # All checks passed and the email was sent: write the new user to the database
new_user = UserProfiles()
new_user.username = username
new_user.email = email
                new_user.is_active = False  # mark the account as not yet activated
                new_user.password = make_password(password)  # hash the password
                new_user.save()  # write to the database
                # On-site user messages: add a welcome message for the new registration
user_message = UserMessage()
user_message.user = new_user
user_message.message_title = '欢迎注册<凡肴网-fansfood.com>\n'
user_message.message_content = f"""
嗨,{username}!很高兴您能注册<凡肴网-fansfood.com>。\n\n
这是一个关于美食的网站,网站提供了许多美食的制作教程和美食图片,希望您能够找到心仪的美食和图片!\n\n
\t\t\t\t\t\t\t\t一个美食爱好者:nick\n
\t\t\t\t\t\t\t\t{timezone.now().strftime('%Y-%m-%d %H:%M')}\n
"""
user_message.save()
return JsonResponse({"status": "success"})
else:
return JsonResponse({"status": "fail", "fail_type": "send_email"})
        # The form data failed validation
recode_image = create_numbers(request).to_json()
message = {"status": "fail", "fail_type": "form", "recode_image": recode_image}
return JsonResponse(message, safe=False)
class ActivationView(View):
"""用户激活视图类"""
def get(self, request, active_code):
email_verify_record = EmailVerifyCode.objects.filter(code=active_code)
if email_verify_record:
recode = email_verify_record[0]
            # (timezone.now() - recode.send_time) is a datetime.timedelta object;
            # use total_seconds() to get the elapsed seconds
            if (timezone.now() - recode.send_time).total_seconds() < 600:  # the code is valid for 600 s
user = UserProfiles.objects.get(email=recode.email)
user.is_active = True
user.save()
                recode.delete()  # after activation, delete the email verification code
messages.add_message(request, messages.INFO, "用户已激活,请重新登录")
return redirect("user:login") # 返回用户登录页面
else:
send_email_verify_record(recode.email)
                recode.delete()  # the code timed out: delete it (a new one was just re-sent)
messages.add_message(request, messages.INFO, "连接失效,已重发验证邮件,请前往邮箱重新激活")
return redirect("user:login")
        # Verification failed: reset and send the user back to the login page
messages.add_message(
request,
messages.INFO,
"无效的激活验证,页面已重置,请输入邮箱信息,重新获取激活链接"
)
return redirect("user:login")
class Reactive(View):
"""新用户激活失败,重新获取激活链接的视图类"""
def get(self, request):
# 获取验证码图片
recode_image = create_numbers(request)
return render(request, "user/reactive.html", {"recode_image": recode_image})
def post(self, request):
reactive_form = ForgetPasswordForm(request.POST)
if reactive_form.is_valid():
username = request.POST.get("username")
email = request.POST.get("email")
recode = int(request.POST.get("recode"))
            # Read the captcha answer stored in the session and check the sum
            if recode != request.session["number_a"] + request.session["number_b"]:
                # Wrong captcha: generate a new image
recode_image = create_numbers(request).to_json()
message = {"status": "fail", "fail_type": "recode", "recode_image": recode_image}
return JsonResponse(message, safe=False)
            # Check that the username and email address match
verify_user = UserProfiles.objects.filter(username=username)
if not verify_user:
recode_image = create_numbers(request).to_json()
message = {"status": "fail", "fail_type": "username", "recode_image": recode_image}
return JsonResponse(message, safe=False)
else:
user = verify_user[0]
if user.username != username or user.email != email:
recode_image = create_numbers(request).to_json()
message = {"status": "fail", "fail_type": "email", "recode_image": recode_image}
return JsonResponse(message, safe=False)
else:
send_email_verify_record(email, "register") # 发送重置密码的邮件激活码
return JsonResponse({"status": "success"})
        # Form validation failed
recode_image = create_numbers(request)
message = {"status": "fail", "fail_type": "form", "recode_image": recode_image}
return JsonResponse(message, safe=False)
class ForgetPasswordView(View):
"""忘记密码的视图类"""
def get(self, request):
# 获取验证码图片
recode_image = create_numbers(request)
return render(request, "user/forget_password.html", {"recode_image": recode_image})
def post(self, request):
forget_password_form = ForgetPasswordForm(request.POST)
if forget_password_form.is_valid():
username = request.POST.get("username")
email = request.POST.get("email")
recode = int(request.POST.get("recode"))
            # Read the captcha answer stored in the session and check the sum
            if recode != request.session["number_a"] + request.session["number_b"]:
                # Wrong captcha: generate a new image
recode_image = create_numbers(request).to_json()
message = {"status": "fail", "fail_type": "recode", "recode_image": recode_image}
return JsonResponse(message, safe=False)
            # Check that the username and email address match
verify_user = UserProfiles.objects.filter(username=username)
if not verify_user:
recode_image = create_numbers(request).to_json()
message = {"status": "fail", "fail_type": "username", "recode_image": recode_image}
return JsonResponse(message, safe=False)
else:
user = verify_user[0]
if user.username != username or user.email != email:
recode_image = create_numbers(request).to_json()
message = {"status": "fail", "fail_type": "email", "recode_image": recode_image}
return JsonResponse(message, safe=False)
else:
send_email_verify_record(email, "forget") # 发送重置密码的邮件激活码
return JsonResponse({"status": "success"})
# form validation failed
recode_image = create_numbers(request).to_json()
message = {"status": "fail", "fail_type": "form", "recode_image": recode_image}
return JsonResponse(message, safe=False)
class ResetPasswordCodeView(View):
"""用户重置密码的邮件链接响应的视图函数"""
def get(self, request, reset_password_code):
verify_code = EmailVerifyCode.objects.filter(code=reset_password_code)
if verify_code:
reset_code = verify_code[0]
email = reset_code.email
request.session["email"] = email # 将邮箱保存在 session 中
request.session["reset_password_code"] = reset_password_code
return redirect("user:reset_password")
messages.add_message(request, messages.INFO, "连接失效,页面已重置,请重新获取")
return redirect("user:forget_password")
class ResetPasswordView(View):
"""重置密码视图类"""
def get(self, request):
return render(request, 'user/reset_password.html')
def post(self, request):
reset_password_form = ResetPasswordForm(request.POST)
if reset_password_form.is_valid():
password = request.POST.get("password")
password2 = request.POST.get("password2")
# get the email and reset_password_code from the session; if either is missing, the request is invalid
try:
email = request.session["email"] # get the email from the session
code = request.session["reset_password_code"] # get the reset_password_code
except KeyError:
return JsonResponse({"status": "fail", "fail_type": "email"})
# check that the two password entries match
if password != password2:
return JsonResponse({"status": "fail", "fail_type": "not_equal"})
# the two password entries match
user = UserProfiles.objects.get(email=email) # look up the user by email
user.password = make_password(password) # set the new password
user.save() # persist the change
# password updated: delete the verification code from the database
verify_cord = EmailVerifyCode.objects.get(code=code)
verify_cord.delete()
del request.session["email"] # remove the email from the session
del request.session["reset_password_code"] # remove the reset_password_code from the session
# return redirect("user:login") # 重定向至登录页面
# 这里出现了一个问题,ajax 的请求,不会执行重定向操作
# 需要在 ajax 中执行操作
return JsonResponse({"status": "success"})
# form validation failed
return JsonResponse({"status": "fail", "fail_type": "form"})
class HomePageView(View):
"""主页视图"""
def get(self, request):
# randomly fetch 6 images from the database
random_image = FoodImage.objects.order_by("?")[:6]
# randomly fetch 6 food articles from the MySQL database
random_food = FoodArticle.objects.order_by("?")[:6]
# popular food -- the dishes made by the most people
popular_food = FoodArticle.objects.order_by("-fav")[:3]
return render(request, "home_page.html", {
"random_food": random_food,
"popular_food": popular_food,
"random_image": random_image,
"focus": "home", # 选中状态标志
})
class FlushRecodeImage(View):
"""刷新图片验证码"""
def post(self, request):
recode_image = create_numbers(request).to_json()
message = {"status": "success", "recode_image": recode_image}
return JsonResponse(message, safe=False)
def about(request):
return render(request, 'about.html', {'focus': 'about'}) |
py | 7df93e72633ddad2eb99080b09ad996f025225e5 | # Import numpy
import numpy as np
# Import matplotlib
import matplotlib.pylab as plt
# Configure
plt.rcParams['xtick.direction'] = 'out'
plt.rcParams['ytick.direction'] = 'out'
# Optionally set font to Computer Modern to avoid common missing font errors
params = {
'axes.labelsize': 24,
'legend.fontsize': 14,
'xtick.labelsize': 24,
'ytick.labelsize': 24,
'text.usetex': True}
plt.rcParams.update(params)
# Latex math
plt.rcParams['text.latex.preamble'] = [r'\usepackage{sfmath}']
plt.rcParams['font.family'] = 'sans-serif'
# plt.rcParams['font.sans-serif'] = 'courier'
plt.rcParams['font.size'] = 18
plt.rcParams['font.weight'] = 'bold'
plt.rcParams['lines.linewidth'] = 4
plt.rcParams['lines.color'] = 'r'
# Make sure everything is within the frame
plt.rcParams.update({'figure.autolayout': True})
# Set marker size
markerSize = 11.0
# bar chart settings
alpha = 0.8
# These are the "Tableau 20" colors as RGB.
tableau20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
(44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
(148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
(227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
(188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]
# Scale the RGB values to the [0, 1] range, which is the format matplotlib accepts.
for i in range(len(tableau20)):
r, g, b = tableau20[i]
tableau20[i] = (r / 255., g / 255., b / 255.)
mevery = 3
class PostOpt:
"""
Class to handle plotting
"""
@staticmethod
def plot_solution(data1, data2, name):
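# data1 and data2 are expected to be (N, 4) arrays: column 0 is the spatial
# coordinate x and columns 1-3 are the cg, pcg and exact solutions, matching
# the plot labels below and the file parsing in __main__.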
plt.figure()
fig, ax = plt.subplots()
# remove the upper and right borders
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.plot(data1[:,0], data1[:,1], '-D', label='(a) cg',
ms=markerSize, mec='black', color=tableau20[0], markevery=mevery,
alpha=alpha)
ax.plot(data1[:,0], data1[:,2], '-v', label='(a) pcg',
ms=markerSize, mec='black', color=tableau20[0], markevery=mevery,
alpha=alpha)
ax.plot(data1[:,0], data1[:,3], '-*', label='(a) exact',
ms=markerSize, mec='black', color=tableau20[0], markevery=mevery,
alpha=alpha)
ax.plot(data2[:,0], data2[:,1], '-D', label='(b) cg',
ms=markerSize, mec='black', color=tableau20[2], markevery=mevery,
alpha=alpha)
ax.plot(data2[:,0], data2[:,2], '-v', label='(b) pcg',
ms=markerSize, mec='black', color=tableau20[2], markevery=mevery,
alpha=alpha)
ax.plot(data2[:,0], data2[:,3], '-*', label='(b) exact',
ms=markerSize, mec='black', color=tableau20[2], markevery=mevery,
alpha=alpha)
ax.set_ylabel('Solution u(x)')
ax.set_xlabel('Domain x')
#niters_labl = [1, 20, 40, 60, 80, 100]
#ax.set_yticks(np.logspace(-5, 0, 6, endpoint=True))
#ax.set_yticks(np.linspace(0, 1, 5, endpoint=True, dtype=np.int))
ax.set_yticks(np.linspace(0, 1.5, 4, endpoint=True))
ax.legend(loc='lower left', framealpha=0.0)
plt.savefig(name, bbox_inches='tight', pad_inches=0.05)
return
if __name__ == "__main__":
inpFile = open("dirichlet.dat", "r")
fdirichlet = list(inpFile.readlines())
inpFile.close()
inpFile = open("mixed.dat", "r")
fmixed = list(inpFile.readlines())
inpFile.close()
soldirichlet = []
for line in fdirichlet:
entry = line.split()
soldirichlet.append([float(entry[0]),
float(entry[1]),
float(entry[2]),
float(entry[3])])
soldirichlet = np.array(soldirichlet)
solmixed = []
for line in fmixed:
entry = line.split()
solmixed.append([float(entry[0]), float(entry[1]), float(entry[2]), float(entry[3])])
solmixed = np.array(solmixed)
# Plot the solution in the profile
PostOpt.plot_solution(soldirichlet, solmixed, "profile.pdf")
|
py | 7df93ebe1be5969ab595cc2cf1303dbe63b7ae53 | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "template_test.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
py | 7df93fed3f2c71e664b5766e0979a42cbfaf8dd9 | #!/bin/python3
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from getpass import getpass
from secrets import token_hex
def enchachacry(data):
nonce = bytes(token_hex(8), "UTF-8")
salt = bytes(token_hex(8), "UTF-8")
keyraw = salt+bytes(getpass(), "UTF-8")
key = hashes.Hash(hashes.BLAKE2b(64), backend=default_backend())
key.update(keyraw)
key = key.finalize()
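# XOR-fold the 64-byte BLAKE2b digest into a 32-byte ChaCha20 key:
# byte i of the derived key is digest[i] ^ digest[i + 32].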
k3y = bytes()
for bits in range(0,32,1):
stream = [key[bits] ^ key[(bits+32)]]
k3y = k3y + bytes(stream)
if bits == 31:
key = k3y
algorithm = algorithms.ChaCha20(key, nonce)
cipher = Cipher(algorithm, mode=None, backend=default_backend())
marker = bytes(b"\x8c\x14\xf6t\xea\xd5\xe3\x88Z\xa8~\xceE\x02w\\\xf4/w\xfa\xc6\xc5g}")
encrypt = cipher.encryptor()
return (marker+salt+nonce+encrypt.update(data))
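# Layout of the bytes returned above: 24-byte marker + 16-byte salt + 16-byte nonce,
# followed by the ciphertext; dechachacry() and the file handling below rely on
# exactly these offsets when splitting an encrypted file back apart.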
def dechachacry(data, salt, nonce):
keyraw = salt+bytes(getpass(), "UTF-8")
key = hashes.Hash(hashes.BLAKE2b(64), backend=default_backend())
key.update(keyraw)
key = key.finalize()
k3y = bytes()
for bits in range(0,32,1):
stream = [key[bits] ^ key[(bits+32)]]
k3y = k3y + bytes(stream)
if bits == 31:
key = k3y
algorithm = algorithms.ChaCha20(key, nonce)
cipher = Cipher(algorithm, mode=None, backend=default_backend())
decrypt = cipher.decryptor()
return decrypt.update(data)
print("Be aware that this program directly decrypts the given file on the hard drive and not on a temporary file system!")
print("If the file's directory, or the physical hard drive, is not protected from external access, you should copy/move it to a temporary file system before decrypting (mount -t tmpfs -o size=<size> none /PATH/TO/MOUNT/TMPFS)")
print("Enter the path including the file that has to be encrypted/decrypted! If there are more files, you can use tar (tar -cf <merged_file.tar> <file1> <file2>) to merge them. To quit the program, enter q")
print()
target = ""
file = ""
while 1:
try:
target = input()
if target == "q":
break
with open(target, "b+r") as f:
file = f.read()
f.closed
break
except:
print("File or path not existing, or cannot be accessed. Enter another /path/to/file or enter q for quit")
print()
if target != "q":
pycry = ""
if file[:24] == bytes(b"\x8c\x14\xf6t\xea\xd5\xe3\x88Z\xa8~\xceE\x02w\\\xf4/w\xfa\xc6\xc5g}"):
file = file[24:]
salt = file[0:16]
file = file[16:]
nonce = file[0:16]
file = file[16:]
pycry = dechachacry(file, salt, nonce)
else:
pycry = enchachacry(file)
if pycry != "":
with open(target, "b+w") as f:
file = f.write(pycry)
f.closed
|
py | 7df94056de007b1110a910b2448d179ed64f6b5b | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for prefetching_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
from tensorflow.contrib.data.python.ops import prefetching_ops
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.compat import compat
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import test
class PrefetchingKernelsOpsTest(test.TestCase):
def setUp(self):
self._event = threading.Event()
def _create_ds_and_iterator(self, device0, initializable=False):
def gen():
for i in range(1, 10):
yield [float(i)]
if i == 6:
self._event.set()
with ops.device(device0):
ds = dataset_ops.Dataset.from_generator(gen, (dtypes.float32))
if initializable:
ds_iterator = ds.make_initializable_iterator()
else:
ds_iterator = ds.make_one_shot_iterator()
return (ds, ds_iterator)
def _create_ops(self, ds, ds_iterator, buffer_name, device0, device1):
ds_iterator_handle = ds_iterator.string_handle()
@function.Defun(dtypes.string)
def _remote_fn(h):
remote_iterator = iterator_ops.Iterator.from_string_handle(
h, ds.output_types, ds.output_shapes)
return remote_iterator.get_next()
target = constant_op.constant(device0)
with ops.device(device1):
buffer_resource_handle = prefetching_ops.function_buffering_resource(
f=_remote_fn,
output_types=[dtypes.float32],
target_device=target,
string_arg=ds_iterator_handle,
buffer_size=3,
shared_name=buffer_name)
with ops.device(device1):
prefetch_op = prefetching_ops.function_buffering_resource_get_next(
function_buffer_resource=buffer_resource_handle,
output_types=[dtypes.float32])
reset_op = prefetching_ops.function_buffering_resource_reset(
function_buffer_resource=buffer_resource_handle)
destroy_op = resource_variable_ops.destroy_resource_op(
buffer_resource_handle, ignore_lookup_error=True)
return (prefetch_op, reset_op, destroy_op)
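# The three ops built above: prefetch_op pulls the next element out of the
# function-buffering resource on device1, reset_op clears the resource's buffer so
# the underlying iterator can be re-initialized, and destroy_op releases the
# resource handle once a test is done with it.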
def _prefetch_fn_helper_one_shot(self, buffer_name, device0, device1):
worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
ds, ds_iterator = self._create_ds_and_iterator(device0, initializable=False)
prefetch_op, _, destroy_op = self._create_ops(ds, ds_iterator, buffer_name,
device0, device1)
with self.test_session(config=worker_config) as sess:
elem = sess.run(prefetch_op)
self.assertEqual(elem, [1.0])
elem = sess.run(prefetch_op)
self.assertEqual(elem, [2.0])
elem = sess.run(prefetch_op)
self.assertEqual(elem, [3.0])
elem = sess.run(prefetch_op)
self.assertEqual(elem, [4.0])
self._event.wait()
elem = sess.run(prefetch_op)
self.assertEqual(elem, [5.0])
sess.run(destroy_op)
def testSameDeviceCPU(self):
self._prefetch_fn_helper_one_shot("same_device_cpu",
"/job:localhost/replica:0/task:0/cpu:0",
"/job:localhost/replica:0/task:0/cpu:0")
def testDifferentDeviceCPU(self):
self._prefetch_fn_helper_one_shot("diff_device_cpu",
"/job:localhost/replica:0/task:0/cpu:0",
"/job:localhost/replica:0/task:0/cpu:1")
def testDifferentDeviceCPUGPU(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
self._prefetch_fn_helper_one_shot("cpu_gpu",
"/job:localhost/replica:0/task:0/cpu:0",
"/job:localhost/replica:0/task:0/gpu:0")
def testReinitialization(self):
worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
device0 = "/job:localhost/replica:0/task:0/cpu:0"
device1 = "/job:localhost/replica:0/task:0/cpu:1"
ds, ds_iterator = self._create_ds_and_iterator(device0, initializable=True)
prefetch_op, reset_op, destroy_op = self._create_ops(
ds, ds_iterator, "reinit", device0, device1)
with self.test_session(config=worker_config) as sess:
sess.run(ds_iterator.initializer)
elem = sess.run(prefetch_op)
self.assertEqual(elem, [1.0])
elem = sess.run(prefetch_op)
self.assertEqual(elem, [2.0])
elem = sess.run(prefetch_op)
self.assertEqual(elem, [3.0])
elem = sess.run(prefetch_op)
self.assertEqual(elem, [4.0])
self._event.wait()
elem = sess.run(prefetch_op)
self.assertEqual(elem, [5.0])
# Let's reset the function buffering resource and reinitialize the
# iterator. Should be able to go through this again.
self._event.clear()
sess.run(reset_op)
sess.run(ds_iterator.initializer)
elem = sess.run(prefetch_op)
self.assertEqual(elem, [1.0])
elem = sess.run(prefetch_op)
self.assertEqual(elem, [2.0])
elem = sess.run(prefetch_op)
self.assertEqual(elem, [3.0])
elem = sess.run(prefetch_op)
self.assertEqual(elem, [4.0])
self._event.wait()
elem = sess.run(prefetch_op)
self.assertEqual(elem, [5.0])
sess.run(destroy_op)
def testReinitializationOutOfRange(self):
worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
device0 = "/job:localhost/replica:0/task:0/cpu:0"
device1 = "/job:localhost/replica:0/task:0/cpu:1"
ds, ds_iterator = self._create_ds_and_iterator(device0, initializable=True)
prefetch_op, reset_op, destroy_op = self._create_ops(
ds, ds_iterator, "reinit", device0, device1)
with self.test_session(config=worker_config) as sess:
sess.run(ds_iterator.initializer)
for i in range(1, 10):
elem = sess.run(prefetch_op)
self.assertEqual(elem, [float(i)])
# Try fetching after it's over twice to test out end of sequence.
with self.assertRaises(errors.OutOfRangeError):
sess.run(prefetch_op)
with self.assertRaises(errors.OutOfRangeError):
sess.run(prefetch_op)
# Now reset everything and try it out again.
self._event.clear()
sess.run(reset_op)
sess.run(ds_iterator.initializer)
for i in range(1, 10):
elem = sess.run(prefetch_op)
self.assertEqual(elem, [float(i)])
# Try fetching after it's over twice to test out end of sequence.
with self.assertRaises(errors.OutOfRangeError):
sess.run(prefetch_op)
with self.assertRaises(errors.OutOfRangeError):
sess.run(prefetch_op)
sess.run(destroy_op)
def testStringsGPU(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
device0 = "/job:localhost/replica:0/task:0/cpu:0"
device1 = "/job:localhost/replica:0/task:0/gpu:0"
ds = dataset_ops.Dataset.from_tensor_slices(["a", "b", "c"])
ds_iterator = ds.make_one_shot_iterator()
ds_iterator_handle = ds_iterator.string_handle()
@function.Defun(dtypes.string)
def _remote_fn(h):
remote_iterator = iterator_ops.Iterator.from_string_handle(
h, ds.output_types, ds.output_shapes)
return remote_iterator.get_next()
target = constant_op.constant(device0)
with ops.device(device1):
buffer_resource_handle = prefetching_ops.function_buffering_resource(
f=_remote_fn,
output_types=[dtypes.string],
target_device=target,
string_arg=ds_iterator_handle,
buffer_size=3,
shared_name="strings")
with ops.device(device1):
prefetch_op = prefetching_ops.function_buffering_resource_get_next(
function_buffer_resource=buffer_resource_handle,
output_types=[dtypes.string])
destroy_op = resource_variable_ops.destroy_resource_op(
buffer_resource_handle, ignore_lookup_error=True)
with self.test_session() as sess:
self.assertEqual([b"a"], sess.run(prefetch_op))
self.assertEqual([b"b"], sess.run(prefetch_op))
self.assertEqual([b"c"], sess.run(prefetch_op))
with self.assertRaises(errors.OutOfRangeError):
sess.run(prefetch_op)
sess.run(destroy_op)
class PrefetchToDeviceTest(test.TestCase):
def testPrefetchToDevice(self):
host_dataset = dataset_ops.Dataset.range(10)
device_dataset = host_dataset.apply(
prefetching_ops.prefetch_to_device("/cpu:1"))
# NOTE(mrry): This device block creates the "host" dataset and iterator on
# /cpu:0, and ensures that the prefetching is across devices. In typical use
# this would not be necessary, because the GPU device would not support any
# of the dataset-related ops.
with ops.device("/cpu:0"):
iterator = device_dataset.make_one_shot_iterator()
self.assertEqual(host_dataset.output_types, device_dataset.output_types)
self.assertEqual(host_dataset.output_types, iterator.output_types)
self.assertEqual(host_dataset.output_shapes, device_dataset.output_shapes)
self.assertEqual(host_dataset.output_shapes, iterator.output_shapes)
self.assertEqual(host_dataset.output_classes, device_dataset.output_classes)
self.assertEqual(host_dataset.output_classes, iterator.output_classes)
next_element = iterator.get_next()
self.assertEqual(dtypes.int64, next_element.dtype)
self.assertEqual([], next_element.shape)
worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
with self.test_session(config=worker_config) as sess:
for i in range(10):
self.assertEqual(i, sess.run(next_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
def testPrefetchToSameDevice(self):
host_dataset = dataset_ops.Dataset.range(10)
device_dataset = host_dataset.apply(
prefetching_ops.prefetch_to_device(
"/job:localhost/replica:0/task:0/device:CPU:0"))
# NOTE(mrry): This device block creates the "host" dataset and iterator on
# /cpu:0, and ensures that the prefetching is across devices. In typical use
# this would not be necessary, because the GPU device would not support any
# of the dataset-related ops.
with ops.device("/cpu:0"):
iterator = device_dataset.make_one_shot_iterator()
self.assertEqual(host_dataset.output_types, device_dataset.output_types)
self.assertEqual(host_dataset.output_types, iterator.output_types)
self.assertEqual(host_dataset.output_shapes, device_dataset.output_shapes)
self.assertEqual(host_dataset.output_shapes, iterator.output_shapes)
self.assertEqual(host_dataset.output_classes, device_dataset.output_classes)
self.assertEqual(host_dataset.output_classes, iterator.output_classes)
next_element = iterator.get_next()
self.assertEqual(dtypes.int64, next_element.dtype)
self.assertEqual([], next_element.shape)
with self.test_session() as sess:
for i in range(10):
self.assertEqual(i, sess.run(next_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
def testPrefetchDictToDevice(self):
host_dataset = dataset_ops.Dataset.range(10).map(lambda x: {"a": x})
device_dataset = host_dataset.apply(
prefetching_ops.prefetch_to_device("/cpu:1"))
# NOTE(mrry): This device block creates the "host" dataset and iterator on
# /cpu:0, and ensures that the prefetching is across devices. In typical use
# this would not be necessary, because the GPU device would not support any
# of the dataset-related ops.
with ops.device("/cpu:0"):
iterator = device_dataset.make_one_shot_iterator()
self.assertEqual(host_dataset.output_types, device_dataset.output_types)
self.assertEqual(host_dataset.output_types, iterator.output_types)
self.assertEqual(host_dataset.output_shapes, device_dataset.output_shapes)
self.assertEqual(host_dataset.output_shapes, iterator.output_shapes)
self.assertEqual(host_dataset.output_classes, device_dataset.output_classes)
self.assertEqual(host_dataset.output_classes, iterator.output_classes)
next_element = iterator.get_next()
self.assertEqual(dtypes.int64, next_element["a"].dtype)
self.assertEqual([], next_element["a"].shape)
worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
with self.test_session(config=worker_config) as sess:
for i in range(10):
self.assertEqual({"a": i}, sess.run(next_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
def testPrefetchSparseTensorsToDevice(self):
def make_tensor(i):
return sparse_tensor.SparseTensorValue(
indices=[[0, 0]], values=(i*[1]), dense_shape=[2, 2])
host_dataset = dataset_ops.Dataset.range(10).map(make_tensor)
device_dataset = host_dataset.apply(
prefetching_ops.prefetch_to_device("/cpu:1"))
# NOTE(mrry): This device block creates the "host" dataset and iterator on
# /cpu:0, and ensures that the prefetching is across devices. In typical use
# this would not be necessary, because the GPU device would not support any
# of the dataset-related ops.
with ops.device("/cpu:0"):
iterator = device_dataset.make_one_shot_iterator()
self.assertEqual(host_dataset.output_types, device_dataset.output_types)
self.assertEqual(host_dataset.output_types, iterator.output_types)
self.assertEqual(host_dataset.output_shapes, device_dataset.output_shapes)
self.assertEqual(host_dataset.output_shapes, iterator.output_shapes)
self.assertEqual(host_dataset.output_classes, device_dataset.output_classes)
self.assertEqual(host_dataset.output_classes, iterator.output_classes)
next_element = iterator.get_next()
self.assertEqual(dtypes.int64, next_element.dtype)
worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
with self.test_session(config=worker_config) as sess:
for i in range(10):
actual = sess.run(next_element)
self.assertAllEqual([i], actual.values)
self.assertAllEqual([[0, 0]], actual.indices)
self.assertAllEqual([2, 2], actual.dense_shape)
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
def testPrefetchToDeviceGpu(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
host_dataset = dataset_ops.Dataset.range(10)
device_dataset = host_dataset.apply(
prefetching_ops.prefetch_to_device("/gpu:0"))
iterator = device_dataset.make_one_shot_iterator()
next_element = iterator.get_next()
with self.test_session() as sess:
for i in range(10):
self.assertEqual(i, sess.run(next_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
def testPrefetchToDeviceWithReInit(self):
host_dataset = dataset_ops.Dataset.range(10)
device_dataset = host_dataset.apply(
prefetching_ops.prefetch_to_device("/cpu:1"))
# NOTE(mrry): This device block creates the "host" dataset and iterator on
# /cpu:0, and ensures that the prefetching is across devices. In typical use
# this would not be necessary, because the GPU device would not support any
# of the dataset-related ops.
with ops.device("/cpu:0"):
iterator = device_dataset.make_initializable_iterator()
self.assertEqual(host_dataset.output_types, device_dataset.output_types)
self.assertEqual(host_dataset.output_types, iterator.output_types)
self.assertEqual(host_dataset.output_shapes, device_dataset.output_shapes)
self.assertEqual(host_dataset.output_shapes, iterator.output_shapes)
self.assertEqual(host_dataset.output_classes, device_dataset.output_classes)
self.assertEqual(host_dataset.output_classes, iterator.output_classes)
next_element = iterator.get_next()
self.assertEqual(dtypes.int64, next_element.dtype)
self.assertEqual([], next_element.shape)
worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
with self.test_session(config=worker_config) as sess:
sess.run(iterator.initializer)
for i in range(5):
self.assertEqual(i, sess.run(next_element))
sess.run(iterator.initializer)
for i in range(10):
self.assertEqual(i, sess.run(next_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
def testPrefetchToDeviceGpuWithReInit(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
host_dataset = dataset_ops.Dataset.range(10)
device_dataset = host_dataset.apply(
prefetching_ops.prefetch_to_device("/gpu:0"))
iterator = device_dataset.make_initializable_iterator()
next_element = iterator.get_next()
with self.test_session() as sess:
sess.run(iterator.initializer)
for i in range(5):
self.assertEqual(i, sess.run(next_element))
sess.run(iterator.initializer)
for i in range(10):
self.assertEqual(i, sess.run(next_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
class CopyToDeviceTest(test.TestCase):
def testCopyToDevice(self):
host_dataset = dataset_ops.Dataset.range(10)
device_dataset = host_dataset.apply(
prefetching_ops.copy_to_device("/cpu:1"))
with ops.device("/cpu:1"):
iterator = device_dataset.make_one_shot_iterator()
next_element = iterator.get_next()
self.assertEqual(host_dataset.output_types, device_dataset.output_types)
self.assertEqual(host_dataset.output_types, iterator.output_types)
self.assertEqual(host_dataset.output_shapes, device_dataset.output_shapes)
self.assertEqual(host_dataset.output_shapes, iterator.output_shapes)
self.assertEqual(host_dataset.output_classes, device_dataset.output_classes)
self.assertEqual(host_dataset.output_classes, iterator.output_classes)
self.assertEqual(dtypes.int64, next_element.dtype)
self.assertEqual([], next_element.shape)
worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
with self.test_session(config=worker_config) as sess:
for i in range(10):
self.assertEqual(i, sess.run(next_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
def testCopyToDeviceInt32(self):
host_dataset = dataset_ops.Dataset.from_tensors([0, 1, 2, 3])
device_dataset = host_dataset.apply(
prefetching_ops.copy_to_device("/cpu:1"))
with ops.device("/cpu:1"):
iterator = device_dataset.make_one_shot_iterator()
next_element = iterator.get_next()
self.assertEqual(host_dataset.output_types, device_dataset.output_types)
self.assertEqual(host_dataset.output_types, iterator.output_types)
self.assertEqual(host_dataset.output_shapes, device_dataset.output_shapes)
self.assertEqual(host_dataset.output_shapes, iterator.output_shapes)
self.assertEqual(host_dataset.output_classes, device_dataset.output_classes)
self.assertEqual(host_dataset.output_classes, iterator.output_classes)
self.assertEqual(dtypes.int32, next_element.dtype)
self.assertEqual((4,), next_element.shape)
worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
with self.test_session(config=worker_config) as sess:
self.assertAllEqual([0, 1, 2, 3], sess.run(next_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
def testCopyToSameDevice(self):
host_dataset = dataset_ops.Dataset.range(10)
device_dataset = host_dataset.apply(
prefetching_ops.copy_to_device("/cpu:0"))
with ops.device("/cpu:0"):
iterator = device_dataset.make_one_shot_iterator()
next_element = iterator.get_next()
self.assertEqual(host_dataset.output_types, device_dataset.output_types)
self.assertEqual(host_dataset.output_types, iterator.output_types)
self.assertEqual(host_dataset.output_shapes, device_dataset.output_shapes)
self.assertEqual(host_dataset.output_shapes, iterator.output_shapes)
self.assertEqual(host_dataset.output_classes, device_dataset.output_classes)
self.assertEqual(host_dataset.output_classes, iterator.output_classes)
self.assertEqual(dtypes.int64, next_element.dtype)
self.assertEqual([], next_element.shape)
worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
with self.test_session(config=worker_config) as sess:
for i in range(10):
self.assertEqual(i, sess.run(next_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
def testCopyToDeviceWithPrefetch(self):
host_dataset = dataset_ops.Dataset.range(10)
device_dataset = host_dataset.apply(
prefetching_ops.copy_to_device("/cpu:1")).prefetch(1)
with ops.device("/cpu:1"):
iterator = device_dataset.make_one_shot_iterator()
next_element = iterator.get_next()
self.assertEqual(host_dataset.output_types, device_dataset.output_types)
self.assertEqual(host_dataset.output_types, iterator.output_types)
self.assertEqual(host_dataset.output_shapes, device_dataset.output_shapes)
self.assertEqual(host_dataset.output_shapes, iterator.output_shapes)
self.assertEqual(host_dataset.output_classes, device_dataset.output_classes)
self.assertEqual(host_dataset.output_classes, iterator.output_classes)
self.assertEqual(dtypes.int64, next_element.dtype)
self.assertEqual([], next_element.shape)
worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
with self.test_session(config=worker_config) as sess:
for i in range(10):
self.assertEqual(i, sess.run(next_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
def testCopyDictToDevice(self):
host_dataset = dataset_ops.Dataset.range(10).map(lambda x: {"a": x})
device_dataset = host_dataset.apply(
prefetching_ops.copy_to_device("/cpu:1"))
with ops.device("/cpu:1"):
iterator = device_dataset.make_one_shot_iterator()
next_element = iterator.get_next()
self.assertEqual(host_dataset.output_types, device_dataset.output_types)
self.assertEqual(host_dataset.output_types, iterator.output_types)
self.assertEqual(host_dataset.output_shapes, device_dataset.output_shapes)
self.assertEqual(host_dataset.output_shapes, iterator.output_shapes)
self.assertEqual(host_dataset.output_classes, device_dataset.output_classes)
self.assertEqual(host_dataset.output_classes, iterator.output_classes)
self.assertEqual(dtypes.int64, next_element["a"].dtype)
self.assertEqual([], next_element["a"].shape)
worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
with self.test_session(config=worker_config) as sess:
for i in range(10):
self.assertEqual({"a": i}, sess.run(next_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
def testCopyDictToDeviceWithPrefetch(self):
host_dataset = dataset_ops.Dataset.range(10).map(lambda x: {"a": x})
device_dataset = host_dataset.apply(
prefetching_ops.copy_to_device("/cpu:1")).prefetch(1)
with ops.device("/cpu:1"):
iterator = device_dataset.make_one_shot_iterator()
next_element = iterator.get_next()
self.assertEqual(host_dataset.output_types, device_dataset.output_types)
self.assertEqual(host_dataset.output_types, iterator.output_types)
self.assertEqual(host_dataset.output_shapes, device_dataset.output_shapes)
self.assertEqual(host_dataset.output_shapes, iterator.output_shapes)
self.assertEqual(host_dataset.output_classes, device_dataset.output_classes)
self.assertEqual(host_dataset.output_classes, iterator.output_classes)
self.assertEqual(dtypes.int64, next_element["a"].dtype)
self.assertEqual([], next_element["a"].shape)
worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
with self.test_session(config=worker_config) as sess:
for i in range(10):
self.assertEqual({"a": i}, sess.run(next_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
def testCopySparseTensorsToDevice(self):
def make_tensor(i):
return sparse_tensor.SparseTensorValue(
indices=[[0, 0]], values=(i * [1]), dense_shape=[2, 2])
host_dataset = dataset_ops.Dataset.range(10).map(make_tensor)
device_dataset = host_dataset.apply(
prefetching_ops.copy_to_device("/cpu:1"))
with ops.device("/cpu:1"):
iterator = device_dataset.make_one_shot_iterator()
next_element = iterator.get_next()
self.assertEqual(host_dataset.output_types, device_dataset.output_types)
self.assertEqual(host_dataset.output_types, iterator.output_types)
self.assertEqual(host_dataset.output_shapes, device_dataset.output_shapes)
self.assertEqual(host_dataset.output_shapes, iterator.output_shapes)
self.assertEqual(host_dataset.output_classes, device_dataset.output_classes)
self.assertEqual(host_dataset.output_classes, iterator.output_classes)
self.assertEqual(dtypes.int64, next_element.dtype)
worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
with self.test_session(config=worker_config) as sess:
for i in range(10):
actual = sess.run(next_element)
self.assertAllEqual([i], actual.values)
self.assertAllEqual([[0, 0]], actual.indices)
self.assertAllEqual([2, 2], actual.dense_shape)
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
def testCopySparseTensorsToDeviceWithPrefetch(self):
def make_tensor(i):
return sparse_tensor.SparseTensorValue(
indices=[[0, 0]], values=(i * [1]), dense_shape=[2, 2])
host_dataset = dataset_ops.Dataset.range(10).map(make_tensor)
device_dataset = host_dataset.apply(
prefetching_ops.copy_to_device("/cpu:1")).prefetch(1)
with ops.device("/cpu:1"):
iterator = device_dataset.make_one_shot_iterator()
next_element = iterator.get_next()
self.assertEqual(host_dataset.output_types, device_dataset.output_types)
self.assertEqual(host_dataset.output_types, iterator.output_types)
self.assertEqual(host_dataset.output_shapes, device_dataset.output_shapes)
self.assertEqual(host_dataset.output_shapes, iterator.output_shapes)
self.assertEqual(host_dataset.output_classes, device_dataset.output_classes)
self.assertEqual(host_dataset.output_classes, iterator.output_classes)
self.assertEqual(dtypes.int64, next_element.dtype)
worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
with self.test_session(config=worker_config) as sess:
for i in range(10):
actual = sess.run(next_element)
self.assertAllEqual([i], actual.values)
self.assertAllEqual([[0, 0]], actual.indices)
self.assertAllEqual([2, 2], actual.dense_shape)
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
def testCopyToDeviceGpu(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
host_dataset = dataset_ops.Dataset.range(10)
device_dataset = host_dataset.apply(
prefetching_ops.copy_to_device("/gpu:0"))
with ops.device("/gpu:0"):
iterator = device_dataset.make_initializable_iterator()
next_element = iterator.get_next()
with self.test_session() as sess:
sess.run(iterator.initializer)
for i in range(10):
self.assertEqual(i, sess.run(next_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
def testCopyToDeviceGpuWithPrefetch(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
host_dataset = dataset_ops.Dataset.range(10)
device_dataset = host_dataset.apply(
prefetching_ops.copy_to_device("/gpu:0")).prefetch(1)
with ops.device("/gpu:0"):
iterator = device_dataset.make_initializable_iterator()
next_element = iterator.get_next()
with self.test_session() as sess:
sess.run(iterator.initializer)
for i in range(10):
self.assertEqual(i, sess.run(next_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
def testCopyToDeviceGpuInt32(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
host_dataset = dataset_ops.Dataset.from_tensors([0, 1, 2, 3])
device_dataset = host_dataset.apply(
prefetching_ops.copy_to_device("/gpu:0"))
with ops.device("/gpu:0"):
iterator = device_dataset.make_initializable_iterator()
next_element = iterator.get_next()
with self.test_session() as sess:
sess.run(iterator.initializer)
self.assertAllEqual([0, 1, 2, 3], sess.run(next_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
def testCopyToDeviceGpuInt32AndPrefetch(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
host_dataset = dataset_ops.Dataset.from_tensors([0, 1, 2, 3])
device_dataset = host_dataset.apply(
prefetching_ops.copy_to_device("/gpu:0")).prefetch(1)
with ops.device("/gpu:0"):
iterator = device_dataset.make_initializable_iterator()
next_element = iterator.get_next()
with self.test_session() as sess:
sess.run(iterator.initializer)
self.assertAllEqual([0, 1, 2, 3], sess.run(next_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
def testCopyToDeviceGpuStrings(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
host_dataset = dataset_ops.Dataset.from_tensors(["a", "b", "c"])
device_dataset = host_dataset.apply(
prefetching_ops.copy_to_device("/gpu:0"))
with ops.device("/gpu:0"):
iterator = device_dataset.make_initializable_iterator()
next_element = iterator.get_next()
with self.test_session() as sess:
sess.run(iterator.initializer)
self.assertAllEqual(["a", "b", "c"], sess.run(next_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
def testCopyToDeviceGpuStringsAndPrefetch(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
host_dataset = dataset_ops.Dataset.from_tensors(["a", "b", "c"])
device_dataset = host_dataset.apply(
prefetching_ops.copy_to_device("/gpu:0"))
with ops.device("/gpu:0"):
iterator = device_dataset.make_initializable_iterator()
next_element = iterator.get_next()
with self.test_session() as sess:
sess.run(iterator.initializer)
self.assertAllEqual(["a", "b", "c"], sess.run(next_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
def testCopyToDevicePingPongCPUGPU(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
with compat.forward_compatibility_horizon(2018, 8, 4):
host_dataset = dataset_ops.Dataset.range(10)
device_dataset = host_dataset.apply(
prefetching_ops.copy_to_device("/gpu:0", source_device="/cpu:0"))
back_to_cpu_dataset = device_dataset.apply(
prefetching_ops.copy_to_device("/cpu:0", source_device="/gpu:0"))
with ops.device("/cpu:0"):
iterator = back_to_cpu_dataset.make_initializable_iterator()
next_element = iterator.get_next()
with self.test_session() as sess:
sess.run(iterator.initializer)
for i in range(10):
self.assertEqual(i, sess.run(next_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
def testCopyToDeviceWithReInit(self):
host_dataset = dataset_ops.Dataset.range(10)
device_dataset = host_dataset.apply(
prefetching_ops.copy_to_device("/cpu:1"))
with ops.device("/cpu:1"):
iterator = device_dataset.make_initializable_iterator()
next_element = iterator.get_next()
self.assertEqual(host_dataset.output_types, device_dataset.output_types)
self.assertEqual(host_dataset.output_types, iterator.output_types)
self.assertEqual(host_dataset.output_shapes, device_dataset.output_shapes)
self.assertEqual(host_dataset.output_shapes, iterator.output_shapes)
self.assertEqual(host_dataset.output_classes, device_dataset.output_classes)
self.assertEqual(host_dataset.output_classes, iterator.output_classes)
self.assertEqual(dtypes.int64, next_element.dtype)
self.assertEqual([], next_element.shape)
worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
with self.test_session(config=worker_config) as sess:
sess.run(iterator.initializer)
for i in range(5):
self.assertEqual(i, sess.run(next_element))
sess.run(iterator.initializer)
for i in range(10):
self.assertEqual(i, sess.run(next_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
def testCopyToDeviceWithReInitAndPrefetch(self):
host_dataset = dataset_ops.Dataset.range(10)
device_dataset = host_dataset.apply(
prefetching_ops.copy_to_device("/cpu:1")).prefetch(1)
with ops.device("/cpu:1"):
iterator = device_dataset.make_initializable_iterator()
next_element = iterator.get_next()
self.assertEqual(host_dataset.output_types, device_dataset.output_types)
self.assertEqual(host_dataset.output_types, iterator.output_types)
self.assertEqual(host_dataset.output_shapes, device_dataset.output_shapes)
self.assertEqual(host_dataset.output_shapes, iterator.output_shapes)
self.assertEqual(host_dataset.output_classes, device_dataset.output_classes)
self.assertEqual(host_dataset.output_classes, iterator.output_classes)
self.assertEqual(dtypes.int64, next_element.dtype)
self.assertEqual([], next_element.shape)
worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
with self.test_session(config=worker_config) as sess:
sess.run(iterator.initializer)
for i in range(5):
self.assertEqual(i, sess.run(next_element))
sess.run(iterator.initializer)
for i in range(10):
self.assertEqual(i, sess.run(next_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
def testCopyToDeviceGpuWithReInit(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
host_dataset = dataset_ops.Dataset.range(10)
device_dataset = host_dataset.apply(
prefetching_ops.copy_to_device("/gpu:0"))
with ops.device("/gpu:0"):
iterator = device_dataset.make_initializable_iterator()
next_element = iterator.get_next()
with self.test_session() as sess:
sess.run(iterator.initializer)
for i in range(5):
self.assertEqual(i, sess.run(next_element))
sess.run(iterator.initializer)
for i in range(10):
self.assertEqual(i, sess.run(next_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
def testCopyToDeviceGpuWithReInitAndPrefetch(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
host_dataset = dataset_ops.Dataset.range(10)
device_dataset = host_dataset.apply(
prefetching_ops.copy_to_device("/gpu:0")).prefetch(1)
with ops.device("/gpu:0"):
iterator = device_dataset.make_initializable_iterator()
next_element = iterator.get_next()
with self.test_session() as sess:
sess.run(iterator.initializer)
for i in range(5):
self.assertEqual(i, sess.run(next_element))
sess.run(iterator.initializer)
for i in range(10):
self.assertEqual(i, sess.run(next_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
if __name__ == "__main__":
test.main()
|
py | 7df940cd1d7d9f27210461402eb876ceb7f5b78f | # https://leetcode.com/problems/minimum-path-sum/
class Solution:
def minPathSum(self, grid: list[list[int]]) -> int:
rows = len(grid)
cols = len(grid[0])
inf = float('inf')
for row_idx in range(rows):
for col_idx in range(cols):
if row_idx == col_idx == 0:
continue
left = inf if col_idx == 0 else grid[row_idx][col_idx - 1]
top = inf if row_idx == 0 else grid[row_idx - 1][col_idx]
grid[row_idx][col_idx] += min(left, top)
return grid[-1][-1]
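# Quick usage sketch (hypothetical check, not part of the original submission):
# the classic 3x3 grid from the problem statement has minimum path sum 7,
# following the path 1 -> 3 -> 1 -> 1 -> 1.
if __name__ == "__main__":
    assert Solution().minPathSum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7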
|
py | 7df9410f2da2b2580faed621143d0b57799623b1 | import socket
import select
import mysql.connector
import sys
HEADER_LENGTH = 100
IP = "127.0.0.1"
PORT = 5000
# Create a socket
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.settimeout(500)
# SO_ - socket option ... Sets REUSEADDR (as a socket option) to 1 on socket
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Bind, so server informs operating system that it's going to use given IP and port
# For a server using 0.0.0.0 means to listen on all available interfaces, useful to connect locally to 127.0.0.1 and remotely to LAN interface IP
server_socket.bind((IP, PORT))
# This makes server listen to new connections
server_socket.listen()
# List of sockets for select.select()
sockets_list = [server_socket]
# List of connected clients - socket as a key, user header and name as data
clients = {}
print(f'Listening for connections on {IP}:{PORT}...')
# CONNECTING TO DB AND CREATING TABLE IF NOT EXIST
def DBConnection():
try:
db = mysql.connector.connect(
host="localhost",
user="root",
passwd="mysql",
database="Socket")
print("DB CONNECTED SUCCESSFULLY")
mycursor = db.cursor(buffered=True)
mycursor.execute("CREATE TABLE IF NOT EXISTS CLIENTS(IP VARCHAR (255) NOT NULL PRIMARY KEY,PORT VARCHAR(255) NOT NULL , Dname VARCHAR(255),Mname VARCHAR(255),Lname VARCHAR(255),phone INT(50),mail VARCHAR(255) UNIQUE,Birth_date Date,Doctor_ID INT(150) UNIQUE,syndicate_number INT (100) UNIQUE,salary INT(50),gender VARCHAR(255),address text,job_rank VARCHAR(255),access_level int DEFAULT 2,image LONGBLOB,calendarid VARCHAR (600) UNIQUE )")
db.commit()
print("TABLE CREATED SUCCESSFULLY")
#QMessageBox.about(self, 'Connection', 'Database Connected Successfully')
except mysql.connector.Error as e:
#QMessageBox.about(self, 'Connection', 'Failed To Connect Database')
print("Failed To Connect Database")
#sys.exit(1)
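# Note: DBConnection() above is defined but never called in this script; the
# insert_data() and update_data() helpers below create and use a simpler
# CLIENTS table (IPPORT, NAME, MSG) instead of the schema created here.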
def insert_data(ADDRESS,NAME):
try:
# CONNECTING TO DB AND CREATING TABLE IF NOT EXIST
db = mysql.connector.connect(
host="localhost",
user="root",
passwd="mysql",
database="Socket")
print("DB CONNECTED SUCCESSFULLY")
mycursor = db.cursor()
mycursor.execute("CREATE TABLE IF NOT EXISTS CLIENTS(IPPORT VARCHAR (255) NOT NULL , NAME VARCHAR(255), MSG VARCHAR(255))")
#db.commit()
print("TABLE CREATED SUCCESSFULLY")
# INSERTING DATA IN THE TABLE
sql = "INSERT INTO CLIENTS (IPPORT,NAME) VALUES (%s,%s)"
val = (ADDRESS,NAME)
mycursor.execute(sql, val)
# COMMITTING CHANGES TO THE DB
db.commit()
print("DATA INSERTED SUCCESSFULLY")
except mysql.connector.Error as e:
#self.labelResult.setText("Error Inserting Data")
print(e.errno)
def update_data(ADDRESS,MSG):
try:
db = mysql.connector.connect(
host="localhost",
user="root",
passwd="mysql",
database="Socket")
mycursor = db.cursor()
sql = "UPDATE CLIENTS SET MSG = %s WHERE IPPORT = %s"
val = (MSG,ADDRESS)
mycursor.execute(sql, val)
db.commit()
print(MSG)
print("MESSAGE UPDATED SUCCESSFULLY")
except mysql.connector.Error as e:
print('An exception occurred... ',e)
# Handles message receiving
def receive_message(client_socket):
try:
# Receive our "header" containing message length, it's size is defined and constant
message_header = client_socket.recv(HEADER_LENGTH)
# If we received no data, client gracefully closed a connection, for example using socket.close() or socket.shutdown(socket.SHUT_RDWR)
if not len(message_header):
return False
# Convert header to int value
message_length = int(message_header.decode('utf-8').strip())
# Return an object of message header and message data
return {'header': message_header, 'data': client_socket.recv(message_length)}
except:
# If we are here, client closed connection violently, for example by pressing ctrl+c on his script or just lost his connection
# socket.close() also invokes socket.shutdown(socket.SHUT_RDWR), which sends information about closing the socket (shutdown read/write), and that's also why we may receive an empty message
return False
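# Illustrative helper (an assumption for clarity, not part of the original script):
# a client is expected to frame each message as a fixed-width ASCII length header of
# HEADER_LENGTH bytes followed by the UTF-8 payload, which is exactly what
# receive_message() above parses.
def frame_message(text):
    payload = text.encode("utf-8")
    header = f"{len(payload):<{HEADER_LENGTH}}".encode("utf-8")
    return header + payload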
while True:
# Calls Unix select() system call or Windows select() WinSock call with three parameters:
# - rlist - sockets to be monitored for incoming data
# - wlist - sockets for data to be send to (checks if for example buffers are not full and socket is ready to send some data)
# - xlist - sockets to be monitored for exceptions (we want to monitor all sockets for errors, so we can use rlist)
# Returns lists:
# - reading - sockets we received some data on (that way we don't have to check sockets manually)
# - writing - sockets ready for data to be send thru them
# - errors - sockets with some exceptions
# This is a blocking call, code execution will "wait" here and "get" notified in case any action should be taken
read_sockets, _, exception_sockets = select.select(sockets_list, [], sockets_list)
# Iterate over notified sockets
for notified_socket in read_sockets:
# If notified socket is a server socket - new connection, accept it
if notified_socket == server_socket:
# Accept the new connection. That gives us a new socket - a client socket, connected to this given client only; it's unique for that client
# The other returned object is ip/port set
client_socket, client_address = server_socket.accept()
# Client should send his name right away, receive it
user = receive_message(client_socket)
# If False - client disconnected before he sent his name
if user is False:
continue
# Add accepted socket to select.select() list
sockets_list.append(client_socket)
# Also save username and username header
clients[client_socket] = user
print('Accepted new connection from {}:{}, username: {}'.format(*client_address, user['data'].decode('utf-8')))
# print(client_socket)
print("client_address: {}:{}".format(*client_address))
print("username: {}".format(user['data'].decode('utf-8')))
insert_data("{}:{}".format(*client_address),user['data'].decode('utf-8'))
# Else existing socket is sending a message
else:
# Receive message
message = receive_message(notified_socket)
# If False, client disconnected, cleanup
if message is False:
print('Closed connection from: {}'.format(clients[notified_socket]['data'].decode('utf-8')))
# Remove from list for socket.socket()
sockets_list.remove(notified_socket)
# Remove from our list of users
del clients[notified_socket]
continue
# Get user by notified socket, so we will know who sent the message
user = clients[notified_socket]
print(f'Received message from {user["data"].decode("utf-8")}: {message["data"].decode("utf-8")}')
print("client_address: {}:{}".format(*client_address))
print("username: {}".format(user['data'].decode('utf-8')))
update_data("{}:{}".format(*client_address),message['data'].decode('utf-8'))
# Iterate over connected clients and broadcast message
for client_socket in clients:
# But don't send it to the sender
if client_socket != notified_socket:
# Send user and message (both with their headers)
# We are reusing here message header sent by sender, and saved username header send by user when he connected
client_socket.send(user['header'] + user['data'] + message['header'] + message['data'])
# mes = input('server: ')
# for client_socket in clients:
# # But don't sent it to sender
# if client_socket != notified_socket:
# # Send user and message (both with their headers)
# # We are reusing here message header sent by sender, and saved username header send by user when he connected
# client_socket.send(bytes(mes,'utf-8'))
# It's not really necessary to have this, but will handle some socket exceptions just in case
for notified_socket in exception_sockets:
# Remove from list for socket.socket()
sockets_list.remove(notified_socket)
# Remove from our list of users
del clients[notified_socket]
|
py | 7df941257ae642f238dd0fbfd3491700c71a486a | from django.urls import path
from allauth.socialaccount.providers.oauth2_provider.urls import default_urlpatterns
from .provider import AppleProvider
from .views import oauth2_finish_login
urlpatterns = default_urlpatterns(AppleProvider)
urlpatterns += [
path(
AppleProvider.get_slug() + "/login/callback/finish/",
oauth2_finish_login,
name="apple_finish_callback",
),
]
|
py | 7df94127bd0a68e3ba1c5cd140d3f5d8641e0cc9 | from plugin import plugin, require
import os
voice_control_installed = True
try:
import speech_recognition as sr
import pyaudio
except ImportError:
voice_control_installed = False
if voice_control_installed:
requirements = []
else:
requirements = ['voice_control_requirements (install portaudio + re-run setup.sh)']
@require(native=requirements)
@plugin("hear")
def hear(jarvis, s):
r = sr.Recognizer() # initializing the speech_recognition recognizer
listen = False
_jarvis = jarvis._jarvis # get the underlying jarvis object.
_jarvis.speech.text_to_speech("Say listen to start voice mode")
while listen is False:
try:
with sr.Microphone() as source:
os.system('reset') # for clearing the terminal.
print("Say listen to start listening")
r.adjust_for_ambient_noise(source) # Adjust for ambient noise.
audio = r.listen(source) # Storing audio.
pinger = r.recognize_google(audio) # Converting speech to text
try:
if (pinger.lower() == "listen"):
listen = True
_jarvis.speech.text_to_speech("Voice mode activated")
print("Voice mode activated. Say something!")
break
else:
continue
except LookupError:
continue # Ignore it if you are not speaking.
except sr.UnknownValueError:
continue # Ignore unrecognized speech errors
while listen is True:
print("Say somthing")
try:
with sr.Microphone() as source:
r.adjust_for_ambient_noise(source)
audio = r.listen(source)
pinger = r.recognize_google(audio).lower()
if (pinger == "stop"):
listen = False
print("Listening stopped.")
_jarvis.speech.text_to_speech("Listening stopped.")
break
else:
print(pinger)
if listen:
line = pinger
jarvis.eval(line)
except LookupError:
_jarvis.speech.text_to_speech('Audio cannot be read!')
print("Could not understand audio")
_jarvis.speech.text_to_speech("unable to recognize voice")
except sr.UnknownValueError:
continue
except sr.RequestError:
print("Could not request results from Google Recognition service")
continue # Ignore server connection errors.
|
py | 7df94224ab9169904888f8968de54c70951be114 | # ffgen0xx saves an array of filling factor that is an input for the magn0xx procedure
# the final array ff is a 3 x N array; each slice of 3 elements is a combination of filling factors whose sum is 100
# the final array is saved into a pickle file.
# Only change the ff_increment variable, which sets the increment between two possible filling factor values.
# for example ff_increment = 25 means ff0 = [0,25,50,75,100]
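# Illustrative example (values chosen for clarity, not the settings used below):
# with ff_increment = 50 the valid (ff0, ff1, ff2) combinations summing to 100 are
# (0,0,100), (0,50,50), (0,100,0), (50,0,50), (50,50,0), (100,0,0),
# so the final ff array would have shape (3, 6) after the transpose below.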
import numpy as np
import pickle
import os as os
print('[info] ffgen0xxx.py | generates filling factor combinations for magn0xx')
print('[info] v1.0 | 2018-11-21 | [email protected]')
# Variables
ff_increment = 4
ff_max = 100
print('[info] generating a filling factor array with ff_increment={} & ff_max={}'.format(ff_increment,ff_max))
# Creating the filling factors arrays
ff0 = np.arange(ff_max/ff_increment+1, dtype=int) * ff_increment
ff1 = np.arange(ff_max/ff_increment+1, dtype=int) * ff_increment
ff2 = np.arange(ff_max/ff_increment+1, dtype=int) * ff_increment
# ff will store the final array of filling factor
ff = np.empty((0,3), int)
print('[info] entering the main loop')
# looping through all the filling factor possibilities and checking in which cases
# the sum of all filling factors is 100%: if yes -> store into ff array
for ii0 in range(len(ff0)):
for ii1 in range(len(ff1)):
for ii2 in range(len(ff2)):
if ff0[ii0] + ff1[ii1] + ff2[ii2] == 100:
ff = np.append(ff, np.array([[ff0[ii0],ff1[ii1],ff2[ii2]]]), axis=0)
# the main loop is done.
ff = ff.T
print('[result] generated a final array with shape {}'.format(ff.shape))
# Storing the ff array into a pickle file now
if os.path.exists('./ff0xx.p'):
os.remove('./ff0xx.p')
print('[info] removed an existing "ff0xx.p" file')
pickle.dump( ff, open( "ff0xx.p", "wb" ) )
print('[result] filling factor array saved to "ff0xx.p"')
|
py | 7df9426a15476a83ec43574d9fc208a9ba3f1c51 | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name='django_serverside_datatable',
version='0.1.0',
description="Simple Server-side Datatable processing view for Django",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/umesh-krishna/django_serverside_datatable",
license="MIT",
author="Umesh Krishna",
author_email='[email protected]',
install_requires=['Django>=1.8'],
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
)
|
py | 7df942b78510327e969baff003bb011e75a60b1f | import argparse
import os
import shutil
import time, math
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import numpy as np
import torch.utils.model_zoo as model_zoo
from torch.autograd.variable import Variable
'''
Pytorch model for the iTracker.
Author: Petr Kellnhofer ( pkel_lnho (at) gmai_l.com // remove underscores and spaces), 2018.
Website: http://gazecapture.csail.mit.edu/
Cite:
Eye Tracking for Everyone
K.Krafka*, A. Khosla*, P. Kellnhofer, H. Kannan, S. Bhandarkar, W. Matusik and A. Torralba
IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2016
@inproceedings{cvpr2016_gazecapture,
Author = {Kyle Krafka and Aditya Khosla and Petr Kellnhofer and Harini Kannan and Suchendra Bhandarkar and Wojciech Matusik and Antonio Torralba},
Title = {Eye Tracking for Everyone},
Year = {2016},
Booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}
}
'''
class ItrackerImageModel(nn.Module):
# Used for both eyes (with shared weights) and the face (with unique weights)
def __init__(self):
super(ItrackerImageModel, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(3, 96, kernel_size=11, stride=4, padding=0),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.CrossMapLRN2d(size=5, alpha=0.0001, beta=0.75, k=1.0),
nn.Conv2d(96, 256, kernel_size=5, stride=1, padding=2, groups=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.CrossMapLRN2d(size=5, alpha=0.0001, beta=0.75, k=1.0),
nn.Conv2d(256, 384, kernel_size=3, stride=1, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(384, 64, kernel_size=1, stride=1, padding=0),
nn.ReLU(inplace=True),
)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
return x
class FaceImageModel(nn.Module):
def __init__(self):
super(FaceImageModel, self).__init__()
self.conv = ItrackerImageModel()
self.fc = nn.Sequential(
nn.Linear(12*12*64, 128),
nn.ReLU(inplace=True),
nn.Linear(128, 64),
nn.ReLU(inplace=True),
)
def forward(self, x):
x = self.conv(x)
x = self.fc(x)
return x
class FaceGridModel(nn.Module):
# Model for the face grid pathway
def __init__(self, gridSize = 25):
super(FaceGridModel, self).__init__()
self.fc = nn.Sequential(
nn.Linear(gridSize * gridSize, 256),
nn.ReLU(inplace=True),
nn.Linear(256, 128),
nn.ReLU(inplace=True),
)
def forward(self, x):
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
class ITrackerModel(nn.Module):
def __init__(self):
super(ITrackerModel, self).__init__()
self.eyeModel = ItrackerImageModel()
self.faceModel = FaceImageModel()
self.gridModel = FaceGridModel()
# Joining both eyes
self.eyesFC = nn.Sequential(
nn.Linear(2*12*12*64, 128),
nn.ReLU(inplace=True),
)
# Joining everything
self.fc = nn.Sequential(
nn.Linear(128+64+128, 128),
nn.ReLU(inplace=True),
nn.Linear(128, 2),
)
def forward(self, faces, eyesLeft, eyesRight, faceGrids):
# Eye nets
xEyeL = self.eyeModel(eyesLeft)
xEyeR = self.eyeModel(eyesRight)
# Cat and FC
xEyes = torch.cat((xEyeL, xEyeR), 1)
xEyes = self.eyesFC(xEyes)
# Face net
xFace = self.faceModel(faces)
xGrid = self.gridModel(faceGrids)
# Cat all
x = torch.cat((xEyes, xFace, xGrid), 1)
x = self.fc(x)
return x
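# --- Appended shape-check sketch (not part of the original file) ---
# The 224x224 crop size is an assumption based on the GazeCapture setup; with it the
# conv stack yields 12x12x64 features, which matches the Linear layer sizes above.
model = ITrackerModel()
n = 2
faces = torch.randn(n, 3, 224, 224)
eyes_left = torch.randn(n, 3, 224, 224)
eyes_right = torch.randn(n, 3, 224, 224)
grids = torch.randn(n, 25, 25)
out = model(faces, eyes_left, eyes_right, grids)
print(out.shape)  # expected: torch.Size([2, 2]) -> (x, y) gaze output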
|
py | 7df9454357f715a7cdded01bd27d65133db771ab | """
==============================
2020/12/30
rkouhei
==============================
"""
# Import libraries
from utility.handling_file_directory import read_file, make_directory
from utility.handling_data import read_iteration, read_sep, check_length
import os
import re
import statsmodels.api as sm
import pandas as pd
from tqdm import tqdm
class method:
# Variables
mode = 6 # mode number
path = "" # path to the file(s) to read
sep = "" # type of field separator used in the file
iteration = 0 # number of iterations (lags) to compute
def __init__(self, path):
"""
ファイルの読み込みを行う。一度に、複数ファイルの読み込みも可能。
相対パスと絶対パス、どちらでも可能。
Input
------
path : 読み込みたいのファイルへのパス(一括指定可)
Raises
------
計算回数で、数字以外を入力した時。
"""
self.sep = read_sep()
self.iteration = read_iteration()
self.path = path
def write_file(self, df, path, out_dir):
"""
計算結果を出力する。
出力はindexの有無で、2種類存在する。
Input
------
df : 保存したい計算結果
path : 元データのファイルパス
out_dir : 保存先のディレクトリ
"""
input_fname = re.split("/", path)[-1] # extract just the file name
ext = os.path.splitext(input_fname) # split into base name and extension
# file name for the output with index
out_fname = ext[0] + "_" + str(self.iteration) + ext[1]
out_path = out_dir + out_fname
# file name for the output without index
out_fname_noindex = ext[0] + "_" + str(self.iteration) + "_noindex" + ext[1]
out_path_noindex = out_dir + out_fname_noindex
df.to_csv(out_path, sep=" ", header=False, encoding="utf-8") # with index
df.to_csv(out_path_noindex, sep=" ", header=False, encoding="utf-8", index=False) # without index
def calc(self, df_array, path_array, out_dir):
"""
Computes the autocorrelation coefficients.
Input
------
df_array : array of the loaded data sets
path_array : array of file paths of the original data
out_dir : path to the directory where the results are saved
"""
# set up the progress bar
bar = tqdm(total=len(path_array))
bar.set_description("calc_acf")
for df, path in zip(df_array, path_array):
df_data = pd.Series(df['data'], dtype='float') # store as pd.Series for the autocorrelation computation
part_df = df_data.copy()
part_iterations = int(self.iteration)
acf = sm.tsa.stattools.acf(part_df, nlags=part_iterations, fft=True)
index = pd.Series([times*0.0002 for times in range(part_iterations)])
out_pd = pd.Series(acf[:part_iterations], index=['{:.4f}'.format(i) for i in index])
self.write_file(out_pd, path, out_dir)
bar.update(1) # update the progress bar
def fit(self):
"""
Runs the computation.
"""
df_array, path_array = read_file(self.path, self.sep) # read the data
check_length(df_array, path_array, self.iteration) # check the number of lags against the data length
out_dir, _ = make_directory(self.mode) # directory to write to
self.calc(df_array, path_array, out_dir) # run the computation and save |
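# --- Appended standalone sketch (assumed inputs; not part of the module above) ---
# Illustrates the core computation performed in calc(): statsmodels' acf() on a
# pandas Series, re-indexed by lag time with the same 0.0002 step used there.
import numpy as np
import pandas as pd
import statsmodels.api as sm

data = pd.Series(np.random.randn(1000))
nlags = 50
acf = sm.tsa.stattools.acf(data, nlags=nlags, fft=True)
lags = ['{:.4f}'.format(i * 0.0002) for i in range(nlags)]
out_pd = pd.Series(acf[:nlags], index=lags)
print(out_pd.head())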
py | 7df94566ff4a5025976282960a35271b2dbf6212 | import json
from asgiref.sync import async_to_sync
from channels.generic.websocket import AsyncWebsocketConsumer
class ChatConsumer(AsyncWebsocketConsumer):
async def connect(self):
self.room_name = self.scope['url_route']['kwargs']['room_name']
self.room_group_name = 'chat_%s' % self.room_name
# Join room group
await self.channel_layer.group_add(
self.room_group_name,
self.channel_name
)
await self.accept()
async def disconnect(self, close_code):
await self.channel_layer.group_discard(
self.room_group_name,
self.channel_name
)
async def receive(self, text_data):
text_data_json = json.loads(text_data)
message = text_data_json['message']
await self.channel_layer.group_send(
self.room_group_name,
{
'type': 'chat_message',
'message': message
}
)
async def chat_message(self, event):
message = event['message']
# Send message to WebSocket
await self.send(text_data=json.dumps({
'message': message
}))
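# --- Appended companion sketch (not shipped with this consumer) ---
# A typical Channels 3-style URLRouter entry that would feed 'room_name' into
# self.scope['url_route']['kwargs'] as used in connect() above. The 'ws/chat/...'
# path pattern is an assumption, not taken from the original project.
from django.urls import re_path

websocket_urlpatterns = [
    re_path(r'ws/chat/(?P<room_name>\w+)/$', ChatConsumer.as_asgi()),
]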
|
py | 7df94635ee66d2e68cea12a428b7a44f8b2b8c36 | from .utils import match_affix
class Context(object):
def __init__(self, original_word, dictionary, visitor_provider):
self.removals = []
self.original_word = original_word
self.current_word = self.original_word
self.dictionary = dictionary
self.result = self.original_word
self.visitor_provider = visitor_provider
self.is_stopped = False
self.init_visitors()
def init_visitors(self):
self.visitors = self.visitor_provider.visitors
self.suffix_visitors = self.visitor_provider.suffix_visitors
self.prefix_visitors = self.visitor_provider.prefix_visitors
def stop_process(self):
self.is_stopped = True
def add_removal(self, removal):
self.removals.append(removal)
def execute(self):
# step 1 - 5
self.start_stemming_process()
# step 6
if self.current_word in self.dictionary:
self.result = self.current_word
else:
self.result = self.original_word
def precedence(self, word):
"""
Confix Stripping Rule Precedence Adjustment Specification.
Asian J. (2007) "Effective Techniques for Indonesian Text Retrieval"
Page 78-79.
@link http://researchbank.rmit.edu.au/eserv/rmit:6312/Asian.pdf
"""
return True if any([
match_affix(word, 'be', 'lah'),
match_affix(word, 'be', 'an'),
match_affix(word, 'me', 'i'),
match_affix(word, 'di', 'i'),
match_affix(word, 'pe', 'i'),
match_affix(word, 'ter', 'i'),
]) else False
def start_stemming_process(self):
# step 1
if self.current_word in self.dictionary:
return
self.accept_visitors(self.visitors)
if self.current_word in self.dictionary:
return
# Confix Stripping
# Try to remove prefix before suffix if the specification is met
if self.precedence(self.original_word):
# step 4, 5
self.remove_prefixes()
if self.current_word in self.dictionary:
return
# step 2, 3
self.remove_suffixes()
if self.current_word in self.dictionary:
return
else:
# if the trial failed, restore the original word
# and continue to normal rule precedence (suffix first, prefix afterwards)
self.current_word = self.original_word
self.removals = []
# step 2, 3
self.remove_suffixes()
if self.current_word in self.dictionary:
return
# step 4, 5
self.remove_prefixes()
if self.current_word in self.dictionary:
return
# ECS loop pengembalian akhiran
self.loop_pengembalian_akhiran()
def remove_prefixes(self):
for i in range(3):
self.accept_prefix_visitors(self.prefix_visitors)
if self.current_word in self.dictionary:
return
def remove_suffixes(self):
self.accept_visitors(self.suffix_visitors)
def accept(self, visitor):
visitor.visit(self)
def accept_visitors(self, visitors):
for visitor in visitors:
self.accept(visitor)
if self.current_word in self.dictionary:
return self.current_word
if self.is_stopped:
return self.current_word
def accept_prefix_visitors(self, visitors):
removal_count = len(self.removals)
for visitor in visitors:
self.accept(visitor)
if self.current_word in self.dictionary:
return self.current_word
if self.is_stopped:
return self.current_word
if len(self.removals) > removal_count:
return
def loop_pengembalian_akhiran(self):
"""ECS Loop Pengembalian Akhiran"""
self.restore_prefix()
removals = self.removals
reversed_removals = reversed(removals)
current_word = self.current_word
for removal in reversed_removals:
if not self.is_suffix_removal(removal):
continue
if removal.removed_part == 'kan':
self.current_word = removal.result + 'k'
# step 4,5
self.remove_prefixes()
if self.current_word in self.dictionary:
return
self.current_word = removal.result + 'kan'
else:
self.current_word = removal.subject
# step 4,5
self.remove_prefixes()
if self.current_word in self.dictionary:
return
self.removals = removals
self.current_word = current_word
def is_suffix_removal(self, removal):
"""Check wether the removed part is a suffix"""
return removal.affix_type == 'DS' or \
removal.affix_type == 'PP' or \
removal.affix_type == 'P'
def restore_prefix(self):
"""Restore prefix to proceed with ECS loop pengembalian akhiran"""
for removal in self.removals:
# return the word before precoding (the subject of first prefix removal)
self.current_word = removal.subject
break
for removal in self.removals:
if removal.affix_type == 'DP':
self.removals.remove(removal)
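# --- Appended illustration (not part of the original module) ---
# A standalone sketch of the confix-stripping precedence rule used above. The real
# match_affix lives in .utils; the one-liner below is an assumed minimal version
# that simply tests the prefix and suffix of the word.
def _match_affix_sketch(word, prefix, suffix):
    return word.startswith(prefix) and word.endswith(suffix)

# 'bersamaan' matches the be-...-an pattern, so prefix removal is tried first.
print(_match_affix_sketch('bersamaan', 'be', 'an'))   # True
print(_match_affix_sketch('memakai', 'me', 'i'))      # True
print(_match_affix_sketch('makan', 'be', 'lah'))      # False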
|
py | 7df946763a4bdb692a8f61144eacb45503c119d8 | # -*- coding: utf-8 -*-
import sys
import os
import errno
import re
import logging
log = logging.getLogger(__name__)
# global configuration
import vprimer.glv as glv
import vprimer.utils as utl
import subprocess as sbp
import pandas as pd
class Blast(object):
def __init__(self):
pass
@classmethod
def primer_blast_check(cls,
left_fasta_id, right_fasta_id,
left_primer_seq, right_primer_seq):
fasta_list = [">{}".format(left_fasta_id)]
fasta_list += ["{}".format(left_primer_seq)]
fasta_list += [">{}".format(right_fasta_id)]
fasta_list += ["{}".format(right_primer_seq)]
primer_fasta = '\n'.join(fasta_list)
blast_check_result_list = cls._do_blastn_pipe(primer_fasta)
return blast_check_result_list
@classmethod
def _do_blastn_pipe(cls, primer_fasta):
# https://www.haya-programming.com/entry/2018/03/25/214957
blastn_short = ['blastn']
blastn_short += ['-db']
blastn_short += ["{}".format(glv.conf.blastdb)]
blastn_short += ['-num_threads']
blastn_short += ["{}".format(glv.conf.blast_num_threads)]
blastn_short += ['-word_size']
blastn_short += ["{}".format(glv.conf.blast_word_size)]
blastn_short += ['-ungapped']
blastn_short += ['-task']
blastn_short += ['blastn-short']
blastn_short += ['-outfmt']
outfmt = '6 qseqid sseqid qlen pident length mismatch'
outfmt += ' gapopen qstart qend sstart send evalue sstrand'
blastn_short += [outfmt]
blastn_short_p = sbp.Popen(
blastn_short,
stdin=sbp.PIPE,
stdout=sbp.PIPE)
blastn_out = blastn_short_p.communicate(
primer_fasta.encode())[0].decode()
blast_check_result_list = cls._check_alternate_alignment(blastn_out)
return blast_check_result_list
@classmethod
def _check_alternate_alignment(cls, blastn_out):
blast_check_result_list = list()
#print("{}".format(blastn_out))
check_dict = dict()
# 0 qseqid # 1 sseqid # 2 qlen # 3 pident # 4 length # 5 mismatch
# 6 gapopen # 7 qstart # 8 qend # 9 sstart # 10 send
# 11 evalue # 12 sstrand
for row in blastn_out.split('\n'):
if row == '':
continue
item = row.split('\t')
#log.debug("")
#log.debug("start row {}".format(row))
query_id, primer_chrom, primer_abs_stt_pos, \
primer_abs_end_pos, primer_strand, \
subject_id, query_length, alignment_length, \
mismatches, gap_opens, \
subject_abs_stt_pos, subject_abs_end_pos, \
subject_strand = \
cls._get_align_info(item)
#log.debug("query_length={}, alignment_length={}".format(
# query_length, alignment_length))
#log.debug("mismatches={}, gap_opens={}".format(
# mismatches, gap_opens))
if query_length != alignment_length or \
mismatches != 0 or gap_opens != 0:
#log.debug("found mismatch continue")
#log.debug("")
continue
#else:
# log.debug("not found mismatch continue")
# log.debug("")
# check own alignment
if cls._check_own_alignment(
primer_chrom,
primer_abs_stt_pos, primer_abs_end_pos,
primer_strand,
subject_id,
subject_abs_stt_pos, subject_abs_end_pos, \
subject_strand) == True:
continue
# build the dictionary
align_info = "{}:{}:{}".format(
subject_abs_stt_pos, subject_abs_end_pos, subject_strand)
#log.debug("align_info {}".format(align_info))
# if the key does not exist yet, initialize it
if not subject_id in check_dict:
check_dict[subject_id] = dict()
check_dict[subject_id]['plus'] = list()
check_dict[subject_id]['minus'] = list()
check_dict[subject_id][primer_strand].append(align_info)
#log.debug("check_dict {}".format(check_dict))
# check the distance between primers
blast_check_result_list = cls._primer_distance_check(check_dict)
#log.debug("distance_result {}".format(blast_check_result_list))
return blast_check_result_list
@classmethod
def _check_own_alignment(
cls, primer_chrom,
primer_abs_stt_pos, primer_abs_end_pos,
primer_strand,
subject_id,
subject_abs_stt_pos, subject_abs_end_pos,
subject_strand):
own = False
check_stt = primer_abs_stt_pos
check_end = primer_abs_end_pos
#log.debug("{} {}".format(primer_strand, subject_strand))
if primer_strand != subject_strand:
check_stt = primer_abs_end_pos
check_end = primer_abs_stt_pos
if primer_chrom == subject_id and \
check_stt == subject_abs_stt_pos and \
check_end == subject_abs_end_pos:
#log.debug("Me next {} q {} == s {} and q {} == s {}".format(
# primer_chrom,
# check_stt, subject_abs_stt_pos,
# check_end, subject_abs_end_pos))
own = True
#else:
# log.debug("NotMe {} q {} == {} s {} and q {} == s {}".format(
# primer_chrom,
# check_stt,
# subject_id,
# subject_abs_stt_pos,
# check_end, subject_abs_end_pos))
return own
@classmethod
def _get_align_info(cls, item):
query_id = str(item[0])
primer_chrom, primer_abs_stt_pos, \
primer_abs_end_pos, primer_strand = \
cls._separate_primer_name(query_id)
subject_id = str(item[1])
query_length = int(item[2])
alignment_length = int(item[4])
mismatches = int(item[5])
gap_opens = int(item[6])
s_stt = int(item[9])
s_end = int(item[10])
subject_strand = str(item[12])
# chrom:small-big:strand small always small < big
subject_abs_stt_pos = s_stt
subject_abs_end_pos = s_end
if subject_strand == 'minus':
subject_abs_stt_pos = s_end
subject_abs_end_pos = s_stt
return \
query_id, primer_chrom, primer_abs_stt_pos, \
primer_abs_end_pos, primer_strand, \
subject_id, query_length, alignment_length, \
mismatches, gap_opens, \
subject_abs_stt_pos, subject_abs_end_pos, \
subject_strand
@classmethod
def _primer_distance_check(cls, check_dict):
#log.debug("{}".format(check_dict))
# the configured distance threshold
alternate_distance = glv.conf.alternate_distance
blast_check_result_list = list()
# {
# 'NC_028450.1':
# {
# 'plus':
# [
# '9985:10009:plus',
# '32680:32704:plus',
# '56651:56675:plus',
# '3033129:3033153:plus',
# '3055745:3055769:plus',
# '3067736:3067760:plus',
# '3079717:3079741:plus'
# ],
# 'minus':
# [
# '10365:10341:minus',
# '33060:33036:minus',
# '45056:45032:minus',
# '57031:57007:minus',
# '3033509:3033485:minus',
# '3056125:3056101:minus',
# '3068116:3068092:minus',
# '3080097:3080073:minus'
# ]
# }
# }
# For each contig, compare every plus-strand hit against the facing minus-strand hits,
# measure the distance, and add the pair to the list if it qualifies.
for contig in check_dict:
#log.debug("(1) {}".format(contig))
for plus_primer in check_dict[contig]['plus']:
#log.debug("{}".format(plus_primer))
p_stt, p_end, p_strand = plus_primer.split(':')
p_stt = int(p_stt)
p_end = int(p_end)
#log.debug("(2) \t{} p_stt={} p_end={} p_strand={}".format(
# plus_primer, p_stt, p_end, p_strand))
for minus_primer in check_dict[contig]['minus']:
#log.debug("\t{}".format(minus_primer))
m_stt, m_end, m_strand = minus_primer.split(':')
m_stt = int(m_stt)
m_end = int(m_end)
#log.debug(
# "(3) \t\t{} m_stt={} m_end={} m_strand={}".format(
# minus_primer, m_stt, m_end, m_strand))
if p_strand != m_strand:
# the strands are opposite.
# check whether the two hits face each other
#log.debug("(4) \t\t\t{} not {} p={} m={}".format(
# p_strand, m_strand, plus_primer, minus_primer))
# (4) plus not minus p=9985:10009:plus
# m=10365:10341:minus
dist_start = 0
dist_end = alternate_distance + 1
if p_strand == 'plus':
# p=p
# p_end| |m_stt
# +++++++++> <--------- ok
if p_end < m_stt:
# ok
dist_start = p_stt
dist_end = m_end
else:
# p=m
# m_end| |p_stt
# ---------> <+++++++++ ok
if m_end < p_stt:
# ok
dist_start = m_stt
dist_end = p_end
distance = dist_end - dist_start
if distance <= alternate_distance:
alt = "{}:{}-{}({})".format(
contig, dist_start,
dist_end, distance)
blast_check_result_list.append(alt)
#log.debug("{}".format(alt))
return blast_check_result_list
@classmethod
def _separate_primer_name(cls, primer_name):
# [NC_028450.1]44676.44700.plus
#chrom_str, remain_str = primer_name.split('}')
#chrom = chrom_str.lstrip('{')
#abs_primer_stt_pos, abs_primer_end_pos, strand = \
#remain_str.split('.')
#s = 'NC_0:28450.1:44676-44700:plus'
m = re.match(r'^(.*):([0-9]+)-([0-9]+):([a-z]+)$', primer_name)
#print(m.groups())
#('NC_0:28450.1', '44676', '44700', 'plus')
#log.debug("{}".format(primer_name))
#log.debug("{}".format(m))
#log.debug("{}".format(m[0]))
#log.debug("{}".format(m[1]))
chrom = str(m[1])
abs_primer_stt_pos = int(m[2])
abs_primer_end_pos = int(m[3])
strand = str(m[4])
#log.debug("{}".format(type(m[1])))
#sys.exit(1)
return \
chrom, \
abs_primer_stt_pos, \
abs_primer_end_pos, \
strand
@classmethod
def makeblastdb(cls):
fobj_list = utl.check_for_files(
"{}*.nsq".format(glv.conf.blastdb))
# exit blastdb
if len(fobj_list) != 0:
return
root_ext_pair = os.path.splitext(glv.conf.ref_fasta)
cmd1 = ''
if root_ext_pair[1] == '.gz':
bgzip = "bgzip -cd -@ {} {}"
mkdb = "makeblastdb -in - -title {} -dbtype nucl -out {}"
cmd1 = "{} | {}".format(
bgzip.format(
glv.conf.parallele_full_thread,
glv.conf.ref_fasta),
mkdb.format(
glv.conf.blastdb_title,
glv.conf.blastdb))
else:
mkdb = "makeblastdb -in {} -title {} -dbtype nucl -out {}"
cmd1 = "{}".format(
mkdb.format(
glv.conf.ref_fasta,
glv.conf.blastdb_title,
glv.conf.blastdb))
utl.try_exec(cmd1)
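# --- Appended check (not part of the original module) ---
# Standalone demonstration of the primer-name regex used in _separate_primer_name(),
# using the sample name already given in the comments above.
import re

sample_primer_name = 'NC_0:28450.1:44676-44700:plus'
m = re.match(r'^(.*):([0-9]+)-([0-9]+):([a-z]+)$', sample_primer_name)
print(m.groups())  # ('NC_0:28450.1', '44676', '44700', 'plus')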
|
py | 7df9467812e78b55041c73ff2d33a4dc602146b9 | import sys
from collections import deque, namedtuple
import os
from typing import List
ArgFlag = namedtuple('ArgFlag', ('key', 'default'),
defaults=(None, None))
ArgParameter = namedtuple('ArgOption',
('key', 'required', 'default'),
defaults=(None, False, ''))
def arg_parser(args, arg_desc: List[namedtuple], script_name=None):
all_keys = {a.key for a in arg_desc}
key_to_desc = {desc.key: desc for desc in arg_desc}
required_keys = {a.key for a in arg_desc if isinstance(a, ArgParameter) and a.required}
flag_keys = {a.key for a in arg_desc if isinstance(a, ArgFlag)}
optional_keys = all_keys - required_keys - flag_keys
script_name = script_name or os.path.basename(sys.argv[0])
def usage(error=None):
arg_suggest = []
for key in flag_keys:
arg_suggest.append(f'[--{key}]')
for key in required_keys:
arg_suggest.append(f'--{key} <{key}>')
for key in optional_keys:
arg_suggest.append(f'[--{key} <{key}>]')
text_args_suggest = ' '.join(arg_suggest)
if error:
print(f'Error: {error}!')
print('Usage:')
print(f' python3 {script_name} {text_args_suggest}')
exit(0)
if not args:
usage()
args = deque(args)
results = {}
for key in flag_keys:
results[key] = False
is_key = lambda k: k.startswith('--')
current_key = None
while args:
arg = args.popleft()
if current_key is None:
if is_key(arg):
current_key = arg[2:]
if not current_key:
usage('empty key')
if current_key not in all_keys:
usage(f'unknown key {arg}')
if current_key in flag_keys:
results[current_key] = True
current_key = None
else:
usage('expected key name starting with --')
else:
if is_key(arg):
usage('expected value without --')
results[current_key] = arg
current_key = None
if not all(key in results for key in required_keys):
missing_keys = set(required_keys) - set(results.keys())
err_text = ', '.join(f'--{k}' for k in missing_keys)
usage(f'not all required key filled: {err_text}')
for key in optional_keys:
if key not in results:
results[key] = key_to_desc[key].default
return results
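# --- Appended usage sketch (argument names invented for illustration only) ---
# Shows how the descriptors above drive arg_parser().
if __name__ == '__main__':
    desc = [
        ArgFlag('verbose'),
        ArgParameter('input', required=True),
        ArgParameter('output', default='out.txt'),
    ]
    opts = arg_parser(sys.argv[1:], desc)
    # e.g. python3 script.py --verbose --input data.csv
    # -> {'verbose': True, 'input': 'data.csv', 'output': 'out.txt'}
    print(opts)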
|
py | 7df9474c34e68081ea5b5cbb0c5daa7e4770869d | import os
import rastervision as rv
from .data import (get_rio_training_scene_info, get_rio_val_scene_info)
AOI_URI = "s3://spacenet-dataset/AOI_1_Rio/srcData/buildingLabels/Rio_OUTLINE_Public_AOI.geojson"
class ChipClassificationExperiments(rv.ExperimentSet):
"""Chip classificaiton experiments on SpaceNet Rio data.
Be sure you've run the data prep notebook before running this experiment.
"""
def scene_maker(self, task):
def f(x):
(raster_uri, label_uri) = x
id = os.path.splitext(os.path.basename(raster_uri))[0]
label_source = rv.LabelSourceConfig.builder(rv.CHIP_CLASSIFICATION_GEOJSON) \
.with_uri(label_uri) \
.with_ioa_thresh(0.5) \
.with_use_intersection_over_cell(False) \
.with_pick_min_class_id(True) \
.with_background_class_id(2) \
.with_infer_cells(True) \
.build()
return rv.SceneConfig.builder() \
.with_task(task) \
.with_id(id) \
.with_raster_source(raster_uri) \
.with_label_source(label_source) \
.with_aoi_uri(AOI_URI) \
.build()
return f
def exp_rio_resnet50_200chip(self, root_uri):
task = rv.TaskConfig.builder(rv.CHIP_CLASSIFICATION) \
.with_chip_size(200) \
.with_classes({
"building": (1, "red"),
"no_building": (2, "black")
}) \
.build()
backend = rv.BackendConfig.builder(rv.KERAS_CLASSIFICATION) \
.with_task(task) \
.with_model_defaults(rv.RESNET50_IMAGENET) \
.with_debug(True) \
.with_train_options(replace_model=True) \
.with_batch_size(16) \
.with_num_epochs(40) \
.with_config({
"trainer": {
"options": {
"saveBest": True,
"lrSchedule": [
{
"epoch": 0,
"lr": 0.0005
},
{
"epoch": 15,
"lr": 0.0001
},
{
"epoch": 30,
"lr": 0.00001
}
]
}
}
}, set_missing_keys=True) \
.build()
make_scene = self.scene_maker(task)
train_scenes = list(map(make_scene, get_rio_training_scene_info()))
val_scenes = list(map(make_scene, get_rio_val_scene_info()))
dataset = rv.DatasetConfig.builder() \
.with_train_scenes(train_scenes) \
.with_validation_scenes(val_scenes) \
.build()
experiment = rv.ExperimentConfig.builder() \
.with_id('spacenet-rio-chip-classification') \
.with_root_uri(root_uri) \
.with_task(task) \
.with_backend(backend) \
.with_dataset(dataset) \
.build()
return experiment
if __name__ == '__main__':
rv.main()
|
py | 7df947ac61e1ec356cb778151ee3d69c52bf268c | import os
import sys
from setuptools import setup, find_packages
from setuptools.command.install import install
VERSION = "3.0.5"
def readme():
readme_short = """
Quilt is a data management tool designed for data discoverability, data dependency
management, and data version control using `data packages <https://blog.quiltdata.com/data-packages-for-fast-reproducible-python-analysis-c74b78015c7f>`_.
The `quilt` PyPi package allows you to build, push, and pull data packages in Quilt using Python.
Visit the `documentation quickstart <https://docs.quiltdata.com/quickstart>`_ for more information.
"""
return readme_short
class VerifyVersionCommand(install):
"""Custom command to verify that the git tag matches our version"""
description = 'verify that the git tag matches our version'
def run(self):
tag = os.getenv('CIRCLE_TAG')
if tag != VERSION:
info = "Git tag: {0} does not match the version of this app: {1}".format(
tag, VERSION
)
sys.exit(info)
setup(
name="quilt3",
version=VERSION,
packages=find_packages(),
description='Quilt: where data comes together',
long_description=readme(),
python_requires='>=3.6',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
author='quiltdata',
author_email='[email protected]',
license='LICENSE',
url='https://github.com/quiltdata/quilt',
keywords='',
install_requires=[
'appdirs>=1.4.0',
'aws-requests-auth>=0.4.2',
'boto3>=1.8.0',
'elasticsearch~=6.3.1',
'jsonlines==1.2.0',
'numpy>=1.14.0', # required by pandas, but missing from its dependencies.
'packaging>=16.8',
'pandas>=0.19.2',
'pyarrow>=0.9.0,<0.14', # as of 7/5/19: linux/circleci bugs on 0.14
'requests>=2.12.4',
'ruamel.yaml<=0.15.70',
'tqdm>=4.26.0',
'urllib3<1.25,>=1.21.1', # required by requests
'xattr>=0.9.6; platform_system!="Windows"',
'humanize'
],
extras_require={
'tests': [
'codecov',
'pytest',
'pytest-cov',
'responses',
'tox',
'detox',
'tox-pytest-summary',
],
},
include_package_data=True,
entry_points={
'console_scripts': ['quilt3=quilt3.main:main'],
},
cmdclass={
'verify': VerifyVersionCommand,
}
)
|
py | 7df949cada78aba1d801708de35dcf3b2c6daad2 | """
A User model, used for authentication.
"""
from __future__ import annotations
import datetime
import hashlib
import logging
import secrets
import typing as t
from piccolo.columns import Boolean, Secret, Timestamp, Varchar
from piccolo.columns.column_types import Serial
from piccolo.columns.readable import Readable
from piccolo.table import Table
from piccolo.utils.sync import run_sync
logger = logging.getLogger(__file__)
class BaseUser(Table, tablename="piccolo_user"):
"""
Provides a basic user, with authentication support.
"""
id: Serial
username = Varchar(length=100, unique=True)
password = Secret(length=255)
first_name = Varchar(null=True)
last_name = Varchar(null=True)
email = Varchar(length=255, unique=True)
active = Boolean(default=False)
admin = Boolean(
default=False, help_text="An admin can log into the Piccolo admin GUI."
)
superuser = Boolean(
default=False,
help_text=(
"If True, this user can manage other users's passwords in the "
"Piccolo admin GUI."
),
)
last_login = Timestamp(
null=True,
default=None,
required=False,
help_text="When this user last logged in.",
)
_min_password_length = 6
_max_password_length = 128
def __init__(self, **kwargs):
# Generating passwords upfront is expensive, so might need reworking.
password = kwargs.get("password", None)
if password:
if not password.startswith("pbkdf2_sha256"):
kwargs["password"] = self.__class__.hash_password(password)
super().__init__(**kwargs)
@classmethod
def get_salt(cls):
return secrets.token_hex(16)
@classmethod
def get_readable(cls) -> Readable:
"""
Used to get a readable string, representing a table row.
"""
return Readable(template="%s", columns=[cls.username])
###########################################################################
@classmethod
def update_password_sync(cls, user: t.Union[str, int], password: str):
"""
A sync equivalent of :meth:`update_password`.
"""
return run_sync(cls.update_password(user, password))
@classmethod
async def update_password(cls, user: t.Union[str, int], password: str):
"""
The password is the raw password string e.g. ``'password123'``.
The user can be a user ID, or a username.
"""
if isinstance(user, str):
clause = cls.username == user
elif isinstance(user, int):
clause = cls.id == user
else:
raise ValueError(
"The `user` arg must be a user id, or a username."
)
password = cls.hash_password(password)
await cls.update({cls.password: password}).where(clause).run()
###########################################################################
@classmethod
def hash_password(
cls, password: str, salt: str = "", iterations: int = 10000
) -> str:
"""
Hashes the password, ready for storage, and for comparing during
login.
:raises ValueError:
If an excessively long password is provided.
"""
if len(password) > cls._max_password_length:
logger.warning("Excessively long password provided.")
raise ValueError("The password is too long.")
if not salt:
salt = cls.get_salt()
hashed = hashlib.pbkdf2_hmac(
"sha256",
bytes(password, encoding="utf-8"),
bytes(salt, encoding="utf-8"),
iterations,
).hex()
return f"pbkdf2_sha256${iterations}${salt}${hashed}"
def __setattr__(self, name: str, value: t.Any):
"""
Make sure that if the password is set, it's stored in a hashed form.
"""
if name == "password" and not value.startswith("pbkdf2_sha256"):
value = self.__class__.hash_password(value)
super().__setattr__(name, value)
@classmethod
def split_stored_password(cls, password: str) -> t.List[str]:
elements = password.split("$")
if len(elements) != 4:
raise ValueError("Unable to split hashed password")
return elements
###########################################################################
@classmethod
def login_sync(cls, username: str, password: str) -> t.Optional[int]:
"""
A sync equivalent of :meth:`login`.
"""
return run_sync(cls.login(username, password))
@classmethod
async def login(cls, username: str, password: str) -> t.Optional[int]:
"""
Make sure the user exists and the password is valid. If so, the
``last_login`` value is updated in the database.
:returns:
The id of the user if a match is found, otherwise ``None``.
"""
if len(username) > cls.username.length:
logger.warning("Excessively long username provided.")
return None
if len(password) > cls._max_password_length:
logger.warning("Excessively long password provided.")
return None
response = (
await cls.select(cls._meta.primary_key, cls.password)
.where(cls.username == username)
.first()
.run()
)
if not response:
# No match found
return None
stored_password = response["password"]
algorithm, iterations, salt, hashed = cls.split_stored_password(
stored_password
)
if (
cls.hash_password(password, salt, int(iterations))
== stored_password
):
await cls.update({cls.last_login: datetime.datetime.now()}).where(
cls.username == username
)
return response["id"]
else:
return None
###########################################################################
@classmethod
def create_user_sync(
cls, username: str, password: str, **extra_params
) -> BaseUser:
"""
A sync equivalent of :meth:`create_user`.
"""
return run_sync(
cls.create_user(
username=username, password=password, **extra_params
)
)
@classmethod
async def create_user(
cls, username: str, password: str, **extra_params
) -> BaseUser:
"""
Creates a new user, and saves it in the database. It is recommended to
use this rather than instantiating and saving ``BaseUser`` directly, as
we add extra validation.
:raises ValueError:
If the username or password is invalid.
:returns:
The created ``BaseUser`` instance.
"""
if not username:
raise ValueError("A username must be provided.")
if not password:
raise ValueError("A password must be provided.")
if len(password) < cls._min_password_length:
raise ValueError("The password is too short.")
if len(password) > cls._max_password_length:
raise ValueError("The password is too long.")
if password.startswith("pbkdf2_sha256"):
logger.warning(
"Tried to create a user with an already hashed password."
)
raise ValueError("Do not pass a hashed password.")
user = cls(username=username, password=password, **extra_params)
await user.save()
return user
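# --- Appended usage sketch (not part of the original module) ---
# Assumes a configured Piccolo engine and an existing piccolo_user table.
async def _demo():
    await BaseUser.create_user(
        username="alice", password="s3cret-pass", active=True
    )
    user_id = await BaseUser.login(username="alice", password="s3cret-pass")
    print(user_id)  # the user's id on success, None on a failed login

# run_sync (imported above) can drive it outside an event loop:
# run_sync(_demo())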
|
py | 7df94b2cc3b192b41558e17c8a589749f8f8b6b7 | # -*- coding:utf-8 -*-
"""
This program is designed to download all images of a page of https://www.mzitu.com/zipai/comment-page-1/#comments
"""
from bs4 import BeautifulSoup
from hyper.contrib import HTTP20Adapter
import requests
import re
import os
folder = './imgs' # Folder to save images
s = requests.Session()
def getHeaders(url):
return {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:43.0) Gecko/20100101 Firefox/43.0',
'Accept-Encoding': 'gzip, deflate',
'Referer': url,
'Connection': 'keep-alive',
'Cache-Control': 'max-age=0'
}
def downloadFile(url):
name = os.path.basename(url)
if not os.path.exists(folder):
os.mkdir(folder)
path = os.path.join(folder, name)
f = open(path, 'wb')
f.write(s.get(url, headers=getHeaders(url)).content)
f.close()
def getImgUrls(url):
s.mount(url, HTTP20Adapter())
html_content = s.get(url, headers=getHeaders(url)).text
soup = BeautifulSoup(html_content, "html.parser")
img_urls = soup.findAll('img', class_='lazy')
for i in img_urls:
downloadFile(i['data-original'])
def analysis(t):
s = re.findall('http[s]?://', t)
if len(s) == 1:
getImgUrls(t)
else:
print('Invalid url')
def main():
u = input('Please input target url')
s = re.findall('http[s]?://', u)
if len(s) == 1:
analysis(u)
else:
print('Invalid url')
if __name__ == '__main__':
main()
|
py | 7df94b5820e5d46a72e2b753fac500233ed9514c | from datetime import datetime
from dateutil.tz import tzlocal
import pytest
from pandas.compat import IS64
from pandas import (
DateOffset,
DatetimeIndex,
Index,
Series,
bdate_range,
date_range,
)
import pandas._testing as tm
from pandas.tseries.offsets import (
BDay,
Day,
Hour,
)
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
class TestDatetimeIndexOps:
def test_ops_properties_basic(self, datetime_series):
# sanity check that the behavior didn't change
# GH#7206
for op in ["year", "day", "second", "weekday"]:
msg = f"'Series' object has no attribute '{op}'"
with pytest.raises(AttributeError, match=msg):
getattr(datetime_series, op)
# attribute access should still work!
s = Series({"year": 2000, "month": 1, "day": 10})
assert s.year == 2000
assert s.month == 1
assert s.day == 10
msg = "'Series' object has no attribute 'weekday'"
with pytest.raises(AttributeError, match=msg):
s.weekday
@pytest.mark.parametrize(
"freq,expected",
[
("A", "day"),
("Q", "day"),
("M", "day"),
("D", "day"),
("H", "hour"),
("T", "minute"),
("S", "second"),
("L", "millisecond"),
("U", "microsecond"),
],
)
def test_resolution(self, request, tz_naive_fixture, freq, expected):
tz = tz_naive_fixture
if freq == "A" and not IS64 and isinstance(tz, tzlocal):
request.node.add_marker(
pytest.mark.xfail(reason="OverflowError inside tzlocal past 2038")
)
idx = date_range(start="2013-04-01", periods=30, freq=freq, tz=tz)
assert idx.resolution == expected
def test_infer_freq(self, freq_sample):
# GH 11018
idx = date_range("2011-01-01 09:00:00", freq=freq_sample, periods=10)
result = DatetimeIndex(idx.asi8, freq="infer")
tm.assert_index_equal(idx, result)
assert result.freq == freq_sample
@pytest.mark.parametrize("values", [["20180101", "20180103", "20180105"], []])
@pytest.mark.parametrize("freq", ["2D", Day(2), "2B", BDay(2), "48H", Hour(48)])
@pytest.mark.parametrize("tz", [None, "US/Eastern"])
def test_freq_setter(self, values, freq, tz):
# GH 20678
idx = DatetimeIndex(values, tz=tz)
# can set to an offset, converting from string if necessary
idx._data.freq = freq
assert idx.freq == freq
assert isinstance(idx.freq, DateOffset)
# can reset to None
idx._data.freq = None
assert idx.freq is None
def test_freq_setter_errors(self):
# GH 20678
idx = DatetimeIndex(["20180101", "20180103", "20180105"])
# setting with an incompatible freq
msg = (
"Inferred frequency 2D from passed values does not conform to "
"passed frequency 5D"
)
with pytest.raises(ValueError, match=msg):
idx._data.freq = "5D"
# setting with non-freq string
with pytest.raises(ValueError, match="Invalid frequency"):
idx._data.freq = "foo"
def test_freq_view_safe(self):
# Setting the freq for one DatetimeIndex shouldn't alter the freq
# for another that views the same data
dti = date_range("2016-01-01", periods=5)
dta = dti._data
dti2 = DatetimeIndex(dta)._with_freq(None)
assert dti2.freq is None
# Original was not altered
assert dti.freq == "D"
assert dta.freq == "D"
class TestBusinessDatetimeIndex:
def setup_method(self, method):
self.rng = bdate_range(START, END)
def test_comparison(self):
d = self.rng[10]
comp = self.rng > d
assert comp[11]
assert not comp[9]
def test_copy(self):
cp = self.rng.copy()
repr(cp)
tm.assert_index_equal(cp, self.rng)
def test_identical(self):
t1 = self.rng.copy()
t2 = self.rng.copy()
assert t1.identical(t2)
# name
t1 = t1.rename("foo")
assert t1.equals(t2)
assert not t1.identical(t2)
t2 = t2.rename("foo")
assert t1.identical(t2)
# freq
t2v = Index(t2.values)
assert t1.equals(t2v)
assert not t1.identical(t2v)
class TestCustomDatetimeIndex:
def setup_method(self, method):
self.rng = bdate_range(START, END, freq="C")
def test_comparison(self):
d = self.rng[10]
comp = self.rng > d
assert comp[11]
assert not comp[9]
def test_copy(self):
cp = self.rng.copy()
repr(cp)
tm.assert_index_equal(cp, self.rng)
|
py | 7df94be77e7b949e23eb709082b53f939c29d3da | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class FirewallPolicyRuleCollectionGroupsOperations:
"""FirewallPolicyRuleCollectionGroupsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_05_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
firewall_policy_name: str,
rule_collection_group_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'firewallPolicyName': self._serialize.url("firewall_policy_name", firewall_policy_name, 'str'),
'ruleCollectionGroupName': self._serialize.url("rule_collection_group_name", rule_collection_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/firewallPolicies/{firewallPolicyName}/ruleCollectionGroups/{ruleCollectionGroupName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
firewall_policy_name: str,
rule_collection_group_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes the specified FirewallPolicyRuleCollectionGroup.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param firewall_policy_name: The name of the Firewall Policy.
:type firewall_policy_name: str
:param rule_collection_group_name: The name of the FirewallPolicyRuleCollectionGroup.
:type rule_collection_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
firewall_policy_name=firewall_policy_name,
rule_collection_group_name=rule_collection_group_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'firewallPolicyName': self._serialize.url("firewall_policy_name", firewall_policy_name, 'str'),
'ruleCollectionGroupName': self._serialize.url("rule_collection_group_name", rule_collection_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/firewallPolicies/{firewallPolicyName}/ruleCollectionGroups/{ruleCollectionGroupName}'} # type: ignore
async def get(
self,
resource_group_name: str,
firewall_policy_name: str,
rule_collection_group_name: str,
**kwargs
) -> "_models.FirewallPolicyRuleCollectionGroup":
"""Gets the specified FirewallPolicyRuleCollectionGroup.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param firewall_policy_name: The name of the Firewall Policy.
:type firewall_policy_name: str
:param rule_collection_group_name: The name of the FirewallPolicyRuleCollectionGroup.
:type rule_collection_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FirewallPolicyRuleCollectionGroup, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_05_01.models.FirewallPolicyRuleCollectionGroup
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FirewallPolicyRuleCollectionGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'firewallPolicyName': self._serialize.url("firewall_policy_name", firewall_policy_name, 'str'),
'ruleCollectionGroupName': self._serialize.url("rule_collection_group_name", rule_collection_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('FirewallPolicyRuleCollectionGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/firewallPolicies/{firewallPolicyName}/ruleCollectionGroups/{ruleCollectionGroupName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
firewall_policy_name: str,
rule_collection_group_name: str,
parameters: "_models.FirewallPolicyRuleCollectionGroup",
**kwargs
) -> "_models.FirewallPolicyRuleCollectionGroup":
cls = kwargs.pop('cls', None) # type: ClsType["_models.FirewallPolicyRuleCollectionGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'firewallPolicyName': self._serialize.url("firewall_policy_name", firewall_policy_name, 'str'),
'ruleCollectionGroupName': self._serialize.url("rule_collection_group_name", rule_collection_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'FirewallPolicyRuleCollectionGroup')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('FirewallPolicyRuleCollectionGroup', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('FirewallPolicyRuleCollectionGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/firewallPolicies/{firewallPolicyName}/ruleCollectionGroups/{ruleCollectionGroupName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
firewall_policy_name: str,
rule_collection_group_name: str,
parameters: "_models.FirewallPolicyRuleCollectionGroup",
**kwargs
) -> AsyncLROPoller["_models.FirewallPolicyRuleCollectionGroup"]:
"""Creates or updates the specified FirewallPolicyRuleCollectionGroup.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param firewall_policy_name: The name of the Firewall Policy.
:type firewall_policy_name: str
:param rule_collection_group_name: The name of the FirewallPolicyRuleCollectionGroup.
:type rule_collection_group_name: str
:param parameters: Parameters supplied to the create or update
FirewallPolicyRuleCollectionGroup operation.
:type parameters: ~azure.mgmt.network.v2020_05_01.models.FirewallPolicyRuleCollectionGroup
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either FirewallPolicyRuleCollectionGroup or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_05_01.models.FirewallPolicyRuleCollectionGroup]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.FirewallPolicyRuleCollectionGroup"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
firewall_policy_name=firewall_policy_name,
rule_collection_group_name=rule_collection_group_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('FirewallPolicyRuleCollectionGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'firewallPolicyName': self._serialize.url("firewall_policy_name", firewall_policy_name, 'str'),
'ruleCollectionGroupName': self._serialize.url("rule_collection_group_name", rule_collection_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/firewallPolicies/{firewallPolicyName}/ruleCollectionGroups/{ruleCollectionGroupName}'} # type: ignore
def list(
self,
resource_group_name: str,
firewall_policy_name: str,
**kwargs
) -> AsyncIterable["_models.FirewallPolicyRuleCollectionGroupListResult"]:
"""Lists all FirewallPolicyRuleCollectionGroups in a FirewallPolicy resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param firewall_policy_name: The name of the Firewall Policy.
:type firewall_policy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either FirewallPolicyRuleCollectionGroupListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_05_01.models.FirewallPolicyRuleCollectionGroupListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FirewallPolicyRuleCollectionGroupListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'firewallPolicyName': self._serialize.url("firewall_policy_name", firewall_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('FirewallPolicyRuleCollectionGroupListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/firewallPolicies/{firewallPolicyName}/ruleCollectionGroups'} # type: ignore
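# --- Appended caller sketch (not part of the generated file) ---
# The async NetworkManagementClient exposes this operation group as
# `firewall_policy_rule_collection_groups`. Resource names and the credential
# setup below are assumptions for illustration only.
from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.network.aio import NetworkManagementClient

async def list_rule_collection_groups(subscription_id: str):
    async with NetworkManagementClient(DefaultAzureCredential(), subscription_id) as client:
        async for group in client.firewall_policy_rule_collection_groups.list(
            resource_group_name="my-rg", firewall_policy_name="my-policy"
        ):
            print(group.name)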
|
py | 7df94c577862d6b9eda6d928a8a94d9c89f3439d | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=C,R,W
"""This module contains the 'Viz' objects
These objects represent the backend of all the visualizations that
Superset can render.
"""
import copy
import hashlib
import inspect
import logging
import math
import pickle as pkl
import re
import uuid
from collections import defaultdict, OrderedDict
from datetime import datetime, timedelta
from itertools import product
from typing import Any, Dict, List, Optional, Set, Tuple, TYPE_CHECKING
import geohash
import numpy as np
import pandas as pd
import polyline
import simplejson as json
from dateutil import relativedelta as rdelta
from flask import request
from flask_babel import lazy_gettext as _
from geopy.point import Point
from markdown import markdown
from pandas.tseries.frequencies import to_offset
from superset import app, cache, get_manifest_files, security_manager
from superset.constants import NULL_STRING
from superset.exceptions import NullValueException, SpatialException
from superset.models.helpers import QueryResult
from superset.typing import VizData
from superset.utils import core as utils
from superset.utils.core import (
DTTM_ALIAS,
JS_MAX_INTEGER,
merge_extra_filters,
to_adhoc,
)
if TYPE_CHECKING:
from superset.connectors.base.models import BaseDatasource
config = app.config
stats_logger = config["STATS_LOGGER"]
relative_start = config["DEFAULT_RELATIVE_START_TIME"]
relative_end = config["DEFAULT_RELATIVE_END_TIME"]
logger = logging.getLogger(__name__)
METRIC_KEYS = [
"metric",
"metrics",
"percent_metrics",
"metric_2",
"secondary_metric",
"x",
"y",
"size",
]
class BaseViz:
"""All visualizations derive this base class"""
viz_type: Optional[str] = None
verbose_name = "Base Viz"
credits = ""
is_timeseries = False
cache_type = "df"
enforce_numerical_metrics = True
def __init__(
self,
datasource: "BaseDatasource",
form_data: Dict[str, Any],
force: bool = False,
):
if not datasource:
raise Exception(_("Viz is missing a datasource"))
self.datasource = datasource
self.request = request
self.viz_type = form_data.get("viz_type")
self.form_data = form_data
self.query = ""
self.token = self.form_data.get("token", "token_" + uuid.uuid4().hex[:8])
self.groupby = self.form_data.get("groupby") or []
self.time_shift = timedelta()
self.status: Optional[str] = None
self.error_msg = ""
self.results: Optional[QueryResult] = None
self.error_message: Optional[str] = None
self.force = force
# Keeping track of whether some data came from cache
# this is useful to trigger the <CachedLabel /> when
# in the cases where visualization have many queries
# (FilterBox for instance)
self._any_cache_key: Optional[str] = None
self._any_cached_dttm: Optional[str] = None
self._extra_chart_data: List[Tuple[str, pd.DataFrame]] = []
self.process_metrics()
def process_metrics(self):
# metrics in TableViz is order sensitive, so metric_dict should be
# OrderedDict
self.metric_dict = OrderedDict()
fd = self.form_data
for mkey in METRIC_KEYS:
val = fd.get(mkey)
if val:
if not isinstance(val, list):
val = [val]
for o in val:
label = utils.get_metric_name(o)
self.metric_dict[label] = o
# Cast to list needed to return serializable object in py3
self.all_metrics = list(self.metric_dict.values())
self.metric_labels = list(self.metric_dict.keys())
@staticmethod
def handle_js_int_overflow(data):
for d in data.get("records", dict()):
for k, v in list(d.items()):
if isinstance(v, int):
# if an int is too big for JavaScript to handle
# convert it to a string
if abs(v) > JS_MAX_INTEGER:
d[k] = str(v)
return data
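# Illustrative example for handle_js_int_overflow above (not part of the upstream code):
#
#     data = {"records": [{"big": 2 ** 60, "small": 1}]}
#     BaseViz.handle_js_int_overflow(data)
#     # -> {"records": [{"big": "1152921504606846976", "small": 1}]}
#
# 2 ** 60 exceeds JS_MAX_INTEGER (JavaScript's safe-integer limit, 2 ** 53 - 1), so it
# is stringified, while the small value is passed through unchanged.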
def run_extra_queries(self):
"""Lifecycle method to use when more than one query is needed
In rare-ish cases, a visualization may need to execute multiple
queries. That is the case for FilterBox or for time comparison
in Line chart for instance.
In those cases, we need to make sure these queries run before the
main `get_payload` method gets called, so that the overall caching
metadata can be right. The way it works here is that if any of
the previous `get_df_payload` calls hit the cache, the main
payload's metadata will reflect that.
The multi-query support may need more work to become a first class
use case in the framework, and for the UI to reflect the subtleties
(show that only some of the queries were served from cache for
instance). In the meantime, since multi-query is rare, we treat
it with a bit of a hack. Note that the hack became necessary
when moving from caching the visualization's data itself, to caching
the underlying query(ies).
"""
pass
def apply_rolling(self, df):
fd = self.form_data
rolling_type = fd.get("rolling_type")
rolling_periods = int(fd.get("rolling_periods") or 0)
min_periods = int(fd.get("min_periods") or 0)
if rolling_type in ("mean", "std", "sum") and rolling_periods:
kwargs = dict(window=rolling_periods, min_periods=min_periods)
if rolling_type == "mean":
df = df.rolling(**kwargs).mean()
elif rolling_type == "std":
df = df.rolling(**kwargs).std()
elif rolling_type == "sum":
df = df.rolling(**kwargs).sum()
elif rolling_type == "cumsum":
df = df.cumsum()
if min_periods:
df = df[min_periods:]
return df
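# Example for apply_rolling above (illustrative): with rolling_type="mean",
# rolling_periods=7 and min_periods=7, every metric column becomes
# df.rolling(window=7, min_periods=7).mean(), and df[min_periods:] then drops the first
# min_periods rows, which mostly hold NaN from incomplete windows.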
def get_samples(self):
query_obj = self.query_obj()
query_obj.update(
{
"groupby": [],
"metrics": [],
"row_limit": 1000,
"columns": [o.column_name for o in self.datasource.columns],
}
)
df = self.get_df(query_obj)
return df.to_dict(orient="records")
def get_df(self, query_obj: Optional[Dict[str, Any]] = None) -> pd.DataFrame:
"""Returns a pandas dataframe based on the query object"""
if not query_obj:
query_obj = self.query_obj()
if not query_obj:
return pd.DataFrame()
self.error_msg = ""
timestamp_format = None
if self.datasource.type == "table":
granularity_col = self.datasource.get_column(query_obj["granularity"])
if granularity_col:
timestamp_format = granularity_col.python_date_format
# The datasource here can be different backend but the interface is common
self.results = self.datasource.query(query_obj)
self.query = self.results.query
self.status = self.results.status
self.error_message = self.results.error_message
df = self.results.df
# Transform the timestamp we received from database to pandas supported
# datetime format. If no python_date_format is specified, the pattern will
# be considered as the default ISO date format
# If the datetime format is unix, the parse will use the corresponding
# parsing logic.
if not df.empty:
if DTTM_ALIAS in df.columns:
if timestamp_format in ("epoch_s", "epoch_ms"):
# Column has already been formatted as a timestamp.
dttm_col = df[DTTM_ALIAS]
one_ts_val = dttm_col[0]
# convert time column to pandas Timestamp, but different
# ways to convert depending on string or int types
try:
int(one_ts_val)
is_integral = True
except (ValueError, TypeError):
is_integral = False
if is_integral:
unit = "s" if timestamp_format == "epoch_s" else "ms"
df[DTTM_ALIAS] = pd.to_datetime(
dttm_col, utc=False, unit=unit, origin="unix"
)
else:
df[DTTM_ALIAS] = dttm_col.apply(pd.Timestamp)
else:
df[DTTM_ALIAS] = pd.to_datetime(
df[DTTM_ALIAS], utc=False, format=timestamp_format
)
if self.datasource.offset:
df[DTTM_ALIAS] += timedelta(hours=self.datasource.offset)
df[DTTM_ALIAS] += self.time_shift
if self.enforce_numerical_metrics:
self.df_metrics_to_num(df)
df.replace([np.inf, -np.inf], np.nan, inplace=True)
return df
def df_metrics_to_num(self, df):
"""Converting metrics to numeric when pandas.read_sql cannot"""
metrics = self.metric_labels
for col, dtype in df.dtypes.items():
if dtype.type == np.object_ and col in metrics:
df[col] = pd.to_numeric(df[col], errors="coerce")
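# Note on df_metrics_to_num above: pd.to_numeric(..., errors="coerce") converts metric
# columns that the database returned as objects/strings into numbers, mapping anything
# unparsable to NaN (e.g. ["1", "2", "n/a"] -> [1.0, 2.0, NaN]).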
def process_query_filters(self):
utils.convert_legacy_filters_into_adhoc(self.form_data)
merge_extra_filters(self.form_data)
utils.split_adhoc_filters_into_base_filters(self.form_data)
def query_obj(self) -> Dict[str, Any]:
"""Building a query object"""
form_data = self.form_data
self.process_query_filters()
gb = form_data.get("groupby") or []
metrics = self.all_metrics or []
columns = form_data.get("columns") or []
groupby = list(set(gb + columns))
is_timeseries = self.is_timeseries
if DTTM_ALIAS in groupby:
groupby.remove(DTTM_ALIAS)
is_timeseries = True
granularity = form_data.get("granularity") or form_data.get("granularity_sqla")
limit = int(form_data.get("limit") or 0)
timeseries_limit_metric = form_data.get("timeseries_limit_metric")
row_limit = int(form_data.get("row_limit") or config["ROW_LIMIT"])
# default order direction
order_desc = form_data.get("order_desc", True)
since, until = utils.get_since_until(
relative_start=relative_start,
relative_end=relative_end,
time_range=form_data.get("time_range"),
since=form_data.get("since"),
until=form_data.get("until"),
)
time_shift = form_data.get("time_shift", "")
self.time_shift = utils.parse_past_timedelta(time_shift)
from_dttm = None if since is None else (since - self.time_shift)
to_dttm = None if until is None else (until - self.time_shift)
if from_dttm and to_dttm and from_dttm > to_dttm:
raise Exception(_("From date cannot be larger than to date"))
self.from_dttm = from_dttm
self.to_dttm = to_dttm
# extras are used to query elements specific to a datasource type
# for instance the extra where clause that applies only to Tables
extras = {
"druid_time_origin": form_data.get("druid_time_origin", ""),
"having": form_data.get("having", ""),
"having_druid": form_data.get("having_filters", []),
"time_grain_sqla": form_data.get("time_grain_sqla"),
"time_range_endpoints": form_data.get("time_range_endpoints"),
"where": form_data.get("where", ""),
}
d = {
"granularity": granularity,
"from_dttm": from_dttm,
"to_dttm": to_dttm,
"is_timeseries": is_timeseries,
"groupby": groupby,
"metrics": metrics,
"row_limit": row_limit,
"filter": self.form_data.get("filters", []),
"timeseries_limit": limit,
"extras": extras,
"timeseries_limit_metric": timeseries_limit_metric,
"order_desc": order_desc,
}
return d
@property
def cache_timeout(self):
if self.form_data.get("cache_timeout") is not None:
return int(self.form_data.get("cache_timeout"))
if self.datasource.cache_timeout is not None:
return self.datasource.cache_timeout
if (
hasattr(self.datasource, "database")
and self.datasource.database.cache_timeout is not None
):
return self.datasource.database.cache_timeout
return config["CACHE_DEFAULT_TIMEOUT"]
def get_json(self):
return json.dumps(
self.get_payload(), default=utils.json_int_dttm_ser, ignore_nan=True
)
def cache_key(self, query_obj, **extra):
"""
The cache key is made out of the key/values in `query_obj`, plus any
other key/values in `extra`.
We remove datetime bounds that are hard values, and replace them with
the user-provided inputs to bounds, which may be time-relative (as in
"5 days ago" or "now").
The `extra` arguments are currently used by time shift queries, since
different time shifts will differ only in the `from_dttm` and `to_dttm`
values which are stripped.
"""
cache_dict = copy.copy(query_obj)
cache_dict.update(extra)
for k in ["from_dttm", "to_dttm"]:
del cache_dict[k]
cache_dict["time_range"] = self.form_data.get("time_range")
cache_dict["datasource"] = self.datasource.uid
cache_dict["extra_cache_keys"] = self.datasource.get_extra_cache_keys(query_obj)
cache_dict["rls"] = security_manager.get_rls_ids(self.datasource)
cache_dict["changed_on"] = self.datasource.changed_on
json_data = self.json_dumps(cache_dict, sort_keys=True)
return hashlib.md5(json_data.encode("utf-8")).hexdigest()
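# Note on cache_key above: the resolved from_dttm/to_dttm are dropped and the raw
# time_range string is hashed instead, so a relative range such as "Last week" keeps
# hitting the same key across runs (as long as changed_on, rls and the other inputs are
# unchanged); the `extra` kwargs (e.g. time_compare="1 week") are what differentiate
# time-shift queries.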
def get_payload(self, query_obj=None):
"""Returns a payload of metadata and data"""
self.run_extra_queries()
payload = self.get_df_payload(query_obj)
df = payload.get("df")
if self.status != utils.QueryStatus.FAILED:
payload["data"] = self.get_data(df)
if "df" in payload:
del payload["df"]
return payload
def get_df_payload(self, query_obj=None, **kwargs):
"""Handles caching around the df payload retrieval"""
if not query_obj:
query_obj = self.query_obj()
cache_key = self.cache_key(query_obj, **kwargs) if query_obj else None
logger.info("Cache key: {}".format(cache_key))
is_loaded = False
stacktrace = None
df = None
cached_dttm = datetime.utcnow().isoformat().split(".")[0]
if cache_key and cache and not self.force:
cache_value = cache.get(cache_key)
if cache_value:
stats_logger.incr("loading_from_cache")
try:
cache_value = pkl.loads(cache_value)
df = cache_value["df"]
self.query = cache_value["query"]
self._any_cached_dttm = cache_value["dttm"]
self._any_cache_key = cache_key
self.status = utils.QueryStatus.SUCCESS
is_loaded = True
stats_logger.incr("loaded_from_cache")
except Exception as e:
logger.exception(e)
logger.error(
"Error reading cache: " + utils.error_msg_from_exception(e)
)
logger.info("Serving from cache")
if query_obj and not is_loaded:
try:
df = self.get_df(query_obj)
if self.status != utils.QueryStatus.FAILED:
stats_logger.incr("loaded_from_source")
if not self.force:
stats_logger.incr("loaded_from_source_without_force")
is_loaded = True
except Exception as e:
logger.exception(e)
if not self.error_message:
self.error_message = "{}".format(e)
self.status = utils.QueryStatus.FAILED
stacktrace = utils.get_stacktrace()
if (
is_loaded
and cache_key
and cache
and self.status != utils.QueryStatus.FAILED
):
try:
cache_value = dict(dttm=cached_dttm, df=df, query=self.query)
cache_value = pkl.dumps(cache_value, protocol=pkl.HIGHEST_PROTOCOL)
logger.info(
"Caching {} chars at key {}".format(len(cache_value), cache_key)
)
stats_logger.incr("set_cache_key")
cache.set(cache_key, cache_value, timeout=self.cache_timeout)
except Exception as e:
# cache.set call can fail if the backend is down or if
# the key is too large or whatever other reasons
logger.warning("Could not cache key {}".format(cache_key))
logger.exception(e)
cache.delete(cache_key)
return {
"cache_key": self._any_cache_key,
"cached_dttm": self._any_cached_dttm,
"cache_timeout": self.cache_timeout,
"df": df,
"error": self.error_message,
"form_data": self.form_data,
"is_cached": self._any_cache_key is not None,
"query": self.query,
"status": self.status,
"stacktrace": stacktrace,
"rowcount": len(df.index) if df is not None else 0,
}
def json_dumps(self, obj, sort_keys=False):
return json.dumps(
obj, default=utils.json_int_dttm_ser, ignore_nan=True, sort_keys=sort_keys
)
def payload_json_and_has_error(self, payload):
has_error = (
payload.get("status") == utils.QueryStatus.FAILED
or payload.get("error") is not None
)
return self.json_dumps(payload), has_error
@property
def data(self):
"""This is the data object serialized to the js layer"""
content = {
"form_data": self.form_data,
"token": self.token,
"viz_name": self.viz_type,
"filter_select_enabled": self.datasource.filter_select_enabled,
}
return content
def get_csv(self):
df = self.get_df()
include_index = not isinstance(df.index, pd.RangeIndex)
return df.to_csv(index=include_index, **config["CSV_EXPORT"])
def get_data(self, df: pd.DataFrame) -> VizData:
return df.to_dict(orient="records")
@property
def json_data(self):
return json.dumps(self.data)
class TableViz(BaseViz):
"""A basic html table that is sortable and searchable"""
viz_type = "table"
verbose_name = _("Table View")
credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
is_timeseries = False
enforce_numerical_metrics = False
def should_be_timeseries(self):
fd = self.form_data
# TODO handle datasource-type-specific code in datasource
conditions_met = (fd.get("granularity") and fd.get("granularity") != "all") or (
fd.get("granularity_sqla") and fd.get("time_grain_sqla")
)
if fd.get("include_time") and not conditions_met:
raise Exception(
_("Pick a granularity in the Time section or " "uncheck 'Include Time'")
)
return fd.get("include_time")
def query_obj(self):
d = super().query_obj()
fd = self.form_data
if fd.get("all_columns") and (
fd.get("groupby") or fd.get("metrics") or fd.get("percent_metrics")
):
raise Exception(
_(
"Choose either fields to [Group By] and [Metrics] and/or "
"[Percentage Metrics], or [Columns], not both"
)
)
sort_by = fd.get("timeseries_limit_metric")
if fd.get("all_columns"):
d["columns"] = fd.get("all_columns")
d["groupby"] = []
order_by_cols = fd.get("order_by_cols") or []
d["orderby"] = [json.loads(t) for t in order_by_cols]
elif sort_by:
sort_by_label = utils.get_metric_name(sort_by)
if sort_by_label not in utils.get_metric_names(d["metrics"]):
d["metrics"] += [sort_by]
d["orderby"] = [(sort_by, not fd.get("order_desc", True))]
# Add all percent metrics that are not already in the list
if "percent_metrics" in fd:
d["metrics"].extend(
m for m in fd["percent_metrics"] or [] if m not in d["metrics"]
)
d["is_timeseries"] = self.should_be_timeseries()
return d
def get_data(self, df: pd.DataFrame) -> VizData:
"""
Transform the query result to the table representation.
:param df: The interim dataframe
:returns: The table visualization data
The interim dataframe comprises the group-by and non-group-by columns and
the union of the metrics representing the non-percent and percent metrics. Note
the percent metrics have yet to be transformed.
"""
non_percent_metric_columns = []
# Transform the data frame to adhere to the UI ordering of the columns and
# metrics whilst simultaneously computing the percentages (via normalization)
# for the percent metrics.
if DTTM_ALIAS in df:
if self.should_be_timeseries():
non_percent_metric_columns.append(DTTM_ALIAS)
else:
del df[DTTM_ALIAS]
non_percent_metric_columns.extend(
self.form_data.get("all_columns") or self.form_data.get("groupby") or []
)
non_percent_metric_columns.extend(
utils.get_metric_names(self.form_data.get("metrics") or [])
)
timeseries_limit_metric = utils.get_metric_name(
self.form_data.get("timeseries_limit_metric")
)
if timeseries_limit_metric:
non_percent_metric_columns.append(timeseries_limit_metric)
percent_metric_columns = utils.get_metric_names(
self.form_data.get("percent_metrics") or []
)
df = pd.concat(
[
df[non_percent_metric_columns],
(
df[percent_metric_columns]
.div(df[percent_metric_columns].sum())
.add_prefix("%")
),
],
axis=1,
)
data = self.handle_js_int_overflow(
dict(records=df.to_dict(orient="records"), columns=list(df.columns))
)
return data
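# Worked example for the percent-metric normalization above (illustrative): a percent
# metric column with values [2, 3, 5] is divided by its own sum (10), yielding
# [0.2, 0.3, 0.5], and add_prefix("%") renames it, so "count" is exposed to the
# frontend as "%count".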
def json_dumps(self, obj, sort_keys=False):
return json.dumps(
obj, default=utils.json_iso_dttm_ser, sort_keys=sort_keys, ignore_nan=True
)
class TimeTableViz(BaseViz):
"""A data table with rich time-series related columns"""
viz_type = "time_table"
verbose_name = _("Time Table View")
credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
is_timeseries = True
def query_obj(self):
d = super().query_obj()
fd = self.form_data
if not fd.get("metrics"):
raise Exception(_("Pick at least one metric"))
if fd.get("groupby") and len(fd.get("metrics")) > 1:
raise Exception(
_("When using 'Group By' you are limited to use a single metric")
)
return d
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
fd = self.form_data
columns = None
values = self.metric_labels
if fd.get("groupby"):
values = self.metric_labels[0]
columns = fd.get("groupby")
pt = df.pivot_table(index=DTTM_ALIAS, columns=columns, values=values)
pt.index = pt.index.map(str)
pt = pt.sort_index()
return dict(
records=pt.to_dict(orient="index"),
columns=list(pt.columns),
is_group_by=len(fd.get("groupby", [])) > 0,
)
class PivotTableViz(BaseViz):
"""A pivot table view, define your rows, columns and metrics"""
viz_type = "pivot_table"
verbose_name = _("Pivot Table")
credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
is_timeseries = False
def query_obj(self):
d = super().query_obj()
groupby = self.form_data.get("groupby")
columns = self.form_data.get("columns")
metrics = self.form_data.get("metrics")
transpose = self.form_data.get("transpose_pivot")
if not columns:
columns = []
if not groupby:
groupby = []
if not groupby:
raise Exception(_("Please choose at least one 'Group by' field "))
if transpose and not columns:
raise Exception(
_(
(
"Please choose at least one 'Columns' field when "
"select 'Transpose Pivot' option"
)
)
)
if not metrics:
raise Exception(_("Please choose at least one metric"))
if set(groupby) & set(columns):
raise Exception(_("Group By' and 'Columns' can't overlap"))
return d
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
if self.form_data.get("granularity") == "all" and DTTM_ALIAS in df:
del df[DTTM_ALIAS]
aggfunc = self.form_data.get("pandas_aggfunc") or "sum"
# Ensure that Pandas's sum function mimics that of SQL.
if aggfunc == "sum":
aggfunc = lambda x: x.sum(min_count=1)
groupby = self.form_data.get("groupby")
columns = self.form_data.get("columns")
if self.form_data.get("transpose_pivot"):
groupby, columns = columns, groupby
metrics = [utils.get_metric_name(m) for m in self.form_data["metrics"]]
df = df.pivot_table(
index=groupby,
columns=columns,
values=metrics,
aggfunc=aggfunc,
margins=self.form_data.get("pivot_margins"),
)
# Re-order the columns adhering to the metric ordering.
df = df[metrics]
# Display metrics side by side with each column
if self.form_data.get("combine_metric"):
df = df.stack(0).unstack()
return dict(
columns=list(df.columns),
html=df.to_html(
na_rep="null",
classes=(
"dataframe table table-striped table-bordered "
"table-condensed table-hover"
).split(" "),
),
)
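# Note on the aggfunc above: pandas' plain sum() collapses an all-null group to 0,
# whereas SQL SUM yields NULL; sum(min_count=1) keeps the result as NaN unless at least
# one non-null value is present, which is why the lambda replaces the bare "sum".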
class MarkupViz(BaseViz):
"""Use html or markdown to create a free form widget"""
viz_type = "markup"
verbose_name = _("Markup")
is_timeseries = False
def query_obj(self):
return None
def get_df(self, query_obj: Optional[Dict[str, Any]] = None) -> pd.DataFrame:
return pd.DataFrame()
def get_data(self, df: pd.DataFrame) -> VizData:
markup_type = self.form_data.get("markup_type")
code = self.form_data.get("code", "")
if markup_type == "markdown":
code = markdown(code)
return dict(html=code, theme_css=get_manifest_files("theme", "css"))
class SeparatorViz(MarkupViz):
"""Use to create section headers in a dashboard, similar to `Markup`"""
viz_type = "separator"
verbose_name = _("Separator")
class WordCloudViz(BaseViz):
"""Build a colorful word cloud
Uses the nice library at:
https://github.com/jasondavies/d3-cloud
"""
viz_type = "word_cloud"
verbose_name = _("Word Cloud")
is_timeseries = False
def query_obj(self):
d = super().query_obj()
d["groupby"] = [self.form_data.get("series")]
return d
class TreemapViz(BaseViz):
"""Tree map visualisation for hierarchical data."""
viz_type = "treemap"
verbose_name = _("Treemap")
credits = '<a href="https://d3js.org">d3.js</a>'
is_timeseries = False
def _nest(self, metric, df):
nlevels = df.index.nlevels
if nlevels == 1:
result = [{"name": n, "value": v} for n, v in zip(df.index, df[metric])]
else:
result = [
{"name": l, "children": self._nest(metric, df.loc[l])}
for l in df.index.levels[0]
]
return result
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
df = df.set_index(self.form_data.get("groupby"))
chart_data = [
{"name": metric, "children": self._nest(metric, df)}
for metric in df.columns
]
return chart_data
class CalHeatmapViz(BaseViz):
"""Calendar heatmap."""
viz_type = "cal_heatmap"
verbose_name = _("Calendar Heatmap")
credits = "<a href=https://github.com/wa0x6e/cal-heatmap>cal-heatmap</a>"
is_timeseries = True
def get_data(self, df: pd.DataFrame) -> VizData:
form_data = self.form_data
data = {}
records = df.to_dict("records")
for metric in self.metric_labels:
values = {}
for obj in records:
v = obj[DTTM_ALIAS]
if hasattr(v, "value"):
v = v.value
values[str(v / 10 ** 9)] = obj.get(metric)
data[metric] = values
start, end = utils.get_since_until(
relative_start=relative_start,
relative_end=relative_end,
time_range=form_data.get("time_range"),
since=form_data.get("since"),
until=form_data.get("until"),
)
if not start or not end:
raise Exception("Please provide both time bounds (Since and Until)")
domain = form_data.get("domain_granularity")
diff_delta = rdelta.relativedelta(end, start)
diff_secs = (end - start).total_seconds()
if domain == "year":
range_ = diff_delta.years + 1
elif domain == "month":
range_ = diff_delta.years * 12 + diff_delta.months + 1
elif domain == "week":
range_ = diff_delta.years * 53 + diff_delta.weeks + 1
elif domain == "day":
range_ = diff_secs // (24 * 60 * 60) + 1 # type: ignore
else:
range_ = diff_secs // (60 * 60) + 1 # type: ignore
return {
"data": data,
"start": start,
"domain": domain,
"subdomain": form_data.get("subdomain_granularity"),
"range": range_,
}
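# Note on the data dict above: v.value is the pandas Timestamp in nanoseconds, so
# dividing by 10 ** 9 keys each metric value by its unix epoch time in seconds, the
# shape consumed by the cal-heatmap frontend.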
def query_obj(self):
d = super().query_obj()
fd = self.form_data
d["metrics"] = fd.get("metrics")
return d
class NVD3Viz(BaseViz):
"""Base class for all nvd3 vizs"""
credits = '<a href="http://nvd3.org/">NVD3.org</a>'
viz_type: Optional[str] = None
verbose_name = "Base NVD3 Viz"
is_timeseries = False
class BoxPlotViz(NVD3Viz):
"""Box plot viz from ND3"""
viz_type = "box_plot"
verbose_name = _("Box Plot")
sort_series = False
is_timeseries = True
def to_series(self, df, classed="", title_suffix=""):
label_sep = " - "
chart_data = []
for index_value, row in zip(df.index, df.to_dict(orient="records")):
if isinstance(index_value, tuple):
index_value = label_sep.join(index_value)
boxes = defaultdict(dict)
for (label, key), value in row.items():
if key == "nanmedian":
key = "Q2"
boxes[label][key] = value
for label, box in boxes.items():
if len(self.form_data.get("metrics")) > 1:
# need to render data labels with metrics
chart_label = label_sep.join([index_value, label])
else:
chart_label = index_value
chart_data.append({"label": chart_label, "values": box})
return chart_data
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
form_data = self.form_data
# conform to NVD3 names
def Q1(series): # need to be named functions - can't use lambdas
return np.nanpercentile(series, 25)
def Q3(series):
return np.nanpercentile(series, 75)
whisker_type = form_data.get("whisker_options")
if whisker_type == "Tukey":
def whisker_high(series):
upper_outer_lim = Q3(series) + 1.5 * (Q3(series) - Q1(series))
return series[series <= upper_outer_lim].max()
def whisker_low(series):
lower_outer_lim = Q1(series) - 1.5 * (Q3(series) - Q1(series))
return series[series >= lower_outer_lim].min()
elif whisker_type == "Min/max (no outliers)":
def whisker_high(series):
return series.max()
def whisker_low(series):
return series.min()
elif " percentiles" in whisker_type: # type: ignore
low, high = whisker_type.replace(" percentiles", "").split( # type: ignore
"/"
)
def whisker_high(series):
return np.nanpercentile(series, int(high))
def whisker_low(series):
return np.nanpercentile(series, int(low))
else:
raise ValueError("Unknown whisker type: {}".format(whisker_type))
def outliers(series):
above = series[series > whisker_high(series)]
below = series[series < whisker_low(series)]
# pandas sometimes doesn't like getting lists back here
return set(above.tolist() + below.tolist())
aggregate = [Q1, np.nanmedian, Q3, whisker_high, whisker_low, outliers]
df = df.groupby(form_data.get("groupby")).agg(aggregate)
chart_data = self.to_series(df)
return chart_data
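# Illustrative numbers for the "Tukey" whiskers above: for the series 1..10,
# np.nanpercentile gives Q1 = 3.25 and Q3 = 7.75, so the outer limits are
# Q1 - 1.5 * IQR = -3.5 and Q3 + 1.5 * IQR = 14.5; the whiskers land on the min/max
# values inside those limits (1 and 10) and outliers() reports anything beyond them.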
class BubbleViz(NVD3Viz):
"""Based on the NVD3 bubble chart"""
viz_type = "bubble"
verbose_name = _("Bubble Chart")
is_timeseries = False
def query_obj(self):
form_data = self.form_data
d = super().query_obj()
d["groupby"] = [form_data.get("entity")]
if form_data.get("series"):
d["groupby"].append(form_data.get("series"))
# dedup groupby if it happens to be the same
d["groupby"] = list(dict.fromkeys(d["groupby"]))
self.x_metric = form_data.get("x")
self.y_metric = form_data.get("y")
self.z_metric = form_data.get("size")
self.entity = form_data.get("entity")
self.series = form_data.get("series") or self.entity
d["row_limit"] = form_data.get("limit")
d["metrics"] = [self.z_metric, self.x_metric, self.y_metric]
if len(set(self.metric_labels)) < 3:
raise Exception(_("Please use 3 different metric labels"))
if not all(d["metrics"] + [self.entity]):
raise Exception(_("Pick a metric for x, y and size"))
return d
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
df["x"] = df[[utils.get_metric_name(self.x_metric)]]
df["y"] = df[[utils.get_metric_name(self.y_metric)]]
df["size"] = df[[utils.get_metric_name(self.z_metric)]]
df["shape"] = "circle"
df["group"] = df[[self.series]]
series: Dict[Any, List[Any]] = defaultdict(list)
for row in df.to_dict(orient="records"):
series[row["group"]].append(row)
chart_data = []
for k, v in series.items():
chart_data.append({"key": k, "values": v})
return chart_data
class BulletViz(NVD3Viz):
"""Based on the NVD3 bullet chart"""
viz_type = "bullet"
verbose_name = _("Bullet Chart")
is_timeseries = False
def query_obj(self):
form_data = self.form_data
d = super().query_obj()
self.metric = form_data.get("metric")
def as_strings(field):
value = form_data.get(field)
return value.split(",") if value else []
def as_floats(field):
return [float(x) for x in as_strings(field)]
self.ranges = as_floats("ranges")
self.range_labels = as_strings("range_labels")
self.markers = as_floats("markers")
self.marker_labels = as_strings("marker_labels")
self.marker_lines = as_floats("marker_lines")
self.marker_line_labels = as_strings("marker_line_labels")
d["metrics"] = [self.metric]
if not self.metric:
raise Exception(_("Pick a metric to display"))
return d
def get_data(self, df: pd.DataFrame) -> VizData:
df["metric"] = df[[utils.get_metric_name(self.metric)]]
values = df["metric"].values
return {
"measures": values.tolist(),
"ranges": self.ranges or [0, values.max() * 1.1],
"rangeLabels": self.range_labels or None,
"markers": self.markers or None,
"markerLabels": self.marker_labels or None,
"markerLines": self.marker_lines or None,
"markerLineLabels": self.marker_line_labels or None,
}
class BigNumberViz(BaseViz):
"""Put emphasis on a single metric with this big number viz"""
viz_type = "big_number"
verbose_name = _("Big Number with Trendline")
credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
is_timeseries = True
def query_obj(self):
d = super().query_obj()
metric = self.form_data.get("metric")
if not metric:
raise Exception(_("Pick a metric!"))
d["metrics"] = [self.form_data.get("metric")]
self.form_data["metric"] = metric
return d
def get_data(self, df: pd.DataFrame) -> VizData:
df = df.pivot_table(
index=DTTM_ALIAS,
columns=[],
values=self.metric_labels,
dropna=False,
aggfunc=np.min, # looking for any (only) value, preserving `None`
)
df = self.apply_rolling(df)
df[DTTM_ALIAS] = df.index
return super().get_data(df)
class BigNumberTotalViz(BaseViz):
"""Put emphasis on a single metric with this big number viz"""
viz_type = "big_number_total"
verbose_name = _("Big Number")
credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
is_timeseries = False
def query_obj(self):
d = super().query_obj()
metric = self.form_data.get("metric")
if not metric:
raise Exception(_("Pick a metric!"))
d["metrics"] = [self.form_data.get("metric")]
self.form_data["metric"] = metric
# Limiting rows is not required as only one cell is returned
d["row_limit"] = None
return d
class NVD3TimeSeriesViz(NVD3Viz):
"""A rich line chart component with tons of options"""
viz_type = "line"
verbose_name = _("Time Series - Line Chart")
sort_series = False
is_timeseries = True
pivot_fill_value: Optional[int] = None
def to_series(self, df, classed="", title_suffix=""):
cols = []
for col in df.columns:
if col == "":
cols.append("N/A")
elif col is None:
cols.append("NULL")
else:
cols.append(col)
df.columns = cols
series = df.to_dict("series")
chart_data = []
for name in df.T.index.tolist():
ys = series[name]
if df[name].dtype.kind not in "biufc":
continue
if isinstance(name, list):
series_title = [str(title) for title in name]
elif isinstance(name, tuple):
series_title = tuple(str(title) for title in name)
else:
series_title = str(name)
if (
isinstance(series_title, (list, tuple))
and len(series_title) > 1
and len(self.metric_labels) == 1
):
# Removing metric from series name if only one metric
series_title = series_title[1:]
if title_suffix:
if isinstance(series_title, str):
series_title = (series_title, title_suffix)
elif isinstance(series_title, (list, tuple)):
series_title = series_title + (title_suffix,)
values = []
non_nan_cnt = 0
for ds in df.index:
if ds in ys:
d = {"x": ds, "y": ys[ds]}
if not np.isnan(ys[ds]):
non_nan_cnt += 1
else:
d = {}
values.append(d)
if non_nan_cnt == 0:
continue
d = {"key": series_title, "values": values}
if classed:
d["classed"] = classed
chart_data.append(d)
return chart_data
def process_data(self, df: pd.DataFrame, aggregate: bool = False) -> VizData:
fd = self.form_data
if fd.get("granularity") == "all":
raise Exception(_("Pick a time granularity for your time series"))
if df.empty:
return df
if aggregate:
df = df.pivot_table(
index=DTTM_ALIAS,
columns=fd.get("groupby"),
values=self.metric_labels,
fill_value=0,
aggfunc=sum,
)
else:
df = df.pivot_table(
index=DTTM_ALIAS,
columns=fd.get("groupby"),
values=self.metric_labels,
fill_value=self.pivot_fill_value,
)
rule = fd.get("resample_rule")
method = fd.get("resample_method")
if rule and method:
df = getattr(df.resample(rule), method)()
if self.sort_series:
dfs = df.sum()
dfs.sort_values(ascending=False, inplace=True)
df = df[dfs.index]
df = self.apply_rolling(df)
if fd.get("contribution"):
dft = df.T
df = (dft / dft.sum()).T
return df
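# Note on the "contribution" branch above: transposing, dividing by the per-column sums
# and transposing back expresses each series as its share of the total at every
# timestamp, so the plotted values at any point in time add up to 1.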
def run_extra_queries(self):
fd = self.form_data
time_compare = fd.get("time_compare") or []
# backwards compatibility
if not isinstance(time_compare, list):
time_compare = [time_compare]
for option in time_compare:
query_object = self.query_obj()
delta = utils.parse_past_timedelta(option)
query_object["inner_from_dttm"] = query_object["from_dttm"]
query_object["inner_to_dttm"] = query_object["to_dttm"]
if not query_object["from_dttm"] or not query_object["to_dttm"]:
raise Exception(
_(
"`Since` and `Until` time bounds should be specified "
"when using the `Time Shift` feature."
)
)
query_object["from_dttm"] -= delta
query_object["to_dttm"] -= delta
df2 = self.get_df_payload(query_object, time_compare=option).get("df")
if df2 is not None and DTTM_ALIAS in df2:
label = "{} offset".format(option)
df2[DTTM_ALIAS] += delta
df2 = self.process_data(df2)
self._extra_chart_data.append((label, df2))
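# Example for the time-shift handling above (illustrative): with time_compare=["1 week"],
# a second query runs with from_dttm/to_dttm moved back seven days, and the returned
# frame's DTTM_ALIAS is shifted forward by the same delta so the "1 week offset" series
# lines up with the primary series on the time axis.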
def get_data(self, df: pd.DataFrame) -> VizData:
fd = self.form_data
comparison_type = fd.get("comparison_type") or "values"
df = self.process_data(df)
if comparison_type == "values":
# Filter out series with all NaN
chart_data = self.to_series(df.dropna(axis=1, how="all"))
for i, (label, df2) in enumerate(self._extra_chart_data):
chart_data.extend(
self.to_series(
df2, classed="time-shift-{}".format(i), title_suffix=label
)
)
else:
chart_data = []
for i, (label, df2) in enumerate(self._extra_chart_data):
# reindex df2 onto df's index (interpolating over the union of both indexes)
combined_index = df.index.union(df2.index)
df2 = (
df2.reindex(combined_index)
.interpolate(method="time")
.reindex(df.index)
)
if comparison_type == "absolute":
diff = df - df2
elif comparison_type == "percentage":
diff = (df - df2) / df2
elif comparison_type == "ratio":
diff = df / df2
else:
raise Exception(
"Invalid `comparison_type`: {0}".format(comparison_type)
)
# remove leading/trailing NaNs from the time shift difference
diff = diff[diff.first_valid_index() : diff.last_valid_index()]
chart_data.extend(
self.to_series(
diff, classed="time-shift-{}".format(i), title_suffix=label
)
)
if not self.sort_series:
chart_data = sorted(chart_data, key=lambda x: tuple(x["key"]))
return chart_data
class MultiLineViz(NVD3Viz):
"""Pile on multiple line charts"""
viz_type = "line_multi"
verbose_name = _("Time Series - Multiple Line Charts")
is_timeseries = True
def query_obj(self):
return None
def get_data(self, df: pd.DataFrame) -> VizData:
fd = self.form_data
# Late imports to avoid circular import issues
from superset.models.slice import Slice
from superset import db
slice_ids1 = fd.get("line_charts")
slices1 = db.session.query(Slice).filter(Slice.id.in_(slice_ids1)).all()
slice_ids2 = fd.get("line_charts_2")
slices2 = db.session.query(Slice).filter(Slice.id.in_(slice_ids2)).all()
return {
"slices": {
"axis1": [slc.data for slc in slices1],
"axis2": [slc.data for slc in slices2],
}
}
class NVD3DualLineViz(NVD3Viz):
"""A rich line chart with dual axis"""
viz_type = "dual_line"
verbose_name = _("Time Series - Dual Axis Line Chart")
sort_series = False
is_timeseries = True
def query_obj(self):
d = super().query_obj()
m1 = self.form_data.get("metric")
m2 = self.form_data.get("metric_2")
d["metrics"] = [m1, m2]
if not m1:
raise Exception(_("Pick a metric for left axis!"))
if not m2:
raise Exception(_("Pick a metric for right axis!"))
if m1 == m2:
raise Exception(
_("Please choose different metrics" " on left and right axis")
)
return d
def to_series(self, df, classed=""):
cols = []
for col in df.columns:
if col == "":
cols.append("N/A")
elif col is None:
cols.append("NULL")
else:
cols.append(col)
df.columns = cols
series = df.to_dict("series")
chart_data = []
metrics = [self.form_data.get("metric"), self.form_data.get("metric_2")]
for i, m in enumerate(metrics):
m = utils.get_metric_name(m)
ys = series[m]
if df[m].dtype.kind not in "biufc":
continue
series_title = m
d = {
"key": series_title,
"classed": classed,
"values": [
{"x": ds, "y": ys[ds] if ds in ys else None} for ds in df.index
],
"yAxis": i + 1,
"type": "line",
}
chart_data.append(d)
return chart_data
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
fd = self.form_data
if self.form_data.get("granularity") == "all":
raise Exception(_("Pick a time granularity for your time series"))
metric = utils.get_metric_name(fd.get("metric"))
metric_2 = utils.get_metric_name(fd.get("metric_2"))
df = df.pivot_table(index=DTTM_ALIAS, values=[metric, metric_2])
chart_data = self.to_series(df)
return chart_data
class NVD3TimeSeriesBarViz(NVD3TimeSeriesViz):
"""A bar chart where the x axis is time"""
viz_type = "bar"
sort_series = True
verbose_name = _("Time Series - Bar Chart")
class NVD3TimePivotViz(NVD3TimeSeriesViz):
"""Time Series - Periodicity Pivot"""
viz_type = "time_pivot"
sort_series = True
verbose_name = _("Time Series - Period Pivot")
def query_obj(self):
d = super().query_obj()
d["metrics"] = [self.form_data.get("metric")]
return d
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
fd = self.form_data
df = self.process_data(df)
freq = to_offset(fd.get("freq"))
try:
freq = type(freq)(freq.n, normalize=True, **freq.kwds)
except ValueError:
freq = type(freq)(freq.n, **freq.kwds)
df.index.name = None
df[DTTM_ALIAS] = df.index.map(freq.rollback)
df["ranked"] = df[DTTM_ALIAS].rank(method="dense", ascending=False) - 1
df.ranked = df.ranked.map(int)
df["series"] = "-" + df.ranked.map(str)
df["series"] = df["series"].str.replace("-0", "current")
rank_lookup = {
row["series"]: row["ranked"] for row in df.to_dict(orient="records")
}
max_ts = df[DTTM_ALIAS].max()
max_rank = df["ranked"].max()
df[DTTM_ALIAS] = df.index + (max_ts - df[DTTM_ALIAS])
df = df.pivot_table(
index=DTTM_ALIAS,
columns="series",
values=utils.get_metric_name(fd.get("metric")),
)
chart_data = self.to_series(df)
for serie in chart_data:
serie["rank"] = rank_lookup[serie["key"]]
serie["perc"] = 1 - (serie["rank"] / (max_rank + 1))
return chart_data
class NVD3CompareTimeSeriesViz(NVD3TimeSeriesViz):
"""A line chart component where you can compare the % change over time"""
viz_type = "compare"
verbose_name = _("Time Series - Percent Change")
class NVD3TimeSeriesStackedViz(NVD3TimeSeriesViz):
"""A rich stack area chart"""
viz_type = "area"
verbose_name = _("Time Series - Stacked")
sort_series = True
pivot_fill_value = 0
class DistributionPieViz(NVD3Viz):
"""Annoy visualization snobs with this controversial pie chart"""
viz_type = "pie"
verbose_name = _("Distribution - NVD3 - Pie Chart")
is_timeseries = False
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
metric = self.metric_labels[0]
df = df.pivot_table(index=self.groupby, values=[metric])
df.sort_values(by=metric, ascending=False, inplace=True)
df = df.reset_index()
df.columns = ["x", "y"]
return df.to_dict(orient="records")
class HistogramViz(BaseViz):
"""Histogram"""
viz_type = "histogram"
verbose_name = _("Histogram")
is_timeseries = False
def query_obj(self):
"""Returns the query object for this visualization"""
d = super().query_obj()
d["row_limit"] = self.form_data.get("row_limit", int(config["VIZ_ROW_LIMIT"]))
numeric_columns = self.form_data.get("all_columns_x")
if numeric_columns is None:
raise Exception(_("Must have at least one numeric column specified"))
self.columns = numeric_columns
d["columns"] = numeric_columns + self.groupby
# override groupby entry to avoid aggregation
d["groupby"] = []
return d
def labelify(self, keys, column):
if isinstance(keys, str):
keys = (keys,)
# removing undesirable characters
labels = [re.sub(r"\W+", r"_", k) for k in keys]
if len(self.columns) > 1 or not self.groupby:
# Only show numeric column in label if there are many
labels = [column] + labels
return "__".join(labels)
def get_data(self, df: pd.DataFrame) -> VizData:
"""Returns the chart data"""
if df.empty:
return None
chart_data = []
if len(self.groupby) > 0:
groups = df.groupby(self.groupby)
else:
groups = [((), df)]
for keys, data in groups:
chart_data.extend(
[
{
"key": self.labelify(keys, column),
"values": data[column].tolist(),
}
for column in self.columns
]
)
return chart_data
class DistributionBarViz(DistributionPieViz):
"""A good old bar chart"""
viz_type = "dist_bar"
verbose_name = _("Distribution - Bar Chart")
is_timeseries = False
def query_obj(self):
d = super().query_obj()
fd = self.form_data
if len(d["groupby"]) < len(fd.get("groupby") or []) + len(
fd.get("columns") or []
):
raise Exception(_("Can't have overlap between Series and Breakdowns"))
if not fd.get("metrics"):
raise Exception(_("Pick at least one metric"))
if not fd.get("groupby"):
raise Exception(_("Pick at least one field for [Series]"))
return d
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
fd = self.form_data
metrics = self.metric_labels
columns = fd.get("columns") or []
# pandas will throw away nulls when grouping/pivoting,
# so we substitute NULL_STRING for any nulls in the necessary columns
filled_cols = self.groupby + columns
df[filled_cols] = df[filled_cols].fillna(value=NULL_STRING)
row = df.groupby(self.groupby).sum()[metrics[0]].copy()
row.sort_values(ascending=False, inplace=True)
pt = df.pivot_table(index=self.groupby, columns=columns, values=metrics)
if fd.get("contribution"):
pt = pt.T
pt = (pt / pt.sum()).T
pt = pt.reindex(row.index)
chart_data = []
for name, ys in pt.items():
if pt[name].dtype.kind not in "biufc" or name in self.groupby:
continue
if isinstance(name, str):
series_title = name
else:
offset = 0 if len(metrics) > 1 else 1
series_title = ", ".join([str(s) for s in name[offset:]])
values = []
for i, v in ys.items():
x = i
if isinstance(x, (tuple, list)):
x = ", ".join([str(s) for s in x])
else:
x = str(x)
values.append({"x": x, "y": v})
d = {"key": series_title, "values": values}
chart_data.append(d)
return chart_data
class SunburstViz(BaseViz):
"""A multi level sunburst chart"""
viz_type = "sunburst"
verbose_name = _("Sunburst")
is_timeseries = False
credits = (
"Kerry Rodden "
'@<a href="https://bl.ocks.org/kerryrodden/7090426">bl.ocks.org</a>'
)
def get_data(self, df: pd.DataFrame) -> VizData:
fd = self.form_data
cols = fd.get("groupby") or []
cols.extend(["m1", "m2"])
metric = utils.get_metric_name(fd.get("metric"))
secondary_metric = utils.get_metric_name(fd.get("secondary_metric"))
if metric == secondary_metric or secondary_metric is None:
df.rename(columns={df.columns[-1]: "m1"}, inplace=True)
df["m2"] = df["m1"]
else:
df.rename(columns={df.columns[-2]: "m1"}, inplace=True)
df.rename(columns={df.columns[-1]: "m2"}, inplace=True)
# Re-order the columns as the query result set column ordering may differ from
# that listed in the hierarchy.
df = df[cols]
return df.to_numpy().tolist()
def query_obj(self):
qry = super().query_obj()
fd = self.form_data
qry["metrics"] = [fd["metric"]]
secondary_metric = fd.get("secondary_metric")
if secondary_metric and secondary_metric != fd["metric"]:
qry["metrics"].append(secondary_metric)
return qry
class SankeyViz(BaseViz):
"""A Sankey diagram that requires a parent-child dataset"""
viz_type = "sankey"
verbose_name = _("Sankey")
is_timeseries = False
credits = '<a href="https://www.npmjs.com/package/d3-sankey">d3-sankey on npm</a>'
def query_obj(self):
qry = super().query_obj()
if len(qry["groupby"]) != 2:
raise Exception(_("Pick exactly 2 columns as [Source / Target]"))
qry["metrics"] = [self.form_data["metric"]]
return qry
def get_data(self, df: pd.DataFrame) -> VizData:
df.columns = ["source", "target", "value"]
df["source"] = df["source"].astype(str)
df["target"] = df["target"].astype(str)
recs = df.to_dict(orient="records")
hierarchy: Dict[str, Set[str]] = defaultdict(set)
for row in recs:
hierarchy[row["source"]].add(row["target"])
def find_cycle(g):
"""Whether there's a cycle in a directed graph"""
path = set()
def visit(vertex):
path.add(vertex)
for neighbour in g.get(vertex, ()):
if neighbour in path or visit(neighbour):
return (vertex, neighbour)
path.remove(vertex)
for v in g:
cycle = visit(v)
if cycle:
return cycle
cycle = find_cycle(hierarchy)
if cycle:
raise Exception(
_(
"There's a loop in your Sankey, please provide a tree. "
"Here's a faulty link: {}"
).format(cycle)
)
return recs
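# Note on find_cycle above: a depth-first search that keeps the current path in a set;
# reaching a vertex already on the path means the source/target pairs form a loop, and
# one offending edge (e.g. from a -> b -> a) is surfaced in the error message.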
class DirectedForceViz(BaseViz):
"""An animated directed force layout graph visualization"""
viz_type = "directed_force"
verbose_name = _("Directed Force Layout")
credits = 'd3noob @<a href="http://bl.ocks.org/d3noob/5141278">bl.ocks.org</a>'
is_timeseries = False
def query_obj(self):
qry = super().query_obj()
if len(self.form_data["groupby"]) != 2:
raise Exception(_("Pick exactly 2 columns to 'Group By'"))
qry["metrics"] = [self.form_data["metric"]]
return qry
def get_data(self, df: pd.DataFrame) -> VizData:
df.columns = ["source", "target", "value"]
return df.to_dict(orient="records")
class ChordViz(BaseViz):
"""A Chord diagram"""
viz_type = "chord"
verbose_name = _("Directed Force Layout")
credits = '<a href="https://github.com/d3/d3-chord">Bostock</a>'
is_timeseries = False
def query_obj(self):
qry = super().query_obj()
fd = self.form_data
qry["groupby"] = [fd.get("groupby"), fd.get("columns")]
qry["metrics"] = [fd.get("metric")]
return qry
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
df.columns = ["source", "target", "value"]
# Preparing a symmetrical matrix like d3.chords calls for
nodes = list(set(df["source"]) | set(df["target"]))
matrix = {}
for source, target in product(nodes, nodes):
matrix[(source, target)] = 0
for source, target, value in df.to_records(index=False):
matrix[(source, target)] = value
m = [[matrix[(n1, n2)] for n1 in nodes] for n2 in nodes]
return {"nodes": list(nodes), "matrix": m}
class CountryMapViz(BaseViz):
"""A country centric"""
viz_type = "country_map"
verbose_name = _("Country Map")
is_timeseries = False
credits = "From bl.ocks.org By john-guerra"
def query_obj(self):
qry = super().query_obj()
qry["metrics"] = [self.form_data["metric"]]
qry["groupby"] = [self.form_data["entity"]]
return qry
def get_data(self, df: pd.DataFrame) -> VizData:
fd = self.form_data
cols = [fd.get("entity")]
metric = self.metric_labels[0]
cols += [metric]
ndf = df[cols]
df = ndf
df.columns = ["country_id", "metric"]
d = df.to_dict(orient="records")
return d
class WorldMapViz(BaseViz):
"""A country centric world map"""
viz_type = "world_map"
verbose_name = _("World Map")
is_timeseries = False
credits = 'datamaps on <a href="https://www.npmjs.com/package/datamaps">npm</a>'
def query_obj(self):
qry = super().query_obj()
qry["groupby"] = [self.form_data["entity"]]
return qry
def get_data(self, df: pd.DataFrame) -> VizData:
from superset.examples import countries
fd = self.form_data
cols = [fd.get("entity")]
metric = utils.get_metric_name(fd.get("metric"))
secondary_metric = utils.get_metric_name(fd.get("secondary_metric"))
columns = ["country", "m1", "m2"]
if metric == secondary_metric:
ndf = df[cols]
ndf["m1"] = df[metric]
ndf["m2"] = ndf["m1"]
else:
if secondary_metric:
cols += [metric, secondary_metric]
else:
cols += [metric]
columns = ["country", "m1"]
ndf = df[cols]
df = ndf
df.columns = columns
d = df.to_dict(orient="records")
for row in d:
country = None
if isinstance(row["country"], str):
country = countries.get(fd.get("country_fieldtype"), row["country"])
if country:
row["country"] = country["cca3"]
row["latitude"] = country["lat"]
row["longitude"] = country["lng"]
row["name"] = country["name"]
else:
row["country"] = "XXX"
return d
class FilterBoxViz(BaseViz):
"""A multi filter, multi-choice filter box to make dashboards interactive"""
viz_type = "filter_box"
verbose_name = _("Filters")
is_timeseries = False
credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
cache_type = "get_data"
filter_row_limit = 1000
def query_obj(self):
return None
def run_extra_queries(self):
qry = super().query_obj()
filters = self.form_data.get("filter_configs") or []
qry["row_limit"] = self.filter_row_limit
self.dataframes = {}
for flt in filters:
col = flt.get("column")
if not col:
raise Exception(
_("Invalid filter configuration, please select a column")
)
qry["groupby"] = [col]
metric = flt.get("metric")
qry["metrics"] = [metric] if metric else []
df = self.get_df_payload(query_obj=qry).get("df")
self.dataframes[col] = df
def get_data(self, df: pd.DataFrame) -> VizData:
filters = self.form_data.get("filter_configs") or []
d = {}
for flt in filters:
col = flt.get("column")
metric = flt.get("metric")
df = self.dataframes.get(col)
if df is not None:
if metric:
df = df.sort_values(
utils.get_metric_name(metric), ascending=flt.get("asc")
)
d[col] = [
{"id": row[0], "text": row[0], "metric": row[1]}
for row in df.itertuples(index=False)
]
else:
df = df.sort_values(col, ascending=flt.get("asc"))
d[col] = [
{"id": row[0], "text": row[0]}
for row in df.itertuples(index=False)
]
return d
class IFrameViz(BaseViz):
"""You can squeeze just about anything in this iFrame component"""
viz_type = "iframe"
verbose_name = _("iFrame")
credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
is_timeseries = False
def query_obj(self):
return None
def get_df(self, query_obj: Optional[Dict[str, Any]] = None) -> pd.DataFrame:
return pd.DataFrame()
def get_data(self, df: pd.DataFrame) -> VizData:
return {"iframe": True}
class ParallelCoordinatesViz(BaseViz):
"""Interactive parallel coordinate implementation
Uses this amazing javascript library
https://github.com/syntagmatic/parallel-coordinates
"""
viz_type = "para"
verbose_name = _("Parallel Coordinates")
credits = (
'<a href="https://syntagmatic.github.io/parallel-coordinates/">'
"Syntagmatic's library</a>"
)
is_timeseries = False
def query_obj(self):
d = super().query_obj()
fd = self.form_data
d["groupby"] = [fd.get("series")]
return d
def get_data(self, df: pd.DataFrame) -> VizData:
return df.to_dict(orient="records")
class HeatmapViz(BaseViz):
"""A nice heatmap visualization that support high density through canvas"""
viz_type = "heatmap"
verbose_name = _("Heatmap")
is_timeseries = False
credits = (
'inspired from mbostock @<a href="http://bl.ocks.org/mbostock/3074470">'
"bl.ocks.org</a>"
)
def query_obj(self):
d = super().query_obj()
fd = self.form_data
d["metrics"] = [fd.get("metric")]
d["groupby"] = [fd.get("all_columns_x"), fd.get("all_columns_y")]
return d
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
fd = self.form_data
x = fd.get("all_columns_x")
y = fd.get("all_columns_y")
v = self.metric_labels[0]
if x == y:
df.columns = ["x", "y", "v"]
else:
df = df[[x, y, v]]
df.columns = ["x", "y", "v"]
norm = fd.get("normalize_across")
overall = False
max_ = df.v.max()
min_ = df.v.min()
if norm == "heatmap":
overall = True
else:
gb = df.groupby(norm, group_keys=False)
if len(gb) <= 1:
overall = True
else:
df["perc"] = gb.apply(
lambda x: (x.v - x.v.min()) / (x.v.max() - x.v.min())
)
df["rank"] = gb.apply(lambda x: x.v.rank(pct=True))
if overall:
df["perc"] = (df.v - min_) / (max_ - min_)
df["rank"] = df.v.rank(pct=True)
return {"records": df.to_dict(orient="records"), "extents": [min_, max_]}
class HorizonViz(NVD3TimeSeriesViz):
"""Horizon chart
https://www.npmjs.com/package/d3-horizon-chart
"""
viz_type = "horizon"
verbose_name = _("Horizon Charts")
credits = (
'<a href="https://www.npmjs.com/package/d3-horizon-chart">'
"d3-horizon-chart</a>"
)
class MapboxViz(BaseViz):
"""Rich maps made with Mapbox"""
viz_type = "mapbox"
verbose_name = _("Mapbox")
is_timeseries = False
credits = "<a href=https://www.mapbox.com/mapbox-gl-js/api/>Mapbox GL JS</a>"
def query_obj(self):
d = super().query_obj()
fd = self.form_data
label_col = fd.get("mapbox_label")
if not fd.get("groupby"):
if fd.get("all_columns_x") is None or fd.get("all_columns_y") is None:
raise Exception(_("[Longitude] and [Latitude] must be set"))
d["columns"] = [fd.get("all_columns_x"), fd.get("all_columns_y")]
if label_col and len(label_col) >= 1:
if label_col[0] == "count":
raise Exception(
_(
"Must have a [Group By] column to have 'count' as the "
+ "[Label]"
)
)
d["columns"].append(label_col[0])
if fd.get("point_radius") != "Auto":
d["columns"].append(fd.get("point_radius"))
d["columns"] = list(set(d["columns"]))
else:
# Ensuring columns chosen are all in group by
if (
label_col
and len(label_col) >= 1
and label_col[0] != "count"
and label_col[0] not in fd.get("groupby")
):
raise Exception(_("Choice of [Label] must be present in [Group By]"))
if fd.get("point_radius") != "Auto" and fd.get(
"point_radius"
) not in fd.get("groupby"):
raise Exception(
_("Choice of [Point Radius] must be present in [Group By]")
)
if fd.get("all_columns_x") not in fd.get("groupby") or fd.get(
"all_columns_y"
) not in fd.get("groupby"):
raise Exception(
_(
"[Longitude] and [Latitude] columns must be present in "
+ "[Group By]"
)
)
return d
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
fd = self.form_data
label_col = fd.get("mapbox_label")
has_custom_metric = label_col is not None and len(label_col) > 0
metric_col = [None] * len(df.index)
if has_custom_metric:
if label_col[0] == fd.get("all_columns_x"): # type: ignore
metric_col = df[fd.get("all_columns_x")]
elif label_col[0] == fd.get("all_columns_y"): # type: ignore
metric_col = df[fd.get("all_columns_y")]
else:
metric_col = df[label_col[0]] # type: ignore
point_radius_col = (
[None] * len(df.index)
if fd.get("point_radius") == "Auto"
else df[fd.get("point_radius")]
)
# limiting geo precision as long decimal values trigger issues
# around json-bignumber in Mapbox
GEO_PRECISION = 10
# using geoJSON formatting
geo_json = {
"type": "FeatureCollection",
"features": [
{
"type": "Feature",
"properties": {"metric": metric, "radius": point_radius},
"geometry": {
"type": "Point",
"coordinates": [
round(lon, GEO_PRECISION),
round(lat, GEO_PRECISION),
],
},
}
for lon, lat, metric, point_radius in zip(
df[fd.get("all_columns_x")],
df[fd.get("all_columns_y")],
metric_col,
point_radius_col,
)
],
}
x_series, y_series = df[fd.get("all_columns_x")], df[fd.get("all_columns_y")]
south_west = [x_series.min(), y_series.min()]
north_east = [x_series.max(), y_series.max()]
return {
"geoJSON": geo_json,
"hasCustomMetric": has_custom_metric,
"mapboxApiKey": config["MAPBOX_API_KEY"],
"mapStyle": fd.get("mapbox_style"),
"aggregatorName": fd.get("pandas_aggfunc"),
"clusteringRadius": fd.get("clustering_radius"),
"pointRadiusUnit": fd.get("point_radius_unit"),
"globalOpacity": fd.get("global_opacity"),
"bounds": [south_west, north_east],
"renderWhileDragging": fd.get("render_while_dragging"),
"tooltip": fd.get("rich_tooltip"),
"color": fd.get("mapbox_color"),
}
class DeckGLMultiLayer(BaseViz):
"""Pile on multiple DeckGL layers"""
viz_type = "deck_multi"
verbose_name = _("Deck.gl - Multiple Layers")
is_timeseries = False
credits = '<a href="https://uber.github.io/deck.gl/">deck.gl</a>'
def query_obj(self):
return None
def get_data(self, df: pd.DataFrame) -> VizData:
fd = self.form_data
# Late imports to avoid circular import issues
from superset.models.slice import Slice
from superset import db
slice_ids = fd.get("deck_slices")
slices = db.session.query(Slice).filter(Slice.id.in_(slice_ids)).all()
return {
"mapboxApiKey": config["MAPBOX_API_KEY"],
"slices": [slc.data for slc in slices],
}
class BaseDeckGLViz(BaseViz):
"""Base class for deck.gl visualizations"""
is_timeseries = False
credits = '<a href="https://uber.github.io/deck.gl/">deck.gl</a>'
spatial_control_keys: List[str] = []
def get_metrics(self):
self.metric = self.form_data.get("size")
return [self.metric] if self.metric else []
def process_spatial_query_obj(self, key, group_by):
group_by.extend(self.get_spatial_columns(key))
def get_spatial_columns(self, key):
spatial = self.form_data.get(key)
if spatial is None:
raise ValueError(_("Bad spatial key"))
if spatial.get("type") == "latlong":
return [spatial.get("lonCol"), spatial.get("latCol")]
elif spatial.get("type") == "delimited":
return [spatial.get("lonlatCol")]
elif spatial.get("type") == "geohash":
return [spatial.get("geohashCol")]
@staticmethod
def parse_coordinates(s):
if not s:
return None
try:
p = Point(s)
return (p.latitude, p.longitude) # pylint: disable=no-member
except Exception:
raise SpatialException(_("Invalid spatial point encountered: %s" % s))
@staticmethod
def reverse_geohash_decode(geohash_code):
lat, lng = geohash.decode(geohash_code)
return (lng, lat)
@staticmethod
def reverse_latlong(df, key):
df[key] = [tuple(reversed(o)) for o in df[key] if isinstance(o, (list, tuple))]
def process_spatial_data_obj(self, key, df):
spatial = self.form_data.get(key)
if spatial is None:
raise ValueError(_("Bad spatial key"))
if spatial.get("type") == "latlong":
df[key] = list(
zip(
pd.to_numeric(df[spatial.get("lonCol")], errors="coerce"),
pd.to_numeric(df[spatial.get("latCol")], errors="coerce"),
)
)
elif spatial.get("type") == "delimited":
lon_lat_col = spatial.get("lonlatCol")
df[key] = df[lon_lat_col].apply(self.parse_coordinates)
del df[lon_lat_col]
elif spatial.get("type") == "geohash":
df[key] = df[spatial.get("geohashCol")].map(self.reverse_geohash_decode)
del df[spatial.get("geohashCol")]
if spatial.get("reverseCheckbox"):
self.reverse_latlong(df, key)
if df.get(key) is None:
raise NullValueException(
_(
"Encountered invalid NULL spatial entry, \
please consider filtering those out"
)
)
return df
def add_null_filters(self):
fd = self.form_data
spatial_columns = set()
for key in self.spatial_control_keys:
for column in self.get_spatial_columns(key):
spatial_columns.add(column)
if fd.get("adhoc_filters") is None:
fd["adhoc_filters"] = []
line_column = fd.get("line_column")
if line_column:
spatial_columns.add(line_column)
for column in sorted(spatial_columns):
filter_ = to_adhoc({"col": column, "op": "IS NOT NULL", "val": ""})
fd["adhoc_filters"].append(filter_)
def query_obj(self):
fd = self.form_data
# add NULL filters
if fd.get("filter_nulls", True):
self.add_null_filters()
d = super().query_obj()
gb = []
for key in self.spatial_control_keys:
self.process_spatial_query_obj(key, gb)
if fd.get("dimension"):
gb += [fd.get("dimension")]
if fd.get("js_columns"):
gb += fd.get("js_columns")
metrics = self.get_metrics()
gb = list(set(gb))
if metrics:
d["groupby"] = gb
d["metrics"] = metrics
d["columns"] = []
else:
d["columns"] = gb
return d
def get_js_columns(self, d):
cols = self.form_data.get("js_columns") or []
return {col: d.get(col) for col in cols}
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
# Processing spatial info
for key in self.spatial_control_keys:
df = self.process_spatial_data_obj(key, df)
features = []
for d in df.to_dict(orient="records"):
feature = self.get_properties(d)
extra_props = self.get_js_columns(d)
if extra_props:
feature["extraProps"] = extra_props
features.append(feature)
return {
"features": features,
"mapboxApiKey": config["MAPBOX_API_KEY"],
"metricLabels": self.metric_labels,
}
def get_properties(self, d):
raise NotImplementedError()
class DeckScatterViz(BaseDeckGLViz):
"""deck.gl's ScatterLayer"""
viz_type = "deck_scatter"
verbose_name = _("Deck.gl - Scatter plot")
spatial_control_keys = ["spatial"]
is_timeseries = True
def query_obj(self):
fd = self.form_data
self.is_timeseries = bool(fd.get("time_grain_sqla") or fd.get("granularity"))
self.point_radius_fixed = fd.get("point_radius_fixed") or {
"type": "fix",
"value": 500,
}
return super().query_obj()
def get_metrics(self):
self.metric = None
if self.point_radius_fixed.get("type") == "metric":
self.metric = self.point_radius_fixed.get("value")
return [self.metric]
return None
def get_properties(self, d):
return {
"metric": d.get(self.metric_label),
"radius": self.fixed_value
if self.fixed_value
else d.get(self.metric_label),
"cat_color": d.get(self.dim) if self.dim else None,
"position": d.get("spatial"),
DTTM_ALIAS: d.get(DTTM_ALIAS),
}
def get_data(self, df: pd.DataFrame) -> VizData:
fd = self.form_data
self.metric_label = utils.get_metric_name(self.metric) if self.metric else None
self.point_radius_fixed = fd.get("point_radius_fixed")
self.fixed_value = None
self.dim = self.form_data.get("dimension")
if self.point_radius_fixed and self.point_radius_fixed.get("type") != "metric":
self.fixed_value = self.point_radius_fixed.get("value")
return super().get_data(df)
class DeckScreengrid(BaseDeckGLViz):
"""deck.gl's ScreenGridLayer"""
viz_type = "deck_screengrid"
verbose_name = _("Deck.gl - Screen Grid")
spatial_control_keys = ["spatial"]
is_timeseries = True
def query_obj(self):
fd = self.form_data
self.is_timeseries = fd.get("time_grain_sqla") or fd.get("granularity")
return super().query_obj()
def get_properties(self, d):
return {
"position": d.get("spatial"),
"weight": d.get(self.metric_label) or 1,
"__timestamp": d.get(DTTM_ALIAS) or d.get("__time"),
}
def get_data(self, df: pd.DataFrame) -> VizData:
self.metric_label = utils.get_metric_name(self.metric)
return super().get_data(df)
class DeckGrid(BaseDeckGLViz):
"""deck.gl's DeckLayer"""
viz_type = "deck_grid"
verbose_name = _("Deck.gl - 3D Grid")
spatial_control_keys = ["spatial"]
def get_properties(self, d):
return {"position": d.get("spatial"), "weight": d.get(self.metric_label) or 1}
def get_data(self, df: pd.DataFrame) -> VizData:
self.metric_label = utils.get_metric_name(self.metric)
return super().get_data(df)
def geohash_to_json(geohash_code):
p = geohash.bbox(geohash_code)
return [
[p.get("w"), p.get("n")],
[p.get("e"), p.get("n")],
[p.get("e"), p.get("s")],
[p.get("w"), p.get("s")],
[p.get("w"), p.get("n")],
]
class DeckPathViz(BaseDeckGLViz):
"""deck.gl's PathLayer"""
viz_type = "deck_path"
verbose_name = _("Deck.gl - Paths")
deck_viz_key = "path"
is_timeseries = True
deser_map = {
"json": json.loads,
"polyline": polyline.decode,
"geohash": geohash_to_json,
}
def query_obj(self):
fd = self.form_data
self.is_timeseries = fd.get("time_grain_sqla") or fd.get("granularity")
d = super().query_obj()
self.metric = fd.get("metric")
line_col = fd.get("line_column")
if d["metrics"]:
self.has_metrics = True
d["groupby"].append(line_col)
else:
self.has_metrics = False
d["columns"].append(line_col)
return d
def get_properties(self, d):
fd = self.form_data
line_type = fd.get("line_type")
deser = self.deser_map[line_type]
line_column = fd.get("line_column")
path = deser(d[line_column])
if fd.get("reverse_long_lat"):
path = [(o[1], o[0]) for o in path]
d[self.deck_viz_key] = path
if line_type != "geohash":
del d[line_column]
d["__timestamp"] = d.get(DTTM_ALIAS) or d.get("__time")
return d
def get_data(self, df: pd.DataFrame) -> VizData:
self.metric_label = utils.get_metric_name(self.metric)
return super().get_data(df)
class DeckPolygon(DeckPathViz):
"""deck.gl's Polygon Layer"""
viz_type = "deck_polygon"
deck_viz_key = "polygon"
verbose_name = _("Deck.gl - Polygon")
def query_obj(self):
fd = self.form_data
self.elevation = fd.get("point_radius_fixed") or {"type": "fix", "value": 500}
return super().query_obj()
def get_metrics(self):
metrics = [self.form_data.get("metric")]
if self.elevation.get("type") == "metric":
metrics.append(self.elevation.get("value"))
return [metric for metric in metrics if metric]
def get_properties(self, d):
super().get_properties(d)
fd = self.form_data
elevation = fd["point_radius_fixed"]["value"]
type_ = fd["point_radius_fixed"]["type"]
d["elevation"] = (
d.get(utils.get_metric_name(elevation)) if type_ == "metric" else elevation
)
return d
class DeckHex(BaseDeckGLViz):
"""deck.gl's DeckLayer"""
viz_type = "deck_hex"
verbose_name = _("Deck.gl - 3D HEX")
spatial_control_keys = ["spatial"]
def get_properties(self, d):
return {"position": d.get("spatial"), "weight": d.get(self.metric_label) or 1}
def get_data(self, df: pd.DataFrame) -> VizData:
self.metric_label = utils.get_metric_name(self.metric)
return super(DeckHex, self).get_data(df)
class DeckGeoJson(BaseDeckGLViz):
"""deck.gl's GeoJSONLayer"""
viz_type = "deck_geojson"
verbose_name = _("Deck.gl - GeoJSON")
def query_obj(self):
d = super().query_obj()
d["columns"] += [self.form_data.get("geojson")]
d["metrics"] = []
d["groupby"] = []
return d
def get_properties(self, d):
geojson = d.get(self.form_data.get("geojson"))
return json.loads(geojson)
class DeckArc(BaseDeckGLViz):
"""deck.gl's Arc Layer"""
viz_type = "deck_arc"
verbose_name = _("Deck.gl - Arc")
spatial_control_keys = ["start_spatial", "end_spatial"]
is_timeseries = True
def query_obj(self):
fd = self.form_data
self.is_timeseries = bool(fd.get("time_grain_sqla") or fd.get("granularity"))
return super().query_obj()
def get_properties(self, d):
dim = self.form_data.get("dimension")
return {
"sourcePosition": d.get("start_spatial"),
"targetPosition": d.get("end_spatial"),
"cat_color": d.get(dim) if dim else None,
DTTM_ALIAS: d.get(DTTM_ALIAS),
}
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
d = super().get_data(df)
return {
"features": d["features"], # type: ignore
"mapboxApiKey": config["MAPBOX_API_KEY"],
}
class EventFlowViz(BaseViz):
"""A visualization to explore patterns in event sequences"""
viz_type = "event_flow"
verbose_name = _("Event flow")
credits = 'from <a href="https://github.com/williaster/data-ui">@data-ui</a>'
is_timeseries = True
def query_obj(self):
query = super().query_obj()
form_data = self.form_data
event_key = form_data.get("all_columns_x")
entity_key = form_data.get("entity")
meta_keys = [
col
for col in form_data.get("all_columns")
if col != event_key and col != entity_key
]
query["columns"] = [event_key, entity_key] + meta_keys
if form_data["order_by_entity"]:
query["orderby"] = [(entity_key, True)]
return query
def get_data(self, df: pd.DataFrame) -> VizData:
return df.to_dict(orient="records")
class PairedTTestViz(BaseViz):
"""A table displaying paired t-test values"""
viz_type = "paired_ttest"
verbose_name = _("Time Series - Paired t-test")
sort_series = False
is_timeseries = True
def get_data(self, df: pd.DataFrame) -> VizData:
"""
Transform received data frame into an object of the form:
{
'metric1': [
{
groups: ('groupA', ... ),
values: [ {x, y}, ... ],
}, ...
], ...
}
"""
if df.empty:
return None
fd = self.form_data
groups = fd.get("groupby")
metrics = self.metric_labels
df = df.pivot_table(index=DTTM_ALIAS, columns=groups, values=metrics)
cols = []
# Be rid of falsey keys
for col in df.columns:
if col == "":
cols.append("N/A")
elif col is None:
cols.append("NULL")
else:
cols.append(col)
df.columns = cols
data: Dict = {}
series = df.to_dict("series")
for nameSet in df.columns:
# If no groups are defined, nameSet will be the metric name
hasGroup = not isinstance(nameSet, str)
Y = series[nameSet]
d = {
"group": nameSet[1:] if hasGroup else "All",
"values": [{"x": t, "y": Y[t] if t in Y else None} for t in df.index],
}
key = nameSet[0] if hasGroup else nameSet
if key in data:
data[key].append(d)
else:
data[key] = [d]
return data
class RoseViz(NVD3TimeSeriesViz):
viz_type = "rose"
verbose_name = _("Time Series - Nightingale Rose Chart")
sort_series = False
is_timeseries = True
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
data = super().get_data(df)
result: Dict = {}
for datum in data: # type: ignore
key = datum["key"]
for val in datum["values"]:
timestamp = val["x"].value
if not result.get(timestamp):
result[timestamp] = []
value = 0 if math.isnan(val["y"]) else val["y"]
result[timestamp].append(
{
"key": key,
"value": value,
"name": ", ".join(key) if isinstance(key, list) else key,
"time": val["x"],
}
)
return result
class PartitionViz(NVD3TimeSeriesViz):
"""
A hierarchical data visualization with support for time series.
"""
viz_type = "partition"
verbose_name = _("Partition Diagram")
def query_obj(self):
query_obj = super().query_obj()
time_op = self.form_data.get("time_series_option", "not_time")
# Return time series data if the user specifies so
query_obj["is_timeseries"] = time_op != "not_time"
return query_obj
def levels_for(self, time_op, groups, df):
"""
Compute the partition at each `level` from the dataframe.
"""
levels = {}
for i in range(0, len(groups) + 1):
agg_df = df.groupby(groups[:i]) if i else df
levels[i] = (
agg_df.mean()
if time_op == "agg_mean"
else agg_df.sum(numeric_only=True)
)
return levels
def levels_for_diff(self, time_op, groups, df):
# Obtain a unique list of the time grains
times = list(set(df[DTTM_ALIAS]))
times.sort()
until = times[len(times) - 1]
since = times[0]
# Function describing how to calculate the difference
func = {
"point_diff": [pd.Series.sub, lambda a, b, fill_value: a - b],
"point_factor": [pd.Series.div, lambda a, b, fill_value: a / float(b)],
"point_percent": [
lambda a, b, fill_value=0: a.div(b, fill_value=fill_value) - 1,
lambda a, b, fill_value: a / float(b) - 1,
],
}[time_op]
agg_df = df.groupby(DTTM_ALIAS).sum()
levels = {
0: pd.Series(
{
m: func[1](agg_df[m][until], agg_df[m][since], 0)
for m in agg_df.columns
}
)
}
for i in range(1, len(groups) + 1):
agg_df = df.groupby([DTTM_ALIAS] + groups[:i]).sum()
levels[i] = pd.DataFrame(
{
m: func[0](agg_df[m][until], agg_df[m][since], fill_value=0)
for m in agg_df.columns
}
)
return levels
def levels_for_time(self, groups, df):
procs = {}
for i in range(0, len(groups) + 1):
self.form_data["groupby"] = groups[:i]
df_drop = df.drop(groups[i:], 1)
procs[i] = self.process_data(df_drop, aggregate=True)
self.form_data["groupby"] = groups
return procs
def nest_values(self, levels, level=0, metric=None, dims=()):
"""
Nest values at each level on the back-end with
access and setting, instead of summing from the bottom.
"""
if not level:
return [
{
"name": m,
"val": levels[0][m],
"children": self.nest_values(levels, 1, m),
}
for m in levels[0].index
]
if level == 1:
return [
{
"name": i,
"val": levels[1][metric][i],
"children": self.nest_values(levels, 2, metric, (i,)),
}
for i in levels[1][metric].index
]
if level >= len(levels):
return []
return [
{
"name": i,
"val": levels[level][metric][dims][i],
"children": self.nest_values(levels, level + 1, metric, dims + (i,)),
}
for i in levels[level][metric][dims].index
]
def nest_procs(self, procs, level=-1, dims=(), time=None):
if level == -1:
return [
{"name": m, "children": self.nest_procs(procs, 0, (m,))}
for m in procs[0].columns
]
if not level:
return [
{
"name": t,
"val": procs[0][dims[0]][t],
"children": self.nest_procs(procs, 1, dims, t),
}
for t in procs[0].index
]
if level >= len(procs):
return []
return [
{
"name": i,
"val": procs[level][dims][i][time],
"children": self.nest_procs(procs, level + 1, dims + (i,), time),
}
for i in procs[level][dims].columns
]
def get_data(self, df: pd.DataFrame) -> VizData:
fd = self.form_data
groups = fd.get("groupby", [])
time_op = fd.get("time_series_option", "not_time")
if not len(groups):
raise ValueError("Please choose at least one groupby")
if time_op == "not_time":
levels = self.levels_for("agg_sum", groups, df)
elif time_op in ["agg_sum", "agg_mean"]:
levels = self.levels_for(time_op, groups, df)
elif time_op in ["point_diff", "point_factor", "point_percent"]:
levels = self.levels_for_diff(time_op, groups, df)
elif time_op == "adv_anal":
procs = self.levels_for_time(groups, df)
return self.nest_procs(procs)
else:
levels = self.levels_for("agg_sum", [DTTM_ALIAS] + groups, df)
return self.nest_values(levels)
viz_types = {
o.viz_type: o
for o in globals().values()
if (
inspect.isclass(o)
and issubclass(o, BaseViz)
and o.viz_type not in config["VIZ_TYPE_BLACKLIST"]
)
}
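# Illustrative sketch (not part of the original module): viz_types maps every
# registered viz_type string to its class, so a chart request can be routed to the
# right visualization. The constructor arguments shown here are assumptions.
def _example_viz_lookup(form_data, datasource):
    viz_cls = viz_types[form_data["viz_type"]]  # e.g. "deck_scatter" -> DeckScatterViz
    return viz_cls(datasource, form_data)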
|
py | 7df94de94bb02d5147681b7425416d0fd0709317 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# =============================================================================
# Created By : Simon Schaefer
# Description : Build video from images using OpenCV.
# Arguments : Path to images (directory)
# Output video name
# Image tag (optional, if multiple kinds of images in directory)
# =============================================================================
import argparse
import cv2
import glob
import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.patches as patches
import matplotlib.pyplot as plt
# Read input arguments.
parser = argparse.ArgumentParser(description="imgs_to_video")
parser.add_argument("--directory", type=str, default="")
parser.add_argument("--video_name", type=str, default="video.avi")
parser.add_argument("--tag", type=str, default="")
args = parser.parse_args()
assert os.path.isdir(args.directory)
tag = "_" + args.tag if args.tag != "" else None
# Make list of images in img_dir having the tag.
images = glob.glob(args.directory + "/*" + ".png")
for x in range(0, len(images)): images[x] = args.directory+"/hr"+str(x)+"x4_x4b.png"
#images = sorted(images)
if tag is not None: images = [x for x in images if x.count(tag) > 0]
# Create video using OpenCV functionalities.
fps = 10
frame = cv2.imread(images[0])
height, width, _ = frame.shape
video = cv2.VideoWriter(args.video_name, 0, fps, (width,height))
for image in images:
print(image)
video.write(cv2.imread(image))
cv2.destroyAllWindows()
video.release()
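# Example invocation (illustrative only; the script name, directory, video name and
# tag below are hypothetical):
# python imgs_to_video.py --directory ./results --video_name super_resolution.avi --tag x4b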
|
py | 7df94dee0dc5613c50fcc359e55c89a3829a88ef | """
This script tests the approach on the BUCC 2018 shared task on finding parallel sentences:
https://comparable.limsi.fr/bucc2018/bucc2018-task.html
You can download the necessary files from there.
We have used it in our paper (https://arxiv.org/pdf/2004.09813.pdf) in Section 4.2 to evaluate different multilingual models.
This script requires that you have FAISS installed:
https://github.com/facebookresearch/faiss
"""
from sentence_transformers import SentenceTransformer, models
from collections import defaultdict
import os
import pickle
from sklearn.decomposition import PCA
import torch
from bitext_mining_utils import *
#Model we want to use for bitext mining. LaBSE achieves state-of-the-art performance
model_name = 'LaBSE'
model = SentenceTransformer(model_name)
#Input files for the BUCC2018 shared task
source_file = "bucc2018/de-en/de-en.training.de"
target_file = "bucc2018/de-en/de-en.training.en"
labels_file = "bucc2018/de-en/de-en.training.gold"
# We base the scoring on k nearest neighbors for each element
knn_neighbors = 4
# Min score for text pairs. Note, score can be larger than 1
min_threshold = 1
#Do we want to use exact search or approximate nearest neighbor search (ANN)
#Exact search: Slower, but we don't miss any parallel sentences
#ANN: Faster, but the recall will be lower
use_ann_search = True
#Number of clusters for ANN. Optimal number depends on dataset size
ann_num_clusters = 32768
#How many clusters to probe during search. Higher number = better recall, slower
ann_num_cluster_probe = 5
#To save memory, we can use PCA to reduce the dimensionality from 768 to, for example, 128 dimensions
#The encoded embeddings will hence require 6 times less memory. However, we observe a small drop in performance.
use_pca = False
pca_dimensions = 128
#We store the embeddings on disc, so that they can later be loaded from disc
source_embedding_file = '{}_{}_{}.emb'.format(model_name, os.path.basename(source_file), pca_dimensions if use_pca else model.get_sentence_embedding_dimension())
target_embedding_file = '{}_{}_{}.emb'.format(model_name, os.path.basename(target_file), pca_dimensions if use_pca else model.get_sentence_embedding_dimension())
#Use PCA to reduce the dimensionality of the sentence embedding model
if use_pca:
# We use a smaller number of training sentences to learn the PCA
train_sent = []
num_train_sent = 20000
with open(source_file, encoding='utf8') as fSource, open(target_file, encoding='utf8') as fTarget:
for line_source, line_target in zip(fSource, fTarget):
id, sentence = line_source.strip().split("\t", maxsplit=1)
train_sent.append(sentence)
id, sentence = line_target.strip().split("\t", maxsplit=1)
train_sent.append(sentence)
if len(train_sent) >= num_train_sent:
break
print("Encode training embeddings for PCA")
train_matrix = model.encode(train_sent, show_progress_bar=True, convert_to_numpy=True)
pca = PCA(n_components=pca_dimensions)
pca.fit(train_matrix)
dense = models.Dense(in_features=model.get_sentence_embedding_dimension(), out_features=pca_dimensions, bias=False, activation_function=torch.nn.Identity())
dense.linear.weight = torch.nn.Parameter(torch.tensor(pca.components_))
model.add_module('dense', dense)
print("Read source file")
source = {}
with open(source_file, encoding='utf8') as fIn:
for line in fIn:
id, sentence = line.strip().split("\t", maxsplit=1)
source[id] = sentence
print("Read target file")
target = {}
with open(target_file, encoding='utf8') as fIn:
for line in fIn:
id, sentence = line.strip().split("\t", maxsplit=1)
target[id] = sentence
labels = defaultdict(lambda: defaultdict(bool))
num_total_parallel = 0
with open(labels_file) as fIn:
for line in fIn:
src_id, trg_id = line.strip().split("\t")
if src_id in source and trg_id in target:
labels[src_id][trg_id] = True
labels[trg_id][src_id] = True
num_total_parallel += 1
print("Source Sentences:", len(source))
print("Target Sentences:", len(target))
print("Num Parallel:", num_total_parallel)
### Encode source sentences
source_ids = list(source.keys())
source_sentences = [source[id] for id in source_ids]
if not os.path.exists(source_embedding_file):
print("Encode source sentences")
source_embeddings = model.encode(source_sentences, show_progress_bar=True, convert_to_numpy=True)
with open(source_embedding_file, 'wb') as fOut:
pickle.dump(source_embeddings, fOut)
else:
with open(source_embedding_file, 'rb') as fIn:
source_embeddings = pickle.load(fIn)
### Encode target sentences
target_ids = list(target.keys())
target_sentences = [target[id] for id in target_ids]
if not os.path.exists(target_embedding_file):
print("Encode target sentences")
target_embeddings = model.encode(target_sentences, show_progress_bar=True, convert_to_numpy=True)
with open(target_embedding_file, 'wb') as fOut:
pickle.dump(target_embeddings, fOut)
else:
with open(target_embedding_file, 'rb') as fIn:
target_embeddings = pickle.load(fIn)
##### Now we start to search for parallel (translated) sentences
# Normalize embeddings
x = source_embeddings
y = target_embeddings
print("Shape Source:", x.shape)
print("Shape Target:", y.shape)
x = x / np.linalg.norm(x, axis=1, keepdims=True)
y = y / np.linalg.norm(y, axis=1, keepdims=True)
# Perform kNN in both directions
x2y_sim, x2y_ind = kNN(x, y, knn_neighbors, use_ann_search, ann_num_clusters, ann_num_cluster_probe)
x2y_mean = x2y_sim.mean(axis=1)
y2x_sim, y2x_ind = kNN(y, x, knn_neighbors, use_ann_search, ann_num_clusters, ann_num_cluster_probe)
y2x_mean = y2x_sim.mean(axis=1)
# Compute forward and backward scores
margin = lambda a, b: a / b
fwd_scores = score_candidates(x, y, x2y_ind, x2y_mean, y2x_mean, margin)
bwd_scores = score_candidates(y, x, y2x_ind, y2x_mean, x2y_mean, margin)
fwd_best = x2y_ind[np.arange(x.shape[0]), fwd_scores.argmax(axis=1)]
bwd_best = y2x_ind[np.arange(y.shape[0]), bwd_scores.argmax(axis=1)]
indices = np.stack([np.concatenate([np.arange(x.shape[0]), bwd_best]), np.concatenate([fwd_best, np.arange(y.shape[0])])], axis=1)
scores = np.concatenate([fwd_scores.max(axis=1), bwd_scores.max(axis=1)])
seen_src, seen_trg = set(), set()
#Extract the list of parallel sentences
bitext_list = []
for i in np.argsort(-scores):
src_ind, trg_ind = indices[i]
src_ind = int(src_ind)
trg_ind = int(trg_ind)
if scores[i] < min_threshold:
break
if src_ind not in seen_src and trg_ind not in seen_trg:
seen_src.add(src_ind)
seen_trg.add(trg_ind)
bitext_list.append([scores[i], source_ids[src_ind], target_ids[trg_ind]])
# Measure Performance by computing the threshold
# that leads to the best F1 score performance
bitext_list = sorted(bitext_list, key=lambda x: x[0], reverse=True)
n_extract = n_correct = 0
threshold = 0
best_f1 = best_recall = best_precision = 0
average_precision = 0
for idx in range(len(bitext_list)):
score, id1, id2 = bitext_list[idx]
n_extract += 1
if labels[id1][id2] or labels[id2][id1]:
n_correct += 1
precision = n_correct / n_extract
recall = n_correct / num_total_parallel
f1 = 2 * precision * recall / (precision + recall)
average_precision += precision
if f1 > best_f1:
best_f1 = f1
best_precision = precision
best_recall = recall
threshold = (bitext_list[idx][0] + bitext_list[min(idx + 1, len(bitext_list)-1)][0]) / 2
print("Best Threshold:", threshold)
print("Recall:", best_recall)
print("Precision:", best_precision)
print("F1:", best_f1)
|
py | 7df94e072ab96e51b113d99ed1666064a8f84527 | # Author: Mathieu Blondel
# License: BSD 3 clause
from ._stochastic_gradient import BaseSGDClassifier
class Perceptron(BaseSGDClassifier):
"""Linear perceptron classifier.
Read more in the :ref:`User Guide <perceptron>`.
Parameters
----------
penalty : {'l2','l1','elasticnet'}, default=None
The penalty (aka regularization term) to be used.
alpha : float, default=0.0001
Constant that multiplies the regularization term if regularization is
used.
l1_ratio : float, default=0.15
The Elastic Net mixing parameter, with `0 <= l1_ratio <= 1`.
`l1_ratio=0` corresponds to L2 penalty, `l1_ratio=1` to L1.
Only used if `penalty='elasticnet'`.
.. versionadded:: 0.24
fit_intercept : bool, default=True
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered.
max_iter : int, default=1000
The maximum number of passes over the training data (aka epochs).
It only impacts the behavior in the ``fit`` method, and not the
:meth:`partial_fit` method.
.. versionadded:: 0.19
tol : float, default=1e-3
The stopping criterion. If it is not None, the iterations will stop
when (loss > previous_loss - tol).
.. versionadded:: 0.19
shuffle : bool, default=True
Whether or not the training data should be shuffled after each epoch.
verbose : int, default=0
The verbosity level.
eta0 : double, default=1
Constant by which the updates are multiplied.
n_jobs : int, default=None
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
random_state : int, RandomState instance, default=None
Used to shuffle the training data, when ``shuffle`` is set to
``True``. Pass an int for reproducible output across multiple
function calls.
See :term:`Glossary <random_state>`.
early_stopping : bool, default=False
Whether to use early stopping to terminate training when validation
score is not improving. If set to True, it will automatically set aside
a stratified fraction of training data as validation and terminate
training when validation score is not improving by at least tol for
n_iter_no_change consecutive epochs.
.. versionadded:: 0.20
validation_fraction : float, default=0.1
The proportion of training data to set aside as validation set for
early stopping. Must be between 0 and 1.
Only used if early_stopping is True.
.. versionadded:: 0.20
n_iter_no_change : int, default=5
Number of iterations with no improvement to wait before early stopping.
.. versionadded:: 0.20
class_weight : dict, {class_label: weight} or "balanced", default=None
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
warm_start : bool, default=False
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution. See
:term:`the Glossary <warm_start>`.
Attributes
----------
classes_ : ndarray of shape (n_classes,)
The unique classes labels.
coef_ : ndarray of shape (1, n_features) if n_classes == 2 else \
(n_classes, n_features)
Weights assigned to the features.
intercept_ : ndarray of shape (1,) if n_classes == 2 else (n_classes,)
Constants in decision function.
loss_function_ : concrete LossFunction
The function that determines the loss, or difference between the
output of the algorithm and the target values.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
n_iter_ : int
The actual number of iterations to reach the stopping criterion.
For multiclass fits, it is the maximum over every binary fit.
t_ : int
Number of weight updates performed during training.
Same as ``(n_iter_ * n_samples)``.
See Also
--------
sklearn.linear_model.SGDClassifier : Linear classifiers
(SVM, logistic regression, etc.) with SGD training.
Notes
-----
``Perceptron`` is a classification algorithm which shares the same
underlying implementation with ``SGDClassifier``. In fact,
``Perceptron()`` is equivalent to `SGDClassifier(loss="perceptron",
eta0=1, learning_rate="constant", penalty=None)`.
References
----------
https://en.wikipedia.org/wiki/Perceptron and references therein.
Examples
--------
>>> from sklearn.datasets import load_digits
>>> from sklearn.linear_model import Perceptron
>>> X, y = load_digits(return_X_y=True)
>>> clf = Perceptron(tol=1e-3, random_state=0)
>>> clf.fit(X, y)
Perceptron()
>>> clf.score(X, y)
0.939...
"""
def __init__(
self,
*,
penalty=None,
alpha=0.0001,
l1_ratio=0.15,
fit_intercept=True,
max_iter=1000,
tol=1e-3,
shuffle=True,
verbose=0,
eta0=1.0,
n_jobs=None,
random_state=0,
early_stopping=False,
validation_fraction=0.1,
n_iter_no_change=5,
class_weight=None,
warm_start=False,
):
super().__init__(
loss="perceptron",
penalty=penalty,
alpha=alpha,
l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
max_iter=max_iter,
tol=tol,
shuffle=shuffle,
verbose=verbose,
random_state=random_state,
learning_rate="constant",
eta0=eta0,
early_stopping=early_stopping,
validation_fraction=validation_fraction,
n_iter_no_change=n_iter_no_change,
power_t=0.5,
warm_start=warm_start,
class_weight=class_weight,
n_jobs=n_jobs,
)
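# Illustrative sketch (not part of scikit-learn): the Notes section above states that
# Perceptron shares its implementation with SGDClassifier; the helper below simply
# builds the equivalent SGDClassifier configuration described there.
def _equivalent_sgd_classifier():
    from sklearn.linear_model import SGDClassifier
    return SGDClassifier(loss="perceptron", eta0=1, learning_rate="constant", penalty=None)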
|
py | 7df94e17bb5389758924e59571888de083de7f20 | # -*- coding: utf-8 -*-
'''
This module is a central location for all salt exceptions
'''
from __future__ import absolute_import
# Import python libs
import copy
import logging
import time
# Import Salt libs
import salt.defaults.exitcodes
from salt.ext import six
log = logging.getLogger(__name__)
def _nested_output(obj):
'''
Serialize obj and format for output
'''
# Explicit late import to avoid circular import
from salt.output import nested
nested.__opts__ = {}
ret = nested.output(obj).rstrip()
return ret
def get_error_message(error):
'''
Get human readable message from Python Exception
'''
return error.args[0] if error.args else u''
class SaltException(Exception):
'''
Base exception class; all Salt-specific exceptions should subclass this
'''
def __init__(self, message=u''):
super(SaltException, self).__init__(message)
self.strerror = message
def pack(self):
'''
Pack this exception into a serializable dictionary that is safe for
transport via msgpack
'''
if six.PY3:
# The message should be a str type, not a unicode
return {u'message': str(self), u'args': self.args}
return dict(message=self.__unicode__(), args=self.args)
class SaltClientError(SaltException):
'''
Problem reading the master root key
'''
class SaltMasterError(SaltException):
'''
Problem reading the master root key
'''
class SaltNoMinionsFound(SaltException):
'''
An attempt to retrieve a list of minions failed
'''
class SaltSyndicMasterError(SaltException):
'''
Problem while proxying a request in the syndication master
'''
class MasterExit(SystemExit):
'''
Raised when the master exits
'''
class AuthenticationError(SaltException):
'''
If sha256 signature fails during decryption
'''
class CommandNotFoundError(SaltException):
'''
Used in modules or grains when a required binary is not available
'''
class CommandExecutionError(SaltException):
'''
Used when a module runs a command which returns an error and wants
to show the user the output gracefully instead of dying
'''
def __init__(self, message=u'', info=None):
self.error = exc_str_prefix = message
self.info = info
if self.info:
if exc_str_prefix:
if exc_str_prefix[-1] not in u'.?!':
exc_str_prefix += u'.'
exc_str_prefix += u' '
exc_str_prefix += u'Additional info follows:\n\n'
# NOTE: exc_str will be passed to the parent class' constructor and
# become self.strerror.
exc_str = exc_str_prefix + _nested_output(self.info)
# For states, if self.info is a dict also provide an attribute
# containing a nested output of the info dict without the changes
# (since they will be in the 'changes' key of the state return and
# this information would be redundant).
if isinstance(self.info, dict):
info_without_changes = copy.deepcopy(self.info)
info_without_changes.pop(u'changes', None)
if info_without_changes:
self.strerror_without_changes = \
exc_str_prefix + _nested_output(info_without_changes)
else:
# 'changes' was the only key in the info dictionary. We no
# longer have any additional info to display. Use the
# original error message.
self.strerror_without_changes = self.error
else:
self.strerror_without_changes = exc_str
else:
self.strerror_without_changes = exc_str = self.error
super(CommandExecutionError, self).__init__(exc_str)
class LoaderError(SaltException):
'''
Problems loading the right renderer
'''
class PublishError(SaltException):
'''
Problems encountered when trying to publish a command
'''
class MinionError(SaltException):
'''
Minion problems reading uris such as salt:// or http://
'''
class FileserverConfigError(SaltException):
'''
Used when invalid fileserver settings are detected
'''
class FileLockError(SaltException):
'''
Used when an error occurs obtaining a file lock
'''
def __init__(self, msg, time_start=None, *args, **kwargs):
super(FileLockError, self).__init__(msg, *args, **kwargs)
if time_start is None:
log.warning(
u'time_start should be provided when raising a FileLockError. '
u'Defaulting to current time as a fallback, but this may '
u'result in an inaccurate timeout.'
)
self.time_start = time.time()
else:
self.time_start = time_start
class GitLockError(SaltException):
'''
Raised when an uncaught error occurs in the midst of obtaining an
update/checkout lock in salt.utils.gitfs.
NOTE: While this uses the errno param similar to an OSError, this exception
class is *not* as subclass of OSError. This is done intentionally, so that
this exception class can be caught in a try/except without being caught as
an OSError.
'''
def __init__(self, errno, strerror, *args, **kwargs):
super(GitLockError, self).__init__(strerror, *args, **kwargs)
self.errno = errno
self.strerror = strerror
class GitRemoteError(SaltException):
'''
Used by GitFS to denote a problem with the existence of the "origin" remote
or part of its configuration
'''
class SaltInvocationError(SaltException, TypeError):
'''
Used when the wrong number of arguments are sent to modules or invalid
arguments are specified on the command line
'''
class PkgParseError(SaltException):
'''
Used when one of the pkg modules cannot correctly parse the output from
the CLI tool (pacman, yum, apt, aptitude, etc)
'''
class SaltRenderError(SaltException):
'''
Used when a renderer needs to raise an explicit error. If a line number and
buffer string are passed, get_context will be invoked to get the location
of the error.
'''
def __init__(self,
message,
line_num=None,
buf=u'',
marker=u' <======================',
trace=None):
self.error = message
exc_str = copy.deepcopy(message)
self.line_num = line_num
self.buffer = buf
self.context = u''
if trace:
exc_str += u'\n{0}\n'.format(trace)
if self.line_num and self.buffer:
import salt.utils
import salt.utils.stringutils
self.context = salt.utils.get_context(
self.buffer,
self.line_num,
marker=marker
)
exc_str += '; line {0}\n\n{1}'.format( # future lint: disable=non-unicode-string
self.line_num,
salt.utils.stringutils.to_str(self.context),
)
super(SaltRenderError, self).__init__(exc_str)
class SaltClientTimeout(SaltException):
'''
Thrown when a job sent through one of the Client interfaces times out
Takes the ``jid`` as a parameter
'''
def __init__(self, msg, jid=None, *args, **kwargs):
super(SaltClientTimeout, self).__init__(msg, *args, **kwargs)
self.jid = jid
class SaltCacheError(SaltException):
'''
Thrown when a problem was encountered trying to read or write from the salt cache
'''
class SaltReqTimeoutError(SaltException):
'''
Thrown when a salt master request call fails to return within the timeout
'''
class TimedProcTimeoutError(SaltException):
'''
Thrown when a timed subprocess does not terminate within the timeout,
or if the specified timeout is not an int or a float
'''
class EauthAuthenticationError(SaltException):
'''
Thrown when eauth authentication fails
'''
class TokenAuthenticationError(SaltException):
'''
Thrown when token authentication fails
'''
class AuthorizationError(SaltException):
'''
Thrown when runner or wheel execution fails due to permissions
'''
class SaltDaemonNotRunning(SaltException):
'''
Thrown when a master/minion/syndic is not running but is needed to
perform the requested operation (e.g., eauth).
'''
class SaltRunnerError(SaltException):
'''
Problem in runner
'''
class SaltWheelError(SaltException):
'''
Problem in wheel
'''
class SaltConfigurationError(SaltException):
'''
Configuration error
'''
class SaltSystemExit(SystemExit):
'''
This exception is raised when an unsolvable problem is found. There's
nothing else to do, salt should just exit.
'''
def __init__(self, code=0, msg=None):
SystemExit.__init__(self, msg)
class SaltCloudException(SaltException):
'''
Generic Salt Cloud Exception
'''
class SaltCloudSystemExit(SaltCloudException):
'''
This exception is raised when the execution should be stopped.
'''
def __init__(self, message, exit_code=salt.defaults.exitcodes.EX_GENERIC):
SaltCloudException.__init__(self, message)
self.message = message
self.exit_code = exit_code
class SaltCloudConfigError(SaltCloudException):
'''
Raised when a configuration setting is not found and should exist.
'''
class SaltCloudNotFound(SaltCloudException):
'''
Raised when some cloud provider function cannot find what's being searched.
'''
class SaltCloudExecutionTimeout(SaltCloudException):
'''
Raised when too much time has passed while querying/waiting for data.
'''
class SaltCloudExecutionFailure(SaltCloudException):
'''
Raised when too many failures have occurred while querying/waiting for data.
'''
class SaltCloudPasswordError(SaltCloudException):
'''
Raised when virtual terminal password input fails
'''
class NotImplemented(SaltException):
'''
Used when a module runs a command which returns an error and wants
to show the user the output gracefully instead of dying
'''
class TemplateError(SaltException):
'''
Used when a custom error is triggered in a template
'''
# Validation related exceptions
class InvalidConfigError(CommandExecutionError):
'''
Used when the input is invalid
'''
# VMware related exceptions
class VMwareSaltError(CommandExecutionError):
'''
Used when a VMware object cannot be retrieved
'''
class VMwareRuntimeError(VMwareSaltError):
'''
Used when a runtime error is encountered when communicating with the
vCenter
'''
class VMwareConnectionError(VMwareSaltError):
'''
Used when the client fails to connect to either a VMware vCenter server or
to an ESXi host
'''
class VMwareObjectRetrievalError(VMwareSaltError):
'''
Used when a VMware object cannot be retrieved
'''
class VMwareApiError(VMwareSaltError):
'''
Used when representing a generic VMware API error
'''
class VMwareSystemError(VMwareSaltError):
'''
Used when representing a generic VMware system error
'''
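# Illustrative sketch (not part of the original module): CommandExecutionError accepts
# an optional ``info`` payload that is rendered with the nested outputter and appended
# to the message; the error message and info values below are made up.
def _example_command_execution_error():
    try:
        raise CommandExecutionError(
            u'Failed to restart service',
            info={u'retcode': 1, u'changes': {u'myservice': u'stopped'}},
        )
    except CommandExecutionError as exc:
        # strerror includes the rendered info; strerror_without_changes drops 'changes'
        return exc.strerror, exc.strerror_without_changes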
|
py | 7df94eab56e026e707810a29c7437bd669399c9e | from datetime import datetime
import pytest
from autossl import ssl, exception
from tests import util as tests_util
@pytest.mark.parametrize('cert1,cert2,is_same', [
# fully identical
({'common_name': 'name1', 'sans': ['name2', 'name3'], 'expiration': datetime(2020, 2, 3, 14, 38, 21)},
{'common_name': 'name1', 'sans': ['name2', 'name3'], 'expiration': datetime(2020, 2, 3, 14, 38, 21)},
True),
# fully identical but different san order
({'common_name': 'name1', 'sans': ['name2', 'name3'], 'expiration': datetime(2020, 2, 3, 14, 38, 21)},
{'common_name': 'name1', 'sans': ['name3', 'name2'], 'expiration': datetime(2020, 2, 3, 14, 38, 21)},
True),
# different common_name
({'common_name': 'name1', 'sans': ['name2', 'name3'], 'expiration': datetime(2020, 2, 3, 14, 38, 21)},
{'common_name': 'name4', 'sans': ['name2', 'name3'], 'expiration': datetime(2020, 2, 3, 14, 38, 21)},
False),
# different san
({'common_name': 'name1', 'sans': ['name2', 'name3'], 'expiration': datetime(2020, 2, 3, 14, 38, 21)},
{'common_name': 'name1', 'sans': ['name2', 'name4'], 'expiration': datetime(2020, 2, 3, 14, 38, 21)},
False),
# different expiration
({'common_name': 'name1', 'sans': ['name2', 'name3'], 'expiration': datetime(2020, 2, 3, 14, 38, 21)},
{'common_name': 'name1', 'sans': ['name2', 'name3'], 'expiration': datetime(2021, 2, 3, 14, 38, 21)},
False),
])
def test_ssl_blueprint___eq__(cert1, cert2, is_same):
assert (ssl.SslCertificate(**cert1) == ssl.SslCertificate(**cert2)) is is_same
def test_missing_blueprint():
with pytest.raises(IOError):
ssl.SslBlueprint('dummy/path')
def test_ssl_blueprint_no_server():
ssl_blueprint = ssl.SslBlueprint(tests_util.DATA_PATH / 'tst.ov.example.com_no-server.yaml')
assert ssl_blueprint.name == 'auto_tst.autossl.example.com'
assert ssl_blueprint.certificate.certificate_type == 'OV'
assert ssl_blueprint.certificate.certificate_authority == 'Sectigo'
assert ssl_blueprint.certificate.common_name == 'tst.autossl.example.com'
assert len(ssl_blueprint.servers) == 0
def test_ov_ssl_blueprint():
ssl_blueprint = ssl.SslBlueprint(tests_util.DATA_PATH / 'tst.ov.example.com.yaml')
assert ssl_blueprint.name == 'auto_tst.autossl.example.com'
assert ssl_blueprint.certificate.certificate_type == 'OV'
assert ssl_blueprint.certificate.certificate_authority == 'Sectigo'
assert ssl_blueprint.certificate.common_name == 'tst.autossl.example.com'
assert ssl_blueprint.certificate.renewal_delay == 30
assert len(ssl_blueprint.servers) == 1
assert len(ssl_blueprint.certificate.sans) == 5
assert ssl_blueprint.certificate.organization['company_name'] == 'Autossl corporation'
assert ssl_blueprint.certificate.organization['street_address'] == 'Newbury street'
assert ssl_blueprint.certificate.organization['city'] == 'Boston'
assert ssl_blueprint.certificate.organization['state'] == 'Massachusetts'
assert ssl_blueprint.certificate.organization['postal_code'] == '02115'
assert ssl_blueprint.certificate.organization['country_code'] == 'US'
def test_dv_ssl_blueprint():
ssl_blueprint = ssl.SslBlueprint(tests_util.DATA_PATH / 'tst.dv.example.com.yaml')
assert ssl_blueprint.name == 'auto_tst.autossl.example.com'
assert ssl_blueprint.certificate.certificate_type == 'DV'
assert ssl_blueprint.certificate.certificate_authority == 'LetsEncrypt'
assert ssl_blueprint.certificate.common_name == 'tst.autossl.example.com'
assert ssl_blueprint.certificate.renewal_delay == 30
assert len(ssl_blueprint.servers) == 2
assert len(ssl_blueprint.certificate.sans) == 5
assert ssl_blueprint.certificate.organization is None
def test_ssl_blueprint_with_global_config():
ssl_blueprint = ssl.SslBlueprint(
yaml_path=tests_util.DATA_PATH / 'tst.ov.example.com_minimal.yaml',
global_config_path=tests_util.DATA_PATH / 'global_config.yaml',
)
assert ssl_blueprint.name == 'auto_tst.autossl.example.com'
assert ssl_blueprint.certificate.certificate_type == 'DV'
assert ssl_blueprint.certificate.certificate_authority == 'LetsEncrypt'
assert ssl_blueprint.certificate.common_name == 'tst.autossl.example.com'
assert ssl_blueprint.certificate.renewal_delay == 30
assert len(ssl_blueprint.servers) == 1
assert len(ssl_blueprint.certificate.sans) == 5
assert ssl_blueprint.certificate.organization['company_name'] == 'Autossl corporation'
assert ssl_blueprint.certificate.organization['street_address'] == 'Newbury street'
assert ssl_blueprint.certificate.organization['city'] == 'Boston'
assert ssl_blueprint.certificate.organization['state'] == 'Massachusetts'
assert ssl_blueprint.certificate.organization['postal_code'] == '02115'
assert ssl_blueprint.certificate.organization['country_code'] == 'US'
def test_ssl_blueprint_no_common_name(tmp_path):
blueprint_content = u"""
---
name: auto_tst.autossl.example.com
servers:
- type: autossl.server.local.LocalServer
parameters:
path: /etc/ssl/my_certificates
certificate:
type: DV
certificate_authority: LetsEncrypt
san:
- tst.autossl.example.com
- uat.tst.autossl.example.com
- pit.tst.autossl.example.com
...
"""
blueprint_path = tmp_path / 'blueprint.yaml'
blueprint_path.write_text(blueprint_content, encoding='utf-8')
ssl_blueprint = ssl.SslBlueprint(str(blueprint_path))
assert ssl_blueprint.certificate.common_name is None
assert len(ssl_blueprint.certificate.sans) == 3
def test_ssl_blueprint_no_san(tmp_path):
blueprint_content = u"""
---
name: auto_tst.autossl.example.com
servers:
- type: autossl.server.local.LocalServer
parameters:
path: /etc/ssl/my_certificates
certificate:
type: DV
certificate_authority: LetsEncrypt
common_name: tst.autossl.example.com
...
"""
blueprint_path = tmp_path / 'blueprint.yaml'
blueprint_path.write_text(blueprint_content, encoding='utf-8')
ssl_blueprint = ssl.SslBlueprint(str(blueprint_path))
assert ssl_blueprint.certificate.common_name == 'tst.autossl.example.com'
assert len(ssl_blueprint.certificate.sans) == 0
def test_ssl_blueprint_no_commmon_name_no_san(tmp_path):
blueprint_content = u"""
---
name: auto_tst.autossl.example.com
servers:
- type: autossl.server.local.LocalServer
parameters:
path: /etc/ssl/my_certificates
certificate:
type: DV
certificate_authority: LetsEncrypt
...
"""
blueprint_path = tmp_path / 'blueprint.yaml'
blueprint_path.write_text(blueprint_content, encoding='utf-8')
with pytest.raises(ValueError):
ssl.SslBlueprint(str(blueprint_path))
@pytest.mark.parametrize('common_name,is_valid', [
('test2_valid-test.example.com', True),
('*.example.com', True),
(' test.example.com', False),
('test.example.com ', False),
('test.*.com', False),
('%1.example.com', False),
])
def test_ssl_blueprint_validate_common_name(tmp_path, common_name, is_valid):
blueprint_content = u"""
---
name: auto_tst.autossl.example.com
servers:
- type: autossl.server.local.LocalServer
parameters:
path: /etc/ssl/my_certificates
certificate:
type: DV
certificate_authority: LetsEncrypt
common_name: '{}'
...
""".format(common_name)
blueprint_path = tmp_path / 'blueprint.yaml'
blueprint_path.write_text(blueprint_content, encoding='utf-8')
if is_valid:
ssl.SslBlueprint(str(blueprint_path))
else:
with pytest.raises(ValueError):
ssl.SslBlueprint(str(blueprint_path))
def test_get_domains():
ssl_blueprint = ssl.SslBlueprint(tests_util.DATA_PATH / 'tst.dv.example.com.yaml')
assert ssl_blueprint.domains == {
'tst.autossl.example.com',
'uat.tst.autossl.example.com',
'pit.tst.autossl.example.com',
'cit.tst.autossl.example.com',
'mgt.tst.autossl.example.com'
}
def test_is_domain_matching():
assert ssl.is_domain_matching('test.example.com', 'test.example.com')
assert ssl.is_domain_matching('test.example.com', 'test.example.com', True)
assert ssl.is_domain_matching('test.example.com', 'test.example.com', False)
assert ssl.is_domain_matching('test.example.com', 'test2.example.com') is False
assert ssl.is_domain_matching('test.example.com', 'test2.example.com', True) is False
assert ssl.is_domain_matching('test.example.com', 'test2.example.com', False) is False
assert ssl.is_domain_matching('test.example.com', '*.example.com') is True
assert ssl.is_domain_matching('test.example.com', '*.example.com', True) is False
assert ssl.is_domain_matching('test.example.com', '*.example.com', False) is True
def test_is_domain_list_matching():
assert ssl.is_domain_list_matching(['test.example.com'], ['test.example.com'])
assert ssl.is_domain_list_matching(['test.example.com'], ['test.example.com', 'test2.example.com'])
assert ssl.is_domain_list_matching(['test.example.com', 'test2.example.com'], ['test.example.com']) is False
assert ssl.is_domain_list_matching(['test.example.com', 'test2.example.com'], ['*.example.com'])
assert ssl.is_domain_list_matching(
['test.example.com', 'test2.example.com'], ['*.example.com'], exact_match=True) is False
def test_get_config():
ssl_blueprint = ssl.SslBlueprint(tests_util.DATA_PATH / 'tst.dv.example.com.yaml')
assert ssl_blueprint.get_config(name='tracking', path=['dummy_path'], default=[]) == []
assert ssl_blueprint.get_config(name='tracking', path=None, default=None) is None
assert ssl_blueprint.get_config(name='storage', path=None, default=None) == {
'credentials': 'credential_1',
'data': [{'type': 'key'}, {'type': 'csr'}, {'type': 'crt'}],
'parameters': {
'git_url': 'https://git.autossl.com/git/scm/ssl/certificates.git',
'config_user_name': 'Test User',
'config_user_email': '[email protected]',
},
'type': 'autossl.storage.gitscm.GitStorage'}
def test_check_chain_of_trust(tmp_path):
crt_path = tmp_path / 'local.crt'
ca_crt_path = tmp_path / 'local_ca.crt'
ca_key_path = tmp_path / 'local_ca.key'
# generate CA certificate
key, crt = tests_util.create_ca_certificate(ca_name='Autossl')
ca_crt_path.write_bytes(crt)
ca_key_path.write_bytes(key)
# sign a new certificate with the CA
_, csr_path = ssl.generate_csr(name='autossl_cert', common_name='test.autossl.com', output_path=str(tmp_path))
crt_content = tests_util.create_signed_certificate(
csr_path=csr_path,
ca_crt_path=ca_crt_path,
ca_key_path=ca_key_path,
)
crt_path.write_bytes(crt_content)
# valid trust chain should no raise any error
ssl.check_chain_of_trust(
chain_of_trust=[crt.decode('utf-8')],  # Chain of trust normally comes from the SSL blueprint, so it is not in bytes
crt_path=crt_path,
)
# generate self-signed certificate
self_signed_key_path, self_signed_crt_path = tests_util.create_self_signed_certificate(
crt_name="self_signed_local.crt",
output_path=tmp_path,
common_name='self_signed.test.autossl.com',
)
# self signed certificate should not be validated by this CA
with pytest.raises(exception.InvalidTrustChain):
ssl.check_chain_of_trust(
chain_of_trust=[crt.decode('utf-8')],  # Chain of trust normally comes from the SSL blueprint, so it is not in bytes
crt_path=self_signed_crt_path,
)
|
py | 7df94fc037bcc66a3b8e933c0e0e1557087aa385 | """
-----------------
Collapse Gaussfit
-----------------
This was an early attempt to automate gaussian fitting over a data cube using
(multiple) gaussian decomposition for each spectrum. It's reasonably
effective, but the uses are somewhat minimal. I've tried shifting my
cube-related work to `pyspeckit <pyspeckit.bitbucket.org>`_.
"""
try:
import scipy
from scipy import optimize,sqrt
from scipy.optimize import leastsq
#from scipy.stats.stats import nanmedian,nanmean,_nanmedian
except ImportError:
print "Scipy cold not be loaded. Collapse_gaussfit may fail"
import numpy
from numpy import vectorize,zeros,exp,median,where,asarray,array,nonzero,ma,arange,square
import matplotlib
#matplotlib.use('Agg')
from pylab import indices,figure,clf,savefig,plot,legend,text,axes,title
import pickle
import pyfits
import time
from mad import MAD
from ratosexagesimal import ratos,dectos
def nanmedian(arr):
""" nanmedian - this version is NOT capable of broadcasting (operating along axes) """
return median(arr[arr==arr])
def nanmean(arr):
""" nanmean - this version is NOT capable of broadcasting (operating along axes) """
return (arr[arr==arr]).mean()
# read in file
# filename = sys.argv[1]
# fitsfile = pyfits.open(filename)
# cube = fitsfile[0].data
# def gaussian(dx,sigma):
# return lambda x: exp( - (x-dx)**2 / sigma**2 )
# def return_param(xarr,param):
# errorfunction = lambda p:gaussian(*p)(*indices(xarr.shape))-xarr
# pars, cov, infodict, errmsg, success = optimize.leastsq(errorfunction, [len(xarr)/2.,1], full_output=1)
# print errmsg
# if param == 'width':
# return pars[1]
# elif param == 'center':
# return pars[0]
# else:
# return
def gaussian(dx,sigma,a):
return lambda x: a*exp( - (x-dx)**2 / sigma**2 )
def double_gaussian(dx1,dx2,sigma1,sigma2,a1,a2):
return lambda x: a1*exp( - (x-dx1)**2 / sigma1**2 ) + a2*exp( - (x-dx2)**2 / sigma2**2 )
def triple_gaussian(dx1,dx2,dx3,sigma1,sigma2,sigma3,a1,a2,a3):
return lambda x: abs(a1)*exp( - (x-dx1)**2 / sigma1**2 ) + abs(a2)*exp( - (x-dx2)**2 / sigma2**2 ) + abs(a3)*exp( - (x-dx3)**2 / sigma3**2 )
def n_gaussian(dx,sigma,a):
def g(x):
v = zeros(len(x))
for i in range(len(dx)):
v += a[i] * exp( - ( x - dx[i] )**2 / sigma[i]**2 )
return v
return g
def gerr(xarr):
return lambda p:xarr-gaussian(*p)(*indices(xarr.shape))
def double_gerr(xarr):
return lambda p:xarr-double_gaussian(*p)(*indices(xarr.shape))
def triple_gerr(xarr):
return lambda p:xarr-triple_gaussian(*p)(*indices(xarr.shape))
def return_param(xarr,params=None,negamp=False):
if params == None:
if negamp:
params = [xarr.argmin(),5,xarr.min()]
else:
params = [xarr.argmax(),5,xarr.max()]
pars, cov, infodict, errmsg, success = optimize.leastsq(gerr(xarr), params, full_output=1)
return pars
def return_double_param(xarr,params=None):
if params == None:
params = [xarr.argmax(),xarr.argmax()+3,4.2,2.3,xarr.max(),xarr.max()/2]
pars, cov, infodict, errmsg, success = optimize.leastsq(double_gerr(xarr), params, full_output=1)
return pars
def return_triple_param(xarr,params=None):
"""
input parameters: center[1-3],width[1-3],amplitude[1-3]
"""
if params == None:
params = [xarr.argmax(),xarr.argmax()+3,xarr.argmax(),4.2,2.3,10,xarr.max(),xarr.max()/2.,xarr.max()/5.]
pars, cov, infodict, errmsg, success = optimize.leastsq(triple_gerr(xarr), params, full_output=1)
return pars
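# Illustrative sketch (not part of the original module): return_param fits a single
# gaussian and returns [center, width, amplitude] in pixel units. The synthetic
# spectrum below is made up purely to show the call pattern.
def _example_single_gaussian_fit():
    xind = arange(100.)
    synthetic_spectrum = gaussian(45., 6., 3.)(xind)
    return return_param(synthetic_spectrum)  # roughly [45., 6., 3.]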
def adaptive_collapse_gaussfit(cube,axis=2,nsig=3,nrsig=4,prefix='interesting',
vconv=lambda x: x,xtora=lambda x: x,ytodec=lambda x: x,doplot=True):
"""
Attempts to fit one or two Gaussians to each spectrum in a data cube and returns the parameters of the fits.
Adaptively determines where to fit two Gaussian components based on residuals. Will fit 3 gaussians if a
two-gaussian fit is not better than a certain threshold (specified by nsig), and those fits will be output
to images with filename prefix+(coordinate).png. The 3-gaussian fit parameters will not be returned because
the automated fitting is very unlikely to get that part right.
inputs:
cube - a data cube with two spatial and one spectral dimensions
axis - the axis of the spectral dimension
nsig - number of sigma over the mean residual to trigger double-gaussian fitting
also, cutoff to do any fitting at all
prefix - the prefix (including directory name) of the output images from 3-gaussian fitting
doplot - option to turn off plotting of triple-gaussian fits
vconv,xtora,ytodec - functions to convert the axes from pixel coordinates to ra/dec/velocity coordinates
returns:
width_arr1,width_arr2,chi2_arr,offset_arr1,offset_arr2,amp_arr1,amp_arr2
The Gaussian widths, line centers (in pixel units), amplitudes, and the chi-squared value, not in that order
These returns are identical to the returns from double_gaussian, but all components will be zero for the second
gaussian in the case of a single-gaussian fit
the triple gaussian is guessed to be the double gaussian plus a broad, low-amplitude gaussian. Ideally this should
fit outflows reasonably well, but who knows if it really will.
Another option is to fit a negative-amplitude gaussian to account for self-absorption
"""
std_coll = cube.std(axis=axis) # standard deviation of each spectrum
# mad_coll = MAD(cube,axis=axis)
mean_std = median(std_coll.ravel()) # median standard deviation (to reject high-signal spectra that have high std)
if axis > 0: # force spectral axis to first axis
cube = cube.swapaxes(0,axis)
width_arr = zeros(cube.shape[1:]) # define gaussian param arrays
width_arr1 = zeros(cube.shape[1:]) # define gaussian param arrays
width_arr2 = zeros(cube.shape[1:]) # define gaussian param arrays
amp_arr = zeros(cube.shape[1:]) # define gaussian param arrays
amp_arr1 = zeros(cube.shape[1:]) # define gaussian param arrays
amp_arr2 = zeros(cube.shape[1:]) # define gaussian param arrays
chi2_arr = zeros(cube.shape[1:]) # define gaussian param arrays
resid_arr = zeros(cube.shape[1:]) # define gaussian param arrays
offset_arr = zeros(cube.shape[1:]) # define gaussian param arrays
offset_arr1 = zeros(cube.shape[1:]) # define gaussian param arrays
offset_arr2 = zeros(cube.shape[1:]) # define gaussian param arrays
ncarr = (cube.max(axis=0) > mean_std*nsig) # cutoff: don't fit no-signal spectra
starttime = time.time() # timing for output
print cube.shape
print "Fitting a total of %i spectra with peak signal above %f" % (ncarr.sum(),mean_std*nsig)
for i in xrange(cube.shape[1]): # Loop over all elements for
t0 = time.time()
nspec = (cube[:,i,:].max(axis=0) > mean_std*nsig).sum()
print "Working on row %d with %d spectra to fit" % (i,nspec) ,
for j in xrange(cube.shape[2]):
if cube[:,i,j].max() > mean_std*nsig:
# if cube[:,i,j].max() > MAD(cube[:,i,j]):
pars = return_param(cube[:,i,j])
width_arr[i,j] = pars[1]
width_arr1[i,j] = pars[1]
amp_arr[i,j] = pars[2]
amp_arr1[i,j] = pars[2]
# chi2_arr[i,j] = sum(( gerr(cube[:,i,j])(pars) )**2)
resid_arr[i,j] = (gerr(cube[:,i,j])(pars)).sum()
offset_arr[i,j] = pars[0]
offset_arr1[i,j] = pars[0]
else:
width_arr1[i,j] = numpy.nan
chi2_arr[i,j] = numpy.nan
resid_arr[i,j] = numpy.nan
offset_arr1[i,j] = numpy.nan
dt = time.time()-t0
if nspec > 0:
print "in %f seconds (average: %f)" % (dt,dt/float(nspec))
else:
print
chi2_arr = resid_arr**2
resids = ma.masked_where(numpy.isnan(chi2_arr),chi2_arr) # hide bad values
    # residcut = (resids.mean() + (resids.std() * nrsig) ) # Old version - used standard deviation and mean
residcut = (nanmedian(chi2_arr.ravel()) + (MAD(chi2_arr.ravel()) * nrsig) ) # New version: set cutoff by median + nrsig * MAD
to_refit = (resids > residcut).astype('bool')
# to_refit[numpy.isnan(to_refit)] = 0
inds = array(nonzero(to_refit)).transpose()
dgc,tgc = 0,0
print "Refitting a total of %i spectra with peak residual above %f" % (to_refit.sum(),residcut)
f=open("%s_triples.txt" % prefix,'w')
# vconv = lambda x: (x-p3+1)*dv+v0 # convert to velocity frame
vind = vconv(arange(cube[:,0,0].shape[0]))
xind = arange(cube[:,0,0].shape[0])
for ind in inds:
i,j = ind
doublepars = return_double_param(cube[:,i,j])
old_chi2 = chi2_arr[i,j]
new_chi2 = sum(square( double_gerr(cube[:,i,j])(doublepars) ))
if new_chi2 < old_chi2: # if 2 gaussians is an improvement, use it!
chi2_arr[i,j] = new_chi2
width_arr1[i,j] = doublepars[2]
width_arr2[i,j] = doublepars[3]
amp_arr1[i,j] = doublepars[4]
amp_arr2[i,j] = doublepars[5]
offset_arr1[i,j] = doublepars[0]
offset_arr2[i,j] = doublepars[1]
ncarr[i,j] += 1
if new_chi2 > residcut: # Even if double was better, see if a triple might be better yet [but don't store it in the params arrays!]
print >>f,"Triple-gaussian fitting at %i,%i (%i'th double, %i'th triple)" % (i,j,dgc,tgc)
if tgc % 100 == 0:
print "Triple-gaussian fitting at %i,%i (%i'th double, %i'th triple)" % (i,j,dgc,tgc)
tgc += 1
tpguess = [doublepars[0],doublepars[1],(doublepars[0]+doublepars[1])/2.,doublepars[2],doublepars[3],doublepars[2]*5.,doublepars[4],doublepars[5],doublepars[4]/5.]
triplepars = return_triple_param(cube[:,i,j],params=tpguess)
pars = [offset_arr[i,j],width_arr[i,j],amp_arr[i,j]]
if doplot: # if you don't, there's really no point in fitting at all...
ax = axes([.05,.05,.7,.9])
plot(vind,cube[:,i,j],color='black',linestyle='steps',linewidth='.5')
plot(vind,gaussian(*pars)(xind),'r-.',label="Single %f" % ( (gerr(cube[:,i,j])(pars)).sum() ) )
plot(vind,double_gaussian(*doublepars)(xind),'g--',label="Double %f" % ( (double_gerr(cube[:,i,j])(doublepars)).sum() ))
plot(vind,triple_gaussian(*triplepars)(xind),'b:',label="Triple %f" % ( (triple_gerr(cube[:,i,j])(triplepars)).sum() ),linewidth=2)
pars[0] = vconv(pars[0])
text(1.05,.8,"c1 %3.2f w1 %3.2f a1 %3.2f" % tuple(pars),transform=ax.transAxes,size='smaller')
dp = [ vconv(doublepars[0]) , doublepars[2], doublepars[4], vconv(doublepars[1]), doublepars[3], doublepars[5] ]
text(1.05,.6,"c1 %3.2f w1 %3.2f a1 %3.2f\nc2 %3.2f w2 %3.2f a2 %3.2f" % tuple(dp),transform=ax.transAxes,size='smaller')
tp = [ vconv(triplepars[0]) , triplepars[3], triplepars[6], vconv(triplepars[1]), triplepars[4], triplepars[7], vconv(triplepars[2]), triplepars[5], triplepars[8] ]
text(1.05,.4,"c1 %3.2f w1 %3.2f a1 %3.2f\nc2 %3.2f w2 %3.2f a2 %3.2f\nc3 %3.2f w3 %3.2f a3 %3.2f" % tuple(tp),transform=ax.transAxes,size='smaller')
title("Spectrum at %s %s" % (ratos(xtora(i)),dectos(ytodec(j))) )
legend(loc='best')
savefig("%s_%s.%s.png" % (prefix,i,j))
clf()
ncarr[i,j] += 1
print >>f,triplepars
dgc += 1
f.close()
print "Total time %f seconds for %i double and %i triple gaussians" % (time.time()-starttime,dgc,tgc)
return width_arr1,width_arr2,chi2_arr,offset_arr1,offset_arr2,amp_arr1,amp_arr2,ncarr
def collapse_gaussfit(cube,axis=2,negamp=False):
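    # Fit a single Gaussian to every spectrum whose peak (or trough, if negamp is True) exceeds the
    # median per-spectrum standard deviation; returns maps of width, center, amplitude, and chi-squared.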
std_coll = cube.std(axis=axis)
mean_std = median(std_coll.ravel())
if axis > 0:
cube = cube.swapaxes(0,axis)
width_arr = zeros(cube.shape[1:])
amp_arr = zeros(cube.shape[1:])
chi2_arr = zeros(cube.shape[1:])
offset_arr = zeros(cube.shape[1:])
starttime = time.time()
print cube.shape
print "Fitting a total of %i spectra with peak signal above %f" % ((cube.max(axis=0) > mean_std).sum(),mean_std)
for i in xrange(cube.shape[1]):
t0 = time.time()
nspec = (cube[:,i,:].max(axis=0) > mean_std).sum()
print "Working on row %d with %d spectra to fit" % (i,nspec) ,
for j in xrange(cube.shape[2]):
if not negamp and cube[:,i,j].max() > mean_std:
pars = return_param(cube[:,i,j],negamp=negamp)
width_arr[i,j] = pars[1]
chi2_arr[i,j] = sum(( gerr(cube[:,i,j])(pars) )**2)
offset_arr[i,j] = pars[0]
amp_arr[i,j] = pars[2]
elif negamp and cube[:,i,j].min() < -1*mean_std:
pars = return_param(cube[:,i,j],negamp=negamp)
width_arr[i,j] = pars[1]
chi2_arr[i,j] = sum(( gerr(cube[:,i,j])(pars) )**2)
offset_arr[i,j] = pars[0]
amp_arr[i,j] = pars[2]
else:
width_arr[i,j] = numpy.nan
chi2_arr[i,j] = numpy.nan
offset_arr[i,j] = numpy.nan
amp_arr[i,j] = numpy.nan
dt = time.time()-t0
if nspec > 0:
print "in %f seconds (average: %f)" % (dt,dt/float(nspec))
print "Total time %f seconds" % (time.time()-starttime)
return width_arr,offset_arr,amp_arr,chi2_arr
# next step: find 2-gaussian fits
def collapse_double_gaussfit(cube,axis=2):
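    # Same cutoff as collapse_gaussfit, but fit two Gaussian components to every qualifying spectrum.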
std_coll = cube.std(axis=axis)
mean_std = median(std_coll.ravel())
if axis > 0:
cube = cube.swapaxes(0,axis)
width_arr1 = zeros(cube.shape[1:])
width_arr2 = zeros(cube.shape[1:])
amp_arr1 = zeros(cube.shape[1:])
amp_arr2 = zeros(cube.shape[1:])
chi2_arr = zeros(cube.shape[1:])
offset_arr1 = zeros(cube.shape[1:])
offset_arr2 = zeros(cube.shape[1:])
starttime = time.time()
print cube.shape
print "Fitting a total of %i spectra with peak signal above %f" % ((cube.max(axis=0) > mean_std).sum(),mean_std)
for i in xrange(cube.shape[1]):
t0 = time.time()
nspec = (cube[:,i,:].max(axis=0) > mean_std).sum()
print "Working on row %d with %d spectra to fit" % (i,nspec) ,
for j in xrange(cube.shape[2]):
if cube[:,i,j].max() > mean_std:
pars = return_double_param(cube[:,i,j])
width_arr1[i,j] = pars[2]
width_arr2[i,j] = pars[3]
amp_arr1[i,j] = pars[4]
amp_arr2[i,j] = pars[5]
chi2_arr[i,j] = sum(( double_gerr(cube[:,i,j])(pars) )**2)
offset_arr1[i,j] = pars[0]
offset_arr2[i,j] = pars[1]
else:
width_arr1[i,j] = numpy.nan
width_arr2[i,j] = numpy.nan
chi2_arr[i,j] = numpy.nan
offset_arr1[i,j] = numpy.nan
offset_arr2[i,j] = numpy.nan
dt = time.time()-t0
if nspec > 0:
print "in %f seconds (average: %f)" % (dt,dt/float(nspec))
print "Total time %f seconds" % (time.time()-starttime)
return width_arr1,width_arr2,chi2_arr,offset_arr1,offset_arr2,amp_arr1,amp_arr2
def wrap_collapse_gauss(filename,outprefix,redo='no'):
"""
    redo - if not equal to 'no': assume the collapse fits already succeeded (to the extent that the
        .pysav files were written) but some part of the file writing or subsequent processing failed,
        and re-do those steps by reloading the pickled fits instead of redoing the whole collapse
"""
fitsfile = pyfits.open(filename)
dv,v0,p3 = fitsfile[0].header['CD3_3'],fitsfile[0].header['CRVAL3'],fitsfile[0].header['CRPIX3']
cube = fitsfile[0].data
cube = where(numpy.isnan(cube),0,cube)
if redo=='no':
doubleB = asarray(collapse_double_gaussfit(cube,axis=0))
doubleB[numpy.isnan(doubleB)] = 0
pickle.dump(doubleB,open('%s_doubleB.pysav' % outprefix,'w'))
else:
doubleB = pickle.load(open('%s_doubleB.pysav' % outprefix,'r'))
db = doubleB
gcd = double_gaussian(db[3],db[4],db[0],db[1],db[5],db[6])(indices(cube.shape)[0])
fitsfile[0].data = gcd
fitsfile.writeto('%s_doublegausscube.fits' % outprefix,clobber=True)
gcd[numpy.isnan(gcd)] = 0
doubleResids = cube-gcd
fitsfile[0].data = doubleResids
fitsfile.writeto('%s_doublegaussresids.fits' % outprefix,clobber=True)
#doubleB[4] = (doubleB[4]-v0) / dv + p3-1
#doubleB[3] = (doubleB[3]-v0) / dv + p3-1
doubleB[4] = (doubleB[4]-p3+1) * dv + v0
doubleB[3] = (doubleB[3]-p3+1) * dv + v0
fitsfile[0].data = asarray(doubleB)
fitsfile.writeto('%s_doublegausspars.fits' % outprefix,clobber=True)
if redo=='no':
singleB = asarray(collapse_gaussfit(cube,axis=0))
pickle.dump(singleB,open('%s_singleB.pysav' % outprefix,'w'))
else:
singleB = pickle.load(open('%s_singleB.pysav' % outprefix,'r'))
gc = gaussian(singleB[1],singleB[0],singleB[2])(indices(cube.shape)[0])
singleB[1] = (singleB[1]-p3+1) * dv + v0
fitsfile[0].data = gc
fitsfile.writeto('%s_singlegausscube.fits' % outprefix,clobber=True)
gc[numpy.isnan(gc)]=0
singleResids = cube-gc
fitsfile[0].data = singleResids
fitsfile.writeto('%s_singlegaussresids.fits' % outprefix,clobber=True)
fitsfile[0].data = asarray(singleB)
fitsfile.writeto('%s_singlegausspars.fits' % outprefix,clobber=True)
fitsfile[0].header.__delitem__('CD3_3')
fitsfile[0].header.__delitem__('CRVAL3')
fitsfile[0].header.__delitem__('CRPIX3')
fitsfile[0].header.__delitem__('CUNIT3')
fitsfile[0].header.__delitem__('CTYPE3')
doubleResids[numpy.isnan(doubleResids)] = 0
totalDResids = doubleResids.sum(axis=0)
fitsfile[0].data = totalDResids
fitsfile.writeto('%s_doublegauss_totalresids.fits' % outprefix,clobber=True)
singleResids[numpy.isnan(singleResids)] = 0
totalSResids = singleResids.sum(axis=0)
fitsfile[0].data = totalSResids
fitsfile.writeto('%s_singlegauss_totalresids.fits' % outprefix,clobber=True)
return singleB,doubleB
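# Example (hypothetical filenames):
#   singleB, doubleB = wrap_collapse_gauss('mycube.fits', 'fits_out/mycube')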
def wrap_collapse_adaptive(filename,outprefix,redo='no',nsig=5,nrsig=2,doplot=True):
"""
    redo - if not equal to 'no': assume adaptive_collapse_gaussfit already succeeded (to the extent that
        the .pysav file was written) but some part of the file writing or subsequent processing failed,
        and re-do those steps by reloading the pickled fit instead of redoing the whole collapse
"""
fitsfile = pyfits.open(filename)
dv,v0,p3 = fitsfile[0].header['CD3_3'],fitsfile[0].header['CRVAL3'],fitsfile[0].header['CRPIX3']
dr,r0,p1 = fitsfile[0].header['CD1_1'],fitsfile[0].header['CRVAL1'],fitsfile[0].header['CRPIX1']
dd,d0,p2 = fitsfile[0].header['CD2_2'],fitsfile[0].header['CRVAL2'],fitsfile[0].header['CRPIX2']
xtora = lambda x: (x-p1+1)*dr+r0 # convert pixel coordinates to RA/Dec/Velocity
ytodec = lambda y: (y-p2+1)*dd+d0
vconv = lambda v: (v-p3+1)*dv+v0
cube = fitsfile[0].data
cube = where(numpy.isnan(cube),0,cube)
if redo=='no':
adaptB = asarray(adaptive_collapse_gaussfit(cube,axis=0,prefix=outprefix+'_triple',
nsig=nsig,nrsig=nrsig,vconv=vconv,xtora=xtora,ytodec=ytodec,doplot=doplot))
adaptB[numpy.isnan(adaptB)] = 0
pickle.dump(adaptB,open('%s_adaptB.pysav' % outprefix,'w'))
else:
adaptB = pickle.load(open('%s_adaptB.pysav' % outprefix,'r'))
db = adaptB
gcd = double_gaussian(db[3],db[4],db[0],db[1],db[5],db[6])(indices(cube.shape)[0])
fitsfile[0].data = gcd
fitsfile.writeto('%s_adaptgausscube.fits' % outprefix,clobber=True)
gcd[numpy.isnan(gcd)] = 0
adaptResids = cube-gcd
fitsfile[0].data = adaptResids
fitsfile.writeto('%s_adaptgaussresids.fits' % outprefix,clobber=True)
#adaptB[4] = (adaptB[4]-v0) / dv + p3-1
#adaptB[3] = (adaptB[3]-v0) / dv + p3-1
adaptB[4] = (adaptB[4]-p3+1) * dv + v0
adaptB[3] = (adaptB[3]-p3+1) * dv + v0
fitsfile[0].data = asarray(adaptB)
fitsfile.writeto('%s_adaptgausspars.fits' % outprefix,clobber=True)
fitsfile[0].header.__delitem__('CD3_3')
fitsfile[0].header.__delitem__('CRVAL3')
fitsfile[0].header.__delitem__('CRPIX3')
fitsfile[0].header.__delitem__('CUNIT3')
fitsfile[0].header.__delitem__('CTYPE3')
adaptResids[numpy.isnan(adaptResids)] = 0
totalDResids = adaptResids.sum(axis=0)
fitsfile[0].data = totalDResids
fitsfile.writeto('%s_adaptgauss_totalresids.fits' % outprefix,clobber=True)
return adaptB
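# Example (hypothetical filenames):
#   adaptB = wrap_collapse_adaptive('mycube.fits', 'fits_out/mycube', nsig=5, nrsig=2)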
|
py | 7df95062ae6d097697d29a88a3bab503fdc7abd5 | from django.db import models
from django.conf import settings
# Create your models here.
class AcademicYear(models.Model):
year = models.CharField(max_length=50)
active = models.BooleanField(default=False)
def __str__(self):
return self.year
class Term(models.Model):
name = models.CharField(max_length=30, null=True)
active = models.BooleanField(default=False)
def __str__(self):
return self.name
class Subject(models.Model):
name = models.CharField(max_length=30, null=True)
def __str__(self):
return self.name
class Grade(models.Model):
first_test = models.FloatField(null=True)
second_test = models.FloatField(null=True)
exam = models.FloatField(null=True)
term = models.ForeignKey(Term, on_delete=models.CASCADE, related_name='grade_term', null=True)
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='grade_user', null=True)
subject = models.ForeignKey(Subject, related_name="grade_subject", on_delete=models.CASCADE, null=True)
academic_year = models.ForeignKey(AcademicYear, on_delete=models.CASCADE, related_name="grade_academic_year", null=True)
def __str__(self):
return f"{self.user} grades for {self.subjects}, {self.term}"
# class Result(models.Model):
# pass
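# Example query (hypothetical usage from a view or the Django shell, assuming migrations have been
# applied; `some_user` stands in for an actual user instance):
#   year = AcademicYear.objects.filter(active=True).first()
#   grades = Grade.objects.filter(user=some_user, academic_year=year)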
|