ext (string, 9 classes) | sha (string, length 40) | content (string, length 3 to 1.04M) |
---|---|---|
py
|
1a57a825e91f7e34153eff53c793cae9275c813c
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from yaml import dump, load, Dumper, Loader
from data_validation import cli_tools, clients, consts, jellyfish_distance
from data_validation.config_manager import ConfigManager
from data_validation.data_validation import DataValidation
def _get_arg_config_file(args):
"""Return String yaml config file path."""
if not args.config_file:
raise ValueError("YAML Config File was not supplied.")
return args.config_file
def _get_yaml_config_from_file(config_file_path):
"""Return Dict of yaml validation data."""
with open(config_file_path, "r") as yaml_file:
yaml_configs = load(yaml_file.read(), Loader=Loader)
return yaml_configs
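# Optional variant (not part of the original module): a sketch of the same helper
# using PyYAML's safe_load, which is sufficient when the config file contains only
# plain mappings, lists, and scalars.
def _get_yaml_config_from_file_safe(config_file_path):
    """Return dict of yaml validation data using PyYAML's safe loader."""
    from yaml import safe_load

    with open(config_file_path, "r") as yaml_file:
        return safe_load(yaml_file)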
def get_aggregate_config(args, config_manager):
"""Return list of formated aggregation objects.
Args:
config_manager (ConfigManager): Validation config manager instance.
"""
aggregate_configs = [config_manager.build_config_count_aggregate()]
if args.count:
col_args = None if args.count == "*" else cli_tools.get_arg_list(args.count)
aggregate_configs += config_manager.build_config_column_aggregates(
"count", col_args, None
)
if args.sum:
col_args = None if args.sum == "*" else cli_tools.get_arg_list(args.sum)
aggregate_configs += config_manager.build_config_column_aggregates(
"sum", col_args, consts.NUMERIC_DATA_TYPES
)
if args.avg:
col_args = None if args.avg == "*" else cli_tools.get_arg_list(args.avg)
aggregate_configs += config_manager.build_config_column_aggregates(
"avg", col_args, consts.NUMERIC_DATA_TYPES
)
if args.min:
col_args = None if args.min == "*" else cli_tools.get_arg_list(args.min)
aggregate_configs += config_manager.build_config_column_aggregates(
"min", col_args, consts.NUMERIC_DATA_TYPES
)
if args.max:
col_args = None if args.max == "*" else cli_tools.get_arg_list(args.max)
aggregate_configs += config_manager.build_config_column_aggregates(
"max", col_args, consts.NUMERIC_DATA_TYPES
)
return aggregate_configs
def build_config_from_args(args, config_manager):
"""Return config manager object ready to execute.
Args:
config_manager (ConfigManager): Validation config manager instance.
"""
config_manager.append_aggregates(get_aggregate_config(args, config_manager))
if config_manager.validation_type in [
consts.GROUPED_COLUMN_VALIDATION,
consts.ROW_VALIDATION,
]:
grouped_columns = cli_tools.get_arg_list(args.grouped_columns)
config_manager.append_query_groups(
config_manager.build_config_grouped_columns(grouped_columns)
)
if config_manager.validation_type in [consts.ROW_VALIDATION]:
primary_keys = cli_tools.get_arg_list(args.primary_keys, default_value=[])
config_manager.append_primary_keys(
config_manager.build_config_grouped_columns(primary_keys)
)
# TODO(GH#18): Add query filter config logic
return config_manager
def build_config_managers_from_args(args):
"""Return a list of config managers ready to execute."""
configs = []
config_type = args.type
source_conn = cli_tools.get_connection(args.source_conn)
target_conn = cli_tools.get_connection(args.target_conn)
labels = cli_tools.get_labels(args.labels)
result_handler_config = None
if args.bq_result_handler:
result_handler_config = cli_tools.get_result_handler(
args.bq_result_handler, args.service_account
)
elif args.result_handler_config:
result_handler_config = cli_tools.get_result_handler(
args.result_handler_config, args.service_account
)
filter_config = []
if args.filters:
filter_config = cli_tools.get_filters(args.filters)
source_client = clients.get_data_client(source_conn)
target_client = clients.get_data_client(target_conn)
threshold = args.threshold if args.threshold else 0.0
is_filesystem = source_conn["source_type"] == "FileSystem"
tables_list = cli_tools.get_tables_list(
args.tables_list, default_value=[], is_filesystem=is_filesystem
)
for table_obj in tables_list:
config_manager = ConfigManager.build_config_manager(
config_type,
source_conn,
target_conn,
source_client,
target_client,
table_obj,
labels,
threshold,
result_handler_config=result_handler_config,
filter_config=filter_config,
verbose=args.verbose,
)
configs.append(build_config_from_args(args, config_manager))
return configs
def build_config_managers_from_yaml(args):
"""Returns List[ConfigManager] instances ready to be executed."""
config_managers = []
config_file_path = _get_arg_config_file(args)
yaml_configs = _get_yaml_config_from_file(config_file_path)
source_conn = cli_tools.get_connection(yaml_configs[consts.YAML_SOURCE])
target_conn = cli_tools.get_connection(yaml_configs[consts.YAML_TARGET])
source_client = clients.get_data_client(source_conn)
target_client = clients.get_data_client(target_conn)
for config in yaml_configs[consts.YAML_VALIDATIONS]:
config[consts.CONFIG_SOURCE_CONN] = source_conn
config[consts.CONFIG_TARGET_CONN] = target_conn
config[consts.CONFIG_RESULT_HANDLER] = yaml_configs[consts.YAML_RESULT_HANDLER]
config_manager = ConfigManager(
config, source_client, target_client, verbose=args.verbose
)
config_managers.append(config_manager)
return config_managers
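# Illustrative only: the YAML document consumed above is expected to carry four
# top-level keys, keyed by the same consts used in this function. With
# hypothetical connection names it would deserialize to something like:
#
#     {
#         consts.YAML_SOURCE: "my_source_connection",
#         consts.YAML_TARGET: "my_target_connection",
#         consts.YAML_RESULT_HANDLER: None,  # or a result handler config dict
#         consts.YAML_VALIDATIONS: [{...}],  # one dict per validation block
#     }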
def _compare_match_tables(source_table_map, target_table_map):
"""Return dict config object from matching tables."""
# TODO(dhercher): evaluate if improved comparison and score cutoffs should be used.
table_configs = []
target_keys = target_table_map.keys()
for source_key in source_table_map:
target_key = jellyfish_distance.extract_closest_match(
source_key, target_keys, score_cutoff=0.8
)
if target_key is None:
continue
table_config = {
consts.CONFIG_SCHEMA_NAME: source_table_map[source_key][
consts.CONFIG_SCHEMA_NAME
],
consts.CONFIG_TABLE_NAME: source_table_map[source_key][
consts.CONFIG_TABLE_NAME
],
consts.CONFIG_TARGET_SCHEMA_NAME: target_table_map[target_key][
consts.CONFIG_SCHEMA_NAME
],
consts.CONFIG_TARGET_TABLE_NAME: target_table_map[target_key][
consts.CONFIG_TABLE_NAME
],
}
table_configs.append(table_config)
return table_configs
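# Stand-in sketch (assumption): jellyfish_distance.extract_closest_match is a
# project-internal helper. A rough stdlib approximation of "closest key above a
# score cutoff", using difflib instead of Jaro-Winkler, could look like this.
def _extract_closest_match_sketch(source_key, target_keys, score_cutoff=0.8):
    """Return the target key most similar to source_key, or None below cutoff."""
    import difflib

    best_key, best_score = None, 0.0
    for key in target_keys:
        score = difflib.SequenceMatcher(None, source_key, key).ratio()
        if score > best_score:
            best_key, best_score = key, score
    return best_key if best_score >= score_cutoff else None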
def get_table_map(client, allowed_schemas=None):
"""Return dict with searchable keys for table matching."""
table_map = {}
table_objs = clients.get_all_tables(client, allowed_schemas=allowed_schemas)
for table_obj in table_objs:
table_key = ".".join([t for t in table_obj if t])
table_map[table_key] = {
consts.CONFIG_SCHEMA_NAME: table_obj[0],
consts.CONFIG_TABLE_NAME: table_obj[1],
}
return table_map
def find_tables_using_string_matching(args):
"""Return JSON String with matched tables for use in validations."""
source_conn = cli_tools.get_connection(args.source_conn)
target_conn = cli_tools.get_connection(args.target_conn)
source_client = clients.get_data_client(source_conn)
target_client = clients.get_data_client(target_conn)
allowed_schemas = cli_tools.get_arg_list(args.allowed_schemas)
source_table_map = get_table_map(source_client, allowed_schemas=allowed_schemas)
target_table_map = get_table_map(target_client)
table_configs = _compare_match_tables(source_table_map, target_table_map)
return json.dumps(table_configs)
def run_raw_query_against_connection(args):
"""Return results of raw query for adhoc usage."""
conn = cli_tools.get_connection(args.conn)
client = clients.get_data_client(conn)
with client.raw_sql(args.query, results=True) as cur:
return cur.fetchall()
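# Illustrative only (hypothetical connection name): the helper above expects an
# argparse-style namespace with `conn` and `query` attributes, e.g.
#
#     from types import SimpleNamespace
#     rows = run_raw_query_against_connection(
#         SimpleNamespace(conn="my_connection", query="SELECT 1")
#     )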
def convert_config_to_yaml(args, config_managers):
"""Return dict objects formatted for yaml validations.
Args:
config_managers (list[ConfigManager]): List of config manager instances.
"""
yaml_config = {
consts.YAML_SOURCE: args.source_conn,
consts.YAML_TARGET: args.target_conn,
consts.YAML_RESULT_HANDLER: config_managers[0].result_handler_config,
consts.YAML_VALIDATIONS: [],
}
for config_manager in config_managers:
yaml_config[consts.YAML_VALIDATIONS].append(
config_manager.get_yaml_validation_block()
)
return yaml_config
def run_validation(config_manager, verbose=False):
"""Run a single validation.
Args:
config_manager (ConfigManager): Validation config manager instance.
verbose (bool): Validation setting to log queries run.
"""
validator = DataValidation(
config_manager.config,
validation_builder=None,
result_handler=None,
verbose=verbose,
)
validator.execute()
def run_validations(args, config_managers):
"""Run and manage a series of validations.
Args:
config_managers (list[ConfigManager]): List of config manager instances.
"""
# TODO(issue/31): Add parallel execution logic
for config_manager in config_managers:
run_validation(config_manager, verbose=args.verbose)
def store_yaml_config_file(args, config_managers):
"""Build a YAML config file fromt he supplied configs.
Args:
config_managers (list[ConfigManager]): List of config manager instances.
"""
config_file_path = _get_arg_config_file(args)
yaml_configs = convert_config_to_yaml(args, config_managers)
yaml_config_str = dump(yaml_configs, Dumper=Dumper)
with open(config_file_path, "w") as yaml_file:
yaml_file.write(yaml_config_str)
def run(args):
""" """
config_managers = build_config_managers_from_args(args)
if args.config_file:
store_yaml_config_file(args, config_managers)
else:
run_validations(args, config_managers)
def run_connections(args):
""" Run commands related to connection management."""
if args.connect_cmd == "list":
cli_tools.list_connections()
elif args.connect_cmd == "add":
conn = cli_tools.get_connection_config_from_args(args)
# Test getting a client to validate connection details
_ = clients.get_data_client(conn)
cli_tools.store_connection(args.connection_name, conn)
else:
raise ValueError(f"Connections Argument '{args.connect_cmd}' is not supported")
def main():
# Create Parser and Get Deployment Info
args = cli_tools.get_parsed_args()
if args.command == "run":
run(args)
elif args.command == "connections":
run_connections(args)
elif args.command == "run-config":
config_managers = build_config_managers_from_yaml(args)
run_validations(args, config_managers)
elif args.command == "find-tables":
print(find_tables_using_string_matching(args))
elif args.command == "query":
print(run_raw_query_against_connection(args))
else:
raise ValueError(f"Positional Argument '{args.command}' is not supported")
if __name__ == "__main__":
main()
|
py
|
1a57a875626ff7824de2a8b9728e9aa320185572
|
from setuptools import setup
setup(
name = 'ep-sphere_mesh',
version = '0.0.3',
description = 'A ChRIS ds plugin wrapper for sphere_mesh',
author = 'Jennings Zhang',
author_email = '[email protected]',
url = 'https://github.com/FNNDSC/ep-sphere_mesh',
py_modules = ['sphere_mesh_wrapper'],
install_requires = ['chris_plugin', 'pycivet'],
license = 'MIT',
python_requires = '>=3.10.2',
entry_points = {
'console_scripts': [
'sphere_mesh_wrapper = sphere_mesh_wrapper:main'
]
},
classifiers = [
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.10',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Topic :: Scientific/Engineering :: Medical Science Apps.'
]
)
|
py
|
1a57a9027e16d361e042149fb0adfc16e9e41b14
|
"""
Servidor: Lida com múltiplos clientes em paralelo com select. Usa select
para manualmente lidar com um conjunto de sockets: Sockets principais que
aceitam novas conexões, e sockets de input conectadas para aceitar clientes.
"""
import time
from select import select
from socket import socket, AF_INET, SOCK_STREAM
def agora(): return time.ctime(time.time())
# Configurações do servidor
meuHost = ''
minhaPort = 50007
# Número de sockets usados
numPortSocks = 2
# Lista de sockets criados por função de cada socket
socks_principais, le_socks, escreve_socks = [], [], []
# Cria um socket para cada função
for i in range(numPortSocks):
# Configura um socket TCP/IP
portsock = socket(AF_INET, SOCK_STREAM)
# Configura o socket
portsock.bind((meuHost, minhaPort))
portsock.listen(5)
# O adiciona a lista de principais e leitoras
socks_principais.append(portsock)
le_socks.append(portsock)
# Aumenta o valor da port para mudar o próximo socket
minhaPort += 1
print('Loop de seleção de socket iniciado')
while True:
# Vemos todos os sockets legiveis e escreviveis e os selecionamos
legiveis, escreviveis, excessões = select(le_socks, escreve_socks, [])
# Para cada socket legivel
for sockobj in legiveis:
# Se ele é um socket principal
if sockobj in socks_principais:
# Aceita o socket
novo_sock, endereço = sockobj.accept()
# Imprime as conexões
print('Conecta:', endereço, id(novo_sock))
# E o coloca no socket de leitura
le_socks.append(novo_sock)
else:
# Lemos o que está no socket
data = sockobj.recv(1024)
# Imprime a menssagem recebida
print('\tRecebeu', data, 'em', id(sockobj))
# Se não recebermos nada
if not data:
# Fechamos o socket
sockobj.close()
# E o removemos do socket de leitura
le_socks.remove(sockobj)
# Caso contrário
else:
# Preparamos uma resposta a ser enviada
resposta = 'Eco=>%s as %s' % (data, agora())
sockobj.send(resposta.encode())
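# Minimal client sketch (not part of the original script, and unreachable here
# because the loop above never exits): run something like this from a separate
# process to exercise the echo server. Host and ports mirror the defaults above.
#
#     from socket import socket, AF_INET, SOCK_STREAM
#
#     def echo_client(port=50007, message=b'hello'):
#         sock = socket(AF_INET, SOCK_STREAM)
#         sock.connect(('localhost', port))
#         sock.send(message)
#         print('Client received:', sock.recv(1024))
#         sock.close()
#
#     echo_client(50007)
#     echo_client(50008)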
|
py
|
1a57a927203a36b316b812bbbbdaebc511fe71ab
|
from manimlib import *
import math
import networkx as nx
from .algo_vgroup import *
from .algo_node import *
import queue
class AlgoSegTreeNode(object):
def __init__(self, id, l, r, v, left=None, right=None):
self.l = l
self.r = r
self.v = v
self.id = id
self.left = left
self.right = right
class AlgoSegTree(AlgoVGroup):
def __init__(self, scene, datas = [], **kwargs):
self.datas = datas
self.arrows = {}
self.node_objs = {}
self.scene = scene
self.edges = []
self.nodes = []
super().__init__(**kwargs)
self.build_id = 0
self.root = self.build(datas, 0, len(datas)-1)
self.travel_to_nodes(self.root)
self.init_networkx(self.nodes, self.edges)
for k in self.nodes:
n = AlgoNode(str(k["data"]))
p = self.get_node_pos(k["id"])
n.shift(p)
self.node_objs[k["id"]] = n
self.add(n)
for k in self.edges:
self.add_edge_internal(k[0], k[1])
self.center()
def get_build_id(self):
self.build_id += 1
return self.build_id
def travel_to_nodes(self, root):
q = []
q.append(root)
while len(q)>0:
p = q.pop(0)
self.nodes.append({"id":p.id, "data": p.v})
if p.left:
self.edges.append([p.id, p.left.id])
q.append(p.left)
if p.right:
self.edges.append([p.id, p.right.id])
q.append(p.right)
def hide_all(self):
for k in self.node_objs:
self.remove(self.node_objs[k])
for k in self.arrows:
self.remove(self.arrows[k])
def show_node(self, id):
n = self.get_node(id)
self.scene.play(FadeIn(n))
def show_edge(self, i, j):
a = self.arrows[(i, j)]
self.scene.play(FadeIn(a))
def build(self, datas, l, r):
if l == r:
return AlgoSegTreeNode(self.get_build_id(), l, r, datas[l])
m = math.floor((l+r)/2)
left = self.build(datas, l, m)
right = self.build(datas, m+1, r)
val = left.v+right.v
return AlgoSegTreeNode(self.get_build_id(), l, r, val, left, right)
def init_networkx(self, nodes, edges):
self.g = nx.Graph()
for k in nodes:
self.g.add_node(k["id"])
for k in edges:
self.g.add_edge(*k)
self.pos_infos = nx.nx_agraph.graphviz_layout(self.g, prog='dot', args='-Grankdir="TB"')
def get_node_pos(self, k):
p = self.pos_infos[k]
ratio = 60
return [p[0]/ratio, p[1]/ratio, 0]
def clear_edges(self):
self.g.clear_edges()
for k in self.arrows:
self.scene.play(FadeOut(self.arrows[k], run_time=0.3))
self.arrows = {}
def add_edge_internal(self, i, j):
color = "#6e6e6c"
if i == j:
a = Arrow(self.get_node_pos(i), self.get_node_pos(j)+RIGHT*0.1,
path_arc=np.pi*1.5, thickness=0.03, color=color).scale(0.5)
self.arrows[(i, j)] = a
a.set_color(color)
self.add(a)
else:
a = Arrow(self.get_node_pos(i), self.get_node_pos(j), thickness=0.03, color=color)
self.add(a)
a.set_color(color)
self.arrows[(i, j)] = a
def add_edge(self, i, j):
color = "#6e6e6c"
ni = self.node_objs[i]
nj = self.node_objs[j]
if i == j:
a = Arrow(ni.get_center(), nj.get_center()+RIGHT*0.1, path_arc=np.pi*1.5, thickness=0.03, color=color).scale(0.5)
self.arrows[(i, j)] = a
self.add(a)
self.scene.play(FadeIn(a), run_time=0.3)
else:
a = Arrow(ni.get_center(), nj.get_center(), thickness=0.03, color=color)
self.add(a)
self.arrows[(i, j)] = a
self.scene.play(FadeIn(a), run_time=0.3)
def remove_edge(self, i, j):
a = self.arrows[(i, j)]
self.remove(a)
self.scene.play(FadeOut(a))
del self.arrows[(i, j)]
def get_edge(self, i, j):
return self.arrows[(i, j)]
def get_node(self, i):
return self.node_objs[i]
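# Usage sketch (assumption): AlgoSegTree appears to be driven from a manimlib
# Scene. A hypothetical demo scene that builds a segment tree over [1, 2, 3, 4]
# and fades it in might look like this (needs graphviz/pygraphviz for layout).
class AlgoSegTreeDemo(Scene):
    def construct(self):
        tree = AlgoSegTree(self, datas=[1, 2, 3, 4])
        self.play(FadeIn(tree))
        self.wait()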
|
py
|
1a57a99bc8adcd385cdfc8d46b112db398d1247e
|
# Generated by Django 3.2 on 2021-04-17 10:03
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
import drf_file_upload.models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name="AnonymousUploadedFile",
fields=[
("id", models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("file", models.FileField(upload_to=drf_file_upload.models.get_anonymous_uploaded_file_path)),
("uuid", models.CharField(max_length=64, unique=True)),
("created", models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name="AuthenticatedUploadedFile",
fields=[
("id", models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("file", models.FileField(upload_to=drf_file_upload.models.get_authenticated_uploaded_file_path)),
("created", models.DateTimeField(auto_now_add=True)),
("user", models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
py
|
1a57a9c5a8025a1a30850439f95e584e7aa5f900
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Read pages from Parameter namespace in old wiki and save in new wiki."""
import pywikibot
import pywikibot.pagegenerators
FAC_NS = 102
MACHINE_NS = 116
TABLE_NS = 118
old_site = pywikibot.Site('en', 'siriuswiki')
new_site = pywikibot.Site('en', 'newsiriuswiki')
comment = ('Moving from local wiki')
g = pywikibot.pagegenerators.AllpagesPageGenerator(
site=old_site,
namespace=FAC_NS
)
titles = []
for page in g:
titles.append(page.title())
print(titles)
for title in titles:
old_page = pywikibot.Page(old_site, title)
new_page = pywikibot.Page(new_site, title)
new_page.text = old_page.text
try:
# print(new_page.text)
new_page.save(comment)
except pywikibot.PageNotSaved:
print("Error saving %s" % title)
|
py
|
1a57aae50d1d30d6c32cd8ad46677e760d2014d9
|
from unittest import mock
from urllib.parse import urlencode
import pytest
from django.core import mail
from django.core.exceptions import ImproperlyConfigured
from templated_email import get_connection
import saleor.account.emails as account_emails
import saleor.order.emails as emails
from saleor.core.emails import get_email_context, prepare_url
from saleor.order.utils import add_variant_to_draft_order
def test_get_email_context(site_settings):
site = site_settings.site
expected_send_kwargs = {"from_email": site_settings.default_from_email}
proper_context = {
"domain": site.domain,
"site_name": site.name,
}
send_kwargs, received_context = get_email_context()
assert send_kwargs == expected_send_kwargs
assert proper_context == received_context
def test_collect_data_for_order_confirmation_email(order):
"""Order confirmation email requires extra data, which should be present
in email's context.
"""
template = emails.CONFIRM_ORDER_TEMPLATE
email_data = emails.collect_data_for_email(order.pk, template)
email_context = email_data["context"]
assert email_context["order"] == order
assert "schema_markup" in email_context
def test_collect_data_for_fulfillment_email(fulfilled_order):
template = emails.CONFIRM_FULFILLMENT_TEMPLATE
fulfillment = fulfilled_order.fulfillments.first()
fulfillment_data = emails.collect_data_for_fulfillment_email(
fulfilled_order.pk, template, fulfillment.pk
)
email_context = fulfillment_data["context"]
assert email_context["fulfillment"] == fulfillment
email_data = emails.collect_data_for_email(fulfilled_order.pk, template)
assert all([key in email_context for key, item in email_data["context"].items()])
def test_collect_data_for_email(order):
template = emails.CONFIRM_PAYMENT_TEMPLATE
email_data = emails.collect_data_for_email(order.pk, template)
email_context = email_data["context"]
# This property should be present only in the order confirmation email
assert "schema_markup" not in email_context
@mock.patch("saleor.order.emails.send_templated_mail")
def test_send_email_payment_confirmation(mocked_templated_email, order, site_settings):
template = emails.CONFIRM_PAYMENT_TEMPLATE
emails.send_payment_confirmation(order.pk)
email_data = emails.collect_data_for_email(order.pk, template)
recipients = [order.get_customer_email()]
expected_call_kwargs = {
"context": email_data["context"],
"from_email": site_settings.default_from_email,
"template_name": template,
}
mocked_templated_email.assert_called_once_with(
recipient_list=recipients, **expected_call_kwargs
)
# Render the email to ensure there is no error
email_connection = get_connection()
email_connection.get_email_message(to=recipients, **expected_call_kwargs)
@mock.patch("saleor.order.emails.send_templated_mail")
def test_send_staff_emails_without_notification_recipient(
mocked_templated_email, order, site_settings
):
emails.send_staff_order_confirmation(order.pk, "http://www.example.com/")
mocked_templated_email.assert_not_called()
@mock.patch("saleor.order.emails.send_templated_mail")
def test_send_staff_emails(
mocked_templated_email, order, site_settings, staff_notification_recipient
):
redirect_url = "http://www.example.com/"
emails.send_staff_order_confirmation(order.pk, redirect_url)
email_data = emails.collect_staff_order_notification_data(
order.pk, emails.STAFF_CONFIRM_ORDER_TEMPLATE, redirect_url
)
recipients = [staff_notification_recipient.get_email()]
expected_call_kwargs = {
"context": email_data["context"],
"from_email": site_settings.default_from_email,
"template_name": emails.STAFF_CONFIRM_ORDER_TEMPLATE,
}
mocked_templated_email.assert_called_once_with(
recipient_list=recipients, **expected_call_kwargs
)
# Render the email to ensure there is no error
email_connection = get_connection()
email_connection.get_email_message(to=recipients, **expected_call_kwargs)
@mock.patch("saleor.order.emails.send_templated_mail")
def test_send_email_order_confirmation(mocked_templated_email, order, site_settings):
template = emails.CONFIRM_ORDER_TEMPLATE
redirect_url = "https://www.example.com"
emails.send_order_confirmation(order.pk, redirect_url)
email_data = emails.collect_data_for_email(order.pk, template, redirect_url)
recipients = [order.get_customer_email()]
expected_call_kwargs = {
"context": email_data["context"],
"from_email": site_settings.default_from_email,
"template_name": template,
}
mocked_templated_email.assert_called_once_with(
recipient_list=recipients, **expected_call_kwargs
)
# Render the email to ensure there is no error
email_connection = get_connection()
email_connection.get_email_message(to=recipients, **expected_call_kwargs)
@mock.patch("saleor.order.emails.send_templated_mail")
def test_send_confirmation_emails_without_addresses_for_payment(
mocked_templated_email, order, site_settings, digital_content
):
assert not order.lines.count()
template = emails.CONFIRM_PAYMENT_TEMPLATE
add_variant_to_draft_order(order, digital_content.product_variant, quantity=1)
order.shipping_address = None
order.shipping_method = None
order.billing_address = None
order.save(update_fields=["shipping_address", "shipping_method", "billing_address"])
emails.send_payment_confirmation(order.pk)
email_data = emails.collect_data_for_email(order.pk, template)
recipients = [order.get_customer_email()]
expected_call_kwargs = {
"context": email_data["context"],
"from_email": site_settings.default_from_email,
"template_name": template,
}
mocked_templated_email.assert_called_once_with(
recipient_list=recipients, **expected_call_kwargs
)
# Render the email to ensure there is no error
email_connection = get_connection()
email_connection.get_email_message(to=recipients, **expected_call_kwargs)
@mock.patch("saleor.order.emails.send_templated_mail")
def test_send_confirmation_emails_without_addresses_for_order(
mocked_templated_email, order, site_settings, digital_content
):
assert not order.lines.count()
template = emails.CONFIRM_ORDER_TEMPLATE
add_variant_to_draft_order(order, digital_content.product_variant, quantity=1)
order.shipping_address = None
order.shipping_method = None
order.billing_address = None
order.save(update_fields=["shipping_address", "shipping_method", "billing_address"])
redirect_url = "https://www.example.com"
emails.send_order_confirmation(order.pk, redirect_url)
email_data = emails.collect_data_for_email(order.pk, template, redirect_url)
recipients = [order.get_customer_email()]
expected_call_kwargs = {
"context": email_data["context"],
"from_email": site_settings.default_from_email,
"template_name": template,
}
mocked_templated_email.assert_called_once_with(
recipient_list=recipients, **expected_call_kwargs
)
# Render the email to ensure there is no error
email_connection = get_connection()
email_connection.get_email_message(to=recipients, **expected_call_kwargs)
@pytest.mark.parametrize(
"send_email,template",
[
(
emails.send_fulfillment_confirmation,
emails.CONFIRM_FULFILLMENT_TEMPLATE,
), # noqa
(emails.send_fulfillment_update, emails.UPDATE_FULFILLMENT_TEMPLATE),
],
)
@mock.patch("saleor.order.emails.send_templated_mail")
def test_send_fulfillment_emails(
mocked_templated_email, template, send_email, fulfilled_order, site_settings
):
fulfillment = fulfilled_order.fulfillments.first()
send_email(order_pk=fulfilled_order.pk, fulfillment_pk=fulfillment.pk)
email_data = emails.collect_data_for_fulfillment_email(
fulfilled_order.pk, template, fulfillment.pk
)
recipients = [fulfilled_order.get_customer_email()]
expected_call_kwargs = {
"context": email_data["context"],
"from_email": site_settings.default_from_email,
"template_name": template,
}
mocked_templated_email.assert_called_once_with(
recipient_list=recipients, **expected_call_kwargs
)
# Render the email to ensure there is no error
email_connection = get_connection()
email_connection.get_email_message(to=recipients, **expected_call_kwargs)
@pytest.mark.parametrize(
"send_email,template",
[
(
emails.send_fulfillment_confirmation,
emails.CONFIRM_FULFILLMENT_TEMPLATE,
), # noqa
(emails.send_fulfillment_update, emails.UPDATE_FULFILLMENT_TEMPLATE),
],
)
@mock.patch("saleor.order.emails.send_templated_mail")
def test_send_fulfillment_emails_with_tracking_number_as_url(
mocked_templated_email, template, send_email, fulfilled_order, site_settings
):
fulfillment = fulfilled_order.fulfillments.first()
fulfillment.tracking_number = "https://www.example.com"
fulfillment.save()
assert fulfillment.is_tracking_number_url
send_email(order_pk=fulfilled_order.pk, fulfillment_pk=fulfillment.pk)
email_data = emails.collect_data_for_fulfillment_email(
fulfilled_order.pk, template, fulfillment.pk
)
recipients = [fulfilled_order.get_customer_email()]
expected_call_kwargs = {
"context": email_data["context"],
"from_email": site_settings.default_from_email,
"template_name": template,
}
mocked_templated_email.assert_called_once_with(
recipient_list=recipients, **expected_call_kwargs
)
# Render the email to ensure there is no error
email_connection = get_connection()
email_connection.get_email_message(to=recipients, **expected_call_kwargs)
def test_email_having_display_name_in_settings(customer_user, site_settings, settings):
expected_from_email = "Info <[email protected]>"
site_settings.default_mail_sender_name = None
site_settings.default_mail_sender_address = None
settings.DEFAULT_FROM_EMAIL = expected_from_email
assert site_settings.default_from_email == expected_from_email
def test_email_with_email_not_configured_raises_error(settings, site_settings):
"""Ensure an exception is thrown when not default sender is set;
both missing in the settings.py and in the site settings table.
"""
site_settings.default_mail_sender_address = None
settings.DEFAULT_FROM_EMAIL = None
with pytest.raises(ImproperlyConfigured) as exc:
_ = site_settings.default_from_email
assert exc.value.args == ("No sender email address has been set-up",)
def test_send_set_password_email(staff_user, site_settings):
password_set_url = "https://www.example.com"
template_name = "dashboard/staff/set_password"
recipient_email = staff_user.email
account_emails._send_set_password_email(
recipient_email, password_set_url, template_name
)
assert len(mail.outbox) == 1
sent_message = mail.outbox[0].body
assert password_set_url in sent_message
def test_prepare_url():
redirect_url = "https://www.example.com"
params = urlencode({"param1": "abc", "param2": "xyz"})
result = prepare_url(params, redirect_url)
assert result == "https://www.example.com?param1=abc¶m2=xyz"
@mock.patch("saleor.account.emails.send_templated_mail")
def test_send_email_request_change(
mocked_templated_email, site_settings, customer_user
):
new_email = "[email protected]"
template = account_emails.REQUEST_EMAIL_CHANGE_TEMPLATE
redirect_url = "localhost"
token = "token_example"
event_parameters = {"old_email": customer_user.email, "new_email": new_email}
account_emails._send_request_email_change_email(
new_email, redirect_url, customer_user.pk, token, event_parameters
)
ctx = {
"domain": "mirumee.com",
"redirect_url": "localhost?token=token_example",
"site_name": "mirumee.com",
}
recipients = [new_email]
expected_call_kwargs = {
"context": ctx,
"from_email": site_settings.default_from_email,
"template_name": template,
}
# mocked_templated_email.assert_called_once()
mocked_templated_email.assert_called_once_with(
recipient_list=recipients, **expected_call_kwargs
)
# Render the email to ensure there is no error
email_connection = get_connection()
email_connection.get_email_message(to=recipients, **expected_call_kwargs)
@mock.patch("saleor.account.emails.send_templated_mail")
def test_send_email_changed_notification(
mocked_templated_email, site_settings, customer_user
):
old_email = "[email protected]"
template = account_emails.EMAIL_CHANGED_NOTIFICATION_TEMPLATE
account_emails.send_user_change_email_notification(old_email)
ctx = {
"domain": "mirumee.com",
"site_name": "mirumee.com",
}
recipients = [old_email]
expected_call_kwargs = {
"context": ctx,
"from_email": site_settings.default_from_email,
"template_name": template,
}
# mocked_templated_email.assert_called_once()
mocked_templated_email.assert_called_once_with(
recipient_list=recipients, **expected_call_kwargs
)
# Render the email to ensure there is no error
email_connection = get_connection()
email_connection.get_email_message(to=recipients, **expected_call_kwargs)
|
py
|
1a57ad3a2b8111bcac0f618ca3b9a66f2c8d224d
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Discovery document tests
Unit tests for objects created from discovery documents.
"""
from __future__ import absolute_import
import six
__author__ = "[email protected] (Joe Gregorio)"
from six import BytesIO, StringIO
from six.moves.urllib.parse import urlparse, parse_qs
import copy
import datetime
import httplib2
import itertools
import json
import os
import pickle
import re
import sys
import unittest2 as unittest
from collections import defaultdict
from parameterized import parameterized
import mock
import google.auth.credentials
from google.auth.transport import mtls
from google.auth.exceptions import MutualTLSChannelError
import google_auth_httplib2
import google.api_core.exceptions
from googleapiclient.discovery import _fix_up_media_upload
from googleapiclient.discovery import _fix_up_method_description
from googleapiclient.discovery import _fix_up_parameters
from googleapiclient.discovery import _urljoin
from googleapiclient.discovery import build
from googleapiclient.discovery import build_from_document
from googleapiclient.discovery import DISCOVERY_URI
from googleapiclient.discovery import key2param
from googleapiclient.discovery import MEDIA_BODY_PARAMETER_DEFAULT_VALUE
from googleapiclient.discovery import MEDIA_MIME_TYPE_PARAMETER_DEFAULT_VALUE
from googleapiclient.discovery import ResourceMethodParameters
from googleapiclient.discovery import STACK_QUERY_PARAMETERS
from googleapiclient.discovery import STACK_QUERY_PARAMETER_DEFAULT_VALUE
from googleapiclient.discovery import V1_DISCOVERY_URI
from googleapiclient.discovery import V2_DISCOVERY_URI
from googleapiclient.discovery_cache import DISCOVERY_DOC_MAX_AGE
from googleapiclient.discovery_cache.base import Cache
from googleapiclient.errors import HttpError
from googleapiclient.errors import InvalidJsonError
from googleapiclient.errors import MediaUploadSizeError
from googleapiclient.errors import ResumableUploadError
from googleapiclient.errors import UnacceptableMimeTypeError
from googleapiclient.errors import UnknownApiNameOrVersion
from googleapiclient.errors import UnknownFileType
from googleapiclient.http import build_http
from googleapiclient.http import BatchHttpRequest
from googleapiclient.http import HttpMock
from googleapiclient.http import HttpMockSequence
from googleapiclient.http import MediaFileUpload
from googleapiclient.http import MediaIoBaseUpload
from googleapiclient.http import MediaUpload
from googleapiclient.http import MediaUploadProgress
from googleapiclient.http import tunnel_patch
from googleapiclient.model import JsonModel
from googleapiclient.schema import Schemas
from oauth2client import GOOGLE_TOKEN_URI
from oauth2client.client import OAuth2Credentials, GoogleCredentials
from googleapiclient import _helpers as util
import uritemplate
DATA_DIR = os.path.join(os.path.dirname(__file__), "data")
def assertUrisEqual(testcase, expected, actual):
"""Test that URIs are the same, up to reordering of query parameters."""
expected = urlparse(expected)
actual = urlparse(actual)
testcase.assertEqual(expected.scheme, actual.scheme)
testcase.assertEqual(expected.netloc, actual.netloc)
testcase.assertEqual(expected.path, actual.path)
testcase.assertEqual(expected.params, actual.params)
testcase.assertEqual(expected.fragment, actual.fragment)
expected_query = parse_qs(expected.query)
actual_query = parse_qs(actual.query)
for name in list(expected_query.keys()):
testcase.assertEqual(expected_query[name], actual_query[name])
for name in list(actual_query.keys()):
testcase.assertEqual(expected_query[name], actual_query[name])
def assert_discovery_uri(testcase, actual, service_name, version, discovery):
"""Assert that discovery URI used was the one that was expected
for a given service and version."""
params = {"api": service_name, "apiVersion": version}
expanded_requested_uri = uritemplate.expand(discovery, params)
assertUrisEqual(testcase, expanded_requested_uri, actual)
def validate_discovery_requests(testcase, http_mock, service_name, version, discovery):
"""Validates that there have > 0 calls to Http Discovery
and that LAST discovery URI used was the one that was expected
for a given service and version."""
testcase.assertTrue(len(http_mock.request_sequence) > 0)
if len(http_mock.request_sequence) > 0:
actual_uri = http_mock.request_sequence[-1][0]
assert_discovery_uri(testcase, actual_uri, service_name, version, discovery)
def datafile(filename):
return os.path.join(DATA_DIR, filename)
def read_datafile(filename, mode="r"):
with open(datafile(filename), mode=mode) as f:
return f.read()
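# Example (assumption: the fixtures referenced below, e.g. "zoo.json" and
# "plus.json", live under DATA_DIR next to this file): the helpers above
# resolve and read them, e.g. zoo_doc = json.loads(read_datafile("zoo.json", "r")).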
class SetupHttplib2(unittest.TestCase):
def test_retries(self):
# Merely loading googleapiclient.discovery should set the RETRIES to 1.
self.assertEqual(1, httplib2.RETRIES)
class Utilities(unittest.TestCase):
def setUp(self):
self.zoo_root_desc = json.loads(read_datafile("zoo.json", "r"))
self.zoo_get_method_desc = self.zoo_root_desc["methods"]["query"]
self.zoo_animals_resource = self.zoo_root_desc["resources"]["animals"]
self.zoo_insert_method_desc = self.zoo_animals_resource["methods"]["insert"]
self.zoo_schema = Schemas(self.zoo_root_desc)
def test_key2param(self):
self.assertEqual("max_results", key2param("max-results"))
self.assertEqual("x007_bond", key2param("007-bond"))
def _base_fix_up_parameters_test(self, method_desc, http_method, root_desc, schema):
self.assertEqual(method_desc["httpMethod"], http_method)
method_desc_copy = copy.deepcopy(method_desc)
self.assertEqual(method_desc, method_desc_copy)
parameters = _fix_up_parameters(
method_desc_copy, root_desc, http_method, schema
)
self.assertNotEqual(method_desc, method_desc_copy)
for param_name in STACK_QUERY_PARAMETERS:
self.assertEqual(
STACK_QUERY_PARAMETER_DEFAULT_VALUE, parameters[param_name]
)
for param_name, value in six.iteritems(root_desc.get("parameters", {})):
self.assertEqual(value, parameters[param_name])
return parameters
def test_fix_up_parameters_get(self):
parameters = self._base_fix_up_parameters_test(
self.zoo_get_method_desc, "GET", self.zoo_root_desc, self.zoo_schema
)
# Since http_method is 'GET'
self.assertFalse("body" in parameters)
def test_fix_up_parameters_insert(self):
parameters = self._base_fix_up_parameters_test(
self.zoo_insert_method_desc, "POST", self.zoo_root_desc, self.zoo_schema
)
body = {"description": "The request body.", "type": "object", "$ref": "Animal"}
self.assertEqual(parameters["body"], body)
def test_fix_up_parameters_check_body(self):
dummy_root_desc = {}
dummy_schema = {
"Request": {
"properties": {
"description": "Required. Dummy parameter.",
"type": "string",
}
}
}
no_payload_http_method = "DELETE"
with_payload_http_method = "PUT"
invalid_method_desc = {"response": "Who cares"}
valid_method_desc = {
"request": {"key1": "value1", "key2": "value2", "$ref": "Request"}
}
parameters = _fix_up_parameters(
invalid_method_desc, dummy_root_desc, no_payload_http_method, dummy_schema
)
self.assertFalse("body" in parameters)
parameters = _fix_up_parameters(
valid_method_desc, dummy_root_desc, no_payload_http_method, dummy_schema
)
self.assertFalse("body" in parameters)
parameters = _fix_up_parameters(
invalid_method_desc, dummy_root_desc, with_payload_http_method, dummy_schema
)
self.assertFalse("body" in parameters)
parameters = _fix_up_parameters(
valid_method_desc, dummy_root_desc, with_payload_http_method, dummy_schema
)
body = {
"description": "The request body.",
"type": "object",
"$ref": "Request",
"key1": "value1",
"key2": "value2",
}
self.assertEqual(parameters["body"], body)
def test_fix_up_parameters_optional_body(self):
# Request with no parameters
dummy_schema = {"Request": {"properties": {}}}
method_desc = {"request": {"$ref": "Request"}}
parameters = _fix_up_parameters(method_desc, {}, "POST", dummy_schema)
def _base_fix_up_method_description_test(
self,
method_desc,
initial_parameters,
final_parameters,
final_accept,
final_max_size,
final_media_path_url,
):
fake_root_desc = {
"rootUrl": "http://root/",
"servicePath": "fake/",
"mtlsRootUrl": "http://root/",
}
fake_path_url = "fake-path/"
accept, max_size, media_path_url = _fix_up_media_upload(
method_desc, fake_root_desc, fake_path_url, initial_parameters
)
self.assertEqual(accept, final_accept)
self.assertEqual(max_size, final_max_size)
self.assertEqual(media_path_url, final_media_path_url)
self.assertEqual(initial_parameters, final_parameters)
def test_fix_up_media_upload_no_initial_invalid(self):
invalid_method_desc = {"response": "Who cares"}
self._base_fix_up_method_description_test(
invalid_method_desc, {}, {}, [], 0, None
)
def test_fix_up_media_upload_no_initial_valid_minimal(self):
valid_method_desc = {"mediaUpload": {"accept": []}}
final_parameters = {
"media_body": MEDIA_BODY_PARAMETER_DEFAULT_VALUE,
"media_mime_type": MEDIA_MIME_TYPE_PARAMETER_DEFAULT_VALUE,
}
self._base_fix_up_method_description_test(
valid_method_desc,
{},
final_parameters,
[],
0,
"http://root/upload/fake/fake-path/",
)
def test_fix_up_media_upload_no_initial_valid_full(self):
valid_method_desc = {"mediaUpload": {"accept": ["*/*"], "maxSize": "10GB"}}
final_parameters = {
"media_body": MEDIA_BODY_PARAMETER_DEFAULT_VALUE,
"media_mime_type": MEDIA_MIME_TYPE_PARAMETER_DEFAULT_VALUE,
}
ten_gb = 10 * 2 ** 30
self._base_fix_up_method_description_test(
valid_method_desc,
{},
final_parameters,
["*/*"],
ten_gb,
"http://root/upload/fake/fake-path/",
)
def test_fix_up_media_upload_with_initial_invalid(self):
invalid_method_desc = {"response": "Who cares"}
initial_parameters = {"body": {}}
self._base_fix_up_method_description_test(
invalid_method_desc, initial_parameters, initial_parameters, [], 0, None
)
def test_fix_up_media_upload_with_initial_valid_minimal(self):
valid_method_desc = {"mediaUpload": {"accept": []}}
initial_parameters = {"body": {}}
final_parameters = {
"body": {},
"media_body": MEDIA_BODY_PARAMETER_DEFAULT_VALUE,
"media_mime_type": MEDIA_MIME_TYPE_PARAMETER_DEFAULT_VALUE,
}
self._base_fix_up_method_description_test(
valid_method_desc,
initial_parameters,
final_parameters,
[],
0,
"http://root/upload/fake/fake-path/",
)
def test_fix_up_media_upload_with_initial_valid_full(self):
valid_method_desc = {"mediaUpload": {"accept": ["*/*"], "maxSize": "10GB"}}
initial_parameters = {"body": {}}
final_parameters = {
"body": {},
"media_body": MEDIA_BODY_PARAMETER_DEFAULT_VALUE,
"media_mime_type": MEDIA_MIME_TYPE_PARAMETER_DEFAULT_VALUE,
}
ten_gb = 10 * 2 ** 30
self._base_fix_up_method_description_test(
valid_method_desc,
initial_parameters,
final_parameters,
["*/*"],
ten_gb,
"http://root/upload/fake/fake-path/",
)
def test_fix_up_method_description_get(self):
result = _fix_up_method_description(
self.zoo_get_method_desc, self.zoo_root_desc, self.zoo_schema
)
path_url = "query"
http_method = "GET"
method_id = "bigquery.query"
accept = []
max_size = 0
media_path_url = None
self.assertEqual(
result, (path_url, http_method, method_id, accept, max_size, media_path_url)
)
def test_fix_up_method_description_insert(self):
result = _fix_up_method_description(
self.zoo_insert_method_desc, self.zoo_root_desc, self.zoo_schema
)
path_url = "animals"
http_method = "POST"
method_id = "zoo.animals.insert"
accept = ["image/png"]
max_size = 1024
media_path_url = "https://www.googleapis.com/upload/zoo/v1/animals"
self.assertEqual(
result, (path_url, http_method, method_id, accept, max_size, media_path_url)
)
def test_urljoin(self):
# We want to exhaustively test various URL combinations.
simple_bases = ["https://www.googleapis.com", "https://www.googleapis.com/"]
long_urls = ["foo/v1/bar:custom?alt=json", "/foo/v1/bar:custom?alt=json"]
long_bases = [
"https://www.googleapis.com/foo/v1",
"https://www.googleapis.com/foo/v1/",
]
simple_urls = ["bar:custom?alt=json", "/bar:custom?alt=json"]
final_url = "https://www.googleapis.com/foo/v1/bar:custom?alt=json"
for base, url in itertools.product(simple_bases, long_urls):
self.assertEqual(final_url, _urljoin(base, url))
for base, url in itertools.product(long_bases, simple_urls):
self.assertEqual(final_url, _urljoin(base, url))
def test_ResourceMethodParameters_zoo_get(self):
parameters = ResourceMethodParameters(self.zoo_get_method_desc)
param_types = {
"a": "any",
"b": "boolean",
"e": "string",
"er": "string",
"i": "integer",
"n": "number",
"o": "object",
"q": "string",
"rr": "string",
}
keys = list(param_types.keys())
self.assertEqual(parameters.argmap, dict((key, key) for key in keys))
self.assertEqual(parameters.required_params, [])
self.assertEqual(sorted(parameters.repeated_params), ["er", "rr"])
self.assertEqual(parameters.pattern_params, {"rr": "[a-z]+"})
self.assertEqual(
sorted(parameters.query_params),
["a", "b", "e", "er", "i", "n", "o", "q", "rr"],
)
self.assertEqual(parameters.path_params, set())
self.assertEqual(parameters.param_types, param_types)
enum_params = {"e": ["foo", "bar"], "er": ["one", "two", "three"]}
self.assertEqual(parameters.enum_params, enum_params)
def test_ResourceMethodParameters_zoo_animals_patch(self):
method_desc = self.zoo_animals_resource["methods"]["patch"]
parameters = ResourceMethodParameters(method_desc)
param_types = {"name": "string"}
keys = list(param_types.keys())
self.assertEqual(parameters.argmap, dict((key, key) for key in keys))
self.assertEqual(parameters.required_params, ["name"])
self.assertEqual(parameters.repeated_params, [])
self.assertEqual(parameters.pattern_params, {})
self.assertEqual(parameters.query_params, [])
self.assertEqual(parameters.path_params, set(["name"]))
self.assertEqual(parameters.param_types, param_types)
self.assertEqual(parameters.enum_params, {})
class Discovery(unittest.TestCase):
def test_discovery_http_is_closed(self):
http = HttpMock(datafile("malformed.json"), {"status": "200"})
service = build("plus", "v1", credentials=mock.sentinel.credentials)
http.close.assert_called_once()
class DiscoveryErrors(unittest.TestCase):
def test_tests_should_be_run_with_strict_positional_enforcement(self):
try:
plus = build("plus", "v1", None, static_discovery=False)
self.fail("should have raised a TypeError exception over missing http=.")
except TypeError:
pass
def test_failed_to_parse_discovery_json(self):
self.http = HttpMock(datafile("malformed.json"), {"status": "200"})
try:
plus = build("plus", "v1", http=self.http, cache_discovery=False, static_discovery=False)
self.fail("should have raised an exception over malformed JSON.")
except InvalidJsonError:
pass
def test_unknown_api_name_or_version(self):
http = HttpMockSequence(
[
({"status": "404"}, read_datafile("zoo.json", "rb")),
({"status": "404"}, read_datafile("zoo.json", "rb")),
]
)
with self.assertRaises(UnknownApiNameOrVersion):
plus = build("plus", "v1", http=http, cache_discovery=False)
def test_credentials_and_http_mutually_exclusive(self):
http = HttpMock(datafile("plus.json"), {"status": "200"})
with self.assertRaises(ValueError):
build("plus", "v1", http=http, credentials=mock.sentinel.credentials, static_discovery=False)
def test_credentials_file_and_http_mutually_exclusive(self):
http = HttpMock(datafile("plus.json"), {"status": "200"})
with self.assertRaises(ValueError):
build(
"plus",
"v1",
http=http,
client_options=google.api_core.client_options.ClientOptions(
credentials_file="credentials.json"
),
static_discovery=False,
)
def test_credentials_and_credentials_file_mutually_exclusive(self):
with self.assertRaises(google.api_core.exceptions.DuplicateCredentialArgs):
build(
"plus",
"v1",
credentials=mock.sentinel.credentials,
client_options=google.api_core.client_options.ClientOptions(
credentials_file="credentials.json"
),
static_discovery=False,
)
class DiscoveryFromDocument(unittest.TestCase):
MOCK_CREDENTIALS = mock.Mock(spec=google.auth.credentials.Credentials)
def test_can_build_from_local_document(self):
discovery = read_datafile("plus.json")
plus = build_from_document(
discovery,
base="https://www.googleapis.com/",
credentials=self.MOCK_CREDENTIALS,
)
self.assertIsNotNone(plus)
self.assertTrue(hasattr(plus, "activities"))
def test_can_build_from_local_deserialized_document(self):
discovery = read_datafile("plus.json")
discovery = json.loads(discovery)
plus = build_from_document(
discovery,
base="https://www.googleapis.com/",
credentials=self.MOCK_CREDENTIALS,
)
self.assertIsNotNone(plus)
self.assertTrue(hasattr(plus, "activities"))
def test_building_with_base_remembers_base(self):
discovery = read_datafile("plus.json")
base = "https://www.example.com/"
plus = build_from_document(
discovery, base=base, credentials=self.MOCK_CREDENTIALS
)
self.assertEqual("https://www.googleapis.com/plus/v1/", plus._baseUrl)
def test_building_with_optional_http_with_authorization(self):
discovery = read_datafile("plus.json")
plus = build_from_document(
discovery,
base="https://www.googleapis.com/",
credentials=self.MOCK_CREDENTIALS,
)
# plus service requires Authorization, hence we expect to see AuthorizedHttp object here
self.assertIsInstance(plus._http, google_auth_httplib2.AuthorizedHttp)
self.assertIsInstance(plus._http.http, httplib2.Http)
self.assertIsInstance(plus._http.http.timeout, int)
self.assertGreater(plus._http.http.timeout, 0)
def test_building_with_optional_http_with_no_authorization(self):
discovery = read_datafile("plus.json")
# Clean up the auth field so we use a plain http client
discovery = json.loads(discovery)
discovery["auth"] = {}
discovery = json.dumps(discovery)
plus = build_from_document(
discovery, base="https://www.googleapis.com/", credentials=None
)
# plus service requires Authorization
self.assertIsInstance(plus._http, httplib2.Http)
self.assertIsInstance(plus._http.timeout, int)
self.assertGreater(plus._http.timeout, 0)
def test_building_with_explicit_http(self):
http = HttpMock()
discovery = read_datafile("plus.json")
plus = build_from_document(
discovery, base="https://www.googleapis.com/", http=http
)
self.assertEqual(plus._http, http)
def test_building_with_developer_key_skips_adc(self):
discovery = read_datafile("plus.json")
plus = build_from_document(
discovery, base="https://www.googleapis.com/", developerKey="123"
)
self.assertIsInstance(plus._http, httplib2.Http)
# It should not be an AuthorizedHttp, because that would indicate that
# application default credentials were used.
self.assertNotIsInstance(plus._http, google_auth_httplib2.AuthorizedHttp)
def test_building_with_context_manager(self):
discovery = read_datafile("plus.json")
with mock.patch("httplib2.Http") as http:
with build_from_document(discovery, base="https://www.googleapis.com/", credentials=self.MOCK_CREDENTIALS) as plus:
self.assertIsNotNone(plus)
self.assertTrue(hasattr(plus, "activities"))
plus._http.http.close.assert_called_once()
def test_resource_close(self):
discovery = read_datafile("plus.json")
with mock.patch("httplib2.Http") as http:
plus = build_from_document(
discovery,
base="https://www.googleapis.com/",
credentials=self.MOCK_CREDENTIALS,
)
plus.close()
plus._http.http.close.assert_called_once()
def test_api_endpoint_override_from_client_options(self):
discovery = read_datafile("plus.json")
api_endpoint = "https://foo.googleapis.com/"
options = google.api_core.client_options.ClientOptions(
api_endpoint=api_endpoint
)
plus = build_from_document(
discovery, client_options=options, credentials=self.MOCK_CREDENTIALS
)
self.assertEqual(plus._baseUrl, api_endpoint)
def test_api_endpoint_override_from_client_options_mapping_object(self):
discovery = read_datafile("plus.json")
api_endpoint = "https://foo.googleapis.com/"
mapping_object = defaultdict(str)
mapping_object["api_endpoint"] = api_endpoint
plus = build_from_document(discovery, client_options=mapping_object)
self.assertEqual(plus._baseUrl, api_endpoint)
def test_api_endpoint_override_from_client_options_dict(self):
discovery = read_datafile("plus.json")
api_endpoint = "https://foo.googleapis.com/"
plus = build_from_document(
discovery,
client_options={"api_endpoint": api_endpoint},
credentials=self.MOCK_CREDENTIALS,
)
self.assertEqual(plus._baseUrl, api_endpoint)
def test_scopes_from_client_options(self):
discovery = read_datafile("plus.json")
with mock.patch("googleapiclient._auth.default_credentials") as default:
plus = build_from_document(
discovery, client_options={"scopes": ["1", "2"]},
)
default.assert_called_once_with(scopes=["1", "2"], quota_project_id=None)
def test_quota_project_from_client_options(self):
discovery = read_datafile("plus.json")
with mock.patch("googleapiclient._auth.default_credentials") as default:
plus = build_from_document(
discovery,
client_options=google.api_core.client_options.ClientOptions(
quota_project_id="my-project"
),
)
default.assert_called_once_with(scopes=None, quota_project_id="my-project")
def test_credentials_file_from_client_options(self):
discovery = read_datafile("plus.json")
with mock.patch("googleapiclient._auth.credentials_from_file") as default:
plus = build_from_document(
discovery,
client_options=google.api_core.client_options.ClientOptions(
credentials_file="credentials.json"
),
)
default.assert_called_once_with(
"credentials.json", scopes=None, quota_project_id=None
)
REGULAR_ENDPOINT = "https://www.googleapis.com/plus/v1/"
MTLS_ENDPOINT = "https://www.mtls.googleapis.com/plus/v1/"
class DiscoveryFromDocumentMutualTLS(unittest.TestCase):
MOCK_CREDENTIALS = mock.Mock(spec=google.auth.credentials.Credentials)
ADC_CERT_PATH = "adc_cert_path"
ADC_KEY_PATH = "adc_key_path"
ADC_PASSPHRASE = "adc_passphrase"
def check_http_client_cert(self, resource, has_client_cert="false"):
if isinstance(resource._http, google_auth_httplib2.AuthorizedHttp):
certs = list(resource._http.http.certificates.iter(""))
else:
certs = list(resource._http.certificates.iter(""))
if has_client_cert == "true":
self.assertEqual(len(certs), 1)
self.assertEqual(
certs[0], (self.ADC_KEY_PATH, self.ADC_CERT_PATH, self.ADC_PASSPHRASE)
)
else:
self.assertEqual(len(certs), 0)
def client_encrypted_cert_source(self):
return self.ADC_CERT_PATH, self.ADC_KEY_PATH, self.ADC_PASSPHRASE
@parameterized.expand(
[
("never", "true"),
("auto", "true"),
("always", "true"),
("never", "false"),
("auto", "false"),
("always", "false"),
]
)
def test_mtls_not_trigger_if_http_provided(self, use_mtls_env, use_client_cert):
discovery = read_datafile("plus.json")
with mock.patch.dict(
"os.environ", {"GOOGLE_API_USE_MTLS_ENDPOINT": use_mtls_env}
):
with mock.patch.dict(
"os.environ", {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert}
):
plus = build_from_document(discovery, http=httplib2.Http())
self.assertIsNotNone(plus)
self.assertEqual(plus._baseUrl, REGULAR_ENDPOINT)
self.check_http_client_cert(plus, has_client_cert="false")
@parameterized.expand(
[
("never", "true"),
("auto", "true"),
("always", "true"),
("never", "false"),
("auto", "false"),
("always", "false"),
]
)
def test_exception_with_client_cert_source(self, use_mtls_env, use_client_cert):
discovery = read_datafile("plus.json")
with mock.patch.dict(
"os.environ", {"GOOGLE_API_USE_MTLS_ENDPOINT": use_mtls_env}
):
with mock.patch.dict(
"os.environ", {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert}
):
with self.assertRaises(MutualTLSChannelError):
build_from_document(
discovery,
credentials=self.MOCK_CREDENTIALS,
client_options={"client_cert_source": mock.Mock()},
)
@parameterized.expand(
[
("never", "true", REGULAR_ENDPOINT),
("auto", "true", MTLS_ENDPOINT),
("always", "true", MTLS_ENDPOINT),
("never", "false", REGULAR_ENDPOINT),
("auto", "false", REGULAR_ENDPOINT),
("always", "false", MTLS_ENDPOINT),
]
)
def test_mtls_with_provided_client_cert(
self, use_mtls_env, use_client_cert, base_url
):
discovery = read_datafile("plus.json")
with mock.patch.dict(
"os.environ", {"GOOGLE_API_USE_MTLS_ENDPOINT": use_mtls_env}
):
with mock.patch.dict(
"os.environ", {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert}
):
plus = build_from_document(
discovery,
credentials=self.MOCK_CREDENTIALS,
client_options={
"client_encrypted_cert_source": self.client_encrypted_cert_source
},
)
self.assertIsNotNone(plus)
self.check_http_client_cert(plus, has_client_cert=use_client_cert)
self.assertEqual(plus._baseUrl, base_url)
@parameterized.expand(
[
("never", "true"),
("auto", "true"),
("always", "true"),
("never", "false"),
("auto", "false"),
("always", "false"),
]
)
def test_endpoint_not_switch(self, use_mtls_env, use_client_cert):
# Test endpoint is not switched if user provided api endpoint
discovery = read_datafile("plus.json")
with mock.patch.dict(
"os.environ", {"GOOGLE_API_USE_MTLS_ENDPOINT": use_mtls_env}
):
with mock.patch.dict(
"os.environ", {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert}
):
plus = build_from_document(
discovery,
credentials=self.MOCK_CREDENTIALS,
client_options={
"api_endpoint": "https://foo.googleapis.com",
"client_encrypted_cert_source": self.client_encrypted_cert_source,
},
)
self.assertIsNotNone(plus)
self.check_http_client_cert(plus, has_client_cert=use_client_cert)
self.assertEqual(plus._baseUrl, "https://foo.googleapis.com")
@parameterized.expand(
[
("never", "true", REGULAR_ENDPOINT),
("auto", "true", MTLS_ENDPOINT),
("always", "true", MTLS_ENDPOINT),
("never", "false", REGULAR_ENDPOINT),
("auto", "false", REGULAR_ENDPOINT),
("always", "false", MTLS_ENDPOINT),
]
)
@mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source", autospec=True
)
@mock.patch(
"google.auth.transport.mtls.default_client_encrypted_cert_source", autospec=True
)
def test_mtls_with_default_client_cert(
self,
use_mtls_env,
use_client_cert,
base_url,
default_client_encrypted_cert_source,
has_default_client_cert_source,
):
has_default_client_cert_source.return_value = True
default_client_encrypted_cert_source.return_value = (
self.client_encrypted_cert_source
)
discovery = read_datafile("plus.json")
with mock.patch.dict(
"os.environ", {"GOOGLE_API_USE_MTLS_ENDPOINT": use_mtls_env}
):
with mock.patch.dict(
"os.environ", {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert}
):
plus = build_from_document(
discovery,
credentials=self.MOCK_CREDENTIALS,
adc_cert_path=self.ADC_CERT_PATH,
adc_key_path=self.ADC_KEY_PATH,
)
self.assertIsNotNone(plus)
self.check_http_client_cert(plus, has_client_cert=use_client_cert)
self.assertEqual(plus._baseUrl, base_url)
@parameterized.expand(
[
("never", "true", REGULAR_ENDPOINT),
("auto", "true", REGULAR_ENDPOINT),
("always", "true", MTLS_ENDPOINT),
("never", "false", REGULAR_ENDPOINT),
("auto", "false", REGULAR_ENDPOINT),
("always", "false", MTLS_ENDPOINT),
]
)
@mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source", autospec=True
)
def test_mtls_with_no_client_cert(
self, use_mtls_env, use_client_cert, base_url, has_default_client_cert_source
):
has_default_client_cert_source.return_value = False
discovery = read_datafile("plus.json")
with mock.patch.dict(
"os.environ", {"GOOGLE_API_USE_MTLS_ENDPOINT": use_mtls_env}
):
with mock.patch.dict(
"os.environ", {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert}
):
plus = build_from_document(
discovery,
credentials=self.MOCK_CREDENTIALS,
adc_cert_path=self.ADC_CERT_PATH,
adc_key_path=self.ADC_KEY_PATH,
)
self.assertIsNotNone(plus)
self.check_http_client_cert(plus, has_client_cert="false")
self.assertEqual(plus._baseUrl, base_url)
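# Fetching the discovery document over HTTP: userIp/key query parameters,
# fallback to the v2 discovery URI, and api_endpoint overrides via client_options.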
class DiscoveryFromHttp(unittest.TestCase):
def setUp(self):
self.old_environ = os.environ.copy()
def tearDown(self):
os.environ = self.old_environ
def test_userip_is_added_to_discovery_uri(self):
# build() will raise an HttpError on a 400, use this to pick the request uri
# out of the raised exception.
os.environ["REMOTE_ADDR"] = "10.0.0.1"
try:
http = HttpMockSequence(
[({"status": "400"}, read_datafile("zoo.json", "rb"))]
)
zoo = build(
"zoo",
"v1",
http=http,
developerKey=None,
discoveryServiceUrl="http://example.com",
static_discovery=False,
)
self.fail("Should have raised an exception.")
except HttpError as e:
self.assertEqual(e.uri, "http://example.com?userIp=10.0.0.1")
def test_userip_missing_is_not_added_to_discovery_uri(self):
# build() will raise an HttpError on a 400, use this to pick the request uri
# out of the raised exception.
try:
http = HttpMockSequence(
[({"status": "400"}, read_datafile("zoo.json", "rb"))]
)
zoo = build(
"zoo",
"v1",
http=http,
developerKey=None,
discoveryServiceUrl="http://example.com",
static_discovery=False,
)
self.fail("Should have raised an exception.")
except HttpError as e:
self.assertEqual(e.uri, "http://example.com")
def test_key_is_added_to_discovery_uri(self):
# build() will raise an HttpError on a 400, use this to pick the request uri
# out of the raised exception.
try:
http = HttpMockSequence(
[({"status": "400"}, read_datafile("zoo.json", "rb"))]
)
zoo = build(
"zoo",
"v1",
http=http,
developerKey="foo",
discoveryServiceUrl="http://example.com",
static_discovery=False,
)
self.fail("Should have raised an exception.")
except HttpError as e:
self.assertEqual(e.uri, "http://example.com?key=foo")
def test_discovery_loading_from_v2_discovery_uri(self):
http = HttpMockSequence(
[
({"status": "404"}, "Not found"),
({"status": "200"}, read_datafile("zoo.json", "rb")),
]
)
zoo = build("zoo", "v1", http=http, cache_discovery=False, static_discovery=False)
self.assertTrue(hasattr(zoo, "animals"))
def test_api_endpoint_override_from_client_options(self):
http = HttpMockSequence(
[
({"status": "404"}, "Not found"),
({"status": "200"}, read_datafile("zoo.json", "rb")),
]
)
api_endpoint = "https://foo.googleapis.com/"
options = google.api_core.client_options.ClientOptions(
api_endpoint=api_endpoint
)
zoo = build(
"zoo", "v1", http=http, cache_discovery=False, client_options=options, static_discovery=False
)
self.assertEqual(zoo._baseUrl, api_endpoint)
def test_api_endpoint_override_from_client_options_dict(self):
http = HttpMockSequence(
[
({"status": "404"}, "Not found"),
({"status": "200"}, read_datafile("zoo.json", "rb")),
]
)
api_endpoint = "https://foo.googleapis.com/"
zoo = build(
"zoo",
"v1",
http=http,
cache_discovery=False,
client_options={"api_endpoint": api_endpoint},
static_discovery=False,
)
self.assertEqual(zoo._baseUrl, api_endpoint)
def test_discovery_with_empty_version_uses_v2(self):
http = HttpMockSequence([({"status": "200"}, read_datafile("zoo.json", "rb")),])
build("zoo", version=None, http=http, cache_discovery=False, static_discovery=False)
validate_discovery_requests(self, http, "zoo", None, V2_DISCOVERY_URI)
def test_discovery_with_empty_version_preserves_custom_uri(self):
http = HttpMockSequence([({"status": "200"}, read_datafile("zoo.json", "rb")),])
custom_discovery_uri = "https://foo.bar/$discovery"
build(
"zoo",
version=None,
http=http,
cache_discovery=False,
discoveryServiceUrl=custom_discovery_uri,
static_discovery=False,
)
validate_discovery_requests(self, http, "zoo", None, custom_discovery_uri)
def test_discovery_with_valid_version_uses_v1(self):
http = HttpMockSequence([({"status": "200"}, read_datafile("zoo.json", "rb")),])
build("zoo", version="v123", http=http, cache_discovery=False, static_discovery=False)
validate_discovery_requests(self, http, "zoo", "v123", V1_DISCOVERY_URI)
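# Retry behaviour when the discovery endpoint answers with 5xx responses.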
class DiscoveryRetryFromHttp(unittest.TestCase):
def test_repeated_500_retries_and_fails(self):
http = HttpMockSequence(
[
({"status": "500"}, read_datafile("500.json", "rb")),
({"status": "503"}, read_datafile("503.json", "rb")),
]
)
with self.assertRaises(HttpError):
with mock.patch("time.sleep") as mocked_sleep:
build("zoo", "v1", http=http, cache_discovery=False, static_discovery=False)
mocked_sleep.assert_called_once()
# We also want to verify that we stayed with v1 discovery
validate_discovery_requests(self, http, "zoo", "v1", V1_DISCOVERY_URI)
def test_v2_repeated_500_retries_and_fails(self):
http = HttpMockSequence(
[
({"status": "404"}, "Not found"), # last v1 discovery call
({"status": "500"}, read_datafile("500.json", "rb")),
({"status": "503"}, read_datafile("503.json", "rb")),
]
)
with self.assertRaises(HttpError):
with mock.patch("time.sleep") as mocked_sleep:
build("zoo", "v1", http=http, cache_discovery=False, static_discovery=False)
mocked_sleep.assert_called_once()
# We also want to verify that we switched to v2 discovery
validate_discovery_requests(self, http, "zoo", "v1", V2_DISCOVERY_URI)
def test_single_500_retries_and_succeeds(self):
http = HttpMockSequence(
[
({"status": "500"}, read_datafile("500.json", "rb")),
({"status": "200"}, read_datafile("zoo.json", "rb")),
]
)
with mock.patch("time.sleep") as mocked_sleep:
zoo = build("zoo", "v1", http=http, cache_discovery=False, static_discovery=False)
self.assertTrue(hasattr(zoo, "animals"))
mocked_sleep.assert_called_once()
# We also want to verify that we stayed with v1 discovery
validate_discovery_requests(self, http, "zoo", "v1", V1_DISCOVERY_URI)
def test_single_500_then_404_retries_and_succeeds(self):
http = HttpMockSequence(
[
({"status": "500"}, read_datafile("500.json", "rb")),
({"status": "404"}, "Not found"), # last v1 discovery call
({"status": "200"}, read_datafile("zoo.json", "rb")),
]
)
with mock.patch("time.sleep") as mocked_sleep:
zoo = build("zoo", "v1", http=http, cache_discovery=False, static_discovery=False)
self.assertTrue(hasattr(zoo, "animals"))
mocked_sleep.assert_called_once()
# We also want to verify that we switched to v2 discovery
validate_discovery_requests(self, http, "zoo", "v1", V2_DISCOVERY_URI)
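# Discovery caching through App Engine memcache; the google.appengine.api import is mocked out.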
class DiscoveryFromAppEngineCache(unittest.TestCase):
def setUp(self):
self.old_environ = os.environ.copy()
os.environ["APPENGINE_RUNTIME"] = "python27"
def tearDown(self):
os.environ = self.old_environ
def test_appengine_memcache(self):
# Hack module import
self.orig_import = __import__
self.mocked_api = mock.MagicMock()
def import_mock(name, *args, **kwargs):
if name == "google.appengine.api":
return self.mocked_api
return self.orig_import(name, *args, **kwargs)
import_fullname = "__builtin__.__import__"
if sys.version_info[0] >= 3:
import_fullname = "builtins.__import__"
with mock.patch(import_fullname, side_effect=import_mock):
namespace = "google-api-client"
self.http = HttpMock(datafile("plus.json"), {"status": "200"})
self.mocked_api.memcache.get.return_value = None
plus = build("plus", "v1", http=self.http, static_discovery=False)
# memcache.get is called once
url = "https://www.googleapis.com/discovery/v1/apis/plus/v1/rest"
self.mocked_api.memcache.get.assert_called_once_with(
url, namespace=namespace
)
# memcache.set is called once
content = read_datafile("plus.json")
self.mocked_api.memcache.set.assert_called_once_with(
url, content, time=DISCOVERY_DOC_MAX_AGE, namespace=namespace
)
# Returns the cached content this time.
self.mocked_api.memcache.get.return_value = content
# Make sure the contents are returned from the cache.
            # (Otherwise it would throw an error.)
self.http = HttpMock(None, {"status": "200"})
plus = build("plus", "v1", http=self.http, static_discovery=False)
# memcache.get is called twice
self.mocked_api.memcache.get.assert_has_calls(
[
mock.call(url, namespace=namespace),
mock.call(url, namespace=namespace),
]
)
            # memcache.set is called just once
self.mocked_api.memcache.set.assert_called_once_with(
url, content, time=DISCOVERY_DOC_MAX_AGE, namespace=namespace
)
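# Building services from the discovery documents bundled with the library (static_discovery=True).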
class DiscoveryFromStaticDocument(unittest.TestCase):
def test_retrieve_from_local_when_static_discovery_true(self):
http = HttpMockSequence([({"status": "400"}, "")])
drive = build("drive", "v3", http=http, cache_discovery=False,
static_discovery=True)
self.assertIsNotNone(drive)
self.assertTrue(hasattr(drive, "files"))
def test_retrieve_from_internet_when_static_discovery_false(self):
http = HttpMockSequence([({"status": "400"}, "")])
with self.assertRaises(HttpError):
build("drive", "v3", http=http, cache_discovery=False,
static_discovery=False)
def test_unknown_api_when_static_discovery_true(self):
with self.assertRaises(UnknownApiNameOrVersion):
build("doesnotexist", "v3", cache_discovery=False,
static_discovery=True)
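# Minimal dict-backed cache used by the file-cache tests below.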
class DictCache(Cache):
def __init__(self):
self.d = {}
def get(self, url):
return self.d.get(url, None)
def set(self, url, content):
self.d[url] = content
def contains(self, url):
return url in self.d
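# Discovery caching through an autodetected cache object (a wrapped DictCache here).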
class DiscoveryFromFileCache(unittest.TestCase):
def test_file_based_cache(self):
cache = mock.Mock(wraps=DictCache())
with mock.patch(
"googleapiclient.discovery_cache.autodetect", return_value=cache
):
self.http = HttpMock(datafile("plus.json"), {"status": "200"})
plus = build("plus", "v1", http=self.http, static_discovery=False)
# cache.get is called once
url = "https://www.googleapis.com/discovery/v1/apis/plus/v1/rest"
cache.get.assert_called_once_with(url)
# cache.set is called once
content = read_datafile("plus.json")
cache.set.assert_called_once_with(url, content)
# Make sure there is a cache entry for the plus v1 discovery doc.
self.assertTrue(cache.contains(url))
# Make sure the contents are returned from the cache.
            # (Otherwise it would throw an error.)
self.http = HttpMock(None, {"status": "200"})
plus = build("plus", "v1", http=self.http, static_discovery=False)
# cache.get is called twice
cache.get.assert_has_calls([mock.call(url), mock.call(url)])
            # cache.set is called just once
cache.set.assert_called_once_with(url, content)
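# Service objects built from the zoo/plus discovery documents: parameter checking,
# media uploads, batching, pickling, and related behaviour.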
class Discovery(unittest.TestCase):
def test_method_error_checking(self):
self.http = HttpMock(datafile("plus.json"), {"status": "200"})
plus = build("plus", "v1", http=self.http, static_discovery=False)
# Missing required parameters
try:
plus.activities().list()
self.fail()
except TypeError as e:
self.assertTrue("Missing" in str(e))
# Missing required parameters even if supplied as None.
try:
plus.activities().list(collection=None, userId=None)
self.fail()
except TypeError as e:
self.assertTrue("Missing" in str(e))
# Parameter doesn't match regex
try:
plus.activities().list(collection="not_a_collection_name", userId="me")
self.fail()
except TypeError as e:
self.assertTrue("not an allowed value" in str(e))
# Unexpected parameter
try:
plus.activities().list(flubber=12)
self.fail()
except TypeError as e:
self.assertTrue("unexpected" in str(e))
def _check_query_types(self, request):
parsed = urlparse(request.uri)
q = parse_qs(parsed[4])
self.assertEqual(q["q"], ["foo"])
self.assertEqual(q["i"], ["1"])
self.assertEqual(q["n"], ["1.0"])
self.assertEqual(q["b"], ["false"])
self.assertEqual(q["a"], ["[1, 2, 3]"])
self.assertEqual(q["o"], ["{'a': 1}"])
self.assertEqual(q["e"], ["bar"])
def test_type_coercion(self):
http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=http, static_discovery=False)
request = zoo.query(
q="foo", i=1.0, n=1.0, b=0, a=[1, 2, 3], o={"a": 1}, e="bar"
)
self._check_query_types(request)
request = zoo.query(
q="foo", i=1, n=1, b=False, a=[1, 2, 3], o={"a": 1}, e="bar"
)
self._check_query_types(request)
request = zoo.query(
q="foo", i="1", n="1", b="", a=[1, 2, 3], o={"a": 1}, e="bar", er="two"
)
request = zoo.query(
q="foo",
i="1",
n="1",
b="",
a=[1, 2, 3],
o={"a": 1},
e="bar",
er=["one", "three"],
rr=["foo", "bar"],
)
self._check_query_types(request)
# Five is right out.
self.assertRaises(TypeError, zoo.query, er=["one", "five"])
def test_optional_stack_query_parameters(self):
http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=http, static_discovery=False)
request = zoo.query(trace="html", fields="description")
parsed = urlparse(request.uri)
q = parse_qs(parsed[4])
self.assertEqual(q["trace"], ["html"])
self.assertEqual(q["fields"], ["description"])
def test_string_params_value_of_none_get_dropped(self):
http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=http, static_discovery=False)
request = zoo.query(trace=None, fields="description")
parsed = urlparse(request.uri)
q = parse_qs(parsed[4])
self.assertFalse("trace" in q)
def test_model_added_query_parameters(self):
http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=http, static_discovery=False)
request = zoo.animals().get(name="Lion")
parsed = urlparse(request.uri)
q = parse_qs(parsed[4])
self.assertEqual(q["alt"], ["json"])
self.assertEqual(request.headers["accept"], "application/json")
def test_fallback_to_raw_model(self):
http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=http, static_discovery=False)
request = zoo.animals().getmedia(name="Lion")
parsed = urlparse(request.uri)
q = parse_qs(parsed[4])
self.assertTrue("alt" not in q)
self.assertEqual(request.headers["accept"], "*/*")
def test_patch(self):
http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=http, static_discovery=False)
request = zoo.animals().patch(name="lion", body='{"description": "foo"}')
self.assertEqual(request.method, "PATCH")
def test_batch_request_from_discovery(self):
self.http = HttpMock(datafile("zoo.json"), {"status": "200"})
# zoo defines a batchPath
zoo = build("zoo", "v1", http=self.http, static_discovery=False)
batch_request = zoo.new_batch_http_request()
self.assertEqual(
batch_request._batch_uri, "https://www.googleapis.com/batchZoo"
)
def test_batch_request_from_default(self):
self.http = HttpMock(datafile("plus.json"), {"status": "200"})
# plus does not define a batchPath
plus = build("plus", "v1", http=self.http, cache_discovery=False, static_discovery=False)
batch_request = plus.new_batch_http_request()
self.assertEqual(batch_request._batch_uri, "https://www.googleapis.com/batch")
def test_tunnel_patch(self):
http = HttpMockSequence(
[
({"status": "200"}, read_datafile("zoo.json", "rb")),
({"status": "200"}, "echo_request_headers_as_json"),
]
)
http = tunnel_patch(http)
zoo = build("zoo", "v1", http=http, cache_discovery=False, static_discovery=False)
resp = zoo.animals().patch(name="lion", body='{"description": "foo"}').execute()
self.assertTrue("x-http-method-override" in resp)
def test_plus_resources(self):
self.http = HttpMock(datafile("plus.json"), {"status": "200"})
plus = build("plus", "v1", http=self.http, static_discovery=False)
self.assertTrue(getattr(plus, "activities"))
self.assertTrue(getattr(plus, "people"))
def test_oauth2client_credentials(self):
credentials = mock.Mock(spec=GoogleCredentials)
credentials.create_scoped_required.return_value = False
discovery = read_datafile("plus.json")
service = build_from_document(discovery, credentials=credentials)
self.assertEqual(service._http, credentials.authorize.return_value)
def test_google_auth_credentials(self):
credentials = mock.Mock(spec=google.auth.credentials.Credentials)
discovery = read_datafile("plus.json")
service = build_from_document(discovery, credentials=credentials)
self.assertIsInstance(service._http, google_auth_httplib2.AuthorizedHttp)
self.assertEqual(service._http.credentials, credentials)
def test_no_scopes_no_credentials(self):
# Zoo doesn't have scopes
discovery = read_datafile("zoo.json")
service = build_from_document(discovery)
# Should be an ordinary httplib2.Http instance and not AuthorizedHttp.
self.assertIsInstance(service._http, httplib2.Http)
def test_full_featured(self):
# Zoo should exercise all discovery facets
# and should also have no future.json file.
self.http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=self.http, static_discovery=False)
self.assertTrue(getattr(zoo, "animals"))
request = zoo.animals().list(name="bat", projection="full")
parsed = urlparse(request.uri)
q = parse_qs(parsed[4])
self.assertEqual(q["name"], ["bat"])
self.assertEqual(q["projection"], ["full"])
def test_nested_resources(self):
self.http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=self.http, static_discovery=False)
self.assertTrue(getattr(zoo, "animals"))
request = zoo.my().favorites().list(max_results="5")
parsed = urlparse(request.uri)
q = parse_qs(parsed[4])
self.assertEqual(q["max-results"], ["5"])
@unittest.skipIf(six.PY3, "print is not a reserved name in Python 3")
def test_methods_with_reserved_names(self):
self.http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=self.http)
self.assertTrue(getattr(zoo, "animals"))
request = zoo.global_().print_().assert_(max_results="5")
parsed = urlparse(request.uri)
self.assertEqual(parsed[2], "/zoo/v1/global/print/assert")
def test_top_level_functions(self):
self.http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=self.http, static_discovery=False)
self.assertTrue(getattr(zoo, "query"))
request = zoo.query(q="foo")
parsed = urlparse(request.uri)
q = parse_qs(parsed[4])
self.assertEqual(q["q"], ["foo"])
def test_simple_media_uploads(self):
self.http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=self.http, static_discovery=False)
doc = getattr(zoo.animals().insert, "__doc__")
self.assertTrue("media_body" in doc)
def test_simple_media_upload_no_max_size_provided(self):
self.http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=self.http, static_discovery=False)
request = zoo.animals().crossbreed(media_body=datafile("small.png"))
self.assertEqual("image/png", request.headers["content-type"])
self.assertEqual(b"PNG", request.body[1:4])
def test_simple_media_raise_correct_exceptions(self):
self.http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=self.http, static_discovery=False)
try:
zoo.animals().insert(media_body=datafile("smiley.png"))
self.fail("should throw exception if media is too large.")
except MediaUploadSizeError:
pass
try:
zoo.animals().insert(media_body=datafile("small.jpg"))
self.fail("should throw exception if mimetype is unacceptable.")
except UnacceptableMimeTypeError:
pass
def test_simple_media_good_upload(self):
self.http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=self.http, static_discovery=False)
request = zoo.animals().insert(media_body=datafile("small.png"))
self.assertEqual("image/png", request.headers["content-type"])
self.assertEqual(b"PNG", request.body[1:4])
assertUrisEqual(
self,
"https://www.googleapis.com/upload/zoo/v1/animals?uploadType=media&alt=json",
request.uri,
)
def test_simple_media_unknown_mimetype(self):
self.http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=self.http, static_discovery=False)
try:
zoo.animals().insert(media_body=datafile("small-png"))
self.fail("should throw exception if mimetype is unknown.")
except UnknownFileType:
pass
request = zoo.animals().insert(
media_body=datafile("small-png"), media_mime_type="image/png"
)
self.assertEqual("image/png", request.headers["content-type"])
self.assertEqual(b"PNG", request.body[1:4])
assertUrisEqual(
self,
"https://www.googleapis.com/upload/zoo/v1/animals?uploadType=media&alt=json",
request.uri,
)
def test_multipart_media_raise_correct_exceptions(self):
self.http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=self.http, static_discovery=False)
try:
zoo.animals().insert(media_body=datafile("smiley.png"), body={})
self.fail("should throw exception if media is too large.")
except MediaUploadSizeError:
pass
try:
zoo.animals().insert(media_body=datafile("small.jpg"), body={})
self.fail("should throw exception if mimetype is unacceptable.")
except UnacceptableMimeTypeError:
pass
    def test_multipart_media_good_upload(self):
self.http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=self.http, static_discovery=False)
request = zoo.animals().insert(media_body=datafile("small.png"), body={})
self.assertTrue(request.headers["content-type"].startswith("multipart/related"))
contents = read_datafile("small.png", "rb")
boundary = re.match(b"--=+([^=]+)", request.body).group(1)
self.assertEqual(
request.body.rstrip(b"\n"), # Python 2.6 does not add a trailing \n
b"--==============="
+ boundary
+ b"==\n"
+ b"Content-Type: application/json\n"
+ b"MIME-Version: 1.0\n\n"
+ b'{"data": {}}\n'
+ b"--==============="
+ boundary
+ b"==\n"
+ b"Content-Type: image/png\n"
+ b"MIME-Version: 1.0\n"
+ b"Content-Transfer-Encoding: binary\n\n"
+ contents
+ b"\n--==============="
+ boundary
+ b"==--",
)
assertUrisEqual(
self,
"https://www.googleapis.com/upload/zoo/v1/animals?uploadType=multipart&alt=json",
request.uri,
)
def test_media_capable_method_without_media(self):
self.http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=self.http, static_discovery=False)
request = zoo.animals().insert(body={})
        self.assertEqual(request.headers["content-type"], "application/json")
def test_resumable_multipart_media_good_upload(self):
self.http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=self.http, static_discovery=False)
media_upload = MediaFileUpload(datafile("small.png"), resumable=True)
request = zoo.animals().insert(media_body=media_upload, body={})
self.assertTrue(request.headers["content-type"].startswith("application/json"))
self.assertEqual('{"data": {}}', request.body)
self.assertEqual(media_upload, request.resumable)
self.assertEqual("image/png", request.resumable.mimetype())
self.assertNotEqual(request.body, None)
self.assertEqual(request.resumable_uri, None)
http = HttpMockSequence(
[
({"status": "200", "location": "http://upload.example.com"}, ""),
({"status": "308", "location": "http://upload.example.com/2"}, ""),
(
{
"status": "308",
"location": "http://upload.example.com/3",
"range": "0-12",
},
"",
),
(
{
"status": "308",
"location": "http://upload.example.com/4",
"range": "0-%d" % (media_upload.size() - 2),
},
"",
),
({"status": "200"}, '{"foo": "bar"}'),
]
)
status, body = request.next_chunk(http=http)
self.assertEqual(None, body)
self.assertTrue(isinstance(status, MediaUploadProgress))
self.assertEqual(0, status.resumable_progress)
# Two requests should have been made and the resumable_uri should have been
# updated for each one.
self.assertEqual(request.resumable_uri, "http://upload.example.com/2")
self.assertEqual(media_upload, request.resumable)
self.assertEqual(0, request.resumable_progress)
        # This next chunk call should upload the first chunk
status, body = request.next_chunk(http=http)
self.assertEqual(request.resumable_uri, "http://upload.example.com/3")
self.assertEqual(media_upload, request.resumable)
self.assertEqual(13, request.resumable_progress)
# This call will upload the next chunk
status, body = request.next_chunk(http=http)
self.assertEqual(request.resumable_uri, "http://upload.example.com/4")
self.assertEqual(media_upload.size() - 1, request.resumable_progress)
self.assertEqual('{"data": {}}', request.body)
# Final call to next_chunk should complete the upload.
status, body = request.next_chunk(http=http)
self.assertEqual(body, {"foo": "bar"})
self.assertEqual(status, None)
def test_resumable_media_good_upload(self):
"""Not a multipart upload."""
self.http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=self.http, static_discovery=False)
media_upload = MediaFileUpload(datafile("small.png"), resumable=True)
request = zoo.animals().insert(media_body=media_upload, body=None)
self.assertEqual(media_upload, request.resumable)
self.assertEqual("image/png", request.resumable.mimetype())
self.assertEqual(request.body, None)
self.assertEqual(request.resumable_uri, None)
http = HttpMockSequence(
[
({"status": "200", "location": "http://upload.example.com"}, ""),
(
{
"status": "308",
"location": "http://upload.example.com/2",
"range": "0-12",
},
"",
),
(
{
"status": "308",
"location": "http://upload.example.com/3",
"range": "0-%d" % (media_upload.size() - 2),
},
"",
),
({"status": "200"}, '{"foo": "bar"}'),
]
)
status, body = request.next_chunk(http=http)
self.assertEqual(None, body)
self.assertTrue(isinstance(status, MediaUploadProgress))
self.assertEqual(13, status.resumable_progress)
# Two requests should have been made and the resumable_uri should have been
# updated for each one.
self.assertEqual(request.resumable_uri, "http://upload.example.com/2")
self.assertEqual(media_upload, request.resumable)
self.assertEqual(13, request.resumable_progress)
status, body = request.next_chunk(http=http)
self.assertEqual(request.resumable_uri, "http://upload.example.com/3")
self.assertEqual(media_upload.size() - 1, request.resumable_progress)
self.assertEqual(request.body, None)
# Final call to next_chunk should complete the upload.
status, body = request.next_chunk(http=http)
self.assertEqual(body, {"foo": "bar"})
self.assertEqual(status, None)
def test_resumable_media_good_upload_from_execute(self):
"""Not a multipart upload."""
self.http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=self.http, static_discovery=False)
media_upload = MediaFileUpload(datafile("small.png"), resumable=True)
request = zoo.animals().insert(media_body=media_upload, body=None)
assertUrisEqual(
self,
"https://www.googleapis.com/upload/zoo/v1/animals?uploadType=resumable&alt=json",
request.uri,
)
http = HttpMockSequence(
[
({"status": "200", "location": "http://upload.example.com"}, ""),
(
{
"status": "308",
"location": "http://upload.example.com/2",
"range": "0-12",
},
"",
),
(
{
"status": "308",
"location": "http://upload.example.com/3",
"range": "0-%d" % media_upload.size(),
},
"",
),
({"status": "200"}, '{"foo": "bar"}'),
]
)
body = request.execute(http=http)
self.assertEqual(body, {"foo": "bar"})
def test_resumable_media_fail_unknown_response_code_first_request(self):
"""Not a multipart upload."""
self.http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=self.http, static_discovery=False)
media_upload = MediaFileUpload(datafile("small.png"), resumable=True)
request = zoo.animals().insert(media_body=media_upload, body=None)
http = HttpMockSequence(
[({"status": "400", "location": "http://upload.example.com"}, "")]
)
try:
request.execute(http=http)
self.fail("Should have raised ResumableUploadError.")
except ResumableUploadError as e:
self.assertEqual(400, e.resp.status)
def test_resumable_media_fail_unknown_response_code_subsequent_request(self):
"""Not a multipart upload."""
self.http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=self.http, static_discovery=False)
media_upload = MediaFileUpload(datafile("small.png"), resumable=True)
request = zoo.animals().insert(media_body=media_upload, body=None)
http = HttpMockSequence(
[
({"status": "200", "location": "http://upload.example.com"}, ""),
({"status": "400"}, ""),
]
)
self.assertRaises(HttpError, request.execute, http=http)
self.assertTrue(request._in_error_state)
http = HttpMockSequence(
[
({"status": "308", "range": "0-5"}, ""),
({"status": "308", "range": "0-6"}, ""),
]
)
status, body = request.next_chunk(http=http)
self.assertEqual(
status.resumable_progress,
7,
"Should have first checked length and then tried to PUT more.",
)
self.assertFalse(request._in_error_state)
# Put it back in an error state.
http = HttpMockSequence([({"status": "400"}, "")])
self.assertRaises(HttpError, request.execute, http=http)
self.assertTrue(request._in_error_state)
# Pretend the last request that 400'd actually succeeded.
http = HttpMockSequence([({"status": "200"}, '{"foo": "bar"}')])
status, body = request.next_chunk(http=http)
self.assertEqual(body, {"foo": "bar"})
def test_media_io_base_stream_unlimited_chunksize_resume(self):
self.http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=self.http, static_discovery=False)
# Set up a seekable stream and try to upload in single chunk.
fd = BytesIO(b'01234"56789"')
media_upload = MediaIoBaseUpload(
fd=fd, mimetype="text/plain", chunksize=-1, resumable=True
)
request = zoo.animals().insert(media_body=media_upload, body=None)
# The single chunk fails, restart at the right point.
http = HttpMockSequence(
[
({"status": "200", "location": "http://upload.example.com"}, ""),
(
{
"status": "308",
"location": "http://upload.example.com/2",
"range": "0-4",
},
"",
),
({"status": "200"}, "echo_request_body"),
]
)
body = request.execute(http=http)
self.assertEqual("56789", body)
def test_media_io_base_stream_chunksize_resume(self):
self.http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=self.http, static_discovery=False)
# Set up a seekable stream and try to upload in chunks.
fd = BytesIO(b"0123456789")
media_upload = MediaIoBaseUpload(
fd=fd, mimetype="text/plain", chunksize=5, resumable=True
)
request = zoo.animals().insert(media_body=media_upload, body=None)
# The single chunk fails, pull the content sent out of the exception.
http = HttpMockSequence(
[
({"status": "200", "location": "http://upload.example.com"}, ""),
({"status": "400"}, "echo_request_body"),
]
)
try:
body = request.execute(http=http)
except HttpError as e:
self.assertEqual(b"01234", e.content)
def test_resumable_media_handle_uploads_of_unknown_size(self):
http = HttpMockSequence(
[
({"status": "200", "location": "http://upload.example.com"}, ""),
({"status": "200"}, "echo_request_headers_as_json"),
]
)
self.http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=self.http, static_discovery=False)
# Create an upload that doesn't know the full size of the media.
class IoBaseUnknownLength(MediaUpload):
def chunksize(self):
return 10
def mimetype(self):
return "image/png"
def size(self):
return None
def resumable(self):
return True
def getbytes(self, begin, length):
return "0123456789"
upload = IoBaseUnknownLength()
request = zoo.animals().insert(media_body=upload, body=None)
status, body = request.next_chunk(http=http)
self.assertEqual(body, {"Content-Range": "bytes 0-9/*", "Content-Length": "10"})
def test_resumable_media_no_streaming_on_unsupported_platforms(self):
self.http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=self.http, static_discovery=False)
class IoBaseHasStream(MediaUpload):
def chunksize(self):
return 10
def mimetype(self):
return "image/png"
def size(self):
return None
def resumable(self):
return True
def getbytes(self, begin, length):
return "0123456789"
def has_stream(self):
return True
def stream(self):
raise NotImplementedError()
upload = IoBaseHasStream()
orig_version = sys.version_info
sys.version_info = (2, 6, 5, "final", 0)
request = zoo.animals().insert(media_body=upload, body=None)
# This should raise an exception because stream() will be called.
http = HttpMockSequence(
[
({"status": "200", "location": "http://upload.example.com"}, ""),
({"status": "200"}, "echo_request_headers_as_json"),
]
)
self.assertRaises(NotImplementedError, request.next_chunk, http=http)
sys.version_info = orig_version
def test_resumable_media_handle_uploads_of_unknown_size_eof(self):
http = HttpMockSequence(
[
({"status": "200", "location": "http://upload.example.com"}, ""),
({"status": "200"}, "echo_request_headers_as_json"),
]
)
self.http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=self.http, static_discovery=False)
fd = BytesIO(b"data goes here")
# Create an upload that doesn't know the full size of the media.
upload = MediaIoBaseUpload(
fd=fd, mimetype="image/png", chunksize=15, resumable=True
)
request = zoo.animals().insert(media_body=upload, body=None)
status, body = request.next_chunk(http=http)
self.assertEqual(
body, {"Content-Range": "bytes 0-13/14", "Content-Length": "14"}
)
def test_resumable_media_handle_resume_of_upload_of_unknown_size(self):
http = HttpMockSequence(
[
({"status": "200", "location": "http://upload.example.com"}, ""),
({"status": "400"}, ""),
]
)
self.http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=self.http, static_discovery=False)
# Create an upload that doesn't know the full size of the media.
fd = BytesIO(b"data goes here")
upload = MediaIoBaseUpload(
fd=fd, mimetype="image/png", chunksize=500, resumable=True
)
request = zoo.animals().insert(media_body=upload, body=None)
# Put it in an error state.
self.assertRaises(HttpError, request.next_chunk, http=http)
http = HttpMockSequence(
[({"status": "400", "range": "0-5"}, "echo_request_headers_as_json")]
)
try:
# Should resume the upload by first querying the status of the upload.
request.next_chunk(http=http)
except HttpError as e:
expected = {"Content-Range": "bytes */14", "content-length": "0"}
self.assertEqual(
expected,
json.loads(e.content.decode("utf-8")),
"Should send an empty body when requesting the current upload status.",
)
def test_pickle(self):
sorted_resource_keys = [
"_baseUrl",
"_developerKey",
"_dynamic_attrs",
"_http",
"_model",
"_requestBuilder",
"_resourceDesc",
"_rootDesc",
"_schema",
"animals",
"global_",
"load",
"loadNoTemplate",
"my",
"new_batch_http_request",
"query",
"scopedAnimals",
]
http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=http, static_discovery=False)
self.assertEqual(sorted(zoo.__dict__.keys()), sorted_resource_keys)
pickled_zoo = pickle.dumps(zoo)
new_zoo = pickle.loads(pickled_zoo)
self.assertEqual(sorted(new_zoo.__dict__.keys()), sorted_resource_keys)
self.assertTrue(hasattr(new_zoo, "animals"))
self.assertTrue(callable(new_zoo.animals))
self.assertTrue(hasattr(new_zoo, "global_"))
self.assertTrue(callable(new_zoo.global_))
self.assertTrue(hasattr(new_zoo, "load"))
self.assertTrue(callable(new_zoo.load))
self.assertTrue(hasattr(new_zoo, "loadNoTemplate"))
self.assertTrue(callable(new_zoo.loadNoTemplate))
self.assertTrue(hasattr(new_zoo, "my"))
self.assertTrue(callable(new_zoo.my))
self.assertTrue(hasattr(new_zoo, "query"))
self.assertTrue(callable(new_zoo.query))
self.assertTrue(hasattr(new_zoo, "scopedAnimals"))
self.assertTrue(callable(new_zoo.scopedAnimals))
self.assertEqual(sorted(zoo._dynamic_attrs), sorted(new_zoo._dynamic_attrs))
self.assertEqual(zoo._baseUrl, new_zoo._baseUrl)
self.assertEqual(zoo._developerKey, new_zoo._developerKey)
self.assertEqual(zoo._requestBuilder, new_zoo._requestBuilder)
self.assertEqual(zoo._resourceDesc, new_zoo._resourceDesc)
self.assertEqual(zoo._rootDesc, new_zoo._rootDesc)
# _http, _model and _schema won't be equal since we will get new
# instances upon un-pickling
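    # Returns an Http object that serves zoo.json for the zoo discovery URI and delegates everything else.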
def _dummy_zoo_request(self):
zoo_contents = read_datafile("zoo.json")
zoo_uri = uritemplate.expand(DISCOVERY_URI, {"api": "zoo", "apiVersion": "v1"})
if "REMOTE_ADDR" in os.environ:
zoo_uri = util._add_query_parameter(
zoo_uri, "userIp", os.environ["REMOTE_ADDR"]
)
http = build_http()
original_request = http.request
def wrapped_request(uri, method="GET", *args, **kwargs):
if uri == zoo_uri:
return httplib2.Response({"status": "200"}), zoo_contents
return original_request(uri, method=method, *args, **kwargs)
http.request = wrapped_request
return http
def _dummy_token(self):
access_token = "foo"
client_id = "some_client_id"
client_secret = "cOuDdkfjxxnv+"
refresh_token = "1/0/a.df219fjls0"
token_expiry = datetime.datetime.utcnow()
user_agent = "refresh_checker/1.0"
return OAuth2Credentials(
access_token,
client_id,
client_secret,
refresh_token,
token_expiry,
GOOGLE_TOKEN_URI,
user_agent,
)
def test_pickle_with_credentials(self):
credentials = self._dummy_token()
http = self._dummy_zoo_request()
http = credentials.authorize(http)
self.assertTrue(hasattr(http.request, "credentials"))
zoo = build("zoo", "v1", http=http, static_discovery=False)
pickled_zoo = pickle.dumps(zoo)
new_zoo = pickle.loads(pickled_zoo)
self.assertEqual(sorted(zoo.__dict__.keys()), sorted(new_zoo.__dict__.keys()))
new_http = new_zoo._http
self.assertFalse(hasattr(new_http.request, "credentials"))
def test_resumable_media_upload_no_content(self):
self.http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=self.http, static_discovery=False)
media_upload = MediaFileUpload(datafile("empty"), resumable=True)
request = zoo.animals().insert(media_body=media_upload, body=None)
self.assertEqual(media_upload, request.resumable)
self.assertEqual(request.body, None)
self.assertEqual(request.resumable_uri, None)
http = HttpMockSequence(
[
({"status": "200", "location": "http://upload.example.com"}, ""),
(
{
"status": "308",
"location": "http://upload.example.com/2",
"range": "0-0",
},
"",
),
]
)
status, body = request.next_chunk(http=http)
self.assertEqual(None, body)
self.assertTrue(isinstance(status, MediaUploadProgress))
self.assertEqual(0, status.progress())
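# Pagination helpers: list_next() should build follow-up requests from nextPageToken / pageToken values.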
class Next(unittest.TestCase):
def test_next_successful_none_on_no_next_page_token(self):
self.http = HttpMock(datafile("tasks.json"), {"status": "200"})
tasks = build("tasks", "v1", http=self.http)
request = tasks.tasklists().list()
self.assertEqual(None, tasks.tasklists().list_next(request, {}))
def test_next_successful_none_on_empty_page_token(self):
self.http = HttpMock(datafile("tasks.json"), {"status": "200"})
tasks = build("tasks", "v1", http=self.http)
request = tasks.tasklists().list()
next_request = tasks.tasklists().list_next(request, {"nextPageToken": ""})
self.assertEqual(None, next_request)
def test_next_successful_with_next_page_token(self):
self.http = HttpMock(datafile("tasks.json"), {"status": "200"})
tasks = build("tasks", "v1", http=self.http)
request = tasks.tasklists().list()
next_request = tasks.tasklists().list_next(request, {"nextPageToken": "123abc"})
parsed = list(urlparse(next_request.uri))
q = parse_qs(parsed[4])
self.assertEqual(q["pageToken"][0], "123abc")
def test_next_successful_with_next_page_token_alternate_name(self):
self.http = HttpMock(datafile("bigquery.json"), {"status": "200"})
bigquery = build("bigquery", "v2", http=self.http)
request = bigquery.tabledata().list(datasetId="", projectId="", tableId="")
next_request = bigquery.tabledata().list_next(request, {"pageToken": "123abc"})
parsed = list(urlparse(next_request.uri))
q = parse_qs(parsed[4])
self.assertEqual(q["pageToken"][0], "123abc")
def test_next_successful_with_next_page_token_in_body(self):
self.http = HttpMock(datafile("logging.json"), {"status": "200"})
logging = build("logging", "v2", http=self.http)
request = logging.entries().list(body={})
next_request = logging.entries().list_next(request, {"nextPageToken": "123abc"})
body = JsonModel().deserialize(next_request.body)
self.assertEqual(body["pageToken"], "123abc")
def test_next_with_method_with_no_properties(self):
self.http = HttpMock(datafile("latitude.json"), {"status": "200"})
service = build("latitude", "v1", http=self.http, static_discovery=False)
service.currentLocation().get()
def test_next_nonexistent_with_no_next_page_token(self):
self.http = HttpMock(datafile("drive.json"), {"status": "200"})
drive = build("drive", "v3", http=self.http)
drive.changes().watch(body={})
self.assertFalse(callable(getattr(drive.changes(), "watch_next", None)))
def test_next_successful_with_next_page_token_required(self):
self.http = HttpMock(datafile("drive.json"), {"status": "200"})
drive = build("drive", "v3", http=self.http)
request = drive.changes().list(pageToken="startPageToken")
next_request = drive.changes().list_next(request, {"nextPageToken": "123abc"})
parsed = list(urlparse(next_request.uri))
q = parse_qs(parsed[4])
self.assertEqual(q["pageToken"][0], "123abc")
class MediaGet(unittest.TestCase):
def test_get_media(self):
http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=http, static_discovery=False)
request = zoo.animals().get_media(name="Lion")
parsed = urlparse(request.uri)
q = parse_qs(parsed[4])
self.assertEqual(q["alt"], ["media"])
self.assertEqual(request.headers["accept"], "*/*")
http = HttpMockSequence([({"status": "200"}, "standing in for media")])
response = request.execute(http=http)
self.assertEqual(b"standing in for media", response)
if __name__ == "__main__":
unittest.main()
|
py
|
1a57ad789a7ef2e51f7f24ee78f1508d64e85cb3
|
import psycopg2
from settings import (PG_DB_URI, PG_DBNAME, PG_SERVER_HOST,
PG_USER_PASSWORD, PG_USER)
# Update connection string information
host = PG_SERVER_HOST
dbname = PG_DBNAME
user = PG_USER
password = PG_USER_PASSWORD
sslmode = "require"
# Construct connection string
conn_string = f"host={host} user={user} dbname={dbname} password={password} sslmode={sslmode}"
conn = psycopg2.connect(conn_string)
print("Connection established")
# Use a cursor to wrap the connection and send commands
cursor = conn.cursor()
# Check the list of databases within the PG server
cursor.execute("SELECT datname from pg_database;")
rows = cursor.fetchall()
print("Here are the databases:\n", rows)
# Check whether the "mlflow" database already exists
if any("mlflow" in x[0] for x in rows):
    print("mlflow database exists")
else:
# If mlflow database does not exist, create it
print("mlflow database does not exist")
cursor.execute("create database mlflow")
# Clean up
conn.commit()
cursor.close()
conn.close()
|
py
|
1a57ad9861fa48db6288cb498e1213eab80f97d6
|
import unittest
import unittest.mock
from programy.clients.render.renderer import RichMediaRenderer
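# Test double that records the userid and payload handed to each handle_* callback.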
class MockRichMediaRenderer(RichMediaRenderer):
def __init__(self, config):
RichMediaRenderer.__init__(self, config)
def handle_text(self, userid, text):
self._userid = userid
self._text = text
return None
def handle_url_button(self, userid, button):
self._userid = userid
self._button = button
return None
def handle_postback_button(self, userid, button):
self._userid = userid
self._button = button
return None
def handle_link(self, userid, link):
self._userid = userid
self._link = link
return None
def handle_image(self, userid, image):
self._userid = userid
self._image = image
return None
def handle_video(self, userid, video):
self._userid = userid
self._video = video
return None
def handle_card(self, userid, card):
self._userid = userid
self._card = card
return None
def handle_carousel(self, userid, carousel):
self._userid = userid
self._carousel = carousel
return None
def handle_reply(self, userid, reply):
self._userid = userid
self._reply = reply
return None
def handle_delay(self, userid, delay):
self._userid = userid
self._delay = delay
return None
def handle_split(self, userid, split):
self._userid = userid
self._split = split
return None
def handle_list(self, userid, list):
self._userid = userid
self._list = list
return None
def handle_ordered_list(self, userid, items):
self._userid = userid
        self._list = items
return None
def handle_location(self, userid, location):
self._userid = userid
self._location = location
return None
def handle_tts(self, userid, text):
self._userid = userid
self._text = text
return None
class OpenChatBotRichMediaRendererTests(unittest.TestCase):
def test_card(self):
mock_config = unittest.mock.Mock()
renderer = MockRichMediaRenderer(mock_config)
self.assertIsNotNone(renderer)
renderer.render("testuser", """
<card>
<image>https://www.ikea.com/fr/fr/images/products/strandmon-fauteuil-enfant-gris__0574584_PE668407_S4.JPG</image>
<title>Fauteuil enfant, Visslegris</title>
<subtitle>Quand ils peuvent imiter les adultes, les enfants sesentent spéciaux et importants. C'est pourquoi nous avons créé une version miniature du fauteuil STRANDMON, l'un de nos produits favoris.</subtitle>
<button>
<text>Acheter en ligne</text>
<url>https://serv-api.target2sell.com/1.1/R/cookie/OFCBMN5RRHSG5L/1200/OFCBMN5RRHSG5L-1200-5/20343224/1/viewTogether-%7BtypeOfContextList%3A%5B%22current%22%2C%22view%22%5D%7D/f082e51f-561d-47f7-c0cb-13735e58bfc1</url>
</button>
</card>""")
self.assertEqual(renderer._userid, "testuser")
self.assertIsNotNone(renderer._card)
self.assertEqual("card", renderer._card['type'])
self.assertEqual(renderer._card['image'], "https://www.ikea.com/fr/fr/images/products/strandmon-fauteuil-enfant-gris__0574584_PE668407_S4.JPG")
self.assertEqual(renderer._card['title'], "Fauteuil enfant, Visslegris")
self.assertEqual(renderer._card['subtitle'], "Quand ils peuvent imiter les adultes, les enfants sesentent spéciaux et importants. C'est pourquoi nous avons créé une version miniature du fauteuil STRANDMON, l'un de nos produits favoris.")
self.assertEqual(len(renderer._card['buttons']), 1)
button1 = renderer._card['buttons'][0]
self.assertEqual("button", button1['type'])
self.assertEqual(button1['text'], "Acheter en ligne")
self.assertEqual(button1['url'], "https://serv-api.target2sell.com/1.1/R/cookie/OFCBMN5RRHSG5L/1200/OFCBMN5RRHSG5L-1200-5/20343224/1/viewTogether-%7BtypeOfContextList%3A%5B%22current%22%2C%22view%22%5D%7D/f082e51f-561d-47f7-c0cb-13735e58bfc1")
|
py
|
1a57adbf1e672b1a9649eacab143d8e980e190d7
|
import os
import subprocess
DIRECTORY = "graphs"
PACKAGES = ["", "event", "communications"]
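# Run pyreverse once per package and write the generated diagrams into the graphs/ directory.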
def generate_graphs():
os.makedirs(DIRECTORY, exist_ok=True)
for package in PACKAGES:
subprocess.run(
["pyreverse", f"../iot_firmware/{package}", "-d", DIRECTORY, "-p", package]
)
if __name__ == "__main__":
generate_graphs()
|
py
|
1a57aebf16c4439174844b3e21bb42bf4331d02a
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_urllib_request,
)
from ..utils import (
parse_iso8601,
)
class SportDeutschlandIE(InfoExtractor):
_VALID_URL = r'https?://sportdeutschland\.tv/(?P<sport>[^/?#]+)/(?P<id>[^?#/]+)(?:$|[?#])'
_TESTS = [{
'url': 'http://sportdeutschland.tv/badminton/live-li-ning-badminton-weltmeisterschaft-2014-kopenhagen',
'info_dict': {
'id': 'live-li-ning-badminton-weltmeisterschaft-2014-kopenhagen',
'ext': 'mp4',
'title': 're:Li-Ning Badminton Weltmeisterschaft 2014 Kopenhagen',
'categories': ['Badminton'],
'view_count': int,
'thumbnail': 're:^https?://.*\.jpg$',
'description': 're:Die Badminton-WM 2014 aus Kopenhagen bei Sportdeutschland\.TV',
'timestamp': int,
'upload_date': 're:^201408[23][0-9]$',
},
'params': {
'skip_download': 'Live stream',
},
}, {
'url': 'http://sportdeutschland.tv/li-ning-badminton-wm-2014/lee-li-ning-badminton-weltmeisterschaft-2014-kopenhagen-herren-einzel-wei-vs',
'info_dict': {
'id': 'lee-li-ning-badminton-weltmeisterschaft-2014-kopenhagen-herren-einzel-wei-vs',
'ext': 'mp4',
'upload_date': '20140825',
'description': 'md5:60a20536b57cee7d9a4ec005e8687504',
'timestamp': 1408976060,
'title': 'Li-Ning Badminton Weltmeisterschaft 2014 Kopenhagen: Herren Einzel, Wei Lee vs. Keun Lee',
'thumbnail': 're:^https?://.*\.jpg$',
'view_count': int,
'categories': ['Li-Ning Badminton WM 2014'],
}
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
sport_id = mobj.group('sport')
api_url = 'http://splink.tv/api/permalinks/%s/%s' % (
sport_id, video_id)
req = compat_urllib_request.Request(api_url, headers={
'Accept': 'application/vnd.vidibus.v2.html+json',
'Referer': url,
})
data = self._download_json(req, video_id)
categories = list(data.get('section', {}).get('tags', {}).values())
asset = data['asset']
assets_info = self._download_json(asset['url'], video_id)
formats = []
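        # The asset either points at a SMIL manifest (HLS plus RTMP entries) or at a direct video URL.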
smil_url = assets_info['video']
if '.smil' in smil_url:
m3u8_url = smil_url.replace('.smil', '.m3u8')
formats.extend(
self._extract_m3u8_formats(m3u8_url, video_id, ext='mp4'))
smil_doc = self._download_xml(
smil_url, video_id, note='Downloading SMIL metadata')
base_url = smil_doc.find('./head/meta').attrib['base']
formats.extend([{
                'format_id': 'rtmp',
'url': base_url,
'play_path': n.attrib['src'],
'ext': 'flv',
'preference': -100,
'format_note': 'Seems to fail at example stream',
} for n in smil_doc.findall('./body/video')])
else:
formats.append({'url': smil_url})
self._sort_formats(formats)
return {
'id': video_id,
'formats': formats,
'title': asset['title'],
'thumbnail': asset.get('image'),
'description': asset.get('teaser'),
'categories': categories,
'view_count': asset.get('views'),
'rtmp_live': asset.get('live'),
'timestamp': parse_iso8601(asset.get('date')),
}
|
py
|
1a57af607d32a1349a50bf274bb77b11946a6c3d
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-12-04 07:55
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import jsonfield.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Schema',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='SchemaQuestion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.CharField(max_length=100)),
('schema', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='dynamic_schemas.Schema')),
],
),
migrations.CreateModel(
name='SchemaResponse',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('qa_set', jsonfield.fields.JSONField()),
('pub_date', models.DateTimeField(auto_now=True)),
('schema', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='dynamic_schemas.SchemaQuestion')),
],
),
]
|
py
|
1a57b277160350d763a2749301683d69a4cbc0d8
|
import os, setuptools
dir_path = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(dir_path, 'requirements.txt')) as f:
required_packages = f.read().splitlines()
with open(os.path.join(dir_path, 'README.md'), "r") as fh:
long_description = fh.read()
setuptools.setup(
name='FINE',
version='1.0.0',
author='Lara Welder',
author_email='[email protected]',
description='Framework for integrated energy systems assessment',
long_description=long_description,
long_description_content_type="text/markdown",
url='https://github.com/FZJ-IEK3-VSA/FINE',
include_package_data=True,
packages=setuptools.find_packages(),
install_requires=required_packages,
setup_requires=['setuptools-git'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development :: Libraries :: Python Modules'],
keywords=['energy system', 'optimization'],
)
|
py
|
1a57b37563178f5dd41d591eae902a29725ffd04
|
# import pemfc_dash
# import pemfc_dash.main
from pemfc_dash.main import app
server = app.server
if __name__ == "__main__":
# [print(num, x) for num, x in enumerate(dl.ID_LIST) ]
app.run_server(debug=True, use_reloader=False)
# app.run_server(debug=True, use_reloader=False,
# host="0.0.0.0", port=int(os.environ.get("PORT", 8080)))
|
py
|
1a57b3dd361afd7fd3cc1f77a96a886b4f50a761
|
import re
from rest_framework import serializers
from rest_framework.validators import UniqueTogetherValidator
from api.helpers.utils import StatusChoices
from users.serializers import UserSerializer
from flights.serializers import FlightSerializer
from .models import Booking
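# Ticket numbers are exactly six alphanumeric characters.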
def is_valid_ticket(value):
if re.search(r"^[a-zA-Z0-9]{6}$", value) is None:
raise serializers.ValidationError('Ticket number invalid please provide a valid ticket')
class BookingSerializer(serializers.ModelSerializer):
"""Booking serializer
Arguments:
ModelSerializer {serializer} -- rest framework model serializer
"""
class Meta:
model = Booking
fields = '__all__'
validators = [
UniqueTogetherValidator(
queryset=Booking.objects.all(),
fields=('flight_id', 'passenger_id'),
message='Ticket already booked'
)
]
extra_kwargs = {'flight_status': {'read_only': True}}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['flight_id'].error_messages[
'does_not_exist'] = 'Flight with the id "{pk_value}" does not exist'
class TicketSerializer(serializers.ModelSerializer):
"""Ticket serializer
Arguments:
ModelSerializer {serializer} -- rest framework model serializer
"""
flight_status = serializers.CharField(source='get_flight_status_display')
passenger = UserSerializer(read_only=True, source='passenger_id')
flight = FlightSerializer(read_only=True, source='flight_id')
class Meta:
model = Booking
exclude = ('flight_id', 'passenger_id')
class TicketStatusSerializer(serializers.ModelSerializer):
"""Ticket status serializer
Arguments:
ModelSerializer {serializer} -- rest framework model serializer
"""
ticket = serializers.CharField(write_only=True,
source='ticket_number',
validators=[is_valid_ticket])
ticket_number = serializers.CharField(read_only=True)
class Meta:
model = Booking
fields = ('ticket_number', 'ticket')
class TicketReservationSerializer(serializers.ModelSerializer):
"""Ticket reservation serializer
Arguments:
ModelSerializer {serializer} -- rest framework model serializer
"""
class Meta:
model = Booking
fields = ('flight_status', 'amount_paid', 'reserved_at')
def validate(self, data):
        if 'amount_paid' not in data:
raise serializers.ValidationError('Field amount paid is required')
if data['amount_paid'] != self.instance.flight_id.flight_cost.amount:
raise serializers.ValidationError('Amount paid is not equal to the flight cost')
return data
class BookingReservationsSerializer(serializers.ModelSerializer):
date = serializers.DateField(required=True, write_only=True)
status = serializers.ChoiceField(required=True,
write_only=True,
choices=[(choice.value, choice.name)
for choice in StatusChoices])
flight_status = serializers.CharField(read_only=True, source='get_flight_status_display')
class Meta:
model = Booking
fields = '__all__'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for field in self.fields:
if field not in ('date', 'status'):
self.fields[field].read_only = True
|
py
|
1a57b4537b74b99cbb53f9f3095e36cabc864a2a
|
"""Tests for the Battery data frame"""
import json
import os
import h5py
import pandas as pd
from pandas import HDFStore
from pytest import fixture
from batdata.data import BatteryDataset
@fixture()
def test_df():
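    # Minimal three-row dataset with a name in its metadata, shared by all tests below.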
return BatteryDataset(raw_data=pd.DataFrame({
'current': [1, 0, -1],
'voltage': [2, 2, 2]
}), metadata={'name': 'Test data'})
def test_write_hdf(tmpdir, test_df):
"""Test whether the contents of the HDF5 file are reasonably understandable"""
# Write the HDF file
out_path = os.path.join(tmpdir, 'test.h5')
test_df.to_batdata_hdf(out_path)
# Investigate the contents
with h5py.File(out_path) as f:
assert 'metadata' in f.attrs
assert json.loads(f.attrs['metadata'])['name'] == 'Test data'
assert 'raw_data' in f
# Test writing to an already-open HDFStore
store = HDFStore(out_path, 'r+')
test_df.to_batdata_hdf(store)
def test_read_hdf(tmpdir, test_df):
# Write it
out_path = os.path.join(tmpdir, 'test.h5')
test_df.to_batdata_hdf(out_path)
# Test reading only the metadata
metadata = BatteryDataset.get_metadata_from_hdf5(out_path)
assert metadata.name == 'Test data'
# Read it
data = BatteryDataset.from_batdata_hdf(out_path)
assert data.metadata.name == 'Test data'
# Test reading from an already-open file
store = HDFStore(out_path, 'r')
data = BatteryDataset.from_batdata_hdf(store)
assert data.metadata.name == 'Test data'
def test_dict(test_df):
# Test writing it
d = test_df.to_batdata_dict()
assert d['metadata']['name'] == 'Test data'
assert 'raw_data' in d
# Test reading it
data = BatteryDataset.from_batdata_dict(d)
assert len(data.raw_data) == 3
assert data.metadata.name == 'Test data'
|
py
|
1a57b4aeaeb64fb7ac26a2192d68142760c015b4
|
from main_program import add, product
import pytest
@pytest.mark.numbers
def test_add_number():
assert add(5, 4)==9
assert add(-9, 0)==-9
@pytest.mark.strings
def test_add_string():
assert add('hello', 'world')=='helloworld'
@pytest.mark.numbers
def test_product_number():
assert product(5, 4)==20
assert product(-9, 0)==0
@pytest.mark.strings
def test_product_string():
assert product(3, 'hello')=='hellohellohello'
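# With the markers above registered (e.g. in pytest.ini under `markers =`), subsets can
# be selected from the command line:
#   pytest -m numbers   # run only the numeric tests
#   pytest -m strings   # run only the string tests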
|
py
|
1a57b5b1b8c9e7ba8e5c7af343ff8c20751d0b55
|
from ex112.utilidadescev import moeda
from ex112.utilidadescev import dado
p = dado.leiaDinheiro('Digite um valor : ')
moeda.resumo(p)
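# Note: the prompt above is Portuguese ("Digite um valor" = "Enter a value");
# dado.leiaDinheiro reads a monetary value from the user and moeda.resumo presumably
# prints a formatted summary of that amount.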
|
py
|
1a57b616af0d009b87d650d42b224fe08129225a
|
#!/usr/bin/python3
import sys
import pytest
from brownie._cli.__main__ import main as cli_main
@pytest.fixture
def cli_tester(monkeypatch):
c = CliTester(monkeypatch)
yield c
c.close()
class CliTester:
def __init__(self, monkeypatch):
self.argv = sys.argv.copy()
self.monkeypatch = monkeypatch
self.called = False
self.total = 0
self.count = 0
def set_target(self, target):
self.monkeypatch.setattr(target, self.catch)
def set_subtargets(self, *targets):
for item in targets:
self.monkeypatch.setattr(item, self.incremental_catch)
self.total += 1
def run(self, argv, args=(), kwargs={}):
sys.argv = ["brownie"] + argv.split(" ")
self.args = args
self.kwargs = kwargs
cli_main()
assert self.called is True
assert self.count == self.total
self.called = False
self.count = 0
def catch(self, *args, **kwargs):
assert self.args == args
assert self.kwargs == kwargs
self.called = True
def incremental_catch(self, *args, **kwargs):
self.count += 1
def close(self):
sys.argv = self.argv
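# Pattern used by the tests below (for orientation): each test monkeypatches the
# callable that a CLI sub-command dispatches to, invokes cli_main() with a fake
# sys.argv, and asserts that the patched target received the expected args/kwargs.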
def test_cli_init(cli_tester):
cli_tester.set_target("brownie.project.new")
cli_tester.run("init", args=(".", False))
cli_tester.run("init test/path --force", args=("test/path", True))
def test_cli_bake(cli_tester):
cli_tester.set_target("brownie.project.from_brownie_mix")
cli_tester.run("bake token", args=("token", None, False))
cli_tester.run("bake token test/path --force", args=("token", "test/path", True))
def test_cli_compile(cli_tester, testproject):
cli_tester.set_target("brownie.project.load")
cli_tester.run("compile", args=(testproject._path,))
cli_tester.run("compile --all", args=(testproject._path,))
def test_cli_analyze(cli_tester, testproject):
cli_tester.set_target("brownie.project.load")
cli_tester.run("analyze")
def test_cli_console(cli_tester, testproject):
testproject.close()
cli_tester.set_target("brownie._cli.console.Console.interact")
cli_tester.set_subtargets("brownie.network.connect")
cli_tester.run("console", kwargs={"banner": "Brownie environment is ready.", "exitmsg": ""})
# travis doesn't like this
# def test_cli_gui(cli_tester, project_path):
# cli_tester.patch('brownie.gui.Gui.mainloop')
# cli_tester.counter_patch('brownie.project.load')
# cli_tester('gui')
def test_cli_run(cli_tester, testproject):
cli_tester.set_target("brownie.run")
cli_tester.set_subtargets("brownie.network.connect")
cli_tester.run("run testfile", args=("testfile",), kwargs={"method_name": "main"})
def test_cli_incorrect(cli_tester):
with pytest.raises(SystemExit):
cli_tester.run("foo")
|
py
|
1a57b64113de687c995cc75e453419a5074fc861
|
import os
import re
import tempfile
import pytest
from analyzer import util
comment = (
'@S.Jovan The expected result should look sth. like this:\n[\n{ ""key1"": str10, ""key2"": str20, ""key3"": str30 },\n{ ""key1"": str11, ""key2"": str21, ""key3"": str31 },\n{ ""key1"": str12, ""key2"": str22, ""key3"": str32 },\n...'
)
PREDICTIONS_SAMPLE = os.path.abspath(
os.path.join(
os.path.dirname(__file__), 'sanitized_comments_predictions.csv'))
NEG_COUNTS = 21
POS_COUNTS = 21
NEUTRAL_COUNTS = 166
def setup_function(function):
global post_base_text, post_expected_text, code_segment, pre_segment, blockquote_segment
post_base_text = "Hi, I have a problem. Here is my code:{}{}{}Can anyone help me?"
code_segment = "<code> for i in range(10):\n print(10)\n#wupwup!</code>"
pre_segment = "<pre> for val in elems:\n\n\n #do something\nprint(val)</pre>"
blockquote_segment = r"<blockquote>Gzipped data: \x1f\x8b\x08\x00\xf9w[Y\x02\xff%\x8e=\x0e\xc30\x08F\xaf\x82\x98\x91\x05\xe6\xc7\xa6c\xf7\x9e\xa0\xca\x96\xa5[\x86lQ\xee^\xdcN\xf0\xf4\xc1\x83\x0b?\xf8\x00|=\xe7D\x02<\n\xde\x17\xee\xab\xb85%\x82L\x02\xcb\xa6N\xa0\x7fri\xae\xd5K\xe1$\xe83\xc3\x08\x86Z\x81\xa9g-y\x88\xf6\x9a\xf5E\xde\x99\x7f\x96\xb1\xd5\x99\xb3\xfcb\x99\x121D\x1bG\xe7^.\xdcWPO\xdc\xdb\xfd\x05\x0ev\x15\x1d\x99\x00\x00\x00</blockquote>"
def test_sanitize_post_md_code_pattern_is_not_greedy():
"""Test that the markdown code pattern does not remove too much."""
post = ("`this is code` but a greedy```other code``` pattern\nwould remove"
"`this whole post`"
"```along with``` this as well```hehe```")
expected = "but a greedy pattern would remove this as well"
sanitized = util.sanitize_post(post)
assert sanitized == expected
def test_sanitize_post_replaces_all_whitespace_with_single_spaces():
sanitized = util.sanitize_post(
post_base_text.format(code_segment, pre_segment, blockquote_segment))
counter = 0
for ws in re.findall(r'\s+', sanitized):
counter += 1
assert ws == ' '
assert counter # meta assert
def test_sanitize_post_removes_url():
https_url = "https://hello.world#aweseaf45we23.com"
http_url = "http://blabla.com#badonk"
c = "{} and other stuff {} awesome donk {}\n\nhurrdurr".format(
comment, https_url, http_url)
sanitized = util.sanitize_post(c)
assert https_url not in sanitized
assert http_url not in sanitized
def test_sanitize_post_removes_single_backtick_code():
markdown_code = '`for i in range(10):\n print(i)`'
c = "{} blablabla bla 234 d23r23 {}\nAnd just the finishing touch.".format(
comment, markdown_code)
sanitized = util.sanitize_post(c)
assert markdown_code not in sanitized
assert '`' not in sanitized
# and some subpatterns
assert 'for i in range' not in sanitized
assert 'range(10)' not in sanitized
def test_sanitize_post_removes_triple_backtick_code():
markdown_code = '```for i in range(10):\n print(i)```'
c = "{} blablabla bla 234 d23r23 {}\nAnd just the finishing touch.".format(
comment, markdown_code)
sanitized = util.sanitize_post(c)
assert markdown_code not in sanitized
assert '`' not in sanitized
# and some subpatterns
assert 'for i in range' not in sanitized
assert 'range(10)' not in sanitized
def test_sanitize_post_removes_blockquote_segments():
text = post_base_text.format(blockquote_segment, "\n", "")
expected_text = post_base_text.format("", " ", "")
sanitized = util.sanitize_post(text)
assert sanitized == expected_text
def test_sanitize_post_removes_linefeeds():
text = "This is a text with \r\n some \u2028 nbbbb \u2029 random \n linefeeds \r and carriege returns \r\n hello \n"
sanitized = util.sanitize_post(text)
assert '\n' not in sanitized
assert '\r' not in sanitized
assert '\u2028' not in sanitized
assert '\u2029' not in sanitized
def test_sanitize_post_removes_code_segments():
text = post_base_text.format("\n", code_segment, "\n")
# the two newlines are replaced with single space
expected_text = post_base_text.format(" ", "", "")
res = util.sanitize_post(text)
assert res == expected_text
def test_sanitize_post_removes_pre_segments():
text = post_base_text.format("\n", pre_segment, "\n")
# the two newlines are replaced with single space
expected_text = post_base_text.format(" ", "", "")
res = util.sanitize_post(text)
assert res == expected_text
def test_sanitize_post_removes_code_pre_and_tags():
text = post_base_text.format("</a href=https://url.com>", code_segment,
pre_segment)
expected_text = post_base_text.format("", "", "")
res = util.sanitize_post(text)
assert res == expected_text
@pytest.mark.timeout(0.2)
def test_sanitize_post_handles_tag_case_mismatch():
"""Previous version of sanitize post froze due to case mismatch in tags.
In this particular case, it was the <pre> ... </prE> that cause exponential
backtracking (we think) to kick in.
"""
text =\
'''<p><em>"I didn't like this because I have only two C files and it seemed very odd to split the source base at the language level like this"</em></p>
<p>Why does it seem odd? Consider this project:</p>
<pre>
project1\src\java
project1\src\cpp
project1\src\python
</pre>
<p>Or, if you decide to split things up into modules:</p>
<p><pre>
project1\module1\src\java
project1\module1\src\cpp
project1\module2\src\java
project1\module2\src\python
</prE></p>
<p>I guess it's a matter of personal taste, but the above structure is fairly common, and I think it works quite well once you get used to it.</p>'''
util.sanitize_post(text)
def test_sanitize_comment_replaces_all_whitespace_with_single_spaces():
sanitized = util.sanitize_comment(comment)
counter = 0
for ws in re.findall(r'\s+', sanitized):
counter += 1
assert ws == ' '
assert counter # meta assert
def test_sanitize_comment_removes_url():
https_url = "https://hello.world#aweseaf45we23.com"
http_url = "http://blabla.com#badonk"
c = "{} and other stuff {} awesome donk {}\n\nhurrdurr".format(
comment, https_url, http_url)
sanitized = util.sanitize_comment(c)
assert https_url not in sanitized
assert http_url not in sanitized
def test_sanitize_comment_leaves_user_mentions():
sanitized = util.sanitize_comment(comment)
assert '@S.Jovan' in sanitized
def test_sanitize_comment_strips_leading_and_trailing_ws():
text = " there is leading whitespace here <code>some\ncode</code> "
sanitized = util.sanitize_comment(text)
assert sanitized == sanitized.strip()
def test_sanitize_comment_removes_single_backtick_code():
markdown_code = '`for i in range(10):\n print(i)`'
c = "{} blablabla bla 234 d23r23 {}\nAnd just the finishing touch.".format(
comment, markdown_code)
sanitized = util.sanitize_comment(c)
assert markdown_code not in sanitized
assert '`' not in sanitized
# and some subpatterns
assert 'for i in range' not in sanitized
assert 'range(10)' not in sanitized
def test_sanitize_comment_removes_triple_backtick_code():
markdown_code = '```for i in range(10):\n print(i)```'
c = "{} blablabla bla 234 d23r23 {}\nAnd just the finishing touch.".format(
comment, markdown_code)
sanitized = util.sanitize_comment(c)
assert markdown_code not in sanitized
assert '`' not in sanitized
# and some subpatterns
assert 'for i in range' not in sanitized
assert 'range(10)' not in sanitized
def test_sanitize_comment_removes_markdown_formatting():
random_md = "This is ```for i in range(t)``` just a **test** to see that _some_ `inline code` and **other\nmarkdown** stuff is removed."
sanitized_md = "This is just a test to see that some and other markdown stuff is removed."
text = post_base_text.format("", random_md, "")
expected = post_base_text.format("", sanitized_md, "")
sanitized = util.sanitize_comment(text)
assert sanitized == expected
def test_sanitize_real_post():
"""Test sanitizing a real post (answer) from SO, authored by Simon Larsén."""
text =\
"""<p>You can do this in just two lines.</p>
<pre><code>with open('path/to/file') as f:
line_lists = [list(line.strip()) for line in f]
</code></pre>
<p><code>list</code> on a <code>str</code> object will return a list where each character is an element (as a <code>char</code>). <code>line</code> is stripped first, which removes leading and trailing whitespace. This is assuming that you actually want the characters as <code>char</code>. If you want them parsed to <code>int</code>, this will work:</p>
<pre><code>with open('path/to/file') as f:
line_lists = [[int(x) for x in line.strip()] for line in f]
</code></pre>
<p>Mind you that there should be some error checking here, the above example will crash if any of the characters cannot be parsed to int.</p>
"""
expected = "You can do this in just two lines. on a object will return a list where each character is an element (as a ). is stripped first, which removes leading and trailing whitespace. This is assuming that you actually want the characters as . If you want them parsed to , this will work: Mind you that there should be some error checking here, the above example will crash if any of the characters cannot be parsed to int."
sanitized = util.sanitize_post(text)
assert sanitized == expected
def test_yield_batches():
expected = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
it = (i for i in range(9))
actual = [batch for batch in util.yield_batches(it, 3)]
assert actual == expected
|
py
|
1a57b6c77166645bbd88d4cc04f432450cb11498
|
"""Holder for the (test kind, list of tests) pair with additional metadata their execution."""
from __future__ import absolute_import
import itertools
import threading
import time
from . import report as _report
from . import summary as _summary
from .. import config as _config
from .. import selector as _selector
def synchronized(method):
"""Provide decorator to enfore instance lock ownership when calling the method."""
def synced(self, *args, **kwargs):
"""Sync an instance lock."""
lock = getattr(self, "_lock")
with lock:
return method(self, *args, **kwargs)
return synced
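# Illustrative use of the decorator (a sketch, assuming the instance defines a `_lock`
# attribute, as Suite below does):
#
#   class Counter(object):
#       def __init__(self):
#           self._lock = threading.RLock()
#           self.value = 0
#
#       @synchronized
#       def increment(self):
#           self.value += 1  # executed while holding self._lock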
class Suite(object): # pylint: disable=too-many-instance-attributes
"""A suite of tests of a particular kind (e.g. C++ unit tests, dbtests, jstests)."""
def __init__(self, suite_name, suite_config, suite_options=_config.SuiteOptions.ALL_INHERITED):
"""Initialize the suite with the specified name and configuration."""
self._lock = threading.RLock()
self._suite_name = suite_name
self._suite_config = suite_config
self._suite_options = suite_options
self.test_kind = self.get_test_kind_config()
self.tests, self.excluded = self._get_tests_for_kind(self.test_kind)
self.return_code = None # Set by the executor.
self._suite_start_time = None
self._suite_end_time = None
self._test_start_times = []
self._test_end_times = []
self._reports = []
# We keep a reference to the TestReports from the currently running jobs so that we can
# report intermediate results.
self._partial_reports = None
def _get_tests_for_kind(self, test_kind):
"""Return the tests to run based on the 'test_kind'-specific filtering policy."""
selector_config = self.get_selector_config()
# The mongos_test doesn't have to filter anything, the selector_config is just the
# arguments to the mongos program to be used as the test case.
if test_kind == "mongos_test":
mongos_options = selector_config # Just for easier reading.
if not isinstance(mongos_options, dict):
raise TypeError("Expected dictionary of arguments to mongos")
return [mongos_options], []
return _selector.filter_tests(test_kind, selector_config)
def get_name(self):
"""Return the name of the test suite."""
return self._suite_name
def get_display_name(self):
"""Return the name of the test suite with a unique identifier for its SuiteOptions."""
if self.options.description is None:
return self.get_name()
return "{} ({})".format(self.get_name(), self.options.description)
def get_selector_config(self):
"""Return the "selector" section of the YAML configuration."""
if "selector" not in self._suite_config:
return {}
selector = self._suite_config["selector"].copy()
if self.options.include_tags is not None:
if "include_tags" in selector:
selector["include_tags"] = {
"$allOf": [
selector["include_tags"],
self.options.include_tags,
]
}
elif "exclude_tags" in selector:
selector["exclude_tags"] = {
"$anyOf": [
selector["exclude_tags"],
{"$not": self.options.include_tags},
]
}
else:
selector["include_tags"] = self.options.include_tags
return selector
def get_executor_config(self):
"""Return the "executor" section of the YAML configuration."""
return self._suite_config["executor"]
def get_test_kind_config(self):
"""Return the "test_kind" section of the YAML configuration."""
return self._suite_config["test_kind"]
@property
def options(self):
"""Get the options."""
return self._suite_options.resolve()
def with_options(self, suite_options):
"""Return a Suite instance with the specified resmokelib.config.SuiteOptions."""
return Suite(self._suite_name, self._suite_config, suite_options)
@synchronized
def record_suite_start(self):
"""Record the start time of the suite."""
self._suite_start_time = time.time()
@synchronized
def record_suite_end(self):
"""Record the end time of the suite."""
self._suite_end_time = time.time()
@synchronized
def record_test_start(self, partial_reports):
"""Record the start time of an execution.
The result is stored in the TestReports for currently running jobs.
"""
self._test_start_times.append(time.time())
self._partial_reports = partial_reports
@synchronized
def record_test_end(self, report):
"""Record the end time of an execution."""
self._test_end_times.append(time.time())
self._reports.append(report)
self._partial_reports = None
@synchronized
def get_active_report(self):
"""Return the partial report of the currently running execution, if there is one."""
if not self._partial_reports:
return None
return _report.TestReport.combine(*self._partial_reports)
@synchronized
def get_reports(self):
"""Return the list of reports.
If there's an execution currently in progress, then a report for the partial results
is included in the returned list.
"""
if self._partial_reports is not None:
return self._reports + [self.get_active_report()]
return self._reports
@synchronized
def summarize(self, sb):
"""Append a summary of the suite onto the string builder 'sb'."""
if not self._reports and not self._partial_reports:
sb.append("No tests ran.")
summary = _summary.Summary(0, 0.0, 0, 0, 0, 0)
elif not self._reports and self._partial_reports:
summary = self.summarize_latest(sb)
elif len(self._reports) == 1 and not self._partial_reports:
summary = self._summarize_execution(0, sb)
else:
summary = self._summarize_repeated(sb)
summarized_group = " %ss: %s" % (self.test_kind, "\n ".join(sb))
if summary.num_run == 0:
sb.append("Suite did not run any tests.")
return
# Override the 'time_taken' attribute of the summary if we have more accurate timing
# information available.
if self._suite_start_time is not None and self._suite_end_time is not None:
time_taken = self._suite_end_time - self._suite_start_time
summary = summary._replace(time_taken=time_taken)
sb.append("%d test(s) ran in %0.2f seconds"
" (%d succeeded, %d were skipped, %d failed, %d errored)" % summary)
sb.append(summarized_group)
@synchronized
def summarize_latest(self, sb):
"""Return a summary of the latest execution of the suite.
Also append a summary of that execution onto the string builder 'sb'.
If there's an execution currently in progress, then the partial
summary of that execution is appended to 'sb'.
"""
if self._partial_reports is None:
return self._summarize_execution(-1, sb)
active_report = _report.TestReport.combine(*self._partial_reports)
# Use the current time as the time that this suite finished running.
end_time = time.time()
return self._summarize_report(active_report, self._test_start_times[-1], end_time, sb)
def _summarize_repeated(self, sb):
"""Return the summary information of all executions.
Also append each execution's summary onto the string builder 'sb' and
information about how many repetitions there were.
"""
reports = self.get_reports() # Also includes the combined partial reports.
num_iterations = len(reports)
start_times = self._test_start_times[:]
end_times = self._test_end_times[:]
if self._partial_reports:
end_times.append(time.time()) # Add an end time in this copy for the partial reports.
total_time_taken = end_times[-1] - start_times[0]
sb.append("Executed %d times in %0.2f seconds:" % (num_iterations, total_time_taken))
combined_summary = _summary.Summary(0, 0.0, 0, 0, 0, 0)
for iteration in xrange(num_iterations):
# Summarize each execution as a bulleted list of results.
bulleter_sb = []
summary = self._summarize_report(reports[iteration], start_times[iteration],
end_times[iteration], bulleter_sb)
combined_summary = _summary.combine(combined_summary, summary)
for (i, line) in enumerate(bulleter_sb):
# Only bullet first line, indent others.
prefix = "* " if i == 0 else " "
sb.append(prefix + line)
return combined_summary
def _summarize_execution(self, iteration, sb):
"""Return the summary information of the execution given by 'iteration'.
Also append a summary of that execution onto the string builder 'sb'.
"""
return self._summarize_report(self._reports[iteration], self._test_start_times[iteration],
self._test_end_times[iteration], sb)
def _summarize_report(self, report, start_time, end_time, sb):
"""Return the summary information of the execution.
The summary is for 'report' that started at 'start_time' and finished at 'end_time'.
Also append a summary of that execution onto the string builder 'sb'.
"""
time_taken = end_time - start_time
# Tests that were interrupted are treated as failures because (1) the test has already been
# started and therefore isn't skipped and (2) the test has yet to finish and therefore
# cannot be said to have succeeded.
num_failed = report.num_failed + report.num_interrupted
num_run = report.num_succeeded + report.num_errored + num_failed
num_skipped = len(self.tests) + report.num_dynamic - num_run
if report.num_succeeded == num_run and num_skipped == 0:
sb.append("All %d test(s) passed in %0.2f seconds." % (num_run, time_taken))
return _summary.Summary(num_run, time_taken, num_run, 0, 0, 0)
summary = _summary.Summary(num_run, time_taken, report.num_succeeded, num_skipped,
num_failed, report.num_errored)
sb.append("%d test(s) ran in %0.2f seconds"
" (%d succeeded, %d were skipped, %d failed, %d errored)" % summary)
if num_failed > 0:
sb.append("The following tests failed (with exit code):")
for test_info in itertools.chain(report.get_failed(), report.get_interrupted()):
sb.append(" %s (%d)" % (test_info.test_id, test_info.return_code))
if report.num_errored > 0:
sb.append("The following tests had errors:")
for test_info in report.get_errored():
sb.append(" %s" % (test_info.test_id))
return summary
@staticmethod
def log_summaries(logger, suites, time_taken):
"""Log summary of all suites."""
sb = []
sb.append("Summary of all suites: %d suites ran in %0.2f seconds" % (len(suites),
time_taken))
for suite in suites:
suite_sb = []
suite.summarize(suite_sb)
sb.append(" %s: %s" % (suite.get_display_name(), "\n ".join(suite_sb)))
logger.info("=" * 80)
logger.info("\n".join(sb))
|
py
|
1a57b70a3649f03a2f4f2dee66b0888caaacc939
|
# Generated by Django 3.0 on 2021-09-14 20:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0029_auto_20210910_1912'),
]
operations = [
migrations.AlterField(
model_name='processo',
name='interessado',
field=models.ManyToManyField(to='accounts.Interessado'),
),
]
|
py
|
1a57b7440f162c5c41c627978e1293b3b53f1c31
|
#Create by UncleEngineer
#http://www.uncle-engineer.com/python
#http://www.facebook.com/UncleEngineer
import sympy as sp
x = sp.Symbol('x')
problem = '2*x**4+1'
result = str(sp.integrate(2*(x**4)+1,x))
def pownumber(rs):
resulttext = ['','']
for i in rs:
if resulttext[-2] == '*' and resulttext[-1] == '*' and i == '2':
resulttext.append('²')
elif resulttext[-2] == '*' and resulttext[-1] == '*' and i == '3':
resulttext.append('³')
elif resulttext[-2] == '*' and resulttext[-1] == '*' and i == '4':
resulttext.append('⁴')
elif resulttext[-2] == '*' and resulttext[-1] == '*' and i == '5':
resulttext.append('⁵')
elif resulttext[-2] == '*' and resulttext[-1] == '*' and i == '6':
resulttext.append('⁶')
elif resulttext[-2] == '*' and resulttext[-1] == '*' and i == '7':
resulttext.append('⁷')
elif resulttext[-2] == '*' and resulttext[-1] == '*' and i == '8':
resulttext.append('⁸')
elif resulttext[-2] == '*' and resulttext[-1] == '*' and i == '9':
resulttext.append('⁹')
else:
resulttext.append(i)
resulttext.remove('')
resulttext.remove('')
finaltext = ''
for j in resulttext:
if j != '*':
finaltext += j
return finaltext
textpb = pownumber(problem)
textres = pownumber(result)
print("∫ {} dx = {}".format(textpb,textres))
|
py
|
1a57b7b6af826145718d061c3247807aa4159eea
|
# Copyright 2020 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code for generating random quantum circuits."""
import dataclasses
import itertools
from typing import (
Any,
Callable,
Container,
Dict,
Iterable,
List,
Sequence,
TYPE_CHECKING,
Tuple,
Union,
Optional,
cast,
Iterator,
)
import networkx as nx
import numpy as np
from cirq import circuits, devices, ops, protocols, value
from cirq._doc import document
if TYPE_CHECKING:
import cirq
QidPairT = Tuple['cirq.Qid', 'cirq.Qid']
GridQubitPairT = Tuple['cirq.GridQubit', 'cirq.GridQubit']
@dataclasses.dataclass(frozen=True)
class GridInteractionLayer(Container[GridQubitPairT]):
"""A layer of aligned or staggered two-qubit interactions on a grid.
Layers of this type have two different basic structures,
aligned:
*-* *-* *-*
*-* *-* *-*
*-* *-* *-*
*-* *-* *-*
*-* *-* *-*
*-* *-* *-*
and staggered:
*-* *-* *-*
* *-* *-* *
*-* *-* *-*
* *-* *-* *
*-* *-* *-*
* *-* *-* *
Other variants are obtained by offsetting these lattices to the right by
some number of columns, and/or transposing into the vertical orientation.
There are a total of 4 aligned and 4 staggered variants.
The 2x2 unit cells for the aligned and staggered versions of this layer
are, respectively:
*-*
*-*
and
*-*
* *-
with left/top qubits at (0, 0) and (1, 0) in the aligned case, or
(0, 0) and (1, 1) in the staggered case. Other variants have the same unit
cells after transposing and offsetting.
Args:
col_offset: Number of columns by which to shift the basic lattice.
vertical: Whether gates should be oriented vertically rather than
horizontally.
stagger: Whether to stagger gates in neighboring rows.
"""
col_offset: int = 0
vertical: bool = False
stagger: bool = False
def __contains__(self, pair) -> bool:
"""Checks whether a pair is in this layer."""
if self.vertical:
# Transpose row, col coords for vertical orientation.
a, b = pair
pair = devices.GridQubit(a.col, a.row), devices.GridQubit(b.col, b.row)
a, b = sorted(pair)
# qubits should be 1 column apart.
if (a.row != b.row) or (b.col != a.col + 1):
return False
# mod to get the position in the 2 x 2 unit cell with column offset.
pos = a.row % 2, (a.col - self.col_offset) % 2
return pos == (0, 0) or pos == (1, self.stagger)
def _json_dict_(self) -> Dict[str, Any]:
return protocols.obj_to_dict_helper(self, ['col_offset', 'vertical', 'stagger'])
def __repr__(self) -> str:
return (
'cirq.experiments.GridInteractionLayer('
f'col_offset={self.col_offset}, '
f'vertical={self.vertical}, '
f'stagger={self.stagger})'
)
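# Illustrative membership check (a sketch, not part of the original module): with the
# default aligned layer, a horizontal pair is "in" the layer when its left qubit sits
# in an even column:
#
#   layer = GridInteractionLayer(col_offset=0, vertical=False, stagger=False)
#   (devices.GridQubit(0, 0), devices.GridQubit(0, 1)) in layer   # True
#   (devices.GridQubit(0, 1), devices.GridQubit(0, 2)) in layer   # False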
GRID_STAGGERED_PATTERN = (
GridInteractionLayer(col_offset=0, vertical=True, stagger=True), # A
GridInteractionLayer(col_offset=1, vertical=True, stagger=True), # B
GridInteractionLayer(col_offset=1, vertical=False, stagger=True), # C
GridInteractionLayer(col_offset=0, vertical=False, stagger=True), # D
GridInteractionLayer(col_offset=1, vertical=False, stagger=True), # C
GridInteractionLayer(col_offset=0, vertical=False, stagger=True), # D
GridInteractionLayer(col_offset=0, vertical=True, stagger=True), # A
GridInteractionLayer(col_offset=1, vertical=True, stagger=True), # B
)
document(
GRID_STAGGERED_PATTERN,
"""A pattern of two-qubit gates that is hard to simulate.
This pattern of gates was used in the paper
https://www.nature.com/articles/s41586-019-1666-5
to demonstrate quantum supremacy.
""",
)
HALF_GRID_STAGGERED_PATTERN = (
GridInteractionLayer(col_offset=0, vertical=True, stagger=True), # A
GridInteractionLayer(col_offset=1, vertical=True, stagger=True), # B
GridInteractionLayer(col_offset=1, vertical=False, stagger=True), # C
GridInteractionLayer(col_offset=0, vertical=False, stagger=True), # D
)
document(
HALF_GRID_STAGGERED_PATTERN,
"""A pattern that is half of GRID_STAGGERED_PATTERN.
    It activates each link in the grid once, in a staggered way that permits
    easier simulation.
""",
)
GRID_ALIGNED_PATTERN = (
GridInteractionLayer(col_offset=0, vertical=False, stagger=False), # E
GridInteractionLayer(col_offset=1, vertical=False, stagger=False), # F
GridInteractionLayer(col_offset=0, vertical=True, stagger=False), # G
GridInteractionLayer(col_offset=1, vertical=True, stagger=False), # H
)
document(
GRID_ALIGNED_PATTERN,
"""A pattern of two-qubit gates that is easy to simulate.
This pattern of gates was used in the paper
https://www.nature.com/articles/s41586-019-1666-5
to evaluate the performance of a quantum computer.
""",
)
def random_rotations_between_two_qubit_circuit(
q0: 'cirq.Qid',
q1: 'cirq.Qid',
depth: int,
two_qubit_op_factory: Callable[
['cirq.Qid', 'cirq.Qid', 'np.random.RandomState'], 'cirq.OP_TREE'
] = lambda a, b, _: ops.CZPowGate()(a, b),
single_qubit_gates: Sequence['cirq.Gate'] = (
ops.X ** 0.5,
ops.Y ** 0.5,
ops.PhasedXPowGate(phase_exponent=0.25, exponent=0.5),
),
add_final_single_qubit_layer: bool = True,
seed: 'cirq.RANDOM_STATE_OR_SEED_LIKE' = None,
) -> 'cirq.Circuit':
"""Generate a random two-qubit quantum circuit.
This construction uses a similar structure to those in the paper
https://www.nature.com/articles/s41586-019-1666-5.
The generated circuit consists of a number of "cycles", this number being
specified by `depth`. Each cycle is actually composed of two sub-layers:
a layer of single-qubit gates followed by a layer of two-qubit gates,
controlled by their respective arguments, see below.
Args:
q0: The first qubit
q1: The second qubit
depth: The number of cycles.
two_qubit_op_factory: A callable that returns a two-qubit operation.
These operations will be generated with calls of the form
`two_qubit_op_factory(q0, q1, prng)`, where `prng` is the
pseudorandom number generator.
single_qubit_gates: Single-qubit gates are selected randomly from this
sequence. No qubit is acted upon by the same single-qubit gate in
consecutive cycles. If only one choice of single-qubit gate is
given, then this constraint is not enforced.
add_final_single_qubit_layer: Whether to include a final layer of
single-qubit gates after the last cycle (subject to the same
non-consecutivity constraint).
seed: A seed or random state to use for the pseudorandom number
generator.
"""
prng = value.parse_random_state(seed)
circuit = circuits.Circuit()
previous_single_qubit_layer = circuits.Moment()
single_qubit_layer_factory = _single_qubit_gates_arg_to_factory(
single_qubit_gates=single_qubit_gates, qubits=(q0, q1), prng=prng
)
for _ in range(depth):
single_qubit_layer = single_qubit_layer_factory.new_layer(previous_single_qubit_layer)
circuit += single_qubit_layer
circuit += two_qubit_op_factory(q0, q1, prng)
previous_single_qubit_layer = single_qubit_layer
if add_final_single_qubit_layer:
circuit += single_qubit_layer_factory.new_layer(previous_single_qubit_layer)
return circuit
def generate_library_of_2q_circuits(
n_library_circuits: int,
two_qubit_gate: 'cirq.Gate',
*,
max_cycle_depth: int = 100,
q0: 'cirq.Qid' = devices.LineQubit(0),
q1: 'cirq.Qid' = devices.LineQubit(1),
random_state: 'cirq.RANDOM_STATE_OR_SEED_LIKE' = None,
) -> List['cirq.Circuit']:
"""Generate a library of two-qubit Circuits.
For single-qubit gates, this uses PhasedXZGates where the axis-in-XY-plane is one
of eight eighth turns and the Z rotation angle is one of eight eighth turns. This
provides 8*8=64 total choices, each implementable with one PhasedXZGate. This is
appropriate for architectures with microwave single-qubit control.
Args:
n_library_circuits: The number of circuits to generate.
two_qubit_gate: The two qubit gate to use in the circuits.
max_cycle_depth: The maximum cycle_depth in the circuits to generate. If you are using XEB,
this must be greater than or equal to the maximum value in `cycle_depths`.
q0: The first qubit to use when constructing the circuits.
q1: The second qubit to use when constructing the circuits.
random_state: A random state or seed used to deterministically sample the random circuits.
"""
rs = value.parse_random_state(random_state)
exponents = np.linspace(0, 7 / 4, 8)
single_qubit_gates = [
ops.PhasedXZGate(x_exponent=0.5, z_exponent=z, axis_phase_exponent=a)
for a, z in itertools.product(exponents, repeat=2)
]
return [
random_rotations_between_two_qubit_circuit(
q0,
q1,
depth=max_cycle_depth,
two_qubit_op_factory=lambda a, b, _: two_qubit_gate(a, b),
single_qubit_gates=single_qubit_gates,
seed=rs,
)
for _ in range(n_library_circuits)
]
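# Illustrative usage sketch (assuming cirq is available; the parameter values below are
# arbitrary examples):
#
#   lib = generate_library_of_2q_circuits(
#       n_library_circuits=5,
#       two_qubit_gate=ops.ISWAP ** 0.5,
#       max_cycle_depth=10,
#   )
#   # -> a list of 5 cirq.Circuit objects acting on LineQubit(0) and LineQubit(1)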
def _get_active_pairs(graph: nx.Graph, grid_layer: GridInteractionLayer):
"""Extract pairs of qubits from a device graph and a GridInteractionLayer."""
for edge in graph.edges:
if edge in grid_layer:
yield edge
@dataclasses.dataclass(frozen=True)
class CircuitLibraryCombination:
"""For a given layer (specifically, a set of pairs of qubits), `combinations` is a 2d array
of shape (n_combinations, len(pairs)) where each row represents a combination (with replacement)
of two-qubit circuits. The actual values are indices into a list of library circuits.
`layer` is used for record-keeping. This is the GridInteractionLayer if using
`get_random_combinations_for_device`, the Moment if using
`get_random_combinations_for_layer_circuit`, and omitted if using
`get_random_combinations_for_pairs`.
"""
layer: Optional[Any]
combinations: np.ndarray
pairs: List[QidPairT]
def _get_random_combinations(
n_library_circuits: int,
n_combinations: int,
*,
pair_gen: Iterator[Tuple[List[QidPairT], Any]],
random_state: 'cirq.RANDOM_STATE_OR_SEED_LIKE' = None,
) -> List[CircuitLibraryCombination]:
"""For qubit pairs, prepare a set of combinations to efficiently sample
parallel two-qubit XEB circuits.
This helper function should be called by one of
`get_random_combinations_for_device`,
`get_random_combinations_for_layer_circuit`, or
`get_random_combinations_for_pairs` which define
appropriate `pair_gen` arguments.
Args:
n_library_circuits: The number of circuits in your library. Likely the value
passed to `generate_library_of_2q_circuits`.
n_combinations: The number of combinations (with replacement) to generate
using the library circuits. Since this function returns a
`CircuitLibraryCombination`, the combinations will be represented
by indexes between 0 and `n_library_circuits-1` instead of the circuits
themselves. The more combinations, the more precise of an estimate for XEB
fidelity estimation, but a corresponding increase in the number of circuits
you must sample.
pair_gen: A generator that yields tuples of (pairs, layer_meta) where pairs is a list
of qubit pairs and layer_meta is additional data describing the "layer" assigned
to the CircuitLibraryCombination.layer field.
random_state: A random-state-like object to seed the random combination generation.
Returns:
A list of `CircuitLibraryCombination`, each corresponding to a layer
generated from `pair_gen`. Each object has a `combinations` matrix of circuit
indices of shape `(n_combinations, len(pairs))`. This
returned list can be provided to `sample_2q_xeb_circuits` to efficiently
sample parallel XEB circuits.
"""
rs = value.parse_random_state(random_state)
combinations_by_layer = []
for pairs, layer in pair_gen:
combinations = rs.randint(0, n_library_circuits, size=(n_combinations, len(pairs)))
combinations_by_layer.append(
CircuitLibraryCombination(layer=layer, combinations=combinations, pairs=pairs)
)
return combinations_by_layer
def get_random_combinations_for_device(
n_library_circuits: int,
n_combinations: int,
device_graph: nx.Graph,
*,
pattern: Sequence[GridInteractionLayer] = HALF_GRID_STAGGERED_PATTERN,
random_state: 'cirq.RANDOM_STATE_OR_SEED_LIKE' = None,
) -> List[CircuitLibraryCombination]:
"""For a given device, prepare a set of combinations to efficiently sample
parallel two-qubit XEB circuits.
Args:
n_library_circuits: The number of circuits in your library. Likely the value
passed to `generate_library_of_2q_circuits`.
n_combinations: The number of combinations (with replacement) to generate
using the library circuits. Since this function returns a
`CircuitLibraryCombination`, the combinations will be represented
by indexes between 0 and `n_library_circuits-1` instead of the circuits
themselves. The more combinations, the more precise of an estimate for XEB
fidelity estimation, but a corresponding increase in the number of circuits
you must sample.
device_graph: A graph whose nodes are qubits and whose edges represent
the possibility of doing a two-qubit gate. This combined with the
`pattern` argument determines which two qubit pairs are activated
when.
pattern: A sequence of `GridInteractionLayer`, each of which has
a particular set of qubits that are activated simultaneously. These
pairs of qubits are deduced by combining this argument with `device_graph`.
random_state: A random-state-like object to seed the random combination generation.
Returns:
A list of `CircuitLibraryCombination`, each corresponding to an interaction
layer in `pattern` where there is a non-zero number of pairs which would be activated.
Each object has a `combinations` matrix of circuit
indices of shape `(n_combinations, len(pairs))` where `len(pairs)` may
be different for each entry (i.e. for each layer in `pattern`). This
returned list can be provided to `sample_2q_xeb_circuits` to efficiently
sample parallel XEB circuits.
"""
def pair_gen():
for layer in pattern:
pairs = sorted(_get_active_pairs(device_graph, layer))
if len(pairs) == 0:
continue
yield pairs, layer
return _get_random_combinations(
n_library_circuits=n_library_circuits,
n_combinations=n_combinations,
random_state=random_state,
pair_gen=pair_gen(),
)
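# Illustrative usage sketch (the 2x2 device graph below is an assumption made for the
# example, not something defined in this module):
#
#   qubits = devices.GridQubit.rect(2, 2)
#   graph = nx.Graph()
#   graph.add_edges_from((a, b) for a in qubits for b in qubits if a.is_adjacent(b))
#   combos = get_random_combinations_for_device(
#       n_library_circuits=5, n_combinations=10, device_graph=graph)
#   # each entry has .pairs plus a (10, len(pairs)) matrix of library-circuit indices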
def get_random_combinations_for_pairs(
n_library_circuits: int,
n_combinations: int,
all_pairs: List[List[QidPairT]],
random_state: 'cirq.RANDOM_STATE_OR_SEED_LIKE' = None,
) -> List[CircuitLibraryCombination]:
"""For an explicit nested list of pairs, prepare a set of combinations to efficiently sample
parallel two-qubit XEB circuits.
Args:
n_library_circuits: The number of circuits in your library. Likely the value
passed to `generate_library_of_2q_circuits`.
n_combinations: The number of combinations (with replacement) to generate
using the library circuits. Since this function returns a
`CircuitLibraryCombination`, the combinations will be represented
by indexes between 0 and `n_library_circuits-1` instead of the circuits
themselves. The more combinations, the more precise of an estimate for XEB
fidelity estimation, but a corresponding increase in the number of circuits
you must sample.
all_pairs: A nested list of qubit pairs. The outer list should represent a "layer"
where the inner pairs should all be able to be activated simultaneously.
random_state: A random-state-like object to seed the random combination generation.
Returns:
A list of `CircuitLibraryCombination`, each corresponding to an interaction
layer in the outer list of `all_pairs`. Each object has a `combinations` matrix of circuit
indices of shape `(n_combinations, len(pairs))` where `len(pairs)` may
be different for each entry. This
returned list can be provided to `sample_2q_xeb_circuits` to efficiently
sample parallel XEB circuits.
"""
def pair_gen():
for pairs in all_pairs:
yield pairs, None
return _get_random_combinations(
n_library_circuits=n_library_circuits,
n_combinations=n_combinations,
random_state=random_state,
pair_gen=pair_gen(),
)
def _pairs_from_moment(moment: 'cirq.Moment') -> List[QidPairT]:
"""Helper function in `get_random_combinations_for_layer_circuit` pair generator.
The moment should contain only two qubit operations, which define a list of qubit pairs.
"""
pairs: List[QidPairT] = []
for op in moment.operations:
if len(op.qubits) != 2:
raise ValueError("Layer circuit contains non-2-qubit operations.")
qpair = cast(QidPairT, op.qubits)
pairs.append(qpair)
return pairs
def get_random_combinations_for_layer_circuit(
n_library_circuits: int,
n_combinations: int,
layer_circuit: 'cirq.Circuit',
random_state: 'cirq.RANDOM_STATE_OR_SEED_LIKE' = None,
) -> List[CircuitLibraryCombination]:
"""For a layer circuit, prepare a set of combinations to efficiently sample
parallel two-qubit XEB circuits.
Args:
n_library_circuits: The number of circuits in your library. Likely the value
passed to `generate_library_of_2q_circuits`.
n_combinations: The number of combinations (with replacement) to generate
using the library circuits. Since this function returns a
`CircuitLibraryCombination`, the combinations will be represented
by indexes between 0 and `n_library_circuits-1` instead of the circuits
themselves. The more combinations, the more precise of an estimate for XEB
fidelity estimation, but a corresponding increase in the number of circuits
you must sample.
layer_circuit: A calibration-style circuit where each Moment represents a layer.
Two qubit operations indicate the pair should be activated. This circuit should
only contain Moments which only contain two-qubit operations.
random_state: A random-state-like object to seed the random combination generation.
Returns:
A list of `CircuitLibraryCombination`, each corresponding to a moment in `layer_circuit`.
Each object has a `combinations` matrix of circuit
indices of shape `(n_combinations, len(pairs))` where `len(pairs)` may
be different for each entry (i.e. for each moment). This
returned list can be provided to `sample_2q_xeb_circuits` to efficiently
sample parallel XEB circuits.
"""
def pair_gen():
for moment in layer_circuit.moments:
yield _pairs_from_moment(moment), moment
return _get_random_combinations(
n_library_circuits=n_library_circuits,
n_combinations=n_combinations,
random_state=random_state,
pair_gen=pair_gen(),
)
def get_grid_interaction_layer_circuit(
device_graph: nx.Graph,
pattern: Sequence[GridInteractionLayer] = HALF_GRID_STAGGERED_PATTERN,
two_qubit_gate=ops.ISWAP ** 0.5,
) -> 'cirq.Circuit':
"""Create a circuit representation of a grid interaction pattern on a given device topology.
The resulting circuit is deterministic, of depth len(pattern), and consists of `two_qubit_gate`
applied to each pair in `pattern` restricted to available connections in `device_graph`.
Args:
device_graph: A graph whose nodes are qubits and whose edges represent the possibility of
doing a two-qubit gate. This combined with the `pattern` argument determines which
two qubit pairs are activated when.
pattern: A sequence of `GridInteractionLayer`, each of which has a particular set of
qubits that are activated simultaneously. These pairs of qubits are deduced by
combining this argument with `device_graph`.
two_qubit_gate: The two qubit gate to use in constructing the circuit layers.
"""
moments = []
for layer in pattern:
pairs = sorted(_get_active_pairs(device_graph, layer))
if len(pairs) == 0:
continue
moments += [circuits.Moment(two_qubit_gate.on(*pair) for pair in pairs)]
return circuits.Circuit(moments)
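# Illustrative sketch tying the helpers together (same assumed `graph` as the sketch
# above): a layer circuit built from a device graph can seed combination generation:
#
#   layer_circuit = get_grid_interaction_layer_circuit(graph)
#   combos = get_random_combinations_for_layer_circuit(
#       n_library_circuits=5, n_combinations=10, layer_circuit=layer_circuit)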
def random_rotations_between_grid_interaction_layers_circuit(
qubits: Iterable['cirq.GridQubit'],
depth: int,
*, # forces keyword arguments
two_qubit_op_factory: Callable[
['cirq.GridQubit', 'cirq.GridQubit', 'np.random.RandomState'], 'cirq.OP_TREE'
] = lambda a, b, _: ops.CZPowGate()(a, b),
pattern: Sequence[GridInteractionLayer] = GRID_STAGGERED_PATTERN,
single_qubit_gates: Sequence['cirq.Gate'] = (
ops.X ** 0.5,
ops.Y ** 0.5,
ops.PhasedXPowGate(phase_exponent=0.25, exponent=0.5),
),
add_final_single_qubit_layer: bool = True,
seed: 'cirq.RANDOM_STATE_OR_SEED_LIKE' = None,
) -> 'cirq.Circuit':
"""Generate a random quantum circuit of a particular form.
This construction is based on the circuits used in the paper
https://www.nature.com/articles/s41586-019-1666-5.
The generated circuit consists of a number of "cycles", this number being
specified by `depth`. Each cycle is actually composed of two sub-layers:
a layer of single-qubit gates followed by a layer of two-qubit gates,
controlled by their respective arguments, see below. The pairs of qubits
in a given entangling layer is controlled by the `pattern` argument,
see below.
Args:
qubits: The qubits to use.
depth: The number of cycles.
two_qubit_op_factory: A callable that returns a two-qubit operation.
These operations will be generated with calls of the form
`two_qubit_op_factory(q0, q1, prng)`, where `prng` is the
pseudorandom number generator.
pattern: A sequence of GridInteractionLayers, each of which determine
which pairs of qubits are entangled. The layers in a pattern are
iterated through sequentially, repeating until `depth` is reached.
single_qubit_gates: Single-qubit gates are selected randomly from this
sequence. No qubit is acted upon by the same single-qubit gate in
consecutive cycles. If only one choice of single-qubit gate is
given, then this constraint is not enforced.
add_final_single_qubit_layer: Whether to include a final layer of
single-qubit gates after the last cycle.
seed: A seed or random state to use for the pseudorandom number
generator.
"""
prng = value.parse_random_state(seed)
qubits = list(qubits)
coupled_qubit_pairs = _coupled_qubit_pairs(qubits)
circuit = circuits.Circuit()
previous_single_qubit_layer = circuits.Moment()
single_qubit_layer_factory = _single_qubit_gates_arg_to_factory(
single_qubit_gates=single_qubit_gates, qubits=qubits, prng=prng
)
for i in range(depth):
single_qubit_layer = single_qubit_layer_factory.new_layer(previous_single_qubit_layer)
circuit += single_qubit_layer
two_qubit_layer = _two_qubit_layer(
coupled_qubit_pairs, two_qubit_op_factory, pattern[i % len(pattern)], prng
)
circuit += two_qubit_layer
previous_single_qubit_layer = single_qubit_layer
if add_final_single_qubit_layer:
circuit += single_qubit_layer_factory.new_layer(previous_single_qubit_layer)
return circuit
def _coupled_qubit_pairs(
qubits: List['cirq.GridQubit'],
) -> List[GridQubitPairT]:
pairs = []
qubit_set = set(qubits)
for qubit in qubits:
def add_pair(neighbor: 'cirq.GridQubit'):
if neighbor in qubit_set:
pairs.append((qubit, neighbor))
add_pair(devices.GridQubit(qubit.row, qubit.col + 1))
add_pair(devices.GridQubit(qubit.row + 1, qubit.col))
return pairs
class _RandomSingleQubitLayerFactory:
def __init__(
self,
qubits: Sequence['cirq.Qid'],
single_qubit_gates: Sequence['cirq.Gate'],
prng: 'np.random.RandomState',
) -> None:
self.qubits = qubits
self.single_qubit_gates = single_qubit_gates
self.prng = prng
def new_layer(self, previous_single_qubit_layer: 'cirq.Moment') -> 'cirq.Moment':
def random_gate(qubit: 'cirq.Qid') -> 'cirq.Gate':
excluded_op = previous_single_qubit_layer.operation_at(qubit)
excluded_gate = excluded_op.gate if excluded_op is not None else None
g = self.single_qubit_gates[self.prng.randint(0, len(self.single_qubit_gates))]
while g is excluded_gate:
g = self.single_qubit_gates[self.prng.randint(0, len(self.single_qubit_gates))]
return g
return circuits.Moment(random_gate(q).on(q) for q in self.qubits)
class _FixedSingleQubitLayerFactory:
def __init__(self, fixed_single_qubit_layer: Dict['cirq.Qid', 'cirq.Gate']) -> None:
self.fixed_single_qubit_layer = fixed_single_qubit_layer
def new_layer(self, previous_single_qubit_layer: 'cirq.Moment') -> 'cirq.Moment':
return circuits.Moment(v.on(q) for q, v in self.fixed_single_qubit_layer.items())
_SingleQubitLayerFactory = Union[_FixedSingleQubitLayerFactory, _RandomSingleQubitLayerFactory]
def _single_qubit_gates_arg_to_factory(
single_qubit_gates: Sequence['cirq.Gate'],
qubits: Sequence['cirq.Qid'],
prng: 'np.random.RandomState',
) -> _SingleQubitLayerFactory:
"""Parse the `single_qubit_gates` argument for circuit generation functions.
If only one single qubit gate is provided, it will be used everywhere.
Otherwise, we use the factory that excludes operations that were used
in the previous layer. This check is done by gate identity, not equality.
"""
if len(set(single_qubit_gates)) == 1:
return _FixedSingleQubitLayerFactory({q: single_qubit_gates[0] for q in qubits})
return _RandomSingleQubitLayerFactory(qubits, single_qubit_gates, prng)
def _two_qubit_layer(
coupled_qubit_pairs: List[GridQubitPairT],
two_qubit_op_factory: Callable[
['cirq.GridQubit', 'cirq.GridQubit', 'np.random.RandomState'], 'cirq.OP_TREE'
],
layer: GridInteractionLayer,
prng: 'np.random.RandomState',
) -> 'cirq.OP_TREE':
for a, b in coupled_qubit_pairs:
if (a, b) in layer:
yield two_qubit_op_factory(a, b, prng)
|
py
|
1a57ba102243ab62325ebef814295a436df1f5b7
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from enum import Enum
class Reason(str, Enum):
account_name_invalid = "AccountNameInvalid"
already_exists = "AlreadyExists"
class SkuName(str, Enum):
standard_lrs = "Standard_LRS"
standard_grs = "Standard_GRS"
standard_ragrs = "Standard_RAGRS"
standard_zrs = "Standard_ZRS"
premium_lrs = "Premium_LRS"
class SkuTier(str, Enum):
standard = "Standard"
premium = "Premium"
class AccessTier(str, Enum):
hot = "Hot"
cool = "Cool"
class Kind(str, Enum):
storage = "Storage"
blob_storage = "BlobStorage"
class ProvisioningState(str, Enum):
creating = "Creating"
resolving_dns = "ResolvingDNS"
succeeded = "Succeeded"
class AccountStatus(str, Enum):
available = "available"
unavailable = "unavailable"
class KeyPermission(str, Enum):
read = "Read"
full = "Full"
class UsageUnit(str, Enum):
count = "Count"
bytes = "Bytes"
seconds = "Seconds"
percent = "Percent"
counts_per_second = "CountsPerSecond"
bytes_per_second = "BytesPerSecond"
class HttpProtocol(str, Enum):
httpshttp = "https,http"
https = "https"
class SignedResource(str, Enum):
b = "b"
c = "c"
f = "f"
s = "s"
class Permissions(str, Enum):
r = "r"
d = "d"
w = "w"
l = "l"
a = "a"
c = "c"
u = "u"
p = "p"
|
py
|
1a57bc199246bb4c53503f50ef8e5fef017f1036
|
import sys
import pytest
import shutil
from pathlib import Path
from cookiecutter import main
CCDS_ROOT = Path(__file__).parents[1].resolve()
args = {
'project_name': 'AwesomeProject',
'author_name': 'AwesomeName',
'description': 'A very awesome project.',
'open_source_license': 'BSD-3-Clause',
'python_interpreter': 'python',
'version': '0.1.0'
}
def system_check(basename):
platform = sys.platform
if 'linux' in platform:
basename = basename.lower()
return basename
@pytest.fixture(scope='class', params=[{}, args])
def default_baked_project(tmpdir_factory, request):
temp = tmpdir_factory.mktemp('data-project')
out_dir = Path(temp).resolve()
pytest.param = request.param
main.cookiecutter(
str(CCDS_ROOT),
no_input=True,
extra_context=pytest.param,
output_dir=out_dir
)
project_name = pytest.param.get('project_name') or 'project_name'
# project name gets converted to lower case on Linux but not Mac
project_name = system_check(project_name)
project_path = out_dir/project_name
request.cls.project_path = project_path
yield
# cleanup after
shutil.rmtree(out_dir)
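# Illustrative consumption of the fixture above (a sketch, not part of this file):
#
#   @pytest.mark.usefixtures('default_baked_project')
#   class TestProjectLayout:
#       def test_project_dir_exists(self):
#           assert self.project_path.exists()   # set on the class by the fixture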
|
py
|
1a57bcc3dd12d7032ea5b861f763b21c47a2939e
|
import tty
import sys
import curses
import datetime
import locale
from decimal import Decimal
import getpass
import logging
import electrum_mona
from electrum_mona.util import format_satoshis
from electrum_mona.bitcoin import is_address, COIN, TYPE_ADDRESS
from electrum_mona.transaction import TxOutput
from electrum_mona.wallet import Wallet
from electrum_mona.storage import WalletStorage
from electrum_mona.network import NetworkParameters, TxBroadcastError, BestEffortRequestFailed
from electrum_mona.interface import deserialize_server
from electrum_mona.logging import console_stderr_handler
_ = lambda x:x # i18n
class ElectrumGui:
def __init__(self, config, daemon, plugins):
self.config = config
self.network = daemon.network
storage = WalletStorage(config.get_wallet_path())
if not storage.file_exists():
print("Wallet not found. try 'electrum-mona create'")
exit()
if storage.is_encrypted():
password = getpass.getpass('Password:', stream=None)
storage.decrypt(password)
self.wallet = Wallet(storage)
self.wallet.start_network(self.network)
self.contacts = self.wallet.contacts
locale.setlocale(locale.LC_ALL, '')
self.encoding = locale.getpreferredencoding()
self.stdscr = curses.initscr()
curses.noecho()
curses.cbreak()
curses.start_color()
curses.use_default_colors()
curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_BLUE)
curses.init_pair(2, curses.COLOR_WHITE, curses.COLOR_CYAN)
curses.init_pair(3, curses.COLOR_BLACK, curses.COLOR_WHITE)
self.stdscr.keypad(1)
self.stdscr.border(0)
self.maxy, self.maxx = self.stdscr.getmaxyx()
self.set_cursor(0)
self.w = curses.newwin(10, 50, 5, 5)
console_stderr_handler.setLevel(logging.CRITICAL)
self.tab = 0
self.pos = 0
self.popup_pos = 0
self.str_recipient = ""
self.str_description = ""
self.str_amount = ""
self.str_fee = ""
self.history = None
if self.network:
self.network.register_callback(self.update, ['wallet_updated', 'network_updated'])
self.tab_names = [_("History"), _("Send"), _("Receive"), _("Addresses"), _("Contacts"), _("Banner")]
self.num_tabs = len(self.tab_names)
def set_cursor(self, x):
try:
curses.curs_set(x)
except Exception:
pass
def restore_or_create(self):
pass
def verify_seed(self):
pass
def get_string(self, y, x):
self.set_cursor(1)
curses.echo()
self.stdscr.addstr( y, x, " "*20, curses.A_REVERSE)
s = self.stdscr.getstr(y,x)
curses.noecho()
self.set_cursor(0)
return s
def update(self, event, *args):
self.update_history()
if self.tab == 0:
self.print_history()
self.refresh()
def print_history(self):
width = [20, 40, 14, 14]
delta = (self.maxx - sum(width) - 4)/3
format_str = "%"+"%d"%width[0]+"s"+"%"+"%d"%(width[1]+delta)+"s"+"%"+"%d"%(width[2]+delta)+"s"+"%"+"%d"%(width[3]+delta)+"s"
if self.history is None:
self.update_history()
self.print_list(self.history[::-1], format_str%( _("Date"), _("Description"), _("Amount"), _("Balance")))
def update_history(self):
width = [20, 40, 14, 14]
delta = (self.maxx - sum(width) - 4)/3
format_str = "%"+"%d"%width[0]+"s"+"%"+"%d"%(width[1]+delta)+"s"+"%"+"%d"%(width[2]+delta)+"s"+"%"+"%d"%(width[3]+delta)+"s"
b = 0
self.history = []
for hist_item in self.wallet.get_history():
if hist_item.tx_mined_status.conf:
timestamp = hist_item.tx_mined_status.timestamp
try:
time_str = datetime.datetime.fromtimestamp(timestamp).isoformat(' ')[:-3]
except Exception:
time_str = "------"
else:
time_str = 'unconfirmed'
label = self.wallet.get_label(hist_item.txid)
if len(label) > 40:
label = label[0:37] + '...'
self.history.append(format_str % (time_str, label, format_satoshis(hist_item.value, whitespaces=True),
format_satoshis(hist_item.balance, whitespaces=True)))
def print_balance(self):
if not self.network:
msg = _("Offline")
elif self.network.is_connected():
if not self.wallet.up_to_date:
msg = _("Synchronizing...")
else:
c, u, x = self.wallet.get_balance()
msg = _("Balance")+": %f "%(Decimal(c) / COIN)
if u:
msg += " [%f unconfirmed]"%(Decimal(u) / COIN)
if x:
msg += " [%f unmatured]"%(Decimal(x) / COIN)
else:
msg = _("Not connected")
self.stdscr.addstr( self.maxy -1, 3, msg)
for i in range(self.num_tabs):
self.stdscr.addstr( 0, 2 + 2*i + len(''.join(self.tab_names[0:i])), ' '+self.tab_names[i]+' ', curses.A_BOLD if self.tab == i else 0)
self.stdscr.addstr(self.maxy -1, self.maxx-30, ' '.join([_("Settings"), _("Network"), _("Quit")]))
def print_receive(self):
addr = self.wallet.get_receiving_address()
self.stdscr.addstr(2, 1, "Address: "+addr)
self.print_qr(addr)
def print_contacts(self):
messages = map(lambda x: "%20s %45s "%(x[0], x[1][1]), self.contacts.items())
self.print_list(messages, "%19s %15s "%("Key", "Value"))
def print_addresses(self):
fmt = "%-35s %-30s"
messages = map(lambda addr: fmt % (addr, self.wallet.labels.get(addr,"")), self.wallet.get_addresses())
self.print_list(messages, fmt % ("Address", "Label"))
def print_edit_line(self, y, label, text, index, size):
text += " "*(size - len(text) )
self.stdscr.addstr( y, 2, label)
self.stdscr.addstr( y, 15, text, curses.A_REVERSE if self.pos%6==index else curses.color_pair(1))
def print_send_tab(self):
self.stdscr.clear()
self.print_edit_line(3, _("Pay to"), self.str_recipient, 0, 40)
self.print_edit_line(5, _("Description"), self.str_description, 1, 40)
self.print_edit_line(7, _("Amount"), self.str_amount, 2, 15)
self.print_edit_line(9, _("Fee"), self.str_fee, 3, 15)
self.stdscr.addstr( 12, 15, _("[Send]"), curses.A_REVERSE if self.pos%6==4 else curses.color_pair(2))
self.stdscr.addstr( 12, 25, _("[Clear]"), curses.A_REVERSE if self.pos%6==5 else curses.color_pair(2))
self.maxpos = 6
def print_banner(self):
if self.network and self.network.banner:
banner = self.network.banner
banner = banner.replace('\r', '')
self.print_list(banner.split('\n'))
def print_qr(self, data):
import qrcode
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
s = StringIO()
self.qr = qrcode.QRCode()
self.qr.add_data(data)
self.qr.print_ascii(out=s, invert=False)
msg = s.getvalue()
lines = msg.split('\n')
try:
for i, l in enumerate(lines):
l = l.encode("utf-8")
self.stdscr.addstr(i+5, 5, l, curses.color_pair(3))
except curses.error:
m = 'error. screen too small?'
m = m.encode(self.encoding)
self.stdscr.addstr(5, 1, m, 0)
def print_list(self, lst, firstline = None):
lst = list(lst)
self.maxpos = len(lst)
if not self.maxpos: return
if firstline:
firstline += " "*(self.maxx -2 - len(firstline))
self.stdscr.addstr( 1, 1, firstline )
for i in range(self.maxy-4):
msg = lst[i] if i < len(lst) else ""
msg += " "*(self.maxx - 2 - len(msg))
m = msg[0:self.maxx - 2]
m = m.encode(self.encoding)
self.stdscr.addstr( i+2, 1, m, curses.A_REVERSE if i == (self.pos % self.maxpos) else 0)
def refresh(self):
if self.tab == -1: return
self.stdscr.border(0)
self.print_balance()
self.stdscr.refresh()
def main_command(self):
c = self.stdscr.getch()
print(c)
cc = curses.unctrl(c).decode()
if c == curses.KEY_RIGHT: self.tab = (self.tab + 1)%self.num_tabs
elif c == curses.KEY_LEFT: self.tab = (self.tab - 1)%self.num_tabs
elif c == curses.KEY_DOWN: self.pos +=1
elif c == curses.KEY_UP: self.pos -= 1
elif c == 9: self.pos +=1 # tab
elif cc in ['^W', '^C', '^X', '^Q']: self.tab = -1
elif cc in ['^N']: self.network_dialog()
elif cc == '^S': self.settings_dialog()
else: return c
if self.pos<0: self.pos=0
if self.pos>=self.maxpos: self.pos=self.maxpos - 1
def run_tab(self, i, print_func, exec_func):
while self.tab == i:
self.stdscr.clear()
print_func()
self.refresh()
c = self.main_command()
if c: exec_func(c)
def run_history_tab(self, c):
if c == 10:
out = self.run_popup('',["blah","foo"])
def edit_str(self, target, c, is_num=False):
# detect backspace: 8 (^H), 127 (DEL) and 263 (curses.KEY_BACKSPACE)
cc = curses.unctrl(c).decode()
if c in [8, 127, 263] and target:
target = target[:-1]
elif not is_num or cc in '0123456789.':
target += cc
return target
def run_send_tab(self, c):
if self.pos%6 == 0:
self.str_recipient = self.edit_str(self.str_recipient, c)
if self.pos%6 == 1:
self.str_description = self.edit_str(self.str_description, c)
if self.pos%6 == 2:
self.str_amount = self.edit_str(self.str_amount, c, True)
elif self.pos%6 == 3:
self.str_fee = self.edit_str(self.str_fee, c, True)
elif self.pos%6==4:
if c == 10: self.do_send()
elif self.pos%6==5:
if c == 10: self.do_clear()
def run_receive_tab(self, c):
if c == 10:
out = self.run_popup('Address', ["Edit label", "Freeze", "Prioritize"])
def run_contacts_tab(self, c):
if c == 10 and self.contacts:
out = self.run_popup('Address', ["Copy", "Pay to", "Edit label", "Delete"]).get('button')
key = list(self.contacts.keys())[self.pos%len(self.contacts.keys())]
if out == "Pay to":
self.tab = 1
self.str_recipient = key
self.pos = 2
elif out == "Edit label":
s = self.get_string(6 + self.pos, 18)
if s:
self.wallet.labels[key] = s
def run_banner_tab(self, c):
self.show_message(repr(c))
pass
def main(self):
tty.setraw(sys.stdin)
try:
while self.tab != -1:
self.run_tab(0, self.print_history, self.run_history_tab)
self.run_tab(1, self.print_send_tab, self.run_send_tab)
self.run_tab(2, self.print_receive, self.run_receive_tab)
self.run_tab(3, self.print_addresses, self.run_banner_tab)
self.run_tab(4, self.print_contacts, self.run_contacts_tab)
self.run_tab(5, self.print_banner, self.run_banner_tab)
except curses.error as e:
raise Exception("Error with curses. Is your screen too small?") from e
finally:
tty.setcbreak(sys.stdin)
curses.nocbreak()
self.stdscr.keypad(0)
curses.echo()
curses.endwin()
def do_clear(self):
self.str_amount = ''
self.str_recipient = ''
self.str_fee = ''
self.str_description = ''
def do_send(self):
if not is_address(self.str_recipient):
self.show_message(_('Invalid Monacoin address'))
return
try:
amount = int(Decimal(self.str_amount) * COIN)
except Exception:
self.show_message(_('Invalid Amount'))
return
try:
fee = int(Decimal(self.str_fee) * COIN)
except Exception:
self.show_message(_('Invalid Fee'))
return
if self.wallet.has_password():
password = self.password_dialog()
if not password:
return
else:
password = None
try:
tx = self.wallet.mktx([TxOutput(TYPE_ADDRESS, self.str_recipient, amount)],
password, self.config, fee)
except Exception as e:
self.show_message(repr(e))
return
if self.str_description:
self.wallet.labels[tx.txid()] = self.str_description
self.show_message(_("Please wait..."), getchar=False)
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
msg = e.get_message_for_gui()
self.show_message(msg)
except BestEffortRequestFailed as e:
msg = repr(e)
self.show_message(msg)
else:
self.show_message(_('Payment sent.'))
self.do_clear()
#self.update_contacts_tab()
def show_message(self, message, getchar = True):
w = self.w
w.clear()
w.border(0)
for i, line in enumerate(message.split('\n')):
w.addstr(2+i,2,line)
w.refresh()
if getchar: c = self.stdscr.getch()
def run_popup(self, title, items):
return self.run_dialog(title, list(map(lambda x: {'type':'button','label':x}, items)), interval=1, y_pos = self.pos+3)
def network_dialog(self):
if not self.network:
return
net_params = self.network.get_parameters()
host, port, protocol = net_params.host, net_params.port, net_params.protocol
proxy_config, auto_connect = net_params.proxy, net_params.auto_connect
srv = 'auto-connect' if auto_connect else self.network.default_server
out = self.run_dialog('Network', [
{'label':'server', 'type':'str', 'value':srv},
{'label':'proxy', 'type':'str', 'value':self.config.get('proxy', '')},
], buttons = 1)
if out:
if out.get('server'):
server = out.get('server')
auto_connect = server == 'auto-connect'
if not auto_connect:
try:
host, port, protocol = deserialize_server(server)
except Exception:
self.show_message("Error:" + server + "\nIn doubt, type \"auto-connect\"")
return False
if out.get('server') or out.get('proxy'):
proxy = electrum_mona.network.deserialize_proxy(out.get('proxy')) if out.get('proxy') else proxy_config
net_params = NetworkParameters(host, port, protocol, proxy, auto_connect)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def settings_dialog(self):
fee = str(Decimal(self.config.fee_per_kb()) / COIN)
out = self.run_dialog('Settings', [
{'label':'Default fee', 'type':'satoshis', 'value': fee }
], buttons = 1)
if out:
if out.get('Default fee'):
fee = int(Decimal(out['Default fee']) * COIN)
self.config.set_key('fee_per_kb', fee, True)
def password_dialog(self):
out = self.run_dialog('Password', [
{'label':'Password', 'type':'password', 'value':''}
], buttons = 1)
return out.get('Password')
def run_dialog(self, title, items, interval=2, buttons=None, y_pos=3):
self.popup_pos = 0
self.w = curses.newwin( 5 + len(list(items))*interval + (2 if buttons else 0), 50, y_pos, 5)
w = self.w
out = {}
while True:
w.clear()
w.border(0)
w.addstr( 0, 2, title)
num = len(list(items))
numpos = num
if buttons: numpos += 2
for i in range(num):
item = items[i]
label = item.get('label')
if item.get('type') == 'list':
value = item.get('value','')
elif item.get('type') == 'satoshis':
value = item.get('value','')
elif item.get('type') == 'str':
value = item.get('value','')
elif item.get('type') == 'password':
value = '*'*len(item.get('value',''))
else:
value = ''
if value is None:
value = ''
if len(value)<20:
value += ' '*(20-len(value))
if 'value' in item:
w.addstr( 2+interval*i, 2, label)
w.addstr( 2+interval*i, 15, value, curses.A_REVERSE if self.popup_pos%numpos==i else curses.color_pair(1) )
else:
w.addstr( 2+interval*i, 2, label, curses.A_REVERSE if self.popup_pos%numpos==i else 0)
if buttons:
w.addstr( 5+interval*i, 10, "[ ok ]", curses.A_REVERSE if self.popup_pos%numpos==(numpos-2) else curses.color_pair(2))
w.addstr( 5+interval*i, 25, "[cancel]", curses.A_REVERSE if self.popup_pos%numpos==(numpos-1) else curses.color_pair(2))
w.refresh()
c = self.stdscr.getch()
if c in [ord('q'), 27]: break
elif c in [curses.KEY_LEFT, curses.KEY_UP]: self.popup_pos -= 1
elif c in [curses.KEY_RIGHT, curses.KEY_DOWN]: self.popup_pos +=1
else:
i = self.popup_pos%numpos
if buttons and c==10:
if i == numpos-2:
return out
elif i == numpos -1:
return {}
item = items[i]
_type = item.get('type')
if _type == 'str':
item['value'] = self.edit_str(item['value'], c)
out[item.get('label')] = item.get('value')
elif _type == 'password':
item['value'] = self.edit_str(item['value'], c)
out[item.get('label')] = item ['value']
elif _type == 'satoshis':
item['value'] = self.edit_str(item['value'], c, True)
out[item.get('label')] = item.get('value')
elif _type == 'list':
choices = item.get('choices')
try:
j = choices.index(item.get('value'))
except Exception:
j = 0
new_choice = choices[(j + 1)% len(choices)]
item['value'] = new_choice
out[item.get('label')] = item.get('value')
elif _type == 'button':
out['button'] = item.get('label')
break
return out
|
py
|
1a57bcf9e7f1f96fb25ae81f37f15ff762fb756b
|
#!/usr/bin/env python
# coding=utf-8
# Stan 2017-07-13
from __future__ import (division, absolute_import,
print_function, unicode_literals)
import os
from ..a import app
def get_user_templates(user):
home = os.path.join(app.root_path, 'templates', 'custom')
try:
ldir = os.listdir(home)
except OSError:
pass
else:
for name in ldir:
tpname, ext = os.path.splitext(name)
if ext == '.html':
yield tpname
home = os.path.join(app.root_path, 'templates', 'custom', user.username)
try:
ldir = os.listdir(home)
except OSError:
pass
else:
for name in ldir:
tpname, ext = os.path.splitext(name)
if ext == '.html':
yield "{0}/{1}".format(user.username, tpname)
|
py
|
1a57bd2375feb31a04c5c7cd929500f01560d176
|
#!/usr/bin/env python
from bikeshed import cli
if __name__ == "__main__":
cli.main()
|
py
|
1a57bd48a58851516a737b509d54d4361cdca911
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import ast
from collections import namedtuple
from dataflow.udf.exceptions.comp_exceptions import SecurityCheckError
class UdfPolicy(object):
Import = namedtuple("Import", ["module", "name", "alias"])
def get_imports(self, path):
try:
with open(path) as fh:
root = ast.parse(fh.read(), path)
for node in ast.iter_child_nodes(root):
if isinstance(node, ast.Import):
module = []
elif isinstance(node, ast.ImportFrom):
module = node.module.split(".")
else:
continue
for n in node.names:
yield self.Import(module, n.name.split("."), n.asname)
except Exception as e:
raise SecurityCheckError(str(e))
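# Illustrative note (not part of the original module): iterating get_imports()
# over a file containing "import os" and "from collections import namedtuple"
# yields Import(module=[], name=['os'], alias=None) and
# Import(module=['collections'], name=['namedtuple'], alias=None).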
|
py
|
1a57be231898fe6e152df722a7ac7b4ae99668c7
|
# Set up an NN to recognize clothing
# Use 85% of MNIST data to train and 15% to test
# We will also used ReLU
from __future__ import absolute_import, division, print_function
# Import Tensorflow
import tensorflow as tf
import tensorflow_datasets as tfds
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR) # updated
# Helper Libraries
import math
import numpy as np
import matplotlib.pyplot as plt
# Improve progress bar display
import tqdm
import tqdm.auto
tqdm.tqdm = tqdm.auto.tqdm
#print(tf.__version__)
# Load dataset and metadata
dataset, metadata = tfds.load('fashion_mnist', as_supervised = True, with_info = True)
train_dataset = dataset['train']
test_dataset = dataset['test']
class_names = metadata.features['label'].names
print("Class names: {}" .format(class_names))
# Explore Data
num_train_examples = metadata.splits['train'].num_examples
num_test_examples = metadata.splits['test'].num_examples
print("Number of training examples: {}" .format(num_train_examples)) # 60000
print("Number of test examples: {}" .format(num_test_examples)) # 10000
# Preprocess the data
# Image has pixels with values [0, 255] ---- NORMALIZATION
def normalize(images, labels):
images = tf.cast(images, tf.float32) # cast it as float
images /= 255 # scale pixel values into the range [0, 1]
return images, labels
# Map function applies the normalize function to each element in the following sets
train_dataset = train_dataset.map(normalize)
test_dataset = test_dataset.map(normalize)
"""
# Plot the first image of test_dataset
for image, label in test_dataset.take(1):
break
image = image.numpy().reshape((28, 28))
# Plot the image
plt.figure()
plt.imshow(image, cmap = plt.cm.binary)
plt.colorbar()
plt.grid(False)
plt.show()
# Display the first 25 images and their class labels
plt.figure(figsize=(10, 10))
i = 0
for (image, label) in test_dataset.take(25):
image = image.numpy().reshape((28, 28))
plt.subplot(5, 5, i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(image, cmap = plt.cm.binary)
plt.xlabel(class_names[label])
i +=1
plt.show()
"""
# Build the model
# 1 - Set up Layers
model = tf.keras.Sequential([
tf.keras.layers.Flatten(input_shape = (28, 28, 1)), # Image from 2d array of 28X28 to 1D of 784
tf.keras.layers.Dense(128, activation = tf.nn.relu), # Densely connected hidden Layer of 128 Neurons
tf.keras.layers.Dense(10, activation = tf.nn.softmax) # 10-node softmax layer, each node is a clothing class
])# Input-hidden-output
# 2 - Compile the model
model.compile(optimizer = 'adam',
loss = 'sparse_categorical_crossentropy',
metrics = ['accuracy']) # images that are correctly classified
# 3 - Train the model
BATCH_SIZE = 32
train_dataset = train_dataset.repeat().shuffle(num_train_examples).batch(BATCH_SIZE)
test_dataset = test_dataset.batch(BATCH_SIZE)
model.fit(train_dataset, epochs = 5, steps_per_epoch = math.ceil(num_train_examples / BATCH_SIZE))
# Notice the accuracy improving, reaching about 0.89
# 4 - Evaluate Accuracy
test_loss, test_accuracy = model.evaluate(test_dataset, steps = math.ceil(num_test_examples / 32))
print("Accuracy on test dataset: ", test_accuracy) # 0,87
# 5 - Predictions and Exploration
for test_images, test_labels in test_dataset.take(1):
test_images = test_images.numpy()
test_labels = test_labels.numpy()
predictions = model.predict(test_images)
print(predictions.shape) # (32,10) 32 answers 10 classes
print(predictions[0]) # For 1st image
print(np.argmax(predictions[0])) # Class 4 to take the largest prediction
test_labels[0]
# Plot the results on full 10 channel set
def plot_image(i, predictions_array, true_labels, images):
predictions_array, true_label, img = predictions_array[i], true_labels[i], images[i]
plt.grid(False)
plt.xticks([])
plt.yticks([])
plt.imshow(img[..., 0], cmap = plt.cm.binary)
predicted_label = np.argmax(predictions_array)
if predicted_label == true_label:
color = 'blue'
else:
color = 'red'
plt.xlabel("{} {:2.0f}% ({})".format(class_names[predicted_label],
100 * np.max(predictions_array),
class_names[true_label]),
color = color)
def plot_value_array(i, predictions_array, true_label):
predictions_array, true_label = predictions_array[i], true_label[i]
plt.grid(False)
plt.xticks([])
plt.yticks([])
thisplot = plt.bar(range(10), predictions_array, color = "#777777")
plt.ylim([0,1])
predicted_label = np.argmax(predictions_array)
thisplot[predicted_label].set_color('red')
thisplot[true_label].set_color('blue')
# Check the result for a certain picture
"""
i = 12 # a Pullover
plt.figure(figsize=(6, 3))
plt.subplot(1, 2, 1)
plot_image(i, predictions, test_labels, test_images)
plt.subplot(1, 2, 2)
plot_value_array(i, predictions, test_labels)
"""
# Plot the first X test images, their predicted label, and the true label
# Color correct predictions in blue, incorrect predictions in red
num_rows = 5
num_cols = 3
num_images = num_rows*num_cols
plt.figure(figsize=(2*2*num_cols, 2*num_rows))
for i in range(num_images):
plt.subplot(num_rows, 2*num_cols, 2*i+1)
plot_image(i, predictions, test_labels, test_images)
plt.subplot(num_rows, 2*num_cols, 2*i+2)
plot_value_array(i, predictions, test_labels)
plt.show()
# Grab an image from the test dataset
img = test_images[0]
print(img.shape)
# Add the image to a batch where it's the only member.
img = np.array([img])
print(img.shape)
# now predict
predictions_single = model.predict(img)
print(predictions_single)
plot_value_array(0, predictions_single, test_labels)
_ = plt.xticks(range(10), class_names, rotation=45)
plt.show()
print(np.argmax(predictions_single[0]))
|
py
|
1a57be7e0129a903c60ce50b817307745370d9d9
|
#!/usr/bin/env python
import os
import sys
from pathlib import Path
current_working_dir = os.getcwd()
if current_working_dir not in sys.path:
sys.path.append(current_working_dir)
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django # noqa
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
# This allows easy placement of apps within the interior
# heimdall directory.
current_path = Path(__file__).parent.resolve()
sys.path.append(str(current_path / "heimdall"))
execute_from_command_line(sys.argv)
|
py
|
1a57bf29f4b46fae0a0e2d0498a07b4022807dba
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('web', '0011_card_how_to_obtain'),
]
operations = [
migrations.AddField(
model_name='account',
name='stars',
field=models.PositiveIntegerField(null=True, verbose_name='Stars', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='card',
name='how_to_obtain',
field=models.TextField(help_text="For event or special songs cards. Leave empty if it's only obtainable in recruitment.", null=True, verbose_name='How to get it?', blank=True),
preserve_default=True,
),
]
|
py
|
1a57bf37590b753e66e8e471b530b424fea7ffe5
|
import os
gettext = lambda s: s # noqa
DATA_DIR = os.path.dirname(os.path.dirname(__file__))
"""
Django settings for test_project project.
Generated by 'django-admin startproject' using Django 1.8.18.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '$rv%bfl!o1o7-n3#ikb1f*14ia_ozv((_xh5^3d3ae^+@)#0m#'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
ROOT_URLCONF = 'test_project.urls'
WSGI_APPLICATION = 'test_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en'
TIME_ZONE = 'America/New_York'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(DATA_DIR, 'media')
STATIC_ROOT = os.path.join(DATA_DIR, 'static')
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'test_project', 'static'),
)
SITE_ID = 1
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'test_project', 'templates'), ],
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.i18n',
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.template.context_processors.media',
'django.template.context_processors.csrf',
'django.template.context_processors.tz',
'sekizai.context_processors.sekizai',
'django.template.context_processors.static',
'cms.context_processors.cms_settings'
],
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
'django.template.loaders.eggs.Loader'
],
},
},
]
MIDDLEWARE_CLASSES = (
'cms.middleware.utils.ApphookReloadMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'cms.middleware.user.CurrentUserMiddleware',
'cms.middleware.page.CurrentPageMiddleware',
'cms.middleware.toolbar.ToolbarMiddleware',
'cms.middleware.language.LanguageCookieMiddleware'
)
INSTALLED_APPS = (
'djangocms_admin_style',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.admin',
'django.contrib.sites',
'django.contrib.sitemaps',
'django.contrib.staticfiles',
'django.contrib.messages',
'cms',
'menus',
'sekizai',
'treebeard',
'djangocms_text_ckeditor',
'cmsplugin_form_handler',
'crispy_forms',
'test_project.apps.sample',
'test_project'
)
LANGUAGES = (
('en', gettext('en')),
)
CMS_LANGUAGES = {
1: [
{
'code': 'en',
'public': True,
'redirect_on_fallback': True,
'hide_untranslated': False,
'name': gettext('en'),
},
],
'default': {
'public': True,
'redirect_on_fallback': True,
'hide_untranslated': False,
},
}
CMS_TEMPLATES = (
('fullwidth.html', 'Fullwidth'),
('sidebar_left.html', 'Sidebar Left'),
('sidebar_right.html', 'Sidebar Right')
)
CMS_PERMISSION = True
CMS_PLACEHOLDER_CONF = {}
DATABASES = {
'default': {
'CONN_MAX_AGE': 0,
'ENGINE': 'django.db.backends.sqlite3',
'HOST': 'localhost',
'NAME': 'project.db',
'PASSWORD': '',
'PORT': '',
'USER': ''
}
}
MIGRATION_MODULES = {
}
THUMBNAIL_PROCESSORS = (
'easy_thumbnails.processors.colorspace',
'easy_thumbnails.processors.autocrop',
'filer.thumbnail_processors.scale_and_crop_with_subject_location',
'easy_thumbnails.processors.filters'
)
|
py
|
1a57c0e40a8571d347e5f6a1870fddcec048d8df
|
if __name__ == '__main__' and __package__ is None:
import sys
from os import path
sys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) ))
import pickle
import numpy as np
import torch  # required by de_normalize() when engine == 'torch'
import matplotlib.pyplot as plt
import cv2
from data.transforms.image import de_transform
from vision.multiview import coord2pix, pix2coord
import scipy
from matplotlib.patches import Circle
import time
import math
RGB_MATCHING_COLOR = '#0066cc'
BASELINE_MATCHING_COLOR = 'y'
OURS_MATCHING_COLOR = 'r'
GROUNDTRUTH_COLOR = 'g'
def de_normalize(pts, H, W, engine='numpy'):
"""
Args:
pts: *N x 2 (x, y -> W, H)
"""
pts_ = pts.copy()
if engine == 'torch':
WH = torch.tensor([W, H], dtype=pts.dtype, device=pts.device)
return (pts + 1) * (WH - 1) / 2.
pts_[..., 0] = (pts[..., 0] + 1) * (W - 1) / 2.
pts_[..., 1] = (pts[..., 1] + 1) * (H - 1) / 2.
return pts_
def normalize(pts, H, W):
"""
Args:
pts: *N x 2 (x, y -> W, H)
"""
pts_ = pts.copy()
pts_[..., 0] = -1. + 2. * pts[..., 0] / (W - 1)
pts_[..., 1] = -1. + 2. * pts[..., 1] / (H - 1)
return pts_
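# Worked example (added for clarity, not in the original script): with H = W = 5,
# normalize(np.array([[0., 0.], [4., 4.]]), 5, 5) returns [[-1., -1.], [1., 1.]],
# and de_normalize() maps those normalized values back to pixel coordinates.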
def BGR2Lab(image):
return cv2.cvtColor(image, cv2.COLOR_BGR2Lab)
def Lab2ab(image):
_, A, B = cv2.split(image)
return np.stack([A, B])
class Output(object):
def __init__(self, pkl_path):
with open(pkl_path,"rb") as f:
output = pickle.load(f)
img1 = output['img1'][0]
img1 = de_transform(img1).transpose(1,2,0)
img2 = output['img2'][0]
img2 = de_transform(img2).transpose(1,2,0)
self.img1 = img1[:, :, ::-1]
self.img2 = img2[:, :, ::-1]
img1_ab = Lab2ab(BGR2Lab(img1)).transpose(1,2,0)
img2_ab = Lab2ab(BGR2Lab(img2)).transpose(1,2,0)
self.img1_ab = img1_ab
self.img2_ab = img2_ab
self.depth = output['depth']
self.corr_pos_pred = output['corr_pos_pred']
self.sample_locs = output['sample_locs']
self.img1_path = output['img1_path']
self.img2_path = output['img2_path']
self.camera = output['camera'][0]
self.other_camera = output['other_camera'][0]
self.heatmap_pred = output['heatmap_pred']
self.batch_locs = output['batch_locs']
self.points_2d = output['points-2d']
self.H, self.W = img1.shape[:2]
def calc_color_score(self, x, y):
cx, cy = int(coord2pix(x, 4)), int(coord2pix(y, 4))
ref_point = self.img1_ab[int(y), int(x), :]
color_score = []
max_score_id = None
max_score = -1
for i in range(0, 64):
pos = self.sample_locs[i][int(cy)][int(cx)]
depos = de_normalize(pos, self.H, self.W)
source_point = self.img2_ab[int(depos[1]), int(depos[0]), :]
color_score.append(np.dot(ref_point, source_point))
if color_score[-1] > max_score:
max_score = color_score[-1]
max_score_id = (int(depos[0]), int(depos[1]))
color_score = color_score / sum(color_score)
return color_score, max_score_id
class Complex_Draw(object):
def __init__(self, output, b_output):
self.output = output
self.b_output = b_output
self.ref_img = output.img1
assert output.img1_path == b_output.img1_path
def draw_sample_ax(self, ax, x, y):
output = self.output
b_output = self.b_output
cx, cy = int(coord2pix(x, 4)), int(coord2pix(y, 4))
ax.clear()
# update the line positions
ax.imshow(self.ref_img)
self.lx.set_ydata(y)
self.ly.set_xdata(x)
circ = Circle((x, y), 3, color=GROUNDTRUTH_COLOR)
ax.add_patch(circ)
self.txt.set_text('x=%1.1f, y=%1.1f; g: groundtruth; y: baseline; r: prediction' % (x, y))
def draw_dist_ax(self, ax, x, y):
output = self.output
cx, cy = int(coord2pix(x, 4)), int(coord2pix(y, 4))
color_score, max_score_id = output.calc_color_score(x, y)
xrange = np.arange(0, 64)
ax.clear()
lines_color = {
'feat. matching': OURS_MATCHING_COLOR,
'rgb matching' : '#0066cc',
'non-fusion feat. matching': BASELINE_MATCHING_COLOR,
}
lines_data = {
'feat. matching': output.depth[:, cy, cx],
'rgb matching' : color_score,
'non-fusion feat. matching': self.b_output.depth[:, cy, cx],
}
ax.clear()
for label, line in lines_data.items():
ax.plot(xrange[1:-1], line[1:-1], color=lines_color[label], label=label)
ax.set_yscale('log')
ax.set_ylabel('similarity (log)')
ax.tick_params(bottom=False, top=True)
ax.tick_params(labelbottom=False, labeltop=True)
ax.legend()
return max_score_id
def draw_other_ax(self, ax, x, y, max_score_id, joint_id=None):
output = self.output
b_output = self.b_output
cx, cy = int(coord2pix(x, 4)), int(coord2pix(y, 4))
xx, yy = output.corr_pos_pred[cy][cx]
bxx, byy = self.b_output.corr_pos_pred[cy][cx]
ax.clear()
ax.imshow(output.img2)
circ = Circle(max_score_id, 3, color=RGB_MATCHING_COLOR)
ax.add_patch(circ)
# draw epipolar lines
line_start1 = de_normalize(output.sample_locs[1][int(cy)][int(cx)], output.H, output.W)
line_start2 = de_normalize(output.sample_locs[63][int(cy)][int(cx)], output.H, output.W)
ax.plot([line_start1[0], line_start2[0]], [line_start1[1], line_start2[1]], alpha=0.5, color='b', zorder=1)
# draw groundtruth points
# for i in range(17):
gx, gy = output.points_2d[output.other_camera][joint_id][0], output.points_2d[output.other_camera][joint_id][1]
circ = Circle((gx, gy), 3, color=GROUNDTRUTH_COLOR, zorder=2)
ax.add_patch(circ)
# draw baseline predicted point
circ = Circle((pix2coord(bxx, 4), pix2coord(byy, 4)), 3, color=BASELINE_MATCHING_COLOR, zorder=2)
ax.add_patch(circ)
# draw predicted point
circ = Circle((pix2coord(xx, 4), pix2coord(yy, 4)), 3, color=OURS_MATCHING_COLOR, zorder=3)
ax.add_patch(circ)
def dist(x1, y1, x2, y2):
return math.sqrt((x1 - x2)**2 + (y1-y2) **2)
flag = True
# predicted - gt > baseline - gt
if dist(pix2coord(xx, 4), pix2coord(yy,4), gx, gy)*1.5 > dist(pix2coord(bxx, 4), pix2coord(byy,4), gx, gy):
flag = False
# predicted - gt > TH: 3
if dist(pix2coord(bxx, 4), pix2coord(byy,4), gx, gy) < 5:
flag = False
if flag:
print('img1 path: ', output.img1_path)
print('img2 path: ', output.img2_path)
print('pred - gt: ', dist(pix2coord(xx, 4), pix2coord(yy,4), gx, gy))
print('baseline - gt', dist(pix2coord(bxx, 4), pix2coord(byy,4), gx, gy))
txt = self.sample_ax.text(0, 0, '', va="bottom", ha="left")
txt.set_text('g: groundtruth; y: baseline; r: our prediction')
return flag
def draw_heatmap_ax(self, ax):
output = self.output
ax.clear()
ax.imshow(output.heatmap_pred.max(0))
def draw(self, x, y, save_path, joint_id=None):
self.fig, self.axs = plt.subplots(2, 2, squeeze=True, figsize=(12, 8))
self.sample_ax = self.axs[0, 0]
self.dist_ax = self.axs[0, 1]
self.other_ax = self.axs[1, 0]
self.heatmap_ax = self.axs[1, 1]
self.lx = self.sample_ax.axhline(color='k') # the horiz line
self.ly = self.sample_ax.axvline(color='k') # the vert line
self.txt = self.sample_ax.text(0, 0, '', va="bottom", ha="left")
output = self.output
self.draw_sample_ax(self.sample_ax, x, y)
max_score_id = self.draw_dist_ax(self.dist_ax, x, y)
flag = self.draw_other_ax(self.other_ax, x, y, max_score_id, joint_id)
if not flag:
plt.close()
return flag
self.draw_heatmap_ax(self.heatmap_ax)
plt.savefig(save_path) #, transparent=True)
print('saved for ', save_path)
return flag
class Easy_Draw(Complex_Draw):
def __init__(self, output, b_output):
self.output = output
self.b_output = b_output
self.ref_img = output.img1
assert output.img1_path == b_output.img1_path
def draw(self, x, y, save_path):
self.fig, self.ax = plt.subplots(1, figsize=(12, 8))
output = self.output
self.draw_dist_ax(self.ax, x, y)
plt.savefig(save_path, transparent=True)
print('saved for ', save_path)
root_dir = "outs/epipolar/keypoint_h36m_fixed/visualizations/h36m/"
# for i in range(4,5):
i = 1
j = 2
ours_pkl = root_dir + "output_{}.pkl".format(i)
baseline_pkl = root_dir + "output_baseline_{}.pkl".format(i)
complex_output = root_dir + "{}_joint{}_output.eps"
easy_output = root_dir + "easy_output/{}_joint{}_easy_output.eps"
output = Output(ours_pkl)
b_output = Output(baseline_pkl)
cd = Complex_Draw(output, b_output)
ed = Easy_Draw(output, b_output)
flag = cd.draw(x=output.points_2d[output.camera][j][0], y=output.points_2d[output.camera][j][1], save_path=complex_output.format(i, j), joint_id=j)
if flag:
ed.draw(x=output.points_2d[output.camera][j][0], y=output.points_2d[output.camera][j][1], save_path=easy_output.format(i, j))
fig, ax = plt.subplots()
plt.imshow(output.img1)
ax.axis('off')
fig.savefig(root_dir+'original/{}_ref_img.eps'.format(i),bbox_inches='tight', pad_inches=0)
fig, ax = plt.subplots()
ax.axis('off')
plt.imshow(output.img2)
fig.savefig(root_dir+'original/{}_source_img.eps'.format(i),bbox_inches='tight', pad_inches=0)
print('saved original images')
|
py
|
1a57c13ffb9e56eff251856f73ab6ed0b61d5768
|
from django.db import models
from django.utils.text import slugify
from django.core.validators import MinValueValidator, MinLengthValidator
from django.db.models.fields import SlugField
from django.contrib.auth.models import User
# Create your models here.
class Person(models.Model):
DIRECTOR = 'DR'
DEAN_OF_ACADEMIC_AFFAIRS = 'DOAA'
DEAN_OF_FACULTY_AFFAIRS = 'DOFA'
DEAN_OF_STUDENT_AFFAIRS = 'DOSA'
HEAD_OF_DEPARTMENT = 'HOD'
FACULTY = 'F'
VISITING_FACULTY = 'VF'
REGISTRAR = 'RG'
HEAD_OF_STAFF = 'HOS'
STAFF = 'S'
COMPUTER_SCIENCE_AND_ENGINEERING = 'CSE'
ELECTRONICS_AND_COMMUNICATION_ENGINEERING = 'ECE'
MECHANICAL_AND_MECHATRONICS_ENGINEERING = 'ME'
HUMANITIES_AND_SOCIAL_SCIENCES = 'HSS'
MATHEMATICS = 'MH'
PHYSICS = 'PH'
NON_TEACHING_STAFF = 'NTS'
PERSON_ROLES = (
('DR', 'Director'),
('DOAA', 'Dean of Academic Affairs'),
('DOFA', 'Dean of Faculty Affairs'),
('DOSA', 'Dean of Student Affairs'),
('HOD', 'Head of Department'),
('F', 'Faculty'),
('VF', 'Visiting Faculty'),
('RG', 'Registrar'),
('HOS', 'Head of Staff'),
('S', 'Staff'),
)
DEPARTMENT = (
('CSE', 'Computer Science and Engineering'),
('ECE', 'Electronics and Communication Engineering'),
('ME', 'Mechanical and Mechatronics Engineering'),
('HSS', 'Humanities and Social Sciences'),
('MH', 'Mathematics'),
('PH', 'Physics'),
('NTS', 'Non Teaching Staff'),
)
user = models.OneToOneField(User, on_delete=models.CASCADE, related_name='person')
faculty_id = models.CharField(max_length=10, blank=False, null=False, unique=True, default='0')
leave_count = models.IntegerField(validators=[MinValueValidator(0)], blank=False, null=False, default=22)
department = models.CharField(max_length=3, choices=DEPARTMENT, blank=False, null=False, default='CSE')
first_name = models.CharField(max_length=50, validators=[MinLengthValidator(1)], blank=False, null=False)
last_name = models.CharField(max_length=50, blank=True, null=False)
email = models.EmailField(blank=False, null=False, unique=True)
office_no = models.IntegerField(blank=False, null=False, default=0)
role = models.CharField(max_length=5, choices=PERSON_ROLES, default='F')
updated_at = models.DateTimeField(auto_now=True)
created_at = models.DateTimeField(auto_now_add=True)
# slug = models.SlugField(unique=True)
class Meta:
ordering = ['id']
verbose_name = 'Person'
verbose_name_plural = 'Persons'
def is_director(self):
return self.role == 'DR'
def is_dean(self):
return self.role in ('DOAA', 'DOFA', 'DOSA')
def is_hod(self):
return self.role == 'HOD'
def is_valid(self):
if self.email.split('@')[1] != 'lnmiit.ac.in':
return False
return len(self.first_name) > 0 and self.user is not None and self.leave_count >= 0
def __str__(self):
return f'{self.id}. {self.first_name} {self.last_name}'
class Application(models.Model):
PENDING = 'P'
APPROVED = 'A'
REJECTED = 'R'
APPLICATION_STATUS = (
('P', 'Pending'),
('A', 'Approved'),
('R', 'Rejected'),
)
person = models.ForeignKey(Person, on_delete=models.CASCADE, related_name='applicant', default=1)
status = models.CharField(max_length=1, choices=APPLICATION_STATUS, default='P')
start_date = models.DateField(blank=False, null=False)
end_date = models.DateField(blank=False, null=False)
hasClasses = models.BooleanField(blank=False, null=False, default=False)
rescheduled_date = models.DateField(blank=True, null=True)
up_next = models.ForeignKey(Person, on_delete=models.CASCADE, related_name='up_next', default=1)
comments = models.TextField(blank=True, null=False)
updated_at = models.DateTimeField(auto_now=True)
created_at = models.DateTimeField(auto_now_add=True)
# slug = models.SlugField(unique=True)
class Meta:
ordering = ['start_date', 'end_date']
def is_valid(self):
return self.person.is_valid() and self.start_date < self.end_date
def __str__(self):
return f'{self.id}. {self.person.first_name} {self.person.last_name} - {self.get_status_display()}'
|
py
|
1a57c3af4a67e091e6dbc991979b2bf52d4d4c3c
|
import factory.fuzzy
from waldur_pid.tests import models
class OfferingFactory(factory.DjangoModelFactory):
class Meta:
model = models.Offering
|
py
|
1a57c549b8414413426678288daf9e80ebf96fb6
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ListOnDemandResourceRatingsResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'amount': 'decimal.Decimal',
'discount_amount': 'decimal.Decimal',
'official_website_amount': 'decimal.Decimal',
'measure_id': 'int',
'currency': 'str',
'product_rating_results': 'list[DemandProductRatingResult]'
}
attribute_map = {
'amount': 'amount',
'discount_amount': 'discount_amount',
'official_website_amount': 'official_website_amount',
'measure_id': 'measure_id',
'currency': 'currency',
'product_rating_results': 'product_rating_results'
}
def __init__(self, amount=None, discount_amount=None, official_website_amount=None, measure_id=None, currency=None, product_rating_results=None):
"""ListOnDemandResourceRatingsResponse - a model defined in huaweicloud sdk"""
super(ListOnDemandResourceRatingsResponse, self).__init__()
self._amount = None
self._discount_amount = None
self._official_website_amount = None
self._measure_id = None
self._currency = None
self._product_rating_results = None
self.discriminator = None
if amount is not None:
self.amount = amount
if discount_amount is not None:
self.discount_amount = discount_amount
if official_website_amount is not None:
self.official_website_amount = official_website_amount
if measure_id is not None:
self.measure_id = measure_id
if currency is not None:
self.currency = currency
if product_rating_results is not None:
self.product_rating_results = product_rating_results
@property
def amount(self):
"""Gets the amount of this ListOnDemandResourceRatingsResponse.
|Parameter name: total amount| |Constraints and description: the final amount after discounts|
:return: The amount of this ListOnDemandResourceRatingsResponse.
:rtype: decimal.Decimal
"""
return self._amount
@amount.setter
def amount(self, amount):
"""Sets the amount of this ListOnDemandResourceRatingsResponse.
|Parameter name: total amount| |Constraints and description: the final amount after discounts|
:param amount: The amount of this ListOnDemandResourceRatingsResponse.
:type: decimal.Decimal
"""
self._amount = amount
@property
def discount_amount(self):
"""Gets the discount_amount of this ListOnDemandResourceRatingsResponse.
|Parameter name: discount amount| |Constraints and description: (the difference between the list price and the total amount)|
:return: The discount_amount of this ListOnDemandResourceRatingsResponse.
:rtype: decimal.Decimal
"""
return self._discount_amount
@discount_amount.setter
def discount_amount(self, discount_amount):
"""Sets the discount_amount of this ListOnDemandResourceRatingsResponse.
|Parameter name: discount amount| |Constraints and description: (the difference between the list price and the total amount)|
:param discount_amount: The discount_amount of this ListOnDemandResourceRatingsResponse.
:type: decimal.Decimal
"""
self._discount_amount = discount_amount
@property
def official_website_amount(self):
"""Gets the official_website_amount of this ListOnDemandResourceRatingsResponse.
|Parameter name: official website (list) price| |Constraints and description: the official website price|
:return: The official_website_amount of this ListOnDemandResourceRatingsResponse.
:rtype: decimal.Decimal
"""
return self._official_website_amount
@official_website_amount.setter
def official_website_amount(self, official_website_amount):
"""Sets the official_website_amount of this ListOnDemandResourceRatingsResponse.
|Parameter name: official website (list) price| |Constraints and description: the official website price|
:param official_website_amount: The official_website_amount of this ListOnDemandResourceRatingsResponse.
:type: decimal.Decimal
"""
self._official_website_amount = official_website_amount
@property
def measure_id(self):
"""Gets the measure_id of this ListOnDemandResourceRatingsResponse.
|Parameter name: measure unit id| |Constraints and description: 1: yuan (CNY)|
:return: The measure_id of this ListOnDemandResourceRatingsResponse.
:rtype: int
"""
return self._measure_id
@measure_id.setter
def measure_id(self, measure_id):
"""Sets the measure_id of this ListOnDemandResourceRatingsResponse.
|Parameter name: measure unit id| |Constraints and description: 1: yuan (CNY)|
:param measure_id: The measure_id of this ListOnDemandResourceRatingsResponse.
:type: int
"""
self._measure_id = measure_id
@property
def currency(self):
"""Gets the currency of this ListOnDemandResourceRatingsResponse.
|Parameter name: currency| |Constraints and description: e.g. CNY|
:return: The currency of this ListOnDemandResourceRatingsResponse.
:rtype: str
"""
return self._currency
@currency.setter
def currency(self, currency):
"""Sets the currency of this ListOnDemandResourceRatingsResponse.
|Parameter name: currency| |Constraints and description: e.g. CNY|
:param currency: The currency of this ListOnDemandResourceRatingsResponse.
:type: str
"""
self._currency = currency
@property
def product_rating_results(self):
"""Gets the product_rating_results of this ListOnDemandResourceRatingsResponse.
|Parameter name: product rating results| |Constraints and description: the price rating result for each product|
:return: The product_rating_results of this ListOnDemandResourceRatingsResponse.
:rtype: list[DemandProductRatingResult]
"""
return self._product_rating_results
@product_rating_results.setter
def product_rating_results(self, product_rating_results):
"""Sets the product_rating_results of this ListOnDemandResourceRatingsResponse.
|Parameter name: product rating results| |Constraints and description: the price rating result for each product|
:param product_rating_results: The product_rating_results of this ListOnDemandResourceRatingsResponse.
:type: list[DemandProductRatingResult]
"""
self._product_rating_results = product_rating_results
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListOnDemandResourceRatingsResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
py
|
1a57c54a6583966e1b0f3ce0c1dcfd68484f657b
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# __author__ = "xh"
# Date: 2019/11/13
from core import info_collection
from conf import settings
import urllib.request
import urllib.parse, urllib.error
import os, sys
import json
import datetime
class ArgvHandler(object):
def __init__(self, argv_list):
self.argvs = argv_list
self.pase_argvs()
def pase_argvs(self):
if len(self.argvs) > 1:
if hasattr(self, self.argvs[1]):
func = getattr(self, self.argvs[1])
func()
else:
self.help_msg()
else:
self.help_msg()
def help_msg(self):
msg = '''
collect_data Collect asset data
run_forever ...
get_asset_id Get the asset id
report_asset Report asset data to the server
'''
print(msg)
def collect_data(self):
obj = info_collection.InfoCollection()
asset_data = obj.collect()
print("asset", asset_data)
return asset_data
def get_asset_id(self):
pass
def load_asset_id(self, sn=None):
asset_id_file = settings.Params["asset_id"]
has_asset_id = False
if os.path.isfile(asset_id_file):
asset_id = open(asset_id_file).read().strip()
if asset_id.isdigit():
return asset_id
else:
has_asset_id = False
else:
has_asset_id = False
def __update_asset_id(self, new_asset_id):
'''Write the asset id returned by the server to the local id file'''
asset_id_file = settings.Params["asset_id"]
with open(asset_id_file, "w", encoding="utf-8") as f:
f.write(str(new_asset_id))
def log_record(self, log, action_type=None):
'''Write a log record'''
f = open(settings.Params["log_file"], "a", encoding="utf-8")
if type(log) is str:
pass
if type(log) is dict:
if "info" in log:
for msg in log["info"]:
log_format = "%s\tINFO\t%s\n" % (datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S"), msg)
f.write(log_format)
if "error" in log:
for msg in log["error"]:
log_format = "%s\tERROR\t%s\n" % (datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S"), msg)
f.write(log_format)
if "warning" in log:
for msg in log["warning"]:
log_format = "%s\tWARNING\t%s\n" % (datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S"), msg)
f.write(log_format)
f.close()
def report_asset(self):
obj = info_collection.InfoCollection()
asset_data = obj.collect()
asset_id = self.load_asset_id(asset_data["sn"])
if asset_id: # the asset was reported before: put the locally stored asset_id into asset_data and report straight to the formal asset database
asset_data["asset_id"] = asset_id
post_url = "asset_report"
else: # otherwise this is the asset's first report; send it to the approval-pending area first
asset_data["asset_id"] = None
post_url = "asset_report_with_no_id"
data = {"asset_data": json.dumps(asset_data)}
response = self.__submit_data(post_url, data, method="post")
print("server response:", response)
if "asset_id" in str(response):
self.__update_asset_id(response["asset_id"])
def __submit_data(self, action_type, data, method):
'''
Send data to the target host
:param action_type: url key
:param data: data payload
:param method: request method ("get" or "post")
:return:
'''
if action_type in settings.Params["urls"]:
if type(settings.Params["port"]) is int:
url = "http://%s:%s%s" % (
settings.Params["server"], settings.Params["port"], settings.Params["urls"][action_type])
else:
url = "http://%s%s" % (settings.Params["server"], settings.Params["urls"][action_type])
if method == "get":
args = ""
for k, v in data.items():
args += "&%s=%s" % (k, v)
args = args[1:]
url_with_args = "%s?%s" % (url, args)
try:
req = urllib.request.urlopen(url_with_args, timeout=settings.Params["request_timeout"])
callback = req.read()
return callback
except urllib.error.URLError as e:
sys.exit("\033[31;1m%s\033[0m" % e)
elif method == "post":
try:
data_encode = urllib.parse.urlencode(data).encode()
req = urllib.request.urlopen(url=url, data=data_encode, timeout=settings.Params['request_timeout'])
callback = req.read()
print("\033[31;1m[%s]:[%s]\033[0m response:\n%s" % (method, url, callback))
return callback
except Exception as e:
sys.exit("\033[31;1m%s\033[0m" % e)
else:
raise KeyError
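# Usage sketch (an assumption, not shown in this module): a client entry script
# would typically construct the handler with the process arguments, e.g.
# ArgvHandler(sys.argv), which dispatches to collect_data / report_asset above.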
|
py
|
1a57c5876f01aea71068aca09357848943774cbe
|
'''
Copyright 2019 The Microsoft DeepSpeed Team
'''
import math
import torch
import torch.distributed as dist
try:
from deepspeed.git_version_info import version
from deepspeed.moe.utils import is_moe_param
from deepspeed.ops.adam import DeepSpeedCPUAdam
from deepspeed.ops.op_builder import UtilsBuilder
from deepspeed.runtime.zero.config import ZERO_OPTIMIZATION_GRADIENTS
except ImportError:
pass
from packaging import version as pkg_version
from torch._six import inf
from torch.distributed.distributed_c10d import _get_global_rank
from torch.optim import Optimizer
from colossalai.core import global_context as gpc
from colossalai.registry import OPTIMIZER_WRAPPERS
from colossalai.utils import report_memory_usage
from ._utils import is_model_parallel_parameter
from .loss_scaler import LossScaler, DynamicLossScaler
from ...context.parallel_mode import ParallelMode
# Toggle this to true to enable correctness test
# with gradient partitioning and without
pg_correctness_test = False
def input(msg):
return
def split_half_float_double(tensors):
dtypes = [
"torch.cuda.HalfTensor",
"torch.cuda.FloatTensor",
"torch.cuda.DoubleTensor"
]
buckets = []
for i, dtype in enumerate(dtypes):
bucket = [t for t in tensors if t.type() == dtype]
if bucket:
buckets.append(bucket)
return buckets
def isclose(a, b, rtol=1e-09, atol=0.0):
return abs(a - b) <= max(rtol * max(abs(a), abs(b)), atol)
def lcm(x, y):
from math import gcd  # fractions.gcd was removed in Python 3.9
return x * y // gcd(x, y)
def get_alignment_padding(tensor_list, alignment):
num_elements = sum([tensor.numel() for tensor in tensor_list])
remainder = num_elements % alignment
return (alignment - remainder) if remainder else remainder
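# Example (illustrative): tensors totalling 10 elements with alignment=4 leave a
# remainder of 2, so get_alignment_padding returns 4 - 2 = 2 padding elements.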
def move_to_cpu(tensor_list):
for tensor in tensor_list:
tensor.data = tensor.data.cpu()
def print_rank_msg(msg):
print(f"rank {dist.get_rank()} - {msg}")
@OPTIMIZER_WRAPPERS.register_module
class ZeroRedundancyOptimizer_Level_2(Optimizer):
"""
ZeroRedundancyOptimizer_Level_2 is designed to reduce the memory footprint
required for training large deep learning models.
For more details please see ZeRO: Memory Optimization Towards Training A Trillion Parameter Models
https://arxiv.org/abs/1910.02054
"""
def __init__(self,
init_optimizer,
dp_parallel_mode=ParallelMode.DATA,
static_loss_scale=1.0,
dynamic_loss_scale=False,
dynamic_loss_args=None,
verbose=False,
contiguous_gradients=True,
reduce_bucket_size=500000000,
allgather_bucket_size=5000000000,
reduce_scatter=True,
overlap_comm=False,
cpu_offload=False,
clip_grad=0.0,
allreduce_always_fp32=False,
postscale_gradients=True,
gradient_predivide_factor=1.0,
gradient_accumulation_steps=1,
ignore_unused_parameters=True,
round_robin_gradients=False,
fp16_master_weights_and_gradients=False):
# mpu = None is removed from the parameter list
# tensor parallel will be automatically detected later
# LSG: default arguments for compatibility
has_moe_layers = False
partition_grads = True
expert_parallel_group = None
expert_data_parallel_group = None
self.timers = None
self.defaults = init_optimizer.defaults
dp_process_group = gpc.get_group(dp_parallel_mode)
if gpc.get_world_size(dp_parallel_mode) == 1:
partition_grads = False # for compatibility with dp size = 1
self.verbose = verbose
if dist.get_rank() == 0 and self.verbose:
print(f"Reduce bucket size {reduce_bucket_size}")
print(f"Allgather bucket size {allgather_bucket_size}")
print(f"CPU Offload: {cpu_offload}")
print(
f'Round robin gradient partitioning: {round_robin_gradients}')
# The fused optimizer does all the work. We need this layer for two reasons:
# 1. maintain same user API from apex.fp16_utils
# 2. keep common stuff here in case we need to add new fused optimizers later
# differences from apex.fp16_utils:
# - assume all model params in fp16
# - assume all params requires grad
# - flat by groups, not keeping state. TODO: remove state explicitly?
# - master grad and unflat master weight never exist. TODO: a way to save out unflat master?
if not torch.cuda.is_available():
raise SystemError("Cannot use fp16 without CUDA.")
self.optimizer = init_optimizer
# Load pre-built or JIT compile (un)flatten ops
util_ops = UtilsBuilder().load()
self.flatten = util_ops.flatten
self.unflatten = util_ops.unflatten
# ZeRO stage 1 (False) or 2 (True)
self.partition_gradients = partition_grads
self.reduce_scatter = reduce_scatter
self.overlap_comm = overlap_comm
self.cpu_offload = cpu_offload
self.deepspeed_adam_offload = cpu_offload
self.device = torch.cuda.current_device() if not self.cpu_offload else 'cpu'
self.dp_process_group = dp_process_group
# expert parallel group
self.ep_process_group = expert_parallel_group
# data parallel group for experts
self.expert_dp_process_group = expert_data_parallel_group
# data parallel size for non-experts
dp_size = dist.get_world_size(group=self.dp_process_group)
# For MoE models this may be different for different param groups
# It will be modified during MoE setup later in the init
self.real_dp_process_group = [
dp_process_group for i in range(len(self.optimizer.param_groups))
]
self.partition_count = [dp_size for i in range(
len(self.optimizer.param_groups))]
self.is_gradient_accumulation_boundary = True
# CPU-Offload requires contiguous gradients
self.contiguous_gradients = contiguous_gradients or cpu_offload
self.has_moe_layers = has_moe_layers
if self.has_moe_layers:
self._configure_moe_settings()
if not gpc.is_initialized(ParallelMode.TENSOR) or gpc.get_world_size(ParallelMode.TENSOR) == 1:
self.model_parallel_group = None
self.model_parallel_rank = 0
else:
self.model_parallel_group = gpc.get_group(ParallelMode.TENSOR)
self.model_parallel_rank = gpc.get_local_rank(ParallelMode.TENSOR)
self.overflow = False
self.clip_grad = clip_grad
self.allreduce_always_fp32 = allreduce_always_fp32
self.gradient_predivide_factor = gradient_predivide_factor
self.postscale_gradients = postscale_gradients
self.gradient_accumulation_steps = gradient_accumulation_steps
self.micro_step_id = 0
self.ignore_unused_parameters = ignore_unused_parameters
self.round_robin_gradients = round_robin_gradients
self.extra_large_param_to_reduce = None
self.fp16_master_weights_and_gradients = fp16_master_weights_and_gradients
if self.fp16_master_weights_and_gradients:
assert self.cpu_offload and type(self.optimizer) in [
DeepSpeedCPUAdam], f"fp16_master_and_gradients requires optimizer to support keeping fp16 master and gradients while keeping the optimizer states in fp32. Currenty only supported using ZeRO-Offload with DeepSpeedCPUAdam. But current setting is ZeRO-Offload:{self.cpu_offload} and optimizer type {type(self.optimizer)}. Either disable fp16_master_weights_and_gradients or enable ZeRO-2 Offload with DeepSpeedCPUAdam"
if self.reduce_scatter:
assert not self.allreduce_always_fp32, "allreduce_always_fp32 is not yet supported with ZeRO-2 with reduce scatter enabled"
assert self.gradient_predivide_factor == 1.0, "gradient_predivide_factor != 1.0 is not yet supported with ZeRO-2 with reduce scatter enabled"
assert self.postscale_gradients, "pre-scale gradients is not yet supported with ZeRO-2 with reduce scatter enabled"
# param flattened by groups
self.fp16_groups = []
self.fp16_groups_flat = []
# param partitioned by data parallel degree
# this will contain a list of equal sized tensors
# each of which will be updated by a different process
self.parallel_partitioned_fp16_groups = []
# a single 32-bit partition of the parallel partitioned parameters
# that this process will update
self.single_partition_of_fp32_groups = []
# param partition info
# These are the parameters in each group that will not be updated by this process directly
self.params_not_in_partition = []
# These are the parameters that will be updated by this process directly
self.params_in_partition = []
# Offset from the first parameter in self.params_in_partition
# the parameter boundaries may not align with partition boundaries
# so we need to keep track of the offset
self.first_offset = []
# number of elements per partition in each group
self.partition_size = []
# align nccl all-gather send buffers to a 4-byte boundary
# 4-byte alignment/sizeof(fp16) = 2
self.nccl_start_alignment_factor = 2
assert (
allgather_bucket_size % self.nccl_start_alignment_factor == 0), f"allgather_bucket_size must be a multiple of nccl_start_alignment_factor, {self.nccl_start_alignment_factor} "
self.all_reduce_print = False
self.dtype = self.optimizer.param_groups[0]['params'][0].dtype
self.round_robin_fp16_groups = []
self.round_robin_fp6_indices = []
# padding on each partition for alignment purposes
self.groups_padding = []
# loop to deal with groups
for i, param_group in enumerate(self.optimizer.param_groups):
partition_id = dist.get_rank(group=self.real_dp_process_group[i])
# push this group to list before modify
# TODO: Explore simplification that avoids the extra book-keeping by pushing the reordered group
self.fp16_groups.append(param_group['params'])
# Record padding required to align group to world size
if partition_id == dist.get_world_size(
group=self.real_dp_process_group[i]) - 1:
padding = get_alignment_padding(self.fp16_groups[i],
self.partition_count[i])
else:
padding = 0
self.groups_padding.append(padding)
# not sure why apex was cloning the weights before flattening
# removing cloning here
if self.verbose:
report_memory_usage(f"Before moving param group {i} to CPU")
# move all the parameters to cpu to free up GPU space for creating flat buffer
move_to_cpu(self.fp16_groups[i])
if self.verbose:
report_memory_usage(f"After moving param group {i} to CPU")
# Reorder group parameters for load balancing of gradient partitioning during backward among ranks.
# This ensures that gradients are reduced in a fashion such that ownership round robins among the ranks.
# For example, rather than 3 gradients (g_n+2, g_n+1, g_n) that are reduced consecutively belonging
# to the same rank, instead they will belong to 3 ranks (r_m+2, r_m+1, r_m).
if self.round_robin_gradients:
round_robin_tensors, round_robin_indices = self._round_robin_reorder(
self.fp16_groups[i],
dist.get_world_size(group=self.real_dp_process_group[i])
)
else:
round_robin_tensors = self.fp16_groups[i]
round_robin_indices = list(range(len(self.fp16_groups[i])))
self.round_robin_fp16_groups.append(round_robin_tensors)
self.round_robin_fp16_indices.append(round_robin_indices)
# create flat buffer in CPU and move to GPU
self.fp16_groups_flat.append(
self.flatten_dense_tensors_aligned(
self.round_robin_fp16_groups[i],
self.nccl_start_alignment_factor *
dist.get_world_size(group=self.real_dp_process_group[i])).cuda(
torch.cuda.current_device()))
if self.verbose:
report_memory_usage(
f"After flattening and moving param group {i} to GPU")
if dist.get_rank(group=self.real_dp_process_group[i]) == 0:
report_memory_usage(
f"After Flattening and after emptying param group {i} cache")
# set model fp16 weight to slices of flattened buffer
self._update_model_fp16_weights(i)
# divide the flat weights into near-equal partitions, one per data-parallel rank
# each process will compute on a different part of the partition
data_parallel_partitions = self.get_data_parallel_partitions(
self.fp16_groups_flat[i],
i)
self.parallel_partitioned_fp16_groups.append(
data_parallel_partitions)
# verify that data partition start locations are 4-byte aligned
for partitioned_data in data_parallel_partitions:
assert (partitioned_data.data_ptr() %
(2 * self.nccl_start_alignment_factor) == 0)
# a partition of the fp32 master weights that will be updated by this process
if not fp16_master_weights_and_gradients:
self.single_partition_of_fp32_groups.append(
self.parallel_partitioned_fp16_groups[i][partition_id].to(
self.device).clone().float().detach())
else:
self.single_partition_of_fp32_groups.append(
self.parallel_partitioned_fp16_groups[i][partition_id].to(
self.device).clone().half().detach())
# modify the optimizer to reference the flat fp32 master weight partition
self.single_partition_of_fp32_groups[
i].requires_grad = True # keep this in case internal optimizer uses it
param_group['params'] = [self.single_partition_of_fp32_groups[i]]
partition_size = len(self.fp16_groups_flat[i]) / dist.get_world_size(
group=self.real_dp_process_group[i])
params_in_partition, params_not_in_partition, first_offset = self.get_partition_info(
self.round_robin_fp16_groups[i],
partition_size,
partition_id)
self.partition_size.append(partition_size)
self.params_in_partition.append(params_in_partition)
self.params_not_in_partition.append(params_not_in_partition)
self.first_offset.append(first_offset)
for rank in range(dist.get_world_size()):
if dist.get_rank() == rank and self.verbose:
print(
f"Rank: {rank} partition count {self.partition_count} and sizes{[(p.numel(), self.is_moe_param_group[i] if hasattr(self, 'is_moe_param_group') else False) for i, p in enumerate(self.single_partition_of_fp32_groups)]} "
)
dist.barrier()
# exit(0)
self.reduce_bucket_size = int(reduce_bucket_size)
self.allgather_bucket_size = int(allgather_bucket_size)
self.reduction_event = torch.cuda.Event(
enable_timing=False, blocking=False)
self.reduction_stream = torch.cuda.Stream()
self.cpu_computation_stream = torch.cuda.Stream()
self.copy_grad_stream = torch.cuda.Stream()
self.callback_queued = False
self.param_dict = {}
# map between param_id and bool to specify if a param is in this partition
self.is_param_in_current_partition = {}
self.grads_in_ipg_bucket = []
self.params_in_ipg_bucket = []
self.elements_in_ipg_bucket = 0
self.params_already_reduced = []
self._release_ipg_buffers()
self.previous_reduced_grads = None
self.ipg_bucket_has_moe_params = False
# simplified param id
self.param_id = {}
largest_param_numel = 0
count = 0
for i, params_group in enumerate(self.fp16_groups):
for param in params_group:
unique_id = id(param)
self.param_id[unique_id] = count
self.param_dict[count] = param
self.params_already_reduced.append(False)
if param.numel() > largest_param_numel:
largest_param_numel = param.numel()
count = count + 1
for param_group in self.params_in_partition:
for param in param_group:
self.is_param_in_current_partition[self.get_param_id(
param)] = True
for param_group in self.params_not_in_partition:
for param in param_group:
self.is_param_in_current_partition[self.get_param_id(
param)] = False
if self.cpu_offload:
self.accumulated_grads_in_cpu = {}
self.norm_for_param_grads = {}
self.local_overflow = False
self.grad_position = {}
self.temp_grad_buffer_for_cpu_offload = torch.zeros(
largest_param_numel,
device=self.device,
dtype=self.dtype).pin_memory()
self.temp_grad_buffer_for_gpu_offload = torch.zeros(
largest_param_numel,
device=torch.cuda.current_device(),
dtype=self.dtype)
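# Both staging buffers are sized to the largest single parameter so that any
# gradient can be staged through them: the pinned CPU buffer enables fast
# asynchronous host<->device copies, while the GPU buffer is reused when
# accumulating offloaded gradients back onto the device.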
for i, params_group in enumerate(self.fp16_groups):
self.get_grad_position(i,
self.params_in_partition[i],
self.first_offset[i],
self.partition_size[i])
# mapping from parameter to partition that it belongs to
self.param_to_partition_ids = {}
# stores if a partition has been reduced in this step
self.is_partition_reduced = {}
# number of grads in partition that still need to be computed
self.remaining_grads_in_partition = {}
# total number of grads in partition
self.total_grads_in_partition = {}
# stores if a grad in a partition has been computed or not
self.is_grad_computed = {}
# stores the offset at which a parameter gradient needs to be inserted in a partition
self.grad_partition_insertion_offset = {}
# the offset in the gradient at which it must be inserted at the beginning of the partition
self.grad_start_offset = {}
# will store the averaged gradients required by this partition
self.averaged_gradients = {}
# store index of first parameter in each partition
self.first_param_index_in_partition = {}
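# Note on layout: the bookkeeping dicts above are indexed first by param
# group index, then by partition id, and (for per-parameter entries such as
# is_grad_computed and grad_start_offset) by param id, mirroring how
# gradients are looked up during reduction below.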
# initializes all data structures for implementing gradient partitioning
self.initialize_gradient_partitioning_data_structures()
# resets the data structure value for the next backward propagation
self.reset_partition_gradient_structures()
# creates backward hooks for gradient partitioning
if self.partition_gradients or self.overlap_comm:
self.create_reduce_and_remove_grad_hooks()
# we may have a way of fusing dynamic scale; not supported for now
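# Loss scaling overview: in fp16 the loss is multiplied by loss_scale before
# backward so small gradients do not underflow; the scale is divided back out
# (together with any clipping factor) in unscale_and_clip_grads. Dynamic
# scaling adjusts loss_scale based on detected overflows via _update_scale.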
if self.dtype == torch.float or not dynamic_loss_scale:
loss_scale_value = 1.0 if self.dtype == torch.float else static_loss_scale
self.dynamic_loss_scale = False
self.loss_scaler = LossScaler(scale=loss_scale_value)
cur_iter = 0
else:
if dynamic_loss_args is None:
self.loss_scaler = DynamicLossScaler()
else:
self.loss_scaler = DynamicLossScaler(**dynamic_loss_args)
self.dynamic_loss_scale = True
if self.verbose:
report_memory_usage("Before initializing optimizer states")
self.initialize_optimizer_states()
if self.verbose:
report_memory_usage("After initializing optimizer states")
if dist.get_rank() == 0:
print(f"optimizer state initialized")
if dist.get_rank(group=self.dp_process_group) == 0:
report_memory_usage(f"After initializing ZeRO optimizer")
def _configure_moe_settings(self):
assert self.contiguous_gradients, "Contiguous Gradients in ZeRO Stage 2 must be set to True for MoE. Other code paths are not tested with MoE"
assert self.reduce_scatter, "Reduce Scatter in ZeRO Stage 2 must be set to True for MoE. Other code paths are not tested with MoE"
def is_moe_group(group):
return 'moe' in group and group['moe']
assert any([is_moe_group(group) for group in
self.optimizer.param_groups]), "The model has moe layers, but None of the param groups are marked as MoE. Create a param group with 'moe' key set to True before creating optimizer"
self.is_moe_param_group = []
for i, group in enumerate(self.optimizer.param_groups):
if is_moe_group(group):
assert all(
[is_moe_param(param) for param in group['params']]), "All params in MoE group must be MoE params"
self.real_dp_process_group[i] = self.expert_dp_process_group
self.partition_count[i] = dist.get_world_size(
group=self.expert_dp_process_group)
self.is_moe_param_group.append(True)
else:
self.is_moe_param_group.append(False)
assert self.expert_dp_process_group is not None, "Expert data parallel group should be configured with MoE"
assert self.ep_process_group is not None, "Expert parallel group should be configured with MoE"
def _update_model_fp16_weights(self, group_index):
updated_params = self.unflatten(self.fp16_groups_flat[group_index],
self.round_robin_fp16_groups[group_index])
for p, q in zip(self.round_robin_fp16_groups[group_index], updated_params):
p.data = q.data
# set model fp16 weight to slices of reordered flattened buffer
for param_index, param in enumerate(self.fp16_groups[group_index]):
new_index = self.round_robin_fp16_indices[group_index][param_index]
param.data = self.round_robin_fp16_groups[group_index][new_index].data
def _round_robin_reorder(self, tensor_list, num_partitions):
# disable round robin if you need to debug something
# return tensor_list, list(range(len(tensor_list)))
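# Illustration: with 5 tensors and num_partitions=2 the buckets are
# {0: [t0, t2, t4], 1: [t1, t3]}, so reordered_tensors = [t0, t2, t4, t1, t3]
# and reordered_indices = {0: 0, 2: 1, 4: 2, 1: 3, 3: 4}.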
partition_tensors = {}
for i, tensor in enumerate(tensor_list):
j = i % num_partitions
if j not in partition_tensors:
partition_tensors[j] = []
partition_tensors[j].append((i, tensor))
reordered_tensors = []
reordered_indices = {}
for partition_index in partition_tensors.keys():
for i, (original_index, tensor) in enumerate(partition_tensors[partition_index]):
reordered_indices[original_index] = len(reordered_tensors)
reordered_tensors.append(tensor)
return reordered_tensors, reordered_indices
def _release_ipg_buffers(self):
if self.contiguous_gradients:
self.ipg_buffer = None
self.grads_in_partition = None
self.grads_in_partition_offset = 0
def initialize_optimizer_states(self):
for i, group in enumerate(self.fp16_groups):
single_grad_partition = torch.zeros(
int(self.partition_size[i]),
dtype=self.single_partition_of_fp32_groups[i].dtype,
device=self.device)
self.single_partition_of_fp32_groups[
i].grad = single_grad_partition.pin_memory(
) if self.cpu_offload else single_grad_partition
self.optimizer.step()
if not self.cpu_offload:
for group in self.single_partition_of_fp32_groups:
group.grad = None # class init
return
#########################################################################
#################### ZeRO Stage 1 - reduce gradients ####################
#########################################################################
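# For ZeRO stage 1, gradient reduction is driven from here: with pipeline
# parallelism the IPG buffer is created on demand (backward runs outside
# ZeRO), and when communication is not overlapped every pending gradient is
# pushed through the bucketing path before the epilogue reduces the rest.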
def reduce_gradients(self, pipeline_parallel=False):
world_size = dist.get_world_size(self.dp_process_group)
my_rank = dist.get_rank(self.dp_process_group)
# with PP we must create ipg buffer, since backward is handled outside zero
if pipeline_parallel and self.contiguous_gradients:
self.ipg_buffer = []
buf_0 = torch.empty(int(self.reduce_bucket_size),
dtype=self.dtype,
device=torch.cuda.current_device())
self.ipg_buffer.append(buf_0)
self.ipg_index = 0
if not self.overlap_comm:
for i, group in enumerate(self.fp16_groups):
for param in group:
if param.grad is not None:
self.reduce_ready_partitions_and_remove_grads(param, i)
# reduce any pending grads in either hook/non-hook case
self.overlapping_partition_gradients_reduce_epilogue()
#########################################################################
#########################ZeRO Partition Gradients########################
#########################################################################
def get_first_param_index(self, group_id, param_group, partition_id):
for index, param in enumerate(param_group):
param_id = self.get_param_id(param)
if partition_id in self.param_to_partition_ids[group_id][param_id]:
return index
return None
def initialize_gradient_partitioning_data_structures(self):
for i, param_group in enumerate(self.round_robin_fp16_groups):
total_partitions = dist.get_world_size(
group=self.real_dp_process_group[i])
self.param_to_partition_ids[i] = {}
self.is_partition_reduced[i] = {}
self.total_grads_in_partition[i] = {}
self.remaining_grads_in_partition[i] = {}
self.is_grad_computed[i] = {}
self.grad_partition_insertion_offset[i] = {}
self.grad_start_offset[i] = {}
self.first_param_index_in_partition[i] = {}
for partition_id in range(total_partitions):
self.is_grad_computed[i][partition_id] = {}
self.grad_partition_insertion_offset[i][partition_id] = {}
self.grad_start_offset[i][partition_id] = {}
self.total_grads_in_partition[i][partition_id] = 0
self.initialize_gradient_partition(
i, param_group, partition_id)
self.is_partition_reduced[i][partition_id] = False
self.first_param_index_in_partition[i][
partition_id] = self.get_first_param_index(
i,
param_group,
partition_id)
def independent_gradient_partition_epilogue(self):
if self.verbose:
self.report_ipg_memory_usage(
f"In ipg_epilogue before reduce_ipg_grads", 0)
self.reduce_ipg_grads()
if self.verbose:
self.report_ipg_memory_usage(
f"In ipg_epilogue after reduce_ipg_grads", 0)
# if dist.get_rank() == 0:
# print()("Params already reduced %s", self.params_already_reduced)
for i in range(len(self.params_already_reduced)):
self.params_already_reduced[i] = False
if self.overlap_comm:
torch.cuda.synchronize()
# It is safe to clear previously reduced grads of other partitions
self._clear_previous_reduced_grads()
if self.cpu_offload is False:
for i, _ in enumerate(self.fp16_groups):
if i not in self.averaged_gradients or self.averaged_gradients[i] is None:
self.averaged_gradients[i] = self.get_flat_partition(
self.params_in_partition[i],
self.first_offset[i],
self.partition_size[i],
dtype=self.dtype,
device=torch.cuda.current_device(),
return_tensor_list=True)
else:
avg_new = self.get_flat_partition(self.params_in_partition[i],
self.first_offset[i],
self.partition_size[i],
dtype=self.dtype,
device=torch.cuda.current_device(),
return_tensor_list=True)
for accumulated_grad, new_avg_grad in zip(self.averaged_gradients[i], avg_new):
accumulated_grad.add_(new_avg_grad)
self._release_ipg_buffers()
# No need to keep the gradients anymore.
# All gradients required by the step
# are in self.averaged_gradients
self.zero_grad()
if self.verbose:
report_memory_usage(f"End ipg_epilogue")
# resets all partitions to not reduced
# sets remaining grads to the total number of grads in each partition
# sets is_grad_computed to False for all grads in each partition
def reset_partition_gradient_structures(self):
for i, _ in enumerate(self.fp16_groups):
total_partitions = dist.get_world_size(
group=self.real_dp_process_group[i])
for partition_id in range(total_partitions):
self.is_partition_reduced[i][partition_id] = False
self.remaining_grads_in_partition[i][
partition_id] = self.total_grads_in_partition[i][partition_id]
for param_id in self.is_grad_computed[i][partition_id]:
self.is_grad_computed[i][partition_id][param_id] = False
def initialize_gradient_partition(self, i, param_group, partition_id):
def set_key_value_list(dictionary, key, value):
if key in dictionary:
dictionary[key].append(value)
else:
dictionary[key] = [value]
def increment_value(dictionary, key):
if key in dictionary:
dictionary[key] += 1
else:
dictionary[key] = 1
partition_size = self.partition_size[i]
start_index = partition_size * partition_id
end_index = partition_size * (partition_id + 1)
current_index = 0
first_offset = 0
for param in param_group:
param_size = param.numel()
param_id = self.get_param_id(param)
if (current_index >= start_index and current_index < end_index):
set_key_value_list(self.param_to_partition_ids[i],
param_id,
partition_id)
increment_value(self.total_grads_in_partition[i], partition_id)
self.is_grad_computed[i][partition_id][param_id] = False
self.grad_partition_insertion_offset[i][partition_id][
param_id] = current_index - start_index
self.grad_start_offset[i][partition_id][param_id] = 0
elif start_index > current_index and start_index < (current_index +
param_size):
assert (
first_offset == 0), "This can happen either zero or only once as this must be the first tensor in the partition"
first_offset = start_index - current_index
set_key_value_list(self.param_to_partition_ids[i],
param_id,
partition_id)
increment_value(self.total_grads_in_partition[i], partition_id)
self.is_grad_computed[i][partition_id][param_id] = False
self.grad_partition_insertion_offset[i][partition_id][param_id] = 0
self.grad_start_offset[i][partition_id][param_id] = first_offset
current_index = current_index + param_size
def overlapping_partition_gradients_reduce_epilogue(self):
self.independent_gradient_partition_epilogue()
def create_reduce_and_remove_grad_hooks(self):
self.grad_accs = []
for i, param_group in enumerate(self.fp16_groups):
for param in param_group:
if param.requires_grad:
def wrapper(param, i):
param_tmp = param.expand_as(param)
grad_acc = param_tmp.grad_fn.next_functions[0][0]
def reduce_partition_and_remove_grads(*notneeded):
self.reduce_ready_partitions_and_remove_grads(
param, i)
grad_acc.register_hook(
reduce_partition_and_remove_grads)
self.grad_accs.append(grad_acc)
wrapper(param, i)
def get_param_id(self, param):
unique_id = id(param)
return self.param_id[unique_id]
def report_ipg_memory_usage(self, tag, param_elems):
elem_count = self.elements_in_ipg_bucket + param_elems
percent_of_bucket_size = (
100.0 * elem_count) // self.reduce_bucket_size
if self.verbose:
report_memory_usage(
f"{tag}: elems in_bucket {self.elements_in_ipg_bucket} param {param_elems} max_percent {percent_of_bucket_size}"
)
# create a flat tensor aligned at the alignment boundary
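# Illustration: tensors with 3 and 6 elements and alignment=4 total 9
# elements, so a 3-element zero pad tensor is appended before flattening,
# producing a flat buffer of 12 elements.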
def flatten_dense_tensors_aligned(self, tensor_list, alignment):
num_elements = 0
for tensor in tensor_list:
num_elements = num_elements + tensor.numel()
remaining = num_elements % alignment
if remaining:
elements_to_add = alignment - remaining
pad_tensor = torch.zeros(elements_to_add,
device=tensor_list[0].device,
dtype=tensor_list[0].dtype)
padded_tensor_list = tensor_list + [pad_tensor]
num_elements = num_elements + elements_to_add
else:
padded_tensor_list = tensor_list
return self.flatten(padded_tensor_list)
############### Independent Partition Gradient ########################
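# IPG (independent partition gradient) bucketing: as gradients become ready
# they are appended to a bucket; once adding the next gradient would exceed
# reduce_bucket_size elements, the bucket is reduced (averaged and sent to the
# owning ranks) and emptied. With contiguous_gradients, gradients are copied
# into the preallocated ipg_buffer instead of being flattened later.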
def reduce_independent_p_g_buckets_and_remove_grads(self, param, i):
if self.elements_in_ipg_bucket + param.numel() > self.reduce_bucket_size:
self.report_ipg_memory_usage("In ipg_remove_grads before reduce_ipg_grads",
param.numel())
self.reduce_ipg_grads()
if self.contiguous_gradients and self.overlap_comm:
# Swap ipg_index between 0 and 1
self.ipg_index = 1 - self.ipg_index
self.report_ipg_memory_usage("In ipg_remove_grads after reduce_ipg_grads",
param.numel())
param_id = self.get_param_id(param)
assert self.params_already_reduced[param_id] == False, \
f"The parameter {param_id} has already been reduced. \
Gradient computed twice for this partition. \
Multiple gradient reduction is currently not supported"
if param.numel() > self.reduce_bucket_size:
self.extra_large_param_to_reduce = param
elif self.contiguous_gradients:
# keeping the gradients contiguous to prevent memory fragmentation, and avoid flattening
new_grad_tensor = self.ipg_buffer[self.ipg_index].narrow(
0,
self.elements_in_ipg_bucket,
param.numel())
new_grad_tensor.copy_(param.grad.view(-1))
param.grad.data = new_grad_tensor.data.view_as(param.grad)
self.elements_in_ipg_bucket += param.numel()
assert param.grad is not None, f"rank {dist.get_rank()} - Invalid to reduce Param {param_id} with None gradient"
self.grads_in_ipg_bucket.append(param.grad)
self.params_in_ipg_bucket.append((i, param, param_id))
# make sure the average tensor function knows how to average the gradients
if is_moe_param(param):
self.ipg_bucket_has_moe_params = True
self.report_ipg_memory_usage("End ipg_remove_grads", 0)
def print_rank_0(self, message):
if dist.get_rank() == 0 and self.verbose:
print(message)
def gradient_reduction_w_predivide(self, tensor):
dp_world_size = dist.get_world_size(group=self.dp_process_group)
tensor_to_allreduce = tensor
if self.allreduce_always_fp32:
tensor_to_allreduce = tensor.float()
if self.postscale_gradients:
if self.gradient_predivide_factor != 1.0:
tensor_to_allreduce.mul_(1. / self.gradient_predivide_factor)
dist.all_reduce(tensor_to_allreduce, group=self.dp_process_group)
if self.gradient_predivide_factor != dp_world_size:
tensor_to_allreduce.mul_(
self.gradient_predivide_factor / dp_world_size)
else:
tensor_to_allreduce.div_(dp_world_size)
dist.all_reduce(tensor_to_allreduce, group=self.dp_process_group)
if self.allreduce_always_fp32 and tensor is not tensor_to_allreduce:
tensor.copy_(tensor_to_allreduce)
return tensor
def average_tensor(self, tensor):
if self.overlap_comm:
torch.cuda.synchronize()
stream = self.reduction_stream
else:
stream = torch.cuda.current_stream()
with torch.cuda.stream(stream):
if not self.reduce_scatter:
self.gradient_reduction_w_predivide(tensor)
return
# Accumulate destination ranks and bucket offsets for each gradient slice.
# Note: potential future optimization, record access pattern of parameters
# in backward pass and partition gradients w.r.t. access pattern so that our
# bucket is guaranteed to be contiguous w.r.t. ranks
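# Each gradient in the bucket may span one or more partitions; the loop below
# records, for every destination rank, the offset of its slice within the flat
# bucket and the slice length, merging consecutive slices that go to the same
# rank so that a single dist.reduce call covers them.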
rank_and_offsets = []
real_dp_process_group = []
curr_size = 0
prev_id = -1
process_group = self.dp_process_group
# count = 0
for i, param, param_id in self.params_in_ipg_bucket:
process_group = self.dp_process_group
# Averages gradients at parameter level if ipg has a moe param
# Otherwise averaging is done at the entire buffer level at the end of the loop
if self.ipg_bucket_has_moe_params:
process_group = self.expert_dp_process_group if is_moe_param(
param) else self.dp_process_group
param.grad.data.div_(
dist.get_world_size(group=process_group))
partition_ids = self.param_to_partition_ids[i][param_id]
partition_size = self.partition_size[i]
# Get all partition ids + their offsets
partition_ids_w_offsets = []
for partition_id in partition_ids:
offset = self.grad_start_offset[i][partition_id][param_id]
partition_ids_w_offsets.append((partition_id, offset))
partition_ids_w_offsets.sort(key=lambda t: t[1])
# Calculate rank and offsets for grad slices
for idx in range(len(partition_ids_w_offsets)):
partition_id, offset = partition_ids_w_offsets[idx]
# if dist.get_rank() == 0 and count < 100:
# print(f"Rank {dist.get_rank()} rank offet id {idx} calculated dp size {dist.get_world_size(group=process_group)} real dp size {dist.get_world_size(self.real_dp_process_group[i])} and dst: {partition_id}")
# count += 1
# Calculate numel for grad slice depending on partition location
if idx == len(partition_ids_w_offsets) - 1:
# Last partition_id uses its own offset
numel = param.numel() - offset
else:
# Set numel to next partition's offset
numel = partition_ids_w_offsets[idx + 1][1] - offset
# Merge bucket ranges if they belong to the same rank
if partition_id == prev_id:
prev_pid, prev_size, prev_numel = rank_and_offsets[-1]
rank_and_offsets[-1] = (prev_pid,
prev_size, prev_numel + numel)
else:
rank_and_offsets.append(
(partition_id, curr_size, numel))
real_dp_process_group.append(process_group)
curr_size += numel
prev_id = partition_id
if not self.ipg_bucket_has_moe_params:
tensor.div_(dist.get_world_size(group=self.dp_process_group))
async_handles = []
for i, (dst, bucket_offset, numel) in enumerate(rank_and_offsets):
grad_slice = tensor.narrow(0, int(bucket_offset), int(numel))
# if dist.get_rank() == 0:
# print(f"Rank {dist.get_rank()} rank offet id {i} real dp size {dist.get_world_size(group=real_dp_process_group[i])} and dst: {dst}")
# dist.barrier()
# dist.barrier()
dst_rank = _get_global_rank(real_dp_process_group[i], dst)
async_handle = dist.reduce(grad_slice,
dst=dst_rank,
group=real_dp_process_group[i],
async_op=True)
async_handles.append(async_handle)
for handle in async_handles:
handle.wait()
##############################################################################
############################# CPU Offload Methods#############################
##############################################################################
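# grad_position maps param_id -> [group_id, offset_into_the_param,
# offset_into_the_owned_fp32_partition, num_elements], i.e. which slice of a
# parameter's gradient this rank owns and where that slice lands in the fp32
# partition buffer.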
def get_grad_position(self, group_id, tensor_list, first_offset, partition_size):
current_offset = 0
for i, tensor in enumerate(tensor_list):
param_id = self.get_param_id(tensor)
param_start_offset = 0
num_elements = tensor.numel()
tensor_offset = 0
# we need to offset to get to the right element
if i == 0 and first_offset > 0:
tensor_offset = first_offset
num_elements = num_elements - tensor_offset
param_start_offset = first_offset
# we don't need all elements of the tensor
if num_elements > (partition_size - current_offset):
num_elements = partition_size - current_offset
self.grad_position[param_id] = [
int(group_id),
int(param_start_offset),
int(current_offset),
int(num_elements)
]
current_offset += num_elements
def update_overflow_tracker_for_param_grad(self, param):
if param.grad is not None and self._has_inf_or_nan(param.grad.data):
self.local_overflow = True
def async_accumulate_grad_in_cpu_via_gpu(self, param):
param_id = self.get_param_id(param)
[i, source_offset, dest_offset, num_elements] = self.grad_position[param_id]
# copy to a preexisting buffer to avoid the memory allocation penalty
dest_buffer = self.temp_grad_buffer_for_gpu_offload.view(-1).narrow(
0,
0,
param.numel())
# buffer for storing gradients for this parameter in CPU
def buffer_to_accumulate_to_in_cpu():
if not self.fp16_master_weights_and_gradients:
return torch.zeros(param.numel(),
dtype=param.dtype,
device=self.device).pin_memory()
else:
return self.single_partition_of_fp32_groups[i].grad.view(-1).narrow(
0,
dest_offset,
num_elements)
# accumulate gradients into param.grad, or the parts of it that belong to this partition
def accumulate_gradients():
if not self.fp16_master_weights_and_gradients:
dest_buffer.copy_(self.accumulated_grads_in_cpu[param_id].view(-1),
non_blocking=True)
param.grad.data.view(-1).add_(dest_buffer)
else:
dest_buffer.narrow(0,
source_offset,
num_elements).copy_(
self.accumulated_grads_in_cpu[param_id].view(-1),
non_blocking=True)
param.grad.data.view(-1).narrow(
0,
source_offset,
num_elements).add_(dest_buffer.narrow(0,
source_offset,
num_elements))
# move accumulated gradients back to CPU
def copy_gradients_to_cpu():
if not self.fp16_master_weights_and_gradients:
self.accumulated_grads_in_cpu[param_id].data.copy_(
param.grad.data.view(-1),
non_blocking=True)
else:
self.accumulated_grads_in_cpu[param_id].data.copy_(
param.grad.data.view(-1).narrow(0,
source_offset,
num_elements),
non_blocking=True)
if param_id not in self.accumulated_grads_in_cpu:
self.accumulated_grads_in_cpu[param_id] = buffer_to_accumulate_to_in_cpu(
)
if self.micro_step_id > 0:
accumulate_gradients()
# at the boundary we will send 32bit directly
if not self.is_gradient_accumulation_boundary:
copy_gradients_to_cpu()
def set_norm_for_param_grad(self, param):
param_id = self.get_param_id(param)
accumulated_grad = self.accumulated_grads_in_cpu[
param_id] if self.gradient_accumulation_steps > 1 else param.grad
[i, source_offset, dest_offset, num_elements] = self.grad_position[param_id]
start = source_offset
accumulated_grad = accumulated_grad.view(
-1).narrow(0, start, num_elements)
self.norm_for_param_grads[param_id] = accumulated_grad.data.double().norm(
2)
def set_norm_for_param_grad_in_gpu(self, param):
param_id = self.get_param_id(param)
accumulated_grad = param.grad
[i, source_offset, dest_offset, num_elements] = self.grad_position[param_id]
start = source_offset
accumulated_grad = accumulated_grad.view(
-1).narrow(0, start, num_elements)
self.norm_for_param_grads[param_id] = accumulated_grad.data.double().norm(
2)
def async_inplace_copy_grad_to_fp32_buffer_from_gpu(self, param):
param_id = self.get_param_id(param)
[i, source_offset, dest_offset, num_elements] = self.grad_position[param_id]
dest_tensor = self.single_partition_of_fp32_groups[i].grad.view(-1).narrow(
0,
dest_offset,
num_elements)
src_tensor = param.grad.view(-1).narrow(0, source_offset, num_elements)
if not self.fp16_master_weights_and_gradients:
src_tensor = src_tensor.float()
dest_tensor.copy_(src_tensor, non_blocking=True)
param.grad = None # offload only
def complete_grad_norm_calculation_for_cpu_offload(self, params):
total_norm = 0.0
norm_type = 2.0
for p in params:
if is_model_parallel_parameter(p) or (self.model_parallel_rank == 0):
param_id = self.get_param_id(p)
# as some models have trainable parameters that are skipped in training,
# their backward hooks in self.create_reduce_and_remove_grad_hooks() will not run,
# so they have no norm_for_param_grads
if param_id in self.norm_for_param_grads:
param_norm = self.norm_for_param_grads[param_id]
total_norm += param_norm.item() ** 2
else:
# As unused parameters in modules may not be expected sometimes,
# add an explicit error msg when it occurred and an option to
# avoid the error
assert self.ignore_unused_parameters, """
This assert indicates that your module has parameters that
were not used in producing loss.
You can avoid this assert by
(1) enabling the ignore_unused_parameters option in the zero_optimization config; or
(2) making sure all trainable parameters and `forward` function
outputs participate in calculating loss.
"""
# Sum across all model parallel GPUs.
total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)])
torch.distributed.all_reduce(total_norm_cuda,
op=torch.distributed.ReduceOp.SUM,
group=self.dp_process_group)
self._model_parallel_all_reduce(tensor=total_norm_cuda,
op=torch.distributed.ReduceOp.SUM)
total_norm = total_norm_cuda[0].item() ** (1. / norm_type)
if total_norm == float(
'inf') or total_norm == -float('inf') or total_norm != total_norm:
total_norm = -1
return total_norm
############################################################################################
def copy_grads_in_partition(self, param):
if self.cpu_offload:
if self.gradient_accumulation_steps > 1:
self.async_accumulate_grad_in_cpu_via_gpu(param)
if self.is_gradient_accumulation_boundary:
self.set_norm_for_param_grad_in_gpu(param)
self.update_overflow_tracker_for_param_grad(param)
self.async_inplace_copy_grad_to_fp32_buffer_from_gpu(param)
return
# print(f"ID {self.get_param_id(param)} grad norm {param.grad.norm()}")
if self.grads_in_partition is None:
self.grads_in_partition_offset = 0
total_size = 0
for group in self.params_in_partition:
for param_in_partition in group:
total_size += param_in_partition.numel()
if self.verbose:
report_memory_usage(
f"before copying {total_size} gradients into partition")
self.grads_in_partition = torch.empty(int(total_size),
dtype=self.dtype,
device=torch.cuda.current_device())
if self.verbose:
report_memory_usage(
f"after copying {total_size} gradients into partition")
# The allreduce buffer will be rewritten. Copy the gradients in the partition to a new buffer
new_grad_tensor = self.grads_in_partition.view(-1).narrow(
0,
self.grads_in_partition_offset,
param.numel())
new_grad_tensor.copy_(param.grad.view(-1))
param.grad.data = new_grad_tensor.data.view_as(param.grad)
# print(f"Grad norm after copy to contiguous_buffer {param.grad.data.norm()}")
self.grads_in_partition_offset += param.numel()
def reduce_ipg_grads(self):
if self.contiguous_gradients:
if self.extra_large_param_to_reduce is not None:
assert len(
self.params_in_ipg_bucket) == 1, "more than 1 param in ipg bucket, this shouldn't happen"
_, _, param_id = self.params_in_ipg_bucket[0]
assert self.get_param_id(
self.extra_large_param_to_reduce) == param_id, "param in ipg bucket does not match extra-large param"
self.average_tensor(
self.extra_large_param_to_reduce.grad.view(-1))
self.extra_large_param_to_reduce = None
else:
self.average_tensor(self.ipg_buffer[self.ipg_index])
else:
self.buffered_reduce_fallback(
None,
self.grads_in_ipg_bucket,
elements_per_buffer=self.elements_in_ipg_bucket)
if self.overlap_comm:
stream = self.reduction_stream
elif self.cpu_offload:
# TODO: copy_grad_stream is disabled because of race with reduce. This hurts perf and should be fixed.
# torch.cuda.synchronize()
# stream = self.copy_grad_stream
stream = torch.cuda.current_stream()
else:
stream = torch.cuda.current_stream()
with torch.cuda.stream(stream):
for _, param, param_id in self.params_in_ipg_bucket:
assert self.params_already_reduced[param_id] == False, \
f"The parameter {param_id} has already been reduced. \
Gradient computed twice for this partition. \
Multiple gradient reduction is currently not supported"
self.params_already_reduced[param_id] = True
if self.partition_gradients:
if not self.is_param_in_current_partition[param_id]:
if self.overlap_comm and self.contiguous_gradients is False:
# Clear grads of other partitions during the next reduction
# to avoid clearing them before the reduction is complete.
if self.previous_reduced_grads is None:
self.previous_reduced_grads = []
self.previous_reduced_grads.append(param)
else:
param.grad = None # only if self.partition_gradients
elif self.contiguous_gradients:
self.copy_grads_in_partition(param)
self.grads_in_ipg_bucket = []
self.params_in_ipg_bucket = []
self.ipg_bucket_has_moe_params = False
self.elements_in_ipg_bucket = 0
#####################################################################
def reduce_ready_partitions_and_remove_grads(self, param, i):
if self.partition_gradients or self.is_gradient_accumulation_boundary:
self.reduce_independent_p_g_buckets_and_remove_grads(param, i)
def zero_reduced_gradients(self, partition_id, i):
def are_all_related_partitions_reduced(params_id):
for partition_id in self.param_to_partition_ids[i][params_id]:
if not self.is_partition_reduced[i][partition_id]:
return False
return True
for params_id in self.is_grad_computed[i][partition_id]:
if are_all_related_partitions_reduced(params_id):
self.param_dict[params_id].grad = None # dead code
def flatten_and_print(self, message, tensors, start=0, n=5):
flatten_tensor = self.flatten(tensors)
def print_func():
print(flatten_tensor.contiguous().view(-1).narrow(0, start, n))
self.sequential_execution(print_func, message)
def get_grads_to_reduce(self, i, partition_id):
def get_reducable_portion(key):
grad = self.param_dict[key].grad
total_elements = grad.numel()
start = self.grad_start_offset[i][partition_id][key]
num_elements = min(
total_elements - start,
self.partition_size[i] -
self.grad_partition_insertion_offset[i][partition_id][key])
if not pg_correctness_test:
if num_elements == total_elements:
return grad
else:
return grad.contiguous().view(-1).narrow(0,
int(start),
int(num_elements))
else:
if num_elements == total_elements:
return grad.clone()
else:
return grad.clone().contiguous().view(-1).narrow(
0,
int(start),
int(num_elements))
grads_to_reduce = []
for key in self.is_grad_computed[i][partition_id]:
grad = get_reducable_portion(key)
grads_to_reduce.append(grad)
return grads_to_reduce
def sequential_execution(self, function, message, group=None):
if group is None:
group = self.dp_process_group
if dist.get_rank(group=group) == 0:
print(message)
for id in range(dist.get_world_size(group=group)):
if id == dist.get_rank(group=group):
function()
dist.barrier(group=group)
def set_none_gradients_to_zero(self, i, partition_id):
for param_id in self.is_grad_computed[i][partition_id]:
param = self.param_dict[param_id]
if param.grad is None:
param.grad = torch.zeros_like(param)
######################Reduction Related Methods##############################
def allreduce_bucket(self, bucket, allreduce_always_fp32=False, rank=None, log=None):
rank = None
tensor = self.flatten(bucket)
tensor_to_allreduce = tensor
if pg_correctness_test:
allreduce_always_fp32 = True
if allreduce_always_fp32:
tensor_to_allreduce = tensor.float()
tensor_to_allreduce.div_(
dist.get_world_size(group=self.dp_process_group))
if rank is None:
# "All Reducing"
dist.all_reduce(tensor_to_allreduce, group=self.dp_process_group)
else:
global_rank = _get_global_rank(self.dp_process_group, rank)
dist.reduce(tensor_to_allreduce, global_rank,
group=self.dp_process_group)
if allreduce_always_fp32 and tensor is not tensor_to_allreduce:
if rank is None or rank == dist.get_rank(group=self.dp_process_group):
tensor.copy_(tensor_to_allreduce)
return tensor
def _clear_previous_reduced_grads(self):
if self.previous_reduced_grads is not None:
for param in self.previous_reduced_grads:
param.grad = None # overlap enabled
self.previous_reduced_grads = None
# if rank is specified do a reduction instead of an allreduce
def allreduce_and_copy(self, small_bucket, rank=None, log=None):
if self.overlap_comm:
torch.cuda.synchronize()
# It is safe to clear the previously reduced grads of other partitions
self._clear_previous_reduced_grads()
stream = self.reduction_stream
else:
stream = torch.cuda.current_stream()
with torch.cuda.stream(stream):
allreduced = self.allreduce_bucket(
small_bucket, rank=rank, log=log)
if rank is None or rank == dist.get_rank(group=self.dp_process_group):
for buf, synced in zip(small_bucket, self.unflatten(allreduced, small_bucket)):
buf.copy_(synced)
def allreduce_no_retain(self,
bucket,
numel_per_bucket=500000000,
rank=None,
log=None):
small_bucket = []
numel = 0
for tensor in bucket:
small_bucket.append(tensor)
numel = numel + tensor.numel()
if numel > numel_per_bucket:
self.allreduce_and_copy(small_bucket, rank=rank, log=None)
small_bucket = []
if len(small_bucket) > 0:
self.allreduce_and_copy(small_bucket, rank=rank, log=log)
# allows using reduction of gradients instead of using all_reduce
def buffered_reduce_fallback(self,
rank,
grads,
elements_per_buffer=500000000,
log=None):
split_buckets = split_half_float_double(grads)
for i, bucket in enumerate(split_buckets):
self.allreduce_no_retain(bucket,
numel_per_bucket=elements_per_buffer,
rank=rank,
log=log)
#############################################################################
#############################################################################
#############################################################################
# views the tensor as multiple partitions and returns
# those partitions
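# Illustration: a 10-element tensor split across dp=3 ranks yields partitions
# of sizes [4, 3, 3]; the first (total % dp) ranks receive one extra element.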
def get_data_parallel_partitions(self, tensor, group_id):
partitions = []
dp = dist.get_world_size(group=self.real_dp_process_group[group_id])
dp_id = dist.get_rank(group=self.real_dp_process_group[group_id])
total_num_elements = tensor.numel()
base_size = total_num_elements // dp
remaining = total_num_elements % dp
start = 0
for id in range(dp):
partition_size = base_size
if id < remaining:
partition_size = partition_size + 1
partitions.append(tensor.narrow(0, start, partition_size))
start = start + partition_size
return partitions
def get_partition_info(self, tensor_list, partition_size, partition_id):
params_in_partition = []
params_not_in_partition = []
start_index = partition_size * partition_id
end_index = partition_size * (partition_id + 1)
current_index = 0
first_offset = 0
for tensor in tensor_list:
tensor_size = tensor.numel()
if (current_index >= start_index and current_index < end_index):
params_in_partition.append(tensor)
elif start_index > current_index and start_index < (current_index +
tensor_size):
params_in_partition.append(tensor)
assert (
first_offset == 0), "This can happen either zero or only once as this must be the first tensor in the partition"
first_offset = start_index - current_index
else:
params_not_in_partition.append(tensor)
current_index = current_index + tensor_size
return params_in_partition, params_not_in_partition, first_offset
def zero_grad(self, set_grads_to_None=True):
"""
Zero FP16 parameter grads.
"""
# FP32 grad should never exist.
# For speed, set model fp16 grad to None by default
for group in self.fp16_groups:
for p in group:
if set_grads_to_None:
p.grad = None # epilogue and in step
else:
if p.grad is not None:
p.grad.detach_()
p.grad.zero_()
def _model_parallel_all_reduce(self, tensor, op):
""" Perform all reduce within model parallel group, if any.
"""
if self.model_parallel_group is None:
pass
else:
torch.distributed.all_reduce(tensor=tensor,
op=op,
group=self.model_parallel_group)
def get_grad_norm_direct(self, gradients, params, norm_type=2):
"""Clips gradient norm of an iterable of parameters.
This is adapted from ``torch.nn.utils.clip_grad.clip_grad_norm_`` and
added functionality to handle model parallel parameters. Note that
the gradients are modified in place.
Arguments:
parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a
single Tensor that will have gradients normalized
max_norm (float or int): max norm of the gradients
norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for
infinity norm.
Returns:
Total norm of the parameters (viewed as a single vector).
"""
norm_type = float(norm_type)
if norm_type == inf:
total_norm = max(g.data.abs().max() for g in gradients)
total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)])
torch.distributed.all_reduce(total_norm_cuda,
op=torch.distributed.ReduceOp.MAX,
group=self.dp_process_group)
# Take max across all GPUs.
self._model_parallel_all_reduce(tensor=total_norm_cuda,
op=torch.distributed.ReduceOp.MAX)
total_norm = total_norm_cuda[0].item()
else:
total_norm = 0.0
# if dist.get_rank() == 0:
# print()(f"Total Norm begining {total_norm}")
for g, p in zip(gradients, params):
if is_model_parallel_parameter(p) or (self.model_parallel_rank == 0):
param_norm = g.data.double().norm(2)
total_norm += param_norm.item() ** 2
# Sum across all model parallel GPUs.
total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)])
torch.distributed.all_reduce(total_norm_cuda,
op=torch.distributed.ReduceOp.SUM,
group=self.dp_process_group)
self._model_parallel_all_reduce(tensor=total_norm_cuda,
op=torch.distributed.ReduceOp.SUM)
total_norm = total_norm_cuda[0].item() ** (1. / norm_type)
if total_norm == float(
'inf') or total_norm == -float('inf') or total_norm != total_norm:
total_norm = -1
return total_norm
# creates a flat fused tensor from the tensor list starting at the first_offset
# in the first tensor of the list. If there are not enough elements in the tensor
# list then the flat tensor will be padded with zeros
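# Illustration: with first_offset=2, a leading 10-element gradient contributes
# only its last 8 elements; later gradients are appended until partition_size
# elements are collected, padding with zeros if the list runs short.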
def get_flat_partition(self,
tensor_list,
first_offset,
partition_size,
dtype,
device,
return_tensor_list=False):
flat_tensor_list = []
current_size = 0
for i, tensor in enumerate(tensor_list):
if tensor.grad is None:
tensor.grad = torch.zeros_like(tensor)
tensor = tensor.grad
num_elements = tensor.numel()
tensor_offset = 0
# we need to offset to get to the right element
if i == 0 and first_offset > 0:
tensor_offset = first_offset
num_elements = num_elements - tensor_offset
# we don't need all elements of the tensor
if num_elements > (partition_size - current_size):
num_elements = partition_size - current_size
# we need a narrow view of the tensor based on the tensor offset and number of elements that
# we need from this tensor
if tensor_offset > 0 or num_elements < tensor.numel():
flat_tensor_list.append(tensor.contiguous().view(-1).narrow(
0,
int(tensor_offset),
int(num_elements)))
else:
flat_tensor_list.append(tensor)
current_size = current_size + num_elements
# this means it's the last partition and does not align with the dp boundary. We need to pad before flattening
if current_size < partition_size:
flat_tensor_list.append(
torch.zeros(int(partition_size - current_size),
dtype=dtype,
device=device))
if return_tensor_list:
return flat_tensor_list
return self.flatten(flat_tensor_list)
def free_grad_in_param_list(self, param_list):
for p in param_list:
p.grad = None # in step
def reset_cpu_buffers(self):
self.norm_for_param_grads = {}
self.local_overflow = False
def log_timers(self, timer_names):
if self.timers is None:
return
self.timers.log(names=list(timer_names))
def start_timers(self, timer_names):
if self.timers is None:
return
for name in timer_names:
self.timers(name).start()
def stop_timers(self, timer_names):
if self.timers is None:
return
for name in timer_names:
self.timers(name).stop()
def step(self, closure=None):
"""
Not supporting closure.
"""
self.micro_step_id = -1
if self.verbose:
report_memory_usage(f"In step before checking overflow")
# First compute norm for all group so we know if there is overflow
self.check_overflow(self.partition_gradients)
OPTIMIZER_ALLGATHER = 'optimizer_allgather'
OPTIMIZER_GRADIENTS = 'optimizer_gradients'
OPTIMIZER_STEP = 'optimizer_step'
timer_names = [OPTIMIZER_ALLGATHER,
OPTIMIZER_GRADIENTS, OPTIMIZER_STEP]
prev_scale = self.loss_scale
self._update_scale(self.overflow)
if self.overflow:
if self.verbose:
report_memory_usage('After overflow before clearing gradients')
self.zero_grad()
if self.cpu_offload:
self.reset_cpu_buffers()
else:
self.averaged_gradients = {}
if self.verbose:
report_memory_usage('After overflow after clearing gradients')
print(
"[deepspeed] fp16 dynamic loss scale overflow! Rank {} Skipping step. Attempted loss scale: {}, "
"reducing to {}".format(dist.get_rank(),
prev_scale,
self.loss_scale))
self.start_timers(timer_names)
self.stop_timers(timer_names)
return
self.start_timers([OPTIMIZER_GRADIENTS])
norm_groups = []
single_partition_grad_groups = []
skip = False
for i, group in enumerate(self.fp16_groups):
partition_id = dist.get_rank(group=self.real_dp_process_group[i])
if self.cpu_offload:
norm_groups.append(
self.complete_grad_norm_calculation_for_cpu_offload(
self.params_in_partition[i]))
single_grad_partition = self.single_partition_of_fp32_groups[i].grad
else:
norm_groups.append(
self.get_grad_norm_direct(self.averaged_gradients[i],
self.params_in_partition[i]))
# free gradients for all the parameters that are not updated by this process
self.free_grad_in_param_list(self.params_not_in_partition[i])
# create flat gradients for parameters updated by this process
# If we are the last partition, the flattened grads must match the partition size; if not, pad with zero tensors
if partition_id == dist.get_world_size(
group=self.real_dp_process_group[i]) - 1:
single_grad_partition = self.flatten_dense_tensors_aligned(
self.averaged_gradients[i],
int(self.partition_size[i])).to(
self.single_partition_of_fp32_groups[i].dtype)
else:
single_grad_partition = self.flatten(self.averaged_gradients[i]).to(
self.single_partition_of_fp32_groups[i].dtype)
assert single_grad_partition.numel() == self.partition_size[i], \
"averaged gradients have different number of elements that partition size {} {} {} {}".format(
single_grad_partition.numel(), self.partition_size[i], i, partition_id)
self.single_partition_of_fp32_groups[i].grad = single_grad_partition
# release all the gradients since we have already created the necessary copy in dp_grad_partition
self.free_grad_in_param_list(self.params_in_partition[i])
self.averaged_gradients[i] = None
single_partition_grad_groups.append(single_grad_partition)
if self.has_moe_layers:
self._average_expert_grad_norms(norm_groups)
self.unscale_and_clip_grads(single_partition_grad_groups, norm_groups)
self.stop_timers([OPTIMIZER_GRADIENTS])
self.start_timers([OPTIMIZER_STEP])
if self.deepspeed_adam_offload:
from deepspeed.ops.adam import DeepSpeedCPUAdam
if type(self.optimizer) == DeepSpeedCPUAdam and self.dtype == torch.half:
fp16_param_groups = [
fp16_partitions[partition_id]
for fp16_partitions in self.parallel_partitioned_fp16_groups
]
self.optimizer.step(fp16_param_groups=fp16_param_groups)
else:
self.optimizer.step()
for fp16_partitions, fp32_partition in zip(self.parallel_partitioned_fp16_groups,
self.single_partition_of_fp32_groups):
fp16_partitions[partition_id].data.copy_(
fp32_partition.data)
else:
self.optimizer.step()
# get rid of the fp32 gradients. Not needed anymore
if not self.cpu_offload:
for group in self.single_partition_of_fp32_groups:
group.grad = None # in step
for fp16_partitions, fp32_partition in zip(self.parallel_partitioned_fp16_groups,
self.single_partition_of_fp32_groups):
fp16_partitions[partition_id].data.copy_(fp32_partition.data)
self.stop_timers([OPTIMIZER_STEP])
if self.cpu_offload:
self.reset_cpu_buffers()
self.start_timers([OPTIMIZER_ALLGATHER])
# gather the updated weights from everyone
for group_id, partitioned_params in enumerate(self.parallel_partitioned_fp16_groups):
# Sequential AllGather, sharded by allgather_bucket_size: best of both worlds (bounded buffer memory without a per-parameter collective)
dp_world_size = dist.get_world_size(
group=self.real_dp_process_group[group_id])
num_shards = max(
1,
partitioned_params[partition_id].numel() * dp_world_size //
self.allgather_bucket_size)
shard_size = partitioned_params[partition_id].numel() // num_shards
num_elements = shard_size
assert shard_size * \
num_shards <= partitioned_params[partition_id].numel()
for shard_id in range(num_shards):
if shard_id == (num_shards - 1):
num_elements = partitioned_params[partition_id].numel(
) - shard_id * shard_size
shard_list = []
for dp_id in range(dp_world_size):
curr_shard = partitioned_params[dp_id].narrow(
0,
shard_id * shard_size,
num_elements).detach()
shard_list.append(curr_shard)
dist.all_gather(shard_list,
shard_list[partition_id],
group=self.real_dp_process_group[group_id])
self.stop_timers([OPTIMIZER_ALLGATHER])
# TODO: we probably don't need this? just to be safe
for i in range(len(norm_groups)):
self._update_model_fp16_weights(i)
self.log_timers(timer_names)
if self.verbose:
report_memory_usage('After zero_optimizer step')
return
def _average_expert_grad_norms(self, norm_groups):
for i, norm in enumerate(norm_groups):
if self.is_moe_param_group[i]:
scaled_norm = norm * 1.0 / float(
dist.get_world_size(group=self.ep_process_group))
scaled_norm_tensor = torch.tensor(scaled_norm,
device='cuda',
dtype=torch.float)
dist.all_reduce(scaled_norm_tensor,
group=self.ep_process_group)
norm_groups[i] = scaled_norm_tensor.item()
def unscale_and_clip_grads(self, grad_groups_flat, norm_groups):
total_norm = 0.0
for norm in norm_groups:
total_norm += norm ** 2.0
total_norm = math.sqrt(total_norm)
# compute combined scale factor for this group
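# Gradients still carry the loss-scale factor at this point, so dividing by
# combined_scale both unscales them and, when clipping kicks in, rescales them
# so the unscaled global norm does not exceed clip_grad.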
combined_scale = self.loss_scale
if self.clip_grad > 0.:
# norm is in fact norm*scale
clip = ((total_norm / self.loss_scale) + 1e-6) / self.clip_grad
if clip > 1:
combined_scale = clip * self.loss_scale
for grad in grad_groups_flat:
if isinstance(grad, list):
sub_partitions = grad
for g in sub_partitions:
g.data.mul_(1. / combined_scale)
else:
grad.data.mul_(1. / combined_scale)
def _check_overflow(self, partition_gradients=True):
self.overflow = self.has_overflow(partition_gradients)
# `params` is a list / generator of torch.Variable
def has_overflow_serial(self, params, is_grad_list=False):
for p in params:
if p.grad is not None and self._has_inf_or_nan(p.grad.data):
return True
return False
def has_overflow_partitioned_grads_serial(self):
for i in range(len(self.fp16_groups)):
for j, grad in enumerate(self.averaged_gradients[i]):
if grad is not None and self._has_inf_or_nan(grad.data, j):
return True
return False
def has_overflow(self, partition_gradients=True):
if partition_gradients:
overflow = self.local_overflow if self.cpu_offload else self.has_overflow_partitioned_grads_serial(
)
overflow_gpu = torch.cuda.ByteTensor([overflow])
'''This will capture overflow across all data parallel and expert parallel processes,
since expert parallel processes are a subset of data parallel processes'''
torch.distributed.all_reduce(overflow_gpu,
op=torch.distributed.ReduceOp.MAX,
group=self.dp_process_group)
else:
params = []
for group in self.fp16_groups:
for param in group:
params.append(param)
overflow = self.has_overflow_serial(
params, is_grad_list=partition_gradients)
overflow_gpu = torch.cuda.ByteTensor([overflow])
# Since each model parallel GPU carries only part of the model,
# make sure overflow flag is synced across all the model parallel GPUs
self._model_parallel_all_reduce(tensor=overflow_gpu,
op=torch.distributed.ReduceOp.MAX)
overflow = overflow_gpu[0].item()
return bool(overflow)
# `x` is a torch.Tensor
@staticmethod
def _has_inf_or_nan(x, j=None):
try:
# if x is half, the .float() incurs an additional deep copy, but it's necessary if
# Pytorch's .sum() creates a one-element tensor of the same type as x
# (which is true for some recent versions of pytorch).
cpu_sum = float(x.float().sum())
# More efficient version that can be used if .sum() returns a Python scalar
# cpu_sum = float(x.sum())
except RuntimeError as instance:
# We want to check if instance is actually an overflow exception.
# RuntimeError could come from a different error.
# If so, we still want the exception to propagate.
if "value cannot be converted" not in instance.args[0]:
raise
return True
else:
if cpu_sum == float('inf') or cpu_sum == -float('inf') or cpu_sum != cpu_sum:
return True
return False
def backward(self, loss, retain_graph=False):
"""
:attr:`backward` performs the following steps:
1. fp32_loss = loss.float()
2. scaled_loss = fp32_loss*loss_scale
3. scaled_loss.backward(), which accumulates scaled gradients into the ``.grad`` attributes of the model's fp16 leaves
"""
self.micro_step_id += 1
if self.contiguous_gradients:
self.ipg_buffer = []
buf_0 = torch.empty(int(self.reduce_bucket_size),
dtype=self.dtype,
device=torch.cuda.current_device())
self.ipg_buffer.append(buf_0)
# Use double buffers to avoid data access conflict when overlap_comm is enabled.
if self.overlap_comm:
buf_1 = torch.empty(int(self.reduce_bucket_size),
dtype=self.dtype,
device=torch.cuda.current_device())
self.ipg_buffer.append(buf_1)
self.ipg_index = 0
self.loss_scaler.backward(loss.float(), retain_graph=retain_graph)
def check_overflow(self, partition_gradients=True):
self._check_overflow(partition_gradients)
def _update_scale(self, has_overflow=False):
self.loss_scaler.update_scale(has_overflow)
# Promote state so it can be retrieved or set via "fp16_optimizer_instance.state"
def _get_state(self):
return self.optimizer.state
def _set_state(self, value):
self.optimizer.state = value
state = property(_get_state, _set_state)
# Promote param_groups so it can be retrieved or set via "fp16_optimizer_instance.param_groups"
# (for example, to adjust the learning rate)
def _get_param_groups(self):
return self.optimizer.param_groups
def _set_param_groups(self, value):
self.optimizer.param_groups = value
param_groups = property(_get_param_groups, _set_param_groups)
# Promote loss scale so it can be retrieved or set via "fp16_optimizer_instance.loss_scale"
def _get_loss_scale(self):
return self.loss_scaler.loss_scale
def _set_loss_scale(self, value):
self.loss_scaler.cur_scale = value
loss_scale = property(_get_loss_scale, _set_loss_scale)
cur_scale = property(_get_loss_scale, _set_loss_scale)
# Return group tensor after removing paddings that are added for alignment to DP world size.
# This method works on the assumption that each group contains a single flattened tensor.
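# Illustration: if a flattened group holds 1030 elements of which 6 are
# alignment padding, the returned view keeps only the first 1024 elements.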
def _get_groups_without_padding(self, groups_with_padding):
groups_without_padding = []
for i, group in enumerate(groups_with_padding):
lean_length = group.numel() - self.groups_padding[i]
groups_without_padding.append(group[:lean_length])
return groups_without_padding
# Return optimizer state after removing paddings that are added for alignment.
def _get_state_without_padding(self, state_with_padding, padding):
lean_state = {}
for key, value in state_with_padding.items():
if torch.is_tensor(value):
lean_length = value.numel() - padding
lean_state[key] = value[:lean_length]
else:
lean_state[key] = value
return lean_state
# Return base optimizer states.
# This method assumes that each param group contains a single flattened tensor.
def _get_base_optimizer_state(self):
optimizer_groups_state = []
for i, group in enumerate(self.optimizer.param_groups):
p = group['params'][0]
lean_optimizer_state = self._get_state_without_padding(
self.optimizer.state[p],
self.groups_padding[i])
optimizer_groups_state.append(lean_optimizer_state)
return optimizer_groups_state
def state_dict(self):
"""
Returns a dict containing the current state of this :class:`FP16_Optimizer` instance.
This dict contains attributes of :class:`FP16_Optimizer`, as well as the state_dict
of the contained Pytorch optimizer.
Example::
checkpoint = {}
checkpoint['model'] = model.state_dict()
checkpoint['optimizer'] = optimizer.state_dict()
torch.save(checkpoint, "saved.pth")
"""
state_dict = {}
state_dict['loss_scaler'] = self.loss_scaler
state_dict['dynamic_loss_scale'] = self.dynamic_loss_scale
state_dict['overflow'] = self.overflow
state_dict['base_optimizer_state'] = self._get_base_optimizer_state()
state_dict['zero_stage'] = ZERO_OPTIMIZATION_GRADIENTS
state_dict['partition_count'] = self.partition_count
state_dict['ds_version'] = version
# Remove paddings for DP alignment to enable loading for other alignment values
fp32_groups_without_padding = self._get_groups_without_padding(
self.single_partition_of_fp32_groups)
state_dict['single_partition_of_fp32_groups'] = fp32_groups_without_padding
# if self.cpu_offload:
# state_dict_tmp = async_copy_to(state_dict,
# 'cpu',
# torch.cuda.current_stream())
# state_dict = state_dict_tmp
return state_dict
# Restore base optimizer fp32 weights from checkpoint by:
# 1) Merging fp32 weights from checkpoints of all partitions
# 2) Extracting fp32 weights for current partition from merged weights
# 3) Using extracted weights to update base optimizer weights directly.
def _restore_from_fp32_weights(self, all_state_dict):
merged_single_partition_of_fp32_groups = []
for i in range(len(self.single_partition_of_fp32_groups)):
partition_id = dist.get_rank(group=self.real_dp_process_group[i])
merged_partitions = [
sd['single_partition_of_fp32_groups'][i] for sd in all_state_dict
]
flat_merged_partitions = self.flatten_dense_tensors_aligned(
merged_partitions,
self.nccl_start_alignment_factor *
dist.get_world_size(group=self.real_dp_process_group[i]))
dp_partitions = self.get_data_parallel_partitions(
flat_merged_partitions, i)
merged_single_partition_of_fp32_groups.append(
dp_partitions[partition_id])
for current, saved in zip(self.single_partition_of_fp32_groups, merged_single_partition_of_fp32_groups):
current.data.copy_(saved.data)
# Restore base optimizer fp32 weights from ZeRO fp16 weights
def _restore_from_fp16_weights(self):
for group_id, (fp16_partitions, fp32_partition) in enumerate(
zip(self.parallel_partitioned_fp16_groups, self.single_partition_of_fp32_groups)):
partition_id = dist.get_rank(
group=self.real_dp_process_group[group_id])
fp32_partition.data.copy_(fp16_partitions[partition_id].data)
# Refresh the fp32 master params from the fp16 copies.
def refresh_fp32_params(self):
self._restore_from_fp16_weights()
# Extract optimizer state for current partition from merged states of all partitions
def _partition_base_optimizer_state(self, state_key, all_partition_states, group_id):
partition_id = dist.get_rank(
group=self.real_dp_process_group[group_id])
alignment = dist.get_world_size(
group=self.real_dp_process_group[group_id])
if torch.is_tensor(all_partition_states[0]):
flat_merged_partitions = self.flatten_dense_tensors_aligned(
all_partition_states,
alignment)
dp_partitions = self.get_data_parallel_partitions(flat_merged_partitions,
group_id)
return dp_partitions[partition_id]
else:
# Assume non-tensor states are not partitioned and are equal across ranks, so return the first one
return all_partition_states[0]
# Restore base optimizer state from checkpoint by
# 1) Merging optimizer state from checkpoints of all partitions
# 2) Extracting optimizer state for current partition from the merged state
# 3) Using the extracted value to directly update the base optimizer.
def _restore_base_optimizer_state(self, all_state_dict):
base_optimizer_group_states = []
for i in range(len(self.optimizer.param_groups)):
partition_states = {}
all_partition_group_states = [
sd['base_optimizer_state'][i] for sd in all_state_dict
]
for key in all_partition_group_states[0].keys():
all_partition_states = [
all_states[key] for all_states in all_partition_group_states
]
partition_states[key] = self._partition_base_optimizer_state(
key,
all_partition_states,
i)
base_optimizer_group_states.append(partition_states)
for i, group in enumerate(self.optimizer.param_groups):
p = group['params'][0]
for key, saved in base_optimizer_group_states[i].items():
if torch.is_tensor(self.optimizer.state[p][key]):
self.optimizer.state[p][key].data.copy_(saved.data)
else:
self.optimizer.state[p][key] = saved
def load_state_dict(self,
state_dict_list,
load_optimizer_states=True,
load_from_fp32_weights=False):
r"""Loading ZeRO checkpoint
Arguments:
state_dict_list: List of all saved ZeRO checkpoints, one for each saved partition.
Note that the number of saved partitions may differ from the number of loading partitions, in order to
support changing the GPU count, specifically the DP world size, between saving and loading checkpoints.
load_optimizer_states: Boolean indicating whether or not to load base optimizer states.
load_from_fp32_weights: Boolean indicating whether to initialize fp32 master weights from the fp32
copies in the checkpoints (no precision loss) or from the model's fp16 copies (with precision loss).
"""
"""
Loads a state_dict created by an earlier call to state_dict().
If ``fp16_optimizer_instance`` was constructed from some ``init_optimizer``,
whose parameters in turn came from ``model``, it is expected that the user
will call ``model.load_state_dict()`` before
``fp16_optimizer_instance.load_state_dict()`` is called.
Example::
model = torch.nn.Linear(D_in, D_out).cuda().half()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0)
...
checkpoint = torch.load("saved.pth")
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
"""
# I think it should actually be ok to reload the optimizer before the model.
self.loss_scaler = state_dict_list[0]['loss_scaler']
self.dynamic_loss_scale = state_dict_list[0]['dynamic_loss_scale']
self.overflow = state_dict_list[0]['overflow']
# zero stage 1 mode
if not self.partition_gradients:
required_version = pkg_version.parse("0.3.17")
ckpt_version = state_dict_list[0].get("ds_version", False)
error_str = f"ZeRO stage 1 changed in {required_version} and is not backwards compatible " \
"with older stage 1 checkpoints. If you'd like to load an old ZeRO-1 checkpoint " \
"please set 'legacy_stage1': true in your zero config json. This old version of " \
"stage 1 will be removed in v0.4.0."
assert ckpt_version, f"Empty ds_version! {error_str}"
assert required_version <= pkg_version.parse(
ckpt_version), f"Old version: {ckpt_version} {error_str}"
if load_optimizer_states:
self._restore_base_optimizer_state(state_dict_list)
# At this point, the optimizer's references to the model's fp32 parameters are up to date.
# The optimizer's hyperparameters and internal buffers are also up to date.
# However, the fp32 master copies of the model's fp16 params stored by the optimizer are still
# out of date. There are two options.
# 1: Refresh the master params from the model's fp16 params.
# This requires less storage but incurs precision loss.
# 2: Save and restore the fp32 master copies separately.
# We choose option 1 if changing DP degree and option 2 otherwise.
#
# Pytorch Optimizer.load_state_dict casts saved buffers (e.g. momentum) to the type and device
# of their associated parameters, because it's possible those buffers might not exist yet in
# the current optimizer instance. In our case, as long as the current FP16_Optimizer has been
# constructed in the same way as the one whose state_dict we are loading, the same master params
# are guaranteed to exist, so we can just copy_() from the saved master params.
if load_from_fp32_weights:
self._restore_from_fp32_weights(state_dict_list)
else:
self._restore_from_fp16_weights()
def allreduce_gradients(self):
self.overlapping_partition_gradients_reduce_epilogue()
def _handle_overflow(cpu_sum, x, i):
import math
rank = torch.distributed.get_rank()
if rank == 0:
t_i = -1
for v_i, v in enumerate(x.data.contiguous().view(-1)):
if not math.isfinite(float(v)):
t_i = v_i
break
print(
f"rank {rank} detected overflow {cpu_sum} in tensor {i}:{t_i} shape {x.shape}"
)
def estimate_zero2_model_states_mem_needs(total_params,
num_gpus_per_node=1,
num_nodes=1,
cpu_offload=True,
additional_buffer_factor=1.5):
total_gpus = num_nodes * num_gpus_per_node
if cpu_offload:
gpu_mem = 2 * total_params
cpu_mem = total_params * \
max(4 * total_gpus, 16) * additional_buffer_factor
else:
gpu_mem = 4 * total_params + int(16 * total_params / total_gpus)
cpu_mem = total_params * 4 * num_gpus_per_node * additional_buffer_factor
return int(cpu_mem), int(gpu_mem)
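# Worked example (illustrative, hypothetical numbers): for a 1e9-parameter model on 8 GPUs
# with cpu_offload=True, gpu_mem = 2 * 1e9 bytes ~= 1.86GB per GPU and
# cpu_mem = 1e9 * max(4 * 8, 16) * 1.5 = 4.8e10 bytes ~= 44.7GB of host memory.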
def model_to_params(model):
# shared params calculated only once
total_params = sum(
dict((p.data_ptr(),
p.numel()) for p in model.parameters()).values())
return total_params
def estimate_zero2_model_states_mem_needs_all_live(model,
num_gpus_per_node=1,
num_nodes=1,
additional_buffer_factor=1.5):
"""
Print out estimates on memory usage requirements for ZeRO 2 params, optim states and gradients
for a given ``model`` and hardware setup.
If you have an actual model object, use this function and everything will be derived
automatically.
If it's a hypothetical model, use ``estimate_zero2_model_states_mem_needs_all_cold`` where you have to pass
the ``total_params`` explicitly.
Args:
- ``model``: ``nn.Module`` object
- ``num_gpus_per_node``: how many gpus per node (defaults to 1)
- ``num_nodes``: how many nodes (defaults to 1),
- ``additional_buffer_factor``: estimation factor (defaults to 1.5):
"""
total_params = model_to_params(model)
estimate_zero2_model_states_mem_needs_all_cold(
total_params=total_params,
num_gpus_per_node=num_gpus_per_node,
num_nodes=num_nodes,
additional_buffer_factor=additional_buffer_factor)
def estimate_zero2_model_states_mem_needs_all_cold(total_params,
num_gpus_per_node=1,
num_nodes=1,
additional_buffer_factor=1.5):
"""
Print out estimates on memory usage requirements for ZeRO 2 params, optim states and gradients
for a given ``model`` and hardware setup.
If it's a hypothetical model, use this function where you have to pass
the ``total_params`` explicitly.
If you have an actual model object, use ``estimate_zero2_model_states_mem_needs_all_live`` and everything
will be derived automatically.
Args:
- ``total_params``: total model params
- ``num_gpus_per_node``: how many gpus per node (defaults to 1)
- ``num_nodes``: how many nodes (defaults to 1),
- ``additional_buffer_factor``: estimation factor (defaults to 1.5):
"""
def format_options(cpu_offload):
enabled = []
enabled.append(f"cpu_offload={1 if cpu_offload else 0}")
return ", ".join(enabled)
nodes_str = "nodes" if num_nodes > 1 else "node"
gpus_str = "GPUs" if num_gpus_per_node > 1 else "GPU"
print(
"Estimated memory needed for params, optim states and gradients for a:\n"
f"HW: Setup with {num_nodes} {nodes_str}, {num_gpus_per_node} {gpus_str} per node.\n"
f"SW: Model with {int(total_params / 1e6)}M total params.")
print(" per CPU | per GPU | Options")
for cpu_offload in [True, False]:
cpu_mem, gpu_mem = estimate_zero2_model_states_mem_needs(
total_params=total_params,
num_gpus_per_node=num_gpus_per_node,
num_nodes=num_nodes,
cpu_offload=cpu_offload,
additional_buffer_factor=additional_buffer_factor
)
options_str = format_options(cpu_offload=cpu_offload)
print(
f" {cpu_mem / 2 ** 30:7.2f}GB | {gpu_mem / 2 ** 30:6.2f}GB | {options_str}")
|
py
|
1a57c5909bc4eacd510ef06e4841076798440c13
|
#!/usr/bin/env python
import os
import cub
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
os.environ['PYTHONDONTWRITEBYTECODE'] = '1'
packages = ['cub']
requires = ['requests>=0.9']
setup(
name='cub',
version=cub.__version__,
description='Cub Client for Python',
long_description=open('README.rst').read(),
author='Denis Stebunov',
author_email='[email protected]',
url='https://github.com/praetoriandigital/cub-python',
packages=packages,
python_requires='>=2.7.9, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4',
install_requires=requires,
license=open('LICENSE').readline().strip(),
zip_safe=False,
test_suite="tests",
classifiers=(
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
),
)
del os.environ['PYTHONDONTWRITEBYTECODE']
|
py
|
1a57c64fa444048316f17d087f5cce105bbd0bde
|
from dataclasses import dataclass
from datetime import date
from enum import Enum
from typing import Optional
class EligibilityStatus(str, Enum):
ELIGIBLE = "Eligible"
NEEDS_MORE_ANALYSIS = "Needs more analysis"
INELIGIBLE = "Ineligible"
class ChargeEligibilityStatus(str, Enum):
UNKNOWN = "Unknown"
ELIGIBLE_NOW = "Eligible now"
POSSIBLY_ELIGIBILE = "Possibly eligible"
WILL_BE_ELIGIBLE = "Will be eligible"
POSSIBLY_WILL_BE_ELIGIBLE = "Possibly will be eligible"
INELIGIBLE = "Ineligible"
@dataclass(frozen=True)
class TypeEligibility:
status: EligibilityStatus
reason: str
@dataclass(frozen=True)
class TimeEligibility:
status: EligibilityStatus
reason: str
date_will_be_eligible: date
@dataclass(frozen=True)
class ChargeEligibility:
status: ChargeEligibilityStatus
label: str
@dataclass(frozen=True)
class ExpungementResult:
type_eligibility: TypeEligibility = TypeEligibility(
status=EligibilityStatus.NEEDS_MORE_ANALYSIS, reason="Default value"
) # TODO: Remove default value
time_eligibility: Optional[TimeEligibility] = None
charge_eligibility: Optional[ChargeEligibility] = None
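# Illustrative usage sketch (not part of the original module): building a fully populated
# ExpungementResult from the dataclasses above, with made-up values.
if __name__ == "__main__":
    result = ExpungementResult(
        type_eligibility=TypeEligibility(
            status=EligibilityStatus.ELIGIBLE, reason="Qualifying charge type"
        ),
        time_eligibility=TimeEligibility(
            status=EligibilityStatus.INELIGIBLE,
            reason="Waiting period not yet served",
            date_will_be_eligible=date(2022, 1, 1),
        ),
        charge_eligibility=ChargeEligibility(
            status=ChargeEligibilityStatus.WILL_BE_ELIGIBLE, label="Will be eligible"
        ),
    )
    print(result)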
|
py
|
1a57c729c47d59e72eb7a8e344d312e88149a808
|
from time import sleep
from json import dumps
from kafka import KafkaProducer
producer = KafkaProducer(bootstrap_servers=['localhost:9092'], value_serializer=lambda x: dumps(x).encode('utf-8'))
print("Please insert a number --> 'stop' to exit")
input_user = input()
index = 0
while input_user != "stop":
data = {"id": "PXL"+str(index), "number" : input_user}
producer.send("pxl_data", value=data)
print(f"Sending data: {data}")
index += 1
print("Insert new data (stop to exit)")
input_user = input()
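# Matching consumer sketch (illustrative only, not part of this script): reads the
# 'pxl_data' topic written above and deserializes the JSON payloads.
#
#   from json import loads
#   from kafka import KafkaConsumer
#
#   consumer = KafkaConsumer('pxl_data',
#                            bootstrap_servers=['localhost:9092'],
#                            value_deserializer=lambda x: loads(x.decode('utf-8')))
#   for message in consumer:
#       print(message.value)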
|
py
|
1a57c7376724ec0041b7d00c1766c5ddfe363388
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.agile
~~~~~~~~~~~~~~~~~~~~~
Lexers for agile languages.
:copyright: Copyright 2006-2009 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
try:
set
except NameError:
from sets import Set as set
from pygments.lexer import Lexer, RegexLexer, ExtendedRegexLexer, \
LexerContext, include, combined, do_insertions, bygroups, using
from pygments.token import Error, Text, \
Comment, Operator, Keyword, Name, String, Number, Generic, Punctuation
from pygments.util import get_bool_opt, get_list_opt, shebang_matches
from pygments import unistring as uni
__all__ = ['PythonLexer', 'PythonConsoleLexer', 'PythonTracebackLexer',
'RubyLexer', 'RubyConsoleLexer', 'PerlLexer', 'LuaLexer',
'MiniDLexer', 'IoLexer', 'TclLexer', 'ClojureLexer',
'Python3Lexer', 'Python3TracebackLexer']
# b/w compatibility
from pygments.lexers.functional import SchemeLexer
line_re = re.compile('.*?\n')
class PythonLexer(RegexLexer):
"""
For `Python <http://www.python.org>`_ source code.
"""
name = 'Python'
aliases = ['python', 'py']
filenames = ['*.py', '*.pyw', '*.sc', 'SConstruct', 'SConscript']
mimetypes = ['text/x-python', 'application/x-python']
tokens = {
'root': [
(r'\n', Text),
(r'^(\s*)("""(?:.|\n)*?""")', bygroups(Text, String.Doc)),
(r"^(\s*)('''(?:.|\n)*?''')", bygroups(Text, String.Doc)),
(r'[^\S\n]+', Text),
(r'#.*$', Comment),
(r'[]{}:(),;[]', Punctuation),
(r'\\\n', Text),
(r'\\', Text),
(r'(in|is|and|or|not)\b', Operator.Word),
(r'!=|==|<<|>>|[-~+/*%=<>&^|.]', Operator),
include('keywords'),
(r'(def)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'funcname'),
(r'(class)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'classname'),
(r'(from)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text), 'fromimport'),
(r'(import)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text), 'import'),
include('builtins'),
include('backtick'),
('(?:[rR]|[uU][rR]|[rR][uU])"""', String, 'tdqs'),
("(?:[rR]|[uU][rR]|[rR][uU])'''", String, 'tsqs'),
('(?:[rR]|[uU][rR]|[rR][uU])"', String, 'dqs'),
("(?:[rR]|[uU][rR]|[rR][uU])'", String, 'sqs'),
('[uU]?"""', String, combined('stringescape', 'tdqs')),
("[uU]?'''", String, combined('stringescape', 'tsqs')),
('[uU]?"', String, combined('stringescape', 'dqs')),
("[uU]?'", String, combined('stringescape', 'sqs')),
include('name'),
include('numbers'),
],
'keywords': [
(r'(assert|break|continue|del|elif|else|except|exec|'
r'finally|for|global|if|lambda|pass|print|raise|'
r'return|try|while|yield|as|with)\b', Keyword),
],
'builtins': [
(r'(?<!\.)(__import__|abs|all|any|apply|basestring|bin|bool|buffer|'
r'bytearray|bytes|callable|chr|classmethod|cmp|coerce|compile|'
r'complex|delattr|dict|dir|divmod|enumerate|eval|execfile|exit|'
r'file|filter|float|frozenset|getattr|globals|hasattr|hash|hex|id|'
r'input|int|intern|isinstance|issubclass|iter|len|list|locals|'
r'long|map|max|min|next|object|oct|open|ord|pow|property|range|'
r'raw_input|reduce|reload|repr|reversed|round|set|setattr|slice|'
r'sorted|staticmethod|str|sum|super|tuple|type|unichr|unicode|'
r'vars|xrange|zip)\b', Name.Builtin),
(r'(?<!\.)(self|None|Ellipsis|NotImplemented|False|True'
r')\b', Name.Builtin.Pseudo),
(r'(?<!\.)(ArithmeticError|AssertionError|AttributeError|'
r'BaseException|DeprecationWarning|EOFError|EnvironmentError|'
r'Exception|FloatingPointError|FutureWarning|GeneratorExit|IOError|'
r'ImportError|ImportWarning|IndentationError|IndexError|KeyError|'
r'KeyboardInterrupt|LookupError|MemoryError|NameError|'
r'NotImplemented|NotImplementedError|OSError|OverflowError|'
r'OverflowWarning|PendingDeprecationWarning|ReferenceError|'
r'RuntimeError|RuntimeWarning|StandardError|StopIteration|'
r'SyntaxError|SyntaxWarning|SystemError|SystemExit|TabError|'
r'TypeError|UnboundLocalError|UnicodeDecodeError|'
r'UnicodeEncodeError|UnicodeError|UnicodeTranslateError|'
r'UnicodeWarning|UserWarning|ValueError|VMSError|Warning|'
r'WindowsError|ZeroDivisionError)\b', Name.Exception),
],
'numbers': [
(r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
(r'\d+[eE][+-]?[0-9]+', Number.Float),
(r'0\d+', Number.Oct),
(r'0[xX][a-fA-F0-9]+', Number.Hex),
(r'\d+L', Number.Integer.Long),
(r'\d+', Number.Integer)
],
'backtick': [
('`.*?`', String.Backtick),
],
'name': [
(r'@[a-zA-Z0-9_.]+', Name.Decorator),
('[a-zA-Z_][a-zA-Z0-9_]*', Name),
],
'funcname': [
('[a-zA-Z_][a-zA-Z0-9_]*', Name.Function, '#pop')
],
'classname': [
('[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop')
],
'import': [
(r'((?:\s|\\\s)+)(as)((?:\s|\\\s)+)',
bygroups(Text, Keyword.Namespace, Text)),
(r'[a-zA-Z_][a-zA-Z0-9_.]*', Name.Namespace),
(r'(\s*)(,)(\s*)', bygroups(Text, Operator, Text)),
(r'', Text, '#pop') # all else: go back
],
'fromimport': [
(r'((?:\s|\\\s)+)(import)\b', bygroups(Text, Keyword.Namespace), '#pop'),
(r'[a-zA-Z_.][a-zA-Z0-9_.]*', Name.Namespace),
],
'stringescape': [
(r'\\([\\abfnrtv"\']|\n|N{.*?}|u[a-fA-F0-9]{4}|'
r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)
],
'strings': [
(r'%(\([a-zA-Z0-9_]+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
'[hlL]?[diouxXeEfFgGcrs%]', String.Interpol),
(r'[^\\\'"%\n]+', String),
# quotes, percents and backslashes must be parsed one at a time
(r'[\'"\\]', String),
# unhandled string formatting sign
(r'%', String)
# newlines are an error (use "nl" state)
],
'nl': [
(r'\n', String)
],
'dqs': [
(r'"', String, '#pop'),
(r'\\\\|\\"|\\\n', String.Escape), # included here again for raw strings
include('strings')
],
'sqs': [
(r"'", String, '#pop'),
(r"\\\\|\\'|\\\n", String.Escape), # included here again for raw strings
include('strings')
],
'tdqs': [
(r'"""', String, '#pop'),
include('strings'),
include('nl')
],
'tsqs': [
(r"'''", String, '#pop'),
include('strings'),
include('nl')
],
}
def analyse_text(text):
return shebang_matches(text, r'pythonw?(2\.\d)?')
class Python3Lexer(RegexLexer):
"""
For `Python <http://www.python.org>`_ source code (version 3.0).
*New in Pygments 0.10.*
"""
name = 'Python 3'
aliases = ['python3', 'py3']
filenames = [] # Nothing until Python 3 gets widespread
mimetypes = ['text/x-python3', 'application/x-python3']
flags = re.MULTILINE | re.UNICODE
uni_name = "[%s][%s]*" % (uni.xid_start, uni.xid_continue)
tokens = PythonLexer.tokens.copy()
tokens['keywords'] = [
(r'(assert|break|continue|del|elif|else|except|'
r'finally|for|global|if|lambda|pass|raise|'
r'return|try|while|yield|as|with|True|False|None)\b', Keyword),
]
tokens['builtins'] = [
(r'(?<!\.)(__import__|abs|all|any|bin|bool|bytearray|bytes|'
r'chr|classmethod|cmp|compile|complex|delattr|dict|dir|'
r'divmod|enumerate|eval|filter|float|format|frozenset|getattr|'
r'globals|hasattr|hash|hex|id|input|int|isinstance|issubclass|'
r'iter|len|list|locals|map|max|memoryview|min|next|object|oct|'
r'open|ord|pow|print|property|range|repr|reversed|round|'
r'set|setattr|slice|sorted|staticmethod|str|sum|super|tuple|type|'
r'vars|zip)\b', Name.Builtin),
(r'(?<!\.)(self|Ellipsis|NotImplemented)\b', Name.Builtin.Pseudo),
(r'(?<!\.)(ArithmeticError|AssertionError|AttributeError|'
r'BaseException|BufferError|BytesWarning|DeprecationWarning|'
r'EOFError|EnvironmentError|Exception|FloatingPointError|'
r'FutureWarning|GeneratorExit|IOError|ImportError|'
r'ImportWarning|IndentationError|IndexError|KeyError|'
r'KeyboardInterrupt|LookupError|MemoryError|NameError|'
r'NotImplementedError|OSError|OverflowError|'
r'PendingDeprecationWarning|ReferenceError|'
r'RuntimeError|RuntimeWarning|StopIteration|'
r'SyntaxError|SyntaxWarning|SystemError|SystemExit|TabError|'
r'TypeError|UnboundLocalError|UnicodeDecodeError|'
r'UnicodeEncodeError|UnicodeError|UnicodeTranslateError|'
r'UnicodeWarning|UserWarning|ValueError|VMSError|Warning|'
r'WindowsError|ZeroDivisionError)\b', Name.Exception),
]
tokens['numbers'] = [
(r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
(r'0[oO][0-7]+', Number.Oct),
(r'0[bB][01]+', Number.Bin),
(r'0[xX][a-fA-F0-9]+', Number.Hex),
(r'\d+', Number.Integer)
]
tokens['backtick'] = []
tokens['name'] = [
(r'@[a-zA-Z0-9_]+', Name.Decorator),
(uni_name, Name),
]
tokens['funcname'] = [
(uni_name, Name.Function, '#pop')
]
tokens['classname'] = [
(uni_name, Name.Class, '#pop')
]
tokens['import'] = [
(r'(\s+)(as)(\s+)', bygroups(Text, Keyword, Text)),
(r'\.', Name.Namespace),
(uni_name, Name.Namespace),
(r'(\s*)(,)(\s*)', bygroups(Text, Operator, Text)),
(r'', Text, '#pop') # all else: go back
]
tokens['fromimport'] = [
(r'(\s+)(import)\b', bygroups(Text, Keyword), '#pop'),
(r'\.', Name.Namespace),
(uni_name, Name.Namespace),
]
# don't highlight "%s" substitutions
tokens['strings'] = [
(r'[^\\\'"%\n]+', String),
# quotes, percents and backslashes must be parsed one at a time
(r'[\'"\\]', String),
# unhandled string formatting sign
(r'%', String)
# newlines are an error (use "nl" state)
]
def analyse_text(text):
return shebang_matches(text, r'pythonw?3(\.\d)?')
class PythonConsoleLexer(Lexer):
"""
For Python console output or doctests, such as:
.. sourcecode:: pycon
>>> a = 'foo'
>>> print a
foo
>>> 1 / 0
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ZeroDivisionError: integer division or modulo by zero
Additional options:
`python3`
Use Python 3 lexer for code. Default is ``False``.
*New in Pygments 1.0.*
"""
name = 'Python console session'
aliases = ['pycon']
mimetypes = ['text/x-python-doctest']
def __init__(self, **options):
self.python3 = get_bool_opt(options, 'python3', False)
Lexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
if self.python3:
pylexer = Python3Lexer(**self.options)
tblexer = Python3TracebackLexer(**self.options)
else:
pylexer = PythonLexer(**self.options)
tblexer = PythonTracebackLexer(**self.options)
curcode = ''
insertions = []
curtb = ''
tbindex = 0
tb = 0
for match in line_re.finditer(text):
line = match.group()
if line.startswith('>>> ') or line.startswith('... '):
tb = 0
insertions.append((len(curcode),
[(0, Generic.Prompt, line[:4])]))
curcode += line[4:]
elif line.rstrip() == '...':
tb = 0
insertions.append((len(curcode),
[(0, Generic.Prompt, '...')]))
curcode += line[3:]
else:
if curcode:
for item in do_insertions(insertions,
pylexer.get_tokens_unprocessed(curcode)):
yield item
curcode = ''
insertions = []
if (line.startswith('Traceback (most recent call last):') or
re.match(r' File "[^"]+", line \d+\n$', line)):
tb = 1
curtb = line
tbindex = match.start()
elif line == 'KeyboardInterrupt\n':
yield match.start(), Name.Class, line
elif tb:
curtb += line
if not (line.startswith(' ') or line.strip() == '...'):
tb = 0
for i, t, v in tblexer.get_tokens_unprocessed(curtb):
yield tbindex+i, t, v
else:
yield match.start(), Generic.Output, line
if curcode:
for item in do_insertions(insertions,
pylexer.get_tokens_unprocessed(curcode)):
yield item
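# Illustrative sketch (not part of the original module): the `python3` option documented
# in PythonConsoleLexer above switches doctest/console highlighting to the Python 3 grammar:
#   pycon_lexer = PythonConsoleLexer(python3=True)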
class PythonTracebackLexer(RegexLexer):
"""
For Python tracebacks.
*New in Pygments 0.7.*
"""
name = 'Python Traceback'
aliases = ['pytb']
filenames = ['*.pytb']
mimetypes = ['text/x-python-traceback']
tokens = {
'root': [
(r'^Traceback \(most recent call last\):\n', Generic.Traceback, 'intb'),
# SyntaxError starts with this.
(r'^(?= File "[^"]+", line \d+\n)', Generic.Traceback, 'intb'),
],
'intb': [
(r'^( File )("[^"]+")(, line )(\d+)(, in )(.+)(\n)',
bygroups(Text, Name.Builtin, Text, Number, Text, Name.Identifier, Text)),
(r'^( File )("[^"]+")(, line )(\d+)(\n)',
bygroups(Text, Name.Builtin, Text, Number, Text)),
(r'^( )(.+)(\n)',
bygroups(Text, using(PythonLexer), Text)),
(r'^([ \t]*)(\.\.\.)(\n)',
bygroups(Text, Comment, Text)), # for doctests...
(r'^(.+)(: )(.+)(\n)',
bygroups(Name.Class, Text, Name.Identifier, Text), '#pop'),
(r'^([a-zA-Z_][a-zA-Z0-9_]*)(:?\n)',
bygroups(Name.Class, Text), '#pop')
],
}
class Python3TracebackLexer(RegexLexer):
"""
For Python 3.0 tracebacks, with support for chained exceptions.
*New in Pygments 1.0.*
"""
name = 'Python 3.0 Traceback'
aliases = ['py3tb']
filenames = ['*.py3tb']
mimetypes = ['text/x-python3-traceback']
tokens = {
'root': [
(r'\n', Text),
(r'^Traceback \(most recent call last\):\n', Generic.Traceback, 'intb'),
(r'^During handling of the above exception, another '
r'exception occurred:\n\n', Generic.Traceback),
(r'^The above exception was the direct cause of the '
r'following exception:\n\n', Generic.Traceback),
],
'intb': [
(r'^( File )("[^"]+")(, line )(\d+)(, in )(.+)(\n)',
bygroups(Text, Name.Builtin, Text, Number, Text, Name.Identifier, Text)),
(r'^( )(.+)(\n)',
bygroups(Text, using(Python3Lexer), Text)),
(r'^([ \t]*)(\.\.\.)(\n)',
bygroups(Text, Comment, Text)), # for doctests...
(r'^(.+)(: )(.+)(\n)',
bygroups(Name.Class, Text, Name.Identifier, Text), '#pop'),
(r'^([a-zA-Z_][a-zA-Z0-9_]*)(:?\n)',
bygroups(Name.Class, Text), '#pop')
],
}
class RubyLexer(ExtendedRegexLexer):
"""
For `Ruby <http://www.ruby-lang.org>`_ source code.
"""
name = 'Ruby'
aliases = ['rb', 'ruby']
filenames = ['*.rb', '*.rbw', 'Rakefile', '*.rake', '*.gemspec', '*.rbx']
mimetypes = ['text/x-ruby', 'application/x-ruby']
flags = re.DOTALL | re.MULTILINE
def heredoc_callback(self, match, ctx):
# okay, this is the hardest part of parsing Ruby...
# match: 1 = <<-?, 2 = quote? 3 = name 4 = quote? 5 = rest of line
start = match.start(1)
yield start, Operator, match.group(1) # <<-?
yield match.start(2), String.Heredoc, match.group(2) # quote ", ', `
yield match.start(3), Name.Constant, match.group(3) # heredoc name
yield match.start(4), String.Heredoc, match.group(4) # quote again
heredocstack = ctx.__dict__.setdefault('heredocstack', [])
outermost = not bool(heredocstack)
heredocstack.append((match.group(1) == '<<-', match.group(3)))
ctx.pos = match.start(5)
ctx.end = match.end(5)
# this may find other heredocs
for i, t, v in self.get_tokens_unprocessed(context=ctx):
yield i, t, v
ctx.pos = match.end()
if outermost:
# this is the outer heredoc again, now we can process them all
for tolerant, hdname in heredocstack:
lines = []
for match in line_re.finditer(ctx.text, ctx.pos):
if tolerant:
check = match.group().strip()
else:
check = match.group().rstrip()
if check == hdname:
for amatch in lines:
yield amatch.start(), String.Heredoc, amatch.group()
yield match.start(), Name.Constant, match.group()
ctx.pos = match.end()
break
else:
lines.append(match)
else:
# end of heredoc not found -- error!
for amatch in lines:
yield amatch.start(), Error, amatch.group()
ctx.end = len(ctx.text)
del heredocstack[:]
def gen_rubystrings_rules():
def intp_regex_callback(self, match, ctx):
yield match.start(1), String.Regex, match.group(1) # begin
nctx = LexerContext(match.group(3), 0, ['interpolated-regex'])
for i, t, v in self.get_tokens_unprocessed(context=nctx):
yield match.start(3)+i, t, v
yield match.start(4), String.Regex, match.group(4) # end[mixounse]*
ctx.pos = match.end()
def intp_string_callback(self, match, ctx):
yield match.start(1), String.Other, match.group(1)
nctx = LexerContext(match.group(3), 0, ['interpolated-string'])
for i, t, v in self.get_tokens_unprocessed(context=nctx):
yield match.start(3)+i, t, v
yield match.start(4), String.Other, match.group(4) # end
ctx.pos = match.end()
states = {}
states['strings'] = [
# easy ones
(r'\:([a-zA-Z_][\w_]*[\!\?]?|\*\*?|[-+]@?|'
r'[/%&|^`~]|\[\]=?|<<|>>|<=?>|>=?|===?)', String.Symbol),
(r":'(\\\\|\\'|[^'])*'", String.Symbol),
(r"'(\\\\|\\'|[^'])*'", String.Single),
(r':"', String.Symbol, 'simple-sym'),
(r'"', String.Double, 'simple-string'),
(r'(?<!\.)`', String.Backtick, 'simple-backtick'),
]
# double-quoted string and symbol
for name, ttype, end in ('string', String.Double, '"'), \
('sym', String.Symbol, '"'), \
('backtick', String.Backtick, '`'):
states['simple-'+name] = [
include('string-intp-escaped'),
(r'[^\\%s#]+' % end, ttype),
(r'[\\#]', ttype),
(end, ttype, '#pop'),
]
# braced quoted strings
for lbrace, rbrace, name in ('\\{', '\\}', 'cb'), \
('\\[', '\\]', 'sb'), \
('\\(', '\\)', 'pa'), \
('<', '>', 'ab'):
states[name+'-intp-string'] = [
(r'\\[\\' + lbrace + rbrace + ']', String.Other),
(r'(?<!\\)' + lbrace, String.Other, '#push'),
(r'(?<!\\)' + rbrace, String.Other, '#pop'),
include('string-intp-escaped'),
(r'[\\#' + lbrace + rbrace + ']', String.Other),
(r'[^\\#' + lbrace + rbrace + ']+', String.Other),
]
states['strings'].append((r'%[QWx]?' + lbrace, String.Other,
name+'-intp-string'))
states[name+'-string'] = [
(r'\\[\\' + lbrace + rbrace + ']', String.Other),
(r'(?<!\\)' + lbrace, String.Other, '#push'),
(r'(?<!\\)' + rbrace, String.Other, '#pop'),
(r'[\\#' + lbrace + rbrace + ']', String.Other),
(r'[^\\#' + lbrace + rbrace + ']+', String.Other),
]
states['strings'].append((r'%[qsw]' + lbrace, String.Other,
name+'-string'))
states[name+'-regex'] = [
(r'\\[\\' + lbrace + rbrace + ']', String.Regex),
(r'(?<!\\)' + lbrace, String.Regex, '#push'),
(r'(?<!\\)' + rbrace + '[mixounse]*', String.Regex, '#pop'),
include('string-intp'),
(r'[\\#' + lbrace + rbrace + ']', String.Regex),
(r'[^\\#' + lbrace + rbrace + ']+', String.Regex),
]
states['strings'].append((r'%r' + lbrace, String.Regex,
name+'-regex'))
# these must come after %<brace>!
states['strings'] += [
# %r regex
(r'(%r([^a-zA-Z0-9]))([^\2\\]*(?:\\.[^\2\\]*)*)(\2[mixounse]*)',
intp_regex_callback),
# regular fancy strings with qsw
(r'%[qsw]([^a-zA-Z0-9])([^\1\\]*(?:\\.[^\1\\]*)*)\1', String.Other),
(r'(%[QWx]([^a-zA-Z0-9]))([^\2\\]*(?:\\.[^\2\\]*)*)(\2)',
intp_string_callback),
# special forms of fancy strings after operators or
# in method calls with braces
(r'(?<=[-+/*%=<>&!^|~,(])(\s*)(%([\t ])(?:[^\3\\]*(?:\\.[^\3\\]*)*)\3)',
bygroups(Text, String.Other, None)),
# and, because lookbehinds must be fixed-width, the whole thing a
# second time for matches at the start of a line...
(r'^(\s*)(%([\t ])(?:[^\3\\]*(?:\\.[^\3\\]*)*)\3)',
bygroups(Text, String.Other, None)),
# all regular fancy strings without qsw
(r'(%([^a-zA-Z0-9\s]))([^\2\\]*(?:\\.[^\2\\]*)*)(\2)',
intp_string_callback),
]
return states
tokens = {
'root': [
(r'#.*?$', Comment.Single),
(r'=begin\s.*?\n=end', Comment.Multiline),
# keywords
(r'(BEGIN|END|alias|begin|break|case|defined\?|'
r'do|else|elsif|end|ensure|for|if|in|next|redo|'
r'rescue|raise|retry|return|super|then|undef|unless|until|when|'
r'while|yield)\b', Keyword),
# start of function, class and module names
(r'(module)(\s+)([a-zA-Z_][a-zA-Z0-9_]*(::[a-zA-Z_][a-zA-Z0-9_]*)*)',
bygroups(Keyword, Text, Name.Namespace)),
(r'(def)(\s+)', bygroups(Keyword, Text), 'funcname'),
(r'def(?=[*%&^`~+-/\[<>=])', Keyword, 'funcname'),
(r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
# special methods
(r'(initialize|new|loop|include|extend|raise|attr_reader|'
r'attr_writer|attr_accessor|attr|catch|throw|private|'
r'module_function|public|protected|true|false|nil)\b', Keyword.Pseudo),
(r'(not|and|or)\b', Operator.Word),
(r'(autoload|block_given|const_defined|eql|equal|frozen|include|'
r'instance_of|is_a|iterator|kind_of|method_defined|nil|'
r'private_method_defined|protected_method_defined|'
r'public_method_defined|respond_to|tainted)\?', Name.Builtin),
(r'(chomp|chop|exit|gsub|sub)!', Name.Builtin),
(r'(?<!\.)(Array|Float|Integer|String|__id__|__send__|abort|ancestors|'
r'at_exit|autoload|binding|callcc|caller|'
r'catch|chomp|chop|class_eval|class_variables|'
r'clone|const_defined\?|const_get|const_missing|const_set|constants|'
r'display|dup|eval|exec|exit|extend|fail|fork|'
r'format|freeze|getc|gets|global_variables|gsub|'
r'hash|id|included_modules|inspect|instance_eval|'
r'instance_method|instance_methods|'
r'instance_variable_get|instance_variable_set|instance_variables|'
r'lambda|load|local_variables|loop|'
r'method|method_missing|methods|module_eval|name|'
r'object_id|open|p|print|printf|private_class_method|'
r'private_instance_methods|'
r'private_methods|proc|protected_instance_methods|'
r'protected_methods|public_class_method|'
r'public_instance_methods|public_methods|'
r'putc|puts|raise|rand|readline|readlines|require|'
r'scan|select|self|send|set_trace_func|singleton_methods|sleep|'
r'split|sprintf|srand|sub|syscall|system|taint|'
r'test|throw|to_a|to_s|trace_var|trap|type|untaint|untrace_var|'
r'warn)\b', Name.Builtin),
(r'__(FILE|LINE)__\b', Name.Builtin.Pseudo),
# normal heredocs
(r'(?<!\w)(<<-?)(["`\']?)([a-zA-Z_]\w*)(\2)(.*?\n)', heredoc_callback),
# empty string heredocs
(r'(<<-?)("|\')()(\2)(.*?\n)', heredoc_callback),
(r'__END__', Comment.Preproc, 'end-part'),
# multiline regex (after keywords or assignments)
(r'(?:^|(?<=[=<>~!])|'
r'(?<=(?:\s|;)when\s)|'
r'(?<=(?:\s|;)or\s)|'
r'(?<=(?:\s|;)and\s)|'
r'(?<=(?:\s|;|\.)index\s)|'
r'(?<=(?:\s|;|\.)scan\s)|'
r'(?<=(?:\s|;|\.)sub\s)|'
r'(?<=(?:\s|;|\.)sub!\s)|'
r'(?<=(?:\s|;|\.)gsub\s)|'
r'(?<=(?:\s|;|\.)gsub!\s)|'
r'(?<=(?:\s|;|\.)match\s)|'
r'(?<=(?:\s|;)if\s)|'
r'(?<=(?:\s|;)elsif\s)|'
r'(?<=^when\s)|'
r'(?<=^index\s)|'
r'(?<=^scan\s)|'
r'(?<=^sub\s)|'
r'(?<=^gsub\s)|'
r'(?<=^sub!\s)|'
r'(?<=^gsub!\s)|'
r'(?<=^match\s)|'
r'(?<=^if\s)|'
r'(?<=^elsif\s)'
r')(\s*)(/)(?!=)', bygroups(Text, String.Regex), 'multiline-regex'),
# multiline regex (in method calls)
(r'(?<=\(|,)/', String.Regex, 'multiline-regex'),
# multiline regex (this time the funny no whitespace rule)
(r'(\s+)(/[^\s=])', String.Regex, 'multiline-regex'),
# lex numbers and ignore following regular expressions which
# are division operators in fact (grrrr. i hate that. any
# better ideas?)
# since pygments 0.7 we also eat a "?" operator after numbers
# so that the char operator does not work. Chars are not allowed
# there so that you can use the ternary operator.
# stupid example:
# x>=0?n[x]:""
(r'(0_?[0-7]+(?:_[0-7]+)*)(\s*)([/?])?',
bygroups(Number.Oct, Text, Operator)),
(r'(0x[0-9A-Fa-f]+(?:_[0-9A-Fa-f]+)*)(\s*)([/?])?',
bygroups(Number.Hex, Text, Operator)),
(r'(0b[01]+(?:_[01]+)*)(\s*)([/?])?',
bygroups(Number.Bin, Text, Operator)),
(r'([\d]+(?:_\d+)*)(\s*)([/?])?',
bygroups(Number.Integer, Text, Operator)),
# Names
(r'@@[a-zA-Z_][a-zA-Z0-9_]*', Name.Variable.Class),
(r'@[a-zA-Z_][a-zA-Z0-9_]*', Name.Variable.Instance),
(r'\$[a-zA-Z0-9_]+', Name.Variable.Global),
(r'\$[!@&`\'+~=/\\,;.<>_*$?:"]', Name.Variable.Global),
(r'\$-[0adFiIlpvw]', Name.Variable.Global),
(r'::', Operator),
include('strings'),
# chars
(r'\?(\\[MC]-)*' # modifiers
r'(\\([\\abefnrstv#"\']|x[a-fA-F0-9]{1,2}|[0-7]{1,3})|\S)'
r'(?!\w)',
String.Char),
(r'[A-Z][a-zA-Z0-9_]+', Name.Constant),
# this is needed because ruby attributes can look
# like keywords (class) or like this: ` ?!?
(r'(\.|::)([a-zA-Z_]\w*[\!\?]?|[*%&^`~+-/\[<>=])',
bygroups(Operator, Name)),
(r'[a-zA-Z_][\w_]*[\!\?]?', Name),
(r'(\[|\]|\*\*|<<?|>>?|>=|<=|<=>|=~|={3}|'
r'!~|&&?|\|\||\.{1,3})', Operator),
(r'[-+/*%=<>&!^|~]=?', Operator),
(r'[(){};,/?:\\]', Punctuation),
(r'\s+', Text)
],
'funcname': [
(r'\(', Punctuation, 'defexpr'),
(r'(?:([a-zA-Z_][a-zA-Z0-9_]*)(\.))?'
r'([a-zA-Z_][\w_]*[\!\?]?|\*\*?|[-+]@?|'
r'[/%&|^`~]|\[\]=?|<<|>>|<=?>|>=?|===?)',
bygroups(Name.Class, Operator, Name.Function), '#pop'),
(r'', Text, '#pop')
],
'classname': [
(r'\(', Punctuation, 'defexpr'),
(r'<<', Operator, '#pop'),
(r'[A-Z_][\w_]*', Name.Class, '#pop'),
(r'', Text, '#pop')
],
'defexpr': [
(r'(\))(\.|::)?', bygroups(Punctuation, Operator), '#pop'),
(r'\(', Operator, '#push'),
include('root')
],
'in-intp': [
('}', String.Interpol, '#pop'),
include('root'),
],
'string-intp': [
(r'#{', String.Interpol, 'in-intp'),
(r'#@@?[a-zA-Z_][a-zA-Z0-9_]*', String.Interpol),
(r'#\$[a-zA-Z_][a-zA-Z0-9_]*', String.Interpol)
],
'string-intp-escaped': [
include('string-intp'),
(r'\\([\\abefnrstv#"\']|x[a-fA-F0-9]{1,2}|[0-7]{1,3})', String.Escape)
],
'interpolated-regex': [
include('string-intp'),
(r'[\\#]', String.Regex),
(r'[^\\#]+', String.Regex),
],
'interpolated-string': [
include('string-intp'),
(r'[\\#]', String.Other),
(r'[^\\#]+', String.Other),
],
'multiline-regex': [
include('string-intp'),
(r'\\\\', String.Regex),
(r'\\/', String.Regex),
(r'[\\#]', String.Regex),
(r'[^\\/#]+', String.Regex),
(r'/[mixounse]*', String.Regex, '#pop'),
],
'end-part': [
(r'.+', Comment.Preproc, '#pop')
]
}
tokens.update(gen_rubystrings_rules())
def analyse_text(text):
return shebang_matches(text, r'ruby(1\.\d)?')
class RubyConsoleLexer(Lexer):
"""
For Ruby interactive console (**irb**) output like:
.. sourcecode:: rbcon
irb(main):001:0> a = 1
=> 1
irb(main):002:0> puts a
1
=> nil
"""
name = 'Ruby irb session'
aliases = ['rbcon', 'irb']
mimetypes = ['text/x-ruby-shellsession']
_prompt_re = re.compile('irb\([a-zA-Z_][a-zA-Z0-9_]*\):\d{3}:\d+[>*"\'] '
'|>> |\?> ')
def get_tokens_unprocessed(self, text):
rblexer = RubyLexer(**self.options)
curcode = ''
insertions = []
for match in line_re.finditer(text):
line = match.group()
m = self._prompt_re.match(line)
if m is not None:
end = m.end()
insertions.append((len(curcode),
[(0, Generic.Prompt, line[:end])]))
curcode += line[end:]
else:
if curcode:
for item in do_insertions(insertions,
rblexer.get_tokens_unprocessed(curcode)):
yield item
curcode = ''
insertions = []
yield match.start(), Generic.Output, line
if curcode:
for item in do_insertions(insertions,
rblexer.get_tokens_unprocessed(curcode)):
yield item
class PerlLexer(RegexLexer):
"""
For `Perl <http://www.perl.org>`_ source code.
"""
name = 'Perl'
aliases = ['perl', 'pl']
filenames = ['*.pl', '*.pm']
mimetypes = ['text/x-perl', 'application/x-perl']
flags = re.DOTALL | re.MULTILINE
# TODO: give this a perl guy who knows how to parse perl...
tokens = {
'balanced-regex': [
(r'/(\\\\|\\/|[^/])*/[egimosx]*', String.Regex, '#pop'),
(r'!(\\\\|\\!|[^!])*![egimosx]*', String.Regex, '#pop'),
(r'\\(\\\\|[^\\])*\\[egimosx]*', String.Regex, '#pop'),
(r'{(\\\\|\\}|[^}])*}[egimosx]*', String.Regex, '#pop'),
(r'<(\\\\|\\>|[^>])*>[egimosx]*', String.Regex, '#pop'),
(r'\[(\\\\|\\\]|[^\]])*\][egimosx]*', String.Regex, '#pop'),
(r'\((\\\\|\\\)|[^\)])*\)[egimosx]*', String.Regex, '#pop'),
(r'@(\\\\|\\\@|[^\@])*@[egimosx]*', String.Regex, '#pop'),
(r'%(\\\\|\\\%|[^\%])*%[egimosx]*', String.Regex, '#pop'),
(r'\$(\\\\|\\\$|[^\$])*\$[egimosx]*', String.Regex, '#pop'),
(r'!(\\\\|\\!|[^!])*![egimosx]*', String.Regex, '#pop'),
],
'root': [
(r'\#.*?$', Comment.Single),
(r'^=[a-zA-Z0-9]+\s+.*?\n=cut', Comment.Multiline),
(r'(case|continue|do|else|elsif|for|foreach|if|last|my|'
r'next|our|redo|reset|then|unless|until|while|use|'
r'print|new|BEGIN|END|return)\b', Keyword),
(r'(format)(\s+)([a-zA-Z0-9_]+)(\s*)(=)(\s*\n)',
bygroups(Keyword, Text, Name, Text, Punctuation, Text), 'format'),
(r'(eq|lt|gt|le|ge|ne|not|and|or|cmp)\b', Operator.Word),
# common delimiters
(r's/(\\\\|\\/|[^/])*/(\\\\|\\/|[^/])*/[egimosx]*', String.Regex),
(r's!(\\\\|\\!|[^!])*!(\\\\|\\!|[^!])*![egimosx]*', String.Regex),
(r's\\(\\\\|[^\\])*\\(\\\\|[^\\])*\\[egimosx]*', String.Regex),
(r's@(\\\\|\\@|[^@])*@(\\\\|\\@|[^@])*@[egimosx]*', String.Regex),
(r's%(\\\\|\\%|[^%])*%(\\\\|\\%|[^%])*%[egimosx]*', String.Regex),
# balanced delimiters
(r's{(\\\\|\\}|[^}])*}\s*', String.Regex, 'balanced-regex'),
(r's<(\\\\|\\>|[^>])*>\s*', String.Regex, 'balanced-regex'),
(r's\[(\\\\|\\\]|[^\]])*\]\s*', String.Regex, 'balanced-regex'),
(r's\((\\\\|\\\)|[^\)])*\)\s*', String.Regex, 'balanced-regex'),
(r'm?/(\\\\|\\/|[^/\n])*/[gcimosx]*', String.Regex),
(r'((?<==~)|(?<=\())\s*/(\\\\|\\/|[^/])*/[gcimosx]*', String.Regex),
(r'\s+', Text),
(r'(abs|accept|alarm|atan2|bind|binmode|bless|caller|chdir|'
r'chmod|chomp|chop|chown|chr|chroot|close|closedir|connect|'
r'continue|cos|crypt|dbmclose|dbmopen|defined|delete|die|'
r'dump|each|endgrent|endhostent|endnetent|endprotoent|'
r'endpwent|endservent|eof|eval|exec|exists|exit|exp|fcntl|'
r'fileno|flock|fork|format|formline|getc|getgrent|getgrgid|'
r'getgrnam|gethostbyaddr|gethostbyname|gethostent|getlogin|'
r'getnetbyaddr|getnetbyname|getnetent|getpeername|getpgrp|'
r'getppid|getpriority|getprotobyname|getprotobynumber|'
r'getprotoent|getpwent|getpwnam|getpwuid|getservbyname|'
r'getservbyport|getservent|getsockname|getsockopt|glob|gmtime|'
r'goto|grep|hex|import|index|int|ioctl|join|keys|kill|last|'
r'lc|lcfirst|length|link|listen|local|localtime|log|lstat|'
r'map|mkdir|msgctl|msgget|msgrcv|msgsnd|my|next|no|oct|open|'
r'opendir|ord|our|pack|package|pipe|pop|pos|printf|'
r'prototype|push|quotemeta|rand|read|readdir|'
r'readline|readlink|readpipe|recv|redo|ref|rename|require|'
r'reverse|rewinddir|rindex|rmdir|scalar|seek|seekdir|'
r'select|semctl|semget|semop|send|setgrent|sethostent|setnetent|'
r'setpgrp|setpriority|setprotoent|setpwent|setservent|'
r'setsockopt|shift|shmctl|shmget|shmread|shmwrite|shutdown|'
r'sin|sleep|socket|socketpair|sort|splice|split|sprintf|sqrt|'
r'srand|stat|study|substr|symlink|syscall|sysopen|sysread|'
r'sysseek|system|syswrite|tell|telldir|tie|tied|time|times|tr|'
r'truncate|uc|ucfirst|umask|undef|unlink|unpack|unshift|untie|'
r'utime|values|vec|wait|waitpid|wantarray|warn|write'
r')\b', Name.Builtin),
(r'((__(DATA|DIE|WARN)__)|(STD(IN|OUT|ERR)))\b', Name.Builtin.Pseudo),
(r'<<([\'"]?)([a-zA-Z_][a-zA-Z0-9_]*)\1;?\n.*?\n\2\n', String),
(r'__END__', Comment.Preproc, 'end-part'),
(r'\$\^[ADEFHILMOPSTWX]', Name.Variable.Global),
(r"\$[\\\"\[\]'&`+*.,;=%~?@$!<>(^|/-](?!\w)", Name.Variable.Global),
(r'[$@%#]+', Name.Variable, 'varname'),
(r'0_?[0-7]+(_[0-7]+)*', Number.Oct),
(r'0x[0-9A-Fa-f]+(_[0-9A-Fa-f]+)*', Number.Hex),
(r'0b[01]+(_[01]+)*', Number.Bin),
(r'\d+', Number.Integer),
(r"'(\\\\|\\'|[^'])*'", String),
(r'"(\\\\|\\"|[^"])*"', String),
(r'`(\\\\|\\`|[^`])*`', String.Backtick),
(r'<([^\s>]+)>', String.Regexp),
(r'(q|qq|qw|qr|qx)\{', String.Other, 'cb-string'),
(r'(q|qq|qw|qr|qx)\(', String.Other, 'rb-string'),
(r'(q|qq|qw|qr|qx)\[', String.Other, 'sb-string'),
(r'(q|qq|qw|qr|qx)\<', String.Other, 'lt-string'),
(r'(q|qq|qw|qr|qx)(.)[.\n]*?\1', String.Other),
(r'package\s+', Keyword, 'modulename'),
(r'sub\s+', Keyword, 'funcname'),
(r'(\[\]|\*\*|::|<<|>>|>=|<=|<=>|={3}|!=|=~|'
r'!~|&&?|\|\||\.{1,3})', Operator),
(r'[-+/*%=<>&^|!\\~]=?', Operator),
(r'[\(\)\[\]:;,<>/\?\{\}]', Punctuation), # yes, there's no shortage
# of punctuation in Perl!
(r'(?=\w)', Name, 'name'),
],
'format': [
(r'\.\n', String.Interpol, '#pop'),
(r'[^\n]*\n', String.Interpol),
],
'varname': [
(r'\s+', Text),
(r'\{', Punctuation, '#pop'), # hash syntax?
(r'\)|,', Punctuation, '#pop'), # argument specifier
(r'[a-zA-Z0-9_]+::', Name.Namespace),
(r'[a-zA-Z0-9_:]+', Name.Variable, '#pop'),
],
'name': [
(r'[a-zA-Z0-9_]+::', Name.Namespace),
(r'[a-zA-Z0-9_:]+', Name, '#pop'),
(r'[A-Z_]+(?=[^a-zA-Z0-9_])', Name.Constant, '#pop'),
(r'(?=[^a-zA-Z0-9_])', Text, '#pop'),
],
'modulename': [
(r'[a-zA-Z_][\w_]*', Name.Namespace, '#pop')
],
'funcname': [
(r'[a-zA-Z_][\w_]*[\!\?]?', Name.Function),
(r'\s+', Text),
# argument declaration
(r'(\([$@%]*\))(\s*)', bygroups(Punctuation, Text)),
(r'.*?{', Punctuation, '#pop'),
(r';', Punctuation, '#pop'),
],
'cb-string': [
(r'\\[\{\}\\]', String.Other),
(r'\\', String.Other),
(r'\{', String.Other, 'cb-string'),
(r'\}', String.Other, '#pop'),
(r'[^\{\}\\]+', String.Other)
],
'rb-string': [
(r'\\[\(\)\\]', String.Other),
(r'\\', String.Other),
(r'\(', String.Other, 'rb-string'),
(r'\)', String.Other, '#pop'),
(r'[^\(\)]+', String.Other)
],
'sb-string': [
(r'\\[\[\]\\]', String.Other),
(r'\\', String.Other),
(r'\[', String.Other, 'sb-string'),
(r'\]', String.Other, '#pop'),
(r'[^\[\]]+', String.Other)
],
'lt-string': [
(r'\\[\<\>\\]', String.Other),
(r'\\', String.Other),
(r'\<', String.Other, 'lt-string'),
(r'\>', String.Other, '#pop'),
(r'[^\<\>]+', String.Other)
],
'end-part': [
(r'.+', Comment.Preproc, '#pop')
]
}
def analyse_text(text):
if shebang_matches(text, r'perl(\d\.\d\.\d)?'):
return True
if 'my $' in text:
return 0.9
return 0.1 # who knows, might still be perl!
class LuaLexer(RegexLexer):
"""
For `Lua <http://www.lua.org>`_ source code.
Additional options accepted:
`func_name_highlighting`
If given and ``True``, highlight builtin function names
(default: ``True``).
`disabled_modules`
If given, must be a list of module names whose function names
should not be highlighted. By default all modules are highlighted.
To get a list of allowed modules have a look into the
`_luabuiltins` module:
.. sourcecode:: pycon
>>> from pygments.lexers._luabuiltins import MODULES
>>> MODULES.keys()
['string', 'coroutine', 'modules', 'io', 'basic', ...]
"""
name = 'Lua'
aliases = ['lua']
filenames = ['*.lua']
mimetypes = ['text/x-lua', 'application/x-lua']
tokens = {
'root': [
(r'(?s)--\[(=*)\[.*?\]\1\]', Comment.Multiline),
('--.*$', Comment.Single),
(r'(?i)(\d*\.\d+|\d+\.\d*)(e[+-]?\d+)?', Number.Float),
(r'(?i)\d+e[+-]?\d+', Number.Float),
('(?i)0x[0-9a-f]*', Number.Hex),
(r'\d+', Number.Integer),
(r'\n', Text),
(r'[^\S\n]', Text),
(r'(?s)\[(=*)\[.*?\]\1\]', String.Multiline),
(r'[\[\]\{\}\(\)\.,:;]', Punctuation),
(r'(==|~=|<=|>=|\.\.|\.\.\.|[=+\-*/%^<>#])', Operator),
(r'(and|or|not)\b', Operator.Word),
('(break|do|else|elseif|end|for|if|in|repeat|return|then|until|'
r'while)\b', Keyword),
(r'(local)\b', Keyword.Declaration),
(r'(true|false|nil)\b', Keyword.Constant),
(r'(function)(\s+)', bygroups(Keyword, Text), 'funcname'),
(r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
(r'[A-Za-z_][A-Za-z0-9_]*(\.[A-Za-z_][A-Za-z0-9_]*)?', Name),
# multiline strings
(r'(?s)\[(=*)\[(.*?)\]\1\]', String),
("'", String.Single, combined('stringescape', 'sqs')),
('"', String.Double, combined('stringescape', 'dqs'))
],
'funcname': [
('[A-Za-z_][A-Za-z0-9_]*', Name.Function, '#pop'),
# inline function
('\(', Punctuation, '#pop'),
],
'classname': [
('[A-Za-z_][A-Za-z0-9_]*', Name.Class, '#pop')
],
# if I understand correctly, every character is valid in a lua string,
# so this state is only for later corrections
'string': [
('.', String)
],
'stringescape': [
(r'''\\([abfnrtv\\"']|\d{1,3})''', String.Escape)
],
'sqs': [
("'", String, '#pop'),
include('string')
],
'dqs': [
('"', String, '#pop'),
include('string')
]
}
def __init__(self, **options):
self.func_name_highlighting = get_bool_opt(
options, 'func_name_highlighting', True)
self.disabled_modules = get_list_opt(options, 'disabled_modules', [])
self._functions = set()
if self.func_name_highlighting:
from pygments.lexers._luabuiltins import MODULES
for mod, func in MODULES.iteritems():
if mod not in self.disabled_modules:
self._functions.update(func)
RegexLexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
for index, token, value in \
RegexLexer.get_tokens_unprocessed(self, text):
if token is Name:
if value in self._functions:
yield index, Name.Builtin, value
continue
elif '.' in value:
a, b = value.split('.')
yield index, Name, a
yield index + len(a), Punctuation, u'.'
yield index + len(a) + 1, Name, b
continue
yield index, token, value
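# Illustrative sketch (not part of the original module): LuaLexer accepts the options
# documented in its docstring, e.g. disabling builtin highlighting for the 'io' module:
#   lua_lexer = LuaLexer(disabled_modules=['io'])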
class MiniDLexer(RegexLexer):
"""
For `MiniD <http://www.dsource.org/projects/minid>`_ (a D-like scripting
language) source.
"""
name = 'MiniD'
filenames = ['*.md']
aliases = ['minid']
mimetypes = ['text/x-minidsrc']
tokens = {
'root': [
(r'\n', Text),
(r'\s+', Text),
# Comments
(r'//(.*?)\n', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
(r'/\+', Comment.Multiline, 'nestedcomment'),
# Keywords
(r'(as|assert|break|case|catch|class|continue|coroutine|default'
r'|do|else|finally|for|foreach|function|global|namespace'
r'|if|import|in|is|local|module|return|super|switch'
r'|this|throw|try|vararg|while|with|yield)\b', Keyword),
(r'(false|true|null)\b', Keyword.Constant),
# FloatLiteral
(r'([0-9][0-9_]*)?\.[0-9_]+([eE][+\-]?[0-9_]+)?', Number.Float),
# IntegerLiteral
# -- Binary
(r'0[Bb][01_]+', Number),
# -- Octal
(r'0[Cc][0-7_]+', Number.Oct),
# -- Hexadecimal
(r'0[xX][0-9a-fA-F_]+', Number.Hex),
# -- Decimal
(r'(0|[1-9][0-9_]*)', Number.Integer),
# CharacterLiteral
(r"""'(\\['"?\\abfnrtv]|\\x[0-9a-fA-F]{2}|\\[0-9]{1,3}"""
r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|.)'""",
String.Char
),
# StringLiteral
# -- WysiwygString
(r'@"(""|.)*"', String),
# -- AlternateWysiwygString
(r'`(``|.)*`', String),
# -- DoubleQuotedString
(r'"(\\\\|\\"|[^"])*"', String),
# Tokens
(
r'(~=|\^=|%=|\*=|==|!=|>>>=|>>>|>>=|>>|>=|<=>|\?=|-\>'
r'|<<=|<<|<=|\+\+|\+=|--|-=|\|\||\|=|&&|&=|\.\.|/=)'
r'|[-/.&$@|\+<>!()\[\]{}?,;:=*%^~#\\]', Punctuation
),
# Identifier
(r'[a-zA-Z_]\w*', Name),
],
'nestedcomment': [
(r'[^+/]+', Comment.Multiline),
(r'/\+', Comment.Multiline, '#push'),
(r'\+/', Comment.Multiline, '#pop'),
(r'[+/]', Comment.Multiline),
],
}
class IoLexer(RegexLexer):
"""
For `Io <http://iolanguage.com/>`_ (a small, prototype-based
programming language) source.
*New in Pygments 0.10.*
"""
name = 'Io'
filenames = ['*.io']
aliases = ['io']
mimetypes = ['text/x-iosrc']
tokens = {
'root': [
(r'\n', Text),
(r'\s+', Text),
# Comments
(r'//(.*?)\n', Comment.Single),
(r'#(.*?)\n', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
(r'/\+', Comment.Multiline, 'nestedcomment'),
# DoubleQuotedString
(r'"(\\\\|\\"|[^"])*"', String),
# Operators
(r'::=|:=|=|\(|\)|;|,|\*|-|\+|>|<|@|!|/|\||\^|\.|%|&|\[|\]|\{|\}',
Operator),
# keywords
(r'(clone|do|doFile|doString|method|for|if|else|elseif|then)\b',
Keyword),
# constants
(r'(nil|false|true)\b', Name.Constant),
# names
(r'(Object|list|List|Map|args|Sequence|Coroutine|File)\b',
Name.Builtin),
('[a-zA-Z_][a-zA-Z0-9_]*', Name),
# numbers
(r'(\d+\.?\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
(r'\d+', Number.Integer)
],
'nestedcomment': [
(r'[^+/]+', Comment.Multiline),
(r'/\+', Comment.Multiline, '#push'),
(r'\+/', Comment.Multiline, '#pop'),
(r'[+/]', Comment.Multiline),
]
}
class TclLexer(RegexLexer):
"""
For Tcl source code.
*New in Pygments 0.10.*
"""
keyword_cmds_re = (
r'\b(after|apply|array|break|catch|continue|elseif|else|error|'
r'eval|expr|for|foreach|global|if|namespace|proc|rename|return|'
r'set|switch|then|trace|unset|update|uplevel|upvar|variable|'
r'vwait|while)\b'
)
builtin_cmds_re = (
r'\b(append|bgerror|binary|cd|chan|clock|close|concat|dde|dict|'
r'encoding|eof|exec|exit|fblocked|fconfigure|fcopy|file|'
r'fileevent|flush|format|gets|glob|history|http|incr|info|interp|'
r'join|lappend|lassign|lindex|linsert|list|llength|load|loadTk|'
r'lrange|lrepeat|lreplace|lreverse|lsearch|lset|lsort|mathfunc|'
r'mathop|memory|msgcat|open|package|pid|pkg::create|pkg_mkIndex|'
r'platform|platform::shell|puts|pwd|re_syntax|read|refchan|'
r'regexp|registry|regsub|scan|seek|socket|source|split|string|'
r'subst|tell|time|tm|unknown|unload)\b'
)
name = 'Tcl'
aliases = ['tcl']
filenames = ['*.tcl']
mimetypes = ['text/x-tcl', 'text/x-script.tcl', 'application/x-tcl']
def _gen_command_rules(keyword_cmds_re, builtin_cmds_re, context=""):
return [
(keyword_cmds_re, Keyword, 'params' + context),
(builtin_cmds_re, Name.Builtin, 'params' + context),
(r'([\w\.\-]+)', Name.Variable, 'params' + context),
(r'#', Comment, 'comment'),
]
tokens = {
'root': [
include('command'),
include('basic'),
include('data'),
],
'command': _gen_command_rules(keyword_cmds_re, builtin_cmds_re),
'command-in-brace': _gen_command_rules(keyword_cmds_re,
builtin_cmds_re,
"-in-brace"),
'command-in-bracket': _gen_command_rules(keyword_cmds_re,
builtin_cmds_re,
"-in-bracket"),
'command-in-paren': _gen_command_rules(keyword_cmds_re,
builtin_cmds_re,
"-in-paren"),
'basic': [
(r'\(', Keyword, 'paren'),
(r'\[', Keyword, 'bracket'),
(r'\{', Keyword, 'brace'),
(r'"', String.Double, 'string'),
(r'(eq|ne|in|ni)\b', Operator.Word),
(r'!=|==|<<|>>|<=|>=|&&|\|\||\*\*|[-+~!*/%<>&^|?:]', Operator),
],
'data': [
(r'\s+', Text),
(r'0x[a-fA-F0-9]+', Number.Hex),
(r'0[0-7]+', Number.Oct),
(r'\d+\.\d+', Number.Float),
(r'\d+', Number.Integer),
(r'\$([\w\.\-\:]+)', Name.Variable),
(r'([\w\.\-\:]+)', Text),
],
'params': [
(r';', Keyword, '#pop'),
(r'\n', Text, '#pop'),
(r'(else|elseif|then)', Keyword),
include('basic'),
include('data'),
],
'params-in-brace': [
(r'}', Keyword, ('#pop', '#pop')),
include('params')
],
'params-in-paren': [
(r'\)', Keyword, ('#pop', '#pop')),
include('params')
],
'params-in-bracket': [
(r'\]', Keyword, ('#pop', '#pop')),
include('params')
],
'string': [
(r'\[', String.Double, 'string-square'),
(r'(\\\\|\\[0-7]+|\\.|[^"])', String.Double),
(r'"', String.Double, '#pop')
],
'string-square': [
(r'\[', String.Double, 'string-square'),
(r'(\\\\|\\[0-7]+|\\.|[^\]])', String.Double),
(r'\]', String.Double, '#pop')
],
'brace': [
(r'}', Keyword, '#pop'),
include('command-in-brace'),
include('basic'),
include('data'),
],
'paren': [
(r'\)', Keyword, '#pop'),
include('command-in-paren'),
include('basic'),
include('data'),
],
'bracket': [
(r'\]', Keyword, '#pop'),
include('command-in-bracket'),
include('basic'),
include('data'),
],
'comment': [
(r'.*[^\\]\n', Comment, '#pop'),
(r'.*\\\n', Comment),
],
}
def analyse_text(text):
return shebang_matches(text, r'(tcl)')
class ClojureLexer(RegexLexer):
"""
Lexer for `Clojure <http://clojure.org/>`_ source code.
*New in Pygments 0.11.*
"""
name = 'Clojure'
aliases = ['clojure', 'clj']
filenames = ['*.clj']
mimetypes = ['text/x-clojure', 'application/x-clojure']
keywords = [
'fn', 'def', 'defn', 'defmacro', 'defmethod', 'defmulti', 'defn-',
'defstruct',
'if', 'cond',
'let', 'for'
]
builtins = [
'.', '..',
'*', '+', '-', '->', '..', '/', '<', '<=', '=', '==', '>', '>=',
'accessor', 'agent', 'agent-errors', 'aget', 'alength', 'all-ns',
'alter', 'and', 'append-child', 'apply', 'array-map', 'aset',
'aset-boolean', 'aset-byte', 'aset-char', 'aset-double', 'aset-float',
'aset-int', 'aset-long', 'aset-short', 'assert', 'assoc', 'await',
'await-for', 'bean', 'binding', 'bit-and', 'bit-not', 'bit-or',
'bit-shift-left', 'bit-shift-right', 'bit-xor', 'boolean', 'branch?',
'butlast', 'byte', 'cast', 'char', 'children', 'class',
'clear-agent-errors', 'comment', 'commute', 'comp', 'comparator',
'complement', 'concat', 'conj', 'cons', 'constantly',
'construct-proxy', 'contains?', 'count', 'create-ns', 'create-struct',
'cycle', 'dec', 'deref', 'difference', 'disj', 'dissoc', 'distinct',
'doall', 'doc', 'dorun', 'doseq', 'dosync', 'dotimes', 'doto',
'double', 'down', 'drop', 'drop-while', 'edit', 'end?', 'ensure',
'eval', 'every?', 'false?', 'ffirst', 'file-seq', 'filter', 'find',
'find-doc', 'find-ns', 'find-var', 'first', 'float', 'flush',
'fnseq', 'frest', 'gensym', 'get', 'get-proxy-class',
'hash-map', 'hash-set', 'identical?', 'identity', 'if-let', 'import',
'in-ns', 'inc', 'index', 'insert-child', 'insert-left', 'insert-right',
'inspect-table', 'inspect-tree', 'instance?', 'int', 'interleave',
'intersection', 'into', 'into-array', 'iterate', 'join', 'key', 'keys',
'keyword', 'keyword?', 'last', 'lazy-cat', 'lazy-cons', 'left',
'lefts', 'line-seq', 'list', 'list*', 'load', 'load-file',
'locking', 'long', 'loop', 'macroexpand', 'macroexpand-1',
'make-array', 'make-node', 'map', 'map-invert', 'map?', 'mapcat',
'max', 'max-key', 'memfn', 'merge', 'merge-with', 'meta', 'min',
'min-key', 'name', 'namespace', 'neg?', 'new', 'newline', 'next',
'nil?', 'node', 'not', 'not-any?', 'not-every?', 'not=', 'ns-imports',
'ns-interns', 'ns-map', 'ns-name', 'ns-publics', 'ns-refers',
'ns-resolve', 'ns-unmap', 'nth', 'nthrest', 'or', 'parse', 'partial',
'path', 'peek', 'pop', 'pos?', 'pr', 'pr-str', 'print', 'print-str',
'println', 'println-str', 'prn', 'prn-str', 'project', 'proxy',
'proxy-mappings', 'quot', 'rand', 'rand-int', 'range', 're-find',
're-groups', 're-matcher', 're-matches', 're-pattern', 're-seq',
'read', 'read-line', 'reduce', 'ref', 'ref-set', 'refer', 'rem',
'remove', 'remove-method', 'remove-ns', 'rename', 'rename-keys',
'repeat', 'replace', 'replicate', 'resolve', 'rest', 'resultset-seq',
'reverse', 'rfirst', 'right', 'rights', 'root', 'rrest', 'rseq',
'second', 'select', 'select-keys', 'send', 'send-off', 'seq',
'seq-zip', 'seq?', 'set', 'short', 'slurp', 'some', 'sort',
'sort-by', 'sorted-map', 'sorted-map-by', 'sorted-set',
'special-symbol?', 'split-at', 'split-with', 'str', 'string?',
'struct', 'struct-map', 'subs', 'subvec', 'symbol', 'symbol?',
'sync', 'take', 'take-nth', 'take-while', 'test', 'time', 'to-array',
'to-array-2d', 'tree-seq', 'true?', 'union', 'up', 'update-proxy',
'val', 'vals', 'var-get', 'var-set', 'var?', 'vector', 'vector-zip',
'vector?', 'when', 'when-first', 'when-let', 'when-not',
'with-local-vars', 'with-meta', 'with-open', 'with-out-str',
'xml-seq', 'xml-zip', 'zero?', 'zipmap', 'zipper']
# valid names for identifiers
# well, names just can't consist entirely of numbers
# but this should be good enough for now
valid_name = r'[a-zA-Z0-9!$%&*+,/:<=>?@^_~-]+'
tokens = {
'root' : [
# the comments - always starting with semicolon
# and going to the end of the line
(r';.*$', Comment.Single),
# whitespaces - usually not relevant
(r'\s+', Text),
# numbers
(r'-?\d+\.\d+', Number.Float),
(r'-?\d+', Number.Integer),
# support for uncommon kinds of numbers -
# have to figure out what the characters mean
#(r'(#e|#i|#b|#o|#d|#x)[\d.]+', Number),
# strings, symbols and characters
(r'"(\\\\|\\"|[^"])*"', String),
(r"'" + valid_name, String.Symbol),
(r"\\([()/'\".'_!§$%& ?;=+-]{1}|[a-zA-Z0-9]+)", String.Char),
# constants
(r'(#t|#f)', Name.Constant),
# special operators
(r"('|#|`|,@|,|\.)", Operator),
# highlight the keywords
('(%s)' % '|'.join([
re.escape(entry) + ' ' for entry in keywords]),
Keyword
),
# first variable in a quoted string like
# '(this is syntactic sugar)
(r"(?<='\()" + valid_name, Name.Variable),
(r"(?<=#\()" + valid_name, Name.Variable),
# highlight the builtins
("(?<=\()(%s)" % '|'.join([
re.escape(entry) + ' ' for entry in builtins]),
Name.Builtin
),
# the remaining functions
(r'(?<=\()' + valid_name, Name.Function),
# find the remaining variables
(valid_name, Name.Variable),
# Clojure accepts vector notation
(r'(\[|\])', Punctuation),
# Clojure accepts map notation
(r'(\{|\})', Punctuation),
# the famous parentheses!
(r'(\(|\))', Punctuation),
],
}
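# Illustrative sketch: how a lexer like the one above is typically driven
# through Pygments' standard highlight() entry point.  The alias 'clojure'
# and the sample snippet are assumptions made for the example only.
if __name__ == '__main__':
    from pygments import highlight
    from pygments.lexers import get_lexer_by_name
    from pygments.formatters import TerminalFormatter

    sample = "(defn add [a b] (+ a b))"
    # highlight() tokenizes the source with the lexer and hands the token
    # stream to the formatter, returning the rendered text.
    print(highlight(sample, get_lexer_by_name('clojure'), TerminalFormatter()))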
|
py
|
1a57c878ef7092c17408d0c7dc18f993bced0c64
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import os
import warnings
import numpy as np
from keras import Model
from keras import layers
from keras.optimizers import SGD, Adam
from keras.callbacks import ModelCheckpoint, EarlyStopping
from ConfigSpace import ConfigurationSpace
from ConfigSpace import UniformFloatHyperparameter, CategoricalHyperparameter, InCondition
from alphaml.engine.components.models.base_model import BaseClassificationModel
from alphaml.engine.components.data_preprocessing.image_preprocess import preprocess
from alphaml.engine.components.data_manager import DataManager
class BaseImageClassificationModel(BaseClassificationModel):
def __init__(self):
self.base_model = None
self.model_name = None
self.work_size = None
self.min_size = None
self.default_size = None
super().__init__()
def set_model_config(self, inputshape, classnum, *args, **kwargs):
self.inputshape = inputshape
self.classnum = classnum
@staticmethod
def set_training_space(cs: ConfigurationSpace):
'''
Set hyperparameters for training
'''
batch_size = CategoricalHyperparameter('batch_size', [16, 32], default_value=32)
keep_prob = UniformFloatHyperparameter('keep_prob', 0, 0.99, default_value=0.5)
cs.add_hyperparameters([batch_size, keep_prob])
@staticmethod
def set_optimizer_space(cs: ConfigurationSpace):
'''
Set hyperparameters for optimizers
'''
optimizer = CategoricalHyperparameter('optimizer', ['SGD', 'Adam'], default_value='Adam')
sgd_lr = UniformFloatHyperparameter('sgd_lr', 0.00001, 0.1,
default_value=0.005, log=True) # log scale
sgd_decay = UniformFloatHyperparameter('sgd_decay', 0.0001, 0.1,
default_value=0.05, log=True) # log scale
sgd_momentum = UniformFloatHyperparameter('sgd_momentum', 0.3, 0.99, default_value=0.9)
adam_lr = UniformFloatHyperparameter('adam_lr', 0.00001, 0.1,
default_value=0.005, log=True) # log scale
adam_decay = UniformFloatHyperparameter('adam_decay', 0.0001, 0.1,
default_value=0.05, log=True) # log scale
sgd_lr_cond = InCondition(child=sgd_lr, parent=optimizer, values=['SGD'])
sgd_decay_cond = InCondition(child=sgd_decay, parent=optimizer, values=['SGD'])
sgd_momentum_cond = InCondition(child=sgd_momentum, parent=optimizer, values=['SGD'])
adam_lr_cond = InCondition(child=adam_lr, parent=optimizer, values=['Adam'])
adam_decay_cond = InCondition(child=adam_decay, parent=optimizer, values=['Adam'])
cs.add_hyperparameters([optimizer, sgd_lr, sgd_decay, sgd_momentum, adam_lr, adam_decay])
cs.add_conditions([sgd_lr_cond, sgd_decay_cond, sgd_momentum_cond, adam_lr_cond, adam_decay_cond])
def validate_inputshape(self):
if self.inputshape[0] < self.min_size or self.inputshape[1] < self.min_size:
raise ValueError(
"The minimum inputshape of " + self.model_name + " is " + str((self.min_size, self.min_size)) +
", while " + str(self.inputshape[0:2]) + " given.")
if self.inputshape[0] < self.work_size or self.inputshape[1] < self.work_size:
warnings.warn(
"The minimum recommended inputshape of the model is " + str((self.work_size, self.work_size)) +
", while " + str(self.inputshape[0:2]) + " given.")
def parse_monitor(self):
if self.metricstr == 'mse':
self.monitor = 'mean_squared_error'
elif self.metricstr == 'mae':
self.monitor = 'mean_absolute_error'
else:
self.monitor = self.metricstr
return
def load_data(self, data, **kwargs):
trainpregen, validpregen = preprocess(data)
self.metricstr = kwargs['metric']
self.parse_monitor()
if data.train_X is None and data.train_y is None:
if hasattr(data, 'train_valid_dir') or (hasattr(data, 'train_dir') and hasattr(data, 'valid_dir')):
if hasattr(data, 'train_valid_dir'):
self.train_gen = trainpregen.flow_from_directory(data.train_valid_dir,
target_size=self.inputshape[:2],
batch_size=self.batch_size, subset='training')
self.valid_gen = trainpregen.flow_from_directory(data.train_valid_dir,
target_size=self.inputshape[:2],
batch_size=self.batch_size, subset='validation')
self.classnum = self.train_gen.num_classes
self.monitor = 'val_' + self.monitor
else:
self.train_gen = trainpregen.flow_from_directory(data.train_dir, target_size=self.inputshape[:2],
batch_size=self.batch_size)
self.valid_gen = validpregen.flow_from_directory(data.valid_dir, target_size=self.inputshape[:2],
batch_size=self.batch_size)
self.classnum = self.train_gen.num_classes
self.monitor = 'val_' + self.monitor
else:
raise ValueError("Invalid data input!")
else:
if data.val_X is None and data.val_y is None:
if_valid = False
else:
if_valid = True
self.train_gen = trainpregen.flow(data.train_X, data.train_y, batch_size=self.batch_size)
if if_valid:
self.valid_gen = validpregen.flow(data.val_X, data.val_y, batch_size=self.batch_size)
self.monitor = 'val_' + self.monitor
else:
self.valid_gen = None
self.monitor = self.monitor
def fit(self, data: DataManager, **kwargs):
if self.base_model is None:
raise AttributeError("Base model is not defined!")
if self.optimizer == 'SGD':
optimizer = SGD(lr=self.sgd_lr, momentum=self.sgd_momentum, decay=self.sgd_decay, nesterov=True)
elif self.optimizer == 'Adam':
optimizer = Adam(self.adam_lr, decay=self.adam_decay)
else:
raise ValueError('No optimizer named %s defined' % str(self.optimizer))
timestr = time.strftime('%Y-%m-%d-%H:%M:%S', time.localtime(time.time()))
# build model
if self.classnum == 1:
final_activation = 'sigmoid'
loss = 'binary_crossentropy'
else:
final_activation = 'softmax'
loss = 'categorical_crossentropy'
y = self.base_model.output
y = layers.Dropout(1 - self.keep_prob)(y)
y = layers.Dense(self.classnum, activation=final_activation, name='Dense_final')(y)
model = Model(inputs=self.base_model.input, outputs=y)
# TODO: save models after training
save_dir = 'dl_models'
if not os.path.exists(save_dir):
os.makedirs(save_dir)
modelpath = os.path.join(save_dir, 'model_%s.hdf5' % timestr)
checkpoint = ModelCheckpoint(filepath=modelpath,
monitor=self.monitor,
save_best_only=True,
period=1)
earlystop = EarlyStopping(monitor=self.monitor, patience=12)
model.compile(optimizer=optimizer, loss=loss, metrics=[self.metricstr])
model.fit_generator(generator=self.train_gen,
epochs=200,
validation_data=self.valid_gen,
callbacks=[checkpoint, earlystop])
self.estimator = model
self.best_result = checkpoint.best
return self, modelpath
def predict_proba(self, data: DataManager):
if self.estimator is None:
raise TypeError("Unsupported estimator type 'NoneType'!")
_, testpregen = preprocess()
if hasattr(data, 'test_dir'):
self.test_gen = testpregen.flow_from_directory(data.test_dir, target_size=self.inputshape[:2],
batch_size=32)
else:
self.test_gen = testpregen.flow(data.test_X, batch_size=32)
pred = self.estimator.predict_generator(self.test_gen)
return pred
def predict(self, data: DataManager):
pred = self.predict_proba(data)
pred = np.argmax(pred, axis=-1)
return pred
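# Illustrative sketch: exercising the static search-space helpers defined
# above on their own, using only the ConfigSpace API already imported at the
# top of this module.  Build a ConfigurationSpace, attach the training and
# optimizer hyperparameters, and draw one random configuration.
if __name__ == '__main__':
    cs = ConfigurationSpace()
    BaseImageClassificationModel.set_training_space(cs)
    BaseImageClassificationModel.set_optimizer_space(cs)
    sample = cs.sample_configuration()
    # Conditional hyperparameters (sgd_* or adam_*) only appear when the
    # sampled 'optimizer' value activates them.
    print(sample.get_dictionary())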
|
py
|
1a57c9aba5ed27f314cccf382aa3b0db9ff37737
|
import cntk as C
import numpy as np
from helpers import *
from cntk.layers import *
from cntk.layers.sequence import *
from cntk.layers.typing import *
from cntk.debugging import debug_model
import pickle
import importlib
import os
class PolyMath:
def __init__(self, config_file):
data_config = importlib.import_module(config_file).data_config
model_config = importlib.import_module(config_file).model_config
self.word_count_threshold = data_config['word_count_threshold']
self.char_count_threshold = data_config['char_count_threshold']
self.word_size = data_config['word_size']
self.abs_path = os.path.dirname(os.path.abspath(__file__))
pickle_file = os.path.join(self.abs_path, data_config['pickle_file'])
with open(pickle_file, 'rb') as vf:
known, self.vocab, self.chars = pickle.load(vf)
self.wg_dim = known
self.wn_dim = len(self.vocab) - known
self.c_dim = len(self.chars)
self.a_dim = 1
self.hidden_dim = model_config['hidden_dim']
self.w2v_hidden_dim = model_config['w2v_hidden_dim']
self.convs = model_config['char_convs']
self.dropout = model_config['dropout']
self.char_emb_dim = model_config['char_emb_dim']
self.highway_layers = model_config['highway_layers']
self.two_step = model_config['two_step']
self.use_cudnn = model_config['use_cudnn']
self.use_sparse = True
# Source and target inputs to the model
inputAxis = C.Axis('inputAxis')
outputAxis = C.Axis('outputAxis')
InputSequence = C.layers.SequenceOver[inputAxis]
OutputSequence = C.layers.SequenceOver[outputAxis]
print('dropout', self.dropout)
print('use_cudnn', self.use_cudnn)
print('use_sparse', self.use_sparse)
def charcnn(self, x):
conv_out = C.layers.Sequential([
C.layers.Embedding(self.char_emb_dim),
C.layers.Dropout(self.dropout),
C.layers.Convolution2D((5,self.char_emb_dim), self.convs, activation=C.relu, init=C.glorot_uniform(), bias=True, init_bias=0, name='charcnn_conv')])(x)
return C.reduce_max(conv_out, axis=1) # workaround cudnn failure in GlobalMaxPooling
def embed(self):
# load glove
npglove = np.zeros((self.wg_dim, self.w2v_hidden_dim), dtype=np.float32)
with open(os.path.join(self.abs_path, 'glove.6B.100d.txt'), encoding='utf-8') as f:
for line in f:
parts = line.split()
word = parts[0].lower()
if word in self.vocab:
npglove[self.vocab[word],:] = np.asarray([float(p) for p in parts[1:]])
glove = C.constant(npglove)
nonglove = C.parameter(shape=(len(self.vocab) - self.wg_dim, self.w2v_hidden_dim), init=C.glorot_uniform(), name='TrainableE')
def func(wg, wn):
return C.times(wg, glove) + C.times(wn, nonglove)
return func
def input_layer(self,cgw,cnw,cc,qgw,qnw,qc):
cgw_ph = C.placeholder()
cnw_ph = C.placeholder()
cc_ph = C.placeholder()
qgw_ph = C.placeholder()
qnw_ph = C.placeholder()
qc_ph = C.placeholder()
input_chars = C.placeholder(shape=(1,self.word_size,self.c_dim))
input_glove_words = C.placeholder(shape=(self.wg_dim,))
input_nonglove_words = C.placeholder(shape=(self.wn_dim,))
# we need to reshape because GlobalMaxPooling/reduce_max is retaining a trailing singleton dimension
# todo GlobalPooling/reduce_max should have a keepdims default to False
embedded = C.splice(
C.reshape(self.charcnn(input_chars), self.convs),
self.embed()(input_glove_words, input_nonglove_words), name='splice_embed')
processed = C.layers.Sequential([For(range(2), lambda: OptimizedRnnStack(self.hidden_dim, bidirectional=True, use_cudnn=self.use_cudnn, name='input_rnn'))])(embedded)
qce = C.one_hot(qc_ph, num_classes=self.c_dim, sparse_output=self.use_sparse)
cce = C.one_hot(cc_ph, num_classes=self.c_dim, sparse_output=self.use_sparse)
q_processed = processed.clone(C.CloneMethod.share, {input_chars:qce, input_glove_words:qgw_ph, input_nonglove_words:qnw_ph})
c_processed = processed.clone(C.CloneMethod.share, {input_chars:cce, input_glove_words:cgw_ph, input_nonglove_words:cnw_ph})
return C.as_block(
C.combine([c_processed, q_processed]),
[(cgw_ph, cgw),(cnw_ph, cnw),(cc_ph, cc),(qgw_ph, qgw),(qnw_ph, qnw),(qc_ph, qc)],
'input_layer',
'input_layer')
def gated_attention_gru_layer(self, context, query):
q_processed = C.placeholder(shape=(2*self.hidden_dim,))
c_processed = C.placeholder(shape=(2*self.hidden_dim,))
#gate weight
Wg = C.parameter(shape=(4*self.hidden_dim, 4*self.hidden_dim))
att_gru = C.layers.GRU(2*self.hidden_dim)
attention_model = C.layers.AttentionModel(self.hidden_dim, name='attention_model')
@C.Function
def out_func0(att_input, enc_input):
enc_input2 = enc_input
@C.Function
def gru_with_attention(dh, x):
c_att = attention_model(att_input, x)
x = C.splice(x, c_att)
x = C.element_times(x, C.sigmoid(C.times(x, Wg)))
return att_gru(dh, x)
att_context = Recurrence(gru_with_attention)(enc_input2)
return att_context
att_context = out_func0(q_processed, c_processed)
return C.as_block(
att_context,
[(c_processed, context), (q_processed, query)],
'gated_attention_gru_layer',
'gated_attention_gru_layer')
def matching_attention_layer(self, attention_context):
att_context = C.placeholder(shape=(2*self.hidden_dim,))
#matching layer
matching_model = C.layers.AttentionModel(attention_dim=self.hidden_dim, name='attention_model')
#gate weight
Wg = C.parameter(shape=(2*self.hidden_dim, 2*self.hidden_dim))
#gru
att_gru = C.layers.GRU(self.hidden_dim)
@C.Function
def out_func1(att_input, enc_input):
enc_input2 = enc_input
@C.Function
def bigru_with_match(dh, x):
c_att = matching_model(att_input, dh)
x = C.splice(x, c_att)
x = C.element_times(x, C.sigmoid(C.times(x, Wg)))
return att_gru(dh, x)
return C.splice(C.layers.Recurrence(bigru_with_match)(enc_input2),
C.layers.Recurrence(bigru_with_match, go_backwards=True)(enc_input2),
name="bigru_with_match")
match_context = out_func1(att_context, att_context)
return C.as_block(
match_context,
[(att_context, attention_context)],
'matching_attention_layer',
'matching_attention_layer')
def output_layer(self, query, match_context):
q_processed = C.placeholder(shape=(2*self.hidden_dim,))
mat_context = C.placeholder(shape=(2*self.hidden_dim,))
#output layer
r_q = question_pooling(q_processed, 2*self.hidden_dim) #shape n*(2*self.hidden_dim)
p1_logits = attention_weight(mat_context, r_q, 2*self.hidden_dim)
attention_pool = C.sequence.reduce_sum(p1_logits * mat_context)
state = C.layers.GRU(2*self.hidden_dim)(attention_pool, r_q)
p2_logits = attention_weight(mat_context, state, 2*self.hidden_dim)
@C.Function
def start_ave_point(p1_logits, p2_logits, point):
@C.Function
def start_ave(last, now):
now = now + last - last
new_start = now * C.sequence.gather(p2_logits, point)
point = C.sequence.future_value(point)
return new_start
start_logits_ave = C.layers.Recurrence(start_ave)(p1_logits)
return start_logits_ave
point = C.sequence.is_first(p1_logits)
point = C.layers.Sequential([For(range(2), lambda: C.layers.Recurrence(C.plus))])(point)
point = C.greater(C.constant(16), point)
start_logits_ave = start_ave_point(p1_logits, p2_logits, point)
@C.Function
def end_ave_point(p1_logits, p2_logits, point):
@C.Function
def end_ave(last, now):
now = now + last - last
new_end = now * C.sequence.gather(p2_logits, point)
point = C.sequence.past_value(point)
return new_end
end_logits_ave = C.layers.Recurrence(end_ave, go_backwards=True)(p2_logits)
return end_logits_ave
point = C.sequence.is_last(p1_logits)
point = C.layers.Sequential([For(range(2), lambda: C.layers.Recurrence(C.plus, go_backwards=True))])(point)
point = C.greater(C.constant(16),point)
end_logits_ave = end_ave_point(p1_logits, p2_logits, point)
start_logits = seq_hardmax(start_logits_ave)
end_logits = seq_hardmax(end_logits_ave)
'''
start_logits = seq_hardmax(p1_logits)
end_logits = seq_hardmax(p2_logits)
'''
return C.as_block(
C.combine([start_logits, end_logits]),
[(q_processed, query), (mat_context, match_context)],
'output_layer',
'output_layer')
def model(self):
c = C.Axis.new_unique_dynamic_axis('c')
q = C.Axis.new_unique_dynamic_axis('q')
b = C.Axis.default_batch_axis()
cgw = C.input_variable(self.wg_dim, dynamic_axes=[b,c], is_sparse=self.use_sparse, name='cgw')
cnw = C.input_variable(self.wn_dim, dynamic_axes=[b,c], is_sparse=self.use_sparse, name='cnw')
qgw = C.input_variable(self.wg_dim, dynamic_axes=[b,q], is_sparse=self.use_sparse, name='qgw')
qnw = C.input_variable(self.wn_dim, dynamic_axes=[b,q], is_sparse=self.use_sparse, name='qnw')
cc = C.input_variable((1,self.word_size), dynamic_axes=[b,c], name='cc')
qc = C.input_variable((1,self.word_size), dynamic_axes=[b,q], name='qc')
ab = C.input_variable(self.a_dim, dynamic_axes=[b,c], name='ab')
ae = C.input_variable(self.a_dim, dynamic_axes=[b,c], name='ae')
#input layer
c_processed, q_processed = self.input_layer(cgw,cnw,cc,qgw,qnw,qc).outputs
# attention layer
att_context = self.gated_attention_gru_layer(c_processed, q_processed)
# self-matching attention layer
match_context = self.matching_attention_layer(att_context)
# output layer
start_logits, end_logits = self.output_layer(q_processed, match_context).outputs
# loss
start_loss = seq_loss(start_logits, ab)
end_loss = seq_loss(end_logits, ae)
#paper_loss = start_loss + end_loss
new_loss = all_spans_loss(start_logits, ab, end_logits, ae)
return C.combine([start_logits, end_logits]), new_loss
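# Illustrative sketch of driving PolyMath end to end.  'squad_config' is a
# hypothetical module name: the real project supplies its own config module
# exposing data_config/model_config dictionaries plus the vocabulary pickle
# and GloVe file referenced in __init__ above.
if __name__ == '__main__':
    polymath = PolyMath('squad_config')
    logits, loss = polymath.model()
    # `loss` is the criterion a CNTK trainer would minimize; `logits` holds
    # the combined start/end span predictions returned by output_layer().
    print(loss)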
|
py
|
1a57c9c33ba7c20dacd84269bd6d06d4bc1c2c3d
|
"""
Misc tools for implementing data structures
"""
import re
import collections
import numbers
import codecs
import csv
import types
from datetime import datetime, timedelta
from numpy.lib.format import read_array, write_array
import numpy as np
import pandas as pd
import pandas.algos as algos
import pandas.lib as lib
import pandas.tslib as tslib
from pandas import compat
from pandas.compat import StringIO, BytesIO, range, long, u, zip, map
from pandas.core.config import get_option
from pandas.core import array as pa
class PandasError(Exception):
pass
class SettingWithCopyError(ValueError):
pass
class SettingWithCopyWarning(Warning):
pass
class AmbiguousIndexError(PandasError, KeyError):
pass
_POSSIBLY_CAST_DTYPES = set([np.dtype(t).name
for t in ['O', 'int8',
'uint8', 'int16', 'uint16', 'int32',
'uint32', 'int64', 'uint64']])
_NS_DTYPE = np.dtype('M8[ns]')
_TD_DTYPE = np.dtype('m8[ns]')
_INT64_DTYPE = np.dtype(np.int64)
_DATELIKE_DTYPES = set([np.dtype(t) for t in ['M8[ns]', '<M8[ns]', '>M8[ns]',
'm8[ns]', '<m8[ns]', '>m8[ns]']])
# define abstract base classes to enable isinstance type checking on our
# objects
def create_pandas_abc_type(name, attr, comp):
@classmethod
def _check(cls, inst):
return getattr(inst, attr, None) in comp
dct = dict(__instancecheck__=_check,
__subclasscheck__=_check)
meta = type("ABCBase", (type,), dct)
return meta(name, tuple(), dct)
ABCSeries = create_pandas_abc_type("ABCSeries", "_typ", ("series",))
ABCDataFrame = create_pandas_abc_type("ABCDataFrame", "_typ", ("dataframe",))
ABCPanel = create_pandas_abc_type("ABCPanel", "_typ", ("panel",))
ABCSparseSeries = create_pandas_abc_type("ABCSparseSeries", "_subtyp",
('sparse_series',
'sparse_time_series'))
ABCSparseArray = create_pandas_abc_type("ABCSparseArray", "_subtyp",
('sparse_array', 'sparse_series'))
class _ABCGeneric(type):
def __instancecheck__(cls, inst):
return hasattr(inst, "_data")
ABCGeneric = _ABCGeneric("ABCGeneric", tuple(), {})
def bind_method(cls, name, func):
"""Bind a method to class, python 2 and python 3 compatible.
Parameters
----------
cls : type
class to receive bound method
name : basestring
name of method on class instance
func : function
function to be bound as method
Returns
-------
None
"""
# only python 2 has bound/unbound method issue
if not compat.PY3:
setattr(cls, name, types.MethodType(func, None, cls))
else:
setattr(cls, name, func)
class CategoricalDtypeType(type):
"""
the type of CategoricalDtype, this metaclass determines subclass ability
"""
def __init__(cls, name, bases, attrs):
pass
class CategoricalDtype(object):
__meta__ = CategoricalDtypeType
"""
A np.dtype duck-typed class, suitable for holding a custom categorical dtype.
THIS IS NOT A REAL NUMPY DTYPE, but essentially a sub-class of np.object
"""
name = 'category'
names = None
type = CategoricalDtypeType
subdtype = None
kind = 'O'
str = '|O08'
num = 100
shape = tuple()
itemsize = 8
base = np.dtype('O')
isbuiltin = 0
isnative = 0
def __unicode__(self):
return self.name
def __str__(self):
"""
Return a string representation for a particular Object
Invoked by str(df) in both py2/py3.
Yields Bytestring in Py2, Unicode String in py3.
"""
if compat.PY3:
return self.__unicode__()
return self.__bytes__()
def __bytes__(self):
"""
Return a string representation for a particular object.
Invoked by bytes(obj) in py3 only.
Yields a bytestring in both py2/py3.
"""
from pandas.core.config import get_option
encoding = get_option("display.encoding")
return self.__unicode__().encode(encoding, 'replace')
def __repr__(self):
"""
Return a string representation for a particular object.
Yields Bytestring in Py2, Unicode String in py3.
"""
return str(self)
def __hash__(self):
# make myself hashable
return hash(str(self))
def __eq__(self, other):
if isinstance(other, compat.string_types):
return other == self.name
return isinstance(other, CategoricalDtype)
def isnull(obj):
"""Detect missing values (NaN in numeric arrays, None/NaN in object arrays)
Parameters
----------
arr : ndarray or object value
Object to check for null-ness
Returns
-------
isnulled : array-like of bool or bool
Array or bool indicating whether an object is null or if an array is
given which of the element is null.
See also
--------
pandas.notnull: boolean inverse of pandas.isnull
"""
return _isnull(obj)
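# Illustrative sketch: a tiny demonstration of isnull() on the two array
# flavours it special-cases (float NaN detection vs. object None/NaN
# detection).  Example helper only; it is not called anywhere in this module.
def _example_isnull():
    print(isnull(np.array([1.0, np.nan, 3.0])))               # [False  True False]
    print(isnull(np.array([1, None, 'a'], dtype=object)))     # [False  True False]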
def _isnull_new(obj):
if lib.isscalar(obj):
return lib.checknull(obj)
# hack (for now) because MI registers as ndarray
elif isinstance(obj, pd.MultiIndex):
raise NotImplementedError("isnull is not defined for MultiIndex")
elif isinstance(obj, (ABCSeries, np.ndarray)):
return _isnull_ndarraylike(obj)
elif isinstance(obj, ABCGeneric):
return obj._constructor(obj._data.isnull(func=isnull))
elif isinstance(obj, list) or hasattr(obj, '__array__'):
return _isnull_ndarraylike(np.asarray(obj))
else:
return obj is None
def _isnull_old(obj):
"""Detect missing values. Treat None, NaN, INF, -INF as null.
Parameters
----------
arr: ndarray or object value
Returns
-------
boolean ndarray or boolean
"""
if lib.isscalar(obj):
return lib.checknull_old(obj)
# hack (for now) because MI registers as ndarray
elif isinstance(obj, pd.MultiIndex):
raise NotImplementedError("isnull is not defined for MultiIndex")
elif isinstance(obj, (ABCSeries, np.ndarray)):
return _isnull_ndarraylike_old(obj)
elif isinstance(obj, ABCGeneric):
return obj._constructor(obj._data.isnull(func=_isnull_old))
elif isinstance(obj, list) or hasattr(obj, '__array__'):
return _isnull_ndarraylike_old(np.asarray(obj))
else:
return obj is None
_isnull = _isnull_new
def _use_inf_as_null(key):
"""Option change callback for null/inf behaviour
Choose which replacement for numpy.isnan / -numpy.isfinite is used.
Parameters
----------
flag: bool
True means treat None, NaN, INF, -INF as null (old way),
False means None and NaN are null, but INF, -INF are not null
(new way).
Notes
-----
This approach to setting global module values is discussed and
approved here:
* http://stackoverflow.com/questions/4859217/
programmatically-creating-variables-in-python/4859312#4859312
"""
flag = get_option(key)
if flag:
globals()['_isnull'] = _isnull_old
else:
globals()['_isnull'] = _isnull_new
def _isnull_ndarraylike(obj):
values = getattr(obj, 'values', obj)
dtype = values.dtype
if dtype.kind in ('O', 'S', 'U'):
# Working around NumPy ticket 1542
shape = values.shape
if dtype.kind in ('S', 'U'):
result = np.zeros(values.shape, dtype=bool)
else:
result = np.empty(shape, dtype=bool)
vec = lib.isnullobj(values.ravel())
result[...] = vec.reshape(shape)
elif dtype in _DATELIKE_DTYPES:
# this is the NaT pattern
result = values.view('i8') == tslib.iNaT
else:
result = np.isnan(values)
# box
if isinstance(obj, ABCSeries):
from pandas import Series
result = Series(result, index=obj.index, name=obj.name, copy=False)
return result
def _isnull_ndarraylike_old(obj):
values = getattr(obj, 'values', obj)
dtype = values.dtype
if dtype.kind in ('O', 'S', 'U'):
# Working around NumPy ticket 1542
shape = values.shape
if values.dtype.kind in ('S', 'U'):
result = np.zeros(values.shape, dtype=bool)
else:
result = np.empty(shape, dtype=bool)
vec = lib.isnullobj_old(values.ravel())
result[:] = vec.reshape(shape)
elif dtype in _DATELIKE_DTYPES:
# this is the NaT pattern
result = values.view('i8') == tslib.iNaT
else:
result = ~np.isfinite(values)
# box
if isinstance(obj, ABCSeries):
from pandas import Series
result = Series(result, index=obj.index, name=obj.name, copy=False)
return result
def notnull(obj):
"""Replacement for numpy.isfinite / -numpy.isnan which is suitable for use
on object arrays.
Parameters
----------
arr : ndarray or object value
Object to check for *not*-null-ness
Returns
-------
isnulled : array-like of bool or bool
Array or bool indicating whether an object is *not* null or if an array
is given which of the element is *not* null.
See also
--------
pandas.isnull : boolean inverse of pandas.notnull
"""
res = isnull(obj)
if np.isscalar(res):
return not res
return ~res
def _is_null_datelike_scalar(other):
""" test whether the object is a null datelike, e.g. Nat
but guard against passing a non-scalar """
if other is pd.NaT or other is None:
return True
elif np.isscalar(other):
# a timedelta
if hasattr(other,'dtype'):
return other.view('i8') == tslib.iNaT
elif is_integer(other) and other == tslib.iNaT:
return True
return isnull(other)
return False
def array_equivalent(left, right):
"""
True if two arrays, left and right, have equal non-NaN elements, and NaNs in
corresponding locations. False otherwise. It is assumed that left and right
are NumPy arrays of the same dtype. The behavior of this function
(particularly with respect to NaNs) is not defined if the dtypes are
different.
Parameters
----------
left, right : ndarrays
Returns
-------
b : bool
Returns True if the arrays are equivalent.
Examples
--------
>>> array_equivalent(np.array([1, 2, nan]), np.array([1, 2, nan]))
True
>>> array_equivalent(np.array([1, nan, 2]), np.array([1, 2, nan]))
False
"""
left, right = np.asarray(left), np.asarray(right)
if left.shape != right.shape: return False
# NaNs occur only in object arrays, float or complex arrays.
if issubclass(left.dtype.type, np.object_):
return ((left == right) | (pd.isnull(left) & pd.isnull(right))).all()
if issubclass(left.dtype.type, (np.floating, np.complexfloating)):
return ((left == right) | (np.isnan(left) & np.isnan(right))).all()
return np.array_equal(left, right)
def _iterable_not_string(x):
return (isinstance(x, collections.Iterable) and
not isinstance(x, compat.string_types))
def flatten(l):
"""Flatten an arbitrarily nested sequence.
Parameters
----------
l : sequence
The non string sequence to flatten
Notes
-----
Strings are not treated as sequences to flatten.
Returns
-------
flattened : generator
"""
for el in l:
if _iterable_not_string(el):
for s in flatten(el):
yield s
else:
yield el
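# Illustrative sketch: flatten() walks arbitrarily nested iterables but leaves
# strings untouched (see _iterable_not_string above).  Example helper only;
# it is not called anywhere in this module.
def _example_flatten():
    print(list(flatten([1, [2, (3, 4)], 'abc'])))   # [1, 2, 3, 4, 'abc']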
def mask_missing(arr, values_to_mask):
"""
Return a masking array of same size/shape as arr
with entries equaling any member of values_to_mask set to True
"""
if not isinstance(values_to_mask, (list, np.ndarray)):
values_to_mask = [values_to_mask]
try:
values_to_mask = np.array(values_to_mask, dtype=arr.dtype)
except Exception:
values_to_mask = np.array(values_to_mask, dtype=object)
na_mask = isnull(values_to_mask)
nonna = values_to_mask[~na_mask]
mask = None
for x in nonna:
if mask is None:
mask = arr == x
# if x is a string and arr is not, then we get False and we must
# expand the mask to size arr.shape
if np.isscalar(mask):
mask = np.zeros(arr.shape, dtype=bool)
else:
mask |= arr == x
if na_mask.any():
if mask is None:
mask = isnull(arr)
else:
mask |= isnull(arr)
return mask
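# Illustrative sketch: mask_missing() flags positions equal to any requested
# value, with NaN entries in values_to_mask routed through isnull() rather
# than ==.  Example helper only; it is not called anywhere in this module.
def _example_mask_missing():
    arr = np.array([1.0, 2.0, np.nan, 4.0])
    print(mask_missing(arr, [2.0, np.nan]))   # [False  True  True False]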
def _pickle_array(arr):
arr = arr.view(np.ndarray)
buf = BytesIO()
write_array(buf, arr)
return buf.getvalue()
def _unpickle_array(bytes):
arr = read_array(BytesIO(bytes))
# All datetimes should be stored as M8[ns]. When unpickling with
# numpy1.6, it will read these as M8[us]. So this ensures all
# datetime64 types are read as M8[ns]
if is_datetime64_dtype(arr):
arr = arr.view(_NS_DTYPE)
return arr
def _view_wrapper(f, arr_dtype=None, out_dtype=None, fill_wrap=None):
def wrapper(arr, indexer, out, fill_value=np.nan):
if arr_dtype is not None:
arr = arr.view(arr_dtype)
if out_dtype is not None:
out = out.view(out_dtype)
if fill_wrap is not None:
fill_value = fill_wrap(fill_value)
f(arr, indexer, out, fill_value=fill_value)
return wrapper
def _convert_wrapper(f, conv_dtype):
def wrapper(arr, indexer, out, fill_value=np.nan):
arr = arr.astype(conv_dtype)
f(arr, indexer, out, fill_value=fill_value)
return wrapper
def _take_2d_multi_generic(arr, indexer, out, fill_value, mask_info):
# this is not ideal, performance-wise, but it's better than raising
# an exception (best to optimize in Cython to avoid getting here)
row_idx, col_idx = indexer
if mask_info is not None:
(row_mask, col_mask), (row_needs, col_needs) = mask_info
else:
row_mask = row_idx == -1
col_mask = col_idx == -1
row_needs = row_mask.any()
col_needs = col_mask.any()
if fill_value is not None:
if row_needs:
out[row_mask, :] = fill_value
if col_needs:
out[:, col_mask] = fill_value
for i in range(len(row_idx)):
u_ = row_idx[i]
for j in range(len(col_idx)):
v = col_idx[j]
out[i, j] = arr[u_, v]
def _take_nd_generic(arr, indexer, out, axis, fill_value, mask_info):
if mask_info is not None:
mask, needs_masking = mask_info
else:
mask = indexer == -1
needs_masking = mask.any()
if arr.dtype != out.dtype:
arr = arr.astype(out.dtype)
if arr.shape[axis] > 0:
arr.take(_ensure_platform_int(indexer), axis=axis, out=out)
if needs_masking:
outindexer = [slice(None)] * arr.ndim
outindexer[axis] = mask
out[tuple(outindexer)] = fill_value
_take_1d_dict = {
('int8', 'int8'): algos.take_1d_int8_int8,
('int8', 'int32'): algos.take_1d_int8_int32,
('int8', 'int64'): algos.take_1d_int8_int64,
('int8', 'float64'): algos.take_1d_int8_float64,
('int16', 'int16'): algos.take_1d_int16_int16,
('int16', 'int32'): algos.take_1d_int16_int32,
('int16', 'int64'): algos.take_1d_int16_int64,
('int16', 'float64'): algos.take_1d_int16_float64,
('int32', 'int32'): algos.take_1d_int32_int32,
('int32', 'int64'): algos.take_1d_int32_int64,
('int32', 'float64'): algos.take_1d_int32_float64,
('int64', 'int64'): algos.take_1d_int64_int64,
('int64', 'float64'): algos.take_1d_int64_float64,
('float32', 'float32'): algos.take_1d_float32_float32,
('float32', 'float64'): algos.take_1d_float32_float64,
('float64', 'float64'): algos.take_1d_float64_float64,
('object', 'object'): algos.take_1d_object_object,
('bool', 'bool'):
_view_wrapper(algos.take_1d_bool_bool, np.uint8, np.uint8),
('bool', 'object'):
_view_wrapper(algos.take_1d_bool_object, np.uint8, None),
('datetime64[ns]', 'datetime64[ns]'):
_view_wrapper(algos.take_1d_int64_int64, np.int64, np.int64, np.int64)
}
_take_2d_axis0_dict = {
('int8', 'int8'): algos.take_2d_axis0_int8_int8,
('int8', 'int32'): algos.take_2d_axis0_int8_int32,
('int8', 'int64'): algos.take_2d_axis0_int8_int64,
('int8', 'float64'): algos.take_2d_axis0_int8_float64,
('int16', 'int16'): algos.take_2d_axis0_int16_int16,
('int16', 'int32'): algos.take_2d_axis0_int16_int32,
('int16', 'int64'): algos.take_2d_axis0_int16_int64,
('int16', 'float64'): algos.take_2d_axis0_int16_float64,
('int32', 'int32'): algos.take_2d_axis0_int32_int32,
('int32', 'int64'): algos.take_2d_axis0_int32_int64,
('int32', 'float64'): algos.take_2d_axis0_int32_float64,
('int64', 'int64'): algos.take_2d_axis0_int64_int64,
('int64', 'float64'): algos.take_2d_axis0_int64_float64,
('float32', 'float32'): algos.take_2d_axis0_float32_float32,
('float32', 'float64'): algos.take_2d_axis0_float32_float64,
('float64', 'float64'): algos.take_2d_axis0_float64_float64,
('object', 'object'): algos.take_2d_axis0_object_object,
('bool', 'bool'):
_view_wrapper(algos.take_2d_axis0_bool_bool, np.uint8, np.uint8),
('bool', 'object'):
_view_wrapper(algos.take_2d_axis0_bool_object, np.uint8, None),
('datetime64[ns]', 'datetime64[ns]'):
_view_wrapper(algos.take_2d_axis0_int64_int64, np.int64, np.int64,
fill_wrap=np.int64)
}
_take_2d_axis1_dict = {
('int8', 'int8'): algos.take_2d_axis1_int8_int8,
('int8', 'int32'): algos.take_2d_axis1_int8_int32,
('int8', 'int64'): algos.take_2d_axis1_int8_int64,
('int8', 'float64'): algos.take_2d_axis1_int8_float64,
('int16', 'int16'): algos.take_2d_axis1_int16_int16,
('int16', 'int32'): algos.take_2d_axis1_int16_int32,
('int16', 'int64'): algos.take_2d_axis1_int16_int64,
('int16', 'float64'): algos.take_2d_axis1_int16_float64,
('int32', 'int32'): algos.take_2d_axis1_int32_int32,
('int32', 'int64'): algos.take_2d_axis1_int32_int64,
('int32', 'float64'): algos.take_2d_axis1_int32_float64,
('int64', 'int64'): algos.take_2d_axis1_int64_int64,
('int64', 'float64'): algos.take_2d_axis1_int64_float64,
('float32', 'float32'): algos.take_2d_axis1_float32_float32,
('float32', 'float64'): algos.take_2d_axis1_float32_float64,
('float64', 'float64'): algos.take_2d_axis1_float64_float64,
('object', 'object'): algos.take_2d_axis1_object_object,
('bool', 'bool'):
_view_wrapper(algos.take_2d_axis1_bool_bool, np.uint8, np.uint8),
('bool', 'object'):
_view_wrapper(algos.take_2d_axis1_bool_object, np.uint8, None),
('datetime64[ns]', 'datetime64[ns]'):
_view_wrapper(algos.take_2d_axis1_int64_int64, np.int64, np.int64,
fill_wrap=np.int64)
}
_take_2d_multi_dict = {
('int8', 'int8'): algos.take_2d_multi_int8_int8,
('int8', 'int32'): algos.take_2d_multi_int8_int32,
('int8', 'int64'): algos.take_2d_multi_int8_int64,
('int8', 'float64'): algos.take_2d_multi_int8_float64,
('int16', 'int16'): algos.take_2d_multi_int16_int16,
('int16', 'int32'): algos.take_2d_multi_int16_int32,
('int16', 'int64'): algos.take_2d_multi_int16_int64,
('int16', 'float64'): algos.take_2d_multi_int16_float64,
('int32', 'int32'): algos.take_2d_multi_int32_int32,
('int32', 'int64'): algos.take_2d_multi_int32_int64,
('int32', 'float64'): algos.take_2d_multi_int32_float64,
('int64', 'int64'): algos.take_2d_multi_int64_int64,
('int64', 'float64'): algos.take_2d_multi_int64_float64,
('float32', 'float32'): algos.take_2d_multi_float32_float32,
('float32', 'float64'): algos.take_2d_multi_float32_float64,
('float64', 'float64'): algos.take_2d_multi_float64_float64,
('object', 'object'): algos.take_2d_multi_object_object,
('bool', 'bool'):
_view_wrapper(algos.take_2d_multi_bool_bool, np.uint8, np.uint8),
('bool', 'object'):
_view_wrapper(algos.take_2d_multi_bool_object, np.uint8, None),
('datetime64[ns]', 'datetime64[ns]'):
_view_wrapper(algos.take_2d_multi_int64_int64, np.int64, np.int64,
fill_wrap=np.int64)
}
def _get_take_nd_function(ndim, arr_dtype, out_dtype, axis=0, mask_info=None):
if ndim <= 2:
tup = (arr_dtype.name, out_dtype.name)
if ndim == 1:
func = _take_1d_dict.get(tup, None)
elif ndim == 2:
if axis == 0:
func = _take_2d_axis0_dict.get(tup, None)
else:
func = _take_2d_axis1_dict.get(tup, None)
if func is not None:
return func
tup = (out_dtype.name, out_dtype.name)
if ndim == 1:
func = _take_1d_dict.get(tup, None)
elif ndim == 2:
if axis == 0:
func = _take_2d_axis0_dict.get(tup, None)
else:
func = _take_2d_axis1_dict.get(tup, None)
if func is not None:
func = _convert_wrapper(func, out_dtype)
return func
def func(arr, indexer, out, fill_value=np.nan):
_take_nd_generic(arr, indexer, out, axis=axis,
fill_value=fill_value, mask_info=mask_info)
return func
def take_nd(arr, indexer, axis=0, out=None, fill_value=np.nan,
mask_info=None, allow_fill=True):
"""
Specialized Cython take which sets NaN values in one pass
Parameters
----------
arr : ndarray
Input array
indexer : ndarray
1-D array of indices to take, subarrays corresponding to -1 value
indices are filled with fill_value
axis : int, default 0
Axis to take from
out : ndarray or None, default None
Optional output array, must be appropriate type to hold input and
fill_value together, if indexer has any -1 value entries; call
common._maybe_promote to determine this type for any fill_value
fill_value : any, default np.nan
Fill value to replace -1 values with
mask_info : tuple of (ndarray, boolean)
If provided, value should correspond to:
(indexer != -1, (indexer != -1).any())
If not provided, it will be computed internally if necessary
allow_fill : boolean, default True
If False, indexer is assumed to contain no -1 values so no filling
will be done. This short-circuits computation of a mask. Result is
undefined if allow_fill == False and -1 is present in indexer.
"""
if indexer is None:
indexer = np.arange(arr.shape[axis], dtype=np.int64)
dtype, fill_value = arr.dtype, arr.dtype.type()
else:
indexer = _ensure_int64(indexer)
if not allow_fill:
dtype, fill_value = arr.dtype, arr.dtype.type()
mask_info = None, False
else:
# check for promotion based on types only (do this first because
# it's faster than computing a mask)
dtype, fill_value = _maybe_promote(arr.dtype, fill_value)
if dtype != arr.dtype and (out is None or out.dtype != dtype):
# check if promotion is actually required based on indexer
if mask_info is not None:
mask, needs_masking = mask_info
else:
mask = indexer == -1
needs_masking = mask.any()
mask_info = mask, needs_masking
if needs_masking:
if out is not None and out.dtype != dtype:
raise TypeError('Incompatible type for fill_value')
else:
# if not, then depromote, set fill_value to dummy
# (it won't be used but we don't want the cython code
# to crash when trying to cast it to dtype)
dtype, fill_value = arr.dtype, arr.dtype.type()
flip_order = False
if arr.ndim == 2:
if arr.flags.f_contiguous:
flip_order = True
if flip_order:
arr = arr.T
axis = arr.ndim - axis - 1
if out is not None:
out = out.T
# at this point, it's guaranteed that dtype can hold both the arr values
# and the fill_value
if out is None:
out_shape = list(arr.shape)
out_shape[axis] = len(indexer)
out_shape = tuple(out_shape)
if arr.flags.f_contiguous and axis == arr.ndim - 1:
# minor tweak that can make an order-of-magnitude difference
# for dataframes initialized directly from 2-d ndarrays
# (s.t. df.values is c-contiguous and df._data.blocks[0] is its
# f-contiguous transpose)
out = np.empty(out_shape, dtype=dtype, order='F')
else:
out = np.empty(out_shape, dtype=dtype)
func = _get_take_nd_function(arr.ndim, arr.dtype, out.dtype,
axis=axis, mask_info=mask_info)
func(arr, indexer, out, fill_value)
if flip_order:
out = out.T
return out
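# Illustrative sketch: when the indexer contains -1 and a NaN fill is needed,
# take_nd() promotes the output dtype, so taking from an int64 array with a
# missing position yields float64.  Example helper only; not used elsewhere.
def _example_take_nd():
    arr = np.array([10, 20, 30], dtype=np.int64)
    out = take_nd(arr, np.array([0, -1, 2]), fill_value=np.nan)
    print(out)          # [ 10.  nan  30.]
    print(out.dtype)    # float64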
take_1d = take_nd
def take_2d_multi(arr, indexer, out=None, fill_value=np.nan,
mask_info=None, allow_fill=True):
"""
Specialized Cython take which sets NaN values in one pass
"""
if indexer is None or (indexer[0] is None and indexer[1] is None):
row_idx = np.arange(arr.shape[0], dtype=np.int64)
col_idx = np.arange(arr.shape[1], dtype=np.int64)
indexer = row_idx, col_idx
dtype, fill_value = arr.dtype, arr.dtype.type()
else:
row_idx, col_idx = indexer
if row_idx is None:
row_idx = np.arange(arr.shape[0], dtype=np.int64)
else:
row_idx = _ensure_int64(row_idx)
if col_idx is None:
col_idx = np.arange(arr.shape[1], dtype=np.int64)
else:
col_idx = _ensure_int64(col_idx)
indexer = row_idx, col_idx
if not allow_fill:
dtype, fill_value = arr.dtype, arr.dtype.type()
mask_info = None, False
else:
# check for promotion based on types only (do this first because
# it's faster than computing a mask)
dtype, fill_value = _maybe_promote(arr.dtype, fill_value)
if dtype != arr.dtype and (out is None or out.dtype != dtype):
# check if promotion is actually required based on indexer
if mask_info is not None:
(row_mask, col_mask), (row_needs, col_needs) = mask_info
else:
row_mask = row_idx == -1
col_mask = col_idx == -1
row_needs = row_mask.any()
col_needs = col_mask.any()
mask_info = (row_mask, col_mask), (row_needs, col_needs)
if row_needs or col_needs:
if out is not None and out.dtype != dtype:
raise TypeError('Incompatible type for fill_value')
else:
# if not, then depromote, set fill_value to dummy
# (it won't be used but we don't want the cython code
# to crash when trying to cast it to dtype)
dtype, fill_value = arr.dtype, arr.dtype.type()
# at this point, it's guaranteed that dtype can hold both the arr values
# and the fill_value
if out is None:
out_shape = len(row_idx), len(col_idx)
out = np.empty(out_shape, dtype=dtype)
func = _take_2d_multi_dict.get((arr.dtype.name, out.dtype.name), None)
if func is None and arr.dtype != out.dtype:
func = _take_2d_multi_dict.get((out.dtype.name, out.dtype.name), None)
if func is not None:
func = _convert_wrapper(func, out.dtype)
if func is None:
def func(arr, indexer, out, fill_value=np.nan):
_take_2d_multi_generic(arr, indexer, out,
fill_value=fill_value, mask_info=mask_info)
func(arr, indexer, out=out, fill_value=fill_value)
return out
_diff_special = {
'float64': algos.diff_2d_float64,
'float32': algos.diff_2d_float32,
'int64': algos.diff_2d_int64,
'int32': algos.diff_2d_int32,
'int16': algos.diff_2d_int16,
'int8': algos.diff_2d_int8,
}
def diff(arr, n, axis=0):
""" difference of n between self,
analogous to s - s.shift(n) """
n = int(n)
dtype = arr.dtype
na = np.nan
if is_timedelta64_dtype(arr) or is_datetime64_dtype(arr):
dtype = 'timedelta64[ns]'
arr = arr.view('i8')
na = tslib.iNaT
elif issubclass(dtype.type, np.integer):
dtype = np.float64
elif issubclass(dtype.type, np.bool_):
dtype = np.object_
out_arr = np.empty(arr.shape, dtype=dtype)
na_indexer = [slice(None)] * arr.ndim
na_indexer[axis] = slice(None, n) if n >= 0 else slice(n, None)
out_arr[tuple(na_indexer)] = na
if arr.ndim == 2 and arr.dtype.name in _diff_special:
f = _diff_special[arr.dtype.name]
f(arr, out_arr, n, axis)
else:
res_indexer = [slice(None)] * arr.ndim
res_indexer[axis] = slice(n, None) if n >= 0 else slice(None, n)
res_indexer = tuple(res_indexer)
lag_indexer = [slice(None)] * arr.ndim
lag_indexer[axis] = slice(None, -n) if n > 0 else slice(-n, None)
lag_indexer = tuple(lag_indexer)
# need to make sure that we account for na for datelike/timedelta
# we don't actually want to subtract these i8 numbers
if dtype == 'timedelta64[ns]':
res = arr[res_indexer]
lag = arr[lag_indexer]
mask = (arr[res_indexer] == na) | (arr[lag_indexer] == na)
if mask.any():
res = res.copy()
res[mask] = 0
lag = lag.copy()
lag[mask] = 0
result = res - lag
result[mask] = na
out_arr[res_indexer] = result
else:
out_arr[res_indexer] = arr[res_indexer] - arr[lag_indexer]
return out_arr
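# Illustrative sketch: diff() leaves the first n positions as NaN and upcasts
# integer input to float so the NaN padding is representable.  Example helper
# only; it is not called anywhere in this module.
def _example_diff():
    print(diff(np.array([1, 3, 6, 10]), 1))   # [ nan   2.   3.   4.]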
def _coerce_to_dtypes(result, dtypes):
""" given a dtypes and a result set, coerce the result elements to the
dtypes
"""
if len(result) != len(dtypes):
raise AssertionError("_coerce_to_dtypes requires equal len arrays")
from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type
def conv(r, dtype):
try:
if isnull(r):
pass
elif dtype == _NS_DTYPE:
r = lib.Timestamp(r)
elif dtype == _TD_DTYPE:
r = _coerce_scalar_to_timedelta_type(r)
elif dtype == np.bool_:
# messy. non 0/1 integers do not get converted.
if is_integer(r) and r not in [0,1]:
return int(r)
r = bool(r)
elif dtype.kind == 'f':
r = float(r)
elif dtype.kind == 'i':
r = int(r)
except:
pass
return r
return [conv(r, dtype) for r, dtype in zip(result, dtypes)]
def _infer_dtype_from_scalar(val):
""" interpret the dtype from a scalar, upcast floats and ints
return the new value and the dtype """
dtype = np.object_
# a 1-element ndarray
if isinstance(val, pa.Array):
if val.ndim != 0:
raise ValueError(
"invalid ndarray passed to _infer_dtype_from_scalar")
dtype = val.dtype
val = val.item()
elif isinstance(val, compat.string_types):
# If we create an empty array using a string to infer
# the dtype, NumPy will only allocate one character per entry
# so this is kind of bad. Alternatively we could use np.repeat
# instead of np.empty (but then you still don't want things
# coming out as np.str_!)
dtype = np.object_
elif isinstance(val, (np.datetime64, datetime)) and getattr(val,'tz',None) is None:
val = lib.Timestamp(val).value
dtype = np.dtype('M8[ns]')
elif isinstance(val, (np.timedelta64, timedelta)):
val = tslib.convert_to_timedelta(val,'ns')
dtype = np.dtype('m8[ns]')
elif is_bool(val):
dtype = np.bool_
# provide implicit upcast on scalars
elif is_integer(val):
dtype = np.int64
elif is_float(val):
dtype = np.float64
elif is_complex(val):
dtype = np.complex_
return dtype, val
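# Illustrative sketch: _infer_dtype_from_scalar() widens plain Python scalars
# to the broadest sensible NumPy dtype.  Example helper only; it is not
# called anywhere in this module.
def _example_infer_dtype_from_scalar():
    print(_infer_dtype_from_scalar(3))      # -> (np.int64, 3)
    print(_infer_dtype_from_scalar(3.0))    # -> (np.float64, 3.0)
    print(_infer_dtype_from_scalar('a'))    # -> (np.object_, 'a')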
def _maybe_cast_scalar(dtype, value):
""" if we a scalar value and are casting to a dtype that needs nan -> NaT
conversion
"""
if np.isscalar(value) and dtype in _DATELIKE_DTYPES and isnull(value):
return tslib.iNaT
return value
def _maybe_promote(dtype, fill_value=np.nan):
# if we passed an array here, determine the fill value by dtype
if isinstance(fill_value, np.ndarray):
if issubclass(fill_value.dtype.type, (np.datetime64, np.timedelta64)):
fill_value = tslib.iNaT
else:
# we need to change to object type as our
# fill_value is of object type
if fill_value.dtype == np.object_:
dtype = np.dtype(np.object_)
fill_value = np.nan
# returns tuple of (dtype, fill_value)
if issubclass(dtype.type, (np.datetime64, np.timedelta64)):
# for now: refuse to upcast datetime64
# (this is because datetime64 will not implicitly upconvert
# to object correctly as of numpy 1.6.1)
if isnull(fill_value):
fill_value = tslib.iNaT
else:
if issubclass(dtype.type, np.datetime64):
try:
fill_value = lib.Timestamp(fill_value).value
except:
# the proper thing to do here would probably be to upcast
# to object (but numpy 1.6.1 doesn't do this properly)
fill_value = tslib.iNaT
else:
fill_value = tslib.iNaT
elif is_float(fill_value):
if issubclass(dtype.type, np.bool_):
dtype = np.object_
elif issubclass(dtype.type, np.integer):
dtype = np.float64
elif is_bool(fill_value):
if not issubclass(dtype.type, np.bool_):
dtype = np.object_
elif is_integer(fill_value):
if issubclass(dtype.type, np.bool_):
dtype = np.object_
elif issubclass(dtype.type, np.integer):
# upcast to prevent overflow
arr = np.asarray(fill_value)
if arr != arr.astype(dtype):
dtype = arr.dtype
elif is_complex(fill_value):
if issubclass(dtype.type, np.bool_):
dtype = np.object_
elif issubclass(dtype.type, (np.integer, np.floating)):
dtype = np.complex128
else:
dtype = np.object_
# in case we have a string that looked like a number
if issubclass(np.dtype(dtype).type, compat.string_types):
dtype = np.object_
return dtype, fill_value
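# Illustrative sketch: _maybe_promote() widens a dtype just enough to hold the
# requested fill value, e.g. int64 + NaN must become float64 and bool + an
# integer falls back to object.  Example helper only; not used elsewhere.
def _example_maybe_promote():
    print(_maybe_promote(np.dtype('int64'), np.nan))   # -> (np.float64, nan)
    print(_maybe_promote(np.dtype('bool'), 5))         # -> (np.object_, 5)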
def _maybe_upcast_putmask(result, mask, other, dtype=None, change=None):
""" a safe version of put mask that (potentially upcasts the result
return the result
if change is not None, then MUTATE the change (and change the dtype)
return a changed flag
"""
if mask.any():
other = _maybe_cast_scalar(result.dtype, other)
def changeit():
# try to directly set by expanding our array to full
# length of the boolean
try:
om = other[mask]
om_at = om.astype(result.dtype)
if (om == om_at).all():
new_other = result.values.copy()
new_other[mask] = om_at
result[:] = new_other
return result, False
except:
pass
# we are forced to change the dtype of the result as the input
# isn't compatible
r, fill_value = _maybe_upcast(
result, fill_value=other, dtype=dtype, copy=True)
np.putmask(r, mask, other)
# we need to actually change the dtype here
if change is not None:
# if we are trying to do something unsafe
# like put a bigger dtype in a smaller one, use the smaller one
# pragma: no cover
if change.dtype.itemsize < r.dtype.itemsize:
raise AssertionError(
"cannot change dtype of input to smaller size")
change.dtype = r.dtype
change[:] = r
return r, True
# we want to decide whether putmask will work
# if we have nans in the False portion of our mask then we (possibly)
# need to upcast; otherwise we DON'T want to upcast (e.g. if we
# have values, say integers, in the success portion then it's ok not
# to upcast)
new_dtype, fill_value = _maybe_promote(result.dtype, other)
if new_dtype != result.dtype:
# we have a scalar or len 0 ndarray
# and its nan and we are changing some values
if (np.isscalar(other) or
(isinstance(other, np.ndarray) and other.ndim < 1)):
if isnull(other):
return changeit()
# we have an ndarray and the masking has nans in it
else:
if isnull(other[mask]).any():
return changeit()
try:
np.putmask(result, mask, other)
except:
return changeit()
return result, False
def _maybe_upcast(values, fill_value=np.nan, dtype=None, copy=False):
""" provide explict type promotion and coercion
Parameters
----------
values : the ndarray that we want to maybe upcast
fill_value : what we want to fill with
dtype : if None, then use the dtype of the values, else coerce to this type
copy : if True always make a copy even if no upcast is required
"""
if dtype is None:
dtype = values.dtype
new_dtype, fill_value = _maybe_promote(dtype, fill_value)
if new_dtype != values.dtype:
values = values.astype(new_dtype)
elif copy:
values = values.copy()
return values, fill_value
def _possibly_cast_item(obj, item, dtype):
chunk = obj[item]
if chunk.values.dtype != dtype:
if dtype in (np.object_, np.bool_):
obj[item] = chunk.astype(np.object_)
elif not issubclass(dtype, (np.integer, np.bool_)): # pragma: no cover
raise ValueError("Unexpected dtype encountered: %s" % dtype)
def _possibly_downcast_to_dtype(result, dtype):
""" try to cast to the specified dtype (e.g. convert back to bool/int
or could be an astype of float64->float32
"""
if np.isscalar(result):
return result
trans = lambda x: x
if isinstance(dtype, compat.string_types):
if dtype == 'infer':
inferred_type = lib.infer_dtype(_ensure_object(result.ravel()))
if inferred_type == 'boolean':
dtype = 'bool'
elif inferred_type == 'integer':
dtype = 'int64'
elif inferred_type == 'datetime64':
dtype = 'datetime64[ns]'
elif inferred_type == 'timedelta64':
dtype = 'timedelta64[ns]'
# try to upcast here
elif inferred_type == 'floating':
dtype = 'int64'
if issubclass(result.dtype.type, np.number):
trans = lambda x: x.round()
else:
dtype = 'object'
if isinstance(dtype, compat.string_types):
dtype = np.dtype(dtype)
try:
# don't allow upcasts here (except if empty)
if dtype.kind == result.dtype.kind:
if result.dtype.itemsize <= dtype.itemsize and np.prod(result.shape):
return result
if issubclass(dtype.type, np.floating):
return result.astype(dtype)
elif dtype == np.bool_ or issubclass(dtype.type, np.integer):
# if we don't have any elements, just astype it
if not np.prod(result.shape):
return trans(result).astype(dtype)
# do a test on the first element, if it fails then we are done
r = result.ravel()
arr = np.array([r[0]])
if not np.allclose(arr, trans(arr).astype(dtype)):
return result
# a comparable, e.g. a Decimal may slip in here
elif not isinstance(r[0], (np.integer, np.floating, np.bool, int,
float, bool)):
return result
if (issubclass(result.dtype.type, (np.object_, np.number)) and
notnull(result).all()):
new_result = trans(result).astype(dtype)
try:
if np.allclose(new_result, result):
return new_result
except:
# comparison of an object dtype with a number type could
# hit here
if (new_result == result).all():
return new_result
# a datetimelike
elif dtype.kind in ['M','m'] and result.dtype.kind in ['i']:
try:
result = result.astype(dtype)
except:
pass
except:
pass
return result
def _lcd_dtypes(a_dtype, b_dtype):
""" return the lcd dtype to hold these types """
if is_datetime64_dtype(a_dtype) or is_datetime64_dtype(b_dtype):
return _NS_DTYPE
elif is_timedelta64_dtype(a_dtype) or is_timedelta64_dtype(b_dtype):
return _TD_DTYPE
elif is_complex_dtype(a_dtype):
if is_complex_dtype(b_dtype):
return a_dtype
return np.float64
elif is_integer_dtype(a_dtype):
if is_integer_dtype(b_dtype):
if a_dtype.itemsize == b_dtype.itemsize:
return a_dtype
return np.int64
return np.float64
elif is_float_dtype(a_dtype):
if is_float_dtype(b_dtype):
if a_dtype.itemsize == b_dtype.itemsize:
return a_dtype
else:
return np.float64
elif is_integer_dtype(b_dtype):
return np.float64
return np.object
def _fill_zeros(result, x, y, name, fill):
"""
if this is a reversed op, then flip x, y;
if we have an integer value (or array) in y
and it contains 0's, fill them with the fill value and
return the result,
masking the nan's from x
"""
if fill is not None:
if name.startswith('r'):
x,y = y,x
if not isinstance(y, np.ndarray):
dtype, value = _infer_dtype_from_scalar(y)
y = pa.empty(result.shape, dtype=dtype)
y.fill(value)
if is_integer_dtype(y):
if (y.ravel() == 0).any():
shape = result.shape
result = result.ravel().astype('float64')
# GH 7325, mask and nans must be broadcastable
signs = np.sign(result)
mask = ((y == 0) & ~np.isnan(x)).ravel()
np.putmask(result, mask, fill)
# if we have a fill of inf, then sign it
# correctly
# GH 6178
if np.isinf(fill):
np.putmask(result, (signs < 0) & mask, -fill)
result = result.reshape(shape)
return result
def _interp_wrapper(f, wrap_dtype, na_override=None):
def wrapper(arr, mask, limit=None):
view = arr.view(wrap_dtype)
f(view, mask, limit=limit)
return wrapper
_pad_1d_datetime = _interp_wrapper(algos.pad_inplace_int64, np.int64)
_pad_2d_datetime = _interp_wrapper(algos.pad_2d_inplace_int64, np.int64)
_backfill_1d_datetime = _interp_wrapper(algos.backfill_inplace_int64,
np.int64)
_backfill_2d_datetime = _interp_wrapper(algos.backfill_2d_inplace_int64,
np.int64)
def pad_1d(values, limit=None, mask=None, dtype=None):
if dtype is None:
dtype = values.dtype
_method = None
if is_float_dtype(values):
_method = getattr(algos, 'pad_inplace_%s' % dtype.name, None)
elif dtype in _DATELIKE_DTYPES or is_datetime64_dtype(values):
_method = _pad_1d_datetime
elif is_integer_dtype(values):
values = _ensure_float64(values)
_method = algos.pad_inplace_float64
elif values.dtype == np.object_:
_method = algos.pad_inplace_object
if _method is None:
raise ValueError('Invalid dtype for pad_1d [%s]' % dtype.name)
if mask is None:
mask = isnull(values)
mask = mask.view(np.uint8)
_method(values, mask, limit=limit)
return values
def backfill_1d(values, limit=None, mask=None, dtype=None):
if dtype is None:
dtype = values.dtype
_method = None
if is_float_dtype(values):
_method = getattr(algos, 'backfill_inplace_%s' % dtype.name, None)
elif dtype in _DATELIKE_DTYPES or is_datetime64_dtype(values):
_method = _backfill_1d_datetime
elif is_integer_dtype(values):
values = _ensure_float64(values)
_method = algos.backfill_inplace_float64
elif values.dtype == np.object_:
_method = algos.backfill_inplace_object
if _method is None:
raise ValueError('Invalid dtype for backfill_1d [%s]' % dtype.name)
if mask is None:
mask = isnull(values)
mask = mask.view(np.uint8)
_method(values, mask, limit=limit)
return values
def pad_2d(values, limit=None, mask=None, dtype=None):
if dtype is None:
dtype = values.dtype
_method = None
if is_float_dtype(values):
_method = getattr(algos, 'pad_2d_inplace_%s' % dtype.name, None)
elif dtype in _DATELIKE_DTYPES or is_datetime64_dtype(values):
_method = _pad_2d_datetime
elif is_integer_dtype(values):
values = _ensure_float64(values)
_method = algos.pad_2d_inplace_float64
elif values.dtype == np.object_:
_method = algos.pad_2d_inplace_object
if _method is None:
raise ValueError('Invalid dtype for pad_2d [%s]' % dtype.name)
if mask is None:
mask = isnull(values)
mask = mask.view(np.uint8)
if np.all(values.shape):
_method(values, mask, limit=limit)
else:
# for test coverage
pass
return values
def backfill_2d(values, limit=None, mask=None, dtype=None):
if dtype is None:
dtype = values.dtype
_method = None
if is_float_dtype(values):
_method = getattr(algos, 'backfill_2d_inplace_%s' % dtype.name, None)
elif dtype in _DATELIKE_DTYPES or is_datetime64_dtype(values):
_method = _backfill_2d_datetime
elif is_integer_dtype(values):
values = _ensure_float64(values)
_method = algos.backfill_2d_inplace_float64
elif values.dtype == np.object_:
_method = algos.backfill_2d_inplace_object
if _method is None:
raise ValueError('Invalid dtype for backfill_2d [%s]' % dtype.name)
if mask is None:
mask = isnull(values)
mask = mask.view(np.uint8)
if np.all(values.shape):
_method(values, mask, limit=limit)
else:
# for test coverage
pass
return values
def _clean_interp_method(method, order=None, **kwargs):
valid = ['linear', 'time', 'index', 'values', 'nearest', 'zero', 'slinear',
'quadratic', 'cubic', 'barycentric', 'polynomial',
'krogh', 'piecewise_polynomial',
'pchip', 'spline']
if method in ('spline', 'polynomial') and order is None:
raise ValueError("You must specify the order of the spline or "
"polynomial.")
if method not in valid:
raise ValueError("method must be one of {0}."
"Got '{1}' instead.".format(valid, method))
return method
def interpolate_1d(xvalues, yvalues, method='linear', limit=None,
fill_value=None, bounds_error=False, **kwargs):
"""
Logic for the 1-d interpolation. The result should be 1-d, inputs
xvalues and yvalues will each be 1-d arrays of the same length.
Bounds_error is currently hardcoded to False since non-scipy ones don't
take it as an argument.
"""
# Treat the original, non-scipy methods first.
invalid = isnull(yvalues)
valid = ~invalid
valid_y = yvalues[valid]
valid_x = xvalues[valid]
new_x = xvalues[invalid]
if method == 'time':
if not getattr(xvalues, 'is_all_dates', None):
# if not issubclass(xvalues.dtype.type, np.datetime64):
raise ValueError('time-weighted interpolation only works '
'on Series or DataFrames with a '
'DatetimeIndex')
method = 'values'
def _interp_limit(invalid, limit):
"""mask off values that won't be filled since they exceed the limit"""
all_nans = np.where(invalid)[0]
violate = [invalid[x:x + limit + 1] for x in all_nans]
violate = np.array([x.all() & (x.size > limit) for x in violate])
return all_nans[violate] + limit
xvalues = getattr(xvalues, 'values', xvalues)
yvalues = getattr(yvalues, 'values', yvalues)
if limit:
violate_limit = _interp_limit(invalid, limit)
if valid.any():
firstIndex = valid.argmax()
valid = valid[firstIndex:]
invalid = invalid[firstIndex:]
result = yvalues.copy()
if valid.all():
return yvalues
else:
# have to call np.array(xvalues) since xvalues could be an Index
# which can't be mutated
result = np.empty_like(np.array(xvalues), dtype=np.float64)
result.fill(np.nan)
return result
if method in ['linear', 'time', 'index', 'values']:
if method in ('values', 'index'):
inds = np.asarray(xvalues)
# hack for DatetimeIndex, #1646
if issubclass(inds.dtype.type, np.datetime64):
inds = inds.view(pa.int64)
if inds.dtype == np.object_:
inds = lib.maybe_convert_objects(inds)
else:
inds = xvalues
inds = inds[firstIndex:]
result[firstIndex:][invalid] = np.interp(inds[invalid], inds[valid],
yvalues[firstIndex:][valid])
if limit:
result[violate_limit] = np.nan
return result
sp_methods = ['nearest', 'zero', 'slinear', 'quadratic', 'cubic',
'barycentric', 'krogh', 'spline', 'polynomial',
'piecewise_polynomial', 'pchip']
if method in sp_methods:
new_x = new_x[firstIndex:]
xvalues = xvalues[firstIndex:]
result[firstIndex:][invalid] = _interpolate_scipy_wrapper(
valid_x, valid_y, new_x, method=method, fill_value=fill_value,
bounds_error=bounds_error, **kwargs)
if limit:
result[violate_limit] = np.nan
return result
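# Hedged usage sketch (assumes plain numpy inputs; not executed here, and the
# repr spacing depends on the numpy version):
# >>> x = np.arange(5, dtype=float)
# >>> y = np.array([0., np.nan, 2., np.nan, 4.])
# >>> interpolate_1d(x, y, method='linear')
# array([ 0.,  1.,  2.,  3.,  4.])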
def _interpolate_scipy_wrapper(x, y, new_x, method, fill_value=None,
bounds_error=False, order=None, **kwargs):
"""
passed off to scipy.interpolate.interp1d. method is scipy's kind.
Returns an array interpolated at new_x. Add any new methods to
the list in _clean_interp_method
"""
try:
from scipy import interpolate
from pandas import DatetimeIndex
except ImportError:
raise ImportError('{0} interpolation requires Scipy'.format(method))
new_x = np.asarray(new_x)
# ignores some kwargs that could be passed along.
alt_methods = {
'barycentric': interpolate.barycentric_interpolate,
'krogh': interpolate.krogh_interpolate,
'piecewise_polynomial': interpolate.piecewise_polynomial_interpolate,
}
if getattr(x, 'is_all_dates', False):
# GH 5975, scipy.interp1d can't handle datetime64s
x, new_x = x.values.astype('i8'), new_x.astype('i8')
try:
alt_methods['pchip'] = interpolate.pchip_interpolate
except AttributeError:
if method == 'pchip':
raise ImportError("Your version of scipy does not support "
"PCHIP interpolation.")
interp1d_methods = ['nearest', 'zero', 'slinear', 'quadratic', 'cubic',
'polynomial']
if method in interp1d_methods:
if method == 'polynomial':
method = order
terp = interpolate.interp1d(x, y, kind=method, fill_value=fill_value,
bounds_error=bounds_error)
new_y = terp(new_x)
elif method == 'spline':
terp = interpolate.UnivariateSpline(x, y, k=order)
new_y = terp(new_x)
else:
# GH 7295: need to be able to write for some reason
# in some circumstances: check all three
if not x.flags.writeable:
x = x.copy()
if not y.flags.writeable:
y = y.copy()
if not new_x.flags.writeable:
new_x = new_x.copy()
method = alt_methods[method]
new_y = method(x, y, new_x)
return new_y
def interpolate_2d(values, method='pad', axis=0, limit=None, fill_value=None, dtype=None):
""" perform an actual interpolation of values, values will be make 2-d if
needed fills inplace, returns the result
"""
transf = (lambda x: x) if axis == 0 else (lambda x: x.T)
# reshape a 1 dim if needed
ndim = values.ndim
if values.ndim == 1:
if axis != 0: # pragma: no cover
raise AssertionError("cannot interpolate on a ndim == 1 with "
"axis != 0")
values = values.reshape(tuple((1,) + values.shape))
if fill_value is None:
mask = None
else: # todo create faster fill func without masking
mask = mask_missing(transf(values), fill_value)
method = _clean_fill_method(method)
if method == 'pad':
values = transf(pad_2d(transf(values), limit=limit, mask=mask, dtype=dtype))
else:
values = transf(backfill_2d(transf(values), limit=limit, mask=mask, dtype=dtype))
# reshape back
if ndim == 1:
values = values[0]
return values
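# Hedged usage sketch (not executed here): a 1-d float array is reshaped to
# (1, n), forward-filled in place by pad_2d, and unwrapped again.
# >>> interpolate_2d(np.array([1., np.nan, np.nan, 4.]), method='pad')
# array([ 1.,  1.,  1.,  4.])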
def _consensus_name_attr(objs):
name = objs[0].name
for obj in objs[1:]:
if obj.name != name:
return None
return name
_fill_methods = {'pad': pad_1d, 'backfill': backfill_1d}
def _get_fill_func(method):
method = _clean_fill_method(method)
return _fill_methods[method]
#----------------------------------------------------------------------
# Lots of little utilities
def _validate_date_like_dtype(dtype):
try:
typ = np.datetime_data(dtype)[0]
except ValueError as e:
raise TypeError('%s' % e)
if typ != 'generic' and typ != 'ns':
raise ValueError('%r is too specific of a frequency, try passing %r'
% (dtype.name, dtype.type.__name__))
def _invalidate_string_dtypes(dtype_set):
"""Change string like dtypes to object for ``DataFrame.select_dtypes()``."""
non_string_dtypes = dtype_set - _string_dtypes
if non_string_dtypes != dtype_set:
raise TypeError("string dtypes are not allowed, use 'object' instead")
def _get_dtype_from_object(dtype):
"""Get a numpy dtype.type-style object.
Notes
-----
If nothing can be found, returns ``object``.
"""
# type object from a dtype
if isinstance(dtype, type) and issubclass(dtype, np.generic):
return dtype
elif isinstance(dtype, np.dtype): # dtype object
try:
_validate_date_like_dtype(dtype)
except TypeError:
# should still pass if we don't have a datelike
pass
return dtype.type
elif isinstance(dtype, compat.string_types):
if dtype == 'datetime' or dtype == 'timedelta':
dtype += '64'
elif dtype == 'category':
return CategoricalDtypeType
try:
return _get_dtype_from_object(getattr(np, dtype))
except AttributeError:
# handles cases like _get_dtype(int)
# i.e., python objects that are valid dtypes (unlike user-defined
# types, in general)
pass
return _get_dtype_from_object(np.dtype(dtype))
def _get_info_slice(obj, indexer):
"""Slice the info axis of `obj` with `indexer`."""
if not hasattr(obj, '_info_axis_number'):
raise TypeError('object of type %r has no info axis' %
type(obj).__name__)
slices = [slice(None)] * obj.ndim
slices[obj._info_axis_number] = indexer
return tuple(slices)
def _maybe_box(indexer, values, obj, key):
# if we have multiples coming back, box em
if isinstance(values, np.ndarray):
return obj[indexer.get_loc(key)]
# return the value
return values
def _maybe_box_datetimelike(value):
# turn a datetime like into a Timestamp/timedelta as needed
if isinstance(value, np.datetime64):
value = tslib.Timestamp(value)
elif isinstance(value, np.timedelta64):
pass
return value
_values_from_object = lib.values_from_object
def _possibly_convert_objects(values, convert_dates=True,
convert_numeric=True,
convert_timedeltas=True):
""" if we have an object dtype, try to coerce dates and/or numbers """
# if we have passed in a list or scalar
if isinstance(values, (list, tuple)):
values = np.array(values, dtype=np.object_)
if not hasattr(values, 'dtype'):
values = np.array([values], dtype=np.object_)
# convert dates
if convert_dates and values.dtype == np.object_:
# we take an aggressive stance and convert to datetime64[ns]
if convert_dates == 'coerce':
new_values = _possibly_cast_to_datetime(
values, 'M8[ns]', coerce=True)
# if we are all nans then leave me alone
if not isnull(new_values).all():
values = new_values
else:
values = lib.maybe_convert_objects(
values, convert_datetime=convert_dates)
# convert timedeltas
if convert_timedeltas and values.dtype == np.object_:
if convert_timedeltas == 'coerce':
from pandas.tseries.timedeltas import \
_possibly_cast_to_timedelta
new_values = _possibly_cast_to_timedelta(values, coerce=True)
# if we are all nans then leave me alone
if not isnull(new_values).all():
values = new_values
else:
values = lib.maybe_convert_objects(
values, convert_timedelta=convert_timedeltas)
# convert to numeric
if values.dtype == np.object_:
if convert_numeric:
try:
new_values = lib.maybe_convert_numeric(
values, set(), coerce_numeric=True)
# if we are all nans then leave me alone
if not isnull(new_values).all():
values = new_values
except:
pass
else:
# soft-conversion
values = lib.maybe_convert_objects(values)
return values
def _possibly_castable(arr):
# return False to force a non-fastpath
# check datetime64[ns]/timedelta64[ns] are valid
# otherwise try to coerce
kind = arr.dtype.kind
if kind == 'M' or kind == 'm':
return arr.dtype in _DATELIKE_DTYPES
return arr.dtype.name not in _POSSIBLY_CAST_DTYPES
def _possibly_convert_platform(values):
""" try to do platform conversion, allow ndarray or list here """
if isinstance(values, (list, tuple)):
values = lib.list_to_object_array(values)
if getattr(values, 'dtype', None) == np.object_:
if hasattr(values, 'values'):
values = values.values
values = lib.maybe_convert_objects(values)
return values
def _possibly_cast_to_datetime(value, dtype, coerce=False):
""" try to cast the array/value to a datetimelike dtype, converting float
nan to iNaT
"""
if dtype is not None:
if isinstance(dtype, compat.string_types):
dtype = np.dtype(dtype)
is_datetime64 = is_datetime64_dtype(dtype)
is_timedelta64 = is_timedelta64_dtype(dtype)
if is_datetime64 or is_timedelta64:
# force the dtype if needed
if is_datetime64 and dtype != _NS_DTYPE:
if dtype.name == 'datetime64[ns]':
dtype = _NS_DTYPE
else:
raise TypeError(
"cannot convert datetimelike to dtype [%s]" % dtype)
elif is_timedelta64 and dtype != _TD_DTYPE:
if dtype.name == 'timedelta64[ns]':
dtype = _TD_DTYPE
else:
raise TypeError(
"cannot convert timedeltalike to dtype [%s]" % dtype)
if np.isscalar(value):
if value == tslib.iNaT or isnull(value):
value = tslib.iNaT
else:
value = np.array(value,copy=False)
# have a scalar array-like (e.g. NaT)
if value.ndim == 0:
value = tslib.iNaT
# we have an array of datetime or timedeltas & nulls
elif np.prod(value.shape) and value.dtype != dtype:
try:
if is_datetime64:
from pandas.tseries.tools import to_datetime
value = to_datetime(value, coerce=coerce).values
elif is_timedelta64:
from pandas.tseries.timedeltas import \
_possibly_cast_to_timedelta
value = _possibly_cast_to_timedelta(value, coerce='compat', dtype=dtype)
except:
pass
else:
is_array = isinstance(value, np.ndarray)
# catch a datetime/timedelta that is not of ns variety
# and no coercion specified
if (is_array and value.dtype.kind in ['M','m']):
dtype = value.dtype
if dtype.kind == 'M' and dtype != _NS_DTYPE:
value = value.astype(_NS_DTYPE)
elif dtype.kind == 'm' and dtype != _TD_DTYPE:
from pandas.tseries.timedeltas import \
_possibly_cast_to_timedelta
value = _possibly_cast_to_timedelta(value, coerce='compat')
# only do this if we have an array and the dtype of the array is not
# setup already we are not an integer/object, so don't bother with this
# conversion
elif (is_array and not (
issubclass(value.dtype.type, np.integer) or
value.dtype == np.object_)):
pass
# try to infer if we have a datetimelike here
# otherwise pass thru
else:
value = _possibly_infer_to_datetimelike(value)
return value
def _possibly_infer_to_datetimelike(value):
# we might have a array (or single object) that is datetime like,
# and no dtype is passed don't change the value unless we find a
# datetime/timedelta set
# this is pretty strict in that a datetime/timedelta is REQUIRED
# in addition to possible nulls/string likes
# ONLY strings are NOT datetimelike
v = value
if not is_list_like(v):
v = [v]
v = np.array(v,copy=False)
shape = v.shape
if not v.ndim == 1:
v = v.ravel()
if len(v):
def _try_datetime(v):
# safe coerce to datetime64
try:
return tslib.array_to_datetime(v, raise_=True).reshape(shape)
except:
return v
def _try_timedelta(v):
# safe coerce to timedelta64
# will try first with a string & object conversion
from pandas.tseries.timedeltas import to_timedelta
try:
return to_timedelta(v).values.reshape(shape)
except:
# this is for compat with numpy < 1.7
# but string-likes will fail here
from pandas.tseries.timedeltas import \
_possibly_cast_to_timedelta
try:
return _possibly_cast_to_timedelta(v, coerce='compat').reshape(shape)
except:
return v
# do a quick inference for perf
sample = v[:min(3,len(v))]
inferred_type = lib.infer_dtype(sample)
if inferred_type in ['datetime', 'datetime64']:
value = _try_datetime(v)
elif inferred_type in ['timedelta', 'timedelta64']:
value = _try_timedelta(v)
# its possible to have nulls intermixed within the datetime or timedelta
# these will in general have an inferred_type of 'mixed', so have to try
# both datetime and timedelta
# try timedelta first to avoid spurious datetime conversions
# e.g. '00:00:01' is a timedelta but technically is also a datetime
elif inferred_type in ['mixed']:
if lib.is_possible_datetimelike_array(_ensure_object(v)):
value = _try_timedelta(v)
if lib.infer_dtype(value) in ['mixed']:
value = _try_datetime(v)
return value
def _is_bool_indexer(key):
if isinstance(key, (ABCSeries, np.ndarray)):
if key.dtype == np.object_:
key = np.asarray(_values_from_object(key))
if not lib.is_bool_array(key):
if isnull(key).any():
raise ValueError('cannot index with vector containing '
'NA / NaN values')
return False
return True
elif key.dtype == np.bool_:
return True
elif isinstance(key, list):
try:
arr = np.asarray(key)
return arr.dtype == np.bool_ and len(arr) == len(key)
except TypeError: # pragma: no cover
return False
return False
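# Hedged sketch of the intended behaviour (not executed here):
# >>> _is_bool_indexer([True, False, True])
# True
# >>> _is_bool_indexer([1, 0, 1])
# False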
def _default_index(n):
from pandas.core.index import Int64Index
values = np.arange(n, dtype=np.int64)
result = values.view(Int64Index)
result.name = None
result.is_unique = True
return result
def ensure_float(arr):
if issubclass(arr.dtype.type, (np.integer, np.bool_)):
arr = arr.astype(float)
return arr
def _mut_exclusive(**kwargs):
item1, item2 = kwargs.items()
label1, val1 = item1
label2, val2 = item2
if val1 is not None and val2 is not None:
raise TypeError('mutually exclusive arguments: %r and %r' %
(label1, label2))
elif val1 is not None:
return val1
else:
return val2
def _any_none(*args):
for arg in args:
if arg is None:
return True
return False
def _all_not_none(*args):
for arg in args:
if arg is None:
return False
return True
def _try_sort(iterable):
listed = list(iterable)
try:
return sorted(listed)
except Exception:
return listed
def _count_not_none(*args):
return sum(x is not None for x in args)
#------------------------------------------------------------------------------
# miscellaneous python tools
def rands(n):
"""Generates a random alphanumeric string of length *n*"""
from random import Random
import string
return ''.join(Random().sample(string.ascii_letters + string.digits, n))
def adjoin(space, *lists):
"""
Glues together two sets of strings using the amount of space requested.
The idea is to prettify.
"""
out_lines = []
newLists = []
lengths = [max(map(len, x)) + space for x in lists[:-1]]
# not the last one
lengths.append(max(map(len, lists[-1])))
maxLen = max(map(len, lists))
for i, lst in enumerate(lists):
nl = [x.ljust(lengths[i]) for x in lst]
nl.extend([' ' * lengths[i]] * (maxLen - len(lst)))
newLists.append(nl)
toJoin = zip(*newLists)
for lines in toJoin:
out_lines.append(_join_unicode(lines))
return _join_unicode(out_lines, sep='\n')
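# Hedged usage sketch (not executed here); note that shorter entries keep
# their trailing padding:
# >>> adjoin(2, ['a', 'bb'], ['ccc', 'd'])
# 'a   ccc\nbb  d  '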
def _join_unicode(lines, sep=''):
try:
return sep.join(lines)
except UnicodeDecodeError:
sep = compat.text_type(sep)
return sep.join([x.decode('utf-8') if isinstance(x, str) else x
for x in lines])
def iterpairs(seq):
"""
Parameters
----------
seq: sequence
Returns
-------
iterator returning overlapping pairs of elements
Examples
--------
>>> list(iterpairs([1, 2, 3, 4]))
[(1, 2), (2, 3), (3, 4)]
"""
# input may not be sliceable
seq_it = iter(seq)
seq_it_next = iter(seq)
next(seq_it_next)
return zip(seq_it, seq_it_next)
def split_ranges(mask):
""" Generates tuples of ranges which cover all True value in mask
>>> list(split_ranges([1,0,0,1,0]))
[(0, 1), (3, 4)]
"""
ranges = [(0, len(mask))]
for pos, val in enumerate(mask):
if not val: # this pos should be omitted, split off the prefix range
r = ranges.pop()
if pos > r[0]: # yield non-zero range
yield (r[0], pos)
if pos + 1 < len(mask): # save the rest for processing
ranges.append((pos + 1, len(mask)))
if ranges:
yield ranges[-1]
def indent(string, spaces=4):
dent = ' ' * spaces
return '\n'.join([dent + x for x in string.split('\n')])
def banner(message):
"""
Return 80-char width message declaration with = bars on top and bottom.
"""
bar = '=' * 80
return '%s\n%s\n%s' % (bar, message, bar)
def _long_prod(vals):
result = long(1)
for x in vals:
result *= x
return result
class groupby(dict):
"""
A simple groupby different from the one in itertools.
Does not require the sequence elements to be sorted by keys,
however it is slower.
"""
def __init__(self, seq, key=lambda x: x):
for value in seq:
k = key(value)
self.setdefault(k, []).append(value)
try:
__iter__ = dict.iteritems
except AttributeError: # pragma: no cover
# Python 3
def __iter__(self):
return iter(dict.items(self))
def map_indices_py(arr):
"""
Returns a dictionary with (element, index) pairs for each element in the
given array/list
"""
return dict([(x, i) for i, x in enumerate(arr)])
def union(*seqs):
result = set([])
for seq in seqs:
if not isinstance(seq, set):
seq = set(seq)
result |= seq
return type(seqs[0])(list(result))
def difference(a, b):
return type(a)(list(set(a) - set(b)))
def intersection(*seqs):
result = set(seqs[0])
for seq in seqs:
if not isinstance(seq, set):
seq = set(seq)
result &= seq
return type(seqs[0])(list(result))
def _asarray_tuplesafe(values, dtype=None):
from pandas.core.index import Index
if not (isinstance(values, (list, tuple))
or hasattr(values, '__array__')):
values = list(values)
elif isinstance(values, Index):
return values.values
if isinstance(values, list) and dtype in [np.object_, object]:
return lib.list_to_object_array(values)
result = np.asarray(values, dtype=dtype)
if issubclass(result.dtype.type, compat.string_types):
result = np.asarray(values, dtype=object)
if result.ndim == 2:
if isinstance(values, list):
return lib.list_to_object_array(values)
else:
# Making a 1D array that safely contains tuples is a bit tricky
# in numpy, leading to the following
try:
result = np.empty(len(values), dtype=object)
result[:] = values
except ValueError:
# we have a list-of-list
result[:] = [tuple(x) for x in values]
return result
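# Hedged sketch (not executed here): tuples survive as scalar elements of an
# object array instead of being broadcast into a 2-d array.
# >>> _asarray_tuplesafe([(1, 2), (3, 4)])
# array([(1, 2), (3, 4)], dtype=object)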
def _index_labels_to_array(labels):
if isinstance(labels, (compat.string_types, tuple)):
labels = [labels]
if not isinstance(labels, (list, np.ndarray)):
try:
labels = list(labels)
except TypeError: # non-iterable
labels = [labels]
labels = _asarray_tuplesafe(labels)
return labels
def _maybe_make_list(obj):
if obj is not None and not isinstance(obj, (tuple, list)):
return [obj]
return obj
is_bool = lib.is_bool
is_integer = lib.is_integer
is_float = lib.is_float
is_complex = lib.is_complex
def is_iterator(obj):
# python 3 generators have __next__ instead of next
return hasattr(obj, 'next') or hasattr(obj, '__next__')
def is_number(obj):
return isinstance(obj, (numbers.Number, np.number))
def _get_dtype(arr_or_dtype):
if isinstance(arr_or_dtype, np.dtype):
return arr_or_dtype
if isinstance(arr_or_dtype, type):
return np.dtype(arr_or_dtype)
return arr_or_dtype.dtype
def _get_dtype_type(arr_or_dtype):
if isinstance(arr_or_dtype, np.dtype):
return arr_or_dtype.type
if isinstance(arr_or_dtype, type):
return np.dtype(arr_or_dtype).type
return arr_or_dtype.dtype.type
def _is_any_int_dtype(arr_or_dtype):
tipo = _get_dtype_type(arr_or_dtype)
return issubclass(tipo, np.integer)
def is_integer_dtype(arr_or_dtype):
tipo = _get_dtype_type(arr_or_dtype)
return (issubclass(tipo, np.integer) and
not issubclass(tipo, (np.datetime64, np.timedelta64)))
def _is_int_or_datetime_dtype(arr_or_dtype):
tipo = _get_dtype_type(arr_or_dtype)
return (issubclass(tipo, np.integer) or
issubclass(tipo, (np.datetime64, np.timedelta64)))
def is_datetime64_dtype(arr_or_dtype):
tipo = _get_dtype_type(arr_or_dtype)
return issubclass(tipo, np.datetime64)
def is_datetime64_ns_dtype(arr_or_dtype):
tipo = _get_dtype(arr_or_dtype)
return tipo == _NS_DTYPE
def is_timedelta64_dtype(arr_or_dtype):
tipo = _get_dtype_type(arr_or_dtype)
return issubclass(tipo, np.timedelta64)
def is_timedelta64_ns_dtype(arr_or_dtype):
tipo = _get_dtype_type(arr_or_dtype)
return tipo == _TD_DTYPE
def _is_datetime_or_timedelta_dtype(arr_or_dtype):
tipo = _get_dtype_type(arr_or_dtype)
return issubclass(tipo, (np.datetime64, np.timedelta64))
needs_i8_conversion = _is_datetime_or_timedelta_dtype
def is_numeric_dtype(arr_or_dtype):
tipo = _get_dtype_type(arr_or_dtype)
return (issubclass(tipo, (np.number, np.bool_))
and not issubclass(tipo, (np.datetime64, np.timedelta64)))
def is_float_dtype(arr_or_dtype):
tipo = _get_dtype_type(arr_or_dtype)
return issubclass(tipo, np.floating)
def _is_floating_dtype(arr_or_dtype):
tipo = _get_dtype_type(arr_or_dtype)
return issubclass(tipo, np.floating)
def is_bool_dtype(arr_or_dtype):
tipo = _get_dtype_type(arr_or_dtype)
return issubclass(tipo, np.bool_)
def is_categorical_dtype(arr_or_dtype):
if hasattr(arr_or_dtype,'dtype'):
arr_or_dtype = arr_or_dtype.dtype
if isinstance(arr_or_dtype, CategoricalDtype):
return True
try:
return arr_or_dtype == 'category'
except:
return False
def is_complex_dtype(arr_or_dtype):
tipo = _get_dtype_type(arr_or_dtype)
return issubclass(tipo, np.complexfloating)
def is_object_dtype(arr_or_dtype):
tipo = _get_dtype_type(arr_or_dtype)
return issubclass(tipo, np.object_)
def is_re(obj):
return isinstance(obj, re._pattern_type)
def is_re_compilable(obj):
try:
re.compile(obj)
except TypeError:
return False
else:
return True
def is_list_like(arg):
return (hasattr(arg, '__iter__') and
not isinstance(arg, compat.string_and_binary_types))
def _is_sequence(x):
try:
iter(x)
len(x) # it has a length
return not isinstance(x, compat.string_and_binary_types)
except (TypeError, AttributeError):
return False
_string_dtypes = frozenset(map(_get_dtype_from_object, (compat.binary_type,
compat.text_type)))
_ensure_float64 = algos.ensure_float64
_ensure_float32 = algos.ensure_float32
_ensure_int64 = algos.ensure_int64
_ensure_int32 = algos.ensure_int32
_ensure_int16 = algos.ensure_int16
_ensure_int8 = algos.ensure_int8
_ensure_platform_int = algos.ensure_platform_int
_ensure_object = algos.ensure_object
def _astype_nansafe(arr, dtype, copy=True):
""" return a view if copy is False, but
need to be very careful as the result shape could change! """
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
if is_datetime64_dtype(arr):
if dtype == object:
return tslib.ints_to_pydatetime(arr.view(np.int64))
elif dtype == np.int64:
return arr.view(dtype)
elif dtype != _NS_DTYPE:
raise TypeError("cannot astype a datetimelike from [%s] to [%s]" %
(arr.dtype, dtype))
return arr.astype(_NS_DTYPE)
elif is_timedelta64_dtype(arr):
if dtype == np.int64:
return arr.view(dtype)
elif dtype == object:
return arr.astype(object)
# in py3, timedelta64[ns] are int64
elif ((compat.PY3 and dtype not in [_INT64_DTYPE, _TD_DTYPE]) or
(not compat.PY3 and dtype != _TD_DTYPE)):
# allow frequency conversions
if dtype.kind == 'm':
mask = isnull(arr)
result = arr.astype(dtype).astype(np.float64)
result[mask] = np.nan
return result
raise TypeError("cannot astype a timedelta from [%s] to [%s]" %
(arr.dtype, dtype))
return arr.astype(_TD_DTYPE)
elif (np.issubdtype(arr.dtype, np.floating) and
np.issubdtype(dtype, np.integer)):
if np.isnan(arr).any():
raise ValueError('Cannot convert NA to integer')
elif arr.dtype == np.object_ and np.issubdtype(dtype.type, np.integer):
# work around NumPy brokenness, #1987
return lib.astype_intsafe(arr.ravel(), dtype).reshape(arr.shape)
elif issubclass(dtype.type, compat.text_type):
# in Py3 that's str, in Py2 that's unicode
return lib.astype_unicode(arr.ravel()).reshape(arr.shape)
elif issubclass(dtype.type, compat.string_types):
return lib.astype_str(arr.ravel()).reshape(arr.shape)
if copy:
return arr.astype(dtype)
return arr.view(dtype)
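# Hedged sketch (not executed here): plain float -> int casts go through the
# NaN check and then fall back to ndarray.astype.
# >>> _astype_nansafe(np.array([1.0, 2.0]), np.int64)
# array([1, 2])
# >>> _astype_nansafe(np.array([1.0, np.nan]), np.int64)  # raises ValueError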
def _clean_fill_method(method):
if method is None:
return None
method = method.lower()
if method == 'ffill':
method = 'pad'
if method == 'bfill':
method = 'backfill'
if method not in ['pad', 'backfill']:
msg = ('Invalid fill method. Expecting pad (ffill) or backfill '
'(bfill). Got %s' % method)
raise ValueError(msg)
return method
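# Hedged sketch (not executed here): the user-facing aliases map onto the two
# internal fill routines defined above.
# >>> _clean_fill_method('ffill')
# 'pad'
# >>> _get_fill_func('bfill') is backfill_1d
# True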
def _all_none(*args):
for arg in args:
if arg is not None:
return False
return True
class UTF8Recoder:
"""
Iterator that reads an encoded stream and reencodes the input to UTF-8
"""
def __init__(self, f, encoding):
self.reader = codecs.getreader(encoding)(f)
def __iter__(self):
return self
def read(self, bytes=-1):
return self.reader.read(bytes).encode('utf-8')
def readline(self):
return self.reader.readline().encode('utf-8')
def next(self):
return next(self.reader).encode("utf-8")
# Python 3 iterator
__next__ = next
def _get_handle(path, mode, encoding=None, compression=None):
"""Gets file handle for given path and mode.
NOTE: Under Python 3.2, getting a compressed file handle means reading in
the entire file, decompressing it and decoding it to ``str`` all at once
and then wrapping it in a StringIO.
"""
if compression is not None:
if encoding is not None and not compat.PY3:
msg = 'encoding + compression not yet supported in Python 2'
raise ValueError(msg)
if compression == 'gzip':
import gzip
f = gzip.GzipFile(path, 'rb')
elif compression == 'bz2':
import bz2
f = bz2.BZ2File(path, 'rb')
else:
raise ValueError('Unrecognized compression type: %s' %
compression)
if compat.PY3_2:
# gzip and bz2 don't work with TextIOWrapper in 3.2
encoding = encoding or get_option('display.encoding')
f = StringIO(f.read().decode(encoding))
elif compat.PY3:
from io import TextIOWrapper
f = TextIOWrapper(f, encoding=encoding)
return f
else:
if compat.PY3:
if encoding:
f = open(path, mode, encoding=encoding)
else:
f = open(path, mode, errors='replace')
else:
f = open(path, mode)
return f
if compat.PY3: # pragma: no cover
def UnicodeReader(f, dialect=csv.excel, encoding="utf-8", **kwds):
# ignore encoding
return csv.reader(f, dialect=dialect, **kwds)
def UnicodeWriter(f, dialect=csv.excel, encoding="utf-8", **kwds):
return csv.writer(f, dialect=dialect, **kwds)
else:
class UnicodeReader:
"""
A CSV reader which will iterate over lines in the CSV file "f",
which is encoded in the given encoding.
On Python 3, this is replaced (below) by csv.reader, which handles
unicode.
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
f = UTF8Recoder(f, encoding)
self.reader = csv.reader(f, dialect=dialect, **kwds)
def next(self):
row = next(self.reader)
return [compat.text_type(s, "utf-8") for s in row]
# python 3 iterator
__next__ = next
def __iter__(self): # pragma: no cover
return self
class UnicodeWriter:
"""
A CSV writer which will write rows to CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
# Redirect output to a queue
self.queue = StringIO()
self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
self.stream = f
self.encoder = codecs.getincrementalencoder(encoding)()
self.quoting = kwds.get("quoting", None)
def writerow(self, row):
def _check_as_is(x):
return (self.quoting == csv.QUOTE_NONNUMERIC and
is_number(x)) or isinstance(x, str)
row = [x if _check_as_is(x)
else pprint_thing(x).encode('utf-8') for x in row]
self.writer.writerow([s for s in row])
# Fetch UTF-8 output from the queue ...
data = self.queue.getvalue()
data = data.decode("utf-8")
# ... and reencode it into the target encoding
data = self.encoder.encode(data)
# write to the target stream
self.stream.write(data)
# empty queue
self.queue.truncate(0)
def writerows(self, rows):
def _check_as_is(x):
return (self.quoting == csv.QUOTE_NONNUMERIC and
is_number(x)) or isinstance(x, str)
for i, row in enumerate(rows):
rows[i] = [x if _check_as_is(x)
else pprint_thing(x).encode('utf-8') for x in row]
self.writer.writerows([[s for s in row] for row in rows])
# Fetch UTF-8 output from the queue ...
data = self.queue.getvalue()
data = data.decode("utf-8")
# ... and reencode it into the target encoding
data = self.encoder.encode(data)
# write to the target stream
self.stream.write(data)
# empty queue
self.queue.truncate(0)
def _concat_compat(to_concat, axis=0):
# filter empty arrays
nonempty = [x for x in to_concat if x.shape[axis] > 0]
# If all arrays are empty, there's nothing to convert, just short-cut to
# the concatenation, #3121.
#
# Creating an empty array directly is tempting, but the winnings would be
# marginal given that it would still require shape & dtype calculation and
# np.concatenate which has them both implemented is compiled.
if nonempty:
is_datetime64 = [x.dtype == _NS_DTYPE for x in nonempty]
if all(is_datetime64):
# work around NumPy 1.6 bug
new_values = np.concatenate([x.view(np.int64) for x in nonempty],
axis=axis)
return new_values.view(_NS_DTYPE)
elif any(is_datetime64):
to_concat = [_to_pydatetime(x) for x in nonempty]
return np.concatenate(to_concat, axis=axis)
def _to_pydatetime(x):
if x.dtype == _NS_DTYPE:
shape = x.shape
x = tslib.ints_to_pydatetime(x.view(np.int64).ravel())
x = x.reshape(shape)
return x
def _where_compat(mask, arr1, arr2):
if arr1.dtype == _NS_DTYPE and arr2.dtype == _NS_DTYPE:
new_vals = np.where(mask, arr1.view('i8'), arr2.view('i8'))
return new_vals.view(_NS_DTYPE)
import pandas.tslib as tslib
if arr1.dtype == _NS_DTYPE:
arr1 = tslib.ints_to_pydatetime(arr1.view('i8'))
if arr2.dtype == _NS_DTYPE:
arr2 = tslib.ints_to_pydatetime(arr2.view('i8'))
return np.where(mask, arr1, arr2)
def sentinel_factory():
class Sentinel(object):
pass
return Sentinel()
def in_interactive_session():
""" check if we're running in an interactive shell
returns True if running under python/ipython interactive shell
"""
def check_main():
import __main__ as main
return (not hasattr(main, '__file__') or
get_option('mode.sim_interactive'))
try:
return __IPYTHON__ or check_main()
except:
return check_main()
def in_qtconsole():
"""
check if we're inside an IPython qtconsole
DEPRECATED: This is no longer needed, or working, in IPython 3 and above.
"""
try:
ip = get_ipython()
front_end = (
ip.config.get('KernelApp', {}).get('parent_appname', "") or
ip.config.get('IPKernelApp', {}).get('parent_appname', "")
)
if 'qtconsole' in front_end.lower():
return True
except:
return False
return False
def in_ipnb():
"""
check if we're inside an IPython Notebook
DEPRECATED: This is no longer used in pandas, and won't work in IPython 3
and above.
"""
try:
ip = get_ipython()
front_end = (
ip.config.get('KernelApp', {}).get('parent_appname', "") or
ip.config.get('IPKernelApp', {}).get('parent_appname', "")
)
if 'notebook' in front_end.lower():
return True
except:
return False
return False
def in_ipython_frontend():
"""
check if we're inside an IPython zmq frontend
"""
try:
ip = get_ipython()
return 'zmq' in str(type(ip)).lower()
except:
pass
return False
# Unicode consolidation
# ---------------------
#
# pprinting utility functions for generating Unicode text or
# bytes(3.x)/str(2.x) representations of objects.
# Try to use these as much as possible rather than rolling your own.
#
# When to use
# -----------
#
# 1) If you're writing code internal to pandas (no I/O directly involved),
# use pprint_thing().
#
# It will always return unicode text which can be handled by other
# parts of the package without breakage.
#
# 2) If you need to send something to the console, use console_encode().
#
# console_encode() should (hopefully) choose the right encoding for you
# based on the encoding set in option "display.encoding"
#
# 3) if you need to write something out to file, use
# pprint_thing_encoded(encoding).
#
# If no encoding is specified, it defaults to utf-8. Since encoding pure
# ascii with utf-8 is a no-op you can safely use the default utf-8 if you're
# working with straight ascii.
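# A hedged example of the intended use (Python 3 shown, default display
# options assumed; not executed here):
# >>> pprint_thing({'a': [1, 2]})
# "{'a': [1, 2]}"
# >>> pprint_thing_encoded(u'caf\xe9')
# b'caf\xc3\xa9'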
def _pprint_seq(seq, _nest_lvl=0, **kwds):
"""
internal. pprinter for iterables. you should probably use pprint_thing()
rather than calling this directly.
bounds length of printed sequence, depending on options
"""
if isinstance(seq, set):
fmt = u("set([%s])")
else:
fmt = u("[%s]") if hasattr(seq, '__setitem__') else u("(%s)")
nitems = get_option("max_seq_items") or len(seq)
s = iter(seq)
r = []
for i in range(min(nitems, len(seq))): # handle sets, no slicing
r.append(pprint_thing(next(s), _nest_lvl + 1, **kwds))
body = ", ".join(r)
if nitems < len(seq):
body += ", ..."
elif isinstance(seq, tuple) and len(seq) == 1:
body += ','
return fmt % body
def _pprint_dict(seq, _nest_lvl=0, **kwds):
"""
internal. pprinter for iterables. you should probably use pprint_thing()
rather than calling this directly.
"""
fmt = u("{%s}")
pairs = []
pfmt = u("%s: %s")
nitems = get_option("max_seq_items") or len(seq)
for k, v in list(seq.items())[:nitems]:
pairs.append(pfmt % (pprint_thing(k, _nest_lvl + 1, **kwds),
pprint_thing(v, _nest_lvl + 1, **kwds)))
if nitems < len(seq):
return fmt % (", ".join(pairs) + ", ...")
else:
return fmt % ", ".join(pairs)
def pprint_thing(thing, _nest_lvl=0, escape_chars=None, default_escapes=False,
quote_strings=False):
"""
This function is the sanctioned way of converting objects
to a unicode representation.
properly handles nested sequences containing unicode strings
(unicode(object) does not)
Parameters
----------
thing : anything to be formatted
_nest_lvl : internal use only. pprint_thing() is mutually-recursive
with pprint_sequence, this argument is used to keep track of the
current nesting level, and limit it.
escape_chars : list or dict, optional
Characters to escape. If a dict is passed the values are the
replacements
default_escapes : bool, default False
Whether the input escape characters replace or add to the defaults
Returns
-------
result - unicode object on py2, str on py3. Always Unicode.
"""
def as_escaped_unicode(thing, escape_chars=escape_chars):
# Unicode is fine, else we try to decode using utf-8 and 'replace'
# if that's not it either, we have no way of knowing and the user
# should deal with it himself.
try:
result = compat.text_type(thing) # we should try this first
except UnicodeDecodeError:
# either utf-8 or we replace errors
result = str(thing).decode('utf-8', "replace")
translate = {'\t': r'\t',
'\n': r'\n',
'\r': r'\r',
}
if isinstance(escape_chars, dict):
if default_escapes:
translate.update(escape_chars)
else:
translate = escape_chars
escape_chars = list(escape_chars.keys())
else:
escape_chars = escape_chars or tuple()
for c in escape_chars:
result = result.replace(c, translate[c])
return compat.text_type(result)
if (compat.PY3 and hasattr(thing, '__next__')) or hasattr(thing, 'next'):
return compat.text_type(thing)
elif (isinstance(thing, dict) and
_nest_lvl < get_option("display.pprint_nest_depth")):
result = _pprint_dict(thing, _nest_lvl, quote_strings=True)
elif _is_sequence(thing) and _nest_lvl < \
get_option("display.pprint_nest_depth"):
result = _pprint_seq(thing, _nest_lvl, escape_chars=escape_chars,
quote_strings=quote_strings)
elif isinstance(thing, compat.string_types) and quote_strings:
if compat.PY3:
fmt = "'%s'"
else:
fmt = "u'%s'"
result = fmt % as_escaped_unicode(thing)
else:
result = as_escaped_unicode(thing)
return compat.text_type(result) # always unicode
def pprint_thing_encoded(object, encoding='utf-8', errors='replace', **kwds):
value = pprint_thing(object) # get unicode representation of object
return value.encode(encoding, errors, **kwds)
def console_encode(object, **kwds):
"""
this is the sanctioned way to prepare something for
sending *to the console*, it delegates to pprint_thing() to get
a unicode representation of the object relies on the global encoding
set in display.encoding. Use this everywhere
where you output to the console.
"""
return pprint_thing_encoded(object,
get_option("display.encoding"))
def load(path): # TODO remove in 0.13
"""
Load pickled pandas object (or any other pickled object) from the specified
file path
Warning: Loading pickled data received from untrusted sources can be
unsafe. See: http://docs.python.org/2.7/library/pickle.html
Parameters
----------
path : string
File path
Returns
-------
unpickled : type of object stored in file
"""
import warnings
warnings.warn("load is deprecated, use read_pickle", FutureWarning)
from pandas.io.pickle import read_pickle
return read_pickle(path)
def save(obj, path): # TODO remove in 0.13
"""
Pickle (serialize) object to input file path
Parameters
----------
obj : any object
path : string
File path
"""
import warnings
warnings.warn("save is deprecated, use obj.to_pickle", FutureWarning)
from pandas.io.pickle import to_pickle
return to_pickle(obj, path)
def _maybe_match_name(a, b):
a_name = getattr(a, 'name', None)
b_name = getattr(b, 'name', None)
if a_name == b_name:
return a_name
return None
|
py
|
1a57ca407db1fa1944ac51d33e63e2036db03dd3
|
import pytest
from subprocess import call
import os
import yaml
"""
test metafunc
This suite exercises the metafunc command-line interface.
It also shows how to run tests where failure is expected
(i.e., checking that we handle invalid parameters).
"""
class TestCLI:
"""
simple metafunc test class
This runs metafunc directly from the command line
using subprocess.call and asserts on the return codes.
"""
@classmethod
def setup_class(self):
""" setup any state specific to the execution of the given class (which
usually contains tests).
"""
def testSetup(self):
"""
test workflow
"""
command = ["metafunc", "setup", "-n", "test"]
pwd = os.path.abspath(os.path.dirname(__file__))
rc = call(command, cwd=pwd)
assert rc == 0
@pytest.mark.parametrize(
"test_input_config,expected",
[("test/config.yaml", 0), ("config_wrong.yaml", 1)],
)
def test_run(self, test_input_config, expected):
"""
test workflow
"""
command_prefix = ["metafunc", "run"]
pwd = os.path.abspath(os.path.dirname(__file__))
command = command_prefix + [test_input_config]
rc = call(command, cwd=pwd)
assert rc == expected
# clean up run data
# config files here specify a resultdir where the snakemake run results
# will be written to. Here we find it for each individual run and delete
# the directory after successful runs.
config_data = yaml.safe_load(open(os.path.join(pwd, test_input_config)))
print(config_data)
resultdir = config_data["resultdir"]
rc = call(["rm", "-rf", resultdir], cwd=pwd)
assert rc == 0
@classmethod
def teardown_class(self):
""" teardown any state that was previously setup with a call to
setup_class.
"""
pwd = os.path.abspath(os.path.dirname(__file__))
rc = call(["rm", "-rf", "test"], cwd=pwd)
assert rc == 0
|
py
|
1a57ca4b4056dd92d02e3c6c9c5b38a23a320ff6
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-10-03 05:15
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('presto', '0005_auto_20171002_2206'),
]
operations = [
migrations.RemoveField(
model_name='appeal',
name='appeal_motivation',
),
]
|
py
|
1a57ca6eaf577e2f72c1d8cf8584bd716a76bcaf
|
# coding=utf-8
# Copyright 2022 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GZTAN dataset."""
import os
import tensorflow as tf
import tensorflow_datasets.public_api as tfds
_CITATION = """
@misc{tzanetakis_essl_cook_2001,
author = "Tzanetakis, George and Essl, Georg and Cook, Perry",
title = "Automatic Musical Genre Classification Of Audio Signals",
url = "http://ismir2001.ismir.net/pdf/tzanetakis.pdf",
publisher = "The International Society for Music Information Retrieval",
year = "2001"
}
"""
_DESCRIPTION = """
The dataset consists of 1000 audio tracks each 30 seconds long.
It contains 10 genres, each represented by 100 tracks.
The tracks are all 22050Hz Mono 16-bit audio files in .wav format.
The genres are:
* blues
* classical
* country
* disco
* hiphop
* jazz
* metal
* pop
* reggae
* rock
"""
_DOWNLOAD_URL = "http://opihi.cs.uvic.ca/sound/genres.tar.gz"
_HOMEPAGE_URL = "http://marsyas.info/index.html"
_CLASS_LABELS = [
"blues", "classical", "country", "disco", "hiphop", "jazz", "metal", "pop",
"reggae", "rock"
]
class GTZAN(tfds.core.GeneratorBasedBuilder):
"""GTZAN Dataset."""
VERSION = tfds.core.Version("1.0.0")
def _info(self):
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=tfds.features.FeaturesDict({
"audio": tfds.features.Audio(file_format="wav", sample_rate=22050),
"label": tfds.features.ClassLabel(names=_CLASS_LABELS),
"audio/filename": tfds.features.Text(),
}),
supervised_keys=("audio", "label"),
homepage=_HOMEPAGE_URL,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
dl_paths = dl_manager.download_and_extract({"genres": _DOWNLOAD_URL})
path = os.path.join(dl_paths["genres"], "genres")
# There is no predefined train/val/test split for this dataset.
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN, gen_kwargs={"path": path}),
]
def _generate_examples(self, path):
"""Yields examples.
Args:
path: Path of the downloaded and extracted directory
Yields:
Next examples
"""
for root, _, file_name in tf.io.gfile.walk(path):
for fname in file_name:
if fname.endswith(".wav"): # select only .wav files
# Each .wav file has name in the format of <genre>.<number>.wav
label = fname.split(".")[0]
key = fname
example = {
"audio": os.path.join(root, fname),
"label": label,
"audio/filename": fname,
}
yield key, example
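# Hedged illustration (not executed here): file names follow the pattern
# <genre>.<number>.wav, so the label is just the text before the first dot.
# >>> "blues.00042.wav".split(".")[0]
# 'blues'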
|
py
|
1a57cafb387dd1aafd2dc2c3b08b881109c9c54e
|
"""
Forecast datasets & data generators
"""
import os.path
from typing import Union, List
import numpy as np
from numpy.random import default_rng
import pandas as pd
"""
Synthetic sequences of (non-iid) true probs/means
"""
def bernoulli(
n: int,
p: Union[float, List, np.ndarray] = 0.5,
rng: np.random.Generator = np.random.default_rng(),
) -> np.ndarray:
"""Return a sequence of Bernoulli random variables."""
return rng.binomial(1, p, size=n)
def zeros_then_ones(
n_zeros: int,
n_ones: int,
) -> np.ndarray:
"""Return a sequence of `n_zeros` 0's followed by `n_ones` 1's."""
return np.concatenate([np.zeros((n_zeros, )), np.ones((n_ones, ))])
def zeros_then_ones_repeated(
n: int,
n_spans: int,
roll: int = 0,
) -> np.ndarray:
"""Return a repeating sequence of 0's and 1's."""
assert 1 <= n_spans <= n
span = n // n_spans
ys = np.concatenate([
zeros_then_ones(span, span)
for _ in range((n_spans + 1) // 2)
])[:n]
return np.roll(ys, roll)
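# Hedged sketch (not executed here): four spans of length two each.
# >>> zeros_then_ones_repeated(8, 4)
# array([0., 0., 1., 1., 0., 0., 1., 1.])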
def randoms_zeros_then_ones(
n_randoms: int,
n_zeros: int,
n_ones: int,
p: float = 0.5,
rng: np.random.Generator = default_rng(),
) -> np.ndarray:
"""Return a sequence of `n_randoms` Bernoulli(p) random variables,
followed by `n_zeros` 0's and `n_ones` 1's."""
return np.concatenate([rng.binomial(1, p, size=n_randoms),
np.zeros((n_zeros, )),
np.ones((n_ones, ))])
def default(
n: int,
):
"""Default setting for the paper.
Random for the first 100, and then repeated zeros-then-ones in
each log-scale span ([101, 1000], [1001, 10000], ...).
"""
n_spans = int(np.log10(n))
assert n_spans >= 2, f"default setting requires n > 100 (given: {n})"
seqs = [np.repeat(0.5, 100)]
for span in range(2, n_spans):
r = 10 ** (span + 1) - 10 ** span
seqs.append(zeros_then_ones(r // 4, r // 4))
seqs.append(zeros_then_ones(r // 4, r // 4)[::-1])
return np.concatenate(seqs)
def sigmoid(
n: int,
changepoint: float = 0.25,
) -> np.ndarray:
"""Return a sequence of values between [0, 1] that follow a sigmoid fn."""
grid = 20. * (np.linspace(0, 1, num=n) - changepoint) # [-10, 10]
return 1. / (1. + np.exp(-grid))
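# Hedged sketch (not executed here): the curve crosses 0.5 exactly at the
# changepoint fraction of the sequence.
# >>> s = sigmoid(101, changepoint=0.25)
# >>> float(s[25])
# 0.5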
"""
Presets:
binary: pd.DataFrame(time, data, true_probs)
continuous: pd.DataFrame(time, data, true_means, true_params)
"""
def make_preset(
true_probs: np.ndarray,
rng: np.random.Generator = default_rng(),
):
"""A helper function that makes binary data given true probabilities."""
n = len(true_probs)
data = bernoulli(n, true_probs, rng=rng)
return pd.DataFrame({
"time": np.arange(1, n + 1),
"data": data,
"true_probs": true_probs,
})
def preset_default(
n: int,
noise: float = 0.1,
rng: np.random.Generator = default_rng(),
) -> pd.DataFrame:
"""Default synthetic data.
Generated from a noisy version of
100 1/2s, 1000 1s, 1000 0s, 1000 1s, 1000 0s, ..., 1000 1s, and 500 0s."""
pattern = default(n)
true_probs = 0.8 * pattern + 0.2 * (1 - pattern)
true_probs = np.clip(true_probs + rng.normal(0, noise, n), 0, 1)
return make_preset(true_probs, rng)
def preset_random(
n: int,
noise: float = 0.1,
rng: np.random.Generator = default_rng(),
) -> pd.DataFrame:
"""Random synthetic data: true_prob == 0.5 + noise for all rounds."""
true_probs = np.repeat(0.5, n)
true_probs = np.clip(true_probs + rng.normal(0, noise, n), 0, 1)
return make_preset(true_probs, rng)
def preset_sigmoid(
n: int,
noise: float = 0.25,
rng: np.random.Generator = default_rng(),
changepoint: float = 0.25, # between [0, 1]
) -> pd.DataFrame:
"""A smoothly increasing function with a changepoint + sinusoidal noise."""
pattern = sigmoid(n, changepoint)
sine_noise = np.sin(0.1 * np.arange(n)) + rng.normal(0, 1, n)
true_probs = np.clip(pattern + noise * sine_noise, 0, 1)
return make_preset(true_probs, rng)
def make_preset_beta(
true_means: np.ndarray,
rng: np.random.Generator = default_rng(),
) -> pd.DataFrame:
"""A helper function that makes continuous data given true means, where
y_t ~ Beta(r_t, 1 - r_t)."""
n = len(true_means)
true_params = [true_means, 1. - true_means]
data = rng.beta(*true_params)
out = {
"time": np.arange(1, n + 1),
"data": data,
"true_means": true_means,
"true_dist": ["beta" for _ in range(n)],
}
out.update({
f"true_param{i}": true_param
for i, true_param in enumerate(true_params)
})
return pd.DataFrame(out)
def preset_beta(
n: int,
noise: float = 0.1,
rng: np.random.Generator = default_rng(),
) -> pd.DataFrame:
"""Synthetic data with continuous outcomes taking values in [-1, 1].
z_t ~ Beta(r_t, 1 - r_t)
y_t = 2 * z_t - 1
"""
pattern = sigmoid(n, changepoint=0.25)
true_means = 0.8 * pattern + 0.2 * (1 - pattern)
true_means = np.clip(true_means + rng.normal(0, noise, n), 0.01, 0.99)
return make_preset_beta(true_means, rng)
# pd.DataFrame(time, data, true_probs)
PRESETS = {
"default": preset_default,
"random": preset_random,
"sigmoid": preset_sigmoid,
"beta": preset_beta,
}
def get_data(
data_name: str,
size: int = 0,
noise: float = 0.1,
rng: Union[int, np.random.Generator] = default_rng(),
) -> pd.DataFrame:
"""Get data from its name or filename, up to n_rounds."""
if os.path.exists(data_name):
data = pd.read_csv(data_name)
if size > 0:
data = data[:size]
else:
try:
if isinstance(rng, int):
rng = default_rng(rng)
assert size > 0, f"specify data size for synthetic data generation"
data = PRESETS[data_name](size, noise, rng)
except KeyError:
raise KeyError(
f"data name {data_name} is not one of the presets, "
f"available: " + " ".join(list(PRESETS.keys()))) from None
return data
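# Hedged usage sketch (assumes "sigmoid" is not a file on disk, so the preset
# branch is taken; not executed here):
# >>> df = get_data("sigmoid", size=1000, noise=0.1, rng=42)
# >>> list(df.columns)
# ['time', 'data', 'true_probs']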
|
py
|
1a57cc174068da602a9c15fca496c1a1f4721da4
|
import os
import numpy as np
import time
from collections import deque
import glob
import pickle
import shutil
from copy import deepcopy
import matplotlib.pyplot as plt
import torch
from agents import AgentDDPG, AgentMADDPG
from utilities import get_env_info
def run(env, params):
brain_name, n_agents, state_size, action_size = get_env_info(env)
if params["type"].lower() == "ddpg":
agent = AgentDDPG(state_size=state_size,
action_size=action_size, params=params)
scores = ddpg(agent, env, params)
elif params["type"].lower() == "2 ddpg":
agent = [AgentDDPG(state_size=state_size, action_size=action_size,
params=params) for i in range(n_agents)]
scores = ddpg(agent, env, params)
elif params["type"].lower() == "maddpg":
agent = AgentMADDPG(env, params)
scores = ddpg(agent, env, params)
else:
raise Exception("'type' can be 'ddpg', '2 ddpg', 'maddpg'")
def ddpg(agent, env, params):
# Get environment information
brain_name, n_agents, state_size, action_size = get_env_info(env)
# Initialize stuff
log = Logger(params, agent)
for _ in range(1, params["n_episodes"]+1):
env_info = env.reset(train_mode=True)[brain_name]
if isinstance(agent, list):
for i in range(n_agents):
agent[i].reset()
else:
agent.reset()
states = env_info.vector_observations
episode_scores = np.zeros(n_agents)
for t in range(params["max_steps"]):
if isinstance(agent, list):
actions = np.zeros((n_agents, action_size))
for i in range(n_agents):
actions[i] = agent[i].act(states[i])
else:
actions = agent.act(states)
if params["type"].lower() == "maddpg":
actions = actions.reshape(n_agents, action_size)
actions = actions.detach().cpu().numpy()
env_info = env.step(actions)[brain_name]
next_states = env_info.vector_observations
rewards = env_info.rewards
dones = env_info.local_done
if isinstance(agent, list):
for i in range(n_agents):
agent[i].step(states[i], actions[i], rewards[i],
next_states[i], dones[i])
else:
agent.step(states, actions, rewards, next_states, dones)
episode_scores += rewards
states = next_states
# check if we should save and show progress
log.tic()
if np.any(dones):
break
log.update(agent, episode_scores, t+1)
log.tic()
if log.solved and params["stop_on_solve"]:
break
if time.time() - log.t_start > params["max_time"] + 5:
break
return agent, log
class Logger():
'''
Logs, displays, and saves progress.
'''
def __init__(self, params, agent):
self.data = params
# If save folder exists raise an exception
if os.path.isdir(self.data["folder"]):
if self.data["overwrite"]:
shutil.rmtree(self.data["folder"])
else:
raise Exception("Folder already exists and overwrite is off.")
if not os.path.isdir(self.data["folder"]):
os.makedirs(self.data["folder"])
self.data["scores"] = []
self.data["mean_scores"] = []
self.data["steps_done"] = []
self._update_agent(agent)
# comb_score_window is the combined score - for tennis it's the max
self.comb_score_window = deque(maxlen=params["scores_window"])
# all_score_window contains the scores of all agents
self.all_score_window = deque(maxlen=params["scores_window"])
self.best_score = -np.inf
self.t_start = time.time()
self.progress_t = time.time()
self.saved_t = time.time()
self.solved = False
self.data["train_time"] = time.time() - self.t_start
def _update_agent(self, agent):
if isinstance(agent, list):
if not "actor_local_dict" in self.data:
temp = []
for i in range(len(agent)):
temp.append([])
self.data["actor_local_dict"] = deepcopy(temp)
self.data["actor_target_dict"] = deepcopy(temp)
self.data["critic_local_dict"] = deepcopy(temp)
self.data["critic_target_dict"] = deepcopy(temp)
else:
for i in range(len(agent)):
self.data["actor_local_dict"][i] = agent[i].actor_local.state_dict()
self.data["actor_target_dict"][i] = agent[i].actor_target.state_dict()
self.data["critic_local_dict"][i] = agent[i].critic_local.state_dict()
self.data["critic_target_dict"][i] = agent[i].critic_target.state_dict(
)
elif isinstance(agent, AgentDDPG):
self.data["actor_local_dict"] = agent.actor_local.state_dict()
self.data["actor_target_dict"] = agent.actor_target.state_dict()
self.data["critic_local_dict"] = agent.critic_local.state_dict()
self.data["critic_target_dict"] = agent.critic_target.state_dict()
elif isinstance(agent, AgentMADDPG):
if not "actor_local_dict" in self.data:
temp = []
for i in range(len(agent.maddpg_agent)):
temp.append([])
self.data["actor_local_dict"] = deepcopy(temp)
self.data["actor_target_dict"] = deepcopy(temp)
self.data["critic_local_dict"] = deepcopy(temp)
self.data["critic_target_dict"] = deepcopy(temp)
else:
for i in range(len(agent.maddpg_agent)):
self.data["actor_local_dict"][i] = agent.maddpg_agent[i].actor_local.state_dict(
)
self.data["actor_target_dict"][i] = agent.maddpg_agent[i].actor_target.state_dict(
)
self.data["critic_local_dict"][i] = agent.maddpg_agent[i].critic_local.state_dict(
)
self.data["critic_target_dict"][i] = agent.maddpg_agent[i].critic_target.state_dict(
)
else:
raise Exception("Unkown agent type.")
def update(self, agent, episode_scores, steps):
self.comb_score_window.append(np.max(episode_scores))
self.all_score_window.append(episode_scores)
self.data["scores"].append(episode_scores)
self.data["mean_scores"].append(np.mean(self.all_score_window, axis=0))
self.data["steps_done"].append(steps)
self._update_agent(agent)
self.tic()
def show_progress(self):
if len(self.data["mean_scores"]):
print('\rMin agent score: {:.2f}\tMax agent score: {:.2f}\tMax steps: {}\tTotal time: {}\tEpisodes: {}'.format(
min(self.data["mean_scores"][-1]),
max(self.data["mean_scores"][-1]),
self.data["steps_done"][-1],
seconds_to_time_str(time.time() - self.t_start),
len(self.data["scores"])), end="")
if len(self.data["mean_scores"]) and self.data["steps_done"][-1] > 5000:
raise Exception("debug")
def tic(self):
self.data["train_time"] = time.time() - self.t_start
if self.data["verbose"] and (self.data["progress_every"] > 0 and
time.time() - self.progress_t >= self.data["progress_every"]):
self.show_progress()
self.progress_t = time.time()
if self.data["save_every"] > 0 and \
time.time() - self.saved_t >= self.data["save_every"]:
self.saved_t = time.time()
self.save()
if len(self.comb_score_window) and \
(np.mean(self.comb_score_window) >= self.data["score_solved"]):
print('\nEnvironment solved in {:d} episodes!\tAverage combined score: {:.2f}'.format(
len(self.data["scores"])-100, np.mean(self.comb_score_window)))
self.save(add="SOLVED")
self.solved = True
def save(self, add=""):
# Figure out the root of the resulting file names
if add != "":
name = "agent_" + add + "_"
else:
name = "agent_"
name = name + "train_time_" + \
seconds_to_time_str(
self.data["train_time"]).replace(" ", "_")
save_path = os.path.join(self.data["folder"], name + ".pkl")
with open(save_path, 'wb') as f:
pickle.dump(self.data, f)
def find_state_mag(env, max_steps=1000, n_episodes=1000):
brain_name = env.brain_names[0]
brain = env.brains[brain_name]
action_size = brain.vector_action_space_size
states = []
for i_episode in range(1, n_episodes+1):
env_info = env.reset(train_mode=True)[brain_name]
num_agents = len(env_info.agents)
state = env_info.vector_observations[0]
for t in range(max_steps):
states.append(state)
actions = np.random.randn(num_agents, action_size)
actions = np.clip(actions, -1, 1)
env_info = env.step(actions)[brain_name]
state = env_info.vector_observations[0]
done = env_info.local_done[0]
if done:
break
states = np.array(states)
states = np.abs(states)
return np.mean(states, axis=0), np.std(states, axis=0)
def seconds_to_time_str(t):
if t < 0:
raise Exception("Negative time?")
if t < 60:
return "{:02d} seconds".format(int(t))
elif t >= 60 and t < 3600:
return "{:04.1f} minutes".format(t/60)
elif t >= 3600:
return "{:04.1f} hours".format(t/3600)
def load_agent(folder, add="", train_time="last"):
if add != "":
name = "agent_" + add + "_"
else:
name = "agent_"
if train_time != "last":
name = name + "train_time_" + train_time.replace(" ", "_") + ".pkl"
else:
files = glob.glob(os.path.join(folder, "*.pkl"))
files.sort(key=os.path.getmtime)
files = files[-1]
name = os.path.split(files)[1]
path = os.path.join(folder, name)
with open(path, 'rb') as f:
data = pickle.load(f)
return data
def show_plots(mean_scores, scores, labels=None, max_episodes=None, only_mean=False, legend_outside=False):
if max_episodes == None:
# Find max number of episodes
max_episodes = 0
for i in range(len(mean_scores)):
if len(mean_scores[i]) > max_episodes:
max_episodes = len(mean_scores[i])
fig, ax = plt.subplots()
cmap = plt.cm.get_cmap("jet", max([len(mean_scores), 2]))
for i in range(len(mean_scores)):
if labels is not None:
label = labels[i]
else:
label = None
mean_score = mean_scores[i]
score = scores[i]
if len(mean_score) < max_episodes:
mean_score = np.concatenate(
(mean_score, np.nan * np.ones(max_episodes-len(mean_score))))
score = np.concatenate(
(score, np.nan * np.ones(max_episodes-len(score))))
if not only_mean:
ax.plot(np.arange(1, max_episodes+1),
score, alpha=0.3, color=cmap(i))
ax.plot(np.arange(1, max_episodes+1), mean_score,
label=label, color=cmap(i), linewidth=2)
if labels is not None:
if legend_outside:
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
else:
ax.legend()
ax.set_xlabel("# episodes")
ax.grid()
|
py
|
1a57cd4feb2cdef87d271a59f71452dd19980d5f
|
# -*- coding: utf-8 -*-
#
'''
Numerical solution schemes for the Navier--Stokes equation
rho (u' + u.nabla(u)) = - nabla(p) + mu Delta(u) + f,
div(u) = 0.
For an overview of methods, see
An overview of projection methods for incompressible flows;
Guermond, Minev, Shen;
Comput. Methods Appl. Mech. Engrg., 195 (2006);
<http://www.math.ust.hk/~mawang/teaching/math532/guermond-shen-2006.pdf>
or
<http://mumerik.iwr.uni-heidelberg.de/Oberwolfach-Seminar/CFD-Course.pdf>.
'''
from dolfin import (
dot, inner, grad, dx, ds, div, Function, TestFunction, solve, derivative,
TrialFunction, assemble, PETScPreconditioner, FacetNormal,
PETScKrylovSolver, as_backend_type, PETScOptions, Identity
)
from ..message import Message
def _rhs_weak(u, v, f, rho, mu, p0):
'''Right-hand side of the Navier--Stokes momentum equation in weak form.
'''
# It was first proposed in (with two intermediate steps)
#
# Sur l'approximation de la solution des 'equations de Navier-Stokes
# par la m'ethode des pas fractionnaires (II);
# R. Temam;
# Arch. Ration. Mech. Anal. 33, (1969) 377-385;
# <http://link.springer.com/article/10.1007%2FBF00247696>.
#
# to replace the (weak form) convection <(u.\nabla)v, w> by something more
# appropriate. Note, e.g., that
#
# 1/2 ( <(u.\nabla)v, w> - <(u.\nabla)w, v>)
# = 1/2 (2 <(u.\nabla)v, w> - <u, \nabla(v.w)>)
# = <(u.\nabla)v, w> - 1/2 \int u.\nabla(v.w)
# = <(u.\nabla)v, w> - 1/2 (-\int div(u)*(v.w)
# +\int_\Gamma (n.u)*(v.w)
# ).
#
# Since for solutions we have div(u)=0, n.u=0, we can consistently replace
# the convection term <(u.\nabla)u, w> by the skew-symmetric
#
# 1/2 (<(u.\nabla)u, w> - <(u.\nabla)w, u>).
#
# One distinct advantage of this formulation is that the convective term
# doesn't contribute to the total energy of the system since
#
# d/dt ||u||^2 = 2<d_t u, u> = <(u.\nabla)u, u> - <(u.\nabla)u, u> = 0.
#
# More references and info on skew-symmetry can be found in
#
# Finite Element Methods for the Simulation of Incompressible Flows,
# Volker John,
# <http://www.wias-berlin.de/people/john/lectures_madrid_2012.pdf>,
#
# and
#
# <http://calcul.math.cnrs.fr/Documents/Ecoles/CEMRACS2012/Julius_Reiss.pdf>.
#
# The first lecture is quite instructive and gives info on other
# possibilities, e.g.,
#
# * Rotational form
# <http://www.igpm.rwth-aachen.de/Download/reports/DROPS/IGPM193.pdf>
# * Divergence form
# This paper
# <http://www.cimec.org.ar/ojs/index.php/mc/article/viewFile/486/464>
# mentions 'divergence form', but it seems to be understood as another
# way of expressing the stress term mu\Delta(u).
#
# The different methods are numerically compared in
#
# On the accuracy of the rotation form in simulations of the
# Navier-Stokes equations;
# Layton et al.;
# <http://www.mathcs.emory.edu/~molshan/ftp/pub/RotationForm.pdf>.
#
# In
#
# Finite element methods
# for the incompressible Navier-Stokes equations;
# Ir. A. Segal;
# <http://ta.twi.tudelft.nl/users/vuik/burgers/fem_notes.pdf>;
#
    # it is advised to use (u^{k}.\nabla)u^{k+1} for the treatment of the
    # nonlinear term. In connection with the div-stabilization, this yields
# unconditional stability of the scheme. On the other hand, an advantage
# of treating the nonlinear term purely explicitly is that the resulting
# problem would be symmetric and positive definite, qualifying for robust
# AMG preconditioning.
# One can also find advice on the boundary conditions for axisymmetric flow
# here.
#
# For more information on stabilization techniques and general solution
# recipes, check out
#
# Finite Element Methods for Flow Problems;
# Jean Donea, Antonio Huerta.
#
# There are plenty of references in the book, e.g. to
#
# Finite element stabilization parameters
# computed from element matrices and vectors;
# Tezduyar, Osawa;
# Comput. Methods Appl. Mech. Engrg. 190 (2000) 411-430;
# <http://www.tafsm.org/PUB_PRE/jALL/j89-CMAME-EBTau.pdf>
#
# where more details on SUPG are given.
#
def epsilon(u):
return 0.5*(grad(u) + grad(u).T)
def sigma(u, p):
d = u.ufl_element().cell().topological_dimension()
return 2*mu*epsilon(u) - p*Identity(d)
# One could omit the boundary term
#
# mu * inner(grad(u)*n, v) * ds.
#
# This effectively means that at all boundaries where no sufficient
# Dirichlet-conditions are posed, we assume grad(u)*n to vanish.
normal = FacetNormal(v.function_space().mesh())
return (
inner(f, v) * dx
# - rho*inner(grad(u)*u, v) * dx
- rho * 0.5 * (inner(grad(u)*u, v) - inner(grad(v)*u, u)) * dx
# - mu * inner(grad(u), grad(v)) * dx
# - inner(grad(p0), v) * dx
- inner(sigma(u, p0), epsilon(v)) * dx
- inner(p0*normal, v) * ds
+ mu*inner(grad(u).T*normal, v)*ds
)
def _compute_tentative_velocity(
u, p0, f, u_bcs, time_step_method, rho, mu, dt, v,
tol=1.0e-10
):
#
# F(u) = 0,
# F(u) := rho (U0 + (u.\nabla)u) - mu \div(\nabla u) - f = 0.
#
# TODO higher-order scheme for time integration
#
# For higher-order schemes, see
#
# A comparison of time-discretization/linearization approaches
# for the incompressible Navier-Stokes equations;
# Volker John, Gunar Matthies, Joachim Rang;
# Comput. Methods Appl. Mech. Engrg. 195 (2006) 5995-6010;
# <http://www.wias-berlin.de/people/john/ELECTRONIC_PAPERS/JMR06.CMAME.pdf>.
#
ui = Function(u[0].function_space())
# F1 is scaled with `dt / rho`.
if time_step_method == 'forward euler':
alpha = 1.0
F1 = (
inner(ui - u[0], v) * dx
- dt/rho * _rhs_weak(u[0], v, f[0], rho, mu, p0)
)
elif time_step_method == 'backward euler':
alpha = 1.0
F1 = (
inner(ui - u[0], v) * dx
- dt/rho * _rhs_weak(ui, v, f[1], rho, mu, p0)
)
else:
assert time_step_method == 'crank-nicolson'
alpha = 1.0
F1 = (
inner(ui - u[0], v) * dx
- dt/rho * 0.5 * (
_rhs_weak(u[0], v, f[0], rho, mu, p0) +
_rhs_weak(ui, v, f[1], rho, mu, p0)
)
)
# else:
# assert time_step_method == 'bdf2'
# alpha = 1.5
# F1 = (
# inner(1.5 * ui - 2 * u[0] + 0.5 * u[-1], v) * dx
# - dt/rho * _rhs_weak(ui, v, f[1], rho, mu, p0)
# )
# Get linearization and solve nonlinear system.
# If the scheme is fully explicit (theta=0.0), then the system is
# actually linear and only one Newton iteration is performed.
J = derivative(F1, ui)
# What is a good initial guess for the Newton solve?
# Three choices come to mind:
#
# (1) the previous solution u0,
# (2) the intermediate solution from the previous step ui0,
# (3) the solution of the semilinear system
# (u.\nabla(u) -> u0.\nabla(u)).
#
# Numerical experiments with the Karman vortex street show that the
# order of accuracy is (1), (3), (2). Typical norms would look like
#
# ||u - u0 || = 1.726432e-02
# ||u - ui0|| = 2.720805e+00
# ||u - u_e|| = 5.921522e-02
#
# Hence, use u0 as initial guess.
ui.assign(u[0])
# problem = NonlinearVariationalProblem(F1, ui, u_bcs, J)
# solver = NonlinearVariationalSolver(problem)
solve(
F1 == 0, ui,
bcs=u_bcs,
J=J,
solver_parameters={
# 'nonlinear_solver': 'snes',
'nonlinear_solver': 'newton',
'newton_solver': {
'maximum_iterations': 10,
'report': True,
'absolute_tolerance': tol,
'relative_tolerance': 0.0,
'error_on_nonconvergence': True
# 'linear_solver': 'iterative',
# # # The nonlinear term makes the problem generally
# # # nonsymmetric.
# # 'symmetric': False,
# # If the nonsymmetry is too strong, e.g., if u_1 is
# # large, then AMG preconditioning might not work
# # very well.
# 'preconditioner': 'ilu',
# # 'preconditioner': 'hypre_amg',
# 'krylov_solver': {
# 'relative_tolerance': tol,
# 'absolute_tolerance': 0.0,
# 'maximum_iterations': 1000,
# 'monitor_convergence': verbose
# }
}
}
)
return ui, alpha
def _compute_pressure(
p0,
alpha, rho, dt, mu,
div_ui,
p_bcs=None,
p_function_space=None,
rotational_form=False,
tol=1.0e-10,
verbose=True
):
    '''Solve the pressure Poisson equation

        - \\Delta phi = - alpha * rho/dt * div(u*),

    with suitable boundary conditions, where phi is the pressure update
    (phi = p1 - p0, plus a mu*div(u*) contribution in the rotational form).
    '''
#
# The following is based on the update formula
#
# rho/dt (u_{n+1}-u*) + \nabla phi = 0
#
# with
#
# phi = (p_{n+1} - p*) + chi*mu*div(u*)
#
# and div(u_{n+1})=0. One derives
#
# - \nabla^2 phi = rho/dt div(u_{n+1} - u*),
# - n.\nabla phi = rho/dt n.(u_{n+1} - u*),
#
# In its weak form, this is
#
# \int \grad(phi).\grad(q)
# = - rho/dt \int div(u*) q - rho/dt \int_Gamma n.(u_{n+1}-u*) q.
#
# If Dirichlet boundary conditions are applied to both u* and u_{n+1} (the
# latter in the final step), the boundary integral vanishes.
#
# Assume that on the boundary
# L2 -= inner(n, rho/k (u_bcs - ui)) * q * ds
# is zero. This requires the boundary conditions to be set for ui as well
# as u_final.
# This creates some problems if the boundary conditions are supposed to
# remain 'free' for the velocity, i.e., no Dirichlet conditions in normal
# direction. In that case, one needs to specify Dirichlet pressure
# conditions.
#
if p0:
P = p0.function_space()
else:
P = p_function_space
p1 = Function(P)
p = TrialFunction(P)
q = TestFunction(P)
a2 = dot(grad(p), grad(q)) * dx
L2 = -alpha * rho/dt * div_ui * q * dx
L2 += dot(grad(p0), grad(q)) * dx
if rotational_form:
L2 -= mu * dot(grad(div_ui), grad(q)) * dx
if p_bcs:
solve(a2 == L2, p1,
bcs=p_bcs,
solver_parameters={
'linear_solver': 'iterative',
'symmetric': True,
'preconditioner': 'hypre_amg',
'krylov_solver': {
'relative_tolerance': tol,
'absolute_tolerance': 0.0,
'maximum_iterations': 100,
'monitor_convergence': verbose,
'error_on_nonconvergence': True
}
})
else:
# If we're dealing with a pure Neumann problem here (which is the
# default case), this doesn't hurt CG if the system is consistent, cf.
#
# Iterative Krylov methods for large linear systems,
# Henk A. van der Vorst.
#
# And indeed, it is consistent: Note that
#
# <1, rhs> = \sum_i 1 * \int div(u) v_i
# = 1 * \int div(u) \sum_i v_i
# = \int div(u).
#
# With the divergence theorem, we have
#
# \int div(u) = \int_\Gamma n.u.
#
# The latter term is 0 if and only if inflow and outflow are exactly
# the same at any given point in time. This corresponds with the
# incompressibility of the liquid.
#
# Another lesson from this:
# If the mesh has penetration boundaries, you either have to specify
# the normal component of the velocity such that \int(n.u) = 0, or
# specify Dirichlet conditions for the pressure somewhere.
#
A = assemble(a2)
b = assemble(L2)
# If the right hand side is flawed (e.g., by round-off errors), then it
# may have a component b1 in the direction of the null space,
# orthogonal to the image of the operator:
#
# b = b0 + b1.
#
# When starting with initial guess x0=0, the minimal achievable
# relative tolerance is then
#
# min_rel_tol = ||b1|| / ||b||.
#
# If ||b|| is very small, which is the case when ui is almost
        # divergence-free, then min_rel_tol may be larger than the prescribed
        # relative tolerance tol. This happens, for example, when the time
        # step is very small.
# Sanitation of right-hand side is easy with
#
# e = Function(P)
# e.interpolate(Constant(1.0))
# evec = e.vector()
# evec /= norm(evec)
# print(b.inner(evec))
# b -= b.inner(evec) * evec
#
# However it's hard to decide when the right-hand side is inconsistent
# because of round-off errors in previous steps, or because the system
# is actually inconsistent (insufficient boundary conditions or
# something like that). Hence, don't do anything and rather try to
# fight the cause for round-off.
# In principle, the ILU preconditioner isn't advised here since it
# might destroy the semidefiniteness needed for CG.
#
# The system is consistent, but the matrix has an eigenvalue 0. This
# does not harm the convergence of CG, but with preconditioning one has
# to make sure that the preconditioner preserves the kernel. ILU might
# destroy this (and the semidefiniteness). With AMG, the coarse grid
# solves cannot be LU then, so try Jacobi here.
# <http://lists.mcs.anl.gov/pipermail/petsc-users/2012-February/012139.html>
#
# TODO clear everything; possible in FEniCS 2017.1
# <https://fenicsproject.org/qa/12916/clear-petscoptions>
# PETScOptions.clear()
prec = PETScPreconditioner('hypre_amg')
PETScOptions.set(
'pc_hypre_boomeramg_relax_type_coarse',
'jacobi'
)
solver = PETScKrylovSolver('cg', prec)
solver.parameters['absolute_tolerance'] = 0.0
solver.parameters['relative_tolerance'] = tol
solver.parameters['maximum_iterations'] = 1000
solver.parameters['monitor_convergence'] = verbose
solver.parameters['error_on_nonconvergence'] = True
# Create solver and solve system
A_petsc = as_backend_type(A)
b_petsc = as_backend_type(b)
p1_petsc = as_backend_type(p1.vector())
solver.set_operator(A_petsc)
solver.solve(p1_petsc, b_petsc)
return p1
def _compute_velocity_correction(
ui, u, u_bcs, p1, p0, v, mu, rho, dt, rotational_form, tol, verbose
):
# Velocity correction.
# U = U0 - dt/rho \nabla p.
u2 = TrialFunction(u[0].function_space())
a3 = inner(u2, v) * dx
phi = p1 - p0
if rotational_form:
phi += mu * div(ui)
L3 = inner(ui, v) * dx \
- dt/rho * inner(grad(phi), v) * dx
u1 = Function(u[0].function_space())
solve(a3 == L3, u1,
bcs=u_bcs,
solver_parameters={
'linear_solver': 'iterative',
'symmetric': True,
'preconditioner': 'hypre_amg',
'krylov_solver': {
'relative_tolerance': tol,
'absolute_tolerance': 0.0,
'maximum_iterations': 100,
'monitor_convergence': verbose,
'error_on_nonconvergence': True
}
})
return u1
def _step(
dt,
u, p0,
u_bcs, p_bcs,
rho, mu,
time_step_method,
f,
rotational_form=False,
verbose=True,
tol=1.0e-10,
):
    '''Incremental pressure correction scheme as described in section
    3.4 of
        An overview of projection methods for incompressible flows;
        Guermond, Minev, Shen;
Comput. Methods Appl. Mech. Engrg. 195 (2006),
<http://www.math.tamu.edu/~guermond/PUBLICATIONS/guermond_minev_shen_CMAME_2006.pdf>.
'''
# dt is a Constant() function
assert dt.values()[0] > 0.0
assert mu.values()[0] > 0.0
# Define trial and test functions
v = TestFunction(u[0].function_space())
# Create functions
# Define coefficients
with Message('Computing tentative velocity'):
ui, alpha = _compute_tentative_velocity(
u, p0, f, u_bcs, time_step_method, rho, mu, dt, v,
            tol=tol
)
with Message('Computing pressure'):
p1 = _compute_pressure(
p0,
alpha, rho, dt, mu,
div_ui=div(ui),
p_bcs=p_bcs,
rotational_form=rotational_form,
tol=tol,
verbose=verbose
)
with Message('Computing velocity correction'):
u1 = _compute_velocity_correction(
ui, u, u_bcs, p1, p0, v, mu, rho, dt, rotational_form, tol, verbose
)
return u1, p1
class Chorin(object):
order = {
'velocity': 1.0,
'pressure': 0.5,
}
def __init__(self):
return
# p0 and f0 aren't necessary here, we just keep it around to interface
# equality with IPCS.
# pylint: disable=no-self-use
def step(
self,
dt,
u, p0,
u_bcs, p_bcs,
rho, mu,
f,
verbose=True,
tol=1.0e-10
):
return _step(
dt,
u, Function(p0.function_space()),
u_bcs, p_bcs,
rho, mu,
'backward euler',
f,
verbose=verbose,
tol=tol,
)
class IPCS(object):
order = {
'velocity': 2.0,
'pressure': 1.0,
}
def __init__(self, time_step_method='backward euler'):
self.time_step_method = time_step_method
return
def step(
self,
dt,
u, p0,
u_bcs, p_bcs,
rho, mu,
f,
verbose=True,
tol=1.0e-10
):
return _step(
dt,
u, p0,
u_bcs, p_bcs,
rho, mu,
self.time_step_method,
f,
verbose=verbose,
tol=tol
)
class Rotational(object):
order = {
'velocity': 2.0,
'pressure': 1.5,
}
def __init__(self, time_step_method='backward euler'):
self.time_step_method = time_step_method
return
def step(
self,
dt,
u, p0,
u_bcs, p_bcs,
rho, mu,
f,
verbose=True,
tol=1.0e-10
):
return _step(
dt,
u, p0,
u_bcs, p_bcs,
rho, mu,
self.time_step_method,
f,
rotational_form=True,
verbose=verbose,
tol=tol
)
|
py
|
1a57cd883822b8c2c938083ae3a6f81a52b9bb12
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Sequential, Linear, ReLU
from torch_geometric.nn import GINConv, global_add_pool, GCNConv
class NNGinConv(torch.nn.Module):
def __init__(self, node_features, classes):
super(NNGinConv, self).__init__()
nn1 = Sequential(Linear(node_features, 32), ReLU(), Linear(32, 64), ReLU() , Linear(64,128))
self.conv1 = GINConv(nn1)
self.bn1 = nn.BatchNorm1d(128)
nn2 = Sequential(Linear(128, 128), ReLU(), Linear(128, 64), ReLU() , Linear(64,32))
self.conv2 = GINConv(nn2)
self.bn2 = nn.BatchNorm1d(32)
nn3 = Sequential(Linear(32, 32), ReLU(), Linear(32, 16))
self.conv3 = GINConv(nn3)
self.bn3 = nn.BatchNorm1d(16)
self.fc1 = Linear(16, 16)
self.fc2 = Linear(16, classes)
def forward(self,data):
x, edge_index, batch = data.x, data.edge_index, data.batch
x = F.relu(self.conv1(x, edge_index))
x = self.bn1(x)
x = F.relu(self.conv2(x, edge_index))
x = self.bn2(x)
x = F.relu(self.conv3(x, edge_index))
x = self.bn3(x)
#x = global_add_pool(x, batch)
x = F.relu(self.fc1(x))
x = F.dropout(x, p=0.5, training=self.training)
x = self.fc2(x)
return torch.tanh(x)
class NNGcnConv(torch.nn.Module):
def __init__(self, node_features, classes):
super(NNGcnConv, self).__init__()
self.conv1 = GCNConv(node_features, 16)
self.conv2 = GCNConv(16, 32)
self.conv3 = GCNConv(32,64)
self.conv4 = GCNConv(64,128)
self.fc1 = Linear(128, 32)
self.fc2 = Linear(32, classes)
def forward(self,data):
x, edge_index = data.x, data.edge_index
x = F.relu(self.conv1(x, edge_index))
#x = F.dropout(x, training=self.training)
x = F.relu(self.conv2(x, edge_index))
x = F.relu(self.conv3(x, edge_index))
x = F.relu(self.conv4(x, edge_index))
x = F.relu(self.fc1(x))
x = F.dropout(x, p=0.5, training=self.training)
x = self.fc2(x)
        return torch.tanh(x)
|
py
|
1a57cd9f98a7fc3cf79c42505ab42cfdebd88a48
|
# Implement an abstraction for working with rational numbers
# that includes the following functions:
#
# Constructor make_rational: takes a numerator and a denominator,
# returns a fraction.
# Selector get_numer: returns the numerator
# Selector get_denom: returns the denominator
# Addition add: adds the given fractions
# Subtraction sub: computes the difference of two fractions
# Don't forget to implement fraction normalization in whatever way is convenient for you.
#
# >>> rat1 = make_rational(3, 9)
# >>> get_numer(rat1)
# 1
# >>> get_denom(rat1)
# 3
#
# >>> rat2 = make_rational(10, 3)
#
# >>> rat3 = add(rat1, rat2)
# >>> rat_to_string(rat3)
# 11/3
#
# >>> rat4 = sub(rat1, rat2)
# >>> rat_to_string(rat4)
# -3/1
# Hints
# The gcd function from the math module finds the greatest common divisor of two numbers
# The rat_to_string function returns the string representation of a number
# (used for debugging)
# The int function converts a value to an integer
import math
def make_rational(numer, denom):
gcd = math.gcd(numer, denom)
return {"numer": numer // gcd, "denom": denom // gcd}
def get_numer(rat):
return rat["numer"]
def get_denom(rat):
return rat["denom"]
def add(rat1, rat2):
numer1 = get_numer(rat1) * get_denom(rat2)
numer2 = get_numer(rat2) * get_denom(rat1)
numer = numer1 + numer2
denom = get_denom(rat1) * get_denom(rat2)
return make_rational(numer, denom)
def sub(rat1, rat2):
numer1 = get_numer(rat1) * get_denom(rat2)
numer2 = get_numer(rat2) * get_denom(rat1)
numer = numer1 - numer2
denom = get_denom(rat1) * get_denom(rat2)
return make_rational(numer, denom)
def rat_to_string(rat):
return "{}/{}".format(get_numer(rat), get_denom(rat))
rat_1 = make_rational(3, 9)
rat_2 = make_rational(10, 3)
print(get_numer(rat_1))
print(get_denom(rat_1))
print(rat_1)
print(rat_2)
rat_3 = add(rat_1, rat_2)
print(rat_3)
rat_4 = sub(rat_1, rat_2)
print(rat_4)
def test_rational():
rat1 = make_rational(3, 9)
assert get_numer(rat1) == 1
assert get_denom(rat1) == 3
rat2 = make_rational(10, 3)
assert add(rat1, rat2) == make_rational(11, 3)
assert sub(rat1, rat2) == make_rational(-3, 1)
rat3 = make_rational(-4, 16)
assert get_numer(rat3) == -1
assert get_denom(rat3) == 4
rat4 = make_rational(12, 5)
assert add(rat3, rat4) == make_rational(43, 20)
assert sub(rat3, rat4) == make_rational(-53, 20)
assert rat_to_string(rat1) == "1/3"
assert rat_to_string(rat3) == "-1/4"
test_rational()
|
py
|
1a57ce68b21144c56bd8f11a6044d2c41f8cc676
|
import random
def train(jm=None, api=None, seed=2020, case=None):
pass
def test(jm=None, api=None, seed=2020, case=1):
cases = ["local", "distributed"]
if case not in cases:
print('[WARN] case not in ' + str(cases))
return
api.conf_reset()
conf = {}
if case == 'checkpoint_high':
conf = {
'candidates': ['cnn', 'lstm', 'resnet50', 'vgg16', 'inception3'],
'jm': jm,
'api': api,
'seed': seed,
'job_num': 1,
}
elif case == 'checkpoint_low':
conf = {
'candidates': ['resnet50_d', 'vgg16_d', 'inception3_d'],
'jm': jm,
'api': api,
'seed': seed,
'job_num': 1,
}
elif case == 'checkpoint_auto':
conf = {
'candidates': ['resnet50_d', 'vgg16_d', 'inception3_d'],
'jm': jm,
'api': api,
'seed': seed,
'job_num': 1,
}
else:
print('[ERROR] case not in ' + str(cases))
return
launch(conf)
def launch(conf=None):
if conf is None:
conf = {}
candidates = conf['candidates']
jm = conf['jm']
job_num = conf['job_num']
api = conf['api']
random.seed(conf['seed'])
for i in range(job_num):
next_seed = random.randint(0, 999999)
job_name = random.choice(candidates)
job = jm.get_job(job_name, seed=next_seed)
job['tasks'] = job['tasks'].replace('--save_model_steps=0', '--save_model_steps=50')
msg = api.submit_job(job)
print(i, msg)
if __name__ == '__main__':
print("checkpoint.launcher")
pass
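# Example invocation (hypothetical objects: `jm` must provide get_job() and
# `api` must provide conf_reset()/submit_job() as used above):
#
#   test(jm=job_manager, api=cluster_api, seed=2020, case='checkpoint_high')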
|
py
|
1a57ce83afca6742bcd87617fc81d44d53762c1f
|
# encoding: utf-8
"""
@author: sherlock
@contact: [email protected]
"""
from torch import nn
from .baseline import Baseline, Baseline_InsDis, Baseline_Mask, Baseline_GCN
from .baseline_selfgcn import Baseline_SelfGCN
from .losses import reidLoss
# Changed by Xinchen Liu
def build_model(cfg, num_classes, use_mask=False) -> nn.Module:
if 'InsDis' in list(cfg.SOLVER.LOSSTYPE):
print('Baseline Instance Model')
model = Baseline_InsDis(
cfg.MODEL.BACKBONE,
num_classes,
cfg.MODEL.LAST_STRIDE,
cfg.MODEL.WITH_IBN,
cfg.MODEL.GCB,
cfg.MODEL.STAGE_WITH_GCB,
cfg.MODEL.PRETRAIN,
cfg.MODEL.PRETRAIN_PATH)
elif use_mask:
print('Baseline with Mask Branch')
model = Baseline_Mask(
cfg.MODEL.BACKBONE,
num_classes,
cfg.MODEL.NUM_PARTS,
cfg.MODEL.LAST_STRIDE,
cfg.MODEL.WITH_IBN,
cfg.MODEL.GCB,
cfg.MODEL.STAGE_WITH_GCB,
cfg.MODEL.PRETRAIN,
cfg.MODEL.PRETRAIN_PATH)
else:
print('Baseline Model')
model = Baseline(
cfg.MODEL.BACKBONE,
num_classes,
cfg.MODEL.LAST_STRIDE,
cfg.MODEL.WITH_IBN,
cfg.MODEL.GCB,
cfg.MODEL.STAGE_WITH_GCB,
cfg.MODEL.PRETRAIN,
cfg.MODEL.PRETRAIN_PATH)
return model
def build_model_gcn(cfg, num_classes, use_mask=False) -> nn.Module:
print('Baseline GCN Model')
model = Baseline_GCN(
cfg.MODEL.BACKBONE,
num_classes,
cfg.MODEL.NUM_PARTS,
cfg.MODEL.LAST_STRIDE,
cfg.MODEL.WITH_IBN,
cfg.MODEL.GCB,
cfg.MODEL.STAGE_WITH_GCB,
cfg.MODEL.PRETRAIN,
cfg.MODEL.PRETRAIN_PATH)
return model
def build_model_selfgcn(cfg, num_classes) -> nn.Module:
print('Baseline SelfGCN Model')
model = Baseline_SelfGCN(
cfg.MODEL.BACKBONE,
num_classes,
cfg.MODEL.NUM_PARTS,
cfg.MODEL.LAST_STRIDE,
cfg.MODEL.WITH_IBN,
cfg.MODEL.GCB,
cfg.MODEL.STAGE_WITH_GCB,
cfg.MODEL.PRETRAIN,
cfg.MODEL.PRETRAIN_PATH)
return model
|
py
|
1a57cec18f1ba7ce90e785f1fb00d2ebcefdccd7
|
import random
import yaml
def load_data_cfg(data_cfg, merge_classes=False):
with open(data_cfg) as f:
data = yaml.load(f, Loader=yaml.FullLoader)
if not data.get('colors'):
data['colors'] = [
[random.randint(0, 255) for _ in range(3)]
for _ in range(len(data['names']))
]
if merge_classes:
data['nc'] = 1
data['names'] = ['item']
assert len(data['names']) == data['nc'], f'len(`names`) != `nc` in {data_cfg}.'
return data
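# Minimal usage sketch (the file path and YAML contents are assumptions; only the
# `nc`/`names` keys read above are required):
#
#   # data/example.yaml
#   #   nc: 2
#   #   names: ['person', 'car']
#
#   data = load_data_cfg('data/example.yaml')
#   print(data['nc'], data['names'], len(data['colors']))   # 2 ['person', 'car'] 2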
|
py
|
1a57cef8d7d88c93d70583c145e91eaae449b8a8
|
import explorerhat as eh
from time import sleep
while True:
voltage = eh.analog.one.read()
celsius = 100 * (voltage - 0.5)
fahrenheit = 32 + 9 * celsius / 5.0
print('Temperature is %4.1f degrees C or %4.1f degrees F'
% (celsius, fahrenheit))
sleep(1)
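# Worked example (assuming a TMP36-style sensor, as implied by the 100 * (V - 0.5)
# conversion above): a reading of 0.75 V gives 100 * (0.75 - 0.5) = 25.0 degrees C,
# and 32 + 9 * 25.0 / 5 = 77.0 degrees F.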
|
py
|
1a57cf5c8592de047743a6e59e956d6b3bbd655b
|
from distutils.core import setup, Extension
setup(
py_modules =['weighted_dict'],
ext_modules=[Extension("randomdict", ["random_weighted/randomdict.c"])]
)
|
py
|
1a57d0405d724a0cd41e660b1270d8711829d11b
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import textwrap
import pytest
from pants.backend.python.subsystems.python_tool_base import DEFAULT_TOOL_LOCKFILE
from pants.backend.python.target_types import UnrecognizedResolveNamesError
from pants.core.util_rules import config_files, source_files
from pants.core.util_rules.external_tool import rules as external_tool_rules
from pants.engine.fs import Digest, DigestContents
from pants.engine.rules import SubsystemRule, rule
from pants.jvm.resolve import jvm_tool
from pants.jvm.resolve.coursier_fetch import ArtifactRequirements, Coordinate
from pants.jvm.resolve.coursier_fetch import rules as coursier_fetch_rules
from pants.jvm.resolve.coursier_setup import rules as coursier_setup_rules
from pants.jvm.resolve.jvm_tool import (
GatherJvmCoordinatesRequest,
JvmToolBase,
JvmToolLockfileRequest,
JvmToolLockfileSentinel,
determine_resolves_to_generate,
filter_tool_lockfile_requests,
)
from pants.jvm.target_types import JvmArtifactTarget
from pants.jvm.util_rules import rules as util_rules
from pants.testutil.rule_runner import PYTHON_BOOTSTRAP_ENV, QueryRule, RuleRunner
from pants.util.ordered_set import FrozenOrderedSet
class MockJvmTool(JvmToolBase):
options_scope = "mock-tool"
default_version = "1.3"
default_artifacts = ("org.hamcrest:hamcrest-core:{version}",)
default_lockfile_resource = ("pants.backend.jvm.resolve", "mock-tool.default.lockfile.txt")
default_lockfile_url = ""
class MockJvmToolLockfileSentinel(JvmToolLockfileSentinel):
options_scope = MockJvmTool.options_scope
@rule
async def generate_test_tool_lockfile_request(
_: MockJvmToolLockfileSentinel, tool: MockJvmTool
) -> JvmToolLockfileRequest:
return JvmToolLockfileRequest.from_tool(tool)
def test_jvm_tool_base_extracts_correct_coordinates() -> None:
rule_runner = RuleRunner(
rules=[
*config_files.rules(),
*coursier_fetch_rules(),
*coursier_setup_rules(),
*external_tool_rules(),
*source_files.rules(),
*util_rules(),
*jvm_tool.rules(),
generate_test_tool_lockfile_request,
SubsystemRule(MockJvmTool),
QueryRule(JvmToolLockfileRequest, (MockJvmToolLockfileSentinel,)),
QueryRule(ArtifactRequirements, (GatherJvmCoordinatesRequest,)),
QueryRule(DigestContents, (Digest,)),
],
target_types=[JvmArtifactTarget],
)
rule_runner.set_options(
args=[
"--mock-tool-artifacts=//:junit_junit",
"--mock-tool-lockfile=/dev/null",
],
env_inherit=PYTHON_BOOTSTRAP_ENV,
)
rule_runner.write_files(
{
"BUILD": textwrap.dedent(
"""\
jvm_artifact(
name="junit_junit",
group="junit",
artifact="junit",
version="4.13.2",
)
"""
)
}
)
lockfile_request = rule_runner.request(JvmToolLockfileRequest, [MockJvmToolLockfileSentinel()])
assert sorted(lockfile_request.artifact_inputs) == [
"//:junit_junit",
"org.hamcrest:hamcrest-core:1.3",
]
requirements = rule_runner.request(
ArtifactRequirements, [GatherJvmCoordinatesRequest(lockfile_request.artifact_inputs, "")]
)
coordinates = [i.coordinate for i in requirements]
assert sorted(coordinates, key=lambda c: (c.group, c.artifact, c.version)) == [
Coordinate(group="junit", artifact="junit", version="4.13.2"),
Coordinate(group="org.hamcrest", artifact="hamcrest-core", version="1.3"),
]
def test_determine_tool_sentinels_to_generate() -> None:
class Tool1(JvmToolLockfileSentinel):
options_scope = "tool1"
class Tool2(JvmToolLockfileSentinel):
options_scope = "tool2"
class Tool3(JvmToolLockfileSentinel):
options_scope = "tool3"
def assert_chosen(
requested: list[str],
expected_tools: list[type[JvmToolLockfileSentinel]],
) -> None:
tools = determine_resolves_to_generate([Tool1, Tool2, Tool3], requested)
assert tools == expected_tools
assert_chosen([Tool2.options_scope], expected_tools=[Tool2])
assert_chosen(
[Tool1.options_scope, Tool3.options_scope],
expected_tools=[Tool1, Tool3],
)
# If none are specifically requested, return all.
assert_chosen([], expected_tools=[Tool1, Tool2, Tool3])
with pytest.raises(UnrecognizedResolveNamesError):
assert_chosen(["fake"], expected_tools=[])
def test_filter_tool_lockfile_requests() -> None:
def create_request(name: str, lockfile_dest: str | None = None) -> JvmToolLockfileRequest:
return JvmToolLockfileRequest(
FrozenOrderedSet(),
resolve_name=name,
lockfile_dest=lockfile_dest or f"{name}.txt",
)
tool1 = create_request("tool1")
tool2 = create_request("tool2")
default_tool = create_request("default", lockfile_dest=DEFAULT_TOOL_LOCKFILE)
def assert_filtered(
extra_request: JvmToolLockfileRequest | None,
*,
resolve_specified: bool,
) -> None:
requests = [tool1, tool2]
if extra_request:
requests.append(extra_request)
assert filter_tool_lockfile_requests(requests, resolve_specified=resolve_specified) == [
tool1,
tool2,
]
assert_filtered(None, resolve_specified=False)
assert_filtered(None, resolve_specified=True)
assert_filtered(default_tool, resolve_specified=False)
with pytest.raises(ValueError) as exc:
assert_filtered(default_tool, resolve_specified=True)
assert f"`[{default_tool.resolve_name}].lockfile` is set to `{DEFAULT_TOOL_LOCKFILE}`" in str(
exc.value
)
|
py
|
1a57d053cf62abb8243dc8bef5ce0dede8b7f364
|
# For your reference:
#
# SinglyLinkedListNode:
# int data
# SinglyLinkedListNode next
#
#
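# Minimal node class matching the reference above (normally provided by the
# HackerRank scaffold; included here only so the function below is self-contained):
class SinglyLinkedListNode:
    def __init__(self, data):
        self.data = data
        self.next = None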
def insertNodeAtPosition(head, data, position):
    new_node = SinglyLinkedListNode(data)
    # Inserting at the head has no predecessor node
    if position == 0:
        new_node.next = head
        return new_node
    cur = head
    count = 0
    prev = None
    while cur and count != position:
        prev = cur
        cur = cur.next
        count += 1
    prev.next = new_node
    new_node.next = cur
    return head
|
py
|
1a57d0a8c4ee9c660d41045a53433fd2f9f98ba2
|
"""
Django settings for instagram project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import dj_database_url
from decouple import config
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '8v(&sbm&ygl)q#!i(7wg80v#knjfm&#hung0l*x30ad)4wf9on'
# SECRET_KEY = config('SECRET_KEY')
# DEBUG = config('DEBUG', default=False, cast=bool)
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'tinymce',
'bootstrap3',
'inst.apps.InstConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'instagram.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.media',
],
},
},
]
WSGI_APPLICATION = 'instagram.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'ig',
'USER': 'bri',
'PASSWORD':'12345',
}
}
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Africa/Nairobi'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
STATICFILES_DIRS = [os.path.join(BASE_DIR, "static"),]
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
|
py
|
1a57d49df386f3965074768df37087fd3aee0e54
|
'''1. Write a Python program to check the sum of three elements (each from an array) from three arrays is equal to a target value. Print all those three-element combinations.
Sample data:
/*
X = [10, 20, 20, 20]
Y = [10, 20, 30, 40]
Z = [10, 30, 40, 20]
target = 70
*/ '''
X = [10, 20, 20, 20]
Y = [10, 20, 30, 40]
Z = [10, 30, 40, 20]
target = 70
for x in X:
    for y in Y:
        for z in Z:
            if x + y + z == target:
                print((x, y, z))
|
py
|
1a57d5037f51ad7df524259178a43c98a9ae36e3
|
"""Answer the following questions"""
# 1
# What is the difference between a break and a continue statement?
# 2
# What is the result of the following?
for num in "blackjack-21":
if (num.isdigit()):
print(num)
# 3
# What is the result of the following?
def add_func(x : int, y : int) -> int:
"""docstring goodness"""
return x + y
print(add_func(5,4))
print(add_func("cat", "dog"))
print(add_func("dog", 7))
# 4
# What is the result of the following?
print('5'.isalpha())
print('5'.isupper())
print('5'.isdigit())
# 5
# What is the result of the following code?
for num in range(0, 25, 6):
if num % 12 == 0:
print(num)
# 6
# What is always the value of the first index for any given, indexable data type?
# 7
# Define a function that returns true if the integer passed is less than 10 and greater than or equal to 1 and returns false in all other cases.
# 8
# What is the result of the following?
x = "9035768"
x[::-1]
|
py
|
1a57d51af4650a444599129075c472dcd467968c
|
#!/usr/bin/python
# coding=utf8
import sys
import numpy as np
import pickle as pk
from struct import unpack
print(sys.argv)
fName = sys.argv[1]
with open(fName, 'rb') as f:
info = f.readline().split(bytes(' '.encode('utf8')))
wordNum = int(info[0])
embSize = int(info[1])
l = []
vocab = {}
count = 0
buf = ''
first = False
while True:
ch = f.read(1).decode('utf8')
if ch == '':
break
elif ch == ' ':
ll = [unpack('f', f.read(4))[0] for _ in range(embSize)]
l.append(ll)
vocab[buf.lower()] = count
count += 1
elif ch == '\n':
buf = ''
else:
buf += str(ch)
matrix = np.array(l, dtype=np.float32)
avgNorm = np.linalg.norm(matrix, axis = 1).reshape([len(vocab), 1])
matrix = matrix / avgNorm
# Read Vectors
# WordSim-353
# with open('wordsim353.pkl', 'rb') as f:
# testData = pk.load(f)
# w1Idx = []
# w2Idx = []
# labels = []
# totalList = []
# for p, c in testData.items():
# w1 = p[0]
# w2 = p[1]
# if w1 in vocab and w2 in vocab:
# w1Idx.append(vocab[w1])
# w2Idx.append(vocab[w2])
# labels.append(float(c))
# totalList.append((float(c), (vocab[w1], vocab[w2])))
# SemLex-999
# with open('SimLex-999.txt', 'r') as f:
# w1Idx = []
# w2Idx = []
# labels = []
# totalList = []
# l = f.readline()
# for line in f.readlines():
# line = line.split('\t')
# w1 = line[0]
# w2 = line[1]
# if w1 in vocab and w2 in vocab:
# w1Idx.append(vocab[w1])
# w2Idx.append(vocab[w2])
# labels.append(float(line[3]))
# totalList.append((float(line[3]), (vocab[w1], vocab[w2])))
# MEN
with open('MEN_dataset_lemma_form_full', 'r') as f:
w1Idx = []
w2Idx = []
labels = []
totalList = []
for line in f.readlines():
line = line.split(' ')
w1 = line[0]
w2 = line[1]
if w1 in vocab and w2 in vocab:
w1Idx.append(vocab[w1])
w2Idx.append(vocab[w2])
labels.append(float(line[2]))
totalList.append((float(line[2]), (vocab[w1], vocab[w2])))
# norm = np.absolute(np.maximum(0, np.sum(matrix[w1Idx, :] * matrix[w2Idx, :], axis = 1)) - np.array(labels, dtype = np.float32) / 10)
# print("Avg Loss:", np.sum(norm) / len(labels), "\nData Count:", len(labels))
totalList.sort(key = lambda x: x[0])
rankDict = {}
for i, v in enumerate(totalList):
rankDict[v[1]] = i
cosines = np.maximum(0, np.sum(matrix[w1Idx, :] * matrix[w2Idx, :], axis = 1))
totalList = []
for i in range(len(w1Idx)):
totalList.append((cosines[i], (w1Idx[i], w2Idx[i])))
totalList.sort(key = lambda x: x[0])
summ = 0
n = len(w1Idx)
for i, v in enumerate(totalList):
summ += (rankDict[v[1]] - i)**2
print('Spearman\'s Correlation:', 1 - (6 * summ / n / (n**2 - 1)))
|
py
|
1a57d54d77110c7e9c710010b32801b263b3be38
|
def get_input_lines():
with open("input.txt") as f:
return f.readlines()
|
py
|
1a57d7f80b7e06235429e8c9cb7614bc12da60a8
|
"""SQLAlchemy models and utility functions for Twitoff"""
from flask_sqlalchemy import SQLAlchemy
DB = SQLAlchemy()
class User(DB.Model): # User Table
"""Twitter Users corresponding to Tweets"""
id = DB.Column(DB.BigInteger, primary_key=True) # id column
name = DB.Column(DB.String, nullable=False) # name column
newest_tweet_id = DB.Column(DB.BigInteger) # keeps track of newest tweet of user
def __repr__(self):
return "<User: {}>".format(self.name)
class Tweet(DB.Model):
"""Tweets corresponding to Users"""
id = DB.Column(DB.BigInteger, primary_key=True) # id column
text = DB.Column(DB.Unicode(300)) # tweet text column - allows for emojis
    vect = DB.Column(DB.PickleType, nullable=False) # vectorized tweet text
user_id = DB.Column(DB.BigInteger, DB.ForeignKey("user.id"), nullable=False) # user_id column (corresponding user)
user = DB.relationship("User", backref=DB.backref("tweets", lazy=True)) # creates user link between tweets
def __repr__(self):
return "<Tweet: {}>".format(self.text)
|
py
|
1a57d81f8dad2e7eb44866559c0a06b8c3897f01
|
import re
import sys
import operator
def process_output(command_output):
warnings = {}
regex = r"(.*):\swarning:\s(.*)"
lines = command_output.split("\n")
for line in lines[:-2]:
matches = re.finditer(regex, line)
for matchNum, match in enumerate(matches):
try:
warnings[match.group()] +=1
except KeyError:
warnings[match.group()] =1
time = lines[-2]
return time, warnings
def generate_stats(warnings):
total_count = sum(warnings.values())
sorted_warnings = sorted(warnings.items(), key=operator.itemgetter(1), reverse=True)
return sorted_warnings, total_count
def print_summary(time, warnings):
sorted_warnings, total_count = generate_stats(warnings)
print "START - Compilation warnings count"
print total_count
print "END - Compilation warnings count"
print 'START - Compilation warnings summary'
print 'Time taken to compile:', time, 's'
print 'Total number of warnings:', total_count, '\n'
print 'Below is the list of unique warnings and the number of occurrences of that warning'
for warning, count in sorted_warnings:
print count, ': ', warning
print 'END - Compilation warnings summary'
c_output = open(sys.argv[1],'r')
time, warnings = process_output(c_output.read())
print_summary(time, warnings)
|
py
|
1a57d845c6b92d09507108f5379de2c3618cef47
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.20.7
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1PersistentVolumeClaimSpec(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'access_modes': 'list[str]',
'data_source': 'V1TypedLocalObjectReference',
'resources': 'V1ResourceRequirements',
'selector': 'V1LabelSelector',
'storage_class_name': 'str',
'volume_mode': 'str',
'volume_name': 'str'
}
attribute_map = {
'access_modes': 'accessModes',
'data_source': 'dataSource',
'resources': 'resources',
'selector': 'selector',
'storage_class_name': 'storageClassName',
'volume_mode': 'volumeMode',
'volume_name': 'volumeName'
}
def __init__(self, access_modes=None, data_source=None, resources=None, selector=None, storage_class_name=None, volume_mode=None, volume_name=None, local_vars_configuration=None): # noqa: E501
"""V1PersistentVolumeClaimSpec - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._access_modes = None
self._data_source = None
self._resources = None
self._selector = None
self._storage_class_name = None
self._volume_mode = None
self._volume_name = None
self.discriminator = None
if access_modes is not None:
self.access_modes = access_modes
if data_source is not None:
self.data_source = data_source
if resources is not None:
self.resources = resources
if selector is not None:
self.selector = selector
if storage_class_name is not None:
self.storage_class_name = storage_class_name
if volume_mode is not None:
self.volume_mode = volume_mode
if volume_name is not None:
self.volume_name = volume_name
@property
def access_modes(self):
"""Gets the access_modes of this V1PersistentVolumeClaimSpec. # noqa: E501
AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 # noqa: E501
:return: The access_modes of this V1PersistentVolumeClaimSpec. # noqa: E501
:rtype: list[str]
"""
return self._access_modes
@access_modes.setter
def access_modes(self, access_modes):
"""Sets the access_modes of this V1PersistentVolumeClaimSpec.
AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 # noqa: E501
:param access_modes: The access_modes of this V1PersistentVolumeClaimSpec. # noqa: E501
:type: list[str]
"""
self._access_modes = access_modes
@property
def data_source(self):
"""Gets the data_source of this V1PersistentVolumeClaimSpec. # noqa: E501
:return: The data_source of this V1PersistentVolumeClaimSpec. # noqa: E501
:rtype: V1TypedLocalObjectReference
"""
return self._data_source
@data_source.setter
def data_source(self, data_source):
"""Sets the data_source of this V1PersistentVolumeClaimSpec.
:param data_source: The data_source of this V1PersistentVolumeClaimSpec. # noqa: E501
:type: V1TypedLocalObjectReference
"""
self._data_source = data_source
@property
def resources(self):
"""Gets the resources of this V1PersistentVolumeClaimSpec. # noqa: E501
:return: The resources of this V1PersistentVolumeClaimSpec. # noqa: E501
:rtype: V1ResourceRequirements
"""
return self._resources
@resources.setter
def resources(self, resources):
"""Sets the resources of this V1PersistentVolumeClaimSpec.
:param resources: The resources of this V1PersistentVolumeClaimSpec. # noqa: E501
:type: V1ResourceRequirements
"""
self._resources = resources
@property
def selector(self):
"""Gets the selector of this V1PersistentVolumeClaimSpec. # noqa: E501
:return: The selector of this V1PersistentVolumeClaimSpec. # noqa: E501
:rtype: V1LabelSelector
"""
return self._selector
@selector.setter
def selector(self, selector):
"""Sets the selector of this V1PersistentVolumeClaimSpec.
:param selector: The selector of this V1PersistentVolumeClaimSpec. # noqa: E501
:type: V1LabelSelector
"""
self._selector = selector
@property
def storage_class_name(self):
"""Gets the storage_class_name of this V1PersistentVolumeClaimSpec. # noqa: E501
Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 # noqa: E501
:return: The storage_class_name of this V1PersistentVolumeClaimSpec. # noqa: E501
:rtype: str
"""
return self._storage_class_name
@storage_class_name.setter
def storage_class_name(self, storage_class_name):
"""Sets the storage_class_name of this V1PersistentVolumeClaimSpec.
Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 # noqa: E501
:param storage_class_name: The storage_class_name of this V1PersistentVolumeClaimSpec. # noqa: E501
:type: str
"""
self._storage_class_name = storage_class_name
@property
def volume_mode(self):
"""Gets the volume_mode of this V1PersistentVolumeClaimSpec. # noqa: E501
volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. # noqa: E501
:return: The volume_mode of this V1PersistentVolumeClaimSpec. # noqa: E501
:rtype: str
"""
return self._volume_mode
@volume_mode.setter
def volume_mode(self, volume_mode):
"""Sets the volume_mode of this V1PersistentVolumeClaimSpec.
volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. # noqa: E501
:param volume_mode: The volume_mode of this V1PersistentVolumeClaimSpec. # noqa: E501
:type: str
"""
self._volume_mode = volume_mode
@property
def volume_name(self):
"""Gets the volume_name of this V1PersistentVolumeClaimSpec. # noqa: E501
VolumeName is the binding reference to the PersistentVolume backing this claim. # noqa: E501
:return: The volume_name of this V1PersistentVolumeClaimSpec. # noqa: E501
:rtype: str
"""
return self._volume_name
@volume_name.setter
def volume_name(self, volume_name):
"""Sets the volume_name of this V1PersistentVolumeClaimSpec.
VolumeName is the binding reference to the PersistentVolume backing this claim. # noqa: E501
:param volume_name: The volume_name of this V1PersistentVolumeClaimSpec. # noqa: E501
:type: str
"""
self._volume_name = volume_name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1PersistentVolumeClaimSpec):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1PersistentVolumeClaimSpec):
return True
return self.to_dict() != other.to_dict()
|
py
|
1a57d8c1f64062fe1e8ab16503865e4ff994338f
|
# 1.
# ask the user for a number
# print EVEN (2, 4, 6, etc...) or ODD (1, 3, 5...) accordingly
# 2.
# print all the numbers that can be multiplied by 3
# between 1 ... 1000
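# Possible solution sketches (assumptions: the number is read from stdin, and
# "can be multiplied by 3" means "is divisible by 3"):

# 1.
n = int(input("Enter a number: "))
print("EVEN" if n % 2 == 0 else "ODD")

# 2.
for i in range(1, 1001):
    if i % 3 == 0:
        print(i)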
|
py
|
1a57d9782f2bf9c775567d31c2f65c7a54287921
|
import json
class FindVaccineCenter:
"""
    Filters raw Cowin API response data for centers with available vaccination sessions
"""
def __init__(self, raw_json_data,vaccine):
self.raw_json_data = raw_json_data
self.vaccine = vaccine
    def filter_results(self, response):
        """
        Filters the response object by vaccine type and availability
        :param response: raw response dict from the Cowin centers endpoint
        :return: dict with a "centers" key holding the filtered centers
        """
        filtered_responses = []
        for center in response["centers"]:
            filtered_center = {"center_id": center["center_id"], "name": center["name"],
                               "address": center["address"], "state_name": center["state_name"],
                               "district_name": center["district_name"], "block_name": center["block_name"],
                               "pincode": center["pincode"], "lat": center["lat"], "long": center["long"],
                               "from": center["from"], "to": center["to"], "fee_type": center["fee_type"]}
            if center["fee_type"] == "Paid" and center.get("vaccine_fees", False):
                fee = ""
                for key in center["vaccine_fees"][0]:
                    fee += f"{key.title()} : {center['vaccine_fees'][0][key]}\n "
                filtered_center["fee_type"] = fee
            # Keep only sessions that have capacity and match the requested vaccine
            filtered_sessions = [
                session for session in center["sessions"]
                if session["available_capacity"] > 0 and session["vaccine"] == self.vaccine
            ]
            if filtered_sessions:
                filtered_center["sessions"] = filtered_sessions
                filtered_responses.append(filtered_center)
        if len(filtered_responses) == 0:
            filtered_responses.append({"No centers available ": "("})
        return {"centers": filtered_responses}
def get_data(self):
"""
        The main interface used by external entities; calls the other methods in
        the class, filters the results, and returns the filtered results as JSON.
        :return: str (JSON) of filtered responses, or -1 if no raw data was supplied
"""
if self.raw_json_data != -1:
filtered_response = self.filter_results(self.raw_json_data)
return json.dumps(filtered_response, indent=2)
else:
return -1
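# Minimal usage sketch (the response below is a made-up example containing only the
# fields read by filter_results; real Cowin responses carry many more fields):
#
#   raw = {"centers": [{"center_id": 1, "name": "Clinic", "address": "Main St",
#                       "state_name": "Delhi", "district_name": "New Delhi",
#                       "block_name": "NDMC", "pincode": 110001, "lat": 28, "long": 77,
#                       "from": "09:00", "to": "17:00", "fee_type": "Free",
#                       "sessions": [{"available_capacity": 10, "vaccine": "COVISHIELD"}]}]}
#   finder = FindVaccineCenter(raw, "COVISHIELD")
#   print(finder.get_data())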
|
py
|
1a57daea7bd761a601f0793601d66df6dbc8c898
|
# Copyright (C) 2019-2020 Intel Corporation
#
# SPDX-License-Identifier: MIT
import logging as log
import os
import os.path as osp
from collections import OrderedDict
from datumaro.components.converter import Converter
from datumaro.components.extractor import AnnotationType, DEFAULT_SUBSET_NAME
from .format import YoloPath
def _make_yolo_bbox(img_size, box):
# https://github.com/pjreddie/darknet/blob/master/scripts/voc_label.py
# <x> <y> <width> <height> - values relative to width and height of image
# <x> <y> - are center of rectangle
x = (box[0] + box[2]) / 2 / img_size[0]
y = (box[1] + box[3]) / 2 / img_size[1]
w = (box[2] - box[0]) / img_size[0]
h = (box[3] - box[1]) / img_size[1]
return x, y, w, h
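# Worked example for illustration: for a 100x100 image and box = [x_min, y_min, x_max, y_max]
# = [10, 20, 30, 40], this gives x = (10 + 30) / 2 / 100 = 0.2, y = (20 + 40) / 2 / 100 = 0.3,
# w = (30 - 10) / 100 = 0.2 and h = (40 - 20) / 100 = 0.2.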
class YoloConverter(Converter):
# https://github.com/AlexeyAB/darknet#how-to-train-to-detect-your-custom-objects
DEFAULT_IMAGE_EXT = '.jpg'
def apply(self):
extractor = self._extractor
save_dir = self._save_dir
os.makedirs(save_dir, exist_ok=True)
label_categories = extractor.categories()[AnnotationType.label]
label_ids = {label.name: idx
for idx, label in enumerate(label_categories.items)}
with open(osp.join(save_dir, 'obj.names'), 'w') as f:
f.writelines('%s\n' % l[0]
for l in sorted(label_ids.items(), key=lambda x: x[1]))
subset_lists = OrderedDict()
for subset_name, subset in self._extractor.subsets().items():
if not subset_name or subset_name == DEFAULT_SUBSET_NAME:
subset_name = YoloPath.DEFAULT_SUBSET_NAME
elif subset_name not in YoloPath.SUBSET_NAMES:
log.warn("Skipping subset export '%s'. "
"If specified, the only valid names are %s" % \
(subset_name, ', '.join(
"'%s'" % s for s in YoloPath.SUBSET_NAMES)))
continue
subset_dir = osp.join(save_dir, 'obj_%s_data' % subset_name)
os.makedirs(subset_dir, exist_ok=True)
image_paths = OrderedDict()
for item in subset:
if not item.has_image:
raise Exception("Failed to export item '%s': "
"item has no image info" % item.id)
height, width = item.image.size
image_name = self._make_image_filename(item)
if self._save_images:
if item.has_image and item.image.has_data:
self._save_image(item, osp.join(subset_dir, image_name))
else:
log.warning("Item '%s' has no image" % item.id)
image_paths[item.id] = osp.join('data',
osp.basename(subset_dir), image_name)
yolo_annotation = ''
for bbox in item.annotations:
if bbox.type is not AnnotationType.bbox:
continue
if bbox.label is None:
continue
yolo_bb = _make_yolo_bbox((width, height), bbox.points)
yolo_bb = ' '.join('%.6f' % p for p in yolo_bb)
yolo_annotation += '%s %s\n' % (bbox.label, yolo_bb)
annotation_path = osp.join(subset_dir, '%s.txt' % item.id)
os.makedirs(osp.dirname(annotation_path), exist_ok=True)
with open(annotation_path, 'w') as f:
f.write(yolo_annotation)
subset_list_name = '%s.txt' % subset_name
subset_lists[subset_name] = subset_list_name
with open(osp.join(save_dir, subset_list_name), 'w') as f:
f.writelines('%s\n' % s for s in image_paths.values())
with open(osp.join(save_dir, 'obj.data'), 'w') as f:
f.write('classes = %s\n' % len(label_ids))
for subset_name, subset_list_name in subset_lists.items():
f.write('%s = %s\n' % (subset_name,
osp.join('data', subset_list_name)))
f.write('names = %s\n' % osp.join('data', 'obj.names'))
f.write('backup = backup/\n')
|
py
|
1a57db1eef0f4a6565efc0079eaf4e8ee4e2702d
|
"""
###############################################################################
a "mixin" class for other frames: common methods for canned dialogs,
spawning programs, simple text viewers, etc; this class must be mixed
with a Frame (or a subclass derived from Frame) for its quit method
###############################################################################
"""
from tkinter import *
from tkinter.messagebox import *
from tkinter.filedialog import *
from PP4E.Gui.Tour.scrolledtext import ScrolledText # or tkinter.scrolledtext
from PP4E.launchmodes import PortableLauncher, System # or use multiprocessing
class GuiMixin:
def infobox(self, title, text, *args): # use standard dialogs
return showinfo(title, text) # *args for bkwd compat
def errorbox(self, text):
showerror('Error!', text)
def question(self, title, text, *args):
return askyesno(title, text) # return True or False
def notdone(self):
showerror('Not implemented', 'Option not available')
def quit(self):
ans = self.question('Verify quit', 'Are you sure you want to quit?')
if ans:
Frame.quit(self) # quit not recursive!
def help(self):
self.infobox('RTFM', 'See figure 1...') # override this better
def selectOpenFile(self, file="", dir="."): # use standard dialogs
return askopenfilename(initialdir=dir, initialfile=file)
def selectSaveFile(self, file="", dir="."):
return asksaveasfilename(initialfile=file, initialdir=dir)
def clone(self, args=()): # optional constructor args
new = Toplevel() # make new in-process version of me
myclass = self.__class__ # instance's (lowest) class object
myclass(new, *args) # attach/run instance to new window
def spawn(self, pycmdline, wait=False):
if not wait: # start new process
PortableLauncher(pycmdline, pycmdline)() # run Python progam
else:
System(pycmdline, pycmdline)() # wait for it to exit
def browser(self, filename):
new = Toplevel() # make new window
view = ScrolledText(new, file=filename) # Text with Scrollbar
view.text.config(height=30, width=85) # config Text in Frame
view.text.config(font=('courier', 10, 'normal')) # use fixed-width font
new.title("Text Viewer") # set window mgr attrs
new.iconname("browser") # file text added auto
"""
def browser(self, filename): # if tkinter.scrolledtext
new = Toplevel() # included for reference
text = ScrolledText(new, height=30, width=85)
text.config(font=('courier', 10, 'normal'))
text.pack(expand=YES, fill=BOTH)
new.title("Text Viewer")
new.iconname("browser")
text.insert('0.0', open(filename, 'r').read() )
"""
if __name__ == '__main__':
class TestMixin(GuiMixin, Frame): # standalone test
def __init__(self, parent=None):
Frame.__init__(self, parent)
self.pack()
Button(self, text='quit', command=self.quit).pack(fill=X)
Button(self, text='help', command=self.help).pack(fill=X)
Button(self, text='clone', command=self.clone).pack(fill=X)
Button(self, text='spawn', command=self.other).pack(fill=X)
def other(self):
self.spawn('guimixin.py') # spawn self as separate process
TestMixin().mainloop()
|
py
|
1a57db3282ce89ea3a9210d6d6b1d1169726fdcf
|
# DRUNKWATER TEMPLATE(add description and prototypes)
# Question Title and Description on leetcode.com
# Function Declaration and Function Prototypes on leetcode.com
#440. K-th Smallest in Lexicographical Order
#Given integers n and k, find the lexicographically k-th smallest integer in the range from 1 to n.
#Note: 1 ≤ k ≤ n ≤ 109.
#Example:
#Input:
#n: 13 k: 2
#Output:
#10
#Explanation:
#The lexicographical order is [1, 10, 11, 12, 13, 2, 3, 4, 5, 6, 7, 8, 9], so the second smallest number is 10.
#class Solution:
# def findKthNumber(self, n, k):
# """
# :type n: int
# :type k: int
# :rtype: int
# """
# Time Is Money
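# A possible solution sketch (not part of the original template): walk the implicit
# 10-ary prefix tree, counting how many numbers in [1, n] fall under each prefix.
def findKthNumber(n, k):
    def count_with_prefix(prefix):
        # Count integers in [1, n] whose decimal representation starts with `prefix`.
        count, cur, nxt = 0, prefix, prefix + 1
        while cur <= n:
            count += min(n + 1, nxt) - cur
            cur *= 10
            nxt *= 10
        return count

    cur = 1
    k -= 1                      # `cur` itself is the first number in its subtree
    while k > 0:
        c = count_with_prefix(cur)
        if c <= k:
            k -= c              # skip the whole subtree rooted at `cur`
            cur += 1            # move to the next sibling prefix
        else:
            cur *= 10           # descend into the subtree
            k -= 1
    return cur

# findKthNumber(13, 2) == 10, matching the example above.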
|
py
|
1a57db4a212f1cb243bba6290eaaf1e00469164a
|
import pandas as pd
from pathlib import Path
import json
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.path import Path as mplPath
import skimage.io
def load_annotated_dataset(csv_file_path, images_directory_path):
csv_path = Path(csv_file_path)
df = pd.read_csv(csv_path)
originals = []
masks = []
i = 0
for fn in df["filename"].unique():
i += 1
img_file_path = f"{images_directory_path}/{fn}"
img = skimage.io.imread(img_file_path, as_gray=True)
        img_mask = np.zeros([img.shape[0], img.shape[1]])
dirty = False
for region in df[df["filename"] == fn].region_shape_attributes:
region_shape_attributes = json.loads(region)
# I found out, that CSV contains some strange areas
if "all_points_x" not in region_shape_attributes or "all_points_y" not in region_shape_attributes:
continue
plt.imshow(img, cmap="gray")
polygon_x = region_shape_attributes["all_points_x"]
polygon_y = region_shape_attributes["all_points_y"]
polygon = list(zip(polygon_y, polygon_x))
poly_path = mplPath(polygon)
x, y = np.mgrid[
: img.shape[0], : img.shape[1]
]
coors = np.hstack(
(x.reshape(-1, 1), y.reshape(-1, 1))
)
mask = poly_path.contains_points(coors)
mask = mask.reshape([img.shape[0], img.shape[1]])
dirty = True
img_mask = np.logical_xor(img_mask, mask)
if dirty:
originals.append(img)
plt.imshow(img, cmap="gray")
plt.show()
masks.append(img_mask)
plt.imshow(img_mask, cmap="gray")
plt.show()
return originals, masks
def cut_images(images, width, height, xstep, ystep):
cut_array = []
for img in images:
for x in range(0, img.shape[1]-width, xstep):
for y in range(0, img.shape[0]-height, ystep):
cut = img[y: y + height, x: x + width]
cut_array.append(cut)
return cut_array
def load_image(filepath):
img = skimage.io.imread(filepath, as_gray=True)
return img
|
py
|
1a57db6a8eef4ce73ae9c67221a52d6e05c3ef35
|
import tempfile
from unittest import TestCase
import peloton_bloomfilters
class TestDivideByMultiple(TestCase):
def assert_divides(self, D):
multiplier, pre_shift, post_shift, increment = peloton_bloomfilters._compute_unsigned_magic_info(D, 64)
n = 1
while n < D**3:
n *= 1.41
N = int(n)
N += increment
N = N >> pre_shift
if multiplier != 1:
N *= multiplier
N = N >> 64
N = N % (2 ** 64)
N = N >> post_shift
self.assertEquals(N, int(n) / D)
def test(self):
for x in xrange(1, 1000):
print x
self.assert_divides(x)
class BloomFilterCase(object):
def test_add(self):
self.assertEqual(0, len(self.bloomfilter))
self.assertNotIn("5", self.bloomfilter)
self.assertFalse(self.bloomfilter.add("5"))
self.assertEqual(1, len(self.bloomfilter))
self.assertIn("5", self.bloomfilter)
def test_capacity(self):
for i in xrange(50):
self.assertFalse(self.bloomfilter.add(i))
for i in xrange(50):
self.assertIn(i, self.bloomfilter)
self.assertTrue(self.bloomfilter.add(50))
for i in xrange(50):
self.assertNotIn(i, self.bloomfilter)
self.assertIn(50, self.bloomfilter)
class TestBloomFilter(TestCase, BloomFilterCase):
def setUp(self):
self.bloomfilter = peloton_bloomfilters.BloomFilter(50, 0.001)
class TestThreadSafeBloomFilter(TestCase, BloomFilterCase):
def setUp(self):
self.bloomfilter = peloton_bloomfilters.ThreadSafeBloomFilter(50, 0.001)
class TestSharedMemoryBloomFilter(TestCase, BloomFilterCase):
def setUp(self):
self.fd = tempfile.NamedTemporaryFile()
self.bloomfilter = peloton_bloomfilters.SharedMemoryBloomFilter(self.fd.name, 50, 0.001)
def tearDown(self):
self.fd.close()
def test_sharing(self):
print "Test started\n"
bf1 = self.bloomfilter
bf2 = peloton_bloomfilters.SharedMemoryBloomFilter(self.fd.name, 50, 0.001)
self.assertEquals(len(bf2), 0)
self.assertNotIn(1, bf1)
self.assertNotIn(1, bf2)
bf1.add(1)
self.assertIn(1, bf1)
self.assertIn(1, bf2)
bf2.add(2)
self.assertIn(2, bf1)
self.assertIn(2, bf2)
def test_capacity_in_sync(self):
bf1 = self.bloomfilter
bf2 = peloton_bloomfilters.SharedMemoryBloomFilter(self.fd.name, 50, 0.001)
bfs = [bf1, bf2]
for i in xrange(50):
bfs[i % 2].add(i)
for i in xrange(50):
self.assertIn(i, bf1)
self.assertIn(i, bf2)
self.assertTrue(bf2.add(50))
for i in xrange(50):
self.assertNotIn(i, bf1)
self.assertNotIn(i, bf2)
self.assertIn(50, bf1)
self.assertIn(50, bf2)
|
py
|
1a57dc0e3424dc80e4f2a4e6702468079146dde7
|
/home/runner/.cache/pip/pool/9e/48/38/c6f60827bedacc0441f5955c7a40fcfa1ac3439b28fdaa1773deb1f24b
|
py
|
1a57dc24ed3261c6fd133bec8f1a53b4153c9c1c
|
# coding=utf-8
#
# created by kpe on 15.Mar.2019 at 15:28
#
from __future__ import division, absolute_import, print_function
from .version import __version__
from .attention import AttentionLayer
from .layer import Layer
from .model import BertModelLayer
from .loader import StockBertConfig, load_stock_weights, params_from_pretrained_ckpt
from .tokenization import FullTokenizer
|
py
|
1a57dc52fb6f77f63b9765d0c490381b89dba9a1
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates some swift wrapper from some ops description protobuf."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import six
import tensorflow as tf
from tensorflow.core.framework import types_pb2
from tensorflow.python.framework import c_api_util
flags = tf.flags
FLAGS = flags.FLAGS
flags.DEFINE_string(
'api_def_path',
None,
'path to the api_def directory, e.g. tensorflow/core/api_def/base_api')
flags.DEFINE_string(
'output_path',
None,
'path for the generated swift file')
_WARNING = """// !!! THIS CODE IS AUTOMATICALLY GENERATED, DO NOT EDIT BY HAND !!!
//
"""
_HEADER = """// Copyright 2018-19 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
"""
_OUTPUT_FILE = 'RawOpsGenerated.swift'
_RENAMED_KEYWORDS = {
'': 'empty',
'in': 'in_',
'var': 'var_',
'where': 'where_',
'if': 'if_',
'for': 'for_',
'while': 'while_',
'switch': 'switch_',
'protocol': 'protocol_',
'init': 'init_'}
_TYPE_PROTOCOLS = [
(set(), 'TensorFlowScalar'),
({types_pb2.DT_UINT8,
types_pb2.DT_UINT16,
types_pb2.DT_UINT32,
types_pb2.DT_UINT64}, 'UnsignedInteger & TensorFlowScalar'),
({types_pb2.DT_UINT8,
types_pb2.DT_UINT16,
types_pb2.DT_UINT32,
types_pb2.DT_UINT64,
types_pb2.DT_INT8,
types_pb2.DT_INT16,
types_pb2.DT_INT32,
types_pb2.DT_INT64}, 'BinaryInteger & TensorFlowScalar'),
({types_pb2.DT_FLOAT,
types_pb2.DT_DOUBLE,
types_pb2.DT_HALF,
types_pb2.DT_BFLOAT16}, 'FloatingPoint & TensorFlowScalar'),
({types_pb2.DT_UINT8,
types_pb2.DT_UINT16,
types_pb2.DT_UINT32,
types_pb2.DT_UINT64,
types_pb2.DT_INT8,
types_pb2.DT_INT16,
types_pb2.DT_INT32,
types_pb2.DT_INT64,
types_pb2.DT_FLOAT,
types_pb2.DT_DOUBLE,
types_pb2.DT_HALF,
types_pb2.DT_BFLOAT16}, 'Numeric & TensorFlowScalar')]
_SWIFTIFIED_TYPES = {
types_pb2.DT_FLOAT: 'Float',
types_pb2.DT_DOUBLE: 'Double',
types_pb2.DT_INT32: 'Int32',
types_pb2.DT_UINT8: 'UInt8',
types_pb2.DT_INT16: 'Int16',
types_pb2.DT_INT8: 'Int8',
types_pb2.DT_INT64: 'Int64',
types_pb2.DT_BOOL: 'Bool',
types_pb2.DT_UINT16: 'UInt16',
types_pb2.DT_UINT32: 'UInt32',
types_pb2.DT_UINT64: 'UInt64'}
_SWIFTIFIED_ATTR_TYPES = {
'int': 'Int64',
'float': 'Double',
'bool': 'Bool',
'string': 'String',
'type': 'TensorDataType',
'shape': 'TensorShape?',
'list(int)': '[Int32]',
'list(float)': '[Double]',
'list(bool)': '[Bool]',
'list(string)': '[String]',
'list(type)': '[TensorDataType]',
'list(shape)': '[TensorShape?]'}
_OMITTED_PARAMETER_NAMES = {
'x', 'y', 'a', 'b', 'input', 'tensor', 'values'}
_START_COMMENT = '///'
class UnableToGenerateCodeError(Exception):
def __init__(self, details):
self.details = details
super(UnableToGenerateCodeError, self).__init__()
def __str__(self):
return self.details
class Op(object):
def __init__(self, op_def, api_def, enum_store, string_valued=False):
self.op_def = op_def
self.api_def = api_def
self.enum_store = enum_store
self.string_valued = string_valued
self.inferred_counts = dict()
# Collect all the input and output arguments.
self.input_args = [
Argument(arg_def, op=self)
for arg_def in self.op_def.input_arg]
self.output_args = [
Argument(arg_def, op=self)
for arg_def in self.op_def.output_arg]
# Collect all attributes.
self.attrs = [
Attribute(attr, op=self)
for attr in op_def.attr]
self.type_attrs = [
attr for attr in self.attrs
if attr.is_type_attr]
def swift_function(self):
return '''
{documentation}@inlinable @inline(__always)
public static func {name}{generics}({input_args}
){return_type} {{
{body}
}}'''.format(
documentation=self._swift_documentation(),
name=self._swift_name(),
generics=self._swift_generics(),
input_args=self._swift_input_args(),
return_type=self._swift_return_type(),
body=self._swift_body())
def _swift_documentation(self):
def comment_block(text, indent_level):
"""Returns a commented block of text with some specified indentation."""
def indent(line_index):
if indent_level == 0:
return ''
if line_index:
return ' ' * indent_level
return ' ' * (indent_level - 1) + '- '
return ''.join([
(_START_COMMENT + ' ' + indent(line_index) + line + '\n'
if line else _START_COMMENT + '\n')
for line_index, line in enumerate(text.split('\n'))
])
def append_list(doc, args, arg_type):
"""Returns the documentation for lists of inputs/outputs/attributes."""
args = [arg for arg in args if arg.description]
if len(args) == 1:
block = '%s %s: %s' % (arg_type, args[0].name, args[0].description)
doc += _START_COMMENT + '\n'
doc += comment_block(block, indent_level=1)
elif len(args) > 1:
doc += '%s\n%s - %ss:\n' % (_START_COMMENT, _START_COMMENT, arg_type)
for arg in args:
block = '%s: %s' % (arg.name, arg.description)
doc += comment_block(block, indent_level=2)
return doc
doc = ''
if self.api_def.summary:
doc = comment_block(self.api_def.summary, indent_level=0)
if self.api_def.description:
doc += _START_COMMENT + '\n'
doc += comment_block(self.api_def.description, indent_level=0)
doc = append_list(doc, self.api_def.in_arg, 'Parameter')
doc = append_list(doc, self.api_def.attr, 'Attr')
doc = append_list(doc, self.api_def.out_arg, 'Output')
if doc and not doc.endswith('\n'):
doc = doc + '\n'
return doc
def _swift_name(self):
return swift_compatible_identifier(
self.op_def.name[0].lower() + self.op_def.name[1:])
def _swift_generics(self):
constraints = [
attr.generic_constraints(self.string_valued)
for attr in self.attrs]
constraints = [c for c in constraints if c is not None]
if len(constraints) == 1:
return '<' + ', '.join(constraints) + '>'
if len(constraints) > 1:
return '<\n ' + ',\n '.join(constraints) + '\n>'
return ''
def _swift_input_args(self):
args = ''
for arg in self.input_args:
args += '\n %s: %s,' % (arg.swift_arg_name, str(arg.swift_type(self.string_valued)))
for attr in self.attrs:
if not attr.is_inferred_type_attr and not attr.is_inferred_number_attr:
args += '\n %s: %s%s,' % (attr.swift_arg_name, attr.swift_type, attr.swift_default)
if args != '':
args = args[:-1]
return args
def _swift_return_type(self):
return_type = ''
if len(self.output_args) == 1:
return_type = ' -> ' + str(self.output_args[0].swift_type(self.string_valued))
elif len(self.output_args) > 1:
named_types = [
arg.swift_name + ': ' + str(arg.swift_type(self.string_valued))
for arg in self.output_args]
return_type = ' -> (' + ', '.join(named_types) + ')'
return return_type
def _swift_body(self):
setters = []
for attr in self.attrs:
setters.append(attr.swift_setter(self.string_valued))
for arg in self.input_args:
setters.append(arg.swift_setter())
counts = ['Int({})'.format(arg.swift_count) for arg in self.output_args]
if len(self.output_args) == 0:
body = 'let nOutputs = 0'
else:
body = 'let nOutputs = {}'.format(' + '.join(counts))
body += '\n let op = makeOp("{}", nOutputs)\n '.format(self.op_def.name)
body += '\n '.join(setters)
if len(self.output_args) == 0:
return body + '\n op.execute()'
body += '\n return op.execute({})'.format(', '.join(counts))
return body
class Argument(object):
def __init__(self, arg_def, op):
self.arg_def = arg_def
self.op = op
    self.is_list = arg_def.number_attr != '' \
        or arg_def.type_list_attr != ''
@property
def name(self):
return self.arg_def.name
@property
def swift_name(self):
return swift_compatible_identifier(
self.name[0].lower() + self.name[1:])
@property
def swift_arg_name(self):
name = self.swift_name
if name in _OMITTED_PARAMETER_NAMES:
name = '_ ' + name
return name
def swift_type(self, string_valued=False):
return self.type.swift_type(
string_valued=self.allows_string and string_valued)
def swift_setter(self):
if self.is_list:
return 'op.addInputList({})'.format(self.swift_name)
else:
return 'op.addInput({})'.format(self.swift_name)
@property
def swift_count(self):
number_attr = self.arg_def.number_attr
if number_attr and number_attr in self.op.inferred_counts:
return self.op.inferred_counts[number_attr]
if self.arg_def.type_list_attr:
return self.op.inferred_counts[self.arg_def.type_list_attr]
return '1'
@property
def type(self):
number = self.arg_def.number_attr
if self.arg_def.type_attr:
type_attr = next(
attr for attr in self.op.type_attrs
if attr.name == self.arg_def.type_attr)
return Type('Tensor', base_type=type_attr.swift_name, number=number)
if self.arg_def.type_list_attr:
type_attr = next(
attr for attr in self.op.type_attrs
if attr.name == self.arg_def.type_list_attr)
# There are never any numbered type lists.
return Type(type_attr.swift_name)
if self.arg_def.type in _SWIFTIFIED_TYPES:
base_type = _SWIFTIFIED_TYPES[self.arg_def.type]
return Type('Tensor', base_type=base_type, number=number)
if self.arg_def.type == types_pb2.DT_STRING:
return Type('Tensor', base_type='String', number=number)
if self.arg_def.type == types_pb2.DT_RESOURCE:
return Type('ResourceHandle', number=number)
if self.arg_def.type == types_pb2.DT_VARIANT:
return Type('VariantHandle', number=number)
raise UnableToGenerateCodeError(
'Unsupported type for argument "%s".' % self.name)
@property
def allows_string(self):
if self.arg_def.type_attr:
type_attr = next(
attr for attr in self.op.type_attrs
if attr.name == self.arg_def.type_attr)
return types_pb2.DT_STRING in type_attr.attr_def.allowed_values.list.type
return False
class Type(object):
def __init__(self, kind, base_type=None, number=None):
self.kind = kind
self.base_type = base_type
self.number = number
@property
def count(self):
return self.number if self.number else 1
def swift_type(self, string_valued=False):
if self.kind == 'Tensor':
if self.base_type == 'String' or string_valued:
name = 'StringTensor'
else:
name = 'Tensor<' + self.base_type + '>'
elif self.kind == 'TensorHandle':
name = 'TensorHandle<' + self.base_type + '>'
elif self.kind == 'ResourceHandle':
name = 'ResourceHandle'
elif self.kind == 'VariantHandle':
name = 'VariantHandle'
else:
name = self.kind
return ('[%s]' % name) if self.number else name
class Attribute(object):
"""Represents information extracted from op `type` and `list(type)` attributes."""
def __init__(self, attr_def, op):
self.attr_def = attr_def
self.op = op
self.is_type_attr = attr_def.type in ['type', 'list(type)']
# Check whether the value of this attribute can be
# inferred automatically (this only applies to
# type-valued attributes).
input_args = list(op.op_def.input_arg)
output_args = list(op.op_def.output_arg)
input_arg_type_attrs = set(
[arg.type_attr for arg in input_args] +
[arg.type_list_attr for arg in input_args])
output_arg_type_attrs = set(
[arg.type_attr for arg in output_args] +
[arg.type_list_attr for arg in output_args])
arg_type_attrs = input_arg_type_attrs.union(output_arg_type_attrs)
self.is_inferred_type_attr = attr_def.name in arg_type_attrs
self.is_output_type_attr = attr_def.name in output_arg_type_attrs
self.is_func_attr = self.attr_def.type == 'func'
# We use this for obtaining the `_typeList` property.
self.input_arg = None
self.is_inferred_number_attr = False
for arg in self.op.input_args:
if self.attr_def.name in [arg.arg_def.type_attr,
arg.arg_def.type_list_attr] or \
self.attr_def.name == arg.arg_def.number_attr:
self.input_arg = arg
self.is_inferred_number_attr = True
break
# The following properties are only relevant for
# non-inferred-type-valued attributes.
self._swift_type = ''
self._use_enum = False
if not self.is_inferred_type_attr and not self.is_func_attr:
if self.attr_def.type not in _SWIFTIFIED_ATTR_TYPES:
raise UnableToGenerateCodeError(
'Unsupported type for attribute "%s".'
% self.attr_def.name)
# Get the arg type.
self._swift_type = _SWIFTIFIED_ATTR_TYPES[self.attr_def.type]
# Check if the arg is an enum type.
self._use_enum = False
if self.attr_def.type == 'string':
allowed_values = tuple(sorted(self.attr_def.allowed_values.list.s))
if allowed_values:
self._swift_type = self.op.enum_store.maybe_add(
allowed_values, self.attr_def.name)
self._use_enum = True
if self.is_func_attr:
input_type = self.swift_name.capitalize() + 'In'
output_type = self.swift_name.capitalize() + 'Out'
self._swift_type = '({}) -> {}'.format(input_type, output_type)
@property
def name(self):
return self.attr_def.name
@property
def swift_name(self):
if self.is_inferred_type_attr:
return swift_compatible_identifier(
self.name, capitalize=True)
return swift_compatible_identifier(
self.name[0].lower() + self.name[1:])
@property
def swift_arg_name(self):
name = self.swift_name
if name in _OMITTED_PARAMETER_NAMES:
name = '_ ' + name
return name
@property
def swift_type(self):
return self._swift_type
@property
def swift_default(self):
def swift_float(f):
if f == float('inf'): return 'Double.infinity'
if f == float('-inf'): return '-Double.infinity'
return '%g' % f
if not self.is_inferred_type_attr and self.attr_def.default_value:
default_value = self.attr_def.default_value
if default_value.HasField('b'):
default_value = str(default_value.b).lower()
elif default_value.HasField('i'):
default_value = str(default_value.i)
elif default_value.HasField('f'):
default_value = swift_float(default_value.f)
elif default_value.HasField('s') and default_value.s:
s = str(default_value.s, encoding='utf-8')
default_value = '.' + swift_compatible_identifier(s.lower()) \
if self._use_enum else '"' + s + '"'
elif default_value.HasField('list'):
if default_value.list.i:
default_values = [str(s) for s in default_value.list.i]
default_value = '[' + ', '.join(default_values) + ']'
elif default_value.list.f:
default_values = [swift_float(s) for s in default_value.list.f]
default_value = '[' + ', '.join(default_values) + ']'
else:
default_value = None
else:
default_value = None
if default_value is not None:
default_value = default_value.replace("\t", "\\t")
return ' = ' + default_value
return ''
def swift_setter(self, string_valued=False):
# Inferred-type-valued attributes.
if self.is_inferred_type_attr:
name = self.swift_name
if self.input_arg is not None:
name = self.input_arg.swift_name
if self.attr_def.type == 'list(type)' or self.is_inferred_number_attr:
self.op.inferred_counts[self.name] = name + '._typeList.count'
if self.attr_def.type == 'list(type)':
return 'op.updateAttribute("{}", {}._typeList)'.format(self.name, name)
if string_valued and self.allows_string:
return 'op.updateAttribute("{}", TensorDataType(TF_STRING))'.format(self.name)
return 'op.updateAttribute("{}", {}.tensorFlowDataType)'.format(self.name, self.swift_name)
if self.is_inferred_number_attr:
# The following is used for inferring the lengths of output lists.
self.op.inferred_counts[self.name] = self.input_arg.swift_name + '.count'
return 'op.updateAttribute("{}", {}.count)'.format(self.name, self.input_arg.swift_name)
if self.attr_def.type == 'int':
# The following is used for inferring the lengths of output lists.
self.op.inferred_counts[self.name] = self.swift_name
# Remaining attributes.
value = self.swift_name + '.cName' if self._use_enum else self.swift_name
return 'op.updateAttribute("{}", {})'.format(self.name, value)
def generic_constraints(self, string_valued):
# We use this for obtaining the `_typeList` property.
input_arg = None
if self.attr_def.type == 'list(type)':
for arg in self.op.input_args:
if self.attr_def.name in [arg.arg_def.type_attr,
arg.arg_def.type_list_attr]:
input_arg = arg
break
if self.is_func_attr:
input_type = self.swift_name.capitalize() + 'In'
output_type = self.swift_name.capitalize() + 'Out'
return '{}: TensorGroup,\n {}: TensorGroup'.format(
input_type, output_type)
if not self.is_inferred_type_attr:
return None
protocol = None
if self.attr_def.type == 'list(type)' and input_arg is None:
protocol = 'TensorGroup'
elif self.attr_def.type == 'list(type)':
protocol = 'TensorArrayProtocol'
elif self.attr_def.type == 'type':
if string_valued and self.allows_string:
return None
protocol = 'TensorFlowScalar'
allowed_types = set(self.attr_def.allowed_values.list.type)
allowed_types &= set(_SWIFTIFIED_TYPES.keys())
for types, protocol_name in _TYPE_PROTOCOLS:
if allowed_types.issubset(types):
protocol = protocol_name
break
if protocol is not None:
return self.swift_name + ': ' + protocol
return None
@property
def allows_string(self):
return types_pb2.DT_STRING in self.attr_def.allowed_values.list.type
def swift_compatible_identifier(s, capitalize=False):
"""Transforms an identifier to be more swift idiomatic."""
if s in _RENAMED_KEYWORDS:
return _RENAMED_KEYWORDS[s]
if capitalize:
s = s.capitalize()
without_underscores = []
capitalize_next_char = False
for c in s:
if c == '-' or c == '_' or c == '(' or c == ')':
capitalize_next_char = True
elif capitalize_next_char:
capitalize_next_char = False
without_underscores.append(c.upper())
else:
without_underscores.append(c)
return ''.join(without_underscores)
class EnumStore(object):
"""Stores details on string attributes represented as swift enums."""
def __init__(self):
self._entries = {}
self._type_names = set()
self._counter = 1
def enum_codes(self):
"""Generates the swift code for enums."""
codes = []
entries = list(six.iteritems(self._entries))
for allowed_values, type_name in sorted(entries, key=lambda x: x[1]):
allowed_values = [str(a, encoding='utf-8') for a in allowed_values]
codes.append(
# FIXME: Re-add `@_frozen` after SR-9739 is resolved.
# https://bugs.swift.org/browse/SR-9739
# '@_frozen\n' +
'// @_frozen // SR-9739\n' +
'public enum {} {{\n'.format(type_name) +
'\n'.join([' case {}'.format(
swift_compatible_identifier(a.lower()))
for a in allowed_values]) +
'\n\n' +
' @inlinable\n' +
' var cName: String {\n' +
' @inline(__always)\n' +
' get {\n' +
' switch self {\n' +
'\n'.join([' case .{}: return "{}"'.format(
swift_compatible_identifier(a.lower()), a)
for a in allowed_values]) +
'\n' +
' }\n' +
' }\n' +
' }\n' +
'}')
return codes
def maybe_add(self, allowed_values, attr_def_name):
if allowed_values in self._entries:
return self._entries[allowed_values]
type_name = swift_compatible_identifier(attr_def_name, capitalize=True)
while type_name in self._type_names:
type_name += str(self._counter)
self._counter += 1
self._type_names.add(type_name)
self._entries[allowed_values] = type_name
return type_name
def main(argv):
del argv # Unused.
if FLAGS.output_path is None:
raise ValueError('No output_path has been set')
api_def_map = c_api_util.ApiDefMap()
op_codes = []
enum_store = EnumStore()
op_names = api_def_map.op_names()
if FLAGS.api_def_path is not None:
for op_name in op_names:
path = os.path.join(FLAGS.api_def_path, 'api_def_%s.pbtxt' % op_name)
if not tf.gfile.Exists(path):
continue
with tf.gfile.Open(path, 'r') as fobj:
data = fobj.read()
try:
api_def_map.put_api_def(data)
except Exception as e:
print('Cannot load api def for %s: %s' % (op_name, str(e)))
num_generated = 0
for op_name in sorted(op_names):
try:
if op_name[0] == '_': continue
op_def = api_def_map.get_op_def(op_name)
if any(a.is_ref for a in op_def.input_arg):
raise UnableToGenerateCodeError('has ref-valued input')
if any(a.is_ref for a in op_def.output_arg):
raise UnableToGenerateCodeError('has ref-valued output')
api_def = api_def_map.get_api_def(bytes(op_name, 'utf8'))
# It would be nicer to handle `StringTensor` in a more
# general way by having `String` conform to `TensorFlowScalar`.
default_op = Op(op_def, api_def, enum_store, string_valued=False)
string_valued_op = Op(op_def, api_def, enum_store, string_valued=True)
default_code = default_op.swift_function()
string_valued_code = string_valued_op.swift_function()
op_codes.append(default_code)
if string_valued_code != default_code:
op_codes.append(string_valued_code)
num_generated += 1
except UnableToGenerateCodeError as e:
print('Cannot generate code for %s: %s' % (op_name, e.details))
print('Generated code for %d/%d ops.' % (num_generated, len(op_names)))
version_codes = [
'static let generatedTensorFlowVersion = "%s"' % tf.__version__,
'static let generatedTensorFlowGitVersion = "%s"' % tf.__git_version__]
swift_code = (
_WARNING +
_HEADER +
'import CTensorFlow\n\n' +
'@inlinable @inline(__always)\n' +
'func makeOp(_ name: String, _ nOutputs: Int)'+
' -> TFTensorOperation {\n' +
' _ExecutionContext.makeOp(name, nOutputs)\n' +
'}\n'+
'\npublic enum Raw {\n\n' +
'\n'.join(version_codes) +
'\n\n' +
'\n\n'.join(enum_store.enum_codes()) +
'\n\n' +
'\n'.join(op_codes) +
'\n\n}\n')
with tf.gfile.Open(FLAGS.output_path, 'w') as f:
f.write(swift_code)
if __name__ == '__main__':
tf.app.run(main)
|
py
|
1a57dc5ca1d93535262b522258a5fa8e79a9bfbc
|
import itertools
from json import loads
from pathlib import Path
import sys
def encode_msg_text_for_github(msg):
# even though this is probably url quoting, we match the implementation at
# https://github.com/actions/toolkit/blob/af821474235d3c5e1f49cee7c6cf636abb0874c4/packages/core/src/command.ts#L36-L94
return msg.replace('%', '%25').replace('\r', '%0D').replace('\n', '%0A')
def format_msg(msg):
# Formatted for https://github.com/actions/toolkit/blob/master/docs/commands.md#log-level
# mapping between lean severity levels and github levels.
# github does not support info levels, which are emitted by `#check` etc:
# https://docs.github.com/en/actions/reference/workflow-commands-for-github-actions#setting-a-debug-message
severity_map = {'information': 'warning'}
severity = msg.get('severity')
severity = severity_map.get(severity, severity)
# We include the filename / line number information as both message and metadata, to ensure
# that github shows it.
msg_text = f"{msg['file_name']}:{msg.get('pos_line')}:{msg.get('pos_col')}:\n{msg.get('text')}"
msg_text = encode_msg_text_for_github(msg_text)
return f"::{severity} file={msg['file_name']},line={msg.get('pos_line')},col={msg.get('pos_col')}::{msg_text}"
def write_and_print_noisy_files(noisy_files):
with open('src/.noisy_files', 'w') as f:
for file in noisy_files:
f.write(file + '\n')
print(file)
noisy_files = set()
for line in sys.stdin:
msg = loads(line)
print(format_msg(msg))
if msg.get('severity') == 'error':
if len(noisy_files) > 0:
print("Also, the following files were noisy:")
write_and_print_noisy_files(noisy_files)
sys.exit(1)
else:
noisy_files.add(str(Path(msg['file_name']).relative_to(Path.cwd())))
if len(noisy_files) > 0:
print("Build succeeded, but the following files were noisy:")
write_and_print_noisy_files(noisy_files)
sys.exit(1)
|
py
|
1a57dcc62c8899705f265c7b3e7913fd63d7d8c3
|
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
from logging.handlers import RotatingFileHandler
LOG_FILE = '/var/log/enodebd.log'
MAX_BYTES = 1024 * 1024 * 10 # 10MB
BACKUP_COUNT = 5 # 10MB, 5 files, 50MB total
class EnodebdLogger:
"""
EnodebdLogger backs up debug logs with a RotatingFileHandler.
Debug logs will be propagated to root level if the root logger is set to
debug level.
"""
_LOGGER = logging.getLogger(__name__) # type: logging.Logger
@staticmethod
def init() -> None:
        if logging.root.level != logging.DEBUG:
EnodebdLogger._LOGGER.propagate = False
handler = RotatingFileHandler(
LOG_FILE,
maxBytes=MAX_BYTES,
backupCount=BACKUP_COUNT,
)
formatter = logging.Formatter(fmt='%(asctime)s %(levelname)s %(message)s')
handler.setFormatter(formatter)
EnodebdLogger._LOGGER.addHandler(handler)
EnodebdLogger._LOGGER.setLevel(logging.DEBUG)
@staticmethod
def debug(msg, *args, **kwargs):
EnodebdLogger._LOGGER.debug(msg, *args, **kwargs)
@staticmethod
def info(msg, *args, **kwargs):
if not EnodebdLogger._LOGGER.propagate:
logging.info(msg, *args, **kwargs)
EnodebdLogger._LOGGER.info(msg, *args, **kwargs)
@staticmethod
def warning(msg, *args, **kwargs):
if not EnodebdLogger._LOGGER.propagate:
logging.warning(msg, *args, **kwargs)
EnodebdLogger._LOGGER.warning(msg, *args, **kwargs)
@staticmethod
def error(msg, *args, **kwargs):
if not EnodebdLogger._LOGGER.propagate:
logging.error(msg, *args, **kwargs)
EnodebdLogger._LOGGER.error(msg, *args, **kwargs)
@staticmethod
def exception(msg, *args, **kwargs):
if not EnodebdLogger._LOGGER.propagate:
logging.exception(msg, *args, **kwargs)
EnodebdLogger._LOGGER.exception(msg, *args, **kwargs)
@staticmethod
def critical(msg, *args, **kwargs):
if not EnodebdLogger._LOGGER.propagate:
logging.critical(msg, *args, **kwargs)
EnodebdLogger._LOGGER.critical(msg, *args, **kwargs)
|
py
|
1a57dd5e112f74844164995eb460441636236e25
|
#!/usr/bin/env python
__author__ = "bt3"
def find_max_unimodal_array(A):
if len(A) <= 2 :
return None
left = 0
right = len(A)-1
while right > left +1:
mid = (left + right)//2
if A[mid] > A[mid-1] and A[mid] > A[mid+1]:
return A[mid]
elif A[mid] > A[mid-1] and A[mid] < A[mid+1]:
left = mid
else:
right = mid
return None
def test_find_max_unimodal_array():
seq = [1, 2, 5, 6, 7, 10, 12, 9, 8, 7, 6]
assert(find_max_unimodal_array(seq) == 12)
print('Tests passed!')
if __name__ == '__main__':
test_find_max_unimodal_array()
|
py
|
1a57dd8307b9ff99f1fff1ef3f4d5973a499c953
|
'''
Yet another thread pool module.
A thread pool consists of a set of worker threads for performing time consuming
operations concurrently. A minimal API provides a way to submit jobs (requests),
without waiting for them to finish, and get the results back in some way once
they are available. The thread pool is responsible for assigning jobs to the
worker threads by putting them in a job queue, where they are picked up by the
next available worker. The worker then performs the assigned job in the background
and puts the processed request in an output queue.
The main novelty of this module compared to other threadpool recipes is the way
results are returned to the client. Instead of providing a callback to post-process
the computed results, a L{generator <ThreadPool.iterProcessedJobs>} is used for
popping the processed jobs from the output queue and yielding them back to the
caller. The processed jobs encapsulate the computed result (or raised exception)
and can be used transparently by the calling thread, as if the computation didn't
take place in a different thread. This is more flexible than the callback-based
approach since it gives full control to the caller of when to ask for a result,
how long to wait for it and what to do with it once it is fetched.
After a C{JobRequest} is L{added <ThreadPool.addJob>} to a L{ThreadPool}, it can
be in one of the following states:
1. Unassigned: The request is still in the input queue, no worker thread
has been assigned to it yet. There are two substates:
- Pending: The job is waiting its turn to be picked up by a L{Worker}.
- Cancelled: The job has been L{cancelled <ThreadPool.cancelJob>} and,
although it still occupies a slot in the input queue, it will be
discarded when a L{Worker} picks it up.
2. In progress: The job has been popped by the input queue by a L{Worker} and
is in the process of being executed.
3. Processed: The job has been processed (successfully or not) and has been
added to the output queue, ready to be returned.
4. Returned: The job has been returned to the client, either by
L{ThreadPool.iterProcessedJobs} or L{ThreadPool.processedJobs} and is no
longer associated with the threadpool.
A job in state 1.a, 2 or 3 is said to be I{active}.
B{Acknowledgements:} The basic concept and the initial implementation was taken
from the U{threadpool module of Christopher Arndt
<http://www.chrisarndt.de/en/software/python/threadpool/>}, who in turn borrowed
from the "Python in a Nutshell" book by Alex Martelli.
'''
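# A minimal usage sketch (illustration only; see the demo under __main__ below
# for a complete, runnable example; `some_callable` is a placeholder):
#
#   pool = ThreadPool(num_workers=3)
#   pool.addJob(JobRequest(some_callable, args=(1, 2), key='job-1'))
#   for job in pool.iterProcessedJobs():   # blocks until all active jobs finish
#       print job.key, job.result()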
__all__ = ['ThreadPool', 'JobRequest']
__author__ = 'George Sakkis'
import sys
import time
import Queue
import logging
import threading
_log = logging.getLogger('threadpool')
def synchronized(f):
'''A synchronized method decorator'''
def wrapper(self, *args, **kwargs):
try: lock = self.__lock
except AttributeError: # first time use
lock = self.__dict__.setdefault('__lock', threading.RLock())
lock.acquire()
try: return f(self, *args, **kwargs)
finally: lock.release()
return wrapper
class ThreadPool(object):
'''A thread pool, distributing job requests and collecting them after they
are processed.
    See the module docstring for more information.
'''
def __init__(self, num_workers, input_queue_size=0, output_queue_size=0):
'''Set up the thread pool and start C{num_workers} worker threads.
@param num_workers: The number of worker threads to start initially.
@param input_queue_size: If a positive integer, it's the maximum number
            of unassigned jobs. The thread pool blocks when the queue is full and a
new job is submitted.
@param output_queue_size: If a positive integer, it's the maximum number
of completed jobs waiting to be fetched. The thread pool blocks when
the queue is full and a job is completed.
'''
self._workers = []
self._activeKey2Job = {}
self._unassignedKey2Job = {}
self._unassignedJobs = Queue.Queue(input_queue_size)
self._processedJobs = Queue.Queue(output_queue_size)
self.addWorkers(num_workers)
@synchronized
def addWorkers(self, n=1):
'''Add C{n} worker threads to the pool.'''
for _ in xrange(n):
self._workers.append(Worker(self._unassignedJobs, self._processedJobs,
self._unassignedKey2Job))
_log.debug('Added %d workers' % n)
@synchronized
def dismissWorkers(self, n=1):
'Tell C{n} worker threads to quit after they finish with their current job.'
for _ in xrange(n):
try: self._workers.pop().dismissed = True
            except IndexError: break
@synchronized
def addJob(self, job, timeout=None):
'''Add a job request to the end of the input queue.
@param timeout: If the input queue is full and C{timeout is None}, block
until a slot becomes available. If C{timeout > 0}, block for up to
C{timeout} seconds and raise C{Queue.Full} exception if the queue is
still full. If C{timeout <= 0}, do not block and raise C{Queue.Full}
immediately if the queue is full.
'''
key = job.key
self._unassignedJobs.put(job, timeout is None or timeout>0, timeout)
self._unassignedKey2Job[key] = self._activeKey2Job[key] = job
_log.debug('Added job %r to the input queue' % key)
@synchronized
def cancelJob(self, key):
'''Cancel a job.
This has effect only if the job is still unassigned; if it's in progress
or has already been processed, it has no effect.
@param key: The job's identifier.
'''
try:
del self._unassignedKey2Job[key]
# if it's not in unassigned, it may be in progress or already
# processed; don't try to delete it from active
del self._activeKey2Job[key]
except KeyError: pass
@synchronized
def cancelAllJobs(self):
'''Cancel all unassigned jobs.'''
while self._unassignedKey2Job:
del self._activeKey2Job[self._unassignedKey2Job.popitem()[0]]
def numActiveJobs(self):
'''Return the approximate number of active jobs.
This is not reliable due to thread semantics.
'''
return len(self._activeKey2Job)
def iterProcessedJobs(self, timeout=None):
'''Return an iterator over processed job requests, popping them off the
output queue.
@param timeout: There are three cases:
- If C{None}, iterate over the processed jobs as long as there are
any active jobs. Whenever there are no processed jobs available,
block and wait for a job to finish.
- If C{<= 0}, iterate over the currently processed jobs only; do not
block.
- If C{> 0}, wait up to C{timeout} seconds per processed job as long
as there are active jobs. Note that a loop such as::
for r in t.iterProcessedJobs(2): pass
may take from microseconds (if there are no active jobs) to
arbitrarily long time, as long as each processed job is yielded
within 2 seconds. If you want a timeout for the whole loop, use
L{processedJobs} instead.
'''
block = timeout is None or timeout>0
while self._activeKey2Job:
try: job = self._processedJobs.get(block, timeout)
except Queue.Empty:
break
key = job.key
_log.debug('Popped job %r from the output queue' % key)
# at this point the key is guaranteed to be in _activeKey2Job even
# if the job has been cancelled
assert key in self._activeKey2Job
del self._activeKey2Job[key]
yield job
def processedJobs(self, timeout=None):
'''Return a list of processed job requests.
@param timeout: If C{timeout is None} or C{timeout <= 0}, it is
equivalent to C{list(t.iterProcessedJobs(timeout))}. If C{timeout > 0},
this is the maximum overall time to spend on collecting processed jobs.
'''
if timeout is None or timeout <= 0:
return list(self.iterProcessedJobs(timeout))
now = time.time
end = now() + timeout
processed = []
while timeout > 0:
try: processed.append(self.iterProcessedJobs(timeout).next())
except StopIteration: break
timeout = end - now()
return processed
class JobRequest(object):
'''A request to execute a callable later and encapsulate its result or
exception info.
'''
class UnprocessedRequestError(Exception):
'''The callable of a L{JobRequest} has not been called yet.'''
def __init__(self, callable, args=(), kwds=None, key=None):
'''Create a job request for a callable.
        A job request consists of a callable to be executed by a L{worker
thread <Worker>}, a list of positional arguments and a dictionary of
keyword arguments.
@param key: If given, it must be hashable to be used as identifier of
the request. It defaults to C{id(self)}.
'''
if kwds is None: kwds = {}
if key is None: key = id(self)
for attr in 'callable', 'args', 'kwds', 'key':
setattr(self, attr, eval(attr))
self._exc_info = None
def process(self):
'''Execute the callable of this request with the given arguments and
store the result or the raised exception info.
'''
_log.debug('Ready to process job request %r' % self.key)
try:
self._result = self.callable(*self.args, **self.kwds)
except:
self._exc_info = sys.exc_info()
_log.debug('Failed to process job request %r' % self.key)
else:
self._exc_info = None
_log.debug('Job request %r was processed successfully' % self.key)
def result(self):
'''Return the computed result for this processed request.
        If the callable raised an exception, it is reraised here with its
original traceback.
@raise JobRequest.UnprocessedRequestError: If L{process} has not been
called for this request.
'''
if self._exc_info is not None:
tp,exception,trace = self._exc_info
raise tp,exception,trace
try: return self._result
except AttributeError:
raise self.UnprocessedRequestError
class Worker(threading.Thread):
'''Background thread connected to the input/output job request queues.
A worker thread sits in the background and picks up job requests from one
queue and puts the processed requests in another, until it is dismissed.
'''
def __init__(self, inputQueue, outputQueue, unassignedKey2Job, **kwds):
        '''Set up thread in daemonic mode and start it immediately.
@param inputQueue, outputQueue: U{Queues
<http://docs.python.org/lib/module-Queue.html>} passed by the L{ThreadPool}
class when it creates a new worker thread.
'''
super(Worker,self).__init__(**kwds)
self.setDaemon(True)
self._inputQueue = inputQueue
self._outputQueue = outputQueue
self._unassignedKey2Job = unassignedKey2Job
self.dismissed = False
self.start()
def run(self):
'''Poll the input job queue indefinitely or until told to exit.
Once a job request has been popped from the input queue, process it and
add it to the output queue if it's not cancelled, otherwise discard it.
'''
while True:
# thread blocks here if inputQueue is empty
job = self._inputQueue.get()
key = job.key
_log.debug('Popped job request %r from the input queue' % key)
try: del self._unassignedKey2Job[key]
except KeyError:
_log.info('Discarded cancelled job request %r' % key)
continue
if self.dismissed: # put back the job we just picked up and exit
self._inputQueue.put(job)
_log.debug('Dismissing worker %r' % self.getName())
break
job.process()
# thread blocks here if outputQueue is full
self._outputQueue.put(job)
_log.debug('Added job request %r to the output queue' % job.key)
if __name__ == '__main__':
# demo
import random
# change the seed to get different sequence of results
random.seed(2)
# the work the workers threads will have to do
def slow_sqrt(num):
t = random.randrange(1,5)
log('%s: pretending to work hard on computing sqrt(%s) for %d seconds' %
(threading.currentThread().getName(),num,t))
time.sleep(t)
return num**0.5
# log each completed job
def job_done(job):
# job.result() will reraise any exception raised while the job was being
# processed; otherwise it will return the computed result
try:
return 'job #%s: result=%s' % (job.key, job.result())
except Exception, ex:
return 'job #%s: exception raised: %s' % (job.key, ex)
def log(msg, start=time.time()):
print '%.2f seconds elapsed: %s' % (time.time()-start, msg)
# create a pool of 3 worker threads
pool = ThreadPool(3)
# create 10 job requests and add them in the queue
for i in xrange(10):
num = random.randrange(-3,7)
pool.addJob(JobRequest(slow_sqrt, [num]))
# collect all processed jobs within 3.5 seconds
firstbatch = pool.processedJobs(timeout=3.5)
log('%d jobs done:' % len(firstbatch))
for job in firstbatch:
print ' ', job_done(job)
print '** %d active jobs after first batch' % pool.numActiveJobs()
# non-blocking iterator over processed jobs
for i in xrange(5):
for job in pool.iterProcessedJobs(timeout=0):
log('From non-blocking loop: %s' % job_done(job))
if pool.numActiveJobs():
log('Do something in the main thread; will check the pool again after a sec')
time.sleep(1)
print '** %d active jobs after second batch' % pool.numActiveJobs()
# blocking iterator over any remaining active jobs
for job in pool.iterProcessedJobs():
log('From blocking loop: %s' % job_done(job))
print '** %d active jobs after third batch' % pool.numActiveJobs()
|
py
|
1a57de0dd84e48b698ae10fc93a93590611218a3
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from config import yoloCfg,yoloWeights,AngleModelFlag
from config import AngleModelPb,AngleModelPbtxt
import numpy as np
import cv2
from apphelper.image import letterbox_image
if AngleModelFlag=='tf':
    ## Convert to a TF model so it can be run on the GPU
import tensorflow as tf
from tensorflow.python.platform import gfile
tf.compat.v1.disable_eager_execution()
config = tf.compat.v1.ConfigProto(allow_soft_placement=True)
sess = tf.compat.v1.Session(config=config)
with gfile.FastGFile(AngleModelPb, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
sess.graph.as_default()
tf.import_graph_def(graph_def, name='')
inputImg = sess.graph.get_tensor_by_name('input_1:0')
predictions = sess.graph.get_tensor_by_name('predictions/Softmax:0')
keep_prob = tf.placeholder(tf.float32)
else:
    angleNet = cv2.dnn.readNetFromTensorflow(AngleModelPb,AngleModelPbtxt)## DNN text-orientation detection
    textNet = cv2.dnn.readNetFromDarknet(yoloCfg,yoloWeights)## text localization
def text_detect(img,scale,maxScale,prob = 0.05):
thresh = prob
img_height,img_width = img.shape[:2]
inputBlob,f = letterbox_image(img,(scale,scale))
inputBlob = cv2.dnn.blobFromImage(inputBlob, scalefactor=1.0, size=(scale,scale),swapRB=True ,crop=False);
textNet.setInput(inputBlob/255.0)
outputName = textNet.getUnconnectedOutLayersNames()
outputs = textNet.forward(outputName)
class_ids = []
confidences = []
boxes = []
for output in outputs:
for detection in output:
scores = detection[5:]
class_id = np.argmax(scores)
confidence = scores[class_id]
if confidence > thresh:
center_x = int(detection[0] * scale/f)
center_y = int(detection[1] * scale/f)
width = int(detection[2] * scale/f)
height = int(detection[3] * scale/f)
left = int(center_x - width / 2)
top = int(center_y - height / 2)
if class_id==1:
class_ids.append(class_id)
confidences.append(float(confidence))
boxes.append([left, top,left+width, top+height ])
boxes = np.array(boxes)
confidences = np.array(confidences)
return boxes,confidences
def angle_detect_dnn(img,adjust=True):
"""
    Detect text orientation (OpenCV DNN backend).
"""
h,w = img.shape[:2]
ROTATE = [0,90,180,270]
if adjust:
thesh = 0.05
xmin,ymin,xmax,ymax = int(thesh*w),int(thesh*h),w-int(thesh*w),h-int(thesh*h)
        img = img[ymin:ymax,xmin:xmax]## crop away the image border
inputBlob = cv2.dnn.blobFromImage(img,
scalefactor=1.0,
size=(224, 224),
swapRB=True ,
mean=[103.939,116.779,123.68],crop=False);
angleNet.setInput(inputBlob)
pred = angleNet.forward()
index = np.argmax(pred,axis=1)[0]
return ROTATE[index]
def angle_detect_tf(img,adjust=True):
"""
    Detect text orientation (TensorFlow backend).
"""
h,w = img.shape[:2]
ROTATE = [0,90,180,270]
if adjust:
thesh = 0.05
xmin,ymin,xmax,ymax = int(thesh*w),int(thesh*h),w-int(thesh*w),h-int(thesh*h)
        img = img[ymin:ymax,xmin:xmax]## crop away the image border
img = cv2.resize(img,(224,224))
img = img[..., ::-1].astype(np.float32)
img[..., 0] -= 103.939
img[..., 1] -= 116.779
img[..., 2] -= 123.68
img = np.array([img])
out = sess.run(predictions, feed_dict={inputImg: img,
keep_prob: 0
})
index = np.argmax(out,axis=1)[0]
return ROTATE[index]
def angle_detect(img,adjust=True):
"""
    Detect text orientation, dispatching to the TF or DNN backend.
"""
if AngleModelFlag=='tf':
return angle_detect_tf(img,adjust=adjust)
else:
return angle_detect_dnn(img,adjust=adjust)
|
py
|
1a57de1c1a5260695be3db144f167c04c19ac051
|
# Generated by Django 2.0 on 2017-12-22 06:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tool', '0020_code'),
]
operations = [
migrations.AlterField(
model_name='code',
name='text',
field=models.TextField(),
),
]
|
py
|
1a57de9619307cb970c49e93b5fadfa463f48c54
|
#!/usr/bin/env python
from __future__ import print_function
import sys
import math
import numpy as np
#ROS Imports
import rospy
from sensor_msgs.msg import Image, LaserScan
from ackermann_msgs.msg import AckermannDriveStamped, AckermannDrive
class reactive_follow_gap:
def __init__(self):
#Topics & Subscriptions,Publishers
lidarscan_topic = 'scan'
drive_topic = 'drive'
#drive_topic = '/vesc/high_level/ackermann_cmd_mux/input/nav_1'
self.lidar_sub = rospy.Subscriber( lidarscan_topic, LaserScan, self.lidar_callback, queue_size=1)
self.drive_pub = rospy.Publisher( drive_topic, AckermannDriveStamped, queue_size=1)
def preprocess_lidar(self, ranges):
""" Preprocess the LiDAR scan array. Expert implementation includes:
1.Setting each value to the mean over some window
2.Rejecting high values (eg. > 3m) """
n = len(ranges)
proc_ranges = [0]*n
for i in range(n):
proc_ranges[i] = (ranges[i] + ranges[i-1] + ranges[i-2])/3
if ranges[i] < 1.2:
proc_ranges[i] = 0
            if math.isnan(ranges[i]):
proc_ranges[i] = max(proc_ranges[i-1], 0)
return proc_ranges
def find_max_gap(self, free_space_ranges):
""" Return the start index & end index of the max gap in free_space_ranges
"""
start_i,end_i, best_start, best_end = 0,0,0,0
for i in range(len(free_space_ranges)):
if free_space_ranges[i] > 0:
end_i += 1
else:
if end_i != start_i and end_i - start_i + 1 > best_end-best_start+1:
best_start = start_i
best_end = end_i
start_i = i
end_i = i
if end_i != start_i-1 and end_i - start_i + 1 > best_end-best_start+1:
best_start = start_i
best_end = end_i
return best_start, best_end
def find_best_point(self, start_i, end_i, ranges):
return (start_i+end_i)//2
def lidar_callback(self, data):
""" Process each LiDAR scan as per the Follow Gap algorithm & publish an AckermannDriveStamped Message
"""
ranges = data.ranges
proc_ranges = self.preprocess_lidar(ranges)
n = len(proc_ranges)
#Find closest point to LiDAR
index = np.argmin(proc_ranges) # proc_ranges.index(min(proc_ranges))
min_distance = ranges[index]
#Eliminate all points inside 'bubble' (set them to zero)
r = 0.2
l = ranges[index]
if l == 0:
delta_a = math.asin(0)
elif l > r:
delta_a = math.asin(r/l)
else:
delta_a = math.asin(1)
angle_range = [data.angle_increment*index - delta_a, data.angle_increment*index + delta_a]
#print(angle_range)
for i in range(len(proc_ranges)):
angle_point = data.angle_increment*i
if angle_range[0] <= angle_point <= angle_range[1]:
proc_ranges[i] = 0
#Find max length gap
start_i, end_i = self.find_max_gap(proc_ranges)
#print([start_i, end_i])
#Find the best point in the gap
best_index = self.find_best_point(start_i, end_i, proc_ranges)
#Publish Drive message
drive_msg = AckermannDriveStamped()
angle = (best_index-0.5*n)*data.angle_increment
if abs(angle) <= 5*math.pi/180:
velocity = 4
elif abs(angle) <= 10*math.pi/180:
velocity = 3.7
elif abs(angle) <= 15*math.pi/180:
velocity = 3.5
elif abs(angle) <= 20*math.pi/180:
velocity = 3
else:
velocity = 2.5
angle = np.clip(angle, -0.43, 0.43)
#print(angle)
#print(angle)
drive_msg.header.stamp = rospy.Time.now()
drive_msg.header.frame_id = "drive"
drive_msg.drive.speed = velocity
drive_msg.drive.steering_angle = angle
self.drive_pub.publish(drive_msg)
return
if __name__ == '__main__':
rospy.init_node("FollowGap_node", anonymous=True)
rfgs = reactive_follow_gap()
rospy.sleep(0.1)
rospy.spin()
|
py
|
1a57de9e1d1e42092b09300f09f4b74d07a4cca3
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import time
import threading
from queue import Queue
from threading import Thread
class MyThread(threading.Thread):
def run(self):
for i in range(5):
print('thread {}, @number: {}'.format(self.name, i))
time.sleep(1)
'''
'''
class Consumer(threading.Thread):
def __init__(self, cond, name):
        # initialize the base Thread class
super(Consumer, self).__init__()
self.cond = cond
self.name = name
def run(self):
        # sleep so that the Producer acquires the condition and waits first
        time.sleep(1)
        self.cond.acquire()
        print(self.name + ': Can you give me a discount if I buy these two items together?')
        self.cond.notify()
        self.cond.wait()
        print(self.name + ': I have placed the order, please update the price')
        self.cond.notify()
        self.cond.wait()
        print(self.name + ': Got it, my payment went through')
        self.cond.notify()
        self.cond.release()
        print(self.name + ': waiting for the delivery')
class Producer(threading.Thread):
def __init__(self, cond, name):
super(Producer, self).__init__()
self.cond = cond
self.name = name
def run(self):
self.cond.acquire()
        # Release the lock and suspend this thread here until it is notified,
        # then re-acquire the lock and continue.
        self.cond.wait()
        print(self.name + ': Sure, go ahead and place the order')
        self.cond.notify()
        self.cond.wait()
        print(self.name + ': Done, the price has been updated')
        self.cond.notify()
        self.cond.wait()
        print(self.name + ': OK, payment received, shipping right away')
        self.cond.release()
        print(self.name + ': goods shipped')
def producerTest():
cond = threading.Condition()
    consumer = Consumer(cond, 'Buyer (两点水)')
    producer = Producer(cond, 'Seller (三点水)')
consumer.start()
producer.start()
isRead = True
def write(q):
    # writer thread: put values into the queue
for value in ['两点水', '三点水', '四点水']:
        print('Value written to the Queue: {0}'.format(value))
q.put(value)
def read(q):
    # reader thread: keep reading values from the queue
while isRead:
value = q.get(True)
        print('Value read from the Queue: {0}'.format(value))
def jiaoliu():
    '''Inter-thread communication via a Queue'''
q = Queue()
t1 = Thread(target=write, args=(q,))
t2 = Thread(target=read, args=(q,))
t1.start()
t2.start()
def main():
print("Start main threading")
    # create three threads
threads = [MyThread() for i in range(3)]
    # start all three threads
for t in threads:
t.start()
    # join each newly created thread in turn
for t in threads:
t.join()
print("End Main threading")
if __name__ == '__main__':
# main()
# producerTest()
jiaoliu()
|
py
|
1a57df946aa7cd88ec915198c9ce838090015fd9
|
# Resource object code (Python 3)
# Created by: object code
# Created by: The Resource Compiler for Qt version 6.2.2
# WARNING! All changes made in this file will be lost!
from PySide6 import QtCore
qt_resource_data = b"\
\x00\x00\x07N\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xffa\
\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\
\x01\x00\x9a\x9c\x18\x00\x00\x05\xf1iTXtXML\
:com.adobe.xmp\x00\x00\
\x00\x00\x00<?xpacket beg\
in=\x22\xef\xbb\xbf\x22 id=\x22W5M\
0MpCehiHzreSzNTc\
zkc9d\x22?> <x:xmpm\
eta xmlns:x=\x22ado\
be:ns:meta/\x22 x:x\
mptk=\x22Adobe XMP \
Core 5.6-c148 79\
.164036, 2019/08\
/13-01:06:57 \
\x22> <rdf:RDF \
xmlns:rdf=\x22http:\
//www.w3.org/199\
9/02/22-rdf-synt\
ax-ns#\x22> <rdf:De\
scription rdf:ab\
out=\x22\x22 xmlns:xmp\
=\x22http://ns.adob\
e.com/xap/1.0/\x22 \
xmlns:dc=\x22http:/\
/purl.org/dc/ele\
ments/1.1/\x22 xmln\
s:photoshop=\x22htt\
p://ns.adobe.com\
/photoshop/1.0/\x22\
xmlns:xmpMM=\x22ht\
tp://ns.adobe.co\
m/xap/1.0/mm/\x22 x\
mlns:stEvt=\x22http\
://ns.adobe.com/\
xap/1.0/sType/Re\
sourceEvent#\x22 xm\
p:CreatorTool=\x22A\
dobe Photoshop 2\
1.0 (Windows)\x22 x\
mp:CreateDate=\x222\
020-03-03T09:50:\
40-03:00\x22 xmp:Mo\
difyDate=\x222020-0\
5-02T17:58:45-03\
:00\x22 xmp:Metadat\
aDate=\x222020-05-0\
2T17:58:45-03:00\
\x22 dc:format=\x22ima\
ge/png\x22 photosho\
p:ColorMode=\x223\x22 \
photoshop:ICCPro\
file=\x22sRGB IEC61\
966-2.1\x22 xmpMM:I\
nstanceID=\x22xmp.i\
id:7bcbff55-51b7\
-1d4d-99d2-ba0c1\
99cd193\x22 xmpMM:D\
ocumentID=\x22adobe\
:docid:photoshop\
:c6f328a5-3dfe-8\
e42-813f-76ba1ad\
36d34\x22 xmpMM:Ori\
ginalDocumentID=\
\x22xmp.did:892a84c\
7-9c85-5445-8b17\
-2dcd7e777554\x22> \
<xmpMM:History> \
<rdf:Seq> <rdf:l\
i stEvt:action=\x22\
created\x22 stEvt:i\
nstanceID=\x22xmp.i\
id:892a84c7-9c85\
-5445-8b17-2dcd7\
e777554\x22 stEvt:w\
hen=\x222020-03-03T\
09:50:40-03:00\x22 \
stEvt:softwareAg\
ent=\x22Adobe Photo\
shop 21.0 (Windo\
ws)\x22/> <rdf:li s\
tEvt:action=\x22sav\
ed\x22 stEvt:instan\
ceID=\x22xmp.iid:7b\
[binary content omitted: a series of embedded PNG icon images (with Adobe Photoshop XMP metadata), not representable as text]
a\x0bi\x90Jo\xb5:\xaaX\x9a\x0a\x87\xc9\x91\xe3\
l\xcf\xf6l\xcf\xf6l\xcf\xf6lO\x08!\xfc\x01w\
\x90`\xa2\xaa\xcc\x1f\x1c\x00\x00\x00\x00IEND\xae\
B`\x82\
\x00\x00\x08\xa7\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00\xc9\x00\x00\x00S\x08\x06\x00\x00\x005;# \
\x00\x00\x08nIDATx\x9c\xed]\x09\x8e\xdc8\
\x0c\xb4\x83\xf9\xff\x8f\xf6\x05yB\x1e\xe3\x05\x82\xed\x8c\
\x96\x11%\x1eER\xb6U@\x90\x1e\xb5\xac\x93\xa4x\
\xc9}\xfe\xfa\xf9\xcfq\x1c\xc7u\xfc\x1f\xe7\x7f\x7fq\
\xe5\xf4\xbbY}m;\xab\x96\xb7\x7f\xf7\xc6\xe9Y\x8b\
k\xf0\x19\xd5\xfe\xa8\xfc\xec\x8c\xa1W\xbf\x1d+]\x13\
m9\x07k\xfb\x14\x92\xfa\xd3\xcf_\x9d\xc5h\x1b\x1a\
MBS_\xd3\xce\xca\xe5-A\xf5\xc6\xc9\x95K\xe7\
\xd5\xdbL\xc4\xfaH }\x96\xae\x85\xb7\xfc\x03\xbav\
\x9e\xf6\xcfN\xf9\xc5\x94\xcf\xda?~\xd0\x82\x01\xa7K\
$\x8c\xa6~T\xf9\x91T\xae\xc1l\x8d\xe8\xb85\xf5\
%\xe5\xd9{\x10%D$\xe5\xa35\xd2\xd6\xff\xdd\x07\
e\x92\xca\xc9Y\xca{@\x13\x18W~7\xc2\xa3\xf3\
\xc9<\xa5f\xc8\xda\x1bm\xfd\xdf\xe5\xbd\x93D\x83,\
\x82\xe4\xca\xab$\x1b\x87\x0a\xa1\xc0\xe1m\xc2\xe2\x08*\
?\xbf\x98E\xa8&\xfe\xd5\xcb\xb9\xb1F\x8f\xdf\xdb\x16\
\xf7\x19\xd5\xfe#\xcb\xbd'\xc9\xc6\xc6\xe3\xf1E&(\
=\xe6\xa5n\xbc\xd1\xb3\x9c\xae,\x05\xc2N\x19!B\
\x07\xb7\xceu\xa3\x10\xedI\x82$\x8a\x99\xfb\xf8\x12\xd6\
\xcd\x18O&\xae\xce\xfc\x9f>\xe7\xdb\xc3\xaanic\
(#DHyI\xd0\xaf\x1a\x19\xcc\xb2\x19\x05\x00\x8f\
MR\xb5\x01\x92~7ql\xc0\xe05\xdcgQT\
4\xbc\xed\xaf\xc2<\xa7\xc0\x95\xe9AE\xac\xe3\xb1\xa0\
\x86\xfb\x86\x0e3U\xd1\xea\xcbG:3z\x82l;\
\x10\x14@\xba\x80\xef\xa2_\xa3\xda\x91\x10\xda\x99H\x90\
\x9aye9\x0f\x1e\x01\x04\x93x\x17\x1cI\xb4\x99\x12\
2\xc2\x05\x9d-\xe17\xb3\x08\xb0B0\xd1\x13s\xb1\
\xa2\x820F\xf3\xac&\xd6\xcd,\x03\xac\xc0$\xe8\xcd\
\x91\xaaA\xab\x81\xcbR\xcd\xc4f\x96\x0e4LRE\
X\x92M\xa3c\xb3\x1a\xd4\x1aX\xd6cv\x9aX\xf1\
\x84\x93vYD\x9f$\xd9v\xc2\x07w\x8dh\xaf\x92\
>\x13\xd9\xee\xed\xf0a\x92\xe8\x0d\xcff\x94\x0a;\xa7\
\x02\x19\x1e\xc5\xd73\x8b\xf6$\xb1\x9e\x0c\x15\x17v$\
X1U%\xa2n\xe6\xb8\x1e\x87\xd5\x83\x89\x19)(\
\x9e\xe0ZU`\xae\x82h\xbd\x81N\xae\xcd\xe5\x03\x9b\
V\x9bd61\xed\xc4%\x91b\x0dV]x\x04q\
\xaf\xe2\x01\xe3\xf6L:\xbe\x8b\xfc\xbf,>L\x82 \
\xea\x0f\x10\x04\xfa\xc4\xe3}\xcf\x89\x7fn\xe9\xb5\x91\xa8\
[\x9ak\xaa\xab\xc0r7=jN\xc8\xab\x03\x16b\
j\xdb\xcbL\xa6D\xd4\xe7\xee\xb7\xa7\xe2M\x09\x8e\xb3\
`]\xcb(U\x064\x82\x08f\xf7\xe33\xdd\xe3\x1a\
\x06\xd5\xda\x9f\xb4\xed0\x06\x920I\xb6qe\x09\x1e\
\xa2Q\x11\xfd\xf6\xcei\xc5\xd3=r\x0d9\x95\x0d\xbe\
\x0eR\xc3\xfd-.\xc0\xaayf2\xc8\xd3\xd3\xe4\xe1\
\xb1\x1d\xee\xe5t\x14+\xa5{?e\x93\xcf\xe6\x9f\xe6\
\x99\x0d\x19`\xcc\xd2\xaa[\xd5\x1b\x90\xc1 \x125*\
Z\xbd\x8c85\xf6E\xaa@\xf4l\x92\x08\x83HB\
\x9c\xb4\x9f(\xcf\xc6\xcc{D\xd3i.\xc1\xe7\xde\x98\
3\x89\xf6T\xee\xd5\x0a\x19\xc7Yp\xd3\xf0\xccpG\
n\xb4\xb6\xad\x0a\xc9\xd8\xcb7\xd3\xbc\x01\xb1R\x9aS\
\x86\xde\xf8\x86\x8bQ\xf6\x1b\x1c\x9f\x85\xb3a\x96\xd7'\
&\x12\x98\xd7c\xbf\x08\xe2\x99\xe0T\xd7}\xc2\x18\xd0\
2\xc9\xd5\xd1UO\xf2\x1dE\xf6\xe2\xa3\xd3a\xb8\xb9\
Z\xdb\xa5\xf6\x8b%\xdd\xc7\xfb\xa6\x94^\xdf\x9cj\xf8\
\xb6\xd3\xc6\xb4\xbe\x1f&\xe1\x92\xcd.\xe6s\xaf\xf3\x16\
(\xfd83\x85\x81\xeb\xcbJ\xec\xb4\xcd\x91c\xe2$\
\xcfx\xc6.\xe9{C\x81\xa8\xeb\xbbt\xd3,\xf9=\
\x88\x14x\x94\xa4\x8cL\xd9\x1f\x9d\xd0\x12<1[:\
\x12\xea\xf5\xd20\x89\xd7\x95\xa6M\xa1F!\x93Q8\
D]T{S\xf0\xb5\x0c\xdaT\xf9H\xe9\xee\x95\xfc\
\x96\xec\xd9J[\xaa\xb5\x81\xb6'jaX\xee\xb8G\
\xdf\x17\x91\x5c\x11\xee\xd5\xb1d\x99F\xa6y\xb7\xf0\x1a\
\xe3\xd6\xb1\xecS\x04\x00\x8d\x0bX\xf2b\x07\x84D\xd4\
H\xfd\xe8\xbb\x12\xd1\xb0\xa6\xc9 \x19\xe4\x8d^.\x15\
\xb46\xc9\x0cQ\x92K\xd2\xae\x84\x89\x11\xe3\xb0\x10\x14\
\xdau=\xc3>A\x80\x900Iuz\x88\xe5\xb9\x91\
\xdd\x83@\xb6\xe4\x95\xba\xe2\xad\xd8L5\x80\xc6p\x8f\
\xbc\xde\x1a-i{j\x99\xd7\xbe\xc8\xbe\x91(5\xf0\
\xa3<iw\x02\x94V%'I\xe6\xed\xb2\x83I,\
\x8c\xecG\x02\xca\x18U\x8c\x12\xd5\xc6\x93N\x12\xf8\x5c\
4\x86\xfb,N\x82\xf2\x90E\xdd\x8d@\x18\xa8#\xe6\
\xa86\x80\xefJ\xe8(\x9a\x0a\x83\xd6\x05|\x11u\xe5\
R\xa8\x01\x1fd\xc5d\x10\xe0\xdc\xd1Q^\xb5\xeaT\
\xfb\xd5\xfa\x94\x84\x03\xc2\xf19I\xb4R0\x9a\x80\xd1\
\xed{\x17:\xdapna9\x91\xeer\x8ax\xd4\xc1\
2\xef\xaa7U^\xb3\xa1U~{\x94\xaeO\xb3l\
[D0u\x05Qx\xd7>\x92Y\xb5c\x83\xd1\x91\
\x97If\x83\x90\x1c\xa7\xabf\xab\xd2\xb1\xec\xa0\xdb7\
\xaa\xf6\xa9d\x0f\x22~z\xe1t\xe8\x92=\xfb\xc6\xab\
\x06zS\xe8\xabPI\x88\x9e\xef\x1f\x07\xadMR\xb5\
@\x12\x8f\x17z\xfc\xbd>\xabN\x93}\x8a\xd9\x00Y\
\xb76NR%A\x10\xde.t\xaa\xc8\x08\x15\x82b\
3\xc87\xb4\xeb\xef\xde/z}w\x84\xa8\xf8\x85\x06\
h\xb5p\x13_\x1fOZ\x1b\xd8IR\xbd \xd1\xfe\
\xf0\xd5\x5c\xca\xab\xf7wWx3+\xba\xb0\xfe>I\
\x15,\xcc\xc4=\x83\x8a\xbeW!\xba\xff\x95i\x22u\
l\xab\xbdw+\xc2\xef\xff\x04l\xb5\xd0\x07\x17\x1d\x8c\
\x98\xa4\xd2\x05\xe9Q\xbf\xa4\xcf\x8f\xbe\x97\xf6\xcf\xd5i\
\x9f\x8f\xcc\x9e\x8e\xc6\xebO\x93C\xf0\xc2\xec\xd5U\x0a\
\xef\xf8\xa23o\xbd\x8c\xb2m\x911RRx\xee\xfa\
\x06\xc7M<\x1bi\xd8\xef\x02\xde\xd0be\xfb(D\
xn&\xd9\xd0b\xb5S<|<\x9c\xbae\x91\x16\
\xb3\x0bI\x1c\xe8m?:\x86\x93\x042\xb5Y\xc7\x9e\
\xc8\xba\xb6_\xcf\x86\xa1|\xfc\xa3\xb7\xcd\xf4\x92Ii\
\xd9l\xce\xb3w\x88qmK\xc0=g\xc9\xca\xe0\xda\
R\x8f)\xeb$\x91\xbc\x98\x81^\xe8\xa2u\xaa^\xbe\
\xa0M\xb0\xb4\x5cFC\xbc\xb8b\xd6\x9f\x84X\xe8\x85\
\xba\x0aH\x19S\x02\xc8\x1c2\xd5-\x84$\xaeJ.\
\xf4@\x92\xee\xe3m\xffIq\x14\xedz\x87D\xd9[\
p\xea\xd6\xec\xb8\xac\x88\xf6\x8e\x08\xc12\x9e\xf6\x19\x9a\
\x97\xa6\xc9S\x8b\xces\x1a\x8d\xc5\x22x$\xaa\x8c\xf5\
\x9eP/k\x9ak\xcf\xc2\x0cH\xbak\xc76\xdc{\
\xcbI\x82\xd6\xbb\xa5\xdfG\xc6D\xb8\xcd\x95\xb69\x0b\
`\x22\xd4)\x0dF\xe3\x91\x04[\xad\xdf\xf5`a\x10\
\xcf^K\x18\x98\xfb\xdc}~\xc4$\x11\x1bXy\x1f\
\xe3\x00K\x22K\xff\xd6\xba\x1a=}\xc7\x90\xfe\x86k\
MF\xc1D\x0bAKO\x82\xaa\xb4{\xcb\x0b\x1d$\
\x9e9\x14\xdeB\xe0\x92\xfd\x97\xd0\x9f\xf5\xd6\xaaj\xef\
-\xea\x96\xc7=,-G#\xdc\xb8\x1b\xc0cOl\
\xf0\xd0\xec\xa9K(g\xa8[(\x7f\xb5\x17=\x03-\
\x1a\xc8\x9c\xad72\xd5\x12Y\xe1huk\x86jF\
i\xb1\xa2j\xb3O\x17;\xbcv\x1f\x8b\xcc8IT\
\xc6m%\xb1g{\xb5$\xcfE2\xda*\x82%5\
\xb8\x98\x9d\xbb\x15\xb1\xc8\x1a#\x0c\xadB\xce\xca\x8e`\
7/\x87(f\xaa\x0e\xe6\x8e\x02\xa7ac\xa3?Q\
-E/\xbf\xe7\x0cR\xd1Z \xdb\xf6x\xb7\xb4\x86\
2\xf7L\xda\xb7\x94Q\x90\x82\xcb\xe2m:\xc9\xe7\
\x0c\xa8\xf6\xc0s\x9f\xc4J\xb0U\xb1\x92\x08\xfb\xaa\x17\
\x88Bz\xff\xa4\xcfY\x12\x09+\x19\x04\xf1\x5c\x0b\x94\
\xd0\xe8\x02\xa5ni\x17\xfb\x0e\xf1\x00tD\x9aS\x15\
,\xd1\xe7^jI\xf5\x1eD\xee)2\x03@\xda\xdf\
\x9fv\xdb78\x22;@\xd6\xb3\xb4\xed}\xa6\xd7\x9f\
&u\xa5\x8a\x00+\xf61\xaa\x7fD\xdb\x90\xf1\x8c\xd4\
-.\x11nV>\x93|\xbdX\xc5,~\xd1Sc\
F\xcfH\xc7\x8al\xa7WN\xd5\x1am;\xdcXQ\
{\xe3\x1d\xcf\x11\xbc7\xb4\xbcg\xbf\xa0\xf7\xec\xaf9\
p\xea\x96GO\xa4j\x85\xc4\x1b\xe1Il\xf4.F\
d;\xdau\x90\xc0\xd3\x8eW\xff\x1f1\x83\xa6\xbe\xb5\
|\xc6 \x5c\xbf\x9e\xf2Kj\xb8#\xb8\x93J\x97\x08\
\xa9p\x80\xcb5\xe0\x08\xc6[n\xb1Y\x8e\x22\xa1`\
\xa9?+\x97\xc2\xa2\xfeJ\xca\xbb7\x13\xd1\x847:\
\xe2\xd1\x06'\x9aP\xa3\x8fr\x8d\x9a\xc2\x95i\xcaQ\
\x92_\xd2\x96\xb4>jo\x0e\xa6\xdc\xbd7\x9e\xdfq\
\xd7\x96\xcf\x9e\xcbV\x8f\xac\xee\xd3(IH\xdb\x92\x10\
\xa1f\x0f\x90\x8e\x86La\xe1\xa9\xff\x81}\xcf\x8e\xe3\
\xf8\x17\x1f\x0f\xe1_\xd3\x90o\xab\x00\x00\x00\x00IE\
ND\xaeB`\x82\
\x00\x00\x17\xca\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00\x85\x00\x00\x007\x08\x06\x00\x00\x00\x22\xd8\x89B\
\x00\x00\x17\x91IDATx\x9c\xed\x9d\x0b\xb4\x8fU\
\xde\xc7\xbf\xe78:\x08En\xb9\x87\x0a\x19J\xa6t\
B\xe9&]\x94t\xbfH\xef\xe8&\x85\xe9\x9d\xc9\x94\
)]u\x91\x19MM5x\xbb\xe9\xa6\xa1L\x97\x91\
\xa6D\xd2U\x09!*\xe3\xe4r\x90D!\x97\xe3\xbc\
k{?\xbb\xf5[{\xed\xe7\xf9\xff\xff\x87)\xf3\xae\
\xf9\xae\xf5,g\xff\x9f\xe7\xd9\xcf\xbe\xfc\xf6\xef\xbe\xb7\
\xbcE\x1f\xbc~\x8b\xa4k$\xed%\xa9\x8b\xa4\x03$\
\x8d\x90T]R{I\x1fKzGRGI\xbf\x94\
\xd4U\xd2\x0d<\xef\xae\xb6\x92^\xe2\xefF\x92\xbe\x95\
\xf4\xae\xa4\x83$\xd5\x96\xb4\x8d\xf2\x81\x92\xaaJ\xaaB\
}\xcd%\xed-iOI3$5\x90T\x99\x7f]\
\xb9\x96\xa4B\x9e{\x87\xf6T\x92\xd4Z\xd2\xdb\x92*\
\xf0[wIWJ\x1a+i\x9c\xa4\x13%=\xc9\xfd\
\x0b%}#\xe9\x15\xdaw\xa3\xa4\x89\x92\xa6Q\xfe\x8d\
\xa4\x05\x92zHzAR\x0bI\x1f\x99\xfe\xf4\x95\xf4\
\xa5\xa4\xe7)_ i\xb5\xa4\xe7h\xf7\xf1\xb4\xcd}\
\xa3\xa2\xa4\xce\x92\x16J\x9a'\xe9\x03IwQ\xff\x17\
\xb4\xb53m\xff\x1f\xfaq\x1b\xdfzDRKI7\
Iz\x8d\xfb-\x19\xe7\xe9\xb4\xaf\x12}\xac\xc9\xfc\xb8\
q\x1a$i\xa9\xa4?J\xaa/\xa9?\xe3\xef\xea{\
F\xd2*Ik\x98\xdf:\x92\x9e\xe5\xfb\xae]S%\
\xb5\x91\xf4\x17I\xd7\xf1{\x91\xa43\x0b$\xfd^\xff\
\x07\xd7\x98M\x92FS~\x85\x0f\xbe'\xe90I\xef\
3\xc1ws\xdf\x0dl\x9e\xa4\xfb\x180\xf7\xfe:\x88\
\xa8\x05\x9d\xdd*i\x96\xa4\xa6\x92\xc6K\xda\x83\xfb\xae\
C\x8f\xf2\xfcB\x1a\xbc\x1e\xa2\xfa\x90\x8e\xaf\x84 \xdc\
\xf3\xd5$\x15K\xda\x9f\xfb\x95\xf9\xb7\x0d\xf5Vf\x82\
&H\xba\x9d\xf7g\xd3\x86\x05\xb4o\x91\xa4\xc7$\xbd\
Hy\x1e\x93\xfbwI\xad\xf8m\x00\x93\xec\xfe\x9e\xcf\
\xb7\xfd\xf3\xb3\xa9k\x12e7\xd8_1^U \x96\
eLh=I\x87C\x0cO\xf0\xfcg\xbc?\x95\xc5\
\xf7%\xed\xf3\x0b\xeeSI\x93\xf9\xe6\xc1\xf4\xef\x0d~\
o\xcd\x98;B{Z\xd2!\x94\xd7\xf3~K\xca\x1b\
y\xa6\xb1\xa4\x93$\x1d\x0aa_$i-\xc4=\x95\
\xf6\x9d\xce\xf8\xba\xf1:\x83\xeb\x17\x8e0\x1d\xa7(\xa3\
B7\xc1OI:\xcf\x94_\x95t\x02\xe5\xe3$]\
*\xe9\x1c\xcaGI\xba^R7\xcaGJ\x1aF\x87\
\xc5\xa0\xdc\x0fw\x11\x0d|\x8c\x89\x14\x0dw+\xf9\x0e\
I\xae\x0d\x1dh\xfc@\x88\xc9u\xfc*\xb8\xc0F\xca\
\x83%]\xc2\xfb\xa7\xc1\x15\xae\xa4\xec\xde\xaf\x01q:\
\x9c\xcc\xc0\x5cE\xf9\x18I\xa7H\xfa5\xe5\xce|\xff\
r\xca\xdd\xe1\x04\x17R\xaeD\x7f\xffD9\x9f\xef\xdf\
A\xb97\xf5\x9f\x0c\xa1|\x0cw\xf3\xf5\xf7\x92t\xac\
\xa4~\x94\xddj\xefi\xda\xdf\x8b~\xfe\x8d\xf2I\x10\
\xf7x\xca]\xcd\xea\x16\x93\xd6D\xd2\x1f\xcc\xf3\xed\x18\
s\xf1\xad.p\x1b\x87k!\xca\x17(\xf7\x85\x9b=\
H\xf9\xbf$\x952'\xa2\xdfnN\xfa9\xa2\xe8A\
\xe3\xdc\x8a\xa9K\xc76A\xc9\xf5y\xd05v\x0a\x84\
Q\x05\x165\x0dJ+\x84}\x960I\xc5\xfc\xf61\
\x03\xe6\xd8\xf8\x0a(\xb8\x07\x0dX\x0a\x9b.\xa2\xae%\
\x926H:\x02\xee\xb1\x02\xca\xbe\x98\x7f\x17\xc0Q.\
a\x85\x8a\x89hE\xdb\xc5\xca\x17\xe2l\x1b\xe5\x8e\xd4\
[J\xb9\x88U\xeb\xda4W\xd2\xd1|\xef\x078\x83\
#\x1c\xd7\x97\xedLVK._n\x03\x17\xf4\xe5N\
\x10\xf4\x12\xfa\xf4K\xc6l\x0b\xe3y\x04\x13\xfb\x03\x0b\
\xcc=\xbf\x0fm\xfa\x87\xa4S!\xb6\xf5\x8coO\xda\
\xea\xc7\xf7,I\x9bi\xd3t\x08i\x1bb\xc1q\x88\
\xb3\xb9\xbf\x02N~.\x0bH\x8c\x7f{\x08a\x0b\x1c\
\xbb-\xdf\xf3\xf7\x0f\xe3\xef\xad\x94\x1d\x03x\xce\x11\x05\
\xbf\xebL&\xe0{\x06\xad\x09,|&\x03\x17\xa26\
z\xc3&\xc4\xc2\x02X\xe8\xe1\x88\x9cLh\x83N\xb1\
\x15B\xab\xcf\xf7\xbe\xe3\xdf\x7ffx\xff\x10t\x94-\
\x94\x1b3`\xcb)\xd7fp\xb7\x9a\xf2Zd\xe7\x7f\
\x90\x82\x02n\xddbt\x8b\x10\xaf\x19\x11\xe2q\x00\xf2\
\xae\x81\xf9\xcd\xc9\xd39p\x8b\xd1\xb0\xde4\xdc\x8a\x5c\
\xb3(Cl\x89{/\xb2*cx\x10\x02\xcc\x153\
YY\xb7!\xaf\xbf/G\x1d\xb5`\xc7\xf7\x99\x95\xf9\
\xff\x06\x9e\x95tM\xe9\x90c\xa9\xbf\x0d~\xbb? \
\x08Q>\x91\xbf_P:\x06 \xdf-\x1c\x97y\xdc\
\x94\xc7\x9b\xf6\xc5PR\xceI8\x94o;\x8b\xe8\xb2\
r\xd6\xf15\xb2\xbcO9\xdf\xdf\xad\xe1\x07}uJ\
#+`\xc2\x0c\xa2\xdc\x11\xd9\x9c\x864\xd6\x7f\x16&\
TA\xf0{mt\x08\xfb\xdd\xeb\xcb1x\x9b\xe0\x04\
\xf6\x0a\xf1,f\xda\x00\xf4\xa2\xaah\xe4\xd9\xa0\x86Y\
$N?\xd8\x17\x93\xaem9\xda\xba[\x22m%\x86\
\xd8\x9f\xf2\xef\xb0\xd1\xd3P\x96p\xaf\x0a\xbe\x81lq\
y\x0e\x93\xe5\xf0\x09\xca\xd5~\xc1\xd5\xddh\xf9\x823\
^\x85\x1e\xf2O,\x86\xaf\xb2\xa8\xffpD\xe4]\x94\
\x9d\x1fa\xb1\xa4;!\xb4\x0brh\xebn\x8b\x5c\x88\
\xe2x4\xe8\x1eY<\xdb+\xe1\xf7n\xc6D\xf5(\
\xc3\x9c\xfaG\xe4\xf9\xfa\xac\xca\x8aY\xb6\xb1\x10\xc5s\
spM\xc2\x84\xbd\x97\xfbup\xa6\x09\x8bj\x126\
\xbc#\xda4\x0c\x0c\xc4f!\x97\xb0R\xc6b5\x9c\
a~\xff\xb7C.D\xd1\x22\xa2p&\xa1r\xc2\xef\
7F~\xcbCi\xbd\xd3X\x0a\x16us \x8a\x96\
\x19\xd8\xf8\x7f\xb3\xb2-\xfe\x88\xcfe{\x8aR\xeb\x11\
\xbe\x1bCg\xf4\xa1\xe9\xc6\xe7\xf1o\x05O\x141\x0d\
\xfc\xc9\x80\xe5\xc6P\xc2\x8a\x0c\x11\x1b\x8c\xbe\xc63g\
q\x1f+\xd4\xd9\xc6\x0fE\xee\xd7\xcaA\xc3\x9f\x8e\xbd\
\x9e\x86\xeb\x82{\xce[\xfa+D\x83\xf3'$\xe1\x88\
\x1c'\xb9\x03\xde\xcc\x119\xbc\xb3[\xc0\x11E3\xdc\
\x9b!f\x1a\x9b?\x869\xac\xcc\xf68n,\xbe\x89\
<\xdf\x06\x7fF\x887\xcc\xa4\xaf\x89\xdc?!K\x91\
%<|u2<3=(7@,\xfe\x01\x02\x89\
\xa1-n\xffFY\xb6\xc3b\x10\xe6\xf3\x91\xe5x\xf7\
gA>\xae\xd1\x83#\x1fo\xc9@\xfd%\xa1ao\
\xe1\xd4j\x10\x99\xec\x18\xf7\x88\xc9\xd8\x85\x98wMQ\
d\x97$|k\x22VK&qW\x0d\x87Z^\xca\
3\xa5\x91\xdfz\xa0\x8c\xc68\xd2\x118\xe3\xf6\xce\xf0\
\xed4\x5c\x011\xf6\xc4}\x9e+b\x8b\xe9_\x86|\
\xbc\x801\xd4e\xd2\x86\xe3\x86\x0d1\x85r\xe7\xc8\xea\
\xac\x1f(\x94\xd720!\x1a\xe3\x0e\xfe\x12\xbfA\xcf\
\x94\x8e>\x85\x99\x9a\x09wfx.\xa6\x047\x85\xcb\
\x84\x04s0\x1c\xa2<\x13\x19\xc3\x04\x02R\xfd\x02\xb3\
\xfe|\x14\xe1\x18\x9c\xc9\xfc&\xc4\xf9\x93 \x1f\xcd\xfb\
\xad\xe0c+\x88*\x0ay?\x9b\x89\xf3\x97\x0b\x8a\xfd\
\x95\xfb1\x82iH\xc0H\xe8\x12\xc3\x13:S\x89\xd5\
\x9dG<\xe0\xc5\x94N\x97\xe2\xcb\xf0\xd8\x90\xf2l\x8c\
\xd0k@\x98\xa3#\xf7d\xfa\xebQ\x88b\x9c\xc6!\
\xb60\x1e\xb9\xc0\xc5\x1b\x1e\xc0\xdd\xde\x03\xf1\xfd\xa4\x09\
\xa4Y\xb4BG:\x82\x08j\xe8\x0ah\xc6\xb5KQ\
\x00\xfb.\x0e*=\x83\x95\xeb\xf09f[L$\xa4\
a-\xf7\xf6\xcf\xf2y'\xafG\x99\xf2;\xfc\xd6\x90\
r!\x5c\xa5\x03\xc4\x11\x9a\xb6\x1eUqJ\x85\x96\xc4\
\xc0\x94\x01\x9c\xc7j\xb5h\x97\xc0\xb9lz\xc1\xd3\xb4\
\xb3\xb7\x896\xe6\x82\x89\xe6\xd9\x02\x14\xd9\xb1\x94\xaf\xc6\
\xa7S\xd3\xf4\xeb\x0e\xe6CDI\xbd\x07y\x18\x0b\xd9\
Gt\x8f\xdc\x99\x18\x8f\xf7*\xd6\x0c~\xbf\x97\x90\xf4\
\xec\x0c+2\x0d\xde\x0d}^\xe4\x99U\x116\x9e\x07\
\xdb\xf7\xab\xfcC\x22w\x16m\xb1\x14\x9e#p\x16\xc3\
\xde\x98\x99\xd9\xe2c\xf4\xaa\xd0\x02\x0b\xcd\xea\x1fpZ\
\x8d5\x13\xe3\xf18\x1e\xd2C\xcb9VB\x9c\x14\xe1\
\xb0\xdb\xd3\xe4LX\x5c\x93\xf0\xee\xef\xf8\xb7/\x9c\xbb\
\x13:\xccu$,\xe5\x04O\x14\xabX-\xded,\
\xa2\x83\xb3\xb3\xa8,\xc9\xe1\xb3\x9d\x09\x8ci\xec##\
V@\x0c-#\xbf=\x80\xe9:\x1b\xcb\xa7\xbcx\x0e\
\xabiX\x82I\xbe\x98pz+\xcaN\xbc\x0cM\xf9\
\xd6\xd0\x0c\xe2/[t\xda\x89w\xad\x8f\xa6\x13\xdc\xe2\
y\x886S<\xeaG\xf8\xd0\xf9>(U\x0f\xe0\xa4\
ZF\xe8<\x1b\xb8d\x8d1\x91\xe7\x8aY\xf5[\x8d\
\xf7\xd0\xe1\xe6\x0c\x83\xeb\x91O\xfeD\xe8\x16?\x90\x18\
\xc9Q|[|\xa7\x00\xbd\xa3\x02YN\x85(\x90\xa2\
\xecQ\xc2\xcaz'\x8b6T\xc7\x9a\x11\x03^\x9b\xe8\
j\x0cE\x88\xb7\xeaY\x8e\xdbO\x89\x15(\xffY\xc1\
\xe6Sx\x1c\x8d\xfe\x90\xcd\xa0y\xf4\x8f\x10\xd1\xbb\xc8\
\xccmX\x1f\xf5\x883<\xb0\x93\x83q\x16u7\xa0\
\xce\x96&\xbf\xf0]\x22\xa0\xbfA\x89\xbd\x95\x9c\x8a\xdb\
w\xd1\x04\xf4\xc1Jx\x00\xeb+\xb4V\x1e3\x0a\xf6\
\xee\x86\x09\x8c]&\xafm\x94(\xfe\x83t\x1cF\xc6\
\xd3F\xac\x93\x1e\xe4e|\x81%\xd7m7\x1e\xbf\x07\
Mz`\x22*\x5cs\xd9\x8ehu;X\xed\x85\xb0\
\xdf%\xac\xb8\x85\xe6\xc5\xeb`\xc3;\x9bTR\x84\xa6\
\xdf\x89+\x1bM\xb9\x05I;U0\x01\x07\xc0\xd1\xaa\
2\x19I\xb8\x8e\x95Q\x03\xbd)\x86|\xd2\x01\xcaR\
\x94W\x8fS\xe1|\x1f\x91^\xf8\x0d9\x19%p\xc1\
\xbc\x0c\xbe\x96\x9f\x1b\xcd\xc9x\xff6\xad\x1d\x8eS\x1c\
\x8b\x82T`\x02Os\xf1\xa2-g@+\xa2\x84.\
E>\xcdA\x11;\x89\xc9\xfa4\xe5\x1b\x05\x98N>\
\x18\xd6>\xe2\x5cZ\xc0\x84\xccC\xc3\x9f\x1f\xdc?\x1f\
[^L\x865G\xcf4\xc9\xae\x16]p\x14\x95b\
9,\xc2\x11u\x03\xcf<\x83\x8fd\x18\xce\xa1Yp\
\x81$\xf6z<\xbe\x02\xa1\x98~\xcd\xb3\xcd\x83\xe7>\
\xc0l\xde]1\x9c<\x95X\xf0q\x07\x1cQ\xd8\x01\
\xb7\x98A\x07\x07d\xd19'\xb3\x87D~\xaf\x8e\xc9\
\x97\xe6`)\x8b\xb8\xa5\xfb\x06\xca\xeb\xc9\xe8'I\x9e\
J\xc7\xd5^\x0e~\x9b\x1e\x897\x5cl\xb2\xbb\xca\xb0\
b\xae4\xf9\x1f.\xec}\xbc\xc9\xfb\xf4(B\x87\x08\
\xdd\xcd\xf3\xf9\xc6Z\xf3\xdb\x09\xc4Rvg4\x8a\xc4\
\xab~D>\xac{f\xe4^\x11\x9e\xcbX\xe6R\x88\
\x1b\xa8\xc3\x86\xd6\x07@T\xc5\x09&\x9f7w\xf3\x88\
&Z\x8c\x86-\x17\x98gcm\xf4\x18\x1a\x09f\x85\
^\xcd5XU\x1e\xe7B|\x1f\x99\xdf\x1cw96\
R\xff\xef\x13\xe2\x0f\x97\x05\x04!\xb2\xc5wg\x94\xa6\
$A\xed@>\xa6h,\x07a&\x09\xbd\xa7\xf0\xaf\
\xbfb\xc90B,<l\xcamH\xf0\x9dfX\xd5\
\xd7\xd4q1z\xcc5\x98\xa8\xde\x91eC\xd7\x0f\x1b\
\xa2\xf8E\x823\xc7\xa3\x83IW\xf7\x08\xc5\xc0Z\x22\
\xb2\x1e\xe3\x10{a\xbdO\xd06\x8f;\x8c\xe7\xd0b\
@\x82\xaf\xe5\x93,\xb3\xb8~.d\xb4>\x0a\xf0f\
\x86I,\x9f\x9b@\xcc\xfc`5\x09\x82\x899\x8e\xac\
\x12\xea\xbd\x917\x1aNq.y\x13\x1e\xde-;\x87\
\xbf_\xc1\xe1\xe5\xd1\x0c=cC\x86D\x9b\xb1\xe8\x16\
\xeb\xe1N14\x22\x8c\xed\xf3\x1b\xcap\xd0\xad\x0a<\
\xba\xfb\x90\xedu\x01\x5clP\xa4\xae\xfb\xc9\x03\x89a\
!\xe2\xf4\xbe\x9f:\xba\xb9\xab\x90OX8T\xec\xee\
!H\x96DU+\x13~\xf7\xac\xb3/f[),\
v\x1e~\x8f\xa9\x09\xef\x8d\xc7\x8e\xf69\x8eo\xe3\xe6\
\x9e\x0c\x17HJ\xef\xf3X\x01\xe1\x8e'7S\x91@\
Va$\xc9\xa74!Xw>>\x8e)\x91\x08\xe9\
\xea\x14\x82\xf0X\x9f\x12}\xfe)\xf1i\xc0\x1d\xb3B\
>\x13\x16N\x96\x0f\xb0|\x16\xa9\xa4}J2\xed\x8b\
\x10\xc3(<\x8eK\xe1\x0c\xf7\x90\xc5\x956P\x15\xcd\
\x04\xec\x0b\xbbo\x80S,\xcd\x7f?\x92\xac\xa9QX\
\x10>\xe2\x1br7%Dt\x87\x99\xady\x16C\xe8\
\x83\xc52\x14\xd10\xa2\x1aby\x16\xf9\x9e\xb9\xe2\xa3\
\xe0\x0a\x95\xe1\x18\x8aps\xe7\x84|\xe4\xe7q\xc1K\
5Y\xe1\xe3P\xbe\xce\x80e\xfe\x19\xd1\x11\x93\xef\xb3\
\xd0\xe4[\x98\xdf\x9a`9\xb4\x80-\xa7\xc1Z \xcd\
\xb0\xe5Ld\x18\xda\x17\xa2j\x08\x1ch:>\x0b\
OHEp\xabT\x85\xca\xe0\xce\x04\x82\x09q;:\
C&L\x0d\xf6\xb0\xec,\xee@\xd4\xd9\xab\x8b\xd9\x17\
\x9a\x84\xf5\x11\x939#\x0a\x90\xa9\xab\x83\xc9\xf48\x0b\
\xd6\xbd\x16s0)\xb7`\xb1\xd9\x8d\xbeop\xaf\x0f\
\xdc\xe5\x86\xc8{\x16^T\x95\xb1\xea\x9f \xf3\xab \
!\x9eP\x85I\x1a\x0e\xf1\xb56q\x8a!9\xee\x19\
\x99E\xe6\x95\x13?1.\xf8\x15\x84\xf9L\x0eu\x0e\
\xc3%\x9e\x8d5\xf2Uds\xd3k$\x16)\xc1\x0f\
\xf4\x1e\xd7rD]\x88;M\xdd\x9bs\xc9./H\
\xd90\xe3\x91\xcf*\xff0\xe2\x94y\xc8\xc43\xbc2\
\xf94J_=\x5c\xbe\xad \x8aM\x19\xda\xe2W\xf5\
\x16\xb3\x13\xbcBJj\x9d\xef\xf4\x08\x02g\xbdM\xe8\
\xba;W.\x98A?\x86F\xfc!\xb7\x07\x96U6\
\xf0\xc7\x16\x9c\x13y\xf6q\xb3!Z\x98\xe0\xb1\xfc\xd4\
lp\x1bc\x17\x06\xea\xbc\x88\x1b\xc1\x82\xcc\x89(:\
$\x98\x5c\xc2\xdd]\xc2\xaa\xaci6\x0e\x0fC\xcb\x0e\
\x15T1\xb8\xde\xb4\xbc\xde$\xd9\x0c&\x8b(\xe6\xd2\
\xee\x85e\x224\xf6\x07I\x8a\x19\x8e\x05\x14\xcb\x02\x9f\
\xc3\xe4-\xe7\x9b9+T\x118\x22\xef\x87\xfb\xba\xba\
\x09\xfb'mY\xc8\x84\x9b0\xe9m\xc6\xd4K\xc1N\
\xb8]\x81g#Da\xfd6\xf3sI\x1c\xce\x87\x03\
\x9c\x17\xc9\xacz\x1f\xdd\xa1#~\x8c\xe6\xfc\xdd\x11\xef\
b\x8c <\xbc7\xf2(\x13\xb2\xad\x835\x116\xee\
tR\xfb\xbch\xca#\xf2\xb9\x99h\xe7\xc6\xc8^V\
q8\x88W\xe6\xa6\xec\xa2\x81v~\x94Mp+\xab\
\x14\xa7%\x02\xa7aa$\x92\x9ad\xb9\xed\x0c\xbe7\
9\xb3\x1e6\xbe\xd1+K]h\x07\xf2\x91\xc7\xa7E\
Xf\xfd\x1cr*B\x8cD\x01\x0c\x09\xad\x0aJ\xe1\
\x16\xd8\xeb\x82\x04\xed\xf8 \xe3\x1a\xcfOp\x98m\x08\
\x14\xc9\xc7\xd3\x5c\xb79`3\xdc\xcd\xeaF5\xcaY\
W\x9e9\xe4\xc4cW%\x01[\x94D\xfc3\x07\x9b\
o\x15\xe6b\x22\xe7\xc3f\xbaG6\xfc64\xf9\x91\
\xb9\xe2;,\x80X\x04s<\xf2\xf9\xc0 \xf9\xc6b\
\x94Q\xd0\x0a\x22\xd6\x91\x10g\xe1\x0a>\x92\xd5\x9e\x84\
l\xb2\xc1\x9baB\xfb\xfc\xc9\x85\x117|\xb6\xd8\xce\
1N\xbb\x82X3\xe1\xc5 \xc85\x90ly\x11j\
\x08S\x1b\x13\x91OD\xd4*=v\x07zH(\xb9\
\xc2\xefwX`\xde\x1bCRj\x88\xd5<\xdb\x07\xc5\
\xc8w\xa2\xc0\x1c\x99d\xf1r\x84\xfa\x8bM\xe2\xab\xc7\
G\xc6\xed\x9c\xcd*\xdd\x8e\xeb\xdd+\x88\xf5v2]\
`%\x8b \xb6\xdfdWbz$\x10g\xbf9\x04\
K\xb3b\xa6y\xf57G\xb02\x8a\x19D\x7f\x98H\
\xcc\x01\x94\x0b\xdeb_H\x819\x9bj\x91\x89\xb5\xdc\
bV\xf6\x0b\x091\x83\x8d\x88\x13\x9bm=\x07\x054\
\x86A|\xc3s\x85?\xc1U\xdaE\x82W1\x5c\x82\
\xd8,E\xccU\xa4\xbc,\x8bw\x930\x0c\xfd\xa8\xfa\
N$Bg\x83\xb3\xe1\x18>\xa8g'\x7f%\x0a\xef\
\xc4\x0c\xdc\xf4g\xcb\xbc\xaa\x8c\x99\xfbS\xb0\xd5\xdd\x01\
\xd5 \xaajXJg\xfc\x8b\x14N\xe1\xdb\x19\x88!\
\xd0% \x80\xb2 } \x8a\xd86\xbc\xbc\x0cW\xf8\
\x5cy\xb0\xc9\x10D^\xa4\xaeLm\x08\xbf[1\xc3\
\xfdL\xc8\xf5\xbd\xa46\xc7\xfeV\xa0pW\xc7\xd2\xb2\
\xefg\xdb\xf7\x8a\xc1s1<\xc7o\x0f\x05\x04\xe19\
g\xddL\xe3\x1ar\x8a\xc78\xceh[P\x99gC\
\xc5\x98\xa4c\x8di9\xc9\x88\x06\x91\xd67\x09\xeb\xe3\
q|\x1do\xe0\x1e/\xc6_\xf1\x84\xc9\x9e*0\x0d\
~\x0dJ~\x97\xc6{\x14B\xe5\xa5&k\xfbe\xfe\
\x1dI}~\xebb>\xd7JX\xb6\xb7P\x92\xc4\xcd\
\x89\x81\xbbx@\xcan\xfb\xda\xac\xf2\xc1\xa6\xed\xd3P\
\xac\xbb\x91%\xfe9\xefOeR^%\xd0w6\xed\
*\xc1\xfd\x5c\x8d6\x1e\x82\x98\x9d\xcb8.c\xfc\xbd\
\x87\xd7\x8e\xff\xfb\xe4v<\x8cu\xb4\x84\x14\xca\x90\xe3\
.\xc0\xdd\xbf\x11q\xba\x18\xefhSD\xe8w\x10\x98\
\xcd\x82\xf7\xdf\xba,T8\x1ae\xb08\xbcW\xec\x00\
\x93>\x7f\x05\x03\xef\x13B\xfb\x19\xcb\xe2i\x9cRM\
\x8cy{\x1a+%f\xe6ym\xf9 b\x19i\xb8\
\x9c\xce\x8fAG\x09\x95\xc8\x86\x81Ck\x0c\xf1\x10\x8b\
\x8e\x84\xeb\xed\xaa\xbb\x0f\xb9l\xcd\xdd<\x93\x07\x12\xee\
c\xb1\xba\xce\xd3\xe4\x88\xeck\x9cq\xfd\x83\xe7\xeb\x19\
W\xbaw\xdf77\xfaNZ*~\x13\xc6\xc5\xef+\
m\x0a\x11\xdbm\x10\xbd \xde\xaa\xe8\x10\x0b\xf1\x03\xf9\
\xf1\xaf\x91\xc1\xc4\xae\x16\x8a\x8f3\x83\x14\xf5\xcd\xb8}\
\x1bs\xf9\x83QCe\xe9J\xe3\xb9\xb4\x9a\xfa&V\
\xb2\x7f\xbf+\x03b\x1b5\xc4\xdc?\x8b\x09(4n\
\xdf\xcd\x98\x89\x07\x91\x88[\x87\xc9\xb9\x08\x17\xfa\xdc\xc0\
\xd4|\x14\xcb\xe1%\x14R\x8f_\x05\x9bz\xf7\xa4\xbe\
\x90\x0d\xdb\x09\xf5\xb8\x9avz\x82X\x81\xef\xc4\x1fH\
\xda\x1c\xee0\x89\x09\xf0\xfd90r|d\x09\x09\xc0\
\x8dY\x5c\xc2=\xef\x0f]-2\x09\xd3\x1bX\xe9\xbe\
\xbe\x1ep`\x9b\xaaxi`\xda\x9fh\xf2C\xee\xc6\
9\xe9\xe7\xd9\xbb\x08\xee\xa1/\x83\x19\xcf\x0b\xf9\xbe\xeb\
\xfb\xc4\x98\xa29\x8e\xc9\x11l'\xcc\xaf\xcc\x87eZ\
\xcf\xe4\x5cX\xff\x08&\xcc\xbb\x5c\x8b#\x0e\xb0\x9b\x83\
\x13m.a\x22=\x9ap\xfc\xc19\xac\x80\xeb\x19\x90\
\xa4l\xa6\xbadx{\xeffG3\x11\x17\x04&j\
7\x93|[\xdfX\x14\x1bY\xe5np\x0a\x09@\x9d\
\x80\x0b\xbd9\xef\xf8qX\x88\xa76\xdb\xd3\xf9\xba\x06\
\x1ckT\x86S\xf9\xfa\x9a=\xb5\x0b\xcc\x0e5\x8b\x86\
\xc4\x9c\xfcb8\x84\xa0\x9e\xd8\xf2\xe97+\xef\xcdq\
\x11\x9b!\x84\xc9\x88\xb5=\xd3\xcc\xec\x90ST\x086\
\x01U\x8b\xa4\xea\xf5\x0e\xb2\xa9\xbf']n5r\xfe\
u\x13\x1c\x8b\x99>\xa1X\x08\xf3\x0e\x0a`\xa5\xdf\x92\
-\xfe={C\xd7D\xae\xe1\x91\x93\xfd^B\xc6.\
%\xdco\x7f\x9fl\xcav{\xde`&\xc3\x8b\xc7\x83\
\x8c\xf8h\x1c,\x8c\xc59\x1e\xd7\x18\xee\xd3\xcd\x94g\
a\xb3\xb5Z$\x1c\x09\xb52 v\xef\xb4\xba\xc8\x10\
\xdc\xe7\xa6\xae<\xf4\x06\xbf\xaf\xf7\xab\x84\xf1\xdc\xb1I\
:$\x8aJ\x01\x07\xa8e\xcez\xb6\x9d\xf4\x1f+!\
\xa0v&+\xaf\xad9\xb29\x09\xe1 \x85a\xf1\xc5\
p\x89\xf7L\xf6Wu\xde\x0b\xaf\x1a\x91\xec\xb0Z(\
Y\x0dL<e\x02,\xdb\xc2\x0evE.\xa7_x\
x\x07[\xe8 +\xaf\xc5e\xdb\x97-\x0a\x12B\xf9\
[\x89\x12{N\xe7\xdbt\x17\x8b\xce;\xe0\xfc\x82)\
\xa1\xbfC\xe1\x88\xb1\xb1\xac\xe9\xdb\x16\x12\xc5\x86\x88\xc3\
*\x5c\x89\xd6K\xf65\xd9Y=\x8dB\xf5\x06\x1bu\
\x920\xd5p\x92\xe5\x91\xa8\xa9\xcf\xf18\x0c\xb9]\x9d\
\x0e\xd5\x88\x5c\xfdQ\xaa\xacR\xf8\x01\xb2\xdd\x22\xcc&\
o\x188vn\x83\x18m\xf2\xd0\x95\x10U\xb8A\xa8\
\xfeN\xc4/~0&c\x12\xc2\x8c\xaa\xa4\x0dJ\xd6\
K<\x17\x91\xe0\x09xF\xc0I*@([\x10'\
]\x13\xc6\xd3YH;\x88\xa2\x02\xab\xbb2\xec3\x8c\
W\xd88|5:\xe6W\xa7\xe7\x18\x17\x04)}I\
gG)\x88\x05lKH+\xdb\x07\xfd\xc4\xeb6s\
\x11'\xe1\xb5\x89>\x14b\x9e=\x85\xf5\xd3=\xc8c\
\xe8Mx\xd9\x13B\xff@9\xf3\xff\xcf\x88S\xb4\xfc\
$\xecM]\xb3\x02\x91\xda\x06\xc2k`\xc6\xad2\x0a\
\xdbo3\x9c8\xb3=%\xaf\xc4\xd7\x13\x1eLb\x09\
0\x1cW\x9fS\xf2(\x0a\xa5W\x84;\x05\xe7\x84\xe5\
!\x92{`-\xceN\x18\xcf\x1d\x06D>lu5\
\xd7\x9aH\xec\xa0\x0f\xabf\x849*\xf9C\xd3\x11\x8f\
\x9b\x13:\x1ab\x989b\xa0q\xc21\x8c\x15\x18\x00\
\x7fTs\x09\xab\xdd^e$\xc5\xacf\xb2\x9b0\xa9\
\xdej\x19\x17\x9c\xa1u\xb6\xd9\xa5nC\xf1\x1f\xa0o\
\xbc\x04\x11\x1ec\xee\xf9\x8d\xc9C\x03\x0e\xda\x06Eo\
\xb5\xb9^\xe5\xb9\xf0\x04=k\x19U\x89(\x99\xad!\
<_Wx\x9a\xde5\xc45\xc6E\x8e\x86\xf0\x1b\xa0\
.\x0e\x8em\x98\x19d\xc9\xed\xc1B\xf3z\xd4\xd2\x84\
\xf1t\xbb\xe6\x7f\xdc*h\xa9\xf3\x0b(f/*Z\
F\xf8\xb7*lv\x13\x83\xdd:\xc8\x16\x9a\xc2\x80\xdc\
\xc6j\xdb/!\xa3k9+\xbf&\xa2anp\x7f\
\x0b\xb2\xbd;\xed\xf2G:\xd7c\xd5|M\xfb\x9e\x82\
X\xb63a\xab\xe9X#s\xdee{\xf2.\xda@\
\xa0\x97\xb2\xc2\x17\xf1\xde\x1c\xcf2\x0d\xf2\x91\xcd=\xb1\
<\xae&~2\x19E\xed\x0a\x06\xbc.\xd7\x12\xc6\xe9\
3bF\xe1an\x9f1\x99U\xe1&\xb3\x82\xfb+\
\xe8ScX\xf8Zci5\xa5\x9d\xfb\xa1t\x86\xa2\
|-\xe6\xe9\xf1\xac\xf4f\x94\x07\x06\xcf\xbd\xcd\xbd\xbe\
X\x84\xdb\xe9gc\xeaXO\x9fw\x9c^\xe4L\xd2\
B\xc3\x96\xd6a\xa3\xef\x015\xbd\x8d\x12Y\xc0@\xbf\
\x0e\x0b\x9ab\xe4W%V\xfb\x22&\xe7p\xa8\xd6'\
\xce\x1cj\xfek'\x7f\xa0W5\x06\xeb}:~\x00\
\x036\x1ay\x7f\x1c\x03>\x12K\xa0\x1dQ\xd3y\xe8\
\x1a\xfbC<c\xf9V'\xda\xfcgL\xd2:|\xf3\
y\xda\xe6O\xe0]J\xfb\x7f\x80\x80\x1e\xa1\xee&\xdc\
w\xab\xbd\x0a\x04Y\x05\xc2\xbb\x98I[\x8b\xe2{\x8a\
\xf9\x9f\x8c&CTo\xb2@\xd6\x12o\xa8\xc1\x04\xce\
\xc0YW\x8b\xfb\xaf\xd3\xce#\x19\x9b\x19\x10\xcb\xfe,\
\xb67y\xf6\x18\x16\x8d\x0f\xd9\x9f\x0c\xe1\x8d\xa2\xed'\
\xa0Ky\x7fE[,\xb5\xd1,\xc8\xe3h\xc3X\xda\
z\x12c>\x06\xe2\xea\xc0\x22y\x04\x93\xb7\x1dc3\
Y\x117\xf7\xe5\xe6\x80\xd3\xa1\x88\x04\xaf\xc4]\x0bk\
+3'\xbc\xcd\x83B\xf7\xe24\xdb{!\xac\xea(\
mk\xccyZO\xb1j}\xc2\xcc9p\x84\xcf!\
\x80\x97\xa1\xe4O\xe8\xc8\x18\xec\xed\xd9LZ\x0f\x06q\
>\x03r\x0f!\xf6\xf5\x10\xe6\x8d\x98\xa8k\xe8\xb0\xdf\
9?\xc7\xe45\x0cc\xa5\xee\xc9\x04\xf73\x0e\xae\x12\
\xc4Ok\xfc\x14\xe3\xe1\x90~\xbb\xe2_\xcd\xb9\x14B\
G9\x97\xf1X\x0a\x87\xaab\x1c{c1\x11\xfd\xf8\
\xf9}\xaboA\xc4\xabp4y\xb1t/c\xe87\
(\x7f\x88\xe9?\xcd\xfc\xdfc]hok\xac\xb3\xa3\
\xa9\xaf\x0e\xe3t*\x0a\xa8\xd7\x97Z1\xde\x0d0\x10\
\xea\xa1\x1f\xb5@d\xec\xc7w\x9a\xe0\xb7p\xd1\xd5\xfe\
V\x03ojd\xe8\x06d\xacw\x02\xad\x85\x10|\x07\
\x8e\xa2\xc2\x87Qb\x8e\xa3\xd2\x09\xf8\x06\x9a\xe1\xe4\xf1\
\xf1\xfde(\xa3~\xff\xc6\x83\xc8\xc8\x05&\xc6\xd1\x1f\
\xaf`\x1d&q(\x03\xd1\x04\xaeP\x0c{\xabmN\
\xa3\xb9\x15\x82\xf0\x19\xcd\xf7B\x10\xc5\xf8N\xfc\xa6\x9d\
U\x10\xc4-\x10\x84\xb7\x98\xfc\x81fK\x98 \xc7\xd5\
6\xb0*+\x07G\x02\xdd\x1e\xf8=F\x98D\x9c=\
\xe0Lw\x9b\xfb#\xcd\x02x\x85\xf7\xa7\x19\xb9\xde\xdb\
(\xd2\xc2Q7\xc1$G\x0fd\x11u\xa6<\x88\x8d\
\xe0>_\xd5\x8b1\xff|\x17\x16\xda\x81\x88\xdc\xb6x\
a\x1b\xd0\xa7\x8e\xe8T-XH\x1d \xd2&p\xc2\
\xa3\xf1\xf0\x96Z\x93\xb4\x0b+|\x1d\x14^\x1fV\xbd\
\x0e]\xa1\x19\xab\x7f\x1d\xec\xab\x15\x134\x18\x96\xb6\x09\
\xd6\x7f\x03\xb2\xf7(X\xbb\xaf\xefh\xdeY\xc7\x0a\xed\
\x06A|\xcb\x8a8\x80\xfbk\xd0\x05\xda\xf3\xcdU\xfc\
\xdd\x82\xfa\xd7\xc1\xf6J\xe9\xc4:c\xa9\xfc\x9a\xf2=\
\x10j\x1f\xca7\xc1\x91\xce\xa3|\x0b\xdf9\x9d\xf2\xf5\
\xc8v7h\xce\x07\xe0\x12w\x1dq\xba\x81s\xf7\xdd\
\xe0\xba\xb1rl\xdf\x95\x1d\xd7t\x84\xeb\xb8\x97#T\
\xa7{\xb8\xf68\x8e\xe9\x06\xdc\xb9\xd4\x1d\xf1\xb9\x95\xe7\
\xcan\xfc\x9cXp\x13\xec\xdew\xdfq+\xd4-\x14\
?>\xae\x0e\xff=\x17\xbfp\x84\xee\xc4\x98+\xbb\xef\
8\xb1\xe0\xease'\xd2\x1c\xc7s\x13\xeeML\xc7\
\xe5\x1cwreG@\xeey\xc7\xc9\xfcx9\x02r\
\x9c\xcf\x95\x9d\xc7\xd8\xbd\xeb$\x81+;\xdf\x85\xe3\x8e\
\x17I\x9a\xf8\xbfur?\x06n\xd4\x8a\x8b\x00\x00\x00\
\x00IEND\xaeB`\x82\
\x00\x00\x04u\
\x00\
\x00\x10\xbex\x9c\xedU],\xd6\x7f\x14\x7f\x98\x96\xcc\
K$\xe4e2\xf32\xab5\xcd\xbb2\xd2\x96\x94\xf7\
a\x930c\xc2P\x11K-\x14&\x9a\xa1F%\xef\
\xef\xb6\x90%/7V,+.\xb8\xe8&#/\x17\
\xd2\xc6\xba\xc8f.|\xeas\xfe\xfbY\x17\x7f\x17\xff\
+\x17\xff\xe7\xfb\xec<\xcf\xf7w\xbe\xe7w\xce\xf9~\
>\xe7\x9cG\xa5\xd2\xf8\xf39yR\xc5oU\xf7Q\
\x95\xcaD\xa5R9\xfe\x91?*\x95\xaf\xea\x1f\xbd\xac\
\xa3\xaa\x7f[P\x8bZ\xd4\xa2\x96\x83\x92C\x87\x0e\xc1\
\xd4\xd4\x14fff8~\xfc8\xf4\xf4\xf4p\xec\xd8\
1\x9c8qBt\xe6\xe6\xe6rN\xa1\x8e\xcf\xd4s\
ohh(\xef\xf0\xcc\xd8\xd8X\xf4\xdc+6\x8a\xd0\
\xc6\xc4\xc4d\xcf'\xf7\xda\xda\xda\x12\xdf\xd1\xd1\x11/\
_\xbe\xc4\xabW\xafPZZ\x8a\xc4\xc4D\xdc\xbbw\
\x0f\xf7\xef\xdfGyy9\x1e=z\x84\xe6\xe6f\xd4\
\xd5\xd5\xe1\xc9\x93'\xa2\xa7\xed\xd3\xa7O\x91\x90\x90\x80\
\x87\x0f\x1f\xa2\xb2\xb2Rlo\xdd\xba\xb5g\xc3\xf3\xf6\
\xf6v\xd1\xe7\xe7\xe7\xe3\xf1\xe3\xc7\xe2\x9f:\xda\xdb\xda\
\xdaJ|;;;\x8c\x8c\x8c\x88Mcc\xa3\x9c7\
55\xe1\xca\x95+b\x1f\x14\x14\x847o\xdeH\x1c\
\xe6\xd9\xd2\xd2\x22\xbe;;;\xd1\xd7\xd7'\xb6\xdc_\
\xbdz\x15\x96\x96\x96\xa8\xad\xad\xc5\xf9\xf3\xe7\x11\x11\x11\
\x81\x89\x89\x09\x9c;wN\xee000\x80\xbbw\xef\
\xe2\xed\xdb\xb7\x12\xcb\xc8\xc8H\xe2\x1f>|\x18/^\
\xbc@NN\x0e\xba\xbb\xbbQSS\x83\x8a\x8a\x0a\xdc\
\xb9s\x07mmm\xc8\xcc\xccDOO\x0f\x8a\x8a\x8a\
$\xce\xd8\xd8\x18\xb2\xb2\xb2\x90\x96\x96\x86\xe1\xe1aD\
FF\xca\xfb\xe1\xe1\xe1\x08\x0b\x0b\xc3\xbbw\xef\xc4?\
\xf7\xfd\xfd\xfd\x88\x8b\x8b\x93{<x\xf0@\xde\x1f\x1a\
\x1aBAA\x81\xf0\xc4\xf8\x0e\x0e\x0e\xb8q\xe3\x06\xce\
\x9c9\x03kkk\xc9\xcb\xcd\xcd\x0d7o\xdeDa\
a!._\xbe,wKJJ\xc2\xa9S\xa7\x90\x92\
\x92\x02\x7f\x7f\x7f\xe8\xeb\xeb\xc3\xc7\xc7\x07\xbe\xbe\xbe\xb0\
\xb1\xb1\x81\x93\x93\x93`J\xdf\xe4\x85XPG\xb1\xb0\
\xb0@nn\xae\xe4P\x5c\x5c\x0cggg\xb97\xe3\
3\xf6\xd7\xaf_\x11\x1b\x1b+z\xd6\x1e\xf9\xee\xe8\xe8\
\x10\x1cZ[[\x91\x97\x97\x87\xf7\xef\xdf#::\x1a\
?~\xfc\x90;hiiIN\x1f>|\x80\x81\x81\
\x81\xf8\xd2\xd5\xd5\x15Nxw\x1d\x1d\x1d\xb1\xa1\x9e\xf9\
\x8d\x8f\x8f\xa3\xaa\xaa\x0aeeer\xa6\xd4\x7fFF\
\x06\xbe}\xfb&\xfc~\xfe\xfcY\xf8'\xc6\x01\x01\x01\
\x9271\xee\xed\xed\xc5\xf2\xf22\xea\xeb\xeb199\
\x89\xc1\xc1AXYYatt\x14\xdf\xbf\x7f\xc7\xb5\
k\xd7\xc4W`` ~\xfd\xfa\x85\x95\x95\x15LO\
OK.\xac\xc9\xae\xae.lll`ff\x06\xd5\
\xd5\xd58r\xe4\xc8^\xfc\xdb\xb7o\xe3\xcb\x97/R\
\xa7\xb3\xb3\xb3\x92#\xb1'\xe7\xac7\xf2\x99\x9d\x9d\x8d\
\xcd\xcdM\xa9\x01\xee\x1b\x1a\x1a\xc4\xe6\xf5\xeb\xd7HM\
M\x95\x9c\x88\x1d}\xef\xb7vww\xb1\xb3\xb3#\xfd\
\xa1`O\xe1{\xc9\xc9\xc9\xf0\xf0\xf0\x80\x8b\x8b\x0b.\
^\xbc(\x9c3NHH\x88\xd4\x84\x9f\x9f\x9f\xf4$\
\xf1g=\x11w\xda*}\x1e\x1c\x1c\x8c\x0b\x17.`\
nnn\xdf\xf8\x5c\xeb\xeb\xeb\xc2\x8d\x86\x86\xc6^|\
\xfa#\xc7g\xcf\x9e\x95\xba\xfb\xf8\xf1\xa3\xe0EL\xd8\
g%%%\xf8\xf4\xe9\x13\x9e?\x7f\x8e\xb5\xb55\xa9\
\x0d\x9e\xb3gO\x9f>-|>{\xf6Lj~k\
kK\xe2,--\x096\xc4\x9f\xbc\xb3\xee\x89/{\
\xe4o\xee\x15\xfc\xc9-k\x90\x9c-..\x22>>\
^\xfa\x95q\x99\x0fk\xe2\xfa\xf5\xeb\x92?\xf3dM\
\xcf\xcf\xcf#**\x0a\xee\xee\xee\x12\x8f\xeb\xe7\xcf\x9f\
\xc2\x01g\x1a\xe3p\x96rFjjj\xee;\x7fy\
\xff\xed\xedm\xa9\xa9\xa9\xa9)\xe1\x998\xb0\xe7VW\
W\xb1\xb0\xb0\x80K\x97.\xc9\x1cb|\xdeW\xe9\x0f\
e\x86\xa5\xa7\xa7\xcb\xecb_r\x9e\xff\x97\xf9O\xbf\
\x9ca\xacS\xd6\x17\xb9`\x8f\x93k\xea9\xefxw\
\x9e\x13\x07\x0a\xe7\x12q\xe3\x8c#\x06111\xf2\xcb\
: \xbf\xdc\xf3\x8cxs>Q\xa8\xa3p\x1f\x1a\x1a\
*\xff\x09\x8cooo/\xb5\xe7\xe9\xe9\x09///\
\xf9\xf5\xf6\xf6\x16\x9d\xa2\xe73\xc5\xd5\xd5U\xb0\xa1^\
\xb1\xe5\xac\xe5\x99\xf2L\xe1\xcc\xfd\xfbYy_\x11r\
Fn\x0e\xfa\xbfW-jQ\xcb\xffZ\x0et\xfd\x06\
\xe0\xd1!\xf2\
\x00\x00\x12\x0b\
\x00\
\x00\x01\x00\x01\x00@@\x00\x00\x00\x00 \x00\xf5\x11\x00\
\x00\x16\x00\x00\x00\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\
\x0dIHDR\x00\x00\x00@\x00\x00\x00@\x08\x06\x00\
\x00\x00\xaaiq\xde\x00\x00\x11\xbcIDATx\x9c\
\xed\x9a{pT\xe5\xf9\xc7?\xe7\xecfw\x93M\xba\
\x89\x92\x84\x5c\xcc\x854\xc5\x90\x12\x04\xa4B\x13*\x15\
D\x14;(b\xad\x0eQ0\xa3\x16\xd1\xb62\xd3\xca\
\x94\x96\x0e\xbdL\xa7Z\x1b\x19ia\xaa\xe3\x05\x1c\x14\
\xb0-\x1d\x0b\x8aE\x03IJ\xac`n\x5c\x86\xd0l\
\xc8&!\x84\x84\x5c\xf6\x92\xbd\x9es\x9e\xdf\x1f\xb8\xa7\
\xa4\xd5\xf9u:\xbf\x99\xfd\xcd\x98\xef\xcc\x99=\xe7}\
\xdf\xf3\x9e\xe7\xfd\x9e\xe7\xf6>g\x15@\xf8\x1cCM\
\xb4\x00\x89\xc6$\x01\x89\x16 \xd1\x98$ \xd1\x02$\
\x1a\x93\x04$Z\x80Dc\x92\x80D\x0b\x90hL\x12\
\x90h\x01\x12\x8dI\x02\x12-@\xa21I@\xa2\x05\
H4>\xf7\x04X\xe3'\x8a\xa2`\xb1X\x10\xf9\xf4\
\xfa\x88\xa2(\x88\x88\xf9\xfb\xffi\x8ca\x18(\x8a\xf2\
\x1f\xcf\xa3(\x0a\x9a\xa6]9\xe7s^\x11\xb2\xaa\xaa\
\x8aa\x18\x14\x17\x17\xf3\xe8\xa3\x8f\xd2\xd1\xd1AAA\
\x01\xdd\xdd\xdd8\x9dN\xd2\xd2\xd2\x18\x1d\x1d\xa5\xa0\xa0\
\x80\xce\xceN\xa6M\x9b\xc6\xf9\xf3\xe7III!\x18\
\x0c\x92\x9e\x9e\x8e\xcdf\xc3\xeb\xf5\x92\x93\x93c\x8eq\
\xbb\xdd\x94\x94\x94000\x80\xa6i\x5cs\xcd5\x0c\
\x0f\x0fSPP@oo/EEEtvvb\
\xb5ZQ\x14\x05\x97\xcb\x05\xc0\xd8\xd8\x18\xb9\xb9\xb9\x9c\
?\x7f\x9e\x92\x92\x12<\x1e\x0f\xa9\xa9\xa9\x8c\x8d\x8d\x91\
\x99\x99\x89\xae\xeb\x04\x83A\xae\xbd\xf6Zzzz\xf8\
\xd2\x97\xbeDkk+\xc5\xc5\xc5\xf8|>RSS\
\x11\x11B\xa1\x10S\xa6L\xa1\xaf\xaf\x8f\xa2\xa2\x22\xba\
\xba\xba(((\xe0\xc2\x85\x0b\x14\x16\x16\xb2u\xebV\
\x02\x81\x00\x8a\xc5b\x11]\xd7Y\xbcx1\x8f<\xf2\
\x08\xba\xae344DEE\x05===\xcc\x9a5\
\x8b\xfa\xfaz\xca\xca\xca\xe8\xee\xee\xa6\xa0\xa0\x00\x9b\xcd\
FWW\x17\xd7]w\x1d\x1d\x1d\x1d\xcc\x993\x87\xd6\
\xd6V\xca\xca\xca\x18\x19\x19\xc1\xe9tb\xb1X\x18\x1a\
\x1ab\xea\xd4\xa9\xd4\xd5\xd5QYYI{{;3\
g\xce\xa4\xb7\xb7\x97\xdc\xdc\x5c\x92\x93\x93\x19\x19\x19\xa1\
\xb8\xb8\x98\x8e\x8e\x0e***hjj2\xe7s\xb9\
\x5cdff\xe2\xf1x\xc8\xce\xcefpp\x90\xb9s\
\xe7\xd2\xdc\xdcLyy9mmm\xe4\xe5\xe5\x11\x8b\
\xc5\x88\xc5b\xe6\x0b\x989s&\xcd\xcd\xcd\xcc\x9d;\
\x973g\xce\x90\x9f\x9f\x8f\xd7\xeb%\x18\x0c2u\xea\
T|>\x1f\xbf\xfe\xf5\xaf9r\xe4\xc8\x15\x13PU\
\x15\x11\xe1\xe6\x9bo\xe6\xee\xbb\xef&\x10\x08p\xfa\xf4\
iv\xef\xde\x8d\xcb\xe5b\xe3\xc6\x8d\xb8\x5c.\x02\x81\
\x00{\xf7\xee\xa5\xbd\xbd\x1dM\xd30\x0c\x83\x1f\xfd\xe8\
G\x5c\x7f\xfd\xf5\xe8\xbaN\x7f\x7f??\xf9\xc9O\x88\
F\xa3\xd8l6\xee\xb9\xe7\x1e\xee\xbe\xfbn\xdcn7\
\x19\x19\x19\x18\x86a\x12|\xf8\xf0a>\xfe\xf8c\x22\
\x91\x08III\xc4b1\xd3\x07\xa5\xa5\xa5\xa1\xaa*\
\xa3\xa3\xa38\x1c\x0e\xc2\xe10V\xab\x15M\xd3P\xd5\
+~\xdb0\x0c\xe2\xdak\xb3\xd9\x88F\xa3\x13\xc6\x88\
\x08\x22b\xb6\xc5\x9f\xf1\xaf\xbf\x00b\xb1X\x04\x90C\
\x87\x0eI\x1c>\x9fO\xf2\xf2\xf2d\xdd\xbaur5\
V\xae\x5c)\x5c\xf1\x1b\xb2t\xe9R\xb3=\x1c\x0e\x8b\
\x88\xc8\xed\xb7\xdfn\xf6\xef\xda\xb5\xcb\xec\xd7u]\x86\
\x86\x86\xcc\xeb\xa1\xa1!Y\xbdz\xb5\xb8\x5c.s\xbc\
\xa2(\xa2(\x8a|\xff\xfb\xdf\x97-[\xb6\xc8\xacY\
\xb3&\xf4\xc5\xcf\xff\x8f\x0fDUU\x01\xe4\xe8\xd1\xa3\
\xa2i\x9aD\xa3Q\xd1u]\x1a\x1a\x1a\xe4\x1f\xff\xf8\
\x87\x88\x88\xc4b1\xd14M\xee\xbf\xff~Q\x14E\
l6\x9b\x1c?~\x5c\x0c\xc3\x90\xbe\xbe>\xf1x<\
\x12\x8b\xc5\xa4\xae\xaeN\xecv\xbb(\x8a\x22\xbf\xfa\xd5\
\xafD\xd34\x09\x06\x832\x7f\xfe|IJJ\x92\xe2\
\xe2b\xf9\xfd\xef\x7f/\xb1XLv\xee\xdc)O>\
\xf9\xa4X\xadV\xb1Z\xad\xa2(\x8a\xdcy\xe7\x9d\x13\
\x08\xdf\xbbw\xaf\xcc\x993\xc7$!>.~\xc4\xe5\
\xb7X,\xff\xd6~\xf5\xf5\xd5m\xffF@|\x92S\
\xa7N\x89\x88\x88\xc7\xe3\x91\xf1\xf1qS\x88\xee\xeen\
\xe9\xeb\xeb\x13\x11\x91\xe7\x9e{N\x00SP\xc30\xe4\
\x9b\xdf\xfc\xa6<\xfa\xe8\xa3\x12\x8b\xc5dppP\x0a\
\x0b\x0bEQ\x14\xd9\xb6m\x9b\x88\x88\x84B!)/\
/7\x1fz\xed\xb5\xd7J \x10\x90K\x97.\xc9\xf3\
\xcf?/\xaa\xaa\x9a/\xe1/\x7f\xf9\x8bh\x9a&\xe1\
pXt]\x17\x11\x11M\xd3d\xcb\x96-\xa6\xa6^\
}\xc4\xef\x8bkr\x9c\xcc\xf8\x9c\xff\x9b\xe6\xa8\xf1\xf8\
i\xb3\xd90\x0c\x03\x80w\xdf}\x97\xda\xdaZ3T\
<\xf0\xc0\x03\xb4\xb4\xb4\x00\x90\x93\x93\x83\xa2(TW\
W#\x22D\xa3Q\xb2\xb2\xb2\xc8\xca\xca2\xed\xf7\xa5\
\x97^\x9a\x10w\x1d\x0e\x07\xb3g\xcf6\xaf\xa3\xd1(\
^\xaf\x97\xac\xac,\xae\xbb\xee:\x0c\xc3\xc00\x0c\xb6\
m\xdb\xc6\xf2\xe5\xcb\xb1X,\xd8\xedvTUE\xd7\
u,\x16\x0b\x9b7o\xa6\xb1\xb1\x91;\xef\xbc\x13U\
U\xcd~\xc30p\xb9\x5c\x94\x95\x95\xa1\xeb:\x9a\xa6\
\x99\xfe\xc90\x8c\xcf\xcc#\xcc0\x18\x1f\x10\x8dF\x09\
\x04\x02h\x9aFSS\x13\xef\xbc\xf3\x0ev\xbb\x9d\x93\
'Or\xec\xd81\xb2\xb3\xb3\xd14\x8d`0He\
e%\xf7\xde{/\x86a`\xb7\xdby\xe1\x85\x17\xb8\
x\xf1\x22~\xbf\x9fp8\xcc\x92%K\xa8\xaa\xaa\xa2\
\xab\xab\xcbL8\x8a\x8a\x8a())a\xc6\x8c\x19|\
\xe7;\xdf!77\x17\x80\x93'O\x02\xf0\xd0C\x0f\
\xb1~\xfdz4M\xa3\xb7\xb7\x97\xc3\x87\x0fS^^\
\xce\x82\x05\x0b\xd0u\x1d\x80\xf9\xf3\xe7\xf3\xf6\xdbos\
\xe0\xc0\x01\x82\xc1 \xaf\xbe\xfa\xaa\xe9\xac\xf3\xf3\xf3Y\
\xb7n\x1d\xc5\xc5\xc5\x84B!\x8e\x1e=\x8a\xae\xeb\x8c\
\x8d\x8d\x11\x0a\x85\xd04\xcd\x9c\xe7_a\xaa\xd1\xcb/\
\xbf,\x22\x22?\xf8\xc1\x0f&\xa8\x89\xd5j\x95\x9e\x9e\
\x1e\x11\x119t\xe8\x90\x1c9rDDD\xc6\xc6\xc6\
d\xd9\xb2e2\x7f\xfe|\xa9\xaa\xaa\x92/~\xf1\x8b\
\xf2\xd7\xbf\xfeUDDN\x9f>-;v\xec\x90\xcf\
Bgg\xa7\xfc\xf4\xa7?\x15\x9b\xcd&\x80\xd4\xd4\xd4\
\x88\xd7\xeb\x95\xc7\x1f\x7f\xdc\x94\xe7\xd6[o5M \
\xeeH\xe3f!\x22\x12\x8dF%\x14\x0a}\xea\xfc\xc1\
`P|>\x9f466\xca\xb2e\xcb\xc4\xe5rM\
0\x97\xf8\xa1|\xe2\x18\x10\x11\x92\x93\x93Y\xbe|9\
\x1f}\xf4\x11===\x13X\xba\xff\xfe\xfb\x99>}\
:\xc3\xc3\xc3\xf8\xfd~\xae\xb9\xe6\x1a\xea\xea\xeaL\xd3\
\x88\xa3\xb4\xb4\x94\x993g\x92\x94\x94D4\x1a\x9d\x10\
\xefc\xb1\x18\x81@\x80\xfe\xfe~\xda\xda\xda\x08\x87\xc3\
\x00\xa6*\xe7\xe6\xe6\xe2p8\x00\xe8\xef\xef\xc7n\xb7\
s\xea\xd4)\xf2\xf3\xf3\xcd\xb0\x07\x98\xaam\xb1X\xcc\
\xebxJ\x1c\xd7h\xab\xd5\xcc\xf2\xe9\xe9\xe9a\xfd\xfa\
\xf5\x1c>|\x98X,6A\x13\xccT8))\x89\
o}\xeb[$%%166\xc6\x85\x0b\x17\x983\
g\x0e===\xb8\xddn\xce\x9e=;a\xa1_\xfe\
\xf2\x97\x99?\x7f>\xb3g\xcf\xc6\xedv\xd3\xdc\xdc\xcc\
\x91#G\x00\x987o\x1e\xd3\xa7O\xc7\xe1pp\xf0\
\xe0A\xfa\xfb\xfb\x01\xb8\xe5\x96[HOO\xa7\xa1\xa1\
\x01]\xd7Y\xb4h\x11^\xaf\x97\xf7\xdf\x7f\xdf\x8c\xd7\
K\x97.e\xf1\xe2\xc5X\xadV\xdex\xe3\x0d6n\
\xdc\xc8\xca\x95+1\x0c\xc3\x5c\xb0\xa9\xba\x9f,\xf6\xd3\
\xf6\x01\xf1>]\xd7\xb1Z\xad\xec\xdf\xbf\x9f'\x9ex\
\x82\x8b\x17/\x9a9B\xfcf\x01$%%E|>\
\x9f\x88\x88\x0c\x0f\x0f\xcb\xe5\xcb\x97Mu\x8a\xc5bR\
__/%%%\x92\x91\x911!\xbe_\x8d]\xbb\
vIZZ\x9a\xd4\xd6\xd6\x9am}}}\x92\x9e\x9e\
.V\xabU<\x1e\x8f\x88\x88\xacZ\xb5J\xbe\xf2\x95\
\xaf\x88\xdf\xef\x97\xf1\xf1qY\xb6l\x99\x00\xb2h\xd1\
\x22S\xdd\xb7n\xdd*\x80\xdcw\xdf}\x13\xcc\xe0\xb3\
\xa0\xeb\xbah\x9a&\x86aLh\x8f\xb7\x9d?\x7f^\
\x16,X \xc9\xc9\xc9\x13\xcd N\x80\xc3\xe1\x90\xce\
\xceN\x89\xc5b\xd2\xd4\xd4$\xb3f\xcd\x92\xd9\xb3g\
\xcb\x8e\x1d;\xccI\x9fy\xe6\x19\xa9\xaa\xaa\x92\xe1\xe1\
ainn\x96\xe5\xcb\x97KII\x89\xac]\xbb\xd6\
\x5c\xdc\x92%Kd\xf3\xe6\xcd\x12\x8b\xc5$\x12\x89\x88\
\x88HMM\x8d\xa8\xaa*\xdd\xdd\xdd\x12\x8b\xc5d\xd5\
\xaaU\xa2\xaa\xaa|\xf0\xc1\x07\x22\x22\xe2v\xbb\xa5\xb4\
\xb4TN\x9f>-\x22\x22\x7f\xfc\xe3\x1f\xcd\x10\x97\x95\
\x95%\x83\x83\x83\x22r%\xe4\x1a\x86a.V\xd34\
3?\xf9W2\xe22\x8f\x8e\x8eJ$\x12\x91\xcb\x97\
/\xcb\xc2\x85\x0b%55U\xacV\xeb?\xc3\xa0|\
\xb2]\x8cD\x22\x04\x83A,\x16\x0b\x03\x03\x03tw\
w\xd3\xd2\xd2B__\x9f\xa9b\x16\x8b\x85-[\xb6\
\xd0\xda\xda\xca\x8b/\xbe\xc8\x81\x03\x07p\xbb\xdd\xbc\xf2\
\xca+\xc4b1\x82\xc1 \xb7\xddv\x1bs\xe7\xce\xc5\
j\xb5\xa2\xaa*\xfb\xf7\xef\xe7\xc6\x1bo\xe4\xa1\x87\x1e\
\xe2\xfc\xf9\xf3X\xadVJKK1\x0c\x83\xea\xeaj\
.]\xba\xc4\xb4i\xd3hjjb\xc6\x8c\x19\xf4\xf7\
\xf7\xb3n\xdd:\x14EAUU\x06\x07\x07M?\x13\
\xb7uUU\xb1X,X,\x16\xacV+\x16\x8b\x85\
\x93'O\xd2\xd0\xd0\xc0\xd0\xd0\x90\xe9+D\x84\x86\x86\
\x06\x82\xc1 \x8a\xa2\x98\x1b\xaf\xabaz\x0aUU\x09\
\x85B\x88\x08\xb7\xddv\x1b\xfd\xfd\xfd\x8c\x8e\x8e\x92\x97\
\x97\x87a\x18\xec\xd9\xb3\x87\xed\xdb\xb7s\xea\xd4)\xdc\
n\xb7i\x8fq\x07:22BGG\x07ccc\
$''\x03p\xe0\xc0\x01~\xfb\xdb\xdf\xf2\xc3\x1f\xfe\
\x90[o\xbd\x95@ \x00@JJ\x0a\x00\x17.\x5c\
\xa0\xa6\xa6\x86?\xfc\xe1\x0fddd\xd0\xdb\xdb\xcb\xea\
\xd5\xab\xb9t\xe9\x92\xb9/P\x14\x85\xdf\xfc\xe67,\
^\xbc\x18EQ\xf0\xf9|tvv\xe2\xf7\xfb\xf1\xf9\
|\xf4\xf6\xf6R__\xcf\x9f\xff\xfcg\xc2\xe10\x99\
\x99\x99\xfc\xeew\xbfc\xd5\xaaU\x04\x02\x01\xea\xeb\xeb\
\xb9\xe1\x86\x1b\xb0\xd9lf~\x10\x0f\xcd\xa6\x0fP\x14\
E,\x16\x8b466J$\x12\x91\x93'OJm\
m\xad\xfc\xf2\x97\xbf\x94u\xeb\xd6IEE\x85\xa92\
\x1f~\xf8\xa1\x04\x83A9~\xfc\xb8TTT\x88\xdd\
n\x97{\xee\xb9G\xa2\xd1\xa8l\xdb\xb6M\xb2\xb3\xb3\
\xe5\xe0\xc1\x83\x12\x8b\xc5d\xdf\xbe}\x92\x92\x92\x22\x1f\
~\xf8\xa1i\x8f\x9a\xa6Imm\xad\x00f\x08|\xef\
\xbd\xf7$\x16\x8b\xc9\x9e={\xcc\xb0k\xaa\xe8'\xa1\
\xebo\x7f\xfb\x9b\x88\x88l\xdc\xb8\xf13\xb3\xba\xf8}\
EEEb\x18\x86\x0c\x0e\x0e\xca\xc2\x85\x0b\xe5\xc8\x91\
#r\xf9\xf2e\xb9\xe3\x8e;L\x1f\x107}\xab\x5c\
UUq:\x9d\xd8l6233y\xea\xa9\xa7&\
\xa8JRR\x12\x9a\xa6\xf1\xd4SO\xf1\xce;\xefp\
\xe3\x8d7\xb2{\xf7nRSS),,\x04\xc0\xe9\
tr\xe9\xd2%\x5c.\x17V\xab\x95i\xd3\xa6\x11\x0c\
\x06y\xec\xb1\xc7hmm5\xb5&\x1e\xea\xe2j\x19\
\x08\x04\xb0Z\xad\xe6.\xf0\xea\xea\x94\xc5bAQ\x14\
jkk\xf9\xeaW\xbf\xca\xe9\xd3\xa7M\xd5\xd7u\xdd\
T\xf7xhKKK\xc3\xe3\xf1\xd0\xd0\xd0@ee\
%\x16\x8b\x85\xba\xba:3*\xa5\xa6\xa6\x12\x8dF\xcd\
\xac\xd74\x01\x11\xe1\xc0\x81\x03\x1c<x\x10\x9f\xcf\xf7\
oe\xa4X,\x86\xaa\xaa455q\xc3\x0d7\xb0\
v\xedZ:::\xb8\xe3\x8e;\xd8\xb7o\x1f\x07\x0f\
\x1e\xa4\xb1\xb1\x11\x80W^y\x85\x96\x96\x16^\x7f\xfd\
u\x00\xda\xda\xdaX\xb9r%\x8b\x16-\xa2\xbf\xbf\x9f\
\xa6\xa6&\xe0J\xf6)\x22\xec\xdb\xb7\x0f\xaf\xd7\xcb\xd1\
\xa3G\x11\x11s\x9b\x1a\xb7{\x80\xb7\xdez\x8b\x81\x81\
\x01\x5c.\x17\xba\xae\x7ffV\xe7\xf7\xfb\x01\xf8\xc5/\
~\xc1\xa1C\x87P\x14\x85\xba\xba:n\xbf\xfdvz\
{{\x09\x06\x83\x9f\x9e\x07\xc4\x17\x9c\x9f\x9f\x8f\xddn\
'//\x0f]\xd7\xf1x<deeq\xee\xdc9\
\x02\x81\x00\xe5\xe5\xe5\x8c\x8c\x8c\x98L\xa6\xa6\xa6\x02\x10\
\x89D\x08\x85B\xe4\xe7\xe7\xa3\xeb:\x22\x82\xa6i\xf8\
\xfd~\x9cN'\x86a0>>Nww\xb7\xf9\xf6\
\xd3\xd2\xd2\x98>}:\x1d\x1d\x1d\xf8|>S\xa8\x99\
3g\x92\x99\x99\xc9\xc8\xc8\x08~\xbf\x9f\xcc\xccL\xbc\
^/o\xbd\xf5\x16n\xb7\x9b\x9f\xfd\xecg\xf8\xfd~\
TU%%%\x05\x9b\xcd\x86\xddngdd\x04U\
U9y\xf2$\x9a\xa6q\xe2\xc4\x09\x9ey\xe6\x19\x14\
E\xe1\xdb\xdf\xfe6;w\xee\xc4\xef\xf7300@\
KK\x0b\xe3\xe3\xe3\xff$ \xaez;w\xee\xe4\x81\
\x07\x1e\xa0\xa3\xa3\x83S\xa7Nq\xf6\xecY6m\xda\
\xc4\xbe}\xfbX\xbbv-\xc3\xc3\xc3\xd4\xd6\xd6RY\
Y\xc9\x94)S(++\xa3\xa7\xa7\x87\x8f>\xfa\x88\
\xe4\xe4d\x96,Y\xc2\x89\x13'\x08\x06\x83\x84B!\
\xdcn7\x8f?\xfe8'N\x9c ##\x03\xa7\xd3\
\xc9\x8a\x15+hkkc\xd7\xae]\xac^\xbd\x9a\x0f\
>\xf8\x80\xa5K\x97\x92\x93\x93\xc3\xcb/\xbf\xccM7\
\xdd\xc4\x993g\xf0\xfb\xfdX,\x16\x0c\xc3 ;;\
\x9b\xb2\xb22.\x5c\xb8@kk+\xed\xed\xedL\x99\
2\x85\xea\xeaj|>\x1fn\xb7\x9b\xe3\xc7\x8f\xb3a\
\xc3\x06\xd6\xacY\xc3k\xaf\xbdFgg'\xbd\xbd\xbd\
\xd8\xedvf\xcc\x98Akk\xeb\x95E+\x0a\xeb\xd6\
\xad\xc3\xe3\xf1\x5c\xd1\xfe\xb8\x03\xb1X,R]]-\
\x86a\xc8}\xf7\xdd'\x80\xfc\xfc\xe7?\x17\x11\x91\xc6\
\xc6FY\xbdz\xb5\xc4b1y\xf0\xc1\x07\x05\x90\xa7\
\x9f~ZDDn\xba\xe9&\x01d\xd3\xa6Mr\xec\
\xd81\xd9\xbau\xabl\xda\xb4I\xbe\xf7\xbd\xef\xc9\x13\
O<!\xc1`Pjkk\xe5\xa5\x97^\x92P(\
$_\xfb\xda\xd7$++K\x22\x91\x88\xec\xde\xbd[\
B\xa1\x90L\x992E\x9e{\xee9\x11\x11\xa9\xaa\xaa\
\x12@\xecv\xbb\xa4\xa7\xa7\x0b \x0f?\xfc\xb0\x88\x88\
\xacY\xb3\xc6tz\x85\x85\x85\x12\x89Dd\xfb\xf6\xed\
f\x9b\xc7\xe3\x91\xd7_\x7f]\x8a\x8a\x8aDD\xe4\xde\
{\xef\x95\xd7^{M>\xfe\xf8cy\xe1\x85\x17\xe4\
\xc7?\xfe\xb1l\xd8\xb0A\xecv\xfb\x15'{\xb5\x0f\
\xd0u\x9dH$\x82\xa2(\x94\x96\x96\x020u\xeaT\
\x86\x87\x87y\xe3\x8d7\xa8\xad\xad\xc50\x0crrr\
\xb0Z\xad\x84\xc3a\xd3f-\x16\x0b\x0e\x87\xc3T}\
UUINN\xc6\xe1p0::JUU\x155\
55466R__\xcf\x9a5k\xb0\xd9lX\
\xadV\x1c\x0e\x07\xeb\xd7\xaf\x9f\x90\xbf\xc7\xcd\xea\xear\
\x98\x88\xe0\xf5zMg\x1a\x08\x04\xb0\xd9lTTT\
\x98\xf7\xec\xd8\xb1\x83\xdc\xdc\x5c\x9e\x7f\xfey\x02\x81\x00\
\x87\x0e\x1d\x22==\x1d\xaf\xd7k\xa6\xc5iii\xe6\
\x1cf= --\x8d\xed\xdb\xb7\xf3\xdd\xef~\x17]\
\xd7Y\xb6l\x19\x1b6l\xc0f\xb3\x91\x94\x94\xc4\x8e\
\x1d;p\xbb\xdd\xd8l6\xc6\xc6\xc6\xd04\x8dp8\
l&Q\xba\xae\x93\x93\x93Cii)999\xa6\
\x0f\x09\x85B\xe8\xba\xce\xbcy\xf3x\xec\xb1\xc7\xb8\xf9\
\xe6\x9b\xd9\xb8q#k\xd6\xac\xe1\xd0\xa1C477\
\xf3\xe2\x8b/\xf2\xe4\x93O\xf2\xa7?\xfd\x89\x96\x96\x16\
\xde|\xf3M\xde|\xf3Mv\xed\xda\xc5\x8a\x15+\xcc\
\xf8\xad(\x0a\xdd\xdd\xdd\xe8\xbaNMM\x0d\xaf\xbe\xfa\
*\xd1h\x94\xbc\xbc<\x9e}\xf6YJKKy\xf6\
\xd9g)..f\xc5\x8a\x15\xec\xdc\xb9\x13\x9f\xcfG\
JJ\x0a3f\xcc ==\x9d\xac\xac,\x82\xc1 \
\x91Hd\xa2\x13\xb4\xd9l\xdcr\xcb-<\xf2\xc8#\
f\xb9\xbb\xb7\xb7\x17EQ\xb8x\xf1\x22\xe7\xce\x9d\x03\
\xaelh\xda\xda\xda\x18\x1d\x1d\xa5\xac\xac\x8c\xc2\xc2B\
\xce\x9e=KKK\x0b\x0f>\xf8 \xe3\xe3\xe3\xe4\xe6\
\xe6\x12\x0e\x87\x89F\xa3x<\x1es\x8f\x7f\xe1\xc2\x05\
\x1e~\xf8a\x82\xc1 \x99\x99\x99\xec\xd9\xb3\x07\x8f\xc7\
C(\x14b\xf5\xea\xd5\xf4\xf6\xf6\xd2\xd9\xd9\xc9\xca\x95\
+\x01\xe8\xea\xea\xe2\xc4\x89\x13\xa4\xa5\xa5\x91\x9f\x9fO\
yy9g\xce\x9c\xa1\xb5\xb5\x95\xbb\xee\xba\x0b\xa7\xd3\
\x89\xd3\xe9\xe4\xf2\xe5\xcb\xe4\xe6\xe6\xf2\xde{\xef\xd1\xd5\
\xd5\xc5\xd7\xbf\xfeu\xd2\xd2\xd2hmm\xe5\xdc\xb9s\
\xac_\xbf\x9e@ \x80\xcb\xe52\x8b\xadO?\xfd4\
===\xff\xac\x0a\xc7++\xc7\x8e\x1d\xa3\xa1\xa1\x81\
y\xf3\xe6\xd1\xde\xde\xce\x8c\x193x\xf7\xddw\xb1\xdb\
\xedtuu\xe1\xf5z\xd9\xbcy3\xe7\xce\x9d#+\
+\x8bc\xc7\x8e\xf1\x8do|\x83\xae\xae.\x92\x92\x92\
\x10\x11\x06\x06\x06Lb*++ijjb\xc1\x82\
\x05x<\x1e,\x16\x0b\x9a\xa6\xe1\xf3\xf9\x98={6\
\xf5\xf5\xf5\xd4\xd5\xd5q\xfd\xf5\xd7S]]\xcd\xdf\xff\
\xfew\xa2\xd1(\x19\x19\x19\xbc\xff\xfe\xfb\xacZ\xb5\x0a\
\xaf\xd7Kvv6G\x8e\x1c\xe1\xae\xbb\xee\xa2\xa3\xa3\
\x83\xe4\xe4d|>\x1f\x22Bvv6\x1d\x1d\x1d,\
\x5c\xb8\x90\xfd\xfb\xf7SZZj~\x0f\xb0\xdb\xed\x84\
B!\xb2\xb2\xb2hoo\xa7\xbc\xbc\x9c\xf3\xe7\xcf\xf3\
\xf6\xdbo\xb3w\xef\xde\x89_\x86\xd2\xd2\xd2\xb0Z\xad\
D\x22\x91\x09{mUUM\x8f\xac(\x0aN\xa7\x93\
H$b\xe6\xe4\xb1X\x0c\x9b\xcdF$\x12!55\
\x95@ @rr\xb2i\xbf\xaa\xaa\x9a\xa5\xf2h4\
\x8a\xd3\xe9d||\x1c\x87\xc3A \x10@Dp:\
\x9d\xe6\x98p8Ljj*~\xbf\x1f\x87\xc3A4\
\x1a%))\x09\xf8\xe7\xf66\x16\x8b\x99\xf3\xa4\xa4\xa4\
\x10\x0a\x85\xcc\xa4\xca\xe1p`\xb3\xd9\xcc\xbeH$\xc2\
\x17\xbe\xf0\x05\xc6\xc7\xc7\xcd\x84\xce0\x0cB\xa1\xd0\xe4\
\xa71\xeb\xbf6|Zq\x01\x98\x90\x15~\xd6\x98\xff\
\x16\xf1t\xfc?m\xffo\xe6\xfa\xb4q0\xf9qt\
\xf2\xff\x01\x93\x04$Z\x80Dc\x92\x80D\x0b\x90h\
L\x12\x90h\x01\x12\x8dI\x02\x12-@\xa21I@\
\xa2\x05H4&\x09H\xb4\x00\x89\xc6$\x01\x89\x16 \
\xd1\xf8\xdc\x13\xf0?E\x0a}G\xcd\xb2\x85|\x00\x00\
\x00\x00IEND\xaeB`\x82\
"
qt_resource_name = b"\
\x00\x06\
\x07\x03}\xc3\
\x00i\
\x00m\x00a\x00g\x00e\x00s\
\x00\x05\
\x00o\xa6S\
\x00i\
\x00c\x00o\x00n\x00s\
\x00\x03\
\x00\x00p7\
\x00i\
\x00m\x00g\
\x00\x0c\
\x05\x1b\xb0\xc7\
\x00c\
\x00i\x00l\x00-\x00f\x00i\x00l\x00e\x00.\x00p\x00n\x00g\
\x00\x12\
\x0d\xc4\x15'\
\x00c\
\x00i\x00l\x00-\x00v\x00i\x00e\x00w\x00-\x00q\x00u\x00i\x00l\x00t\x00.\x00p\x00n\
\x00g\
\x00\x12\
\x0f\xad\x8fg\
\x00c\
\x00i\x00l\x00-\x00m\x00e\x00d\x00i\x00a\x00-\x00p\x00l\x00a\x00y\x00.\x00p\x00n\
\x00g\
\x00\x10\
\x0d\xc9]\x07\
\x00c\
\x00i\x00l\x00-\x00s\x00e\x00t\x00t\x00i\x00n\x00g\x00s\x00.\x00p\x00n\x00g\
\x00\x0c\
\x0b\x0b\xb0\xa7\
\x00c\
\x00i\x00l\x00-\x00h\x00o\x00m\x00e\x00.\x00p\x00n\x00g\
\x00\x0c\
\x08\x9b\xb0\x07\
\x00c\
\x00i\x00l\x00-\x00c\x00o\x00d\x00e\x00.\x00p\x00n\x00g\
\x00\x0c\
\x05\xfb\xbeg\
\x00c\
\x00i\x00l\x00-\x00s\x00a\x00v\x00e\x00.\x00p\x00n\x00g\
\x00\x11\
\x06G\x9f\xc7\
\x00c\
\x00i\x00l\x00-\x00c\x00h\x00e\x00c\x00k\x00-\x00a\x00l\x00t\x00.\x00p\x00n\x00g\
\
\x00\x0e\
\x06\x17\x85\xa7\
\x00c\
\x00i\x00l\x00-\x00p\x00e\x00n\x00c\x00i\x00l\x00.\x00p\x00n\x00g\
\x00\x0c\
\x0e\xfd\xbf\xa7\
\x00c\
\x00i\x00l\x00-\x00p\x00l\x00u\x00s\x00.\x00p\x00n\x00g\
\x00\x09\
\x0fK\x84\xa7\
\x00c\
\x00i\x00l\x00-\x00x\x00.\x00p\x00n\x00g\
\x00\x15\
\x03Q:'\
\x00c\
\x00i\x00l\x00-\x00c\x00h\x00e\x00v\x00r\x00o\x00n\x00-\x00r\x00i\x00g\x00h\x00t\
\x00.\x00p\x00n\x00g\
\x00\x0c\
\x0f\x14D'\
\x00t\
\x00r\x00a\x00s\x00h\x00-\x003\x002\x00.\x00p\x00n\x00g\
\x00\x0d\
\x0bz\xc5'\
\x00c\
\x00i\x00l\x00-\x00s\x00h\x00a\x00r\x00e\x00.\x00p\x00n\x00g\
\x00\x11\
\x0c\x84-\xa7\
\x00c\
\x00i\x00l\x00-\x00s\x00i\x00z\x00e\x00-\x00g\x00r\x00i\x00p\x00.\x00p\x00n\x00g\
\
\x00\x14\
\x0f=8\xc7\
\x00c\
\x00i\x00l\x00-\x00c\x00h\x00e\x00v\x00r\x00o\x00n\x00-\x00l\x00e\x00f\x00t\x00.\
\x00p\x00n\x00g\
\x00\x13\
\x0a\x0a\x0a\xa7\
\x00s\
\x00u\x00b\x00l\x00i\x00m\x00e\x00-\x00t\x00e\x00x\x00t\x00-\x004\x008\x00.\x00p\
\x00n\x00g\
\x00\x15\
\x025\x13\xc7\
\x00c\
\x00i\x00l\x00-\x00e\x00x\x00t\x00e\x00r\x00n\x00a\x00l\x00-\x00l\x00i\x00n\x00k\
\x00.\x00p\x00n\x00g\
\x00\x11\
\x0d@y\x07\
\x00c\
\x00i\x00l\x00-\x00c\x00l\x00i\x00p\x00b\x00o\x00a\x00r\x00d\x00.\x00p\x00n\x00g\
\
\x00\x15\
\x08\xca\x95\xe7\
\x00c\
\x00i\x00l\x00-\x00l\x00o\x00o\x00p\x00-\x00c\x00i\x00r\x00c\x00u\x00l\x00a\x00r\
\x00.\x00p\x00n\x00g\
\x00\x13\
\x04%\x01G\
\x00c\
\x00i\x00l\x00-\x00f\x00o\x00l\x00d\x00e\x00r\x00-\x00o\x00p\x00e\x00n\x00.\x00p\
\x00n\x00g\
\x00\x10\
\x09\x8fx\xe7\
\x00c\
\x00i\x00l\x00-\x00s\x00a\x00t\x00e\x00l\x00i\x00t\x00e\x00.\x00p\x00n\x00g\
\x00\x0e\
\x0f\xcc\xddg\
\x00w\
\x00i\x00d\x00g\x00e\x00t\x00s\x00-\x006\x004\x00.\x00p\x00n\x00g\
\x00\x0c\
\x0bo\xbb'\
\x00m\
\x00p\x00i\x00_\x00l\x00o\x00g\x00o\x00.\x00p\x00n\x00g\
\x00\x12\
\x02\xfe[\xc7\
\x00m\
\x00p\x00i\x00_\x00l\x00o\x00g\x00o\x00_\x00s\x00m\x00a\x00l\x00l\x00.\x00p\x00n\
\x00g\
\x00\x11\
\x01LY\x1f\
\x00m\
\x00p\x00i\x00_\x00l\x00o\x00g\x00o\x00_\x00i\x00c\x00o\x00n\x00.\x00i\x00c\x00o\
\
\x00\x0c\
\x0bo\xa3\xff\
\x00m\
\x00p\x00i\x00_\x00l\x00o\x00g\x00o\x00.\x00i\x00c\x00o\
"
qt_resource_struct = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x12\x00\x02\x00\x00\x00\x01\x00\x00\x00\x08\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x22\x00\x02\x00\x00\x00\x04\x00\x00\x00\x04\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x03\xd4\x00\x01\x00\x00\x00\x01\x00\x00\xba\xf3\
\x00\x00\x01}\x91@\x8d\xb7\
\x00\x00\x03\xaa\x00\x00\x00\x00\x00\x01\x00\x00\xa3%\
\x00\x00\x01}\x9fv1\x97\
\x00\x00\x03\xfc\x00\x00\x00\x00\x00\x01\x00\x00\xbfl\
\x00\x00\x01}\xdbT\xbb\x1e\
\x00\x00\x03\x8c\x00\x00\x00\x00\x00\x01\x00\x00\x9az\
\x00\x00\x01}\x97W\x0f\xd1\
\x00\x00\x00\x22\x00\x02\x00\x00\x00\x17\x00\x00\x00\x09\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x02\x90\x00\x00\x00\x00\x00\x01\x00\x00s\x03\
\x00\x00\x01y w\x85\x07\
\x00\x00\x01\xa0\x00\x00\x00\x00\x00\x01\x00\x00P\xc1\
\x00\x00\x01y w\x82\xa5\
\x00\x00\x03\x18\x00\x00\x00\x00\x00\x01\x00\x00\x891\
\x00\x00\x01y w\x85\x81\
\x00\x00\x00.\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01y w\x851\
\x00\x00\x01\x02\x00\x00\x00\x00\x00\x01\x00\x00,\xab\
\x00\x00\x01y w\x8a\x8c\
\x00\x00\x01H\x00\x00\x00\x00\x00\x01\x00\x00;/\
\x00\x00\x01y w\x89\xe2\
\x00\x00\x01 \x00\x00\x00\x00\x00\x01\x00\x004>\
\x00\x00\x01y w\x81\x7f\
\x00\x00\x00\xe4\x00\x00\x00\x00\x00\x01\x00\x00%5\
\x00\x00\x01y w\x83D\
\x00\x00\x02\xe8\x00\x00\x00\x00\x00\x01\x00\x00\x81\xc2\
\x00\x00\x01y w\x87\xba\
\x00\x00\x03D\x00\x00\x00\x00\x00\x01\x00\x00\x90\xb6\
\x00\x00\x01y w\x8a~\
\x00\x00\x02d\x00\x00\x00\x00\x00\x01\x00\x00ox\
\x00\x00\x01}\x94\xbdm}\
\x00\x00\x00\xc6\x00\x00\x00\x00\x00\x01\x00\x00\x1d\xbb\
\x00\x00\x01y w\x86V\
\x00\x00\x01\xee\x00\x00\x00\x00\x00\x01\x00\x00X\xd6\
\x00\x00\x01y w\x8a\xd1\
\x00\x00\x02\x0e\x00\x00\x00\x00\x00\x01\x00\x00`[\
\x00\x00\x01y w\x8a\xfb\
\x00\x00\x02\xc0\x00\x00\x00\x00\x00\x01\x00\x00zu\
\x00\x00\x01y w\x82\xd4\
\x00\x00\x00L\x00\x00\x00\x00\x00\x01\x00\x00\x07R\
\x00\x00\x01y w\x8cd\
\x00\x00\x00\xa0\x00\x00\x00\x00\x00\x01\x00\x00\x16\x05\
\x00\x00\x01y w\x8a\xb5\
\x00\x00\x01j\x00\x00\x00\x00\x00\x01\x00\x00B\x86\
\x00\x00\x01y w\x8a\x19\
\x00\x00\x01\xd0\x00\x00\x00\x00\x00\x01\x00\x00X\x1a\
\x00\x00\x01}\x95E\xa3\xdc\
\x00\x00\x026\x00\x00\x00\x00\x00\x01\x00\x00h$\
\x00\x00\x01y w\x82\x96\
\x00\x00\x01\x88\x00\x00\x00\x00\x00\x01\x00\x00I\x89\
\x00\x00\x01y w\x8ds\
\x00\x00\x00v\x00\x00\x00\x00\x00\x01\x00\x00\x0e\xc1\
\x00\x00\x01y w\x88$\
\x00\x00\x03j\x00\x00\x00\x00\x00\x01\x00\x00\x98R\
\x00\x00\x01}\x95D\xe9\x11\
"
def qInitResources():
QtCore.qRegisterResourceData(0x03, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(0x03, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
|
py
|
1a57e1320aca4b5b343ecd2fdbabe00252321b37
|
# -*- coding: utf-8 -*-
"""Project myip
Will show your IP address.
"""
# The package name, which is also the "UNIX name" for the project.
package = 'myip'
project = "Otype myip"
project_no_spaces = project.replace(' ', '')
version = '0.1'
description = 'Shows your IP address'
authors = ['Hans-Gunther Schmidt']
authors_string = ', '.join(authors)
emails = ['[email protected]']
license = 'MIT'
copyright = '2014 ' + authors_string
url = 'http://otype.de/'
|
py
|
1a57e1adf91778ad49fea8498935adc80d004623
|
import os
import re
from subprocess import PIPE, Popen
def git_file_deltas(git_dir, commit, compare=None):
    # source: http://stackoverflow.com/a/2713363
    # not yet implemented
    pass
def sub_git_remote_url(git_dir):
args = ['config', '--get', "remote.origin.url"]
with sub_git_cmd(git_dir, args) as p:
gitout = p.stdout.read().decode('utf-8').strip()
return gitout
def sub_git_cmd(git_dir, args):
"""
run git command
args are the full command with args
git_dir, the actual full path of the .git/ repo directory to run commands against
returns popen object for access to stdout+stderr
"""
    git_dir_to_use = None
    if os.path.isdir(os.path.join(git_dir, '.git')):
        # normal checkout: .git is a directory
        git_dir_to_use = os.path.join(git_dir, '.git')
    elif os.path.isfile(os.path.join(git_dir, '.git')):
        # submodule checkout: .git is a gitfile pointing at the real git dir
        git_dir_to_use = os.path.join(git_dir, '.git')
    # else:
    #     raise Exception("Error, the .git location for %s doesn't exist" % git_dir)
try:
p = Popen(
[
'git',
'--git-dir',
git_dir_to_use,
] + args,
stdout=PIPE, stderr=PIPE
)
except OSError as e:
# Is git missing ?
if e.errno == 2:
e.strerror += ": git"
raise(e)
return p
def sub_get_current_branch(git_dir):
#HT: http://stackoverflow.com/a/12142066
args = [
'rev-parse',
'--abbrev-ref',
'HEAD'
]
with sub_git_cmd(git_dir, args) as p:
gitout = p.stdout.read().decode('utf-8').strip()
return gitout
def get_project_snapshot(git_dir, submodules=False, log_count=1, submodule_count=1):
root_info = sub_git_info(git_dir, log_count=log_count)
root_info['current_branch'] = sub_get_current_branch(git_dir)
if submodules:
root_info['submodules'] = list(sub_git_submodules(git_dir, log_count=submodule_count))
return root_info
def sub_git_info(git_dir, log_count=1):
"""
Given a git dir and log count, return a json formatted representation
"""
return_dict = {}
kv_line_format = {
'sha': '%H',
'author': '%an <%ae>',
'date': '%ai',
'subject': '%s',
'message': '%b'
}
KV_DELIMITER = ':~@#$~:'
LINE_DELIMITER = '@#\n#@'
# construct an output of git log that is essentially a:
# key=value
# key=value, etc
    # but using a custom key=value delimiter and a custom line delimiter, since
    # there might be newlines in messages and subjects.
    # the git log -z format delimits each commit with a null byte, but we still need
    # to separate the individual properties within each commit.
line_by_line_format = LINE_DELIMITER.join(['%s%s%s' % (k, KV_DELIMITER, v) for k, v in kv_line_format.items()])
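    # For illustration, the pretty format built above ends up looking roughly like
    # (key order follows the dict above; the line delimiter is shown literally):
    #   sha:~@#$~:%H@#\n#@author:~@#$~:%an <%ae>@#\n#@date:~@#$~:%ai...
    # so each commit in the output expands to key:~@#$~:value chunks that
    # parse_rev_block below splits apart again with LINE_DELIMITER and KV_DELIMITER.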
args = ['log',
'-%s' % log_count,
'-z',
'--pretty=format:%s' % line_by_line_format
]
with sub_git_cmd(git_dir, args) as p:
gitout = p.stdout.read().decode('utf-8').strip()
url = sub_git_remote_url(git_dir)
all_raw_revs = gitout.split('\0')
def parse_rev_block(block_text):
ret = {}
for prop in block_text.split(LINE_DELIMITER):
if len(prop) == 0:
continue
try:
k, v = prop.split(KV_DELIMITER)
except ValueError:
k = "GitParseError"
v = prop
ret[k] = v
return ret
commit_list = [parse_rev_block(s) for s in all_raw_revs]
for commit in commit_list:
commit['commit_url'] = get_commit_url(url, commit['sha'])
commit['compare_master'] = get_compare_url(url, commit['sha'], 'master')
return_dict['commits'] = commit_list
return return_dict
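# Illustrative shape of sub_git_info()'s return value (all values hypothetical):
#   {'commits': [{'sha': 'abc123', 'author': 'Jane Doe <jane@example.com>',
#                 'date': '2014-01-01 12:00:00 +0000', 'subject': '...', 'message': '...',
#                 'commit_url': 'https://github.com/<account>/<repo>/commit/abc123',
#                 'compare_master': 'https://github.com/<account>/<repo>/compare/abc123...master'}]}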
def get_git_sub_info(git_dir, sub_path, log_count=1):
full_sub_path = os.path.join(git_dir, sub_path)
sub_info = sub_git_info(full_sub_path, log_count=log_count)
return sub_info
def sub_git_submodules(git_dir, log_count=1):
"""
Using shell, get the active submodule info
"""
    args = ['submodule', 'status']
with sub_git_cmd(git_dir, args) as p:
gitout = p.stdout.read().decode('utf-8').strip()
    # iterate over the status output line by line (one line per submodule)
    for x in gitout.splitlines():
splits = x.strip().split(' ')
if len(splits) == 3:
sub_sha = splits[0].strip()
sub_path = splits[1]
sub_log = get_git_sub_info(git_dir, sub_path, log_count=log_count)
sub_log['path'] = sub_path
sub_log['branch'] = splits[2]
sub_log['sha_sha'] = sub_sha
yield sub_log
def split_repo_url(repo_url):
"""
Repo url splits to [git_account, git_repo]
even if it's git://, or git@
"""
if re.search(r'^\w+://', repo_url):
chunks = repo_url.split("/")[-2:]
elif repo_url.startswith("git@"):
chunks = repo_url.split(':')[-1].split('/')
else:
# fall back to the last two path components so unexpected URL forms do not raise a NameError
chunks = repo_url.split('/')[-2:]
return chunks
def get_commit_url(repo_url, hexsha, compare=False):
chunks = split_repo_url(repo_url)
url = "https://github.com/%s/%s/commit/%s" % (chunks[0], chunks[1].replace('.git', ''), hexsha)
return url
def get_compare_url(repo_url, start_cmp, end_cmp):
chunks = split_repo_url(repo_url)
url = "https://github.com/%(account)s/%(repo)s/compare/%(start_cmp)s...%(end_cmp)s" % {
"account": chunks[0],
"repo": chunks[1].replace('.git', ''),
"start_cmp": start_cmp,
"end_cmp": end_cmp
}
return url
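# Hedged usage sketch (the path is a placeholder, not from this module):
# snapshot = get_project_snapshot('/path/to/repo', submodules=True)
# snapshot['commits'][0]['commit_url']  # -> https://github.com/<account>/<repo>/commit/<sha>
# print(snapshot)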
|
py
|
1a57e22327e223d7fbd55b3c70b9bd7acc26e3fe
|
fruit = input()
day = input()
quantity = float(input())
price = 0
if day == "sunday" or day == "saturday":
if fruit == "banana":
price = quantity * 2.70
elif fruit == "apple":
price = quantity * 1.25
elif fruit == "orange":
price = quantity * 0.90
elif fruit == "grapefruit":
price = quantity * 1.60
elif fruit == "kiwi":
price = quantity * 3.00
elif fruit == "pineapple":
price = quantity * 5.60
elif fruit == "grapes":
price = quantity * 4.20
elif day in ("monday", "tuesday", "wednesday", "thursday", "friday"):
if fruit == "banana":
price = quantity * 2.50
elif fruit == "apple":
price = quantity * 1.20
elif fruit == "orange":
price = quantity * 0.85
elif fruit == "grapefruit":
price = quantity * 1.45
elif fruit == "kiwi":
price = quantity * 2.70
elif fruit == "pineapple":
price = quantity * 5.50
elif fruit == "grapes":
price = quantity * 3.80
# price stays 0 for an unrecognized fruit or day, so only positive totals are valid
if price > 0:
print(f"{price:.2f}")
else:
print("error")
|
py
|
1a57e2fd42fda3d42554359d14bfd6031bf596bc
|
import collections.abc
from datetime import datetime, timedelta
import json
import os
import urllib.parse
import configparser
import pprint
import redis
from pymongo.errors import WriteError, DuplicateKeyError
from pymongo import MongoClient
from config import Config
from agaveflask.logs import get_logger
logger = get_logger(__name__)
def _do_get(getter, key):
obj = getter(key)
if obj is None:
raise KeyError('"{}" not found'.format(key))
try:
return json.loads(obj.decode('utf-8'))
# handle non-JSON data
except ValueError:
return obj.decode('utf-8')
def _do_set(setter, key, value):
obj = json.dumps(value)
setter(key, obj.encode('utf-8'))
class StoreMutexException(Exception):
pass
class AbstractStore(collections.abc.MutableMapping):
"""A persistent dictionary."""
def __getitem__(self, key):
pass
def __setitem__(self, key, value):
pass
def __delitem__(self, key):
pass
def __iter__(self):
"""Iterator for the keys."""
pass
def __len__(self):
"""Size of db."""
pass
def set_with_expiry(self, key, obj):
"""Set `key` to `obj` with automatic expiration of the configured seconds."""
pass
def update(self, key, field, value):
"""Atomic ``self[key][field] = value``."""
pass
def pop_field(self, key, field):
"""Atomic pop ``self[key][field]``."""
pass
def update_subfield(self, key, field1, field2, value):
"""Atomic ``self[key][field1][field2] = value``."""
pass
def getset(self, key, value):
"""Atomically: ``self[key] = value`` and return previous ``self[key]``."""
pass
def mutex_acquire(self, key):
"""Try to use key as a mutex.
Raise StoreMutexException if not available.
"""
busy = self.getset(key, True)
if busy:
raise StoreMutexException('{} is busy'.format(key))
def mutex_release(self, key):
self[key] = False
class AbstractTransactionalStore(AbstractStore):
"""Adds basic transactional semantics to the AbstractStore interface."""
def within_transaction(self, f, key):
"""Execute a callable, f, within a lock on key `key`."""
pass
class MongoStore(AbstractStore):
def __init__(self, host, port, database='abaco', db='0', user=None, password=None):
"""
Note: pop_fromlist, append_tolist, and within_transaction were removed from the Redis
store functions as they weren't necessary, don't work, or don't work in Mongo.
Creates an abaco `store` which maps to a single mongo
collection within some database.
:param host: the IP address of the mongo server.
:param port: port of the mongo server.
:param database: the mongo database to use for abaco.
:param db: an integer mapping to a mongo collection within the
mongo database.
:return:
"""
mongo_uri = 'mongodb://{}:{}'.format(host, port)
if user and password:
logger.info("Using mongo user {} and passowrd: ***".format(user))
u = urllib.parse.quote_plus(user)
p = urllib.parse.quote_plus(password)
mongo_uri = 'mongodb://{}:{}@{}:{}'.format(u, p, host, port)
self._mongo_client = MongoClient(mongo_uri)
self._mongo_database = self._mongo_client[database]
self._db = self._mongo_database[db]
def __getitem__(self, fields):
"""
Atomically does either:
Gets and returns 'self[key]' or 'self[key][field1][field2][...]' as a dictionary
"""
key, _, subscripts = self._process_inputs(fields)
result = self._db.find_one(
{'_id': key},
projection={'_id': False})
if result is None:
raise KeyError(f"'_id' of '{key}' not found")
try:
return eval('result' + subscripts)
except KeyError:
raise KeyError(f"Subscript of {subscripts} does not exists in document of '_id' {key}")
def __setitem__(self, fields, value):
"""
Atomically does either:
Sets 'self[key] = value' or sets 'self[key][field1][field2][...] = value'
"""
key, dots, _ = self._process_inputs(fields)
try:
if isinstance(fields, str) and isinstance(value, dict):
result = self._db.update_one(
filter={'_id': key},
update={'$set': value},
upsert=True)
else:
result = self._db.update_one(
filter={'_id': key},
update={'$set': {dots: value}},
upsert=True)
except WriteError:
raise WriteError(
"Likely due to trying to set a subfield of a field that does not exists." +
"\n Try setting a dict rather than a value. Ex. store['id_key', 'key', 'field'] = {'subfield': 'value'}")
if result.raw_result['nModified'] == 0:
if not 'upserted' in result.raw_result:
logger.debug(f'Field not modified, old value likely the same as new. Key: {key}, Fields: {dots}, Value: {value}')
def __delitem__(self, fields):
"""
Atomically does either:
Deletes 'self[key]'
Unsets 'self[key][field1][field2][...]'
"""
key, dots, subscripts = self._process_inputs(fields)
if not subscripts:
result = self._db.delete_one({'_id': key})
if result.raw_result['n'] == 0:
logger.debug(f"No document with '_id' found. Key:{key}, Fields:{dots}")
else:
result = self._db.update_one(
filter={'_id': key},
update={'$unset': {f'{dots}': ''}})
if result.raw_result['nModified'] == 0:
logger.debug(f"Doc with specified fields not found. Key:{key}, Fields:{dots}")
def __iter__(self):
for cursor in self._db.find():
yield cursor['_id']
# return self._db.scan_iter()
def __len__(self):
"""
Returns the estimated document count of a store to give length
We don't use '.count_documents()' as it's O(N) versus O(1) of estimated
Length for a document or subdocument comes from len(store['key']['field1'][...]) using dict len()
"""
return self._db.estimated_document_count()
def __repr__(self):
"""
Returns a pretty string of the entire store with '_id' visible for developer use
"""
return pprint.pformat(list(self._db.find()))
def _process_inputs(self, fields):
"""
Takes in fields and returns the key corresponding with '_id', dot notation
for getting to a specific field in a Mongo query/filter (ex. 'field1.field2.field3.field4')
and the subscript notation for returning a specified field from a result dictionary
(ex. `['field1']['field2']['field3']['field4']`)
"""
if isinstance(fields, str):
key = dots = fields
subscripts = ''
elif isinstance(fields, list) and len(fields) == 1:
key = dots = fields[0]
subscripts = ''
else:
key = fields[0]
dots = '.'.join(fields[1:])
subscripts = "['" + "']['".join(fields[1:]) + "']"
return key, dots, subscripts
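# Illustrative example (derived from the branches above, not captured output):
# _process_inputs(['actor_1', 'worker', 'status'])
# -> ('actor_1', 'worker.status', "['worker']['status']")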
def _prepset(self, value):
if type(value) is bytes:
return value.decode('utf-8')
return value
def pop_field(self, fields):
"""
Atomically pops 'self[key] = value' or 'self[key][field1][field2][...] = value'
"""
key, dots, subscripts = self._process_inputs(fields)
if not subscripts:
result = self._db.find_one(
{'_id': key},
projection={'_id': False})
if result is None:
raise KeyError(f"'_id' of '{key}' not found")
del_result = self._db.delete_one({'_id': key})
if del_result.raw_result['n'] == 0:
raise KeyError(f"No document deleted")
return result
else:
result = self._db.find_one_and_update(
filter={'_id': key},
update={'$unset': {dots: ''}})
try:
return eval('result' + subscripts)
except KeyError:
raise KeyError(f"Subscript of {subscripts} does not exist in document of '_id' {key}")
def set_with_expiry(self, fields, value, log_ex):
"""
Atomically:
Sets 'self[key] = value' or 'self[key][field1][field2][...] = value'
Creates 'exp' subdocument in document root with current time for use with MongoDB TTL expiration index
Note: MongoDB TTL checks every 60 secs to delete files
"""
key, dots, _ = self._process_inputs(fields)
log_ex_config = int(Config.get('web', 'log_ex'))
time_change = log_ex_config - log_ex
exp_time = datetime.utcnow() - timedelta(seconds=time_change)
logger.debug(f"Setting 'exp' to: {exp_time}")
if len(fields) == 1 and isinstance(value, dict):
# merge the value and the 'exp' timestamp into a single '$set' so both are written
result = self._db.update_one(
filter={'_id': key},
update={'$set': {**value, 'exp': exp_time}},
upsert=True)
else:
result = self._db.update_one(
filter={'_id': key},
update={'$set': {'exp': exp_time, dots: self._prepset(value)}},
upsert=True)
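# Assumption (not guaranteed by this module): MongoDB only expires documents on
# 'exp' if a TTL index exists for that field, e.g. created once with pymongo:
# self._db.create_index('exp', expireAfterSeconds=0)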
def full_update(self, key, value, upsert=False):
result = self._db.update_one(key, value, upsert)
return result
def getset(self, fields, value):
"""
Atomically does either:
Sets 'self[key] = value' and returns previous 'self[key]'
Sets 'self[key][field1][field2][...] = value' and returns previous 'self[key][field1][field2][...]'
"""
key, dots, subscripts = self._process_inputs(fields)
result = self._db.find_one_and_update(
filter={'_id': key, dots: {'$exists': True}},
update={'$set': {dots: value}})
if result is None:
raise KeyError(f"Subscript of {subscripts} does not exist in document of '_id' {key}")
try:
if len(fields) == 1:
return eval(f"result['{key}']")
else:
return eval('result' + subscripts)
except KeyError:
raise KeyError(f"Subscript of {subscripts} does not exist in document of '_id' {key}")
def items(self, filter_inp=None, proj_inp={'_id': False}):
" Either returns all with no inputs, or filters when given filters"
return list(self._db.find(
filter=filter_inp,
projection=proj_inp))
def add_if_empty(self, fields, value):
"""
Atomically:
Sets 'self[key] = value' or 'self[key][field1][field2][...] = value'
Only if the specified key/field(s) combo does not exist or is empty
Returns the value if it was added; otherwise, returns None
Note: Will not override a field set to a value in order to create a subfield
"""
key, dots, _ = self._process_inputs(fields)
try:
if len(fields) == 1 and isinstance(value, dict):
result = self._db.update_one(
filter={'_id': key},
update={'$setOnInsert': value},
upsert=True)
if result.upserted_id:
return key
elif len(fields) == 1:
result = self._db.update_one(
filter={'_id': key},
update={'$setOnInsert': {dots: value}},
upsert=True)
if result.upserted_id:
return key
else:
try:
result = self._db.update_one(
filter={'_id': key},
update={'$setOnInsert': {dots: value}},
upsert=True)
if result.upserted_id:
return fields
except WriteError:
print("Likely due to trying to set a subfield of a field that is already set to one value")
pass
return None
except DuplicateKeyError:
return None
def aggregate(self, pipeline, options = None):
return self._db.aggregate(pipeline, options)
def create_index(self, index_list):
return self._db.create_index(index_list)
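# Hedged usage sketch (host, port and ids are placeholders, not from this module):
# store = MongoStore('localhost', 27017, database='abaco', db='0')
# store['actor_1'] = {'status': 'READY'}
# store['actor_1', 'status'] = 'BUSY'  # dotted update via _process_inputs
# store['actor_1', 'status']           # -> 'BUSY'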
|
py
|
1a57e511c7ff44d3fa7a03a02d7b98d0158c12e1
|
# Ensures that:
# 1. all worker containers in the database are still responsive; workers that have stopped
# responding are shutdown and removed from the database.
# 2. Enforce ttl for idle workers.
#
# In the future, this module will also implement:
# 3. all actors with stateless=true have a number of workers proportional to the messages in the queue.
# Execute from a container on a schedule as follows:
# docker run -it --rm -v /var/run/docker.sock:/var/run/docker.sock abaco/core python3 -u /actors/health.py
import os
import shutil
import time
import datetime
from agaveflask.auth import get_api_server
import channelpy
from aga import Agave
from auth import get_tenants, get_tenant_verify
import codes
from config import Config
from docker_utils import rm_container, DockerError, container_running, run_container_with_docker
from models import Actor, Worker, is_hashid
from channels import ClientsChannel, CommandChannel, WorkerChannel
from stores import actors_store, clients_store, executions_store, workers_store
from worker import shutdown_worker
TAG = os.environ.get('TAG') or Config.get('general', 'TAG') or ''
if TAG and not TAG.startswith(':'):
TAG = ':{}'.format(TAG)
AE_IMAGE = '{}{}'.format(os.environ.get('AE_IMAGE', 'abaco/core'), TAG)
from agaveflask.logs import get_logger, get_log_file_strategy
logger = get_logger(__name__)
# max executions allowed in a mongo document; if the total executions for a given actor exceeds this number,
# the health process will place
MAX_EXECUTIONS_PER_MONGO_DOC = 25000
def get_actor_ids():
"""Returns the list of actor ids currently registered."""
return [aid for aid in actors_store]
def check_workers_store(ttl):
logger.debug("Top of check_workers_store.")
"""Run through all workers in workers_store and ensure there is no data integrity issue."""
for worker in workers_store.items():
aid = worker['actor_id']
check_worker_health(aid, worker, ttl)
def get_worker(wid):
"""
Check to see if a string `wid` is the id of a worker in the worker store.
If so, return it; if not, return None.
"""
workers = workers_store.items({'id': wid})
if workers:
# items() returns a list of matching documents; callers expect a single worker dict
return workers[0]
return None
def clean_up_socket_dirs():
logger.debug("top of clean_up_socket_dirs")
socket_dir = os.path.join('/host/', Config.get('workers', 'socket_host_path_dir').strip('/'))
logger.debug("processing socket_dir: {}".format(socket_dir))
for p in os.listdir(socket_dir):
# check to see if p is a worker
worker = get_worker(p)
if not worker:
path = os.path.join(socket_dir, p)
logger.debug("Determined that {} was not a worker; deleting directory: {}.".format(p, path))
shutil.rmtree(path)
def clean_up_fifo_dirs():
logger.debug("top of clean_up_fifo_dirs")
fifo_dir = os.path.join('/host/', Config.get('workers', 'fifo_host_path_dir').strip('/'))
logger.debug("processing fifo_dir: {}".format(fifo_dir))
for p in os.listdir(fifo_dir):
# check to see if p is a worker
worker = get_worker(p)
if not worker:
path = os.path.join(fifo_dir, p)
logger.debug("Determined that {} was not a worker; deleting directory: {}.".format(p, path))
shutil.rmtree(path)
def clean_up_ipc_dirs():
"""Remove all directories created for worker sockets and fifos"""
clean_up_socket_dirs()
clean_up_fifo_dirs()
def delete_client(ag, client_name):
"""Remove a client from the APIM."""
try:
ag.clients.delete(clientName=client_name)
except Exception as e:
m = 'Not able to delete client from APIM. Got an exception: {}'.format(e)
logger.error(m)
return None
def clean_up_apim_clients(tenant):
"""Check the list of clients registered in APIM and remove any that are associated with retired workers."""
username = os.environ.get('_abaco_{}_username'.format(tenant), '')
password = os.environ.get('_abaco_{}_password'.format(tenant), '')
if not username:
msg = "Health process did not get a username for tenant {}; " \
"returning from clean_up_apim_clients".format(tenant)
if tenant in ['SD2E', 'TACC-PROD']:
logger.error(msg)
else:
logger.info(msg)
return None
if not password:
msg = "Health process did not get a password for tenant {}; " \
"returning from clean_up_apim_clients".format(tenant)
if tenant in ['SD2E', 'TACC-PROD']:
logger.error(msg)
else:
logger.info(msg)
return None
api_server = get_api_server(tenant)
verify = get_tenant_verify(tenant)
ag = Agave(api_server=api_server,
username=username,
password=password,
verify=verify)
logger.debug("health process created an ag for tenant: {}".format(tenant))
try:
cs = ag.clients.list()
clients = cs.json()['result']
except Exception as e:
msg = "Health process got an exception trying to retrieve clients; exception: {}".format(e)
logger.error(msg)
return None
for client in clients:
# check if the name of the client is an abaco hash (i.e., a worker id). if not, we ignore it from the beginning
name = client.get('name')
if not is_hashid(name):
logger.debug("client {} is not an abaco hash id; skipping.".format(name))
continue
# we know this client came from a worker, so we need to check to see if the worker is still active;
# first check if the worker even exists; if it does, the id will be the client name:
worker = get_worker(name)
if not worker:
logger.info("no worker associated with id: {}; deleting client.".format(name))
delete_client(ag, name)
logger.info("client {} deleted by health process.".format(name))
continue
# if the worker exists, we should check the status:
status = worker.get('status')
if status == codes.ERROR:
logger.info("worker {} was in ERROR status so deleting client; worker: {}.".format(name, worker))
delete_client(ag, name)
logger.info("client {} deleted by health process.".format(name))
else:
logger.debug("worker {} still active; not deleting client.".format(worker))
def clean_up_clients_store():
logger.debug("top of clean_up_clients_store")
secret = os.environ.get('_abaco_secret')
if not secret:
logger.error("health.py not configured with _abaco_secret. exiting clean_up_clients_store.")
return None
for client in clients_store.items():
wid = client.get('worker_id')
if not wid:
logger.error("client object in clients_store without worker_id. client: {}".format(client))
continue
tenant = client.get('tenant')
if not tenant:
logger.error("client object in clients_store without tenant. client: {}".format(client))
continue
actor_id = client.get('actor_id')
if not actor_id:
logger.error("client object in clients_store without actor_id. client: {}".format(client))
continue
client_key = client.get('client_key')
if not client_key:
logger.error("client object in clients_store without client_key. client: {}".format(client))
continue
# check to see if the wid is the id of an actual worker:
worker = get_worker(wid)
if not worker:
logger.info(f"worker {wid} is gone. deleting client {client}.")
clients_ch = ClientsChannel()
msg = clients_ch.request_delete_client(tenant=tenant,
actor_id=actor_id,
worker_id=wid,
client_id=client_key,
secret=secret)
if msg['status'] == 'ok':
logger.info(f"Client delete request completed successfully for "
"worker_id: {wid}, client_id: {client_key}.".format(wid, client_key))
else:
logger.error(f"Error deleting client for "
"worker_id: {wid}, client_id: {client_key}. Message: {msg}")
else:
logger.info(f"worker {wid} still here. ignoring client {client}.")
def check_worker_health(actor_id, worker, ttl):
"""Check the specific health of a worker object."""
logger.debug("top of check_worker_health")
worker_id = worker.get('id')
logger.info("Checking status of worker from db with worker_id: {}".format(worker_id))
if not worker_id:
logger.error("Corrupt data in the workers_store. Worker object without an id attribute. {}".format(worker))
try:
workers_store.pop_field([actor_id])
except KeyError:
# it's possible another health agent already removed the worker record.
pass
return None
# make sure the actor id still exists:
try:
actors_store[actor_id]
except KeyError:
logger.error("Corrupt data in the workers_store. Worker object found but no corresponding actor. {}".format(worker))
try:
# todo - removing worker objects from db can be problematic if other aspects of the worker are not cleaned
# up properly. this code should be reviewed.
workers_store.pop_field([actor_id])
except KeyError:
# it's possible another health agent already removed the worker record.
pass
return None
def zero_out_workers_db():
"""
Set all workers collections in the db to empty. Run this as part of a maintenance; steps:
1) remove all docker containers
2) run this function
3) run clean_up_apim_clients().
4) run zero_out_clients_db()
:return:
"""
for worker in workers_store.items(proj_inp=None):
del workers_store[worker['_id']]
def zero_out_clients_db():
"""
Set all clients collections in the db to empty. Run this as part of a maintenance; steps:
1) remove all docker containers
2) run zero_out_workers_db()
3) run clean_up_apim_clients().
4) run this function
:return:
"""
for client in clients_store.items():
clients_store[client['_id']] = {}
def check_workers(actor_id, ttl):
"""Check health of all workers for an actor."""
logger.info("Checking health for actor: {}".format(actor_id))
try:
workers = Worker.get_workers(actor_id)
except Exception as e:
logger.error("Got exception trying to retrieve workers: {}".format(e))
return None
logger.debug("workers: {}".format(workers))
host_id = os.environ.get('SPAWNER_HOST_ID', Config.get('spawner', 'host_id'))
logger.debug("host_id: {}".format(host_id))
for worker in workers:
# if the worker has only been requested, it will not have a host_id.
if 'host_id' not in worker:
# @todo- we will skip for now, but we need something more robust in case the worker is never claimed.
continue
# ignore workers on different hosts
if not host_id == worker['host_id']:
continue
# first check if worker is responsive; if not, will need to manually kill
logger.info("Checking health for worker: {}".format(worker))
ch = WorkerChannel(worker_id=worker['id'])
worker_id = worker.get('id')
result = None
try:
logger.debug("Issuing status check to channel: {}".format(worker['ch_name']))
result = ch.put_sync('status', timeout=5)
except channelpy.exceptions.ChannelTimeoutException:
logger.info("Worker did not respond, removing container and deleting worker.")
try:
rm_container(worker['cid'])
except DockerError:
pass
try:
Worker.delete_worker(actor_id, worker_id)
logger.info("worker {} deleted from store".format(worker_id))
except Exception as e:
logger.error("Got exception trying to delete worker: {}".format(e))
# if the put_sync timed out and we removed the worker, we also need to delete the channel
# otherwise the un-acked message will remain.
try:
ch.delete()
except Exception as e:
logger.error("Got exception: {} while trying to delete worker channel for worker: {}".format(e, worker_id))
finally:
try:
ch.close()
except Exception as e:
logger.error("Got an error trying to close the worker channel for dead worker. Exception: {}".format(e))
if result and not result == 'ok':
logger.error("Worker responded unexpectedly: {}, deleting worker.".format(result))
try:
rm_container(worker['cid'])
Worker.delete_worker(actor_id, worker_id)
except Exception as e:
logger.error("Got error removing/deleting worker: {}".format(e))
else:
# worker is healthy so update last health check:
Worker.update_worker_health_time(actor_id, worker_id)
logger.info("Worker ok.")
# now check if the worker has been idle beyond the ttl:
if ttl < 0:
# ttl < 0 means infinite life
logger.info("Infinite ttl configured; leaving worker")
return
# we don't shut down workers that are currently running:
if not worker['status'] == codes.BUSY:
last_execution = worker.get('last_execution_time', 0)
# if worker has made zero executions, use the create_time
if last_execution == 0:
last_execution = worker.get('create_time', datetime.datetime.min)
logger.debug("using last_execution: {}".format(last_execution))
try:
assert type(last_execution) == datetime.datetime
except:
logger.error("Time received for TTL measurements is not of type datetime.")
last_execution = datetime.datetime.min
if last_execution + datetime.timedelta(seconds=ttl) < datetime.datetime.utcnow():
# shutdown worker
logger.info("Shutting down worker beyond ttl.")
shutdown_worker(actor_id, worker['id'])
else:
logger.info("Still time left for this worker.")
if worker['status'] == codes.ERROR:
# shutdown worker
logger.info("Shutting down worker in error status.")
shutdown_worker(actor_id, worker['id'])
# else:
# logger.debug("Worker not in READY status, will postpone.")
def get_host_queues():
"""
Read host_queues string from config and parse to return a Python list.
:return: list[str]
"""
try:
host_queues_str = Config.get('spawner', 'host_queues')
return [ s.strip() for s in host_queues_str.split(',')]
except Exception as e:
msg = "Got unexpected exception attempting to parse the host_queues config. Exception: {}".format(e)
logger.error(msg)
raise e
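# Illustrative example (hypothetical config value): host_queues = default, special
# parses to ['default', 'special'].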
def start_spawner(queue, idx='0'):
"""
Start a spawner on this host listening to a queue, `queue`.
:param queue: (str) - the queue the spawner should listen to.
:param idx: (str) - the index to use as a suffix to the spawner container name.
:return:
"""
command = 'python3 -u /actors/spawner.py'
name = 'healthg_{}_spawner_{}'.format(queue, idx)
try:
environment = dict(os.environ)
except Exception as e:
environment = {}
logger.error("Unable to convert environment to dict; exception: {}".format(e))
environment.update({'AE_IMAGE': AE_IMAGE.split(':')[0],
'queue': queue,
})
if '_abaco_secret' not in environment:
msg = 'Error in health process trying to start spawner. Did not find an _abaco_secret. Aborting'
logger.critical(msg)
raise Exception(msg)
# check logging strategy to determine log file name:
log_file = 'abaco.log'
if get_log_file_strategy() == 'split':
log_file = 'spawner.log'
try:
run_container_with_docker(AE_IMAGE,
command,
name=name,
environment=environment,
mounts=[],
log_file=log_file)
except Exception as e:
logger.critical("Could not restart spawner for queue {}. Exception: {}".format(queue, e))
def check_spawner(queue):
"""
Check the health and existence of a spawner on this host for a particular queue.
:param queue: (str) - the queue to check on.
:return:
"""
logger.debug("top of check_spawner for queue: {}".format(queue))
# spawner container names by convention should have the format <project>_<queue>_spawner_<count>; for example
# abaco_default_spawner_2.
# so, we look for container names containing a string with that format:
spawner_name_segment = '{}_spawner'.format(queue)
if not container_running(name=spawner_name_segment):
logger.critical("No spawners running for queue {}! Launching new spawner..".format(queue))
start_spawner(queue)
else:
logger.debug("spawner for queue {} already running.".format(queue))
def check_spawners():
"""
Check health of spawners running on a given host.
:return:
"""
logger.debug("top of check_spawners")
host_queues = get_host_queues()
logger.debug("checking spawners for queues: {}".format(host_queues))
for queue in host_queues:
check_spawner(queue)
def manage_workers(actor_id):
"""Scale workers for an actor if based on message queue size and policy."""
logger.info("Entering manage_workers for {}".format(actor_id))
try:
actor = Actor.from_db(actors_store[actor_id])
except KeyError:
logger.info("Did not find actor; returning.")
return
workers = Worker.get_workers(actor_id)
for worker in workers:
time_difference = time.time() - worker['create_time']
if worker['status'] == 'PROCESSING' and time_difference > 1:
logger.info("LOOK HERE - worker creation time {}".format(worker['create_time']))
#TODO - implement policy
def shutdown_all_workers():
"""
Utility function for properly shutting down all existing workers.
This function is useful when deploying a new version of the worker code.
"""
# iterate over the workers_store directly, not the actors_store, since there could be data integrity issue.
logger.debug("Top of shutdown_all_workers.")
actors_with_workers = set()
for worker in workers_store.items():
actors_with_workers.add(worker['actor_id'])
for actor_id in actors_with_workers:
check_workers(actor_id, 0)
def main():
logger.info("Running abaco health checks. Now: {}".format(time.time()))
# TODO - turning off the check_spawners call in the health process for now as there seem to be some issues.
# the way the check works currently is to look for a spawner with a specific name. However, that check does not
# appear to be working currently.
# check_spawners()
try:
clean_up_ipc_dirs()
except Exception as e:
logger.error("Got exception from clean_up_ipc_dirs: {}".format(e))
try:
ttl = Config.get('workers', 'worker_ttl')
except Exception as e:
logger.error("Could not get worker_ttl config; defaulting to -1. Exception: {}".format(e))
ttl = -1
try:
ttl = int(ttl)
except Exception as e:
logger.error("Invalid ttl config: {}. Setting to -1.".format(e))
ttl = -1
ids = get_actor_ids()
logger.info("Found {} actor(s). Now checking status.".format(len(ids)))
for id in ids:
# manage_workers(id)
check_workers(id, ttl)
tenants = get_tenants()
for t in tenants:
logger.debug("health process cleaning up apim_clients for tenant: {}".format(t))
clean_up_apim_clients(t)
# TODO - turning off the check_workers_store for now. unclear that removing worker objects
# check_workers_store(ttl)
if __name__ == '__main__':
main()
|
py
|
1a57e5c2dab9390977fffb36c63adbb147fa4a24
|
"""Home Assistant Cast integration for Cast."""
from typing import Optional
from pychromecast.controllers.homeassistant import HomeAssistantController
import voluptuous as vol
from homeassistant import auth, config_entries, core
from homeassistant.const import ATTR_ENTITY_ID
from homeassistant.helpers import config_validation as cv, dispatcher
from homeassistant.helpers.network import get_url
from .const import DOMAIN, SIGNAL_HASS_CAST_SHOW_VIEW
SERVICE_SHOW_VIEW = "show_lovelace_view"
ATTR_VIEW_PATH = "view_path"
ATTR_URL_PATH = "dashboard_path"
async def async_setup_ha_cast(
hass: core.HomeAssistant, entry: config_entries.ConfigEntry
):
"""Set up Home Assistant Cast."""
user_id: Optional[str] = entry.data.get("user_id")
user: Optional[auth.models.User] = None
if user_id is not None:
user = await hass.auth.async_get_user(user_id)
if user is None:
user = await hass.auth.async_create_system_user(
"Home Assistant Cast", [auth.GROUP_ID_ADMIN]
)
hass.config_entries.async_update_entry(
entry, data={**entry.data, "user_id": user.id}
)
if user.refresh_tokens:
refresh_token: auth.models.RefreshToken = list(user.refresh_tokens.values())[0]
else:
refresh_token = await hass.auth.async_create_refresh_token(user)
async def handle_show_view(call: core.ServiceCall):
"""Handle a Show View service call."""
hass_url = get_url(hass, require_ssl=True)
controller = HomeAssistantController(
# If you are developing Home Assistant Cast, uncomment and set to your dev app id.
# app_id="5FE44367",
hass_url=hass_url,
client_id=None,
refresh_token=refresh_token.token,
)
dispatcher.async_dispatcher_send(
hass,
SIGNAL_HASS_CAST_SHOW_VIEW,
controller,
call.data[ATTR_ENTITY_ID],
call.data[ATTR_VIEW_PATH],
call.data.get(ATTR_URL_PATH),
)
hass.helpers.service.async_register_admin_service(
DOMAIN,
SERVICE_SHOW_VIEW,
handle_show_view,
vol.Schema(
{
ATTR_ENTITY_ID: cv.entity_id,
ATTR_VIEW_PATH: str,
vol.Optional(ATTR_URL_PATH): str,
}
),
)
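# Hedged usage note (assumes DOMAIN == "cast"; entity and paths are placeholders) -
# the registered service could then be called as:
# service: cast.show_lovelace_view
# data:
#   entity_id: media_player.kitchen_display
#   view_path: downstairs
#   dashboard_path: lovelace-cast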
|
py
|
1a57e6b37e746c862bac7eb3c89ff7b85683e07a
|
# Steps from, "How To Configure a Production-Ready Mesosphere Cluster on Ubuntu 14.04," - Sep 25, 2014
# https://www.digitalocean.com/community/tutorials/how-to-configure-a-production-ready-mesosphere-cluster-on-ubuntu-14-04
from fabric.api import run, env, execute, task
from fabric.context_managers import shell_env
import math
# list master and slave hosts
env.roledefs = {
'masters': ['ip-address-master1', 'ip-address-master2', 'ip-address-master3'],
'slaves': ['ip-address-slave1', 'ip-address-slave2', 'ip-address-slave3']
}
# docker on mesos
mesos_containerizers = "mesos,docker"
docker_images = ['lab41/spark-mesos-dockerworker-ipython']
def configure_packages():
run('sudo sh -c "echo \'nameserver 8.8.8.8\' >> /etc/resolvconf/resolv.conf.d/base"')
run('sudo resolvconf -u')
run(' DISTRO=$(lsb_release -is | tr \'[:upper:]\' \'[:lower:]\'); \
CODENAME=$(lsb_release -cs); \
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv E56151BF; \
echo "deb http://repos.mesosphere.io/${DISTRO} ${CODENAME} main" | sudo tee /etc/apt/sources.list.d/mesosphere.list')
def install_mesos():
execute(configure_packages)
run(' sudo apt-get update && \
sudo apt-get install --yes mesos')
def install_mesosphere():
execute(configure_packages)
run(' sudo apt-get update && \
sudo apt-get install --yes mesosphere')
def configure_zookeeper():
host_str = ""
for index,host in enumerate(env.roledefs['masters']):
host_str += "{}:2181,".format(host)
host_str = host_str[:-1]
run('sudo sed -i "s|^zk.*|zk://{}/mesos|g" /etc/mesos/zk'.format(host_str))
def configure_zookeeper_masters():
host_id = env.roledefs['masters'].index(env.host)
run('sudo sh -c "echo \'{}\' > /etc/zookeeper/conf/myid"'.format(host_id))
for index,host in enumerate(env.roledefs['masters']):
run('sudo sh -c "echo \'server.{}={}:2888:3888\' >> /etc/zookeeper/conf/zoo.cfg"'.format(index, host))
def configure_quorum():
run('sudo sh -c "echo \'{}\' > /etc/mesos-master/quorum"'.format(len(env.roledefs['masters'])/2 + 1))
def configure_mesos_ip():
run('sudo sh -c "echo \'{}\' > /etc/mesos-master/ip"'.format(env.host))
run('sudo sh -c "echo \'{}\' > /etc/mesos-master/hostname"'.format(env.host))
def configure_marathon():
run('sudo mkdir -p /etc/marathon/conf')
run('sudo cp /etc/mesos-master/hostname /etc/marathon/conf')
run('sudo cp /etc/mesos/zk /etc/marathon/conf/master')
run('sudo cp /etc/marathon/conf/master /etc/marathon/conf/zk')
run('sudo sed -i "s|mesos|marathon|g" /etc/marathon/conf/zk')
def start_masters():
run('echo manual | sudo tee /etc/init/mesos-slave.override')
run('sudo stop mesos-slave; sudo restart zookeeper')
run('sudo start mesos-master')
run('sudo start marathon')
def start_slaves():
run('echo manual | sudo tee /etc/init/zookeeper.override')
run('echo manual | sudo tee /etc/init/mesos-master.override')
run('echo {} | sudo tee /etc/mesos-slave/ip'.format(env.host))
run('sudo cp /etc/mesos-slave/ip /etc/mesos-slave/hostname')
run('sudo sh -c "echo \'{}\' > /etc/mesos-slave/containerizers"'.format(mesos_containerizers))
run('sudo sh -c "echo \'5mins\' > /etc/mesos-slave/executor_registration_timeout"')
run('sudo stop zookeeper; sudo stop mesos-master; sudo start mesos-slave')
def docker_pull_containers():
for image in docker_images:
run('docker pull {}'.format(image))
def configure_and_start_masters():
execute(configure_zookeeper_masters)
execute(configure_quorum)
execute(configure_mesos_ip)
execute(configure_marathon)
execute(start_masters)
def configure_and_start_slaves():
execute(start_slaves)
def pull_docker_images():
execute(docker_pull_containers)
def docker_restart():
run('sudo service docker restart')
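# Hedged usage sketch (Fabric 1.x CLI; the host addresses above are placeholders):
# fab -R masters install_mesosphere configure_zookeeper configure_and_start_masters
# fab -R slaves install_mesos configure_zookeeper configure_and_start_slaves pull_docker_images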
|
py
|
1a57e8092af46adf54768d06bdffb276d5a97cee
|
from ray.rllib.agents.dqn.apex import ApexTrainer
from ray.rllib.agents.ddpg.ddpg import DDPGTrainer, DEFAULT_CONFIG as DDPG_CONFIG
from ray.rllib.evaluation.worker_set import WorkerSet
from ray.rllib.utils.annotations import override
from ray.rllib.utils.typing import TrainerConfigDict
from ray.util.iter import LocalIterator
APEX_DDPG_DEFAULT_CONFIG = DDPGTrainer.merge_trainer_configs(
DDPG_CONFIG, # see also the options in ddpg.py, which are also supported
{
"optimizer": {
"max_weight_sync_delay": 400,
"num_replay_buffer_shards": 4,
"debug": False,
},
"exploration_config": {"type": "PerWorkerOrnsteinUhlenbeckNoise"},
"n_step": 3,
"num_gpus": 0,
"num_workers": 32,
"buffer_size": 2000000,
# TODO(jungong) : update once Apex supports replay_buffer_config.
"no_local_replay_buffer": True,
# Whether all shards of the replay buffer must be co-located
# with the learner process (running the execution plan).
# This is preferred b/c the learner process should have quick
# access to the data from the buffer shards, avoiding network
# traffic each time samples from the buffer(s) are drawn.
# Set this to False for relaxing this constraint and allowing
# replay shards to be created on node(s) other than the one
# on which the learner is located.
"replay_buffer_shards_colocated_with_driver": True,
"learning_starts": 50000,
"train_batch_size": 512,
"rollout_fragment_length": 50,
"target_network_update_freq": 500000,
"min_sample_timesteps_per_reporting": 25000,
"worker_side_prioritization": True,
"min_time_s_per_reporting": 30,
},
_allow_unknown_configs=True,
)
class ApexDDPGTrainer(DDPGTrainer):
@classmethod
@override(DDPGTrainer)
def get_default_config(cls) -> TrainerConfigDict:
return APEX_DDPG_DEFAULT_CONFIG
@staticmethod
@override(DDPGTrainer)
def execution_plan(
workers: WorkerSet, config: dict, **kwargs
) -> LocalIterator[dict]:
"""Use APEX-DQN's execution plan."""
return ApexTrainer.execution_plan(workers, config, **kwargs)
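# Hedged usage sketch (environment name is a placeholder; assumes a running Ray cluster):
# import ray
# from ray import tune
# ray.init()
# tune.run(ApexDDPGTrainer,
#          config=dict(APEX_DDPG_DEFAULT_CONFIG, env="Pendulum-v1"),
#          stop={"training_iteration": 1})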
|
py
|
1a57e8c4fd0e6b347031a7ab1e1c46b4db3ea418
|
# -*- coding: utf-8 -*-
from setuptools import setup
project = "fbone"
setup(
name = project,
version = '0.1',
url = '',
description = '',
author = '',
author_email = '',
packages = ["fbone"],
include_package_data = True,
zip_safe = False,
install_requires=[
'Flask>=0.10.1',
'Flask-SQLAlchemy',
'Flask-WTF',
'Flask-Script',
'Flask-Babel',
'Flask-Testing',
'Flask-Mail',
'Flask-Cache',
'Flask-Login',
'Flask-OpenID',
'nose',
'fabric',
],
test_suite ='tests',
classifiers = [
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries'
]
)
|
py
|
1a57e8e7be4c71ba99f44c1422dda4b617aefd20
|
#!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Michael A.G. Aivazis
# California Institute of Technology
# (C) 1998-2003 All Rights Reserved
#
# <LicenseText>
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
from builtins import object
class Body(object):
def inspect(self, inspector):
raise NotImplementedError(
"class '%s' should override method '%s'"
% (self.__class__.__name__, 'inspect')
)
# version
__id__ = "$Id$"
#
# End of file
|