filename (string, 13–19 chars) | text (string, 134–1.04M chars)
---|---|
the-stack_0_10871 | # Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
import re # noqa: F401
import sys # noqa: F401
from datadog_api_client.v1.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from datadog_api_client.v1.model.log_stream_widget_definition import LogStreamWidgetDefinition
from datadog_api_client.v1.model.notebook_cell_time import NotebookCellTime
from datadog_api_client.v1.model.notebook_distribution_cell_attributes import NotebookDistributionCellAttributes
from datadog_api_client.v1.model.notebook_graph_size import NotebookGraphSize
from datadog_api_client.v1.model.notebook_heat_map_cell_attributes import NotebookHeatMapCellAttributes
from datadog_api_client.v1.model.notebook_log_stream_cell_attributes import NotebookLogStreamCellAttributes
from datadog_api_client.v1.model.notebook_markdown_cell_attributes import NotebookMarkdownCellAttributes
from datadog_api_client.v1.model.notebook_split_by import NotebookSplitBy
from datadog_api_client.v1.model.notebook_timeseries_cell_attributes import NotebookTimeseriesCellAttributes
from datadog_api_client.v1.model.notebook_toplist_cell_attributes import NotebookToplistCellAttributes
globals()["LogStreamWidgetDefinition"] = LogStreamWidgetDefinition
globals()["NotebookCellTime"] = NotebookCellTime
globals()["NotebookDistributionCellAttributes"] = NotebookDistributionCellAttributes
globals()["NotebookGraphSize"] = NotebookGraphSize
globals()["NotebookHeatMapCellAttributes"] = NotebookHeatMapCellAttributes
globals()["NotebookLogStreamCellAttributes"] = NotebookLogStreamCellAttributes
globals()["NotebookMarkdownCellAttributes"] = NotebookMarkdownCellAttributes
globals()["NotebookSplitBy"] = NotebookSplitBy
globals()["NotebookTimeseriesCellAttributes"] = NotebookTimeseriesCellAttributes
globals()["NotebookToplistCellAttributes"] = NotebookToplistCellAttributes
class NotebookCellResponseAttributes(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {}
validations = {}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (
bool,
date,
datetime,
dict,
float,
int,
list,
str,
none_type,
) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {}
@cached_property
def discriminator():
return None
attribute_map = {}
required_properties = set(
[
"_data_store",
"_check_type",
"_spec_property_naming",
"_path_to_item",
"_configuration",
"_visited_composed_classes",
"_composed_instances",
"_var_name_to_model_instances",
"_additional_properties_model_instances",
]
)
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""NotebookCellResponseAttributes - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
graph_size (NotebookGraphSize): [optional] # noqa: E501
split_by (NotebookSplitBy): [optional] # noqa: E501
time (NotebookCellTime): [optional] # noqa: E501
definition (LogStreamWidgetDefinition): [optional] # noqa: E501
"""
_check_type = kwargs.pop("_check_type", True)
_spec_property_naming = kwargs.pop("_spec_property_naming", False)
_path_to_item = kwargs.pop("_path_to_item", ())
_configuration = kwargs.pop("_configuration", None)
_visited_composed_classes = kwargs.pop("_visited_composed_classes", ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments."
% (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
constant_args = {
"_check_type": _check_type,
"_path_to_item": _path_to_item,
"_spec_property_naming": _spec_property_naming,
"_configuration": _configuration,
"_visited_composed_classes": self._visited_composed_classes,
}
required_args = {}
model_args = {}
model_args.update(required_args)
model_args.update(kwargs)
composed_info = validate_get_composed_info(constant_args, model_args, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
unused_args = composed_info[3]
for var_name, var_value in required_args.items():
setattr(self, var_name, var_value)
for var_name, var_value in kwargs.items():
if (
var_name in unused_args
and self._configuration is not None
and self._configuration.discard_unknown_keys
and not self._additional_properties_model_instances
):
# discard variable.
continue
setattr(self, var_name, var_value)
@cached_property
def _composed_schemas():
# we need this here to make our import statements work
# we must store _composed_schemas in here so the code is only run
# when we invoke this method. If we kept this at the class
# level we would get an error because the class level
# code would be run when this module is imported, and these composed
# classes don't exist yet because their module has not finished
# loading
lazy_import()
return {
"anyOf": [],
"allOf": [],
"oneOf": [
NotebookDistributionCellAttributes,
NotebookHeatMapCellAttributes,
NotebookLogStreamCellAttributes,
NotebookMarkdownCellAttributes,
NotebookTimeseriesCellAttributes,
NotebookToplistCellAttributes,
],
}
|
the-stack_0_10873 | import logging
import moderngl
from moderngl_window.loaders.base import BaseLoader
from moderngl_window.opengl import program
from moderngl_window.exceptions import ImproperlyConfigured
logger = logging.getLogger(__name__)
class Loader(BaseLoader):
kind = "single"
def load(self) -> moderngl.Program:
"""Loads a shader program from a single glsl file.
Each shader type is separated by preprocessors
- VERTEX_SHADER
- FRAGMENT_SHADER
- GEOMETRY_SHADER
- TESS_CONTROL_SHADER
- TESS_EVALUATION_SHADER
Example:
.. code:: glsl
#version 330
#if defined VERTEX_SHADER
in vec3 in_position;
in vec2 in_texcoord_0;
out vec2 uv0;
void main() {
gl_Position = vec4(in_position, 1);
uv0 = in_texcoord_0;
}
#elif defined FRAGMENT_SHADER
out vec4 fragColor;
uniform sampler2D texture0;
in vec2 uv0;
void main() {
fragColor = texture(texture0, uv0);
}
#endif
Returns:
moderngl.Program: The Program instance
"""
self.meta.resolved_path, source = self._load_source(self.meta.path)
shaders = program.ProgramShaders.from_single(self.meta, source)
shaders.handle_includes(self._load_source)
prog = shaders.create()
# Wrap the program if reloadable is set
if self.meta.reloadable:
# Disable reload flag so reloads will return Program instances
self.meta.reloadable = False
# Wrap it ..
prog = program.ReloadableProgram(self.meta, prog)
return prog
def _load_source(self, path):
"""Finds and loads a single source file.
Args:
path: Path to resource
Returns:
Tuple[resolved_path, source]: The resolved path and the source
"""
resolved_path = self.find_program(path)
if not resolved_path:
raise ImproperlyConfigured("Cannot find program '{}'".format(path))
logger.info("Loading: %s", path)
with open(str(resolved_path), "r") as fd:
return resolved_path, fd.read()
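# Illustrative usage sketch (not part of the original loader): once
# moderngl_window's resource registries are configured, a single-file program
# is typically requested through a ProgramDescription. The path
# "programs/cube.glsl" below is an assumed example value.
#
#   from moderngl_window import resources
#   from moderngl_window.meta import ProgramDescription
#
#   prog = resources.programs.load(ProgramDescription(path="programs/cube.glsl"))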
|
the-stack_0_10874 | from datetime import date, datetime, time
from typing import Any, Dict, Optional
from flask import url_for
from flask_frozen import UrlForLogger
from git import Repo
from naucse import views
from naucse.models import Course
from naucse.utils.views import page_content_cache_key, get_edit_info
def get_course_from_slug(slug: str) -> Course:
""" Gets the actual course instance from a slug.
"""
parts = slug.split("/")
if parts[0] == "course":
return views.model.courses[parts[1]]
else:
return views.model.runs[(int(parts[0]), parts[1])]
def course_info(slug: str, *args, **kwargs) -> Dict[str, Any]:
"""Return info about the given course.
Return some extra info when it's a run (based on COURSE_INFO/RUN_INFO)
"""
course = get_course_from_slug(slug)
if course.is_link():
raise ValueError("Circular dependency.")
if "course" in slug:
attributes = Course.COURSE_INFO
else:
attributes = Course.RUN_INFO
data = {}
for attr in attributes:
val = getattr(course, attr)
if isinstance(val, (date, datetime, time)):
val = val.isoformat()
data[attr] = val
return data
def serialize_license(license) -> Optional[Dict[str, str]]:
"""Serialize a License instance into a dict.
"""
if license:
return {
"url": license.url,
"title": license.title
}
return None
def render(page_type: str, slug: str, *args, **kwargs) -> Dict[str, Any]:
"""Return a rendered page for a course, based on page_type and slug.
"""
course = get_course_from_slug(slug)
if course.is_link():
raise ValueError("Circular dependency.")
path = []
if kwargs.get("request_url"):
path = [kwargs["request_url"]]
logger = UrlForLogger(views.app)
with views.app.test_request_context(*path):
with logger:
info = {
"course": {
"title": course.title,
"url": views.course_url(course),
"vars": course.vars,
"canonical": course.canonical,
"is_derived": course.is_derived,
},
}
if page_type == "course":
info["content"] = views.course_content(course)
info["edit_info"] = get_edit_info(course.edit_path)
elif page_type == "calendar":
info["content"] = views.course_calendar_content(course)
info["edit_info"] = get_edit_info(course.edit_path)
elif page_type == "calendar_ics":
info["calendar"] = str(views.generate_calendar_ics(course))
info["edit_info"] = get_edit_info(course.edit_path)
elif page_type == "course_page":
lesson_slug, page, solution, *_ = args
lesson = views.model.get_lesson(lesson_slug)
content_offer_key = kwargs.get("content_key")
not_processed = object()
content = not_processed
if content_offer_key is not None:
# the base repository has a cached version of the content
content_key = page_content_cache_key(Repo("."), lesson_slug, page, solution, course.vars)
# if the key matches what would be produced here, let's not return anything
# and the cached version will be used
if content_offer_key == content_key:
content = None
request_url = kwargs.get("request_url")
if request_url is None:
request_url = url_for('course_page', course=course, lesson=lesson, page=page, solution=solution)
lesson_url, subpage_url, static_url = views.relative_url_functions(request_url, course, lesson)
page, session, prv, nxt = views.get_page(course, lesson, page)
# if content isn't cached or the version was refused, let's render
# the content here (but just the content and not the whole page with headers, menus etc)
if content is not_processed:
content = views.page_content(
lesson, page, solution, course,
lesson_url=lesson_url,
subpage_url=subpage_url,
static_url=static_url,
without_cache=True,
)
if content is None:
info["content"] = None
info["content_urls"] = []
else:
info["content"] = content["content"]
info["content_urls"] = content["urls"]
info.update({
"page": {
"title": page.title,
"css": page.info.get("css"), # not page.css since we want the css without limitation
"latex": page.latex,
"attributions": page.attributions,
"license": serialize_license(page.license),
"license_code": serialize_license(page.license_code)
},
"edit_info": get_edit_info(page.edit_path)
})
if session is not None:
info["session"] = {
"title": session.title,
"url": url_for("session_coverpage", course=course.slug, session=session.slug),
"slug": session.slug,
}
prev_link, session_link, next_link = views.get_footer_links(course, session, prv, nxt, lesson_url)
info["footer"] = {
"prev_link": prev_link,
"session_link": session_link,
"next_link": next_link
}
elif page_type == "session_coverpage":
session_slug, coverpage, *_ = args
session = course.sessions.get(session_slug)
info.update({
"session": {
"title": session.title,
"url": url_for("session_coverpage", course=course.slug, session=session.slug),
},
"content": views.session_coverpage_content(course, session, coverpage),
"edit_info": get_edit_info(session.get_edit_path(course, coverpage)),
})
else:
raise ValueError("Invalid page type.")
# generate list of absolute urls which need to be frozen further
urls = set()
for endpoint, values in logger.iter_calls():
url = url_for(endpoint, **values)
if url.startswith(f"/{slug}"): # this is checked once again in main repo, but let's save cache space
urls.add(url)
info["urls"] = list(urls)
return info
def get_footer_links(slug, lesson_slug, page, request_url=None):
course = get_course_from_slug(slug)
if course.is_link():
raise ValueError("Circular dependency.")
try:
lesson = views.model.get_lesson(lesson_slug)
except LookupError:
raise ValueError("Lesson not found")
path = []
if request_url is not None:
path = [request_url]
with views.app.test_request_context(*path):
def lesson_url(lesson, *args, **kwargs):
return url_for("course_page", course=course, lesson=lesson, *args, **kwargs)
page, session, prv, nxt = views.get_page(course, lesson, page)
prev_link, session_link, next_link = views.get_footer_links(course, session, prv, nxt, lesson_url)
return {
"prev_link": prev_link,
"session_link": session_link,
"next_link": next_link
}
|
the-stack_0_10875 | """
Test the optimization of transfers, generating a few simplified scenarios
and checking that the optimizer finds the expected outcome.
"""
from unittest import mock
from operator import itemgetter
from airsenal.framework.squad import Squad
from airsenal.framework.optimization_utils import (
get_discount_factor,
next_week_transfers,
count_expected_outputs,
)
from airsenal.framework.optimization_transfers import (
make_optimum_single_transfer,
make_optimum_double_transfer,
)
class DummyPlayer(object):
"""
fake player that we can add to a squad, giving a specified expected score.
"""
def __init__(self, player_id, position, points_dict):
"""
we generate squad to avoid >3-players-per-team problem,
and set price to 0 to avoid overrunning budget.
"""
self.player_id = player_id
self.fpl_api_id = player_id
self.name = "player_{}".format(player_id)
self.position = position
self.team = "DUMMY_TEAM_{}".format(player_id)
self.purchase_price = 0
self.is_starting = True
self.is_captain = False
self.is_vice_captain = False
self.predicted_points = {"DUMMY": points_dict}
self.sub_position = None
def calc_predicted_points(self, dummy):
pass
def generate_dummy_squad(player_points_dict=None):
"""
Fill a squad up with dummy players.
player_points_dict is a dictionary
{ player_id: { gw: points,...} ,...}
"""
if not player_points_dict: # make a simple one
player_points_dict = {i: {1: 2} for i in range(15)}
t = Squad()
for i in range(15):
if i < 2:
position = "GK"
elif i < 7:
position = "DEF"
elif i < 12:
position = "MID"
else:
position = "FWD"
t.add_player(DummyPlayer(i, position, player_points_dict[i]))
return t
def predicted_point_mock_generator(point_dict):
"""
return a function that will mock the get_predicted_points function
the point_dict it is given should be keyed by position, i.e.
{"GK" : {player_id: points, ...}, "DEF": {}, ... }
"""
def mock_get_predicted_points(gameweek, tag, position, team=None):
"""
return an ordered list in the same way as the real
get_predicted_points func does. EXCEPT - we return dummy players rather
than just ids (so the Squad.add_player can add them)
"""
output_pid_list = [(k, v) for k, v in point_dict[position].items()]
output_pid_list.sort(key=itemgetter(1), reverse=True)
# return output_pid_list
if isinstance(gameweek, list):
gameweek = gameweek[0]
return [
(DummyPlayer(entry[0], position, {gameweek: entry[1]}), entry[1])
for entry in output_pid_list
]
return mock_get_predicted_points
def test_subs():
"""
mock squads with some players predicted some points, and
some predicted to score zero, and check we get the right starting 11.
"""
points_dict = {
0: {1: 0},
1: {1: 2},
2: {1: 2},
3: {1: 2},
4: {1: 0},
5: {1: 2},
6: {1: 2},
7: {1: 2},
8: {1: 2},
9: {1: 0},
10: {1: 2},
11: {1: 4},
12: {1: 0},
13: {1: 2},
14: {1: 3},
}
# should get 4,4,2, with players 0,4,9,12 on the bench,
# captain player 11, vice-captain player 14
# should have 29 points (9*2 + 3 + (2*4) )
t = generate_dummy_squad(points_dict)
ep = t.get_expected_points(1, "DUMMY")
assert ep == 29
assert t.players[0].is_starting is False
assert t.players[4].is_starting is False
assert t.players[9].is_starting is False
assert t.players[12].is_starting is False
assert t.players[11].is_captain is True
assert t.players[14].is_vice_captain is True
def test_single_transfer():
"""
mock squad with all players predicted 2 points, and potential transfers
with higher scores, check we get the best transfer.
"""
t = generate_dummy_squad()
position_points_dict = {
"GK": {0: 2, 1: 2, 100: 0, 101: 0, 200: 3, 201: 2}, # in the orig squad
"DEF": {
2: 2,
3: 2,
4: 2,
5: 2,
6: 2, # in the orig squad
103: 0,
104: 0,
105: 5,
106: 2,
107: 2,
203: 0,
204: 0,
205: 1,
206: 2,
207: 2,
},
"MID": {
7: 2,
8: 2,
9: 2,
10: 2,
11: 2, # in the orig squad
108: 2,
109: 2,
110: 3,
111: 3,
112: 0,
208: 2,
209: 2,
210: 3,
211: 3,
212: 0,
},
"FWD": {12: 2, 13: 2, 14: 2, 113: 6, 114: 3, 115: 7}, # in the orig squad
}
mock_pred_points = predicted_point_mock_generator(position_points_dict)
with mock.patch(
"airsenal.framework.optimization_transfers.get_predicted_points",
side_effect=mock_pred_points,
):
new_squad, pid_out, pid_in = make_optimum_single_transfer(t, "DUMMY", [1])
# we should expect - player 115 to be transfered in, and to be captain.
assert pid_in[0] == 115
for p in new_squad.players:
if p.player_id == 115:
assert p.is_captain is True
else:
assert p.is_captain is False
# expected points should be 10*2 + 7*2 = 34
assert new_squad.get_expected_points(1, "DUMMY") == 34
def test_double_transfer():
"""
mock squad with two players predicted low score, see if we get better players
transferred in.
"""
t = generate_dummy_squad()
position_points_dict = {
"GK": {0: 2, 1: 2, 100: 0, 101: 0, 200: 3, 201: 7}, # in the orig squad
"DEF": {
2: 2,
3: 2,
4: 2,
5: 2,
6: 2, # in the orig squad
103: 0,
104: 0,
105: 5,
106: 2,
107: 2,
203: 0,
204: 0,
205: 1,
206: 2,
207: 2,
},
"MID": {
7: 2,
8: 2,
9: 2,
10: 2,
11: 2, # in the orig squad
108: 2,
109: 2,
110: 3,
111: 3,
112: 0,
208: 2,
209: 2,
210: 3,
211: 3,
212: 0,
},
"FWD": {12: 2, 13: 2, 14: 2, 113: 6, 114: 3, 115: 8}, # in the orig squad
}
mock_pred_points = predicted_point_mock_generator(position_points_dict)
with mock.patch(
"airsenal.framework.optimization_transfers.get_predicted_points",
side_effect=mock_pred_points,
):
new_squad, pid_out, pid_in = make_optimum_double_transfer(t, "DUMMY", [1])
# we should expect 201 and 115 to be transferred in, and 1,15 to
# be transferred out. 115 should be captain
assert 201 in pid_in
assert 115 in pid_in
print(new_squad)
for p in new_squad.players:
if p.player_id == 115:
assert p.is_captain is True
else:
assert p.is_captain is False
def test_get_discount_factor():
"""
Discount factor discounts future gameweek score predictions based on the
number of gameweeks ahead. It uses two discount types based on a discount
of 14/15, exponential ({14/15}^{weeks ahead}) and constant
(1-{14/15}*weeks ahead)
"""
assert get_discount_factor(1, 4) == (14 / 15) ** (4 - 1)
assert get_discount_factor(1, 4, "constant") == 1 - ((1 / 15) * (4 - 1))
assert get_discount_factor(1, 20, "const") == 0
assert get_discount_factor(1, 1, "const") == 1
assert get_discount_factor(1, 1, "exp") == 1
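# Worked example (illustrative): looking at gameweek 4 from gameweek 1 gives an
# exponential factor of (14/15)**3 ≈ 0.813 and a constant factor of
# 1 - 3/15 = 0.8, matching the assertions above.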
def test_next_week_transfers_no_chips_no_constraints():
# First week (blank starting strat with 1 free transfer available)
strat = (1, 0, {"players_in": {}, "chips_played": {}})
# No chips or constraints
actual = next_week_transfers(
strat,
max_total_hit=None,
allow_unused_transfers=True,
max_transfers=2,
)
# (no. transfers, free transfers following week, points hit)
expected = [(0, 2, 0), (1, 1, 0), (2, 1, 4)]
assert actual == expected
def test_next_week_transfers_any_chip_no_constraints():
# All chips, no constraints
strat = (1, 0, {"players_in": {}, "chips_played": {}})
actual = next_week_transfers(
strat,
max_total_hit=None,
max_transfers=2,
chips={
"chips_allowed": ["wildcard", "free_hit", "bench_boost", "triple_captain"],
"chip_to_play": None,
},
)
expected = [
(0, 2, 0),
(1, 1, 0),
(2, 1, 4),
("W", 1, 0),
("F", 1, 0),
("B0", 2, 0),
("B1", 1, 0),
("B2", 1, 4),
("T0", 2, 0),
("T1", 1, 0),
("T2", 1, 4),
]
assert actual == expected
def test_next_week_transfers_no_chips_zero_hit():
# No points hits
strat = (1, 0, {"players_in": {}, "chips_played": {}})
actual = next_week_transfers(
strat,
max_total_hit=0,
allow_unused_transfers=True,
max_transfers=2,
)
expected = [(0, 2, 0), (1, 1, 0)]
assert actual == expected
def test_next_week_transfers_2ft_no_unused():
# 2 free transfers available, no wasted transfers
strat = (2, 0, {"players_in": {}, "chips_played": {}})
actual = next_week_transfers(
strat,
max_total_hit=None,
allow_unused_transfers=False,
max_transfers=2,
)
expected = [(1, 2, 0), (2, 1, 0)]
assert actual == expected
def test_next_week_transfers_chips_already_used():
# Chips allowed but previously used
strat = (
1,
0,
{
"players_in": {},
"chips_played": {
1: "wildcard",
2: "free_hit",
3: "bench_boost",
4: "triple_captain",
},
},
)
actual = next_week_transfers(
strat,
max_total_hit=None,
max_transfers=2,
)
expected = [(0, 2, 0), (1, 1, 0), (2, 1, 4)]
assert actual == expected
def test_next_week_transfers_play_wildcard():
strat = (1, 0, {"players_in": {}, "chips_played": {}})
actual = next_week_transfers(
strat,
max_total_hit=None,
max_transfers=2,
chips={"chips_allowed": [], "chip_to_play": "wildcard"},
)
expected = [("W", 1, 0)]
assert actual == expected
def test_next_week_transfers_2ft_allow_wildcard():
strat = (2, 0, {"players_in": {}, "chips_played": {}})
actual = next_week_transfers(
strat,
max_total_hit=None,
max_transfers=2,
chips={"chips_allowed": ["wildcard"], "chip_to_play": None},
)
expected = [(0, 2, 0), (1, 2, 0), (2, 1, 0), ("W", 1, 0)]
assert actual == expected
def test_next_week_transfers_2ft_allow_wildcard_no_unused():
strat = (2, 0, {"players_in": {}, "chips_played": {}})
actual = next_week_transfers(
strat,
max_total_hit=None,
allow_unused_transfers=False,
max_transfers=2,
chips={"chips_allowed": ["wildcard"], "chip_to_play": None},
)
expected = [(1, 2, 0), (2, 1, 0), ("W", 1, 0)]
assert actual == expected
def test_next_week_transfers_2ft_play_wildcard():
strat = (2, 0, {"players_in": {}, "chips_played": {}})
actual = next_week_transfers(
strat,
max_total_hit=None,
max_transfers=2,
chips={"chips_allowed": [], "chip_to_play": "wildcard"},
)
expected = [("W", 1, 0)]
assert actual == expected
def test_next_week_transfers_2ft_play_bench_boost_no_unused():
strat = (2, 0, {"players_in": {}, "chips_played": {}})
actual = next_week_transfers(
strat,
max_total_hit=None,
allow_unused_transfers=False,
max_transfers=2,
chips={"chips_allowed": [], "chip_to_play": "bench_boost"},
)
expected = [("B1", 2, 0), ("B2", 1, 0)]
assert actual == expected
def test_next_week_transfers_play_triple_captain_max_transfers_3():
strat = (1, 0, {"players_in": {}, "chips_played": {}})
actual = next_week_transfers(
strat,
max_total_hit=None,
allow_unused_transfers=True,
max_transfers=3,
chips={"chips_allowed": [], "chip_to_play": "triple_captain"},
)
expected = [("T0", 2, 0), ("T1", 1, 0), ("T2", 1, 4), ("T3", 1, 8)]
assert actual == expected
def test_count_expected_outputs_no_chips_no_constraints():
# No constraints or chips, expect 3**num_gameweeks strategies
count = count_expected_outputs(
3,
free_transfers=1,
max_total_hit=None,
allow_unused_transfers=True,
next_gw=1,
max_transfers=2,
chip_gw_dict={},
)
assert count == 3 ** 3
# Max hit 0
# Include:
# (0, 0, 0), (0, 0, 1), (0, 0, 2), (0, 1, 0), (0, 1, 1), (0, 1, 2),
# (0, 2, 0), (0, 2, 1), (1, 0, 0), (1, 0, 1), (1, 0, 2), (1, 1, 0), (1, 1, 1)
# Exclude:
# (0, 2, 2), (1, 1, 2), (1, 2, 0), (1, 2, 1), (1, 2, 2), (2, 0, 0), (2, 0, 1),
# (2, 0, 2), (2, 1, 0), (2, 1, 1), (2, 1, 2), (2, 2, 0), (2, 2, 1), (2, 2, 2)
def test_count_expected_outputs_no_chips_zero_hit():
count = count_expected_outputs(
3,
free_transfers=1,
max_total_hit=0,
next_gw=1,
max_transfers=2,
chip_gw_dict={},
)
assert count == 13
# Start with 2 FT and no unused
# Include:
# (0, 0, 0), (1, 1, 1), (1, 1, 2), (1, 2, 0), (1, 2, 1), (1, 2, 2), (2, 0, 1),
# (2, 0, 2), (2, 1, 0), (2, 1, 1), (2, 1, 2), (2, 2, 0), (2, 2, 1), (2, 2, 2)
# Exclude:
# (0, 0, 1), (0, 0, 2), (0, 1, 0), (0, 1, 1), (0, 1, 2), (0, 2, 0), (0, 2, 1),
# (0, 2, 2), (1, 0, 0), (1, 0, 1), (1, 0, 2), (1, 1, 0), (2, 0, 0)
def test_count_expected_outputs_no_chips_2ft_no_unused():
count = count_expected_outputs(
3,
free_transfers=2,
max_total_hit=None,
allow_unused_transfers=False,
next_gw=1,
max_transfers=2,
)
assert count == 14
# Wildcard, 2 weeks, no constraints
# Strategies:
# (0, 0), (0, 1), (0, 2), (0, 'W'), (1, 0), (1, 1), (1, 2), (1, 'W'), (2, 0),
# (2, 1), (2, 2), (2, 'W'), ('W', 0), ('W', 1), ('W', 2)
def test_count_expected_wildcard_allowed_no_constraints():
count = count_expected_outputs(
2,
free_transfers=1,
max_total_hit=None,
allow_unused_transfers=True,
next_gw=1,
max_transfers=2,
chip_gw_dict={
1: {"chips_allowed": ["wildcard"]},
2: {"chips_allowed": ["wildcard"]},
3: {"chips_allowed": ["wildcard"]},
},
)
assert count == 15
# Bench boost, 2 weeks, no constraints
# Strategies:
# (0, 0), (0, 1), (0, 2), (0, 'B0'), (0, 'B1'), (0, 'B2'), (1, 0), (1, 1), (1, 2),
# (1, 'B0'), (1, 'B1'), (1, 'B2'), (2, 0), (2, 1), (2, 2), (2, 'B0'), (2, 'B1'),
# (2, 'B2'), ('B0', 0), ('B0', 1), ('B0', 2), ('B1', 0), ('B1', 1), ('B1', 2),
# ('B2', 0), ('B2', 1), ('B2', 2),
def count_expected_bench_boost_allowed_no_constraints():
count = count_expected_outputs(
2,
free_transfers=1,
max_total_hit=None,
allow_unused_transfers=True,
next_gw=1,
max_transfers=2,
chip_gw_dict={
1: {"chips_allowed": ["bench_boost"]},
2: {"chips_allowed": ["bench_boost"]},
3: {"chips_allowed": ["bench_boost"]},
},
)
assert count == 27
# Force playing wildcard in first week
# Strategies:
# ("W",0), ("W,1), ("W",2)
def count_expected_play_wildcard_no_constraints():
count = count_expected_outputs(
2,
free_transfers=1,
max_total_hit=None,
allow_unused_transfers=True,
next_gw=1,
max_transfers=2,
chip_gw_dict={
1: {"chip_to_play": "wildcard", "chips_allowed": []},
2: {"chip_to_play": None, "chips_allowed": []},
},
)
assert count == 3
# Force playing free hit in first week, 2FT, don't allow unused
# Strategies:
# (0,0), ("F",1), ("F",2)
def count_expected_play_free_hit_no_unused():
count = count_expected_outputs(
2,
free_transfers=2,
max_total_hit=None,
allow_unused_transfers=False,
next_gw=1,
max_transfers=2,
chip_gw_dict={
1: {"chip_to_play": "free_hit", "chips_allowed": []},
2: {"chip_to_play": None, "chips_allowed": []},
},
)
assert count == 3
|
the-stack_0_10878 | import numpy as np
from sklearn import datasets
from lightgbm.sklearn import LGBMRegressor
from hummingbird.ml import convert
import onnxruntime
import torch
x, y = datasets.load_wine(return_X_y=True)
x = x.astype(np.float32)
model = LGBMRegressor(n_estimators=10)
model.fit(x, y)
preds = model.predict(x)
pytorch_model = convert(model, "pytorch")
torch.onnx.export(
pytorch_model.model,
(torch.from_numpy(x)),
"model.onnx",
input_names=["input"],
output_names=["output"],
dynamic_axes={"input": {0: "batch"}, "output": {0: "batch"}},
)
np.savez_compressed(
open("io.npz", "wb"), input=x[:1], output=preds[:1],
)
# sanity check - onnxruntime inference
sess = onnxruntime.InferenceSession("model.onnx")
outputs = sess.run(None, {"input": x[:1]})[0][:, 0]
assert np.allclose(outputs, preds[:1])
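# Follow-up sketch (not part of the original script): the export can be
# re-validated later from the saved artifacts alone. The file names match those
# written above; everything else here is illustrative.
#
#   data = np.load("io.npz")
#   sess2 = onnxruntime.InferenceSession("model.onnx")
#   out = sess2.run(None, {"input": data["input"]})[0][:, 0]
#   assert np.allclose(out, data["output"])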
|
the-stack_0_10879 | # Copyright 2022 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
import pytest
import main
def test_main(capsys):
main.main()
out, _ = capsys.readouterr()
expected = "Completed Task #0."
assert expected in out
def test_env_vars():
with pytest.raises(Exception, match=r".*failed.*"):
main.main(fail_rate="0.999999")
def test_bad_env_vars(capsys):
main.main(fail_rate="2") # Does not fail, so retry is not triggered
out, _ = capsys.readouterr()
assert "Invalid FAIL_RATE env var value" in out
def test_run_script():
output = (
subprocess.run(
["python3", "main.py"],
stdout=subprocess.PIPE,
check=True,
)
.stdout.strip()
.decode()
)
assert "Completed" in output
my_env = {"FAIL_RATE": "0.99999999"}
with pytest.raises(subprocess.CalledProcessError, match=r".*non-zero.*"):
subprocess.run(
["python3", "main.py"],
env=my_env,
stderr=subprocess.PIPE,
check=True,
)
|
the-stack_0_10881 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class LoadBalancerFrontendIPConfigurationsOperations(object):
"""LoadBalancerFrontendIPConfigurationsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name, # type: str
load_balancer_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.LoadBalancerFrontendIPConfigurationListResult"]
"""Gets all the load balancer frontend IP configurations.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either LoadBalancerFrontendIPConfigurationListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_07_01.models.LoadBalancerFrontendIPConfigurationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LoadBalancerFrontendIPConfigurationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('LoadBalancerFrontendIPConfigurationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/frontendIPConfigurations'} # type: ignore
def get(
self,
resource_group_name, # type: str
load_balancer_name, # type: str
frontend_ip_configuration_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.FrontendIPConfiguration"
"""Gets load balancer frontend IP configuration.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param frontend_ip_configuration_name: The name of the frontend IP configuration.
:type frontend_ip_configuration_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FrontendIPConfiguration, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_07_01.models.FrontendIPConfiguration
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FrontendIPConfiguration"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'frontendIPConfigurationName': self._serialize.url("frontend_ip_configuration_name", frontend_ip_configuration_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('FrontendIPConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/frontendIPConfigurations/{frontendIPConfigurationName}'} # type: ignore
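# Illustrative usage sketch (not part of the generated file): as the class
# docstring notes, this operations group is reached through a service client
# attribute rather than instantiated directly. The credential, subscription and
# resource names below are assumptions about the caller's environment.
#
#   network_client = NetworkManagementClient(credential, subscription_id)
#   configs = network_client.load_balancer_frontend_ip_configurations.list(
#       "my-resource-group", "my-load-balancer")
#   for ip_config in configs:
#       print(ip_config.name)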
|
the-stack_0_10884 | # -*- coding:utf-8 -*-
import os
from concurrent.futures.thread import ThreadPoolExecutor
from flask_restful import Resource, reqparse, request
from flask import g, app
from common.log import loggers
from common.audit_log import audit_log
from common.db import DB
from common.utility import uuid_prefix, salt_api_for_product
from common.sso import access_required
import json
from common.xlsx import Xlsx
from fileserver.git_fs import gitlab_project, gitlab_project_name
from system.user import update_user_privilege, update_user_product
from common.const import role_dict
from fileserver.rsync_fs import rsync_config
logger = loggers()
parser = reqparse.RequestParser()
parser.add_argument("host_id", type=str, required=True, trim=True)
parser.add_argument("target_id", type=str, default='', trim=True)
parser.add_argument("target", type=str, default='', trim=True)
parser.add_argument("IP", type=str, default='', trim=True)
parser.add_argument("location", type=str, default='', trim=True)
parser.add_argument("model", type=str, default='', trim=True)
parser.add_argument("type", type=str, default='', trim=True)
parser.add_argument("project", type=str, default='', trim=True)
parser.add_argument("client", type=str, default='', trim=True)
parser.add_argument("pool", type=str, default='', trim=True)
parser.add_argument("path", type=str, default='', trim=True)
parser.add_argument("key_word", type=str, default='', trim=True)
parser.add_argument("file_name", type=str, default='', trim=True)
parser.add_argument("cipher", type=str, default='', trim=True)
class Target(Resource):
@access_required(role_dict["common_user"])
def get(self, target_id):
db = DB()
status, result = db.select_by_id("target", target_id)
db.close_mysql()
if status is True:
if result:
return {"data": result, "status": True, "message": ""}, 200
else:
return {"status": False, "message": "%s does not exist" % target_id}, 404
else:
return {"status": False, "message": result}, 500
@access_required(role_dict["product"])
def delete(self, target_id):
db = DB()
status, result = db.delete_by_id("target", target_id)
db.close_mysql()
logger.info('delete:' + str(result))
if status is not True:
logger.error("Delete product error: %s" % result)
return {"status": False, "message": result}, 500
if result == 0:
return {"status": False, "message": "%s does not exist" % target_id}, 404
return {"status": True, "message": ""}, 200
@access_required(role_dict["product"])
def put(self, target_id):
args = parser.parse_args()
logger.info(args['host_id'])
args["id"] = target_id
logger.info('id:' + target_id)
del args['path'], args['key_word'], args['file_name'], args['target_id'], args['cipher']
target = args
db = DB()
status, result = db.select_by_id('target', target_id)
origion_IP = result['IP']
host_id = result['host_id']
args['host_id'] = host_id
if origion_IP != args['IP']:
status, message = judge_target_IP_exist(args['IP'], args['host_id'])
if status is not True:
return {"status": False, "message": message}, 500
status, result = db.update_by_id("target", json.dumps(target, ensure_ascii=False), target_id)
db.close_mysql()
if status is not True:
logger.error("Modify target: %s" % result)
return {"status": False, "message": result}, 500
return {"status": True, "message": result}, 200
class TargetList(Resource):
@access_required(role_dict["common_user"])
def get(self):
logger.info("TargetLIST")
host_id = request.args.get("host_id")
db = DB()
status, result = db.select("target", "where data -> '$.host_id'='%s'" % host_id)
if status is True:
target_list = result
else:
db.close_mysql()
return {"status": False, "message": result}, 500
db.close_mysql()
return {"data": target_list, "status": True, "message": ""}, 200
@access_required(role_dict["product"])
def post(self):
args = parser.parse_args()
args["id"] = uuid_prefix("t")
del args['path'], args['key_word'], args['file_name'], args['target_id'], args['cipher']
target = args
db = DB()
status, message = judge_target_IP_exist(args['IP'], args['host_id'])
if status is True:
insert_status, insert_result = db.insert("target", json.dumps(target, ensure_ascii=False))
if insert_status is not True:
db.close_mysql()
return {"status": False, "message": str(insert_result)}, 500
else:
db.close_mysql()
return {"status": False, "message": message}, 500
db.close_mysql()
return {"status": True, "message": message}, 200
def judge_target_IP_exist(IP, host_id):
db = DB()
status, result = db.select("target", "where data -> '$.IP'='%s' AND data -> '$.host_id'='%s'" % (
IP, host_id))
if status is not True:
return False, 'select error'
else:
if len(result) == 0:
return True, ''
else:
return False, 'IP already exists'
# Upload file
class UploadTarget(Resource):
@access_required(role_dict["common_user"])
def post(self):
logger.info("UploadTarget")
args = parser.parse_args()
host_id = args['host_id']
file = request.files['file']
file.save(os.path.join('/tmp', file.filename))
db = DB()
try:
xlsx_file = Xlsx(os.path.join('/tmp', file.filename))
xlsx_file.read()
config_db_result = xlsx_file.export_db()
targets = config_db_result.split(';')
status, set_repeat = self.get_repeat_target(targets)
if not status:
logger.info('存在重复IP')
return {"status": True, "message": "存在重复IP!为:" + str(set_repeat)}, 200
exist_ip_list = []
for i in range(0, len(targets) - 1):
target_dic = eval(targets[i])
target_dic['host_id'] = host_id
target_dic['id'] = uuid_prefix('t')
logger.info(str(target_dic))
status, message = judge_target_IP_exist(target_dic['IP'], host_id)
if status:
insert_status, insert_result = db.insert("target", json.dumps(target_dic, ensure_ascii=False))
if insert_status is not True:
logger.error("error:" + insert_result)
return {"status": False, "message": str(insert_result)}, 200
else:
exist_ip_list.append(target_dic['IP'])
if len(exist_ip_list) == 0:
return {"status": True, "message": ""}, 200
else:
return {"status": False, "message": "表格中有已经存在的IP:" + str(exist_ip_list) + ',其余IP已经添加完成'}, 200
except Exception as e:
logger.info('error:' + str(e))
return {"status": False, "message": str(e)}, 200
finally:
logger.info("close db")
db.close_mysql()
def get_repeat_target(self, target_list):
set_base = set()
set_repeat = set()
for i in range(0, len(target_list) - 1):
target_dic = eval(target_list[i])
key = target_dic['IP']
if set_base.__contains__(key):
set_repeat.add(key)
else:
set_base.add(key)
if set_repeat:
return False, set_repeat
else:
return True, set_repeat
class ConfigGenerate(Resource):
@access_required(role_dict["common_user"])
def post(self):
logger.info("ConfigGenerate")
db = DB()
# First, fetch all the required configuration parameters and process them
args = parser.parse_args()
host_id = args['host_id']
key_word = args['key_word']
path = args['path']
file_name = args['file_name']
path_str = str(path)
if path_str:
if path_str.endswith('/'):
path_str = path_str
else:
path_str = path_str + '/'
else:
path_str = '/usr/local/prometheus/conf.d/'
if file_name:
file_name = file_name
else:
file_name = 'snmpconf_' + key_word + '.json'
state, result = db.select('host', "where data -> '$.id'='%s'" % host_id)
if state is False:
return {"status": False, "message": '主机信息未知'}, 500
host = dict(result[0])
product_id = host['product_id']
minion_id = host['minion_id']
state, product_result = db.select('product', "where data -> '$.id'='%s'" % product_id)
if state is False:
return {"status": False, "message": 'product未知'}, 500
product_host = product_result[0]
master_id = product_host['salt_master_id']
salt_api = salt_api_for_product(product_id)
# Generate the config file for targets matching the keyword
status, result = db.select("target", "where data -> '$.host_id'='%s'" % host_id)
if status is True:
target_list = result
else:
db.close_mysql()
return {"status": False, "message": result}, 500
try:
strresult = '[\n'
for target in target_list:
model = str(target['model'])
if model.__contains__(key_word):
target_str = target.pop('target')
del target['host_id'], target['id']
resdic = {"targets": [target_str], "labels": target}
strresult += " " + str(resdic) + ',\n'
strresult = strresult[:-1] + '\n]'
except Exception as e:
return {"status": False, "message": '监控目标信息解析出错'}, 500
# Upload the file to GitLab
project_name_list = list(get_host_project(host))
logger.info('project_name_list' + str(project_name_list))
if len(project_name_list) == 0:
return {"status": False, "message": '该主机无归属项目'}, 200
elif len(project_name_list) > 1:
return {"status": False, "message": '该主机所属项目不唯一!' + str(project_name_list)}, 200
state, result = db.select('projects', "where data -> '$.name'='%s'" % project_name_list[0])
project_gitlab_name = result[0]['gitlab_name']
logger.info("project_gitlab_name:" + project_gitlab_name)
project, _ = gitlab_project_name(product_id, project_gitlab_name)
# Assemble the command
source = '/tmp/' + project_gitlab_name + '/' + minion_id + '/' + file_name
source_tmp = '/tmp/' + project_gitlab_name + '/' + minion_id + '/tmp_file'
dest = path_str + file_name
command = 'salt-cp ' + minion_id + ' ' + source_tmp + ' ' + dest
# Supported actions: create, delete, move, update
branch_name = "master"
data_create = {
'branch': branch_name,
'commit_message': command,
'actions': [
{
'action': "create",
'file_path': minion_id + '/' + file_name,
'content': strresult
}
]
}
data_update = {
'branch': branch_name,
'commit_message': command,
'actions': [
{
'action': "update",
'file_path': minion_id + '/' + file_name,
'content': strresult
}
]
}
if isinstance(project, dict):
return project, 500
else:
try:
project.commits.create(data_create)
except Exception as e:
# logger.info('update'+str(e))
project.commits.create(data_update)
# Verify permissions and execute the send operation
command_path = 'mkdir -p ' + path_str
logger.info('minion_id:' + minion_id)
salt_api.shell_remote_execution(minion_id, command_path)
# Use an intermediate temp file because the transferred file name may contain Chinese characters
command_list = []
command_list.append('cd /tmp/' + project_gitlab_name + ' \n ')
command_list.append('git pull \n ')
command_list.append('cp ' + source + ' ' + source_tmp + ' \n ')
command_list.append(command + ' \n ')
command_list.append('rm -f ' + source_tmp + ' \n ')
command_final = ''.join(command_list)
logger.info('command:' + command_final)
result = salt_api.shell_remote_execution([master_id], command_final)
logger.info('result:' + str(result))
if str(result).__contains__('True'):
return {"status": True, "message": '配置发送成功'}, 200
else:
return {"status": False, "message": '配置发送失败:' + str(result)}, 500
def get_host_project(host):
minion_id = host['minion_id']
db = DB()
status, group_list = db.select('groups', '')
project_name_list = []
try:
for group in group_list:
minion_list = list(group['minion'])
if minion_list.__contains__(minion_id):
project_name_list = project_name_list + group['projects']
except Exception as e:
logger.info('Exception:' + str(e))
db.close_mysql()
return project_name_list
class PingList(Resource):
@access_required(role_dict["common_user"])
def post(self):
logger.info("PingList")
args = parser.parse_args()
db = DB()
host_id = args['host_id']
cipher = args['cipher']
state, result = db.select('host', "where data -> '$.id'='%s'" % host_id)
minion_id = result[0]['minion_id']
logger.info('minion_id:' + minion_id)
product_id = result[0]['product_id']
salt_api = salt_api_for_product(product_id)
state, targets = db.select('target', "where data -> '$.host_id'='%s'" % host_id)
targets_not = []
thread_pool = ThreadPoolExecutor(max_workers=10, thread_name_prefix="target_")
futures = []
for target in targets:
future = thread_pool.submit(pingTarget, target, minion_id, salt_api, cipher)
futures.append(future)
thread_pool.shutdown(wait=True)
for future in futures:
result = future.result()
logger.info(str(result['status']))
if str(result['status']).__contains__("Timeout") | str(result['status']).__contains__("Unknown"):
targets_not.append(result["target"])
return {"status": True, "message": '配置发送成功', "data": targets_not}, 200
def pingTarget(target, minion_id, salt_api, cipher):
command = 'snmpwalk -v 2c -t 0.5 -c \'' + cipher + '\' ' + target["IP"] + ' 1.3.6.1.2.1.1.1'
logger.info(command)
exec_result = salt_api.shell_remote_execution([minion_id], command)
result = {'target': target, 'status': exec_result}
return result
class SinglePing(Resource):
@access_required(role_dict["common_user"])
def post(self):
logger.info("SinglePing")
args = parser.parse_args()
target_id = args['target_id']
cipher = args['cipher']
# Get the required parameters: minion_id, product_id, target_ip
db = DB()
state, result = db.select_by_id('target', target_id)
target_ip = result['IP']
host_id = result['host_id']
state, result = db.select_by_id('host', host_id)
minion_id = result['minion_id']
product_id = result['product_id']
salt_api = salt_api_for_product(product_id)
command = 'snmpwalk -v 2c -t 0.5 -c \'' + cipher + '\' ' + target_ip + ' 1.3.6.1.2.1.1.1'
logger.info('command:'+command)
sysDescr = salt_api.shell_remote_execution([minion_id], command)
response_data = {}
if str(sysDescr[minion_id]).__contains__("Timeout") | str(sysDescr[minion_id]).__contains__("Unknown"):
response_data['status'] = '设备网络不通'
else:
response_data['status'] = "设备正常"
response_data['sysDescr'] = str(sysDescr[minion_id])
return {"status": True, "message": '成功', "data": response_data}, 200
class TruncateTarget(Resource):
@access_required(role_dict["common_user"])
def post(self):
logger.info("TruncateTarget")
args = parser.parse_args()
host_id = args['host_id']
db = DB()
state, result = db.delete('target', "where data -> '$.host_id'='%s'" % host_id)
if state:
return {"status": True, "message": '成功'}, 200
else:
return {"status": False, "message": '删除失败'}, 500
|
the-stack_0_10885 | # 3_cmakefile_gen.py - helper to create CMakeLists.txt files
# for directory tree of IDL files, to build as merged typesupport static library
# Started 2020Nov09 Neil Puthuff
import sys
import os
file_header = '''# Copyright 2020 Real-Time Innovations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
include(ConnextDdsRosDdsTypes)
'''
file_midblock = '''
# for unbounded strings & sequences use -DUNBOUNDED_ALL on CMake cmdline
if(UNBOUNDED_ALL)
set(extra_params UNBOUNDED)
endif()
connextdds_generate_ros_dds_types(
LANG ${LANG}
OUTPUT_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}"
IDL_FILES ${idl_files}
INCLUDE_DIRS ${top_level_source_dir}
${extra_params}
)
'''
cmake_file_opened = False
cmake_libname = ""
# walk along the provided paths, searching for IDL files
for root, dirs, files in os.walk(sys.argv[1]):
# if dirs are listed, prepare to create a CMakeLists.txt file
if len(dirs) > 0:
# there are subdirs; this might be the place to put a CMakeLists.txt file
# if a CMakeLists.txt file is already opened, finish it
if cmake_file_opened == True:
# write remainder of file, then close it.
f_cmake.write("{}".format(file_midblock))
f_cmake.write("add_library( {} OBJECT\n ".format(cmake_libname) + "${generated_file_list}\n)\n\n")
f_cmake.write("set_property(TARGET {} PROPERTY \n POSITION_INDEPENDENT_CODE ON\n)\n\n".format(cmake_libname))
f_cmake.write("target_include_directories({} PRIVATE \n ".format(cmake_libname) + "${CONNEXTDDS_INCLUDE_DIRS}\n ${top_level_binary_dir}\n)\n\n")
f_cmake.write("target_compile_definitions({} PRIVATE \n ".format(cmake_libname) + "${CONNEXTDDS_COMPILE_DEFINITIONS}\n)\n\n")
f_cmake.write("add_dependencies({} \n stdlibrary \n)\n".format(cmake_libname))
f_cmake.close()
cmake_file_opened = False
cmake_file_root = root
# check for IDL files in this dir
if len(files) > 0:
for fcand in files:
if fcand.endswith('.idl'):
if cmake_file_opened == False:
# open file, init with header and such, make libname
f_cmake = open('{}/CMakeLists.txt'.format(cmake_file_root), "w")
f_cmake.write("{}\n".format(file_header))
cmake_file_opened = True
# create libname for this directory
cmake_libname = cmake_file_root.strip(".").strip("/").strip("\\").replace("_", "") + "library"
print("CMakeLists.txt file in {} for {}".format(cmake_file_root, cmake_libname))
# add IDL file to CMakeLists.txt file
myDir = os.path.split(root)
f_cmake.write("list(APPEND idl_files \"${CMAKE_CURRENT_SOURCE_DIR}/" + "{}/{}\")\n".format(myDir[1] ,fcand))
if cmake_file_opened == True:
# write remainder of file, then close it.
f_cmake.write("{}".format(file_midblock))
f_cmake.write("add_library( {} OBJECT\n ".format(cmake_libname) + "${generated_file_list}\n)\n\n")
f_cmake.write("set_property(TARGET {} PROPERTY \n POSITION_INDEPENDENT_CODE ON\n)\n\n".format(cmake_libname))
f_cmake.write("target_include_directories({} PRIVATE \n ".format(cmake_libname) + "${CONNEXTDDS_INCLUDE_DIRS}\n ${top_level_binary_dir}\n)\n\n")
f_cmake.write("target_compile_definitions({} PRIVATE \n ".format(cmake_libname) + "${CONNEXTDDS_COMPILE_DEFINITIONS}\n)\n\n")
f_cmake.write("add_dependencies({} \n stdlibrary \n)\n".format(cmake_libname))
f_cmake.close()
cmake_file_opened = False
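# Example invocation (illustrative; the IDL root directory name is an assumption):
#   python 3_cmakefile_gen.py ./ros_data_types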
|
the-stack_0_10886 | import sys
import os
import pdb
import pathlib
import time
import base64
sys.path.append(os.path.join(str(pathlib.Path(__file__).parent.resolve()),'../../lib'))
from module import Module
class Exfiltration(Module):
description = 'This module downloads the specified files on victim to the attacker'
@classmethod
def module_options(cls):
h = {
'path' : {'desc' : 'Directory on the attacker machine where the files are downloaded. Default is shared/victim_data/<victim_id>. NOTE : The default path can be accessed in both docker and host, accessibility of custom path will depend on where you run the program.', 'required' : False},
'location' : {'desc': 'Path of directory or file on victim to exfiltrate.','required': True}
}
return h
def __init__(self,name,utility,language,options):
## We are loading the script in the script variable here
super(Exfiltration, self).__init__(name,self.description,utility,language,getattr(self,f"script_{language}")(options))
    ## This method is called when the victim returns the output for the task of this module. What is to be done with the output is defined here
def handle_task_output(self,data,options,victim_id, task_id):
## Default Dumping path
dump_path = os.path.join(str(pathlib.Path(__file__).parent.resolve()),'../../shared/victim_data',victim_id)
if not os.path.exists(dump_path):
os.makedirs(dump_path)
filename = f"exfiltration_file_{task_id}.zip"
filepath = os.path.join(dump_path,filename)
if 'path' in options:
if not os.path.exists(options['path']):
print(f"Provided save path does not exists - {options['path']}. Saving to default directory {filepath}")
else:
filepath = os.path.join(options['path'],filename)
## Check if we have write perms else save to /tmp/SpyderC2
if not os.access(os.path.dirname(filepath), os.W_OK):
dump_path = os.path.join('/tmp','SpyderC2',victim_id)
print(f"No write access to {os.path.dirname(filepath)}. Saving to {dump_path}")
if not os.path.exists(dump_path):
os.makedirs(dump_path,exist_ok=True)
filepath = os.path.join(dump_path,filename)
## Dump the zip file
with open(filepath, "wb") as f:
if self.language == 'python':
f.write(data)
else:
                ## In case of powershell the data is sent base64-encoded
decoded = base64.b64decode(data)
f.write(decoded)
f.close()
output = filepath
return output
def script_python(self,options):
script = """def execute_command():
import os
from os.path import isfile, join
import shutil
location = '##location##'
if isfile(location):
path = shutil.make_archive(location, 'zip',os.path.dirname(location), location)
elif os.path.isdir(location):
path = shutil.make_archive(location, 'zip',location, location)
else:
## Doesn't exist
pass
content = open(path,"rb").read()
return content"""
## TODO - make this through a loop for all params
## TODO - this should be required parameter
if 'location' in options:
value = options['location']
else:
value = options['stager_location']
script = script.replace('##location##',value.replace('\\','\\\\'))
return script
def script_powershell(self,options):
script = """Compress-Archive -Path ##location## -DestinationPath ##location##.zip -Force
$bytes = [System.IO.File]::ReadAllBytes('##location##.zip')
$encoded = [System.Convert]::ToBase64String($bytes)
return $encoded"""
if 'location' in options:
value = options['location']
else:
value = options['stager_location']
script = script.replace('##location##',value)
return script
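# Usage sketch (hypothetical values; the surrounding C2 framework supplies the
# real utility object, victim id and task id):
#   options = {'location': '/home/victim/Documents'}
#   module = Exfiltration('exfiltration', utility, 'python', options)
#   # When the victim returns the zipped data, the framework would call:
#   #   module.handle_task_output(data, options, 'victim01', '1')
#   # which writes shared/victim_data/victim01/exfiltration_file_1.zip by default.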
|
the-stack_0_10887 | from typing import Sequence
from ..types import TealType, require_type
from ..errors import TealInputError
from ..ir import TealOp, Op, TealSimpleBlock
from .expr import Expr
class NaryExpr(Expr):
"""N-ary expression base class.
This type of expression takes an arbitrary number of arguments.
"""
def __init__(self, op: Op, inputType: TealType, outputType: TealType, args: Sequence[Expr]):
if len(args) < 2:
raise TealInputError("NaryExpr requires at least two children.")
for arg in args:
if not isinstance(arg, Expr):
raise TealInputError("Argument is not a pyteal expression: {}".format(arg))
require_type(arg.type_of(), inputType)
self.op = op
self.outputType = outputType
self.args = args
def __teal__(self):
start = None
end = None
for i, arg in enumerate(self.args):
argStart, argEnd = arg.__teal__()
if i == 0:
start = argStart
end = argEnd
else:
end.setNextBlock(argStart)
opBlock = TealSimpleBlock([TealOp(self.op)])
argEnd.setNextBlock(opBlock)
end = opBlock
return start, end
def __str__(self):
ret_str = "(" + str(self.op),
for a in self.args:
ret_str += " " + a.__str__()
ret_str += ")"
return ret_str
def type_of(self):
return self.outputType
NaryExpr.__module__ = "pyteal"
def And(*args: Expr) -> NaryExpr:
"""Logical and expression.
Produces 1 if all arguments are nonzero. Otherwise produces 0.
All arguments must be PyTeal expressions that evaluate to uint64, and there must be at least two
arguments.
Example:
``And(Txn.amount() == Int(500), Txn.fee() <= Int(10))``
"""
return NaryExpr(Op.logic_and, TealType.uint64, TealType.uint64, args)
def Or(*args: Expr) -> NaryExpr:
"""Logical or expression.
Produces 1 if any argument is nonzero. Otherwise produces 0.
All arguments must be PyTeal expressions that evaluate to uint64, and there must be at least two
arguments.
"""
return NaryExpr(Op.logic_or, TealType.uint64, TealType.uint64, args)
def Concat(*args: Expr) -> NaryExpr:
"""Concatenate byte strings.
Produces a new byte string consisting of the contents of each of the passed in byte strings
joined together.
All arguments must be PyTeal expressions that evaluate to bytes, and there must be at least two
arguments.
Example:
``Concat(Bytes("hello"), Bytes(" "), Bytes("world"))``
"""
return NaryExpr(Op.concat, TealType.bytes, TealType.bytes, args)
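# Usage sketch (assuming Txn and Int are imported from pyteal): the N-ary
# expressions can be nested to build approval logic, e.g.
#   program = And(
#       Txn.fee() <= Int(1000),
#       Or(Txn.amount() == Int(0), Txn.amount() >= Int(100)),
#   )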
|
the-stack_0_10889 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Experimental module transforms JAX functions to be executed by TensorFlow."""
import functools
import re
import string
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import jax
from jax import ad_util, api, api_util, config
from jax import core, custom_derivatives, dtypes
from jax import linear_util as lu
from jax import numpy as jnp
from jax import random, tree_util
from jax._src import util
from jax.api_util import flatten_fun
from jax.interpreters import ad, batching
from jax.interpreters import masking
from jax.interpreters import pxla
from jax.interpreters import sharded_jit
from jax.interpreters import xla
from jax._src.lax import lax
from jax._src.lax import linalg as lax_linalg
from jax._src.lax import control_flow as lax_control_flow
from jax._src.lax import fft as lax_fft
import jax._src.random
from jax.lib import xla_bridge as xb
import numpy as np
import tensorflow as tf # type: ignore[import]
# These don't have public equivalents.
# pylint: disable=g-direct-tensorflow-import
from tensorflow.compiler.tf2xla.python import xla as tfxla # type: ignore[import]
from tensorflow.compiler.xla import xla_data_pb2 # type: ignore[import]
from tensorflow.compiler.xla.experimental.xla_sharding import xla_sharding # type: ignore[import]
# pylint: enable=g-direct-tensorflow-import
from jax.lib import xla_client
# The scope name needs to be a valid TensorFlow name. See
# https://github.com/tensorflow/tensorflow/blob/r2.3/tensorflow/core/framework/node_def_util.cc#L731
_VALID_SCOPE_REGEX = re.compile("^[A-Za-z0-9.][A-Za-z0-9_.\\/>-]*$")
_INVALID_SCOPE_CHAR = re.compile("[^A-Za-z0-9_.\\/>-]")
def _sanitize_scope_name(name):
scope_name = _INVALID_SCOPE_CHAR.sub("_", name)
if not _VALID_SCOPE_REGEX.match(scope_name):
scope_name = ".{}".format(scope_name)
return scope_name
# A value suitable in a TF tracing context: tf.Tensor, tf.Variable,
# or Python scalar or numpy.ndarray. (A tf.EagerTensor is a tf.Tensor.)
TfVal = Any
DType = Any
def _is_tfval(v: TfVal) -> bool:
if isinstance(v, (tf.Tensor, tf.Variable)):
return True
try:
# Note: this conversion is overkill and just intended as a type check; this code
# is in principle only run if core.skip_checks is False.
# TODO: it is not true that this code is run only without skip_checks
_safe_convert_to_tensor(v)
return True
except ValueError:
return False
def _safe_convert_to_tensor(val, dtype=None) -> TfVal:
dtype = dtype if dtype else (val.dtype if hasattr(val, "dtype") else None)
conversion_type = to_tf_dtype(dtype) if dtype else None
# We can convert directly, because all dtypes (even bfloat16) are the same
# in JAX and TF.
return tf.convert_to_tensor(val, dtype=conversion_type)
# The implementation rules for primitives. The rule will be called with the
# arguments (TfVal) and must return TfVal (or a sequence thereof,
# if primitive.multiple_results). The vast majority of primitives do not need
# to worry about core.unit inputs or results. The exception are primarily the
# control-flow primitives.
tf_impl: Dict[core.Primitive,
Callable[..., Any]] = {}
# Some primitive implementation rules need the abstract values of arguments
# and the results. This is the case for the primitives implemented using
# _convert_jax_impl and those that need to adjust the shape of the outputs
# due to missing TF shape inference rules for TFXLA ops. The rules for these
# primitives should be added to `tf_impl_with_avals`.
# The abstract values are passed to the implementation as two special kwargs
# `_in_avals` (a tuple of core.AbstractValue) and `_out_aval` (a
# core.AbstractValue, or a tuple thereof when primitive.multiple_results).
tf_impl_with_avals: Dict[core.Primitive,
Callable[..., Any]] = {}
# XLA is not linked in all environments; when converting a primitive, if this
# variable is disabled, we try harder to use only standard TF ops if they are
# applicable to the concrete use case; if the resulting conversion path ends up
# requiring a TFXLA operation, an exception is thrown instead.
_enable_xla = True
def _xla_path_disabled_error(primitive_name: str) -> Exception:
assert not _enable_xla
return NotImplementedError(
f"Call to {primitive_name} can only be converted through TFXLA, but "
"XLA is disabled")
def convert(fun: Callable, *,
in_shapes: Optional[Sequence[Any]]=None,
with_gradient=True, enable_xla=True) -> Callable:
"""Transforms `fun` to be executed by TensorFlow.
See [README](https://github.com/google/jax/blob/master/jax/experimental/jax2tf/README.md)
for more details about usage and common problems.
Args:
fun: Function to be transformed. Its arguments and return value should be
JAX arrays, or (nested) standard Python containers (tuple/list/dict)
thereof.
in_shapes: an optional sequence of shape specifications,
one for each argument of the function to be converted. Default is a
list of `None`, in which case the argument shape specifications are taken
from the shapes of the actual arguments.
A non-default `in_shapes` is needed sometimes when the actual arguments
have partially-specified shapes. If an argument is a pytree, then the
shape specification must be a matching pytree or `None`.
See [how optional parameters are matched to arguments](https://jax.readthedocs.io/en/latest/pytrees.html#applying-optional-parameters-to-pytrees).
A shape specification should be a string, with comma-separated dimension
specifications, and optionally wrapped in parentheses. A dimension
specification is either a number, or the placeholder `_`, or a lowercase
word denoting a name for a dimension variable, or an arithmetic expression
with integer literals, dimension variables, and the operators `+` and `*`.
In presence of dimension variables, the conversion is done with a
shape abstraction that allows any concrete value for the variable.
Examples of shape specifications:
* `[None, "(batch, 16)"]`: no specification for the first argument (takes
the shape from the actual argument); the second argument is a 2D
array with the first dimension size set to a variable `batch` and the
second dimension 16.
* `["(batch, _)", "(batch,)"]`: the leading dimensions of the two arguments
must match. The second dimension of the first argument is taken from the
actual argument shape.
* `[(batch, 2 * batch)]`: a 2D matrix with the second dimension having
double the size of the first one.
See [the README](https://github.com/google/jax/blob/master/jax/experimental/jax2tf/README.md#shape-polymorphic-conversion)
for more details.
with_gradient: if set, will add a tf.custom_gradient to the converted
function, by converting the ``jax.vjp(fun)``. Only first-order
differentiation is supported for now. If the converted function is
saved in a SavedModel, the custom gradients are currently lost and
an error will be raised if a gradient computation is attempted.
This is due to a current bug in TensorFlow.
enable_xla: if unset, the converter will try harder to use pure TF ops to
convert the function, and raise an error if it can not be converted
without resorting to XLA ops (default: True).
Returns:
A version of `fun` that expects TfVals as arguments (or
tuple/lists/dicts) thereof, and returns TfVals as outputs.
"""
global _enable_xla
_enable_xla = enable_xla
api._check_callable(fun)
def converted_fun(*args: TfVal) -> TfVal:
# TODO: is there a better way to check if we are inside a transformation?
if config.omnistaging_enabled:
if not core.trace_state_clean():
raise ValueError("convert must be used outside all JAX transformations."
+ f"Trace state: {core.thread_local_state.trace_state}")
else:
if (core.thread_local_state.trace_state.trace_stack.downward or
core.thread_local_state.trace_state.trace_stack.upward or
core.thread_local_state.trace_state.substack != [core.Sublevel(0)]):
raise ValueError("convert must be used outside all JAX transformations."
+ f"Trace state: {core.thread_local_state.trace_state}")
def check_arg(a):
if not _is_tfval(a):
msg = (f"Argument {a} of type {type(a)} of jax2tf.convert(f) should "
"be NumPy array, scalar, tf.Variable, or tf.Tensor")
raise TypeError(msg)
tree_util.tree_map(check_arg, args)
# Name input tensors
args = tuple(
tree_util.tree_map(lambda x, i=i: tf.identity(x, f"jax2tf_arg_{i}"), a) # type: ignore
for i, a in enumerate(args))
# This function may take pytrees of TfVals. We can only set
# tf.custom_gradient on functions that take a flat argument list.
args_flat, in_tree = tree_util.tree_flatten((args, {}))
if in_shapes is None:
in_shapes_ = (None,) * len(args)
else:
if not isinstance(in_shapes, Sequence) or len(args) != len(in_shapes):
msg = ("in_shapes must be a sequence as long as the argument list "
f"({len(args)}). Got in_shapes={in_shapes}.")
raise TypeError(msg)
in_shapes_ = tuple(in_shapes)
# Expand the in_shapes to match the argument pytree
in_shapes_flat = tuple(api_util.flatten_axes("jax2tf.convert in_shapes",
in_tree.children()[0], in_shapes_))
# Construct the abstract values for the flat arguments, possibly based on
# the input shapes and the in_shapes if given. May create new shape
# variables.
args_avals_flat = _input_avals(args_flat, in_shapes_flat)
f = lu.wrap_init(fun)
# out_tree_thunk() will be the output tree, after running _interpret_fun.
flat_fun, out_tree_thunk = flatten_fun(f, in_tree)
# Prepare the grad_fn for tf.custom_gradient.
def converted_grad_fn(*out_cts_flat: TfVal,
_out_cts_avals: Sequence[core.AbstractValue],
variables=None):
if variables:
raise ValueError("Unexpected variables used in forward pass. "
"This should not happen for first-order differentiation. "
f"variables={variables}")
def fun_vjp_jax(args_jax, out_cts_jax):
# One may think that we can get the pullback while we are converting
# the main function in the first place. That is problematic, because the
# pullback may contain captured tracers from the conversion of the
# main function. Those tracers will confuse the conversion of the
# pullback. So, we construct the vjp anew.
_, pullback_jax = jax.vjp(fun, *args_jax)
return pullback_jax(out_cts_jax)
if in_shapes is None:
vjp_in_shapes = None
else:
args_in_shapes = tree_util.tree_unflatten(in_tree.children()[0], in_shapes_flat)
out_cts_in_shapes = tree_util.tree_unflatten(
out_tree_thunk(),
tuple(str(out_aval.shape) for out_aval in _out_cts_avals)) # type: ignore
vjp_in_shapes = [args_in_shapes, out_cts_in_shapes]
out_cts = tree_util.tree_unflatten(out_tree_thunk(), out_cts_flat)
# TODO: enable higher-order gradients
with tf.name_scope("jax2tf_vjp"):
in_cts = convert(fun_vjp_jax, with_gradient=False,
in_shapes=vjp_in_shapes)(args, out_cts)
return in_cts
try:
global _shape_env
assert not _shape_env, f"Unexpected shape environment {_shape_env}"
_shape_env = _make_shape_env(args_avals_flat, args_flat)
if with_gradient:
@tf.custom_gradient
def converted_fun_flat_with_custom_gradient(*args_flat: TfVal) -> TfVal:
out_with_avals = _interpret_fun(flat_fun, args_flat, args_avals_flat)
outs, out_avals = util.unzip2(out_with_avals)
return (tuple(outs),
functools.partial(converted_grad_fn, _out_cts_avals=tuple(out_avals)))
out_flat = converted_fun_flat_with_custom_gradient(*args_flat)
else:
out_flat_raw = _interpret_fun(flat_fun, args_flat, args_avals_flat)
message = ("The jax2tf-converted function does not support gradients. "
"Use `with_gradient` parameter to enable gradients")
# We use PreventGradient, which is propagated through a SavedModel.
out_flat = [tf.raw_ops.PreventGradient(input=o, message=message)
for o, _ in out_flat_raw]
finally:
_shape_env = {}
out_flat = [tf.identity(x, "jax2tf_out") for x in out_flat]
out = tree_util.tree_unflatten(out_tree_thunk(), out_flat)
return out
return converted_fun
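# Usage sketch (a minimal example, not part of the library itself): convert a
# JAX function with a polymorphic batch dimension and call it on TF values.
#   f_tf = convert(lambda x: jnp.sin(x) * 2.0, in_shapes=["(batch, 16)"])
#   y = f_tf(tf.ones((8, 16), dtype=tf.float32))  # any batch size is accepted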
# Internals
def _interpret_fun(fun: lu.WrappedFun,
in_vals: Sequence[TfVal],
in_avals: Sequence[core.AbstractValue]
) -> Sequence[Tuple[TfVal, core.AbstractValue]]:
new_main = core.new_base_main if config.omnistaging_enabled else core.new_main
with new_main(TensorFlowTrace) as main: # type: ignore
fun = _interpret_subtrace(fun, main, in_avals)
out_vals: Sequence[Tuple[TfVal, core.AbstractValue]] = fun.call_wrapped(*in_vals)
del main
return tuple(out_vals)
def _convert_jax_impl(jax_impl: Callable, *, multiple_results=True) -> Callable:
"""Convert the JAX implementation of a primitive.
Args:
jax_impl: typically the impl-rule for a primitive, with signature
`(*args: JaxVal, **kwargs) -> Sequence[JaxVal]`. This function implements
a primitive in terms of other primitives.
multiple_results: whether `jax_impl` returns a sequence of results.
Returns:
a function with signature `(*args: TfVal, _in_avals, _out_aval, **kwargs) -> Sequence[TfVal]`.
"""
def wrapped(*tf_args: TfVal,
_in_avals: Sequence[core.AbstractValue],
_out_aval: core.AbstractValue, **kwargs) -> Sequence[TfVal]:
# We wrap the jax_impl under _interpret_fun to abstract the TF values
# from jax_impl and turn them into JAX abstract values.
def jax_impl_jax_args(*jax_args):
jax_results = jax_impl(*jax_args, **kwargs)
return jax_results if multiple_results else [jax_results]
tf_results_with_avals = _interpret_fun(lu.wrap_init(jax_impl_jax_args), tf_args, _in_avals)
tf_results, _ = util.unzip2(tf_results_with_avals)
return tf_results if multiple_results else tf_results[0]
return wrapped
@lu.transformation
def _interpret_subtrace(main: core.MainTrace,
in_avals: Sequence[core.AbstractValue],
*in_vals: TfVal):
trace = TensorFlowTrace(main, core.cur_sublevel())
in_tracers = tuple(TensorFlowTracer(trace, val, aval)
for val, aval in util.safe_zip(in_vals, in_avals))
# The outs may be core.unit, see comment in TensorFlowTrace.pure.
outs = yield in_tracers, {} # type: Sequence[Union[TfVal, core.Unit]]
out_tracers: Iterable[TensorFlowTracer] = map(trace.full_raise, outs) # type: ignore
out_vals_with_avals: Sequence[Tuple[TfVal, core.AbstractValue]] = (
tuple((t.val, t.aval) for t in out_tracers))
yield out_vals_with_avals
def _interpret_jaxpr(jaxpr: core.ClosedJaxpr, *args: TfVal) -> Sequence[TfVal]:
"""Evaluates a Jaxpr with tf.Tensor arguments.
The output is a sequence of TfVal (no `core.unit`), suitable for use with TF.
"""
fun: lu.WrappedFun = lu.wrap_init(core.jaxpr_as_fun(jaxpr))
out_with_avals = _interpret_fun(fun, args, jaxpr.in_avals)
return tuple(v for v, _ in out_with_avals)
### tracer
PolyDim = Union[int, masking.Poly] # A polymorphic shape dimension
def _poly_dim_to_tf_dim(dim: PolyDim) -> Optional[int]:
if isinstance(dim, int):
return dim
elif dim.is_constant:
return int(dim)
else:
return None
def _aval_to_tf_shape(aval: core.AbstractValue) -> Tuple[Optional[int], ...]:
"""Generate a TF shape, possibly containing None for polymorphic dimensions."""
return tuple(map(_poly_dim_to_tf_dim, aval.shape)) # type: ignore[attr-defined]
def _tfval_shape_dtype(val: TfVal) -> Tuple[Sequence[Optional[int]], DType]:
"""
Called for constants that occur in the program, or for input values to the
converted function. The returned shape may have unknown components, but
only when called for inputs.
"""
if isinstance(val, (tf.Tensor, tf.Variable)):
# May be partially known
return tuple(val.shape), to_jax_dtype(val.dtype)
else: # Must be a numeric value
assert core.skip_checks or _is_tfval(val), f"Non TfVal: {val}"
raw_aval = xla.abstractify(val)
return raw_aval.shape, raw_aval.dtype # type: ignore[attr-defined]
def _input_avals(args: Sequence[TfVal], in_shapes: Sequence[Optional[str]]) -> Sequence[core.AbstractValue]:
"""Abstract values for the input arguments."""
def input_aval(arg: TfVal, in_shape: Optional[str]) -> core.AbstractValue:
"""The abstract value for an input."""
raw_shape, dtype = _tfval_shape_dtype(arg)
if in_shape is None:
if any(d is None for d in raw_shape):
msg = ("in_shape must be specified when the argument "
f"shape {raw_shape} is partially known.")
raise TypeError(msg)
else:
return core.ShapedArray(raw_shape, dtype)
in_shape_spec = masking.parse_spec(in_shape)
if len(in_shape_spec) != len(raw_shape):
msg = (f"in_shape {in_shape} has different rank than actual argument "
f"shape {raw_shape}")
raise TypeError(msg)
try:
# TODO: improve finalize_spec error reporting, so we don't have to code our own
shape = masking.finalize_spec(in_shape_spec, raw_shape)
except TypeError:
msg = (f"in_shape {in_shape} has `_` placeholders for argument shape "
f"dimensions that are unknown: {raw_shape}")
raise TypeError(msg)
for dim_idx, (s, raw_s) in enumerate(util.safe_zip(shape, raw_shape)):
s_int: Optional[int] = _poly_dim_to_tf_dim(s)
if s_int != raw_s and s_int is not None:
msg = (f"in_shape {in_shape} (resolved to {shape}) does not match "
f"argument shape {raw_shape} in dimension {dim_idx}")
raise TypeError(msg)
return core.ShapedArray(shape, dtype)
return tuple(map(input_aval, args, in_shapes))
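# Example (informal): for an argument with TF shape (None, 16) and
# in_shape "(batch, _)", input_aval returns ShapedArray((batch, 16), float32)
# for a float32 argument, where "batch" is a shape variable to be solved from
# the actual argument via the shape environment below.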
# A shape environment maps shape variables to TfVal.
ShapeEnv = Dict[str, TfVal]
_shape_env = {} # type: ShapeEnv
def _eval_shape(shape: Sequence[PolyDim]) -> Sequence[TfVal]:
assert all(map(lambda x: x is not None, shape)), (
f"Argument shape should be a valid JAX shape but got {shape}")
return masking.eval_poly_shape(shape, _shape_env)
# Extracting a shape environment by solving the shape variables.
# The shape environment will be derived by using `tf.shape` from the
# dynamic shape of arguments, not their static shape.
def _make_shape_env(args_avals: Sequence[core.AbstractValue],
args: Sequence[TfVal]) -> Dict[str, TfVal]:
eqns = [(p, tf.shape(a)[i])
for a_aval, a in util.safe_zip(args_avals, args)
for i, p in enumerate(a_aval.shape)]
return _solve_shape_vars(eqns)
ShapeEqn = Tuple[PolyDim, TfVal]
def _solve_shape_vars(eqns: Sequence[ShapeEqn]) -> Dict[str, TfVal]:
"""Solves a number of equations "poly = tfval" into an shape environment."""
# A simple variable elimination algorithm for now
solved: Dict[str, TfVal] = {} # Already solved vars
def simplify_poly(p: PolyDim) -> Optional[Union[TfVal,
Tuple[str, TfVal, TfVal]]]:
# Simplifies polynomial given `solved`
# Returns (v, f, rest) such that p = v * f + rest, or
# rest such that p = rest, or None
v = None
f = None
rest = []
if isinstance(p, int):
return tf.constant(p)
for m, m_factor in p.items():
simpl_m: Union[str, TfVal] = simplify_mon(m, p)
if isinstance(simpl_m, str): # A var
if v is not None:
return None
v = simpl_m
f = m_factor
else: # A value
rest.append(tf.math.multiply(simpl_m, m_factor))
rest_val = functools.reduce(tf.math.add, rest, tf.constant(0))
return rest_val if v is None else (v, f, rest_val)
def simplify_mon(m: masking.Mon, in_poly: masking.Poly) -> Union[str, TfVal]:
# Simplifies monomial given `solved`
# Returns either a variable, or a solved value
if not m:
return tf.constant(1)
if m.degree > 1:
msg = ("only linear polynomials are supported as input shape "
f"specifications. Found '{m}' in '{in_poly}'.")
raise TypeError(msg)
var = list(m.keys())[0]
return solved.get(var, var)
remaining = eqns
while remaining:
new_remaining = []
for eqn in remaining:
eq_p, eq_val = eqn
p_simpl = simplify_poly(eq_p)
if p_simpl is None:
new_remaining.append(eqn)
continue
if not isinstance(p_simpl, tuple):
# TODO: add an assertion rest == eq_v
continue
var, factor, rest = p_simpl
# p = var * factor + rest
# TODO: add an assertion eq_v >= rest and (eq_v - rest) mod factor == 0
solved[var] = tf.math.floordiv(tf.math.subtract(eq_val, rest), factor)
if len(new_remaining) < len(remaining):
remaining = new_remaining
else:
msg = "Cannot solve"
raise ValueError(msg)
return solved
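# Example (informal): for an argument declared as "(batch, 2 * batch)" with a
# runtime tf.shape of (3, 6), the equations are [(batch, 3), (2 * batch, 6)].
# The first equation solves batch = 3; the second then simplifies to a constant
# and is dropped (the consistency assertion is still a TODO above).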
def shape_as_value(x):
"""Injects the shape of `x` as an array value.
**Experimental: please give feedback, and expect changes!**
This allows the use of a shape expression as array argument to JAX functions.
A typical example is for implementing a mean operation:
jnp.sum(x) / np.prod(jax2tf.shape_as_value(x))
"""
return shape_as_value_p.bind(x)
# TODO: move this to masking or to some common library, if approved
shape_as_value_p = core.Primitive("shape_as_value")
shape_as_value_p.multiple_results = True
def _shape_as_value_impl(x):
x_shape = np.shape(x)
def dim_to_int(dim: PolyDim) -> int:
dim_int = _poly_dim_to_tf_dim(dim)
if dim_int is None:
msg = ("shape_as_value is not implemented for non-constant shapes "
"except for masking and jax2tf. "
f"Has shape: {x_shape}")
raise TypeError(msg)
else:
return dim_int
return tuple(map(dim_to_int, x_shape))
shape_as_value_p.def_impl(_shape_as_value_impl)
def _shape_as_value_abstract(x_aval: core.AbstractValue) -> Sequence[core.AbstractValue]:
rank = len(x_aval.shape) # type: ignore[attr-defined]
return (core.ShapedArray((), dtypes.canonicalize_dtype(np.int_), weak_type=True),) * rank
shape_as_value_p.def_abstract_eval(_shape_as_value_abstract)
def _shape_as_value_translation(comp, x):
return xla_client._xla.ops.Tuple(comp,
tuple(xb.constant(comp, d)
for d in comp.GetShape(x).dimensions()))
xla.translations[shape_as_value_p] = _shape_as_value_translation
def _shape_as_value_jvp_rule(primals, tangents):
# The shape does not depend on the contents of the input
x, = primals
zero = ad.Zero.from_value(0.)
return shape_as_value(x), (zero,) * len(x.shape)
ad.primitive_jvps[shape_as_value_p] = _shape_as_value_jvp_rule
def _shape_as_value__batching_rule(batched_args, batch_dims):
xv, = batched_args
batch_dim, = batch_dims
batch_size = xv.shape[batch_dim]
batched_shape = shape_as_value(xv)
one_shape = batched_shape[0:batch_dim] + batched_shape[batch_dim+1:]
res = tuple(jnp.broadcast_to(d, (batch_size, 1)) for d in one_shape)
return res, (0,) * len(one_shape)
batching.primitive_batchers[shape_as_value_p] = _shape_as_value__batching_rule
def _shape_as_value_masking_rule(operands, operands_logical_shapes):
x_logical_shape, = operands_logical_shapes
return tuple(x_logical_shape)
masking.masking_rules[shape_as_value_p] = _shape_as_value_masking_rule
def _shape_as_value_tf(x: TfVal,
_in_avals: Sequence[core.AbstractValue],
_out_aval: core.AbstractValue) -> TfVal:
x_aval = _in_avals[0]
def dim_to_tfval(dim: PolyDim, dim_idx: int) -> TfVal:
dim_int = _poly_dim_to_tf_dim(dim)
if dim_int is not None:
return tf.convert_to_tensor(dim_int)
else:
return tf.shape(x)[dim_idx]
return tuple(dim_to_tfval(dim, dim_idx)
for dim_idx, dim in enumerate(x_aval.shape)) # type: ignore[attr-defined]
tf_impl_with_avals[shape_as_value_p] = _shape_as_value_tf
# TODO(b/26854495): pylint doesn't understand slots and inheritance.
# pylint: disable=assigning-non-slot
class TensorFlowTracer(core.Tracer):
"""Tracer class that boxes a TF value and a JAX abstract value.
In addition to the TF value we carry the JAX abstract value because there are
two cases when it cannot be recovered from the value: (a) when the abstract
value is core.abstract_unit, in which case the value is tf.nan; (b) when we
are converting with polymorphic shapes, in which case the shape of the value
  may have dimensions set to `None`, while the JAX abstract value may contain
more precise information.
When the value has a partially-known shape, the dimensions marked as `None`
must correspond to non-constant dimensions in the abstract value.
See README.md for details.
"""
# val: TfVal
# _aval: core.AbstractValue
__slots__ = ["val", "_aval"]
def __init__(self, trace: 'TensorFlowTrace', val: TfVal,
aval: core.AbstractValue):
self._trace = trace
self._aval = aval
if aval is core.abstract_unit:
self.val = val
elif isinstance(val, (tf.Tensor, tf.Variable)):
val_shape, val_dtype = _tfval_shape_dtype(val)
aval_dtype = np.dtype(self._aval.dtype) # type: ignore[attr-defined]
if val_dtype != aval_dtype and (val_dtype == tf.int32 and aval_dtype == jnp.int64 or
val_dtype == tf.int64 and aval_dtype == jnp.int32 or
val_dtype == tf.float32 and aval_dtype == jnp.float64 or
val_dtype == tf.float64 and aval_dtype == jnp.float32):
# We expect that x64 values are turned into x32
val = tf.cast(val, dtype=aval_dtype)
val_dtype = aval_dtype
if not core.skip_checks:
assert aval_dtype == val_dtype, f"expected {aval_dtype} == {val_dtype}"
for aval_dim, val_dim in util.safe_zip(self._aval.shape, val_shape): # type: ignore[attr-defined]
if val_dim is None:
assert isinstance(aval_dim, masking.Poly), f"expected {self._aval.shape} == {val_shape}" # type: ignore[attr-defined]
elif not isinstance(aval_dim, masking.Poly):
assert aval_dim == val_dim, f"expected {self._aval.shape} == {val_shape}" # type: ignore[attr-defined]
else:
# We have a TF value with known shape, and the abstract shape is a polynomial
# As an extra check, verify the value if the shape env are only constants
try:
aval_int = int(masking.eval_poly(aval_dim, _shape_env))
except TypeError:
continue
assert aval_int == val_dim, f"expected {self._aval.shape} == {val_shape}. Found {aval_int} != {val_dim}." # type: ignore
self.val = val
else: # Must be a numeric value
self.val = _safe_convert_to_tensor(val, dtype=self._aval.dtype) # type: ignore[attr-defined]
@property
def aval(self):
return self._aval
def full_lower(self):
return self
class TensorFlowTrace(core.Trace):
"""Trace class that underlies the jax2tf transformation.
We are going to ensure that jax2tf.convert is never nested inside other
transformations. This is sufficient for intended use cases (converting
fully-transformed JAX code). It also simplifies our job because we do not have
to handle situations where we apply primitives on a mix of TF values and
JAX tracers from an outer transformation. E.g., for addition both the TF values
and the JAX tracers have an override and they get confused if they see values
from the other world.
Hence a TFT trace does not interact with non-TFT traces at lower-level. For
higher-order control-flow primitives we invoke recursively
_interpret_fun on the body of the conditional, which will create a nested TFT.
We do want to allow transformations nested inside a TensorFlowTrace (TFT), but
those will introduce their own MainTrace, and any operations involving those
will be done on those traces, i.e., not a concern for TFT.
"""
def pure(self, val: Union[TfVal, core.Unit]) -> TensorFlowTracer:
"""Lifts a non-Tracer into the TensorFlowTracer.
This function may be called by way of trace.full_raise.
The value may be a core.unit. During JAX transformations we sometimes
produce a Jaxpr that has arguments of abstract value core.abstract_unit
and results equal to core.unit. These are arguments and results that are
not used in the computation.
In TF world, we represent core.unit as NaN. This is safe, as these values
should never be used.
"""
if val is core.unit:
return TensorFlowTracer(self, tf.constant(np.nan, tf.float32), core.abstract_unit)
else:
shape, dtype = _tfval_shape_dtype(val)
return TensorFlowTracer(self, val, core.ShapedArray(shape, dtype))
def lift(self, val: core.Tracer) -> TensorFlowTracer:
# This would be called when we need to raise a tracer from a lower-level
# main into the TensorFlowTrace. Since the TensorFlowTrace is never nested
# inside another transform, there are no lower-level main traces.
assert False
def sublift(self, val: TensorFlowTracer) -> TensorFlowTracer:
    # This is called when we need to raise a tracer from the same main,
# but a lower sublevel. This could come from a nested jit.
return TensorFlowTracer(self, val.val, val._aval)
def process_primitive(self, primitive: core.Primitive,
tracers: Sequence[TensorFlowTracer],
params) -> TensorFlowTracer:
impl, impl_needs_avals = self.get_primitive_impl(primitive)
args_avals: Sequence[core.AbstractValue] = tuple(t.aval for t in tracers)
out_aval = primitive.abstract_eval(*args_avals, **params)
args_tf: Sequence[TfVal] = [t.val for t in tracers]
if impl_needs_avals:
val_out: TfVal = impl(*args_tf, _in_avals=args_avals, # type: ignore
_out_aval=out_aval, **params)
else:
val_out = impl(*args_tf, **params)
if primitive.multiple_results:
out = [TensorFlowTracer(self, v, a)
for v, a in util.safe_zip(val_out, out_aval)] # type: ignore
else:
out = TensorFlowTracer(self, val_out, out_aval) # type: ignore
# Check that the impl rule returned a value of expected shape and dtype
# TODO: adapt this to match polymorphic shapes
if not core.skip_checks:
if primitive.multiple_results:
for o, expected_aval in zip(out, out_aval): # type: ignore
assert o.aval.strip_weak_type() == expected_aval.strip_weak_type(), (
f"{primitive}: out.aval = {o.aval}; expected {expected_aval}")
else:
assert out.aval == out_aval, ( # type: ignore
f"{primitive}: out.aval = {out.aval}; expected {out_aval}") # type: ignore
return out # type: ignore
def process_call(self, call_primitive: core.Primitive, f: lu.WrappedFun,
tracers: Sequence[TensorFlowTracer], params):
assert call_primitive.multiple_results
vals: Sequence[TfVal] = [t.val for t in tracers]
f = _interpret_subtrace(f, self.main, tuple(t.aval for t in tracers))
if call_primitive == core.named_call_p:
with tf.name_scope(_sanitize_scope_name(params["name"])):
vals_out: Sequence[Tuple[TfVal,
core.AbstractValue]] = f.call_wrapped(*vals)
elif call_primitive == sharded_jit.sharded_call_p:
vals_out = _sharded_call(f, vals, **params)
else:
vals_out = f.call_wrapped(*vals)
return [TensorFlowTracer(self, v, a) for v, a in vals_out]
def post_process_call(self, call_primitive: core.Primitive,
out_tracers: Sequence[TensorFlowTracer], params):
# We encountered a call primitive, e.g., remat_call_p, whose result
# (out_tracers) include TensorFlowTracer that were not passed through
# its arguments (captured from the environment).
vals = tuple(t.val for t in out_tracers)
main = self.main
def todo(vals: Sequence[TfVal]):
trace = TensorFlowTrace(main, core.cur_sublevel())
return [TensorFlowTracer(trace, v, out_tracer.aval)
for v, out_tracer in util.safe_zip(vals, out_tracers)]
return vals, todo
def process_map(self, map_primitive, f, tracers, params):
raise NotImplementedError("process_map")
def post_process_map(self, map_primitive, out_tracers, params):
raise NotImplementedError("post_process_map")
def process_custom_jvp_call(self, prim, fun, jvp, tracers):
# Drop the custom differentiation rule and act like a call primitive. This
# behavior is desirable because jax2tf stages code out of the JAX system, so
# there are no more JAX differentiation transformations to be applied.
del jvp # Unused.
return self.process_call(core.call_p, fun, tracers, {})
def post_process_custom_jvp_call(self, out_tracers, params):
assert False # unreachable assuming jax2tf runs with clean trace state
def process_custom_vjp_call(self, prim, fun, fwd, bwd, tracers, out_trees):
# Drop the custom differentiation rule and act like a call primitive. This
# behavior is desirable because jax2tf stages code out of the JAX system, so
# there are no more JAX differentiation transformations to be applied.
del fwd, bwd, out_trees # Unused.
return self.process_call(core.call_p, fun, tracers, {})
def post_process_custom_vjp_call(self, out_tracers, params):
assert False # unreachable assuming jax2tf runs with clean trace state
def get_primitive_impl(self, p: core.Primitive) -> Tuple[Callable, bool]:
# Returns the primitive implementation and whether the implementation
# takes abstract values (see definition of tf_impl_with_avals)
try:
return tf_impl[p], False
except KeyError:
try:
return tf_impl_with_avals[p], True
except KeyError as err:
msg = "TensorFlow interpretation rule for '{}' not implemented"
raise NotImplementedError(msg.format(p)) from err
def to_tf_dtype(jax_dtype):
if jax_dtype == dtypes.float0:
return tf.float32
else:
return tf.dtypes.as_dtype(jax_dtype)
def to_jax_dtype(tf_dtype):
return tf_dtype.as_numpy_dtype
def _unexpected_primitive(p: core.Primitive, *args, **kwargs):
assert False, f"Encountered unexpected primitive {p}"
for unexpected in xla.call_translations: # Call primitives are inlined
tf_impl[unexpected] = functools.partial(_unexpected_primitive, unexpected)
# Primitives that are not yet implemented must be explicitly declared here.
tf_not_yet_impl = [
"reduce", "rng_uniform",
"igamma_grad_a",
"random_gamma_grad",
# Not high priority?
"after_all", "all_to_all", "create_token",
"infeed", "outfeed", "pmax_p",
"pmin", "ppermute", "psum", "pmax", "pgather",
"axis_index", "pdot", "all_gather",
"xla_pmap",
"call_tf",
]
try:
tf_impl[lax.tie_in_p] = lambda x, y: y
except AttributeError:
pass
tf_impl[ad_util.stop_gradient_p] = tf.stop_gradient
tf_impl[ad_util.zeros_like_p] = tf.zeros_like
def _add(x: TfVal, y: TfVal) -> TfVal:
return tf.raw_ops.AddV2(x=x, y=y)
tf_impl[ad_util.add_jaxvals_p] = _add
tf_impl[xla.device_put_p] = lambda x, device=None: x
tf_impl[lax.neg_p] = tf.math.negative
tf_impl[lax.sign_p] = tf.math.sign
tf_impl[lax.floor_p] = tf.math.floor
tf_impl[lax.ceil_p] = tf.math.ceil
def _round(operand, *, rounding_method):
if rounding_method is lax.RoundingMethod.AWAY_FROM_ZERO:
sign = tf.math.sign(operand)
operand *= sign
floor = tf.math.floor(operand)
operand -= floor
cond = tf.math.equal(operand, tf.constant(np.array(0.5), operand.dtype))
return sign * (tf.where(cond, tf.constant(np.array(1), operand.dtype),
tf.math.round(operand)) + floor)
else:
return tf.math.round(operand)
tf_impl[lax.round_p] = _round
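# Example (informal): with rounding_method=AWAY_FROM_ZERO, 2.5 rounds to 3.0
# and -2.5 rounds to -3.0, whereas tf.math.round (the default branch, which
# rounds half to even) would map them to 2.0 and -2.0 respectively.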
tf_impl[lax.nextafter_p] = tf.math.nextafter
def _population_count(x):
orig_dtype = x.dtype
return tf.cast(tf.raw_ops.PopulationCount(x=x), orig_dtype)
tf_impl[lax.population_count_p] = _population_count
tf_impl[lax.is_finite_p] = tf.math.is_finite
tf_impl[lax.abs_p] = tf.math.abs
tf_impl[lax.pow_p] = tf.math.pow
tf_impl[lax.integer_pow_p] = tf.math.pow
tf_impl[lax.exp_p] = tf.math.exp
tf_impl[lax.expm1_p] = tf.math.expm1
tf_impl[lax.log_p] = tf.math.log
tf_impl[lax.log1p_p] = tf.math.log1p
tf_impl[lax.tan_p] = tf.math.tan
tf_impl[lax.tanh_p] = tf.math.tanh
tf_impl[lax.sin_p] = tf.math.sin
tf_impl[lax.sinh_p] = tf.math.sinh
tf_impl[lax.cos_p] = tf.math.cos
tf_impl[lax.cosh_p] = tf.math.cosh
tf_impl[lax.acos_p] = tf.math.acos
tf_impl[lax.asin_p] = tf.math.asin
tf_impl[lax.atan_p] = tf.math.atan
tf_impl[lax.atan2_p] = tf.math.atan2
tf_impl[lax.acosh_p] = tf.math.acosh
tf_impl[lax.atanh_p] = tf.math.atanh
tf_impl[lax.asinh_p] = tf.math.asinh
tf_impl[lax.sqrt_p] = tf.math.sqrt
tf_impl[lax.rsqrt_p] = tf.math.rsqrt
tf_impl[lax.lgamma_p] = tf.math.lgamma
tf_impl[lax.digamma_p] = tf.math.digamma
tf_impl[lax.igamma_p] = tf.math.igamma
tf_impl[lax.igammac_p] = tf.math.igammac
tf_impl[lax.regularized_incomplete_beta_p] = tf.math.betainc
tf_impl[lax.erf_p] = tf.math.erf
tf_impl[lax.erfc_p] = tf.math.erfc
tf_impl[lax.erf_inv_p] = tf.math.erfinv
tf_impl[lax.bessel_i0e_p] = tf.math.bessel_i0e
tf_impl[lax.bessel_i1e_p] = tf.math.bessel_i1e
tf_impl[lax.complex_p] = tf.complex
def _conj(x, **kwargs):
# The only dtypes that are allowed are: float32, float64, complex64, and
# complex128.
if x.dtype == tf.float32:
return tf.cast(x, tf.complex64)
elif x.dtype == tf.float64:
return tf.cast(x, tf.complex128)
else:
return tf.math.conj(x)
tf_impl[lax.conj_p] = _conj
tf_impl[lax.real_p] = tf.math.real
tf_impl[lax.imag_p] = tf.math.imag
tf_impl[lax.add_p] = _add
tf_impl[lax.sub_p] = tf.math.subtract
tf_impl[lax.mul_p] = tf.math.multiply
def _iota(*, dtype, shape, dimension):
dtype = to_tf_dtype(dtype)
# Some dtypes are unsupported, like uint32, so we just fall back to int32.
# TODO(mattjj, necula): improve tf.range dtype handling
shape_tf = _eval_shape(shape)
vec = tf.range(tf.cast(shape_tf[dimension], tf.int32), dtype=tf.int32)
vec_shape = [-1 if i == dimension else 1 for i in range(len(shape))]
return tf.cast(tf.broadcast_to(tf.reshape(vec, vec_shape), shape_tf), dtype)
tf_impl[lax.iota_p] = _iota
def _div(lhs, rhs):
if lhs.dtype.is_integer:
quotient = tf.math.floordiv(lhs, rhs)
select = tf.math.logical_and(
tf.not_equal(tf.math.sign(lhs), tf.math.sign(rhs)),
tf.not_equal(tf.math.floormod(lhs, rhs), 0))
return tf.where(select, quotient + 1, quotient)
else:
return tf.math.truediv(lhs, rhs)
def _rem(lhs, rhs):
return tf.math.sign(lhs) * tf.math.floormod(tf.math.abs(lhs),
tf.math.abs(rhs))
tf_impl[lax.div_p] = _div
tf_impl[lax.rem_p] = _rem
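# Example (informal): XLA integer division truncates toward zero, so
# _div(-7, 2) yields -3 (tf.math.floordiv alone would give -4) and
# _rem(-7, 2) yields -1, preserving lhs == div * rhs + rem.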
tf_impl[lax.max_p] = tf.math.maximum
tf_impl[lax.min_p] = tf.math.minimum
# Map from TF signed types to TF unsigned types.
_SIGNED_TO_UNSIGNED_TABLE = {
tf.int8: tf.uint8,
tf.int16: tf.uint16,
tf.int32: tf.uint32,
tf.int64: tf.uint64,
}
# Map from TF unsigned types to TF signed types.
_UNSIGNED_TO_SIGNED_TABLE = {u: s for s, u in _SIGNED_TO_UNSIGNED_TABLE.items()}
# Note: Bitwise operations only yield identical results on unsigned integers!
# pylint: disable=protected-access
def _shift_right_arithmetic_raw(x, y):
if x.dtype.is_unsigned:
assert x.dtype == y.dtype
orig_dtype = x.dtype
signed_dtype = _UNSIGNED_TO_SIGNED_TABLE[orig_dtype]
x = tf.cast(x, signed_dtype)
y = tf.cast(y, signed_dtype)
res = tf.bitwise.right_shift(x, y)
return tf.cast(res, orig_dtype)
else:
return tf.bitwise.right_shift(x, y)
def _shift_right_arithmetic(x, y):
# TF shift is "implementation defined" if the shift amount is negative
# or larger or equal to the size of the value. We implement the XLA
# semantics to return the shift by the max value (x_bits - 1).
# TODO: it is likely better to add XlaOps for shifts
x_bits = 8 * x.dtype.size
clamp_y = tf.where(_shift_in_bounds(x, y), y, x_bits - 1)
return _shift_right_arithmetic_raw(x, clamp_y)
tf_impl[lax.shift_right_arithmetic_p] = _shift_right_arithmetic
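# Example (informal): for an int8 value shifted right arithmetically by 10, the
# shift amount is clamped to x_bits - 1 = 7, so the result is 0 for non-negative
# inputs and -1 for negative inputs, matching XLA rather than TF's
# implementation-defined behavior.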
def _shift_right_logical_raw(x, y):
if x.dtype.is_unsigned:
return tf.bitwise.right_shift(x, y)
else:
assert x.dtype == y.dtype
orig_dtype = x.dtype
unsigned_dtype = _SIGNED_TO_UNSIGNED_TABLE[orig_dtype]
x = tf.cast(x, unsigned_dtype)
y = tf.cast(y, unsigned_dtype)
res = tf.bitwise.right_shift(x, y)
return tf.cast(res, orig_dtype)
def _shift_right_logical(x, y):
# TF shift is "implementation defined" if the shift amount is negative
# or larger or equal to the size of the value. We implement the XLA semantics
# to return 0.
# TODO: it is likely better to add XlaOps for shifts
return tf.where(_shift_in_bounds(x, y),
_shift_right_logical_raw(x, y),
tf.zeros_like(x))
tf_impl[lax.shift_right_logical_p] = _shift_right_logical
def _shift_left(x, y):
# TF shift is "implementation defined" if the shift amount is negative
# or larger or equal to the size of the value. We implement the XLA semantics
# to return 0.
# TODO: it is likely better to add XlaOps for shifts
return tf.where(_shift_in_bounds(x, y),
tf.bitwise.left_shift(x, y),
tf.zeros_like(x))
tf_impl[lax.shift_left_p] = _shift_left
def _shift_in_bounds(x: TfVal, y: TfVal) -> TfVal:
# Return the TF expression for when y is within bounds (0 <= y < |x|)
x_bits = 8 * x.dtype.size
# TF does not have comparisons for uint16 and uint32 (despite what the
# documentation says)
y_comp = tf.cast(y, _UNSIGNED_TO_SIGNED_TABLE[y.dtype]) if y.dtype.is_unsigned else y
y_lt_x_bits = tf.math.less(y_comp, x_bits)
y_ge_0 = tf.math.greater_equal(y_comp, 0)
return tf.logical_and(y_lt_x_bits, y_ge_0)
def _not(x):
"""Computes bitwise not with support for booleans.
Numpy and JAX support bitwise not for booleans by applying a logical not!
  This means that applying bitwise_not yields an unexpected result:
jnp.bitwise_not(jnp.array([True, False]))
>> DeviceArray([False, True], dtype=bool)
  if you assume that booleans are simply cast to integers.
jnp.bitwise_not(jnp.array([True, False]).astype(np.int32)).astype(bool)
>> DeviceArray([True, True], dtype=bool)
"""
if x.dtype == tf.bool:
return tf.logical_not(x)
else:
return tf.bitwise.invert(x)
tf_impl[lax.not_p] = _not
def bool_to_int8(f, argnums):
"""Computes bool valued functions using int8."""
argnums = tf.nest.flatten(argnums)
def wrapper(*args, **kwargs):
if not any(args[i].dtype == tf.bool for i in argnums):
return f(*args, **kwargs)
else:
args_cast = [(tf.cast(a, tf.int8) if i in argnums else a)
for i, a in enumerate(args)]
if "_in_avals" in kwargs:
def cast_aval(aval):
return core.ShapedArray(aval.shape, np.int8)
_in_avals_cast = [cast_aval(aval) if i in argnums else aval
for i, aval in enumerate(kwargs["_in_avals"])]
_out_aval_cast = tf.nest.map_structure(cast_aval, kwargs["_out_aval"])
kwargs = dict(kwargs, _in_avals=_in_avals_cast, _out_aval=_out_aval_cast)
out = f(*args_cast, **kwargs)
return tf.nest.map_structure(lambda o: tf.cast(o, tf.bool), out)
return wrapper
tf_impl[lax.or_p] = bool_to_int8(tf.bitwise.bitwise_or, argnums=(0, 1))
tf_impl[lax.and_p] = bool_to_int8(tf.bitwise.bitwise_and, argnums=(0, 1))
tf_impl[lax.xor_p] = bool_to_int8(tf.bitwise.bitwise_xor, argnums=(0, 1))
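# Example (informal): tf.bitwise ops do not accept tf.bool operands, so the
# bool_to_int8 wrapper casts boolean arguments (and their avals, if provided)
# to int8, applies the op, and casts the result back to bool, e.g.
# or(True, False) -> True.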
tf_impl[lax.eq_p] = tf.math.equal
tf_impl[lax.ne_p] = tf.math.not_equal
tf_impl[lax.ge_p] = tf.math.greater_equal
tf_impl[lax.gt_p] = tf.math.greater
tf_impl[lax.le_p] = tf.math.less_equal
tf_impl[lax.lt_p] = tf.math.less
tf_impl[lax_linalg.cholesky_p] = tf.linalg.cholesky
def _convert_element_type(operand, *, new_dtype, weak_type=False):
old_dtype = operand.dtype.as_numpy_dtype
if (dtypes.issubdtype(old_dtype, np.complexfloating) and
not dtypes.issubdtype(new_dtype, np.complexfloating)):
operand = tf.math.real(operand)
if (dtypes.issubdtype(old_dtype, np.floating) and
not (dtypes.issubdtype(new_dtype, np.floating) or
dtypes.issubdtype(new_dtype, np.complexfloating) or
new_dtype == np.bool_)):
sign = tf.math.sign(operand)
operand = sign * tf.math.floor(sign * operand)
return tf.dtypes.cast(operand, to_tf_dtype(new_dtype))
tf_impl[lax.convert_element_type_p] = _convert_element_type
def _bitcast_convert_type(operand, new_dtype):
return tf.bitcast(operand, to_tf_dtype(new_dtype))
tf_impl[lax.bitcast_convert_type_p] = _bitcast_convert_type
def _clamp(minval, operand, maxval):
# The below permits mirroring the behavior of JAX when maxval < minval
maxval = tf.broadcast_to(maxval, operand.shape)
minval = tf.math.minimum(tf.broadcast_to(minval, operand.shape), maxval)
return tf.clip_by_value(operand, minval, maxval)
tf_impl[lax.clamp_p] = _clamp
def _concatenate(*operands, dimension):
return tf.concat(operands, axis=dimension)
tf_impl[lax.concatenate_p] = _concatenate
def _conv_general_dimension_numbers_proto(dimension_numbers):
"""Converts a ConvDimensionNumbers to an XLA ConvolutionDimensionNumbers."""
assert isinstance(dimension_numbers, lax.ConvDimensionNumbers)
lhs_spec, rhs_spec, out_spec = dimension_numbers
proto = xla_data_pb2.ConvolutionDimensionNumbers()
proto.input_batch_dimension = lhs_spec[0]
proto.input_feature_dimension = lhs_spec[1]
proto.output_batch_dimension = out_spec[0]
proto.output_feature_dimension = out_spec[1]
proto.kernel_output_feature_dimension = rhs_spec[0]
proto.kernel_input_feature_dimension = rhs_spec[1]
proto.input_spatial_dimensions.extend(lhs_spec[2:])
proto.kernel_spatial_dimensions.extend(rhs_spec[2:])
proto.output_spatial_dimensions.extend(out_spec[2:])
return proto
def _conv_general_precision_config_proto(precision):
"""Convert an integer to an XLA.PrecisionConfig."""
if precision is None:
return None
proto = xla_data_pb2.PrecisionConfig()
proto.operand_precision.append(int(precision))
return proto
# _try_tf_conv returns a Tensor when it succeeds, or a string describing why
# it did not succeed otherwise.
def _try_tf_conv(lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation,
dimension_numbers, feature_group_count, batch_group_count,
out_shape) -> Union[str, TfVal]:
# TODO(bchetioui): this function is not exhaustive wrt which convolution cases
# can be translated into TF primitives. Further investigation is needed to
# fully flesh it out.
if not lhs.dtype in [tf.float16, tf.float32, tf.float64]:
return f"tf.nn.convolution is not supported for dtype {lhs.dtype}"
if feature_group_count != 1:
return "tf.nn.convolution does not support grouped convolutions"
# TODO(bchetioui): is there something to do with batch_group_count?
if batch_group_count != 1:
return "Unimplemented support for batch_group_count != 1"
nb_spatial_dimensions = len(lhs.shape) - 2
# TF can only deal with 1D, 2D and 3D convolution
if nb_spatial_dimensions < 1 or nb_spatial_dimensions > 3:
return ("TensorFlow can only handle convolutions with 1, 2, or 3 "
"spatial dimensions")
# TODO(bchetioui): handle different stride cases
if list(window_strides) != [1] * nb_spatial_dimensions:
return ("Unimplemented support for window_strides != "
f"{tuple([1] * nb_spatial_dimensions)}")
success = lambda res: (res, None)
failure = lambda msg: (None, msg)
def convert_padding():
# TODO(bchetioui): in this instance, we can not use padtype_to_pads as
# string padding is not implemented for transposed convolution.
if list(lhs_dilation) != [1] * nb_spatial_dimensions:
return failure("Padding conversion is not supported for transposed "
"convolution.")
lhs_perm, rhs_perm, _ = dimension_numbers
effective_rhs_shape = [(k-1) * r + 1 for k, r in
zip(np.take(rhs.shape, rhs_perm)[2:], rhs_dilation)]
lhs_shape = np.take(lhs.shape, lhs_perm)[2:]
# TF only allows 'VALID' and 'SAME' padding
for pad_str in ['VALID', 'SAME']:
gen_padding = lax.padtype_to_pads(
lhs_shape, effective_rhs_shape, window_strides, pad_str)
if list(gen_padding) == list(padding):
return success(pad_str)
return failure("Input padding not supported in TensorFlow.")
def convert_dim_nums():
lhs_spec, rhs_spec, out_spec = dimension_numbers
# TF only allows filters with shape:
# spatial_filter_shape + [in_channels, out_channels]. In JAX however,
# rhs_spec is represented as a tuple containing the following:
# [out_channels, in_channels] + spatial_filter_shape.
supported_rhs_shape = ([nb_spatial_dimensions + 1, nb_spatial_dimensions] +
list(range(nb_spatial_dimensions)))
if list(rhs_spec) != supported_rhs_shape:
return failure("Input filter (RHS) shape format not supported in "
"TensorFlow")
# TF only supports same LHS and output data format
if lhs_spec != out_spec:
return failure("TensorFlow requires the same data format for LHS and "
"output.")
# Alphabet extracted from the documentation of tf.conv{1,2,3}d
spatial_dim_alphabet = 'DHW'[-nb_spatial_dimensions:]
# TF only supports the following data formats:
# - [batch_size, in_channels] + input_spatial_shape
# TODO(bchetioui): TF currently does not support the above on CPU. To avoid
# failing on this platform, this path is commented out for now.
#if list(lhs_spec) == list(range(len(lhs_spec))):
# return "NC" + spatial_dim_alphabet
# - [batch_size] + input_spatial_shape + [in_channels]
if list(lhs_spec) == ([0, len(lhs_spec) - 1] +
list(range(1, len(lhs_spec) - 1))):
return success("N" + spatial_dim_alphabet + "C")
return failure("Data format is unsupported by TensorFlow")
def convert_dilation_and_compute_result(tf_padding, tf_dim_nums):
no_dilation = [1] * nb_spatial_dimensions
# TODO(bchetioui): is there a generic way to do a transposed atrous
# convolution in TensorFlow?
if not (list(lhs_dilation) == no_dilation or
list(rhs_dilation) == no_dilation):
return "Both LHS and RHS dilations are set"
# This is a non-dilated or atrous convolution
if list(lhs_dilation) == no_dilation:
return tf.nn.convolution(
lhs, rhs, strides=window_strides, padding=tf_padding,
data_format=tf_dim_nums, dilations=rhs_dilation)
# TODO(bchetioui): the below path is unreachable for now, as passing a lhs
# dilation to this function will result in convert_padding returning None
# systematically. This must be investigated further.
# Dilation of the LHS is transposed convolution
return tf.nn.conv_transpose(
lhs, rhs, out_shape, window_strides, padding=tf_padding,
data_format=tf_dim_nums, dilations=lhs_dilation)
tf_padding, error = convert_padding()
if tf_padding is None:
return error
tf_dim_nums, error = convert_dim_nums()
if tf_dim_nums is None:
return error
return convert_dilation_and_compute_result(tf_padding, tf_dim_nums)
def _conv_general_dilated(lhs, rhs, window_strides, padding, lhs_dilation,
rhs_dilation, dimension_numbers, feature_group_count,
batch_group_count, lhs_shape, rhs_shape, precision,
_in_avals, _out_aval):
"""Implementation of lax.conv_general_dilated_p using XlaConv."""
if not _enable_xla:
info_or_result = _try_tf_conv(
lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation,
dimension_numbers, feature_group_count, batch_group_count, _aval_to_tf_shape(_out_aval)
)
if not isinstance(info_or_result, str):
return info_or_result
else:
raise _xla_path_disabled_error("conv_general_dilated")
dnums_proto = _conv_general_dimension_numbers_proto(dimension_numbers)
precision_config_proto = _conv_general_precision_config_proto(precision)
assert batch_group_count == 1 # TODO(phawkins): implement batch_group_count
out = tfxla.conv(
lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation,
dnums_proto, feature_group_count=feature_group_count,
precision_config=precision_config_proto)
# TODO: implement shape inference for XlaConv
out.set_shape(_aval_to_tf_shape(_out_aval))
return out
tf_impl_with_avals[lax.conv_general_dilated_p] = _conv_general_dilated
def _dot_general(lhs, rhs, dimension_numbers, precision, preferred_element_type):
"""Implementation of lax.dot_general_p in terms of tf.linalg.einsum."""
del precision
del preferred_element_type
(lhs_contracting, rhs_contracting), (lhs_batch, rhs_batch) = dimension_numbers
lhs_dim, rhs_dim = len(lhs.shape), len(rhs.shape)
# This condition ensures that:
# 1) the considered dtype is not tf.bfloat16/tf.int32, which are supported by
# tf.linalg.einsum but not by tf.linalg.matmul;
# 2) the batch dimensions are ordered in the same way in lhs and rhs (this is
# not strictly necessary, but we would have to reshape the array if that
# were not the case;
# 3) lhs and rhs have the same number of dimensions +/- 1
# 4) the number of non-batch dimensions in both tensors is either 1 or 2
# 5) the contracting dimensions are consistent with those of a classic
# matrix/matrix, vector/matrix or matrix/vector multiplication.
if (not lhs.dtype in [tf.bfloat16, tf.int32]
and lhs_batch == rhs_batch == tuple(range(len(lhs_batch)))
and lhs_dim - rhs_dim in [-1, 0, 1]
and 1 <= lhs_dim - len(lhs_batch) <= 2
and 1 <= rhs_dim - len(rhs_batch) <= 2
and lhs_contracting == (len(lhs.shape) - 1,)
and rhs_contracting == (len(lhs_batch),)):
# All the inputs to tf.linalg.matmul must have 2 inner dimensions,
# after their batch dimensions, so we need to expand the dimensions
# appropriately. We can get to this branch with three combinations of
# inner shapes:
# - lhs.inner_shape == [a, b], rhs.inner_shape == [b, c]
# - in this case, the resulting inner shape is [a, c];
# - lhs.inner_shape == [b] , rhs.inner_shape == [b, c]
# - in this case, we need to expand lhs to [1, b], and the resulting
# shape is [c]. We need to squeeze the result of tf.linalg.matmul
# as it will have shape [1, c];
# - lhs.shape == [batch] + [a, b], rhs.shape == [batch] + [b]
# - in this case, we need to expand rhs to [b, 1], and the resulting
# shape is [a]. We need to squeeze the result of tf.linalg.matmul
# as it will have shape [a, 1];
# - lhs.shape == [batch] + [b] , rhs.shape == [batch] + [b]
# - in this case, we need to expand lhs to [1, b] and rhs to [b, 1],
# and the resulting shape is (). We need to squeeze the result of
# tf.linalg.matmul as it will have shape [1, 1].
squeeze_idxs = []
if lhs_dim - len(lhs_batch) == 1:
lhs = tf.expand_dims(lhs, lhs_dim - 1)
squeeze_idxs.append(len(lhs.shape) - 2)
if rhs_dim - len(rhs_batch) == 1:
rhs = tf.expand_dims(rhs, rhs_dim - 2)
squeeze_idxs.append(len(rhs.shape) - 1)
result = tf.linalg.matmul(lhs, rhs)
if len(squeeze_idxs) != 0:
result = tf.squeeze(result, squeeze_idxs)
return result
new_id = iter(string.ascii_letters)
lhs_axis_ids = [next(new_id) for _ in lhs.shape]
rhs_axis_ids = [next(new_id) for _ in rhs.shape]
lhs_out_axis_ids = lhs_axis_ids[:]
rhs_out_axis_ids = rhs_axis_ids[:]
for lhs_axis, rhs_axis in zip(lhs_contracting, rhs_contracting):
shared_id = next(new_id)
lhs_axis_ids[lhs_axis] = shared_id
rhs_axis_ids[rhs_axis] = shared_id
lhs_out_axis_ids[lhs_axis] = None
rhs_out_axis_ids[rhs_axis] = None
batch_ids = []
for lhs_axis, rhs_axis in zip(lhs_batch, rhs_batch):
shared_id = next(new_id)
lhs_axis_ids[lhs_axis] = shared_id
rhs_axis_ids[rhs_axis] = shared_id
lhs_out_axis_ids[lhs_axis] = None
rhs_out_axis_ids[rhs_axis] = None
batch_ids.append(shared_id)
not_none = lambda x: x is not None
out_axis_ids = list(filter(
not_none, batch_ids + lhs_out_axis_ids + rhs_out_axis_ids))
assert lhs.dtype == rhs.dtype
spec = "{},{}->{}".format("".join(lhs_axis_ids),
"".join(rhs_axis_ids),
"".join(out_axis_ids))
return tf.linalg.einsum(spec, lhs, rhs)
tf_impl[lax.dot_general_p] = _dot_general
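# A minimal worked example of the einsum fallback above (a sketch, assuming
# float32 inputs): with lhs.shape == (3, 4), rhs.shape == (5, 4) and
# dimension_numbers == (((1,), (1,)), ((), ())), rhs_contracting is not
# (len(lhs_batch),), so the tf.linalg.matmul fast path is skipped. The einsum
# branch then assigns lhs ids "ae" and rhs ids "ce", giving
#   spec == "ae,ce->ac"
# i.e. lhs @ tf.transpose(rhs), with output shape (3, 5).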
def _broadcast(operand, *, sizes):
result_shape = tf.TensorShape(sizes).concatenate(operand.shape)
return tf.broadcast_to(operand, result_shape)
tf_impl[lax.broadcast_p] = _broadcast
def _broadcast_in_dim(operand, *, shape, broadcast_dimensions):
inshape = [1] * len(shape)
for orig_shape_i, broadcast_dim_i in zip(operand.shape, broadcast_dimensions):
if orig_shape_i != 1: inshape[broadcast_dim_i] = shape[broadcast_dim_i]
inshape_tf = _eval_shape(inshape)
shape_tf = _eval_shape(shape)
return tf.broadcast_to(tf.reshape(operand, inshape_tf), shape_tf)
tf_impl[lax.broadcast_in_dim_p] = _broadcast_in_dim
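# A short worked example for _broadcast_in_dim (sketch): with
#   operand.shape == (3,), shape == (2, 3, 4), broadcast_dimensions == (1,)
# inshape becomes [1, 3, 1], so the operand is first reshaped to (1, 3, 1) and
# then broadcast to (2, 3, 4); source dimensions of size 1 are kept as 1 so
# that tf.broadcast_to can expand them.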
def _reshape(operand, *, new_sizes, dimensions):
if dimensions is None:
dimensions = tf.range(tf.rank(operand))
new_sizes_tf = _eval_shape(new_sizes)
return tf.reshape(tf.transpose(operand, dimensions), new_sizes_tf)
tf_impl[lax.reshape_p] = _reshape
def _squeeze(operand, *, dimensions, _in_avals, _out_aval):
op_shape = _in_avals[0].shape
new_shape = tuple(d for i, d in enumerate(op_shape) if i not in dimensions)
new_shape_tf = _eval_shape(new_shape)
return tf.reshape(operand, new_shape_tf)
tf_impl_with_avals[lax.squeeze_p] = _squeeze
def _pad(operand, padding_value, *, padding_config,
_in_avals: Sequence[core.AbstractValue],
_out_aval: core.AbstractValue):
del _in_avals
low, high, interior = util.unzip3(padding_config)
if all(lo >= 0 and hi >= 0 and i == 0 for lo, hi, i in padding_config):
return tf.pad(operand, util.safe_zip(low, high),
mode="CONSTANT", constant_values=padding_value)
if not _enable_xla:
raise _xla_path_disabled_error("pad")
out = tfxla.pad(operand, padding_value, low, high, interior)
return out
tf_impl_with_avals[lax.pad_p] = _pad
def _rev(operand, *, dimensions):
return tf.reverse(operand, dimensions)
tf_impl[lax.rev_p] = _rev
tf_impl[lax.select_p] = tf.where
def _transpose(operand, *, permutation):
return tf.transpose(operand, perm=permutation)
tf_impl[lax.transpose_p] = _transpose
axes_to_axis = lambda func: lambda operand, axes: func(operand, axis=axes)
tf_impl[lax.reduce_sum_p] = (
bool_to_int8(axes_to_axis(tf.reduce_sum), argnums=0))
tf_impl[lax.reduce_prod_p] = (
bool_to_int8(axes_to_axis(tf.reduce_prod), argnums=0))
tf_impl[lax.reduce_max_p] = (
bool_to_int8(axes_to_axis(tf.reduce_max), argnums=0))
tf_impl[lax.reduce_min_p] = (
bool_to_int8(axes_to_axis(tf.reduce_min), argnums=0))
tf_impl[lax.reduce_or_p] = axes_to_axis(tf.reduce_any)
tf_impl[lax.reduce_and_p] = axes_to_axis(tf.reduce_all)
def _argminmax(fn, operand, axes, index_dtype):
axis, = axes
output_type = tf.int32
if dtypes.iinfo(index_dtype).bits > 32:
output_type = tf.int64
# TODO(phawkins): handle axes larger than 2^31.
result = fn(operand, axis=axis, output_type=output_type)
return tf.cast(result, to_tf_dtype(index_dtype))
tf_impl[lax.argmin_p] = functools.partial(_argminmax, tf.math.argmin)
tf_impl[lax.argmax_p] = functools.partial(_argminmax, tf.math.argmax)
_add_fn = tf.function(_add, autograph=False)
_ge_fn = tf.function(tf.math.greater_equal, autograph=False)
def _select_and_gather_add(tangents: TfVal,
operand: TfVal,
select_prim: core.Primitive,
window_dimensions: Sequence[int],
window_strides: Sequence[int],
base_dilation: Sequence[int],
window_dilation: Sequence[int],
padding: Sequence[Tuple[int, int]],
_in_avals: Sequence[core.AbstractValue],
_out_aval: core.AbstractValue):
# Note: this function follows the pattern in
# jax.lax._select_and_gather_add_translation.
dtype = operand.dtype
nbits = dtypes.finfo(dtype.as_numpy_dtype).bits
  # Specializing the function for 64 bits. Only up to 32 bits are supported on TPU;
  # we thus intend to let the code throw a different exception on this platform.
max_bits = 64
assert nbits <= max_bits
double_word_reduction = nbits * 2 <= max_bits
const = lambda dtype, x: tf.constant(np.array(x), dtype)
if double_word_reduction:
word_dtype = lax._UINT_DTYPES[nbits]
double_word_dtype = lax._UINT_DTYPES[nbits * 2]
# Packs two values into a tuple.
def pack(a, b):
a = _bitcast_convert_type(a, word_dtype)
b = _bitcast_convert_type(b, word_dtype)
a = _convert_element_type(a, new_dtype=double_word_dtype)
b = _convert_element_type(b, new_dtype=double_word_dtype)
a = tf.bitwise.left_shift(a, const(double_word_dtype, nbits))
return tf.bitwise.bitwise_or(a, b)
# Unpacks the first element of a tuple.
def fst(t):
assert t.dtype == double_word_dtype
st = _shift_right_logical(t, const(double_word_dtype, nbits))
return _bitcast_convert_type(
_convert_element_type(st, new_dtype=word_dtype), dtype
)
# Unpacks the second element of a tuple.
def snd(t):
return _bitcast_convert_type(
_convert_element_type(t, new_dtype=word_dtype), dtype
)
else:
raise NotImplementedError(f"TODO: need to pack {nbits * 2} bits but this platform can only go up to {max_bits} bits.")
assert select_prim is lax.ge_p or select_prim is lax.le_p, select_prim
def reducer(x, y):
which = tf_impl[select_prim]
return tf_impl[lax.select_p](which(fst(x), fst(y)), x=x, y=y)
init = -np.inf if select_prim is lax.ge_p else np.inf
init_identity = lambda x: pack(const(dtype, init), const(dtype, 0))
out = _specialized_reduce_window(reducer, init_identity,
pack(operand, tangents),
window_dimensions=window_dimensions,
window_strides=window_strides,
padding=padding, base_dilation=base_dilation,
window_dilation=window_dilation,
_in_avals=_in_avals, _out_aval=_out_aval)
return snd(out)
tf_impl_with_avals[lax.select_and_gather_add_p] = _select_and_gather_add
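# The pack/fst/snd trick above in concrete terms (a sketch for float32
# operands, i.e. nbits == 32): pack bitcasts each float32 to uint32, widens
# both to uint64, shifts the operand word into the upper 32 bits and ORs the
# tangent word into the lower 32 bits. fst recovers the operand (upper word)
# so the reducer can apply ge/le, and snd recovers the tangent that travelled
# along with the selected operand; the final result is snd(out).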
def _get_shape_from_tensor_or_array(x):
if isinstance(x.shape, tf.TensorShape):
return tuple(x.shape.as_list())
return tuple(x.shape)
def _common_reduce_window(operand, init_val, reducer, window_dimensions,
window_strides, padding, base_dilation,
window_dilation, _in_avals, _out_aval):
if not _enable_xla:
raise _xla_path_disabled_error("reduce_window")
o_spec = tf.TensorSpec((), dtype=operand.dtype)
reducer_fn = tf.function(reducer, autograph=False).get_concrete_function(o_spec, o_spec)
if not isinstance(init_val, tf.Tensor):
assert core.skip_checks or _is_tfval(init_val), f"Non TfVal: {init_val}"
init_val = tf.constant(init_val, operand.dtype)
out = tfxla.reduce_window(operand, init_val,
reducer_fn, window_dimensions,
window_strides, base_dilations=base_dilation,
window_dilations=window_dilation, padding=padding)
# TODO: implement shape inference for XlaReduceWindow
out.set_shape(_aval_to_tf_shape(_out_aval))
return out
def _reduce_window(operand, init_value, *, jaxpr, consts, window_dimensions,
window_strides, padding, base_dilation, window_dilation,
_in_avals, _out_aval):
"""TensorFlow implementation of reduce_window.
Args:
operand: N dimensional array containing elements of type T
init_value: starting value of the reduction
jaxpr: the jaxpr corresponding to the reduction function
consts: the constants associated with jaxpr.
window_dimensions: array of integers for window dimension values
window_strides: array of integers for window stride values
padding: array of pairs of integers for padding values
base_dilation: array of integers for base dilation values
window_dilation: array of integers for window dilation values
Returns:
The reduced operand.
"""
assert len(consts) == 0, "Reduction computation cannot have constants"
def reducer(arg1: TfVal, arg2: TfVal) -> TfVal:
closed_jaxpr = core.ClosedJaxpr(jaxpr, consts)
res, = _interpret_jaxpr(closed_jaxpr, arg1, arg2)
return res
return _common_reduce_window(
operand, init_value, reducer, window_dimensions, window_strides, padding,
base_dilation, window_dilation, _in_avals, _out_aval
)
# _try_tf_pool returns a Tensor when it succeeds, or a string describing why
# it did not succeed otherwise. It currently only supports reduce_window_max
# and reduce_window_sum.
# TODO(bchetioui): this function is not exhaustive wrt which
# reduce_window_max or reduce_window_sum cases can be translated into a call to
# max_pool or avg_pool. Further investigation is needed to fully flesh it out.
def _try_tf_pool(op_name, operand, window_dimensions, window_strides, padding,
base_dilation, window_dilation) -> Union[str, TfVal]:
  # Contrary to the main path, tf.int8 is actually a valid type for
# tf.nn.max_pool.
if op_name == "reduce_window_max" and operand.dtype in [
tf.bool, tf.uint32, tf.uint64, tf.complex64, tf.complex128
]:
return f"tf.nn.max_pool does not support operands of type {operand.dtype}"
if op_name == "reduce_window_sum" and operand.dtype not in [
tf.float16, tf.float32, tf.float64
]:
return f"tf.nn.avg_pool does not support operands of type {operand.dtype}"
has_batch_dim = window_dimensions[0] == 1
has_channel_dim = window_dimensions[-1] == 1
nb_spatial_dimensions = len(operand.shape) - has_batch_dim - has_channel_dim
if nb_spatial_dimensions < 1 or nb_spatial_dimensions > 3:
return ("TensorFlow can only handle pooling for arrays with 1, 2, or "
"3 spatial dimensions")
# TODO(bchetioui): does a simple conversion with another base dilation exist?
if list(base_dilation) != [1] * len(operand.shape):
return "Unimplemented support for base dilation"
# TODO(bchetioui): does a simple conversion with another window_dilation
# exist? The whole story seems similar to convolution.
if list(window_dilation) != [1] * len(operand.shape):
return "Unimplemented support for window dilation"
if list(padding) != [(0, 0)] * len(operand.shape):
return "Unimplemented support for padding"
# ReduceWindow in XLA takes an array of rank N as a parameter, but
# tf.nn.max_pool / tf.nn.avg_pool take an array of rank N+2, with a default
# shape of the form [batch_size] + input_spatial_shape + [num_channels]
tf_operand = operand
tf_window_dimensions = list(window_dimensions)
tf_window_strides = list(window_strides)
if not has_batch_dim:
tf_operand = tf.expand_dims(tf_operand, 0)
tf_window_dimensions = [1] + tf_window_dimensions
tf_window_strides = [1] + tf_window_strides
if not has_channel_dim:
tf_operand = tf.expand_dims(tf_operand, -1)
tf_window_dimensions.append(1)
tf_window_strides.append(1)
tf_data_format = "N" + "DHW"[-nb_spatial_dimensions:] + "C"
tf_padding = "VALID"
if op_name == "reduce_window_max":
result = tf.nn.max_pool(tf_operand, tf_window_dimensions, tf_window_strides,
tf_padding, tf_data_format)
elif op_name == "reduce_window_sum":
avg = tf.nn.avg_pool(tf_operand, tf_window_dimensions, tf_window_strides,
tf_padding, tf_data_format)
result = avg * np.prod(tf_window_dimensions)
else:
return f"Unimplemented support for {op_name}"
if not has_batch_dim:
result = tf.squeeze(result, 0)
if not has_channel_dim:
result = tf.squeeze(result, -1)
return result
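# For instance (sketch): a reduce_window_sum with a 2x2 window over a rank-2
# float32 array is emulated as tf.nn.avg_pool over the expanded [1, H, W, 1]
# operand with window [1, 2, 2, 1], multiplied back by
# np.prod([1, 2, 2, 1]) == 4 to turn the mean into a sum.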
def _specialized_reduce_window(reducer, identity, operand, *, window_dimensions,
window_strides, padding, base_dilation,
window_dilation, _in_avals, _out_aval,
name=None):
"""Wraps the TensorFlow reduce window operation based on a reducer and an
identity function defining the initial value of the reduction depending on
the dtype of the operand.
Args:
reducer: reduction function of type TfVal -> TfVal -> TfVal
identity: function that takes a TensorFlow dtype as a parameter and returns
the starting value of the reduction.
operand: N dimensional array containing elements of type T
window_dimensions: array of integers for window dimension values
window_strides: array of integers for window stride values
padding: array of pairs of integers for padding values
base_dilation: array of integers for base dilation values
window_dilation: array of integers for window dilation values
name: the name of the specialized reduce window primitive for which this
conversion function is called. This information may help to choose a
different conversion path (optional)
Returns:
The reduced operand.
"""
if name in ["reduce_window_max", "reduce_window_sum"]:
res = _try_tf_pool(name, operand, window_dimensions, window_strides,
padding, base_dilation, window_dilation)
if not isinstance(res, str):
return res
return _common_reduce_window(
operand, identity(operand.dtype), reducer, window_dimensions,
window_strides, padding, base_dilation, window_dilation, _in_avals,
_out_aval
)
def _get_max_identity(tf_dtype):
numpy_tf_dtype = tf_dtype.as_numpy_dtype
if tf_dtype == tf.bfloat16 or dtypes.issubdtype(numpy_tf_dtype, np.inexact):
return numpy_tf_dtype(-np.inf)
elif dtypes.issubdtype(numpy_tf_dtype, np.integer):
return dtypes.iinfo(numpy_tf_dtype).min
else:
assert dtypes.issubdtype(numpy_tf_dtype, np.bool_), (
f"{tf_dtype} has no defined max identity"
)
return False
def _get_min_identity(tf_dtype):
numpy_tf_dtype = tf_dtype.as_numpy_dtype
if tf_dtype == tf.bfloat16 or dtypes.issubdtype(numpy_tf_dtype, np.inexact):
return numpy_tf_dtype(np.inf)
elif dtypes.issubdtype(numpy_tf_dtype, np.integer):
return dtypes.iinfo(numpy_tf_dtype).max
else:
assert dtypes.issubdtype(numpy_tf_dtype, np.bool_), (
f"{tf_dtype} has no defined min identity"
)
return True
# pylint: disable=protected-access
tf_impl_with_avals[lax.reduce_window_sum_p] = (
functools.partial(_specialized_reduce_window, _add, lambda x: 0,
name="reduce_window_sum"))
tf_impl_with_avals[lax.reduce_window_min_p] = (
functools.partial(_specialized_reduce_window, tf.math.minimum,
_get_min_identity, name="reduce_window_min"))
tf_impl_with_avals[lax.reduce_window_max_p] = (
functools.partial(_specialized_reduce_window, tf.math.maximum,
_get_max_identity, name="reduce_window_max"))
tf_impl_with_avals[lax.reduce_window_p] = _reduce_window
# pylint: enable=protected-access
# We use lax_control_flow._cumred_tpu_translation_rule to convert cummax,
# cummin, cumsum and cumprod. This is efficient on TPU, but the complexity is
# O(n^2) on other backends. This may be implemented using associative_scan
# instead to favor different backends.
tf_impl_with_avals[lax_control_flow.cummin_p] = _convert_jax_impl(
functools.partial(lax_control_flow._cumred_tpu_translation_rule,
lax._reduce_window_min), multiple_results=False)
tf_impl_with_avals[lax_control_flow.cummax_p] = _convert_jax_impl(
functools.partial(lax_control_flow._cumred_tpu_translation_rule,
lax._reduce_window_max), multiple_results=False)
# TODO(bchetioui): cumsum and cumprod can be converted using pure TF ops for
# certain dtypes: bfloat16, float16, float32, float64, and int32. Other dtypes
# will fail when running in compiled mode, but are otherwise compatible with
# the operation. A non-XLA path can thus be defined for all dtypes, though the
# tests will crash.
tf_impl_with_avals[lax_control_flow.cumsum_p] = _convert_jax_impl(
functools.partial(lax_control_flow._cumred_tpu_translation_rule,
lax._reduce_window_sum), multiple_results=False)
tf_impl_with_avals[lax_control_flow.cumprod_p] = _convert_jax_impl(
functools.partial(lax_control_flow._cumred_tpu_translation_rule,
lax._reduce_window_prod), multiple_results=False)
def _select_and_scatter(
operand, source, init_value, select_jaxpr, select_consts, scatter_jaxpr,
scatter_consts, window_dimensions, window_strides, padding):
raise NotImplementedError("TODO: jax2tf can not convert _select_and_scatter")
tf_impl[lax.select_and_scatter_p] = _select_and_scatter
@functools.partial(bool_to_int8, argnums=(0, 1))
def _select_and_scatter_add(source, operand, *, select_prim, window_dimensions,
window_strides, padding, _in_avals, _out_aval):
if not _enable_xla:
raise _xla_path_disabled_error("select_and_scatter_add")
init_value = tf.zeros((), operand.dtype)
select_fn = (tf.function(tf_impl[select_prim], autograph=False)
.get_concrete_function(init_value, init_value))
scatter_fn = _add_fn.get_concrete_function(init_value, init_value)
out = tfxla.select_and_scatter(operand, window_dimensions, window_strides,
padding, source, init_value, select_fn,
scatter_fn)
out.set_shape(_aval_to_tf_shape(_out_aval))
return out
tf_impl_with_avals[lax.select_and_scatter_add_p] = _select_and_scatter_add
def _threefry2x32_jax_impl(*args: TfVal, _in_avals, _out_aval):
res = _convert_jax_impl(
functools.partial(jax._src.random._threefry2x32_lowering,
use_rolled_loops=False),
multiple_results=True)(*args, _in_avals=_in_avals, _out_aval=_out_aval)
return res
tf_impl_with_avals[jax.random.threefry2x32_p] = _threefry2x32_jax_impl
# Use the vmap implementation, otherwise on TPU the performance is really bad.
# With use_vmap=True on, we get about the same performance for JAX and jax2tf.
tf_impl_with_avals[random.random_gamma_p] = _convert_jax_impl(
functools.partial(jax._src.random._gamma_impl, use_vmap=True),
multiple_results=False)
def _gather_dimensions_proto(indices_shape, dimension_numbers):
proto = xla_data_pb2.GatherDimensionNumbers()
proto.offset_dims.extend(dimension_numbers.offset_dims)
proto.collapsed_slice_dims.extend(dimension_numbers.collapsed_slice_dims)
proto.start_index_map.extend(dimension_numbers.start_index_map)
assert indices_shape
proto.index_vector_dim = len(indices_shape) - 1
return proto
@functools.partial(bool_to_int8, argnums=0)
def _gather(operand, start_indices, *, dimension_numbers, slice_sizes,
_in_avals, _out_aval):
"""Tensorflow implementation of gather."""
del _in_avals
if not _enable_xla:
raise _xla_path_disabled_error("gather")
proto = _gather_dimensions_proto(start_indices.shape, dimension_numbers)
slice_sizes_tf = _eval_shape(slice_sizes)
out = tfxla.gather(operand, start_indices, proto, slice_sizes_tf, False)
out.set_shape(_aval_to_tf_shape(_out_aval))
return out
tf_impl_with_avals[lax.gather_p] = _gather
def _slice(operand, start_indices, limit_indices, strides):
if strides is None:
strides = [1] * len(start_indices)
slices = tuple(map(slice, start_indices, limit_indices, strides))
return operand[slices]
tf_impl[lax.slice_p] = _slice
def _dynamic_slice(operand, *start_indices, slice_sizes):
# Here we could use tf.slice. Similarly, for lax.gather we can sometimes use
# tf.gather. But those have different semantics for index-out-of-bounds than
# JAX (and XLA). We have tried to force compilation, by wrapping into
# tf.xla.experimental.compile, or tf.function(jit_compile=True), but
# those solutions are brittle because they do not work when nested into an
# outer compilation (see b/162814494 and b/163006262). They also do not
# survive well being put in a SavedModel. Hence, we now use TFXLA slicing
# and gather ops.
if not _enable_xla:
raise _xla_path_disabled_error("dynamic_slice")
res = tfxla.dynamic_slice(operand, tf.stack(start_indices),
size_indices=slice_sizes)
# TODO: implement shape inference for XlaDynamicSlice
res.set_shape(tuple(slice_sizes))
return res
tf_impl[lax.dynamic_slice_p] = _dynamic_slice
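# For example (sketch): lax.dynamic_slice on a length-4 vector with start
# index 3 and slice_sizes == (2,) clamps the start to 2 and returns the last
# two elements, matching XLA semantics, whereas a plain tf.slice with those
# arguments would fail with an out-of-range error.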
def _scatter_dimensions_proto(indices_shape, dimension_numbers):
proto = xla_data_pb2.ScatterDimensionNumbers()
proto.update_window_dims.extend(dimension_numbers.update_window_dims)
proto.inserted_window_dims.extend(dimension_numbers.inserted_window_dims)
proto.scatter_dims_to_operand_dims.extend(
dimension_numbers.scatter_dims_to_operand_dims)
assert indices_shape
proto.index_vector_dim = len(indices_shape) - 1
return proto
def _scatter(operand, scatter_indices, updates, *,
update_jaxpr, update_consts,
dimension_numbers, indices_are_sorted, unique_indices,
_in_avals: Sequence[core.AbstractValue],
_out_aval: core.AbstractValue):
del unique_indices, _in_avals
assert len(update_consts) == 0, "Update computation cannot have constants"
if not _enable_xla:
raise _xla_path_disabled_error("scatter")
proto = _scatter_dimensions_proto(scatter_indices.shape, dimension_numbers)
def update_computation(arg1: TfVal, arg2: TfVal) -> TfVal:
closed_jaxpr = core.ClosedJaxpr(update_jaxpr, update_consts)
res, = _interpret_jaxpr(closed_jaxpr, arg1, arg2)
return res
o_spec = tf.TensorSpec((), dtype=operand.dtype)
xla_update_computation = (
tf.function(update_computation, autograph=False).get_concrete_function(o_spec, o_spec))
out = tfxla.scatter(operand, scatter_indices, updates, xla_update_computation, proto,
indices_are_sorted=indices_are_sorted)
# TODO: implement shape analysis for XlaScatter
out.set_shape(_aval_to_tf_shape(_out_aval))
return out
tf_impl_with_avals[lax.scatter_p] = _scatter
tf_impl_with_avals[lax.scatter_min_p] = _scatter
tf_impl_with_avals[lax.scatter_max_p] = _scatter
tf_impl_with_avals[lax.scatter_mul_p] = _scatter
tf_impl_with_avals[lax.scatter_add_p] = _scatter
def _dynamic_update_slice(operand, update, *start_indices):
if not _enable_xla:
raise _xla_path_disabled_error("dynamic_update_slice")
return tfxla.dynamic_update_slice(operand, update, tf.stack(start_indices))
tf_impl[lax.dynamic_update_slice_p] = _dynamic_update_slice
def _cond(index: TfVal, *operands: TfVal,
branches: Sequence[core.ClosedJaxpr],
linear: Sequence[bool]) -> Sequence[TfVal]:
del linear
# tf.cond needs lambdas with no arguments.
branches_tf = [functools.partial(_interpret_jaxpr, jaxpr, *operands)
for jaxpr in branches]
return tf.switch_case(index, branches_tf)
tf_impl[lax_control_flow.cond_p] = _cond
def _while(*args: TfVal, cond_nconsts: int, cond_jaxpr: core.ClosedJaxpr,
body_nconsts: int, body_jaxpr: core.ClosedJaxpr) -> Sequence[TfVal]:
cond_consts, body_consts, init_carry = util.split_list(args, [cond_nconsts,
body_nconsts])
if cond_jaxpr.out_avals[0].shape: # type: ignore[attr-defined]
    # The conditional is not a scalar; this must be a batched while
return _batched_cond_while(*args,
cond_nconsts=cond_nconsts, cond_jaxpr=cond_jaxpr,
body_nconsts=body_nconsts, body_jaxpr=body_jaxpr)
# The conditional must return a single value to TF
def cond_tf_func(*args: TfVal) -> TfVal:
pred, = _interpret_jaxpr(cond_jaxpr, *cond_consts, *args)
return pred
body_tf_func = functools.partial(_interpret_jaxpr, body_jaxpr, *body_consts)
return tf.while_loop(cond_tf_func, body_tf_func, init_carry)
def _batched_cond_while(*args: TfVal,
cond_nconsts: int, cond_jaxpr: core.ClosedJaxpr,
body_nconsts: int, body_jaxpr: core.ClosedJaxpr
) -> Sequence[TfVal]:
"""Interprets a while_loop with a batched condition.
A batched while has a conditional that returns a tensor of booleans, and
a body that returns a list of tensors whose leading dimensions match those
of the conditional tensor.
  We need to turn it into a while with a scalar boolean conditional. We will
expand the loop carry to include a prefix with the current tensor boolean
condition. We prepend to the loop the first calculation of the tensor boolean
condition. The loop condition will use a "reduce_any" to calculate a scalar
boolean from the tensor boolean condition. The end of the loop body will
compute the new carry using a "tf.where", and we compute the new tensor
boolean condition.
"""
cond_consts, body_consts, init_carry = util.split_list(args, [cond_nconsts,
body_nconsts])
# Initial computation of batched condition
init_pred_b, = _interpret_jaxpr(cond_jaxpr, *cond_consts, *init_carry)
assert init_pred_b is not core.unit
def new_cond_tf_func(pred_b: TfVal, *carry: TfVal) -> TfVal:
pred = tf.reduce_any(pred_b, axis=list(range(len(pred_b.shape))))
return pred
def new_body_tf_func(pred_b: TfVal, *carry: TfVal) -> Sequence[TfVal]:
new_carry: Sequence[TfVal] = _interpret_jaxpr(body_jaxpr,
*body_consts, *carry)
def select_one_carry(new_c: TfVal, c: TfVal) -> TfVal:
pred_b_bcast = _broadcast_in_dim(pred_b,
shape=new_c.shape,
broadcast_dimensions=list(range(len(pred_b.shape))))
return tf.where(pred_b_bcast, new_c, c)
selected_carry: Sequence[TfVal] = list(
util.safe_map(select_one_carry, new_carry, carry))
next_pred_b, = _interpret_jaxpr(cond_jaxpr, *cond_consts, *selected_carry)
return (next_pred_b, *selected_carry)
_, *res_carry = tf.while_loop(new_cond_tf_func, new_body_tf_func,
(init_pred_b, *init_carry))
return res_carry
tf_impl[lax_control_flow.while_p] = _while
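# Concretely (sketch): after jax.vmap over a batch of size B, cond_jaxpr
# returns a boolean tensor of shape (B,). The rewritten loop carries
# (pred_b, *carry), keeps iterating while tf.reduce_any(pred_b) is True, and
# for batch members whose predicate has already become False, tf.where keeps
# their old carry values unchanged on every later iteration.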
# We use the scan impl rule to rewrite in terms of while.
tf_impl_with_avals[lax_control_flow.scan_p] = _convert_jax_impl(lax_control_flow._scan_impl)
def _top_k(operand: TfVal, k: int) -> Tuple[TfVal, TfVal]:
# Some types originally incompatible with tf.math.top_k can be promoted
# to a compatible type without loss of precision.
def promote_tf_dtype(tf_dtype):
if tf_dtype in [tf.bool, tf.uint8, tf.uint16]:
return tf.uint32
if tf_dtype in [tf.int8, tf.int16]:
return tf.int32
if tf_dtype is tf.float16:
return tf.float32
return None
conversion_dtype = promote_tf_dtype(operand.dtype)
if conversion_dtype:
values, indices = tf.math.top_k(tf.dtypes.cast(operand, conversion_dtype),
k=k, sorted=True)
return tf.dtypes.cast(values, operand.dtype), indices
else:
return tf.math.top_k(operand, k=k, sorted=True)
tf_impl[lax.top_k_p] = _top_k
def _sort(*operands: TfVal, dimension: int, is_stable: bool,
num_keys: int) -> Tuple[TfVal, ...]:
if not _enable_xla:
raise _xla_path_disabled_error("sort")
assert 1 <= num_keys <= len(operands)
assert all([operands[0].shape == op.shape for op in operands[1:]])
assert 0 <= dimension < len(
operands[0].shape
), f"Invalid {dimension} for ndim {len(operands[0].shape)}"
# The comparator is a 2N-argument TF function, with arguments [2k] and [2k +1]
# corresponding to two scalars from operand[k].
def lexicographic_comparator_old(*tf_args: TfVal) -> TfVal:
assert len(tf_args) == 2 * len(operands)
# We build a comparison:
# arg[0] < arg[1] or (arg[0] == arg[1] and (arg[2] < arg[3] or ...))
# all the way to arg[2 * num_keys - 2] < arg[2 * num_keys - 1]
inside_comparison = None
for key_idx in range(num_keys - 1, -1, -1):
a = tf_args[2 * key_idx]
b = tf_args[2 * key_idx + 1]
a_lt_b = tf.math.less(a, b)
if inside_comparison is None:
inside_comparison = a_lt_b
else:
inside_comparison = tf.math.logical_or(
a_lt_b, tf.math.logical_and(tf.math.equal(a, b), inside_comparison))
return inside_comparison
comparator_spec: List[tf.TensorSpec] = []
comparator_jax_in_avals: List[core.AbstractValue] = []
for op in operands:
o_spec = tf.TensorSpec((), dtype=op.dtype)
comparator_spec.extend([o_spec, o_spec])
o_aval = core.ShapedArray((), to_jax_dtype(op.dtype))
comparator_jax_in_avals.extend([o_aval, o_aval])
# Use the same comparator that JAX uses when compiling to XLA, to get the
# proper NaN/Inf total order, and the lexicographic ordering.
# The comparator is a 2N-argument TF function, with arguments [2k] and [2k +1]
# corresponding to two scalars from operand[k].
def lexicographic_comparator(*tf_args: TfVal) -> TfVal:
return _convert_jax_impl(
lax._sort_lt_comparator, multiple_results=False)(
*tf_args,
_in_avals=comparator_jax_in_avals,
_out_aval=core.ShapedArray((), np.bool_),
num_keys=num_keys)
xla_comparator_computation = (
tf.function(lexicographic_comparator,
autograph=False).get_concrete_function(*comparator_spec))
results = tfxla.variadic_sort(operands, dimension=dimension,
is_stable=is_stable,
comparator=xla_comparator_computation)
return results
tf_impl[lax.sort_p] = _sort
def _fft(x, fft_type, fft_lengths):
FFT, IFFT, RFFT, IRFFT = list(map(xla_client.FftType, [0, 1, 2, 3]))
if fft_type == IRFFT:
expected_lengths = x.shape[-len(fft_lengths):-1] + ((x.shape[-1] - 1) * 2,)
else:
expected_lengths = x.shape[-len(fft_lengths):]
if expected_lengths != fft_lengths:
raise NotImplementedError(
f"Unsupported fft_lengths={fft_lengths} for fft_type={fft_type} of "
f"array with shape={x.shape}.")
tf_funcs = {FFT: [tf.signal.fft, tf.signal.fft2d, tf.signal.fft3d],
IFFT: [tf.signal.ifft, tf.signal.ifft2d, tf.signal.ifft3d],
RFFT: [tf.signal.rfft, tf.signal.rfft2d, tf.signal.rfft3d],
IRFFT: [tf.signal.irfft, tf.signal.irfft2d, tf.signal.irfft3d]}
return tf_funcs[fft_type][len(fft_lengths) - 1](x)
tf_impl[lax_fft.fft_p] = _fft
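# Example of the length check above (sketch): for an IRFFT with
# fft_lengths == (8,), the complex input must have a trailing dimension of 5,
# since (5 - 1) * 2 == 8, and the call is dispatched to tf.signal.irfft; for
# FFT/IFFT/RFFT the trailing dimensions of x must equal fft_lengths directly.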
def _qr(operand, full_matrices):
return tf.linalg.qr(operand, full_matrices=full_matrices)
tf_impl[lax_linalg.qr_p] = _qr
def _svd(operand, full_matrices, compute_uv):
result = tf.linalg.svd(operand, full_matrices, compute_uv)
if not compute_uv:
return result,
s, u, v = result
return s, u, tf.linalg.adjoint(v)
tf_impl[lax_linalg.svd_p] = _svd
def _eig(operand: TfVal, compute_left_eigenvectors: bool,
compute_right_eigenvectors: bool):
if compute_left_eigenvectors and compute_right_eigenvectors:
# TODO(bchetioui): didn't find a 100% reliable, easy and satisfying way to
# sort the left eigenvectors in the right order. The jax.numpy.linalg API
# suggests to me that left eigenvectors are anyway seldom used, so I
# think it is acceptable to leave as unimplemented for now.
msg = ("Conversion of eig is not implemented when both "
"compute_left_eigenvectors and compute_right_eigenvectors are set "
"to True.")
raise NotImplementedError(msg)
elif not (compute_left_eigenvectors or compute_right_eigenvectors):
return tuple([tf.linalg.eigvals(operand)])
elif compute_right_eigenvectors:
return tuple(tf.linalg.eig(operand))
else: # compute_left_eigenvectors == True
wH, vl = tf.linalg.eig(tf.linalg.adjoint(operand))
wHH = tf.math.conj(wH)
return tuple([wHH, vl])
tf_impl[lax_linalg.eig_p] = _eig
def _eigh(operand: TfVal, lower: bool):
if operand.shape[-1] == 0:
v, w = operand, tf.reshape(operand, operand.shape[:-1])
else:
if not lower:
operand = tf.linalg.adjoint(operand)
w, v = tf.linalg.eigh(operand)
cast_type = { tf.complex64: tf.float32
, tf.complex128: tf.float64 }.get(operand.dtype)
if cast_type is not None:
w = tf.cast(w, cast_type)
return v, w
tf_impl[lax_linalg.eigh_p] = _eigh
def _lu(operand: TfVal, _in_avals, _out_aval):
return _convert_jax_impl(lax_linalg._lu_python)(operand, _in_avals=_in_avals,
_out_aval=_out_aval)
tf_impl_with_avals[lax_linalg.lu_p] = _lu
def _triangular_solve(a: TfVal, b: TfVal, *, left_side: bool, lower: bool,
transpose_a: bool, conjugate_a: bool,
unit_diagonal: bool):
if unit_diagonal:
a = tf.linalg.set_diag(a, tf.ones(a.shape[:-1], dtype=a.dtype))
if not left_side:
rank = len(a.shape)
transpose_dimensions = list(range(rank - 2)) + [rank - 1, rank - 2]
a = tf.transpose(a, transpose_dimensions)
b = tf.transpose(b, transpose_dimensions)
lower = not lower
# adjoint == transpose for real dtypes, so special care need only be taken
# for complex types.
if a.dtype in [tf.complex64, tf.complex128]:
if (transpose_a and not conjugate_a) or (not transpose_a and conjugate_a):
a = tf.math.conj(a)
result = tf.linalg.triangular_solve(a, b, lower=lower, adjoint=transpose_a)
if not left_side:
result = tf.transpose(result, transpose_dimensions)
return result
tf_impl[lax_linalg.triangular_solve_p] = _triangular_solve
def _linear_solve(*args: TfVal, const_lengths, jaxprs, _in_avals, _out_aval):
return _convert_jax_impl(lax_control_flow._custom_linear_solve_impl)(
*args, const_lengths=const_lengths, jaxprs=jaxprs, _in_avals=_in_avals, _out_aval=_out_aval)
tf_impl_with_avals[lax_control_flow.linear_solve_p] = _linear_solve
def _custom_jvp_call_jaxpr(*args: TfVal,
fun_jaxpr: core.ClosedJaxpr,
jvp_jaxpr_thunk: Callable,
num_consts: int) -> Sequence[TfVal]:
# TODO(necula): ensure that there is no AD transformation in scope
return _interpret_jaxpr(fun_jaxpr, *args)
tf_impl[custom_derivatives.custom_jvp_call_jaxpr_p] = _custom_jvp_call_jaxpr
def _custom_vjp_call_jaxpr(*args: TfVal,
fun_jaxpr: core.ClosedJaxpr,
**_) -> Sequence[TfVal]:
# TODO(necula): ensure that there is no AD transformation in scope
return _interpret_jaxpr(fun_jaxpr, *args)
tf_impl[custom_derivatives.custom_vjp_call_jaxpr_p] = _custom_vjp_call_jaxpr
def _custom_lin(*args: TfVal, **_) -> Sequence[TfVal]:
raise TypeError("can't apply forward-mode autodiff (jvp) to a custom_vjp "
"function.")
tf_impl[ad.custom_lin_p] = _custom_lin
def split_to_logical_devices(
tensor: TfVal,
partition_dimensions: pxla.PartitionsOrReplicated):
"""Like TPUMPStrategy.experimental_split_to_logical_devices.
For jax2tf purposes we want to avoid needing to thread the `strategy` object
through the generated computation. It seems that the original function needs
the strategy object only for error checking, which we assume is done upstream
by JAX.
Args:
tensor: Input tensor to annotate.
partition_dimensions: A list of integers, with one integer per tensor
dimension, specifying in how many parts the dimension should be split. The
product of integers must equal the number of devices per replica.
  Returns:
    An annotated tensor.
"""
# This corresponds to the sharding annotations in
# xla_bridge._sharding_to_proto.
if partition_dimensions is None:
return xla_sharding.replicate(tensor, use_sharding_op=True)
num_partition_splits = np.prod(partition_dimensions)
tile_assignment = np.arange(num_partition_splits).reshape(
partition_dimensions)
return xla_sharding.tile(tensor, tile_assignment, use_sharding_op=True)
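# A quick sketch of the annotation: with partition_dimensions == [2, 1, 4],
# num_partition_splits == 8 and tile_assignment == np.arange(8).reshape(2, 1, 4),
# i.e. the tensor is split in 2 along dimension 0 and in 4 along dimension 2
# across 8 logical devices; partition_dimensions == None replicates instead.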
def _sharded_call(f: lu.WrappedFun, vals: Sequence[TfVal],
in_parts: Sequence[pxla.PartitionsOrReplicated],
out_parts_thunk,
**_) -> Sequence[Tuple[TfVal, core.AbstractValue]]:
sharded_vals = util.safe_map(split_to_logical_devices, vals, in_parts)
vals_out = f.call_wrapped(*sharded_vals)
out_parts_flat = out_parts_thunk()
assert len(out_parts_flat) == len(vals_out), f"expected {len(out_parts_flat)} == {len(vals_out)}"
sharded_vals_out = [
(split_to_logical_devices(val, val_part), val_aval)
for (val, val_aval), val_part in util.safe_zip(vals_out, out_parts_flat)
]
return sharded_vals_out
def _sharding_constraint(arg: TfVal, *,
partitions: pxla.PartitionsOrReplicated):
return split_to_logical_devices(arg, partitions)
tf_impl[sharded_jit.sharding_constraint_p] = _sharding_constraint
def _register_checkpoint_pytrees():
"""Registers TF custom container types as pytrees."""
m = tf.Module()
# The types here are automagically changed by TensorFlow's checkpointing
# infrastructure.
m.a = (tf.Module(), tf.Module())
m.b = [tf.Module(), tf.Module()]
m.c = {"a": tf.Module()}
tuple_wrapper = type(m.a)
list_wrapper = type(m.b)
dict_wrapper = type(m.c)
# TF AutoTrackable swaps container types out for wrappers.
assert tuple_wrapper is not tuple
assert list_wrapper is not list
assert dict_wrapper is not dict
jax.tree_util.register_pytree_node(
tuple_wrapper, lambda xs: (tuple(xs), None), lambda _, xs: tuple(xs))
jax.tree_util.register_pytree_node(
list_wrapper, lambda xs: (tuple(xs), None), lambda _, xs: list(xs))
jax.tree_util.register_pytree_node(
dict_wrapper,
lambda s: (tuple(s.values()), tuple(s.keys())),
lambda k, xs: dict(zip(k, xs)))
_register_checkpoint_pytrees()
|
the-stack_0_10891 | # Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""FasterRcnn anchor generator."""
import numpy as np
class AnchorGenerator():
"""Anchor generator for FasterRcnn."""
def __init__(self, base_size, scales, ratios, scale_major=True, ctr=None):
"""Anchor generator init method."""
self.base_size = base_size
self.scales = np.array(scales)
self.ratios = np.array(ratios)
self.scale_major = scale_major
self.ctr = ctr
self.base_anchors = self.gen_base_anchors()
def gen_base_anchors(self):
"""Generate a single anchor."""
w = self.base_size
h = self.base_size
if self.ctr is None:
x_ctr = 0.5 * (w - 1)
y_ctr = 0.5 * (h - 1)
else:
x_ctr, y_ctr = self.ctr
h_ratios = np.sqrt(self.ratios)
w_ratios = 1 / h_ratios
if self.scale_major:
ws = (w * w_ratios[:, None] * self.scales[None, :]).reshape(-1)
hs = (h * h_ratios[:, None] * self.scales[None, :]).reshape(-1)
else:
ws = (w * self.scales[:, None] * w_ratios[None, :]).reshape(-1)
hs = (h * self.scales[:, None] * h_ratios[None, :]).reshape(-1)
base_anchors = np.stack(
[
x_ctr - 0.5 * (ws - 1), y_ctr - 0.5 * (hs - 1),
x_ctr + 0.5 * (ws - 1), y_ctr + 0.5 * (hs - 1)
],
axis=-1).round()
return base_anchors
def _meshgrid(self, x, y, row_major=True):
"""Generate grid."""
xx = np.repeat(x.reshape(1, len(x)), len(y), axis=0).reshape(-1)
yy = np.repeat(y, len(x))
if row_major:
return xx, yy
return yy, xx
def grid_anchors(self, featmap_size, stride=16):
"""Generate anchor list."""
base_anchors = self.base_anchors
feat_h, feat_w = featmap_size
shift_x = np.arange(0, feat_w) * stride
shift_y = np.arange(0, feat_h) * stride
shift_xx, shift_yy = self._meshgrid(shift_x, shift_y)
shifts = np.stack([shift_xx, shift_yy, shift_xx, shift_yy], axis=-1)
shifts = shifts.astype(base_anchors.dtype)
# first feat_w elements correspond to the first row of shifts
# add A anchors (1, A, 4) to K shifts (K, 1, 4) to get
# shifted anchors (K, A, 4), reshape to (K*A, 4)
all_anchors = base_anchors[None, :, :] + shifts[:, None, :]
all_anchors = all_anchors.reshape(-1, 4)
return all_anchors
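# A small worked example (sketch): AnchorGenerator(base_size=8, scales=[1.],
# ratios=[1.]) produces the single base anchor [0., 0., 7., 7.], and
# grid_anchors((2, 2), stride=16) shifts it over the feature map, returning
#   [ 0,  0,  7,  7], [16,  0, 23,  7], [ 0, 16,  7, 23], [16, 16, 23, 23]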
|
the-stack_0_10892 | import os
import json
from pytesseract import image_to_data, image_to_string, Output
from ocr_utils import list_files_path, get_files_list
from eval_utils import get_accuracy
from PIL import Image, ImageDraw, ImageFont
class ocr:
def __init__(self, input_dir, output_dir):
self.input_dir = input_dir
self.output_dir = output_dir
self.input_image_list = []
self.output_image_list = []
def load_data(self):
files_path = list_files_path(self.input_dir)
self.input_image_list = get_files_list(files_path)
os.makedirs(os.path.join(self.output_dir, 'texts'), exist_ok=True)
os.makedirs(os.path.join(self.output_dir, 'images'), exist_ok=True)
os.makedirs(os.path.join(self.output_dir, 'jsons'), exist_ok=True)
for im in self.input_image_list:
base_name = os.path.basename(im)
file_name = os.path.splitext(base_name)[0] + "___tess."
self.output_image_list.append(file_name)
def predict(self, output_type='txt'):
for im_in, im_out in zip(self.input_image_list, self.output_image_list):
if output_type == 'txt':
output_path = os.path.join(
os.path.join(self.output_dir, 'texts'), im_out
) + 'txt'
tf = open(output_path, "wt")
result = image_to_string(im_in + "tif", config='--oem 1')
tf.write(result)
tf.close()
elif output_type == 'json':
output_path = os.path.join(
os.path.join(self.output_dir, 'jsons'), im_out
) + 'json'
tf = open(output_path, "w")
dd = image_to_data(im_in + "tif", output_type=Output.DICT)
json.dump(dd, tf, indent=2)
tf.close()
else:
print("ERROR: Unknown format!")
def eval(self, method="char"):
accuracy = []
for gt, of in zip(self.input_image_list, self.output_image_list):
# output_path = self.output_dir + "/texts/" + of + "txt"
output_path = os.path.join(
os.path.join(self.output_dir, "texts"), of
) + "txt"
accuracy.append(get_accuracy(gt + "txt", output_path, method))
try:
print(
"%s based accuracy : %.2f" %
(method, sum(accuracy) / len(accuracy))
)
except TypeError:
print("ERROR: Can't measure accuracy!")
def draw_bb(self):
for im_in, im_out in zip(self.input_image_list, self.output_image_list):
img = Image.open(im_in + 'tif').convert("RGB")
draw = ImageDraw.Draw(img)
font = ImageFont.truetype('Pillow/Tests/fonts/FreeMono.ttf', 40)
json_path = os.path.join(
os.path.join(self.output_dir, 'jsons'), im_out
) + 'json'
tf = open(json_path, "r")
file_json = json.load(tf)
for i in range(len(file_json["level"])):
if file_json["text"][i] != "":
x1 = file_json["left"][i]
y1 = file_json["top"][i]
x2 = file_json["left"][i] + file_json["width"][i]
y2 = file_json["top"][i] + file_json["height"][i]
draw.text(
(x1, y1), file_json["text"][i], fill='red', font=font
)
draw.rectangle(((x1, y1), (x2, y2)), outline='red')
output_path = os.path.join(
os.path.join(self.output_dir, 'images'), im_out
) + 'jpg'
img.save(output_path) |
the-stack_0_10893 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG that uses Google AutoML services.
"""
import os
import airflow
from airflow import models
from airflow.gcp.hooks.automl import CloudAutoMLHook
from airflow.gcp.operators.automl import (
AutoMLCreateDatasetOperator, AutoMLDeleteDatasetOperator, AutoMLDeleteModelOperator,
AutoMLImportDataOperator, AutoMLTrainModelOperator,
)
GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "your-project-id")
GCP_AUTOML_LOCATION = os.environ.get("GCP_AUTOML_LOCATION", "us-central1")
GCP_AUTOML_TRACKING_BUCKET = os.environ.get(
"GCP_AUTOML_TRACKING_BUCKET",
"gs://automl-video-datasets/youtube_8m_videos_animal_tiny.csv",
)
# Example values
DATASET_ID = "VOT123456789"
# Example model
MODEL = {
"display_name": "auto_model_1",
"dataset_id": DATASET_ID,
"video_object_tracking_model_metadata": {},
}
# Example dataset
DATASET = {
"display_name": "test_video_tracking_dataset",
"video_object_tracking_dataset_metadata": {},
}
IMPORT_INPUT_CONFIG = {"gcs_source": {"input_uris": [GCP_AUTOML_TRACKING_BUCKET]}}
default_args = {"start_date": airflow.utils.dates.days_ago(1)}
extract_object_id = CloudAutoMLHook.extract_object_id
# Example DAG for AutoML Video Intelligence Object Tracking
with models.DAG(
"example_automl_video_tracking",
default_args=default_args,
schedule_interval=None, # Override to match your needs
user_defined_macros={"extract_object_id": extract_object_id},
) as example_dag:
create_dataset_task = AutoMLCreateDatasetOperator(
task_id="create_dataset_task", dataset=DATASET, location=GCP_AUTOML_LOCATION
)
dataset_id = (
'{{ task_instance.xcom_pull("create_dataset_task", key="dataset_id") }}'
)
import_dataset_task = AutoMLImportDataOperator(
task_id="import_dataset_task",
dataset_id=dataset_id,
location=GCP_AUTOML_LOCATION,
input_config=IMPORT_INPUT_CONFIG,
)
MODEL["dataset_id"] = dataset_id
create_model = AutoMLTrainModelOperator(
task_id="create_model", model=MODEL, location=GCP_AUTOML_LOCATION
)
model_id = "{{ task_instance.xcom_pull('create_model', key='model_id') }}"
delete_model_task = AutoMLDeleteModelOperator(
task_id="delete_model_task",
model_id=model_id,
location=GCP_AUTOML_LOCATION,
project_id=GCP_PROJECT_ID,
)
delete_datasets_task = AutoMLDeleteDatasetOperator(
task_id="delete_datasets_task",
dataset_id=dataset_id,
location=GCP_AUTOML_LOCATION,
project_id=GCP_PROJECT_ID,
)
create_dataset_task >> import_dataset_task >> create_model >> \
delete_model_task >> delete_datasets_task
|
the-stack_0_10895 | # Copyright 2018 The Cornac Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Example for hyper-parameter searching with Matrix Factorization"""
import numpy as np
import cornac
from cornac.datasets import movielens
from cornac.eval_methods import RatioSplit
from cornac.hyperopt import Discrete, Continuous
from cornac.hyperopt import GridSearch, RandomSearch
# Load MovieLens 100K ratings
ml_100k = movielens.load_feedback(variant="100K")
# Define an evaluation method to split feedback into train, validation and test sets
ratio_split = RatioSplit(data=ml_100k, test_size=0.1, val_size=0.1, verbose=True)
# Instantiate MAE and RMSE for evaluation
mae = cornac.metrics.MAE()
rmse = cornac.metrics.RMSE()
# Define a base MF model with fixed hyper-parameters
mf = cornac.models.MF(max_iter=20, learning_rate=0.01, early_stop=True, verbose=True)
# Wrap MF model inside GridSearch along with the searching space
gs_mf = GridSearch(
model=mf,
space=[
Discrete("k", [10, 30, 50]),
Discrete("use_bias", [True, False]),
Discrete("lambda_reg", [1e-1, 1e-2, 1e-3, 1e-4]),
],
metric=rmse,
eval_method=ratio_split,
)
# Wrap MF model inside RandomSearch along with the searching space, try 30 times
rs_mf = RandomSearch(
model=mf,
space=[
Discrete("k", [10, 30, 50]),
Discrete("use_bias", [True, False]),
Continuous("lambda_reg", low=1e-4, high=1e-1),
],
metric=rmse,
eval_method=ratio_split,
n_trails=30,
)
# Put everything together into an experiment and run it
cornac.Experiment(
eval_method=ratio_split,
models=[gs_mf, rs_mf],
metrics=[mae, rmse],
user_based=False,
).run()
|
the-stack_0_10896 | ##########################################################################
#
# Copyright (c) 2012, John Haddon. All rights reserved.
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import functools
import IECore
import Gaffer
import GafferUI
Gaffer.Metadata.registerNode(
Gaffer.Expression,
"description",
"""
Utility node for computing values via
scripted expressions.
""",
"layout:customWidget:Expression:widgetType", "GafferUI.ExpressionUI.ExpressionWidget",
plugs = {
# This plug is added by the expressionCompatibility.py
# config file to provide compatibility for loading old
# files, so we must hide it.
"engine" : (
"plugValueWidget:type", "",
"nodule:type", "",
),
# This plug is added by the expressionCompatibility.py
# config file to provide compatibility for loading old
# files, so we must hide it.
"expression" : (
"plugValueWidget:type", "",
"nodule:type", "",
),
"user" : (
"plugValueWidget:type", "",
),
}
)
# PlugValueWidget popup menu for creating expressions
##########################################################################
def __createExpression( plug, language ) :
node = plug.node()
parentNode = node.ancestor( Gaffer.Node )
with Gaffer.UndoScope( node.scriptNode() ) :
expressionNode = Gaffer.Expression()
parentNode.addChild( expressionNode )
expressionNode.setExpression(
Gaffer.Expression.defaultExpression( plug, language ),
language
)
__editExpression( plug )
def __editExpression( plug ) :
expressionNode = plug.getInput().node()
GafferUI.NodeEditor.acquire( expressionNode )
def __popupMenu( menuDefinition, plugValueWidget ) :
plug = plugValueWidget.getPlug()
if not isinstance( plug, Gaffer.ValuePlug ) :
return
node = plug.node()
if node is None or node.parent() is None :
return
input = plug.getInput()
if input is not None or not plugValueWidget._editable() or Gaffer.MetadataAlgo.readOnly( plug ) :
return
languages = [ l for l in Gaffer.Expression.languages() if Gaffer.Expression.defaultExpression( plug, l ) ]
if not languages :
return
menuDefinition.prepend( "/ExpressionDivider", { "divider" : True } )
for language in languages :
menuDefinition.prepend(
"/Create " + IECore.CamelCase.toSpaced( language ) + " Expression...",
{
"command" : functools.partial( __createExpression, plug, language )
}
)
__popupMenuConnection = GafferUI.PlugValueWidget.popupMenuSignal().connect( __popupMenu )
# ExpressionWidget
##########################################################################
class ExpressionWidget( GafferUI.Widget ) :
def __init__( self, node, **kw ) :
column = GafferUI.ListContainer( spacing = 4 )
GafferUI.Widget.__init__( self, column, **kw )
self.__node = node
with column :
with GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal, spacing = 4 ) :
GafferUI.Label( "Language" )
self.__languageMenu = GafferUI.MenuButton( "", menu = GafferUI.Menu( Gaffer.WeakMethod( self.__languageMenuDefinition ) ) )
self.__languageMenu.setEnabled( not Gaffer.MetadataAlgo.readOnly( node ) )
self.__textWidget = GafferUI.MultiLineTextWidget( role = GafferUI.MultiLineTextWidget.Role.Code )
self.__textWidget.setEditable( not Gaffer.MetadataAlgo.readOnly( node ) )
self.__activatedConnection = self.__textWidget.activatedSignal().connect( Gaffer.WeakMethod( self.__activated ) )
self.__editingFinishedConnection = self.__textWidget.editingFinishedSignal().connect( Gaffer.WeakMethod( self.__editingFinished ) )
self.__dropTextConnection = self.__textWidget.dropTextSignal().connect( Gaffer.WeakMethod( self.__dropText ) )
self.__contextMenuConnection = self.__textWidget.contextMenuSignal().connect( Gaffer.WeakMethod( self.__expressionContextMenu ) )
self.__messageWidget = GafferUI.MessageWidget()
self.__expressionChangedConnection = self.__node.expressionChangedSignal().connect( Gaffer.WeakMethod( self.__expressionChanged ) )
self.__errorConnection = self.__node.errorSignal().connect( Gaffer.WeakMethod( self.__error ) )
self.__update()
def node( self ) :
return self.__node
def textWidget( self ) :
return self.__textWidget
__expressionContextMenuSignal = Gaffer.Signal2()
## This signal is emitted whenever a popup menu
# for an ExpressionWidget is about to be shown.
# This provides an opportunity to customise the
# menu from external code. The signature for
# slots is ( menuDefinition, widget ), and slots
# should just modify the menu definition in place.
@classmethod
def expressionContextMenuSignal( cls ) :
return cls.__expressionContextMenuSignal
def __expressionContextMenuDefinition( self ) :
menuDefinition = IECore.MenuDefinition()
bookmarks = Gaffer.MetadataAlgo.bookmarks( self.__node.parent() )
def __bookmarkMenu( bookmarks ) :
bookmarkMenuDefinition = IECore.MenuDefinition()
def __walk( graphComponent, result ) :
if (
isinstance( graphComponent, Gaffer.ValuePlug ) and
self.__node.identifier( graphComponent ) and
not graphComponent.relativeName( graphComponent.node() ).startswith( "__" )
) :
result.append( graphComponent )
for c in graphComponent.children( Gaffer.Plug ) :
__walk( c, result )
for bookmark in bookmarks :
compatiblePlugs = []
__walk( bookmark, compatiblePlugs )
if not compatiblePlugs :
continue
for plug in compatiblePlugs :
label = "/" + bookmark.getName()
if len( compatiblePlugs ) > 1 :
label += "/" + plug.relativeName( bookmark )
bookmarkMenuDefinition.append(
label,
{
"command" : functools.partial( self.__textWidget.insertText, self.__node.identifier( plug ) ),
"active" : self.__textWidget.getEditable() and not Gaffer.MetadataAlgo.readOnly( self.__node['__expression'] ),
}
)
return bookmarkMenuDefinition
menuDefinition.append( "/Insert Bookmark", { "subMenu" : functools.partial( __bookmarkMenu, bookmarks ) } )
self.expressionContextMenuSignal()( menuDefinition, self )
return menuDefinition
def __expressionContextMenu( self, *unused ) :
menuDefinition = self.__expressionContextMenuDefinition()
if not len( menuDefinition.items() ) :
return False
title = self.__node.relativeName( self.__node.scriptNode() )
title = ".".join( [ IECore.CamelCase.join( IECore.CamelCase.split( x ) ) for x in title.split( "." ) ] )
self.____expressionContextMenu = GafferUI.Menu( menuDefinition, title = title )
self.____expressionContextMenu.popup()
return True
def __update( self ) :
expression = self.__node.getExpression()
self.__textWidget.setText( expression[0] )
self.__languageMenu.setText( IECore.CamelCase.toSpaced( expression[1] ) )
self.__messageWidget.clear()
self.__messageWidget.setVisible( False )
def __languageMenuDefinition( self ) :
currentLanguage = self.__node.getExpression()[1]
result = IECore.MenuDefinition()
for language in self.__node.languages() :
result.append(
"/" + IECore.CamelCase.toSpaced( language ),
{
"command" : functools.partial( Gaffer.WeakMethod( self.__changeLanguage ), language = language ),
"checkBox" : language == currentLanguage,
}
)
return result
def __changeLanguage( self, unused, language ) :
## \todo Can we do better? Maybe start with the default expression
# for the current output plugs?
self.__node.setExpression( "", language )
def __setExpression( self ) :
language = self.__node.getExpression()[1]
with Gaffer.UndoScope( self.__node.scriptNode() ) :
try :
self.__node.setExpression( self.__textWidget.getText(), language )
self.__messageWidget.setVisible( False )
except Exception as e :
self.__messageWidget.clear()
self.__messageWidget.setVisible( True )
self.__messageWidget.messageHandler().handle(
IECore.Msg.Level.Error, "Parse error", str( e )
)
def __expressionChanged( self, node ) :
self.__update()
def __activated( self, widget ) :
self.__setExpression()
def __editingFinished( self, widget ) :
self.__setExpression()
def __dropText( self, widget, dragData ) :
if isinstance( dragData, IECore.StringVectorData ) :
return repr( list( dragData ) )
elif isinstance( dragData, Gaffer.Plug ) :
name = self.__node.identifier( dragData )
return name if name else None
elif isinstance( dragData, Gaffer.Set ) :
if len( dragData ) == 1 :
return self.__dropText( widget, dragData[0] )
else :
return None
return None
# An error in the expression could occur during a compute triggered by a repaint.
# ( For example, if a user uses an expression to drive Backdrop text )
# If we forced a repaint right away, this would be a recursive repaint which could cause
# a Qt crash, so we wait for idle.
@GafferUI.LazyMethod()
def __error( self, plug, source, error ) :
self.__messageWidget.setVisible( True )
self.__messageWidget.messageHandler().handle( IECore.Msg.Level.Error, "Execution error", error )
|
the-stack_0_10897 | import requests
import logging
from .config import Config
from prometheus_client.core import Gauge
import base64
CONF = Config()
class SonarQubeClient:
def __init__(self, url, user_token, **kwargs):
if url.endswith("/"):
url = url[:-1]
self._url = url
self._user_token = user_token
self._basic_authen = base64.b64encode(("%s:" % self._user_token).encode("ascii")).decode("ascii")
self._authenticate_header = {"Authorization": "Basic %s" % self._basic_authen}
self._kwargs = kwargs
logging.debug("Initialized SonarQube: url: %s, userToken: ****, %s" % (self._url, self._kwargs))
def _request(self, endpoint):
res = requests.get("{}/{}".format(self._url, endpoint), headers=self._authenticate_header, **self._kwargs)
res.raise_for_status()
return res.json()
def get_projects(self, page_index=1, page_size=100):
return self._request(endpoint="api/components/search?qualifiers=TRK&p={}&ps={}".format(page_index, page_size))
def get_metrics(self):
return self._request(endpoint="api/metrics/search")
def get_measures(self, component_key, metric_key):
return self._request(endpoint="api/measures/component?component={}&metricKeys={}".format(component_key, metric_key))
class Metric:
def __init__(self):
self._key = None
self._values = []
self._description = None
self._domain = None
self._type = None
self._tranform = False
self._tranform_map = {}
@property
def key(self):
return self._key
@key.setter
def key(self, value):
self._key = value
@property
def values(self):
return self._values
@values.setter
def values(self, value):
self._values.extend(value)
@property
def description(self):
return self._description
@description.setter
def description(self, value):
self._description = value
@property
def domain(self):
return self._domain
@domain.setter
def domain(self, value):
self._domain = value
@property
def type(self):
return self._type
@type.setter
def type(self, value):
self._type = value
@property
def tranform(self):
return self._tranform
@tranform.setter
def tranform(self, value):
self._tranform = value
@property
def tranform_map(self):
return self._tranform_map
@tranform_map.setter
def tranform_map(self, value):
self._tranform_map = value
class SonarQubeCollector:
def __init__(self, sonar_client : SonarQubeClient):
self._sonar_client = sonar_client
self._cached_metrics = []
# initialize gauges
logging.info("Intitializing...")
self._metrics = {}
raw_metrics = self._sonar_client.get_metrics()["metrics"]
for raw_metric in raw_metrics:
metric = Metric()
for supported_m in CONF.supported_keys:
if "domain" in raw_metric and raw_metric["domain"] == supported_m["domain"] and raw_metric["key"] in supported_m["keys"]:
metric.domain = raw_metric["domain"]
metric.key = raw_metric["key"]
metric.type = raw_metric["type"]
if "description" in raw_metric:
metric.description = raw_metric["description"]
else:
metric.description = raw_metric["name"]
if "tranformKeys" in supported_m and raw_metric["key"] in supported_m["tranformKeys"].keys():
metric.tranform = True
metric.tranform_map = supported_m["tranformKeys"][raw_metric["key"]]
self._metrics[metric.key] = metric
self._queried_metrics = str()
self._gauges = {}
for _, m in self._metrics.items():
if m.tranform:
self._gauges[m.key] = Gauge (name="sonar_{}".format(m.key), documentation=m.description, labelnames=("key", "name", "domain", "type", "value"))
else:
self._gauges[m.key] = Gauge (name="sonar_{}".format(m.key), documentation=m.description, labelnames=("key", "name", "domain", "type"))
self._queried_metrics = "{},{}".format(m.key, self._queried_metrics)
logging.info("Initialized %s metrics." % len(self._metrics.keys()))
def collect(self):
return self._cached_metrics
def run(self):
logging.info("Collecting data from SonarQube...")
response = self._sonar_client.get_projects()
total_projects = int(response['paging']['total'])
logging.info("There are %s projects in SonarQube" % total_projects)
processed_projects = 0
page_index = 1
while processed_projects < total_projects:
projects = self._sonar_client.get_projects(page_index=page_index)["components"]
for p in projects:
measures = self._sonar_client.get_measures(component_key=p["key"], metric_key=self._queried_metrics)["component"]["measures"]
for measure in measures:
m = self._metrics[measure["metric"]]
value = measure["value"]
gauge = self._gauges[measure["metric"]]
if m.tranform:
value = m.tranform_map[measure["value"]]
gauge.labels(p["key"], p["name"], m.domain, m.type, measure["value"]).set(value)
else:
gauge.labels(p["key"], p["name"], m.domain, m.type).set(value)
processed_projects += 1
page_index += 1
logging.info("{} projects were processed, {} project remaining".format(processed_projects, (total_projects - processed_projects)))
data = []
for key, g in self._gauges.items():
data.extend(g.collect())
self._cached_metrics = data
logging.info("SonarQube's data collected")
|
the-stack_0_10898 | import torch
import torch.nn as nn
import math
from torch.autograd import Variable
def make_mlp(dim_list, activation='relu', batch_norm=True, dropout=0):
layers = []
# batch_norm=True
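# NOTE: the hard-coded value on the next line overrides the dropout argument passed to make_mlp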
dropout=0.25
for dim_in, dim_out in zip(dim_list[:-1], dim_list[1:]):
layers.append(nn.Linear(dim_in, dim_out))
if batch_norm:
layers.append(nn.BatchNorm1d(dim_out))
if activation == 'relu':
layers.append(nn.ReLU())
elif activation == 'leakyrelu':
layers.append(nn.LeakyReLU())
if dropout > 0:
layers.append(nn.Dropout(p=dropout))
return nn.Sequential(*layers)
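
# --- Illustrative sketch (editor addition) ---
# Shows the layer stack make_mlp() produces for a small, arbitrary dim_list; the
# sizes and batch shape below are example values, not part of the original code.
def _demo_make_mlp():
    mlp = make_mlp([16, 32, 8], activation='relu', batch_norm=True)
    # Expected layers: Linear(16->32), BatchNorm1d(32), ReLU, Dropout(0.25),
    #                  Linear(32->8),  BatchNorm1d(8),  ReLU, Dropout(0.25)
    x = torch.randn(4, 16)
    out = mlp(x)
    print(mlp)
    print(out.shape)  # torch.Size([4, 8])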
def get_noise(shape, noise_type):
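# NOTE: both branches allocate the noise tensor directly on the GPU via .cuda(),
# so a CUDA-capable device is required whenever this function is called.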
if noise_type == 'gaussian':
return torch.randn(*shape).cuda()
elif noise_type == 'uniform':
return torch.rand(*shape).sub_(0.5).mul_(2.0).cuda()
raise ValueError('Unrecognized noise type "%s"' % noise_type)
class PositionalEncoding(nn.Module):
"Implement the PE function."
def __init__(self, embedding_dim, dropout=0, obs_len=8):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
# Compute the positional encodings once in log space.
pe = torch.zeros(obs_len, embedding_dim)
position = torch.arange(0, obs_len).unsqueeze(1)
div_term = torch.exp(torch.arange(0, embedding_dim, 2) *
-(math.log(100.0) / embedding_dim))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
def forward(self, x):
x = x + Variable(self.pe,
requires_grad=False)
return self.dropout(x)
class Encoder(nn.Module):
"""Encoder is part of both TrajectoryGenerator and
TrajectoryDiscriminator"""
def __init__(
self, embedding_dim=64, h_dim=64, mlp_dim=1024, num_layers=1, obs_len=8,
dropout=0.0, pos_embed_flag=True
):
super(Encoder, self).__init__()
self.pos_embed = PositionalEncoding(embedding_dim)
self.h_dim = h_dim
self.embedding_dim = embedding_dim
self.num_layers = num_layers
self.pos_embed_flag = pos_embed_flag
# self.encoder = nn.LSTM(
# embedding_dim, h_dim, num_layers, dropout=dropout
# )
##TO DO Encoder -- Feedforward
# self.encoder = nn.Sequential(nn.Linear(embedding_dim*obs_len, h_dim*8), nn.Dropout(p=0.25), nn.Linear(h_dim*8, h_dim))
self.encoder = nn.Sequential(nn.Linear(embedding_dim*obs_len, h_dim), nn.Dropout(p=0.0))
self.spatial_embedding = nn.Linear(2, embedding_dim)
# def init_hidden(self, batch):
# return (
# torch.zeros(self.num_layers, batch, self.h_dim).cuda(),
# torch.zeros(self.num_layers, batch, self.h_dim).cuda()
# )
def forward(self, obs_traj):
"""
Inputs:
- obs_traj: Tensor of shape (obs_len, batch, 2)
Output:
- hidden: Tensor of shape (batch, self.h_dim)
"""
# Encode observed Trajectory
batch = obs_traj.size(1)
obs_len = obs_traj.size(0)
##obs_traj --> (obs_len, batch, 2)
obs_traj_embedding = self.spatial_embedding(obs_traj.contiguous().view(-1, 2))
##obs_traj_embedding --> (obs_len * batch, embedding)
obs_traj_embedding = obs_traj_embedding.view(-1, batch, self.embedding_dim)
##obs_traj_embedding --> (obs_len, batch, embedding)
obs_traj_embedding = obs_traj_embedding.permute(1, 0, 2)
##obs_traj_embedding --> (batch, obs_len, embedding)
if self.pos_embed_flag:
# print("Embedding")
obs_traj_embedding = self.pos_embed(obs_traj_embedding)
obs_coord_embedding = obs_traj_embedding.contiguous().view(batch, -1)
## CAN ADD POSITION EMBEDDING HERE
## TO DO
hidden = self.encoder(obs_coord_embedding)
# hidden = output.view(batch, obs_len, -1)
# state_tuple = self.init_hidden(batch)
# output, state = self.encoder(obs_traj_embedding, state_tuple)
# final_h = state[0]
return hidden
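
# --- Illustrative sketch (editor addition) ---
# A small shape check for the feedforward Encoder above; the batch size is an
# arbitrary example value. Note the returned hidden state is (batch, h_dim).
def _demo_encoder():
    enc = Encoder(embedding_dim=64, h_dim=64, obs_len=8)
    obs_traj_rel = torch.randn(8, 4, 2)  # (obs_len, batch, 2)
    hidden = enc(obs_traj_rel)
    print(hidden.shape)  # torch.Size([4, 64])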
class Decoder(nn.Module):
"""Decoder is part of TrajectoryGenerator"""
def __init__(
self, seq_len, obs_len=8, embedding_dim=64, h_dim=128, mlp_dim=1024, num_layers=1,
pool_every_timestep=True, dropout=0.0, bottleneck_dim=1024,
activation='relu', batch_norm=True, pooling_type='pool_net',
neighborhood_size=2.0, grid_size=8
):
super(Decoder, self).__init__()
self.seq_len = seq_len
self.mlp_dim = mlp_dim
self.h_dim = h_dim
self.embedding_dim = embedding_dim
self.pool_every_timestep = pool_every_timestep
# self.decoder = nn.LSTM(
# embedding_dim, h_dim, num_layers, dropout=dropout
# )
self.decoder = nn.Sequential(nn.Linear(h_dim + embedding_dim, 8*embedding_dim))
if pool_every_timestep:
if pooling_type == 'pool_net':
self.pool_net = PoolHiddenNet(
embedding_dim=self.embedding_dim,
h_dim=self.h_dim,
mlp_dim=mlp_dim,
bottleneck_dim=bottleneck_dim,
activation=activation,
batch_norm=batch_norm,
dropout=dropout
)
elif pooling_type == 'spool':
self.pool_net = SocialPooling(
h_dim=self.h_dim,
activation=activation,
batch_norm=batch_norm,
dropout=dropout,
neighborhood_size=neighborhood_size,
grid_size=grid_size
)
mlp_dims = [h_dim + bottleneck_dim, mlp_dim, h_dim]
self.mlp = make_mlp(
mlp_dims,
activation=activation,
batch_norm=batch_norm,
dropout=dropout
)
self.spatial_embedding = nn.Linear(2, embedding_dim)
# self.hidden2pos = nn.Linear(h_dim, 2)
self.hidden2pos = nn.Linear(embedding_dim, 2)
def forward(self, last_pos, last_pos_rel, noise_output, seq_start_end):
"""
Inputs:
- last_pos: Tensor of shape (batch, 2)
- last_pos_rel: Tensor of shape (batch, 2)
- noise_output: Tensor of shape (batch, self.h_dim)
- seq_start_end: A list of tuples which delimit sequences within batch
Output:
- pred_traj_fake_rel: Tensor of shape (batch, pred_len, 2)
"""
pred_len = 8
batch = last_pos.size(0)
pred_traj_fake_rel = []
last_pos_embedding = self.spatial_embedding(last_pos_rel)
decoder_input = torch.cat((noise_output, last_pos_embedding), dim=1)
decoder_output = self.decoder(decoder_input)
decoder_output = decoder_output.contiguous().view(batch, pred_len, -1)
decoder_output = decoder_output.contiguous().view(batch*pred_len, -1)
pred_traj_fake_rel = self.hidden2pos(decoder_output)
pred_traj_fake_rel = pred_traj_fake_rel.contiguous().view(batch, pred_len, 2)
# decoder_input = decoder_input.view(1, batch, self.embedding_dim)
# for _ in range(self.seq_len):
# output, state_tuple = self.decoder(decoder_input, state_tuple)
# rel_pos = self.hidden2pos(output.view(-1, self.h_dim))
# curr_pos = rel_pos + last_pos
# if self.pool_every_timestep:
# decoder_h = state_tuple[0]
# pool_h = self.pool_net(decoder_h, seq_start_end, curr_pos)
# decoder_h = torch.cat(
# [decoder_h.view(-1, self.h_dim), pool_h], dim=1)
# decoder_h = self.mlp(decoder_h)
# decoder_h = torch.unsqueeze(decoder_h, 0)
# state_tuple = (decoder_h, state_tuple[1])
# embedding_input = rel_pos
# decoder_input = self.spatial_embedding(embedding_input)
# decoder_input = decoder_input.view(1, batch, self.embedding_dim)
# pred_traj_fake_rel.append(rel_pos.view(batch, -1))
# last_pos = curr_pos
# pred_traj_fake_rel = torch.stack(pred_traj_fake_rel, dim=0)
return pred_traj_fake_rel
class PoolHiddenNet(nn.Module):
"""Pooling module as proposed in our paper"""
def __init__(
self, embedding_dim=64, h_dim=64, mlp_dim=1024, bottleneck_dim=1024,
activation='relu', batch_norm=True, dropout=0.0
):
super(PoolHiddenNet, self).__init__()
self.mlp_dim = 1024
self.h_dim = h_dim
self.bottleneck_dim = bottleneck_dim
self.embedding_dim = embedding_dim
mlp_pre_dim = embedding_dim + h_dim
mlp_pre_pool_dims = [mlp_pre_dim, 512, bottleneck_dim]
self.spatial_embedding = nn.Linear(2, embedding_dim)
self.mlp_pre_pool = make_mlp(
mlp_pre_pool_dims,
activation=activation,
batch_norm=batch_norm,
dropout=dropout)
def repeat(self, tensor, num_reps):
"""
Inputs:
-tensor: 2D tensor of any shape
-num_reps: Number of times to repeat each row
Outputs:
-repeat_tensor: Repeat each row such that: R1, R1, R2, R2
"""
col_len = tensor.size(1)
tensor = tensor.unsqueeze(dim=1).repeat(1, num_reps, 1)
tensor = tensor.view(-1, col_len)
return tensor
def forward(self, h_states, seq_start_end, end_pos):
"""
Inputs:
- h_states: Tensor of shape (num_layers, batch, h_dim)
- seq_start_end: A list of tuples which delimit sequences within batch
- end_pos: Tensor of shape (batch, 2)
Output:
- pool_h: Tensor of shape (batch, bottleneck_dim)
"""
pool_h = []
for _, (start, end) in enumerate(seq_start_end):
start = start.item()
end = end.item()
num_ped = end - start
curr_hidden = h_states.view(-1, self.h_dim)[start:end]
curr_end_pos = end_pos[start:end]
# Repeat -> H1, H2, H1, H2
curr_hidden_1 = curr_hidden.repeat(num_ped, 1)
# Repeat position -> P1, P2, P1, P2
curr_end_pos_1 = curr_end_pos.repeat(num_ped, 1)
# Repeat position -> P1, P1, P2, P2
curr_end_pos_2 = self.repeat(curr_end_pos, num_ped)
curr_rel_pos = curr_end_pos_1 - curr_end_pos_2
curr_rel_embedding = self.spatial_embedding(curr_rel_pos)
mlp_h_input = torch.cat([curr_rel_embedding, curr_hidden_1], dim=1)
curr_pool_h = self.mlp_pre_pool(mlp_h_input)
curr_pool_h = curr_pool_h.view(num_ped, num_ped, -1).max(1)[0]
pool_h.append(curr_pool_h)
pool_h = torch.cat(pool_h, dim=0)
return pool_h
class SocialPooling(nn.Module):
"""Current state of the art pooling mechanism:
http://cvgl.stanford.edu/papers/CVPR16_Social_LSTM.pdf"""
def __init__(
self, h_dim=64, activation='relu', batch_norm=True, dropout=0.0,
neighborhood_size=2.0, grid_size=8, pool_dim=None
):
super(SocialPooling, self).__init__()
self.h_dim = h_dim
self.grid_size = grid_size
self.neighborhood_size = neighborhood_size
if pool_dim:
mlp_pool_dims = [grid_size * grid_size * h_dim, pool_dim]
else:
mlp_pool_dims = [grid_size * grid_size * h_dim, h_dim]
self.mlp_pool = make_mlp(
mlp_pool_dims,
activation=activation,
batch_norm=batch_norm,
dropout=dropout
)
def get_bounds(self, ped_pos):
top_left_x = ped_pos[:, 0] - self.neighborhood_size / 2
top_left_y = ped_pos[:, 1] + self.neighborhood_size / 2
bottom_right_x = ped_pos[:, 0] + self.neighborhood_size / 2
bottom_right_y = ped_pos[:, 1] - self.neighborhood_size / 2
top_left = torch.stack([top_left_x, top_left_y], dim=1)
bottom_right = torch.stack([bottom_right_x, bottom_right_y], dim=1)
return top_left, bottom_right
def get_grid_locations(self, top_left, other_pos):
cell_x = torch.floor(
((other_pos[:, 0] - top_left[:, 0]) / self.neighborhood_size) *
self.grid_size)
cell_y = torch.floor(
((top_left[:, 1] - other_pos[:, 1]) / self.neighborhood_size) *
self.grid_size)
grid_pos = cell_x + cell_y * self.grid_size
return grid_pos
def repeat(self, tensor, num_reps):
"""
Inputs:
-tensor: 2D tensor of any shape
-num_reps: Number of times to repeat each row
Outputs:
-repeat_tensor: Repeat each row such that: R1, R1, R2, R2
"""
col_len = tensor.size(1)
tensor = tensor.unsqueeze(dim=1).repeat(1, num_reps, 1)
tensor = tensor.view(-1, col_len)
return tensor
def forward(self, h_states, seq_start_end, end_pos):
"""
Inputs:
- h_states: Tensor of shape (num_layers, batch, h_dim)
- seq_start_end: A list of tuples which delimit sequences within batch.
- end_pos: Absolute end position of obs_traj (batch, 2)
Output:
- pool_h: Tensor of shape (batch, h_dim)
"""
pool_h = []
for _, (start, end) in enumerate(seq_start_end):
start = start.item()
end = end.item()
num_ped = end - start
grid_size = self.grid_size * self.grid_size
curr_hidden = h_states.view(-1, self.h_dim)[start:end]
curr_hidden_repeat = curr_hidden.repeat(num_ped, 1)
curr_end_pos = end_pos[start:end]
curr_pool_h_size = (num_ped * grid_size) + 1
curr_pool_h = curr_hidden.new_zeros((curr_pool_h_size, self.h_dim))
# curr_end_pos = curr_end_pos.data
top_left, bottom_right = self.get_bounds(curr_end_pos)
# Repeat position -> P1, P2, P1, P2
curr_end_pos = curr_end_pos.repeat(num_ped, 1)
# Repeat bounds -> B1, B1, B2, B2
top_left = self.repeat(top_left, num_ped)
bottom_right = self.repeat(bottom_right, num_ped)
grid_pos = self.get_grid_locations(
top_left, curr_end_pos).type_as(seq_start_end)
# Make all positions to exclude as non-zero
# Find which peds to exclude
x_bound = ((curr_end_pos[:, 0] >= bottom_right[:, 0]) +
(curr_end_pos[:, 0] <= top_left[:, 0]))
y_bound = ((curr_end_pos[:, 1] >= top_left[:, 1]) +
(curr_end_pos[:, 1] <= bottom_right[:, 1]))
within_bound = x_bound + y_bound
within_bound[0::num_ped + 1] = 1 # Don't include the ped itself
within_bound = within_bound.view(-1)
# This is a tricky way to get scatter add to work. Helps me avoid a
# for loop. Offset everything by 1. Use the initial 0 position to
# dump all unnecessary adds.
grid_pos += 1
total_grid_size = self.grid_size * self.grid_size
offset = torch.arange(
0, total_grid_size * num_ped, total_grid_size
).type_as(seq_start_end)
offset = self.repeat(offset.view(-1, 1), num_ped).view(-1)
grid_pos += offset
grid_pos[within_bound != 0] = 0
grid_pos = grid_pos.view(-1, 1).expand_as(curr_hidden_repeat)
curr_pool_h = curr_pool_h.scatter_add(0, grid_pos,
curr_hidden_repeat)
curr_pool_h = curr_pool_h[1:]
pool_h.append(curr_pool_h.view(num_ped, -1))
pool_h = torch.cat(pool_h, dim=0)
pool_h = self.mlp_pool(pool_h)
return pool_h
class TrajectoryGenerator(nn.Module):
def __init__(
self, obs_len, pred_len, embedding_dim=64, encoder_h_dim=64,
decoder_h_dim=128, mlp_dim=1024, num_layers=1, noise_dim=(0, ),
noise_type='gaussian', noise_mix_type='ped', pooling_type=None,
pool_every_timestep=True, dropout=0.0, bottleneck_dim=1024,
activation='relu', batch_norm=True, neighborhood_size=2.0, grid_size=8
):
super(TrajectoryGenerator, self).__init__()
if pooling_type and pooling_type.lower() == 'none':
pooling_type = None
self.obs_len = obs_len
self.pred_len = pred_len
self.mlp_dim = mlp_dim
self.encoder_h_dim = encoder_h_dim
self.decoder_h_dim = decoder_h_dim
self.embedding_dim = embedding_dim
self.noise_dim = noise_dim
self.num_layers = num_layers
self.noise_type = noise_type
self.noise_mix_type = noise_mix_type
self.pooling_type = pooling_type
self.noise_first_dim = 0
self.pool_every_timestep = pool_every_timestep
self.bottleneck_dim = 1024
self.encoder = Encoder(
embedding_dim=embedding_dim,
h_dim=encoder_h_dim,
mlp_dim=mlp_dim,
num_layers=num_layers,
dropout=dropout
)
self.decoder = Decoder(
pred_len,
embedding_dim=embedding_dim,
h_dim=decoder_h_dim,
mlp_dim=mlp_dim,
num_layers=num_layers,
pool_every_timestep=pool_every_timestep,
dropout=dropout,
bottleneck_dim=bottleneck_dim,
activation=activation,
batch_norm=batch_norm,
pooling_type=pooling_type,
grid_size=grid_size,
neighborhood_size=neighborhood_size
)
if pooling_type == 'pool_net':
self.pool_net = PoolHiddenNet(
embedding_dim=self.embedding_dim,
h_dim=encoder_h_dim,
mlp_dim=mlp_dim,
bottleneck_dim=bottleneck_dim,
activation=activation,
batch_norm=batch_norm
)
elif pooling_type == 'spool':
self.pool_net = SocialPooling(
h_dim=encoder_h_dim,
activation=activation,
batch_norm=batch_norm,
dropout=dropout,
neighborhood_size=neighborhood_size,
grid_size=grid_size
)
if self.noise_dim == None or self.noise_dim[0] == 0:
self.noise_dim = None
else:
self.noise_first_dim = noise_dim[0]
# Decoder Hidden
if pooling_type:
input_dim = encoder_h_dim + bottleneck_dim
else:
input_dim = encoder_h_dim
# if self.mlp_decoder_needed():
# mlp_decoder_context_dims = [
# input_dim, mlp_dim, decoder_h_dim - self.noise_first_dim
# ]
if self.mlp_decoder_needed():
mlp_decoder_context_dims = [
input_dim, decoder_h_dim - self.noise_first_dim
]
self.mlp_decoder_context = make_mlp(
mlp_decoder_context_dims,
activation=activation,
batch_norm=batch_norm,
dropout=dropout
)
def add_noise(self, _input, seq_start_end, user_noise=None):
"""
Inputs:
- _input: Tensor of shape (_, decoder_h_dim - noise_first_dim)
- seq_start_end: A list of tuples which delimit sequences within batch.
- user_noise: Generally used for inference when you want to see
relation between different types of noise and outputs.
Outputs:
- decoder_h: Tensor of shape (_, decoder_h_dim)
"""
if not self.noise_dim:
return _input
if self.noise_mix_type == 'global':
noise_shape = (seq_start_end.size(0), ) + self.noise_dim
else:
noise_shape = (_input.size(0), ) + self.noise_dim
if user_noise is not None:
z_decoder = user_noise
else:
z_decoder = get_noise(noise_shape, self.noise_type)
if self.noise_mix_type == 'global':
_list = []
for idx, (start, end) in enumerate(seq_start_end):
start = start.item()
end = end.item()
_vec = z_decoder[idx].view(1, -1)
_to_cat = _vec.repeat(end - start, 1)
_list.append(torch.cat([_input[start:end], _to_cat], dim=1))
decoder_h = torch.cat(_list, dim=0)
return decoder_h
decoder_h = torch.cat([_input, z_decoder], dim=1)
return decoder_h
def mlp_decoder_needed(self):
if (
self.noise_dim or self.pooling_type or
self.encoder_h_dim != self.decoder_h_dim
):
return True
else:
return False
def forward(self, obs_traj, obs_traj_rel, seq_start_end, user_noise=None):
"""
Inputs:
- obs_traj: Tensor of shape (obs_len, batch, 2)
- obs_traj_rel: Tensor of shape (obs_len, batch, 2)
- seq_start_end: A list of tuples which delimit sequences within batch.
- user_noise: Generally used for inference when you want to see
relation between different types of noise and outputs.
Output:
- pred_traj_rel: Tensor of shape (self.pred_len, batch, 2)
"""
batch = obs_traj_rel.size(1)
obs_len = obs_traj_rel.size(0)
# Encode seq
final_encoder_h = self.encoder(obs_traj_rel)
# Pool States
# if self.pooling_type:
# end_pos = obs_traj[-1, :, :]
# pool_h = self.pool_net(final_encoder_h, seq_start_end, end_pos)
# # Construct input hidden states for decoder
# mlp_decoder_context_input = torch.cat(
# [final_encoder_h.view(-1, self.encoder_h_dim), pool_h], dim=1)
# else:
# mlp_decoder_context_input = final_encoder_h.view(
# -1, self.encoder_h_dim)
mlp_decoder_context_input = final_encoder_h.view(-1, self.encoder_h_dim)
# Add Noise
# if self.mlp_decoder_needed():
noise_input = self.mlp_decoder_context(mlp_decoder_context_input)
# else:
# noise_input = mlp_decoder_context_input
noise_output = self.add_noise(
noise_input, seq_start_end, user_noise=user_noise)
# decoder_h = torch.unsqueeze(decoder_h, 0)
# decoder_c = torch.zeros(
# self.num_layers, batch, self.decoder_h_dim
# ).cuda()
# state_tuple = (decoder_h, decoder_c)
last_pos = obs_traj[-1]
last_pos_rel = obs_traj_rel[-1]
# Predict Trajectory
decoder_out = self.decoder(
last_pos,
last_pos_rel,
noise_output,
seq_start_end,
)
pred_traj_fake_rel = decoder_out.permute(1, 0, 2)
return pred_traj_fake_rel
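
# --- Illustrative sketch (editor addition) ---
# End-to-end shape check for TrajectoryGenerator on CPU. Pooling and the extra
# noise dimensions are disabled so that no CUDA device is needed (get_noise()
# above allocates on the GPU); all sizes here are arbitrary example values.
def _demo_trajectory_generator():
    gen = TrajectoryGenerator(
        obs_len=8, pred_len=8, embedding_dim=64,
        encoder_h_dim=64, decoder_h_dim=128,
        noise_dim=None, pool_every_timestep=False,
    )
    obs_traj = torch.randn(8, 4, 2)      # (obs_len, batch, 2)
    obs_traj_rel = torch.randn(8, 4, 2)
    seq_start_end = torch.tensor([[0, 4]])
    pred_traj_fake_rel = gen(obs_traj, obs_traj_rel, seq_start_end)
    print(pred_traj_fake_rel.shape)      # torch.Size([8, 4, 2]) == (pred_len, batch, 2)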
class TrajectoryDiscriminator(nn.Module):
def __init__(
self, obs_len, pred_len, embedding_dim=64, h_dim=64, mlp_dim=1024,
num_layers=1, activation='relu', batch_norm=True, dropout=0.0,
d_type='local'
):
super(TrajectoryDiscriminator, self).__init__()
self.obs_len = obs_len
self.pred_len = pred_len
self.seq_len = obs_len + pred_len
self.mlp_dim = mlp_dim
self.h_dim = h_dim
self.d_type = d_type
self.encoder = Encoder(
embedding_dim=embedding_dim,
h_dim=h_dim,
mlp_dim=mlp_dim,
num_layers=num_layers,
dropout=dropout
)
# real_classifier_dims = [h_dim, mlp_dim, 1]
# self.real_classifier = make_mlp(
# real_classifier_dims,
# activation=activation,
# batch_norm=batch_norm,
# dropout=dropout
# )
# if d_type == 'global':
# mlp_pool_dims = [h_dim + embedding_dim, mlp_dim, h_dim]
# self.pool_net = PoolHiddenNet(
# embedding_dim=embedding_dim,
# h_dim=h_dim,
# mlp_dim=mlp_pool_dims,
# bottleneck_dim=h_dim,
# activation=activation,
# batch_norm=batch_norm
# )
real_classifier_dims = [(obs_len + pred_len) * 2, 16, 8, 1]
self.real_classifier = make_mlp(
real_classifier_dims,
activation=activation,
batch_norm=batch_norm,
dropout=dropout
)
# def forward(self, traj, traj_rel, seq_start_end=None):
# """
# Inputs:
# - traj: Tensor of shape (obs_len + pred_len, batch, 2)
# - traj_rel: Tensor of shape (obs_len + pred_len, batch, 2)
# - seq_start_end: A list of tuples which delimit sequences within batch
# Output:
# - scores: Tensor of shape (batch,) with real/fake scores
# """
# final_h = self.encoder(traj_rel)
# # Note: In case of 'global' option we are using start_pos as opposed to
# # end_pos. The intuition being that hidden state has the whole
# # trajectory and relative position at the start when combined with
# # trajectory information should help in discriminative behavior.
# if self.d_type == 'local':
# classifier_input = final_h.squeeze()
# else:
# classifier_input = self.pool_net(
# final_h.squeeze(), seq_start_end, traj[0]
# )
# scores = self.real_classifier(classifier_input)
# return scores
def forward(self, traj, traj_rel, seq_start_end=None):
"""
Inputs:
- traj: Tensor of shape (obs_len + pred_len, batch, 2)
- traj_rel: Tensor of shape (obs_len + pred_len, batch, 2)
- seq_start_end: A list of tuples which delimit sequences within batch
Output:
- scores: Tensor of shape (batch,) with real/fake scores
"""
batch = traj_rel.shape[1]
traj_rel = traj_rel.permute(1, 0, 2)
classifier_input = traj_rel.contiguous().view(batch, -1)
scores = self.real_classifier(classifier_input)
return scores |
the-stack_0_10899 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from st2common.exceptions.content import ParseException
__all__ = [
'ActionAliasFormatParser',
'extract_parameters_for_action_alias_db',
'extract_parameters',
]
class ActionAliasFormatParser(object):
def __init__(self, alias_format=None, param_stream=None):
self._format = alias_format or ''
self._param_stream = param_stream or ''
def get_extracted_param_value(self):
"""
Match the command against the format string and extract parameters from the command string.
:rtype: ``dict``
"""
result = {}
param_stream = self._param_stream
# As there's a lot of questions about using regular expressions,
# I'll try to be thorough when documenting this code.
# I'll split the whole convoluted regex into snippets to make it
# a bit more readable (hopefully).
snippets = dict()
# Formats for keys and values: key is a non-spaced string,
# value is anything in quotes or curly braces, or a single word.
snippets['key'] = r'\s*(\S+?)\s*'
snippets['value'] = r'""|\'\'|"(.+?)"|\'(.+?)\'|({.+?})|(\S+)'
# Extended value: also matches unquoted text (caution).
snippets['ext_value'] = r'""|\'\'|"(.+?)"|\'(.+?)\'|({.+?})|(.+?)'
# Key-value pair:
snippets['pairs'] = r'(?:^|\s+){key}=({value})'.format(**snippets)
# End of string: multiple space-separated key-value pairs:
snippets['ending'] = r'.*?(({pairs}\s*)*)$'.format(**snippets)
# Default value in optional parameters:
snippets['default'] = r'\s*=\s*(?:{ext_value})\s*'.format(**snippets)
# Optional parameter (has a default value):
snippets['optional'] = '{{' + snippets['key'] + snippets['default'] + '}}'
# Required parameter (no default value):
snippets['required'] = '{{' + snippets['key'] + '}}'
# 1. Matching the arbitrary key-value pairs at the end of the command
# to support extra parameters (not specified in the format string),
# and cutting them from the command string afterwards.
ending_pairs = re.match(snippets['ending'], param_stream, re.DOTALL)
has_ending_pairs = ending_pairs and ending_pairs.group(1)
if has_ending_pairs:
kv_pairs = re.findall(snippets['pairs'], ending_pairs.group(1), re.DOTALL)
param_stream = param_stream.replace(ending_pairs.group(1), '')
param_stream = " %s " % (param_stream)
# 2. Matching optional parameters (with default values).
optional = re.findall(snippets['optional'], self._format, re.DOTALL)
# Transforming our format string into a regular expression,
# substituting {{ ... }} with regex named groups, so that param_stream
# matched against this expression yields a dict of params with values.
param_match = r'\1["\']?(?P<\2>(?:(?<=\').+?(?=\')|(?<=").+?(?=")|{.+?}|.+?))["\']?'
reg = re.sub(r'(\s*)' + snippets['optional'], r'(?:' + param_match + r')?', self._format)
reg = re.sub(r'(\s*)' + snippets['required'], param_match, reg)
reg = r'^\s*' + reg + r'\s*$'
# 3. Matching the command against our regex to get the param values
matched_stream = re.match(reg, param_stream, re.DOTALL)
if not matched_stream:
# If no match is found we throw since this indicates provided user string (command)
# didn't match the provided format string
raise ParseException('Command "%s" doesn\'t match format string "%s"' %
(self._param_stream, self._format))
# Compiling results from the steps 1-3.
if matched_stream:
result = matched_stream.groupdict()
for param in optional:
matched_value = result[param[0]] if matched_stream else None
matched_result = matched_value or ''.join(param[1:])
if matched_result is not None:
result[param[0]] = matched_result
if has_ending_pairs:
for pair in kv_pairs:
result[pair[0]] = ''.join(pair[2:])
if self._format and not (self._param_stream.strip() or any(result.values())):
raise ParseException('No value supplied and no default value found.')
return result
def extract_parameters_for_action_alias_db(action_alias_db, format_str, param_stream):
"""
Extract parameters from the user input based on the provided format string.
Note: This function makes sure that the provided format string is indeed available in the
action_alias_db.formats.
"""
formats = action_alias_db.get_format_strings()
if format_str not in formats:
raise ValueError('Format string "%s" is not available on the alias "%s"' %
(format_str, action_alias_db.name))
result = extract_parameters(format_str=format_str, param_stream=param_stream)
return result
def extract_parameters(format_str, param_stream):
parser = ActionAliasFormatParser(alias_format=format_str, param_stream=param_stream)
return parser.get_extracted_param_value()
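
# --- Illustrative sketch (editor addition, not part of the original module) ---
# A small worked example of the parser above. The alias format and commands are
# made up for illustration; the expected results follow from the matching rules
# documented in get_extracted_param_value() (quoted values, trailing key=value
# pairs, and defaults for optional {{key=default}} parameters).
def _demo_extract_parameters():
    format_str = 'run {{cmd}} on {{hosts}} {{timeout=60}}'
    # Required parameters only; the optional timeout falls back to its default.
    first = extract_parameters(format_str, 'run date on localhost')
    # Expected: {'cmd': 'date', 'hosts': 'localhost', 'timeout': '60'}
    # Quoted value plus a trailing key=value pair overriding the default.
    second = extract_parameters(format_str, 'run "uname -a" on localhost timeout=120')
    # Expected: {'cmd': 'uname -a', 'hosts': 'localhost', 'timeout': '120'}
    return first, second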
|
the-stack_0_10900 | from rest_framework import status
from rest_framework.decorators import api_view, authentication_classes, permission_classes
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from django.core.mail import send_mail
from django.conf import settings
from .authentication import WebpageTokenAuth
from .models import User, Comment, Edit
from .serializers import CommentSerializer, EditSerializer
@api_view(['GET'])
def form_validator(request):
"""
API call to validate the sign up form data on the client.
This validates that:
#. The chosen **Username** is not already taken.
#. The chosen **Email** is not already taken.
"""
Username = request.GET.get('Username', None)
Email = request.GET.get('Email', None)
usernameExists = User.objects.filter(Username=Username).exists()
emailExists = User.objects.filter(Email=Email).exists()
return Response({"UsernameExists": usernameExists, "EmailExists": emailExists}, status=status.HTTP_200_OK)
@api_view(['GET'])
def user_comments(request):
"""
Endpoint to get all the comments made by a user.
This expects a ``UserID`` to be provided as a query parameter.
"""
try:
comments = Comment.objects.filter(UserID=request.query_params.get('UserID'))
return Response(CommentSerializer(comments, many=True).data, status=status.HTTP_200_OK)
except Exception as e:
return Response(str(e), status=status.HTTP_400_BAD_REQUEST)
@api_view(['POST'])
@authentication_classes([WebpageTokenAuth])
@permission_classes([IsAuthenticated])
def comment_submit(request):
"""
Endpoint to submit a new user comment given:
- ``CommentText``: The text the user wrote.
- ``AHJPK``: The AHJ primary key of the AHJPage they commented on.
- ``ReplyingTo``: The UserID of the user who wrote the comment this comment is replying to, if any.
"""
comment_text = request.data.get('CommentText', None)
if comment_text is None:
return Response('Missing comment text', status=status.HTTP_400_BAD_REQUEST)
AHJPK = request.data.get('AHJPK', None)
ReplyingTo = request.data.get('ReplyingTo', None)
comment = Comment.objects.create(UserID=User.objects.get(Email=request.user),
AHJPK=AHJPK,
CommentText=comment_text, ReplyingTo=ReplyingTo)
# send the serialized comment back to the front-end
return Response(CommentSerializer(comment).data, status=status.HTTP_200_OK)
@api_view(['GET'])
def user_edits(request):
"""
Endpoint returning all edits made a user.
This expects a ``UserID`` to be provided as a query parameter.
"""
try:
edits = Edit.objects.filter(ChangedBy=request.query_params.get('UserID'))
return Response(EditSerializer(edits, many=True).data, status=status.HTTP_200_OK)
except Exception as e:
return Response(str(e), status=status.HTTP_400_BAD_REQUEST)
@api_view(['POST'])
def send_support_email(request):
"""
Endpoint to send mail to SunSpec's support email address.
This expects as POST data:
- ``Email``: The email of the user writing to SunSpec support.
- ``Subject``: The subject of the email.
- ``Message``: The body of the email.
"""
try:
email = request.data.get('Email')
subject = request.data.get('Subject')
message = request.data.get('Message')
full_message = f'Sender: {email}\nMessage: {message}'
send_mail(subject, full_message, settings.EMAIL_HOST_USER, [settings.SUNSPEC_SUPPORT_EMAIL], fail_silently=False)
return Response(status=status.HTTP_200_OK)
except Exception as e:
return Response(str(e), status=status.HTTP_400_BAD_REQUEST)
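
# --- Illustrative sketch (editor addition, not part of the original module) ---
# Example of how a client might call the comment_submit endpoint above. The host,
# route and token value are placeholder assumptions; the payload keys match the
# fields read from request.data in comment_submit().
def _demo_comment_submit_request():
    import requests  # assumed to be available in the client environment
    response = requests.post(
        'https://ahj.example.com/api/comment/submit/',  # hypothetical route
        headers={'Authorization': 'Token <webpage-token>'},
        json={'CommentText': 'Great info!', 'AHJPK': 123, 'ReplyingTo': None},
    )
    return response.json()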
|
the-stack_0_10901 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
__version__ = '1.29.7'
# -----------------------------------------------------------------------------
import asyncio
import concurrent
import socket
import certifi
import aiohttp
import ssl
import sys
import yarl
# -----------------------------------------------------------------------------
from ccxt.async_support.base.throttle import throttle
# -----------------------------------------------------------------------------
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import RequestTimeout
from ccxt.base.errors import NotSupported
# -----------------------------------------------------------------------------
from ccxt.base.exchange import Exchange as BaseExchange
# -----------------------------------------------------------------------------
__all__ = [
'BaseExchange',
'Exchange',
]
# -----------------------------------------------------------------------------
class Exchange(BaseExchange):
def __init__(self, config={}):
if 'asyncio_loop' in config:
self.asyncio_loop = config['asyncio_loop']
self.asyncio_loop = self.asyncio_loop or asyncio.get_event_loop()
self.aiohttp_trust_env = config.get('aiohttp_trust_env', self.aiohttp_trust_env)
self.verify = config.get('verify', self.verify)
self.own_session = 'session' not in config
self.cafile = config.get('cafile', certifi.where())
super(Exchange, self).__init__(config)
self.init_rest_rate_limiter()
self.markets_loading = None
self.reloading_markets = False
def init_rest_rate_limiter(self):
self.throttle = throttle(self.extend({
'loop': self.asyncio_loop,
}, self.tokenBucket))
def __del__(self):
if self.session is not None:
self.logger.warning(self.id + " requires all resources to be released with an explicit call to the .close() coroutine. If you are using the exchange instance with async coroutines, add exchange.close() to your code in a place where you are done with the exchange and no longer need it (at the end of your async coroutine).")
if sys.version_info >= (3, 5):
async def __aenter__(self):
self.open()
return self
async def __aexit__(self, exc_type, exc, tb):
await self.close()
def open(self):
if self.own_session and self.session is None:
# Create our SSL context object with our CA cert file
context = ssl.create_default_context(cafile=self.cafile) if self.verify else self.verify
# Pass this SSL context to aiohttp and create a TCPConnector
connector = aiohttp.TCPConnector(ssl=context, loop=self.asyncio_loop)
self.session = aiohttp.ClientSession(loop=self.asyncio_loop, connector=connector, trust_env=self.aiohttp_trust_env)
async def close(self):
if self.session is not None:
if self.own_session:
await self.session.close()
self.session = None
async def fetch2(self, path, api='public', method='GET', params={}, headers=None, body=None):
"""A better wrapper over request for deferred signing"""
if self.enableRateLimit:
await self.throttle(self.rateLimit)
self.lastRestRequestTimestamp = self.milliseconds()
request = self.sign(path, api, method, params, headers, body)
return await self.fetch(request['url'], request['method'], request['headers'], request['body'])
async def fetch(self, url, method='GET', headers=None, body=None):
"""Perform a HTTP request and return decoded JSON data"""
request_headers = self.prepare_request_headers(headers)
url = self.proxy + url
if self.verbose:
self.print("\nRequest:", method, url, headers, body)
self.logger.debug("%s %s, Request: %s %s", method, url, headers, body)
request_body = body
encoded_body = body.encode() if body else None
self.open()
session_method = getattr(self.session, method.lower())
http_response = None
http_status_code = None
http_status_text = None
json_response = None
try:
async with session_method(yarl.URL(url, encoded=True),
data=encoded_body,
headers=request_headers,
timeout=(self.timeout / 1000),
proxy=self.aiohttp_proxy) as response:
http_response = await response.text()
http_status_code = response.status
http_status_text = response.reason
json_response = self.parse_json(http_response)
headers = response.headers
if self.enableLastHttpResponse:
self.last_http_response = http_response
if self.enableLastResponseHeaders:
self.last_response_headers = headers
if self.enableLastJsonResponse:
self.last_json_response = json_response
if self.verbose:
self.print("\nResponse:", method, url, http_status_code, headers, http_response)
self.logger.debug("%s %s, Response: %s %s %s", method, url, http_status_code, headers, http_response)
except socket.gaierror as e:
raise ExchangeNotAvailable(method + ' ' + url)
except concurrent.futures._base.TimeoutError as e:
raise RequestTimeout(method + ' ' + url)
except aiohttp.client_exceptions.ClientConnectionError as e:
raise ExchangeNotAvailable(method + ' ' + url)
except aiohttp.client_exceptions.ClientError as e: # base exception class
raise ExchangeError(method + ' ' + url)
self.handle_errors(http_status_code, http_status_text, url, method, headers, http_response, json_response, request_headers, request_body)
self.handle_rest_errors(http_status_code, http_status_text, http_response, url, method)
self.handle_rest_response(http_response, json_response, url, method)
if json_response is not None:
return json_response
if self.is_text_response(headers):
return http_response
return response.content
async def load_markets_helper(self, reload=False, params={}):
if not reload:
if self.markets:
if not self.markets_by_id:
return self.set_markets(self.markets)
return self.markets
currencies = None
if self.has['fetchCurrencies']:
currencies = await self.fetch_currencies()
markets = await self.fetch_markets(params)
return self.set_markets(markets, currencies)
async def load_markets(self, reload=False, params={}):
if (reload and not self.reloading_markets) or not self.markets_loading:
self.reloading_markets = True
coroutine = self.load_markets_helper(reload, params)
# coroutines can only be awaited once so we wrap it in a task
self.markets_loading = asyncio.ensure_future(coroutine)
try:
result = await self.markets_loading
except Exception as e:
self.reloading_markets = False
self.markets_loading = None
raise e
self.reloading_markets = False
return result
async def fetch_fees(self):
trading = {}
funding = {}
if self.has['fetchTradingFees']:
trading = await self.fetch_trading_fees()
if self.has['fetchFundingFees']:
funding = await self.fetch_funding_fees()
return {
'trading': trading,
'funding': funding,
}
async def load_fees(self, reload=False):
if not reload:
if self.loaded_fees != Exchange.loaded_fees:
return self.loaded_fees
self.loaded_fees = self.deep_extend(self.loaded_fees, await self.fetch_fees())
return self.loaded_fees
async def fetch_markets(self, params={}):
# markets are returned as a list
# currencies are returned as a dict
# this is for historical reasons
# and may be changed for consistency later
return self.to_array(self.markets)
async def fetch_currencies(self, params={}):
# markets are returned as a list
# currencies are returned as a dict
# this is for historical reasons
# and may be changed for consistency later
return self.currencies
async def fetch_status(self, params={}):
if self.has['fetchTime']:
updated = await self.fetch_time(params)
self.status['updated'] = updated
return self.status
async def fetch_order_status(self, id, symbol=None, params={}):
order = await self.fetch_order(id, symbol, params)
return order['status']
async def fetch_partial_balance(self, part, params={}):
balance = await self.fetch_balance(params)
return balance[part]
async def fetch_l2_order_book(self, symbol, limit=None, params={}):
orderbook = await self.fetch_order_book(symbol, limit, params)
return self.extend(orderbook, {
'bids': self.sort_by(self.aggregate(orderbook['bids']), 0, True),
'asks': self.sort_by(self.aggregate(orderbook['asks']), 0),
})
async def perform_order_book_request(self, market, limit=None, params={}):
raise NotSupported(self.id + ' performOrderBookRequest not supported yet')
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
orderbook = await self.perform_order_book_request(market, limit, params)
return self.parse_order_book(orderbook, market, limit, params)
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
if not self.has['fetchTrades']:
raise NotSupported('fetch_ohlcv() not implemented yet')
await self.load_markets()
trades = await self.fetch_trades(symbol, since, limit, params)
return self.build_ohlcv(trades, timeframe, since, limit)
async def fetchOHLCV(self, symbol, timeframe='1m', since=None, limit=None, params={}):
return await self.fetch_ohlcv(symbol, timeframe, since, limit, params)
async def fetch_full_tickers(self, symbols=None, params={}):
return await self.fetch_tickers(symbols, params)
async def edit_order(self, id, symbol, *args):
if not self.enableRateLimit:
raise ExchangeError('updateOrder() requires enableRateLimit = true')
await self.cancel_order(id, symbol)
return await self.create_order(symbol, *args)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
raise NotSupported('create_order() not supported yet')
async def cancel_order(self, id, symbol=None, params={}):
raise NotSupported('cancel_order() not supported yet')
async def fetch_trading_fees(self, params={}):
raise NotSupported('fetch_trading_fees() not supported yet')
async def fetch_trading_fee(self, symbol, params={}):
if not self.has['fetchTradingFees']:
raise NotSupported('fetch_trading_fee() not supported yet')
return await self.fetch_trading_fees(params)
async def load_trading_limits(self, symbols=None, reload=False, params={}):
if self.has['fetchTradingLimits']:
if reload or not('limitsLoaded' in list(self.options.keys())):
response = await self.fetch_trading_limits(symbols)
for i in range(0, len(symbols)):
symbol = symbols[i]
self.markets[symbol] = self.deep_extend(self.markets[symbol], response[symbol])
self.options['limitsLoaded'] = self.milliseconds()
return self.markets
async def load_accounts(self, reload=False, params={}):
if reload:
self.accounts = await self.fetch_accounts(params)
else:
if self.accounts:
return self.accounts
else:
self.accounts = await self.fetch_accounts(params)
self.accountsById = self.index_by(self.accounts, 'id')
return self.accounts
async def fetch_ticker(self, symbol, params={}):
raise NotSupported('fetch_ticker() not supported yet')
async def sleep(self, milliseconds):
return await asyncio.sleep(milliseconds / 1000)
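
# --- Illustrative sketch (editor addition, not part of the original module) ---
# Shows the intended lifecycle of the async Exchange: load markets, then release
# resources with close(), as the warning in __del__ above demands. A concrete
# subclass is assumed here (ccxt's binance); any exchange id would work the same
# way, and the __aenter__/__aexit__ support can be used instead of try/finally.
async def _demo_exchange_usage():
    import ccxt.async_support as ccxt_async  # assumed importable
    exchange = ccxt_async.binance({'enableRateLimit': True})
    try:
        markets = await exchange.load_markets()
        print(len(markets), 'markets loaded')
    finally:
        await exchange.close()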
|
the-stack_0_10902 | import scipy.stats as st
import math
import torch
import numpy as np
import torch.nn as nn
from functools import partial
# Target function definition
def f(input_):
r"""
Bimodal function
:param input_: input value(s); shifted internally by 0.5
:return: bimodal target value
"""
x = input_ + 0.5
y_left = st.skewnorm(a=4, loc=.3, scale=.7).pdf(3 * x) / 1.6
y_right = st.skewnorm(a=4, loc=.3, scale=.6).pdf(3 * (1 - x)) / 1.4
return 2 * (y_left + y_right) - 1
# REPULSIVE FUNCTION
def pairwise_rbf(y_ent_pts_new, y_ent_pts_old, std_pts):
# computation of the weights
return torch.mean(torch.exp(-(1 / (2 * std_pts**2)) * torch.norm(y_ent_pts_new - y_ent_pts_old, dim=1, keepdim=True)**2))
def optimize(net, optimizer, batch, add_repulsive_constraint=False, **kwargs):
criterion = nn.MSELoss()
if add_repulsive_constraint:
criterion_repulsive = partial(pairwise_rbf, std_pts=kwargs['bandwidth_repulsive'])
info = {}
x, y = batch # x is an image and y is an integer !
output = net(x)
if not add_repulsive_constraint:
loss = criterion(output, y)
info['data_loss'] = loss.item()
else:
data_loss = criterion(output, y)
info['data_loss'] = data_loss.item()
# entropy loss
net.eval()
y_rep = net(kwargs['batch_repulsive'])
net.train()
y_rep_ref = kwargs['reference_net'](kwargs['batch_repulsive']).detach()
entropy_loss = criterion_repulsive(y_rep, y_rep_ref) # close to 1 if the probs are the same, else close to 0
info['repulsive_loss'] = entropy_loss.item()
# total loss
loss = data_loss + kwargs['lambda_repulsive'] * entropy_loss
optimizer.zero_grad()
loss.backward()
optimizer.step()
# logging
info['total_loss'] = loss.item()
return info
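
# --- Illustrative sketch (editor addition, not part of the original module) ---
# One plain step and one repulsive step using optimize() above. The network
# sizes, bandwidth and lambda values are arbitrary example choices, and
# reference_net stands in for the frozen network the repulsive term compares
# against.
def _demo_optimize_step():
    net = nn.Sequential(nn.Linear(1, 32), nn.ReLU(), nn.Linear(32, 1))
    reference_net = nn.Sequential(nn.Linear(1, 32), nn.ReLU(), nn.Linear(32, 1))
    optimizer = torch.optim.Adam(net.parameters(), lr=1e-3)

    x = torch.rand(64, 1) - 0.5
    y = torch.tensor(f(x.numpy()), dtype=torch.float32)
    batch = (x, y)

    # plain data-fitting step
    info = optimize(net, optimizer, batch)

    # step with the repulsive constraint added
    batch_repulsive = torch.rand(32, 1) - 0.5
    info_rep = optimize(
        net, optimizer, batch,
        add_repulsive_constraint=True,
        bandwidth_repulsive=0.1,
        lambda_repulsive=0.5,
        batch_repulsive=batch_repulsive,
        reference_net=reference_net,
    )
    return info, info_rep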
|
the-stack_0_10905 | # TODO: This code is comparing HyperparameterRanges_CS with HyperparameterRanges.
# If the latter code is removed, this test can go as well.
import numpy as np
import ConfigSpace as CS
import ConfigSpace.hyperparameters as CSH
from numpy.testing import assert_allclose
from autogluon.core.searcher import \
HyperparameterRanges_CS
from autogluon.core.searcher import \
HyperparameterRanges_Impl, HyperparameterRangeCategorical, \
HyperparameterRangeContinuous, HyperparameterRangeInteger
from autogluon.core.searcher import LinearScaling, \
LogScaling
def test_to_ndarray():
np.random.seed(123456)
random_state = np.random.RandomState(123456)
prob_categ = 0.3
for iter in range(20):
# Create ConfigurationSpace
num_hps = np.random.randint(low=1, high=20)
if iter == 0:
_prob_categ = 0.
elif iter == 1:
_prob_categ = 1.
else:
_prob_categ = prob_categ
config_space = CS.ConfigurationSpace()
ndarray_size = 0
_hp_ranges = dict()
for hp_it in range(num_hps):
name = str(hp_it)
if np.random.random() < _prob_categ:
num_choices = np.random.randint(low=2, high=11)
choices = tuple([str(i) for i in range(num_choices)])
hp = CSH.CategoricalHyperparameter(name, choices=choices)
hp2 = HyperparameterRangeCategorical(name, choices)
ndarray_size += num_choices
else:
ndarray_size += 1
rand_coin = np.random.random()
if rand_coin < 0.5:
log_scaling = (rand_coin < 0.25)
hp = CSH.UniformFloatHyperparameter(
name=name, lower=0.5, upper=5., log=log_scaling)
hp2 = HyperparameterRangeContinuous(
name, lower_bound=0.5, upper_bound=5.,
scaling=LogScaling() if log_scaling else LinearScaling())
else:
log_scaling = (rand_coin < 0.75)
hp = CSH.UniformIntegerHyperparameter(
name=name, lower=2, upper=10, log=log_scaling)
hp2 = HyperparameterRangeInteger(
name=name, lower_bound=2, upper_bound=10,
scaling=LogScaling() if log_scaling else LinearScaling())
config_space.add_hyperparameter(hp)
_hp_ranges[name] = hp2
hp_ranges_cs = HyperparameterRanges_CS(config_space)
hp_ranges = HyperparameterRanges_Impl(
*[_hp_ranges[x] for x in config_space.get_hyperparameter_names()])
# Compare ndarrays created by both codes
for cmp_it in range(5):
config_cs = hp_ranges_cs.random_candidate(random_state)
_config = config_cs.get_dictionary()
config = (_config[name]
for name in config_space.get_hyperparameter_names())
ndarr_cs = hp_ranges_cs.to_ndarray(config_cs)
ndarr = hp_ranges.to_ndarray(config)
assert_allclose(ndarr_cs, ndarr, rtol=1e-4)
def test_to_ndarray_name_last_pos():
np.random.seed(123456)
random_state = np.random.RandomState(123456)
config_space = CS.ConfigurationSpace()
config_space.add_hyperparameters([
CSH.UniformFloatHyperparameter('a', lower=0., upper=1.),
CSH.UniformIntegerHyperparameter('b', lower=2, upper=3),
CSH.CategoricalHyperparameter('c', choices=('1', '2', '3')),
CSH.UniformIntegerHyperparameter('d', lower=2, upper=3),
CSH.CategoricalHyperparameter('e', choices=('1', '2'))])
hp_a = HyperparameterRangeContinuous(
'a', lower_bound=0., upper_bound=1., scaling=LinearScaling())
hp_b = HyperparameterRangeInteger(
'b', lower_bound=2, upper_bound=3, scaling=LinearScaling())
hp_c = HyperparameterRangeCategorical('c', choices=('1', '2', '3'))
hp_d = HyperparameterRangeInteger(
'd', lower_bound=2, upper_bound=3, scaling=LinearScaling())
hp_e = HyperparameterRangeCategorical('e', choices=('1', '2'))
for name_last_pos in ['a', 'c', 'd', 'e']:
hp_ranges_cs = HyperparameterRanges_CS(
config_space, name_last_pos=name_last_pos)
if name_last_pos == 'a':
lst = [hp_b, hp_c, hp_d, hp_e, hp_a]
elif name_last_pos == 'c':
lst = [hp_a, hp_b, hp_d, hp_e, hp_c]
elif name_last_pos == 'd':
lst = [hp_a, hp_b, hp_c, hp_e, hp_d]
else:
lst = [hp_a, hp_b, hp_c, hp_d, hp_e]
hp_ranges = HyperparameterRanges_Impl(*lst)
names = [hp.name for hp in hp_ranges.hp_ranges]
config_cs = hp_ranges_cs.random_candidate(random_state)
_config = config_cs.get_dictionary()
config = (_config[name] for name in names)
ndarr_cs = hp_ranges_cs.to_ndarray(config_cs)
ndarr = hp_ranges.to_ndarray(config)
assert_allclose(ndarr_cs, ndarr, rtol=1e-4)
|
the-stack_0_10906 | #!/usr/bin/env python
#
# toolbar.py - FSLeyes toolbars
#
# Author: Paul McCarthy <[email protected]>
#
"""This module provides the :class:`FSLeyesToolBar` class, the base class
for all toolbars in *FSLeyes*.
"""
import logging
import wx
import wx.lib.newevent as wxevent
import numpy as np
import fsleyes.panel as fslpanel
import fsleyes.icons as icons
log = logging.getLogger(__name__)
class FSLeyesToolBar(fslpanel.FSLeyesPanel):
"""Base class for all *FSLeyes* toolbars.
The ``FSLeyesToolBar`` is a regular :class:`wx.PyPanel` which to which a
group of *tools* can be added, where a tool may be any ``wx`` control.
See also the :class:`.ControlToolBar`, which is the true base-class for
all toolbars that are added to FSLeyes view panels.
Tools can be added to a ``FSLeyesToolBar`` with the following methods:
.. autosummary::
:nosignatures:
AddTool
InsertTool
InsertTools
SetTools
MakeLabelledTool
When the horizontal size of a ``FSLeyesToolBar`` becomes too small to
display all of its tools, the toolbar is compressed: some tools are
hidden, and buttons are displayed on each end of the toolbar, allowing the
user to scroll through the toolbar, to access the hidden tools. The user
may also use the mouse wheel to scroll through the toolbar.
A collapsed ``FSLeyesToolBar`` looks something like this:
.. image:: images/fsleyestoolbar.png
:scale: 50%
:align: center
"""
def __init__(self,
parent,
overlayList,
displayCtx,
viewPanel,
height=32,
orient=wx.HORIZONTAL,
*args,
**kwargs):
"""Create a ``FSLeyesToolBar``.
:arg parent: The :mod:`wx` parent object.
:arg overlayList: The :class:`.OverlayList`, containing all overlays
being displayed.
:arg displayCtx: A :class:`.DisplayContext`, which defines how the
overlays are to be displayed.
:arg viewPanel: The :class:`.ViewPanel` that owns this toolbar.
:arg height: Desired toolbar height in pixels. This value is used
to look up appropriately sized left/right arrow
icons.
:arg actionz: A dictionary of actions passed through to the
:meth:`.ActionProvider.__init__`.
All other arguments are passed through to
:meth:`.FSLeyesPanel.__init__`.
"""
if orient not in (wx.HORIZONTAL, wx.VERTICAL):
raise ValueError('Invalid orientation: {}'.format(orient))
fslpanel.FSLeyesPanel.__init__(self,
parent,
overlayList,
displayCtx,
viewPanel.frame,
*args,
**kwargs)
self.__tools = []
self.__visibleOffset = 0
self.__numVisible = 0
self.__height = height
self.__orient = orient
font = self.GetFont()
self.SetFont(font.Smaller())
style = wx.BU_EXACTFIT | wx.BU_NOTEXT
if orient == wx.HORIZONTAL:
lBmp = icons.loadBitmap('thinLeftArrow{}' .format(height))
rBmp = icons.loadBitmap('thinRightArrow{}'.format(height))
else:
lBmp = icons.loadBitmap('thinUpArrow{}' .format(height))
rBmp = icons.loadBitmap('thinDownArrow{}'.format(height))
self.__leftButton = wx.Button(self, style=style)
self.__rightButton = wx.Button(self, style=style)
self.__leftButton .SetBitmap(lBmp)
self.__rightButton.SetBitmap(rBmp)
for btn in [self.__leftButton, self.__rightButton]:
size = btn.GetBestSize()
btn.SetMinSize(size)
self.__sizer = wx.BoxSizer(orient)
self.SetSizer(self.__sizer)
self.__sizer.Add(self.__leftButton, flag=wx.EXPAND)
self.__sizer.Add((0, 0), flag=wx.EXPAND, proportion=1)
self.__sizer.Add(self.__rightButton, flag=wx.EXPAND)
self.__leftButton .Bind(wx.EVT_BUTTON, self.__onLeftButton)
self.__rightButton.Bind(wx.EVT_BUTTON, self.__onRightButton)
self .Bind(wx.EVT_MOUSEWHEEL, self.__onMouseWheel)
self .Bind(wx.EVT_SIZE, self.__drawToolBar)
def GetOrient(self):
"""Returns the orientation of this ``FSLeyesToolBar``, either
``wx.HORIZONTAL`` or ``wx.VERTICAL``.
"""
return self.__orient
def MakeLabelledTool(self,
tool,
labelText,
labelSide=wx.TOP,
expand=False):
"""Creates a panel containing the given tool, and a label for the
tool. The panel is returned, but is not added to this
``FSLeyesToolBar`` - you will have to do that yourself, e.g.::
labelledTool = toolbar.MakeLabelledTool(tool, 'Label', wx.BOTTOM)
toolbar.AddTool(labelledTool)
:arg tool: A :mod:`wx` control.
:arg labelText: A label for the tool.
:arg labelSide: Which side of the tool to put the label - ``wx.TOP``,
``wx.BOTTOM``, ``wx.LEFT``, or ``wx.RIGHT``.
:arg expand: Defaults to ``False``. If ``True``, the widget and
label will be set up so they expand to fit all
available space
"""
if labelSide in (wx.TOP, wx.BOTTOM): orient = wx.VERTICAL
elif labelSide in (wx.LEFT, wx.RIGHT): orient = wx.HORIZONTAL
oldParent = tool.GetParent()
panel = wx.Panel(oldParent)
sizer = wx.BoxSizer(orient)
panel.SetSizer(sizer)
tool.Reparent(panel)
label = wx.StaticText(panel, style=wx.ALIGN_CENTRE_HORIZONTAL)
label.SetLabel(labelText)
if expand:
sizerArgs = {
'flag' : wx.EXPAND,
'proportion' : 1
}
else:
sizerArgs = {
'flag' : wx.ALIGN_CENTRE,
}
if labelSide in (wx.TOP, wx.LEFT):
sizer.Add(label, **sizerArgs)
sizer.Add(tool, **sizerArgs)
else:
sizer.Add(tool, **sizerArgs)
sizer.Add(label, **sizerArgs)
return panel
def Enable(self, *args, **kwargs):
"""Enables/disables all tools in this ``FSLeyesToolBar``.
:arg args: Passed to the ``Enable`` method of each tool.
:arg kwargs: Passed to the ``Enable`` method of each tool.
"""
super(FSLeyesToolBar, self).Enable(*args, **kwargs)
for t in self.__tools:
t.Enable(*args, **kwargs)
def GetTools(self):
"""Returns a list containing all tools in this ``FSLeyesToolBar``. """
return self.__tools[:]
def GetToolCount(self):
"""Returns the number of tools in this ``FSLeyesToolBar``. """
return len(self.__tools)
def AddDivider(self):
"""Adds a :class:`.ToolBarDivider` to the end of the toolbar. """
self.InsertDivider()
def InsertDivider(self, index=None):
"""Inserts a :class:`.ToolBarDivider` into the toolbar at the
specified ``index``.
"""
if self.__orient == wx.VERTICAL: orient = wx.HORIZONTAL
elif self.__orient == wx.HORIZONTAL: orient = wx.VERTICAL
self.InsertTool(ToolBarDivider(self, self.__height, orient), index)
def AddTool(self, tool):
"""Adds the given tool to this ``FSLeyesToolBar``. """
self.InsertTool(tool)
def InsertTools(self, tools, index=None):
"""Inserts the given sequence of tools into this ``FSLeyesToolBar``,
at the specified index.
:arg tools: A sequence of tools to add.
:arg index: Insert the tools before this index (default: end).
"""
if index is None:
index = self.GetToolCount()
for i, tool in enumerate(tools, index):
self.InsertTool(tool, i, postevent=False)
wx.PostEvent(self, ToolBarEvent())
def SetTools(self, tools, destroy=False):
"""Replaces all of the existing tools in this ``FSLeyesToolBar``
with the given sequence of tools.
:arg tools: Sequence of new tools to add.
:arg destroy: If ``True`` all of the old tools are destroyed.
"""
self.ClearTools(destroy, postevent=False)
for tool in tools:
self.InsertTool(tool, postevent=False, redraw=False)
self.__drawToolBar()
wx.PostEvent(self, ToolBarEvent())
def InsertTool(self, tool, index=None, postevent=True, redraw=True):
"""Inserts the given tool into this ``FSLeyesToolBar``, at the
specified index.
:arg tool: The tool to insert.
:arg index: Index to insert the tool.
:arg postevent: If ``True``, a :data:`ToolBarEvent` will be generated.
Pass ``False`` to suppress this event.
:arg redraw: If ``True``, the toolbar is redrawn. Pass ``False``
to suppress this behaviour.
"""
if index is None:
index = len(self.__tools)
log.debug('{}: adding tool at index {}: {}'.format(
type(self).__name__, index, type(tool).__name__))
tool.Bind(wx.EVT_MOUSEWHEEL, self.__onMouseWheel)
# gtk3: something somewhere sometimes
# clobbers the best size, so widgets
# don't get shown. Only observed with
# BitmapToggleButtons.
size = tool.GetBestSize()
tool.SetMinSize(size)
tool.SetMaxSize(size)
self.__tools.insert(index, tool)
self.__sizer.Insert(index + 1, tool, flag=wx.ALIGN_CENTRE)
self.InvalidateBestSize()
if redraw:
self.__drawToolBar()
if postevent:
wx.PostEvent(self, ToolBarEvent())
def DoGetBestSize(self):
"""Calculates and returns the best size for this toolbar, simply the
minimum size that will fit all tools.
This method is called by :mod:`wx` when this toolbar is laid out.
"""
# Calculate the minimum/maximum size
# for this toolbar, given the addition
# of the new tool. If the orientation
# of this toolbar (set in __init__) is
# HORIZONTAL, the ttlSpace is used to
# store total width, otherwise it is
# used to store total height.
ttlSpace = 0
minWidth = 0
minHeight = 0
for tool in self.__tools:
tw, th = tool.GetBestSize().Get()
if tw > minWidth: minWidth = tw
if th > minHeight: minHeight = th
if self.__orient == wx.HORIZONTAL: ttlSpace += tw
else: ttlSpace += th
if self.__orient == wx.HORIZONTAL:
leftWidth = self.__leftButton .GetBestSize().GetWidth()
rightWidth = self.__rightButton.GetBestSize().GetWidth()
minWidth = minWidth + leftWidth + rightWidth
else:
topHeight = self.__leftButton .GetBestSize().GetHeight()
bottomHeight = self.__rightButton.GetBestSize().GetHeight()
minHeight = minHeight + topHeight + bottomHeight
if self.__orient == wx.HORIZONTAL: size = (ttlSpace, minHeight)
else: size = (minWidth, ttlSpace)
# The agw.AuiManager does not honour the best size when
# toolbars are floated, but it does honour the minimum
# size. So I'm just setting the minimum size to the best
# size.
log.debug('Setting toolbar size: {}'.format(size))
self.SetMinSize( size)
self.SetMaxSize( size)
self.CacheBestSize(size)
return size
def ClearTools(
self,
destroy=False,
startIdx=None,
endIdx=None,
postevent=True):
"""Removes all tools, or a range of tools, from this
``FSLeyesToolBar``.
:arg destroy: If ``True``, the removed tools are destroyed.
:arg startIdx: Start index of tools to remove. If not provided,
defaults to 0.
:arg endIdx: End index of tools to remove (exclusive). If not
provided, defaults to :meth:`GetToolCount()`.
:arg postevent: If ``True``, a :data:`ToolBarEvent` will be
generated. Set to ``False`` to suppress the event.
"""
if len(self.__tools) == 0:
return
if startIdx is None: startIdx = 0
if endIdx is None: endIdx = len(self.__tools)
for i in range(startIdx, endIdx):
tool = self.__tools[i]
self.__sizer.Detach(tool)
if destroy:
tool.Destroy()
self.__tools[startIdx:endIdx] = []
self.InvalidateBestSize()
self.Layout()
if postevent:
wx.PostEvent(self, ToolBarEvent())
def __onMouseWheel(self, ev):
"""Called when the mouse wheel is rotated on this ``FSLeyesToolBar``.
Calls :meth:`__onLeftButton` or :meth:`__onRightButton`, depending
on the rotation direction.
"""
wheelDir = ev.GetWheelRotation()
if wheelDir < 0: self.__onRightButton()
elif wheelDir > 0: self.__onLeftButton()
def __onLeftButton(self, ev=None):
"""Called when the left toolbar button is pressed.
If the toolbar is compressed, it is scrolled to the left.
"""
self.__visibleOffset -= 1
if self.__visibleOffset <= 0:
self.__visibleOffset = 0
log.debug('Left button pushed - setting start '
'tool index to {}'.format(self.__visibleOffset))
self.__drawToolBar()
def __onRightButton(self, ev=None):
"""Called when the right toolbar button is pressed.
If the toolbar is compressed, it is scrolled to the right.
"""
self.__visibleOffset += 1
if self.__visibleOffset + self.__numVisible >= len(self.__tools):
self.__visibleOffset = len(self.__tools) - self.__numVisible
log.debug('Right button pushed - setting start '
'tool index to {}'.format(self.__visibleOffset))
self.__drawToolBar()
def __drawToolBar(self, *a):
"""Draws this ``FSLeyesToolBar``.
If the toolbar is big enough, all tools are drawn. Otherwise, the
        method figures out how many tools can be drawn, and which tools to
draw, given the current size.
"""
sizer = self.__sizer
tools = self.__tools
orient = self.__orient
lbtn = self.__leftButton
rbtn = self.__rightButton
if orient == wx.HORIZONTAL:
availSpace = self.GetSize().GetWidth()
reqdSpace = [tool.GetBestSize().GetWidth() for tool in tools]
leftSpace = lbtn .GetBestSize().GetWidth()
rightSpace = rbtn .GetBestSize().GetWidth()
else:
availSpace = self.GetSize().GetHeight()
reqdSpace = [tool.GetBestSize().GetHeight() for tool in tools]
leftSpace = lbtn .GetBestSize().GetHeight()
rightSpace = rbtn .GetBestSize().GetHeight()
enoughSpace = availSpace >= sum(reqdSpace)
sizer.Show(lbtn, not enoughSpace)
sizer.Show(rbtn, not enoughSpace)
# show all tools
if enoughSpace:
log.debug('{}: All tools fit ({} >= {})'.format(
type(self).__name__, availSpace, sum(reqdSpace)))
self.__visibleOffset = 0
self.__numVisible = len(tools)
for tool in tools:
sizer.Show(tool)
# show <numVisible> tools, starting from <visibleOffset>
# (see __onMouseWheel/__onLeftButton/__onRightButton)
else:
reqdSpace = reqdSpace[self.__visibleOffset:]
cumSpace = np.cumsum(reqdSpace) + leftSpace + rightSpace
biggerIdxs = [int(i) for i in np.where(cumSpace > availSpace)[0]]
if len(biggerIdxs) == 0:
lastIdx = len(tools)
else:
lastIdx = biggerIdxs[0] + self.__visibleOffset
self.__numVisible = lastIdx - self.__visibleOffset
log.debug('{}: {} tools fit ({} - {})'.format(
type(self).__name__, self.__numVisible, self.__visibleOffset, lastIdx))
lbtn.Enable(self.__visibleOffset > 0)
rbtn.Enable(lastIdx < len(tools))
for i in range(len(tools)):
sizer.Show(tools[i], self.__visibleOffset <= i < lastIdx)
self.Layout()
_ToolBarEvent, _EVT_TOOLBAR_EVENT = wxevent.NewEvent()
EVT_TOOLBAR_EVENT = _EVT_TOOLBAR_EVENT
"""Identifier for the :data:`ToolBarEvent` event. """
ToolBarEvent = _ToolBarEvent
"""Event emitted when one or more tools is/are added/removed to/from a
:class:`FSLeyesToolBar`.
"""
class ToolBarDivider(wx.Panel):
"""An empty ``wx.Panel`` intended to be used for dividing space in a
:class:`FSLeyesToolBar`.
"""
def __init__(self,
parent,
width=10,
height=32,
orient=wx.VERTICAL):
wx.Panel.__init__(self, parent)
if orient == wx.VERTICAL: size = (width, height)
elif orient == wx.HORIZONTAL: size = (height, width)
self.SetMinSize(size)
self.SetMaxSize(size)
|
the-stack_0_10908 | _base_ = "finetune-eval-base.py"
# dataset settings
data_source_cfg = dict(
type="ImageListMultihead",
memcached=False,
mclient_path='/no/matter',
# this will be ignored if type != ImageListMultihead
)
data_train_list = "data/xview/meta/train-1000.txt"
data_train_root = 'data/xview'
data_val_list = "data/xview/meta/val.txt"
data_val_root = 'data/xview'
data_test_list = "data/xview/meta/test.txt"
data_test_root = 'data/xview'
dataset_type = "AUROCDataset"
img_norm_cfg = dict(mean=[0.368,0.381,0.3436], std=[0.2035,0.1854,0.1849])
train_pipeline = [
dict(type='RandomResizedCrop', size=224),
dict(type='RandomHorizontalFlip'),
dict(type='ToTensor'),
dict(type='Normalize', **img_norm_cfg),
]
test_pipeline = [
dict(type='Resize', size=256),
dict(type='CenterCrop', size=224),
dict(type='ToTensor'),
dict(type='Normalize', **img_norm_cfg),
]
data = dict(
batch_size=64, # x4 from update_interval
workers_per_gpu=5,
train=dict(
type=dataset_type,
data_source=dict(
list_file=data_train_list, root=data_train_root,
**data_source_cfg),
pipeline=train_pipeline),
val=dict(
type=dataset_type,
data_source=dict(
list_file=data_val_list, root=data_val_root, **data_source_cfg),
pipeline=test_pipeline),
test=dict(
type=dataset_type,
data_source=dict(
list_file=data_test_list, root=data_test_root, **data_source_cfg),
pipeline=test_pipeline))
custom_hooks = [
dict(
name="val",
type='ValidateHook',
dataset=data['val'],
by_epoch=False,
initial=False,
interval=25,
imgs_per_gpu=32,
workers_per_gpu=5,
eval_param=dict()),
dict(
name="test",
type='ValidateHook',
dataset=data['test'],
by_epoch=False,
initial=False,
interval=25,
imgs_per_gpu=32,
workers_per_gpu=5,
eval_param=dict()),
]
by_iter = True
# learning policy
lr_config = dict(
by_epoch=False,
policy='step',
step=[833,1667],
gamma=0.1 # multiply LR by this number at each step
)
# momentum and weight decay from VTAB and IDRL
optimizer = dict(type='SGD', lr=0.001, momentum=0.9, weight_decay=0.,
                 paramwise_options={r'\Ahead.': dict(lr_mult=100)})
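# With the settings above, and assuming the standard mmcv 'step' policy
# semantics, the backbone trains at lr=0.001 while parameters matching the
# regex r'\Ahead.' get lr=0.001*100=0.1; both are multiplied by gamma=0.1 at
# iteration 833 and again at iteration 1667, and training stops at
# total_iters=2500. The effective batch size is 64*4=256 because of the
# gradient accumulation implied by optimizer_config update_interval=4.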
# runtime settings
# total iters or total epochs
total_iters=2500
checkpoint_config = dict(interval=2500)
log_config = dict(
interval=1,
by_epoch=False,
hooks=[
dict(type='TextLoggerHook', by_epoch=False),
dict(type='TensorboardLoggerHook', by_epoch=False)
])
optimizer_config = dict(update_interval=4)
|
the-stack_0_10909 | '''
Created on 8 mrt. 2011
.. codeauthor:: jhkwakkel <j.h.kwakkel (at) tudelft (dot) nl>
epruyt <e.pruyt (at) tudelft (dot) nl>
'''
from __future__ import (division, unicode_literals, print_function,
absolute_import)
from math import exp
from ema_workbench.em_framework import (RealParameter, CategoricalParameter,
Outcome, perform_experiments)
from ema_workbench.util import ema_logging
from ema_workbench.connectors.vensim import VensimModel
class ScarcityModel(VensimModel):
def returnsToScale(self, x, speed, scale):
return (x*1000, scale*1/(1+exp(-1 * speed * (x-50))))
def approxLearning(self, x, speed, scale, start):
x = x-start
loc = 1 - scale
a = (x*10000, scale*1/(1+exp(speed * x))+loc)
return a
def f(self, x, speed, loc):
return (x/10, loc*1/(1+exp(speed * x)))
def priceSubstite(self, x, speed, begin, end):
scale = 2 * end
start = begin - scale/2
return (x+2000, scale*1/(1+exp(-1 * speed * x)) + start)
def run_model(self, scenario, policy):
"""Method for running an instantiated model structure """
kwargs = scenario
loc = kwargs.pop("lookup shortage loc")
speed = kwargs.pop("lookup shortage speed")
lookup = [self.f(x/10, speed, loc) for x in range(0, 100)]
kwargs['shortage price effect lookup'] = lookup
speed = kwargs.pop("lookup price substitute speed")
begin = kwargs.pop("lookup price substitute begin")
end = kwargs.pop("lookup price substitute end")
lookup = [self.priceSubstite(x, speed, begin, end)
for x in range(0, 100, 10)]
kwargs['relative price substitute lookup'] = lookup
scale = kwargs.pop("lookup returns to scale speed")
speed = kwargs.pop("lookup returns to scale scale")
lookup = [self.returnsToScale(x, speed, scale)
for x in range(0, 101, 10)]
kwargs['returns to scale lookup'] = lookup
scale = kwargs.pop("lookup approximated learning speed")
speed = kwargs.pop("lookup approximated learning scale")
start = kwargs.pop("lookup approximated learning start")
lookup = [self.approxLearning(x, speed, scale, start)
for x in range(0, 101, 10)]
kwargs['approximated learning effect lookup'] = lookup
super(ScarcityModel, self).run_model(kwargs, policy)
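    # A small worked example of the lookup construction used in run_model above
    # (parameter values are made up for illustration): with speed=3.0 and
    # loc=35.0, self.f(0.5, 3.0, 35.0) returns (0.05, 35.0 / (1 + exp(1.5))),
    # i.e. one (x, y) point of the piecewise lookup handed to Vensim.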
if __name__ == "__main__":
ema_logging.log_to_stderr(ema_logging.DEBUG)
model = ScarcityModel("scarcity", wd=r'./models/scarcity',
model_file=r'\MetalsEMA.vpm')
model.outcomes = [Outcome('relative market price', time=True),
Outcome('supply demand ratio', time=True),
Outcome('real annual demand', time=True),
Outcome('produced of intrinsically demanded', time=True),
Outcome('supply', time=True),
Outcome('Installed Recycling Capacity', time=True),
Outcome('Installed Extraction Capacity', time=True)]
model.uncertainties = [
RealParameter("price elasticity of demand", 0, 0.5),
RealParameter("fraction of maximum extraction capacity used",
0.6, 1.2),
RealParameter("initial average recycling cost", 1, 4),
RealParameter("exogenously planned extraction capacity",
0, 15000),
RealParameter("absolute recycling loss fraction", 0.1, 0.5),
RealParameter("normal profit margin", 0, 0.4),
RealParameter("initial annual supply", 100000, 120000),
RealParameter("initial in goods", 1500000, 2500000),
RealParameter("average construction time extraction capacity",
1, 10),
RealParameter("average lifetime extraction capacity", 20, 40),
RealParameter("average lifetime recycling capacity", 20, 40),
RealParameter("initial extraction capacity under construction",
5000, 20000),
RealParameter("initial recycling capacity under construction",
5000, 20000),
RealParameter("initial recycling infrastructure", 5000, 20000),
# order of delay
CategoricalParameter("order in goods delay", (1, 4, 10, 1000)),
CategoricalParameter("order recycling capacity delay", (1, 4, 10)),
CategoricalParameter("order extraction capacity delay", (1, 4, 10)),
# uncertainties associated with lookups
RealParameter("lookup shortage loc", 20, 50),
RealParameter("lookup shortage speed", 1, 5),
RealParameter("lookup price substitute speed", 0.1, 0.5),
RealParameter("lookup price substitute begin", 3, 7),
RealParameter("lookup price substitute end", 15, 25),
RealParameter("lookup returns to scale speed", 0.01, 0.2),
RealParameter("lookup returns to scale scale", 0.3, 0.7),
RealParameter("lookup approximated learning speed", 0.01, 0.2),
RealParameter("lookup approximated learning scale", 0.3, 0.6),
RealParameter("lookup approximated learning start", 30, 60)]
results = perform_experiments(model, 50)
|
the-stack_0_10910 | class Solution:
def maxProfit(self, prices: List[int], fee: int) -> int:
        # sold[i]: best profit after day i holding no stock (sell at day i or do nothing)
        # sold[i] = max(sold[i-1], hold[i-1] + prices[i] - fee)
        # hold[i]: best profit after day i holding one stock (buy at day i or do nothing)
        # hold[i] = max(hold[i-1], sold[i-1] - prices[i])
N = len(prices)
sold = [0] * N
hold = [0] * N
sold[0] = 0
hold[0] = -prices[0]
for i in range(1, N):
sold[i] = max( sold[i-1], hold[i-1] + prices[i] - fee)
hold[i] = max( hold[i-1], sold[i-1] - prices[i])
return sold[N-1]
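# A small worked example (values taken from the classic problem statement,
# used here only for illustration):
#
#   Solution().maxProfit([1, 3, 2, 8, 4, 9], fee=2) == 8
#
# The optimal trades are buy at 1 / sell at 8 and buy at 4 / sell at 9, paying
# the fee of 2 on each transaction. Since each state only depends on day i-1,
# the two arrays could also be reduced to two scalars for O(1) extra space.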
|
the-stack_0_10911 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Constructs model, inputs, and training environment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import tensorflow as tf
from object_detection import eval_util
from object_detection import inputs
from object_detection.builders import model_builder
from object_detection.builders import optimizer_builder
from object_detection.core import standard_fields as fields
from object_detection.utils import config_util
from object_detection.utils import label_map_util
from object_detection.utils import shape_utils
from object_detection.utils import variables_helper
from object_detection.utils import visualization_utils as vis_utils
# A map of names to methods that help build the model.
MODEL_BUILD_UTIL_MAP = {
'get_configs_from_pipeline_file':
config_util.get_configs_from_pipeline_file,
'create_pipeline_proto_from_configs':
config_util.create_pipeline_proto_from_configs,
'merge_external_params_with_configs':
config_util.merge_external_params_with_configs,
'create_train_input_fn': inputs.create_train_input_fn,
'create_eval_input_fn': inputs.create_eval_input_fn,
'create_predict_input_fn': inputs.create_predict_input_fn,
}
def _get_groundtruth_data(detection_model, class_agnostic):
"""Extracts groundtruth data from detection_model.
Args:
detection_model: A `DetectionModel` object.
class_agnostic: Whether the detections are class_agnostic.
Returns:
A tuple of:
groundtruth: Dictionary with the following fields:
'groundtruth_boxes': [num_boxes, 4] float32 tensor of boxes, in
normalized coordinates.
'groundtruth_classes': [num_boxes] int64 tensor of 1-indexed classes.
'groundtruth_masks': 3D float32 tensor of instance masks (if provided in
groundtruth)
class_agnostic: Boolean indicating whether detections are class agnostic.
"""
input_data_fields = fields.InputDataFields()
groundtruth_boxes = detection_model.groundtruth_lists(
fields.BoxListFields.boxes)[0]
# For class-agnostic models, groundtruth one-hot encodings collapse to all
# ones.
if class_agnostic:
groundtruth_boxes_shape = tf.shape(groundtruth_boxes)
groundtruth_classes_one_hot = tf.ones([groundtruth_boxes_shape[0], 1])
else:
groundtruth_classes_one_hot = detection_model.groundtruth_lists(
fields.BoxListFields.classes)[0]
label_id_offset = 1 # Applying label id offset (b/63711816)
groundtruth_classes = (
tf.argmax(groundtruth_classes_one_hot, axis=1) + label_id_offset)
groundtruth = {
input_data_fields.groundtruth_boxes: groundtruth_boxes,
input_data_fields.groundtruth_classes: groundtruth_classes
}
if detection_model.groundtruth_has_field(fields.BoxListFields.masks):
groundtruth[input_data_fields.groundtruth_instance_masks] = (
detection_model.groundtruth_lists(fields.BoxListFields.masks)[0])
return groundtruth
def unstack_batch(tensor_dict, unpad_groundtruth_tensors=True):
"""Unstacks all tensors in `tensor_dict` along 0th dimension.
Unstacks tensor from the tensor dict along 0th dimension and returns a
tensor_dict containing values that are lists of unstacked tensors.
Tensors in the `tensor_dict` are expected to be of one of the three shapes:
1. [batch_size]
2. [batch_size, height, width, channels]
3. [batch_size, num_boxes, d1, d2, ... dn]
When unpad_groundtruth_tensors is set to true, unstacked tensors of form 3
above are sliced along the `num_boxes` dimension using the value in tensor
field.InputDataFields.num_groundtruth_boxes.
Note that this function has a static list of input data fields and has to be
kept in sync with the InputDataFields defined in core/standard_fields.py
Args:
tensor_dict: A dictionary of batched groundtruth tensors.
unpad_groundtruth_tensors: Whether to remove padding along `num_boxes`
dimension of the groundtruth tensors.
Returns:
A dictionary where the keys are from fields.InputDataFields and values are
a list of unstacked (optionally unpadded) tensors.
Raises:
ValueError: If unpad_tensors is True and `tensor_dict` does not contain
`num_groundtruth_boxes` tensor.
"""
unbatched_tensor_dict = {key: tf.unstack(tensor)
for key, tensor in tensor_dict.items()}
if unpad_groundtruth_tensors:
if (fields.InputDataFields.num_groundtruth_boxes not in
unbatched_tensor_dict):
raise ValueError('`num_groundtruth_boxes` not found in tensor_dict. '
'Keys available: {}'.format(
unbatched_tensor_dict.keys()))
unbatched_unpadded_tensor_dict = {}
unpad_keys = set([
# List of input data fields that are padded along the num_boxes
# dimension. This list has to be kept in sync with InputDataFields in
# standard_fields.py.
fields.InputDataFields.groundtruth_instance_masks,
fields.InputDataFields.groundtruth_classes,
fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_keypoints,
fields.InputDataFields.groundtruth_group_of,
fields.InputDataFields.groundtruth_difficult,
fields.InputDataFields.groundtruth_is_crowd,
fields.InputDataFields.groundtruth_area,
fields.InputDataFields.groundtruth_weights
]).intersection(set(unbatched_tensor_dict.keys()))
for key in unpad_keys:
unpadded_tensor_list = []
for num_gt, padded_tensor in zip(
unbatched_tensor_dict[fields.InputDataFields.num_groundtruth_boxes],
unbatched_tensor_dict[key]):
tensor_shape = shape_utils.combined_static_and_dynamic_shape(
padded_tensor)
slice_begin = tf.zeros([len(tensor_shape)], dtype=tf.int32)
slice_size = tf.stack(
[num_gt] + [-1 if dim is None else dim for dim in tensor_shape[1:]])
unpadded_tensor = tf.slice(padded_tensor, slice_begin, slice_size)
unpadded_tensor_list.append(unpadded_tensor)
unbatched_unpadded_tensor_dict[key] = unpadded_tensor_list
unbatched_tensor_dict.update(unbatched_unpadded_tensor_dict)
return unbatched_tensor_dict
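# A minimal sketch of the unpadding behaviour described above (toy shapes, not
# taken from the object_detection test suite): for a batch of two images with
# num_groundtruth_boxes = [1, 3] and groundtruth_boxes of shape [2, 3, 4],
# unstack_batch(..., unpad_groundtruth_tensors=True) returns a dictionary whose
# groundtruth_boxes entry is a list of two tensors with shapes [1, 4] and [3, 4].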
def create_model_fn(detection_model_fn, configs, hparams, use_tpu=False):
"""Creates a model function for `Estimator`.
Args:
detection_model_fn: Function that returns a `DetectionModel` instance.
configs: Dictionary of pipeline config objects.
hparams: `HParams` object.
use_tpu: Boolean indicating whether model should be constructed for
use on TPU.
Returns:
`model_fn` for `Estimator`.
"""
train_config = configs['train_config']
eval_input_config = configs['eval_input_config']
eval_config = configs['eval_config']
def model_fn(features, labels, mode, params=None):
"""Constructs the object detection model.
Args:
features: Dictionary of feature tensors, returned from `input_fn`.
labels: Dictionary of groundtruth tensors if mode is TRAIN or EVAL,
otherwise None.
mode: Mode key from tf.estimator.ModeKeys.
params: Parameter dictionary passed from the estimator.
Returns:
An `EstimatorSpec` that encapsulates the model and its serving
configurations.
"""
params = params or {}
total_loss, train_op, detections, export_outputs = None, None, None, None
is_training = mode == tf.estimator.ModeKeys.TRAIN
detection_model = detection_model_fn(is_training=is_training,
add_summaries=(not use_tpu))
scaffold_fn = None
if mode == tf.estimator.ModeKeys.TRAIN:
labels = unstack_batch(
labels,
unpad_groundtruth_tensors=train_config.unpad_groundtruth_tensors)
elif mode == tf.estimator.ModeKeys.EVAL:
      # For evaluating on train data, it is necessary to check whether groundtruth
# must be unpadded.
boxes_shape = (
labels[fields.InputDataFields.groundtruth_boxes].get_shape()
.as_list())
unpad_groundtruth_tensors = True if boxes_shape[1] is not None else False
labels = unstack_batch(
labels, unpad_groundtruth_tensors=unpad_groundtruth_tensors)
if mode in (tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL):
gt_boxes_list = labels[fields.InputDataFields.groundtruth_boxes]
gt_classes_list = labels[fields.InputDataFields.groundtruth_classes]
gt_masks_list = None
if fields.InputDataFields.groundtruth_instance_masks in labels:
gt_masks_list = labels[
fields.InputDataFields.groundtruth_instance_masks]
gt_keypoints_list = None
if fields.InputDataFields.groundtruth_keypoints in labels:
gt_keypoints_list = labels[fields.InputDataFields.groundtruth_keypoints]
detection_model.provide_groundtruth(
groundtruth_boxes_list=gt_boxes_list,
groundtruth_classes_list=gt_classes_list,
groundtruth_masks_list=gt_masks_list,
groundtruth_keypoints_list=gt_keypoints_list,
groundtruth_weights_list=labels[
fields.InputDataFields.groundtruth_weights])
preprocessed_images = features[fields.InputDataFields.image]
prediction_dict = detection_model.predict(
preprocessed_images, features[fields.InputDataFields.true_image_shape])
detections = detection_model.postprocess(
prediction_dict, features[fields.InputDataFields.true_image_shape])
if mode == tf.estimator.ModeKeys.TRAIN:
if train_config.fine_tune_checkpoint and hparams.load_pretrained:
if not train_config.fine_tune_checkpoint_type:
# train_config.from_detection_checkpoint field is deprecated. For
# backward compatibility, set train_config.fine_tune_checkpoint_type
# based on train_config.from_detection_checkpoint.
if train_config.from_detection_checkpoint:
train_config.fine_tune_checkpoint_type = 'detection'
else:
train_config.fine_tune_checkpoint_type = 'classification'
asg_map = detection_model.restore_map(
fine_tune_checkpoint_type=train_config.fine_tune_checkpoint_type,
load_all_detection_checkpoint_vars=(
train_config.load_all_detection_checkpoint_vars))
available_var_map = (
variables_helper.get_variables_available_in_checkpoint(
asg_map, train_config.fine_tune_checkpoint,
include_global_step=False))
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(train_config.fine_tune_checkpoint,
available_var_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(train_config.fine_tune_checkpoint,
available_var_map)
if mode in (tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL):
losses_dict = detection_model.loss(
prediction_dict, features[fields.InputDataFields.true_image_shape])
      losses = [loss_tensor for loss_tensor in losses_dict.values()]
if train_config.add_regularization_loss:
regularization_losses = tf.get_collection(
tf.GraphKeys.REGULARIZATION_LOSSES)
if regularization_losses:
regularization_loss = tf.add_n(regularization_losses,
name='regularization_loss')
losses.append(regularization_loss)
losses_dict['Loss/regularization_loss'] = regularization_loss
total_loss = tf.add_n(losses, name='total_loss')
losses_dict['Loss/total_loss'] = total_loss
if mode in [tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL]:
# TODO(rathodv): Stop creating optimizer summary vars in EVAL mode once we
# can write learning rate summaries on TPU without host calls.
global_step = tf.train.get_or_create_global_step()
training_optimizer, optimizer_summary_vars = optimizer_builder.build(
train_config.optimizer)
if mode == tf.estimator.ModeKeys.TRAIN:
if use_tpu:
training_optimizer = tf.contrib.tpu.CrossShardOptimizer(
training_optimizer)
# Optionally freeze some layers by setting their gradients to be zero.
trainable_variables = None
if train_config.freeze_variables:
trainable_variables = tf.contrib.framework.filter_variables(
tf.trainable_variables(),
exclude_patterns=train_config.freeze_variables)
clip_gradients_value = None
if train_config.gradient_clipping_by_norm > 0:
clip_gradients_value = train_config.gradient_clipping_by_norm
if not use_tpu:
for var in optimizer_summary_vars:
tf.summary.scalar(var.op.name, var)
summaries = [] if use_tpu else None
train_op = tf.contrib.layers.optimize_loss(
loss=total_loss,
global_step=global_step,
learning_rate=None,
clip_gradients=clip_gradients_value,
optimizer=training_optimizer,
variables=trainable_variables,
summaries=summaries,
name='') # Preventing scope prefix on all variables.
if mode == tf.estimator.ModeKeys.PREDICT:
export_outputs = {
tf.saved_model.signature_constants.PREDICT_METHOD_NAME:
tf.estimator.export.PredictOutput(detections)
}
eval_metric_ops = None
if mode in (tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL):
class_agnostic = (fields.DetectionResultFields.detection_classes
not in detections)
groundtruth = _get_groundtruth_data(detection_model, class_agnostic)
use_original_images = fields.InputDataFields.original_image in features
original_images = (
features[fields.InputDataFields.original_image] if use_original_images
else features[fields.InputDataFields.image])
eval_dict = eval_util.result_dict_for_single_example(
original_images[0:1],
features[inputs.HASH_KEY][0],
detections,
groundtruth,
class_agnostic=class_agnostic,
scale_to_absolute=False)
if class_agnostic:
category_index = label_map_util.create_class_agnostic_category_index()
else:
category_index = label_map_util.create_category_index_from_labelmap(
eval_input_config.label_map_path)
img_summary = None
if not use_tpu and use_original_images:
detection_and_groundtruth = (
vis_utils.draw_side_by_side_evaluation_image(
eval_dict, category_index, max_boxes_to_draw=20,
min_score_thresh=0.2))
img_summary = tf.summary.image('Detections_Left_Groundtruth_Right',
detection_and_groundtruth)
if mode == tf.estimator.ModeKeys.EVAL:
# Eval metrics on a single example.
eval_metrics = eval_config.metrics_set
if not eval_metrics:
eval_metrics = ['coco_detection_metrics']
eval_metric_ops = eval_util.get_eval_metric_ops_for_evaluators(
eval_metrics, category_index.values(), eval_dict,
include_metrics_per_category=False)
for loss_key, loss_tensor in iter(losses_dict.items()):
eval_metric_ops[loss_key] = tf.metrics.mean(loss_tensor)
for var in optimizer_summary_vars:
eval_metric_ops[var.op.name] = (var, tf.no_op())
if img_summary is not None:
eval_metric_ops['Detections_Left_Groundtruth_Right'] = (
img_summary, tf.no_op())
        eval_metric_ops = {str(k): v for k, v in eval_metric_ops.items()}
if use_tpu:
return tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
scaffold_fn=scaffold_fn,
predictions=detections,
loss=total_loss,
train_op=train_op,
eval_metrics=eval_metric_ops,
export_outputs=export_outputs)
else:
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=detections,
loss=total_loss,
train_op=train_op,
eval_metric_ops=eval_metric_ops,
export_outputs=export_outputs)
return model_fn
def create_estimator_and_inputs(run_config,
hparams,
pipeline_config_path,
train_steps=None,
eval_steps=None,
model_fn_creator=create_model_fn,
use_tpu_estimator=False,
use_tpu=False,
num_shards=1,
params=None,
**kwargs):
"""Creates `Estimator`, input functions, and steps.
Args:
run_config: A `RunConfig`.
hparams: A `HParams`.
pipeline_config_path: A path to a pipeline config file.
train_steps: Number of training steps. If None, the number of training steps
is set from the `TrainConfig` proto.
eval_steps: Number of evaluation steps per evaluation cycle. If None, the
number of evaluation steps is set from the `EvalConfig` proto.
model_fn_creator: A function that creates a `model_fn` for `Estimator`.
Follows the signature:
* Args:
* `detection_model_fn`: Function that returns `DetectionModel` instance.
* `configs`: Dictionary of pipeline config objects.
* `hparams`: `HParams` object.
* Returns:
`model_fn` for `Estimator`.
use_tpu_estimator: Whether a `TPUEstimator` should be returned. If False,
an `Estimator` will be returned.
use_tpu: Boolean, whether training and evaluation should run on TPU. Only
used if `use_tpu_estimator` is True.
num_shards: Number of shards (TPU cores). Only used if `use_tpu_estimator`
is True.
params: Parameter dictionary passed from the estimator. Only used if
`use_tpu_estimator` is True.
**kwargs: Additional keyword arguments for configuration override.
Returns:
A dictionary with the following fields:
'estimator': An `Estimator` or `TPUEstimator`.
'train_input_fn': A training input function.
'eval_input_fn': An evaluation input function.
'eval_on_train_input_fn': An evaluation-on-train input function.
'predict_input_fn': A prediction input function.
'train_steps': Number of training steps. Either directly from input or from
configuration.
'eval_steps': Number of evaluation steps. Either directly from input or from
configuration.
"""
get_configs_from_pipeline_file = MODEL_BUILD_UTIL_MAP[
'get_configs_from_pipeline_file']
merge_external_params_with_configs = MODEL_BUILD_UTIL_MAP[
'merge_external_params_with_configs']
create_pipeline_proto_from_configs = MODEL_BUILD_UTIL_MAP[
'create_pipeline_proto_from_configs']
create_train_input_fn = MODEL_BUILD_UTIL_MAP['create_train_input_fn']
create_eval_input_fn = MODEL_BUILD_UTIL_MAP['create_eval_input_fn']
create_predict_input_fn = MODEL_BUILD_UTIL_MAP['create_predict_input_fn']
configs = get_configs_from_pipeline_file(pipeline_config_path)
configs = merge_external_params_with_configs(
configs,
hparams,
train_steps=train_steps,
eval_steps=eval_steps,
**kwargs)
model_config = configs['model']
train_config = configs['train_config']
train_input_config = configs['train_input_config']
eval_config = configs['eval_config']
eval_input_config = configs['eval_input_config']
if train_steps is None:
train_steps = configs['train_config'].num_steps
if eval_steps is None:
eval_steps = configs['eval_config'].num_examples
detection_model_fn = functools.partial(
model_builder.build, model_config=model_config)
# Create the input functions for TRAIN/EVAL/PREDICT.
train_input_fn = create_train_input_fn(
train_config=train_config,
train_input_config=train_input_config,
model_config=model_config)
eval_input_fn = create_eval_input_fn(
eval_config=eval_config,
eval_input_config=eval_input_config,
model_config=model_config)
eval_on_train_input_fn = create_eval_input_fn(
eval_config=eval_config,
eval_input_config=train_input_config,
model_config=model_config)
predict_input_fn = create_predict_input_fn(model_config=model_config)
model_fn = model_fn_creator(detection_model_fn, configs, hparams, use_tpu)
if use_tpu_estimator:
estimator = tf.contrib.tpu.TPUEstimator(
model_fn=model_fn,
train_batch_size=train_config.batch_size,
# For each core, only batch size 1 is supported for eval.
eval_batch_size=num_shards * 1 if use_tpu else 1,
use_tpu=use_tpu,
config=run_config,
params=params if params else {})
else:
estimator = tf.estimator.Estimator(model_fn=model_fn, config=run_config)
# Write the as-run pipeline config to disk.
if run_config.is_chief:
pipeline_config_final = create_pipeline_proto_from_configs(
configs)
config_util.save_pipeline_config(pipeline_config_final, estimator.model_dir)
return dict(
estimator=estimator,
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
eval_on_train_input_fn=eval_on_train_input_fn,
predict_input_fn=predict_input_fn,
train_steps=train_steps,
eval_steps=eval_steps)
def create_train_and_eval_specs(train_input_fn,
eval_input_fn,
eval_on_train_input_fn,
predict_input_fn,
train_steps,
eval_steps,
eval_on_train_data=False,
final_exporter_name='Servo',
eval_spec_name='eval'):
"""Creates a `TrainSpec` and `EvalSpec`s.
Args:
train_input_fn: Function that produces features and labels on train data.
eval_input_fn: Function that produces features and labels on eval data.
eval_on_train_input_fn: Function that produces features and labels for
evaluation on train data.
predict_input_fn: Function that produces features for inference.
train_steps: Number of training steps.
eval_steps: Number of eval steps.
eval_on_train_data: Whether to evaluate model on training data. Default is
False.
final_exporter_name: String name given to `FinalExporter`.
eval_spec_name: String name given to main `EvalSpec`.
Returns:
Tuple of `TrainSpec` and list of `EvalSpecs`. The first `EvalSpec` is for
evaluation data. If `eval_on_train_data` is True, the second `EvalSpec` in
the list will correspond to training data.
"""
exporter = tf.estimator.FinalExporter(
name=final_exporter_name, serving_input_receiver_fn=predict_input_fn)
train_spec = tf.estimator.TrainSpec(
input_fn=train_input_fn, max_steps=train_steps)
eval_specs = [
tf.estimator.EvalSpec(
name=eval_spec_name,
input_fn=eval_input_fn,
steps=eval_steps,
exporters=exporter)
]
if eval_on_train_data:
eval_specs.append(
tf.estimator.EvalSpec(
name='eval_on_train', input_fn=eval_on_train_input_fn,
steps=eval_steps))
return train_spec, eval_specs
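# A minimal sketch of how these specs are typically consumed, mirroring the
# train_and_evaluate pattern referenced in model_main.py (run_config, hparams
# and pipeline_config_path are placeholders; the exact wiring may differ from
# the real script):
#
#   train_and_eval_dict = create_estimator_and_inputs(
#       run_config, hparams, pipeline_config_path)
#   train_spec, eval_specs = create_train_and_eval_specs(
#       train_and_eval_dict['train_input_fn'],
#       train_and_eval_dict['eval_input_fn'],
#       train_and_eval_dict['eval_on_train_input_fn'],
#       train_and_eval_dict['predict_input_fn'],
#       train_and_eval_dict['train_steps'],
#       train_and_eval_dict['eval_steps'])
#   tf.estimator.train_and_evaluate(
#       train_and_eval_dict['estimator'], train_spec, eval_specs[0])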
def continuous_eval(estimator, model_dir, input_fn, eval_steps, train_steps,
name):
"""Perform continuous evaluation on checkpoints written to a model directory.
Args:
estimator: Estimator object to use for evaluation.
model_dir: Model directory to read checkpoints for continuous evaluation.
input_fn: Input function to use for evaluation.
eval_steps: Number of steps to run during each evaluation.
train_steps: Number of training steps. This is used to infer the last
checkpoint and stop evaluation loop.
name: Namescope for eval summary.
"""
def terminate_eval():
tf.logging.info('Terminating eval after 180 seconds of no checkpoints')
return True
for ckpt in tf.contrib.training.checkpoints_iterator(
model_dir, min_interval_secs=180, timeout=None,
timeout_fn=terminate_eval):
tf.logging.info('Starting Evaluation.')
try:
eval_results = estimator.evaluate(
input_fn=input_fn,
steps=eval_steps,
checkpoint_path=ckpt,
name=name)
tf.logging.info('Eval results: %s' % eval_results)
# Terminate eval job when final checkpoint is reached
current_step = int(os.path.basename(ckpt).split('-')[1])
if current_step >= train_steps:
tf.logging.info(
'Evaluation finished after training step %d' % current_step)
break
except tf.errors.NotFoundError:
tf.logging.info(
'Checkpoint %s no longer exists, skipping checkpoint' % ckpt)
def populate_experiment(run_config,
hparams,
pipeline_config_path,
train_steps=None,
eval_steps=None,
model_fn_creator=create_model_fn,
**kwargs):
"""Populates an `Experiment` object.
EXPERIMENT CLASS IS DEPRECATED. Please switch to
tf.estimator.train_and_evaluate. As an example, see model_main.py.
Args:
run_config: A `RunConfig`.
hparams: A `HParams`.
pipeline_config_path: A path to a pipeline config file.
train_steps: Number of training steps. If None, the number of training steps
is set from the `TrainConfig` proto.
eval_steps: Number of evaluation steps per evaluation cycle. If None, the
number of evaluation steps is set from the `EvalConfig` proto.
model_fn_creator: A function that creates a `model_fn` for `Estimator`.
Follows the signature:
* Args:
* `detection_model_fn`: Function that returns `DetectionModel` instance.
* `configs`: Dictionary of pipeline config objects.
* `hparams`: `HParams` object.
* Returns:
`model_fn` for `Estimator`.
**kwargs: Additional keyword arguments for configuration override.
Returns:
An `Experiment` that defines all aspects of training, evaluation, and
export.
"""
tf.logging.warning('Experiment is being deprecated. Please use '
'tf.estimator.train_and_evaluate(). See model_main.py for '
'an example.')
train_and_eval_dict = create_estimator_and_inputs(
run_config,
hparams,
pipeline_config_path,
train_steps=train_steps,
eval_steps=eval_steps,
model_fn_creator=model_fn_creator,
**kwargs)
estimator = train_and_eval_dict['estimator']
train_input_fn = train_and_eval_dict['train_input_fn']
eval_input_fn = train_and_eval_dict['eval_input_fn']
predict_input_fn = train_and_eval_dict['predict_input_fn']
train_steps = train_and_eval_dict['train_steps']
eval_steps = train_and_eval_dict['eval_steps']
export_strategies = [
tf.contrib.learn.utils.saved_model_export_utils.make_export_strategy(
serving_input_fn=predict_input_fn)
]
return tf.contrib.learn.Experiment(
estimator=estimator,
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
train_steps=train_steps,
eval_steps=eval_steps,
export_strategies=export_strategies,
eval_delay_secs=120,)
|
the-stack_0_10913 | #!/usr/bin/env python3
# Copyright (c) 2019-2020 The MicroBitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test that we reject low difficulty headers to prevent our block tree from filling up with useless bloat"""
from test_framework.messages import (
CBlockHeader,
from_hex,
)
from test_framework.p2p import (
P2PInterface,
msg_headers,
)
from test_framework.test_framework import MicroBitcoinTestFramework
import os
class RejectLowDifficultyHeadersTest(MicroBitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.chain = 'testnet3' # Use testnet chain because it has an early checkpoint
self.num_nodes = 2
def add_options(self, parser):
parser.add_argument(
'--datafile',
default='data/blockheader_testnet3.hex',
help='Test data file (default: %(default)s)',
)
def run_test(self):
self.log.info("Read headers data")
self.headers_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), self.options.datafile)
with open(self.headers_file_path, encoding='utf-8') as headers_data:
h_lines = [l.strip() for l in headers_data.readlines()]
# The headers data is taken from testnet3 for early blocks from genesis until the first checkpoint. There are
# two headers with valid POW at height 1 and 2, forking off from genesis. They are indicated by the FORK_PREFIX.
FORK_PREFIX = 'fork:'
self.headers = [l for l in h_lines if not l.startswith(FORK_PREFIX)]
self.headers_fork = [l[len(FORK_PREFIX):] for l in h_lines if l.startswith(FORK_PREFIX)]
self.headers = [from_hex(CBlockHeader(), h) for h in self.headers]
self.headers_fork = [from_hex(CBlockHeader(), h) for h in self.headers_fork]
self.log.info("Feed all non-fork headers, including and up to the first checkpoint")
peer_checkpoint = self.nodes[0].add_p2p_connection(P2PInterface())
peer_checkpoint.send_and_ping(msg_headers(self.headers))
assert {
'height': 546,
'hash': '000000002a936ca763904c3c35fce2f3556c559c0214345d31b1bcebf76acb70',
'branchlen': 546,
'status': 'headers-only',
} in self.nodes[0].getchaintips()
self.log.info("Feed all fork headers (fails due to checkpoint)")
with self.nodes[0].assert_debug_log(['bad-fork-prior-to-checkpoint']):
peer_checkpoint.send_message(msg_headers(self.headers_fork))
peer_checkpoint.wait_for_disconnect()
self.log.info("Feed all fork headers (succeeds without checkpoint)")
# On node 0 it succeeds because checkpoints are disabled
self.restart_node(0, extra_args=['-nocheckpoints'])
peer_no_checkpoint = self.nodes[0].add_p2p_connection(P2PInterface())
peer_no_checkpoint.send_and_ping(msg_headers(self.headers_fork))
assert {
"height": 2,
"hash": "00000000b0494bd6c3d5ff79c497cfce40831871cbf39b1bc28bd1dac817dc39",
"branchlen": 2,
"status": "headers-only",
} in self.nodes[0].getchaintips()
# On node 1 it succeeds because no checkpoint has been reached yet by a chain tip
peer_before_checkpoint = self.nodes[1].add_p2p_connection(P2PInterface())
peer_before_checkpoint.send_and_ping(msg_headers(self.headers_fork))
assert {
"height": 2,
"hash": "00000000b0494bd6c3d5ff79c497cfce40831871cbf39b1bc28bd1dac817dc39",
"branchlen": 2,
"status": "headers-only",
} in self.nodes[1].getchaintips()
if __name__ == '__main__':
RejectLowDifficultyHeadersTest().main()
|
the-stack_0_10916 | import os
from . import configs
from flask import Flask
from flask_cors import CORS
from flask_redis import FlaskRedis
import psycopg2
redis_store = FlaskRedis()
root_dir = os.path.dirname(os.path.abspath(__file__))
conn = psycopg2.connect(
    database=os.getenv("DB_NAME"),
    user=os.getenv("DB_USER"),
    password=os.getenv("DB_PASSWORD"),
    sslmode=os.getenv("DB_SSL"),
    port=os.getenv("DB_PORT"),
    host=os.getenv("DB_HOST")
)
conn.set_session(autocommit=True)
db = conn.cursor()
def create_app():
app = Flask(__name__)
app.config.from_object(configs.Config)
app.config['PROPAGATE_EXCEPTIONS'] = True
redis_store.init_app(app)
CORS(app, resources={r"/api/*": {"origins": "*"}})
from .controllers import api_blueprint
app.register_blueprint(api_blueprint)
    return app
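# A minimal sketch of how this factory might be used (the module/package name
# is an assumption, not taken from this repository):
#
#   from app import create_app
#   app = create_app()
#   app.run(host="0.0.0.0", port=5000)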
the-stack_0_10919 | #MIT License
#Copyright (c) 2021 SUBIN
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton
from pyrogram import Client, filters
HOME_TEXT = "<b>Helo, [{}](tg://user?id={})\n\nIam MusicPlayer 2.0 which plays music in Channels and Groups 24*7\n\nI can even Stream Youtube Live in Your Voicechat\n\nDeploy Your Own bot from source code below\n\nHit /help to know about available commands.</b>"
HELP = """
<b>Add the bot and the User account to your Group with admin rights.
Start a VoiceChat.
Use /play <song name> or use /play as a reply to an audio file or YouTube link.
You can also use /dplay <song name> to play a song from Deezer.</b>
**Common Commands**:
**/play** Reply to an audio file or YouTube link to play it or use /play <song name>.
**/dplay** Play music from Deezer, Use /dplay <song name>
**/player** Show current playing song.
**/help** Show help for commands
**/playlist** Shows the playlist.
**Admin Commands**:
**/skip** [n] ... Skip current or n where n >= 2
**/join** Join voice chat.
**/leave** Leave current voice chat
**/vc** Check which VC is joined.
**/stop** Stop playing.
**/radio** Start Radio.
**/stopradio** Stops Radio Stream.
**/replay** Play from the beginning.
**/clean** Remove unused RAW PCM files.
**/pause** Pause playing.
**/resume** Resume playing.
**/mute** Mute in VC.
**/unmute** Unmute in VC.
**/restart** Restarts the Bot.
"""
@Client.on_message(filters.command('start'))
async def start(client, message):
buttons = [
[
InlineKeyboardButton('⚙️ Update Channel', url='https://t.me/musi_c_world'),
InlineKeyboardButton('🤖 Other Bots', url='https://t.me/Soulsharper'),
],
[
InlineKeyboardButton('👨🏼💻 Developer', url='https://t.me/Soulsharper'),
InlineKeyboardButton('🧩 Source', url='https://github.com/subinps/MusicPlayer'),
],
[
InlineKeyboardButton('👨🏼🦯 Help', callback_data='help'),
]
]
reply_markup = InlineKeyboardMarkup(buttons)
await message.reply(HOME_TEXT.format(message.from_user.first_name, message.from_user.id), reply_markup=reply_markup)
@Client.on_message(filters.command("help"))
async def show_help(client, message):
buttons = [
[
InlineKeyboardButton('⚙️ Update Channel', url='https://t.me/musi_c_world'),
InlineKeyboardButton('🤖 Other Bots', url='https://t.me/musi_c_world'),
],
[
InlineKeyboardButton('👨🏼💻 Developer', url='https://t.me/Soulsharper'),
InlineKeyboardButton('🧩 Source', url='https://github.com/subinps/MusicPlayer'),
]
]
reply_markup = InlineKeyboardMarkup(buttons)
await message.reply_text(
HELP,
reply_markup=reply_markup
)
|
the-stack_0_10922 | import logging
import torch
from ..datasets import build_loader
from ..tasks import build_task
from ..utils import get_default_parser, env_setup, \
Timer, get_eta, dist_get_world_size
def add_args(parser):
## Basic options
parser.add_argument('--dataset', type=str, default='CIFAR10',
help='dataset')
parser.add_argument('--data-root', type=str, required=True,
help='root directory of the dataset')
parser.add_argument('--n-epoch', type=int, default=20,
help='# of epochs to train')
parser.add_argument('--batch-size', type=int, default=128,
help='batch size for training (per node)')
parser.add_argument('--n-worker', type=int, default=8,
help='# of workers for data prefetching (per node)')
parser.add_argument('--lr', type=float, default=0.1,
help='base learning rate (default: 0.1)')
# parser.add_argument('--data-root', type=str, default='D:/Data/SmallDB/CIFAR-10',
# help='root directory of the dataset')
# parser.add_argument('--n-epoch', type=int, default=1,
# help='# of epochs to train')
## Hyperparameters
parser.add_argument('--optim', type=str, default='SGD',
help='optimizer (default: SGD)')
parser.add_argument('--wd', type=float, default=5e-4,
help='weight decay (default: 5e-4)')
parser.add_argument('--momentum', type=float, default=0.9,
help='optimizer momentum (default: 0.9)')
parser.add_argument('--nesterov', action='store_true', default=False,
help='enables nesterov momentum')
parser.add_argument('--lr-schedule', type=str, default='Linear',
help='learning rate schedule (default: Linear)')
parser.add_argument('--lr-update-per-epoch', action='store_true', default=False,
help='update learning rate after each epoch instead of each iter by default')
parser.add_argument('--lr-decay-epoch', type=int, default=50,
        help='epoch interval used by the learning rate schedule (default: 50)')
parser.add_argument('--lr-schedule-gamma', type=float, default=0.1,
        help='interpretation depends on lr_schedule (default: 0.1)')
## Training Settings
parser.add_argument('--reset', action='store_true', default=False,
help='DANGER: purge the exp_dir and start a fresh new training run')
parser.add_argument('--pretrain', type=str, default=None,
help='pretrained weights')
parser.add_argument('--batch-size-per-gpu', type=int, default=None,
help='alternative to batch_size (and overrides it)')
parser.add_argument('--n-worker-per-gpu', type=int, default=None,
help='alternative n_worker (and overrides it)')
parser.add_argument('--epoch-size', type=int, default=float('inf'),
help='maximum # of examples per epoch')
parser.add_argument('--no-val', action='store_false', dest='val', default=True,
help='turn off validation')
parser.add_argument('--log-interval', type=int, default=50,
help='after every how many iters to log the training status')
parser.add_argument('--save-interval', type=int, default=5,
help='after every how many epochs to save the learned model')
parser.add_argument('--val-interval', type=int, default=5,
        help='after every how many epochs to run validation')
parser.add_argument('--train-gather', action='store_true', default=False,
help='gather results over batches during training, which is required '
'to compute metrics over the entire training set at the end of '
'every epoch',
)
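# A hypothetical invocation of this script (the script name and paths are
# placeholders, not taken from the repository):
#
#   python train.py --dataset CIFAR10 --data-root /path/to/cifar10 \
#       --n-epoch 20 --batch-size 128 --lr 0.1 --val-interval 5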
def main():
## Overall timer
tmr_main = Timer()
## Argument parser and environment setup
parser = get_default_parser('llcv - training script')
add_args(parser)
args = env_setup(parser, 'train', ['data_root', 'pretrain'])
## Prepare the dataloader
train_loader = build_loader(args, is_train=True)
logging.info(f'# of classes: {len(train_loader.dataset.classes)}')
n_train = len(train_loader.dataset)
logging.info(f'# of training examples: {n_train}')
assert n_train
if args.epoch_size < n_train:
        logging.warning(f'Epoch size ({args.epoch_size}) is smaller than the # of training examples')
train_epoch_size = args.epoch_size
else:
train_epoch_size = n_train
if args.val:
val_loader = build_loader(args, is_train=False)
n_val = len(val_loader.dataset)
logging.info(f'# of validation examples: {n_val}')
else:
n_val = 0
## Initialize task
task = build_task(args, train_loader, is_train=True)
if task.resume_epoch >= args.n_epoch:
logging.warning(f'The model is already trained for {task.resume_epoch} epochs')
return
if n_val and task.has_val_score:
if task.resume_epoch:
best_epoch, best_score = task.query_best_model()
else:
best_score = best_epoch = 0
## Start training
last_saved_epoch = 0
n_iter_epoch = 0
n_iter_total = (args.n_epoch - task.resume_epoch)*len(train_loader)
speed_ratio = dist_get_world_size()
logging.info('Training starts')
tmr_train = Timer()
for epoch in range(task.resume_epoch + 1, args.n_epoch + 1):
task.train_mode(args.train_gather)
n_seen = 0
n_warpup = 0
t_warmup = 0
tmr_epoch = Timer()
for i, data in enumerate(train_loader):
i += 1
# the last batch can be smaller than normal
this_batch_size = len(data[0])
tmr_iter = Timer()
task.forward(data)
task.backward()
tmr_iter.stop()
if not args.lr_update_per_epoch:
task.update_lr_iter()
n_seen += this_batch_size
t_iter = tmr_iter.elapsed()
if i <= args.timing_warmup_iter:
n_warpup += this_batch_size
t_warmup += t_iter
if i % args.log_interval == 0:
t_total = tmr_epoch.check()
if i <= args.timing_warmup_iter:
ave_speed = n_seen/t_total if t_total else float('inf')
else:
ave_speed = (n_seen - n_warpup)/(t_total - t_warmup)if (t_total - t_warmup) else float('inf')
ave_speed *= speed_ratio
task.log_iter(
'train e%d: %4d/%4d, %5.4gHz' %
(epoch, i, len(train_loader), ave_speed),
', ETA: ' + get_eta(tmr_train.check(), n_iter_epoch + i, n_iter_total),
)
task.log_iter_tb(
(epoch-1)*len(train_loader) + i,
is_train=True,
)
if n_seen >= train_epoch_size:
break
task.dist_gather(is_train=True)
task.log_epoch(f'train e{epoch} summary: ')
task.log_epoch_tb(epoch, is_train=True)
task.reset_epoch()
if n_val and (epoch % args.val_interval == 0 or epoch == args.n_epoch):
n_seen = 0
task.test_mode()
n_warpup = 0
t_warmup = 0
tmr_val = Timer()
for i, data in enumerate(val_loader):
i += 1
this_batch_size = len(data[0])
tmr_iter = Timer()
with torch.no_grad():
task.forward(data)
tmr_iter.stop()
n_seen += this_batch_size
t_iter = tmr_iter.elapsed()
if i <= args.timing_warmup_iter:
n_warpup += this_batch_size
t_warmup += t_iter
if i % args.log_interval == 0:
t_total = tmr_val.check()
if i <= args.timing_warmup_iter:
ave_speed = n_seen/t_total if t_total else float('inf')
else:
ave_speed = (n_seen - n_warpup)/(t_total - t_warmup)if (t_total - t_warmup) else float('inf')
ave_speed *= speed_ratio
task.log_iter(
'val e%d: %4d/%4d, %6.5gHz' %
(epoch, i, len(val_loader), ave_speed),
)
task.dist_gather(is_train=False)
if task.has_val_score:
new_score = task.get_test_scores()[0]
if new_score > best_score:
best_score = new_score
best_epoch = epoch
task.mark_best_model(best_epoch, best_score)
task.save(epoch)
last_saved_epoch = epoch
task.log_epoch(f'val e{epoch} summary: ')
task.log_epoch_tb(epoch, is_train=False)
task.reset_epoch()
tmr_epoch.stop()
logging.info('end of epoch %d/%d: epoch time: %s, ETA: %s' %
(epoch, args.n_epoch, tmr_epoch.elapsed(to_str=True),
get_eta(tmr_train.check(), epoch, args.n_epoch))
)
if last_saved_epoch != epoch and epoch % args.save_interval == 0:
task.save(epoch)
last_saved_epoch = epoch
if args.lr_update_per_epoch:
task.update_lr_epoch()
n_iter_epoch += len(train_loader)
if last_saved_epoch != args.n_epoch:
# saving the last epoch if n_epoch is not divisible by save_interval
task.save(args.n_epoch)
tmr_main.stop()
logging.info(f'Training finished with total elapsed time {tmr_main.elapsed(to_str=True)}')
if n_val and task.has_val_score:
logging.info(f'The best model is obtained at epoch {best_epoch} with score {best_score:.6g}')
if __name__ == '__main__':
main()
|
the-stack_0_10924 | """collection of methods for generating merger populations and rates"""
import utils
import sfh
from astropy.cosmology import Planck15 as cosmo
from astropy import units as u
import numpy as np
from tqdm import tqdm
def get_mergers(zbins, mets, metallicities, alpha, z_interp, downsample):
met_weights = sfh.get_metallicity_weights(zbins, mets)
mergers_tot = []
for met_read, met, ii in tqdm(zip(metallicities, mets, range(len(metallicities))), total=len(metallicities)):
BBH, mass_stars = utils.get_cosmic_data(alpha=alpha, met_read=met_read)
mergers = []
for zbin_low, zbin_high, jj in zip(zbins[1:], zbins[:-1], range(len(zbins))):
# get the midpoint of the zbin
midz = zbin_low + (zbin_high - zbin_low) / 2
# get the star formation rate from Madau & Fragos (2017)
sfr = sfh.madau_17(midz) * u.Msun * u.yr ** (-1) * u.Mpc ** (-3)
# we want *anything* that merges between the formation and today!
t_delay_min = 0
t_delay_max = cosmo.lookback_time(midz).to(u.Myr).value
BBH_merge = BBH.loc[(BBH.tphys > t_delay_min) & (BBH.tphys < t_delay_max)].copy()
if len(BBH_merge) > 0:
# log the formation and merger times
BBH_merge['t_form'] = cosmo.lookback_time(midz).to(u.Myr).value
BBH_merge['t_merge'] = BBH_merge.t_form - BBH_merge.tphys
# filter just to be safe
BBH_merge = BBH_merge.loc[BBH_merge.t_merge > 1e-3].copy()
# log the merger redshift
BBH_merge['z_merge'] = z_interp(BBH_merge.t_merge)
# log the formation redshift
BBH_merge['z_form'] = np.ones(len(BBH_merge)) * midz
# down sample because we have too much data
BBH_merge = BBH_merge.sample(int(len(BBH_merge) / downsample))
# calculate the number of mergers per unit mass formed
#merger_rate_per_mass = BBH_merge['initial_mass'] / (mass_stars / downsample)
# calculate the total amount of mass formed at redshift bin: midz and metallicity: met
SFR_met_weighted = (sfr * met_weights[ii, jj]).to(u.Msun * u.Gpc ** (-3) * u.yr ** (-1))
# calculate the number of merging BBH formed per comoving volume per source-frame time
BBH_merge['dN_dVdtf_source'] = (SFR_met_weighted * (1/((mass_stars * u.Msun) / downsample))).value
# account for the expansion between the formation time and merger time for each BBH
dt_f_dt_m = (1 + BBH_merge['z_merge']) * cosmo.H(BBH_merge['z_merge']) / \
((1 + BBH_merge['z_form']) * cosmo.H(BBH_merge['z_form']))
                # calculate the number of merging BBHs per source-frame time per comoving volume
                BBH_merge['dN_dVdtm_source'] = BBH_merge['dN_dVdtf_source'] * dt_f_dt_m
                # calculate the number of merging BBHs per comoving volume in the detector frame
BBH_merge['dN_dVdtm_det'] = BBH_merge['dN_dVdtm_source'] * 1 / (1 + BBH_merge['z_merge'])
# differential comoving volume at merger redshift
if len(mergers) > 0:
BBH_merge['dV_dz'] = cosmo.differential_comoving_volume(np.array(BBH_merge['z_merge'].values)).to(
u.Gpc ** (3) * u.steradian ** (-1)).value * (4 * np.pi)
if len(mergers) == 0:
mergers = BBH_merge
else:
mergers = mergers.append(BBH_merge)
else:
continue
if len(mergers_tot) == 0:
mergers_tot = mergers
else:
mergers_tot = mergers_tot.append(mergers)
if len(mergers_tot) > 0:
return mergers_tot
else:
return []
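# A minimal sketch of how get_mergers might be driven (every helper value below
# is an assumption based on the argument names, not taken from this package):
#
#   import numpy as np
#   from scipy.interpolate import interp1d
#   from astropy.cosmology import Planck15 as cosmo
#   from astropy import units as u
#
#   zbins = np.linspace(0.0, 10.0, 101)                 # redshift bin edges
#   z_grid = np.linspace(1e-4, 10.0, 1000)
#   t_grid = cosmo.lookback_time(z_grid).to(u.Myr).value
#   z_interp = interp1d(t_grid, z_grid)                 # lookback time [Myr] -> z
#   # mets / metallicities: the COSMIC metallicity grid and the corresponding
#   # file labels read by utils.get_cosmic_data (assumed to exist upstream).
#   mergers = get_mergers(zbins, mets, metallicities, alpha=1.0,
#                         z_interp=z_interp, downsample=100)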
|
the-stack_0_10926 | import ctypes
import enum
import numpy as np
from astropy import units as u
from panoptes.pocs.camera.sdk import AbstractSDKDriver
from panoptes.utils import error
from panoptes.utils import get_quantity_value
####################################################################################################
#
# Main ASI Driver class.
#
# The methods of this class call the functions from ASICamera2.h using the ctypes foreign function
# library. Based on v1.13.0930 of the ZWO ASI SDK.
#
####################################################################################################
class ASIDriver(AbstractSDKDriver):
def __init__(self, library_path=None, **kwargs):
"""Main class representing the ZWO ASI library interface.
On construction loads the shared object/dynamically linked version of the ASI SDK library,
which must be already installed (see https://astronomy-imaging-camera.com/software-drivers).
The name and location of the shared library can be manually specified with the library_path
argument, otherwise the ctypes.util.find_library function will be used to try to locate it.
Args:
library_path (str, optional): path to the library e.g. '/usr/local/lib/libASICamera2.so'
Returns:
`~pocs.camera.libasi.ASIDriver`
Raises:
panoptes.utils.error.NotFound: raised if library_path not given & find_library fails to
locate the library.
OSError: raises if the ctypes.CDLL loader cannot load the library.
"""
super().__init__(name='ASICamera2', library_path=library_path, **kwargs)
self._product_ids = self.get_product_ids() # Supported camera models
# Methods
def get_SDK_version(self):
""" Get the version of the ZWO ASI SDK """
# First set return type for function to pointer to null terminated string
self._CDLL.ASIGetSDKVersion.restype = ctypes.c_char_p
version = self._CDLL.ASIGetSDKVersion().decode('ascii') # Get bytes so decode to str
version = version.replace(', ', '.') # Format the version string properly
return version
def get_devices(self):
"""Gets currently connected camera info.
Returns:
dict: All currently connected camera serial numbers with corresponding integer
camera IDs.
Notes:
If a camera does not have a serial number it will attempt to fall back to string ID.
Cameras with neither serial number nor string ID will be left out of the dictionary
as they have no unique identifier.
"""
n_cameras = self.get_num_of_connected_cameras()
if n_cameras == 0:
raise error.PanError("No ZWO ASI camera devices found")
# Get the IDs
cameras = {}
for camera_index in range(n_cameras):
info = self.get_camera_property(camera_index)
camera_ID = info['camera_ID']
self.open_camera(camera_ID)
try:
serial_number = self.get_serial_number(camera_ID)
except RuntimeError as err:
# If at first you don't succeed, try, except, else, finally again.
self.logger.warning(f"Error getting serial number: {err}")
try:
string_ID = self.get_ID(camera_ID)
except RuntimeError as err:
self.logger.warning(f"Error getting string ID: {err}")
msg = f"Skipping ZWO ASI camera {camera_ID} with no serial number or string ID."
self.logger.error(msg)
break
else:
msg = f"Using string ID '{string_ID}' in place of serial number."
self.logger.warning(msg)
serial_number = string_ID
finally:
self.close_camera(camera_ID)
cameras[serial_number] = camera_ID
self.logger.debug(f"Got camera serial numbers: {list(cameras.keys())}")
return cameras
def get_num_of_connected_cameras(self):
""" Get the count of connected ASI cameras """
count = self._CDLL.ASIGetNumOfConnectedCameras() # Return type is int, needs no Pythonising
self.logger.debug("Found {} connected ASI cameras".format(count))
return count
def get_product_ids(self):
"""Get product IDs of cameras supported by the SDK."""
n_pids = self._CDLL.ASIGetProductIDs(0) # Call once to get number of product IDs
if n_pids > 0:
# Make array of C ints of required size.
product_ids = (ctypes.c_int * n_pids)()
# Call again to get product IDs. Should get same n_pids as before.
assert n_pids == self._CDLL.ASIGetProductIDs(ctypes.byref(product_ids))
else:
self.logger.error("Error getting supported camera product IDs from SDK.")
raise RuntimeError("ZWO SDK support 0 SDK products?")
self.logger.debug("Got {} supported camera product IDs from SDK.".format(n_pids))
return list(product_ids)
def get_camera_property(self, camera_index):
""" Get properties of the camera with given index """
camera_info = CameraInfo()
error_code = self._CDLL.ASIGetCameraProperty(ctypes.byref(camera_info), camera_index)
if error_code != ErrorCode.SUCCESS:
msg = "Error calling ASIGetCameraProperty: {}".format(ErrorCode(error_code).name)
self.logger.error(msg)
raise RuntimeError(msg)
pythonic_info = self._parse_info(camera_info)
self.logger.debug("Got info from camera {camera_ID}, {name}".format(**pythonic_info))
return pythonic_info
def get_camera_property_by_id(self, camera_ID):
"""Get properties of the camera with a given integer ID."""
camera_info = CameraInfo()
self._call_function('ASIGetCameraPropertyByID',
camera_ID,
ctypes.byref(camera_info))
pythonic_info = self._parse_info(camera_info)
self.logger.debug("Got info from camera {camera_ID}, {name}".format(**pythonic_info))
return pythonic_info
def open_camera(self, camera_ID):
""" Open camera with given integer ID """
self._call_function('ASIOpenCamera', camera_ID)
self.logger.debug("Opened camera {}".format(camera_ID))
def init_camera(self, camera_ID):
""" Initialise camera with given integer ID """
self._call_function('ASIInitCamera', camera_ID)
self.logger.debug("Initialised camera {}".format(camera_ID))
def close_camera(self, camera_ID):
""" Close camera with given integer ID """
self._call_function('ASICloseCamera', camera_ID)
self.logger.debug("Closed camera {}".format(camera_ID))
def get_ID(self, camera_ID):
"""Get string ID from firmaware for the camera with given integer ID
The saved ID is an array of 8 unsigned chars for some reason.
"""
struct_ID = ID()
self._call_function('ASIGetID', camera_ID, ctypes.byref(struct_ID))
bytes_ID = bytes(struct_ID.id)
string_ID = bytes_ID.decode()
self.logger.debug("Got string ID '{}' from camera {}".format(string_ID, camera_ID))
return string_ID
def set_ID(self, camera_ID, string_ID):
"""Save string ID to firmware of camera with given integer ID
The saved ID is an array of 8 unsigned chars for some reason. To preserve some sanity
this method takes an 8 byte UTF-8 string as input.
"""
bytes_ID = string_ID.encode() # Convert string to bytes
if len(bytes_ID) > 8:
bytes_ID = bytes_ID[:8] # This may chop out part of a UTF-8 multibyte character
self.logger.warning("New ID longer than 8 bytes, truncating {} to {}".format(
string_ID, bytes_ID.decode()))
else:
bytes_ID = bytes_ID.ljust(8) # Pad to 8 bytes with spaces, if necessary
uchar_ID = (ctypes.c_ubyte * 8).from_buffer_copy(bytes_ID)
self._call_function('ASISetID', camera_ID, ID(uchar_ID))
self.logger.debug("Set camera {} string ID to '{}'".format(camera_ID, bytes_ID.decode()))
def get_num_of_controls(self, camera_ID):
""" Gets the number of control types supported by the camera with given integer ID """
n_controls = ctypes.c_int()
self._call_function('ASIGetNumOfControls', camera_ID, ctypes.byref(n_controls))
n_controls = n_controls.value # Convert from ctypes c_int type to Python int
self.logger.debug("Camera {} has {} controls".format(camera_ID, n_controls))
return n_controls
def get_control_caps(self, camera_ID):
""" Gets the details of all the controls supported by the camera with given integer ID """
n_controls = self.get_num_of_controls(camera_ID) # First get number of controls
controls = {}
for i in range(n_controls):
control_caps = ControlCaps()
self._call_function('ASIGetControlCaps',
camera_ID,
ctypes.c_int(i),
ctypes.byref(control_caps))
control = self._parse_caps(control_caps)
controls[control['control_type']] = control
self.logger.debug("Got details of {} controls from camera {}".format(n_controls, camera_ID))
return controls
def get_control_value(self, camera_ID, control_type):
""" Gets the value of the control control_type from camera with given integer ID """
value = ctypes.c_long()
is_auto = ctypes.c_int()
self._call_function('ASIGetControlValue',
camera_ID,
ControlType[control_type],
ctypes.byref(value),
ctypes.byref(is_auto))
nice_value = self._parse_return_value(value, control_type)
return nice_value, bool(is_auto)
def set_control_value(self, camera_ID, control_type, value):
""" Sets the value of the control control_type on camera with given integet ID """
if value == 'AUTO':
# Apparently need to pass current value when turning auto on
auto = True
value = self.get_control_value(camera_ID, control_type)[0]
else:
auto = False
self._call_function('ASISetControlValue',
camera_ID,
ctypes.c_int(ControlType[control_type]),
self._parse_input_value(value, control_type),
ctypes.c_int(auto))
self.logger.debug("Set {} to {} on camera {}".format(control_type,
'AUTO' if auto else value,
camera_ID))
def get_roi_format(self, camera_ID):
""" Get the ROI size and image format setting for camera with given integer ID """
width = ctypes.c_int()
height = ctypes.c_int()
binning = ctypes.c_int()
image_type = ctypes.c_int()
self._call_function('ASIGetROIFormat',
camera_ID,
ctypes.byref(width),
ctypes.byref(height),
ctypes.byref(binning),
ctypes.byref(image_type))
roi_format = {'width': width.value * u.pixel,
'height': height.value * u.pixel,
'binning': binning.value,
'image_type': ImgType(image_type.value).name}
return roi_format
def set_roi_format(self, camera_ID, width, height, binning, image_type):
""" Set the ROI size and image format settings for the camera with given integer ID """
width = int(get_quantity_value(width, unit=u.pixel))
height = int(get_quantity_value(height, unit=u.pixel))
binning = int(binning)
self._call_function('ASISetROIFormat',
camera_ID,
ctypes.c_int(width),
ctypes.c_int(height),
ctypes.c_int(binning),
ctypes.c_int(ImgType[image_type]))
self.logger.debug("Set ROI, format on camera {} to {}x{}/{}, {}".format(
camera_ID, width, height, binning, image_type))
def get_start_position(self, camera_ID):
""" Get position of the upper left corner of the ROI for camera with given integer ID
Args:
camera_ID (int): integer ID of the camera
Returns:
(astropy.units.Quantity, astropy.units.Quantity): x, y coordinates of the upper left
corner of the ROI. Note, these are in binned pixels.
"""
start_x = ctypes.c_int()
start_y = ctypes.c_int()
self._call_function('ASIGetStartPos',
camera_ID,
ctypes.byref(start_x),
ctypes.byref(start_y))
start_x = start_x.value * u.pixel
start_y = start_y.value * u.pixel
return start_x, start_y
def set_start_position(self, camera_ID, start_x, start_y):
""" Set position of the upper left corner of the ROI for camera with given integer ID """
start_x = int(get_quantity_value(start_x, unit=u.pixel))
start_y = int(get_quantity_value(start_y, unit=u.pixel))
self._call_function('ASISetStartPos',
camera_ID,
ctypes.c_int(start_x),
ctypes.c_int(start_y))
self.logger.debug("Set ROI start position of camera {} to ({}, {})".format(
camera_ID, start_x, start_y))
def get_dropped_frames(self, camera_ID):
"""Get the number of dropped frames during video capture."""
n_dropped_frames = ctypes.c_int()
self._call_function('ASIGetDroppedFrames',
camera_ID,
ctypes.byref(n_dropped_frames))
self.logger_debug("Camera {} has dropped {} frames.".format(camera_ID, n_dropped_frames))
return n_dropped_frames
def enable_dark_subtract(self, camera_ID, dark_file_path):
"""Enable dark subtraction (not implemented).
You almost certainly wouldn't want to use this as it only works with images taken in
RGB8 format and only with dark frames saved as .BMP files. Far better to do dark
subtraction in post-processing.
"""
raise NotImplementedError
def disable_dark_subtract(self, camera_ID):
"""Disable dark subtraction.
May need to call this as dark current subtraction settings persist in the registry
on Windows.
"""
self._call_function('ASIDisableDarkSubtract',
camera_ID)
self.logger.debug("Dark subtraction on camera {} disabled.".format(camera_ID))
def pulse_guide_on(self, camera_ID, direction):
"""Turn on PulseGuide on ST4 port of given camera in given direction."""
self._call_function('ASIPulseGuideOn',
camera_ID,
GuideDirection[direction])
dname = GuideDirection[direction].name
msg = f"PulseGuide on camera {camera_ID} on in direction {dname}."
self.logger.debug(msg)
def pulse_guide_off(self, camera_ID, direction):
"""Turn off PulseGuide on ST4 port of given camera in given direction."""
self._call_function('ASIPulseGuideOff',
camera_ID,
GuideDirection[direction])
dname = GuideDirection[direction].name
msg = f"PulseGuide on camera {camera_ID} off in direction {dname}."
self.logger.debug(msg)
def get_gain_offset(self, camera_ID):
"""Get pre-setting parameters."""
offset_highest_dr = ctypes.c_int()
offset_unity_gain = ctypes.c_int()
gain_lowest_rn = ctypes.c_int()
offset_lowest_rn = ctypes.c_int()
self._call_function('ASIGetGainOffset',
camera_ID,
ctypes.byref(offset_highest_dr),
ctypes.byref(offset_unity_gain),
ctypes.byref(gain_lowest_rn),
ctypes.byref(offset_lowest_rn))
self.logger.debug('Got pre-setting parameters from camera {}.'.format(camera_ID))
return offset_highest_dr, offset_unity_gain, gain_lowest_rn, offset_lowest_rn
def get_camera_supported_mode(self, camera_ID):
"""Get supported trigger modes for camera with given integer ID."""
modes_struct = SupportedMode()
self._call_function('ASIGetCameraSupportMode',
camera_ID,
ctypes.byref(modes_struct.modes))
supported_modes = []
for mode_int in modes_struct.modes:
if mode_int == CameraMode.END:
break
supported_modes.append(CameraMode(mode_int).name)
self.logger.debug("Got supported modes {} for camera {}".format(supported_modes,
camera_ID))
return supported_modes
def get_camera_mode(self, camera_ID):
"""Get current trigger mode for camera with given integer ID."""
mode = ctypes.c_int()
self._call_function('ASIGetCameraMode',
camera_ID,
ctypes.byref(mode))
mode_name = CameraMode(mode.value).name
self.logger.debug('Camera {} is in trigger mode {}'.format(camera_ID, mode_name))
return mode_name
def set_camera_mode(self, camera_ID, mode_name):
"""Set trigger mode for camera with given integer ID."""
mode = CameraMode[mode_name]
self._call_function('ASISetCameraMode',
camera_ID,
mode)
self.logger.debug('Set trigger mode of camera {} to {}.'.format(camera_ID, mode_name))
def send_soft_trigger(self, camera_ID, start_stop_signal):
"""Send out a soft trigger on camera with given integer ID."""
self._call_function('ASISendSoftTrigger',
camera_ID,
int(bool(start_stop_signal)))
self.logger.debug('Soft trigger sent to camera {}.'.format(camera_ID))
def get_serial_number(self, camera_ID):
"""Get serial number of the camera with given integer ID.
The serial number is an array of 8 unsigned chars, the same as string ID,
but it is interpreted differently. It is displayed in ASICAP as a 16 digit
hexadecimal number, so we will convert it to the same 16 character string
representation.
"""
struct_SN = ID() # Same structure as string ID.
self._call_function('ASIGetSerialNumber',
camera_ID,
ctypes.byref(struct_SN))
bytes_SN = bytes(struct_SN.id)
serial_number = "".join(f"{b:02x}" for b in bytes_SN)
self.logger.debug("Got serial number '{}' from camera {}".format(serial_number, camera_ID))
return serial_number
def get_trigger_output_io_conf(self, camera_ID):
"""Get external trigger configuration of the camera with given integer ID."""
pin = ctypes.c_int()
pin_high = ctypes.c_int()
delay = ctypes.c_long()
duration = ctypes.c_long()
self._call_function('ASIGetTriggerOutputIOConf',
camera_ID,
ctypes.byref(pin),
ctypes.byref(pin_high),
ctypes.byref(delay),
ctypes.byref(duration))
self.logger.debug("Got trigger config from camera {}".format(camera_ID))
return TrigOutput(pin.value).name, bool(pin_high.value), int(delay.value), int(duration.value)
def set_trigger_ouput_io_conf(self, camera_ID, pin, pin_high, delay, duration):
"""Set external trigger configuration of the camera with given integer ID."""
self._call_function('ASISetTriggerOutputIOConf',
camera_ID,
TrigOutput[pin],
ctypes.c_int(pin_high),
ctypes.c_long(delay),
ctypes.c_long(duration))
self.logger.debug("Set trigger config of camera {}".format(camera_ID))
def start_exposure(self, camera_ID):
""" Start exposure on the camera with given integer ID """
self._call_function('ASIStartExposure', camera_ID)
self.logger.debug("Exposure started on camera {}".format(camera_ID))
def stop_exposure(self, camera_ID):
""" Cancel current exposure on camera with given integer ID """
self._call_function('ASIStopExposure', camera_ID)
self.logger.debug("Exposure on camera {} cancelled".format(camera_ID))
def get_exposure_status(self, camera_ID):
""" Get status of current exposure on camera with given integer ID """
status = ctypes.c_int()
self._call_function('ASIGetExpStatus', camera_ID, ctypes.byref(status))
return ExposureStatus(status.value).name
def get_exposure_data(self, camera_ID, width, height, image_type):
""" Get image data from exposure on camera with given integer ID """
exposure_data = self._image_array(width, height, image_type)
self._call_function('ASIGetDataAfterExp',
camera_ID,
exposure_data.ctypes.data_as(ctypes.POINTER(ctypes.c_byte)),
ctypes.c_long(exposure_data.nbytes))
self.logger.debug("Got exposure data from camera {}".format(camera_ID))
return exposure_data
def start_video_capture(self, camera_ID):
""" Start video capture mode on camera with given integer ID """
self._call_function('ASIStartVideoCapture', camera_ID)
def stop_video_capture(self, camera_ID):
""" Stop video capture mode on camera with given integer ID """
self._call_function('ASIStopVideoCapture', camera_ID)
def get_video_data(self, camera_ID, width, height, image_type, timeout):
""" Get the image data from the next available video frame """
video_data = self._image_array(width, height, image_type)
timeout = int(get_quantity_value(timeout, unit=u.ms))
try:
self._call_function('ASIGetVideoData',
camera_ID,
video_data.ctypes.data_as(ctypes.POINTER(ctypes.c_byte)),
ctypes.c_long(video_data.nbytes),
ctypes.c_int(-1))
# If set timeout to anything but -1 (no timeout) this call times out instantly?
except RuntimeError:
# Expect some dropped frames during video capture
return None
else:
return video_data
# Private methods
def _call_function(self, function_name, camera_ID, *args):
""" Utility function for calling the SDK functions that return ErrorCode """
function = getattr(self._CDLL, function_name)
error_code = function(ctypes.c_int(camera_ID), *args)
if error_code != ErrorCode.SUCCESS:
msg = "Error calling {}: {}".format(function_name, ErrorCode(error_code).name)
self.logger.error(msg)
raise RuntimeError(msg)
def _parse_info(self, camera_info):
""" Utility function to parse CameraInfo Structures into something more Pythonic """
pythonic_info = {'name': camera_info.name.decode(),
'camera_ID': int(camera_info.camera_ID),
'max_height': camera_info.max_height * u.pixel,
'max_width': camera_info.max_width * u.pixel,
'is_color_camera': bool(camera_info.is_color_camera),
'bayer_pattern': BayerPattern(camera_info.bayer_pattern).name,
'supported_bins': self._parse_bins(camera_info.supported_bins),
'supported_video_format': self._parse_formats(
camera_info.supported_video_format),
'pixel_size': camera_info.pixel_size * u.um,
'has_mechanical_shutter': bool(camera_info.has_mechanical_shutter),
'has_ST4_port': bool(camera_info.has_ST4_port),
'has_cooler': bool(camera_info.has_cooler),
'is_USB3_host': bool(camera_info.is_USB3_host),
'is_USB3_camera': bool(camera_info.is_USB3_camera),
'e_per_adu': camera_info.e_per_adu * u.electron / u.adu,
'bit_depth': camera_info.bit_depth * u.bit,
'is_trigger_camera': bool(camera_info.is_trigger_camera)}
return pythonic_info
def _parse_bins(self, supported_bins):
bins = tuple(int(b) for b in supported_bins if b != 0)
return bins
def _parse_formats(self, supported_formats):
formats = []
for supported_format in supported_formats:
format = ImgType(supported_format)
if format != ImgType.END:
formats.append(format.name)
else:
break
return tuple(formats)
def _parse_caps(self, control_caps):
""" Utility function to parse ControlCaps Structures into something more Pythonic """
control_type = ControlType(control_caps.control_type).name
control_info = {'name': control_caps.name.decode(),
'description': control_caps.description.decode(),
'max_value': self._parse_return_value(control_caps.max_value,
control_type),
'min_value': self._parse_return_value(control_caps.min_value,
control_type),
'default_value': self._parse_return_value(control_caps.default_value,
control_type),
'is_auto_supported': bool(control_caps.is_auto_supported),
'is_writable': bool(control_caps.is_writable),
'control_type': control_type}
return control_info
def _parse_return_value(self, value, control_type):
""" Helper function to apply appropiate type conversion and/or units to value """
try:
int_value = value.value # If not done already extract Python int from ctypes.c_long
except AttributeError:
int_value = value # If from a ctypes struct value will already be a Python int
# Apply control type specific units and/or data types
if control_type in units_and_scale:
nice_value = int_value * units_and_scale[control_type]
elif control_type in boolean_controls:
nice_value = bool(int_value)
elif control_type == 'FLIP':
nice_value = FlipStatus(int_value).name
else:
nice_value = int_value
return nice_value
def _parse_input_value(self, value, control_type):
""" Helper function to convert input values to appropriate ctypes.c_long """
if control_type in units_and_scale:
value = get_quantity_value(value, unit=units_and_scale[control_type])
elif control_type == 'FLIP':
value = FlipStatus[value]
return ctypes.c_long(int(value))
def _image_array(self, width, height, image_type):
""" Creates a suitable numpy array for storing image data """
width = int(get_quantity_value(width, unit=u.pixel))
height = int(get_quantity_value(height, unit=u.pixel))
if image_type in ('RAW8', 'Y8'):
image_array = np.zeros((height, width), dtype=np.uint8, order='C')
elif image_type == 'RAW16':
image_array = np.zeros((height, width), dtype=np.uint16, order='C')
elif image_type == 'RGB24':
image_array = np.zeros((3, height, width), dtype=np.uint8, order='C')
return image_array
units_and_scale = {'AUTO_TARGET_BRIGHTNESS': u.adu,
'AUTO_MAX_EXP': 1e-6 * u.second, # Unit is microseconds
'BANDWIDTHOVERLOAD': u.percent,
'COOLER_POWER_PERC': u.percent,
'EXPOSURE': 1e-6 * u.second, # Unit is microseconds
'OFFSET': u.adu,
'TARGET_TEMP': u.Celsius,
'TEMPERATURE': 0.1 * u.Celsius} # Unit is 1/10th degree C
boolean_controls = ('ANTI_DEW_HEATER',
'COOLER_ON',
'FAN_ON',
'HARDWARE_BIN',
'HIGH_SPEED_MODE',
'MONO_BIN',
'PATTERN_ADJUST')
####################################################################################################
#
# The C defines, enums and structs from ASICamera2.h translated to Python constants, enums and
# ctypes.Structures. Based on v1.13.0930 of the ZWO ASI SDK.
#
####################################################################################################
ID_MAX = 128 # Maximum value for camera integer ID (camera_ID)
@enum.unique
class BayerPattern(enum.IntEnum):
""" Bayer filter type """
RG = 0
BG = enum.auto()
GR = enum.auto()
GB = enum.auto()
@enum.unique
class ImgType(enum.IntEnum):
""" Supported video format """
RAW8 = 0
RGB24 = enum.auto()
RAW16 = enum.auto()
Y8 = enum.auto()
END = -1
@enum.unique
class GuideDirection(enum.IntEnum):
""" Guider direction """
NORTH = 0
SOUTH = enum.auto()
EAST = enum.auto()
WEST = enum.auto()
@enum.unique
class FlipStatus(enum.IntEnum):
""" Flip status """
NONE = 0
HORIZ = enum.auto()
VERT = enum.auto()
BOTH = enum.auto()
@enum.unique
class CameraMode(enum.IntEnum):
""" Camera status """
NORMAL = 0
TRIG_SOFT_EDGE = enum.auto()
TRIG_RISE_EDGE = enum.auto()
TRIG_FALL_EDGE = enum.auto()
TRIG_SOFT_LEVEL = enum.auto()
TRIG_HIGH_LEVEL = enum.auto()
TRIG_LOW_LEVEL = enum.auto()
END = -1
@enum.unique
class TrigOutput(enum.IntEnum):
"""External trigger output."""
PINA = 0 # Only Pin A output
PINB = enum.auto() # Only Pin B output
NONE = -1
@enum.unique
class ErrorCode(enum.IntEnum):
""" Error codes """
SUCCESS = 0
INVALID_INDEX = enum.auto() # No camera connected or index value out of boundary
INVALID_ID = enum.auto()
INVALID_CONTROL_TYPE = enum.auto()
CAMERA_CLOSED = enum.auto() # Camera didn't open
CAMERA_REMOVED = enum.auto() # Failed to find the camera, maybe it was removed
INVALID_PATH = enum.auto() # Cannot find the path of the file
INVALID_FILEFORMAT = enum.auto()
INVALID_SIZE = enum.auto() # Wrong video format size
INVALID_IMGTYPE = enum.auto() # Unsupported image format
OUTOF_BOUNDARY = enum.auto() # The startpos is out of boundary
TIMEOUT = enum.auto()
INVALID_SEQUENCE = enum.auto() # Stop capture first
BUFFER_TOO_SMALL = enum.auto()
VIDEO_MODE_ACTIVE = enum.auto()
EXPOSURE_IN_PROGRESS = enum.auto()
GENERAL_ERROR = enum.auto() # General error, e.g. value is out of valid range
INVALID_MODE = enum.auto() # The current mode is wrong
END = enum.auto()
class CameraInfo(ctypes.Structure):
""" Camera info structure """
_fields_ = [('name', ctypes.c_char * 64),
('camera_ID', ctypes.c_int),
('max_height', ctypes.c_long),
('max_width', ctypes.c_long),
('is_color_camera', ctypes.c_int),
('bayer_pattern', ctypes.c_int),
('supported_bins', ctypes.c_int * 16), # e.g. (1,2,4,8,0,...) means 1x, 2x, 4x, 8x
('supported_video_format', ctypes.c_int * 8), # ImgTypes, terminates with END
('pixel_size', ctypes.c_double), # in microns
('has_mechanical_shutter', ctypes.c_int),
('has_ST4_port', ctypes.c_int),
('has_cooler', ctypes.c_int),
('is_USB3_host', ctypes.c_int),
('is_USB3_camera', ctypes.c_int),
('e_per_adu', ctypes.c_float),
('bit_depth', ctypes.c_int),
('is_trigger_camera', ctypes.c_int),
('unused', ctypes.c_char * 16)]
class ControlType(enum.IntEnum):
""" Control types """
GAIN = 0
EXPOSURE = enum.auto()
GAMMA = enum.auto()
WB_R = enum.auto()
WB_B = enum.auto()
OFFSET = enum.auto()
BANDWIDTHOVERLOAD = enum.auto()
OVERCLOCK = enum.auto()
TEMPERATURE = enum.auto() # Returns temperature*10
FLIP = enum.auto()
AUTO_MAX_GAIN = enum.auto()
AUTO_MAX_EXP = enum.auto() # in microseconds
AUTO_TARGET_BRIGHTNESS = enum.auto()
HARDWARE_BIN = enum.auto()
HIGH_SPEED_MODE = enum.auto()
COOLER_POWER_PERC = enum.auto()
TARGET_TEMP = enum.auto() # NOT *10
COOLER_ON = enum.auto()
MONO_BIN = enum.auto() # Leads to less grid at software bin mode for colour camera
FAN_ON = enum.auto()
PATTERN_ADJUST = enum.auto()
ANTI_DEW_HEATER = enum.auto()
BRIGHTNESS = OFFSET
AUTO_MAX_BRIGHTNESS = AUTO_TARGET_BRIGHTNESS
class ControlCaps(ctypes.Structure):
""" Structure for caps (limits) on allowable parameter values for each camera control """
_fields_ = [('name', ctypes.c_char * 64), # The name of the control, .e.g. Exposure, Gain
('description', ctypes.c_char * 128), # Description of the command
('max_value', ctypes.c_long),
('min_value', ctypes.c_long),
('default_value', ctypes.c_long),
('is_auto_supported', ctypes.c_int),
('is_writable', ctypes.c_int), # Some can be read only, e.g. temperature
('control_type', ctypes.c_int), # ControlType used to get/set value
('unused', ctypes.c_char * 32)]
class ExposureStatus(enum.IntEnum):
""" Exposure status codes """
IDLE = 0
WORKING = enum.auto()
SUCCESS = enum.auto()
FAILED = enum.auto()
class ID(ctypes.Structure):
_fields_ = [('id', ctypes.c_ubyte * 8)]
class SupportedMode(ctypes.Structure):
""" Array of supported CameraModes, terminated with CameraMode.END """
_fields_ = [('modes', ctypes.c_int * 16)]
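####################################################################################################
#
# Illustrative usage sketch (not part of the original driver). It assumes the ZWO ASI SDK shared
# library is installed where ctypes can find it and that at least one supported camera is
# connected; the control name and the 1 second exposure below are arbitrary example values.
#
####################################################################################################
if __name__ == '__main__':
    import time

    driver = ASIDriver()  # optionally pass library_path='/usr/local/lib/libASICamera2.so'
    print("SDK version:", driver.get_SDK_version())

    cameras = driver.get_devices()  # {serial_number: camera_ID}
    camera_id = next(iter(cameras.values()))
    driver.open_camera(camera_id)
    driver.init_camera(camera_id)

    # Use the current ROI settings and take a single 1 second exposure.
    roi = driver.get_roi_format(camera_id)
    driver.set_control_value(camera_id, 'EXPOSURE', 1 * u.second)
    driver.start_exposure(camera_id)
    while driver.get_exposure_status(camera_id) == 'WORKING':
        time.sleep(0.1)
    data = driver.get_exposure_data(camera_id, roi['width'], roi['height'], roi['image_type'])
    print("Image array shape:", data.shape)

    driver.close_camera(camera_id)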
|
the-stack_0_10927 | # -*- coding: utf-8 -*-
# This code is part of Ansible, but is an independent component
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
# Copyright: (c) 2017, Dag Wieers <[email protected]>
# Copyright: (c) 2017, Jacob McGill (@jmcgill298)
# Copyright: (c) 2017, Swetha Chunduri (@schunduri)
# Copyright: (c) 2019, Rob Huelga (@RobW3LGA)
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import base64
import json
import os
from copy import deepcopy
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.module_utils.urls import fetch_url
from ansible.module_utils._text import to_bytes, to_native
# Optional, only used for APIC signature-based authentication
try:
from OpenSSL.crypto import FILETYPE_PEM, load_privatekey, sign
HAS_OPENSSL = True
except ImportError:
HAS_OPENSSL = False
# Optional, only used for XML payload
try:
import lxml.etree
HAS_LXML_ETREE = True
except ImportError:
HAS_LXML_ETREE = False
# Optional, only used for XML payload
try:
from xmljson import cobra
HAS_XMLJSON_COBRA = True
except ImportError:
HAS_XMLJSON_COBRA = False
def aci_argument_spec():
return dict(
host=dict(type='str', required=True, aliases=['hostname']),
port=dict(type='int', required=False),
username=dict(type='str', default='admin', aliases=['user']),
password=dict(type='str', no_log=True),
private_key=dict(type='str', aliases=['cert_key'], no_log=True), # Beware, this is not the same as client_key !
certificate_name=dict(type='str', aliases=['cert_name']), # Beware, this is not the same as client_cert !
output_level=dict(type='str', default='normal', choices=['debug', 'info', 'normal']),
timeout=dict(type='int', default=30),
use_proxy=dict(type='bool', default=True),
use_ssl=dict(type='bool', default=True),
validate_certs=dict(type='bool', default=True),
)
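# Illustrative sketch of how an ACI module typically consumes this argument spec; the 'tenant'
# parameter and the 'state' choices shown here are hypothetical examples that a module adds on
# top of the shared spec (kept as comments so nothing runs at import time):
#
#     from ansible.module_utils.basic import AnsibleModule
#
#     argument_spec = aci_argument_spec()
#     argument_spec.update(
#         tenant=dict(type='str', aliases=['tenant_name']),
#         state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
#     )
#     module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
#     aci = ACIModule(module)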
class ACIModule(object):
def __init__(self, module):
self.module = module
self.params = module.params
self.result = dict(changed=False)
self.headers = dict()
self.child_classes = set()
# error output
self.error = dict(code=None, text=None)
# normal output
self.existing = None
# info output
self.config = dict()
self.original = None
self.proposed = dict()
# debug output
self.filter_string = ''
self.method = None
self.path = None
self.response = None
self.status = None
self.url = None
# aci_rest output
self.imdata = None
self.totalCount = None
# Ensure protocol is set
self.define_protocol()
if self.module._debug:
self.module.warn('Enable debug output because ANSIBLE_DEBUG was set.')
self.params['output_level'] = 'debug'
if self.params['private_key']:
# Perform signature-based authentication, no need to log on separately
if not HAS_OPENSSL:
self.module.fail_json(msg='Cannot use signature-based authentication because pyopenssl is not available')
elif self.params['password'] is not None:
self.module.warn("When doing ACI signatured-based authentication, providing parameter 'password' is not required")
elif self.params['password']:
# Perform password-based authentication, log on using password
self.login()
else:
self.module.fail_json(msg="Either parameter 'password' or 'private_key' is required for authentication")
def boolean(self, value, true='yes', false='no'):
''' Return an acceptable value back '''
# When we expect value is of type=bool
if value is None:
return None
elif value is True:
return true
elif value is False:
return false
# If all else fails, escalate back to user
self.module.fail_json(msg="Boolean value '%s' is an invalid ACI boolean value.")
def iso8601_format(self, dt):
''' Return an ACI-compatible ISO8601 formatted time: 2123-12-12T00:00:00.000+00:00 '''
try:
return dt.isoformat(timespec='milliseconds')
except Exception:
tz = dt.strftime('%z')
return '%s.%03d%s:%s' % (dt.strftime('%Y-%m-%dT%H:%M:%S'), dt.microsecond / 1000, tz[:3], tz[3:])
def define_protocol(self):
''' Set protocol based on use_ssl parameter '''
# Set protocol for further use
self.params['protocol'] = 'https' if self.params.get('use_ssl', True) else 'http'
def define_method(self):
''' Set method based on state parameter '''
# Set method for further use
state_map = dict(absent='delete', present='post', query='get')
self.params['method'] = state_map[self.params['state']]
def login(self):
''' Log in to APIC '''
# Perform login request
if 'port' in self.params and self.params['port'] is not None:
url = '%(protocol)s://%(host)s:%(port)s/api/aaaLogin.json' % self.params
else:
url = '%(protocol)s://%(host)s/api/aaaLogin.json' % self.params
payload = {'aaaUser': {'attributes': {'name': self.params['username'], 'pwd': self.params['password']}}}
resp, auth = fetch_url(self.module, url,
data=json.dumps(payload),
method='POST',
timeout=self.params['timeout'],
use_proxy=self.params['use_proxy'])
# Handle APIC response
if auth['status'] != 200:
self.response = auth['msg']
self.status = auth['status']
try:
# APIC error
self.response_json(auth['body'])
self.fail_json(msg='Authentication failed: %(code)s %(text)s' % self.error)
except KeyError:
# Connection error
self.fail_json(msg='Connection failed for %(url)s. %(msg)s' % auth)
# Retain cookie for later use
self.headers['Cookie'] = resp.headers['Set-Cookie']
def cert_auth(self, path=None, payload='', method=None):
''' Perform APIC signature-based authentication, not the expected SSL client certificate authentication. '''
if method is None:
method = self.params['method'].upper()
# NOTE: ACI documentation incorrectly uses complete URL
if path is None:
path = self.path
path = '/' + path.lstrip('/')
if payload is None:
payload = ''
# Check if we got a private key. This allows the use of vaulting the private key.
if self.params['private_key'].startswith('-----BEGIN PRIVATE KEY-----'):
try:
sig_key = load_privatekey(FILETYPE_PEM, self.params['private_key'])
except Exception:
self.module.fail_json(msg="Cannot load provided 'private_key' parameter.")
# Use the username as the certificate_name value
if self.params['certificate_name'] is None:
self.params['certificate_name'] = self.params['username']
elif self.params['private_key'].startswith('-----BEGIN CERTIFICATE-----'):
self.module.fail_json(msg="Provided 'private_key' parameter value appears to be a certificate. Please correct.")
else:
# If we got a private key file, read from this file.
# NOTE: Avoid exposing any other credential as a filename in output...
if not os.path.exists(self.params['private_key']):
self.module.fail_json(msg="The provided private key file does not appear to exist. Is it a filename?")
try:
with open(self.params['private_key'], 'r') as fh:
private_key_content = fh.read()
except Exception:
self.module.fail_json(msg="Cannot open private key file '%s'." % self.params['private_key'])
if private_key_content.startswith('-----BEGIN PRIVATE KEY-----'):
try:
sig_key = load_privatekey(FILETYPE_PEM, private_key_content)
except Exception:
self.module.fail_json(msg="Cannot load private key file '%s'." % self.params['private_key'])
# Use the private key basename (without extension) as certificate_name
if self.params['certificate_name'] is None:
self.params['certificate_name'] = os.path.basename(os.path.splitext(self.params['private_key'])[0])
elif private_key_content.startswith('-----BEGIN CERTIFICATE-----'):
self.module.fail_json(msg="Provided private key file %s appears to be a certificate. Please correct." % self.params['private_key'])
else:
self.module.fail_json(msg="Provided private key file '%s' does not appear to be a private key. Please correct." % self.params['private_key'])
# NOTE: ACI documentation incorrectly adds a space between method and path
sig_request = method + path + payload
sig_signature = base64.b64encode(sign(sig_key, sig_request, 'sha256'))
sig_dn = 'uni/userext/user-%s/usercert-%s' % (self.params['username'], self.params['certificate_name'])
self.headers['Cookie'] = 'APIC-Certificate-Algorithm=v1.0; ' +\
'APIC-Certificate-DN=%s; ' % sig_dn +\
'APIC-Certificate-Fingerprint=fingerprint; ' +\
'APIC-Request-Signature=%s' % to_native(sig_signature)
def response_json(self, rawoutput):
''' Handle APIC JSON response output '''
try:
jsondata = json.loads(rawoutput)
except Exception as e:
# Expose RAW output for troubleshooting
self.error = dict(code=-1, text="Unable to parse output as JSON, see 'raw' output. %s" % e)
self.result['raw'] = rawoutput
return
# Extract JSON API output
try:
self.imdata = jsondata['imdata']
except KeyError:
self.imdata = dict()
self.totalCount = int(jsondata['totalCount'])
# Handle possible APIC error information
self.response_error()
def response_xml(self, rawoutput):
''' Handle APIC XML response output '''
# NOTE: The XML-to-JSON conversion is using the "Cobra" convention
try:
xml = lxml.etree.fromstring(to_bytes(rawoutput))
xmldata = cobra.data(xml)
except Exception as e:
# Expose RAW output for troubleshooting
self.error = dict(code=-1, text="Unable to parse output as XML, see 'raw' output. %s" % e)
self.result['raw'] = rawoutput
return
# Reformat as ACI does for JSON API output
try:
self.imdata = xmldata['imdata']['children']
except KeyError:
self.imdata = dict()
self.totalCount = int(xmldata['imdata']['attributes']['totalCount'])
# Handle possible APIC error information
self.response_error()
def response_error(self):
''' Set error information when found '''
# Handle possible APIC error information
if self.totalCount != 0:
try:
self.error = self.imdata[0]['error']['attributes']
except (KeyError, IndexError):
pass
def request(self, path, payload=None):
''' Perform a REST request '''
# Ensure method is set (only do this once)
self.define_method()
self.path = path
if 'port' in self.params and self.params['port'] is not None:
self.url = '%(protocol)s://%(host)s:%(port)s/' % self.params + path.lstrip('/')
else:
self.url = '%(protocol)s://%(host)s/' % self.params + path.lstrip('/')
# Sign and encode request as to APIC's wishes
if self.params['private_key']:
self.cert_auth(path=path, payload=payload)
# Perform request
resp, info = fetch_url(self.module, self.url,
data=payload,
headers=self.headers,
method=self.params['method'].upper(),
timeout=self.params['timeout'],
use_proxy=self.params['use_proxy'])
self.response = info['msg']
self.status = info['status']
# Handle APIC response
if info['status'] != 200:
try:
# APIC error
self.response_json(info['body'])
self.fail_json(msg='APIC Error %(code)s: %(text)s' % self.error)
except KeyError:
# Connection error
self.fail_json(msg='Connection failed for %(url)s. %(msg)s' % info)
self.response_json(resp.read())
def query(self, path):
''' Perform a query with no payload '''
self.path = path
if 'port' in self.params and self.params['port'] is not None:
self.url = '%(protocol)s://%(host)s:%(port)s/' % self.params + path.lstrip('/')
else:
self.url = '%(protocol)s://%(host)s/' % self.params + path.lstrip('/')
# Sign and encode request as to APIC's wishes
if self.params['private_key']:
self.cert_auth(path=path, method='GET')
# Perform request
resp, query = fetch_url(self.module, self.url,
data=None,
headers=self.headers,
method='GET',
timeout=self.params['timeout'],
use_proxy=self.params['use_proxy'])
# Handle APIC response
if query['status'] != 200:
self.response = query['msg']
self.status = query['status']
try:
# APIC error
self.response_json(query['body'])
self.fail_json(msg='APIC Error %(code)s: %(text)s' % self.error)
except KeyError:
# Connection error
self.fail_json(msg='Connection failed for %(url)s. %(msg)s' % query)
query = json.loads(resp.read())
return json.dumps(query['imdata'], sort_keys=True, indent=2) + '\n'
def request_diff(self, path, payload=None):
''' Perform a request, including a proper diff output '''
self.result['diff'] = dict()
self.result['diff']['before'] = self.query(path)
self.request(path, payload=payload)
# TODO: Check if we can use the request output for the 'after' diff
self.result['diff']['after'] = self.query(path)
if self.result['diff']['before'] != self.result['diff']['after']:
self.result['changed'] = True
# TODO: This could be designed to update existing keys
def update_qs(self, params):
''' Append key-value pairs to self.filter_string '''
accepted_params = dict((k, v) for (k, v) in params.items() if v is not None)
if accepted_params:
if self.filter_string:
self.filter_string += '&'
else:
self.filter_string = '?'
self.filter_string += '&'.join(['%s=%s' % (k, v) for (k, v) in accepted_params.items()])
# TODO: This could be designed to accept multiple obj_classes and keys
def build_filter(self, obj_class, params):
''' Build an APIC filter based on obj_class and key-value pairs '''
accepted_params = dict((k, v) for (k, v) in params.items() if v is not None)
if len(accepted_params) == 1:
return ','.join('eq({0}.{1},"{2}")'.format(obj_class, k, v) for (k, v) in accepted_params.items())
elif len(accepted_params) > 1:
return 'and(' + ','.join(['eq({0}.{1},"{2}")'.format(obj_class, k, v) for (k, v) in accepted_params.items()]) + ')'
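# Illustrative examples (hypothetical class and attribute values):
#   build_filter('fvTenant', {'name': 'prod'})               -> 'eq(fvTenant.name,"prod")'
#   build_filter('fvTenant', {'name': 'prod', 'descr': 'x'}) -> 'and(eq(fvTenant.name,"prod"),eq(fvTenant.descr,"x"))'
# Parameters whose value is None are ignored; with no usable parameters the method returns None.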
def _deep_url_path_builder(self, obj):
target_class = obj['target_class']
target_filter = obj['target_filter']
subtree_class = obj['subtree_class']
subtree_filter = obj['subtree_filter']
object_rn = obj['object_rn']
mo = obj['module_object']
add_subtree_filter = obj['add_subtree_filter']
add_target_filter = obj['add_target_filter']
if self.module.params['state'] in ('absent', 'present') and mo is not None:
self.path = 'api/mo/uni/{0}.json'.format(object_rn)
self.update_qs({'rsp-prop-include': 'config-only'})
else:
# State is 'query'
if object_rn is not None:
# Query for a specific object in the module's class
self.path = 'api/mo/uni/{0}.json'.format(object_rn)
else:
self.path = 'api/class/{0}.json'.format(target_class)
if add_target_filter:
self.update_qs(
{'query-target-filter': self.build_filter(target_class, target_filter)})
if add_subtree_filter:
self.update_qs(
{'rsp-subtree-filter': self.build_filter(subtree_class, subtree_filter)})
if 'port' in self.params and self.params['port'] is not None:
self.url = '{protocol}://{host}:{port}/{path}'.format(
path=self.path, **self.module.params)
else:
self.url = '{protocol}://{host}/{path}'.format(
path=self.path, **self.module.params)
if self.child_classes:
self.update_qs(
{'rsp-subtree': 'full', 'rsp-subtree-class': ','.join(sorted(self.child_classes))})
def _deep_url_parent_object(self, parent_objects, parent_class):
for parent_object in parent_objects:
if parent_object['aci_class'] is parent_class:
return parent_object
return None
def construct_deep_url(self, target_object, parent_objects=None, child_classes=None):
"""
This method is used to retrieve the appropriate URL path and filter_string to make the request to the APIC.
:param target_object: The target class dictionary containing parent_class, aci_class, aci_rn, target_filter, and module_object keys.
:param parent_objects: The parent class list of dictionaries containing parent_class, aci_class, aci_rn, target_filter, and module_object keys.
:param child_classes: The list of child classes that the module supports along with the object.
:type target_object: dict
:type parent_objects: list[dict]
:type child_classes: list[string]
:return: The path and filter_string needed to build the full URL.
"""
self.filter_string = ''
rn_builder = None
subtree_classes = None
add_subtree_filter = False
add_target_filter = False
has_target_query = False
has_target_query_compare = False
has_target_query_difference = False
has_target_query_called = False
if child_classes is None:
self.child_classes = set()
else:
self.child_classes = set(child_classes)
target_parent_class = target_object['parent_class']
target_class = target_object['aci_class']
target_rn = target_object['aci_rn']
target_filter = target_object['target_filter']
target_module_object = target_object['module_object']
url_path_object = dict(
target_class=target_class,
target_filter=target_filter,
subtree_class=target_class,
subtree_filter=target_filter,
module_object=target_module_object
)
if target_module_object is not None:
rn_builder = target_rn
else:
has_target_query = True
has_target_query_compare = True
if parent_objects is not None:
current_parent_class = target_parent_class
has_parent_query_compare = False
has_parent_query_difference = False
is_first_parent = True
is_single_parent = None
search_classes = set()
while current_parent_class != 'uni':
parent_object = self._deep_url_parent_object(
parent_objects=parent_objects, parent_class=current_parent_class)
if parent_object is not None:
parent_parent_class = parent_object['parent_class']
parent_class = parent_object['aci_class']
parent_rn = parent_object['aci_rn']
parent_filter = parent_object['target_filter']
parent_module_object = parent_object['module_object']
if is_first_parent:
is_single_parent = True
else:
is_single_parent = False
is_first_parent = False
if parent_parent_class != 'uni':
search_classes.add(parent_class)
if parent_module_object is not None:
if rn_builder is not None:
rn_builder = '{0}/{1}'.format(parent_rn,
rn_builder)
else:
rn_builder = parent_rn
url_path_object['target_class'] = parent_class
url_path_object['target_filter'] = parent_filter
has_target_query = False
else:
rn_builder = None
subtree_classes = search_classes
has_target_query = True
if is_single_parent:
has_parent_query_compare = True
current_parent_class = parent_parent_class
else:
raise ValueError("Reference error for parent_class '{0}'. Each parent_class must reference a valid object".format(current_parent_class))
if not has_target_query_difference and not has_target_query_called:
if has_target_query is not has_target_query_compare:
has_target_query_difference = True
else:
if not has_parent_query_difference and has_target_query is not has_parent_query_compare:
has_parent_query_difference = True
has_target_query_called = True
if not has_parent_query_difference and has_parent_query_compare and target_module_object is not None:
add_target_filter = True
elif has_parent_query_difference and target_module_object is not None:
add_subtree_filter = True
self.child_classes.add(target_class)
if has_target_query:
add_target_filter = True
elif has_parent_query_difference and not has_target_query and target_module_object is None:
self.child_classes.add(target_class)
self.child_classes.update(subtree_classes)
elif not has_parent_query_difference and not has_target_query and target_module_object is None:
self.child_classes.add(target_class)
elif not has_target_query and is_single_parent and target_module_object is None:
self.child_classes.add(target_class)
url_path_object['object_rn'] = rn_builder
url_path_object['add_subtree_filter'] = add_subtree_filter
url_path_object['add_target_filter'] = add_target_filter
self._deep_url_path_builder(url_path_object)
def construct_url(self, root_class, subclass_1=None, subclass_2=None, subclass_3=None, child_classes=None):
"""
This method is used to retrieve the appropriate URL path and filter_string to make the request to the APIC.
:param root_class: The top-level class dictionary containing aci_class, aci_rn, target_filter, and module_object keys.
:param subclass_1: The second-level class dictionary containing aci_class, aci_rn, target_filter, and module_object keys.
:param subclass_2: The third-level class dictionary containing aci_class, aci_rn, target_filter, and module_object keys.
:param subclass_3: The fourth-level class dictionary containing aci_class, aci_rn, target_filter, and module_object keys.
:param child_classes: The list of child classes that the module supports along with the object.
:type root_class: dict
:type subclass_1: dict
:type subclass_2: dict
:type subclass_3: dict
:type child_classes: list
:return: The path and filter_string needed to build the full URL.
"""
self.filter_string = ''
if child_classes is None:
self.child_classes = set()
else:
self.child_classes = set(child_classes)
if subclass_3 is not None:
self._construct_url_4(root_class, subclass_1, subclass_2, subclass_3)
elif subclass_2 is not None:
self._construct_url_3(root_class, subclass_1, subclass_2)
elif subclass_1 is not None:
self._construct_url_2(root_class, subclass_1)
else:
self._construct_url_1(root_class)
if 'port' in self.params and self.params['port'] is not None:
self.url = '{protocol}://{host}:{port}/{path}'.format(path=self.path, **self.module.params)
else:
self.url = '{protocol}://{host}/{path}'.format(path=self.path, **self.module.params)
if self.child_classes:
# Append child_classes to filter_string (update_qs handles the '?' vs '&' prefixing)
self.update_qs({'rsp-subtree': 'full', 'rsp-subtree-class': ','.join(sorted(self.child_classes))})
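# Illustrative sketch of a typical call (the fvTenant class and the 'tenant' variable are
# hypothetical examples, not requirements of this method):
#
#     aci.construct_url(
#         root_class=dict(
#             aci_class='fvTenant',
#             aci_rn='tn-{0}'.format(tenant),
#             target_filter={'name': tenant},
#             module_object=tenant,
#         ),
#         child_classes=['fvAp'],
#     )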
def _construct_url_1(self, obj):
"""
This method is used by construct_url when the object is the top-level class.
"""
obj_class = obj['aci_class']
obj_rn = obj['aci_rn']
obj_filter = obj['target_filter']
mo = obj['module_object']
if self.module.params['state'] in ('absent', 'present'):
# State is absent or present
self.path = 'api/mo/uni/{0}.json'.format(obj_rn)
self.update_qs({'rsp-prop-include': 'config-only'})
elif mo is None:
# Query for all objects of the module's class (filter by properties)
self.path = 'api/class/{0}.json'.format(obj_class)
self.update_qs({'query-target-filter': self.build_filter(obj_class, obj_filter)})
else:
# Query for a specific object in the module's class
self.path = 'api/mo/uni/{0}.json'.format(obj_rn)
def _construct_url_2(self, parent, obj):
"""
This method is used by construct_url when the object is the second-level class.
"""
parent_class = parent['aci_class']
parent_rn = parent['aci_rn']
parent_filter = parent['target_filter']
parent_obj = parent['module_object']
obj_class = obj['aci_class']
obj_rn = obj['aci_rn']
obj_filter = obj['target_filter']
mo = obj['module_object']
if self.module.params['state'] in ('absent', 'present'):
# State is absent or present
self.path = 'api/mo/uni/{0}/{1}.json'.format(parent_rn, obj_rn)
self.update_qs({'rsp-prop-include': 'config-only'})
elif parent_obj is None and mo is None:
# Query for all objects of the module's class
self.path = 'api/class/{0}.json'.format(obj_class)
self.update_qs({'query-target-filter': self.build_filter(obj_class, obj_filter)})
elif parent_obj is None: # mo is known
# Query for all objects of the module's class that match the provided ID value
self.path = 'api/class/{0}.json'.format(obj_class)
self.update_qs({'query-target-filter': self.build_filter(obj_class, obj_filter)})
elif mo is None: # parent_obj is known
# Query for all objects of the module's class that belong to a specific parent object
self.child_classes.add(obj_class)
self.path = 'api/mo/uni/{0}.json'.format(parent_rn)
else:
# Query for specific object in the module's class
self.path = 'api/mo/uni/{0}/{1}.json'.format(parent_rn, obj_rn)
def _construct_url_3(self, root, parent, obj):
"""
This method is used by construct_url when the object is the third-level class.
"""
root_class = root['aci_class']
root_rn = root['aci_rn']
root_filter = root['target_filter']
root_obj = root['module_object']
parent_class = parent['aci_class']
parent_rn = parent['aci_rn']
parent_filter = parent['target_filter']
parent_obj = parent['module_object']
obj_class = obj['aci_class']
obj_rn = obj['aci_rn']
obj_filter = obj['target_filter']
mo = obj['module_object']
if self.module.params['state'] in ('absent', 'present'):
# State is absent or present
self.path = 'api/mo/uni/{0}/{1}/{2}.json'.format(root_rn, parent_rn, obj_rn)
self.update_qs({'rsp-prop-include': 'config-only'})
elif root_obj is None and parent_obj is None and mo is None:
# Query for all objects of the module's class
self.path = 'api/class/{0}.json'.format(obj_class)
self.update_qs({'query-target-filter': self.build_filter(obj_class, obj_filter)})
elif root_obj is None and parent_obj is None: # mo is known
# Query for all objects of the module's class matching the provided ID value of the object
self.path = 'api/class/{0}.json'.format(obj_class)
self.update_qs({'query-target-filter': self.build_filter(obj_class, obj_filter)})
elif root_obj is None and mo is None: # parent_obj is known
# Query for all objects of the module's class that belong to any parent class
# matching the provided ID value for the parent object
self.child_classes.add(obj_class)
self.path = 'api/class/{0}.json'.format(parent_class)
self.update_qs({'query-target-filter': self.build_filter(parent_class, parent_filter)})
elif parent_obj is None and mo is None: # root_obj is known
# Query for all objects of the module's class that belong to a specific root object
self.child_classes.update([parent_class, obj_class])
self.path = 'api/mo/uni/{0}.json'.format(root_rn)
# NOTE: No need to select by root_filter
# self.update_qs({'query-target-filter': self.build_filter(root_class, root_filter)})
elif root_obj is None: # mo and parent_obj are known
# Query for all objects of the module's class that belong to any parent class
# matching the provided ID values for both object and parent object
self.child_classes.add(obj_class)
self.path = 'api/class/{0}.json'.format(parent_class)
self.update_qs({'query-target-filter': self.build_filter(parent_class, parent_filter)})
self.update_qs({'rsp-subtree-filter': self.build_filter(obj_class, obj_filter)})
elif parent_obj is None: # mo and root_obj are known
# Query for all objects of the module's class that match the provided ID value and belong to a specific root object
self.child_classes.add(obj_class)
self.path = 'api/mo/uni/{0}.json'.format(root_rn)
# NOTE: No need to select by root_filter
# self.update_qs({'query-target-filter': self.build_filter(root_class, root_filter)})
# TODO: Filter by parent_filter and obj_filter
self.update_qs({'rsp-subtree-filter': self.build_filter(obj_class, obj_filter)})
elif mo is None: # root_obj and parent_obj are known
# Query for all objects of the module's class that belong to a specific parent object
self.child_classes.add(obj_class)
self.path = 'api/mo/uni/{0}/{1}.json'.format(root_rn, parent_rn)
# NOTE: No need to select by parent_filter
# self.update_qs({'query-target-filter': self.build_filter(parent_class, parent_filter)})
else:
# Query for a specific object of the module's class
self.path = 'api/mo/uni/{0}/{1}/{2}.json'.format(root_rn, parent_rn, obj_rn)
def _construct_url_4(self, root, sec, parent, obj):
"""
This method is used by construct_url when the object is the fourth-level class.
"""
root_class = root['aci_class']
root_rn = root['aci_rn']
root_filter = root['target_filter']
root_obj = root['module_object']
sec_class = sec['aci_class']
sec_rn = sec['aci_rn']
sec_filter = sec['target_filter']
sec_obj = sec['module_object']
parent_class = parent['aci_class']
parent_rn = parent['aci_rn']
parent_filter = parent['target_filter']
parent_obj = parent['module_object']
obj_class = obj['aci_class']
obj_rn = obj['aci_rn']
obj_filter = obj['target_filter']
mo = obj['module_object']
if self.child_classes is None:
self.child_classes = [obj_class]
if self.module.params['state'] in ('absent', 'present'):
# State is absent or present
self.path = 'api/mo/uni/{0}/{1}/{2}/{3}.json'.format(root_rn, sec_rn, parent_rn, obj_rn)
self.update_qs({'rsp-prop-include': 'config-only'})
# TODO: Add all missing cases
elif root_obj is None:
self.child_classes.add(obj_class)
self.path = 'api/class/{0}.json'.format(obj_class)
self.update_qs({'query-target-filter': self.build_filter(obj_class, obj_filter)})
elif sec_obj is None:
self.child_classes.add(obj_class)
self.path = 'api/mo/uni/{0}.json'.format(root_rn)
# NOTE: No need to select by root_filter
# self.update_qs({'query-target-filter': self.build_filter(root_class, root_filter)})
# TODO: Filter by sec_filter, parent and obj_filter
self.update_qs({'rsp-subtree-filter': self.build_filter(obj_class, obj_filter)})
elif parent_obj is None:
self.child_classes.add(obj_class)
self.path = 'api/mo/uni/{0}/{1}.json'.format(root_rn, sec_rn)
# NOTE: No need to select by sec_filter
# self.update_qs({'query-target-filter': self.build_filter(sec_class, sec_filter)})
# TODO: Filter by parent_filter and obj_filter
self.update_qs({'rsp-subtree-filter': self.build_filter(obj_class, obj_filter)})
elif mo is None:
self.child_classes.add(obj_class)
self.path = 'api/mo/uni/{0}/{1}/{2}.json'.format(root_rn, sec_rn, parent_rn)
# NOTE: No need to select by parent_filter
# self.update_qs({'query-target-filter': self.build_filter(parent_class, parent_filter)})
else:
# Query for a specific object of the module's class
self.path = 'api/mo/uni/{0}/{1}/{2}/{3}.json'.format(root_rn, sec_rn, parent_rn, obj_rn)
def delete_config(self):
"""
This method is used to handle the logic when the module's state is equal to absent. The method only pushes a change if
the object exists, and if check_mode is False. A successful change will mark the module as changed.
"""
self.proposed = dict()
if not self.existing:
return
elif not self.module.check_mode:
# Sign and encode request as to APIC's wishes
if self.params['private_key']:
self.cert_auth(method='DELETE')
resp, info = fetch_url(self.module, self.url,
headers=self.headers,
method='DELETE',
timeout=self.params['timeout'],
use_proxy=self.params['use_proxy'])
self.response = info['msg']
self.status = info['status']
self.method = 'DELETE'
# Handle APIC response
if info['status'] == 200:
self.result['changed'] = True
self.response_json(resp.read())
else:
try:
# APIC error
self.response_json(info['body'])
self.fail_json(msg='APIC Error %(code)s: %(text)s' % self.error)
except KeyError:
# Connection error
self.fail_json(msg='Connection failed for %(url)s. %(msg)s' % info)
else:
self.result['changed'] = True
self.method = 'DELETE'
def get_diff(self, aci_class):
"""
This method is used to get the difference between the proposed and existing configurations. Each module
should call the get_existing method before this method, and add the proposed config to the module results
        using the module's config parameters. The new config will be added to the self.result dictionary.
:param aci_class: Type str.
This is the root dictionary key for the MO's configuration body, or the ACI class of the MO.
"""
proposed_config = self.proposed[aci_class]['attributes']
if self.existing:
existing_config = self.existing[0][aci_class]['attributes']
config = {}
# values are strings, so any diff between proposed and existing can be a straight replace
for key, value in proposed_config.items():
existing_field = existing_config.get(key)
if value != existing_field:
config[key] = value
# add name back to config only if the configs do not match
if config:
# TODO: If URLs are built with the object's name, then we should be able to leave off adding the name back
# config["name"] = proposed_config["name"]
config = {aci_class: {'attributes': config}}
# check for updates to child configs and update new config dictionary
children = self.get_diff_children(aci_class)
if children and config:
config[aci_class].update({'children': children})
elif children:
config = {aci_class: {'attributes': {}, 'children': children}}
else:
config = self.proposed
self.config = config
@staticmethod
def get_diff_child(child_class, proposed_child, existing_child):
"""
        This method is used to get the difference between the proposed and existing child configs. The get_nested_config()
method should be used to return the proposed and existing config portions of child.
:param child_class: Type str.
The root class (dict key) for the child dictionary.
:param proposed_child: Type dict.
The config portion of the proposed child dictionary.
:param existing_child: Type dict.
The config portion of the existing child dictionary.
:return: The child config with only values that are updated. If the proposed dictionary has no updates to make
to what exists on the APIC, then None is returned.
"""
update_config = {child_class: {'attributes': {}}}
for key, value in proposed_child.items():
existing_field = existing_child.get(key)
if value != existing_field:
update_config[child_class]['attributes'][key] = value
if not update_config[child_class]['attributes']:
return None
return update_config
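    # Illustrative sketch with hypothetical attribute values: for proposed_child={'descr': 'web', 'prio': 'level1'}
    # and existing_child={'descr': 'web', 'prio': 'level3'}, only the differing key survives, so the method
    # returns {child_class: {'attributes': {'prio': 'level1'}}}; if nothing differs, it returns None.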
def get_diff_children(self, aci_class):
"""
This method is used to retrieve the updated child configs by comparing the proposed children configs
        against the object's existing children configs.
:param aci_class: Type str.
This is the root dictionary key for the MO's configuration body, or the ACI class of the MO.
:return: The list of updated child config dictionaries. None is returned if there are no changes to the child
configurations.
"""
proposed_children = self.proposed[aci_class].get('children')
if proposed_children:
child_updates = []
existing_children = self.existing[0][aci_class].get('children', [])
# Loop through proposed child configs and compare against existing child configuration
for child in proposed_children:
child_class, proposed_child, existing_child = self.get_nested_config(child, existing_children)
if existing_child is None:
child_update = child
else:
child_update = self.get_diff_child(child_class, proposed_child, existing_child)
# Update list of updated child configs only if the child config is different than what exists
if child_update:
child_updates.append(child_update)
else:
return None
return child_updates
def get_existing(self):
"""
This method is used to get the existing object(s) based on the path specified in the module. Each module should
build the URL so that if the object's name is supplied, then it will retrieve the configuration for that particular
        object, but if no name is supplied, then it will retrieve all MOs for the class. Following this convention ensures
that this method can be used to supply the existing configuration when using the get_diff method. The response, status,
and existing configuration will be added to the self.result dictionary.
"""
uri = self.url + self.filter_string
        # Sign and encode the request as the APIC requires
if self.params['private_key']:
self.cert_auth(path=self.path + self.filter_string, method='GET')
resp, info = fetch_url(self.module, uri,
headers=self.headers,
method='GET',
timeout=self.params['timeout'],
use_proxy=self.params['use_proxy'])
self.response = info['msg']
self.status = info['status']
self.method = 'GET'
# Handle APIC response
if info['status'] == 200:
self.existing = json.loads(resp.read())['imdata']
else:
try:
# APIC error
self.response_json(info['body'])
self.fail_json(msg='APIC Error %(code)s: %(text)s' % self.error)
except KeyError:
# Connection error
self.fail_json(msg='Connection failed for %(url)s. %(msg)s' % info)
@staticmethod
def get_nested_config(proposed_child, existing_children):
"""
        This method is used for stripping off the outer layers of the child dictionaries so only the configuration
key, value pairs are returned.
:param proposed_child: Type dict.
The dictionary that represents the child config.
:param existing_children: Type list.
The list of existing child config dictionaries.
:return: The child's class as str (root config dict key), the child's proposed config dict, and the child's
existing configuration dict.
"""
for key in proposed_child.keys():
child_class = key
proposed_config = proposed_child[key]['attributes']
existing_config = None
# FIXME: Design causes issues for repeated child_classes
# get existing dictionary from the list of existing to use for comparison
for child in existing_children:
if child.get(child_class):
existing_config = child[key]['attributes']
# NOTE: This is an ugly fix
# Return the one that is a subset match
if set(proposed_config.items()).issubset(set(existing_config.items())):
break
return child_class, proposed_config, existing_config
def payload(self, aci_class, class_config, child_configs=None):
"""
This method is used to dynamically build the proposed configuration dictionary from the config related parameters
        passed into the module. All parameters that were not given values in the playbook task will be removed so as not to
inadvertently change configurations.
:param aci_class: Type str
This is the root dictionary key for the MO's configuration body, or the ACI class of the MO.
:param class_config: Type dict
This is the configuration of the MO using the dictionary keys expected by the API
:param child_configs: Type list
This is a list of child dictionaries associated with the MOs config. The list should only
include child objects that are used to associate two MOs together. Children that represent
MOs should have their own module.
"""
proposed = dict((k, str(v)) for k, v in class_config.items() if v is not None)
self.proposed = {aci_class: {'attributes': proposed}}
# add child objects to proposed
if child_configs:
children = []
for child in child_configs:
child_copy = deepcopy(child)
has_value = False
for root_key in child_copy.keys():
for final_keys, values in child_copy[root_key]['attributes'].items():
if values is None:
child[root_key]['attributes'].pop(final_keys)
else:
child[root_key]['attributes'][final_keys] = str(values)
has_value = True
if has_value:
children.append(child)
if children:
self.proposed[aci_class].update(dict(children=children))
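    # Illustrative sketch (hypothetical class and parameters): payload(aci_class='fvTenant',
    # class_config=dict(name='Tenant1', descr=None)) strips the unset 'descr' and leaves
    # self.proposed = {'fvTenant': {'attributes': {'name': 'Tenant1'}}}.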
def post_config(self):
"""
        This method is used to handle the logic when the module's state is equal to present. The method only pushes a change if
        the object differs from what exists on the APIC, and if check_mode is False. A successful change will mark the
module as changed.
"""
if not self.config:
return
elif not self.module.check_mode:
            # Sign and encode the request as the APIC requires
if self.params['private_key']:
self.cert_auth(method='POST', payload=json.dumps(self.config))
resp, info = fetch_url(self.module, self.url,
data=json.dumps(self.config),
headers=self.headers,
method='POST',
timeout=self.params['timeout'],
use_proxy=self.params['use_proxy'])
self.response = info['msg']
self.status = info['status']
self.method = 'POST'
# Handle APIC response
if info['status'] == 200:
self.result['changed'] = True
self.response_json(resp.read())
else:
try:
# APIC error
self.response_json(info['body'])
self.fail_json(msg='APIC Error %(code)s: %(text)s' % self.error)
except KeyError:
# Connection error
self.fail_json(msg='Connection failed for %(url)s. %(msg)s' % info)
else:
self.result['changed'] = True
self.method = 'POST'
def exit_json(self, **kwargs):
if 'state' in self.params:
if self.params['state'] in ('absent', 'present'):
if self.params['output_level'] in ('debug', 'info'):
self.result['previous'] = self.existing
# Return the gory details when we need it
if self.params['output_level'] == 'debug':
if 'state' in self.params:
self.result['filter_string'] = self.filter_string
self.result['method'] = self.method
# self.result['path'] = self.path # Adding 'path' in result causes state: absent in output
self.result['response'] = self.response
self.result['status'] = self.status
self.result['url'] = self.url
if 'state' in self.params:
self.original = self.existing
if self.params['state'] in ('absent', 'present'):
self.get_existing()
# if self.module._diff and self.original != self.existing:
# self.result['diff'] = dict(
# before=json.dumps(self.original, sort_keys=True, indent=4),
# after=json.dumps(self.existing, sort_keys=True, indent=4),
# )
self.result['current'] = self.existing
if self.params['output_level'] in ('debug', 'info'):
self.result['sent'] = self.config
self.result['proposed'] = self.proposed
self.result.update(**kwargs)
self.module.exit_json(**self.result)
def fail_json(self, msg, **kwargs):
# Return error information, if we have it
if self.error['code'] is not None and self.error['text'] is not None:
self.result['error'] = self.error
if 'state' in self.params:
if self.params['state'] in ('absent', 'present'):
if self.params['output_level'] in ('debug', 'info'):
self.result['previous'] = self.existing
# Return the gory details when we need it
if self.params['output_level'] == 'debug':
if self.imdata is not None:
self.result['imdata'] = self.imdata
self.result['totalCount'] = self.totalCount
if self.params['output_level'] == 'debug':
if self.url is not None:
if 'state' in self.params:
self.result['filter_string'] = self.filter_string
self.result['method'] = self.method
# self.result['path'] = self.path # Adding 'path' in result causes state: absent in output
self.result['response'] = self.response
self.result['status'] = self.status
self.result['url'] = self.url
if 'state' in self.params:
if self.params['output_level'] in ('debug', 'info'):
self.result['sent'] = self.config
self.result['proposed'] = self.proposed
self.result.update(**kwargs)
self.module.fail_json(msg=msg, **self.result)
|
the-stack_0_10928 | import tensorflow as tf # noqa
import copy
import os
import cPickle as pickle
import numpy as np
import hashlib
from ..data import helpers as helpers
from ..utils import misc as misc
from ..data import batch_fetcher as bfetchers
from ..experiments import experiment
from ..experiments import config as econfig
from ..model import conditionals as conds
from ..model import transforms as trans # noqa
from ..model import likelihoods as likes # noqa
from datetime import datetime
# Hyperparameters.
DEF_ARGS = {
'train_iters': 30000,
'hold_iters': 100,
'hold_interval': 2500,
'ncomps': 40,
'decay_interval': 5000,
'dropout_keeprate_val': None,
'optimizer_class': tf.train.AdamOptimizer,
'momentum': None,
'momentum_iter': 5000,
'max_grad_norm': 1.0,
'trans_alpha': None,
'rescale_init_constant': 1.0,
'trans_state_activation': tf.nn.tanh,
'cond_param_irange': 1e-6,
'first_do_linear_map': True,
'standardize': True,
'base_distribution': 'gaussian',
}
# Base configs for different transformations.
BASE_ARG_CHOICES = {
'lr_decay': (0.5, 0.1),
'init_lr': (0.005, ),
'first_trainable_A': (True, False),
'trans_funcs': [
None,
[trans.additive_coupling, trans.reverse, trans.additive_coupling,
trans.reverse, trans.additive_coupling, trans.reverse,
trans.additive_coupling, trans.log_rescale], # NICE Type
[trans.simple_rnn_transform, ], # 1xRNN
[trans.simple_rnn_transform, trans.reverse,
trans.simple_rnn_transform], # 2xRNN
[trans.rnn_coupling, trans.reverse, trans.rnn_coupling, trans.reverse,
trans.rnn_coupling, trans.reverse, trans.rnn_coupling,
trans.log_rescale], # 4xRNN Coup
[trans.simple_rnn_transform, trans.reverse,
trans.rnn_coupling, trans.reverse, trans.rnn_coupling, trans.reverse,
trans.rnn_coupling, trans.reverse, trans.rnn_coupling,
trans.log_rescale], # 1xRNN + RNN Coupling
[trans.simple_rnn_transform, trans.reverse, trans.additive_coupling,
trans.reverse, trans.additive_coupling, trans.reverse,
trans.additive_coupling, trans.reverse, trans.additive_coupling,
trans.log_rescale], # 1xRNN + NICE
],
}
# Get configs for standard Gaussian conditional model.
ARG_CHOICES_STDGAU = copy.copy(BASE_ARG_CHOICES)
ARG_CHOICES_STDGAU['single_marginal'] = (True,)
ARG_CHOICES_STDGAU['standard'] = (True,)
ARG_CHOICES_STDGAU['ncomps'] = (1, )
ARG_CHOICES_STDGAU['cond_func'] = (conds.independent_model,)
ARG_LIST_STDGAU = misc.make_arguments(ARG_CHOICES_STDGAU)
ARG_LIST_STDGAU = filter(
lambda conf: conf['first_trainable_A'] or conf['trans_funcs'] is not None,
ARG_LIST_STDGAU) # Avoid models that have no variables to optimize.
# Get configs for independent GMMs
ARG_CHOICES_IND = copy.copy(BASE_ARG_CHOICES)
ARG_CHOICES_IND['single_marginal'] = (False,)
ARG_CHOICES_IND['standard'] = (False,)
ARG_CHOICES_IND['cond_func'] = (conds.independent_model,)
ARG_LIST_IND = misc.make_arguments(ARG_CHOICES_IND)
# Get config for Tied conditional model.
ARG_CHOICES_TIED = copy.copy(BASE_ARG_CHOICES)
ARG_CHOICES_TIED['cond_tied_model'] = (True,)
ARG_CHOICES_TIED['param_nlayers'] = (2,)
ARG_CHOICES_TIED['cond_func'] = (conds.cond_model,)
ARG_LIST_TIED = misc.make_arguments(ARG_CHOICES_TIED)
# Get config for Untied conditional model.
ARG_CHOICES_UNTIED = copy.copy(BASE_ARG_CHOICES)
ARG_CHOICES_UNTIED['cond_tied_model'] = (False,)
ARG_CHOICES_UNTIED['param_nlayers'] = (2,)
ARG_CHOICES_UNTIED['cond_func'] = (conds.cond_model,)
ARG_LIST_UNTIED = misc.make_arguments(ARG_CHOICES_UNTIED)
# Get config for RNN conditional model.
ARG_CHOICES_RNN = copy.copy(BASE_ARG_CHOICES)
ARG_CHOICES_RNN['param_nlayers'] = (None, 2)
ARG_CHOICES_RNN['cond_func'] = (conds.rnn_model,)
ARG_LIST_RNN = misc.make_arguments(ARG_CHOICES_RNN)
# Get config for RNN conditional model.
ARG_CHOICES_RNN_FC = copy.copy(BASE_ARG_CHOICES)
ARG_CHOICES_RNN_FC['param_nlayers'] = (2, )
ARG_CHOICES_RNN_FC['cond_func'] = (conds.rnn_model,)
ARG_LIST_RNN_FC = misc.make_arguments(ARG_CHOICES_RNN_FC)
# Make the default be RNN conditional models.
ARG_LIST = misc.make_arguments(ARG_CHOICES_RNN)
def shorten(obj):
""" Helper function to shorten stringeds from long options, uses hash to
ensure shortening without collision """
string = str(obj)
if len(string) >= 255:
hash_object = hashlib.md5(string)
string_hash = str(hash_object.hexdigest())
return string[:50] + '...' + string[-50:] + '_' + string_hash
return string
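# Illustrative sketch: shorten('a' * 300) keeps the first and last 50 characters of the string and
# appends an md5 hexdigest of the full value ('aaa...aaa_<hash>'); anything shorter than 255
# characters is returned unchanged.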
def print_value(value):
""" Helper function to print functions, lists, and dictionaries for
filenames and printouts. """
if isinstance(value, str):
return value
try:
try:
string = reduce(lambda x, y: x+'-'+y,
[print_value(v) for v in value.items()])
except AttributeError: # Not dictionary
string = reduce(
lambda x, y: x+','+y, [print_value(v) for v in value])
except TypeError: # Not iterable
try:
string = value.func_name
except AttributeError: # Not function
string = str(value)
return string
def get_exp_name(args):
sorted_keys = np.sort(args.keys())
exp_name = reduce(lambda x, y: x+y,
['{}--{}/'.format(k, shorten(print_value(args[k])))
for k in sorted_keys], '')
return exp_name
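# Illustrative sketch (hypothetical arguments): get_exp_name({'init_lr': 0.005, 'lr_decay': 0.5})
# yields 'init_lr--0.005/lr_decay--0.5/', i.e. one path component per argument, sorted by key,
# with long or non-string values rendered via print_value() and shorten().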
def make_trainer(dataset, base_save_path, base_log_path,
nepochs=None, exp_class=experiment.Experiment,
fetcher_class=bfetchers.DatasetFetchers, **kwargs):
# Options.
# Load data.
# TODO: general data load
if isinstance(dataset, str):
print('Loading {}...'.format(dataset))
dataset = pickle.load(open(dataset, 'rb'))
print('Loaded.')
# Make the data fetchers.
if 'train_labels' in dataset and 'valid_labels' in dataset and \
'test_labels' in dataset:
# Labeled data.
fetchers = fetcher_class(
(dataset['train'], dataset['train_labels']),
(dataset['valid'], dataset['valid_labels']),
(dataset['test'], dataset['test_labels']))
else:
fetchers = fetcher_class(
(dataset['train'],), (dataset['valid'],), (dataset['test'],))
def main(args):
        # Make config for trial with default and given arguments.
trial_args = copy.copy(kwargs)
for ind in args:
trial_args[ind] = args[ind]
# Data preprocessing
standardize = misc.get_default(trial_args, 'standardize', False)
cov_func = misc.get_default(trial_args, 'cov_func', None)
trial_args['first_do_linear_map'] = misc.get_default(
trial_args, 'first_do_linear_map', False)
# Get initial linear map parameters.
if trial_args['first_do_linear_map']:
try:
(imp, ib, ip) = helpers.get_initmap(
dataset['train'], standardize=standardize,
cov_func=cov_func)
trial_args['first_init_mat_params'] = imp
trial_args['first_init_b'] = ib
trial_args['first_perm'] = ip
except (TypeError, ValueError) as error:
print('No initial linear parameters due to error:\n{}'.format(
error))
# Determine the number of iterations to run nepochs
trial_args['batch_size'] = misc.get_default(
trial_args, 'batch_size', 256)
if nepochs is not None:
N, d = dataset['train'].shape
iters_per_epoch = N/float(trial_args['batch_size'])
trial_args['train_iters'] = int(nepochs*iters_per_epoch)
config = econfig.RedConfig(**trial_args)
# Make directories specific to experiment trial.
if base_save_path is not None:
save_path = os.path.join(base_save_path, get_exp_name(args))
misc.make_path(save_path)
else:
            raise AttributeError('Must provide save path for validating model')
if base_log_path is not None:
log_path = os.path.join(base_log_path, get_exp_name(args))
misc.make_path(log_path)
else:
log_path = None
# Save config for easy model loading.
try:
pickle.dump(
trial_args, open(os.path.join(save_path, 'trial_args.p'), 'wb'))
except TypeError:
print('Could not save trial arg pickle file.')
# Set up trial and train.
exp = exp_class(
config, log_path, save_path, fetchers)
with exp.graph.as_default():
res_dicts = exp.main()
# Save results.
if log_path is not None:
pickle.dump(
res_dicts, open(os.path.join(log_path, 'result.p'), 'wb'))
else:
pickle.dump(
res_dicts, open(os.path.join(save_path, 'result.p'), 'wb'))
return res_dicts
return main
def invalid_result(result):
return result is None or np.isnan(result['loss'])
def run_experiment(data, arg_list=ARG_LIST, def_args=DEF_ARGS,
exp_class=experiment.Experiment,
fetcher_class=bfetchers.DatasetFetchers,
estimator='TAN', retries=1,
log_path=None, save_path=None, experiments_name=None,
no_log=False):
# Set up paths.
if log_path is None or save_path is None:
home = os.path.expanduser('~')
data_name = os.path.basename(data)
experiments_name = \
experiments_name if experiments_name is not None else \
datetime.now().strftime('%Y_%m_%d_%H:%M:%S.%f')
log_path = log_path if log_path is not None else \
os.path.join(
home, 'de_logs', estimator, data_name, experiments_name)
save_path = save_path if save_path is not None else \
os.path.join(
home, 'de_models', estimator, data_name, experiments_name)
if no_log:
log_path = None
else:
misc.make_path(log_path)
misc.make_path(save_path)
print('log path: {}\nsave path: {}'.format(log_path, save_path))
# Get results for all hyperparameter choices
main = make_trainer(data, save_path, log_path, exp_class=exp_class,
fetcher_class=fetcher_class, **def_args)
if no_log:
log_path = save_path
results = []
best = None
for ai in range(len(arg_list)):
args = arg_list[ai]
retries_left = retries
print('RUNNING {}'.format(experiments_name))
print('[{}/{}] {}'.format(ai+1, len(arg_list), args))
results.append(main(args))
while invalid_result(results[-1]) and retries_left > 0:
print('[{}/{}] Retrying {}'.format(ai+1, len(arg_list), args))
retries_left -= 1
results[-1] = main(args)
better_result = not invalid_result(results[-1]) and (
invalid_result(best) or best['loss'] > results[-1]['loss']
)
if better_result:
best = {}
best['loss'] = results[-1]['loss']
best['results'] = results[-1]
best['args'] = args
pickle.dump(
{'best': best, 'trial_results': results,
'trial_args': arg_list[:ai+1]},
open(os.path.join(log_path, experiments_name+'_all_trials.p'),
'wb'))
if best is not None:
best['save_path'] = save_path
best['log_path'] = log_path
best['def_args'] = def_args
pickle.dump(
best,
open(os.path.join(save_path, experiments_name+'_best_trial.p'), 'wb'))
return best, results
|
the-stack_0_10929 | import warnings
from qiskit import QuantumRegister, ClassicalRegister
from qiskit import QuantumCircuit, Aer, transpile, assemble
from qiskit.tools.monitor import job_monitor
from qiskit.circuit.library import QFT
from qiskit.visualization import plot_histogram, plot_bloch_multivector
warnings.filterwarnings("ignore", category=DeprecationWarning)
import numpy as np
pi = np.pi
def qft_dagger(qc, n):
"""n-qubit QFTdagger the first n qubits in circ"""
# Don't forget the Swaps!
for qubit in range(n//2):
qc.swap(qubit, n-qubit-1)
for j in range(n):
for m in range(j):
qc.cp(-pi/float(2**(j-m)), m, j)
qc.h(j)
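# Minimal usage sketch: qft_dagger mutates the circuit in place, e.g.
# qc = QuantumCircuit(3); qft_dagger(qc, 3) appends the swaps, controlled phases and Hadamards above
# to the first 3 qubits of qc.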
def generalised_qpe(amt_estimation_qubits, angle, shots=10000):
go = True
while go:
# Create and set up circuit
qpe3 = QuantumCircuit(amt_estimation_qubits+1, amt_estimation_qubits)
# Apply H-Gates to counting qubits:
for qubit in range(amt_estimation_qubits):
qpe3.h(qubit)
# Prepare our eigenstate |psi>:
repetitions = 1
for counting_qubit in range(amt_estimation_qubits):
for i in range(repetitions):
                qpe3.cp(angle, counting_qubit, amt_estimation_qubits)
repetitions *= 2
# Do the inverse QFT:
qft_dagger(qpe3, amt_estimation_qubits)
# Measure of course!
qpe3.barrier()
for n in range(amt_estimation_qubits):
qpe3.measure(n,n)
aer_sim = Aer.get_backend('aer_simulator')
t_qpe3 = transpile(qpe3, aer_sim)
qobj = assemble(t_qpe3, shots=shots)
results = aer_sim.run(qobj).result()
answer = results.get_counts()
answer2 = {int(k,2)/2**amt_estimation_qubits: v for k, v in answer.items()}
print(answer2)
try:
freq = answer.most_frequent()
go = False
        except Exception:  # most_frequent() raises if there is no unique most frequent outcome
pass
#print("Most frequent '" + answer.most_frequent() + "'")
print("Approx rotation angle by Z from the unitary in degrees '" + str(360 * int(answer.most_frequent(), 2)/2**amt_estimation_qubits) + "'")
#print("Phase Calculation " + answer.most_frequent())
##return(plot_histogram(answer))
##comment out the return if you want to see the histogram
return((int(answer.most_frequent(), 2)/2**amt_estimation_qubits))
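# Minimal usage sketch (assumed parameter choices, not part of the original script): estimate the
# phase of a pi/2 controlled-phase rotation with 3 counting qubits.
if __name__ == '__main__':
    estimated_phase = generalised_qpe(amt_estimation_qubits=3, angle=pi/2, shots=2048)
    print('Estimated phase fraction:', estimated_phase)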
|
the-stack_0_10932 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append("../")
import os
import json
import copy
import argparse
import numpy as np
from functools import partial
from collections import defaultdict
import paddle
from paddle import inference
from paddlenlp.datasets import load_dataset, MapDataset
from paddlenlp.data import Stack, Tuple, Pad
from paddlenlp.transformers import SkepTokenizer
from utils import decoding, read_test_file, load_dict
from extraction.data import convert_example_to_feature as convert_example_to_feature_ext
from classification.data import convert_example_to_feature as convert_example_to_feature_cls
class Predictor(object):
def __init__(self, args):
self.args = args
self.ext_predictor, self.ext_input_handles, self.ext_output_hanle = self.create_predictor(
args.ext_model_path)
print(f"ext_model_path: {args.ext_model_path}, {self.ext_predictor}")
self.cls_predictor, self.cls_input_handles, self.cls_output_hanle = self.create_predictor(
args.cls_model_path)
self.ext_label2id, self.ext_id2label = load_dict(args.ext_label_path)
self.cls_label2id, self.cls_id2label = load_dict(args.cls_label_path)
self.tokenizer = SkepTokenizer.from_pretrained(args.base_model_name)
def create_predictor(self, model_path):
model_file = model_path + ".pdmodel"
params_file = model_path + ".pdiparams"
if not os.path.exists(model_file):
raise ValueError("not find model file path {}".format(model_file))
if not os.path.exists(params_file):
raise ValueError("not find params file path {}".format(params_file))
config = paddle.inference.Config(model_file, params_file)
if self.args.device == "gpu":
# set GPU configs accordingly
            # such as initializing the GPU memory and enabling TensorRT
config.enable_use_gpu(100, 0)
precision_map = {
"fp16": inference.PrecisionType.Half,
"fp32": inference.PrecisionType.Float32,
"int8": inference.PrecisionType.Int8
}
precision_mode = precision_map[args.precision]
if args.use_tensorrt:
config.enable_tensorrt_engine(
max_batch_size=self.args.batch_size,
min_subgraph_size=30,
precision_mode=precision_mode)
elif self.args.device == "cpu":
# set CPU configs accordingly,
# such as enable_mkldnn, set_cpu_math_library_num_threads
config.disable_gpu()
if args.enable_mkldnn:
# cache 10 different shapes for mkldnn to avoid memory leak
config.set_mkldnn_cache_capacity(10)
config.enable_mkldnn()
config.set_cpu_math_library_num_threads(args.cpu_threads)
elif self.args.device == "xpu":
# set XPU configs accordingly
config.enable_xpu(100)
config.switch_use_feed_fetch_ops(False)
predictor = paddle.inference.create_predictor(config)
input_handles = [
predictor.get_input_handle(name)
for name in predictor.get_input_names()
]
output_handle = predictor.get_output_handle(predictor.get_output_names()
[0])
return predictor, input_handles, output_handle
def predict_ext(self, args):
ori_test_ds = load_dataset(
read_test_file, data_path=args.test_path, lazy=False)
trans_func = partial(
convert_example_to_feature_ext,
tokenizer=self.tokenizer,
label2id=self.ext_label2id,
max_seq_len=args.max_seq_len,
is_test=True)
test_ds = copy.copy(ori_test_ds).map(trans_func, lazy=False)
batch_list = [
test_ds[idx:idx + args.batch_size]
for idx in range(0, len(test_ds), args.batch_size)
]
batchify_fn = lambda samples, fn=Tuple(
Pad(axis=0, pad_val=self.tokenizer.pad_token_id),
Pad(axis=0, pad_val=self.tokenizer.pad_token_type_id),
Stack(dtype="int64"), ): fn(samples)
results = []
for bid, batch_data in enumerate(batch_list):
input_ids, token_type_ids, seq_lens = batchify_fn(batch_data)
self.ext_input_handles[0].copy_from_cpu(input_ids)
self.ext_input_handles[1].copy_from_cpu(token_type_ids)
self.ext_predictor.run()
logits = self.ext_output_hanle.copy_to_cpu()
predictions = logits.argmax(axis=2)
for eid, (seq_len,
prediction) in enumerate(zip(seq_lens, predictions)):
idx = bid * args.batch_size + eid
tag_seq = [
self.ext_id2label[idx] for idx in prediction[:seq_len][1:-1]
]
text = ori_test_ds[idx]["text"]
aps = decoding(text, tag_seq)
for aid, ap in enumerate(aps):
aspect, opinions = ap[0], list(set(ap[1:]))
aspect_text = self._concate_aspect_and_opinion(text, aspect,
opinions)
results.append({
"id": str(idx) + "_" + str(aid),
"aspect": aspect,
"opinions": opinions,
"text": text,
"aspect_text": aspect_text
})
return results
def predict_cls(self, args, ext_results):
test_ds = MapDataset(ext_results)
trans_func = partial(
convert_example_to_feature_cls,
tokenizer=self.tokenizer,
label2id=self.cls_label2id,
max_seq_len=args.max_seq_len,
is_test=True)
test_ds = test_ds.map(trans_func, lazy=False)
batch_list = [
test_ds[idx:idx + args.batch_size]
for idx in range(0, len(test_ds), args.batch_size)
]
batchify_fn = lambda samples, fn=Tuple(
Pad(axis=0, pad_val=self.tokenizer.pad_token_id),
Pad(axis=0, pad_val=self.tokenizer.pad_token_type_id),
Stack(dtype="int64")): fn(samples)
results = []
for batch_data in batch_list:
input_ids, token_type_ids, _ = batchify_fn(batch_data)
self.cls_input_handles[0].copy_from_cpu(input_ids)
self.cls_input_handles[1].copy_from_cpu(token_type_ids)
self.cls_predictor.run()
logits = self.cls_output_hanle.copy_to_cpu()
predictions = logits.argmax(axis=1).tolist()
results.extend(predictions)
return results
def post_process(self, args, ext_results, cls_results):
assert len(ext_results) == len(cls_results)
collect_dict = defaultdict(list)
for ext_result, cls_result in zip(ext_results, cls_results):
ext_result["sentiment_polarity"] = self.cls_id2label[cls_result]
eid, _ = ext_result["id"].split("_")
collect_dict[eid].append(ext_result)
sentiment_results = []
for eid in collect_dict.keys():
sentiment_result = {}
ap_list = []
for idx, single_ap in enumerate(collect_dict[eid]):
if idx == 0:
sentiment_result["text"] = single_ap["text"]
ap_list.append({
"aspect": single_ap["aspect"],
"opinions": single_ap["opinions"],
"sentiment_polarity": single_ap["sentiment_polarity"]
})
sentiment_result["ap_list"] = ap_list
sentiment_results.append(sentiment_result)
with open(args.save_path, "w", encoding="utf-8") as f:
for sentiment_result in sentiment_results:
f.write(json.dumps(sentiment_result, ensure_ascii=False) + "\n")
print(
f"sentiment analysis results has been saved to path: {args.save_path}"
)
def predict(self, args):
ext_results = self.predict_ext(args)
cls_results = self.predict_cls(args, ext_results)
self.post_process(args, ext_results, cls_results)
def _concate_aspect_and_opinion(self, text, aspect, opinion_words):
aspect_text = ""
for opinion_word in opinion_words:
if text.find(aspect) <= text.find(opinion_word):
aspect_text += aspect + opinion_word + ","
else:
aspect_text += opinion_word + aspect + ","
aspect_text = aspect_text[:-1]
return aspect_text
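    # Illustrative sketch with a hypothetical Chinese review: for text='味道很好', aspect='味道' and
    # opinion_words=['很好'], the aspect occurs before the opinion word, so the method returns '味道很好'.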
if __name__ == "__main__":
# yapf: disable
parser = argparse.ArgumentParser()
parser.add_argument("--base_model_name", default='skep_ernie_1.0_large_ch', type=str, help="Base model name, SKEP used by default", )
parser.add_argument("--ext_model_path", type=str, default=None, help="The path of extraction model path that you want to load.")
parser.add_argument("--cls_model_path", type=str, default=None, help="The path of classification model path that you want to load.")
parser.add_argument("--ext_label_path", type=str, default=None, help="The path of extraction label dict.")
parser.add_argument("--cls_label_path", type=str, default=None, help="The path of classification label dict.")
parser.add_argument('--test_path', type=str, default=None, help="The path of test set that you want to predict.")
parser.add_argument('--save_path', type=str, required=True, default=None, help="The saving path of predict results.")
parser.add_argument("--batch_size", type=int, default=16, help="Batch size per GPU/CPU for training.")
parser.add_argument("--max_seq_len", default=256, type=int, help="The maximum total input sequence length after tokenization.")
parser.add_argument("--use_tensorrt", action='store_true', help="Whether to use inference engin TensorRT.")
parser.add_argument("--precision", default="fp32", type=str, choices=["fp32", "fp16", "int8"],help='The tensorrt precision.')
parser.add_argument("--device", default="gpu", choices=["gpu", "cpu", "xpu"], help="Device selected for inference.")
parser.add_argument('--cpu_threads', default=10, type=int, help='Number of threads to predict when using cpu.')
parser.add_argument('--enable_mkldnn', default=False, type=eval, choices=[True, False], help='Enable to use mkldnn to speed up when using cpu.')
args = parser.parse_args()
    # yapf: enable
predictor = Predictor(args)
predictor.predict(args)
|
the-stack_0_10937 | import json
import logging
import os
import click
import google.auth.transport.grpc
import google.auth.transport.requests
import google.oauth2.credentials
import spotipy
from spotipy.oauth2 import SpotifyOAuth
from assistant import Assistant
ASSISTANT_API_ENDPOINT = 'embeddedassistant.googleapis.com'
DEFAULT_GRPC_DEADLINE = 60 * 3 + 5
SCOPE = 'user-read-playback-state user-modify-playback-state'
@click.group()
@click.pass_context
def spottv(ctx):
return
@spottv.command()
@click.pass_obj
def on(settings):
"""
Turn on TV and launch Spotify app
"""
send_text_query('turn on Google TV', settings['device_model_id'], settings['device_id'])
# play_spotify_uri(spotify_uri='')
@spottv.command()
@click.pass_obj
def off(settings):
"""
Turn off TV
"""
send_text_query('turn off TV', settings['device_model_id'], settings['device_id'])
@spottv.command()
@click.argument('playlist_name')
@click.pass_obj
def play(settings, playlist_name):
"""
Play a playlist defined in config.json
Args:
settings: Device info
playlist_name: Name of the playlist
"""
file = open('config.json')
config_data = json.load(file)
spotify_uri = config_data['playlists'][playlist_name]
file.close()
send_text_query('turn on Google TV', settings['device_model_id'], settings['device_id'])
play_spotify_uri(spotify_uri)
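# The play command above assumes a config.json that maps playlist names to Spotify URIs, e.g.
# (hypothetical values):
# {
#     "playlists": {
#         "focus": "spotify:playlist:<playlist-id>"
#     }
# }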
def play_spotify_uri(spotify_uri):
"""
Start playback of Spotify URI
Args:
spotify_uri (str): URI of Spotify track, album or playlist
"""
spotify_controller = spotipy.Spotify(auth_manager=SpotifyOAuth(scope=SCOPE))
devices = spotify_controller.devices()
chromecast = None
if not devices:
click.echo('No device found')
else:
# click.echo(devices)
for device in devices['devices']:
if device['type'] == 'TV':
chromecast = device
break
if not chromecast:
click.echo('No Chromecast found')
else:
chromecast_id = chromecast['id']
chromecast_name = chromecast['name']
playlist = spotify_controller.playlist(spotify_uri)
playlist_name = playlist['name']
click.echo(f"Starting playback of '{playlist_name}' on {chromecast_name}...")
# spotify_controller.shuffle(True, chromecast_id)
spotify_controller.start_playback(device_id=chromecast_id, context_uri=spotify_uri)
def send_text_query(text_query, device_model_id, device_id):
"""Send a text query to specified device
Args:
text_query (str): text query to send (equivalent of a typed voice command).
device_model_id (str): identifier of the device model.
device_id (str): identifier of the registered device instance.
"""
credentials = os.path.join(click.get_app_dir('google-oauthlib-tool'), 'credentials.json')
# Setup logging.
# logging.basicConfig(level=logging.DEBUG if True else logging.INFO)
# Load OAuth 2.0 credentials.
try:
with open(credentials, 'r') as f:
credentials = google.oauth2.credentials.Credentials(token=None, **json.load(f))
http_request = google.auth.transport.requests.Request()
credentials.refresh(http_request)
except Exception as e:
logging.error('Error loading credentials: %s', e)
logging.error('Run google-oauthlib-tool to initialize '
'new OAuth 2.0 credentials.')
logging.error('google-oauthlib-tool '
'--client-secrets client_secret_811734406476-tvp38peele577b6dfv7roigsdf727tog.apps'
'.googleusercontent.com.json '
'--scope https://www.googleapis.com/auth/assistant-sdk-prototype '
'--save --headless')
return
# Create an authorized gRPC channel.
grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
credentials,
http_request,
ASSISTANT_API_ENDPOINT
)
logging.info('Connecting to %s', ASSISTANT_API_ENDPOINT)
# Call Assistant
with Assistant('en-US',
device_model_id,
device_id,
grpc_channel,
DEFAULT_GRPC_DEADLINE
) as assistant:
assistant.assist(text_query=text_query)
def get_device_info():
device_info = {}
file = open('device_model.json')
model_data = json.load(file)
device_info['device_model_id'] = model_data['device_model_id']
file.close()
file = open('device_instance.json')
instance_data = json.load(file)
device_info['device_id'] = instance_data['id']
file.close()
return device_info
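# Illustrative sketch: get_device_info() returns a dict of the form
# {'device_model_id': <value from device_model.json>, 'device_id': <id from device_instance.json>},
# which spottv() receives as its click context object (the 'settings' argument of each command).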
def main():
return spottv(obj=get_device_info())
if __name__ == '__main__':
main()
|
the-stack_0_10938 | # File: wmi_consts.py
#
# Copyright (c) 2016-2022 Splunk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
#
#
# Json keys specific to wmi app's input parameters/config and the output result
WMI_JSON_QUERY = "query"
WMI_JSON_TOTAL_SERVICES = "total_services"
WMI_JSON_RUNNING_SERVICES = "running_services"
WMI_JSON_TOTAL_PROCESSES = "total_processes"
WMI_JSON_TOTAL_USERS = "total_users"
WMI_JSON_DISABLED_USERS = "disabled_users"
WMI_JSON_SYSTEM_DETAILS = "system_details"
WMI_JSON_OS_DETAILS = "os_details"
WMI_JSON_BOOT_CONFIG_DETAILS = "boot_config_details"
WMI_JSON_DNSHOSTNAME = "dns_hostname"
WMI_JSON_PHYSICAL_MEM = "memory"
WMI_JSON_WORKGROUP = "workgroup"
WMI_JSON_DOMAIN = "domain"
WMI_JSON_VERSION = "version"
# Status messages for wmi app
WMI_SUCC_QUERY_EXECUTED = "WMI Query executed"
WMI_ERR_QUERY_EXECUTION_FAILED = "WMI query failed."
WMI_ERR_QUERY_EXECUTION_FAILED += "\nPlease make sure remote WMI access is enabled on the target machine."
WMI_ERR_QUERY_EXECUTION_FAILED += "\nAny firewall if present is configured to allow remote WMI communication"
WMI_SUCC_SYS_INFO_QUERIED = "System info queried"
# Progress messages format string
WMI_MSG_CONNECTION_FAILED = "WMI connection to {machine} failed"
# Progress strings constants, define them first and then use them in the call to send_progress
CONN_PY_PROG_SENDING_QUERY = "Executing WMI query"
# Constants relating to '_get_error_message_from_exception'
WMI_ERR_CODE_MSG = "Error code unavailable"
WMI_ERR_MSG_UNAVAILABLE = "Error message unavailable. Please check the asset configuration and|or action parameters"
WMI_PARSE_ERR_MSG = "Unable to parse the error message. Please check the asset configuration and|or action parameters"
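# Usage sketch: the progress format string above is filled in with str.format, e.g.
# WMI_MSG_CONNECTION_FAILED.format(machine='10.1.1.10') -> 'WMI connection to 10.1.1.10 failed'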
|
the-stack_0_10939 | import logging
import os
import tempfile
from galaxy.tool_shed.galaxy_install.tool_dependencies.env_manager import EnvManager
from galaxy.tool_shed.galaxy_install.tool_dependencies.recipe.env_file_builder import EnvFileBuilder
from galaxy.tool_shed.galaxy_install.tool_dependencies.recipe.install_environment import InstallEnvironment
from galaxy.tool_shed.util import tool_dependency_util
from galaxy.tool_shed.util.basic_util import (
INSTALLATION_LOG,
remove_dir,
)
from galaxy.tool_shed.util.metadata_util import get_updated_changeset_revisions_from_tool_shed
from galaxy.tool_shed.util.repository_util import (
get_absolute_path_to_file_in_repository,
get_repository_for_dependency_relationship,
)
from galaxy.tool_util.deps.resolvers import NullDependency
from galaxy.util import (
listify,
url_get,
)
from galaxy.util.tool_shed.common_util import (
get_tool_shed_url_from_tool_shed_registry,
remove_protocol_from_tool_shed_url,
)
from galaxy.util.tool_shed.xml_util import parse_xml
log = logging.getLogger(__name__)
class RecipeTag:
"""Abstract class that defines a standard format for handling recipe tags when installing packages."""
def process_tag_set(
self,
tool_shed_repository,
tool_dependency,
package_elem,
package_name,
package_version,
tool_dependency_db_records=None,
):
raise Exception("Unimplemented Method")
class SyncDatabase:
def sync_database_with_file_system(
self,
app,
tool_shed_repository,
tool_dependency_name,
tool_dependency_version,
tool_dependency_install_dir,
tool_dependency_type="package",
):
"""
The installation directory defined by the received tool_dependency_install_dir exists, so check for
the presence of INSTALLATION_LOG. If the files exists, we'll assume the tool dependency is installed,
but not necessarily successfully (it could be in an error state on disk. However, we can justifiably
assume here that no matter the state, an associated database record will exist.
"""
# This method should be reached very rarely. It implies that either the Galaxy environment
# became corrupted (i.e., the database records for installed tool dependencies is not synchronized
# with tool dependencies on disk) or the Tool Shed's install and test framework is running. The Tool
# Shed's install and test framework installs repositories in 2 stages, those of type tool_dependency_definition
# followed by those containing valid tools and tool functional test components.
log.debug("Synchronizing the database with the file system...")
try:
log.debug(
"The value of app.config.running_functional_tests is: %s" % str(app.config.running_functional_tests)
)
except Exception:
pass
sa_session = app.install_model.context
can_install_tool_dependency = False
tool_dependency = tool_dependency_util.get_tool_dependency_by_name_version_type_repository(
app, tool_shed_repository, tool_dependency_name, tool_dependency_version, tool_dependency_type
)
if tool_dependency.status == app.install_model.ToolDependency.installation_status.INSTALLING:
# The tool dependency is in an Installing state, so we don't want to do anything to it. If the tool
# dependency is being installed by someone else, we don't want to interfere with that. This assumes
# the installation by "someone else" is not hung in an Installing state, which is a weakness if that
# "someone else" never repaired it.
log.debug(
"Skipping installation of tool dependency %s version %s because it has a status of %s"
% (str(tool_dependency.name), str(tool_dependency.version), str(tool_dependency.status))
)
else:
# We have a pre-existing installation directory on the file system, but our associated database record is
# in a state that allowed us to arrive here. At this point, we'll inspect the installation directory to
# see if we have a "proper installation" and if so, synchronize the database record rather than reinstalling
            # the dependency if we're "running_functional_tests". If we're not "running_functional_tests", we'll set
# the tool dependency's installation status to ERROR.
tool_dependency_installation_directory_contents = os.listdir(tool_dependency_install_dir)
if INSTALLATION_LOG in tool_dependency_installation_directory_contents:
# Since this tool dependency's installation directory contains an installation log, we consider it to be
# installed. In some cases the record may be missing from the database due to some activity outside of
# the control of the Tool Shed. Since a new record was created for it and we don't know the state of the
# files on disk, we will set it to an error state (unless we are running Tool Shed functional tests - see
# below).
log.debug(
"Skipping installation of tool dependency %s version %s because it is installed in %s"
% (str(tool_dependency.name), str(tool_dependency.version), str(tool_dependency_install_dir))
)
if app.config.running_functional_tests:
# If we are running functional tests, the state will be set to Installed because previously compiled
                    # tool dependencies are not deleted by default by the "install and test" framework.
tool_dependency.status = app.install_model.ToolDependency.installation_status.INSTALLED
else:
error_message = "The installation directory for this tool dependency had contents but the database had no record. "
error_message += (
"The installation log may show this tool dependency to be correctly installed, but due to the "
)
error_message += "missing database record it is now being set to Error."
tool_dependency.status = app.install_model.ToolDependency.installation_status.ERROR
tool_dependency.error_message = error_message
else:
error_message = (
"\nInstallation path %s for tool dependency %s version %s exists, but the expected file %s"
% (
str(tool_dependency_install_dir),
str(tool_dependency_name),
str(tool_dependency_version),
str(INSTALLATION_LOG),
)
)
error_message += " is missing. This indicates an installation error so the tool dependency is being"
error_message += " prepared for re-installation."
log.error(error_message)
tool_dependency.status = app.install_model.ToolDependency.installation_status.NEVER_INSTALLED
remove_dir(tool_dependency_install_dir)
can_install_tool_dependency = True
sa_session.add(tool_dependency)
sa_session.flush()
try:
log.debug(
"Returning from sync_database_with_file_system with tool_dependency %s, can_install_tool_dependency %s."
% (str(tool_dependency.name), str(can_install_tool_dependency))
)
except Exception as e:
log.debug(str(e))
return tool_dependency, can_install_tool_dependency
class Install(RecipeTag, SyncDatabase):
def __init__(self, app):
self.app = app
self.tag = "install"
def process_tag_set(
self,
tool_shed_repository,
tool_dependency,
package_elem,
package_name,
package_version,
tool_dependency_db_records=None,
):
# <install version="1.0">
# Get the installation directory for tool dependencies that will be installed for the received tool_shed_repository.
actions_elem_tuples = []
proceed_with_install = False
install_dir = tool_dependency_util.get_tool_dependency_install_dir(
app=self.app,
repository_name=tool_shed_repository.name,
repository_owner=tool_shed_repository.owner,
repository_changeset_revision=tool_shed_repository.installed_changeset_revision,
tool_dependency_type="package",
tool_dependency_name=package_name,
tool_dependency_version=package_version,
)
if os.path.exists(install_dir):
# Notice that we'll throw away the following tool_dependency if it can be installed.
tool_dependency, proceed_with_install = self.sync_database_with_file_system(
self.app,
tool_shed_repository,
package_name,
package_version,
install_dir,
tool_dependency_type="package",
)
if not proceed_with_install:
log.debug(
"Tool dependency %s version %s cannot be installed (it was probably previously installed), so returning it."
% (str(tool_dependency.name), str(tool_dependency.version))
)
return tool_dependency, proceed_with_install, actions_elem_tuples
else:
proceed_with_install = True
if proceed_with_install:
package_install_version = package_elem.get("version", "1.0")
status = self.app.install_model.ToolDependency.installation_status.INSTALLING
tool_dependency = tool_dependency_util.create_or_update_tool_dependency(
app=self.app,
tool_shed_repository=tool_shed_repository,
name=package_name,
version=package_version,
type="package",
status=status,
set_status=True,
)
# Get the information about the current platform in case the tool dependency definition includes tag sets
# for installing compiled binaries.
platform_info_dict = tool_dependency_util.get_platform_info_dict()
if package_install_version == "1.0":
# Handle tool dependency installation using a fabric method included in the Galaxy framework.
actions_elem_tuples = tool_dependency_util.parse_package_elem(
package_elem, platform_info_dict=platform_info_dict, include_after_install_actions=True
)
if not actions_elem_tuples:
proceed_with_install = False
error_message = f"Version {str(package_version)} of the {str(package_name)} package cannot be installed because "
error_message += "the recipe for installing the package is missing either an <actions> tag set or an <actions_group> "
error_message += "tag set."
# Since there was an installation error, update the tool dependency status to Error.
# The remove_installation_path option must be left False here.
tool_dependency = tool_dependency_util.set_tool_dependency_attributes(
self.app,
tool_dependency=tool_dependency,
status=self.app.install_model.ToolDependency.installation_status.ERROR,
error_message=error_message,
)
else:
raise NotImplementedError(
'Only install version 1.0 is currently supported (i.e., change your tag to be <install version="1.0">).'
)
return tool_dependency, proceed_with_install, actions_elem_tuples
class Package(RecipeTag):
def __init__(self, app):
self.app = app
self.tag = "package"
def process_tag_set(
self,
tool_shed_repository,
tool_dependency,
package_elem,
package_name,
package_version,
tool_dependency_db_records=None,
):
action_elem_tuples = []
proceed_with_install = False
# Only install the tool_dependency if it is not already installed and it is associated with a database
# record in the received tool_dependencies.
if package_name and package_version:
dependencies_ignored = not self.app.toolbox.dependency_manager.uses_tool_shed_dependencies()
if dependencies_ignored:
log.debug(
"Skipping installation of tool dependency package %s because tool shed dependency resolver not enabled."
% str(package_name)
)
# Tool dependency resolves have been configured and they do not include the tool shed. Do not install package.
dep = self.app.toolbox.dependency_manager.find_dep(package_name, package_version, type="package")
if not isinstance(dep, NullDependency):
# TODO: Do something here such as marking it installed or configured externally.
pass
tool_dependency = tool_dependency_util.set_tool_dependency_attributes(
self.app,
tool_dependency=tool_dependency,
status=self.app.install_model.ToolDependency.installation_status.ERROR,
)
else:
proceed_with_install = True
return tool_dependency, proceed_with_install, action_elem_tuples
class ReadMe(RecipeTag):
def __init__(self, app):
self.app = app
self.tag = "readme"
def process_tag_set(
self,
tool_shed_repository,
tool_dependency,
package_elem,
package_name,
package_version,
tool_dependency_db_records=None,
):
# Nothing to be done.
action_elem_tuples = []
proceed_with_install = False
return tool_dependency, proceed_with_install, action_elem_tuples
class Repository(RecipeTag, SyncDatabase):
def __init__(self, app):
self.app = app
self.tag = "repository"
def create_temporary_tool_dependencies_config(self, tool_shed_url, name, owner, changeset_revision):
"""Make a call to the tool shed to get the required repository's tool_dependencies.xml file."""
tool_shed_url = get_tool_shed_url_from_tool_shed_registry(self.app, tool_shed_url)
if tool_shed_url is None or name is None or owner is None or changeset_revision is None:
message = (
"Unable to retrieve required tool_dependencies.xml file from the Tool Shed because one or more of the "
)
message += (
"following required parameters is None: tool_shed_url: %s, name: %s, owner: %s, changeset_revision: %s "
% (str(tool_shed_url), str(name), str(owner), str(changeset_revision))
)
raise Exception(message)
params = dict(name=name, owner=owner, changeset_revision=changeset_revision)
pathspec = ["repository", "get_tool_dependencies_config_contents"]
text = url_get(
tool_shed_url, auth=self.app.tool_shed_registry.url_auth(tool_shed_url), pathspec=pathspec, params=params
)
if text:
# Write the contents to a temporary file on disk so it can be reloaded and parsed.
fh = tempfile.NamedTemporaryFile("w", prefix="tmp-toolshed-cttdc")
tmp_filename = fh.name
fh.close()
fh = open(tmp_filename, "w")
fh.write(text)
fh.close()
return tmp_filename
else:
message = "Unable to retrieve required tool_dependencies.xml file from the Tool Shed for revision "
message += f"{str(changeset_revision)} of installed repository {str(name)} owned by {str(owner)}."
raise Exception(message)
def create_tool_dependency_with_initialized_env_sh_file(
self,
dependent_install_dir,
tool_shed_repository,
required_repository,
package_name,
package_version,
tool_dependencies_config,
):
"""
Create or get a tool_dependency record that is defined by the received package_name and package_version.
An env.sh file will be created for the tool_dependency in the received dependent_install_dir.
"""
# The received required_repository refers to a tool_shed_repository record that is defined as a complex
# repository dependency for this tool_dependency. The required_repository may or may not be currently
# installed (it doesn't matter). If it is installed, it is associated with a tool_dependency that has
# an env.sh file that this new tool_dependency must be able to locate and "source". If it is not installed,
# we can still determine where that env.sh file will be, so we'll initialize this new tool_dependency's env.sh
# file in either case. If the required repository ends up with an installation error, this new tool
# dependency will still be fine because its containing repository will be defined as missing dependencies.
tool_dependencies = []
if not os.path.exists(dependent_install_dir):
os.makedirs(dependent_install_dir)
required_tool_dependency_env_file_path = None
if tool_dependencies_config:
required_td_tree, error_message = parse_xml(tool_dependencies_config)
if required_td_tree:
required_td_root = required_td_tree.getroot()
for required_td_elem in required_td_root:
# Find the appropriate package name and version.
if required_td_elem.tag == "package":
# <package name="bwa" version="0.5.9">
required_td_package_name = required_td_elem.get("name", None)
required_td_package_version = required_td_elem.get("version", None)
                        # Check the database to see if we have a record for the required tool dependency (we may not, which is ok). If we
# find a record, we need to see if it is in an error state and if so handle it appropriately.
required_tool_dependency = (
tool_dependency_util.get_tool_dependency_by_name_version_type_repository(
self.app,
required_repository,
required_td_package_name,
required_td_package_version,
"package",
)
)
if required_td_package_name == package_name and required_td_package_version == package_version:
# Get or create a database tool_dependency record with which the installed package on disk will be associated.
tool_dependency = tool_dependency_util.create_or_update_tool_dependency(
app=self.app,
tool_shed_repository=tool_shed_repository,
name=package_name,
version=package_version,
type="package",
status=self.app.install_model.ToolDependency.installation_status.NEVER_INSTALLED,
set_status=True,
)
# Create an env.sh file for the tool_dependency whose first line will source the env.sh file located in
# the path defined by required_tool_dependency_env_file_path. It doesn't matter if the required env.sh
                            # file currently exists.
required_tool_dependency_env_file_path = self.get_required_repository_package_env_sh_path(
package_name, package_version, required_repository
)
env_file_builder = EnvFileBuilder(tool_dependency.installation_directory(self.app))
env_file_builder.append_line(action="source", value=required_tool_dependency_env_file_path)
return_code = env_file_builder.return_code
if return_code:
error_message = "Error defining env.sh file for package %s, return_code: %s" % (
str(package_name),
str(return_code),
)
tool_dependency = tool_dependency_util.set_tool_dependency_attributes(
self.app,
tool_dependency=tool_dependency,
status=self.app.install_model.ToolDependency.installation_status.ERROR,
error_message=error_message,
)
elif required_tool_dependency is not None and required_tool_dependency.in_error_state:
error_message = (
"This tool dependency's required tool dependency %s version %s has status %s."
% (
str(required_tool_dependency.name),
str(required_tool_dependency.version),
str(required_tool_dependency.status),
)
)
tool_dependency = tool_dependency_util.set_tool_dependency_attributes(
self.app,
tool_dependency=tool_dependency,
status=self.app.install_model.ToolDependency.installation_status.ERROR,
error_message=error_message,
)
else:
tool_dependency = tool_dependency_util.set_tool_dependency_attributes(
self.app,
tool_dependency=tool_dependency,
status=self.app.install_model.ToolDependency.installation_status.INSTALLED,
)
tool_dependencies.append(tool_dependency)
return tool_dependencies
def get_required_repository_package_env_sh_path(self, package_name, package_version, required_repository):
"""Return path to env.sh file in required repository if the required repository has been installed."""
env_sh_file_dir = tool_dependency_util.get_tool_dependency_install_dir(
app=self.app,
repository_name=required_repository.name,
repository_owner=required_repository.owner,
repository_changeset_revision=required_repository.installed_changeset_revision,
tool_dependency_type="package",
tool_dependency_name=package_name,
tool_dependency_version=package_version,
)
env_sh_file_path = os.path.join(env_sh_file_dir, "env.sh")
return env_sh_file_path
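    # Illustrative sketch (hypothetical repository values): for a required repository 'package_bwa_0_5_9'
    # owned by 'devteam' providing package 'bwa' version '0.5.9', this resolves to something like
    # <tool_dependency_dir>/bwa/0.5.9/devteam/package_bwa_0_5_9/<installed_changeset_revision>/env.sh,
    # i.e. the env.sh inside the directory returned by tool_dependency_util.get_tool_dependency_install_dir().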
def handle_complex_repository_dependency_for_package(
self, elem, package_name, package_version, tool_shed_repository
):
"""
Inspect the repository defined by a complex repository dependency definition and take certain steps to
enable installation of the received package name and version to proceed. The received elem is the
<repository> tag set which defines the complex repository dependency. The received tool_shed_repository
is the installed tool shed repository for which the tool dependency defined by the received package_name
and package_version is being installed.
"""
handled_tool_dependencies = []
tool_shed_url = elem.attrib["toolshed"]
required_repository_name = elem.attrib["name"]
required_repository_owner = elem.attrib["owner"]
default_required_repository_changeset_revision = elem.attrib["changeset_revision"]
required_repository = get_repository_for_dependency_relationship(
self.app,
tool_shed_url,
required_repository_name,
required_repository_owner,
default_required_repository_changeset_revision,
)
tool_shed = remove_protocol_from_tool_shed_url(tool_shed_url)
tmp_filename = None
if required_repository:
required_repository_changeset_revision = required_repository.installed_changeset_revision
# Define the installation directory for the required tool dependency package in the required repository.
required_repository_package_install_dir = tool_dependency_util.get_tool_dependency_install_dir(
app=self.app,
repository_name=required_repository_name,
repository_owner=required_repository_owner,
repository_changeset_revision=required_repository_changeset_revision,
tool_dependency_type="package",
tool_dependency_name=package_name,
tool_dependency_version=package_version,
)
# Define this dependent repository's tool dependency installation directory that will contain
# the env.sh file with a path to the required repository's installed tool dependency package.
dependent_install_dir = tool_dependency_util.get_tool_dependency_install_dir(
app=self.app,
repository_name=tool_shed_repository.name,
repository_owner=tool_shed_repository.owner,
repository_changeset_revision=tool_shed_repository.installed_changeset_revision,
tool_dependency_type="package",
tool_dependency_name=package_name,
tool_dependency_version=package_version,
)
if os.path.exists(dependent_install_dir):
# Notice that we'll throw away the following tool_dependency if it can be installed.
tool_dependency, can_install_tool_dependency = self.sync_database_with_file_system(
self.app,
tool_shed_repository,
package_name,
package_version,
dependent_install_dir,
tool_dependency_type="package",
)
if not can_install_tool_dependency:
log.debug(
"Tool dependency %s version %s cannot be installed (it was probably previously installed), "
"so appending it to the list of handled tool dependencies.",
str(tool_dependency.name),
str(tool_dependency.version),
)
handled_tool_dependencies.append(tool_dependency)
else:
can_install_tool_dependency = True
if can_install_tool_dependency:
# Set this dependent repository's tool dependency env.sh file with a path to the required repository's
# installed tool dependency package. We can get everything we need from the discovered installed
# required_repository.
if required_repository.is_deactivated_or_installed:
if not os.path.exists(required_repository_package_install_dir):
log.error(
f"Missing required tool dependency directory {str(required_repository_package_install_dir)}"
)
repo_files_dir = required_repository.repo_files_directory(self.app)
if not repo_files_dir:
message = (
"Unable to locate the repository directory for revision %s of installed repository %s owned by %s."
% (
str(required_repository.changeset_revision),
str(required_repository.name),
str(required_repository.owner),
)
)
raise Exception(message)
tool_dependencies_config = get_absolute_path_to_file_in_repository(
repo_files_dir, "tool_dependencies.xml"
)
if tool_dependencies_config:
config_to_use = tool_dependencies_config
else:
message = (
"Unable to locate required tool_dependencies.xml file for revision %s of installed repository %s owned by %s."
% (
str(required_repository.changeset_revision),
str(required_repository.name),
str(required_repository.owner),
)
)
raise Exception(message)
else:
# Make a call to the tool shed to get the changeset revision to which the current value of required_repository_changeset_revision
# should be updated if it's not current.
text = get_updated_changeset_revisions_from_tool_shed(
app=self.app,
tool_shed_url=tool_shed,
name=required_repository_name,
owner=required_repository_owner,
changeset_revision=required_repository_changeset_revision,
)
if text:
updated_changeset_revisions = listify(text)
# The list of changeset revisions is in reverse order, so the newest will be first.
required_repository_changeset_revision = updated_changeset_revisions[0]
# Make a call to the tool shed to get the required repository's tool_dependencies.xml file.
tmp_filename = self.create_temporary_tool_dependencies_config(
tool_shed,
required_repository_name,
required_repository_owner,
required_repository_changeset_revision,
)
config_to_use = tmp_filename
handled_tool_dependencies = self.create_tool_dependency_with_initialized_env_sh_file(
dependent_install_dir=dependent_install_dir,
tool_shed_repository=tool_shed_repository,
required_repository=required_repository,
package_name=package_name,
package_version=package_version,
tool_dependencies_config=config_to_use,
)
self.remove_file(tmp_filename)
else:
message = "Unable to locate required tool shed repository named %s owned by %s with revision %s." % (
str(required_repository_name),
str(required_repository_owner),
str(default_required_repository_changeset_revision),
)
raise Exception(message)
return handled_tool_dependencies
def process_tag_set(
self,
tool_shed_repository,
tool_dependency,
package_elem,
package_name,
package_version,
tool_dependency_db_records=None,
):
# We have a complex repository dependency definition.
action_elem_tuples = []
proceed_with_install = False
rd_tool_dependencies = self.handle_complex_repository_dependency_for_package(
package_elem, package_name, package_version, tool_shed_repository
)
for rd_tool_dependency in rd_tool_dependencies:
if rd_tool_dependency.status == self.app.install_model.ToolDependency.installation_status.ERROR:
# We'll log the error here, but continue installing packages since some may not require this dependency.
log.error(
f"Error installing tool dependency for required repository: {str(rd_tool_dependency.error_message)}"
)
return tool_dependency, proceed_with_install, action_elem_tuples
def remove_file(self, file_name):
"""Attempt to remove a file from disk."""
if file_name:
if os.path.exists(file_name):
try:
os.remove(file_name)
except Exception:
pass
class SetEnvironment(RecipeTag):
def __init__(self, app):
self.app = app
self.tag = "set_environment"
def process_tag_set(
self,
tool_shed_repository,
tool_dependency,
package_elem,
package_name,
package_version,
tool_dependency_db_records=None,
):
# We need to handle two tag sets for package_elem here, this:
# <set_environment version="1.0">
# <environment_variable name="R_SCRIPT_PATH"action="set_to">$REPOSITORY_INSTALL_DIR</environment_variable>
# </set_environment>
# or this:
# <environment_variable name="R_SCRIPT_PATH"action="set_to">$REPOSITORY_INSTALL_DIR</environment_variable>
action_elem_tuples = []
proceed_with_install = False
if tool_dependency_db_records is None:
attr_tups_of_dependencies_for_install = []
else:
attr_tups_of_dependencies_for_install = [
(td.name, td.version, td.type) for td in tool_dependency_db_records
]
try:
self.set_environment(package_elem, tool_shed_repository, attr_tups_of_dependencies_for_install)
except Exception as e:
error_message = f"Error setting environment for tool dependency: {str(e)}"
log.debug(error_message)
return tool_dependency, proceed_with_install, action_elem_tuples
def set_environment(self, elem, tool_shed_repository, attr_tups_of_dependencies_for_install):
"""
Create a ToolDependency to set an environment variable. This is different from the process used to
set an environment variable that is associated with a package. An example entry in a tool_dependencies.xml
file is::
<set_environment version="1.0">
<environment_variable name="R_SCRIPT_PATH" action="set_to">$REPOSITORY_INSTALL_DIR</environment_variable>
</set_environment>
This method must also handle the sub-element tag::
<environment_variable name="R_SCRIPT_PATH" action="set_to">$REPOSITORY_INSTALL_DIR</environment_variable>
"""
# TODO: Add support for a repository dependency definition within this tool dependency type's tag set. This should look something like
# the following. See the implementation of support for this in the tool dependency package type's method above.
# This function is only called for set environment actions as defined below, not within an <install version="1.0"> tool
# dependency type. Here is an example of the tag set this function does handle:
# <action type="set_environment">
# <environment_variable name="PATH" action="prepend_to">$INSTALL_DIR</environment_variable>
# </action>
# Here is an example of the tag set this function does not handle:
# <set_environment version="1.0">
# <repository toolshed="<tool shed>" name="<repository name>" owner="<repository owner>" changeset_revision="<changeset revision>" />
# </set_environment>
env_manager = EnvManager(self.app)
tool_dependencies = []
env_var_version = elem.get("version", "1.0")
tool_shed_repository_install_dir = os.path.abspath(tool_shed_repository.repo_files_directory(self.app))
if elem.tag == "environment_variable":
# <environment_variable name="R_SCRIPT_PATH" action="set_to">$REPOSITORY_INSTALL_DIR</environment_variable>
elems = [elem]
else:
# <set_environment version="1.0">
# <environment_variable name="R_SCRIPT_PATH" action="set_to">$REPOSITORY_INSTALL_DIR</environment_variable>
# </set_environment>
elems = [env_var_elem for env_var_elem in elem]
for env_var_elem in elems:
env_var_name = env_var_elem.get("name")
if not env_var_name:
raise Exception("The <environment_variable> tag must have a name attribute")
# The value of env_var_name must match the text value of at least 1 <requirement> tag in the
# tool config's <requirements> tag set whose "type" attribute is "set_environment" (e.g.,
# <requirement type="set_environment">R_SCRIPT_PATH</requirement>).
env_var_action = env_var_elem.get("action")
if not env_var_action:
raise Exception("The <environment_variable> tag must have an action attribute")
# Tool dependencies of type "set_environment" always have the version attribute set to None.
attr_tup = (env_var_name, None, "set_environment")
if attr_tup in attr_tups_of_dependencies_for_install:
install_dir = tool_dependency_util.get_tool_dependency_install_dir(
app=self.app,
repository_name=tool_shed_repository.name,
repository_owner=tool_shed_repository.owner,
repository_changeset_revision=tool_shed_repository.installed_changeset_revision,
tool_dependency_type="set_environment",
tool_dependency_name=env_var_name,
tool_dependency_version=None,
)
install_environment = InstallEnvironment(
app=self.app,
tool_shed_repository_install_dir=tool_shed_repository_install_dir,
install_dir=install_dir,
)
env_var_dict = env_manager.create_env_var_dict(
elem=env_var_elem, install_environment=install_environment
)
if not os.path.exists(install_dir):
os.makedirs(install_dir)
status = self.app.install_model.ToolDependency.installation_status.INSTALLING
tool_dependency = tool_dependency_util.create_or_update_tool_dependency(
app=self.app,
tool_shed_repository=tool_shed_repository,
name=env_var_name,
version=None,
type="set_environment",
status=status,
set_status=True,
)
if env_var_version == "1.0":
# Create this tool dependency's env.sh file.
env_file_builder = EnvFileBuilder(install_dir)
return_code = env_file_builder.append_line(make_executable=True, **env_var_dict)
if return_code:
error_message = "Error creating env.sh file for tool dependency %s, return_code: %s" % (
str(tool_dependency.name),
str(return_code),
)
log.debug(error_message)
status = self.app.install_model.ToolDependency.installation_status.ERROR
tool_dependency = tool_dependency_util.set_tool_dependency_attributes(
self.app, tool_dependency=tool_dependency, status=status, error_message=error_message
)
else:
if tool_dependency.status not in [
self.app.install_model.ToolDependency.installation_status.ERROR,
self.app.install_model.ToolDependency.installation_status.INSTALLED,
]:
status = self.app.install_model.ToolDependency.installation_status.INSTALLED
tool_dependency = tool_dependency_util.set_tool_dependency_attributes(
self.app, tool_dependency=tool_dependency, status=status
)
log.debug(
"Environment variable %s set in %s for tool dependency %s."
% (str(env_var_name), str(install_dir), str(tool_dependency.name))
)
else:
error_message = 'Only set_environment version 1.0 is currently supported (i.e., change your tag to be <set_environment version="1.0">).'
status = self.app.install_model.ToolDependency.installation_status.ERROR
tool_dependency = tool_dependency_util.set_tool_dependency_attributes(
self.app, tool_dependency=tool_dependency, status=status, error_message=error_message
)
tool_dependencies.append(tool_dependency)
return tool_dependencies
|
the-stack_0_10940 | from torchio import RandomNoise
from ...utils import TorchioTestCase
class TestRandomNoise(TorchioTestCase):
"""Tests for `RandomNoise`."""
def test_no_noise(self):
transform = RandomNoise(mean=0., std=0.)
transformed = transform(self.sample_subject)
self.assertTensorAlmostEqual(
self.sample_subject.t1.data,
transformed.t1.data,
)
def test_with_noise(self):
transform = RandomNoise()
transformed = transform(self.sample_subject)
self.assertTensorNotEqual(
self.sample_subject.t1.data,
transformed.t1.data,
)
def test_constant_noise(self):
transform = RandomNoise(mean=(5., 5.), std=0.)
transformed = transform(self.sample_subject)
self.assertTensorAlmostEqual(
self.sample_subject.t1.data + 5,
transformed.t1.data,
)
def test_negative_std(self):
with self.assertRaises(ValueError):
RandomNoise(std=-2)
def test_std_range_with_negative_min(self):
with self.assertRaises(ValueError):
RandomNoise(std=(-0.5, 4))
def test_wrong_std_type(self):
with self.assertRaises(ValueError):
RandomNoise(std='wrong')
def test_wrong_mean_type(self):
with self.assertRaises(ValueError):
RandomNoise(mean='wrong')
|
the-stack_0_10941 | import logging, math
from gi.repository import Gst, Gtk
class AudioLevelDisplay(object):
""" Displays a Level-Meter of another VideoDisplay into a GtkWidget """
def __init__(self, drawing_area):
self.log = logging.getLogger('AudioLevelDisplay[%s]' % drawing_area.get_name())
self.drawing_area = drawing_area
self.levelrms = []
self.levelpeak = []
self.leveldecay = []
# register on_draw handler
self.drawing_area.connect('draw', self.on_draw)
def on_draw(self, widget, cr):
# number of audio-channels
channels = len(self.levelrms)
if channels == 0:
return
width = self.drawing_area.get_allocated_width()
height = self.drawing_area.get_allocated_height()
# space between the channels in px
margin = 2
# 1 channel -> 0 margins, 2 channels -> 1 margin, 3 channels…
channel_width = int( (width - (margin * (channels - 1))) / channels )
# self.log.debug(
# 'width: %upx filled with %u channels of each %upx '
# 'and %ux margin of %upx',
# width, channels, channel_width, channels-1, margin)
# normalize db-value to 0…1 and multiply with the height
rms_px = [ self.normalize_db(db) * height for db in self.levelrms ]
peak_px = [ self.normalize_db(db) * height for db in self.levelpeak ]
decay_px = [ self.normalize_db(db) * height for db in self.leveldecay ]
# set the line-width >1, to get a nice overlap
cr.set_line_width(2)
# iterate over all pixels
for y in range(0, height):
# calculate our place in the color-gradient, clamp to 0…1
# 0 -> green, 0.5 -> yellow, 1 -> red
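# e.g. y/height = 0.6 gives color 0.0 (green) and y/height = 1.0 gives ~0.95 (red)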
color = self.clamp(((y / height) - 0.6) / 0.42)
for channel in range(0, channels):
# start-coordinate for this channel
x = (channel * channel_width) + (channel * margin)
# calculate the brightness based on whether this line is in the
# active region
# default to 0.25, dark
bright = 0.25
if int(y - decay_px[channel]) in range(0, 2):
# decay marker, 2px wide, extra bright
bright = 1.5
elif y < rms_px[channel]:
# rms bar, full bright
bright = 1
elif y < peak_px[channel]:
# peak bar, a little darker
bright = 0.75
# set the color with a little reduced green
cr.set_source_rgb(
color * bright,
(1-color) * bright * 0.75,
0
)
# draw the marker
cr.move_to(x, height-y)
cr.line_to(x + channel_width, height-y)
cr.stroke()
# draw a black line for the margin
cr.set_source_rgb(0,0,0)
cr.move_to(x + channel_width, height-y)
cr.line_to(x + channel_width + margin, height-y)
cr.stroke()
# draw db text-markers
cr.set_source_rgb(1, 1, 1)
for db in [-40, -20, -10, -5, -4, -3, -2, -1]:
text = str(db)
xbearing, ybearing, textwidth, textheight, xadvance, yadvance = (
cr.text_extents(text))
y = self.normalize_db(db) * height
cr.move_to((width-textwidth) / 2, height - y - textheight)
cr.show_text(text)
return True
def normalize_db(self, db):
# Map a dB value to a 0…1 bar height (0 dB = full bar):
# -60db -> 0.00 (very quiet)
# -30db -> 0.26
# -15db -> 0.49
# -5db -> 0.76
# -0db -> 1.00 (very loud)
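# e.g. db = -15: 1 - log10(-0.15 * -15 + 1) = 1 - log10(3.25) ≈ 0.49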
logscale = 1 - math.log10(-0.15 * db + 1)
return self.clamp(logscale)
def clamp(self, value, min_value=0, max_value=1):
return max(min(value, max_value), min_value)
def level_callback(self, rms, peak, decay):
self.levelrms = rms
self.levelpeak = peak
self.leveldecay = decay
self.drawing_area.queue_draw()
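# Note: level_callback expects per-channel rms/peak/decay lists in dB, as produced,
# for example, by the message bus of a GStreamer `level` element; parsing those bus
# messages and calling this method is left to the surrounding application (this is
# an assumption about usage, not code from this file).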
|
the-stack_0_10942 | from pynput import keyboard
import time
import BarcodeScanner as BB
def on_press(a):
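# Accumulate up to four scanned characters into the global string `s`;
# returning False after the fourth character stops the keyboard listener.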
#try:
global count
global s
if a!=keyboard.Key.shift and a!=keyboard.Key.enter :
#print('{0}'.format(a))
count = count+1
s = s+str(a.char)
if count==4:
return False
#except AttributeError:
#print('{0}'.format(key))
def on_release(key):
#print('{0}'.format(key))
time.sleep(0.1)
while True:
count = 0
s = ""
with keyboard.Listener(on_press=on_press,on_release=on_release) as listener:
#while True:
#print('test')
BB.Scan()
time.sleep(1)
print(s)
listener.join()
|
the-stack_0_10945 | import pandas as pd
from sklearn.metrics import mean_squared_error
import matplotlib
matplotlib.use('Agg') # for saving figures
import matplotlib.pyplot as plt
series = pd.read_csv('daily-users.csv', header=0, parse_dates=[0], index_col=0, squeeze=True)
from statsmodels.tsa.arima_model import ARIMA
f, axarr = plt.subplots(2, 2)
col = 0
row = 0
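# Fit four candidate models on data up to 2017-12-31 and compare their forecasts
# against 2018: AR(3), MA(3), ARIMA(3,1,0) and ARIMA(3,1,1).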
for pdq in [(3,0,0), (0,0,3), (3,1,0), (3,1,1)]:
print(pdq)
axarr[row, col].set_title('p = %d, d = %d, q = %d' % pdq)
model = ARIMA(series.ix[:'2017-12-31'], pdq, freq='D').fit()
predictions, _, _ = model.forecast(len(series.ix['2018-01-01':]))
print(predictions)
print(series.ix['2018-01-01':][:len(predictions)])
print("Mean squared error: %.2f"
% mean_squared_error(series.ix['2018-01-01':][:len(predictions)], predictions))
series.ix['2017-01-01':].plot(color='gray', linewidth=1, ax=axarr[row, col])
pred_series = pd.Series(predictions, index=series.ix['2018-01-01':][:len(predictions)].index)
pred_series.plot(color='blue', linewidth=3, ax=axarr[row, col])
axarr[row, col].axes.get_xaxis().set_visible(False)
axarr[row, col].axes.get_yaxis().set_visible(False)
col += 1
if col == 2:
col = 0
row += 1
plt.savefig('arima-daily-grid.png', dpi=300, bbox_inches='tight', pad_inches=0)
|
the-stack_0_10946 | import gym
from torch import nn as nn
from rlkit.exploration_strategies.base import \
PolicyWrappedWithExplorationStrategy
from rlkit.exploration_strategies.epsilon_greedy import EpsilonGreedy
from rlkit.policies.argmax import ArgmaxDiscretePolicy
from rlkit.torch.vpg.ppo import PPOTrainer
from rlkit.torch.networks import Mlp
import rlkit.torch.pytorch_util as ptu
from rlkit.data_management.env_replay_buffer import EnvReplayBuffer
from rlkit.launchers.launcher_util import setup_logger
from rlkit.samplers.data_collector import MdpPathCollector
from rlkit.torch.torch_rl_algorithm import TorchOnlineRLAlgorithm
def experiment(variant):
from simple_sup_lstm import SimpleSupLSTMEnv
expl_env = SimpleSupLSTMEnv(**variant['env_kwargs'])
eval_env = SimpleSupLSTMEnv(**variant['env_kwargs'])
obs_dim = eval_env.observation_space.low.size
action_dim = eval_env.action_space.n
label_num = expl_env.label_num
label_dim = expl_env.label_dim
if variant['load_kwargs']['load']:
load_dir = variant['load_kwargs']['load_dir']
load_data = torch.load(load_dir+'/params.pkl',map_location='cpu')
policy = load_data['trainer/policy']
vf = load_data['trainer/value_function']
else:
hidden_dim = variant['lstm_kwargs']['hidden_dim']
num_lstm_layers = variant['lstm_kwargs']['num_layers']
node_dim = variant['gnn_kwargs']['node_dim']
node_num = expl_env.node_num
input_node_dim = expl_env.node_dim
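# Zero-initialised first action plus per-node LSTM latents; the (o_0, h_0, c_0)
# tuple below presumably holds the LSTM output, hidden and cell states per node.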
a_0 = np.zeros(action_dim)
o_0 = np.zeros((node_num, hidden_dim*num_lstm_layers))
h_0 = np.zeros((node_num, hidden_dim*num_lstm_layers))
c_0 = np.zeros((node_num, hidden_dim*num_lstm_layers))
latent_0 = (o_0, h_0, c_0)
from lstm_net import LSTMNet
lstm_ego = LSTMNet(node_dim, action_dim, hidden_dim, num_lstm_layers)
lstm_other = LSTMNet(node_dim, 0, hidden_dim, num_lstm_layers)
from graph_builder import TrafficGraphBuilder
gb = TrafficGraphBuilder(input_dim=input_node_dim+hidden_dim, node_num=node_num,
ego_init=torch.tensor([0.,1.]),
other_init=torch.tensor([1.,0.]),
)
from gnn_net import GNNNet
gnn = GNNNet(
pre_graph_builder=gb,
node_dim=variant['gnn_kwargs']['node_dim'],
conv_type=variant['gnn_kwargs']['conv_type'],
num_conv_layers=variant['gnn_kwargs']['num_layers'],
hidden_activation=variant['gnn_kwargs']['activation'],
)
from gnn_lstm_net import GNNLSTMNet
policy_net = GNNLSTMNet(node_num,gnn,lstm_ego,lstm_other)
from layers import FlattenLayer, SelectLayer
post_net = nn.Sequential(
SelectLayer(-2,0),
FlattenLayer(2),
nn.ReLU(),
nn.Linear(hidden_dim,action_dim)
)
from softmax_lstm_policy import SoftmaxLSTMPolicy
policy = SoftmaxLSTMPolicy(
a_0=a_0,
latent_0=latent_0,
obs_dim=obs_dim,
action_dim=action_dim,
lstm_net=policy_net,
post_net=post_net,
)
print('parameters: ',np.sum([p.view(-1).shape[0] for p in policy.parameters()]))
vf = Mlp(
hidden_sizes=[32, 32],
input_size=obs_dim,
output_size=1,
) # TODO: id is also an input
vf_criterion = nn.MSELoss()
from rlkit.torch.policies.make_deterministic import MakeDeterministic
eval_policy = MakeDeterministic(policy)
expl_policy = policy
eval_path_collector = MdpPathCollector(
eval_env,
eval_policy,
)
expl_path_collector = MdpPathCollector(
expl_env,
expl_policy,
)
trainer = PPOTrainer(
policy=policy,
value_function=vf,
vf_criterion=vf_criterion,
recurrent=True,
**variant['trainer_kwargs']
)
algorithm = TorchOnlineRLAlgorithm(
trainer=trainer,
exploration_env=expl_env,
evaluation_env=eval_env,
exploration_data_collector=expl_path_collector,
evaluation_data_collector=eval_path_collector,
**variant['algorithm_kwargs']
)
algorithm.to(ptu.device)
algorithm.train()
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--exp_name', type=str, default='SimpleSupLSTM')
parser.add_argument('--node_num', type=int, default=5)
parser.add_argument('--node_dim', type=int, default=2)
parser.add_argument('--log_dir', type=str, default='PPOGNN')
parser.add_argument('--llayer', type=int, default=1)
parser.add_argument('--hidden', type=int, default=32)
parser.add_argument('--gnn', type=str, default='GSage')
parser.add_argument('--node', type=int, default=16)
parser.add_argument('--glayer', type=int, default=3)
parser.add_argument('--act', type=str, default='relu')
parser.add_argument('--lr', type=float, default=None)
parser.add_argument('--bs', type=int, default=None)
parser.add_argument('--epoch', type=int, default=None)
parser.add_argument('--load', action='store_true', default=False)
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--snapshot_mode', type=str, default="gap_and_last")
parser.add_argument('--snapshot_gap', type=int, default=500)
args = parser.parse_args()
import os.path as osp
pre_dir = './Data/'+args.exp_name+'node'+str(args.node_num)+'dim'+str(args.node_dim)
main_dir = args.log_dir\
+('llayer'+str(args.llayer))\
+('hidden'+str(args.hidden))\
+args.gnn\
+('node'+str(args.node))\
+('glayer'+str(args.glayer))\
+('act'+args.act)\
+(('ep'+str(args.epoch)) if args.epoch else '')\
+(('lr'+str(args.lr)) if args.lr else '')\
+(('bs'+str(args.bs)) if args.bs else '')
log_dir = osp.join(pre_dir,main_dir,'seed'+str(args.seed))
max_path_length = 10
# noinspection PyTypeChecker
variant = dict(
lstm_kwargs=dict(
hidden_dim=args.hidden,
num_layers=args.llayer,
),
gnn_kwargs=dict(
conv_type=args.gnn,
node_dim=args.node,
num_layers=args.glayer,
activation=args.act,
),
env_kwargs=dict(
node_num=args.node_num,
node_dim=args.node_dim
),
algorithm_kwargs=dict(
num_epochs=(args.epoch if args.epoch else 1000),
num_eval_steps_per_epoch=1000,
num_train_loops_per_epoch=1,
num_trains_per_train_loop=1,
num_expl_steps_per_train_loop=(args.bs if args.bs else 1000),
max_path_length=max_path_length,
save_best=True,
),
trainer_kwargs=dict(
discount=0.99,
max_path_length=max_path_length,
policy_lr=(args.lr if args.lr else 1e-4),
vf_lr=(args.lr if args.lr else 1e-3),
),
load_kwargs=dict(
load=args.load,
load_dir=log_dir,
),
)
if args.load:
log_dir = log_dir + '_load'
import os
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
with open(osp.join(log_dir,'variant.json'),'w') as out_json:
import json
json.dump(variant,out_json,indent=2)
import sys
cmd_input = 'python ' + ' '.join(sys.argv) + '\n'
with open(osp.join(log_dir, 'cmd_input.txt'), 'a') as f:
f.write(cmd_input)
setup_logger(args.exp_name+'/'+main_dir, variant=variant,
snapshot_mode=args.snapshot_mode, snapshot_gap=args.snapshot_gap,
log_dir=log_dir)
import numpy as np
import torch
np.random.seed(args.seed)
torch.manual_seed(args.seed)
# ptu.set_gpu_mode(True) # optionally set the GPU (default=False)
experiment(variant)
|
the-stack_0_10947 | """Command to set a metadata attribute."""
import asyncio
from typing import Optional
import click
from astoria.astctl.command import Command
from astoria.common.ipc import MetadataSetManagerRequest
loop = asyncio.get_event_loop()
@click.command("set")
@click.argument("attribute")
@click.argument("value")
@click.option("-v", "--verbose", is_flag=True)
@click.option("-c", "--config-file", type=click.Path(exists=True))
def set(attribute: str, value: str, *, verbose: bool, config_file: Optional[str]) -> None:
"""Set a metadata attribute."""
command = SetMetadataCommand(attribute, value, verbose, config_file)
loop.run_until_complete(command.run())
class SetMetadataCommand(Command):
"""Set a metadata attribute."""
dependencies = ["astmetad"]
def __init__(
self,
attribute: str,
value: str,
verbose: bool,
config_file: Optional[str],
) -> None:
super().__init__(verbose, config_file)
self._attr = attribute
self._value = value
async def main(self) -> None:
"""Main method of the command."""
res = await self._mqtt.manager_request(
"astmetad",
"mutate",
MetadataSetManagerRequest(
sender_name=self.name,
attr=self._attr,
value=self._value,
),
)
if res.success:
print(f"Successfully set {self._attr} to {self._value}.")
if len(res.reason) > 0:
print(res.reason)
else:
print(f"Unable to set {self._attr} to {self._value}.")
if len(res.reason) > 0:
print(res.reason)
# Add timeout
self.halt(silent=True)
|
the-stack_0_10948 | from __future__ import print_function
from __future__ import division
from six.moves import xrange
import os
import time
import tensorflow as tf
import numpy as np
from sklearn.preprocessing import StandardScaler
from lib.datasets import MNIST as Data
from lib.model import Model as BaseModel
from lib.segmentation import segmentation_adjacency, extract_features_fixed
# from lib.segmentation import slic_fixed
from lib.segmentation import quickshift_fixed
from lib.layer import SpatialCNN as Conv, FC
from lib.graph import receptive_fields, fill_features
from lib.pipeline import PreprocessedDataset, FileQueue
# SLIC_FEATURES = [4, 5, 6, 7, 8, 18, 20, 21, 22]
QUICKSHIFT_FEATURES = [4, 6, 7, 8, 24, 28, 29, 31, 37]
DATA_DIR = 'data/mnist'
# PREPROCESS_FIRST = 'data/mnist/slic_spatial'
PREPROCESS_FIRST = 'data/mnist/quickshift_spatial'
NODE_SIZE = 25
NODE_STRIDE = 4
DELTA = 3
NEIGHBORHOOD_SIZE = 25
CONNECTIVITY = 8
LEARNING_RATE = 0.001
TRAIN_DIR = None
# LOG_DIR = 'data/summaries/mnist_slic_spatial'
LOG_DIR = 'data/summaries/mnist_quickshift_spatial'
SAVE_STEP = 250
AUGMENT_TRAIN_EXAMPLES = False
DROPOUT = 0.5
BATCH_SIZE = 64
MAX_STEPS = 15000
DISPLAY_STEP = 10
# FORM_FEATURES = SLIC_FEATURES
FORM_FEATURES = QUICKSHIFT_FEATURES
NUM_FEATURES = len(FORM_FEATURES) + 1
data = Data(DATA_DIR)
# segmentation_algorithm = slic_fixed(
# num_segments=100, compactness=5, max_iterations=10, sigma=0)
segmentation_algorithm = quickshift_fixed(
ratio=1, kernel_size=2, max_dist=2, sigma=0)
feature_extraction_algorithm = extract_features_fixed(FORM_FEATURES)
def preprocess_spatial_fixed(
segmentation_algorithm, feature_extraction_algorithm, node_size,
node_stride, delta, neighborhood_size, connectivity):
def _preprocess(image):
segmentation = segmentation_algorithm(image)
adj, points, _ = segmentation_adjacency(segmentation, connectivity)
features = feature_extraction_algorithm(segmentation, image)
StandardScaler(copy=False).fit_transform(features)
fields = receptive_fields(points, adj, node_size, node_stride,
neighborhood_size, delta)
return fill_features(fields, features)
return _preprocess
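# Each preprocessed example is therefore a dense tensor of receptive-field features
# with shape (NODE_SIZE, NEIGHBORHOOD_SIZE, NUM_FEATURES), matching the 'features'
# placeholder defined further below.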
preprocess_algorithm = preprocess_spatial_fixed(
segmentation_algorithm, feature_extraction_algorithm, NODE_SIZE,
NODE_STRIDE, DELTA, NEIGHBORHOOD_SIZE, CONNECTIVITY)
# Generate preprocessed dataset.
data.train = PreprocessedDataset(
os.path.join(PREPROCESS_FIRST, 'train'), data.train, preprocess_algorithm)
data.val = PreprocessedDataset(
os.path.join(PREPROCESS_FIRST, 'val'), data.val, preprocess_algorithm)
data.test = PreprocessedDataset(
os.path.join(PREPROCESS_FIRST, 'test'), data.test, preprocess_algorithm)
capacity = 10 * BATCH_SIZE
train_queue = FileQueue(data.train, BATCH_SIZE, capacity, shuffle=True)
val_queue = FileQueue(data.val, BATCH_SIZE, capacity, shuffle=True)
test_queue = FileQueue(data.test, BATCH_SIZE, capacity, shuffle=False)
placeholders = {
'features':
tf.placeholder(tf.float32,
[None, NODE_SIZE, NEIGHBORHOOD_SIZE,
NUM_FEATURES], 'features'),
'labels':
tf.placeholder(tf.uint8, [None, data.num_classes], 'labels'),
'dropout':
tf.placeholder(tf.float32, [], 'dropout'),
}
class Model(BaseModel):
def _build(self):
conv_1 = Conv(
NUM_FEATURES, 64, NEIGHBORHOOD_SIZE, logging=self.logging)
fc_1 = FC(NODE_SIZE * 64, 1024, logging=self.logging)
fc_2 = FC(
1024,
data.num_classes,
act=lambda x: x,
bias=False,
dropout=self.placeholders['dropout'],
logging=self.logging)
self.layers = [conv_1, fc_1, fc_2]
model = Model(
placeholders=placeholders,
learning_rate=LEARNING_RATE,
train_dir=TRAIN_DIR,
log_dir=LOG_DIR)
model.build()
global_step = model.initialize()
def feed_dict_with_batch(batch, dropout=0):
features = np.array([data[0] for data in batch], np.float32)
labels = np.array([data[1] for data in batch], np.uint8)
return {
placeholders['features']: features,
placeholders['labels']: labels,
placeholders['dropout']: DROPOUT,
}
try:
for step in xrange(global_step, MAX_STEPS):
t_pre = time.process_time()
batch = train_queue.dequeue()
feed_dict = feed_dict_with_batch(batch, DROPOUT)
t_pre = time.process_time() - t_pre
t_train = model.train(feed_dict, step)
if step % DISPLAY_STEP == 0:
# Evaluate on training and validation set with zero dropout.
feed_dict.update({model.placeholders['dropout']: 0})
train_info = model.evaluate(feed_dict, step, 'train')
batch = val_queue.dequeue()
feed_dict = feed_dict_with_batch(batch, DROPOUT)
val_info = model.evaluate(feed_dict, step, 'val')
log = 'step={}, '.format(step)
log += 'time={:.2f}s + {:.2f}s, '.format(t_pre, t_train)
log += 'train_loss={:.5f}, '.format(train_info[0])
log += 'train_acc={:.5f}, '.format(train_info[1])
log += 'val_loss={:.5f}, '.format(val_info[0])
log += 'val_acc={:.5f}'.format(val_info[1])
print(log)
if step % SAVE_STEP == 0:
model.save()
except KeyboardInterrupt:
print()
train_queue.close()
val_queue.close()
print('Optimization finished!')
print('Evaluate on test set. This can take a few minutes.')
try:
num_steps = data.test.num_examples // BATCH_SIZE
test_info = [0, 0]
for i in xrange(num_steps):
batch = test_queue.dequeue()
feed_dict = feed_dict_with_batch(batch, DROPOUT)
batch_info = model.evaluate(feed_dict)
test_info = [a + b for a, b in zip(test_info, batch_info)]
log = 'Test results: '
log += 'loss={:.5f}, '.format(test_info[0] / num_steps)
log += 'acc={:.5f}, '.format(test_info[1] / num_steps)
print(log)
except KeyboardInterrupt:
print()
print('Test evaluation aborted.')
test_queue.close()
|
the-stack_0_10949 | from __future__ import division
import torch
from ignite.metrics.metric import Metric
from ignite.exceptions import NotComputableError
from ignite.metrics.metric import sync_all_reduce, reinit__is_reduced
class TopKCategoricalAccuracy(Metric):
"""
Calculates the top-k categorical accuracy.
- `update` must receive output of the form `(y_pred, y)`.
"""
def __init__(self, k=5, output_transform=lambda x: x, device=None):
super(TopKCategoricalAccuracy, self).__init__(output_transform, device=device)
self._k = k
@reinit__is_reduced
def reset(self):
self._num_correct = 0
self._num_examples = 0
@reinit__is_reduced
def update(self, output):
y_pred, y = output
sorted_indices = torch.topk(y_pred, self._k, dim=1)[1]
expanded_y = y.view(-1, 1).expand(-1, self._k)
correct = torch.sum(torch.eq(sorted_indices, expanded_y), dim=1)
self._num_correct += torch.sum(correct).item()
self._num_examples += correct.shape[0]
@sync_all_reduce("_num_correct", "_num_examples")
def compute(self):
if self._num_examples == 0:
raise NotComputableError("TopKCategoricalAccuracy must have at"
"least one example before it can be computed.")
return self._num_correct / self._num_examples
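# Minimal usage sketch (variable names are illustrative, not from this file):
# acc = TopKCategoricalAccuracy(k=5)
# acc.reset()
# acc.update((y_pred_batch, y_batch)) # y_pred_batch: (N, C) scores, y_batch: (N,) labels
# top5 = acc.compute()
# In an ignite loop it would typically be attached to an engine instead, e.g.
# acc.attach(evaluator, "top5_accuracy").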
|
the-stack_0_10950 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
import logging
import mock
import pytest
import uuid
from collections import namedtuple
from datetime import datetime, timedelta
from django.utils import timezone
from time import time
from sentry.app import tsdb
from sentry.constants import VERSION_LENGTH
from sentry.event_manager import (
HashDiscarded, EventManager, EventUser,
md5_from_hash
)
from sentry.models import (
Activity, Environment, Event, ExternalIssue, Group, GroupEnvironment,
GroupHash, GroupLink, GroupRelease, GroupResolution, GroupStatus,
GroupTombstone, EventMapping, Integration, Release,
ReleaseProjectEnvironment, OrganizationIntegration, UserReport
)
from sentry.signals import event_discarded, event_saved
from sentry.testutils import assert_mock_called_once_with_partial, TransactionTestCase
from sentry.utils.data_filters import FilterStatKeys
def make_event(**kwargs):
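# Build a minimal event payload for the tests; any keyword arguments override
# the defaults assembled below.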
result = {
'event_id': 'a' * 32,
'message': 'foo',
'timestamp': 1403007314.570599,
'level': logging.ERROR,
'logger': 'default',
'tags': [],
}
result.update(kwargs)
return result
class EventManagerTest(TransactionTestCase):
def make_release_event(self, release_name, project_id):
manager = EventManager(make_event(release=release_name))
manager.normalize()
event = manager.save(project_id)
return event
def test_key_id_remains_in_data(self):
manager = EventManager(make_event(key_id=12345))
manager.normalize()
assert manager.get_data()['key_id'] == 12345
event = manager.save(1)
assert event.data['key_id'] == 12345
def test_similar_message_prefix_doesnt_group(self):
# we had a regression which caused the default hash to just be
# 'event.message' instead of '[event.message]' which caused it to
# generate a hash per letter
manager = EventManager(make_event(event_id='a', message='foo bar'))
manager.normalize()
event1 = manager.save(1)
manager = EventManager(make_event(event_id='b', message='foo baz'))
manager.normalize()
event2 = manager.save(1)
assert event1.group_id != event2.group_id
@mock.patch('sentry.event_manager.should_sample')
def test_saves_event_mapping_when_sampled(self, should_sample):
should_sample.return_value = True
event_id = 'a' * 32
manager = EventManager(make_event(event_id=event_id))
event = manager.save(1)
# This is a brand new event, so it is actually saved.
# In this case, we don't need an EventMapping, but we
# do need the Event.
assert not EventMapping.objects.filter(
group_id=event.group_id,
event_id=event_id,
).exists()
assert Event.objects.filter(
event_id=event_id,
).exists()
event_id = 'b' * 32
manager = EventManager(make_event(event_id=event_id))
event = manager.save(1)
# This second is a dupe, so should be sampled
# For a sample, we want to store the EventMapping,
# but don't need to store the Event
assert EventMapping.objects.filter(
group_id=event.group_id,
event_id=event_id,
).exists()
assert not Event.objects.filter(
event_id=event_id,
).exists()
def test_platform_is_saved(self):
manager = EventManager(
make_event(
**{'sentry.interfaces.AppleCrashReport': {
'crash': {},
'binary_images': []
}}
)
)
manager.normalize()
event = manager.save(1)
assert 'sentry.interfaces.AppleCrashReport' not in event.interfaces
def test_ephemeral_interfaces_removed_on_save(self):
manager = EventManager(make_event(platform='python'))
event = manager.save(1)
group = event.group
assert group.platform == 'python'
assert event.platform == 'python'
def test_dupe_message_id(self):
event_id = 'a' * 32
manager = EventManager(make_event(event_id=event_id))
manager.save(1)
assert Event.objects.count() == 1
# ensure that calling it again doesn't raise a db error
manager = EventManager(make_event(event_id=event_id))
manager.save(1)
assert Event.objects.count() == 1
def test_updates_group(self):
manager = EventManager(
make_event(
message='foo',
event_id='a' * 32,
checksum='a' * 32,
)
)
event = manager.save(1)
manager = EventManager(
make_event(
message='foo bar',
event_id='b' * 32,
checksum='a' * 32,
)
)
with self.tasks():
event2 = manager.save(1)
group = Group.objects.get(id=event.group_id)
assert group.times_seen == 2
assert group.last_seen.replace(microsecond=0) == event.datetime.replace(microsecond=0)
assert group.message == event2.message
assert group.data.get('type') == 'default'
assert group.data.get('metadata') == {
'title': 'foo bar',
}
def test_updates_group_with_fingerprint(self):
manager = EventManager(
make_event(
message='foo',
event_id='a' * 32,
fingerprint=['a' * 32],
)
)
with self.tasks():
event = manager.save(1)
manager = EventManager(
make_event(
message='foo bar',
event_id='b' * 32,
fingerprint=['a' * 32],
)
)
with self.tasks():
event2 = manager.save(1)
group = Group.objects.get(id=event.group_id)
assert group.times_seen == 2
assert group.last_seen.replace(microsecond=0) == event.datetime.replace(microsecond=0)
assert group.message == event2.message
def test_differentiates_with_fingerprint(self):
manager = EventManager(
make_event(
message='foo',
event_id='a' * 32,
fingerprint=['{{ default }}', 'a' * 32],
)
)
with self.tasks():
manager.normalize()
event = manager.save(1)
manager = EventManager(
make_event(
message='foo bar',
event_id='b' * 32,
fingerprint=['a' * 32],
)
)
with self.tasks():
manager.normalize()
event2 = manager.save(1)
assert event.group_id != event2.group_id
def test_unresolves_group(self):
# N.B. EventManager won't unresolve the group unless the event2 has a
# later timestamp than event1. MySQL doesn't support microseconds.
manager = EventManager(
make_event(
event_id='a' * 32,
checksum='a' * 32,
timestamp=1403007314,
)
)
with self.tasks():
event = manager.save(1)
group = Group.objects.get(id=event.group_id)
group.status = GroupStatus.RESOLVED
group.save()
assert group.is_resolved()
manager = EventManager(
make_event(
event_id='b' * 32,
checksum='a' * 32,
timestamp=1403007345,
)
)
event2 = manager.save(1)
assert event.group_id == event2.group_id
group = Group.objects.get(id=group.id)
assert not group.is_resolved()
@mock.patch('sentry.event_manager.plugin_is_regression')
def test_does_not_unresolve_group(self, plugin_is_regression):
# N.B. EventManager won't unresolve the group unless the event2 has a
# later timestamp than event1. MySQL doesn't support microseconds.
plugin_is_regression.return_value = False
manager = EventManager(
make_event(
event_id='a' * 32,
checksum='a' * 32,
timestamp=1403007314,
)
)
with self.tasks():
event = manager.save(1)
group = Group.objects.get(id=event.group_id)
group.status = GroupStatus.RESOLVED
group.save()
assert group.is_resolved()
manager = EventManager(
make_event(
event_id='b' * 32,
checksum='a' * 32,
timestamp=1403007315,
)
)
event2 = manager.save(1)
assert event.group_id == event2.group_id
group = Group.objects.get(id=group.id)
assert group.is_resolved()
@mock.patch('sentry.tasks.activity.send_activity_notifications.delay')
@mock.patch('sentry.event_manager.plugin_is_regression')
def test_marks_as_unresolved_with_new_release(
self, plugin_is_regression, mock_send_activity_notifications_delay
):
plugin_is_regression.return_value = True
old_release = Release.objects.create(
version='a',
organization_id=self.project.organization_id,
date_added=timezone.now() - timedelta(minutes=30),
)
old_release.add_project(self.project)
manager = EventManager(
make_event(
event_id='a' * 32,
checksum='a' * 32,
timestamp=time() - 50000, # need to work around active_at
release=old_release.version,
)
)
event = manager.save(1)
group = event.group
group.update(status=GroupStatus.RESOLVED)
resolution = GroupResolution.objects.create(
release=old_release,
group=group,
)
activity = Activity.objects.create(
group=group,
project=group.project,
type=Activity.SET_RESOLVED_IN_RELEASE,
ident=resolution.id,
data={'version': ''},
)
manager = EventManager(
make_event(
event_id='b' * 32,
checksum='a' * 32,
timestamp=time(),
release=old_release.version,
)
)
event = manager.save(1)
assert event.group_id == group.id
group = Group.objects.get(id=group.id)
assert group.status == GroupStatus.RESOLVED
activity = Activity.objects.get(id=activity.id)
assert activity.data['version'] == ''
assert GroupResolution.objects.filter(group=group).exists()
manager = EventManager(
make_event(
event_id='c' * 32,
checksum='a' * 32,
timestamp=time(),
release='b',
)
)
event = manager.save(1)
assert event.group_id == group.id
group = Group.objects.get(id=group.id)
assert group.status == GroupStatus.UNRESOLVED
activity = Activity.objects.get(id=activity.id)
assert activity.data['version'] == 'b'
assert not GroupResolution.objects.filter(group=group).exists()
activity = Activity.objects.get(
group=group,
type=Activity.SET_REGRESSION,
)
mock_send_activity_notifications_delay.assert_called_once_with(activity.id)
@mock.patch('sentry.integrations.example.integration.ExampleIntegration.sync_status_outbound')
@mock.patch('sentry.tasks.activity.send_activity_notifications.delay')
@mock.patch('sentry.event_manager.plugin_is_regression')
def test_marks_as_unresolved_with_new_release_with_integration(
self, plugin_is_regression, mock_send_activity_notifications_delay, mock_sync_status_outbound
):
plugin_is_regression.return_value = True
old_release = Release.objects.create(
version='a',
organization_id=self.project.organization_id,
date_added=timezone.now() - timedelta(minutes=30),
)
old_release.add_project(self.project)
manager = EventManager(
make_event(
event_id='a' * 32,
checksum='a' * 32,
timestamp=time() - 50000, # need to work around active_at
release=old_release.version,
)
)
event = manager.save(1)
group = event.group
org = group.organization
integration = Integration.objects.create(
provider='example',
name='Example',
)
integration.add_organization(org, self.user)
OrganizationIntegration.objects.filter(
integration_id=integration.id,
organization_id=group.organization.id,
).update(
config={
'sync_comments': True,
'sync_status_outbound': True,
'sync_status_inbound': True,
'sync_assignee_outbound': True,
'sync_assignee_inbound': True,
}
)
external_issue = ExternalIssue.objects.get_or_create(
organization_id=org.id,
integration_id=integration.id,
key='APP-%s' % group.id,
)[0]
GroupLink.objects.get_or_create(
group_id=group.id,
project_id=group.project_id,
linked_type=GroupLink.LinkedType.issue,
linked_id=external_issue.id,
relationship=GroupLink.Relationship.references,
)[0]
group.update(status=GroupStatus.RESOLVED)
resolution = GroupResolution.objects.create(
release=old_release,
group=group,
)
activity = Activity.objects.create(
group=group,
project=group.project,
type=Activity.SET_RESOLVED_IN_RELEASE,
ident=resolution.id,
data={'version': ''},
)
manager = EventManager(
make_event(
event_id='b' * 32,
checksum='a' * 32,
timestamp=time(),
release=old_release.version,
)
)
with self.tasks():
with self.feature({
'organizations:integrations-issue-sync': True,
}):
event = manager.save(1)
assert event.group_id == group.id
group = Group.objects.get(id=group.id)
assert group.status == GroupStatus.RESOLVED
activity = Activity.objects.get(id=activity.id)
assert activity.data['version'] == ''
assert GroupResolution.objects.filter(group=group).exists()
manager = EventManager(
make_event(
event_id='c' * 32,
checksum='a' * 32,
timestamp=time(),
release='b',
)
)
event = manager.save(1)
mock_sync_status_outbound.assert_called_once_with(
external_issue, False, event.group.project_id
)
assert event.group_id == group.id
group = Group.objects.get(id=group.id)
assert group.status == GroupStatus.UNRESOLVED
activity = Activity.objects.get(id=activity.id)
assert activity.data['version'] == 'b'
assert not GroupResolution.objects.filter(group=group).exists()
activity = Activity.objects.get(
group=group,
type=Activity.SET_REGRESSION,
)
mock_send_activity_notifications_delay.assert_called_once_with(activity.id)
@mock.patch('sentry.models.Group.is_resolved')
def test_unresolves_group_with_auto_resolve(self, mock_is_resolved):
mock_is_resolved.return_value = False
manager = EventManager(
make_event(
event_id='a' * 32,
checksum='a' * 32,
timestamp=1403007314,
)
)
with self.tasks():
event = manager.save(1)
mock_is_resolved.return_value = True
manager = EventManager(
make_event(
event_id='b' * 32,
checksum='a' * 32,
timestamp=1403007414,
)
)
with self.tasks():
event2 = manager.save(1)
assert event.group_id == event2.group_id
group = Group.objects.get(id=event.group.id)
assert group.active_at == event2.datetime != event.datetime
def test_invalid_transaction(self):
dict_input = {'messages': 'foo'}
manager = EventManager(make_event(
transaction=dict_input,
))
manager.normalize()
event = manager.save(1)
assert event.transaction is None
def test_transaction_as_culprit(self):
manager = EventManager(make_event(
transaction='foobar',
))
manager.normalize()
event = manager.save(1)
assert event.transaction == 'foobar'
assert event.culprit == 'foobar'
def test_culprit_is_not_transaction(self):
manager = EventManager(make_event(
culprit='foobar',
))
manager.normalize()
event1 = manager.save(1)
assert event1.transaction is None
assert event1.culprit == 'foobar'
def test_transaction_and_culprit(self):
manager = EventManager(make_event(
transaction='foobar',
culprit='baz',
))
manager.normalize()
event1 = manager.save(1)
assert event1.transaction == 'foobar'
assert event1.culprit == 'baz'
def test_first_release(self):
project_id = 1
event = self.make_release_event('1.0', project_id)
group = event.group
assert group.first_release.version == '1.0'
event = self.make_release_event('2.0', project_id)
group = event.group
assert group.first_release.version == '1.0'
def test_release_project_slug(self):
project = self.create_project(name='foo')
release = Release.objects.create(version='foo-1.0', organization=project.organization)
release.add_project(project)
event = self.make_release_event('1.0', project.id)
group = event.group
assert group.first_release.version == 'foo-1.0'
release_tag = [v for k, v in event.tags if k == 'sentry:release'][0]
assert release_tag == 'foo-1.0'
event = self.make_release_event('2.0', project.id)
group = event.group
assert group.first_release.version == 'foo-1.0'
def test_release_project_slug_long(self):
project = self.create_project(name='foo')
partial_version_len = VERSION_LENGTH - 4
release = Release.objects.create(
version='foo-%s' % ('a' * partial_version_len, ), organization=project.organization
)
release.add_project(project)
event = self.make_release_event('a' * partial_version_len, project.id)
group = event.group
assert group.first_release.version == 'foo-%s' % ('a' * partial_version_len, )
release_tag = [v for k, v in event.tags if k == 'sentry:release'][0]
assert release_tag == 'foo-%s' % ('a' * partial_version_len, )
def test_group_release_no_env(self):
project_id = 1
event = self.make_release_event('1.0', project_id)
release = Release.objects.get(version='1.0', projects=event.project_id)
assert GroupRelease.objects.filter(
release_id=release.id,
group_id=event.group_id,
environment='',
).exists()
# ensure we're not erroring on second creation
event = self.make_release_event('1.0', project_id)
def test_group_release_with_env(self):
manager = EventManager(
make_event(release='1.0', environment='prod', event_id='a' * 32)
)
event = manager.save(1)
release = Release.objects.get(version='1.0', projects=event.project_id)
assert GroupRelease.objects.filter(
release_id=release.id,
group_id=event.group_id,
environment='prod',
).exists()
manager = EventManager(
make_event(release='1.0', environment='staging', event_id='b' * 32)
)
event = manager.save(1)
release = Release.objects.get(version='1.0', projects=event.project_id)
assert GroupRelease.objects.filter(
release_id=release.id,
group_id=event.group_id,
environment='staging',
).exists()
def test_tsdb(self):
project = self.project
manager = EventManager(make_event(
fingerprint=['totally unique super duper fingerprint'],
environment='totally unique super duper environment',
))
event = manager.save(project.id)
def query(model, key, **kwargs):
return tsdb.get_sums(model, [key], event.datetime, event.datetime, **kwargs)[key]
assert query(tsdb.models.project, project.id) == 1
assert query(tsdb.models.group, event.group.id) == 1
environment_id = Environment.get_for_organization_id(
event.project.organization_id,
'totally unique super duper environment',
).id
assert query(tsdb.models.project, project.id, environment_id=environment_id) == 1
assert query(tsdb.models.group, event.group.id, environment_id=environment_id) == 1
@pytest.mark.xfail
def test_record_frequencies(self):
project = self.project
manager = EventManager(make_event())
event = manager.save(project.id)
assert tsdb.get_most_frequent(
tsdb.models.frequent_issues_by_project,
(event.project.id, ),
event.datetime,
) == {
event.project.id: [
(event.group_id, 1.0),
],
}
assert tsdb.get_most_frequent(
tsdb.models.frequent_projects_by_organization,
(event.project.organization_id, ),
event.datetime,
) == {
event.project.organization_id: [
(event.project_id, 1.0),
],
}
def test_event_user(self):
manager = EventManager(make_event(
event_id='a',
environment='totally unique environment',
**{'sentry.interfaces.User': {
'id': '1',
}}
))
manager.normalize()
with self.tasks():
event = manager.save(self.project.id)
environment_id = Environment.get_for_organization_id(
event.project.organization_id,
'totally unique environment',
).id
assert tsdb.get_distinct_counts_totals(
tsdb.models.users_affected_by_group,
(event.group.id, ),
event.datetime,
event.datetime,
) == {
event.group.id: 1,
}
assert tsdb.get_distinct_counts_totals(
tsdb.models.users_affected_by_project,
(event.project.id, ),
event.datetime,
event.datetime,
) == {
event.project.id: 1,
}
assert tsdb.get_distinct_counts_totals(
tsdb.models.users_affected_by_group,
(event.group.id, ),
event.datetime,
event.datetime,
environment_id=environment_id,
) == {
event.group.id: 1,
}
assert tsdb.get_distinct_counts_totals(
tsdb.models.users_affected_by_project,
(event.project.id, ),
event.datetime,
event.datetime,
environment_id=environment_id,
) == {
event.project.id: 1,
}
euser = EventUser.objects.get(
project_id=self.project.id,
ident='1',
)
assert event.get_tag('sentry:user') == euser.tag_value
# ensure event user is mapped to tags in second attempt
manager = EventManager(
make_event(
event_id='b',
**{'sentry.interfaces.User': {
'id': '1',
'name': 'jane',
}}
)
)
manager.normalize()
with self.tasks():
event = manager.save(self.project.id)
euser = EventUser.objects.get(id=euser.id)
assert event.get_tag('sentry:user') == euser.tag_value
assert euser.name == 'jane'
assert euser.ident == '1'
def test_event_user_unicode_identifier(self):
manager = EventManager(make_event(**{'sentry.interfaces.User': {'username': u'foô'}}))
manager.normalize()
with self.tasks():
manager.save(self.project.id)
euser = EventUser.objects.get(
project_id=self.project.id,
)
assert euser.username == u'foô'
def test_environment(self):
manager = EventManager(make_event(**{
'environment': 'beta',
}))
manager.normalize()
event = manager.save(self.project.id)
assert dict(event.tags).get('environment') == 'beta'
def test_invalid_environment(self):
manager = EventManager(make_event(**{
'environment': 'bad/name',
}))
manager.normalize()
event = manager.save(self.project.id)
assert dict(event.tags).get('environment') is None
@mock.patch('sentry.event_manager.eventstream.insert')
def test_group_environment(self, eventstream_insert):
release_version = '1.0'
def save_event():
manager = EventManager(make_event(**{
'event_id': uuid.uuid1().hex, # don't deduplicate
'environment': 'beta',
'release': release_version,
}))
manager.normalize()
return manager.save(self.project.id)
event = save_event()
# Ensure the `GroupEnvironment` record was created.
instance = GroupEnvironment.objects.get(
group_id=event.group_id,
environment_id=Environment.objects.get(
organization_id=self.project.organization_id,
name=event.get_tag('environment'),
).id,
)
assert Release.objects.get(id=instance.first_release_id).version == release_version
# Ensure that the first event in the (group, environment) pair is
# marked as being part of a new environment.
eventstream_insert.assert_called_with(
group=event.group,
event=event,
is_new=True,
is_sample=False,
is_regression=False,
is_new_group_environment=True,
primary_hash='acbd18db4cc2f85cedef654fccc4a4d8',
skip_consume=False,
)
event = save_event()
# Ensure that the next event in the (group, environment) pair is *not*
# marked as being part of a new environment.
eventstream_insert.assert_called_with(
group=event.group,
event=event,
is_new=False,
is_sample=False,
is_regression=None, # XXX: wut
is_new_group_environment=False,
primary_hash='acbd18db4cc2f85cedef654fccc4a4d8',
skip_consume=False,
)
def test_default_fingerprint(self):
manager = EventManager(make_event())
manager.normalize()
event = manager.save(self.project.id)
assert event.data.get('fingerprint') == ['{{ default }}']
def test_user_report_gets_environment(self):
project = self.create_project()
environment = Environment.objects.create(
project_id=project.id,
organization_id=project.organization_id,
name='production',
)
environment.add_project(project)
event_id = 'a' * 32
group = self.create_group(project=project)
UserReport.objects.create(
group=group,
project=project,
event_id=event_id,
name='foo',
email='[email protected]',
comments='It Broke!!!',
)
manager = EventManager(
make_event(
environment=environment.name,
event_id=event_id,
group=group))
manager.normalize()
manager.save(project.id)
assert UserReport.objects.get(event_id=event_id).environment == environment
def test_default_event_type(self):
manager = EventManager(make_event(message='foo bar'))
manager.normalize()
data = manager.get_data()
assert data['type'] == 'default'
event = manager.save(self.project.id)
group = event.group
assert group.data.get('type') == 'default'
assert group.data.get('metadata') == {
'title': 'foo bar',
}
def test_message_event_type(self):
manager = EventManager(
make_event(
**{
'message': '',
'sentry.interfaces.Message': {
'formatted': 'foo bar',
'message': 'foo %s',
'params': ['bar'],
}
}
)
)
manager.normalize()
data = manager.get_data()
assert data['type'] == 'default'
event = manager.save(self.project.id)
group = event.group
assert group.data.get('type') == 'default'
assert group.data.get('metadata') == {
'title': 'foo bar',
}
def test_error_event_type(self):
manager = EventManager(
make_event(
**{
'sentry.interfaces.Exception': {
'values': [{
'type': 'Foo',
'value': 'bar',
}],
},
}
)
)
manager.normalize()
data = manager.get_data()
assert data['type'] == 'error'
event = manager.save(self.project.id)
group = event.group
assert group.data.get('type') == 'error'
assert group.data.get('metadata') == {
'type': 'Foo',
'value': 'bar',
}
def test_csp_event_type(self):
manager = EventManager(
make_event(
**{
'sentry.interfaces.Csp': {
'effective_directive': 'script-src',
'blocked_uri': 'http://example.com',
},
}
)
)
manager.normalize()
data = manager.get_data()
assert data['type'] == 'csp'
event = manager.save(self.project.id)
group = event.group
assert group.data.get('type') == 'csp'
assert group.data.get('metadata') == {
'directive': 'script-src',
'uri': 'example.com',
'message': "Blocked 'script' from 'example.com'",
}
def test_sdk(self):
manager = EventManager(
make_event(**{
'sdk': {
'name': 'sentry-unity',
'version': '1.0',
},
})
)
manager.normalize()
event = manager.save(self.project.id)
assert event.data['sdk'] == {
'name': 'sentry-unity',
'version': '1.0',
}
def test_no_message(self):
# test that the message is handled gracefully
manager = EventManager(
make_event(
**{
'message': None,
'sentry.interfaces.Message': {
'message': 'hello world',
},
}
)
)
manager.normalize()
event = manager.save(self.project.id)
assert event.message == 'hello world'
def test_bad_message(self):
# test that the message is handled gracefully
manager = EventManager(make_event(**{
'message': 1234,
}))
manager.normalize()
event = manager.save(self.project.id)
assert event.message == '1234'
assert event.data['sentry.interfaces.Message'] == {
'message': '1234',
}
def test_message_attribute_goes_to_interface(self):
manager = EventManager(make_event(**{
'message': 'hello world',
}))
manager.normalize()
event = manager.save(self.project.id)
assert event.data['sentry.interfaces.Message'] == {
'message': 'hello world',
}
def test_message_attribute_goes_to_formatted(self):
# The combining of 'message' and 'sentry.interfaces.Message' is a bit
# of a compatibility hack, and ideally we would just enforce a stricter
# schema instead of combining them like this.
manager = EventManager(
make_event(
**{
'message': 'world hello',
'sentry.interfaces.Message': {
'message': 'hello world',
},
}
)
)
manager.normalize()
event = manager.save(self.project.id)
assert event.data['sentry.interfaces.Message'] == {
'message': 'hello world',
'formatted': 'world hello',
}
def test_message_attribute_interface_both_strings(self):
manager = EventManager(
make_event(
**{
'sentry.interfaces.Message': 'a plain string',
'message': 'another string',
}
)
)
manager.normalize()
event = manager.save(self.project.id)
assert event.data['sentry.interfaces.Message'] == {
'message': 'a plain string',
'formatted': 'another string',
}
def test_throws_when_matches_discarded_hash(self):
manager = EventManager(
make_event(
message='foo',
event_id='a' * 32,
fingerprint=['a' * 32],
)
)
with self.tasks():
event = manager.save(1)
group = Group.objects.get(id=event.group_id)
tombstone = GroupTombstone.objects.create(
project_id=group.project_id,
level=group.level,
message=group.message,
culprit=group.culprit,
data=group.data,
previous_group_id=group.id,
)
GroupHash.objects.filter(
group=group,
).update(
group=None,
group_tombstone_id=tombstone.id,
)
manager = EventManager(
make_event(
message='foo',
event_id='b' * 32,
fingerprint=['a' * 32],
)
)
mock_event_discarded = mock.Mock()
event_discarded.connect(mock_event_discarded)
mock_event_saved = mock.Mock()
event_saved.connect(mock_event_saved)
with self.tasks():
with self.assertRaises(HashDiscarded):
event = manager.save(1)
assert not mock_event_saved.called
assert_mock_called_once_with_partial(
mock_event_discarded,
project=group.project,
sender=EventManager,
signal=event_discarded,
)
def test_event_saved_signal(self):
mock_event_saved = mock.Mock()
event_saved.connect(mock_event_saved)
manager = EventManager(make_event(message='foo'))
manager.normalize()
event = manager.save(1)
assert_mock_called_once_with_partial(
mock_event_saved,
project=event.group.project,
sender=EventManager,
signal=event_saved,
)
def test_checksum_rehashed(self):
checksum = 'invalid checksum hash'
manager = EventManager(
make_event(**{
'checksum': checksum,
})
)
manager.normalize()
event = manager.save(self.project.id)
hashes = [gh.hash for gh in GroupHash.objects.filter(group=event.group)]
assert hashes == [md5_from_hash(checksum), checksum]
@mock.patch('sentry.event_manager.is_valid_error_message')
def test_should_filter_message(self, mock_is_valid_error_message):
TestItem = namedtuple('TestItem', 'value formatted result')
items = [
TestItem(
{'type': 'UnfilteredException'},
'UnfilteredException',
True,
),
TestItem(
{'value': 'This is an unfiltered exception.'},
'This is an unfiltered exception.',
True,
),
TestItem(
{'type': 'UnfilteredException', 'value': 'This is an unfiltered exception.'},
'UnfilteredException: This is an unfiltered exception.',
True,
),
TestItem(
{'type': 'FilteredException', 'value': 'This is a filtered exception.'},
'FilteredException: This is a filtered exception.',
False,
),
]
data = {
'sentry.interfaces.Exception': {
'values': [item.value for item in items]
},
}
manager = EventManager(data, project=self.project)
mock_is_valid_error_message.side_effect = [item.result for item in items]
assert manager.should_filter() == (True, FilterStatKeys.ERROR_MESSAGE)
assert mock_is_valid_error_message.call_args_list == [
mock.call(self.project, item.formatted) for item in items]
class ReleaseIssueTest(TransactionTestCase):
def setUp(self):
self.project = self.create_project()
self.release = Release.get_or_create(self.project, '1.0')
self.environment1 = Environment.get_or_create(self.project, 'prod')
self.environment2 = Environment.get_or_create(self.project, 'staging')
self.timestamp = 1403007314
def make_event(self, **kwargs):
result = {
'event_id': 'a' * 32,
'message': 'foo',
'timestamp': 1403007314.570599,
'level': logging.ERROR,
'logger': 'default',
'tags': [],
}
result.update(kwargs)
return result
def make_release_event(self, release_version='1.0',
environment_name='prod', project_id=1, **kwargs):
event = make_event(
release=release_version,
environment=environment_name,
event_id=uuid.uuid1().hex,
)
event.update(kwargs)
manager = EventManager(event)
with self.tasks():
event = manager.save(project_id)
return event
def convert_timestamp(self, timestamp):
date = datetime.fromtimestamp(timestamp)
date = date.replace(tzinfo=timezone.utc)
return date
def assert_release_project_environment(self, event, new_issues_count, first_seen, last_seen):
release = Release.objects.get(
organization=event.project.organization.id,
version=event.get_tag('sentry:release'),
)
release_project_envs = ReleaseProjectEnvironment.objects.filter(
release=release,
project=event.project,
environment=event.get_environment(),
)
assert len(release_project_envs) == 1
release_project_env = release_project_envs[0]
assert release_project_env.new_issues_count == new_issues_count
assert release_project_env.first_seen == self.convert_timestamp(first_seen)
assert release_project_env.last_seen == self.convert_timestamp(last_seen)
def test_different_groups(self):
event1 = self.make_release_event(
release_version=self.release.version,
environment_name=self.environment1.name,
project_id=self.project.id,
checksum='a' * 32,
timestamp=self.timestamp,
)
self.assert_release_project_environment(
event=event1,
new_issues_count=1,
last_seen=self.timestamp,
first_seen=self.timestamp,
)
event2 = self.make_release_event(
release_version=self.release.version,
environment_name=self.environment1.name,
project_id=self.project.id,
checksum='b' * 32,
timestamp=self.timestamp + 100,
)
self.assert_release_project_environment(
event=event2,
new_issues_count=2,
last_seen=self.timestamp + 100,
first_seen=self.timestamp,
)
def test_same_group(self):
event1 = self.make_release_event(
release_version=self.release.version,
environment_name=self.environment1.name,
project_id=self.project.id,
checksum='a' * 32,
timestamp=self.timestamp,
)
self.assert_release_project_environment(
event=event1,
new_issues_count=1,
last_seen=self.timestamp,
first_seen=self.timestamp,
)
event2 = self.make_release_event(
release_version=self.release.version,
environment_name=self.environment1.name,
project_id=self.project.id,
checksum='a' * 32,
timestamp=self.timestamp + 100,
)
self.assert_release_project_environment(
event=event2,
new_issues_count=1,
last_seen=self.timestamp + 100,
first_seen=self.timestamp,
)
def test_same_group_different_environment(self):
event1 = self.make_release_event(
release_version=self.release.version,
environment_name=self.environment1.name,
project_id=self.project.id,
checksum='a' * 32,
timestamp=self.timestamp,
)
self.assert_release_project_environment(
event=event1,
new_issues_count=1,
last_seen=self.timestamp,
first_seen=self.timestamp,
)
event2 = self.make_release_event(
release_version=self.release.version,
environment_name=self.environment2.name,
project_id=self.project.id,
checksum='a' * 32,
timestamp=self.timestamp + 100,
)
self.assert_release_project_environment(
event=event1,
new_issues_count=1,
last_seen=self.timestamp,
first_seen=self.timestamp,
)
self.assert_release_project_environment(
event=event2,
new_issues_count=1,
last_seen=self.timestamp + 100,
first_seen=self.timestamp + 100,
)
|
the-stack_0_10951 | try:
from django.contrib.auth import get_user_model as auth_get_user_model
except ImportError:
auth_get_user_model = None
from django.contrib.auth.models import User
from account.conf import settings
AUTH_USER_MODEL = getattr(settings, "AUTH_USER_MODEL", "auth.User")
def get_user_model(*args, **kwargs):
if auth_get_user_model is not None:
return auth_get_user_model(*args, **kwargs)
else:
return User
def get_user_lookup_kwargs(kwargs):
result = {}
username_field = getattr(get_user_model(), "USERNAME_FIELD", "username")
    for key, value in kwargs.items():
result[key.format(username=username_field)] = value
return result
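# Hedged usage sketch (the custom user model and values below are made up): the
# "{username}" placeholder in a lookup key expands to the active user model's
# USERNAME_FIELD, so the same calling code works for auth.User and custom models.
#
#     get_user_lookup_kwargs({"{username}__iexact": "alice@example.com"})
#     # -> {"username__iexact": "alice@example.com"} with the default auth.User
#     # -> {"email__iexact": "alice@example.com"} if USERNAME_FIELD == "email"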
|
the-stack_0_10952 | from ...Core.registers import Registers
from ...Core.commands import Commands
from ...Core.types import Types
from ...Runtime.gc import GC
""" Map: arithmetic operator in programming language = arithmetic operator in ASM """
binop_compare_map = {
'+': {
'operator': Commands.ADD,
'operands': [Registers.EAX, Registers.EBX]
},
'-': {
'operator': Commands.SUB,
'operands': [Registers.EAX, Registers.EBX]
},
'*': {
'operator': Commands.MUL,
'operands': [Registers.EBX]
},
'/': {
'operator': Commands.IDIV,
'operands': [Registers.EBX]
},
'%': {
'operator': Commands.IDIV,
'operands': [Registers.EBX]
}
}
def int_aexp(compiler, node):
""" Integer compilation """
compiler.code.add(Commands.MOV, [Registers.EAX, node.i])\
.add(Commands.PUSH, Registers.EAX)
return compiler.types.set(Types.INT)
def binop_aexp(compiler, node):
""" Arithmetic expression compilation """
node.left.compile_asm(compiler)
compiler.types.pop()
node.right.compile_asm(compiler)
compiler.types.pop()
compiler.code.add(Commands.POP, Registers.EBX)\
.add(Commands.POP, Registers.EAX)
if node.op == '/' or node.op == '%':
compiler.code.add(Commands.CDQ)
compiler.code.add(binop_compare_map[node.op]['operator'], binop_compare_map[node.op]['operands'])
if node.op == '%':
compiler.code.add(Commands.MOV, [Registers.EAX, Registers.EDX])
compiler.code.add(Commands.PUSH, Registers.EAX)
return compiler.types.set(Types.INT)
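# Rough sketch (not emitted verbatim by this module) of the instruction sequence
# binop_aexp produces for "a + b", using the operand map above; the exact textual
# output depends on how the Code backend renders commands:
#
#     POP EBX      ; right operand
#     POP EAX      ; left operand
#     ADD EAX, EBX
#     PUSH EAX
#
# For "/" and "%" a CDQ is emitted before IDIV EBX, and "%" additionally copies
# the remainder from EDX into EAX before the final PUSH.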
def var_aexp(compiler, node):
""" Variable compilation """
if node.context == 'assign':
gc = GC(compiler)
if compiler.environment.is_exist_local_var(node.name):
var = compiler.environment.get_local_var(node.name)
var_type = compiler.environment.get_local_var_runtime_type(node.name)
compiler.code.add(Commands.MOV, [Registers.EAX, 'dword [%s]' % Registers.ESP])
compiler.code.add(Commands.MOV, [var_type, Registers.EAX])
compiler.environment.update_local_var_type(node.name, node.type)
compiler.code.add(Commands.MOV, [Registers.EAX, var])
compiler.code.add(Commands.MOV, [Registers.EBX, var_type])
gc.decrement()
else:
var = compiler.environment.add_local_var(node.type, node.name)
var_type = compiler.environment.get_local_var_runtime_type(node.name)
if compiler.environment.defined_object is not None:
compiler.environment.set_link_object(var, compiler.environment.defined_object)
compiler.environment.defined_object = None
compiler.code.add(Commands.MOV, [Registers.EAX, 'dword [%s + 4]' % Registers.ESP])
compiler.code.add(Commands.MOV, [Registers.EBX, 'dword [%s]' % Registers.ESP])
gc.increment()
compiler.code.add(Commands.POP, var_type)
compiler.code.add(Commands.POP, var)
else:
compiler.code.add(Commands.MOV, [Registers.EAX, compiler.environment.get_local_var(node.name)])\
.add(Commands.PUSH, Registers.EAX)
runtime_var_type = compiler.environment.get_local_var_runtime_type(node.name)
compiler.types.set(runtime_var_type)
var_type = compiler.environment.get_local_var_type(node.name)
return var_type
|
the-stack_0_10953 | from django.db import models
from django.urls import reverse
class Post(models.Model):
title = models.CharField(
verbose_name='title',
max_length=255,
help_text="The page title as you'd like it to be seen by the public",
)
body = models.TextField(
verbose_name='content body',
)
created = models.DateTimeField(
verbose_name='created time',
auto_now_add=True,
)
class Meta:
ordering = ['-created']
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('post-detail', args=[self.pk, ])
class AbstractAttachment(models.Model):
name = models.CharField(
max_length=255,
null=True,
blank=True,
verbose_name='file name',
help_text="Defaults to filename, if left blank",
)
file = models.ImageField(
verbose_name='uploaded file',
upload_to="attachment",
)
created = models.DateTimeField(
verbose_name='created time',
auto_now_add=True,
)
class Meta:
abstract = True
class Attachment(AbstractAttachment):
post = models.ForeignKey(
'fileupload.Post',
verbose_name='post',
related_name='attachments',
on_delete=models.CASCADE,
blank=True,
null=True,
)
class Meta:
verbose_name = 'attachment'
verbose_name_plural = 'attachments'
ordering = ['-created']
def __str__(self):
return self.name
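# Hedged usage sketch (assumes this app is installed as "fileupload", matching the
# ForeignKey reference above, that migrations have been applied, and that a URL
# pattern named 'post-detail' exists):
#
#     post = Post.objects.create(title="Hello", body="First post")
#     Attachment.objects.create(post=post, name="screenshot", file="attachment/shot.png")
#     post.get_absolute_url()  # -> reverse('post-detail', args=[post.pk])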
|
the-stack_0_10955 | from tkinter import *
import random
class Window:
def __init__(self, master):
self.master = master
self.guess_number = None
self.cows = 0
self.bulls = 0
master.title("Bulls and Cows")
self.label = Label(master, text="Let`s play Bulls and Cows game!")
self.label.grid(row=0, column=0, columnspan=2, sticky=W + E)
self.startBut = Button(master, text="Start game", command=self.start, state=NORMAL)
self.startBut.grid(row=1, column=0)
self.closeBut = Button(master, text="Close", command=master.quit)
self.closeBut.grid(row=1, column=2)
self.helpBut = Button(master, text='help', command=self.help)
self.helpBut.grid(row=1, column=1)
vcmd = master.register(self.str_checking) # we have to wrap the command
self.entry = Entry(master, validate="key", validatecommand=(vcmd, '%P'))
def start(self):
g_numb = []
while len(g_numb) <= 3:
rand_numb = random.randint(0, 9)
if rand_numb not in g_numb: g_numb.append(rand_numb)
self.g_numb = g_numb
vcmd = self.master.register(self.str_checking) # we have to wrap the command
self.entry = Entry(self.master, validate="key", validatecommand=(vcmd, '%P'))
self.entry.grid(row=2, column=0, columnspan=2, sticky=W + E)
print(g_numb)
self.comBut = Button(self.master, text='Try It', command=self.bulls_cows)
self.comBut.grid(row=3, column=0, columnspan=2, sticky=W + E)
self.startBut.configure(state=DISABLED)
return g_numb
def str_checking(self, input_numbers):
if not input_numbers:
self.guess_number = None
return True
try:
guess_number = int(input_numbers)
            if 0 <= guess_number <= 9876:
self.guess_number = guess_number
return True
else:
return False
except ValueError:
return False
def bulls_cows(self):
print(self.guess_number)
if not type(self.guess_number) is list:
self.guess_number = [int(number) for number in str(self.guess_number)]
self.cows = 0
for tip_i in self.guess_number:
for guess_i in self.g_numb:
if tip_i == guess_i:
self.cows += 1
self.bulls = 0
for index, _ in enumerate(self.g_numb):
if self.guess_number[index] == self.g_numb[index]:
self.bulls += 1
self.cows -= 1
if self.bulls == 4:
self.message = "Congratulations, You guessed all 4 bulls!\n" \
"Do you want play another game?"
self.resBut = Button(self.master, text='Play again', command=self.reset)
self.resBut.grid(row=5, column=0, columnspan=2, sticky=W + E)
else:
self.message = f"You guessed {self.bulls} bulls and {self.cows} cows."
self.label_text = StringVar()
self.label_text.set(self.message)
self.label = Label(self.master, textvariable=self.label_text)
self.label.grid(row=4, column=0, columnspan=2, sticky=W + E)
def reset(self):
self.message = ""
self.label_text.set(self.message)
self.start()
def help(self):
help_win = Toplevel(root)
help_win.title('Manual')
help_win.geometry("640x400")
display = Label(help_win, text="""The numerical version of the game is usually played with 4 digits, but can
also be played with 3 or any other number of digits.\n
On a sheet of paper, the players each write a 4-digit secret number. The digits must be all different. Then, in turn,
the players try to guess their opponent's number who gives the number of matches. If the matching digits are in their
right positions, they are "bulls", if in different positions, they are "cows". Example:\n
Secret number: 4271\n
Opponent's try: 1234\n
Answer: 1 bull and 2 cows. (The bull is "2", the cows are "4" and "1".)\n
The first one to reveal the other's secret number in the least number of guesses wins the game.\n
The game may also be played by two teams of players, with the team members discussing their strategy\n
before selecting a move.
A computer program moo, written in 1970 by J. M. Grochow at MIT in the PL/I computer language for the Multics \n
operating system, was amongst the first Bulls and Cows computer implementations, inspired by a similar program written \n
by Frank King in 1968 and running on the Cambridge University mainframe. Because the game has simple rules, \n
while it is difficult and entertaining, there are many computer variants; it is often included in telephones and PDAs.
It is proven that any number could be solved within seven turns. \n
Minimal average game length is 26274/5040=5.2131 turns
https://en.wikipedia.org/wiki/Bulls_and_Cows""")
display.pack()
root = Tk()
app = Window(root)
root.mainloop()
|
the-stack_0_10958 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from typing import Callable
import kfp
from kfp import LocalClient, run_pipeline_func_locally
InputPath = kfp.components.InputPath()
OutputPath = kfp.components.OutputPath()
BASE_IMAGE = "python:3.7"
def light_component(
base_image: str = BASE_IMAGE,
):
"""Decorator of kfp light component with customized parameters
Usage:
```python
@light_component(base_image="python:3.7")
def a_component(src: kfp.components.InputPath(), ...):
...
```
"""
def wrapper(func: Callable):
return kfp.components.create_component_from_func(
func=func,
base_image=base_image,
)
return wrapper
@light_component()
def hello(name: str):
print(f"hello {name}")
@light_component()
def local_loader(src: str, dst: kfp.components.OutputPath()):
import os
import shutil
if os.path.exists(src):
shutil.copyfile(src, dst)
@light_component()
def flip_coin(dst: kfp.components.OutputPath()):
import random
result = "head" if random.randint(0, 1) == 0 else "tail"
with open(dst, "w") as f:
f.write(result)
@light_component()
def list(dst: kfp.components.OutputPath()):
import json
with open(dst, "w") as f:
json.dump(["hello", "world", "kfp"], f)
@light_component()
def component_connect_demo(
src: kfp.components.InputPath(), dst: kfp.components.OutputPath()
):
with open(src, "r") as f:
line = f.readline()
print(f"read first line: {line}")
with open(dst, "w") as fw:
fw.write(f"{line} copied")
class LocalRunnerTest(unittest.TestCase):
def setUp(self):
import tempfile
with tempfile.NamedTemporaryFile('w', delete=False) as f:
self.temp_file_path = f.name
f.write("hello world")
def test_run_local(self):
def _pipeline(name: str):
hello(name)
run_pipeline_func_locally(
_pipeline,
{"name": "world"},
execution_mode=LocalClient.ExecutionMode("local"),
)
def test_local_file(self):
def _pipeline(file_path: str):
local_loader(file_path)
run_result = run_pipeline_func_locally(
_pipeline,
{"file_path": self.temp_file_path},
execution_mode=LocalClient.ExecutionMode("local"),
)
output_file_path = run_result.get_output_file("local-loader")
with open(output_file_path, "r") as f:
line = f.readline()
assert "hello" in line
def test_condition(self):
def _pipeline():
_flip = flip_coin()
with kfp.dsl.Condition(_flip.output == "head"):
hello("head")
with kfp.dsl.Condition(_flip.output == "tail"):
hello("tail")
run_pipeline_func_locally(
_pipeline, {}, execution_mode=LocalClient.ExecutionMode("local")
)
def test_for(self):
@light_component()
def cat(item, dst: OutputPath):
with open(dst, "w") as f:
f.write(item)
def _pipeline():
with kfp.dsl.ParallelFor(list().output) as item:
cat(item)
run_pipeline_func_locally(
_pipeline, {}, execution_mode=LocalClient.ExecutionMode("local")
)
def test_connect(self):
def _pipeline():
_local_loader = local_loader(self.temp_file_path)
component_connect_demo(_local_loader.output)
run_result = run_pipeline_func_locally(
_pipeline, {}, execution_mode=LocalClient.ExecutionMode("local")
)
output_file_path = run_result.get_output_file("component-connect-demo")
with open(output_file_path, "r") as f:
line = f.readline()
assert "copied" in line
def test_command_argument_in_any_format(self):
def echo():
return kfp.dsl.ContainerOp(
name="echo",
image=BASE_IMAGE,
command=["echo", "hello world", ">", "/tmp/outputs/output_file"],
arguments=[],
file_outputs={"output": "/tmp/outputs/output_file"},
)
def _pipeline():
_echo = echo()
component_connect_demo(_echo.output)
run_pipeline_func_locally(
_pipeline, {}, execution_mode=LocalClient.ExecutionMode("local")
)
@unittest.skip('docker is not installed in CI environment.')
def test_execution_mode_exclude_op(self):
@light_component(base_image="image_not_exist")
def cat_on_image_not_exist(name: str, dst: OutputPath):
with open(dst, "w") as f:
f.write(name)
def _pipeline():
cat_on_image_not_exist("exclude ops")
run_result = run_pipeline_func_locally(
_pipeline,
{},
execution_mode=LocalClient.ExecutionMode(mode="docker"),
)
output_file_path = run_result.get_output_file("cat-on-image-not-exist")
import os
assert not os.path.exists(output_file_path)
run_result = run_pipeline_func_locally(
_pipeline,
{},
execution_mode=LocalClient.ExecutionMode(
mode="docker", ops_to_exclude=["cat-on-image-not-exist"]
),
)
output_file_path = run_result.get_output_file("cat-on-image-not-exist")
with open(output_file_path, "r") as f:
line = f.readline()
assert "exclude ops" in line
|
the-stack_0_10960 | # -*- encoding: utf-8 -*-
import re
import sys
from svb.multipart_data_generator import MultipartDataGenerator
from svb.test.helper import SvbTestCase
class MultipartDataGeneratorTests(SvbTestCase):
def run_test_multipart_data_with_file(self, test_file):
params = {
"key1": b"ASCII value",
"key2": u"Üñìçôdé value",
"key3": test_file
}
generator = MultipartDataGenerator()
generator.add_params(params)
http_body = generator.get_post_data()
if sys.version_info >= (3,):
http_body = http_body.decode('utf-8')
self.assertTrue(re.search(
r"Content-Disposition: form-data; name=\"key1\"", http_body))
self.assertTrue(re.search(r"ASCII value", http_body))
self.assertTrue(re.search(
r"Content-Disposition: form-data; name=\"key2\"", http_body))
self.assertTrue(re.search(r"Üñìçôdé value", http_body))
self.assertTrue(re.search(
r"Content-Disposition: form-data; name=\"key3\"; "
r"filename=\".+\"",
http_body))
self.assertTrue(re.search(
r"Content-Type: application/octet-stream", http_body))
test_file.seek(0)
file_contents = test_file.read()
if sys.version_info >= (3,) and isinstance(file_contents, bytes):
file_contents = file_contents.decode('utf-8')
self.assertNotEqual(-1, http_body.find(file_contents))
def test_multipart_data_file_text(self):
with open(__file__, mode='r') as test_file:
self.run_test_multipart_data_with_file(test_file)
def test_multipart_data_file_binary(self):
with open(__file__, mode='rb') as test_file:
self.run_test_multipart_data_with_file(test_file)
|
the-stack_0_10962 | import plotly.graph_objects as go
def pad_list(l, n):
pad = [1] * (n - len(l))
return l + pad
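# e.g. pad_list([3, 5], 4) -> [3, 5, 1, 1] (pads the list with 1s up to length n)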
def overlaid_area(df, x_column, y_column, filename, category):
df = df.sort_values(x_column)
dose_1 = df[df[category] == 'Primeira dose']
x_dose_1 = dose_1[x_column].tolist()
y_dose_1 = dose_1[y_column].cumsum()
completa = df[df[category] == 'Vacinação completa']
x_completa = completa[x_column].tolist()
y_completa = completa[y_column].cumsum()
azul_escuro = "rgb(0, 102, 255)"
azul = "rgb(102, 204, 255)"
fig = go.Figure()
fig.add_trace(go.Scatter(x=x_dose_1, y=y_dose_1, fill='tozeroy', legendgroup='a', name='Primeira dose', fillcolor=azul)) # fill down to xaxis
fig.add_trace(go.Scatter(x=x_completa, y=y_completa, fill='tozeroy', legendgroup='a', name='Vacinação completa', fillcolor=azul_escuro)) # fill to trace0 y
fig.update_layout(
showlegend=True,
xaxis_type='category',
xaxis_title="Semana - início em 17/01/2021 até 20/06/2021",
yaxis_title="Total de vacinas aplicadas",
font=dict(
size=25))
return fig
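# Hedged usage sketch (column names are assumptions, not from the original module):
# df needs one row per (week, category) pair, and the category column must contain
# the literal values 'Primeira dose' and 'Vacinação completa' filtered above. Note
# that the filename argument is currently unused by the function.
#
#     fig = overlaid_area(df, x_column="semana", y_column="doses",
#                         filename="vacinas.html", category="dose")
#     fig.show()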
|
the-stack_0_10963 | from gluoncv.data import COCOInstance, COCOSegmentation
from pycocotools.coco import COCO
import numpy as np
from PIL import Image, ImageOps
import os
import pickle
import random
from io import BytesIO
def randomJPEGcompression(image, min_quality=75):
qf = random.randrange(min_quality, 100)
outputIoStream = BytesIO()
image = Image.fromarray(image)
    image.save(outputIoStream, "JPEG", quality=qf, optimize=True)
outputIoStream.seek(0)
return np.array(Image.open(outputIoStream))
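# Minimal sketch (hypothetical input): re-encode a uint8 RGB array through an
# in-memory JPEG at a random quality in [min_quality, 100), returning an array of
# the same height/width.
#
#     frame = np.zeros((64, 64, 3), dtype="uint8")
#     degraded = randomJPEGcompression(frame, min_quality=75)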
def random_alter_background(img_np, mask_np, white_prob=0.3):
if random.random()<white_prob:
# gray or while
if random.random()<0.5:
bg_value = np.random.randint(220, 250, size=(1,1,1), dtype="uint8")
else:
bg_value = np.random.randint(250, 256, size=(1,1,1), dtype="uint8")
else:
# random color
bg_value = np.random.randint(0,255,size=(1,1,3), dtype="uint8")
# replace the background
bg_mask = mask_np[:,:,None]==0
bg = bg_value*bg_mask
img_new_np = img_np*(~bg_mask)+bg
return img_new_np
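# Sketch of the intended effect (shapes are illustrative): wherever mask_np == 0 the
# pixel is replaced by a single random colour (or a near-white grey value with
# probability white_prob); wherever mask_np > 0 the original pixel is kept.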
class COCOiMaterialist(COCOInstance):
CLASSES=['shirt, blouse', 'top, t-shirt, sweatshirt', 'sweater', 'cardigan', 'jacket',
'vest', 'pants', 'shorts', 'skirt', 'coat', 'dress', 'jumpsuit', 'cape',
'glasses', 'hat', 'headband, head covering, hair accessory', 'tie', 'glove',
'watch', 'belt', 'leg warmer', 'tights, stockings', 'sock', 'shoe',
'bag, wallet', 'scarf', 'umbrella', 'hood', 'collar', 'lapel', 'epaulette',
'sleeve', 'pocket', 'neckline', 'buckle', 'zipper', 'applique', 'bead',
'bow', 'flower', 'fringe', 'ribbon', 'rivet', 'ruffle', 'sequin', 'tassel']
def _load_jsons(self):
"""Load all image paths and labels from JSON annotation files into buffer."""
items = []
labels = []
segms = []
for split in self._splits:
anno = os.path.join(self._root, 'annotations', split) + '.json'
_coco = COCO(anno)
self._coco.append(_coco)
classes = [c['name'] for c in _coco.loadCats(_coco.getCatIds())]
if not classes == self.classes:
                raise ValueError("Incompatible category names with COCO: {}".format(classes))
assert classes == self.classes
json_id_to_contiguous = {
v: k for k, v in enumerate(_coco.getCatIds())}
if self.json_id_to_contiguous is None:
self.json_id_to_contiguous = json_id_to_contiguous
self.contiguous_id_to_json = {
v: k for k, v in self.json_id_to_contiguous.items()}
else:
assert self.json_id_to_contiguous == json_id_to_contiguous
# iterate through the annotations
image_ids = sorted(_coco.getImgIds())
for entry in _coco.loadImgs(image_ids):
filename = entry['file_name']
dirname = split.split('_')[-1] # "train" or "val"
abs_path = os.path.join(self._root, dirname, filename)
if not os.path.exists(abs_path):
raise IOError('Image: {} not exists.'.format(abs_path))
label, segm = self._check_load_bbox(_coco, entry)
# skip images without objects
if self._skip_empty and label is None:
continue
items.append(abs_path)
labels.append(label)
segms.append(segm)
return items, labels, segms
def _check_load_bbox(self, coco, entry):
"""Check and load ground-truth labels"""
ann_ids = coco.getAnnIds(imgIds=entry['id'], iscrowd=None)
objs = coco.loadAnns(ann_ids)
# check valid bboxes
valid_objs = []
valid_segs = []
width = entry['width']
height = entry['height']
for obj in objs:
if obj.get('ignore', 0) == 1:
continue
# crowd objs cannot be used for segmentation
if obj.get('iscrowd', 0) == 1:
continue
# need accurate floating point box representation
x1, y1, w, h = obj['bbox']
x2, y2 = x1 + np.maximum(0, w), y1 + np.maximum(0, h)
# clip to image boundary
x1 = np.minimum(width, np.maximum(0, x1))
y1 = np.minimum(height, np.maximum(0, y1))
x2 = np.minimum(width, np.maximum(0, x2))
y2 = np.minimum(height, np.maximum(0, y2))
# require non-zero seg area and more than 1x1 box size
if obj['area'] > self._min_object_area and x2 > x1 and y2 > y1 \
and (x2 - x1) * (y2 - y1) >= 4:
contiguous_cid = self.json_id_to_contiguous[obj['category_id']]
valid_objs.append([x1, y1, x2, y2, contiguous_cid])
segs = obj['segmentation'] # polygon or RLE
assert isinstance(segs, list) or isinstance(segs, dict), '{}'.format(obj.get('iscrowd', 0))
if isinstance(segs, list):
valid_segs.append([np.asarray(p).reshape(-1, 2).astype('float32')
for p in segs if len(p) >= 6])
else:
valid_segs.append(segs)
# there is no easy way to return a polygon placeholder: None is returned
# in validation, None cannot be used for batchify -> drop label in transform
        # in training: empty images should be skipped
if not valid_objs:
valid_objs = None
valid_segs = None
else:
valid_objs = np.asarray(valid_objs).astype('float32')
return valid_objs, valid_segs
class iMaterialistSegmentation(COCOSegmentation):
"""only using categories less than 13 for segmentation"""
CAT_LIST = [-1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
NUM_CLASS = 14
def __init__(self, root=os.path.expanduser('datasets/imaterialist'),
split='train', mode=None, transform=None, tta=None, alter_bg=False, **kwargs):
super(COCOSegmentation, self).__init__(root, split, mode, transform, **kwargs)
from pycocotools import mask
if split == 'train':
print('train set')
ann_file = os.path.join(root, 'annotations/rle_instances_train.json')
ids_file = os.path.join(root, 'annotations/train_ids.mx')
self.root = os.path.join(root, 'train')
else:
print('val set')
ann_file = os.path.join(root, 'annotations/rle_instances_val.json')
ids_file = os.path.join(root, 'annotations/val_ids.mx')
self.root = os.path.join(root, 'val')
self.coco = COCO(ann_file)
self.coco_mask = mask
if os.path.exists(ids_file):
with open(ids_file, 'rb') as f:
self.ids = pickle.load(f)
else:
ids = list(self.coco.imgs.keys())
self.ids = self._preprocess(ids, ids_file)
self.transform = transform
self.alter_bg = alter_bg
if self.alter_bg:
self.NUM_CLASS = 2
if self.mode != "train":
self.tta = tta
def _gen_seg_mask(self, target, h, w):
mask = np.zeros((h, w), dtype=np.uint8)
coco_mask = self.coco_mask
for instance in target:
m = coco_mask.decode(instance['segmentation'])
cat = instance['category_id']
if cat in self.CAT_LIST:
c = self.CAT_LIST.index(cat)
else:
continue
if len(m.shape) < 3:
mask[:, :] += (mask == 0) * (m * c)
else:
mask[:, :] += (mask == 0) * (((np.sum(m, axis=2)) > 0) * c).astype(np.uint8)
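        # note: the (mask == 0) factor above means each pixel keeps the first
        # category assigned to it; later overlapping instances do not overwrite it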
return mask
@property
def classes(self):
"""Category names."""
if self.alter_bg:
return ('background', 'garment')
else:
return ('background', 'shirt, blouse', 'top, t-shirt, sweatshirt', 'sweater',
'cardigan', 'jacket', 'vest', 'pants', 'shorts', 'skirt', 'coat',
'dress', 'jumpsuit', 'cape')
def _sync_pad(self, img, mask):
w, h = img.size
long_size = max(w, h)
padh = long_size - h
padw = long_size - w
im_pad = ImageOps.expand(img, border=(0, 0, padw, padh), fill=0)
mask_pad = ImageOps.expand(mask, border=(0, 0, padw, padh), fill=0)
# region for padding (set -1 later)
ignore_w = round((1-padw/float(long_size))*self.crop_size) if padw != 0 else None
ignore_h = round((1-padh/float(long_size))*self.crop_size) if padh != 0 else None
return im_pad, mask_pad, (ignore_w, ignore_h)
def _resize_short_within(self, img, short, max_size, mult_base=1, interp=Image.BILINEAR):
"""Resizes the original image by setting the shorter edge to size
and setting the longer edge accordingly. Also this function will ensure
the new image will not exceed ``max_size`` even at the longer side.
Parameters
----------
img : PIL.Image
The original image.
short : int
Resize shorter side to ``short``.
max_size : int
Make sure the longer side of new image is smaller than ``max_size``.
mult_base : int, default is 1
Width and height are rounded to multiples of `mult_base`.
interp : default is Image.BILINEAR
Returns
-------
PIL.Image
An 'PIL.Image' containing the resized image.
"""
w, h = img.size
im_size_min, im_size_max = (h, w) if w > h else (w, h)
scale = float(short) / float(im_size_min)
if np.round(scale * im_size_max / mult_base) * mult_base > max_size:
# fit in max_size
scale = float(np.floor(max_size / mult_base) * mult_base) / float(im_size_max)
new_w, new_h = (int(np.round(w * scale / mult_base) * mult_base),
int(np.round(h * scale / mult_base) * mult_base))
img = img.resize((new_w, new_h), interp)
return img
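    # Worked example (illustrative numbers): with short=600, max_size=1000 and a
    # 1200x400 (w x h) input, the initial scale 600/400 = 1.5 would give 1800x600,
    # exceeding max_size, so the scale is clamped to 1000/1200 and the image is
    # resized to roughly 1000x333.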
def _testval_sync_transform(self, img, mask, padding=True):
""" resize image and mask while keeping ratio"""
if padding:
# padding and resize
img, mask, keep_size = self._sync_pad(img, mask)
img = img.resize((self.crop_size, self.crop_size), Image.BILINEAR)
mask = mask.resize(img.size, Image.NEAREST)
else:
# resize without padding
short_size = self.crop_size*1.75
if max(img.size) > short_size:
img = self._resize_short_within(img, short_size, short_size*2)
mask = mask.resize(img.size, Image.NEAREST)
# final transform
img, mask = self._img_transform(img), self._mask_transform(mask)
        if padding and keep_size != (None, None):
            # square inputs get no padding, so there is nothing to mask out
            mask[keep_size[1]:, keep_size[0]:] = -1
return img, mask
def _random_alter_background(self, img, mask):
# alter background and random jpeg quality
img_np = img.asnumpy().astype('uint8')
mask_np = mask.asnumpy()
img_new_np = random_alter_background(img_np, mask_np)
img_new_np = randomJPEGcompression(img_new_np)
img_new = self._img_transform(img_new_np)
return img_new
def __getitem__(self, index):
coco = self.coco
img_id = self.ids[index]
img_metadata = coco.loadImgs(img_id)[0]
path = img_metadata['file_name']
img = Image.open(os.path.join(self.root, path)).convert('RGB')
cocotarget = coco.loadAnns(coco.getAnnIds(imgIds=img_id))
mask = self._gen_seg_mask(
cocotarget, img_metadata['height'], img_metadata['width'])
if self.alter_bg:
mask = (mask>0).astype('uint8')
mask = Image.fromarray(mask)
# synchrosized transform
if self.mode == 'train':
img, mask = self._sync_transform(img, mask)
if self.alter_bg and (random.random() < self.alter_bg):
img = self._random_alter_background(img, mask)
elif self.mode == 'val':
img, mask = self._val_sync_transform(img, mask)
else:
assert self.mode == 'testval'
# resize without padding for memory reduction when test time augmentation
img, mask = self._testval_sync_transform(img, mask, not self.tta)
# general resize, normalize and toTensor
if self.transform is not None:
img = self.transform(img)
return img, mask |
the-stack_0_10966 | """Important Bodies.
Contains some predefined bodies of the Solar System:
* Sun (☉)
* Earth (♁)
* Moon (☾)
* Mercury (☿)
* Venus (♀)
* Mars (♂)
* Jupiter (♃)
* Saturn (♄)
* Uranus (⛢)
* Neptune (♆)
* Pluto (♇)
and a way to define new bodies (:py:class:`~Body` class).
Data references can be found in :py:mod:`~einsteinpy.constant`
"""
import astropy.units as u
from einsteinpy import constant
from einsteinpy.coordinates import CartesianDifferential
class Body:
"""
Class to create a generic Body
"""
@u.quantity_input(mass=u.kg, R=u.km)
def __init__(
self,
name="Generic Body",
mass=0 * u.kg,
R=0 * u.km,
differential=None,
a=0 * u.m,
q=0 * u.C,
parent=None,
):
"""
Parameters
----------
name : str
Name/ID of the body
mass : ~astropy.units.kg
Mass of the body
        R : ~astropy.units.km
            Radius of the body
        differential : ~einsteinpy.coordinates, optional
            Complete coordinates of the body
        a : ~astropy.units.m, optional
            Spin factor of massive body. Should be less than half of the Schwarzschild radius.
        q : ~astropy.units.C, optional
            Charge on the massive body
        is_attractor : Bool, optional
            Whether this body is acting as an attractor or not
parent : Body, optional
The parent object of the body.
"""
if differential:
if differential.system == "Cartesian":
self.pos_vec = [differential.x, differential.y, differential.z]
self.vel_vec = [differential.v_x, differential.v_y, differential.v_z]
else:
self.pos_vec = [differential.r, differential.theta, differential.phi]
self.vel_vec = [differential.v_r, differential.v_t, differential.v_p]
self.a = a
self.R = R
self.q = q
self.mass = mass
self.name = name
self.coordinates = differential
self.parent = parent
def __repr__(self):
return (
"'Body ( name: ({0}), mass: ({1}), radius: ({2}), coordinates: ({3}), spin factor: ({4}), charge: ({"
"5}) )'".format(
self.name, self.mass, self.R, self.coordinates, self.a, self.q
)
)
def __str__(self):
return (
"Body ( name: ({0}), mass: ({1}), radius: ({2}), coordinates: ({3}), spin factor: ({4}), charge: ({"
"5}) )".format(
self.name, self.mass, self.R, self.coordinates, self.a, self.q
)
)
class _Sun(Body):
def __init__(self):
parent = None
name = "Sun"
R = constant.R_sun
mass = constant.Solar_Mass
super(_Sun, self).__init__(name=name, mass=mass, R=R, parent=parent)
Sun = _Sun()
class _Earth(Body):
def __init__(self):
parent = Sun
name = "Earth"
        R = 6371 * u.km
mass = 5.97219e24 * u.kg
super(_Earth, self).__init__(name=name, mass=mass, R=R, parent=parent)
Earth = _Earth()
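# Hedged usage sketch (the numbers are illustrative, not from the original module):
# a user-defined body parented to Earth; mass and R must carry astropy units
# because of the @u.quantity_input decorator on Body.__init__.
#
#     iss = Body(name="ISS", mass=4.2e5 * u.kg, R=0.1 * u.km, parent=Earth)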
class _Moon(Body):
def __init__(self):
parent = Earth
name = "Moon"
R = 1737.5 * u.km
mass = 7.34767309e22 * u.kg
super(_Moon, self).__init__(name=name, mass=mass, R=R, parent=parent)
Moon = _Moon()
|
the-stack_0_10968 | import re
import typing
import pytest
from dagster import (
Any,
DagsterInvalidConfigDefinitionError,
DagsterInvalidConfigError,
DagsterInvalidDefinitionError,
Field,
Float,
Int,
List,
ModeDefinition,
Noneable,
Permissive,
PipelineDefinition,
ResourceDefinition,
Set,
String,
Tuple,
composite_solid,
execute_pipeline,
execute_solid,
pipeline,
solid,
)
from dagster.config.errors import DagsterEvaluationErrorReason
from dagster.config.field_utils import convert_potential_field
from dagster.config.validate import process_config, validate_config
def test_noop_config():
assert Field(Any)
def test_int_field():
config_field = convert_potential_field({'int_field': Int})
assert validate_config(config_field.config_type, {'int_field': 1}).value == {'int_field': 1}
def test_float_field():
config_field = convert_potential_field({'float_field': Float})
assert validate_config(config_field.config_type, {'float_field': 1.0}).value == {
'float_field': 1.0
}
assert process_config(config_field.config_type, {'float_field': 1.0}).value == {
'float_field': 1.0
}
assert validate_config(config_field.config_type, {'float_field': 1}).value == {'float_field': 1}
assert process_config(config_field.config_type, {'float_field': 1}).value == {
'float_field': 1.0
}
def assert_config_value_success(config_type, config_value, expected):
result = process_config(config_type, config_value)
assert result.success
assert result.value == expected
def assert_eval_failure(config_type, value):
assert not validate_config(config_type, value).success
def test_int_fails():
config_field = convert_potential_field({'int_field': Int})
assert_eval_failure(config_field.config_type, {'int_field': 'fjkdj'})
assert_eval_failure(config_field.config_type, {'int_field': True})
def test_default_arg():
config_field = convert_potential_field(
{'int_field': Field(Int, default_value=2, is_required=False)}
)
assert_config_value_success(config_field.config_type, {}, {'int_field': 2})
def test_default_float_arg():
config_field = convert_potential_field(
{'float_field': Field(Float, default_value=2.0, is_required=False)}
)
assert_config_value_success(config_field.config_type, {}, {'float_field': 2.0})
config_field = convert_potential_field(
{'float_field': Field(Float, default_value=2, is_required=False)}
)
assert_config_value_success(config_field.config_type, {}, {'float_field': 2})
def _single_required_string_config_dict():
return convert_potential_field({'string_field': String})
def _multiple_required_fields_config_dict():
return convert_potential_field({'field_one': String, 'field_two': String})
def _single_optional_string_config_dict():
return convert_potential_field({'optional_field': Field(String, is_required=False)})
def _single_optional_string_field_config_dict_with_default():
optional_field_def = Field(String, is_required=False, default_value='some_default')
return convert_potential_field({'optional_field': optional_field_def})
def _mixed_required_optional_string_config_dict_with_default():
return convert_potential_field(
{
'optional_arg': Field(String, is_required=False, default_value='some_default'),
'required_arg': Field(String, is_required=True),
'optional_arg_no_default': Field(String, is_required=False),
}
)
def _multiple_required_fields_config_permissive_dict():
return Field(Permissive({'field_one': Field(String), 'field_two': Field(String)}))
def _validate(config_field, value):
res = process_config(config_field.config_type, value)
assert res.success, res.errors[0].message
return res.value
def test_single_required_string_field_config_type():
assert _validate(_single_required_string_config_dict(), {'string_field': 'value'}) == {
'string_field': 'value'
}
with pytest.raises(
AssertionError,
match=(
re.escape(
'Missing required field "string_field" at the root. Available Fields: '
'"[\'string_field\']".'
)
),
):
_validate(_single_required_string_config_dict(), {})
with pytest.raises(AssertionError):
_validate(_single_required_string_config_dict(), {'extra': 'yup'})
with pytest.raises(AssertionError):
_validate(_single_required_string_config_dict(), {'string_field': 'yupup', 'extra': 'yup'})
with pytest.raises(AssertionError):
_validate(_single_required_string_config_dict(), {'string_field': 1})
def test_undefined_field_error():
with pytest.raises(
AssertionError,
match=('Undefined field "extra" at the root. Expected: "{ string_field: ' 'String }".'),
):
_validate(
_single_required_string_config_dict(), {'string_field': 'value', 'extra': 'extra'}
)
def test_multiple_required_fields_passing():
assert _validate(
_multiple_required_fields_config_dict(),
{'field_one': 'value_one', 'field_two': 'value_two'},
) == {'field_one': 'value_one', 'field_two': 'value_two'}
def test_multiple_required_fields_failing():
with pytest.raises(AssertionError):
_validate(_multiple_required_fields_config_dict(), {})
with pytest.raises(AssertionError):
_validate(_multiple_required_fields_config_dict(), {'field_one': 'yup'})
with pytest.raises(AssertionError):
_validate(_multiple_required_fields_config_dict(), {'field_one': 'yup', 'extra': 'yup'})
with pytest.raises(AssertionError):
_validate(
_multiple_required_fields_config_dict(),
{'field_one': 'yup', 'field_two': 'yup', 'extra': 'should_not_exist'},
)
with pytest.raises(AssertionError):
_validate(
_multiple_required_fields_config_dict(), {'field_one': 'value_one', 'field_two': 2}
)
def test_single_optional_field_passing():
assert _validate(_single_optional_string_config_dict(), {'optional_field': 'value'}) == {
'optional_field': 'value'
}
assert _validate(_single_optional_string_config_dict(), {}) == {}
with pytest.raises(AssertionError):
assert _validate(_single_optional_string_config_dict(), {'optional_field': None}) == {
'optional_field': None
}
def test_single_optional_field_failing():
with pytest.raises(AssertionError):
_validate(_single_optional_string_config_dict(), {'optional_field': 1})
with pytest.raises(AssertionError):
_validate(_single_optional_string_config_dict(), {'dlkjfalksdjflksaj': 1})
def test_single_optional_field_passing_with_default():
assert _validate(_single_optional_string_field_config_dict_with_default(), {}) == {
'optional_field': 'some_default'
}
assert _validate(
_single_optional_string_field_config_dict_with_default(), {'optional_field': 'override'}
) == {'optional_field': 'override'}
def test_permissive_multiple_required_fields_passing():
assert _validate(
_multiple_required_fields_config_permissive_dict(),
{
'field_one': 'value_one',
'field_two': 'value_two',
'previously_unspecified': 'should_exist',
},
) == {
'field_one': 'value_one',
'field_two': 'value_two',
'previously_unspecified': 'should_exist',
}
def test_permissive_multiple_required_fields_nested_passing():
assert _validate(
_multiple_required_fields_config_permissive_dict(),
{
'field_one': 'value_one',
'field_two': 'value_two',
'previously_unspecified': {'nested': 'value', 'with_int': 2},
},
) == {
'field_one': 'value_one',
'field_two': 'value_two',
'previously_unspecified': {'nested': 'value', 'with_int': 2},
}
def test_permissive_multiple_required_fields_failing():
with pytest.raises(AssertionError):
_validate(_multiple_required_fields_config_permissive_dict(), {})
with pytest.raises(AssertionError):
_validate(_multiple_required_fields_config_permissive_dict(), {'field_one': 'yup'})
with pytest.raises(AssertionError):
_validate(
_multiple_required_fields_config_permissive_dict(),
{'field_one': 'value_one', 'field_two': 2},
)
def test_mixed_args_passing():
assert _validate(
_mixed_required_optional_string_config_dict_with_default(),
{'optional_arg': 'value_one', 'required_arg': 'value_two'},
) == {'optional_arg': 'value_one', 'required_arg': 'value_two'}
assert _validate(
_mixed_required_optional_string_config_dict_with_default(), {'required_arg': 'value_two'}
) == {'optional_arg': 'some_default', 'required_arg': 'value_two'}
assert _validate(
_mixed_required_optional_string_config_dict_with_default(),
{'required_arg': 'value_two', 'optional_arg_no_default': 'value_three'},
) == {
'optional_arg': 'some_default',
'required_arg': 'value_two',
'optional_arg_no_default': 'value_three',
}
def _single_nested_config():
return convert_potential_field({'nested': {'int_field': Int}})
def _nested_optional_config_with_default():
return convert_potential_field(
{'nested': {'int_field': Field(Int, is_required=False, default_value=3)}}
)
def _nested_optional_config_with_no_default():
return convert_potential_field({'nested': {'int_field': Field(Int, is_required=False)}})
def test_single_nested_config():
assert _validate(_single_nested_config(), {'nested': {'int_field': 2}}) == {
'nested': {'int_field': 2}
}
def test_single_nested_config_undefined_errors():
with pytest.raises(
AssertionError,
match='Value at path root:nested must be dict. Expected: "{ int_field: Int }".',
):
_validate(_single_nested_config(), {'nested': 'dkjfdk'})
with pytest.raises(
AssertionError,
match='Invalid scalar at path root:nested:int_field. Value "dkjfdk" of type .* is not valid for expected type "Int".',
):
_validate(_single_nested_config(), {'nested': {'int_field': 'dkjfdk'}})
with pytest.raises(
AssertionError,
match=(
'Undefined field "not_a_field" at path root:nested. Expected: ' '"{ int_field: Int }".'
),
):
_validate(_single_nested_config(), {'nested': {'int_field': 2, 'not_a_field': 1}})
with pytest.raises(
AssertionError,
match='Invalid scalar at path root:nested:int_field. Value "{\'too_nested\': \'dkjfdk\'}" of type .* is not valid for expected type "Int".',
):
_validate(_single_nested_config(), {'nested': {'int_field': {'too_nested': 'dkjfdk'}}})
def test_nested_optional_with_default():
assert _validate(_nested_optional_config_with_default(), {'nested': {'int_field': 2}}) == {
'nested': {'int_field': 2}
}
assert _validate(_nested_optional_config_with_default(), {'nested': {}}) == {
'nested': {'int_field': 3}
}
def test_nested_optional_with_no_default():
assert _validate(_nested_optional_config_with_no_default(), {'nested': {'int_field': 2}}) == {
'nested': {'int_field': 2}
}
assert _validate(_nested_optional_config_with_no_default(), {'nested': {}}) == {'nested': {}}
def test_config_defaults():
@solid(config_schema={"sum": Int})
def two(_context):
assert _context.solid_config['sum'] == 6
return _context.solid_config['sum']
@solid(config_schema={"sum": Int})
def one(_context, prev_sum):
assert prev_sum == 6
return prev_sum + _context.solid_config['sum']
# addition_composite_solid
def addition_composite_solid_config_fn(config):
child_config = {'config': {"sum": config['a'] + config['b'] + config['c']}}
return {'one': child_config, 'two': child_config}
@composite_solid(
config_fn=addition_composite_solid_config_fn,
config_schema={
"a": Field(Int, is_required=False, default_value=1),
"b": Field(Int, is_required=False, default_value=2),
"c": Int,
},
)
def addition_composite_solid():
return one(two())
@pipeline
def addition_pipeline():
addition_composite_solid()
result = execute_pipeline(
addition_pipeline, {'solids': {'addition_composite_solid': {'config': {'c': 3}}}}
)
assert result.success
def test_config_with_and_without_config():
@solid(config_schema={'prefix': Field(str, is_required=False, default_value='_')})
def prefix_value(context, v):
return '{prefix}{v}'.format(prefix=context.solid_config["prefix"], v=v)
@composite_solid(
config_fn=lambda cfg: {'prefix_value': {'config': {'prefix': cfg['prefix']}}},
config_schema={'prefix': Field(str, is_required=False, default_value='_id_')},
)
def prefix_id(val):
return prefix_value(val)
@solid
def print_value(_, v):
return str(v)
@pipeline
def config_issue_pipeline():
v = prefix_id()
print_value(v)
result = execute_pipeline(
config_issue_pipeline,
{
'solids': {
'prefix_id': {
'config': {'prefix': '_customprefix_'},
'inputs': {'val': {'value': "12345"}},
}
}
},
)
assert result.success
assert result.result_for_solid('print_value').output_value() == '_customprefix_12345'
result_using_default = execute_pipeline(
config_issue_pipeline,
{'solids': {'prefix_id': {'config': {}, 'inputs': {'val': {'value': "12345"}}}}},
)
assert result_using_default.success
assert result_using_default.result_for_solid('print_value').output_value() == '_id_12345'
def test_build_optionality():
optional_test_type = convert_potential_field(
{'required': {'value': String}, 'optional': {'value': Field(String, is_required=False)},}
).config_type
assert optional_test_type.fields['required'].is_required
assert optional_test_type.fields['optional'].is_required is False
def test_wrong_solid_name():
@solid(name='some_solid', input_defs=[], output_defs=[], config_schema=Int)
def some_solid(_):
return None
@pipeline(name='pipeline_wrong_solid_name')
def pipeline_def():
some_solid()
env_config = {'solids': {'another_name': {'config': {}}}}
with pytest.raises(DagsterInvalidConfigError) as pe_info:
execute_pipeline(pipeline_def, env_config)
pe = pe_info.value
assert 'Undefined field "another_name" at path root:solids' in str(pe)
def fail_me():
assert False
def dummy_resource(config_schema=None):
return ResourceDefinition(lambda: None, config_schema=config_schema)
def test_wrong_resources():
pipeline_def = PipelineDefinition(
name='pipeline_test_multiple_context',
mode_defs=[
ModeDefinition(
resource_defs={'resource_one': dummy_resource(), 'resource_two': dummy_resource()}
)
],
solid_defs=[],
)
with pytest.raises(
DagsterInvalidConfigError, match='Undefined field "nope" at path root:resources'
):
execute_pipeline(pipeline_def, {'resources': {'nope': {}}})
def test_solid_list_config():
value = [1, 2]
called = {}
@solid(name='solid_list_config', input_defs=[], output_defs=[], config_schema=[int])
def solid_list_config(context):
assert context.solid_config == value
called['yup'] = True
@pipeline(name='solid_list_config_pipeline')
def pipeline_def():
solid_list_config()
result = execute_pipeline(
pipeline_def, run_config={'solids': {'solid_list_config': {'config': value}}}
)
assert result.success
assert called['yup']
def test_two_list_types():
@solid(
input_defs=[], config_schema={'list_one': [int], 'list_two': [int]},
)
def two_list_type(context):
return context.solid_config
assert execute_solid(
two_list_type,
run_config={'solids': {'two_list_type': {'config': {'list_one': [1], 'list_two': [2]}}}},
).output_value() == {'list_one': [1], 'list_two': [2]}
@solid(
input_defs=[], config_schema={'list_one': [Int], 'list_two': [Int]},
)
def two_list_type_condensed_syntax(context):
return context.solid_config
assert execute_solid(
two_list_type_condensed_syntax,
run_config={
'solids': {
'two_list_type_condensed_syntax': {'config': {'list_one': [1], 'list_two': [2]}}
}
},
).output_value() == {'list_one': [1], 'list_two': [2]}
@solid(
input_defs=[], config_schema={'list_one': [int], 'list_two': [int]},
)
def two_list_type_condensed_syntax_primitives(context):
return context.solid_config
assert execute_solid(
two_list_type_condensed_syntax_primitives,
run_config={
'solids': {
'two_list_type_condensed_syntax_primitives': {
'config': {'list_one': [1], 'list_two': [2]}
}
}
},
).output_value() == {'list_one': [1], 'list_two': [2]}
def test_multilevel_default_handling():
@solid(config_schema=Field(Int, is_required=False, default_value=234))
def has_default_value(context):
assert context.solid_config == 234
pipeline_def = PipelineDefinition(
name='multilevel_default_handling', solid_defs=[has_default_value]
)
assert execute_pipeline(pipeline_def).success
assert execute_pipeline(pipeline_def, run_config=None).success
assert execute_pipeline(pipeline_def, run_config={}).success
assert execute_pipeline(pipeline_def, run_config={'solids': {}}).success
assert execute_pipeline(pipeline_def, run_config={'solids': {'has_default_value': {}}}).success
assert execute_pipeline(
pipeline_def, run_config={'solids': {'has_default_value': {'config': 234}}}
).success
def test_no_env_missing_required_error_handling():
@solid(config_schema=Int)
def required_int_solid(_context):
pass
pipeline_def = PipelineDefinition(
name='no_env_missing_required_error', solid_defs=[required_int_solid]
)
with pytest.raises(DagsterInvalidConfigError) as pe_info:
execute_pipeline(pipeline_def)
assert isinstance(pe_info.value, DagsterInvalidConfigError)
pe = pe_info.value
assert len(pe.errors) == 1
mfe = pe.errors[0]
assert mfe.reason == DagsterEvaluationErrorReason.MISSING_REQUIRED_FIELD
assert len(pe.errors) == 1
assert pe.errors[0].message == (
'''Missing required field "solids" at the root. '''
'''Available Fields: "['execution', 'loggers', '''
''''resources', 'solids', 'storage']".'''
)
def test_root_extra_field():
@solid(config_schema=Int)
def required_int_solid(_context):
pass
@pipeline
def pipeline_def():
required_int_solid()
with pytest.raises(DagsterInvalidConfigError) as pe_info:
execute_pipeline(
pipeline_def,
run_config={'solids': {'required_int_solid': {'config': 948594}}, 'nope': None},
)
pe = pe_info.value
assert len(pe.errors) == 1
fnd = pe.errors[0]
assert fnd.reason == DagsterEvaluationErrorReason.FIELD_NOT_DEFINED
assert 'Undefined field "nope"' in pe.message
def test_deeper_path():
@solid(config_schema=Int)
def required_int_solid(_context):
pass
@pipeline
def pipeline_def():
required_int_solid()
with pytest.raises(DagsterInvalidConfigError) as pe_info:
execute_pipeline(
pipeline_def, run_config={'solids': {'required_int_solid': {'config': 'asdf'}}}
)
pe = pe_info.value
assert len(pe.errors) == 1
rtm = pe.errors[0]
assert rtm.reason == DagsterEvaluationErrorReason.RUNTIME_TYPE_MISMATCH
def test_working_list_path():
called = {}
@solid(config_schema=[int])
def required_list_int_solid(context):
assert context.solid_config == [1, 2]
called['yup'] = True
@pipeline
def pipeline_def():
required_list_int_solid()
result = execute_pipeline(
pipeline_def, run_config={'solids': {'required_list_int_solid': {'config': [1, 2]}}}
)
assert result.success
assert called['yup']
def test_item_error_list_path():
called = {}
@solid(config_schema=[int])
def required_list_int_solid(context):
assert context.solid_config == [1, 2]
called['yup'] = True
@pipeline
def pipeline_def():
required_list_int_solid()
with pytest.raises(DagsterInvalidConfigError) as pe_info:
execute_pipeline(
pipeline_def,
run_config={'solids': {'required_list_int_solid': {'config': [1, 'nope']}}},
)
pe = pe_info.value
assert len(pe.errors) == 1
rtm = pe.errors[0]
assert rtm.reason == DagsterEvaluationErrorReason.RUNTIME_TYPE_MISMATCH
assert 'Invalid scalar at path root:solids:required_list_int_solid:config[1]' in str(pe)
def test_list_in_config_error():
error_msg = (
'Cannot use List in the context of config. '
'Please use a python list (e.g. [int]) or dagster.Array (e.g. Array(int)) instead.'
)
with pytest.raises(DagsterInvalidDefinitionError, match=re.escape(error_msg)):
@solid(config_schema=List[int])
def _no_runtime_list_in_config(_):
pass
def test_required_resource_not_given():
@pipeline(
name='required_resource_not_given',
mode_defs=[ModeDefinition(resource_defs={'required': dummy_resource(Int)})],
)
def pipeline_def():
pass
with pytest.raises(DagsterInvalidConfigError) as not_none_pe_info:
execute_pipeline(pipeline_def, run_config={'resources': None})
assert len(not_none_pe_info.value.errors) == 1
assert (
'Value at path root:resources must be not be None.'
in not_none_pe_info.value.errors[0].message
)
with pytest.raises(DagsterInvalidConfigError) as pe_info:
execute_pipeline(pipeline_def, run_config={'resources': {}})
pe = pe_info.value
error = pe.errors[0]
assert error.reason == DagsterEvaluationErrorReason.MISSING_REQUIRED_FIELD
assert (
error.message == 'Missing required field "required" at path root:resources. '
'Available Fields: "[\'required\']".'
)
def test_multilevel_good_error_handling_solids():
@solid(config_schema=Int)
def good_error_handling(_context):
pass
@pipeline
def pipeline_def():
good_error_handling()
with pytest.raises(DagsterInvalidConfigError) as not_none_pe_info:
execute_pipeline(pipeline_def, run_config={'solids': None})
assert len(not_none_pe_info.value.errors) == 1
assert (
'Value at path root:solids must be not be None.' in not_none_pe_info.value.errors[0].message
)
with pytest.raises(DagsterInvalidConfigError) as missing_field_pe_info:
execute_pipeline(pipeline_def, run_config={'solids': {}})
assert len(missing_field_pe_info.value.errors) == 1
assert missing_field_pe_info.value.errors[0].message == (
'''Missing required field "good_error_handling" at path root:solids. '''
'''Available Fields: "['good_error_handling']".'''
)
def test_multilevel_good_error_handling_solid_name_solids():
@solid(config_schema=Int)
def good_error_handling(_context):
pass
@pipeline
def pipeline_def():
good_error_handling()
with pytest.raises(DagsterInvalidConfigError) as pe_info:
execute_pipeline(pipeline_def, run_config={'solids': {'good_error_handling': {}}})
assert len(pe_info.value.errors) == 1
assert pe_info.value.errors[0].message == (
'''Missing required field "config" at path root:solids:good_error_handling. '''
'''Available Fields: "['config', 'outputs']".'''
)
def test_multilevel_good_error_handling_config_solids_name_solids():
@solid(config_schema=Noneable(int))
def good_error_handling(_context):
pass
@pipeline
def pipeline_def():
good_error_handling()
execute_pipeline(pipeline_def, run_config={'solids': {'good_error_handling': {'config': None}}})
def test_invalid_default_values():
with pytest.raises(
DagsterInvalidConfigError,
match='Value "3" of type .* is not valid for expected type "Int"',
):
@solid(config_schema=Field(Int, default_value='3'))
def _solid(_):
pass
def test_typing_types_into_config():
match_str = re.escape(
'You have passed in typing.List to the config system. '
'Types from the typing module in python are not allowed '
'in the config system. You must use types that are imported '
'from dagster or primitive types such as bool, int, etc.'
)
with pytest.raises(DagsterInvalidDefinitionError, match=match_str):
@solid(config_schema=Field(typing.List))
def _solid(_):
pass
with pytest.raises(DagsterInvalidDefinitionError, match=match_str):
@solid(config_schema=typing.List)
def _solid(_):
pass
match_str = re.escape(
'You have passed in typing.List[int] to the config system. Types '
'from the typing module in python are not allowed in the config system. '
'You must use types that are imported from dagster or primitive types '
'such as bool, int, etc.'
)
with pytest.raises(DagsterInvalidDefinitionError, match=match_str):
@solid(config_schema=Field(typing.List[int]))
def _solid(_):
pass
with pytest.raises(DagsterInvalidDefinitionError, match=match_str):
@solid(config_schema=typing.List[int])
def _solid(_):
pass
for ttype in [
typing.Optional[int],
typing.Set,
typing.Set[int],
typing.Dict,
typing.Dict[int, str],
typing.Tuple,
typing.Tuple[int, int],
]:
with pytest.raises(DagsterInvalidDefinitionError):
@solid(config_schema=Field(ttype))
def _solid(_):
pass
def test_no_set_in_config_system():
set_error_msg = re.escape('Cannot use Set in the context of a config field.')
with pytest.raises(DagsterInvalidDefinitionError, match=set_error_msg):
@solid(config_schema=Field(Set))
def _bare_open_set(_):
pass
with pytest.raises(DagsterInvalidDefinitionError, match=set_error_msg):
@solid(config_schema=Set)
def _bare_open_set(_):
pass
with pytest.raises(DagsterInvalidDefinitionError, match=set_error_msg):
@solid(config_schema=Field(Set[int]))
def _bare_closed_set(_):
pass
with pytest.raises(DagsterInvalidDefinitionError, match=set_error_msg):
@solid(config_schema=Set[int])
def _bare_closed_set(_):
pass
def test_no_tuple_in_config_system():
tuple_error_msg = re.escape('Cannot use Tuple in the context of a config field.')
with pytest.raises(DagsterInvalidDefinitionError, match=tuple_error_msg):
@solid(config_schema=Field(Tuple))
def _bare_open_tuple(_):
pass
with pytest.raises(DagsterInvalidDefinitionError, match=tuple_error_msg):
@solid(config_schema=Field(Tuple[int]))
def _bare_closed_set(_):
pass
def test_field_is_none():
with pytest.raises(DagsterInvalidConfigDefinitionError) as exc_info:
@solid(config_schema={'none_field': None})
def _none_is_bad(_):
pass
assert 'Fields cannot be None' in str(exc_info.value)
|
the-stack_0_10971 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: bigip_profile_udp
short_description: Manage UDP profiles on a BIG-IP
description:
- Manage UDP profiles on a BIG-IP. There are a variety of UDP profiles, each with their
own adjustments to the standard C(udp) profile. Users of this module should be aware
that many of the adjustable knobs have no module default. Instead, the default is
assigned by the BIG-IP system itself which, in most cases, is acceptable.
version_added: 2.6
options:
name:
description:
- Specifies the name of the profile.
required: True
parent:
description:
- Specifies the profile from which this profile inherits settings.
- When creating a new profile, if this parameter is not specified, the default
is the system-supplied C(udp) profile.
idle_timeout:
description:
- Specifies the length of time that a connection is idle (has no traffic) before
the connection is eligible for deletion.
- When creating a new profile, if this parameter is not specified, the remote
device will choose a default value appropriate for the profile, based on its
C(parent) profile.
- When a number is specified, indicates the number of seconds that the UDP
connection can remain idle before the system deletes it.
- When C(0), or C(indefinite), specifies that UDP connections can remain idle
indefinitely.
- When C(immediate), specifies that you do not want the UDP connection to
remain idle, and that it is therefore immediately eligible for deletion.
datagram_load_balancing:
description:
- Specifies, when C(yes), that the system load balances UDP traffic
packet-by-packet.
type: bool
partition:
description:
- Device partition to manage resources on.
default: Common
state:
description:
- When C(present), ensures that the profile exists.
- When C(absent), ensures the profile is removed.
default: present
choices:
- present
- absent
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Create a UDP profile
  bigip_profile_udp:
name: foo
parent: udp
idle_timeout: 300
datagram_load_balancing: no
password: secret
server: lb.mydomain.com
state: present
user: admin
delegate_to: localhost
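# A hypothetical follow-up task (illustrative sketch, not taken from the module
# docs): remove the same profile again by setting state to absent.
- name: Remove the UDP profile
  bigip_profile_udp:
    name: foo
    password: secret
    server: lb.mydomain.com
    state: absent
    user: admin
  delegate_to: localhost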
'''
RETURN = r'''
parent:
description: The new parent of the resource.
returned: changed
type: string
sample: udp
idle_timeout:
description: The new idle timeout of the resource.
returned: changed
type: int
sample: 100
datagram_load_balancing:
description: The new datagram load balancing setting of the resource.
returned: changed
type: bool
sample: True
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from library.module_utils.network.f5.bigip import HAS_F5SDK
from library.module_utils.network.f5.bigip import F5Client
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import f5_argument_spec
try:
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
except ImportError:
from ansible.module_utils.network.f5.bigip import HAS_F5SDK
from ansible.module_utils.network.f5.bigip import F5Client
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import f5_argument_spec
try:
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
class Parameters(AnsibleF5Parameters):
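    # api_map translates the camelCase attribute names used by the BIG-IP REST API
    # into the module's snake_case parameter names. The lists below control which
    # attributes are pushed to the API (api_attributes), reported back to the user
    # (returnables) and compared when deciding whether an update is needed
    # (updatables); see Difference and ModuleManager further down.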
api_map = {
'datagramLoadBalancing': 'datagram_load_balancing',
'idleTimeout': 'idle_timeout',
'defaultsFrom': 'parent'
}
api_attributes = [
'datagramLoadBalancing',
'idleTimeout',
'defaultsFrom'
]
returnables = [
'datagram_load_balancing',
'idle_timeout',
'parent'
]
updatables = [
'datagram_load_balancing',
'idle_timeout',
'parent'
]
@property
def idle_timeout(self):
if self._values['idle_timeout'] is None:
return None
if self._values['idle_timeout'] in ['indefinite', 'immediate']:
return self._values['idle_timeout']
return int(self._values['idle_timeout'])
class ApiParameters(Parameters):
@property
def datagram_load_balancing(self):
if self._values['datagram_load_balancing'] is None:
return None
if self._values['datagram_load_balancing'] == 'enabled':
return True
return False
class ModuleParameters(Parameters):
@property
def parent(self):
if self._values['parent'] is None:
return None
result = fq_name(self.partition, self._values['parent'])
return result
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
@property
def datagram_load_balancing(self):
if self._values['datagram_load_balancing'] is None:
return None
if self._values['datagram_load_balancing']:
return 'enabled'
return 'disabled'
class ReportableChanges(Changes):
@property
def datagram_load_balancing(self):
if self._values['datagram_load_balancing'] is None:
return None
if self._values['datagram_load_balancing'] == 'enabled':
return True
return False
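# Difference compares the desired module parameters ('want') against the current
# device configuration ('have') and reports only the values that differ; a property
# named after a parameter can override the default attribute comparison.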
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.want = ModuleParameters(params=self.module.params)
self.have = ApiParameters()
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def exec_module(self):
changed = False
result = dict()
state = self.want.state
try:
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.client.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def exists(self):
result = self.client.api.tm.ltm.profile.udps.udp.exists(
name=self.want.name,
partition=self.want.partition
)
return result
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the resource.")
return True
def create(self):
self._set_changed_options()
if self.module.check_mode:
return True
self.create_on_device()
return True
def create_on_device(self):
params = self.changes.api_params()
self.client.api.tm.ltm.profile.udps.udp.create(
name=self.want.name,
partition=self.want.partition,
**params
)
def update_on_device(self):
params = self.changes.api_params()
resource = self.client.api.tm.ltm.profile.udps.udp.load(
name=self.want.name,
partition=self.want.partition
)
resource.modify(**params)
def absent(self):
if self.exists():
return self.remove()
return False
def remove_from_device(self):
resource = self.client.api.tm.ltm.profile.udps.udp.load(
name=self.want.name,
partition=self.want.partition
)
if resource:
resource.delete()
def read_current_from_device(self):
resource = self.client.api.tm.ltm.profile.udps.udp.load(
name=self.want.name,
partition=self.want.partition
)
result = resource.attrs
return ApiParameters(params=result)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
name=dict(required=True),
parent=dict(),
idle_timeout=dict(),
datagram_load_balancing=dict(type='bool'),
state=dict(
default='present',
choices=['present', 'absent']
),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode
)
if not HAS_F5SDK:
module.fail_json(msg="The python f5-sdk module is required")
try:
client = F5Client(**module.params)
mm = ModuleManager(module=module, client=client)
results = mm.exec_module()
cleanup_tokens(client)
module.exit_json(**results)
except F5ModuleError as ex:
cleanup_tokens(client)
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
|
the-stack_0_10973 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import logging
from osc_lib import exceptions
from neutron_diagnose.command import commandmanager
from neutron_diagnose.i18n import _
class CheckSgRule(commandmanager.ShowOne):
_description = _("Compare the security group rule in DataBase with "
"iptables rules in related compute node.")
def get_parser(self, prog_name):
parser = super(CheckSgRule, self).get_parser(prog_name)
parser.add_argument(
'port-id',
metavar='<port-id>',
            help=_('The port UUID.'),
)
return parser
def take_action(self, parsed_args):
compute_client = self.app.client_manager.compute
network_client = self.app.client_manager.network
ssh_client = self.app.client_manager.ssh
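        # The compute, network and ssh clients fetched above are not used yet:
        # the actual DB-vs-iptables comparison for the given port appears to be
        # left unimplemented here, so an empty result dict is returned.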
result = {}
return commandmanager.set_result(result)
|
the-stack_0_10975 | __author__ = 'ipetrash'
# https://docs.python.org/3.4/tutorial/inputoutput.html#reading-and-writing-files
# http://pythonworld.ru/tipy-dannyx-v-python/fajly-rabota-s-fajlami.html
if __name__ == '__main__':
    # Open the file in write mode
with open('foo.txt', mode='w') as f:
f.write('123\n')
f.write('one two\n')
f.write('one two\n')
f.write('раз два\n') |
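    # A minimal read-back sketch (added for illustration, not part of the original
    # snippet): reopen the same file in read mode and print its contents.
    with open('foo.txt', mode='r') as f:
        print(f.read())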
the-stack_0_10980 | import re as _re
import numpy as _np
import copy as _copy
import gdspy as _gdspy
import os as _os
import time as _time
from . import utils
from . import geometry
_PRINT_LIT_REDUCTION = False
class CallTree:
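    # Operator precedence table consumed by _reduceLiterals(): the sub-lists are
    # processed in order, so operators in earlier sub-lists bind more tightly
    # (e.g. '^' is reduced before '*'/'/', which are reduced before '+'/'-').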
_operators = [['make'], ['.'], ['^'], ['*', '/'], ['-', '+'], ['pstart', 'pend'],
['psep'], ['.'], ['='], [',']]
def __init__(self, root, text=""):
self._root = root
self._children = []
self._names = {}
self._func = ""
if text:
nodeStack = [self, ]
textbuffer = ""
strDelimiter = ''
for i, c in enumerate(text):
if strDelimiter and c != strDelimiter:
textbuffer += c
elif strDelimiter and c == strDelimiter:
textbuffer += c
strDelimiter = ''
elif c in ['"', "'"]:
strDelimiter = c
textbuffer += c
elif c == "(":
top = nodeStack[-1]
new = CallTree(root)
m = _re.search("[^a-zA-Z0-9_]", textbuffer[::-1])
if m:
new._func = textbuffer[len(textbuffer)-m.start():]
top._addText(textbuffer[:len(textbuffer)-m.start()])
else:
new._func = textbuffer
top._children.append(new)
nodeStack.append(new)
textbuffer = ""
elif c == ")":
nodeStack[-1]._addText(textbuffer)
nodeStack.pop()
textbuffer = ""
else:
textbuffer += c
if len(nodeStack) == 0:
raise ValueError("Additional ')' at:\n'"+utils.shortenText(text[i-30:i+30], maxLength=1e99)+"'")
if len(nodeStack) > 1:
raise ValueError("Additional '('.")
def _addText(self, text):
text = text.strip()
if len(text) > 0:
self._children.append(text.strip())
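    # _py2lit wraps plain Python values in the tagged-literal form used by the
    # evaluator, e.g. 2 -> ['int', 2], 1.5 -> ['float', 1.5], 'a' -> ['string', 'a'],
    # [0, 1] -> ['point', [0, 1]] and geometry.Shape instances -> ['shape', <shape>].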
def _py2lit(self, *vals):
res = []
for val in vals:
if type(val) is list and type(val[0]) is str:
res.append(val)
elif type(val) is float:
res.append(['float', val])
elif type(val) is int:
res.append(['int', val])
elif type(val) is list and len(val) == 2:
res.append(['point', val])
elif type(val) is str:
res.append(['string', val])
elif isinstance(val, geometry.Shape):
res.append(['shape', val])
else:
raise ValueError("Uknown variable type '"+str(type(val))+"'")
if len(vals) == 1:
return res[0]
return res
def createLiterals(self):
for child in self._children:
if type(child) is CallTree:
child.createLiterals()
#=====================================================================
# generate literals from text
i = 0
inPoint = False
while i < len(self._children):
if type(self._children[i]) is str:
i, inPoint = self._parseStr(i, inPoint)
else:
i += 1
#=====================================================================
# accumulate children
i = 0
while i < len(self._children)-1:
if (hasattr(self._children[i], "_literals")
and hasattr(self._children[i+1], "_literals")):
self._children[i]._literals += self._children.pop(i+1)._literals
else:
i += 1
def _instanciateShape(self, obj, largs, dargs):
tree = obj['tree']
argdict = {k: None for k in obj['args']}
if len(largs) > len(argdict):
raise ValueError("Too many list args in parametric shape call: '{}'.".format(self._func))
if len(argdict) > 0:
for targetKey, listArg in zip(obj['args'], largs):
argdict[targetKey] = self._py2lit(listArg)
for key, val in dargs.items():
if argdict[key] is None:
argdict[key] = self._py2lit(val)
else:
raise ValueError("Argument specified by list arg and named arg in parametric shape call: '{}'.".format(self._func))
if None in argdict.values():
raise ValueError("To few arguements in parametric shape call: '{}'.".format(self._func))
unresolvedNames = tree.resolveNames(argdict)
if unresolvedNames:
raise ValueError("Unresolved names "
+", ".join(['"'+s+'"' for s in unresolvedNames])
+" in imported parameteric shape call: ".format(self._func))
tree.evaluate()
return tree.getShape()
def evaluate(self):
for child in self._children:
if type(child) is CallTree:
child.evaluate()
#=====================================================================
# accumulate children
i = 0
while i < len(self._children)-1:
if (hasattr(self._children[i], "_literals")
and hasattr(self._children[i+1], "_literals")):
self._children[i]._literals += self._children.pop(i+1)._literals
else:
i += 1
#=====================================================================
# reduce literals
if len(self._children) > 1:
raise ValueError("Fatal error: children without literals not allowed.")
self.resolveNames({})
self._reduceLiterals()
#=====================================================================
# prepare function parsing
if self._func == "":
if len(self._children) == 1:
self._literals = [self._result]
else:
unresolvedNames = []
largs = []
dargs = {}
# multiple arguments
if self._result[0] == "argumentlist":
for lit in self._result[1]:
if lit[0] == 'assignment':
if lit[1][1][0] == 'name':
unresolvedNames.append(lit[1][1][1])
dargs[lit[1][0]] = lit[1][1][1]
else:
if lit[0] == 'name':
unresolvedNames.append(lit[1])
largs.append(lit[1])
# only one argument
elif self._result[0] != "none":
if self._result[0] == 'name':
unresolvedNames.append(self._result[1])
largs = [self._result[1]]
dargs = {}
def requireResolvedNamesOnly():
if unresolvedNames:
raise ValueError('Unresolved name(s): '
+', '.join(['"'+s+'"' for s in unresolvedNames])
+' in argumentlist of func "{}".'.format(self._func))
if _PRINT_LIT_REDUCTION:
utils.debug('Evaluate function "'+self._func+'", largs='
+str(largs)+', dargs='+str(dargs))
#=====================================================================
# rect function
if self._func == "rect":
requireResolvedNamesOnly()
self._literals = [['shape', geometry.Rect(*largs, **dargs)]]
#=====================================================================
# polygon function
elif self._func == "polygon":
requireResolvedNamesOnly()
self._literals = [['shape', geometry.Polygon(*largs, **dargs)]]
#=====================================================================
# text function
elif self._func == "text":
requireResolvedNamesOnly()
self._literals = [['shape', geometry.Text(*largs, **dargs)]]
#=====================================================================
# translate function
elif self._func == "translate":
requireResolvedNamesOnly()
self._literals = [['func', utils.TypeCheck(["shape", "point", "shaperef"])
+geometry.Translator(*largs, **dargs)]]
#=====================================================================
# rotate function
elif self._func == "rotate":
requireResolvedNamesOnly()
self._literals = [['func', utils.TypeCheck(["shape", "point", "shaperef"])
+geometry.Rotator(*largs, **dargs)]]
#=====================================================================
# mirror function
elif self._func == "mirror":
requireResolvedNamesOnly()
self._literals = [['func', utils.TypeCheck(["shape"])
+geometry.Mirrower(*largs, **dargs)]]
#=====================================================================
# grow function
elif self._func == "grow":
requireResolvedNamesOnly()
self._literals = [['func', utils.TypeCheck("shape")
+geometry.Grower(*largs, **dargs)]]
#=====================================================================
# smooth function
elif self._func == "round":
requireResolvedNamesOnly()
self._literals = [['func', utils.TypeCheck("shape")
+geometry.Rounder(*largs, **dargs)]]
#=====================================================================
# create array of shapes
elif self._func == "array":
requireResolvedNamesOnly()
self._literals = [['func', utils.TypeCheck(["shape", "shaperef"])
+geometry.Arrayer(*largs, **dargs)]]
#=====================================================================
# multiple calls to parametric shapes
elif self._func == "call":
requireResolvedNamesOnly()
self._literals = [['func', utils.TypeCheck(['name', 'tree'], returnType='raw')
+utils.Caller(self._root, *largs, **dargs)]]
#=====================================================================
# cast float to int
elif self._func == "int":
requireResolvedNamesOnly()
if len(dargs) > 0 or len(largs) != 1:
raise ValueError("Invalid arguments to 'int' call.")
self._literals = [['int', int(largs[0])]]
#=====================================================================
# absolute
elif self._func == "abs":
requireResolvedNamesOnly()
if len(dargs) > 0 or len(largs) != 1:
raise ValueError("Invalid arguments to 'abs' call.")
self._literals = [['float', abs(largs[0])]]
#=====================================================================
# create letter from number
elif self._func == "char":
requireResolvedNamesOnly()
letters = "abcdefghijklmnopqrstuvwxyz"
                if len(dargs) > 0 or len(largs) != 1 or largs[0] >= len(letters):
raise ValueError("Invalid arguments to 'char' call.")
self._literals = [['string', letters[int(largs[0])]]]
#=====================================================================
# min/max/mean functions
elif self._func in ["min", "max", "mean"]:
requireResolvedNamesOnly()
if len(dargs) > 0:
raise ValueError("Function '"+self._func+"' does not support named arguments.")
if len(largs) == 0:
raise ValueError("Function '"+self._func+"' needs more than one argument.")
try:
largs = [float(f) for f in largs]
except:
raise ValueError("Function '"+self._func+"' supports only numerical inputs.")
fdict = {"min": min, "max": max, "mean": lambda l: sum(l)/len(l)}
self._literals = [['float', fdict[self._func](largs)]]
#=====================================================================
# trigonometric functions
elif self._func in ["cos", "sin", "tan", "asin", "acos", "atan"]:
requireResolvedNamesOnly()
if len(largs) != 1 or any([a not in ['unit'] for a in dargs]):
raise ValueError("Invalid arguments to 'cos' function.")
u = dargs.get('unit', 'deg')
if u == 'deg':
largs[0] *= _np.pi/180
elif u == 'rad':
pass
else:
raise ValueError("Invalid value for 'unit' argument in 'cos' function.")
if self._func == "sin":
self._literals = [['float', _np.sin(largs[0])]]
elif self._func == "cos":
self._literals = [['float', _np.cos(largs[0])]]
elif self._func == "tan":
self._literals = [['float', _np.tan(largs[0])]]
elif self._func == "asin":
self._literals = [['float', 180/_np.pi*_np.arcsin(largs[0])]]
elif self._func == "acos":
self._literals = [['float', 180/_np.pi*_np.arccos(largs[0])]]
else:
self._literals = [['float', 180/_np.pi*_np.arctan(largs[0])]]
#=====================================================================
# arctan2
elif self._func == "atan2":
requireResolvedNamesOnly()
if len(dargs) > 0 or len(largs) != 2:
raise ValueError("Invalid arguments to 'abs' call.")
self._literals = [['float', 180/_np.pi*_np.arctan2(largs[0], largs[1])]]
#=====================================================================
# calculate height of shape
elif self._func == "height":
requireResolvedNamesOnly()
if len(largs) != 1:
raise ValueError("Invalid arguments to 'height' function.")
self._literals = [['float', largs[0].height()]]
#=====================================================================
# calculate width of shape
elif self._func == "width":
requireResolvedNamesOnly()
if len(largs) != 1:
raise ValueError("Invalid arguments to 'width' function.")
self._literals = [['float', largs[0].width()]]
#=====================================================================
# calculate bounding box
elif self._func == "bb":
requireResolvedNamesOnly()
if len(largs) != 1:
raise ValueError("Invalid arguments to 'bb' function.")
self._literals = [['shape', largs[0].boundingBox()]]
#=====================================================================
# calculate center of mass
elif self._func == "center":
requireResolvedNamesOnly()
if len(largs) != 1:
raise ValueError("Invalid arguments to 'center' function.")
self._literals = [['point', largs[0].center()]]
#=====================================================================
# instanciate shapes
elif self._func in self._root.shapeDict:
requireResolvedNamesOnly()
obj = _copy.deepcopy(self._root.shapeDict[self._func])
shape = self._instanciateShape(obj, largs, dargs)
utils.debug('self._literals = ["shape", '+str(shape)+']')
self._literals = [['shape', shape]]
#=====================================================================
# look in imported database
elif self._func in [name for lib in self._root.importDict.values()
for name in lib.shapeDict.keys()]:
self._literals = [['import', self._func, [largs, dargs]]]
#=====================================================================
# create symbol reference:
elif self._func == 'ref':
if len(largs) == 1:
self._literals = [['shaperef', _gdspy.CellReference(self._root.gdsLib.cells[largs[0]])]]
elif len(largs) > 1:
if largs[0] not in self._root.paramSymDict:
raise ValueError('Parametric symbol "'+str(largs[0])+'" was not defined. '
+'(Symbols may only be used after their definition)')
paramSym = self._root.paramSymDict[largs[0]]
symParams = largs[1:]
self._literals = [['paramshaperef', paramSym], ['operator', 'make'],
['argumentlist',
[['name', p] if type(p) is str
else self._py2lit(p) for p in symParams]]]
else:
raise ValueError("Invalid function/shape '{}'.".format(self._func))
if _PRINT_LIT_REDUCTION:
utils.debug('Evaluation result: ['+', '.join(['['+l[0]+', '
+utils.shortenText(str(l[1]), maxLength=10)+']' for l in self._literals])+']')
def _parseStr(self, childId, inPoint=False):
#=====================================================================
# Split string in literals 'str', 'int', 'float', 'name', 'operator'
# and 'point'
appliedChange = False
s = self._children[childId]
if not hasattr(self._children[childId], "_literals"):
literals = []
strDelimiter = ''
buf = ''
inNumber = False
inName = False
s = s + ' '
for prevC, c, nextC in zip(' ' + s[:-1], s, s[1:] + ' '):
while True:
reparseChar = False
if strDelimiter:
if c == strDelimiter:
strDelimiter = ''
literals.append(['string', buf])
else:
buf += c
elif inNumber:
if _re.match('[0-9.e]', c) or c in ['+', '-'] and prevC == 'e':
buf += c
else:
n = float(buf)
                            if abs(n - round(n)) < 1e-6 * abs(n):
literals.append(['int', n])
else:
literals.append(['float', n])
inNumber = False
reparseChar = True
elif inName:
if _re.match('[a-zA-Z0-9_]', c):
buf += c
else:
utils.testValidName(buf)
literals.append(['name', buf])
inName = False
reparseChar = True
else:
if c in ['"', "'"]:
strDelimiter = c
buf = ''
elif c == '[':
literals.append(['operator', 'pstart'])
inPoint = True
elif inPoint and c == ',':
literals.append(['operator', 'psep'])
elif c == ']':
literals.append(['operator', 'pend'])
inPoint = False
elif _re.match('[0-9]', c) or c == '.' and _re.match('[0-9]', nextC):
reparseChar = True
inNumber = True
buf = ''
elif c in [op for ops in self._operators for op in ops]:
literals.append(['operator', c])
elif _re.match('[a-zA-Z_]', c):
reparseChar = True
inName = True
buf = ''
elif _re.match('\s', c):
pass
else:
raise ValueError("Unexpected character '{}'".format(c))
if not reparseChar:
break
self._children[childId] = CallTree(self._root)
self._children[childId]._literals = literals
return childId + 1, inPoint
def _reduceLiterals(self):
if hasattr(self, '_result'):
return
if _PRINT_LIT_REDUCTION:
utils.debug("Start reducing:")
utils.debug()
if len(self._children) == 0:
self._result = ['none', None]
return
literals = self._children[0]._literals
for ops in self._operators:
i = 0
#=====================================================================
# helper functions
def popNextLit():
if i < len(literals) - 1:
return literals.pop(i+1)
else:
return None
def popPrevLit():
nonlocal i
if i > 0:
i -= 1
return literals.pop(i)
else:
return None
def viewNextLit():
if i < len(literals) - 1:
return literals[i+1]
else:
return None
def viewPrevLit():
if i > 0:
return literals[i-1]
else:
return None
def isNextLitType(types):
if i < len(literals) - 1:
lit = literals[i+1]
else:
return False
if type(types) is list:
return lit != None and lit[0] in types
else:
return lit != None and lit[0] == types
def isPrevLitType(types):
if i > 0:
lit = literals[i-1]
else:
return False
if type(types) is list:
return lit[0] in types
else:
return lit[0] == types
#=====================================================================
# evaluate operators
while i < len(literals):
l = literals[i]
if l[0] == 'tree':
self.resolveNames({})
elif l[0] == 'operator' and l[1] in ops:
if _PRINT_LIT_REDUCTION:
utils.debug(literals)
#=====================================================================
# two scalar numeric operands
if (l[1] in ['^', '*', '/', '+', '-']
and isNextLitType(['float', 'int'])
and isPrevLitType(['float', 'int'])):
op1 = popPrevLit()
op2 = popNextLit()
if l[1] == '^':
                            if 'float' in [op1[0], op2[0]] and op2[1] > 0:
ty = 'float'
else:
ty = 'int'
literals[i] = [ty, pow(op1[1], op2[1])]
elif l[1] == '*':
                            if 'float' in [op1[0], op2[0]]:
ty = 'float'
else:
ty = 'int'
literals[i] = [ty, op1[1] * op2[1]]
elif l[1] == '/':
literals[i] = ['float', op1[1]/op2[1]]
elif l[1] == '+':
                            if 'float' in [op1[0], op2[0]]:
ty = 'float'
else:
ty = 'int'
literals[i] = [ty, op1[1] + op2[1]]
elif l[1] == '-':
                            if 'float' in [op1[0], op2[0]]:
ty = 'float'
else:
ty = 'int'
literals[i] = [ty, op1[1] - op2[1]]
#=====================================================================
# plus and minus for points
elif (l[1] in ['+', '-'] and isNextLitType('point')
and isPrevLitType('point')):
op1 = popPrevLit()
op2 = popNextLit()
if l[1] == '+':
                            literals[i] = ['point', [p1 + p2 for p1, p2 in zip(op1[1], op2[1])]]
elif l[1] == '-':
                            literals[i] = ['point', [p1 - p2 for p1, p2 in zip(op1[1], op2[1])]]
#=====================================================================
# plus operator for strings
elif l[1] == '+' and (isNextLitType('string')
and not isPrevLitType('name')
or (isPrevLitType('string'))
and not isNextLitType('name')):
op1 = popPrevLit()
op2 = popNextLit()
if op1[0] == 'int':
op1[1] = str(int(op1[1]))
else:
op1[1] = str(op1[1])
if op2[0] == 'int':
op2[1] = str(int(op2[1]))
else:
op2[1] = str(op2[1])
literals[i] = ['string', op1[1] + op2[1]]
#=====================================================================
# plus and minus as unary operators for numbers
elif l[1] in ['+', '-'] and isNextLitType(['float', 'int']):
op = popNextLit()
if l[1] == '+':
literals[i] = op
elif l[1] == '-':
literals[i] = [op[0], -op[1]]
#=====================================================================
# geometrical arithmetical operations
elif(l[1] in ['+', '-', '*'] and isPrevLitType('shape')
and isNextLitType('shape')):
op1 = popPrevLit()
op2 = popNextLit()
if l[1] == '+':
literals[i] = ['shape', op1[1].union(op2[1])]
elif l[1] == '-':
literals[i] = ['shape', op1[1].substract(op2[1])]
elif l[1] == '*':
literals[i] = ['shape', op1[1].intersect(op2[1])]
#=====================================================================
# point start, sep and end operators
elif l[1] == 'pstart' and isNextLitType(['float', 'int']):
op = popNextLit()
literals[i] = ["point-x", op[1]]
elif l[1] == 'psep' and isPrevLitType('point-x') and isNextLitType('point-y'):
op1 = popPrevLit()
op2 = popNextLit()
literals[i] = ["point", (op1[1], op2[1])]
elif l[1] == 'pend' and isPrevLitType(['float', 'int']):
op = popPrevLit()
literals[i] = ["point-y", op[1]]
#=====================================================================
# dot operator for imported shapes
elif(l[1] == '.' and isNextLitType('import')
and isPrevLitType('name')):
op1 = popPrevLit()
op2 = popNextLit()
largs, dargs = op2[2]
obj = _copy.deepcopy(self._root.importDict[op1[1]].shapeDict[op2[1]])
shape = self._instanciateShape(obj, largs, dargs)
utils.debug('self._literals['+str(i)+'] = ["shape", '+str(shape)+']')
literals[i] = ['shape', shape]
#=====================================================================
# dot operator for functions
elif(l[1] == '.' and isNextLitType('func')
and (viewNextLit()[1].check(viewPrevLit())
or (isPrevLitType('operator')
and viewPrevLit()[1] in ['pend', 'point-y']))):
if viewNextLit()[1].check(viewPrevLit()):
op1 = popPrevLit()
op2 = popNextLit()
literals[i] = op2[1](op1)
#=====================================================================
# argument list operator
elif l[1] == ',':
op1 = popPrevLit()
op2 = popNextLit()
if op1 is None:
l1 = []
elif op1[0] == 'argumentlist':
l1 = op1[1]
else:
l1 = [list(op1)]
if op2 is None:
l2 = []
elif op2[0] == 'argumentlist':
l2 = op2[1]
else:
l2 = [list(op2)]
literals[i] = ['argumentlist', l1+l2]
#=====================================================================
# assignment operator
elif l[1] == '=' and isPrevLitType('name'):
op1 = popPrevLit()
op2 = popNextLit()
literals[i] = ['assignment', [op1[1], op2]]
#=====================================================================
# make operator that creates shape refs
elif (l[1] == 'make'
and isPrevLitType('paramshaperef')
and isNextLitType('argumentlist')):
op1 = popPrevLit()
op2 = popNextLit()
paramSym = op1[1]
symParams = [v[1] for v in op2[1]]
utils.debug("symbol name pattern:", paramSym[0]['name_pattern'],
"params:", symParams)
symInstanceName = paramSym[0]['name_pattern'].format(*symParams)
if symInstanceName in self._root.gdsLib.cells.keys():
sym = self._root.gdsLib.cells[symInstanceName]
else:
_gdspy.current_library = self._root.gdsLib
sym = _gdspy.Cell(symInstanceName)
self._root.gdsLib.add(sym)
if len(list(sym)) == 0:
for section in paramSym:
tree = _copy.deepcopy(section['tree'])
# replace root reference with true reference:
tree._root = section['tree']._root
layer = section['layer']
argNames = section['args']
argdict = {k: self._py2lit(v) for k, v in zip(argNames, symParams)}
unresolvedNames = tree.resolveNames(argdict)
tree.evaluate()
if tree._result[0] != 'none':
shapeResult = False
try:
s = tree.getShape()
shapeResult = True
except ValueError:
refs = tree.getShaperef()
if shapeResult:
if s is None:
if unresolvedNames:
raise ValueError("Unresolved name(s) in layer shapes: "
+", ".join(['"'+n+'"' for n in unresolvedNames]))
else:
raise ValueError("Unexpected 'None'-shape found after instanciation "
+"of parametric symbol:\n"+str(tree))
shape = s._shape
if not shape is None:
if hasattr(shape, "layer"):
shape.layer = layer
elif hasattr(shape, "layers"):
shape.layers = [layer for _ in range(len(shape.layers))]
sym.add(shape)
else:
for ref in refs:
sym.add(ref)
# add created sym to all parents
# TODO: it would proably be better to use the 'importSymbols' of
# the PlsScript instance just before 'write_gds' is called.
# Otherwise layer transformation will not work, also the
# 'parent' attriute is unnecessary, we have importDict
# already...
parent = self._root.parent
while parent is not None:
_gdspy.current_library = parent.gdsLib
if sym.name not in parent.gdsLib:
parent.gdsLib.add(sym)
parent = parent.parent
_gdspy.current_library = self._root.gdsLib
literals[i] = ['shaperef', _gdspy.CellReference(sym)]
else:
if viewPrevLit():
t1 = viewPrevLit()
else:
t1 = 'None'
if viewNextLit():
t2 = viewNextLit()
else:
t2 = 'None'
if _PRINT_LIT_REDUCTION:
utils.debug("parsing paused...")
utils.debug()
raise ValueError("Illegal operands for operator '{}': {} and {}".format(l[1], t1, t2))
if _PRINT_LIT_REDUCTION:
utils.debug("applied operator:", l[1])
utils.debug()
i += 1
if _PRINT_LIT_REDUCTION:
utils.debug(literals)
utils.debug("Done reducing.")
utils.debug()
if (len(self._children[0]._literals) > 1
and not all([lit[0] == 'shaperef'
for lit in self._children[0]._literals])
and not any([lit[0] == 'paramshaperef'
for lit in self._children[0]._literals])):
raise ValueError("Syntax error.")
if len(self._children[0]._literals) == 1 and self._children[0]._literals[0][0] != 'shaperef':
self._result = self._children[0]._literals[0]
else:
self._result = self._children[0]._literals
def resolveNames(self, names):
unresolvedNames = []
# magic names:
names["__FILENAME__"] = ["string", _re.sub('\..*$', '', _os.path.basename(self._root.path))]
names["__HASH__"] = ["string", self._root.hash]
names["__DATE__"] = ["string", _time.strftime("%d.%m.%Y")]
names["__TIME__"] = ["string", _time.strftime("%H:%M")]
# constants:
names["True"] = ['int', 1]
names["False"] = ['int', 0]
for child in self._children:
if type(child) is CallTree:
child.resolveNames(names)
def resolveArglist(lit):
unresolvedNames = []
if lit[0] == 'argumentlist':
for i, sublit in enumerate(lit[1]):
if sublit[0] == 'assignment':
unresolvedNames.extend(resolveArglist(sublit[1][1]))
elif sublit[0] == 'name':
unresolvedNames.extend(resolveArglist(sublit))
elif lit[0] == 'name':
if lit[1] in names:
lit[0] = names[lit[1]][0]
lit[1] = _copy.deepcopy(names[lit[1]][1])
else:
                    unresolvedNames.append(lit[1])
return unresolvedNames
if hasattr(self, '_result'):
unresolvedNames.extend(resolveArglist(self._result))
if hasattr(self, '_literals'):
for literal in self._literals:
if literal[0] == 'name':
if literal[1] in names:
literal[0] = names[literal[1]][0]
literal[1] = _copy.deepcopy(names[literal[1]][1])
else:
unresolvedNames.append(literal[1])
elif literal[0] == 'tree':
unresolvedNames.extend(literal[1]['tree'].resolveNames(names))
for name in names:
if name in literal[1]['args']:
literal[1]['args'].delete(name)
if len(literal[1]['args']) == 0:
literal[1]['tree'].evaluate()
utils.debug('Replacing: '+str(literal[1]['tree'])+' -> ["shape", '
+literal[1]['tree'].getShape()+ ']')
literal[0] = 'shape'
literal[1] = _copy.deepcopy(literal[1]['tree'].getShape())
else:
unresolvedNames.extend(resolveArglist(literal))
return unresolvedNames
def getShape(self, ref=False):
utils.debug('getShape() called:')
if hasattr(self, "_literals"):
utils.debug(' > self._literals = '+str(self._literals))
else:
utils.debug(' > self._literals = <undefined>')
if hasattr(self, "_result"):
utils.debug(' > self._result = '+str(self._result))
else:
utils.debug(' > self._result = <undefined>')
if hasattr(self, "_result"):
if ref:
if not all([r[0]=='shaperef' for r in self._result]):
raise ValueError('Expected only "shaperef" types but found: '+str(self._result))
return [r[1] for r in self._result]
else:
if self._result[0] != 'shape':
raise ValueError('Expected "shape" type result but found: '+str(self._result))
return self._result[1]
return None
def getShaperef(self):
return self.getShape(ref=True)
def __str__(self):
return self._strRec()
def __repr__(self):
return self._strRec()
def _strRec(self, level=0):
if hasattr(self, "_literals"):
hasLits = "'yes'"
else:
hasLits = "'no'"
if hasattr(self, "_result"):
hasRes = "'yes'"
else:
hasRes = "'no'"
result = (" "*level + "<CallTree object; func='"
+ self._func+"'; literals? "
+ hasLits+"; result? "
+ hasRes+">\n")
for child in self._children:
if type(child) is str:
result += " "*(level+1) + "'" + _re.sub("\s+", " ", child.strip()) + "'\n"
else:
result += child._strRec(level+1)
return result
|
the-stack_0_10982 | # -*- coding: utf-8 -*-
# Copyright (c) 2016-2021 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
import inspect
from pandapower.auxiliary import _check_bus_index_and_print_warning_if_high, \
_check_gen_index_and_print_warning_if_high, _init_runpp_options, _init_rundcopp_options, \
_init_rundcpp_options, _init_runopp_options, _internal_stored
from pandapower.opf.validate_opf_input import _check_necessary_opf_parameters
from pandapower.optimal_powerflow import _optimal_powerflow
from pandapower.powerflow import _powerflow, _recycled_powerflow
try:
import pplog as logging
except ImportError:
import logging
logger = logging.getLogger(__name__)
def set_user_pf_options(net, overwrite=False, **kwargs):
"""
This function sets the 'user_pf_options' dict for net. These options overrule
net.__internal_options once they are added to net. These options are used in configuration of
load flow calculation.
At the same time, user-defined arguments for pandapower.runpp() always have a higher priority.
To remove user_pf_options, set overwrite=True and provide no additional arguments
:param net: pandaPower network
:param overwrite: specifies whether the user_pf_options is removed before setting new options
:param kwargs: load flow options, e. g. tolerance_mva = 1e-3
:return: None
"""
standard_parameters = ['calculate_voltage_angles', 'trafo_model', 'check_connectivity', 'mode',
'copy_constraints_to_ppc', 'switch_rx_ratio', 'enforce_q_lims',
'recycle', 'voltage_depend_loads', 'consider_line_temperature', 'delta',
'trafo3w_losses', 'init_vm_pu', 'init_va_degree', 'init_results',
'tolerance_mva', 'trafo_loading', 'numba', 'ac', 'algorithm',
'max_iteration', 'v_debug', 'run_control']
if overwrite or 'user_pf_options' not in net.keys():
net['user_pf_options'] = dict()
net.user_pf_options.update({key: val for key, val in kwargs.items()
if key in standard_parameters})
additional_kwargs = {key: val for key, val in kwargs.items()
if key not in standard_parameters}
# this part is to inform user and to make typos in parameters visible
if len(additional_kwargs) > 0:
logger.info('parameters %s are not in the list of standard options' % list(
additional_kwargs.keys()))
net.user_pf_options.update(additional_kwargs)
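# A minimal usage sketch (illustrative only; assumes an existing pandapower net,
# e.g. one of the example grids from pandapower.networks):
#
#     import pandapower.networks as pn
#     net = pn.example_simple()
#     set_user_pf_options(net, tolerance_mva=1e-6, init="dc")
#     runpp(net)  # stored user options apply unless overruled by explicit arguments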
def runpp(net, algorithm='nr', calculate_voltage_angles="auto", init="auto",
max_iteration="auto", tolerance_mva=1e-8, trafo_model="t",
trafo_loading="current", enforce_q_lims=False, check_connectivity=True,
voltage_depend_loads=True, consider_line_temperature=False,
run_control=False, **kwargs):
"""
Runs a power flow
INPUT:
**net** - The pandapower format network
OPTIONAL:
**algorithm** (str, "nr") - algorithm that is used to solve the power flow problem.
The following algorithms are available:
- "nr" Newton-Raphson (pypower implementation with numba accelerations)
- "iwamoto_nr" Newton-Raphson with Iwamoto multiplier (maybe slower than NR but more robust)
- "bfsw" backward/forward sweep (specially suited for radial and weakly-meshed networks)
- "gs" gauss-seidel (pypower implementation)
- "fdbx" fast-decoupled (pypower implementation)
- "fdxb" fast-decoupled (pypower implementation)
**calculate_voltage_angles** (str or bool, "auto") - consider voltage angles in loadflow calculation
If True, voltage angles of ext_grids and transformer shifts are considered in the
loadflow calculation. Considering the voltage angles is only necessary in meshed
networks that are usually found in higher voltage levels. calculate_voltage_angles
in "auto" mode defaults to:
- True, if the network voltage level is above 70 kV
- False otherwise
The network voltage level is defined as the maximum rated voltage of any bus in the network that
is connected to a line.
**init** (str, "auto") - initialization method of the loadflow
pandapower supports four methods for initializing the loadflow:
- "auto" - init defaults to "dc" if calculate_voltage_angles is True or "flat" otherwise
- "flat"- flat start with voltage of 1.0pu and angle of 0° at all PQ-buses and 0° for PV buses as initial solution, the slack bus is initialized with the values provided in net["ext_grid"]
- "dc" - initial DC loadflow before the AC loadflow. The results of the DC loadflow are used as initial solution for the AC loadflow. Note that the DC loadflow only calculates voltage angles at PQ and PV buses, voltage magnitudes are still flat started.
- "results" - voltage vector of last loadflow from net.res_bus is used as initial solution. This can be useful to accelerate convergence in iterative loadflows like time series calculations.
Considering the voltage angles might lead to non-convergence of the power flow in flat start.
That is why in "auto" mode, init defaults to "dc" if calculate_voltage_angles is True or "flat" otherwise
**max_iteration** (int, "auto") - maximum number of iterations carried out in the power flow algorithm.
In "auto" mode, the default value depends on the power flow solver:
- 10 for "nr"
- 100 for "bfsw"
- 1000 for "gs"
- 30 for "fdbx"
- 30 for "fdxb"
**tolerance_mva** (float, 1e-8) - loadflow termination condition referring to P / Q mismatch of node power in MVA
**trafo_model** (str, "t") - transformer equivalent circuit model
pandapower provides two equivalent circuit models for the transformer:
- "t" - transformer is modeled as equivalent with the T-model.
- "pi" - transformer is modeled as equivalent PI-model. This is not recommended, since it is less exact than the T-model. It is only recommended for valdiation with other software that uses the pi-model.
**trafo_loading** (str, "current") - mode of calculation for transformer loading
Transformer loading can be calculated relative to the rated current or the rated power. In both cases the overall transformer loading is defined as the maximum loading on the two sides of the transformer.
- "current"- transformer loading is given as ratio of current flow and rated current of the transformer. This is the recommended setting, since thermal as well as magnetic effects in the transformer depend on the current.
- "power" - transformer loading is given as ratio of apparent power flow to the rated apparent power of the transformer.
**enforce_q_lims** (bool, False) - respect generator reactive power limits
If True, the reactive power limits in net.gen.max_q_mvar/min_q_mvar are respected in the
loadflow. This is done by running a second loadflow if reactive power limits are
violated at any generator, so that the runtime for the loadflow will increase if reactive
power has to be curtailed.
Note: enforce_q_lims only works if algorithm="nr"!
**check_connectivity** (bool, True) - Perform an extra connectivity test after the conversion from pandapower to PYPOWER
            If True, an extra connectivity test based on SciPy Compressed Sparse Graph Routines is performed.
If check finds unsupplied buses, they are set out of service in the ppc
**voltage_depend_loads** (bool, True) - consideration of voltage-dependent loads. If False, net.load.const_z_percent and net.load.const_i_percent are not considered, i.e. net.load.p_mw and net.load.q_mvar are considered as constant-power loads.
**consider_line_temperature** (bool, False) - adjustment of line impedance based on provided
line temperature. If True, net.line must contain a column "temperature_degree_celsius".
The temperature dependency coefficient alpha must be provided in the net.line.alpha
column, otherwise the default value of 0.004 is used
**KWARGS:
**numba** (bool, True) - Activation of numba JIT compiler in the newton solver
If set to True, the numba JIT compiler is used to generate matrices for the powerflow,
which leads to significant speed improvements.
**switch_rx_ratio** (float, 2) - rx_ratio of bus-bus-switches. If impedance is zero, buses connected by a closed bus-bus switch are fused to model an ideal bus. Otherwise, they are modelled as branches with resistance defined as z_ohm column in switch table and this parameter
**delta_q** - Reactive power tolerance for option "enforce_q_lims" in kvar - helps convergence in some cases.
**trafo3w_losses** - defines where open loop losses of three-winding transformers are considered. Valid options are "hv", "mv", "lv" for HV/MV/LV side or "star" for the star point.
**v_debug** (bool, False) - if True, voltage values in each newton-raphson iteration are logged in the ppc
**init_vm_pu** (string/float/array/Series, None) - Allows to define initialization specifically for voltage magnitudes. Only works with init == "auto"!
- "auto": all buses are initialized with the mean value of all voltage controlled elements in the grid
- "flat" for flat start from 1.0
- "results": voltage magnitude vector is taken from result table
- a float with which all voltage magnitudes are initialized
- an iterable with a voltage magnitude value for each bus (length and order has to match with the buses in net.bus)
- a pandas Series with a voltage magnitude value for each bus (indexes have to match the indexes in net.bus)
**init_va_degree** (string/float/array/Series, None) - Allows to define initialization specifically for voltage angles. Only works with init == "auto"!
- "auto": voltage angles are initialized from DC power flow if angles are calculated or as 0 otherwise
- "dc": voltage angles are initialized from DC power flow
- "flat" for flat start from 0
- "results": voltage angle vector is taken from result table
- a float with which all voltage angles are initialized
- an iterable with a voltage angle value for each bus (length and order has to match with the buses in net.bus)
- a pandas Series with a voltage angle value for each bus (indexes have to match the indexes in net.bus)
**recycle** (dict, none) - Reuse of internal powerflow variables for time series calculation
Contains a dict with the following parameters:
bus_pq: If True PQ values of buses are updated
trafo: If True trafo relevant variables, e.g., the Ybus matrix, is recalculated
gen: If True Sbus and the gen table in the ppc are recalculated
**neglect_open_switch_branches** (bool, False) - If True no auxiliary buses are created for branches when switches are opened at the branch. Instead branches are set out of service
"""
# if dict 'user_pf_options' is present in net, these options overrule the net.__internal_options
# except for parameters that are passed by user
recycle = kwargs.get("recycle", None)
if isinstance(recycle, dict) and _internal_stored(net):
_recycled_powerflow(net, **kwargs)
return
if run_control and net.controller.in_service.any():
from pandapower.control import run_control
parameters = {**locals(), **kwargs}
# disable run control for inner loop to avoid infinite loop
parameters["run_control"] = False
run_control(**parameters)
else:
passed_parameters = _passed_runpp_parameters(locals())
_init_runpp_options(net, algorithm=algorithm, calculate_voltage_angles=calculate_voltage_angles,
init=init, max_iteration=max_iteration, tolerance_mva=tolerance_mva,
trafo_model=trafo_model, trafo_loading=trafo_loading,
enforce_q_lims=enforce_q_lims, check_connectivity=check_connectivity,
voltage_depend_loads=voltage_depend_loads,
consider_line_temperature=consider_line_temperature,
passed_parameters=passed_parameters, **kwargs)
_check_bus_index_and_print_warning_if_high(net)
_check_gen_index_and_print_warning_if_high(net)
_powerflow(net, **kwargs)
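# A minimal call sketch (illustrative only; 'net' is any pandapower network, see the
# sketch after set_user_pf_options above):
#
#     runpp(net)                                    # Newton-Raphson with default options
#     runpp(net, algorithm="bfsw", init="dc", max_iteration=100)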
def rundcpp(net, trafo_model="t", trafo_loading="current", recycle=None, check_connectivity=True,
switch_rx_ratio=2, trafo3w_losses="hv", **kwargs):
"""
Runs PANDAPOWER DC Flow
INPUT:
**net** - The pandapower format network
OPTIONAL:
**trafo_model** (str, "t") - transformer equivalent circuit model
pandapower provides two equivalent circuit models for the transformer:
- "t" - transformer is modeled as equivalent with the T-model. This is consistent with PowerFactory and is also more accurate than the PI-model. We recommend using this transformer model.
- "pi" - transformer is modeled as equivalent PI-model. This is consistent with Sincal, but the method is questionable since the transformer is physically T-shaped. We therefore recommend the use of the T-model.
**trafo_loading** (str, "current") - mode of calculation for transformer loading
Transformer loading can be calculated relative to the rated current or the rated power. In both cases the overall transformer loading is defined as the maximum loading on the two sides of the transformer.
- "current"- transformer loading is given as ratio of current flow and rated current of the transformer. This is the recommended setting, since thermal as well as magnetic effects in the transformer depend on the current.
- "power" - transformer loading is given as ratio of apparent power flow to the rated apparent power of the transformer.
**check_connectivity** (bool, False) - Perform an extra connectivity test after the conversion from pandapower to PYPOWER
            If True, an extra connectivity test based on SciPy Compressed Sparse Graph Routines is performed.
If check finds unsupplied buses, they are put out of service in the PYPOWER matrix
**switch_rx_ratio** (float, 2) - rx_ratio of bus-bus-switches. If impedance is zero, buses connected by a closed bus-bus switch are fused to model an ideal bus. Otherwise, they are modelled as branches with resistance defined as z_ohm column in switch table and this parameter
**trafo3w_losses** (str, "hv") - defines where open loop losses of three-winding transformers are considered. Valid options are "hv", "mv", "lv" for HV/MV/LV side or "star" for the star point.
****kwargs** - options to use for PYPOWER.runpf
"""
_init_rundcpp_options(net, trafo_model=trafo_model, trafo_loading=trafo_loading,
recycle=recycle, check_connectivity=check_connectivity,
switch_rx_ratio=switch_rx_ratio, trafo3w_losses=trafo3w_losses, **kwargs)
_check_bus_index_and_print_warning_if_high(net)
_check_gen_index_and_print_warning_if_high(net)
_powerflow(net, **kwargs)
def runopp(net, verbose=False, calculate_voltage_angles=True, check_connectivity=True,
suppress_warnings=True, switch_rx_ratio=2, delta=1e-10, init="flat", numba=True,
trafo3w_losses="hv", consider_line_temperature=False, **kwargs):
"""
Runs the pandapower Optimal Power Flow.
Flexibilities, constraints and cost parameters are defined in the pandapower element tables.
    Flexibilities can be defined in net.sgen / net.gen / net.load / net.storage / net.ext_grid
    net.sgen.controllable signals if a static generator is controllable. If False,
    the active and reactive power are assigned as in a normal power flow. If True, the following
    flexibilities apply:
- net.gen.min_p_mw / net.gen.max_p_mw
- net.gen.min_q_mvar / net.gen.max_q_mvar
- net.sgen.min_p_mw / net.sgen.max_p_mw
- net.sgen.min_q_mvar / net.sgen.max_q_mvar
- net.dcline.max_p_mw
- net.dcline.min_q_to_mvar / net.dcline.max_q_to_mvar / net.dcline.min_q_from_mvar / net.dcline.max_q_from_mvar
- net.ext_grid.min_p_mw / net.ext_grid.max_p_mw
- net.ext_grid.min_q_mvar / net.ext_grid.max_q_mvar
- net.load.min_p_mw / net.load.max_p_mw
- net.load.min_q_mvar / net.load.max_q_mvar
- net.storage.min_p_mw / net.storage.max_p_mw
- net.storage.min_q_mvar / net.storage.max_q_mvar
    Controllable loads behave just like controllable static generators. It must be stated explicitly whether they are
    controllable; otherwise, they are not respected as flexibilities.
    DC lines are controllable by default
    Network constraints can be defined for buses, lines and transformers via the following columns:
- net.bus.min_vm_pu / net.bus.max_vm_pu
- net.line.max_loading_percent
- net.trafo.max_loading_percent
- net.trafo3w.max_loading_percent
    If the external grid is controllable, the voltage setpoint of the external grid can be optimized within the
voltage constraints by the OPF. The same applies to the voltage setpoints of the controllable generator elements.
How these costs are combined into a cost function depends on the cost_function parameter.
INPUT:
**net** - The pandapower format network
OPTIONAL:
**verbose** (bool, False) - If True, some basic information is printed
**suppress_warnings** (bool, True) - suppress warnings in pypower
If set to True, warnings are disabled during the loadflow. Because of the way data is
processed in pypower, ComplexWarnings are raised during the loadflow.
These warnings are suppressed by this option, however keep in mind all other pypower
warnings are suppressed, too.
**init** (str, "flat") - init of starting opf vector. Options are "flat" or "pf"
Starting solution vector (x0) for opf calculations is determined by this flag. Options are:
"flat" (default): starting vector is (upper bound - lower bound) / 2
"pf": a power flow is executed prior to the opf and the pf solution is the starting vector. This may improve
            convergence, but takes a longer runtime (which is probably negligible for opf calculations)
**delta** (float, 1e-10) - power tolerance
**trafo3w_losses** (str, "hv") - defines where open loop losses of three-winding transformers are considered. Valid options are "hv", "mv", "lv" for HV/MV/LV side or "star" for the star point.
**consider_line_temperature** (bool, False) - adjustment of line impedance based on provided\
line temperature. If True, net.line must contain a column "temperature_degree_celsius".\
The temperature dependency coefficient alpha must be provided in the net.line.alpha\
column, otherwise the default value of 0.004 is used
**kwargs** - Pypower / Matpower keyword arguments:
- OPF_VIOLATION (5e-6) constraint violation tolerance
- PDIPM_COSTTOL (1e-6) optimality tolerance
- PDIPM_GRADTOL (1e-6) gradient tolerance
- PDIPM_COMPTOL (1e-6) complementarity condition (inequality) tolerance
            - PDIPM_FEASTOL (set to OPF_VIOLATION if not specified) feasibility (equality) tolerance
- PDIPM_MAX_IT (150) maximum number of iterations
            - SCPDIPM_RED_IT (20) maximum number of step size reductions per iteration
"""
_check_necessary_opf_parameters(net, logger)
_init_runopp_options(net, calculate_voltage_angles=calculate_voltage_angles,
check_connectivity=check_connectivity,
switch_rx_ratio=switch_rx_ratio, delta=delta, init=init, numba=numba,
trafo3w_losses=trafo3w_losses,
consider_line_temperature=consider_line_temperature, **kwargs)
_check_bus_index_and_print_warning_if_high(net)
_check_gen_index_and_print_warning_if_high(net)
_optimal_powerflow(net, verbose, suppress_warnings, **kwargs)
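# Hedged usage sketch (not part of the original module): defining flexibilities,
# voltage/loading constraints and a linear generation cost before calling
# runopp. The element index, limits and cost values are illustrative only.
def _example_runopp():
    import pandapower as pp
    import pandapower.networks as pn
    net = pn.example_simple()
    net.gen["controllable"] = True
    net.gen["min_p_mw"], net.gen["max_p_mw"] = 0.0, 80.0
    net.gen["min_q_mvar"], net.gen["max_q_mvar"] = -30.0, 30.0
    net.bus["min_vm_pu"], net.bus["max_vm_pu"] = 0.95, 1.05
    net.line["max_loading_percent"] = 100.0
    pp.create_poly_cost(net, element=0, et="gen", cp1_eur_per_mw=10.0)
    runopp(net)
    return net.res_cost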
def rundcopp(net, verbose=False, check_connectivity=True, suppress_warnings=True,
switch_rx_ratio=0.5, delta=1e-10, trafo3w_losses="hv", **kwargs):
"""
    Runs the pandapower DC Optimal Power Flow.
Flexibilities, constraints and cost parameters are defined in the pandapower element tables.
Flexibilities for generators can be defined in net.sgen / net.gen.
net.sgen.controllable / net.gen.controllable signals if a generator is controllable. If False,
    the active and reactive power are assigned as in a normal power flow. If True, the following
flexibilities apply:
- net.sgen.min_p_mw / net.sgen.max_p_mw
- net.gen.min_p_mw / net.gen.max_p_mw
- net.load.min_p_mw / net.load.max_p_mw
    Network constraints can be defined for buses, lines and transformers via the following columns:
- net.line.max_loading_percent
- net.trafo.max_loading_percent
- net.trafo3w.max_loading_percent
INPUT:
**net** - The pandapower format network
OPTIONAL:
**verbose** (bool, False) - If True, some basic information is printed
**suppress_warnings** (bool, True) - suppress warnings in pypower
If set to True, warnings are disabled during the loadflow. Because of the way data is
processed in pypower, ComplexWarnings are raised during the loadflow.
These warnings are suppressed by this option, however keep in mind all other pypower
warnings are suppressed, too.
**delta** (float, 1e-10) - power tolerance
**trafo3w_losses** (str, "hv") - defines where open loop losses of three-winding transformers are considered. Valid options are "hv", "mv", "lv" for HV/MV/LV side or "star" for the star point.
"""
    if (not net.sgen.empty) and ("controllable" not in net.sgen.columns):
        logger.warning('Warning: Please specify sgen["controllable"]\n')
    if (not net.load.empty) and ("controllable" not in net.load.columns):
        logger.warning('Warning: Please specify load["controllable"]\n')
_init_rundcopp_options(net, check_connectivity=check_connectivity,
switch_rx_ratio=switch_rx_ratio, delta=delta,
trafo3w_losses=trafo3w_losses, **kwargs)
_check_bus_index_and_print_warning_if_high(net)
_check_gen_index_and_print_warning_if_high(net)
_optimal_powerflow(net, verbose, suppress_warnings, **kwargs)
def _passed_runpp_parameters(local_parameters):
"""
Internal function to distinguish arguments for pandapower.runpp() that are explicitly passed by
the user.
:param local_parameters: locals() in the runpp() function
:return: dictionary of explicitly passed parameters
"""
net = local_parameters.pop("net")
if not ("user_pf_options" in net.keys() and len(net.user_pf_options) > 0):
return None
try:
default_parameters = {k: v.default for k, v in inspect.signature(runpp).parameters.items()}
    except (TypeError, ValueError):  # inspect.signature may fail on some callables
args, varargs, keywords, defaults = inspect.getfullargspec(runpp)
default_parameters = dict(zip(args[-len(defaults):], defaults))
default_parameters.update({"init": "auto"})
passed_parameters = {
key: val for key, val in local_parameters.items()
if key in default_parameters.keys() and val != default_parameters.get(key, None)}
return passed_parameters
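# Illustration (not executed) of the default-extraction step above: for a
# signature like runpp(net, algorithm="nr", init="auto", ...), the dict
# comprehension over inspect.signature(runpp).parameters yields
# {"net": inspect.Parameter.empty, "algorithm": "nr", "init": "auto", ...},
# so only arguments whose passed value differs from that default end up in
# passed_parameters and can override stored user_pf_options.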
|
the-stack_0_10983 | """
Given an integer array with all positive numbers and no duplicates, find the
number of possible combinations that add up to a positive integer target.
Example:
nums = [1, 2, 3]
target = 4
The possible combination ways are:
(1, 1, 1, 1)
(1, 1, 2)
(1, 2, 1)
(1, 3)
(2, 1, 1)
(2, 2)
(3, 1)
Note that different sequences are counted as different combinations.
Therefore the output is 7.
Follow up:
What if negative numbers are allowed in the given array?
How does it change the problem?
What limitations do we need to add to the question to allow negative numbers?
Credits:
Special thanks to @pbrother for adding this problem and creating all test
cases.
"""
class Solution(object):
def combinationSum4(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
if nums is None or nums == []:
return 0
nums.sort()
dp = [0] * (target + 1)
dp[0] = 1
for i in range(target + 1):
for j in nums:
if i + j <= target:
dp[i + j] += dp[i]
else:
break
return dp[-1]
a = Solution()
print(a.combinationSum4([1,2,3],4))
|
the-stack_0_10985 | import numpy
from chainer.functions.connection import bilinear
from chainer import link
class Bilinear(link.Link):
"""Bilinear layer that performs tensor multiplication.
Bilinear is a primitive link that wraps the
:func:`~chainer.functions.bilinear` functions. It holds parameters ``W``,
``V1``, ``V2``, and ``b`` corresponding to the arguments of
:func:`~chainer.functions.bilinear`.
Args:
left_size (int): Dimension of input vector :math:`e^1` (:math:`J`)
right_size (int): Dimension of input vector :math:`e^2` (:math:`K`)
out_size (int): Dimension of output vector :math:`y` (:math:`L`)
nobias (bool): If ``True``, parameters ``V1``, ``V2``, and ``b`` are
omitted.
initialW (3-D numpy array): Initial value of :math:`W`.
Shape of this argument must be
``(left_size, right_size, out_size)``. If ``None``,
:math:`W` is initialized by centered Gaussian distribution properly
scaled according to the dimension of inputs and outputs.
initial_bias (tuple): Initial values of :math:`V^1`, :math:`V^2`
            and :math:`b`. The length of this argument must be 3.
Each element of this tuple must have the shapes of
``(left_size, output_size)``, ``(right_size, output_size)``,
and ``(output_size,)``, respectively. If ``None``, :math:`V^1`
and :math:`V^2` is initialized by scaled centered Gaussian
distributions and :math:`b` is set to :math:`0`.
.. seealso:: See :func:`chainer.functions.bilinear` for details.
Attributes:
W (~chainer.Variable): Bilinear weight parameter.
V1 (~chainer.Variable): Linear weight parameter for the first argument.
V2 (~chainer.Variable): Linear weight parameter for the second
argument.
b (~chainer.Variable): Bias parameter.
"""
def __init__(self, left_size, right_size, out_size, nobias=False,
initialW=None, initial_bias=None):
super(Bilinear, self).__init__(W=(left_size, right_size, out_size))
self.in_sizes = (left_size, right_size)
self.nobias = nobias
if initialW is not None:
assert initialW.shape == self.W.data.shape
self.W.data[...] = initialW
else:
# TODO(Kenta OONO): I do not know appropriate way of
# initializing weights in tensor network.
# This initialization is a modification of
# that of Linear function.
in_size = left_size * right_size * out_size
self.W.data[...] = numpy.random.normal(
0, numpy.sqrt(1. / in_size), self.W.data.shape)
if not self.nobias:
self.add_param('V1', (left_size, out_size))
self.add_param('V2', (right_size, out_size))
self.add_param('b', out_size)
if initial_bias is not None:
V1, V2, b = initial_bias
assert V1.shape == self.V1.data.shape
assert V2.shape == self.V2.data.shape
assert b.shape == self.b.data.shape
self.V1.data[...] = V1
self.V2.data[...] = V2
self.b.data[...] = b
else:
self.V1.data[...] = numpy.random.normal(
0, numpy.sqrt(1. / left_size), (left_size, out_size))
self.V2.data[...] = numpy.random.normal(
0, numpy.sqrt(1. / right_size), (right_size, out_size))
self.b.data.fill(0)
def __call__(self, e1, e2):
"""Applies the bilinear function to inputs and the internal parameters.
Args:
e1 (~chainer.Variable): Left input.
e2 (~chainer.Variable): Right input.
Returns:
~chainer.Variable: Output variable.
"""
if self.nobias:
return bilinear.bilinear(e1, e2, self.W)
else:
return bilinear.bilinear(e1, e2, self.W, self.V1, self.V2, self.b)
def zero_grads(self):
# Left for backward compatibility
self.zerograds()
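# Hedged usage sketch (not part of the original module): applying the Bilinear
# link to a batch of input pairs. The sizes and random inputs are illustrative.
def _example_bilinear():
    bl = Bilinear(left_size=3, right_size=4, out_size=5)
    e1 = numpy.random.randn(8, 3).astype(numpy.float32)
    e2 = numpy.random.randn(8, 4).astype(numpy.float32)
    y = bl(e1, e2)  # chainer.Variable with shape (8, 5)
    return y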
|
the-stack_0_10986 | import sublime
import sublime_plugin
import subprocess
from .path_utils import path_for_view
SCRIPT_PATH = 'Packages/SublimeConfig/src/commands/open_current_directory_in_terminal.applescript'
def osascript(
*,
script,
args=[]
):
cmd = ['osascript', '-'] + args
proc = subprocess.Popen(
cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
return proc.communicate(input=script)
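# Hedged usage sketch (not part of the original plugin): calling the osascript
# helper directly with an inline AppleScript (macOS only). The script body and
# argument are illustrative; osascript reads the script from stdin via '-'.
def _example_osascript():
    script = b'on run argv\n  return item 1 of argv\nend run'
    out, err = osascript(script=script, args=['hello'])
    return out  # expected to echo the argument back, e.g. b'hello\n'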
class OpenCurrentDirectoryInTerminalCommand(sublime_plugin.TextCommand):
def run(self, edit):
directory, filename = path_for_view(self.view)
script = sublime.load_binary_resource(SCRIPT_PATH)
osascript(script=script, args=[directory])
|
the-stack_0_10987 | # Copyright (c) 2006-2014 LOGILAB S.A. (Paris, FRANCE) <[email protected]>
# Copyright (c) 2013-2014 Google, Inc.
# Copyright (c) 2013 [email protected] <[email protected]>
# Copyright (c) 2014-2017 Claudiu Popa <[email protected]>
# Copyright (c) 2014 Brett Cannon <[email protected]>
# Copyright (c) 2014 Arun Persaud <[email protected]>
# Copyright (c) 2015 Ionel Cristian Maries <[email protected]>
# Copyright (c) 2016 Moises Lopez <[email protected]>
# Copyright (c) 2017-2018 Bryce Guinta <[email protected]>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
"""utilities methods and classes for checkers
Base id of standard checkers (used in msg and report ids):
01: base
02: classes
03: format
04: import
05: misc
06: variables
07: exceptions
08: similar
09: design_analysis
10: newstyle
11: typecheck
12: logging
13: string_format
14: string_constant
15: stdlib
16: python3
17: refactoring
18-50: not yet used: reserved for future internal checkers.
51-99: perhaps used: reserved for external checkers
The raw_metrics checker has no number associated since it doesn't emit any
messages nor reports. XXX not true, emit a 07 report !
"""
import sys
import tokenize
import warnings
from typing import Any
from pylint.config import OptionsProviderMixIn
from pylint.reporters import diff_string
from pylint.utils import register_plugins
from pylint.interfaces import UNDEFINED
def table_lines_from_stats(stats, old_stats, columns):
"""get values listed in <columns> from <stats> and <old_stats>,
    and return a formatted list of values, designed to be given to a
ureport.Table object
"""
lines = []
for m_type in columns:
new = stats[m_type]
format = str # pylint: disable=redefined-builtin
if isinstance(new, float):
format = lambda num: "%.3f" % num
old = old_stats.get(m_type)
if old is not None:
diff_str = diff_string(old, new)
old = format(old)
else:
old, diff_str = "NC", "NC"
lines += (m_type.replace("_", " "), format(new), old, diff_str)
return lines
class BaseChecker(OptionsProviderMixIn):
"""base class for checkers"""
# checker name (you may reuse an existing one)
name = None # type: str
    # options level (0 will be displayed in --help, 1 in --long-help)
level = 1
    # ordered list of options to control the checker behaviour
options = () # type: Any
# messages issued by this checker
msgs = {} # type: Any
# reports issued by this checker
reports = () # type: Any
# mark this checker as enabled or not.
enabled = True
def __init__(self, linter=None):
"""checker instances should have the linter as argument
linter is an object implementing ILinter
"""
if self.name is not None:
self.name = self.name.lower()
OptionsProviderMixIn.__init__(self)
self.linter = linter
def add_message(
self,
msg_id,
line=None,
node=None,
args=None,
confidence=UNDEFINED,
col_offset=None,
):
"""add a message of a given type"""
self.linter.add_message(msg_id, line, node, args, confidence, col_offset)
# dummy methods implementing the IChecker interface
def open(self):
"""called before visiting project (i.e set of modules)"""
def close(self):
"""called after visiting project (i.e set of modules)"""
class BaseTokenChecker(BaseChecker):
"""Base class for checkers that want to have access to the token stream."""
def process_tokens(self, tokens):
"""Should be overridden by subclasses."""
raise NotImplementedError()
def initialize(linter):
"""initialize linter with checkers in this package """
register_plugins(linter, __path__[0])
__all__ = ("BaseChecker", "BaseTokenChecker", "initialize")
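# Hedged sketch (not part of this module): the minimal shape of a custom checker
# built on BaseChecker, roughly following the usual pylint plugin pattern. The
# message id, symbol and naming rule are illustrative; depending on the pylint
# version the class may also need to declare __implements__ = IAstroidChecker.
class _ExampleChecker(BaseChecker):
    """Warn about single-character module names."""

    name = "example-naming"
    msgs = {
        "W9901": (
            "Module name %r is a single character",
            "single-char-module-name",
            "Emitted when a module name is only one character long.",
        )
    }

    def visit_module(self, node):
        if len(node.name.rsplit(".", 1)[-1]) == 1:
            self.add_message("W9901", node=node, args=(node.name,))


def _example_register(linter):
    """What a plugin module would expose as its ``register`` entry point."""
    linter.register_checker(_ExampleChecker(linter))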
|
the-stack_0_10988 | """Splash_screen module."""
from PyQt6 import QtGui, QtCore, QtWidgets # type: ignore
from pineboolib.core.utils.utils_base import filedir
from pineboolib.core import settings
class SplashScreen(object):
"""Show a splashscreen to inform keep the user busy while Pineboo is warming up."""
_splash: "QtWidgets.QSplashScreen"
def __init__(self):
"""Inicialize."""
splash_path = filedir(
"./core/images/splashscreen/%s240.png"
% ("dbadmin" if settings.CONFIG.value("application/dbadmin_enabled") else "quick")
)
splash_pix = QtGui.QPixmap(splash_path)
self._splash = QtWidgets.QSplashScreen(
splash_pix, QtCore.Qt.WindowType.WindowStaysOnTopHint
)
self._splash.setMask(splash_pix.mask())
frame_geo = self._splash.frameGeometry()
primary_screen = QtGui.QGuiApplication.primaryScreen()
frame_geo.moveCenter(primary_screen.geometry().center())
self._splash.move(frame_geo.topLeft())
def showMessage(self, text: str) -> None:
"""Show a message into spalsh screen."""
self._splash.showMessage(
text, QtCore.Qt.AlignmentFlag.AlignLeft, QtCore.Qt.GlobalColor.white
)
def hide(self) -> None:
"""Hide splash screen."""
QtCore.QTimer.singleShot(1000, self._splash.hide)
def show(self) -> None:
"""Show splash screen."""
self._splash.show()
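# Hedged usage sketch (not part of the original module): typical lifecycle of
# the splash screen during application start-up. Requires a running
# QApplication; the message text is illustrative.
def _example_splash_usage():
    splash = SplashScreen()
    splash.show()
    splash.showMessage("Loading modules ...")
    # ... perform start-up work here ...
    splash.hide()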
|
the-stack_0_10989 | import torch
import torch.nn as nn
import torch.nn.functional as F
import src.data.data as data
import src.data.config as cfg
import src.models.utils as model_utils
import src.evaluate.utils as eval_utils
import src.train.batch as batch_utils
def make_sampler(sampler_type, opt, *args, **kwargs):
print("Initializing Greedy Sampler")
return GreedySampler(opt, *args, **kwargs)
class Sampler():
def __init__(self, opt, data_loader, batch_mode=False):
# Token on which to end sampling
self.end_token = data_loader.vocab_encoder[data.end_token]
self.opt = opt
def generate_sequence(self, batch, model):
        raise NotImplementedError
class GreedySampler(Sampler):
def __init__(self, opt, data_loader, batch_mode=True):
super(GreedySampler, self).__init__(opt, data_loader)
def append_batch(self, X, next_idx, mask):
next_pos = X[:, -1:, 1] + 1
next_x = torch.cat((next_idx, next_pos), -1).unsqueeze(1)
next_mask = torch.cat([mask, torch.ones(X.size(0), 1, device=mask.device)], 1)
return torch.cat((X, next_x), 1), next_mask
def generate_sequence(self, batch, model, data_loader, start_idx, end_len):
XMB = batch["sequences"][:, :start_idx]
MMB = batch["attention_mask"][:, :start_idx]
XMB = model_utils.prepare_position_embeddings(
self.opt, data_loader.vocab_encoder, XMB.unsqueeze(-1))
lm_probs = F.log_softmax(model(
XMB.unsqueeze(1), sequence_mask=MMB), dim=-1)
values, indices = lm_probs[:, -1, :].max(dim=-1)
seqs = indices.clone().unsqueeze(1)
loss = values
counts = 1
next_pos = XMB[:, -1:, 1] + 1
next_x = torch.cat((indices.view(-1, 1), next_pos), -1).unsqueeze(1)
XMB = torch.cat((XMB, next_x), 1)
MMB = torch.cat([MMB, torch.ones(XMB.size(0), 1, device=MMB.device)], 1)
        # Greedy decoding loop: extend the sequence one argmax token at a time
for _ in range(self.opt.eval.smax):
lm_probs = F.log_softmax(model(
XMB.unsqueeze(1), sequence_mask=MMB), dim=-1)
            # Greedy step: take the most likely next token
values, next_idx = lm_probs[:, -1, :].max(dim=-1)
loss += values
counts += 1
next_idx = next_idx.unsqueeze(1)
seqs = torch.cat([seqs, next_idx], 1)
if (next_idx.item() == self.end_token) or (_ == end_len - 1):
break
XMB, MMB = self.append_batch(XMB, next_idx, MMB)
beams = []
for beam in seqs:
beams.append(" ".join("".join(
[data_loader.vocab_decoder[tok.item()].replace(
'</w>', ' ').replace('\n', '')
for tok in beam if tok != self.end_token]).split()))
sampling_result = {
"sequence": beams[0],
"beams": beams,
"beam_losses": [loss.item()],
"loss": loss.item(),
"beam_lengths": [counts],
"length": counts
}
return sampling_result
class TopKSampler(Sampler):
def __init__(self, opt, data_loader, batch_mode=True):
super(TopKSampler, self).__init__(opt, data_loader)
def append_batch(self, X, next_idx, mask):
next_pos = X[:, -1:, 1] + 1
next_x = torch.cat((next_idx, next_pos), -1).unsqueeze(1)
next_mask = torch.cat([mask, torch.ones(X.size(0), 1, device=mask.device)], 1)
return torch.cat((X, next_x), 1), next_mask
def generate_sequence(self, batch, model, data_loader, start_idx, end_len):
# start_idx = context_size_event + 1
# start_idx = max_e1 + max_r
# end_idx = context_size_effect - 1
# end_idx = max_e2
XMB = batch["sequences"][:, :start_idx]
MMB = batch["attention_mask"][:, :start_idx]
XMB = model_utils.prepare_position_embeddings(
self.opt, data_loader.vocab_encoder, XMB.unsqueeze(-1))
lm_probs = F.log_softmax(model(
XMB.unsqueeze(1), sequence_mask=MMB), dim=-1)
values, indices = lm_probs[:, -1, :].topk(self.opt.eval.k)
seqs = indices.t().clone()
losses = - values.view(-1, 1)
ended = (seqs == self.end_token).float()
counts = (1 - ended)
XMB = XMB.repeat(self.opt.eval.k, 1, 1)
MMB = MMB.repeat(self.opt.eval.k, 1)
next_pos = XMB[:, -1:, 1] + 1
next_x = torch.cat((indices.view(self.opt.eval.k, -1), next_pos), -1).unsqueeze(1)
XMB = torch.cat((XMB, next_x), 1)
MMB = torch.cat([MMB, torch.ones(XMB.size(0), 1, device=MMB.device)], 1)
# Sample from top k
for _ in range(end_len):
lm_probs = F.log_softmax(model(
XMB.unsqueeze(1), sequence_mask=MMB), dim=-1)
# Sample from top k
values, indices = lm_probs[:, -1, :].topk(self.opt.eval.k)
choice = torch.multinomial(values.exp(), 1)
next_idx = indices.gather(-1, choice)
ended = ended + (next_idx == self.end_token).float() * (1 - ended)
next_idx = next_idx * (1 - ended).long() + ended.long() * self.end_token
counts += (1 - ended)
seqs = torch.cat([seqs, next_idx], 1)
if ended.sum().item() == self.opt.eval.k:
break
losses -= values.gather(-1, choice) * (1 - ended)
XMB, MMB = self.append_batch(XMB, next_idx, MMB)
beams = []
for beam in seqs:
beams.append(" ".join("".join(
[data_loader.vocab_decoder[tok.item()].replace(
'</w>', ' ').replace('\n', '')
for tok in beam if tok != self.end_token]).split()))
sampling_result = {
"sequence": beams[0],
"beams": beams,
"beam_losses": losses.squeeze().tolist(),
"loss": losses[0].item(),
"beam_lengths": counts.long().squeeze().tolist(),
"length": counts[0].long().item()
}
return sampling_result
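# Hedged illustration (not part of the original module) of the top-k step used
# above, isolated from the model: keep the k most likely tokens, renormalise,
# and sample one of them in proportion to its probability.
def _example_top_k_step(logits, k=5):
    values, indices = logits.topk(k, dim=-1)   # (batch, k) best scores and ids
    probs = F.softmax(values, dim=-1)          # renormalise over the kept tokens
    choice = torch.multinomial(probs, 1)       # (batch, 1) position within top k
    next_idx = indices.gather(-1, choice)      # map back to vocabulary ids
    return next_idx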
class BeamSampler(TopKSampler):
def __init__(self, opt, data_loader, batch_mode=True, scorer=None):
super(BeamSampler, self).__init__(opt, data_loader, batch_mode)
self.kill_mask = torch.ones(opt.eval.bs, opt.eval.bs).to(cfg.device) * 9000
self.kill_mask[:, 0] = 0
    def make_batch(self, X):
        # NOTE: relies on module-level `np`, `n_vocab`, `n_special` and `device`
        # objects being defined elsewhere; it is not called by the samplers in
        # this module.
        X = np.array(X)
assert X.ndim in [1, 2]
if X.ndim == 1:
X = np.expand_dims(X, axis=0)
pos_enc = np.arange(n_vocab + n_special, n_vocab + n_special + X.shape[-1])
pos_enc = np.expand_dims(pos_enc, axis=0)
batch = np.stack([X, pos_enc], axis=-1)
batch = torch.tensor(batch, dtype=torch.long).to(device)
return batch
def append_batch(self, X, beam_toks, mask):
next_pos = X[:, -1:, 1] + 1
next_x = torch.cat((beam_toks.unsqueeze(1), next_pos), -1).unsqueeze(1)
next_mask = torch.cat([mask, torch.ones(X.size(0), 1, device=mask.device)], 1)
return torch.cat((X, next_x), 1), next_mask
def generate_sequence(self, batch, model, data_loader, start_idx, end_len):
# start_idx = context_size_event + 1
# start_idx = max_e1 + max_r
# end_idx = context_size_effect - 1
# end_idx = max_e2
XMB = batch["sequences"][:, :start_idx]
MMB = batch["attention_mask"][:, :start_idx]
XMB = model_utils.prepare_position_embeddings(
self.opt, data_loader.vocab_encoder, XMB.unsqueeze(-1))
tokens = []
beam_losses = []
# Beam Search
beam_lls, beam_toks, beam_seqs = None, None, None
lm_probs = F.log_softmax(model(
XMB.unsqueeze(1), sequence_mask=MMB), dim=-1)
dist = lm_probs[:, -1, :].squeeze()
beam_lls, beam_toks = dist.topk(self.opt.eval.bs)
beam_losses.append(beam_lls)
ended = (beam_toks == self.end_token).float()
counts = (2 - ended)
beam_toks = beam_toks.unsqueeze(1)
beam_seqs = beam_toks.clone()
XMB = XMB.repeat(self.opt.eval.bs, 1, 1)
MMB = MMB.repeat(self.opt.eval.bs, 1)
next_pos = XMB[:, -1:, 1] + 1
next_x = torch.cat((beam_toks, next_pos), -1).unsqueeze(1)
XMB = torch.cat((XMB, next_x), 1)
MMB = torch.cat([MMB, torch.ones(XMB.size(0), 1, device=MMB.device)], 1)
for _ in range(end_len):
# print(XMB.shape)
# Compute distribution for current beam
lm_probs = F.log_softmax(model(
XMB.unsqueeze(1), sequence_mask=MMB), dim=-1)
dist = lm_probs[:, -1, :].squeeze()
# get hypothesis tokens for distribution
hyp_beam_lls, hyp_beam_toks = dist.topk(self.opt.eval.bs)
# Compute masks and expand beam
expanded_ended = ended.unsqueeze(1).repeat(1, self.opt.eval.bs)
hypothesis_mask = expanded_ended * self.kill_mask + (1 - expanded_ended)
paper_results = False
if paper_results:
# Results from paper with slightly buggy beam search
current_beam_lls = beam_lls.unsqueeze(1).repeat(
1, self.opt.eval.bs).view(self.opt.eval.bs ** 2)
else:
# Current beam search implementation
current_beam_lls = beam_losses[-1].unsqueeze(1).repeat(
1, self.opt.eval.bs).view(self.opt.eval.bs ** 2)
# Compute losses of hypotheses, masking those that have ended
hyp_beam_lls = (hyp_beam_lls.view(self.opt.eval.bs ** 2) *
hypothesis_mask.view(-1)) + current_beam_lls
# Get normalizer for sequences
temp_counts = counts.unsqueeze(1).repeat(1, self.opt.eval.bs).view(
self.opt.eval.bs ** 2)
# Select best beams with lowest aggregate loss
beam_lls, top_beam_idxs = (hyp_beam_lls / temp_counts).topk(self.opt.eval.bs)
            # Update placements in beam based on selection
beam_losses = [i.index_select(0, top_beam_idxs // self.opt.eval.bs)
for i in beam_losses]
ended = ended.index_select(0, top_beam_idxs // self.opt.eval.bs)
counts = temp_counts.index_select(0, top_beam_idxs)
# Save beam losses
beam_losses.append(beam_lls * counts)
# Update beam tokens
ended_mask = (1 - ended).long()
end_replacement = (self.end_token * ended).long()
next_toks = hyp_beam_toks.view(-1)[top_beam_idxs]
beam_toks = next_toks * ended_mask + end_replacement
# Update ended and counts
ended = ended + (beam_toks == self.end_token).float() * (1 - ended)
counts = counts + (1 - ended)
# Update beam sequences
beam_seqs = beam_seqs.t().repeat(self.opt.eval.bs, 1).t().contiguous().view(
self.opt.eval.bs ** 2, -1)[top_beam_idxs]
beam_seqs = torch.cat((beam_seqs, beam_toks.unsqueeze(1)), dim=1)
            # Tile each beam's context so all bs**2 candidate expansions carry
            # their parent beam's context, then keep only the selected beams
XMB = XMB.transpose(0, 1).transpose(1, 2).repeat(
self.opt.eval.bs, 1, 1).transpose(2, 1).transpose(
1, 0).contiguous().view(
self.opt.eval.bs ** 2, XMB.size(1), XMB.size(2))[top_beam_idxs]
XMB, MMB = self.append_batch(XMB, beam_toks, MMB)
if (beam_toks == self.end_token).sum().item() == self.opt.eval.bs:
break
beams = []
for beam in beam_seqs:
beams.append(" ".join("".join(
[data_loader.vocab_decoder[tok.item()].replace(
'</w>', ' ').replace('\n', '')
for tok in beam if tok != self.end_token]).split()))
sampling_result = {
"sequence": beams[0],
"beams": beams,
"beam_losses": beam_lls.tolist(),
"loss": beam_lls[0].item(),
"beam_lengths": counts.tolist(),
"length": counts[0].item()
}
return sampling_result
|
the-stack_0_10990 | #!/usr/bin/env python
# Copyright 2016-2021 Biomedical Imaging Group Rotterdam, Departments of
# Medical Informatics and Radiology, Erasmus MC, Rotterdam, The Netherlands
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import WORC.addexceptions as WORCexceptions
import fastr
from fastr.api import ResourceLimit
import os
import graphviz
class Evaluate(object):
"""Build a network that evaluates the performance of an estimator."""
def __init__(self, label_type, modus='binary_classification', ensemble=50,
scores='percentages',
parent=None, features=None,
fastr_plugin='LinearExecution',
name='Example'):
"""
Initialize object.
Parameters
----------
        parent: WORC network object, default None
            If a parent network is given, the evaluate network is added
            to that existing network.
"""
if parent is not None:
self.parent = parent
self.network = parent.network
self.mode = 'WORC'
self.name = parent.network.id
self.ensemble = parent.configs[0]['Ensemble']['Use']
else:
self.mode = 'StandAlone'
self.fastr_plugin = fastr_plugin
self.name = 'WORC_Evaluate_' + name
self.network = fastr.create_network(id=self.name)
self.fastr_tmpdir = os.path.join(fastr.config.mounts['tmp'], self.name)
self.ensemble = ensemble
if features is None and self.mode == 'StandAlone':
raise WORCexceptions.WORCIOError('Either features as input or a WORC network is required for the Evaluate network.')
self.modus = modus
self.features = features
self.label_type = label_type
self.create_network()
def create_network(self):
"""Add evaluate components to network."""
# Create all nodes
if self.modus == 'binary_classification':
self.node_ROC =\
self.network.create_node('worc/PlotROC:1.0', tool_version='1.0',
id='plot_ROC',
resources=ResourceLimit(memory='12G'),
step_id='Evaluation')
if self.mode == 'StandAlone':
self.node_Estimator =\
self.network.create_node('worc/PlotEstimator:1.0', tool_version='1.0',
id='plot_Estimator',
resources=ResourceLimit(memory='12G'),
step_id='Evaluation')
self.node_Barchart =\
self.network.create_node('worc/PlotBarchart:1.0',
tool_version='1.0', id='plot_Barchart',
resources=ResourceLimit(memory='12G'),
step_id='Evaluation')
self.node_Hyperparameters =\
self.network.create_node('worc/PlotHyperparameters:1.0',
tool_version='1.0', id='plot_Hyperparameters',
resources=ResourceLimit(memory='6G'),
step_id='Evaluation')
if 'classification' in self.modus:
self.node_STest =\
self.network.create_node('worc/StatisticalTestFeatures:1.0',
tool_version='1.0',
id='statistical_test_features',
resources=ResourceLimit(memory='12G'),
step_id='Evaluation')
self.node_decomposition =\
self.network.create_node('worc/Decomposition:1.0',
tool_version='1.0',
id='decomposition',
resources=ResourceLimit(memory='12G'),
step_id='Evaluation')
self.node_Ranked_Percentages =\
self.network.create_node('worc/PlotRankedScores:1.0',
tool_version='1.0',
id='plot_ranked_percentages',
resources=ResourceLimit(memory='20G'),
step_id='Evaluation')
self.node_Ranked_Posteriors =\
self.network.create_node('worc/PlotRankedScores:1.0',
tool_version='1.0',
id='plot_ranked_posteriors',
resources=ResourceLimit(memory='20G'),
step_id='Evaluation')
self.node_Boxplots_Features =\
self.network.create_node('worc/PlotBoxplotFeatures:1.0',
tool_version='1.0',
id='plot_boxplot_features',
resources=ResourceLimit(memory='12G'),
step_id='Evaluation')
# Create sinks
if self.modus == 'binary_classification':
self.sink_ROC_PNG =\
self.network.create_sink('PNGFile', id='ROC_PNG',
step_id='general_sinks')
self.sink_ROC_Tex =\
self.network.create_sink('TexFile', id='ROC_Tex',
step_id='general_sinks')
self.sink_ROC_CSV =\
self.network.create_sink('CSVFile', id='ROC_CSV',
step_id='general_sinks')
self.sink_PRC_PNG =\
self.network.create_sink('PNGFile', id='PRC_PNG',
step_id='general_sinks')
self.sink_PRC_Tex =\
self.network.create_sink('TexFile', id='PRC_Tex',
step_id='general_sinks')
self.sink_PRC_CSV =\
self.network.create_sink('CSVFile', id='PRC_CSV',
step_id='general_sinks')
if self.mode == 'StandAlone':
self.sink_Estimator_Json =\
self.network.create_sink('JsonFile', id='Estimator_Json',
step_id='general_sinks')
self.sink_Barchart_PNG =\
self.network.create_sink('PNGFile', id='Barchart_PNG',
step_id='general_sinks')
self.sink_Barchart_Tex =\
self.network.create_sink('TexFile',
id='Barchart_Tex',
step_id='general_sinks')
self.sink_Hyperparameters_CSV =\
self.network.create_sink('CSVFile', id='Hyperparameters_CSV',
step_id='general_sinks')
if 'classification' in self.modus:
self.sink_STest_CSV =\
self.network.create_sink('CSVFile',
id='StatisticalTestFeatures_CSV',
step_id='general_sinks')
self.sink_STest_PNG =\
self.network.create_sink('PNGFile',
id='StatisticalTestFeatures_PNG',
step_id='general_sinks')
self.sink_STest_Tex =\
self.network.create_sink('TexFile',
id='StatisticalTestFeatures_Tex',
step_id='general_sinks')
self.sink_decomposition_PNG =\
self.network.create_sink('PNGFile', id='Decomposition_PNG',
step_id='general_sinks')
self.sink_Ranked_Percentages_Zip =\
self.network.create_sink('ZipFile', id='RankedPercentages_Zip',
step_id='general_sinks')
self.sink_Ranked_Percentages_CSV =\
self.network.create_sink('CSVFile', id='RankedPercentages_CSV',
step_id='general_sinks')
self.sink_Ranked_Posteriors_Zip =\
self.network.create_sink('ZipFile', id='RankedPosteriors_Zip',
step_id='general_sinks')
self.sink_Ranked_Posteriors_CSV =\
self.network.create_sink('CSVFile', id='RankedPosteriors_CSV',
step_id='general_sinks')
self.sink_Boxplots_Features_Zip =\
self.network.create_sink('ZipFile', id='BoxplotsFeatures_Zip',
step_id='general_sinks')
# Create links to sinks
if self.modus == 'binary_classification':
self.sink_ROC_PNG.input = self.node_ROC.outputs['ROC_png']
self.sink_ROC_Tex.input = self.node_ROC.outputs['ROC_tex']
self.sink_ROC_CSV.input = self.node_ROC.outputs['ROC_csv']
self.sink_PRC_PNG.input = self.node_ROC.outputs['PRC_png']
self.sink_PRC_Tex.input = self.node_ROC.outputs['PRC_tex']
self.sink_PRC_CSV.input = self.node_ROC.outputs['PRC_csv']
if self.mode == 'StandAlone':
self.sink_Estimator_Json.input = self.node_Estimator.outputs['output_json']
self.sink_Barchart_PNG.input = self.node_Barchart.outputs['output_png']
self.sink_Barchart_Tex.input = self.node_Barchart.outputs['output_tex']
self.sink_Hyperparameters_CSV.input = self.node_Hyperparameters.outputs['output_csv']
if 'classification' in self.modus:
self.sink_STest_CSV.input = self.node_STest.outputs['output_csv']
self.sink_STest_PNG.input = self.node_STest.outputs['output_png']
self.sink_STest_Tex.input = self.node_STest.outputs['output_tex']
self.sink_decomposition_PNG.input = self.node_decomposition.outputs['output']
self.sink_Ranked_Percentages_Zip.input =\
self.node_Ranked_Percentages.outputs['output_zip']
self.sink_Ranked_Percentages_CSV.input =\
self.node_Ranked_Percentages.outputs['output_csv']
# Create constant node
self.node_Ranked_Percentages.inputs['scores'] = ['percentages']
self.sink_Ranked_Posteriors_Zip.input =\
self.node_Ranked_Posteriors.outputs['output_zip']
self.sink_Ranked_Posteriors_CSV.input =\
self.node_Ranked_Posteriors.outputs['output_csv']
self.sink_Boxplots_Features_Zip.input =\
self.node_Boxplots_Features.outputs['output_zip']
# Create constant node
self.node_Ranked_Posteriors.inputs['scores'] = ['posteriors']
if self.mode == 'StandAlone':
self.source_LabelType =\
self.network.create_constant('String', [self.label_type],
id='LabelType',
step_id='Evaluation')
self.source_Ensemble =\
self.network.create_constant('String', [self.ensemble],
id='Ensemble',
step_id='Evaluation')
# Create sources if not supplied by a WORC network
if self.mode == 'StandAlone':
self.source_Estimator =\
self.network.create_source('HDF5', id='Estimator')
self.source_PatientInfo =\
self.network.create_source('PatientInfoFile', id='PatientInfo')
self.source_Images =\
self.network.create_source('ITKImageFile', id='Images',
node_group='patients')
self.source_Segmentations =\
self.network.create_source('ITKImageFile', id='Segmentations',
node_group='patients')
self.source_Config =\
self.network.create_source('ParameterFile', id='Config')
self.labels = list()
self.source_Features = list()
for idx in range(0, len(self.features)):
label = 'Features_' + str(idx)
self.labels.append(label)
self.source_Features.append(self.network.create_source('HDF5', id=label, node_group='features'))
# Create links to the sources that could be in a WORC network
if self.mode == 'StandAlone':
self.create_links_Standalone()
else:
self.create_links_Addon()
def create_links_Standalone(self):
"""Create links in network between nodes when using standalone."""
# Sources from the Evaluate network are used
if self.modus == 'binary_classification':
self.node_ROC.inputs['prediction'] = self.source_Estimator.output
self.node_ROC.inputs['pinfo'] = self.source_PatientInfo.output
self.node_Estimator.inputs['prediction'] = self.source_Estimator.output
self.node_Estimator.inputs['pinfo'] = self.source_PatientInfo.output
self.node_Barchart.inputs['prediction'] = self.source_Estimator.output
self.node_Hyperparameters.inputs['prediction'] = self.source_Estimator.output
if 'classification' in self.modus:
self.links_STest_Features = list()
self.links_decomposition_Features = list()
self.links_Boxplots_Features = list()
for idx, label in enumerate(self.labels):
if 'classification' in self.modus:
self.links_STest_Features.append(self.node_STest.inputs['features'][str(label)] << self.source_Features[idx].output)
self.links_STest_Features[idx].collapse = 'features'
self.links_decomposition_Features.append(self.node_decomposition.inputs['features'][str(label)] << self.source_Features[idx].output)
self.links_decomposition_Features[idx].collapse = 'features'
self.links_Boxplots_Features.append(self.node_Boxplots_Features.inputs['features'][str(label)] << self.source_Features[idx].output)
self.links_Boxplots_Features[idx].collapse = 'features'
if 'classification' in self.modus:
self.node_STest.inputs['patientclass'] = self.source_PatientInfo.output
self.node_STest.inputs['config'] = self.source_Config.output
self.node_decomposition.inputs['patientclass'] = self.source_PatientInfo.output
self.node_decomposition.inputs['config'] = self.source_Config.output
self.node_Ranked_Percentages.inputs['estimator'] = self.source_Estimator.output
self.node_Ranked_Percentages.inputs['pinfo'] = self.source_PatientInfo.output
self.link_images_perc = self.network.create_link(self.source_Images.output, self.node_Ranked_Percentages.inputs['images'])
self.link_images_perc.collapse = 'patients'
self.link_segmentations_perc = self.network.create_link(self.source_Segmentations.output, self.node_Ranked_Percentages.inputs['segmentations'])
self.link_segmentations_perc.collapse = 'patients'
self.node_Boxplots_Features.inputs['patientclass'] = self.source_PatientInfo.output
self.node_Boxplots_Features.inputs['config'] = self.source_Config.output
self.node_Ranked_Posteriors.inputs['estimator'] = self.source_Estimator.output
self.node_Ranked_Posteriors.inputs['pinfo'] = self.source_PatientInfo.output
self.link_images_post = self.network.create_link(self.source_Images.output, self.node_Ranked_Posteriors.inputs['images'])
self.link_images_post.collapse = 'patients'
self.link_segmentations_post = self.network.create_link(self.source_Segmentations.output, self.node_Ranked_Posteriors.inputs['segmentations'])
self.link_segmentations_post.collapse = 'patients'
if self.modus == 'binary_classification':
self.node_ROC.inputs['ensemble'] = self.source_Ensemble.output
self.node_ROC.inputs['label_type'] = self.source_LabelType.output
if 'classification' in self.modus:
self.node_Ranked_Percentages.inputs['ensemble'] =\
self.source_Ensemble.output
self.node_Ranked_Percentages.inputs['label_type'] =\
self.source_LabelType.output
self.node_Estimator.inputs['ensemble'] = self.source_Ensemble.output
self.node_Estimator.inputs['label_type'] = self.source_LabelType.output
self.node_Barchart.inputs['estimators'] = self.source_Ensemble.output
self.node_Barchart.inputs['label_type'] = self.source_LabelType.output
self.node_Hyperparameters.inputs['estimators'] = self.source_Ensemble.output
self.node_Hyperparameters.inputs['label_type'] = self.source_LabelType.output
self.node_Ranked_Posteriors.inputs['ensemble'] =\
self.source_Ensemble.output
self.node_Ranked_Posteriors.inputs['label_type'] =\
self.source_LabelType.output
def create_links_Addon(self):
"""Create links in network between nodes when adding Evaluate to WORC."""
# Sources from the WORC network are used
prediction = self.parent.classify.outputs['classification']
if hasattr(self.parent, 'source_patientclass_test'):
pinfo = self.parent.source_patientclass_test.output
else:
pinfo = self.parent.source_patientclass_train.output
config = self.parent.source_class_config.output
if hasattr(self.parent, 'sources_images_train'):
if self.parent.sources_images_train:
# NOTE: Use images of first modality to depict tumor
label = self.parent.modlabels[0]
images = self.parent.sources_images_train[label].output
segmentations =\
self.parent.sources_segmentations_train[label].output
if self.modus == 'binary_classification':
self.node_ROC.inputs['ensemble'] = self.parent.source_Ensemble.output
self.node_ROC.inputs['label_type'] = self.parent.source_LabelType.output
if 'classification' in self.modus:
self.node_Ranked_Percentages.inputs['ensemble'] =\
self.parent.source_Ensemble.output
self.node_Ranked_Percentages.inputs['label_type'] =\
self.parent.source_LabelType.output
self.node_Barchart.inputs['estimators'] = self.parent.source_Ensemble.output
self.node_Barchart.inputs['label_type'] = self.parent.source_LabelType.output
self.node_Hyperparameters.inputs['estimators'] = self.parent.source_Ensemble.output
self.node_Hyperparameters.inputs['label_type'] = self.parent.source_LabelType.output
self.node_Ranked_Posteriors.inputs['ensemble'] =\
self.parent.source_Ensemble.output
self.node_Ranked_Posteriors.inputs['label_type'] =\
self.parent.source_LabelType.output
if self.modus == 'binary_classification':
self.node_ROC.inputs['prediction'] = prediction
self.node_ROC.inputs['pinfo'] = pinfo
self.node_Barchart.inputs['prediction'] = prediction
self.node_Hyperparameters.inputs['prediction'] = prediction
if 'classification' in self.modus:
self.links_STest_Features = dict()
self.links_decomposition_Features = dict()
self.links_Boxplots_Features = dict()
# Check if we have ComBat features
if self.parent.configs[0]['General']['ComBat'] == 'True':
name = 'ComBat'
# Take features from ComBat
if 'classification' in self.modus:
self.links_STest_Features[name] =\
self.network.create_link(self.parent.ComBat.outputs['features_train_out'], self.node_STest.inputs['features'])
self.links_decomposition_Features[name] =\
self.network.create_link(self.parent.ComBat.outputs['features_train_out'], self.node_decomposition.inputs['features'])
self.links_Boxplots_Features[name] =\
self.network.create_link(self.parent.ComBat.outputs['features_train_out'], self.node_Boxplots_Features.inputs['features'])
# All features should be input at once
if 'classification' in self.modus:
self.links_STest_Features[name].collapse = 'ComBat'
self.links_decomposition_Features[name].collapse = 'ComBat'
self.links_Boxplots_Features[name].collapse = 'ComBat'
else:
for idx, label in enumerate(self.parent.modlabels):
# NOTE: Currently statistical testing is only done within the training set
if hasattr(self.parent, 'sources_images_train'):
if self.parent.sources_images_train:
# Take features directly from feature computation toolboxes
for node in self.parent.featureconverter_train[label]:
name = node.id
if 'classification' in self.modus:
self.links_STest_Features[name] =\
self.node_STest.inputs['features'][name] << node.outputs['feat_out']
self.links_decomposition_Features[name] =\
self.node_decomposition.inputs['features'][name] << node.outputs['feat_out']
self.links_Boxplots_Features[name] =\
self.node_Boxplots_Features.inputs['features'][name] << node.outputs['feat_out']
# All features should be input at once
if 'classification' in self.modus:
self.links_STest_Features[name].collapse = 'train'
self.links_decomposition_Features[name].collapse = 'train'
self.links_Boxplots_Features[name].collapse = 'train'
else:
# Feature are precomputed and given as sources
for node in self.parent.sources_features_train.values():
name = node.id
if 'classification' in self.modus:
self.links_STest_Features[name] =\
self.node_STest.inputs['features'][name] << node.output
self.links_decomposition_Features[name] =\
self.node_decomposition.inputs['features'][name] << node.output
self.links_Boxplots_Features[name] =\
self.node_Boxplots_Features.inputs['features'][name] << node.output
# All features should be input at once
if 'classification' in self.modus:
self.links_STest_Features[name].collapse = 'train'
self.links_decomposition_Features[name].collapse = 'train'
self.links_Boxplots_Features[name].collapse = 'train'
else:
# Feature are precomputed and given as sources
for node in self.parent.sources_features_train.values():
name = node.id
if 'classification' in self.modus:
self.links_STest_Features[name] =\
self.node_STest.inputs['features'][name] << node.output
self.links_decomposition_Features[name] =\
self.node_decomposition.inputs['features'][name] << node.output
self.links_Boxplots_Features[name] =\
self.node_Boxplots_Features.inputs['features'][name] << node.output
# All features should be input at once
if 'classification' in self.modus:
self.links_STest_Features[name].collapse = 'train'
self.links_decomposition_Features[name].collapse = 'train'
self.links_Boxplots_Features[name].collapse = 'train'
if 'classification' in self.modus:
self.node_STest.inputs['patientclass'] = pinfo
self.node_STest.inputs['config'] = config
self.node_decomposition.inputs['patientclass'] = pinfo
self.node_decomposition.inputs['config'] = config
self.node_Ranked_Percentages.inputs['estimator'] = prediction
self.node_Ranked_Percentages.inputs['pinfo'] = pinfo
self.node_Boxplots_Features.inputs['patientclass'] = pinfo
self.node_Boxplots_Features.inputs['config'] = config
self.node_Ranked_Posteriors.inputs['estimator'] = prediction
self.node_Ranked_Posteriors.inputs['pinfo'] = pinfo
if hasattr(self.parent, 'sources_images_test'):
images = self.parent.sources_images_test[label].output
segmentations =\
self.parent.sources_segmentations_test[label].output
if 'classification' in self.modus:
self.link_images_perc =\
self.network.create_link(images, self.node_Ranked_Percentages.inputs['images'])
self.link_images_perc.collapse = 'test'
self.link_segmentations_perc =\
self.network.create_link(segmentations, self.node_Ranked_Percentages.inputs['segmentations'])
self.link_segmentations_perc.collapse = 'test'
self.link_images_post =\
self.network.create_link(images, self.node_Ranked_Posteriors.inputs['images'])
self.link_images_post.collapse = 'test'
self.link_segmentations_post =\
self.network.create_link(segmentations, self.node_Ranked_Posteriors.inputs['segmentations'])
self.link_segmentations_post.collapse = 'test'
elif hasattr(self.parent, 'sources_images_train'):
if self.parent.sources_images_train:
if 'classification' in self.modus:
self.link_images_perc =\
self.network.create_link(images, self.node_Ranked_Percentages.inputs['images'])
self.link_images_perc.collapse = 'train'
self.link_segmentations_perc =\
self.network.create_link(segmentations, self.node_Ranked_Percentages.inputs['segmentations'])
self.link_segmentations_perc.collapse = 'train'
self.link_images_post =\
self.network.create_link(images, self.node_Ranked_Posteriors.inputs['images'])
self.link_images_post.collapse = 'train'
self.link_segmentations_post =\
self.network.create_link(segmentations, self.node_Ranked_Posteriors.inputs['segmentations'])
self.link_segmentations_post.collapse = 'train'
def set(self, estimator=None, pinfo=None, images=None,
segmentations=None, config=None, features=None,
sink_data={}):
"""Set the sources and sinks based on the provided attributes."""
if self.mode == 'StandAlone':
self.source_data = dict()
self.sink_data = dict()
self.source_data['Estimator'] = estimator
self.source_data['PatientInfo'] = pinfo
self.source_data['Images'] = images
self.source_data['Segmentations'] = segmentations
self.source_data['Config'] = config
self.source_data['LabelType'] = self.label_type
self.source_data['Ensemble'] = self.ensemble
for feature, label in zip(features, self.labels):
self.source_data[label] = feature
else:
self.sink_data = self.parent.sink_data
if self.modus == 'binary_classification':
if 'ROC_PNG' not in sink_data.keys():
self.sink_data['ROC_PNG'] = ("vfs://output/{}/Evaluation/ROC_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name)
if 'ROC_Tex' not in sink_data.keys():
self.sink_data['ROC_Tex'] = ("vfs://output/{}/Evaluation/ROC_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name)
if 'ROC_CSV' not in sink_data.keys():
self.sink_data['ROC_CSV'] = ("vfs://output/{}/Evaluation/ROC_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name)
if 'PRC_PNG' not in sink_data.keys():
self.sink_data['PRC_PNG'] = ("vfs://output/{}/Evaluation/PRC_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name)
if 'PRC_Tex' not in sink_data.keys():
self.sink_data['PRC_Tex'] = ("vfs://output/{}/Evaluation/PRC_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name)
if 'PRC_CSV' not in sink_data.keys():
self.sink_data['PRC_CSV'] = ("vfs://output/{}/Evaluation/PRC_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name)
if 'Estimator_Json' not in sink_data.keys():
self.sink_data['Estimator_Json'] = ("vfs://output/{}/Evaluation/performance_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name)
if 'Barchart_PNG' not in sink_data.keys():
self.sink_data['Barchart_PNG'] = ("vfs://output/{}/Evaluation/Barchart_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name)
if 'Barchart_Tex' not in sink_data.keys():
self.sink_data['Barchart_Tex'] = ("vfs://output/{}/Evaluation/Barchart_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name)
if 'Hyperparameters_CSV' not in sink_data.keys():
self.sink_data['Hyperparameters_CSV'] = ("vfs://output/{}/Evaluation/Hyperparameters_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name)
if 'classification' in self.modus:
if 'StatisticalTestFeatures_CSV' not in sink_data.keys():
self.sink_data['StatisticalTestFeatures_CSV'] = ("vfs://output/{}/Evaluation/StatisticalTestFeatures_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name)
if 'StatisticalTestFeatures_PNG' not in sink_data.keys():
self.sink_data['StatisticalTestFeatures_PNG'] = ("vfs://output/{}/Evaluation/StatisticalTestFeatures_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name)
if 'StatisticalTestFeatures_Tex' not in sink_data.keys():
self.sink_data['StatisticalTestFeatures_Tex'] = ("vfs://output/{}/Evaluation/StatisticalTestFeatures_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name)
if 'Decomposition_PNG' not in sink_data.keys():
self.sink_data['Decomposition_PNG'] = ("vfs://output/{}/Evaluation/Decomposition_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name)
if 'RankedPercentages_Zip' not in sink_data.keys():
self.sink_data['RankedPercentages_Zip'] = ("vfs://output/{}/Evaluation/RankedPercentages_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name)
if 'RankedPercentages_CSV' not in sink_data.keys():
self.sink_data['RankedPercentages_CSV'] = ("vfs://output/{}/Evaluation/RankedPercentages_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name)
if 'RankedPosteriors_Zip' not in sink_data.keys():
self.sink_data['RankedPosteriors_Zip'] = ("vfs://output/{}/Evaluation/RankedPosteriors_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name)
if 'RankedPosteriors_CSV' not in sink_data.keys():
self.sink_data['RankedPosteriors_CSV'] = ("vfs://output/{}/Evaluation/RankedPosteriors_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name)
if 'BoxplotsFeatures_Zip' not in sink_data.keys():
self.sink_data['BoxplotsFeatures_Zip'] = ("vfs://output/{}/Evaluation/BoxplotsFeatures_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name)
def execute(self):
"""Execute the network through the fastr.network.execute command."""
        # Draw and execute network
try:
self.network.draw(file_path=self.network.id + '.svg',
draw_dimensions=True)
except graphviz.backend.ExecutableNotFound:
            print('[WORC WARNING] Graphviz executable not found: not drawing network diagram. Make sure the Graphviz executables are on your system PATH.')
self.network.execute(self.source_data, self.sink_data,
execution_plugin=self.fastr_plugin,
tmpdir=self.fastr_tmpdir)
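# Hedged usage sketch (not part of the original module): running the Evaluate
# network stand-alone. All vfs:// paths are placeholders; the keyword arguments
# mirror the sources consumed by set() above.
def _example_standalone_evaluate():
    features = ['vfs://input/features_patient1.hdf5',
                'vfs://input/features_patient2.hdf5']
    evaluator = Evaluate(label_type='imaginary_label_1',
                         modus='binary_classification',
                         features=features,
                         name='ExampleRun')
    evaluator.set(estimator='vfs://input/estimator.hdf5',
                  pinfo='vfs://input/pinfo.txt',
                  images=['vfs://input/image_1.nii.gz'],
                  segmentations=['vfs://input/seg_1.nii.gz'],
                  config='vfs://input/config.ini',
                  features=features)
    evaluator.execute()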
|
the-stack_0_10992 | import numpy as np
import torch
import torch.nn as nn
class CNNCTC(nn.Module):
def __init__(self, class_num, mode='train'):
super(CNNCTC, self).__init__()
feature = [
nn.Conv2d(3, 50, stride=1, kernel_size=3, padding=1),
nn.BatchNorm2d(50),
nn.ReLU(inplace=True),
nn.Conv2d(50, 100, stride=1, kernel_size=3, padding=1),
nn.Dropout(p=0.1),
nn.Conv2d(100, 100, stride=1, kernel_size=3, padding=1),
nn.Dropout(p=0.1),
nn.BatchNorm2d(100),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, stride=2),
nn.Conv2d(100, 200, stride=1, kernel_size=3, padding=1),
nn.Dropout(p=0.2),
nn.Conv2d(200, 200, stride=1, kernel_size=3, padding=1),
nn.Dropout(p=0.2),
nn.BatchNorm2d(200),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, stride=2),
nn.Conv2d(200, 250, stride=1, kernel_size=3, padding=1),
nn.Dropout(p=0.3),
nn.BatchNorm2d(250),
nn.ReLU(inplace=True),
nn.Conv2d(250, 300, stride=1, kernel_size=3, padding=1),
nn.Dropout(p=0.3),
nn.Conv2d(300, 300, stride=1, kernel_size=3, padding=1),
nn.Dropout(p=0.3),
nn.BatchNorm2d(300),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, stride=2),
nn.Conv2d(300, 350, stride=1, kernel_size=3, padding=1),
nn.Dropout(p=0.4),
nn.BatchNorm2d(350),
nn.ReLU(inplace=True),
nn.Conv2d(350, 400, stride=1, kernel_size=3, padding=1),
nn.Dropout(p=0.4),
nn.Conv2d(400, 400, stride=1, kernel_size=3, padding=1),
nn.Dropout(p=0.4),
nn.BatchNorm2d(400),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, stride=2)
]
classifier = [
nn.Linear(1600, 900),
nn.ReLU(inplace=True),
nn.Dropout(p=0.5),
# nn.Linear(900, 200),
# nn.ReLU(inplace=True),
nn.Linear(900, class_num)
]
self.mode = mode
self.feature = nn.Sequential(*feature)
self.classifier = nn.Sequential(*classifier)
def forward(self, x): # x: batch, window, slice channel, h, w
result = []
for s in range(x.shape[1]):
result.append(self.single_forward(x[:, s, :, :, :]))
out = torch.stack(result)
if self.mode != 'train':
return self.decode(out)
return out
def single_forward(self, x):
feat = self.feature(x)
feat = feat.view(feat.shape[0], -1) # flatten
out = self.classifier(feat)
return out
def decode(self, pred):
pred = pred.permute(1, 0, 2).cpu().data.numpy() # batch, step, class
seq = []
for i in range(pred.shape[0]):
seq.append(self.pred_to_string(pred[i]))
return seq
@staticmethod
def pred_to_string(pred): # step, class
seq = []
for i in range(pred.shape[0]):
label = np.argmax(pred[i])
seq.append(label)
out = []
for i in range(len(seq)):
if len(out) == 0:
if seq[i] != 0:
out.append(seq[i])
else:
if seq[i] != 0 and seq[i] != seq[i - 1]:
out.append(seq[i])
return out
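# Hedged usage sketch (not part of the original module). The 32x32 input size is
# inferred from the 1600-unit classifier input (400 channels * 2 * 2 after four
# stride-2 poolings); batch size, window length and class count are illustrative.
def _example_cnnctc():
    model = CNNCTC(class_num=11, mode='eval')
    x = torch.randn(2, 5, 3, 32, 32)   # batch=2, window of 5 slices, RGB 32x32
    decoded = model(x)                 # list of label sequences per sample
    # pred_to_string performs the CTC-style collapse: a frame-wise argmax
    # sequence such as [0, 3, 3, 0, 5, 5, 0] reduces to [3, 5].
    return decoded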
|
the-stack_0_10993 | # Copyright (C) 2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
# See: https://spdx.org/licenses/
import typing as ty
import numpy as np
import numpy.typing as npt
class CoefficientTensorsMixin:
def __init__(self, *coefficients: ty.Union[ty.List, npt.ArrayLike]):
"""Coefficients for a scalar function of a vector.
Parameters
----------
coefficients: the tensor coefficients of the function.
"""
c_dict = dict()
for coefficient in coefficients:
if type(coefficient) in [list, int]:
coefficient = np.asarray(coefficient)
rank = coefficient.ndim
elif type(coefficient) is not np.ndarray:
raise ValueError(
"Coefficients should be either Numpy arrays "
"or (possibly nested) lists."
)
else:
rank = coefficient.ndim
c_dict[rank] = coefficient
self._coefficients = c_dict
@property
def coefficients(self):
return self._coefficients
@coefficients.setter
def coefficients(self, value):
self._coefficients = value
def get_coefficient(self, order: int):
try:
return self.coefficients[order]
except KeyError:
print(
f"""Order {order} not found, coefficients were only given for
orders: {list(self.coefficients.keys())}."""
)
raise
@property
def max_degree(self):
"""Maximum order among the coefficients' ranks."""
return max(self.coefficients.keys())
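# Hedged usage sketch (not part of the original module): storing the
# coefficients of a scalar function such as f(x) = x^T Q x + c^T x + b,
# keyed by the rank of each coefficient tensor.
def _example_coefficients():
    Q = np.array([[2, 0], [0, 1]])   # rank-2 coefficient
    c = np.array([1, -1])            # rank-1 coefficient
    b = 3                            # rank-0 coefficient
    mixin = CoefficientTensorsMixin(Q, c, b)
    return mixin.get_coefficient(2), mixin.max_degree   # (Q, 2)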
|
the-stack_0_10994 |
"""Utilities for downloading data from WMT, tokenizing, vocabularies."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import re
import tarfile
from six.moves import urllib
import numpy as np
from tensorflow.python.platform import gfile
import tensorflow as tf
# Special vocabulary symbols - we always put them at the start.
_PAD = b"_PAD"
_GO = b"_GO"
_EOS = b"_EOS"
_UNK = b"_UNK"
_START_VOCAB = [_PAD, _GO, _EOS, _UNK]
PAD_ID = 0
GO_ID = 1
EOS_ID = 2
UNK_ID = 3
# Regular expressions used to tokenize.
_WORD_SPLIT = re.compile(b"([.,!?\"':;)(])")
_DIGIT_RE = re.compile(br"\d")
def maybe_download(directory, filename, url):
"""Download filename from url unless it's already in directory."""
if not os.path.exists(directory):
print("Creating directory %s" % directory)
os.mkdir(directory)
filepath = os.path.join(directory, filename)
if not os.path.exists(filepath):
print("Downloading %s to %s" % (url, filepath))
filepath, _ = urllib.request.urlretrieve(url, filepath)
statinfo = os.stat(filepath)
print("Succesfully downloaded", filename, statinfo.st_size, "bytes")
return filepath
def gunzip_file(gz_path, new_path):
"""Unzips from gz_path into new_path."""
print("Unpacking %s to %s" % (gz_path, new_path))
with gzip.open(gz_path, "rb") as gz_file:
with open(new_path, "wb") as new_file:
for line in gz_file:
new_file.write(line)
def get_wmt_enfr_train_set(directory):
"""Download the WMT en-fr training corpus to directory unless it's there."""
train_path = os.path.join(directory, "train")
return train_path
def get_wmt_enfr_dev_set(directory):
"""Download the WMT en-fr training corpus to directory unless it's there."""
dev_name = "validate"
dev_path = os.path.join(directory, dev_name)
return dev_path
def basic_tokenizer(sentence):
"""Very basic tokenizer: split the sentence into a list of tokens."""
words = []
for space_separated_fragment in sentence.strip().split():
words.extend(_WORD_SPLIT.split(space_separated_fragment))
return [w for w in words if w]
def create_vocabulary_source(vocabulary_path, data_path, max_vocabulary_size,
tokenizer=None, normalize_digits=True):
"""Create vocabulary file (if it does not exist yet) from data file.
Data file is assumed to contain one sentence per line. Each sentence is
tokenized and digits are normalized (if normalize_digits is set).
Vocabulary contains the most-frequent tokens up to max_vocabulary_size.
We write it to vocabulary_path in a one-token-per-line format, so that later
token in the first line gets id=0, second line gets id=1, and so on.
Args:
vocabulary_path: path where the vocabulary will be created.
data_path: data file that will be used to create vocabulary.
max_vocabulary_size: limit on the size of the created vocabulary.
tokenizer: a function to use to tokenize each data sentence;
if None, basic_tokenizer will be used.
normalize_digits: Boolean; if true, all digits are replaced by 0s.
"""
if not gfile.Exists(vocabulary_path):
print("Creating vocabulary %s from data %s" % (vocabulary_path, data_path))
vocab = {}
with gfile.GFile(data_path, mode="rb") as f:
counter = 0
for line in f:
counter += 1
if counter % 100000 == 0:
print(" processing line %d" % counter)
        facts = eval(line)
        for fact in facts:
for w in fact:
word = w.encode('UTF-8')
if word in vocab:
vocab[word] += 1
else:
vocab[word] = 1
vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get, reverse=True)
if len(vocab_list) > max_vocabulary_size:
vocab_list = vocab_list[:max_vocabulary_size]
with gfile.GFile(vocabulary_path, mode="wb") as vocab_file:
for w in vocab_list:
vocab_file.write(w + b"\n")
def create_vocabulary_target(vocabulary_path, data_path, max_vocabulary_size,
tokenizer=None, normalize_digits=True):
"""Create vocabulary file (if it does not exist yet) from data file.
Data file is assumed to contain one sentence per line. Each sentence is
tokenized and digits are normalized (if normalize_digits is set).
Vocabulary contains the most-frequent tokens up to max_vocabulary_size.
We write it to vocabulary_path in a one-token-per-line format, so that later
token in the first line gets id=0, second line gets id=1, and so on.
Args:
vocabulary_path: path where the vocabulary will be created.
data_path: data file that will be used to create vocabulary.
max_vocabulary_size: limit on the size of the created vocabulary.
tokenizer: a function to use to tokenize each data sentence;
if None, basic_tokenizer will be used.
normalize_digits: Boolean; if true, all digits are replaced by 0s.
"""
if not gfile.Exists(vocabulary_path):
print("Creating vocabulary %s from data %s" % (vocabulary_path, data_path))
vocab = {}
with gfile.GFile(data_path, mode="rb") as f:
counter = 0
for line in f:
counter += 1
if counter % 100000 == 0:
print(" processing line %d" % counter)
tokens=eval(line)
for w in tokens:
word = w.replace('\n', '\\n')
if word in vocab:
vocab[word] += 1
else:
vocab[word] = 1
vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get, reverse=True)
if len(vocab_list) > max_vocabulary_size:
vocab_list = vocab_list[:max_vocabulary_size]
with gfile.GFile(vocabulary_path, mode="wb") as vocab_file:
for w in vocab_list:
vocab_file.write(w + b"\n")
def initialize_vocabulary(vocabulary_path):
"""Initialize vocabulary from file.
We assume the vocabulary is stored one-item-per-line, so a file:
dog
cat
will result in a vocabulary {"dog": 0, "cat": 1}, and this function will
also return the reversed-vocabulary ["dog", "cat"].
Args:
vocabulary_path: path to the file containing the vocabulary.
Returns:
a pair: the vocabulary (a dictionary mapping string to integers), and
the reversed vocabulary (a list, which reverses the vocabulary mapping).
Raises:
ValueError: if the provided vocabulary_path does not exist.
"""
if gfile.Exists(vocabulary_path):
rev_vocab = []
with gfile.GFile(vocabulary_path, mode="rb") as f:
rev_vocab.extend(f.readlines())
rev_vocab = [line[:-1] for line in rev_vocab]
vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])
return vocab, rev_vocab
else:
raise ValueError("Vocabulary file %s not found.", vocabulary_path)
def sentence_to_token_ids(sentence, vocabulary,
tokenizer=None, normalize_digits=True):
"""Convert a string to list of integers representing token-ids.
For example, a sentence "I have a dog" may become tokenized into
["I", "have", "a", "dog"] and with vocabulary {"I": 1, "have": 2,
"a": 4, "dog": 7"} this function will return [1, 2, 4, 7].
Args:
sentence: the sentence in bytes format to convert to token-ids.
vocabulary: a dictionary mapping tokens to integers.
tokenizer: a function to use to tokenize each sentence;
if None, basic_tokenizer will be used.
normalize_digits: Boolean; if true, all digits are replaced by 0s.
Returns:
a list of integers, the token-ids for the sentence.
"""
v = [vocabulary.get(w.encode('UTF-8'), UNK_ID) for w in sentence]
return v
def data_to_token_ids_source(data_path, target_path, vocabulary_path,
tokenizer=None, normalize_digits=False):
"""Tokenize data file and turn into token-ids using given vocabulary file.
This function loads data line-by-line from data_path, calls the above
sentence_to_token_ids, and saves the result to target_path. See comment
for sentence_to_token_ids on the details of token-ids format.
Args:
data_path: path to the data file in one-sentence-per-line format.
target_path: path where the file with token-ids will be created.
vocabulary_path: path to the vocabulary file.
tokenizer: a function to use to tokenize each sentence;
if None, basic_tokenizer will be used.
normalize_digits: Boolean; if true, all digits are replaced by 0s.
"""
if not gfile.Exists(target_path):
print("Tokenizing data in %s" % data_path)
vocab, _ = initialize_vocabulary(vocabulary_path)
with gfile.GFile(data_path, mode="rb") as data_file:
with gfile.GFile(target_path, mode="w") as tokens_file:
counter = 0
for line in data_file:
counter += 1
if counter % 100000 == 0:
print(" tokenizing line %d" % counter)
fvs=[]
for fv in eval(line):
token_ids = sentence_to_token_ids(fv, vocab, tokenizer,
normalize_digits)
fvs.append(token_ids)
tokens_file.write(str(fvs) + "\n")
def data_to_token_ids_target(data_path, target_path, vocabulary_path,
tokenizer=None, normalize_digits=False):
"""Tokenize data file and turn into token-ids using given vocabulary file.
This function loads data line-by-line from data_path, calls the above
sentence_to_token_ids, and saves the result to target_path. See comment
for sentence_to_token_ids on the details of token-ids format.
Args:
data_path: path to the data file in one-sentence-per-line format.
target_path: path where the file with token-ids will be created.
vocabulary_path: path to the vocabulary file.
tokenizer: a function to use to tokenize each sentence;
if None, basic_tokenizer will be used.
normalize_digits: Boolean; if true, all digits are replaced by 0s.
"""
if not gfile.Exists(target_path):
print("Tokenizing data in %s" % data_path)
vocab, _ = initialize_vocabulary(vocabulary_path)
with gfile.GFile(data_path, mode="rb") as data_file:
with gfile.GFile(target_path, mode="w") as tokens_file:
counter = 0
for line in data_file:
line = line.replace('\\n', '\\\\n')
counter += 1
if counter % 100000 == 0:
print(" tokenizing line %d" % counter)
token_ids = sentence_to_token_ids(eval(line), vocab, tokenizer,
normalize_digits)
tokens_file.write(str(token_ids) + "\n")
def prepare_data(data_dir, en_vocabulary_size, fr_vocabulary_size, tokenizer=None):
"""Get WMT data into data_dir, create vocabularies and tokenize data.
Args:
data_dir: directory in which the data sets will be stored.
en_vocabulary_size: size of the English vocabulary to create and use.
fr_vocabulary_size: size of the French vocabulary to create and use.
tokenizer: a function to use to tokenize each data sentence;
if None, basic_tokenizer will be used.
Returns:
A tuple of 6 elements:
(1) path to the token-ids for English training data-set,
(2) path to the token-ids for French training data-set,
(3) path to the token-ids for English development data-set,
(4) path to the token-ids for French development data-set,
(5) path to the English vocabulary file,
(6) path to the French vocabulary file.
"""
# Get wmt data to the specified directory.
train_path = get_wmt_enfr_train_set(data_dir)
dev_path = get_wmt_enfr_dev_set(data_dir)
# Create vocabularies of the appropriate sizes.
fr_vocab_path = os.path.join(data_dir, "vocab%d.answers" % fr_vocabulary_size)
en_vocab_path = os.path.join(data_dir, "vocab%d.questions" % en_vocabulary_size)
create_vocabulary_source(en_vocab_path, train_path + ".questions", en_vocabulary_size, tokenizer)
create_vocabulary_target(fr_vocab_path, train_path + ".answers", fr_vocabulary_size, tokenizer)
# Create token ids for the training data.
fr_train_ids_path = train_path + (".ids%d.answers" % fr_vocabulary_size)
en_train_ids_path = train_path + (".ids%d.questions" % en_vocabulary_size)
data_to_token_ids_target(train_path + ".answers", fr_train_ids_path, fr_vocab_path, tokenizer)
data_to_token_ids_source(train_path + ".questions", en_train_ids_path, en_vocab_path, tokenizer)
return (en_train_ids_path, fr_train_ids_path,
en_train_ids_path, fr_train_ids_path,
en_vocab_path, fr_vocab_path)
def get_lens(inputs, split_sentences=False):
lens = np.zeros((len(inputs)), dtype=int)
for i, t in enumerate(inputs):
lens[i] = t.shape[0]
return lens
def get_sentence_lens(inputs):
lens = np.zeros((len(inputs)), dtype=int)
sen_lens = []
max_sen_lens = []
for i, t in enumerate(inputs):
sentence_lens = np.zeros((len(t)), dtype=int)
for j, s in enumerate(t):
sentence_lens[j] = len(s)
lens[i] = len(t)
sen_lens.append(sentence_lens)
max_sen_lens.append(np.max(sentence_lens))
return lens, sen_lens, max(max_sen_lens)
def pad_inputs(inputs, lens, max_len, mode="", sen_lens=None, max_sen_len=None):
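    # Pads variable-length token-id sequences to fixed-size numpy arrays.
    # mode="mask" pads flat 1-D sequences; mode="split_sentences" pads/truncates
    # sentence-split inputs to (batch, max_len, max_sen_len); the default branch
    # squeezes and pads flat inputs.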
if mode == "mask":
padded = [np.pad(inp, (0, max_len - lens[i]), 'constant', constant_values=PAD_ID) for i, inp in enumerate(inputs)]
return np.vstack(padded)
elif mode == "split_sentences":
padded = np.zeros((len(inputs), max_len, max_sen_len))
for i, inp in enumerate(inputs):
padded_sentences = [np.pad(s, (0, max_sen_len - sen_lens[i][j]), 'constant', constant_values=PAD_ID) for j, s in enumerate(inp)]
# trim array according to max allowed inputs
if len(padded_sentences) > max_len:
padded_sentences = padded_sentences[(len(padded_sentences)-max_len):]
lens[i] = max_len
padded_sentences = np.vstack(padded_sentences)
padded_sentences = np.pad(padded_sentences, ((0, max_len - lens[i]),(0,0)), 'constant', constant_values=PAD_ID)
padded[i] = padded_sentences
return padded
padded = [np.pad(np.squeeze(inp, axis=1), (0, max_len - lens[i]), 'constant', constant_values=PAD_ID) for i, inp in enumerate(inputs)]
return np.vstack(padded)
def read_data(source_path, target_path, max_size=None):
"""Read data from source and target files and put into buckets.
Args:
source_path: path to the files with token-ids for the source language.
target_path: path to the file with token-ids for the target language;
it must be aligned with the source file: n-th line contains the desired
output for n-th line from the source_path.
max_size: maximum number of lines to read, all other will be ignored;
if 0 or None, data files will be read completely (no limit).
  Returns:
    a pair (sources, targets): lists of numpy arrays of token-ids, one entry per
    line; each target sequence is wrapped with GO_ID at the start and EOS_ID at
    the end.
"""
sources = []
targets = []
with tf.gfile.GFile(source_path, mode="r") as source_file:
with tf.gfile.GFile(target_path, mode="r") as target_file:
source, target = source_file.readline(), target_file.readline()
count=0
while source and target:
count+=1
#print (count)
sources.append(np.array(eval(source)))
targets.append(np.array([GO_ID]+eval(target)+[EOS_ID]))
source, target = source_file.readline(), target_file.readline()
return sources, targets
def pad_length_bucket(source, targets, config, split_sentences=True):
inputs = source
if split_sentences:
input_lens, sen_lens, max_sen_len = get_sentence_lens(inputs)
max_mask_len = max_sen_len
else:
input_lens = get_lens(inputs)
t_lens = get_lens(targets)
max_t_len = np.max(t_lens)
max_input_len = min(np.max(input_lens), config.max_allowed_inputs)
#pad out arrays to max
if split_sentences:
inputs = pad_inputs(inputs, input_lens, max_input_len, "split_sentences", sen_lens, max_sen_len)
input_masks = np.zeros(len(inputs))
else:
inputs = pad_inputs(inputs, input_lens, max_input_len)
input_masks = pad_inputs(input_masks, mask_lens, max_mask_len, "mask")
targets = pad_inputs(targets, t_lens, max_t_len, "mask")
if config.train_mode:
train = targets[:config.num_train], inputs[:config.num_train], t_lens[:config.num_train], input_lens[:config.num_train], input_masks[:config.num_train]
valid = targets[config.num_train:], inputs[config.num_train:], t_lens[config.num_train:], input_lens[config.num_train:], input_masks[config.num_train:]
return train, valid, max_t_len, max_input_len, max_mask_len
else:
test = targets, inputs, t_lens, input_lens, input_masks, answers, rel_labels
return test, max_t_len, max_input_len, max_mask_len
def get_vocab_size(vocab_path):
with tf.gfile.GFile(vocab_path, mode="r") as vocab_file:
vocab_line = vocab_file.readline()
count=0
while vocab_line:
count+=1
vocab_line = vocab_file.readline()
print(count)
return count
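if __name__ == "__main__":
    # Illustrative sketch, not part of the original module: the vocabulary below
    # is made up; real vocabularies are built by the create_vocabulary_*() helpers.
    toy_vocab = {b"i": 4, b"have": 5, b"a": 6, b"dog": 7}
    print(basic_tokenizer(b"I have a dog."))                            # [b'I', b'have', b'a', b'dog', b'.']
    print(sentence_to_token_ids(["i", "have", "a", "cat"], toy_vocab))  # [4, 5, 6, 3] ("cat" -> UNK_ID)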
|
the-stack_0_10996 | import logging
import logconfig
logconfig.logconfig(filename=None)
logconfig.loglevel(logging.INFO)
import squaregrid
from sde import *
def test_sde():
def c(z):
return 2.0*z*z*z - 1j*z + 0.2
gr = squaregrid.SquareGrid(3.0,255)
def report(Q):
"""Print data about solution in SelfDualityEquation object Q"""
j = int(gr.ny / 2)
for i in range(0,gr.nx,gr.nx // 10):
z = Q.grid.zm[j,i]
u = Q.u[j,i]
u0 = Q.u0[j,i]
print('u(%g%+gi) = \t%f (diff from uzero is %f)' % (z.real,z.imag,u,u-u0))
print("----------------------------------------------------------------------")
print(" FOURIER METHOD")
print("----------------------------------------------------------------------")
global QF
QF = SelfDualityEquation(3,c,gr,method='fourier')
report(QF)
print("----------------------------------------------------------------------")
print(" EULER METHOD")
print("----------------------------------------------------------------------")
global QE
QE = SelfDualityEquation(3,c,gr,method='euler')
report(QE)
|
the-stack_0_10997 | import numpy as np
import os
import pytest
import tempfile
import torch
from mmcv.parallel import MMDataParallel
from os.path import dirname, exists, join
from mmdet3d.apis import (convert_SyncBN, inference_detector,
inference_mono_3d_detector,
inference_multi_modality_detector,
inference_segmentor, init_model, show_result_meshlab,
single_gpu_test)
from mmdet3d.core import Box3DMode
from mmdet3d.core.bbox import (CameraInstance3DBoxes, DepthInstance3DBoxes,
LiDARInstance3DBoxes)
from mmdet3d.datasets import build_dataloader, build_dataset
from mmdet3d.models import build_model
def _get_config_directory():
"""Find the predefined detector config directory."""
try:
# Assume we are running in the source mmdetection3d repo
repo_dpath = dirname(dirname(dirname(__file__)))
except NameError:
# For IPython development when this __file__ is not defined
import mmdet3d
repo_dpath = dirname(dirname(mmdet3d.__file__))
config_dpath = join(repo_dpath, 'configs')
if not exists(config_dpath):
raise Exception('Cannot find config path')
return config_dpath
def _get_config_module(fname):
"""Load a configuration as a python module."""
from mmcv import Config
config_dpath = _get_config_directory()
config_fpath = join(config_dpath, fname)
config_mod = Config.fromfile(config_fpath)
return config_mod
def test_convert_SyncBN():
cfg = _get_config_module(
'pointpillars/hv_pointpillars_fpn_sbn-all_4x8_2x_nus-3d.py')
model_cfg = cfg.model
convert_SyncBN(model_cfg)
assert model_cfg['pts_voxel_encoder']['norm_cfg']['type'] == 'BN1d'
assert model_cfg['pts_backbone']['norm_cfg']['type'] == 'BN2d'
assert model_cfg['pts_neck']['norm_cfg']['type'] == 'BN2d'
def test_show_result_meshlab():
pcd = 'tests/data/nuscenes/samples/LIDAR_TOP/n015-2018-08-02-17-16-37+' \
'0800__LIDAR_TOP__1533201470948018.pcd.bin'
box_3d = LiDARInstance3DBoxes(
torch.tensor(
[[8.7314, -1.8559, -1.5997, 0.4800, 1.2000, 1.8900, 0.0100]]))
labels_3d = torch.tensor([0])
scores_3d = torch.tensor([0.5])
points = np.random.rand(100, 4)
img_meta = dict(
pts_filename=pcd, boxes_3d=box_3d, box_mode_3d=Box3DMode.LIDAR)
data = dict(points=[[torch.tensor(points)]], img_metas=[[img_meta]])
result = [
dict(
pts_bbox=dict(
boxes_3d=box_3d, labels_3d=labels_3d, scores_3d=scores_3d))
]
tmp_dir = tempfile.TemporaryDirectory()
temp_out_dir = tmp_dir.name
out_dir, file_name = show_result_meshlab(data, result, temp_out_dir)
expected_outfile_pred = file_name + '_pred.obj'
expected_outfile_pts = file_name + '_points.obj'
expected_outfile_pred_path = os.path.join(out_dir, file_name,
expected_outfile_pred)
expected_outfile_pts_path = os.path.join(out_dir, file_name,
expected_outfile_pts)
assert os.path.exists(expected_outfile_pred_path)
assert os.path.exists(expected_outfile_pts_path)
tmp_dir.cleanup()
# test multi-modality show
# indoor scene
pcd = 'tests/data/sunrgbd/points/000001.bin'
filename = 'tests/data/sunrgbd/sunrgbd_trainval/image/000001.jpg'
box_3d = DepthInstance3DBoxes(
torch.tensor(
[[-1.1580, 3.3041, -0.9961, 0.3829, 0.4647, 0.5574, 1.1213]]))
img = np.random.randn(1, 3, 608, 832)
k_mat = np.array([[529.5000, 0.0000, 365.0000],
[0.0000, 529.5000, 265.0000], [0.0000, 0.0000, 1.0000]])
rt_mat = np.array([[0.9980, 0.0058, -0.0634], [0.0058, 0.9835, 0.1808],
[0.0634, -0.1808, 0.9815]])
rt_mat = np.array([[1, 0, 0], [0, 0, -1], [0, 1, 0]]) @ rt_mat.transpose(
1, 0)
depth2img = k_mat @ rt_mat
img_meta = dict(
filename=filename,
depth2img=depth2img,
pcd_horizontal_flip=False,
pcd_vertical_flip=False,
box_mode_3d=Box3DMode.DEPTH,
box_type_3d=DepthInstance3DBoxes,
pcd_trans=np.array([0., 0., 0.]),
pcd_scale_factor=1.0,
pts_filename=pcd,
transformation_3d_flow=['R', 'S', 'T'])
data = dict(
points=[[torch.tensor(points)]], img_metas=[[img_meta]], img=[img])
result = [dict(boxes_3d=box_3d, labels_3d=labels_3d, scores_3d=scores_3d)]
tmp_dir = tempfile.TemporaryDirectory()
temp_out_dir = tmp_dir.name
out_dir, file_name = show_result_meshlab(
data, result, temp_out_dir, 0.3, task='multi_modality-det')
expected_outfile_pred = file_name + '_pred.obj'
expected_outfile_pts = file_name + '_points.obj'
expected_outfile_png = file_name + '_img.png'
expected_outfile_proj = file_name + '_pred.png'
expected_outfile_pred_path = os.path.join(out_dir, file_name,
expected_outfile_pred)
expected_outfile_pts_path = os.path.join(out_dir, file_name,
expected_outfile_pts)
expected_outfile_png_path = os.path.join(out_dir, file_name,
expected_outfile_png)
expected_outfile_proj_path = os.path.join(out_dir, file_name,
expected_outfile_proj)
assert os.path.exists(expected_outfile_pred_path)
assert os.path.exists(expected_outfile_pts_path)
assert os.path.exists(expected_outfile_png_path)
assert os.path.exists(expected_outfile_proj_path)
tmp_dir.cleanup()
# outdoor scene
pcd = 'tests/data/kitti/training/velodyne_reduced/000000.bin'
filename = 'tests/data/kitti/training/image_2/000000.png'
box_3d = LiDARInstance3DBoxes(
torch.tensor(
[[6.4495, -3.9097, -1.7409, 1.5063, 3.1819, 1.4716, 1.8782]]))
img = np.random.randn(1, 3, 384, 1280)
lidar2img = np.array(
[[6.09695435e+02, -7.21421631e+02, -1.25125790e+00, -1.23041824e+02],
[1.80384201e+02, 7.64479828e+00, -7.19651550e+02, -1.01016693e+02],
[9.99945343e-01, 1.24365499e-04, 1.04513029e-02, -2.69386917e-01],
[0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]])
img_meta = dict(
filename=filename,
pcd_horizontal_flip=False,
pcd_vertical_flip=False,
box_mode_3d=Box3DMode.LIDAR,
box_type_3d=LiDARInstance3DBoxes,
pcd_trans=np.array([0., 0., 0.]),
pcd_scale_factor=1.0,
pts_filename=pcd,
lidar2img=lidar2img)
data = dict(
points=[[torch.tensor(points)]], img_metas=[[img_meta]], img=[img])
result = [
dict(
pts_bbox=dict(
boxes_3d=box_3d, labels_3d=labels_3d, scores_3d=scores_3d))
]
out_dir, file_name = show_result_meshlab(
data, result, temp_out_dir, 0.1, task='multi_modality-det')
tmp_dir = tempfile.TemporaryDirectory()
temp_out_dir = tmp_dir.name
expected_outfile_pred = file_name + '_pred.obj'
expected_outfile_pts = file_name + '_points.obj'
expected_outfile_png = file_name + '_img.png'
expected_outfile_proj = file_name + '_pred.png'
expected_outfile_pred_path = os.path.join(out_dir, file_name,
expected_outfile_pred)
expected_outfile_pts_path = os.path.join(out_dir, file_name,
expected_outfile_pts)
expected_outfile_png_path = os.path.join(out_dir, file_name,
expected_outfile_png)
expected_outfile_proj_path = os.path.join(out_dir, file_name,
expected_outfile_proj)
assert os.path.exists(expected_outfile_pred_path)
assert os.path.exists(expected_outfile_pts_path)
assert os.path.exists(expected_outfile_png_path)
assert os.path.exists(expected_outfile_proj_path)
tmp_dir.cleanup()
# test mono-3d show
filename = 'tests/data/nuscenes/samples/CAM_BACK_LEFT/n015-2018-' \
'07-18-11-07-57+0800__CAM_BACK_LEFT__1531883530447423.jpg'
box_3d = CameraInstance3DBoxes(
torch.tensor(
[[6.4495, -3.9097, -1.7409, 1.5063, 3.1819, 1.4716, 1.8782]]))
img = np.random.randn(1, 3, 384, 1280)
cam_intrinsic = np.array([[100.0, 0.0, 50.0], [0.0, 100.0, 50.0],
[0.0, 0.0, 1.0]])
img_meta = dict(
filename=filename,
pcd_horizontal_flip=False,
pcd_vertical_flip=False,
box_mode_3d=Box3DMode.CAM,
box_type_3d=CameraInstance3DBoxes,
pcd_trans=np.array([0., 0., 0.]),
pcd_scale_factor=1.0,
cam_intrinsic=cam_intrinsic)
data = dict(
points=[[torch.tensor(points)]], img_metas=[[img_meta]], img=[img])
result = [
dict(
img_bbox=dict(
boxes_3d=box_3d, labels_3d=labels_3d, scores_3d=scores_3d))
]
out_dir, file_name = show_result_meshlab(
data, result, temp_out_dir, 0.1, task='mono-det')
tmp_dir = tempfile.TemporaryDirectory()
temp_out_dir = tmp_dir.name
expected_outfile_png = file_name + '_img.png'
expected_outfile_proj = file_name + '_pred.png'
expected_outfile_png_path = os.path.join(out_dir, file_name,
expected_outfile_png)
expected_outfile_proj_path = os.path.join(out_dir, file_name,
expected_outfile_proj)
assert os.path.exists(expected_outfile_png_path)
assert os.path.exists(expected_outfile_proj_path)
tmp_dir.cleanup()
# test seg show
pcd = 'tests/data/scannet/points/scene0000_00.bin'
points = np.random.rand(100, 6)
img_meta = dict(pts_filename=pcd)
data = dict(points=[[torch.tensor(points)]], img_metas=[[img_meta]])
pred_seg = torch.randint(0, 20, (100, ))
result = [dict(semantic_mask=pred_seg)]
tmp_dir = tempfile.TemporaryDirectory()
temp_out_dir = tmp_dir.name
out_dir, file_name = show_result_meshlab(
data, result, temp_out_dir, task='seg')
expected_outfile_pred = file_name + '_pred.obj'
expected_outfile_pts = file_name + '_points.obj'
expected_outfile_pred_path = os.path.join(out_dir, file_name,
expected_outfile_pred)
expected_outfile_pts_path = os.path.join(out_dir, file_name,
expected_outfile_pts)
assert os.path.exists(expected_outfile_pred_path)
assert os.path.exists(expected_outfile_pts_path)
tmp_dir.cleanup()
def test_inference_detector():
pcd = 'tests/data/kitti/training/velodyne_reduced/000000.bin'
detector_cfg = 'configs/pointpillars/hv_pointpillars_secfpn_' \
'6x8_160e_kitti-3d-3class.py'
detector = init_model(detector_cfg, device='cpu')
results = inference_detector(detector, pcd)
bboxes_3d = results[0][0]['boxes_3d']
scores_3d = results[0][0]['scores_3d']
labels_3d = results[0][0]['labels_3d']
assert bboxes_3d.tensor.shape[0] >= 0
assert bboxes_3d.tensor.shape[1] == 7
assert scores_3d.shape[0] >= 0
assert labels_3d.shape[0] >= 0
def test_inference_multi_modality_detector():
# these two multi-modality models both only have GPU implementations
if not torch.cuda.is_available():
pytest.skip('test requires GPU and torch+cuda')
# indoor scene
pcd = 'tests/data/sunrgbd/points/000001.bin'
img = 'tests/data/sunrgbd/sunrgbd_trainval/image/000001.jpg'
ann_file = 'tests/data/sunrgbd/sunrgbd_infos.pkl'
detector_cfg = 'configs/imvotenet/imvotenet_stage2_'\
'16x8_sunrgbd-3d-10class.py'
detector = init_model(detector_cfg, device='cuda:0')
results = inference_multi_modality_detector(detector, pcd, img, ann_file)
bboxes_3d = results[0][0]['boxes_3d']
scores_3d = results[0][0]['scores_3d']
labels_3d = results[0][0]['labels_3d']
assert bboxes_3d.tensor.shape[0] >= 0
assert bboxes_3d.tensor.shape[1] == 7
assert scores_3d.shape[0] >= 0
assert labels_3d.shape[0] >= 0
# outdoor scene
pcd = 'tests/data/kitti/training/velodyne_reduced/000000.bin'
img = 'tests/data/kitti/training/image_2/000000.png'
ann_file = 'tests/data/kitti/kitti_infos_train.pkl'
detector_cfg = 'configs/mvxnet/dv_mvx-fpn_second_secfpn_adamw_' \
'2x8_80e_kitti-3d-3class.py'
detector = init_model(detector_cfg, device='cuda:0')
results = inference_multi_modality_detector(detector, pcd, img, ann_file)
bboxes_3d = results[0][0]['pts_bbox']['boxes_3d']
scores_3d = results[0][0]['pts_bbox']['scores_3d']
labels_3d = results[0][0]['pts_bbox']['labels_3d']
assert bboxes_3d.tensor.shape[0] >= 0
assert bboxes_3d.tensor.shape[1] == 7
assert scores_3d.shape[0] >= 0
assert labels_3d.shape[0] >= 0
def test_inference_mono_3d_detector():
# FCOS3D only has GPU implementations
if not torch.cuda.is_available():
pytest.skip('test requires GPU and torch+cuda')
img = 'tests/data/nuscenes/samples/CAM_BACK_LEFT/' \
'n015-2018-07-18-11-07-57+0800__CAM_BACK_LEFT__1531883530447423.jpg'
ann_file = 'tests/data/nuscenes/nus_infos_mono3d.coco.json'
detector_cfg = 'configs/fcos3d/fcos3d_r101_caffe_fpn_gn-head_dcn_' \
'2x8_1x_nus-mono3d.py'
detector = init_model(detector_cfg, device='cuda:0')
results = inference_mono_3d_detector(detector, img, ann_file)
bboxes_3d = results[0][0]['img_bbox']['boxes_3d']
scores_3d = results[0][0]['img_bbox']['scores_3d']
labels_3d = results[0][0]['img_bbox']['labels_3d']
assert bboxes_3d.tensor.shape[0] >= 0
assert bboxes_3d.tensor.shape[1] == 9
assert scores_3d.shape[0] >= 0
assert labels_3d.shape[0] >= 0
def test_inference_segmentor():
# PN2 only has GPU implementations
if not torch.cuda.is_available():
pytest.skip('test requires GPU and torch+cuda')
pcd = 'tests/data/scannet/points/scene0000_00.bin'
segmentor_cfg = 'configs/pointnet2/pointnet2_ssg_' \
'16x2_cosine_200e_scannet_seg-3d-20class.py'
segmentor = init_model(segmentor_cfg, device='cuda:0')
results = inference_segmentor(segmentor, pcd)
seg_3d = results[0][0]['semantic_mask']
assert seg_3d.shape == torch.Size([100])
assert seg_3d.min() >= 0
assert seg_3d.max() <= 19
def test_single_gpu_test():
if not torch.cuda.is_available():
pytest.skip('test requires GPU and torch+cuda')
cfg = _get_config_module('votenet/votenet_16x8_sunrgbd-3d-10class.py')
cfg.model.train_cfg = None
model = build_model(cfg.model, test_cfg=cfg.get('test_cfg'))
dataset_cfg = cfg.data.test
dataset_cfg.data_root = './tests/data/sunrgbd'
dataset_cfg.ann_file = 'tests/data/sunrgbd/sunrgbd_infos.pkl'
dataset = build_dataset(dataset_cfg)
data_loader = build_dataloader(
dataset,
samples_per_gpu=1,
workers_per_gpu=cfg.data.workers_per_gpu,
dist=False,
shuffle=False)
model = MMDataParallel(model, device_ids=[0])
results = single_gpu_test(model, data_loader)
bboxes_3d = results[0]['boxes_3d']
scores_3d = results[0]['scores_3d']
labels_3d = results[0]['labels_3d']
assert bboxes_3d.tensor.shape[0] >= 0
assert bboxes_3d.tensor.shape[1] == 7
assert scores_3d.shape[0] >= 0
assert labels_3d.shape[0] >= 0
|
the-stack_0_10999 | ### @export "setup"
import fake_input
_, input = fake_input.create(['', 'Mary had a little lamb',
'Its fleece was white as snow',
'It was also tasty'])
### @export "code"
from sys import argv
script, filename = argv
print(f"We're going to erase {filename}.")
print("If you don't want that, hit CTRL-C (^C).")
print("If you do want that, hit RETURN.")
input("?")
print("Opening the file...")
target = open(filename, 'w')
print("Truncating the file. Goodbye!")
target.truncate()
print("Now I'm going to ask you for three lines.")
line1 = input("line 1: ")
line2 = input("line 2: ")
line3 = input("line 3: ")
print("I'm going to write these to the file.")
target.write(line1)
target.write("\n")
target.write(line2)
target.write("\n")
target.write(line3)
target.write("\n")
print("And finally, we close it.")
target.close()
|
the-stack_0_11001 | from __future__ import print_function, division
from sympy import (
Basic,
sympify,
symbols,
Dummy,
Lambda,
summation,
Piecewise,
S,
cacheit,
Sum,
exp,
I,
Ne,
Eq,
poly,
series,
factorial,
And,
)
from sympy.polys.polyerrors import PolynomialError
from sympy.solvers.solveset import solveset
from sympy.stats.crv import reduce_rational_inequalities_wrap
from sympy.stats.rv import (
NamedArgsMixin,
SinglePSpace,
SingleDomain,
random_symbols,
PSpace,
ConditionalDomain,
RandomDomain,
ProductDomain,
)
from sympy.stats.symbolic_probability import Probability
from sympy.functions.elementary.integers import floor
from sympy.sets.fancysets import Range, FiniteSet
from sympy.sets.sets import Union
from sympy.sets.contains import Contains
from sympy.utilities import filldedent
from sympy.core.sympify import _sympify
import random
class DiscreteDistribution(Basic):
def __call__(self, *args):
return self.pdf(*args)
class SingleDiscreteDistribution(DiscreteDistribution, NamedArgsMixin):
""" Discrete distribution of a single variable
Serves as superclass for PoissonDistribution etc....
Provides methods for pdf, cdf, and sampling
See Also:
sympy.stats.crv_types.*
"""
set = S.Integers
def __new__(cls, *args):
args = list(map(sympify, args))
return Basic.__new__(cls, *args)
@staticmethod
def check(*args):
pass
def sample(self):
""" A random realization from the distribution """
icdf = self._inverse_cdf_expression()
while True:
sample_ = floor(list(icdf(random.uniform(0, 1)))[0])
if sample_ >= self.set.inf:
return sample_
@cacheit
def _inverse_cdf_expression(self):
""" Inverse of the CDF
Used by sample
"""
x = Dummy("x", positive=True, integer=True)
z = Dummy("z", positive=True)
cdf_temp = self.cdf(x)
# Invert CDF
try:
inverse_cdf = solveset(cdf_temp - z, x, domain=S.Reals)
except NotImplementedError:
inverse_cdf = None
if not inverse_cdf or len(inverse_cdf.free_symbols) != 1:
raise NotImplementedError("Could not invert CDF")
return Lambda(z, inverse_cdf)
@cacheit
def compute_cdf(self, **kwargs):
""" Compute the CDF from the PDF
Returns a Lambda
"""
x, z = symbols("x, z", integer=True, cls=Dummy)
left_bound = self.set.inf
# CDF is integral of PDF from left bound to z
pdf = self.pdf(x)
cdf = summation(pdf, (x, left_bound, z), **kwargs)
# CDF Ensure that CDF left of left_bound is zero
cdf = Piecewise((cdf, z >= left_bound), (0, True))
return Lambda(z, cdf)
def _cdf(self, x):
return None
def cdf(self, x, **kwargs):
""" Cumulative density function """
if not kwargs:
cdf = self._cdf(x)
if cdf is not None:
return cdf
return self.compute_cdf(**kwargs)(x)
@cacheit
def compute_characteristic_function(self, **kwargs):
""" Compute the characteristic function from the PDF
Returns a Lambda
"""
x, t = symbols("x, t", real=True, cls=Dummy)
pdf = self.pdf(x)
cf = summation(exp(I * t * x) * pdf, (x, self.set.inf, self.set.sup))
return Lambda(t, cf)
def _characteristic_function(self, t):
return None
def characteristic_function(self, t, **kwargs):
""" Characteristic function """
if not kwargs:
cf = self._characteristic_function(t)
if cf is not None:
return cf
return self.compute_characteristic_function(**kwargs)(t)
@cacheit
def compute_moment_generating_function(self, **kwargs):
t = Dummy("t", real=True)
x = Dummy("x", integer=True)
pdf = self.pdf(x)
mgf = summation(exp(t * x) * pdf, (x, self.set.inf, self.set.sup))
return Lambda(t, mgf)
def _moment_generating_function(self, t):
return None
def moment_generating_function(self, t, **kwargs):
if not kwargs:
mgf = self._moment_generating_function(t)
if mgf is not None:
return mgf
return self.compute_moment_generating_function(**kwargs)(t)
@cacheit
def compute_quantile(self, **kwargs):
""" Compute the Quantile from the PDF
Returns a Lambda
"""
x = Dummy("x", integer=True)
p = Dummy("p", real=True)
left_bound = self.set.inf
pdf = self.pdf(x)
cdf = summation(pdf, (x, left_bound, x), **kwargs)
set = ((x, p <= cdf),)
return Lambda(p, Piecewise(*set))
def _quantile(self, x):
return None
def quantile(self, x, **kwargs):
""" Cumulative density function """
if not kwargs:
quantile = self._quantile(x)
if quantile is not None:
return quantile
return self.compute_quantile(**kwargs)(x)
def expectation(self, expr, var, evaluate=True, **kwargs):
""" Expectation of expression over distribution """
# TODO: support discrete sets with non integer stepsizes
if evaluate:
try:
p = poly(expr, var)
t = Dummy("t", real=True)
mgf = self.moment_generating_function(t)
deg = p.degree()
taylor = poly(series(mgf, t, 0, deg + 1).removeO(), t)
result = 0
for k in range(deg + 1):
result += (
p.coeff_monomial(var ** k)
* taylor.coeff_monomial(t ** k)
* factorial(k)
)
return result
except PolynomialError:
return summation(
expr * self.pdf(var), (var, self.set.inf, self.set.sup), **kwargs
)
else:
return Sum(
expr * self.pdf(var), (var, self.set.inf, self.set.sup), **kwargs
)
def __call__(self, *args):
return self.pdf(*args)
class DiscreteDistributionHandmade(SingleDiscreteDistribution):
_argnames = ("pdf",)
@property
def set(self):
return self.args[1]
def __new__(cls, pdf, set=S.Integers):
return Basic.__new__(cls, pdf, set)
class DiscreteDomain(RandomDomain):
"""
A domain with discrete support with step size one.
Represented using symbols and Range.
"""
is_Discrete = True
class SingleDiscreteDomain(DiscreteDomain, SingleDomain):
def as_boolean(self):
return Contains(self.symbol, self.set)
class ConditionalDiscreteDomain(DiscreteDomain, ConditionalDomain):
"""
Domain with discrete support of step size one, that is restricted by
some condition.
"""
@property
def set(self):
rv = self.symbols
if len(self.symbols) > 1:
raise NotImplementedError(
filldedent(
"""
Multivariate conditional domains are not yet implemented."""
)
)
rv = list(rv)[0]
return reduce_rational_inequalities_wrap(self.condition, rv).intersect(
self.fulldomain.set
)
class DiscretePSpace(PSpace):
is_real = True
is_Discrete = True
@property
def pdf(self):
return self.density(*self.symbols)
def where(self, condition):
rvs = random_symbols(condition)
assert all(r.symbol in self.symbols for r in rvs)
if len(rvs) > 1:
raise NotImplementedError(
filldedent(
"""Multivariate discrete
random variables are not yet supported."""
)
)
conditional_domain = reduce_rational_inequalities_wrap(condition, rvs[0])
conditional_domain = conditional_domain.intersect(self.domain.set)
return SingleDiscreteDomain(rvs[0].symbol, conditional_domain)
def probability(self, condition):
complement = isinstance(condition, Ne)
if complement:
condition = Eq(condition.args[0], condition.args[1])
try:
_domain = self.where(condition).set
if condition == False or _domain is S.EmptySet:
return S.Zero
if condition == True or _domain == self.domain.set:
return S.One
prob = self.eval_prob(_domain)
except NotImplementedError:
from sympy.stats.rv import density
expr = condition.lhs - condition.rhs
dens = density(expr)
if not isinstance(dens, DiscreteDistribution):
dens = DiscreteDistributionHandmade(dens)
z = Dummy("z", real=True)
space = SingleDiscretePSpace(z, dens)
prob = space.probability(condition.__class__(space.value, 0))
if prob is None:
prob = Probability(condition)
return prob if not complement else S.One - prob
def eval_prob(self, _domain):
sym = list(self.symbols)[0]
if isinstance(_domain, Range):
n = symbols("n", integer=True)
inf, sup, step = (r for r in _domain.args)
summand = (self.pdf).replace(sym, n * step)
rv = summation(summand, (n, inf / step, (sup) / step - 1)).doit()
return rv
elif isinstance(_domain, FiniteSet):
pdf = Lambda(sym, self.pdf)
rv = sum(pdf(x) for x in _domain)
return rv
elif isinstance(_domain, Union):
rv = sum(self.eval_prob(x) for x in _domain.args)
return rv
def conditional_space(self, condition):
# XXX: Converting from set to tuple. The order matters to Lambda
# though so we should be starting with a set...
density = Lambda(tuple(self.symbols), self.pdf / self.probability(condition))
condition = condition.xreplace(dict((rv, rv.symbol) for rv in self.values))
domain = ConditionalDiscreteDomain(self.domain, condition)
return DiscretePSpace(domain, density)
class ProductDiscreteDomain(ProductDomain, DiscreteDomain):
def as_boolean(self):
return And(*[domain.as_boolean for domain in self.domains])
class SingleDiscretePSpace(DiscretePSpace, SinglePSpace):
""" Discrete probability space over a single univariate variable """
is_real = True
@property
def set(self):
return self.distribution.set
@property
def domain(self):
return SingleDiscreteDomain(self.symbol, self.set)
def sample(self):
"""
Internal sample method
Returns dictionary mapping RandomSymbol to realization value.
"""
return {self.value: self.distribution.sample()}
def compute_expectation(self, expr, rvs=None, evaluate=True, **kwargs):
rvs = rvs or (self.value,)
if self.value not in rvs:
return expr
expr = _sympify(expr)
expr = expr.xreplace(dict((rv, rv.symbol) for rv in rvs))
x = self.value.symbol
try:
return self.distribution.expectation(expr, x, evaluate=evaluate, **kwargs)
except NotImplementedError:
return Sum(expr * self.pdf, (x, self.set.inf, self.set.sup), **kwargs)
def compute_cdf(self, expr, **kwargs):
if expr == self.value:
x = Dummy("x", real=True)
return Lambda(x, self.distribution.cdf(x, **kwargs))
else:
raise NotImplementedError()
def compute_density(self, expr, **kwargs):
if expr == self.value:
return self.distribution
raise NotImplementedError()
def compute_characteristic_function(self, expr, **kwargs):
if expr == self.value:
t = Dummy("t", real=True)
return Lambda(t, self.distribution.characteristic_function(t, **kwargs))
else:
raise NotImplementedError()
def compute_moment_generating_function(self, expr, **kwargs):
if expr == self.value:
t = Dummy("t", real=True)
return Lambda(t, self.distribution.moment_generating_function(t, **kwargs))
else:
raise NotImplementedError()
def compute_quantile(self, expr, **kwargs):
if expr == self.value:
p = Dummy("p", real=True)
return Lambda(p, self.distribution.quantile(p, **kwargs))
else:
raise NotImplementedError()
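if __name__ == "__main__":
    # Illustrative sketch, not part of the original module: a handmade
    # geometric-style pmf p(k) = (1/2)**k on the positive integers, pushed
    # through the generic machinery defined above.
    from sympy import Rational, Symbol
    k = Symbol("k", integer=True, positive=True)
    halving = DiscreteDistributionHandmade(Lambda(k, Rational(1, 2) ** k), S.Naturals)
    print(halving.pdf(3))  # 1/8
    print(halving.cdf(3))  # expected: 7/8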
|
the-stack_0_11002 | from unittest.mock import patch
from django.core.management import call_command
from django.db.utils import OperationalError
from django.test import TestCase
class CommandTests(TestCase):
def test_wait_for_db_ready(self):
"""Test waiting for db when db is available"""
with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:
gi.return_value = True
call_command('wait_for_db')
self.assertEqual(gi.call_count, 1)
@patch('time.sleep', return_value=True)
def test_wait_for_db(self, ts):
"""Test eaiting for db"""
with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:
gi.side_effect = [OperationalError] * 5 + [True]
call_command('wait_for_db')
self.assertEqual(gi.call_count, 6)
|
the-stack_0_11003 | # Copyright 2020 ByteDance Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from neurst.data.data_pipelines.transcript_data_pipeline import TranscriptDataPipeline
from neurst.metrics import register_metric
from neurst.metrics.metric import Metric
def _wer(ref, hypo):
errors = np.zeros([len(ref) + 1, len(hypo) + 1, 3])
errors[0, :, 1] = np.arange(len(hypo) + 1)
errors[:, 0, 2] = np.arange(len(ref) + 1)
substitution = np.array([1, 0, 0])
insertion = np.array([0, 1, 0])
deletion = np.array([0, 0, 1])
for r, ref in enumerate(ref):
for d, dec in enumerate(hypo):
errors[r + 1, d + 1] = min((
errors[r, d] + (ref != dec) * substitution,
errors[r + 1, d] + insertion,
errors[r, d + 1] + deletion), key=np.sum)
return tuple(errors[-1, -1])
@register_metric
class Wer(Metric):
def __init__(self, language="en", *args, **kwargs):
_ = args
_ = kwargs
self._language = language
super(Wer, self).__init__()
def set_groundtruth(self, groundtruth):
""" Setup inside groundtruth.
Args:
groundtruth: A list of references,
[sent0_ref, sent1_ref, ...]
"""
self._references = [TranscriptDataPipeline.cleanup_transcript(
self._language, x, lowercase=True, remove_punctuation=True) for x in groundtruth]
def greater_or_eq(self, result1, result2):
return self.get_value(result1) <= self.get_value(result2)
def get_value(self, result):
if isinstance(result, (float, np.float32, np.float64)):
return result
return result["WER"]
def call(self, hypothesis, groundtruth=None):
""" Calculate wer
Args:
hypothesis: A list of hypothesis texts.
groundtruth: A list of reference texts.
Returns:
A tuple(wer, substitutions, insertions, deletions)
"""
if groundtruth is None:
groundtruth = self._references
else:
groundtruth = [TranscriptDataPipeline.cleanup_transcript(
self._language, x, lowercase=True, remove_punctuation=True) for x in groundtruth]
hypothesis = [TranscriptDataPipeline.cleanup_transcript(
self._language, x, lowercase=True, remove_punctuation=True) for x in hypothesis]
substitutions = 0
insertions = 0
deletions = 0
numwords = 0
for lref, lout in zip(groundtruth, hypothesis):
# read the reference and output
reftext, output = lref.strip().split(), lout.strip().split()
# compare output to reference
s, i, d = _wer(reftext, output)
substitutions += s
insertions += i
deletions += d
numwords += len(reftext)
substitutions /= numwords
deletions /= numwords
insertions /= numwords
error = substitutions + deletions + insertions
return {
"WER": error * 100.,
"WER-substitutions": substitutions * 100.,
"WER-insertions": insertions * 100.,
"WER-deletions": deletions * 100.
}
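if __name__ == "__main__":
    # Illustrative sketch, not part of the original module: count edit operations
    # for a toy reference/hypothesis pair with the helper above.
    ref = "the cat sat on the mat".split()
    hyp = "the cat sit on mat".split()
    s, i, d = _wer(ref, hyp)
    # expected: 1 substitution ("sat" -> "sit"), 0 insertions, 1 deletion ("the")
    print("substitutions=%d insertions=%d deletions=%d" % (s, i, d))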
|
the-stack_0_11005 | import json
from django.db import connection
from elasticsearch import Elasticsearch
from jobs.models import Job
es_client = Elasticsearch('http://localhost:9200')
def run():
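    # Build the 'jobs' search index from scratch: create the index, apply the
    # JSON mapping, then index one document per Job id using the get_job_data()
    # SQL function (assumed to be defined in the database).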
# Create Index
es_client.indices.create(index='jobs')
# Put Mapping
with open("jobs/job.json", "r") as fp:
es_client.indices.put_mapping(index='jobs', doc_type='job', body=json.load(fp))
# Start Indexing
job_ids = Job.objects.values_list('id', flat=True)
db_cursor = connection.cursor()
for job_id in job_ids:
query = "SELECT get_job_data({});".format(job_id)
db_cursor.execute(query)
result = db_cursor.fetchone()
es_client.index(index='jobs', doc_type='job', body=result[0])
print("Indexed job {}".format(job_id))
|
the-stack_0_11006 | from pandas import DataFrame
excluded = [
'01 Buster',
'838 Spyder',
'Aqua Blaster',
'B.O.X.',
'B.R.1.C.K',
'CHMP',
'Droid Ravager',
'Drumstick',
'Grumpii',
'HBB Renegade',
'MegaBoidz',
'Meta',
'Order 66',
'Puff Boxer',
'R.E.X. 02',
'Red Steel',
'SB Skyhammer',
'T.I.G.E.R.Zero',
'WAT 51',
'| | | | | | | | | | | | | | | |',
]
def unique_filter(df: DataFrame):
return df[~df['Front'].isin(excluded)]
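if __name__ == "__main__":
    # Illustrative sketch, not part of the original module: the card names in
    # this toy frame are made up apart from the two excluded ones.
    df = DataFrame({"Front": ["Grumpii", "Little Branch", "Meta", "Imp"]})
    print(unique_filter(df))  # keeps only the rows not on the exclusion list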
|
the-stack_0_11010 | #!/usr/bin/env python3
# Copyright (c) 2020 The Bitcoin Unlimited developers
import asyncio
import time
from test_framework.util import assert_raises_async, waitFor
from test_framework.test_framework import BitcoinTestFramework
from test_framework.loginit import logging
from test_framework.electrumutil import (ElectrumConnection,
address_to_scripthash, bitcoind_electrum_args)
from test_framework.connectrum.exc import ElectrumErrorResponse
MAX_RPC_CONNECTIONS = 5
MAX_SCRIPTHASH_SUBSCRIPTIONS = 5
SCRIPTHASH_ALIAS_BYTES_LIMIT = 54 * 2 # two bitcoin cash addresses
class ElectrumDoSLimitTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 1
max_args = [
"-electrum.rawarg=--scripthash-subscription-limit={}".format(MAX_SCRIPTHASH_SUBSCRIPTIONS),
"-electrum.rawarg=--scripthash-alias-bytes-limit={}".format(SCRIPTHASH_ALIAS_BYTES_LIMIT),
"-electrum.rawarg=--rpc-max-connections={}".format(MAX_RPC_CONNECTIONS)
]
self.extra_args = [bitcoind_electrum_args() + max_args]
def run_test(self):
n = self.nodes[0]
n.generate(1)
async def async_tests(loop):
await self.test_connection_limit(loop)
await self.test_subscribe_limit(n)
await self.test_scripthash_alias_limit(n)
loop = asyncio.get_event_loop()
loop.run_until_complete(async_tests(loop))
async def test_subscribe_limit(self, n):
cli = ElectrumConnection()
await cli.connect()
logging.info("Testing scripthash subscription limit.")
# Subscribe up to limit
scripthashes = []
for i in range(0, MAX_SCRIPTHASH_SUBSCRIPTIONS):
s = address_to_scripthash(n.getnewaddress())
await cli.subscribe('blockchain.scripthash.subscribe', s)
scripthashes.append(s)
# Next subscription should fail
s = address_to_scripthash(n.getnewaddress())
await assert_raises_async(
ElectrumErrorResponse,
cli.call,
"blockchain.scripthash.subscribe", s)
try:
await cli.call("blockchain.scripthash.subscribe", s)
except ElectrumErrorResponse as e:
error_code = "-32600"
assert error_code in str(e)
assert "subscriptions limit reached" in str(e)
# Subscribing to an existing subscription should not affect the limit.
await cli.subscribe('blockchain.scripthash.subscribe', scripthashes[0])
# Unsubscribing should allow for a new subscription
ok = await cli.call('blockchain.scripthash.unsubscribe', scripthashes[0])
assert(ok)
await cli.subscribe('blockchain.scripthash.subscribe', s)
# ... and also enforce the limit again
await assert_raises_async(ElectrumErrorResponse, cli.call,
'blockchain.scripthash.subscribe',
address_to_scripthash(n.getnewaddress()))
        cli.disconnect()
async def test_scripthash_alias_limit(self, n):
cli = ElectrumConnection()
await cli.connect()
addresses = ["bitcoincash:ppwk8u8cg8cthr3jg0czzays6hsnysykes9amw07kv",
"bitcoincash:qrsrvtc95gg8rrag7dge3jlnfs4j9pe0ugrmeml950"]
# Alias limit allows to subscribe to two addresses.
for a in addresses:
await cli.subscribe('blockchain.address.subscribe', a)
# Third address should fail
third = n.getnewaddress()
await assert_raises_async(
ElectrumErrorResponse,
cli.call,
"blockchain.address.subscribe", third)
try:
await cli.call("blockchain.address.subscribe", third)
except ElectrumErrorResponse as e:
error_code = "-32600"
assert error_code in str(e)
assert "alias subscriptions limit reached" in str(e)
# Unsubscribing should allow for a new subscription
ok = await cli.call('blockchain.address.unsubscribe', addresses[0])
assert(ok)
await cli.subscribe('blockchain.address.subscribe', third)
# ... and also enforce the limit again
await assert_raises_async(ElectrumErrorResponse, cli.call,
'blockchain.address.subscribe', n.getnewaddress())
        cli.disconnect()
async def test_connection_limit(self, loop):
connections = []
for i in range(MAX_RPC_CONNECTIONS):
c = ElectrumConnection()
await c.connect()
connections.append(c)
# Exceed limit, we should get disconnected.
extra_connection = ElectrumConnection()
await extra_connection.connect()
try:
await asyncio.wait_for(extra_connection.call("server.ping"), timeout = 5)
assert(False)
except asyncio.TimeoutError:
# We expect this to timeout
pass
waitFor(5, lambda: not extra_connection.is_connected())
# Drop one connection
connections[0].disconnect()
# New connection should be accepted now.
extra_connection2 = ElectrumConnection()
        await extra_connection2.connect()
await asyncio.wait_for(extra_connection2.call("server.ping"), timeout = 5)
for c in connections[1:] + [extra_connection2]:
c.disconnect()
if __name__ == '__main__':
ElectrumDoSLimitTest().main()
|
the-stack_0_11011 | from datetime import datetime, timedelta
from http import HTTPStatus
from backend.extensions import db
from backend.models import User, JWTToken
from backend.serializers.login_serializer import LoginSchema
from flask import request
from flask_jwt_extended import (
create_access_token,
create_refresh_token,
decode_token,
get_jwt_identity,
jwt_refresh_token_required,
jwt_required,
jwt_optional,
get_raw_jwt,
)
from flask_restful import Resource
from marshmallow import ValidationError
class UserLogin(Resource):
@jwt_optional
def post(self):
current_user = get_jwt_identity()
if current_user:
return (
{"msg": f"User already logged in as {current_user}"},
HTTPStatus.UNAUTHORIZED,
)
if not request.is_json:
return {"msg": "No input data provided"}, HTTPStatus.BAD_REQUEST
schema = LoginSchema()
try:
result = schema.load(request.json)
except ValidationError as error:
return (
{"msg": "Wrong input data", "errors": error.messages},
HTTPStatus.BAD_REQUEST,
)
username = result["username"]
password = result["password"]
if not (username and password):
return ({"msg": "Username and password required"}, HTTPStatus.BAD_REQUEST)
user = User.query.filter_by(username=username).first()
if user and user.check_password(password):
access_token = create_access_token(
identity=username, expires_delta=timedelta(minutes=60)
)
refresh_token = create_refresh_token(
identity=username, expires_delta=timedelta(weeks=1)
)
ret = {"access_token": access_token, "refresh_token": refresh_token}
add_token_to_database(access_token)
add_token_to_database(refresh_token)
return ret, HTTPStatus.CREATED
else:
return {"msg": "Not authorized"}, HTTPStatus.UNAUTHORIZED
class UserLogout(Resource):
@jwt_required
def delete(self):
jti = get_raw_jwt()["jti"]
token = JWTToken.query.filter_by(jti=jti).one()
token.revoked = True
db.session.commit()
return {"msg": "Successfully logged out"}, HTTPStatus.OK
class RefreshAccessToken(Resource):
@jwt_refresh_token_required
def post(self):
current_user = get_jwt_identity()
access_token = create_access_token(
identity=current_user, expires_delta=timedelta(minutes=60)
)
add_token_to_database(access_token)
return {"access_token": access_token}, HTTPStatus.CREATED
class RefreshToken(Resource):
@jwt_refresh_token_required
def delete(self):
jti = get_raw_jwt()["jti"]
token = JWTToken.query.filter_by(jti=jti).one()
token.revoked = True
db.session.commit()
return {"msg": "Refresh token successfully revoked"}, HTTPStatus.OK
def add_token_to_database(encoded_token):
"""
Adds a new token to the database. It is not revoked when it is added.
    :param encoded_token: the encoded JWT whose metadata should be stored.
"""
decoded_token = decode_token(encoded_token)
jti = decoded_token["jti"]
token_type = decoded_token["type"]
user_identity = decoded_token["identity"]
expires = datetime.fromtimestamp(decoded_token["exp"])
revoked = False
db_token = JWTToken(
jti=jti,
token_type=token_type,
user_identity=user_identity,
expires=expires,
revoked=revoked,
)
db.session.add(db_token)
db.session.commit()
|
the-stack_0_11012 | from __future__ import division
import torch
from onmt.translate import penalties
class Beam(object):
"""
Class for managing the internals of the beam search process.
Takes care of beams, back pointers, and scores.
Args:
beam_size (int): Number of beams to use.
pad (int): Magic integer in output vocab.
bos (int): Magic integer in output vocab.
eos (int): Magic integer in output vocab.
n_best (int): Don't stop until at least this many beams have
reached EOS.
cuda (bool): use gpu
global_scorer (onmt.translate.GNMTGlobalScorer): Scorer instance.
min_length (int): Shortest acceptable generation, not counting
begin-of-sentence or end-of-sentence.
stepwise_penalty (bool): Apply coverage penalty at every step.
block_ngram_repeat (int): Block beams where
``block_ngram_repeat``-grams repeat.
exclusion_tokens (set[str]): If a gram contains any of these
tokens, it may repeat.
"""
def __init__(self, size, pad, bos, eos,
n_best=1, cuda=False,
global_scorer=None,
min_length=0,
stepwise_penalty=False,
block_ngram_repeat=0,
exclusion_tokens=set()):
self.size = size
self.tt = torch.cuda if cuda else torch
# The score for each translation on the beam.
self.scores = self.tt.FloatTensor(size).zero_()
self.all_scores = []
# The backpointers at each time-step.
self.prev_ks = []
# The outputs at each time-step.
self.next_ys = [self.tt.LongTensor(size)
.fill_(pad)]
self.next_ys[0][0] = bos
# Has EOS topped the beam yet.
self._eos = eos
self.eos_top = False
# The attentions (matrix) for each time.
self.attn = []
# Time and k pair for finished.
self.finished = []
self.n_best = n_best
# Information for global scoring.
self.global_scorer = global_scorer
self.global_state = {}
# Minimum prediction length
self.min_length = min_length
# Apply Penalty at every step
self.stepwise_penalty = stepwise_penalty
self.block_ngram_repeat = block_ngram_repeat
self.exclusion_tokens = exclusion_tokens
@property
def current_predictions(self):
return self.next_ys[-1]
@property
def current_origin(self):
"""Get the backpointers for the current timestep."""
return self.prev_ks[-1]
def advance(self, word_probs, attn_out):
"""
Given prob over words for every last beam `wordLk` and attention
`attn_out`: Compute and update the beam search.
Parameters:
* `word_probs`- probs of advancing from the last step (K x words)
* `attn_out`- attention at the last step
Returns: True if beam search is complete.
"""
num_words = word_probs.size(1)
if self.stepwise_penalty:
self.global_scorer.update_score(self, attn_out)
# force the output to be longer than self.min_length
cur_len = len(self.next_ys)
if cur_len <= self.min_length:
# assumes there are len(word_probs) predictions OTHER
# than EOS that are greater than -1e20
for k in range(len(word_probs)):
word_probs[k][self._eos] = -1e20
# Sum the previous scores.
if len(self.prev_ks) > 0:
beam_scores = word_probs + self.scores.unsqueeze(1)
# Don't let EOS have children.
for i in range(self.next_ys[-1].size(0)):
if self.next_ys[-1][i] == self._eos:
beam_scores[i] = -1e20
# Block ngram repeats
if self.block_ngram_repeat > 0:
le = len(self.next_ys)
for j in range(self.next_ys[-1].size(0)):
hyp, _ = self.get_hyp(le - 1, j)
ngrams = set()
fail = False
gram = []
for i in range(le - 1):
# Last n tokens, n = block_ngram_repeat
gram = (gram +
[hyp[i].item()])[-self.block_ngram_repeat:]
# Skip the blocking if it is in the exclusion list
if set(gram) & self.exclusion_tokens:
continue
if tuple(gram) in ngrams:
fail = True
ngrams.add(tuple(gram))
if fail:
beam_scores[j] = -10e20
else:
beam_scores = word_probs[0]
flat_beam_scores = beam_scores.view(-1)
best_scores, best_scores_id = flat_beam_scores.topk(self.size, 0,
True, True)
self.all_scores.append(self.scores)
self.scores = best_scores
# best_scores_id is flattened beam x word array, so calculate which
# word and beam each score came from
        prev_k = best_scores_id // num_words  # integer beam index (keeps LongTensor dtype)
self.prev_ks.append(prev_k)
self.next_ys.append((best_scores_id - prev_k * num_words))
self.attn.append(attn_out.index_select(0, prev_k))
self.global_scorer.update_global_state(self)
for i in range(self.next_ys[-1].size(0)):
if self.next_ys[-1][i] == self._eos:
global_scores = self.global_scorer.score(self, self.scores)
s = global_scores[i]
self.finished.append((s, len(self.next_ys) - 1, i))
# End condition is when top-of-beam is EOS and no global score.
if self.next_ys[-1][0] == self._eos:
self.all_scores.append(self.scores)
self.eos_top = True
@property
def done(self):
return self.eos_top and len(self.finished) >= self.n_best
def sort_finished(self, minimum=None):
if minimum is not None:
i = 0
# Add from beam until we have minimum outputs.
while len(self.finished) < minimum:
global_scores = self.global_scorer.score(self, self.scores)
s = global_scores[i]
self.finished.append((s, len(self.next_ys) - 1, i))
i += 1
self.finished.sort(key=lambda a: -a[0])
scores = [sc for sc, _, _ in self.finished]
ks = [(t, k) for _, t, k in self.finished]
return scores, ks
def get_hyp(self, timestep, k):
"""
Walk back to construct the full hypothesis.
"""
hyp, attn = [], []
for j in range(len(self.prev_ks[:timestep]) - 1, -1, -1):
hyp.append(self.next_ys[j + 1][k])
attn.append(self.attn[j][k])
k = self.prev_ks[j][k]
return hyp[::-1], torch.stack(attn[::-1])
class GNMTGlobalScorer(object):
"""NMT re-ranking.
Args:
alpha (float): Length parameter.
beta (float): Coverage parameter.
length_penalty (str): Length penalty strategy.
coverage_penalty (str): Coverage penalty strategy.
Attributes:
alpha (float): See above.
beta (float): See above.
length_penalty (callable): See :class:`penalties.PenaltyBuilder`.
coverage_penalty (callable): See :class:`penalties.PenaltyBuilder`.
"""
@classmethod
def from_opt(cls, opt):
return cls(
opt.alpha,
opt.beta,
opt.length_penalty,
opt.coverage_penalty)
def __init__(self, alpha, beta, length_penalty, coverage_penalty):
self.alpha = alpha
self.beta = beta
penalty_builder = penalties.PenaltyBuilder(coverage_penalty,
length_penalty)
# Term will be subtracted from probability
self.cov_penalty = penalty_builder.coverage_penalty()
# Probability will be divided by this
self.length_penalty = penalty_builder.length_penalty()
def score(self, beam, logprobs):
"""
Rescores a prediction based on penalty functions
"""
len_pen = self.length_penalty(len(beam.next_ys), self.alpha)
normalized_probs = logprobs / len_pen
if not beam.stepwise_penalty:
penalty = self.cov_penalty(beam.global_state["coverage"],
self.beta)
normalized_probs -= penalty
return normalized_probs
def update_score(self, beam, attn):
"""
Function to update scores of a Beam that is not finished
"""
if "prev_penalty" in beam.global_state.keys():
beam.scores.add_(beam.global_state["prev_penalty"])
penalty = self.cov_penalty(beam.global_state["coverage"] + attn,
self.beta)
beam.scores.sub_(penalty)
def update_global_state(self, beam):
"Keeps the coverage vector as sum of attentions"
if len(beam.prev_ks) == 1:
beam.global_state["prev_penalty"] = beam.scores.clone().fill_(0.0)
beam.global_state["coverage"] = beam.attn[-1]
self.cov_total = beam.attn[-1].sum(1)
else:
self.cov_total += torch.min(beam.attn[-1],
beam.global_state['coverage']).sum(1)
beam.global_state["coverage"] = beam.global_state["coverage"] \
.index_select(0, beam.prev_ks[-1]).add(beam.attn[-1])
prev_penalty = self.cov_penalty(beam.global_state["coverage"],
self.beta)
beam.global_state["prev_penalty"] = prev_penalty
|
the-stack_0_11013 | from .BaseRequest import BaseRequest
class UpdateDataAlertRequest(BaseRequest):
"""
Update data alert request for generating API requests to Tableau Server.
:param ts_connection: The Tableau Server connection object.
:type ts_connection: class
:param subject: (Optional) The string to set as the new subject of the alert.
:type subject: string
:param frequency: (Optional) The frequency of the data-driven alert: once, frequently, hourly,
daily, or weekly.
:type frequency: string
:param alert_owner_id: (Optional) The ID of the user to assign as owner of the data-driven alert.
:type alert_owner_id: string
:param is_public_flag: (Optional) Boolean flag.
Determines the visibility of the data-driven alert. If the flag is True,
users with access to the view containing the alert can see the alert and add
themselves as recipients. If the flag is False, then the alert is only visible
to the owner, site or server administrators, and specific users they add as recipients.
:type is_public_flag: boolean
"""
def __init__(self,
ts_connection,
subject=None,
frequency=None,
alert_owner_id=None,
is_public_flag=None):
super().__init__(ts_connection)
self._subject = subject
self._frequency = frequency
self._alert_owner_id = alert_owner_id
self._is_public_flag = is_public_flag
self.base_update_alert_request
@property
def optional_alert_param_keys(self):
return [
'subject',
'frequency',
'public'
]
@property
def optional_owner_param_keys(self):
return ['id']
@property
def optional_alert_param_values(self):
return [
self._subject,
self._frequency,
self._is_public_flag
]
@property
def optional_owner_param_values(self):
return [self._alert_owner_id]
@property
def base_update_alert_request(self):
self._request_body.update({'dataAlert': {}})
return self._request_body
@property
def modified_update_alert_request(self):
self._request_body['dataAlert'].update(
self._get_parameters_dict(
self.optional_alert_param_keys,
self.optional_alert_param_values))
if self._alert_owner_id:
self._request_body['dataAlert'].update({'owner': {}})
self._request_body['dataAlert']['owner'].update(
self._get_parameters_dict(
self.optional_owner_param_keys,
self.optional_owner_param_values))
return self._request_body
def get_request(self):
return self.modified_update_alert_request
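# Hedged usage sketch (illustration only, not part of the original module): builds the JSON
# body for updating a data-driven alert. `conn` stands in for an already authenticated
# Tableau Server connection object expected by BaseRequest; the subject, frequency and
# owner ID below are placeholder values.
def _example_update_alert_body(conn):
    request = UpdateDataAlertRequest(
        ts_connection=conn,
        subject='Daily sales threshold',
        frequency='daily',
        alert_owner_id='1a2b3c4d-0000-0000-0000-000000000000',
        is_public_flag=True)
    return request.get_request()  # dict ready to send to the update-data-alert endpoint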
|
the-stack_0_11015 | import pyodbc
driver = '{Microsoft Access Driver (*.mdb, *.accdb)}'  # full ODBC driver name (unused below; the connection uses the name from pyodbc.dataSources())
filepath = r'C:\Users\weidongc\Desktop\Booking\2020\2020 CN Ads Booking v12.accdb'
myDataSource = pyodbc.dataSources()
access_drive = myDataSource['MS Access Database']
cnxn = pyodbc.connect(driver=access_drive,dbq=filepath,autocommit=True)
crsr = cnxn.cursor()
#grab all the tables
table_list = list(crsr.tables())
# for i in table_list:
# print(i)
table_name = 'wbr'
query = 'select * from {}'.format(table_name)
crsr.execute(query)
result = crsr.fetchall()  # fetch the rows before closing the cursor
crsr.close()
cnxn.close()
print(result)
# df = pd.DataFrame()
#
# df.append(query)
# one_row = crsr.fetchall()
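# Hedged sketch (not in the original script): the commented-out pandas lines above hint at
# loading the query result into a DataFrame; pandas.read_sql can do that directly from a
# pyodbc connection. This helper is illustrative and is not called anywhere in this script.
def load_table_as_dataframe(table_name='wbr'):
    import pandas as pd
    conn = pyodbc.connect(driver=access_drive, dbq=filepath, autocommit=True)
    try:
        return pd.read_sql('select * from {}'.format(table_name), conn)
    finally:
        conn.close()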
|
the-stack_0_11017 | """ ConViT Model
@article{d2021convit,
title={ConViT: Improving Vision Transformers with Soft Convolutional Inductive Biases},
author={d'Ascoli, St{\'e}phane and Touvron, Hugo and Leavitt, Matthew and Morcos, Ari and Biroli, Giulio and Sagun, Levent},
journal={arXiv preprint arXiv:2103.10697},
year={2021}
}
Paper link: https://arxiv.org/abs/2103.10697
Original code: https://github.com/facebookresearch/convit, original copyright below
Modifications and additions for timm hacked together by / Copyright 2021, Ross Wightman
"""
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
'''These modules are adapted from those of timm, see
https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
'''
import torch
import torch.nn as nn
from functools import partial
import torch.nn.functional as F
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from .helpers import build_model_with_cfg
from .layers import DropPath, to_2tuple, trunc_normal_, PatchEmbed, Mlp
from .registry import register_model
from .vision_transformer_hybrid import HybridEmbed
from .fx_features import register_notrace_module
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'fixed_input_size': True,
'first_conv': 'patch_embed.proj', 'classifier': 'head',
**kwargs
}
default_cfgs = {
# ConViT
'convit_tiny': _cfg(
url="https://dl.fbaipublicfiles.com/convit/convit_tiny.pth"),
'convit_small': _cfg(
url="https://dl.fbaipublicfiles.com/convit/convit_small.pth"),
'convit_base': _cfg(
url="https://dl.fbaipublicfiles.com/convit/convit_base.pth")
}
@register_notrace_module # reason: FX can't symbolically trace control flow in forward method
class GPSA(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.,
locality_strength=1.):
super().__init__()
self.num_heads = num_heads
self.dim = dim
head_dim = dim // num_heads
self.scale = head_dim ** -0.5
self.locality_strength = locality_strength
self.qk = nn.Linear(dim, dim * 2, bias=qkv_bias)
self.v = nn.Linear(dim, dim, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.pos_proj = nn.Linear(3, num_heads)
self.proj_drop = nn.Dropout(proj_drop)
self.gating_param = nn.Parameter(torch.ones(self.num_heads))
self.rel_indices: torch.Tensor = torch.zeros(1, 1, 1, 3) # silly torchscript hack, won't work with None
def forward(self, x):
B, N, C = x.shape
if self.rel_indices is None or self.rel_indices.shape[1] != N:
self.rel_indices = self.get_rel_indices(N)
attn = self.get_attention(x)
v = self.v(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
def get_attention(self, x):
B, N, C = x.shape
qk = self.qk(x).reshape(B, N, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k = qk[0], qk[1]
pos_score = self.rel_indices.expand(B, -1, -1, -1)
pos_score = self.pos_proj(pos_score).permute(0, 3, 1, 2)
patch_score = (q @ k.transpose(-2, -1)) * self.scale
patch_score = patch_score.softmax(dim=-1)
pos_score = pos_score.softmax(dim=-1)
gating = self.gating_param.view(1, -1, 1, 1)
attn = (1. - torch.sigmoid(gating)) * patch_score + torch.sigmoid(gating) * pos_score
attn /= attn.sum(dim=-1).unsqueeze(-1)
attn = self.attn_drop(attn)
return attn
def get_attention_map(self, x, return_map=False):
attn_map = self.get_attention(x).mean(0) # average over batch
distances = self.rel_indices.squeeze()[:, :, -1] ** .5
dist = torch.einsum('nm,hnm->h', (distances, attn_map)) / distances.size(0)
if return_map:
return dist, attn_map
else:
return dist
def local_init(self):
self.v.weight.data.copy_(torch.eye(self.dim))
locality_distance = 1 # max(1,1/locality_strength**.5)
kernel_size = int(self.num_heads ** .5)
center = (kernel_size - 1) / 2 if kernel_size % 2 == 0 else kernel_size // 2
for h1 in range(kernel_size):
for h2 in range(kernel_size):
position = h1 + kernel_size * h2
self.pos_proj.weight.data[position, 2] = -1
self.pos_proj.weight.data[position, 1] = 2 * (h1 - center) * locality_distance
self.pos_proj.weight.data[position, 0] = 2 * (h2 - center) * locality_distance
self.pos_proj.weight.data *= self.locality_strength
def get_rel_indices(self, num_patches: int) -> torch.Tensor:
img_size = int(num_patches ** .5)
rel_indices = torch.zeros(1, num_patches, num_patches, 3)
ind = torch.arange(img_size).view(1, -1) - torch.arange(img_size).view(-1, 1)
indx = ind.repeat(img_size, img_size)
indy = ind.repeat_interleave(img_size, dim=0).repeat_interleave(img_size, dim=1)
indd = indx ** 2 + indy ** 2
rel_indices[:, :, :, 2] = indd.unsqueeze(0)
rel_indices[:, :, :, 1] = indy.unsqueeze(0)
rel_indices[:, :, :, 0] = indx.unsqueeze(0)
device = self.qk.weight.device
return rel_indices.to(device)
class MHSA(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def get_attention_map(self, x, return_map=False):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
attn_map = (q @ k.transpose(-2, -1)) * self.scale
attn_map = attn_map.softmax(dim=-1).mean(0)
img_size = int(N ** .5)
ind = torch.arange(img_size).view(1, -1) - torch.arange(img_size).view(-1, 1)
indx = ind.repeat(img_size, img_size)
indy = ind.repeat_interleave(img_size, dim=0).repeat_interleave(img_size, dim=1)
indd = indx ** 2 + indy ** 2
distances = indd ** .5
distances = distances.to(x.device)  # keep on the same device as the input instead of hard-coding CUDA
dist = torch.einsum('nm,hnm->h', (distances, attn_map)) / N
if return_map:
return dist, attn_map
else:
return dist
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, use_gpsa=True, **kwargs):
super().__init__()
self.norm1 = norm_layer(dim)
self.use_gpsa = use_gpsa
if self.use_gpsa:
self.attn = GPSA(
dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop, **kwargs)
else:
self.attn = MHSA(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
def forward(self, x):
x = x + self.drop_path(self.attn(self.norm1(x)))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class ConViT(nn.Module):
""" Vision Transformer with support for patch or hybrid CNN input stage
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,
num_heads=12, mlp_ratio=4., qkv_bias=False, drop_rate=0., attn_drop_rate=0.,
drop_path_rate=0., hybrid_backbone=None, norm_layer=nn.LayerNorm, global_pool=None,
local_up_to_layer=3, locality_strength=1., use_pos_embed=True):
super().__init__()
embed_dim *= num_heads
self.num_classes = num_classes
self.local_up_to_layer = local_up_to_layer
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
self.locality_strength = locality_strength
self.use_pos_embed = use_pos_embed
if hybrid_backbone is not None:
self.patch_embed = HybridEmbed(
hybrid_backbone, img_size=img_size, in_chans=in_chans, embed_dim=embed_dim)
else:
self.patch_embed = PatchEmbed(
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
num_patches = self.patch_embed.num_patches
self.num_patches = num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.pos_drop = nn.Dropout(p=drop_rate)
if self.use_pos_embed:
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
trunc_normal_(self.pos_embed, std=.02)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.blocks = nn.ModuleList([
Block(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer,
use_gpsa=True,
locality_strength=locality_strength)
if i < local_up_to_layer else
Block(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer,
use_gpsa=False)
for i in range(depth)])
self.norm = norm_layer(embed_dim)
# Classifier head
self.feature_info = [dict(num_chs=embed_dim, reduction=0, module='head')]
self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
trunc_normal_(self.cls_token, std=.02)
self.apply(self._init_weights)
for n, m in self.named_modules():
if hasattr(m, 'local_init'):
m.local_init()
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed', 'cls_token'}
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes, global_pool=''):
self.num_classes = num_classes
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
B = x.shape[0]
x = self.patch_embed(x)
cls_tokens = self.cls_token.expand(B, -1, -1)
if self.use_pos_embed:
x = x + self.pos_embed
x = self.pos_drop(x)
for u, blk in enumerate(self.blocks):
if u == self.local_up_to_layer:
x = torch.cat((cls_tokens, x), dim=1)
x = blk(x)
x = self.norm(x)
return x[:, 0]
def forward(self, x):
x = self.forward_features(x)
x = self.head(x)
return x
def _create_convit(variant, pretrained=False, **kwargs):
if kwargs.get('features_only', None):
raise RuntimeError('features_only not implemented for Vision Transformer models.')
return build_model_with_cfg(
ConViT, variant, pretrained,
default_cfg=default_cfgs[variant],
**kwargs)
@register_model
def convit_tiny(pretrained=False, **kwargs):
model_args = dict(
local_up_to_layer=10, locality_strength=1.0, embed_dim=48,
num_heads=4, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model = _create_convit(variant='convit_tiny', pretrained=pretrained, **model_args)
return model
@register_model
def convit_small(pretrained=False, **kwargs):
model_args = dict(
local_up_to_layer=10, locality_strength=1.0, embed_dim=48,
num_heads=9, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model = _create_convit(variant='convit_small', pretrained=pretrained, **model_args)
return model
@register_model
def convit_base(pretrained=False, **kwargs):
model_args = dict(
local_up_to_layer=10, locality_strength=1.0, embed_dim=48,
num_heads=16, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model = _create_convit(variant='convit_base', pretrained=pretrained, **model_args)
return model
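# Hedged usage sketch (not part of the original timm module): instantiate the smallest
# variant without pretrained weights and run a dummy forward pass; shapes follow the
# default_cfgs above (3x224x224 input, 1000 classes).
def _example_convit_forward():
    model = convit_tiny(pretrained=False)
    model.eval()
    with torch.no_grad():
        logits = model(torch.randn(1, 3, 224, 224))
    return logits.shape  # expected: torch.Size([1, 1000])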
|
the-stack_0_11019 | import collections
import datetime
import logging
from celery import shared_task
from dateutil.relativedelta import relativedelta
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from django.db import transaction
from django.db.models import Q, Sum
from django.utils import timezone
from rest_framework import status
from waldur_core.core import utils as core_utils
from waldur_core.structure import models as structure_models
from waldur_mastermind.common.utils import create_request
from waldur_mastermind.invoices import models as invoices_models
from waldur_mastermind.invoices import utils as invoice_utils
from . import exceptions, models, utils, views
logger = logging.getLogger(__name__)
User = get_user_model()
def approve_order(order, user):
order.approve()
order.approved_by = user
order.approved_at = timezone.now()
order.save()
serialized_order = core_utils.serialize_instance(order)
serialized_user = core_utils.serialize_instance(user)
transaction.on_commit(
lambda: process_order.delay(serialized_order, serialized_user)
)
@shared_task
def process_order(serialized_order, serialized_user):
# Skip remote plugin because it is going to processed
# only after it gets approved by service provider
from waldur_mastermind.marketplace_remote import PLUGIN_NAME as REMOTE_PLUGIN_NAME
order = core_utils.deserialize_instance(serialized_order)
user = core_utils.deserialize_instance(serialized_user)
for item in order.items.exclude(offering__type=REMOTE_PLUGIN_NAME):
item.set_state_executing()
item.save(update_fields=['state'])
utils.process_order_item(item, user)
@shared_task
def process_order_item(serialized_order_item, serialized_user):
order_item = core_utils.deserialize_instance(serialized_order_item)
user = core_utils.deserialize_instance(serialized_user)
utils.process_order_item(order_item, user)
@shared_task
def create_screenshot_thumbnail(uuid):
screenshot = models.Screenshot.objects.get(uuid=uuid)
utils.create_screenshot_thumbnail(screenshot)
@shared_task
def notify_order_approvers(uuid):
order = models.Order.objects.get(uuid=uuid)
users = order.get_approvers()
emails = [u.email for u in users if u.email]
link = core_utils.format_homeport_link(
'projects/{project_uuid}/marketplace-order-list/',
project_uuid=order.project.uuid,
)
context = {
'order_url': link,
'order': order,
'site_name': settings.WALDUR_CORE['SITE_NAME'],
}
core_utils.broadcast_mail('marketplace', 'notification_approval', context, emails)
@shared_task
def notify_about_resource_change(event_type, context, resource_uuid):
resource = models.Resource.objects.get(uuid=resource_uuid)
project = structure_models.Project.all_objects.get(id=resource.project_id)
emails = project.get_users().values_list('email', flat=True)
core_utils.broadcast_mail('marketplace', event_type, context, emails)
def filter_aggregate_by_scope(queryset, scope):
scope_path = None
if isinstance(scope, structure_models.Project):
scope_path = 'resource__project'
if isinstance(scope, structure_models.Customer):
scope_path = 'resource__project__customer'
if scope_path:
queryset = queryset.filter(**{scope_path: scope})
return queryset
def aggregate_reported_usage(start, end, scope):
queryset = models.ComponentUsage.objects.filter(
date__gte=start, date__lte=end
).exclude(component__parent=None)
queryset = filter_aggregate_by_scope(queryset, scope)
queryset = queryset.values('component__parent_id').annotate(total=Sum('usage'))
return {row['component__parent_id']: row['total'] for row in queryset}
def aggregate_fixed_usage(start, end, scope):
queryset = models.ResourcePlanPeriod.objects.filter(
# Resource has been active during billing period
Q(start__gte=start, end__lte=end)
| Q(end__isnull=True) # Resource is still active
| Q(
end__gte=start, end__lte=end
) # Resource has been launched in previous billing period and stopped in current
)
queryset = filter_aggregate_by_scope(queryset, scope)
queryset = queryset.values('plan__components__component__parent_id').annotate(
total=Sum('plan__components__amount')
)
return {
row['plan__components__component__parent_id']: row['total'] for row in queryset
}
def calculate_usage_for_scope(start, end, scope):
reported_usage = aggregate_reported_usage(start, end, scope)
fixed_usage = aggregate_fixed_usage(start, end, scope)
# It needs to cover a case when a key is None because OfferingComponent.parent can be None.
fixed_usage.pop(None, None)
components = set(reported_usage.keys()) | set(fixed_usage.keys())
content_type = ContentType.objects.get_for_model(scope)
for component_id in components:
models.CategoryComponentUsage.objects.update_or_create(
content_type=content_type,
object_id=scope.id,
component_id=component_id,
date=start,
defaults={
'reported_usage': reported_usage.get(component_id),
'fixed_usage': fixed_usage.get(component_id),
},
)
@shared_task(name='waldur_mastermind.marketplace.calculate_usage_for_current_month')
def calculate_usage_for_current_month():
start = invoice_utils.get_current_month_start()
end = invoice_utils.get_current_month_end()
scopes = []
for customer in structure_models.Customer.objects.all():
scopes.append(customer)
for project in customer.projects.all():
scopes.append(project)
for scope in scopes:
calculate_usage_for_scope(start, end, scope)
@shared_task(name='waldur_mastermind.marketplace.send_notifications_about_usages')
def send_notifications_about_usages():
for warning in utils.get_info_about_missing_usage_reports():
customer = warning['customer']
emails = [owner.email for owner in customer.get_owners()]
warning['public_resources_url'] = utils.get_public_resources_url(customer)
if customer.serviceprovider.enable_notifications and emails:
core_utils.broadcast_mail(
'marketplace', 'notification_usages', warning, emails
)
@shared_task
def terminate_resource(serialized_resource, serialized_user):
resource = core_utils.deserialize_instance(serialized_resource)
user = core_utils.deserialize_instance(serialized_user)
view = views.ResourceViewSet.as_view({'post': 'terminate'})
response = create_request(view, user, {}, uuid=resource.uuid.hex)
if response.status_code != status.HTTP_200_OK:
raise exceptions.ResourceTerminateException(response.rendered_content)
@shared_task(
name='waldur_mastermind.marketplace.terminate_resources_if_project_end_date_has_been_reached'
)
def terminate_resources_if_project_end_date_has_been_reached():
expired_projects = structure_models.Project.objects.exclude(
end_date__isnull=True
).filter(end_date__lte=timezone.datetime.today())
for project in expired_projects:
resources = models.Resource.objects.filter(project=project).filter(
state__in=(models.Resource.States.OK, models.Resource.States.ERRED)
)
if resources:
utils.schedule_resources_termination(resources)
else:
project.delete()
@shared_task(name='waldur_mastermind.marketplace.notify_about_stale_resource')
def notify_about_stale_resource():
if not settings.WALDUR_MARKETPLACE['ENABLE_STALE_RESOURCE_NOTIFICATIONS']:
return
today = datetime.datetime.today()
prev_1 = today - relativedelta(months=1)
prev_2 = today - relativedelta(months=2)
items = invoices_models.InvoiceItem.objects.filter(
Q(invoice__month=today.month, invoice__year=today.year,)
| Q(invoice__month=prev_1.month, invoice__year=prev_1.year)
| Q(invoice__month=prev_2.month, invoice__year=prev_2.year)
)
actual_resources_ids = []
for item in items:
if item.price:
actual_resources_ids.append(item.resource.id)
resources = (
models.Resource.objects.exclude(id__in=actual_resources_ids)
.exclude(
Q(state=models.Resource.States.TERMINATED)
| Q(state=models.Resource.States.TERMINATING)
| Q(state=models.Resource.States.CREATING)
)
.exclude(offering__billable=False)
)
user_resources = collections.defaultdict(list)
for resource in resources:
owners = resource.project.customer.get_owners().exclude(email='')
resource_url = core_utils.format_homeport_link(
'/projects/{project_uuid}/marketplace-project-resource-details/{resource_uuid}/',
project_uuid=resource.project.uuid.hex,
resource_uuid=resource.uuid.hex,
)
for user in owners:
user_resources[user.email].append(
{'resource': resource, 'resource_url': resource_url}
)
for key, value in user_resources.items():
core_utils.broadcast_mail(
'marketplace',
'notification_about_stale_resources',
{'resources': value},
[key],
)
@shared_task(
name='waldur_mastermind.marketplace.terminate_resource_if_its_end_date_has_been_reached'
)
def terminate_resource_if_its_end_date_has_been_reached():
expired_resources = models.Resource.objects.exclude(
end_date__isnull=True,
state__in=(
models.Resource.States.TERMINATED,
models.Resource.States.TERMINATING,
),
).filter(end_date__lte=timezone.datetime.today())
utils.schedule_resources_termination(expired_resources)
@shared_task
def notify_about_resource_termination(resource_uuid, user_uuid, is_staff_action=None):
resource = models.Resource.objects.get(uuid=resource_uuid)
user = User.objects.get(uuid=user_uuid)
admin_emails = set(
resource.project.get_users(structure_models.ProjectRole.ADMINISTRATOR)
.exclude(email='')
.values_list('email', flat=True)
)
manager_emails = set(
resource.project.get_users(structure_models.ProjectRole.MANAGER)
.exclude(email='')
.values_list('email', flat=True)
)
emails = admin_emails | manager_emails
resource_url = core_utils.format_homeport_link(
'/projects/{project_uuid}/marketplace-project-resource-details/{resource_uuid}/',
project_uuid=resource.project.uuid.hex,
resource_uuid=resource.uuid.hex,
)
context = {'resource': resource, 'user': user, 'resource_url': resource_url}
if is_staff_action:
core_utils.broadcast_mail(
'marketplace',
'marketplace_resource_terminatate_scheduled_staff',
context,
emails,
)
else:
core_utils.broadcast_mail(
'marketplace', 'marketplace_resource_terminatate_scheduled', context, emails
)
|
the-stack_0_11020 | #
# Copyright (C) [2020] Futurewei Technologies, Inc.
#
# FORCE-RISCV is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES
# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the License for the specific language governing permissions and
# limitations under the License.
#
new_registers = [
{
"target": "system",
"register": "fcsr",
"size": 64,
"physical_register": "fcsr",
"index": "0x3",
"fields": [
{"field": "WPRI_VAR", "shift": 8, "size": 56},
{"field": "FRM", "shift": 5, "size": 3},
{"field": "NZ", "shift": 4, "size": 1},
{"field": "DZ", "shift": 3, "size": 1},
{"field": "OF", "shift": 2, "size": 1},
{"field": "UF", "shift": 1, "size": 1},
{"field": "NX", "shift": 0, "size": 1},
],
"choice": {
"name": "fcsr",
"value": "0x3",
"weight": "0",
"description": "URW; Floating-point control and "
"status register.",
},
}
]
changed_registers = [
{
"target": "system",
"register": "fflags",
"size": 64,
"physical_register": "fflags",
"index": "0x1",
"fields": [
{"field": "WPRI_VAR", "shift": 5, "size": 59},
{"field": "NZ", "shift": 4, "size": 1},
{"field": "DZ", "shift": 3, "size": 1},
{"field": "OF", "shift": 2, "size": 1},
{"field": "UF", "shift": 1, "size": 1},
{"field": "NX", "shift": 0, "size": 1},
],
},
{
"target": "system",
"register": "frm",
"size": 64,
"physical_register": "frm",
"index": "0x2",
"fields": [
{"field": "WPRI_VAR", "shift": 8, "size": 56},
{"field": "WPRI_VAR", "shift": 0, "size": 5},
{"field": "FRM", "shift": 5, "size": 3},
],
},
{
"target": "system",
"register": "mscratch",
"size": 64,
"physical_register": "mscratch",
"index": "0x340",
"fields": [{"field": "MSCRATCH", "shift": 0, "size": 64}],
},
]
delete_register_choices = [{"name": "mstatus_hyp"}]
|
the-stack_0_11021 | import requests
from django.contrib.gis.geos import Point
from georiviere.observations.models import Station, StationProfile, Parameter, ParameterTracking, Unit
from . import BaseImportCommand
class Command(BaseImportCommand):
help = "Import physico-chemical quality stations from Hub'Eau API"
api_url = "https://hubeau.eaufrance.fr/api/v1/qualite_rivieres/station_pc"
api_analyse_pc_url = "https://hubeau.eaufrance.fr/api/v1/qualite_rivieres/analyse_pc"
def create_or_update_stations(self, results, verbosity, with_parameters=False):
"""Create or update stations from results"""
station_profile, station_profile_created = StationProfile.objects.get_or_create(
code='PCQUAL'
)
if verbosity >= 2:
if station_profile_created:
self.stdout.write('Created station profile {0}'.format(station_profile))
for station in results:
station_obj, station_created = Station.objects.update_or_create(
code=station['code_station'],
defaults={
'label': station['libelle_station'] or "",
'station_uri': station['uri_station'] or "",
'geom': Point(
station['coordonnee_x'],
station['coordonnee_y'],
srid=2154  # Lambert-93; GeoDjango expects an integer SRID, not a string
),
'hardness': station['durete'],
}
)
station_obj.station_profiles.add(station_profile)
if verbosity >= 2:
if station_created:
self.stdout.write('Created station {0}'.format(station_obj))
else:
self.stdout.write('Updated station {0}'.format(station_obj))
if with_parameters:
# Get parameters from analyse_pc API endpoint
payload = {
'format': 'json',
'size': 50,
'code_station': station_obj.code,
}
response = requests.get(self.api_analyse_pc_url, params=payload)
response_content = response.json()
analysepc_data = response_content['data']
for measure in analysepc_data:
# Create Parameter and Unit for temperature
unit_obj, unit_created = Unit.objects.get_or_create(
code=measure['code_unite'],
defaults={
'label': measure['symbole_unite'],
'symbol': measure['symbole_unite'],
}
)
parameter_obj, parameter_created = Parameter.objects.get_or_create(
code=measure['code_parametre'],
defaults={
'label': measure['libelle_parametre'],
'unit': unit_obj,
}
)
parameter_tracking, parameter_tracking_created = ParameterTracking.objects.get_or_create(
station=station_obj,
parameter=parameter_obj,
defaults={
'label': measure['libelle_parametre'],
'measure_frequency': "",
'transmission_frequency': "",
'data_availability': ParameterTracking.DataAvailabilityChoice.ONLINE,
}
)
if verbosity >= 2 and parameter_tracking_created:
self.stdout.write('Added parameter {0}'.format(parameter_tracking))
|
the-stack_0_11026 | # Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
Methods for generating QAOA cost Hamiltonians corresponding to
different optimization problems.
"""
from typing import Iterable, Union
import networkx as nx
import retworkx as rx
import pennylane as qml
from pennylane import qaoa
########################
# Hamiltonian components
def bit_driver(wires: Union[Iterable, qaoa.Wires], b: int):
r"""Returns the bit-driver cost Hamiltonian.
This Hamiltonian is defined as:
.. math:: H \ = \ (-1)^{b + 1} \displaystyle\sum_{i} Z_i
where :math:`Z_i` is the Pauli-Z operator acting on the
:math:`i`-th wire and :math:`b \ \in \ \{0, \ 1\}`. This Hamiltonian is often used when
constructing larger QAOA cost Hamiltonians.
Args:
wires (Iterable or Wires): The wires on which the Hamiltonian acts
b (int): Either :math:`0` or :math:`1`. Determines whether the Hamiltonian assigns
lower energies to bitstrings with a majority of bits being :math:`0` or
a majority of bits being :math:`1`, respectively.
Returns:
.Hamiltonian:
**Example**
>>> wires = range(3)
>>> hamiltonian = qaoa.bit_driver(wires, 1)
>>> print(hamiltonian)
(1) [Z0]
+ (1) [Z1]
+ (1) [Z2]
"""
if b == 0:
coeffs = [-1 for _ in wires]
elif b == 1:
coeffs = [1 for _ in wires]
else:
raise ValueError(f"'b' must be either 0 or 1, got {b}")
ops = [qml.PauliZ(w) for w in wires]
return qml.Hamiltonian(coeffs, ops)
def edge_driver(graph: Union[nx.Graph, rx.PyGraph], reward: list):
r"""Returns the edge-driver cost Hamiltonian.
Given some graph, :math:`G` with each node representing a wire, and a binary
colouring where each node/wire is assigned either :math:`|0\rangle` or :math:`|1\rangle`, the edge driver
cost Hamiltonian will assign a lower energy to edges represented by qubit states with endpoint colourings
supplied in ``reward``.
For instance, if ``reward`` is ``["11"]``, then edges
with both endpoints coloured as ``1`` (the state :math:`|11\rangle`) will be assigned a lower energy, while
the other colourings (``"00"``, ``"10"``, and ``"01"`` corresponding to states
:math:`|00\rangle`, :math:`|10\rangle`, and :math:`|10\rangle`, respectively) will be assigned a higher energy.
See usage details for more information.
Args:
graph (nx.Graph or rx.PyGraph): The graph on which the Hamiltonian is defined
reward (list[str]): The list of two-bit bitstrings that are assigned a lower energy by the Hamiltonian
Returns:
.Hamiltonian:
**Example**
>>> import networkx as nx
>>> graph = nx.Graph([(0, 1), (1, 2)])
>>> hamiltonian = qaoa.edge_driver(graph, ["11", "10", "01"])
>>> print(hamiltonian)
(0.25) [Z0]
+ (0.25) [Z1]
+ (0.25) [Z1]
+ (0.25) [Z2]
+ (0.25) [Z0 Z1]
+ (0.25) [Z1 Z2]
>>> import retworkx as rx
>>> graph = rx.PyGraph()
>>> graph.add_nodes_from([0, 1, 2])
>>> graph.add_edges_from([(0, 1,""), (1,2,"")])
>>> hamiltonian = qaoa.edge_driver(graph, ["11", "10", "01"])
>>> print(hamiltonian)
(0.25) [Z0]
+ (0.25) [Z1]
+ (0.25) [Z1]
+ (0.25) [Z2]
+ (0.25) [Z0 Z1]
+ (0.25) [Z1 Z2]
In the above example, ``"11"``, ``"10"``, and ``"01"`` are assigned a lower
energy than ``"00"``. For example, a quick calculation of expectation values gives us:
.. math:: \langle 000 | H | 000 \rangle \ = \ 1.5
.. math:: \langle 100 | H | 100 \rangle \ = \ 0.5
.. math:: \langle 110 | H | 110\rangle \ = \ -0.5
In the first example, both vertex pairs are not in ``reward``. In the second example, one pair is in ``reward`` and
the other is not. Finally, in the third example, both pairs are in ``reward``.
.. details::
:title: Usage Details
The goal of many combinatorial problems that can be solved with QAOA is to
find a `Graph colouring <https://en.wikipedia.org/wiki/Graph_coloring>`__ of some supplied
graph :math:`G`, that minimizes some cost function. With QAOA, it is natural to consider the class
of graph colouring problems that only admit two colours, as we can easily encode these two colours
using the :math:`|1\rangle` and :math:`|0\rangle` states of qubits. Therefore, given
some graph :math:`G`, each edge of the graph can be described by a pair of qubits, :math:`|00\rangle`,
:math:`|01\rangle`, :math:`|10\rangle`, or :math:`|11\rangle`, corresponding to the colourings of its endpoints.
When constructing QAOA cost functions, one must "penalize" certain states of the graph, and "reward"
others, by assigning higher and lower energies to these respective configurations. Given a set of vertex-colour
pairs (which each describe a possible state of a graph edge), the ``edge_driver()``
function outputs a Hamiltonian that rewards the pairs in the set, and penalizes the others.
For example, given the reward set: :math:`\{|00\rangle, \ |01\rangle, \ |10\rangle\}` and the graph :math:`G`,
the ``edge_driver()`` function will output the following Hamiltonian:
.. math:: H \ = \ \frac{1}{4} \displaystyle\sum_{(i, j) \in E(G)} \big( Z_{i} Z_{j} \ - \ Z_{i} \ - \ Z_{j} \big)
where :math:`E(G)` is the set of edges of :math:`G`, and :math:`Z_i` is the Pauli-Z operator acting on the
:math:`i`-th wire. As can be checked, this Hamiltonian assigns an energy of :math:`-1/4` to the states
:math:`|00\rangle`, :math:`|01\rangle` and :math:`|10\rangle`, and an energy of :math:`3/4` to the state
:math:`|11\rangle`.
.. Note::
``reward`` must always contain both :math:`|01\rangle` and :math:`|10\rangle`, or neither of the two.
Within an undirected graph, there is no notion of "order"
of edge endpoints, so these two states are effectively the same. Therefore, there is no well-defined way to
penalize one and reward the other.
.. Note::
The absolute difference in energy between colourings in ``reward`` and colourings in its
complement is always :math:`1`.
"""
allowed = ["00", "01", "10", "11"]
if not all(e in allowed for e in reward):
raise ValueError("Encountered invalid entry in 'reward', expected 2-bit bitstrings.")
if "01" in reward and "10" not in reward or "10" in reward and "01" not in reward:
raise ValueError(
"'reward' cannot contain either '10' or '01', must contain neither or both."
)
if not isinstance(graph, (nx.Graph, rx.PyGraph)):
raise ValueError(
f"Input graph must be a nx.Graph or rx.PyGraph, got {type(graph).__name__}"
)
coeffs = []
ops = []
is_rx = isinstance(graph, rx.PyGraph)
graph_nodes = graph.nodes()
graph_edges = sorted(graph.edge_list()) if is_rx else graph.edges
# In RX each node is assigned to an integer index starting from 0;
# thus, we use the following lambda function to get node-values.
get_nvalue = lambda i: graph_nodes[i] if is_rx else i
if len(reward) == 0 or len(reward) == 4:
coeffs = [1 for _ in graph_nodes]
ops = [qml.Identity(v) for v in graph_nodes]
else:
reward = list(set(reward) - {"01"})
sign = -1
if len(reward) == 2:
reward = list({"00", "10", "11"} - set(reward))
sign = 1
reward = reward[0]
if reward == "00":
for e in graph_edges:
coeffs.extend([0.25 * sign, 0.25 * sign, 0.25 * sign])
ops.extend(
[
qml.PauliZ(get_nvalue(e[0])) @ qml.PauliZ(get_nvalue(e[1])),
qml.PauliZ(get_nvalue(e[0])),
qml.PauliZ(get_nvalue(e[1])),
]
)
if reward == "10":
for e in graph_edges:
coeffs.append(-0.5 * sign)
ops.append(qml.PauliZ(get_nvalue(e[0])) @ qml.PauliZ(get_nvalue(e[1])))
if reward == "11":
for e in graph_edges:
coeffs.extend([0.25 * sign, -0.25 * sign, -0.25 * sign])
ops.extend(
[
qml.PauliZ(get_nvalue(e[0])) @ qml.PauliZ(get_nvalue(e[1])),
qml.PauliZ(get_nvalue(e[0])),
qml.PauliZ(get_nvalue(e[1])),
]
)
return qml.Hamiltonian(coeffs, ops)
#######################
# Optimization problems
def maxcut(graph: Union[nx.Graph, rx.PyGraph]):
r"""Returns the QAOA cost Hamiltonian and the recommended mixer corresponding to the
MaxCut problem, for a given graph.
The goal of the MaxCut problem for a particular graph is to find a partition of nodes into two sets,
such that the number of edges in the graph with endpoints in different sets is maximized. Formally,
we wish to find the `cut of the graph <https://en.wikipedia.org/wiki/Cut_(graph_theory)>`__ such
that the number of edges crossing the cut is maximized.
The MaxCut cost Hamiltonian is defined as:
.. math:: H_C \ = \ \frac{1}{2} \displaystyle\sum_{(i, j) \in E(G)} \big( Z_i Z_j \ - \ \mathbb{I} \big),
where :math:`G` is a graph, :math:`\mathbb{I}` is the identity, and :math:`Z_i` and :math:`Z_j` are
the Pauli-Z operators on the :math:`i`-th and :math:`j`-th wire respectively.
The mixer Hamiltonian returned from :func:`~qaoa.maxcut` is :func:`~qaoa.x_mixer` applied to all wires.
.. note::
**Recommended initialization circuit:**
Even superposition over all basis states
Args:
graph (nx.Graph or rx.PyGraph): a graph defining the pairs of wires on which each term of the Hamiltonian acts
Returns:
(.Hamiltonian, .Hamiltonian): The cost and mixer Hamiltonians
**Example**
>>> import networkx as nx
>>> graph = nx.Graph([(0, 1), (1, 2)])
>>> cost_h, mixer_h = qml.qaoa.maxcut(graph)
>>> print(cost_h)
(-1.0) [I0]
+ (0.5) [Z0 Z1]
+ (0.5) [Z1 Z2]
>>> print(mixer_h)
(1) [X0]
+ (1) [X1]
+ (1) [X2]
>>> import retworkx as rx
>>> graph = rx.PyGraph()
>>> graph.add_nodes_from([0, 1, 2])
>>> graph.add_edges_from([(0, 1,""), (1,2,"")])
>>> cost_h, mixer_h = qml.qaoa.maxcut(graph)
>>> print(cost_h)
(-1.0) [I0]
+ (0.5) [Z0 Z1]
+ (0.5) [Z1 Z2]
>>> print(mixer_h)
(1) [X0]
+ (1) [X1]
+ (1) [X2]
"""
if not isinstance(graph, (nx.Graph, rx.PyGraph)):
raise ValueError(
f"Input graph must be a nx.Graph or rx.PyGraph, got {type(graph).__name__}"
)
is_rx = isinstance(graph, rx.PyGraph)
graph_nodes = graph.nodes()
graph_edges = sorted(graph.edge_list()) if is_rx else graph.edges
# In RX each node is assigned to an integer index starting from 0;
# thus, we use the following lambda function to get node-values.
get_nvalue = lambda i: graph_nodes[i] if is_rx else i
identity_h = qml.Hamiltonian(
[-0.5 for e in graph_edges],
[qml.Identity(get_nvalue(e[0])) @ qml.Identity(get_nvalue(e[1])) for e in graph_edges],
)
H = edge_driver(graph, ["10", "01"]) + identity_h
# store the valuable information that all observables are in one commuting group
H.grouping_indices = [list(range(len(H.ops)))]
return (H, qaoa.x_mixer(graph_nodes))
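# Hedged end-to-end sketch (not part of the original module): wires the MaxCut cost/mixer
# Hamiltonians into a depth-2 QAOA circuit with the recommended even-superposition
# initialization. The triangle graph, device choice and angles below are illustrative only.
def _example_maxcut_qaoa():
    graph = nx.Graph([(0, 1), (1, 2), (2, 0)])
    cost_h, mixer_h = maxcut(graph)
    wires = range(3)
    dev = qml.device("default.qubit", wires=wires)
    def qaoa_layer(gamma, alpha):
        qaoa.cost_layer(gamma, cost_h)
        qaoa.mixer_layer(alpha, mixer_h)
    @qml.qnode(dev)
    def circuit(params):
        for w in wires:
            qml.Hadamard(wires=w)  # even superposition over all basis states
        qml.layer(qaoa_layer, 2, params[0], params[1])
        return qml.expval(cost_h)
    # params[0] holds the gamma angles per layer, params[1] the alpha angles per layer
    return circuit([[0.5, 0.5], [0.5, 0.5]])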
def max_independent_set(graph: Union[nx.Graph, rx.PyGraph], constrained: bool = True):
r"""For a given graph, returns the QAOA cost Hamiltonian and the recommended mixer corresponding to the Maximum Independent Set problem.
Given some graph :math:`G`, an independent set is a set of vertices such that no pair of vertices in the set
share a common edge. The Maximum Independent Set problem, is the problem of finding the largest such set.
Args:
graph (nx.Graph or rx.PyGraph): a graph whose edges define the pairs of vertices on which each term of the Hamiltonian acts
constrained (bool): specifies the variant of QAOA that is performed (constrained or unconstrained)
Returns:
(.Hamiltonian, .Hamiltonian): The cost and mixer Hamiltonians
.. details::
:title: Usage Details
There are two variations of QAOA for this problem, constrained and unconstrained:
**Constrained**
.. note::
This method of constrained QAOA was introduced by
`Hadfield, Wang, Gorman, Rieffel, Venturelli, and Biswas (2019) <https://doi.org/10.3390/a12020034>`__.
The Maximum Independent Set cost Hamiltonian for constrained QAOA is defined as:
.. math:: H_C \ = \ \displaystyle\sum_{v \in V(G)} Z_{v},
where :math:`V(G)` is the set of vertices of the input graph, and :math:`Z_i` is the Pauli-Z
operator applied to the :math:`i`-th vertex.
The returned mixer Hamiltonian is :func:`~qaoa.bit_flip_mixer` applied to :math:`G`.
.. note::
**Recommended initialization circuit:**
Each wire in the :math:`|0\rangle` state.
**Unconstrained**
The Maximum Independent Set cost Hamiltonian for unconstrained QAOA is defined as:
.. math:: H_C \ = \ 3 \sum_{(i, j) \in E(G)} (Z_i Z_j \ - \ Z_i \ - \ Z_j) \ + \
\displaystyle\sum_{i \in V(G)} Z_i
where :math:`E(G)` is the set of edges of :math:`G`, :math:`V(G)` is the set of vertices,
and :math:`Z_i` is the Pauli-Z operator acting on the :math:`i`-th vertex.
The returned mixer Hamiltonian is :func:`~qaoa.x_mixer` applied to all wires.
.. note::
**Recommended initialization circuit:**
Even superposition over all basis states.
"""
if not isinstance(graph, (nx.Graph, rx.PyGraph)):
raise ValueError(
f"Input graph must be a nx.Graph or rx.PyGraph, got {type(graph).__name__}"
)
graph_nodes = graph.nodes()
if constrained:
cost_h = bit_driver(graph_nodes, 1)
cost_h.grouping_indices = [list(range(len(cost_h.ops)))]
return (cost_h, qaoa.bit_flip_mixer(graph, 0))
cost_h = 3 * edge_driver(graph, ["10", "01", "00"]) + bit_driver(graph_nodes, 1)
mixer_h = qaoa.x_mixer(graph_nodes)
# store the valuable information that all observables are in one commuting group
cost_h.grouping_indices = [list(range(len(cost_h.ops)))]
return (cost_h, mixer_h)
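# Hedged sketch (not part of the original module): for the constrained variant the wires stay
# in the default all-|0> state (the recommended initialization) and the bit-flip mixer keeps
# the evolution inside the independent-set subspace. Graph and angles are illustrative only.
def _example_constrained_mis():
    graph = nx.Graph([(0, 1), (1, 2)])
    cost_h, mixer_h = max_independent_set(graph, constrained=True)
    dev = qml.device("default.qubit", wires=list(graph.nodes))
    @qml.qnode(dev)
    def circuit(gamma, alpha):
        qaoa.cost_layer(gamma, cost_h)
        qaoa.mixer_layer(alpha, mixer_h)
        return qml.expval(cost_h)
    return circuit(0.5, 0.5)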
def min_vertex_cover(graph: Union[nx.Graph, rx.PyGraph], constrained: bool = True):
r"""Returns the QAOA cost Hamiltonian and the recommended mixer corresponding to the Minimum Vertex Cover problem,
for a given graph.
To solve the Minimum Vertex Cover problem, we attempt to find the smallest
`vertex cover <https://en.wikipedia.org/wiki/Vertex_cover>`__ of a graph --- a collection of vertices such that
every edge in the graph has one of the vertices as an endpoint.
Args:
graph (nx.Graph or rx.PyGraph): a graph whose edges define the pairs of vertices on which each term of the Hamiltonian acts
constrained (bool): specifies the variant of QAOA that is performed (constrained or unconstrained)
Returns:
(.Hamiltonian, .Hamiltonian): The cost and mixer Hamiltonians
.. details::
:title: Usage Details
There are two variations of QAOA for this problem, constrained and unconstrained:
**Constrained**
.. note::
This method of constrained QAOA was introduced by Hadfield, Wang, Gorman, Rieffel, Venturelli, and Biswas
in arXiv:1709.03489.
The Minimum Vertex Cover cost Hamiltonian for constrained QAOA is defined as:
.. math:: H_C \ = \ - \displaystyle\sum_{v \in V(G)} Z_{v},
where :math:`V(G)` is the set of vertices of the input graph, and :math:`Z_i` is the Pauli-Z operator
applied to the :math:`i`-th vertex.
The returned mixer Hamiltonian is :func:`~qaoa.bit_flip_mixer` applied to :math:`G`.
.. note::
**Recommended initialization circuit:**
Each wire in the :math:`|1\rangle` state.
**Unconstrained**
The Minimum Vertex Cover cost Hamiltonian for unconstrained QAOA is defined as:
.. math:: H_C \ = \ 3 \sum_{(i, j) \in E(G)} (Z_i Z_j \ + \ Z_i \ + \ Z_j) \ - \
\displaystyle\sum_{i \in V(G)} Z_i
where :math:`E(G)` is the set of edges of :math:`G`, :math:`V(G)` is the set of vertices,
and :math:`Z_i` is the Pauli-Z operator acting on the :math:`i`-th vertex.
The returned mixer Hamiltonian is :func:`~qaoa.x_mixer` applied to all wires.
.. note::
**Recommended initialization circuit:**
Even superposition over all basis states.
"""
if not isinstance(graph, (nx.Graph, rx.PyGraph)):
raise ValueError(
f"Input graph must be a nx.Graph or rx.PyGraph, got {type(graph).__name__}"
)
graph_nodes = graph.nodes()
if constrained:
cost_h = bit_driver(graph_nodes, 0)
cost_h.grouping_indices = [list(range(len(cost_h.ops)))]
return (cost_h, qaoa.bit_flip_mixer(graph, 1))
cost_h = 3 * edge_driver(graph, ["11", "10", "01"]) + bit_driver(graph_nodes, 0)
mixer_h = qaoa.x_mixer(graph_nodes)
# store the valuable information that all observables are in one commuting group
cost_h.grouping_indices = [list(range(len(cost_h.ops)))]
return (cost_h, mixer_h)
def max_clique(graph: Union[nx.Graph, rx.PyGraph], constrained: bool = True):
r"""Returns the QAOA cost Hamiltonian and the recommended mixer corresponding to the Maximum Clique problem,
for a given graph.
The goal of Maximum Clique is to find the largest `clique <https://en.wikipedia.org/wiki/Clique_(graph_theory)>`__ of a
graph --- the largest subgraph such that all vertices are connected by an edge.
Args:
graph (nx.Graph or rx.PyGraph): a graph whose edges define the pairs of vertices on which each term of the Hamiltonian acts
constrained (bool): specifies the variant of QAOA that is performed (constrained or unconstrained)
Returns:
(.Hamiltonian, .Hamiltonian): The cost and mixer Hamiltonians
.. details::
:title: Usage Details
There are two variations of QAOA for this problem, constrained and unconstrained:
**Constrained**
.. note::
This method of constrained QAOA was introduced by Hadfield, Wang, Gorman, Rieffel, Venturelli, and Biswas
in arXiv:1709.03489.
The Maximum Clique cost Hamiltonian for constrained QAOA is defined as:
.. math:: H_C \ = \ \displaystyle\sum_{v \in V(G)} Z_{v},
where :math:`V(G)` is the set of vertices of the input graph, and :math:`Z_i` is the Pauli-Z operator
applied to the :math:`i`-th
vertex.
The returned mixer Hamiltonian is :func:`~qaoa.bit_flip_mixer` applied to :math:`\bar{G}`,
the complement of the graph.
.. note::
**Recommended initialization circuit:**
Each wire in the :math:`|0\rangle` state.
**Unconstrained**
The Maximum Clique cost Hamiltonian for unconstrained QAOA is defined as:
.. math:: H_C \ = \ 3 \sum_{(i, j) \in E(\bar{G})}
(Z_i Z_j \ - \ Z_i \ - \ Z_j) \ + \ \displaystyle\sum_{i \in V(G)} Z_i
where :math:`V(G)` is the set of vertices of the input graph :math:`G`, :math:`E(\bar{G})` is the set of
edges of the complement of :math:`G`, and :math:`Z_i` is the Pauli-Z operator applied to the
:math:`i`-th vertex.
The returned mixer Hamiltonian is :func:`~qaoa.x_mixer` applied to all wires.
.. note::
**Recommended initialization circuit:**
Even superposition over all basis states.
"""
if not isinstance(graph, (nx.Graph, rx.PyGraph)):
raise ValueError(
f"Input graph must be a nx.Graph or rx.PyGraph, got {type(graph).__name__}"
)
graph_nodes = graph.nodes()
graph_complement = (
rx.complement(graph) if isinstance(graph, rx.PyGraph) else nx.complement(graph)
)
if constrained:
cost_h = bit_driver(graph_nodes, 1)
cost_h.grouping_indices = [list(range(len(cost_h.ops)))]
return (cost_h, qaoa.bit_flip_mixer(graph_complement, 0))
cost_h = 3 * edge_driver(graph_complement, ["10", "01", "00"]) + bit_driver(graph_nodes, 1)
mixer_h = qaoa.x_mixer(graph_nodes)
# store the valuable information that all observables are in one commuting group
cost_h.grouping_indices = [list(range(len(cost_h.ops)))]
return (cost_h, mixer_h)
def max_weight_cycle(graph: Union[nx.Graph, rx.PyGraph, rx.PyDiGraph], constrained: bool = True):
r"""Returns the QAOA cost Hamiltonian and the recommended mixer corresponding to the
maximum-weighted cycle problem, for a given graph.
The maximum-weighted cycle problem is defined in the following way (see
`here <https://1qbit.com/whitepaper/arbitrage/>`__ for more details).
The product of weights of a subset of edges in a graph is given by
.. math:: P = \prod_{(i, j) \in E} [(c_{ij} - 1)x_{ij} + 1]
where :math:`E` are the edges of the graph, :math:`x_{ij}` is a binary number that selects
whether to include the edge :math:`(i, j)` and :math:`c_{ij}` is the corresponding edge weight.
Our objective is to maximize :math:`P`, subject to selecting the :math:`x_{ij}` so that
our subset of edges composes a `cycle <https://en.wikipedia.org/wiki/Cycle_(graph_theory)>`__.
Args:
graph (nx.Graph or rx.PyGraph or rx.PyDiGraph): the directed graph on which the Hamiltonians are defined
constrained (bool): specifies the variant of QAOA that is performed (constrained or unconstrained)
Returns:
(.Hamiltonian, .Hamiltonian, dict): The cost and mixer Hamiltonians, as well as a dictionary
mapping from wires to the graph's edges
.. details::
:title: Usage Details
There are two variations of QAOA for this problem, constrained and unconstrained:
**Constrained**
.. note::
This method of constrained QAOA was introduced by Hadfield, Wang, Gorman, Rieffel,
Venturelli, and Biswas in `arXiv:1709.03489 <https://arxiv.org/abs/1709.03489>`__.
The maximum weighted cycle cost Hamiltonian for unconstrained QAOA is
.. math:: H_C = H_{\rm loss}.
Here, :math:`H_{\rm loss}` is a loss Hamiltonian:
.. math:: H_{\rm loss} = \sum_{(i, j) \in E} Z_{ij}\log c_{ij}
where :math:`E` are the edges of the graph and :math:`Z_{ij}` is a qubit Pauli-Z matrix
acting upon the wire specified by the edge :math:`(i, j)` (see :func:`~.loss_hamiltonian`
for more details).
The returned mixer Hamiltonian is :func:`~.cycle_mixer` given by
.. math:: H_M = \frac{1}{4}\sum_{(i, j)\in E}
\left(\sum_{k \in V, k\neq i, k\neq j, (i, k) \in E, (k, j) \in E}
\left[X_{ij}X_{ik}X_{kj} +Y_{ij}Y_{ik}X_{kj} + Y_{ij}X_{ik}Y_{kj} - X_{ij}Y_{ik}Y_{kj}\right]
\right).
This mixer provides transitions between collections of cycles, i.e., any subset of edges
in :math:`E` such that all the graph's nodes :math:`V` have zero net flow
(see the :func:`~.net_flow_constraint` function).
.. note::
**Recommended initialization circuit:**
Your circuit must prepare a state that corresponds to a cycle (or a superposition
of cycles). Follow the example code below to see how this is done.
**Unconstrained**
The maximum weighted cycle cost Hamiltonian for constrained QAOA is defined as:
.. math:: H_C \ = H_{\rm loss} + 3 H_{\rm netflow} + 3 H_{\rm outflow}.
The netflow constraint Hamiltonian :func:`~.net_flow_constraint` is given by
.. math:: H_{\rm netflow} = \sum_{i \in V} \left((d_{i}^{\rm out} - d_{i}^{\rm in})\mathbb{I} -
\sum_{j, (i, j) \in E} Z_{ij} + \sum_{j, (j, i) \in E} Z_{ji} \right)^{2},
where :math:`d_{i}^{\rm out}` and :math:`d_{i}^{\rm in}` are
the outdegree and indegree, respectively, of node :math:`i`. It is minimized whenever a
subset of edges in :math:`E` results in zero net flow from each node in :math:`V`.
The outflow constraint Hamiltonian :func:`~.out_flow_constraint` is given by
.. math:: H_{\rm outflow} = \sum_{i\in V}\left(d_{i}^{out}(d_{i}^{out} - 2)\mathbb{I}
- 2(d_{i}^{out}-1)\sum_{j,(i,j)\in E}\hat{Z}_{ij} +
\left( \sum_{j,(i,j)\in E}\hat{Z}_{ij} \right)^{2}\right).
It is minimized whenever a subset of edges in :math:`E` results in an outflow of at most one
from each node in :math:`V`.
The returned mixer Hamiltonian is :func:`~.x_mixer` applied to all wires.
.. note::
**Recommended initialization circuit:**
Even superposition over all basis states.
**Example**
First set up a simple graph:
.. code-block:: python
import pennylane as qml
import numpy as np
import networkx as nx
a = np.random.random((4, 4))
np.fill_diagonal(a, 0)
g = nx.DiGraph(a)
The cost and mixer Hamiltonian as well as the mapping from wires to edges can be loaded
using:
>>> cost, mixer, mapping = qml.qaoa.max_weight_cycle(g, constrained=True)
Since we are using ``constrained=True``, we must ensure that the input state to the QAOA
algorithm corresponds to a cycle. Consider the mapping:
>>> mapping
{0: (0, 1),
1: (0, 2),
2: (0, 3),
3: (1, 0),
4: (1, 2),
5: (1, 3),
6: (2, 0),
7: (2, 1),
8: (2, 3),
9: (3, 0),
10: (3, 1),
11: (3, 2)}
A simple cycle is given by the edges ``(0, 1)`` and ``(1, 0)`` and corresponding wires
``0`` and ``3``. Hence, the state :math:`|100100000000\rangle` corresponds to a cycle and
can be prepared using :class:`~.BasisState` or simple :class:`~.PauliX` rotations on the
``0`` and ``3`` wires.
"""
if not isinstance(graph, (nx.Graph, rx.PyGraph, rx.PyDiGraph)):
raise ValueError(
f"Input graph must be a nx.Graph or rx.PyGraph or rx.PyDiGraph, got {type(graph).__name__}"
)
mapping = qaoa.cycle.wires_to_edges(graph)
if constrained:
cost_h = qaoa.cycle.loss_hamiltonian(graph)
cost_h.grouping_indices = [list(range(len(cost_h.ops)))]
return (cost_h, qaoa.cycle.cycle_mixer(graph), mapping)
cost_h = qaoa.cycle.loss_hamiltonian(graph) + 3 * (
qaoa.cycle.net_flow_constraint(graph) + qaoa.cycle.out_flow_constraint(graph)
)
mixer_h = qaoa.x_mixer(mapping.keys())
return (cost_h, mixer_h, mapping)
|
the-stack_0_11027 | # CSC 321, Assignment 4
#
# This is the main training file for the CycleGAN part of the assignment.
#
# Usage:
# ======
# To train with the default hyperparamters (saves results to samples_cyclegan/):
# python cycle_gan.py
#
# To train with cycle consistency loss (saves results to samples_cyclegan_cycle/):
# python cycle_gan.py --use_cycle_consistency_loss
#
#
# For optional experimentation:
# -----------------------------
# If you have a powerful computer (ideally with a GPU), then you can obtain better results by
# increasing the number of filters used in the generator and/or discriminator, as follows:
# python cycle_gan.py --g_conv_dim=64 --d_conv_dim=64
import os
import pdb
import pickle
import argparse
import warnings
warnings.filterwarnings("ignore")
# Torch imports
import torch
import torch.nn as nn
import torch.optim as optim
# Numpy & Scipy imports
import numpy as np
import scipy
import scipy.misc
# Local imports
import utils
from data_loader import get_emoji_loader
from models import CycleGenerator, DCDiscriminator
SEED = 11
# Set the random seed manually for reproducibility.
np.random.seed(SEED)
torch.manual_seed(SEED)
if torch.cuda.is_available():
torch.cuda.manual_seed(SEED)
def print_models(G_XtoY, G_YtoX, D_X, D_Y):
"""Prints model information for the generators and discriminators.
"""
print(" G_XtoY ")
print("---------------------------------------")
print(G_XtoY)
print("---------------------------------------")
print(" G_YtoX ")
print("---------------------------------------")
print(G_YtoX)
print("---------------------------------------")
print(" D_X ")
print("---------------------------------------")
print(D_X)
print("---------------------------------------")
print(" D_Y ")
print("---------------------------------------")
print(D_Y)
print("---------------------------------------")
def create_model(opts):
"""Builds the generators and discriminators.
"""
G_XtoY = CycleGenerator(conv_dim=opts.g_conv_dim, init_zero_weights=opts.init_zero_weights, batch_norm=not opts.disable_bn)
G_YtoX = CycleGenerator(conv_dim=opts.g_conv_dim, init_zero_weights=opts.init_zero_weights, batch_norm=not opts.disable_bn)
D_X = DCDiscriminator(conv_dim=opts.d_conv_dim)
D_Y = DCDiscriminator(conv_dim=opts.d_conv_dim)
print_models(G_XtoY, G_YtoX, D_X, D_Y)
if torch.cuda.is_available():
G_XtoY.cuda()
G_YtoX.cuda()
D_X.cuda()
D_Y.cuda()
print('Models moved to GPU.')
return G_XtoY, G_YtoX, D_X, D_Y
def checkpoint(iteration, G_XtoY, G_YtoX, D_X, D_Y, opts):
"""Saves the parameters of both generators G_YtoX, G_XtoY and discriminators D_X, D_Y.
"""
G_XtoY_path = os.path.join(opts.checkpoint_dir, 'G_XtoY.pkl')
G_YtoX_path = os.path.join(opts.checkpoint_dir, 'G_YtoX.pkl')
D_X_path = os.path.join(opts.checkpoint_dir, 'D_X.pkl')
D_Y_path = os.path.join(opts.checkpoint_dir, 'D_Y.pkl')
torch.save(G_XtoY.state_dict(), G_XtoY_path)
torch.save(G_YtoX.state_dict(), G_YtoX_path)
torch.save(D_X.state_dict(), D_X_path)
torch.save(D_Y.state_dict(), D_Y_path)
def load_checkpoint(opts):
"""Loads the generator and discriminator models from checkpoints.
"""
G_XtoY_path = os.path.join(opts.load, 'G_XtoY.pkl')
G_YtoX_path = os.path.join(opts.load, 'G_YtoX.pkl')
D_X_path = os.path.join(opts.load, 'D_X.pkl')
D_Y_path = os.path.join(opts.load, 'D_Y.pkl')
G_XtoY = CycleGenerator(conv_dim=opts.g_conv_dim, init_zero_weights=opts.init_zero_weights)
G_YtoX = CycleGenerator(conv_dim=opts.g_conv_dim, init_zero_weights=opts.init_zero_weights)
D_X = DCDiscriminator(conv_dim=opts.d_conv_dim)
D_Y = DCDiscriminator(conv_dim=opts.d_conv_dim)
G_XtoY.load_state_dict(torch.load(G_XtoY_path, map_location=lambda storage, loc: storage))
G_YtoX.load_state_dict(torch.load(G_YtoX_path, map_location=lambda storage, loc: storage))
D_X.load_state_dict(torch.load(D_X_path, map_location=lambda storage, loc: storage))
D_Y.load_state_dict(torch.load(D_Y_path, map_location=lambda storage, loc: storage))
if torch.cuda.is_available():
G_XtoY.cuda()
G_YtoX.cuda()
D_X.cuda()
D_Y.cuda()
print('Models moved to GPU.')
return G_XtoY, G_YtoX, D_X, D_Y
def merge_images(sources, targets, opts, k=10):
"""Creates a grid consisting of pairs of columns, where the first column in
    each pair contains source images and the second column in each pair
contains images generated by the CycleGAN from the corresponding images in
the first column.
"""
_, _, h, w = sources.shape
row = int(np.sqrt(opts.batch_size))
merged = np.zeros([3, row*h, row*w*2])
for idx, (s, t) in enumerate(zip(sources, targets)):
i = idx // row
j = idx % row
        merged[:, i*h:(i+1)*h, (j*2)*w:(j*2+1)*w] = s
        merged[:, i*h:(i+1)*h, (j*2+1)*w:(j*2+2)*w] = t
return merged.transpose(1, 2, 0)
def save_samples(iteration, fixed_Y, fixed_X, G_YtoX, G_XtoY, opts):
"""Saves samples from both generators X->Y and Y->X.
"""
fake_X = G_YtoX(fixed_Y)
fake_Y = G_XtoY(fixed_X)
X, fake_X = utils.to_data(fixed_X), utils.to_data(fake_X)
Y, fake_Y = utils.to_data(fixed_Y), utils.to_data(fake_Y)
merged = merge_images(X, fake_Y, opts)
path = os.path.join(opts.sample_dir, 'sample-{:06d}-X-Y.png'.format(iteration))
scipy.misc.imsave(path, merged)
print('Saved {}'.format(path))
merged = merge_images(Y, fake_X, opts)
path = os.path.join(opts.sample_dir, 'sample-{:06d}-Y-X.png'.format(iteration))
scipy.misc.imsave(path, merged)
print('Saved {}'.format(path))
def training_loop(dataloader_X, dataloader_Y, test_dataloader_X, test_dataloader_Y, opts):
"""Runs the training loop.
* Saves checkpoint every opts.checkpoint_every iterations
* Saves generated samples every opts.sample_every iterations
"""
# Create generators and discriminators
if opts.load:
G_XtoY, G_YtoX, D_X, D_Y = load_checkpoint(opts)
else:
G_XtoY, G_YtoX, D_X, D_Y = create_model(opts)
g_params = list(G_XtoY.parameters()) + list(G_YtoX.parameters()) # Get generator parameters
d_params = list(D_X.parameters()) + list(D_Y.parameters()) # Get discriminator parameters
# Create optimizers for the generators and discriminators
g_optimizer = optim.Adam(g_params, opts.lr, [opts.beta1, opts.beta2])
d_optimizer = optim.Adam(d_params, opts.lr, [opts.beta1, opts.beta2])
iter_X = iter(dataloader_X)
iter_Y = iter(dataloader_Y)
test_iter_X = iter(test_dataloader_X)
test_iter_Y = iter(test_dataloader_Y)
# Get some fixed data from domains X and Y for sampling. These are images that are held
    # constant throughout training and allow us to inspect the model's performance.
fixed_X = utils.to_var(test_iter_X.next()[0])
fixed_Y = utils.to_var(test_iter_Y.next()[0])
iter_per_epoch = min(len(iter_X), len(iter_Y))
for iteration in range(1, opts.train_iters+1):
# Reset data_iter for each epoch
if iteration % iter_per_epoch == 0:
iter_X = iter(dataloader_X)
iter_Y = iter(dataloader_Y)
images_X, labels_X = iter_X.next()
images_X, labels_X = utils.to_var(images_X), utils.to_var(labels_X).long().squeeze()
images_Y, labels_Y = iter_Y.next()
images_Y, labels_Y = utils.to_var(images_Y), utils.to_var(labels_Y).long().squeeze()
# ============================================
# TRAIN THE DISCRIMINATORS
# ============================================
#########################################
## FILL THIS IN ##
#########################################
# Train with real images
d_optimizer.zero_grad()
# 1. Compute the discriminator losses on real images
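        # (Filled-in choice: a least-squares GAN objective is used here, i.e. the real-image
        #  term below is the batch mean of (D(x) - 1)^2 and the fake-image term further down
        #  is the batch mean of D(fake)^2.)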
#print(images_X.size()[0])
#print(images_X.size().shape)
inv_m = 1 / images_X.size()[0]
inv_n = 1 / images_Y.size()[0]
D_X_loss = torch.sum((D_X(images_X) - 1)**2) * inv_m
D_Y_loss = torch.sum((D_Y(images_Y) - 1)**2) * inv_n
d_real_loss = D_X_loss + D_Y_loss
d_real_loss.backward()
d_optimizer.step()
# Train with fake images
d_optimizer.zero_grad()
# 2. Generate fake images that look like domain X based on real images in domain Y
fake_X = G_YtoX(images_Y)
# 3. Compute the loss for D_X
D_X_loss = inv_n * torch.sum(D_X(fake_X)**2)
# 4. Generate fake images that look like domain Y based on real images in domain X
fake_Y = G_XtoY(images_X)
# 5. Compute the loss for D_Y
D_Y_loss = inv_m * torch.sum(D_Y(fake_Y)**2)
d_fake_loss = D_X_loss + D_Y_loss
d_fake_loss.backward()
d_optimizer.step()
# =========================================
# TRAIN THE GENERATORS
# =========================================
#########################################
## FILL THIS IN: Y--X-->Y CYCLE ##
#########################################
g_optimizer.zero_grad()
# 1. Generate fake images that look like domain X based on real images in domain Y
fake_X = G_YtoX(images_Y)
# 2. Compute the generator loss based on domain X
g_loss = inv_n * torch.sum((D_X(fake_X) - 1)**2)
if opts.use_cycle_consistency_loss:
reconstructed_Y = G_XtoY(fake_X)
# 3. Compute the cycle consistency loss (the reconstruction loss)
cycle_consistency_loss = inv_n * torch.sum((images_Y - reconstructed_Y)**2)
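            # (Note: a squared-error reconstruction penalty is used for the cycle term here;
            #  the original CycleGAN paper uses an L1 cycle-consistency loss instead.)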
g_loss += cycle_consistency_loss
g_loss.backward()
g_optimizer.step()
#########################################
## FILL THIS IN: X--Y-->X CYCLE ##
#########################################
g_optimizer.zero_grad()
# 1. Generate fake images that look like domain Y based on real images in domain X
fake_Y = G_XtoY(images_X)
# 2. Compute the generator loss based on domain Y
g_loss = inv_m * torch.sum((D_Y(fake_Y) - 1)**2)
if opts.use_cycle_consistency_loss:
reconstructed_X = G_YtoX(fake_Y)
# 3. Compute the cycle consistency loss (the reconstruction loss)
cycle_consistency_loss = inv_m * torch.sum((images_X - reconstructed_X)**2)
g_loss += cycle_consistency_loss
g_loss.backward()
g_optimizer.step()
# Print the log info
if iteration % opts.log_step == 0:
print('Iteration [{:5d}/{:5d}] | d_real_loss: {:6.4f} | d_Y_loss: {:6.4f} | d_X_loss: {:6.4f} | '
'd_fake_loss: {:6.4f} | g_loss: {:6.4f}'.format(
iteration, opts.train_iters, d_real_loss.data[0], D_Y_loss.data[0],
D_X_loss.data[0], d_fake_loss.data[0], g_loss.data[0]))
# Save the generated samples
if iteration % opts.sample_every == 0:
save_samples(iteration, fixed_Y, fixed_X, G_YtoX, G_XtoY, opts)
# Save the model parameters
if iteration % opts.checkpoint_every == 0:
checkpoint(iteration, G_XtoY, G_YtoX, D_X, D_Y, opts)
def main(opts):
"""Loads the data, creates checkpoint and sample directories, and starts the training loop.
"""
# Create train and test dataloaders for images from the two domains X and Y
dataloader_X, test_dataloader_X = get_emoji_loader(emoji_type=opts.X, opts=opts)
dataloader_Y, test_dataloader_Y = get_emoji_loader(emoji_type=opts.Y, opts=opts)
# Create checkpoint and sample directories
utils.create_dir(opts.checkpoint_dir)
utils.create_dir(opts.sample_dir)
# Start training
training_loop(dataloader_X, dataloader_Y, test_dataloader_X, test_dataloader_Y, opts)
def print_opts(opts):
"""Prints the values of all command-line arguments.
"""
print('=' * 80)
print('Opts'.center(80))
print('-' * 80)
for key in opts.__dict__:
if opts.__dict__[key]:
print('{:>30}: {:<30}'.format(key, opts.__dict__[key]).center(80))
print('=' * 80)
def create_parser():
"""Creates a parser for command-line arguments.
"""
parser = argparse.ArgumentParser()
# Model hyper-parameters
parser.add_argument('--image_size', type=int, default=32, help='The side length N to convert images to NxN.')
parser.add_argument('--g_conv_dim', type=int, default=32)
parser.add_argument('--d_conv_dim', type=int, default=32)
parser.add_argument('--use_cycle_consistency_loss', action='store_true', default=False, help='Choose whether to include the cycle consistency term in the loss.')
parser.add_argument('--init_zero_weights', action='store_true', default=False, help='Choose whether to initialize the generator conv weights to 0 (implements the identity function).')
    parser.add_argument('--disable_bn', action='store_true', help='Disable batch normalization (BN).')
# Training hyper-parameters
parser.add_argument('--train_iters', type=int, default=600, help='The number of training iterations to run (you can Ctrl-C out earlier if you want).')
parser.add_argument('--batch_size', type=int, default=16, help='The number of images in a batch.')
parser.add_argument('--num_workers', type=int, default=0, help='The number of threads to use for the DataLoader.')
parser.add_argument('--lr', type=float, default=0.0003, help='The learning rate (default 0.0003)')
parser.add_argument('--beta1', type=float, default=0.5)
parser.add_argument('--beta2', type=float, default=0.999)
# Data sources
parser.add_argument('--X', type=str, default='Apple', choices=['Apple', 'Windows'], help='Choose the type of images for domain X.')
parser.add_argument('--Y', type=str, default='Windows', choices=['Apple', 'Windows'], help='Choose the type of images for domain Y.')
# Saving directories and checkpoint/sample iterations
parser.add_argument('--checkpoint_dir', type=str, default='checkpoints_cyclegan')
parser.add_argument('--sample_dir', type=str, default='samples_cyclegan')
parser.add_argument('--load', type=str, default=None)
parser.add_argument('--log_step', type=int , default=10)
parser.add_argument('--sample_every', type=int , default=100)
parser.add_argument('--checkpoint_every', type=int , default=800)
return parser
if __name__ == '__main__':
parser = create_parser()
opts = parser.parse_args()
if opts.use_cycle_consistency_loss:
opts.sample_dir = 'samples_cyclegan_cycle'
if opts.load:
opts.sample_dir = '{}_pretrained'.format(opts.sample_dir)
opts.sample_every = 20
print_opts(opts)
main(opts)
|
the-stack_0_11028 | import os
import flask
from flask import send_from_directory
from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy
from backend.utils import CustomJsonEncoder
app = flask.Flask(__name__)
app.json_encoder = CustomJsonEncoder
app.config["DEBUG"] = os.environ.get("DEBUG")
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get("DATABASE_URL")
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
migrate = Migrate(app, db)
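# Serve built frontend assets; any unknown path falls back to index.html so that
# client-side routing keeps working (this assumes a single-page app built into ../frontend/dist/).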
# noinspection PyBroadException
@app.route("/<path:filename>")
def fallback(filename):
public_dir = os.path.abspath("../frontend/dist/")
try:
return send_from_directory(public_dir, path=filename)
except Exception:
return send_from_directory(public_dir, path="index.html")
|
the-stack_0_11030 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
import sys
from telemetry.core import util
def Run(project_config, no_browser=False,
disable_cloud_storage_io_during_test=False):
args = sys.argv[1:]
assert '--top-level-dir' not in args, (
'Top level directory for running tests should be specified through '
'the instance of telemetry.project_config.ProjectConfig.')
assert '--client-config' not in args, (
'Client config file to be used for telemetry should be specified through '
'the instance of telemetry.project_config.ProjectConfig.')
assert project_config.top_level_dir, 'Must specify top level dir for project'
args.extend(['--top-level-dir', project_config.top_level_dir])
for c in project_config.client_configs:
args.extend(['--client-config', c])
if no_browser and not '--no-browser' in args:
args.extend(['--no-browser'])
if project_config.default_chrome_root and not '--chrome-root' in args:
args.extend(['--chrome-root', project_config.default_chrome_root])
if disable_cloud_storage_io_during_test:
args.extend(['--disable-cloud-storage-io'])
env = os.environ.copy()
telemetry_dir = util.GetTelemetryDir()
if 'PYTHONPATH' in env:
env['PYTHONPATH'] = os.pathsep.join([env['PYTHONPATH'], telemetry_dir])
else:
env['PYTHONPATH'] = telemetry_dir
path_to_run_tests = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'run_tests.py')
return subprocess.call([sys.executable, path_to_run_tests] + args, env=env)
|
the-stack_0_11031 | import argparse
import shutil
from pathlib import Path
import time
from pyimzml.ImzMLParser import ImzMLParser
import numpy as np
from matplotlib import pyplot as plt
from sm.browser import utils, mz_search, split_sort
TMP_LOCAL_PATH = Path("/tmp/imzml-browser")
TMP_LOCAL_PATH.mkdir(parents=True, exist_ok=True)
def log(start, message):
elapsed = time.time() - start
print(f"{elapsed:.2f}s: {message}")
def preprocess_dataset_peaks(full_dataset_s3_path: str):
assert full_dataset_s3_path.startswith("s3")
start = time.time()
log(start, "Initialization")
ds = utils.DatasetFiles(full_dataset_s3_path, TMP_LOCAL_PATH)
log(start, f"downloading dataset files from {full_dataset_s3_path} to {ds.ds_path}")
ds.download_imzml()
log(start, f"parsing imzml at {ds.imzml_path}")
imzml_reader = split_sort.ImzMLReader(ds.imzml_path) # replace with ImzmlParser
imzml_reader.add_stream(ds.ibd_path.open("rb"))
log(start, f"segmenting dataset by mz at {ds.segments_path}")
ibd_size_mb = ds.ibd_path.stat().st_size / 1024 ** 2
split_sort.segment_dataset(imzml_reader, ibd_size_mb, ds.segments_path)
log(start, f"sorting, merging, saving segments at {ds.sorted_peaks_path}")
split_sort.sort_merge_segments(ds.segments_path, ds.sorted_peaks_path)
log(start, f"saving dataset coordinates at {ds.ds_coordinates_path}")
np.array(imzml_reader.coordinates, dtype="i").tofile(ds.ds_coordinates_path.open("wb"))
log(start, f"building and saving mz index at {ds.mz_index_path}")
mz_index = mz_search.build_mz_index(ds.sorted_peaks_path)
mz_index.tofile(ds.mz_index_path)
log(start, f"uploading dataset files from {ds.ds_path} to {ds.full_ds_s3_path}")
ds.upload_sorted_mz()
log(start, f"removing {ds.segments_path}")
shutil.rmtree(ds.segments_path, ignore_errors=True)
log(start, f"done")
class DatasetBrowser:
def __init__(
self, full_dataset_s3_path: str,
):
start = time.time()
log(start, f"fetching and initializing mz index files from {full_dataset_s3_path}")
ds = utils.DatasetFiles(full_dataset_s3_path, TMP_LOCAL_PATH)
log(start, f"parsing imzml at {ds.imzml_path}")
self.imzml_path = ds.imzml_path
self.coordinates = np.frombuffer(ds.read_coordinates(), dtype="i").reshape(-1, 2)
self.mz_index = np.frombuffer(ds.read_mz_index(), dtype="f")
self.sorted_peaks_s3_file = ds.make_sorted_peaks_s3_file()
log(start, f"done")
def search(self, mz_lo: int, mz_hi: int) -> np.ndarray:
start = time.time()
log(start, "searching mz image")
mz_peaks = mz_search.search_and_fetch_mz_peaks(
self.sorted_peaks_s3_file, self.mz_index, mz_lo, mz_hi
)
mz_image, alpha, mz_max, mz_min = mz_search.create_mz_image(mz_peaks, self.coordinates)
rgba_image = plt.get_cmap("gray")(mz_image)
rgba_image[:, :, 3] = alpha
log(start, "done")
return rgba_image
def search_pixel(self, x: int, y: int) -> np.ndarray:
start = time.time()
log(start, f"pixel parsing imzml at {self.imzml_path}")
p = ImzMLParser(self.imzml_path)
n = 0
coordinate_x = p.coordinates[n][0]
coordinate_y = p.coordinates[n][1]
if((x, y, 1) in p.coordinates):
n = p.coordinates.index((x, y, 1))
coordinate_x = p.coordinates[n][0]
coordinate_y = p.coordinates[n][1]
mzs, ints = p.getspectrum(n)
log(start, "done")
return dict({'mzs': mzs.tolist(), 'ints': ints.tolist(), 'x': coordinate_x, 'y': coordinate_y})
if __name__ == "__main__":
parser = argparse.ArgumentParser("Build mz search index and search random mz images")
parser.add_argument("--s3-path", type=str, required=True)
parser.add_argument("--sort-peaks", action="store_true")
parser.add_argument("--mz-search", action="store_true")
parser.add_argument("--mz", type=float)
parser.add_argument("--ppm", type=int)
args = parser.parse_args()
if args.sort_peaks:
preprocess_dataset_peaks(args.s3_path)
elif args.mz_search:
dataset_browser = DatasetBrowser(args.s3_path)
mz_lo, mz_hi = utils.mz_ppm_bin(mz=args.mz, ppm=args.ppm)
mz_image = dataset_browser.search(mz_lo, mz_hi)
plt.imshow(mz_image)
plt.show()
|
the-stack_0_11034 | #30 min with 52cpus in LMEM1
#the script uses a maximum of 40GB mem
#%reset -f
import numpy as np
import matplotlib.pyplot as plt
import xarray as xr
import dask as da
import glob
import time
from tqdm import tqdm #to see progressbar for loops
from scipy.interpolate import interp1d #1d interp
import xesmf as xe #for spatial interpolation in projected or lon-lat coords
#for projections
from pyproj import Proj, transform, Transformer
#run this cell just once----
#from dask_jobqueue import SLURMCluster
from dask.distributed import Client, LocalCluster
#this seems to be working---
client = Client(processes=False,n_workers=1,threads_per_worker=52,memory_limit='120GB')
#this produces memory-limit problems:
#client = Client(processes=False,n_workers=12,threads_per_worker=1,memory_limit='4GB')
#
#this seems to be working, but with lots of warnings related to mem issues?---
#this produces the same result as Client, but we cannot see progress nor the stream
#client = LocalCluster(processes=False,n_workers=1,threads_per_worker=24,memory_limit='48GB')
#
#this is not calling any partition, just running with 1 core in the node we are now---
#cluster = SLURMCluster(queue='normal',cores=24,memory='48GB',processes=1,interface="lo")
#cluster = SLURMCluster(queue='LMEM1',cores=2,memory='1GB',project='test',interface='lo',scheduler_options={'interface': 'lo'})
#cluster = SLURMCluster(queue='LMEM1',cores=4,memory='2GB',processes=1, interface='lo')
#cluster = SLURMCluster()
#cluster.scale(jobs=2)
#cluster.scale(memory='2GB')
#cluster.adapt(maximum_jobs=2)
#print(cluster.job_script())
#client = Client(cluster)
#
# open dashboard with this if link doesn't work
# http://localhost:8787/status
#----------
home_dir="/export/lv4/user/jfajardourbina/"
ml_dir=f"{home_dir}dws_ulf_getm_2D_depth_avg/experiments_post_proc/lagrangian_simulation_36years/machine_learning_github/Lagrangian_ML/"
dir_wind=f"{home_dir}dws_ulf_getm_2D_depth_avg/data/atmosphere/" #winds
dir_displacement="net_displacement/"
dir_topo=f"{home_dir}dws_ulf_getm_2D_depth_avg/experiments_post_proc/analysis_eulerian_data_36years/data_bathy_grid/" #topo data
file_topo="DWS200m.2012.v03.nc"
file_wind0="UERRA.2009.nc4" #any wind file
#
savee='everyM2' #saving track data every m2
deploy='everyM2'#deploy set of particles every m2
minTsim=60 #minimum time of simulation (days)
maxTsim=91 #maximum time of simulation (days)
dir_tracks = f"{home_dir}dws_ulf_getm_2D_depth_avg/experiments_post_proc/lagrangian_simulation_36years/exp-deployHighVolume_coords-xcyc_save-{savee}_deploy-{deploy}_Tsim-{minTsim}-{maxTsim}d/tracks/"
#
npa_per_dep=12967 #number of particles per deployment
m2=int(12.42*3600+2) #period in seconds
nt_interp=283*2 #interpolate wind data every 9.43 min from 1h original data (factor of m2=44714)
ref_time=np.datetime64("1980-01-01") #reference time for time interpolation, could be any value
dx=400/1e3;dy=400/1e3 #particle grid resolution
#
#paths for output data
dir_post_proc_data=f"{ml_dir}post_proc_data/" #to save wind interp files
dir_interp_wind="wind/"
file_interp_wind_root="wind_avg_std_during_1M2_and_interp_to_particle_grid_for_convlstm.nc"
#--------
dsw=xr.open_dataset(dir_wind+file_wind0) #open any wind data
dsw.close()
dsto=xr.open_dataset(dir_topo+file_topo) #topo file
xct0=dsto.xc.min().values/1e3; yct0=dsto.yc.min().values/1e3 #=(0,0)
#--------
#open grid of displacements (use for convlstm)---
file_displacement=sorted(glob.glob(f'{dir_post_proc_data}{dir_displacement}*.nc',recursive=True))[0]
ds_dis=xr.open_dataset(file_displacement); ds_dis.close()
xcdis0,ycdis0=ds_dis.x,ds_dis.y; del ds_dis
xcdis,ycdis=np.meshgrid(xcdis0,ycdis0)
#
#or build it---
#xmin=x0.min();xmax=x0.max();ymin=y0.min();ymax=y0.max()
#extend_grid=10 #so from particle min max positions extend grid 10*dx (to not have problems with convolution)
#xgrid=np.arange(xmin-dx*1e3*extend_grid,xmax+dx*1e3*(extend_grid+1),dx*1e3,dtype='float32')
#ygrid=np.arange(ymin-dy*1e3*extend_grid,ymax+dy*1e3*(extend_grid+1),dy*1e3,dtype='float32')
#xgrid,ygrid=np.meshgrid(xgrid,ygrid)
#define the transformations----------
#1)
#from epsg:28992(DWS) to epsg:4326(LatLon with WGS84 datum used by GPS and Google Earth)
proj = Transformer.from_crs('epsg:28992','epsg:4326',always_xy=True)
#2)
#from epsg:4326(LatLon with WGS84) to epsg:28992(DWS)
inproj = Transformer.from_crs('epsg:4326','epsg:28992',always_xy=True)
#inproj_old=Proj("EPSG:28992") #old method (has errors 10-20m when contrast with the rotated coords)
#lon,lat to 28992(DWS)-projection--------------------
#bathymetry--------
xct=dsto.lonc.values; yct=dsto.latc.values #lon,lat units
xctp,yctp,z = inproj.transform(xct,yct,xct*0.)
#[xctp,yctp] = inproj_old(xct,yct) #old method
xctp=(xctp)/1e3; yctp=(yctp)/1e3
#first projected point to correct the coordinates of model local meter units
xctp0=xctp[0,0]; yctp0=yctp[0,0]
#local meter model units to 28992(DWS)-projection and lon-lat--------------
#matrix rotation -17degrees-----
ang=-17*np.pi/180
angs=np.ones((2,2))
angs[0,0]=np.cos(ang); angs[0,1]=np.sin(ang)
angs[1,0]=-np.sin(ang); angs[1,1]=np.cos(ang)
#bathymetry----
#original topo points in meter
xct2,yct2=np.meshgrid(dsto.xc.values,dsto.yc.values)
xy=np.array([xct2.flatten(),yct2.flatten()]).T
#rotate
xyp=np.matmul(angs,xy.T).T/1e3
xyp0=xyp[0,:] #the first rotated point in the topo data in meter =0,0
#correction from rotation to projection:
#1) subtract the first rotated topo point in meters (this gives the same result since xyp0=[0,0])
#2)add the first projected point of the case (lon,lat model units to projection)
xyp=xyp-xyp0
xyp[:,0]=xyp[:,0]+xctp0; xyp[:,1]=xyp[:,1]+yctp0
xyp=np.reshape(xyp,(len(dsto.yc.values),len(dsto.xc.values),2))
xctp2=xyp[...,0]; yctp2=xyp[...,1] #km
#
#contrast projections (lon,lat model units to meter) with rotated case
#around 0 meter diff with new method
#10 meter difference in average and maximum of 20 with old method
a=xctp-xctp2; b=yctp-yctp2
print(np.abs(a).max()*1e3, np.abs(b).max()*1e3, np.abs(a).mean()*1e3, np.abs(b).mean()*1e3)
#particle grid of displacements (use for convlstm)------
xy=np.array([xcdis.flatten(),ycdis.flatten()]).T
ny,nx=xcdis.shape
#rotate
xyp=np.matmul(angs,xy.T).T/1e3
#correction from rotation to projection:
#1) subtract the first rotated topo point in meters (this gives the same result since xyp0=[0,0])
#2)add the first projected point of the case (lon,lat model units to meter)
xyp=xyp-xyp0
xyp[:,0]=xyp[:,0]+xctp0; xyp[:,1]=xyp[:,1]+yctp0
xyp=np.reshape(xyp,(ny,nx,2))
xcdisp=xyp[...,0]; ycdisp=xyp[...,1] #km
#
#get coordinates in lon-lat units (WGS84 )
xcdisp_lon, ycdisp_lat, _ = proj.transform(xcdisp*1e3,ycdisp*1e3, ycdisp*0.)
#for spatial interpolation using lon-lat-----
#build the input grid (lon-lat of original wind file)---
ds_in = xr.Dataset()
ds_in.coords["lon"] = dsw.lon.astype('float32')
ds_in["lon"].attrs['long_name'] = 'longitude'
ds_in.coords["lat"] = dsw.lat.astype('float32')
ds_in["lat"].attrs['long_name'] = 'latidude'
print(ds_in)
print()
#build the output grid (lon-lat of particle displacement)---
#this grid is used for the interpolation
ds_out = xr.Dataset()
ds_out.coords["lon"] = (("yc","xc"),xcdisp_lon.astype('float32'))
ds_out["lon"].attrs['long_name'] = 'longitude'
ds_out.coords["lat"] = (("yc","xc"),ycdisp_lat.astype('float32'))
ds_out["lat"].attrs['long_name'] = 'latidude'
#ds_out=ds_out.drop(["xc","yc"])
print(ds_out)
#regridder-----
#only need to run once
regridder = xe.Regridder(ds_in,ds_out,"patch") #special smooth iterpolator from this package
#regridder_bilinear = xe.Regridder(ds_in,ds_out,"bilinear")
#regridder_nearest = xe.Regridder(ds_in,ds_out,"nearest_s2d") #classical nearest
#for temporal interpolation-----
def interp1d_fun(x,tin,tout):
f = interp1d(tin,x,axis=-1,kind='linear')
return f(tout)
def xr_interp1d(x,tin,tout,idim,odim):
#x: xarray with chunks
#idim: input coordinate that will be changed by output odim
#odim: output coordinate
ds_interp1d = xr.apply_ufunc(
interp1d_fun,x,
input_core_dims=[[idim]],
output_core_dims=[[odim]],
output_dtypes=[np.float32],
dask_gufunc_kwargs={'output_sizes':{odim:len(tout)}},
kwargs={'tin':tin,'tout':tout}, #input to the above function
dask='parallelized',
#vectorize=True,
)
return ds_interp1d
#rotate wind from projection to model coordinates---
def projection_to_model_local_coords(x,y,ang=17*np.pi/180):
return np.cos(ang)*x + np.sin(ang)*y, -np.sin(ang)*x + np.cos(ang)*y
#-----
files_wind=sorted(glob.glob(f'{dir_wind}/**/*.nc4',recursive=True))
for file_wind in tqdm(files_wind):
year=int(str(file_wind)[-8:-4])
print(year)
#open wind data------
dsw=xr.open_dataset(file_wind,chunks={'time':-1,'lon':-1,'lat':-1})[["u10","v10"]];dsw.close() #winds
tw = dsw.time.values #contains data for the full 1st day of the next year
#del these long attributes
del dsw.attrs["history_of_appended_files"], dsw.attrs["history"]
#spatial interpolation-----
dsw_int = regridder(dsw)
#temporal interpolation-----
#first track of this year---
month_sim=1
file_track=f'tracks_{year}{month_sim:02d}_coords-xcyc_save-{savee}_deploy-{deploy}_Tsim-{minTsim}-{maxTsim}d.nc'
file_track_path=f'{dir_tracks}{year}/{file_track}'
dst=xr.open_dataset(file_track_path)
t0=dst.time.isel(traj=0,obs=0).values
x0=dst.x.isel(traj=range(npa_per_dep),obs=0).values
y0=dst.y.isel(traj=range(npa_per_dep),obs=0).values
dst.close(); del dst
#
#first track of the following year---
if file_wind!=files_wind[-1]:
file_track=f'tracks_{year+1}{month_sim:02d}_coords-xcyc_save-{savee}_deploy-{deploy}_Tsim-{minTsim}-{maxTsim}d.nc'
file_track_path=f'{dir_tracks}{year+1}/{file_track}'
t1=xr.open_dataset(file_track_path).time.isel(traj=0,obs=0).values
#last track of this year (for the final simulated month)---
else:
        #for the final year we cannot open the next year's simulation
        #we only have tracks until October, so we can get the wind for the last interval of displacement
last_year_tracks=sorted(glob.glob(f'{dir_tracks}{year}/*.nc',recursive=True))
end_month=len(last_year_tracks)
file_track=f'tracks_{year}{end_month:02d}_coords-xcyc_save-{savee}_deploy-{deploy}_Tsim-{minTsim}-{maxTsim}d.nc'
file_track_path=f'{dir_tracks}{year}/{file_track}'
t1=xr.open_dataset(file_track_path).time.isel(traj=-1,obs=0).values + np.timedelta64(m2,'s')
#
#times to get wind data for this year---
#however if we can not find a factor "nt_interp" of m2, use 10min
#we wont have the same amount of interp data every m2, but it is better to skip 10min of 1 sample than 1h(original data)
#nt_interp=283*2 #interpolate wind data every 9.43 min from 1h original data (factor of m2=44714)
#t_interp:
    # - high resolution times to compute avg and std during the interval of net displacement
# - the last data could be close to the beginning of next year, or the same year for the final month (October) of the simulation
#t_dep:
# - times of displacement for the current year, referenced to the initial time of the m2 interval.
t_interp=np.arange(t0,t1+np.timedelta64(1,'s'),nt_interp,dtype='datetime64[s]')
t_dep=np.arange(t0,t1,m2,dtype='datetime64[s]') #only for this year
#1d interp----
#reference with respect to ref_time (so convert timedelta64 to float)
t_interp0=(t_interp-ref_time) / np.timedelta64(1,'s') #dates after interpolation (factor of m2)
tw0=(tw-ref_time) / np.timedelta64(1,'s') #dates of original winds (every 1h)
#
dsw_int=xr_interp1d(dsw_int.chunk({'time':-1,'xc':10,'yc':10}),tw0,t_interp0,idim='time',odim='time_int').transpose("time_int","yc","xc")
#add time, xc and yc coords
dsw_int.coords["time_int"]=t_interp
dsw_int.coords["xc"] = ("xc",xcdis0.values.astype('float32')) #model coords in m
dsw_int.coords["yc"] = ("yc",ycdis0.values.astype('float32'))
#reshape with xarray---
#
#check time dimensions
nt_interval=int(m2/nt_interp) #points in the m2 interval (right border of interval open)
nt_dep=(len(t_interp)-1)//nt_interval #=len(t_dep), final shape after mean or std in the m2 interval. "-1" because we also don't consider the right border of the last interval in the avg
#times after avg or std are referenced with the date of deployment (the begin of the m2 interval of the displacement)
print("check times:",nt_interval,nt_dep,len(t_dep),nt_interval*nt_dep,len(dsw_int.time_int)-1)
#
#https://stackoverflow.com/questions/59504320/how-do-i-subdivide-refine-a-dimension-in-an-xarray-dataset
#steps:
# - assign_coords: create coords time_dep and time_interval
# - stack: create a coord and index called multi_time which is related to the original temporal size of the data,
# that now match a 2d-MultiIndex(nt_dep,nt_interval) which is defined using the new time_dep and time_interval coords,
# and will order the above coords keeping constant time_dep in every time_interval(0:78); which is consistent with how dsw_t_interp was created.
# - reset_index().rename: del the the old time coord, and rename time index as multi_time to remove the old time index.
# - unstack(): use the above 2d-MultiIndex to reshape the data original 1d time data into time_dep, time_interval,
# however, the new dimensions are send by default to the last index,
# - transpose: to fix above issue for the dimensions of variables, however, can not fix the order that dims are shown after Dimensions:
#
dsw_int=dsw_int.isel(time_int=slice(0,-1)
).assign_coords(time_dep=t_dep,time_interval=range(nt_interval)
).stack(multi_time=("time_dep","time_interval")
).reset_index("time_int",drop=True).rename(time_int="multi_time"
).unstack(dim="multi_time").transpose("time_dep","time_interval","yc","xc")
    dsw_int #note: the new time dims may still be listed last in the 'Dimensions:' header of the repr
#
#instead of above we could also try resample of xarray---
#and then perform avg, std, but not working well
#res=int(nt_interp+m2)
#dsout_m2_avg=dsout.resample(time=f'{res}s',closed="right")#.mean(dim='time');
#print(t_dep[:5])
#for i in dsout_m2_avg: print(i)
#rotate wind from projection to model coordinates---
dsw_int["u10"],dsw_int["v10"]=projection_to_model_local_coords(dsw_int.u10,dsw_int.v10)
#compute wind speed, direction,... (mean and std) based on Farrugia and Micallef (2017)---
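    #(reference sketch of what follows, using the equation numbers cited in the comments below:
    # wd_mean = atan2(<v>,<u>) [eq 7]; ws_mean = <|U|> [eq 9]; ws_mean_vec = |(<u>,<v>)| [eq 8];
    # wd_std follows from the speed-weighted angular deviation about wd_mean [eq 18];
    # ws_std from the along-wind second moment minus ws_mean_vec^2 [eq 25])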
wd = np.arctan2(dsw_int.v10,dsw_int.u10) #wd for the interp times
ws = (dsw_int.u10**2 + dsw_int.v10**2)**.5 #ws for the interp times
u10_vec = dsw_int.u10.mean(dim='time_interval')
v10_vec = dsw_int.v10.mean(dim='time_interval')
#
dsw_int["wd_mean"] = np.arctan2(v10_vec,u10_vec)
dsw_int["ws_mean"] = ws.mean(dim='time_interval')
dsw_int["ws_mean_vec"] = (u10_vec**2 + v10_vec**2)**.5
dsw_int["wd_std"] = ( (ws*(2*np.arctan(np.tan(0.5*(wd-dsw_int["wd_mean"]))))**2).mean(dim='time_interval') / dsw_int["ws_mean"] )**.5
#use abs because there is 1 case with very small negative value -1e-7
dsw_int["ws_std"] = ( abs(((ws*np.cos(wd-dsw_int["wd_mean"]))**2).mean(dim='time_interval') - dsw_int["ws_mean_vec"]**2) )**.5
#
#del u10 and v10
del dsw_int["u10"], dsw_int["v10"], dsw_int["time_interval"]
#call computations---
dsw_int=dsw_int.compute()
#save data---
dsw_int=dsw_int.rename(time_dep="time") #rename dim time_dep
#global coords and attrs---
dsw_int["time"].attrs['description'] = 'initial date of the M2 interval of the net particle displacement'
dsw_int["yc"].attrs['long_name'] = 'yc'
dsw_int["yc"].attrs['description'] = 'the same as the net particle displacement grid y-axis'
dsw_int["yc"].attrs['units'] = 'm'
dsw_int["xc"].attrs['long_name'] = 'xc'
dsw_int["xc"].attrs['description'] = 'the same as the net particle displacement grid x-axis'
dsw_int["xc"].attrs['units'] = 'm'
#
dsw_int.attrs["spatial_info"] = "1) xESMF (method: patch) was used to interpolate wind components to the net displacement particle-grid (using lon-lat coords). 2) Then the wind was projected (rotated) to the local model axes."
dsw_int.attrs["temporal_info"] = f"Wind components were linearly interpolated to {nt_interp}s (factor of M2={m2}s), and then the avg and std in the M2 interval of the net displacement were computed."
dsw_int.attrs["std of wind speed and direction"] = "Based on Farrugia and Micallef (2017)."
#
#variables---
#
dsw_int["wd_mean"].attrs['long_name'] = 'M2-mean wind direction'
dsw_int["wd_mean"].attrs['units'] = 'radian'
dsw_int["wd_mean"].attrs['description'] = 'Farrugia and Micallef (2017): eq(7)'
#
dsw_int["ws_mean"].attrs['long_name'] = 'M2-mean wind speed'
dsw_int["ws_mean"].attrs['units'] = 'm/s'
dsw_int["ws_mean"].attrs['description'] = 'eq(9)'
#
dsw_int["ws_mean_vec"].attrs['long_name'] = 'M2-mean wind speed with vectorial method'
dsw_int["ws_mean_vec"].attrs['units'] = 'm/s'
dsw_int["ws_mean_vec"].attrs['description'] = 'eq(8)'
#
dsw_int["wd_std"].attrs['long_name'] = 'M2-std of wind direction'
dsw_int["wd_std"].attrs['units'] = 'radian'
dsw_int["wd_std"].attrs['description'] = 'eq(18): square root of along wind variance'
#
dsw_int["ws_std"].attrs['long_name'] = 'M2-std of wind speed'
dsw_int["ws_std"].attrs['units'] = 'm/s'
dsw_int["ws_std"].attrs['description'] = 'eq(25)'
#
file_out_nc=f"{year}_{file_interp_wind_root}"
dir_out_nc=dir_post_proc_data+dir_interp_wind
dsw_int.to_netcdf(dir_out_nc+file_out_nc)
dsw_int.close(); del dsw_int; del dsw
client.close() |
the-stack_0_11035 | import matplotlib.pyplot as plt
import h5py
import glob
from natsort import natsorted
import os
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--input-path',type=str,help='input for .h5')
parser.add_argument('--output-path',type=str,help='output for png')
args = parser.parse_args()
image_list = []
im_cntr1 = 0
im_cntr2_list = []
input_path = args.input_path
output_path = args.output_path
#input_path = "./path2svs/"
#output_path = "/path2png/"
output_path = os.path.join(output_path,"png_patches/testA/")
print(output_path)
os.makedirs(output_path)
h5_counter = 0
exception_list = []
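#convert every patch stored under the 'imgs' dataset of each .h5 file into an
#individual PNG named <h5 filename>_<index>.png inside the output directory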
for filem in natsorted(glob.glob(input_path+"*.h5")):
print("h5 count",h5_counter)
h5_counter+=1
print(filem)
try:
png_cntr = 0
        hdf = h5py.File(filem, "r")
for i in list(hdf['imgs']):
plt.imsave(output_path+filem.split("/")[-1]+"_"+str(png_cntr) +".png",i)
png_cntr+=1
print(png_cntr)
    except Exception:
        exception_list.append(filem.split("/")[-1])
        print("Exception occurred!!!")
pass
#im_counter = 0
#for image in sorted(glob.glob(filename_list+"/*")):
#print(image.split("/")[-1])
#if domain_type in image:
#imagename = "/a"+str(im_counter)
#shutil.copy(image,output_folder_name+"/"+image.split("/")[-1])
#im_counter += 1
|