ext (string, 9 classes) | sha (string, length 40) | content (string, length 3 to 1.04M)
---|---|---|
py | b4010594ff3b8c0f5fcd14ff66584543a40679e5 | import os
from conans import ConanFile, CMake, tools


class TestPackageConan(ConanFile):
    settings = "os", "compiler", "build_type", "arch"
    generators = "cmake", "cmake_find_package"

    def build(self):
        cmake = CMake(self)
        cmake.definitions["BUILDING_SHARED"] = self.options["redis-plus-plus"].shared
        cmake.configure()
        cmake.build()

    def test(self):
        if not tools.cross_building(self.settings):
            self.run(os.path.join("bin", "test_package"), run_environment=True)
|
py | b40106a00f6d005cefb96bce518f29a6c2dbd3be | import numpy as np
from sklearn.model_selection import train_test_split
from sklearn import datasets
# import matplotlib.pyplot as plt
# from logistic_regression import LogisticRegression
from Regression.regression import LogisticRegression
# from regression import LogisticRegression
def accuracy(y_true, y_pred):
    accuracy = np.sum(y_true == y_pred) / len(y_true)
    return accuracy
bc = datasets.load_breast_cancer()
X, y = bc.data, bc.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1234)
regressor = LogisticRegression(learning_rate=0.0001, n_iters=1000)
regressor.fit(X_train, y_train)
predictions = regressor.predict(X_test)
print("LR classification accuracy:", accuracy(y_test, predictions))
|
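The row above imports `LogisticRegression` from `Regression.regression`, which is not included in this row. As an editorial aid, here is a minimal sketch of the interface the script assumes (a constructor taking `learning_rate` and `n_iters`, plus `fit`/`predict` via plain gradient descent); it is an illustrative stand-in, not the actual module.

```python
import numpy as np


class LogisticRegression:
    """Gradient-descent logistic regression (illustrative sketch, not the original module)."""

    def __init__(self, learning_rate=0.001, n_iters=1000):
        self.learning_rate = learning_rate
        self.n_iters = n_iters
        self.weights = None
        self.bias = None

    def fit(self, X, y):
        n_samples, n_features = X.shape
        self.weights = np.zeros(n_features)
        self.bias = 0.0
        for _ in range(self.n_iters):
            linear = X @ self.weights + self.bias
            y_pred = 1.0 / (1.0 + np.exp(-linear))          # sigmoid
            dw = (1.0 / n_samples) * (X.T @ (y_pred - y))   # gradient w.r.t. weights
            db = (1.0 / n_samples) * np.sum(y_pred - y)     # gradient w.r.t. bias
            self.weights -= self.learning_rate * dw
            self.bias -= self.learning_rate * db

    def predict(self, X):
        probs = 1.0 / (1.0 + np.exp(-(X @ self.weights + self.bias)))
        return np.asarray([1 if p > 0.5 else 0 for p in probs])
```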
py | b401073dad63ab199df7d637efb365de30ef5184 | from setuptools import setup
# has a fake Trove classifier to fool the Python extractor into believing this is definitely Python 3
# Programming Language :: Python :: 3.7
setup(
name="example-setup.py",
install_requires=["requests==1.2.3"],
python_requires='>=3.5',
)
|
py | b401086ad3468007b9076639b50e2ebfc5abdd2e | # -*- Python -*-
# This file is licensed under a pytorch-style license
# See frontends/pytorch/LICENSE for license information.
import typing
import torch
import torch_mlir
# RUN: %PYTHON %s | npcomp-opt | FileCheck %s
mb = torch_mlir.ModuleBuilder()
class TestModule(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.i = 3
        self.f = 42.5
# CHECK: %[[TRUE:.*]] = basicpy.bool_constant true
# CHECK: %[[N3:.*]] = basicpy.numeric_constant 3 : i64
# CHECK: %[[N42:.*]] = basicpy.numeric_constant 4.250000e+01 : f64
# CHECK: %[[MODULE:.*]] = torch.nn_module {
# Note: for some reason, Torch always adds a "training" property to all modules.
# CHECK: torch.attr "training", %[[TRUE]] : !basicpy.BoolType
# CHECK: torch.attr "i", %[[N3]] : i64
# CHECK: torch.attr "f", %[[N42]] : f64
test_module = TestModule()
recursivescriptmodule = torch.jit.script(test_module)
# TODO: Automatically handle unpacking Python class RecursiveScriptModule into the underlying ScriptModule.
mb.import_module(recursivescriptmodule._c)
mb.module.operation.print()
|
py | b4010a4bc2b60c042d6fc96c0e7fda8e65e72358 | """
3D IoU Calculation and Rotated NMS
Written by Shaoshuai Shi
All Rights Reserved 2019-2020.
"""
import torch
from det3d.ops.iou3d_nms import iou3d_nms_cuda, iou3d_nms_utils
from . import iou3d_nms_cuda
import numpy as np
def boxes_iou_bev(boxes_a, boxes_b):
    """
    Args:
        boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading]
        boxes_b: (N, 7) [x, y, z, dx, dy, dz, heading]
    Returns:
        ans_iou: (N, M)
    """
    assert boxes_a.shape[1] == boxes_b.shape[1] == 7
    ans_iou = torch.cuda.FloatTensor(torch.Size((boxes_a.shape[0], boxes_b.shape[0]))).zero_()
    iou3d_nms_cuda.boxes_iou_bev_gpu(boxes_a.contiguous(), boxes_b.contiguous(), ans_iou)
    return ans_iou


def to_pcdet(boxes):
    # transform back to pcdet's coordinate
    boxes = boxes[:, [0, 1, 2, 4, 3, 5, -1]]
    boxes[:, -1] = -boxes[:, -1] - np.pi / 2
    return boxes


def boxes_iou3d_gpu(boxes_a, boxes_b):
    """
    Args:
        boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading]
        boxes_b: (N, 7) [x, y, z, dx, dy, dz, heading]
    Returns:
        ans_iou: (N, M)
    """
    assert boxes_a.shape[1] == boxes_b.shape[1] == 7

    # transform back to pcdet's coordinate
    boxes_a = to_pcdet(boxes_a)
    boxes_b = to_pcdet(boxes_b)

    # height overlap
    boxes_a_height_max = (boxes_a[:, 2] + boxes_a[:, 5] / 2).view(-1, 1)
    boxes_a_height_min = (boxes_a[:, 2] - boxes_a[:, 5] / 2).view(-1, 1)
    boxes_b_height_max = (boxes_b[:, 2] + boxes_b[:, 5] / 2).view(1, -1)
    boxes_b_height_min = (boxes_b[:, 2] - boxes_b[:, 5] / 2).view(1, -1)

    # bev overlap
    overlaps_bev = torch.cuda.FloatTensor(torch.Size((boxes_a.shape[0], boxes_b.shape[0]))).zero_()  # (N, M)
    iou3d_nms_cuda.boxes_overlap_bev_gpu(boxes_a.contiguous(), boxes_b.contiguous(), overlaps_bev)

    max_of_min = torch.max(boxes_a_height_min, boxes_b_height_min)
    min_of_max = torch.min(boxes_a_height_max, boxes_b_height_max)
    overlaps_h = torch.clamp(min_of_max - max_of_min, min=0)

    # 3d iou
    overlaps_3d = overlaps_bev * overlaps_h
    vol_a = (boxes_a[:, 3] * boxes_a[:, 4] * boxes_a[:, 5]).view(-1, 1)
    vol_b = (boxes_b[:, 3] * boxes_b[:, 4] * boxes_b[:, 5]).view(1, -1)
    iou3d = overlaps_3d / torch.clamp(vol_a + vol_b - overlaps_3d, min=1e-6)
    return iou3d


def nms_gpu(boxes, scores, thresh, pre_maxsize=None, **kwargs):
    """
    :param boxes: (N, 7) [x, y, z, dx, dy, dz, heading]
    :param scores: (N)
    :param thresh:
    :return:
    """
    assert boxes.shape[1] == 7
    order = scores.sort(0, descending=True)[1]
    if pre_maxsize is not None:
        order = order[:pre_maxsize]
    boxes = boxes[order].contiguous()
    keep = torch.LongTensor(boxes.size(0))
    num_out = iou3d_nms_cuda.nms_gpu(boxes, keep, thresh)
    return order[keep[:num_out].cuda()].contiguous(), None


def nms_normal_gpu(boxes, scores, thresh, **kwargs):
    """
    :param boxes: (N, 7) [x, y, z, dx, dy, dz, heading]
    :param scores: (N)
    :param thresh:
    :return:
    """
    assert boxes.shape[1] == 7
    order = scores.sort(0, descending=True)[1]
    boxes = boxes[order].contiguous()
    keep = torch.LongTensor(boxes.size(0))
    num_out = iou3d_nms_cuda.nms_normal_gpu(boxes, keep, thresh)
    return order[keep[:num_out].cuda()].contiguous(), None
|
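The functions in the row above require the compiled `iou3d_nms_cuda` extension and a CUDA device. A minimal, hypothetical usage sketch under those assumptions (the random boxes are placeholders only):

```python
import torch

# Requires a CUDA device and the built iou3d_nms_cuda extension.
boxes = torch.rand(16, 7).cuda()    # (N, 7): [x, y, z, dx, dy, dz, heading]
scores = torch.rand(16).cuda()      # (N,) detection confidences

iou = boxes_iou3d_gpu(boxes, boxes)              # (N, N) pairwise 3D IoU
keep, _ = nms_gpu(boxes, scores, thresh=0.1)     # indices surviving rotated NMS
print(iou.shape, keep.shape)
```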
py | b4010aa75e3685be7a15d36a72f5db5d9e6d3d5b | # Generated by Django 2.2.3 on 2019-07-23 09:07
from django.db import migrations
class Migration(migrations.Migration):
    dependencies = [
        ('mytutor', '0001_initial'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='question',
            name='asked_by',
        ),
        migrations.RemoveField(
            model_name='question',
            name='subject',
        ),
    ]
|
py | b4010bfb73606698249d4abbd7f4a89ba33e5619 | import os,sys
#filename with absolute path
WF = "/tmp/SDKsettings.txt"
# wsadmin (WebSphere) Jython script, hence the Python 2 print syntax.
# WF is expected to contain one "nodeName,serverName,sdkName" entry per line.
def restoreServers():
    try:
        f = open(WF, "r")
        l = f.readline()
        while l:
            w = l.split(",")
            print "Restore server " + w[1] + " on node " + w[0] + \
                " with SDK " + w[2]
            AdminTask.setServerSDK('-nodeName ' + w[0] + ' -serverName ' + w[1] + \
                ' -sdkName ' + w[2])
            l = f.readline()
    finally:
        f.close()


restoreServers()
|
py | b4010c75396a0d01efdabb4aa59a31403e80987f | w = 50.0
h = 50.0
def mandel():
    """Print a mandelbrot fractal to the console, yielding after each character is printed"""
    y = 0.0
    while y < h:
        x = 0.0
        while x < w:
            Zr, Zi, Tr, Ti = 0.0, 0.0, 0.0, 0.0
            Cr = 2 * x / w - 1.5
            Ci = 2 * y / h - 1.0
            i = 0
            while i < 50 and Tr + Ti <= 4:
                Zi = 2 * Zr * Zi + Ci
                Zr = Tr - Ti + Cr
                Tr = Zr * Zr
                Ti = Zi * Zi
                i += 1
            if Tr + Ti <= 4:
                print('*', end='')
            else:
                print('·', end='')
            x += 1
            yield
        print()
        y += 1
        yield


# run the mandelbrot
try:
    from browser import request_animation_frame  # Brython-only module
except ImportError:
    request_animation_frame = None

gen = mandel()


def gen_cb(_time=None):
    for _ in range(4):
        next(gen)
    request_animation_frame(gen_cb)


if request_animation_frame:
    gen_cb()                # in the browser: render a few characters per frame
else:
    any(gen)                # plain CPython: drive the generator to completion
|
py | b4010cbce3da4136ce10037374806913f79425dc | # pylint: skip-file
import asyncio
import sys
import json
import time
from typing import (
Optional,
List,
Sequence,
Union,
Dict,
Awaitable,
BinaryIO,
Tuple,
Any,
)
from base64 import b64encode
from urllib.parse import quote
import aiohttp
import backoff # type: ignore
from . import __version__
from .utils import filter_items
from .errors import (
HTTPException,
Forbidden,
NotFound,
SpotifyException,
BearerTokenError,
RateLimitedException,
)
__all__ = ("HTTPClient", "HTTPUserClient")
_GET_BEARER_ARG_ERR = "{name} was `None` when getting a bearer token."
_PYTHON_VERSION = ".".join(str(_) for _ in sys.version_info[:3])
_AIOHTTP_VERSION = aiohttp.__version__
class HTTPClient:
"""A class responsible for handling all HTTP logic.
This class combines a small amount of stateful logic control
with the :meth:`request` method and a very thin wrapper over
the raw HTTP API.
All endpoint methods mirror the default arguments the API
uses and are best described as a series of "good defaults"
for the routes.
Parameters
----------
client_id : str
The client id provided by spotify for the app.
client_secret : str
The client secret for the app.
loop : Optional[event loop]
The event loop the client should run on, if no loop is specified `asyncio.get_event_loop()` is called and used instead.
Attributes
----------
loop : AbstractEventLoop
The loop the client is running with.
client_id : str
The client id of the app.
client_secret : str
The client secret.
"""
RETRY_AMOUNT = 10
DEFAULT_USER_AGENT = (
user_agent
) = f"Application (https://github.com/mental32/spotify.py {__version__}) Python/{_PYTHON_VERSION} aiohttp/{_AIOHTTP_VERSION}"
def __init__(self, client_id: str, client_secret: str, loop=None):
self.loop = loop or asyncio.get_event_loop()
self._session = aiohttp.ClientSession(loop=self.loop)
self.client_id = client_id
self.client_secret = client_secret
self.bearer_info: Optional[Dict[str, str]] = None
self.__request_barrier_lock = asyncio.Lock()
self.__request_barrier = asyncio.Event()
self.__request_barrier.set()
@staticmethod
def route(
method: str, path: str, *, base: str = "https://api.spotify.com/v1", **kwargs
) -> Tuple[str, str]:
"""Used for constructing URLs for API endpoints.
Parameters
----------
method : str
The HTTP/REST method used.
path : str
A path to be formatted.
kwargs : Any
The arguments used to format the path.
Returns
-------
route : Tuple[str, str]
A tuple of the method and formatted url path to use.
"""
url = base + path
if kwargs:
url = url.format(
**{
key: (quote(value) if isinstance(value, str) else value)
for key, value in kwargs.items()
}
)
return (method, url)
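# For example (illustrative values):
#     HTTPClient.route("GET", "/albums/{spotify_id}", spotify_id="abc")
# returns ("GET", "https://api.spotify.com/v1/albums/abc"); string arguments are
# URL-quoted before being substituted into the path.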
async def get_bearer_info(
self,
client_id: Optional[str] = None,
client_secret: Optional[str] = None,
session: Optional[aiohttp.ClientSession] = None,
):
"""Get the application bearer token from client_id and client_secret.
Raises
------
SpotifyException
This will be raised when either `client_id` or
`client_secret` is `None`
"""
client_id = client_id or self.client_id
client_secret = client_secret or self.client_secret
if client_id is None:
raise SpotifyException(_GET_BEARER_ARG_ERR.format(name="client_id"))
if client_secret is None:
raise SpotifyException(_GET_BEARER_ARG_ERR.format(name="client_secret"))
token = b64encode(":".join((client_id, client_secret)).encode())
data = {"grant_type": "client_credentials"}
headers = {"Authorization": f"Basic {token.decode()}"}
session = session or self._session
async with session.post(
"https://accounts.spotify.com/api/token", data=data, headers=headers
) as response:
bearer_info = json.loads(await response.text(encoding="utf-8"))
if "error" in bearer_info.keys():
raise BearerTokenError(response=response, message=bearer_info)
return bearer_info
@backoff.on_exception(backoff.expo, RateLimitedException)
async def request(self, route, **kwargs):
r"""Make a request to the spotify API with the current bearer credentials.
Parameters
----------
route : Tuple[str, str]
A tuple of the method and url gained from :meth:`route`.
\*\*kwargs : Any
keyword arguments to pass into :class:`aiohttp.ClientSession.request`
"""
assert isinstance(route, tuple), "route parameter was not a tuple!"
assert len(route) == 2, "route parameter must have exactly two items"
method, url, = route
headers = kwargs.pop("headers", {})
if "Authorization" not in headers:
if self.bearer_info is None:
self.bearer_info = bearer_info = await self.get_bearer_info()
access_token = bearer_info["access_token"]
else:
access_token = self.bearer_info["access_token"]
headers["Authorization"] = "Bearer " + access_token
headers = {
"Content-Type": kwargs.pop("content_type", "application/json"),
"User-Agent": self.user_agent,
**headers,
}
if "json" in kwargs:
headers["Content-Type"] = "application/json"
kwargs["data"] = json.dumps(
kwargs.pop("json"), separators=(",", ":"), ensure_ascii=True
)
for current_retry in range(self.RETRY_AMOUNT):
await self.__request_barrier.wait()
response = await self._session.request(
method, url, headers=headers, **kwargs
)
try:
status = response.status
try:
data = json.loads(await response.text(encoding="utf-8"))
except json.decoder.JSONDecodeError:
data = {}
if 300 > status >= 200:
return data
if status == 401:
self.bearer_info = bearer_info = await self.get_bearer_info()
headers["Authorization"] = "Bearer " + bearer_info["access_token"]
continue
if status == 429:
# we're being rate limited.
self.__request_barrier.clear()
amount = int(response.headers.get("Retry-After"))
checkpoint = int(time.time())
async with self.__request_barrier_lock:
if (int(time.time()) - checkpoint) < amount:
self.__request_barrier.clear()
await asyncio.sleep(int(amount))
self.__request_barrier.set()
continue
if status in (502, 503):
# unconditional retry
continue
if status == 403:
raise Forbidden(response, data)
if status == 404:
raise NotFound(response, data)
finally:
await response.release()
if response.status == 429:
raise RateLimitedException((amount, self.RETRY_AMOUNT - current_retry))
raise HTTPException(response, data)
async def close(self):
"""Close the underlying HTTP session."""
await self._session.close()
# Methods are defined in the order that they are listed in
# the api docs (https://developer.spotify.com/documentation/web-api/reference/)
# Album related endpoints
def album(self, spotify_id: str, market: Optional[str] = "US") -> Awaitable:
"""Get Spotify catalog information for a single album.
Parameters
----------
spotify_id : str
The spotify_id to search by.
market : Optional[str]
An ISO 3166-1 alpha-2 country code.
"""
route = self.route("GET", "/albums/{spotify_id}", spotify_id=spotify_id)
payload: Dict[str, Any] = {}
if market:
payload["market"] = market
return self.request(route, params=payload)
def album_tracks(
self,
spotify_id: str,
limit: Optional[int] = 20,
offset: Optional[int] = 0,
market="US",
) -> Awaitable:
"""Get Spotify catalog information about an album’s tracks.
Parameters
----------
spotify_id : str
The spotify_id to search by.
limit : Optional[int]
The maximum number of items to return. Default: 20. Minimum: 1. Maximum: 50.
offset : Optional[int]
The offset of which Spotify should start yielding from.
market : Optional[str]
An ISO 3166-1 alpha-2 country code.
"""
route = self.route("GET", "/albums/{spotify_id}/tracks", spotify_id=spotify_id)
payload: Dict[str, Any] = {"limit": limit, "offset": offset}
if market:
payload["market"] = market
return self.request(route, params=payload)
def albums(self, spotify_ids, market="US") -> Awaitable:
"""Get Spotify catalog information for multiple albums identified by their Spotify IDs.
Parameters
----------
spotify_ids : List[str]
The spotify_ids to search by.
market : Optional[str]
An ISO 3166-1 alpha-2 country code.
"""
route = self.route("GET", "/albums/")
payload: Dict[str, Any] = {"ids": spotify_ids}
if market:
payload["market"] = market
return self.request(route, params=payload)
# Artist related endpoints.
def artist(self, spotify_id) -> Awaitable:
"""Get Spotify catalog information for a single artist identified by their unique Spotify ID.
Parameters
----------
spotify_id : str
The spotify_id to search by.
"""
route = self.route("GET", "/artists/{spotify_id}", spotify_id=spotify_id)
return self.request(route)
def artist_albums(
self,
spotify_id,
include_groups=None,
limit: Optional[int] = 20,
offset: Optional[int] = 0,
market="US",
):
"""Get Spotify catalog information about an artist’s albums.
Parameters
----------
spotify_id : str
The spotify_id to search by.
include_groups : INCLUDE_GROUPS_TP
INCLUDE_GROUPS
limit : Optional[int]
The maximum number of items to return. Default: 20. Minimum: 1. Maximum: 50.
offset : Optional[int]
The offset of which Spotify should start yielding from.
market : Optional[str]
An ISO 3166-1 alpha-2 country code.
"""
route = self.route("GET", "/artists/{spotify_id}/albums", spotify_id=spotify_id)
payload: Dict[str, Any] = {"limit": limit, "offset": offset}
if include_groups:
payload["include_groups"] = include_groups
if market:
payload["market"] = market
return self.request(route, params=payload)
def artist_top_tracks(self, spotify_id, country) -> Awaitable:
"""Get Spotify catalog information about an artist’s top tracks by country.
Parameters
----------
spotify_id : str
The spotify_id to search by.
country : COUNTRY_TP
COUNTRY
"""
route = self.route(
"GET", "/artists/{spotify_id}/top-tracks", spotify_id=spotify_id
)
payload: Dict[str, Any] = {"country": country}
return self.request(route, params=payload)
def artist_related_artists(self, spotify_id) -> Awaitable:
"""Get Spotify catalog information about artists similar to a given artist.
Similarity is based on analysis of the Spotify community’s listening history.
Parameters
----------
spotify_id : str
The spotify_id to search by.
"""
route = self.route(
"GET", "/artists/{spotify_id}/related-artists", spotify_id=spotify_id
)
return self.request(route)
def artists(self, spotify_ids) -> Awaitable:
"""Get Spotify catalog information for several artists based on their Spotify IDs.
Parameters
----------
spotify_id : List[str]
The spotify_ids to search with.
"""
route = self.route("GET", "/artists")
payload: Dict[str, Any] = {"ids": spotify_ids}
return self.request(route, params=payload)
# Browse endpoints.
def category(self, category_id, country=None, locale=None) -> Awaitable:
"""Get a single category used to tag items in Spotify (on, for example, the Spotify player’s “Browse” tab).
Parameters
----------
category_id : str
The Spotify category ID for the category.
country : COUNTRY_TP
COUNTRY
locale : LOCALE_TP
LOCALE
"""
route = self.route(
"GET", "/browse/categories/{category_id}", category_id=category_id
)
payload: Dict[str, Any] = {}
if country:
payload["country"] = country
if locale:
payload["locale"] = locale
return self.request(route, params=payload)
def category_playlists(
self,
category_id,
limit: Optional[int] = 20,
offset: Optional[int] = 0,
country=None,
) -> Awaitable:
"""Get a list of Spotify playlists tagged with a particular category.
Parameters
----------
category_id : str
The Spotify category ID for the category.
limit : Optional[int]
The maximum number of items to return. Default: 20. Minimum: 1. Maximum: 50.
offset : Optional[int]
The index of the first item to return. Default: 0
country : COUNTRY_TP
COUNTRY
"""
route = self.route(
"GET", "/browse/categories/{category_id}/playlists", category_id=category_id
)
payload: Dict[str, Any] = {"limit": limit, "offset": offset}
if country:
payload["country"] = country
return self.request(route, params=payload)
def categories(
self,
limit: Optional[int] = 20,
offset: Optional[int] = 0,
country=None,
locale=None,
) -> Awaitable:
"""Get a list of categories used to tag items in Spotify (on, for example, the Spotify player’s “Browse” tab).
Parameters
----------
limit : Optional[int]
The maximum number of items to return. Default: 20. Minimum: 1. Maximum: 50.
offset : Optional[int]
The index of the first item to return. Default: 0
country : COUNTRY_TP
COUNTRY
locale : LOCALE_TP
LOCALE
"""
route = self.route("GET", "/browse/categories")
payload: Dict[str, Any] = {"limit": limit, "offset": offset}
if country:
payload["country"] = country
if locale:
payload["locale"] = locale
return self.request(route, params=payload)
def featured_playlists(
self,
locale=None,
country=None,
timestamp=None,
limit: Optional[int] = 20,
offset: Optional[int] = 0,
):
"""Get a list of Spotify featured playlists (shown, for example, on a Spotify player’s ‘Browse’ tab).
Parameters
----------
locale : LOCALE_TP
LOCALE
country : COUNTRY_TP
COUNTRY
timestamp : TIMESTAMP_TP
TIMESTAMP
limit : Optional[int]
The maximum number of items to return. Default: 20. Minimum: 1. Maximum: 50.
offset : Optional[int]
The index of the first item to return. Default: 0
"""
route = self.route("GET", "/browse/featured-playlists")
payload: Dict[str, Any] = {"limit": limit, "offset": offset}
if country:
payload["country"] = country
if locale:
payload["locale"] = locale
if timestamp:
payload["timestamp"] = timestamp
return self.request(route, params=payload)
def new_releases(
self, *, country=None, limit: Optional[int] = 20, offset: Optional[int] = 0
) -> Awaitable:
"""Get a list of new album releases featured in Spotify (shown, for example, on a Spotify player’s “Browse” tab).
Parameters
----------
limit : Optional[int]
The maximum number of items to return. Default: 20. Minimum: 1. Maximum: 50.
offset : Optional[int]
The index of the first item to return. Default: 0
country : COUNTRY_TP
COUNTRY
"""
route = self.route("GET", "/browse/new-releases")
payload: Dict[str, Any] = {"limit": limit, "offset": offset}
if country:
payload["country"] = country
return self.request(route, params=payload)
def recommendations(
self,
seed_artists,
seed_genres,
seed_tracks,
*,
limit: Optional[int] = 20,
market=None,
**filters,
):
"""Get Recommendations Based on Seeds.
Parameters
----------
seed_artists : str
A comma separated list of Spotify IDs for seed artists. Up to 5 seed values may be provided.
seed_genres : str
A comma separated list of any genres in the set of available genre seeds. Up to 5 seed values may be provided.
seed_tracks : str
A comma separated list of Spotify IDs for a seed track. Up to 5 seed values may be provided.
limit : Optional[int]
The maximum number of items to return. Default: 20. Minimum: 1. Maximum: 50.
market : Optional[str]
An ISO 3166-1 alpha-2 country code.
max_* : Optional[Keyword arguments]
For each tunable track attribute, a hard ceiling on the selected track attribute’s value can be provided.
min_* : Optional[Keyword arguments]
For each tunable track attribute, a hard floor on the selected track attribute’s value can be provided.
target_* : Optional[Keyword arguments]
For each of the tunable track attributes (below) a target value may be provided.
"""
route = self.route("GET", "/recommendations")
payload: Dict[str, Any] = {
"seed_artists": seed_artists,
"seed_genres": seed_genres,
"seed_tracks": seed_tracks,
"limit": limit,
}
if market:
payload["market"] = market
if filters:
payload.update(filters)
return self.request(route, params=payload)
# Follow related endpoints.
def following_artists_or_users(self, ids, *, type_="artist") -> Awaitable:
"""Check to see if the current user is following one or more artists or other Spotify users.
Parameters
----------
ids : List[:class:`str`]
A comma-separated list of the artist or the user Spotify IDs to check.
A maximum of 50 IDs can be sent in one request.
type_ : Optional[:class:`str`]
The ID type: either "artist" or "user".
Default: "artist"
"""
route = self.route("GET", "/me/following/contains")
payload: Dict[str, Any] = {"ids": ids, "type": type_}
return self.request(route, params=payload)
def following_playlists(self, playlist_id: str, ids: List[str]) -> Awaitable:
"""Check to see if one or more Spotify users are following a specified playlist.
Parameters
----------
playlist_id : :class:`str`
The Spotify ID of the playlist.
ids : List[:class:`str`]
A list of the artist or the user Spotify IDs.
A maximum of five IDs are allowed.
"""
route = self.route(
"GET",
"/playlists/{playlist_id}/followers/contains",
playlist_id=playlist_id,
)
payload: Dict[str, Any] = {"ids": ids}
return self.request(route, params=payload)
def follow_artist_or_user(self, type_: str, ids: List[str]) -> Awaitable:
"""Add the current user as a follower of one or more artists or other Spotify users.
Parameters
----------
type_ : :class:`str`
either artist or user.
ids : List[:class:`str`]
A list of the artist or the user Spotify IDs.
"""
route = self.route("PUT", "/me/following")
payload: Dict[str, Any] = {"ids": ids, "type": type_}
return self.request(route, params=payload)
def followed_artists(
self, *, limit: Optional[int] = 20, after: Optional[str] = None
) -> Awaitable:
"""Get the current user’s followed artists.
Parameters
----------
limit : Optional[:class:`int`]
The maximum number of items to return. Default: 20. Minimum: 1. Maximum: 50.
after : Optional[:class:`str`]
The last artist ID retrieved from the previous request.
"""
route = self.route("GET", "/me/following")
payload: Dict[str, Any] = {"limit": limit, "type": "artist"}
if after:
payload["after"] = after
return self.request(route, params=payload)
def unfollow_artists_or_users(self, type_: str, ids: List[str]) -> Awaitable:
"""Remove the current user as a follower of one or more artists or other Spotify users.
Parameters
----------
type_ : :class:`str`
either artist or user.
ids : List[:class:`str`]
A list of the artist or the user Spotify IDs.
"""
route = self.route("DELETE", "/me/following")
payload: Dict[str, Any] = {"ids": ids, "type": type_}
return self.request(route, params=payload)
def unfollow_playlist(self, playlist_id: str) -> Awaitable:
"""Remove the current user as a follower of a playlist.
Parameters
----------
playlist_id : :class:`str`
The Spotify ID of the playlist that is to be no longer followed.
"""
route = self.route(
"DELETE", "/playlists/{playlist_id}/followers", playlist_id=playlist_id
)
return self.request(route)
def is_saved_album(self, ids: List[str]) -> Awaitable:
"""Check if one or more albums is already saved in the current Spotify user’s ‘Your Music’ library.
Parameters
----------
ids : List[:class:`str`]
A list of the Spotify IDs.
"""
route = self.route("GET", "/me/albums/contains")
payload: Dict[str, Any] = {"ids": ",".join(ids)}
return self.request(route, params=payload)
def is_saved_track(self, ids: List[str]) -> Awaitable:
"""Check if one or more tracks is already saved in the current Spotify user’s ‘Your Music’ library.
Parameters
----------
ids : List[:class:`str`]
A list of the Spotify IDs.
"""
route = self.route("GET", "/me/tracks/contains")
payload: Dict[str, Any] = {"ids": ",".join(ids)}
return self.request(route, params=payload)
def saved_albums(
self,
*,
limit: Optional[int] = 20,
offset: Optional[int] = 0,
market: Optional[str] = None,
) -> Awaitable:
"""Get a list of the albums saved in the current Spotify user’s ‘Your Music’ library.
Parameters
----------
limit : Optional[:class:`str`]
The maximum number of objects to return. Default: 20. Minimum: 1. Maximum: 50.
offset : Optional[:class:`str`]
The index of the first object to return. Default: 0 (i.e., the first object). Use with limit to get the next set of objects.
market : Optional[:class:`str`]
An ISO 3166-1 alpha-2 country code or the string from_token. Provide this parameter if you want to apply Track Relinking.
"""
route = self.route("GET", "/me/albums")
payload: Dict[str, Any] = {"limit": limit, "offset": offset}
if market is not None:
payload["market"] = market
return self.request(route, params=payload)
def saved_tracks(
self,
*,
limit: Optional[int] = 20,
offset: Optional[int] = 0,
market: Optional[str] = None,
) -> Awaitable:
"""Get a list of the songs saved in the current Spotify user’s ‘Your Music’ library.
Parameters
----------
limit : Optional[:class:`str`]
The maximum number of objects to return. Default: 20. Minimum: 1. Maximum: 50.
offset : Optional[:class:`str`]
The index of the first object to return. Default: 0 (i.e., the first object). Use with limit to get the next set of objects.
market : Optional[:class:`str`]
An ISO 3166-1 alpha-2 country code or the string from_token. Provide this parameter if you want to apply Track Relinking.
"""
route = self.route("GET", "/me/tracks")
payload: Dict[str, Any] = {"limit": limit, "offset": offset}
if market:
payload["market"] = market
return self.request(route, params=payload)
def delete_saved_albums(self, ids: List[str]) -> Awaitable:
"""Remove one or more albums from the current user’s ‘Your Music’ library.
Parameters
----------
ids : List[:class:`str`]
A list of the Spotify IDs.
"""
route = self.route("DELETE", "/me/albums")
return self.request(route, json=ids)
def delete_saved_tracks(self, ids: List[str]) -> Awaitable:
"""Remove one or more tracks from the current user’s ‘Your Music’ library.
Parameters
----------
ids : List[:class:`str`]
A list of the Spotify IDs.
"""
route = self.route("DELETE", "/me/tracks")
return self.request(route, json=ids)
def save_tracks(self, ids: List[str]) -> Awaitable:
"""Save one or more tracks to the current user’s ‘Your Music’ library.
Parameters
----------
ids : List[:class:`str`]
A list of the Spotify IDs.
"""
route = self.route("PUT", "/me/tracks")
return self.request(route, json=ids)
def save_albums(self, ids: List[str]) -> Awaitable:
"""Save one or more albums to the current user’s ‘Your Music’ library.
Parameters
----------
ids : List[:class:`str`]
A list of the Spotify IDs.
"""
route = self.route("PUT", "/me/albums")
return self.request(route, json=ids)
def top_artists_or_tracks(
self,
type_: str,
*,
limit: Optional[int] = 20,
offset: Optional[int] = 0,
time_range: Optional[str] = None,
) -> Awaitable:
"""Get the current user’s top artists or tracks based on calculated affinity.
Affinity is a measure of the expected preference a user has for a particular track or artist.
It is based on user behavior, including play history, but does not include actions made while in incognito mode.
Light or infrequent users of Spotify may not have sufficient play history to generate a full affinity data set.
As a user’s behavior is likely to shift over time, this preference data is available over three time spans.
For each time range, the top 50 tracks and artists are available for each user.
In the future, it is likely that this restriction will be relaxed. This data is typically updated once each day for each user.
Parameters
----------
type_ : :class:`str`
The type of entity to return. Valid values: "artists" or "tracks".
limit : Optional[:class:`int`]
The number of entities to return. Default: 20. Minimum: 1. Maximum: 50. For example: limit=2
offset : Optional[:class:`int`]
The index of the first entity to return. Default: 0 (i.e., the first track). Use with limit to get the next set of entities.
time_range : Optional[:class:`str`]
Over what time frame the affinities are computed.
Valid values:
- "long_term" (calculated from several years of data and including all new data as it becomes available)
- "medium_term" (approximately last 6 months)
- "short_term" (approximately last 4 weeks). Default: medium_term.
"""
route = self.route("GET", "/me/top/{type_}", type_=type_)
payload: Dict[str, Any] = {"limit": limit, "offset": offset}
if time_range is not None:
payload["time_range"] = time_range
return self.request(route, params=payload)
def available_devices(self) -> Awaitable:
"""Get information about a user’s available devices."""
route = self.route("GET", "/me/player/devices")
return self.request(route)
def current_player(self, *, market: Optional[str] = None) -> Awaitable:
"""Get information about the user’s current playback state, including track, track progress, and active device.
Parameters
----------
market : Optional[:class:`str`]
An ISO 3166-1 alpha-2 country code or the string from_token. Provide this parameter if you want to apply Track Relinking.
"""
route = self.route("GET", "/me/player")
payload: Dict[str, Any] = {}
if market:
payload["market"] = market
return self.request(route, params=payload)
def playback_queue(self, *, uri: str, device_id: Optional[str] = None) -> Awaitable:
"""Add an item to the end of the user’s current playback queue.
Parameters
----------
uri : :class:`str`
The uri of the item to add to the queue. Must be a track or an
episode uri.
device_id : :class:`str`
The id of the device this command is targeting. If not supplied,
the user’s currently active device is the target.
"""
route = self.route("POST", "/me/player/queue")
params = {"uri": uri}
if device_id is not None:
params["device_id"] = device_id
return self.request(route, params=params)
def recently_played(
self,
*,
limit: Optional[int] = 20,
before: Optional[str] = None,
after: Optional[str] = None,
) -> Awaitable:
"""Get tracks from the current user’s recently played tracks.
Returns the most recent 50 tracks played by a user.
Note that a track currently playing will not be visible in play history until it has completed.
A track must be played for more than 30 seconds to be included in play history.
Any tracks listened to while the user had “Private Session” enabled in their client will not be returned in the list of recently played tracks.
The endpoint uses a bidirectional cursor for paging.
Follow the next field with the before parameter to move back in time, or use the after parameter to move forward in time.
If you supply no before or after parameter, the endpoint will return the most recently played songs, and the next link will page back in time.
Parameters
----------
limit : Optional[:class:`int`]
The maximum number of items to return. Default: 20. Minimum: 1. Maximum: 50.
after : Optional[:class:`str`]
A Unix timestamp in milliseconds. Returns all items after (but not including) this cursor position. If after is specified, before must not be specified.
before : Optional[:class:`str`]
A Unix timestamp in milliseconds. Returns all items before (but not including) this cursor position. If before is specified, after must not be specified.
"""
route = self.route("GET", "/me/player/recently-played")
payload: Dict[str, Any] = {"limit": limit}
if before:
payload["before"] = before
elif after:
payload["after"] = after
return self.request(route, params=payload)
def currently_playing(self, *, market: Optional[str] = None) -> Awaitable:
"""Get the object currently being played on the user’s Spotify account.
Parameters
----------
market : Optional[:class:`str`]
An ISO 3166-1 alpha-2 country code or the string from_token. Provide this parameter if you want to apply Track Relinking.
"""
route = self.route("GET", "/me/player/currently-playing")
payload: Dict[str, Any] = {}
if market:
payload["market"] = market
return self.request(route, params=payload)
def pause_playback(self, *, device_id: Optional[str] = None) -> Awaitable:
"""Pause playback on the user’s account.
Parameters
----------
device_id : Optional[:class:`str`]
The id of the device this command is targeting. If not supplied, the user’s currently active device is the target.
"""
route = self.route("PUT", "/me/player/pause")
payload: Dict[str, Any] = {}
if device_id:
payload["device_id"] = device_id
return self.request(route, params=payload)
def seek_playback(
self, position_ms: int, *, device_id: Optional[str] = None
) -> Awaitable:
"""Seeks to the given position in the user’s currently playing track.
Parameters
----------
position_ms : :class:`int`
The position in milliseconds to seek to. Must be a positive number. Passing in a position that is greater than the length of the track will cause the player to start playing the next song.
device_id : Optional[:class:`str`]
The id of the device this command is targeting. If not supplied, the user’s currently active device is the target.
"""
route = self.route("PUT", "/me/player/seek")
payload: Dict[str, Any] = {"position_ms": position_ms}
if device_id:
payload["device_id"] = device_id
return self.request(route, params=payload)
def repeat_playback(
self, state: str, *, device_id: Optional[str] = None
) -> Awaitable:
"""Set the repeat mode for the user’s playback. Options are repeat-track, repeat-context, and off.
Parameters
----------
state : :class:`str`
"track", "context" or "off".
- track will repeat the current track.
- context will repeat the current context.
- off will turn repeat off.
device_id : Optional[str]
The id of the device this command is targeting. If not supplied, the user’s currently active device is the target.
"""
route = self.route("PUT", "/me/player/repeat")
payload: Dict[str, Any] = {"state": state}
if device_id:
payload["device_id"] = device_id
return self.request(route, params=payload)
def set_playback_volume(
self, volume: int, *, device_id: Optional[str] = None
) -> Awaitable:
"""Set the volume for the user’s current playback device.
Parameters
----------
volume : :class:`int`
The volume to set. Must be a value from 0 to 100 inclusive.
device_id : Optional[:class:`str`]
The id of the device this command is targeting. If not supplied, the user’s currently active device is the target.
"""
route = self.route("PUT", "/me/player/volume")
payload: Dict[str, Any] = {"volume_percent": volume}
if device_id:
payload["device_id"] = device_id
return self.request(route, params=payload)
def skip_next(self, *, device_id: Optional[str] = None) -> Awaitable:
"""Skips to next track in the user’s queue.
Parameters
----------
device_id : Optional[:class:`str`]
The id of the device this command is targeting. If not supplied, the user’s currently active device is the target.
"""
route = self.route("POST", "/me/player/next")
payload: Dict[str, Any] = {}
if device_id:
payload["device_id"] = device_id
return self.request(route, params=payload)
def skip_previous(self, *, device_id: Optional[str] = None) -> Awaitable:
"""Skips to previous track in the user’s queue.
Parameters
----------
device_id : Optional[:class:`str`]
The id of the device this command is targeting. If not supplied, the user’s currently active device is the target.
"""
route = self.route("POST", "/me/player/previous")
payload: Dict[str, Any] = {}
if device_id:
payload["device_id"] = device_id
return self.request(route, params=payload)
def play_playback(
self,
context_uri: Union[str, Sequence[str]],
*,
offset: Optional[Union[str, int]] = None,
device_id: Optional[str] = None,
position_ms: Optional[int] = 0,
) -> Awaitable:
"""Start a new context or resume current playback on the user’s active device.
.. note::
In order to resume playback set the context_uri to None.
Parameters
----------
context_uri : Union[str, Sequence[:class:`str`]]
The context to play, if it is a string
then it must be a uri of a album, artist
or playlist.
Otherwise a sequence of strings can be passed
in and they must all be track URIs
offset : Optional[Union[:class:`str`, :class:`int`]]
The offset of which to start from,
must either be an integer or a track URI.
device_id : Optional[:class:`str`]
The id of the device this command is targeting. If not supplied, the user’s currently active device is the target.
position_ms : Optional[:class:`int`]
indicates from what position to start playback. Must be a positive number.
Passing in a position that is greater than the length of the track will cause the player to start playing the next song.
"""
route = self.route("PUT", "/me/player/play")
payload: Dict[str, Any] = {"position_ms": position_ms}
params: Dict[str, Any] = {}
can_set_offset: bool = False
if isinstance(context_uri, str):
payload["context_uri"] = context_uri
can_set_offset = "playlist" in context_uri or "album" in context_uri
elif hasattr(context_uri, "__iter__"):
payload["uris"] = list(context_uri)
can_set_offset = True
elif context_uri is None:
pass # Do nothing, context_uri == None is allowed and intended for resumes
else:
raise TypeError(
f"`context_uri` must be a string or an iterable object, got {type(context_uri)}"
)
if offset is not None:
if can_set_offset:
_offset: Dict[str, Union[int, str]]
if isinstance(offset, str):
_offset = {"uri": offset}
elif isinstance(offset, int):
_offset = {"position": offset}
else:
raise TypeError(
f"`offset` should be either a string or an integer, got {type(offset)}"
)
payload["offset"] = _offset
else:
raise ValueError(
"cannot set `offset` because `context_uri` is neither a list of track URIs nor an album or playlist URI."
)
if device_id is not None:
params["device_id"] = device_id
return self.request(route, params=params, json=payload)
def shuffle_playback(
self, state: bool, *, device_id: Optional[str] = None
) -> Awaitable:
"""Toggle shuffle on or off for user’s playback.
Parameters
----------
state : :class:`bool`
True : Shuffle user’s playback
False : Do not shuffle user’s playback.
device_id : Optional[:class:`str`]
The id of the device this command is targeting. If not supplied, the user’s currently active device is the target.
"""
route = self.route("PUT", "/me/player/shuffle")
payload: Dict[str, Any] = {"state": f"{bool(state)}".lower()}
if device_id is not None:
payload["device_id"] = device_id
return self.request(route, params=payload)
def transfer_player(
self, device_id: str, *, play: Optional[bool] = False
) -> Awaitable:
"""Transfer playback to a new device and determine if it should start playing.
.. note::
Note that a value of false for the play parameter when also transferring to another device_id will not pause playback.
To ensure that playback is paused on the new device you should send a pause command to the currently active device before transferring to the new device_id.
Parameters
----------
device_id : :class:`str`
A Spotify Device ID
play : Optional[:class:`bool`]
True: ensure playback happens on new device.
False or not provided: keep the current playback state.
"""
route = self.route("PUT", "/me/player")
payload: Dict[str, Any] = {"device_ids": [device_id], "play": play}
return self.request(route, json=payload)
def add_playlist_tracks(
self,
playlist_id: str,
tracks: Sequence[Union[str]],
position: Optional[int] = None,
) -> Awaitable:
"""Add one or more tracks to a user’s playlist.
Parameters
----------
playlist_id : :class:`str`
The Spotify ID for the playlist.
tracks : Sequence[Union[:class:`str`]]
A sequence of track URIs.
position : Optional[:class:`int`]
The position to insert the tracks, a zero-based index.
"""
route = self.route(
"POST", "/playlists/{playlist_id}/tracks", playlist_id=playlist_id
)
payload: Dict[str, Any] = {"uris": [track for track in tracks]}
if position is not None:
payload["position"] = position
return self.request(route, json=payload)
def change_playlist_details(
self,
playlist_id: str,
*,
name: Optional[str] = None,
public: Optional[bool] = None,
collaborative: Optional[bool] = None,
description: Optional[str] = None,
) -> Awaitable:
"""Change a playlist’s name and public/private state. (The user must, of course, own the playlist.)
Parameters
----------
playlist_id : :class:`str`
The Spotify ID for the playlist.
name : :class:`str`
The name for the new playlist
public : Optional[:class:`bool`]
Defaults to true . If true the playlist will be public, if false it will be private
collaborative : Optional[:class:`bool`]
Defaults to false . If true the playlist will be collaborative.
.. note::
to create a collaborative playlist you must also set public to false
description : Optional[:class:`str`]
The value for playlist description as displayed in Spotify Clients and in the Web API.
"""
route = self.route("PUT", "/playlists/{playlist_id}", playlist_id=playlist_id)
payload: Dict[str, Any] = filter_items(
{
"name": name,
"public": public,
"collaborative": collaborative,
"description": description,
}
)
return self.request(route, json=payload)
def create_playlist(
self,
user_id: str,
*,
name: str,
public: Optional[bool] = True,
collaborative: Optional[bool] = False,
description: Optional[str] = "",
) -> Awaitable:
"""Create a playlist for a Spotify user. (The playlist will be empty until you add tracks.)
Parameters
----------
user_id : :class:`str`
The user’s Spotify user ID.
name : :class:`str`
The name for the new playlist
public : Optional[:class:`bool`]
Defaults to true . If true the playlist will be public, if false it will be private
collaborative : Optional[:class:`bool`]
Defaults to false . If true the playlist will be collaborative.
.. note::
to create a collaborative playlist you must also set public to false
description : Optional[:class:`str`]
The value for playlist description as displayed in Spotify Clients and in the Web API.
"""
route = self.route("POST", "/users/{user_id}/playlists", user_id=user_id)
payload: Dict[str, Any] = {
"name": name,
"public": public,
"collaborative": collaborative,
"description": description,
}
return self.request(route, json=payload)
def follow_playlist(
self, playlist_id: str, *, public: Optional[bool] = True
) -> Awaitable:
"""Add the current user as a follower of a playlist.
Parameters
----------
playlist_id : :class:`str`
The Spotify ID of the playlist. Any playlist can be followed, regardless of its public/private status, as long as you know its playlist ID.
public : Optional[:class:`bool`]
Defaults to true. If true the playlist will be included in user’s public playlists, if false it will remain private.
"""
route = self.route(
"PUT", "/playlists/{playlist_id}/followers", playlist_id=playlist_id
)
payload: Dict[str, Any] = {"public": public}
return self.request(route, json=payload)
def current_playlists(
self, *, limit: Optional[int] = 20, offset: Optional[int] = 0
) -> Awaitable:
"""Get a list of the playlists owned or followed by the current Spotify user.
Parameters
----------
limit : Optional[:class:`str`]
The maximum number of playlists to return. Default: 20. Minimum: 1. Maximum: 50.
offset : Optional[:class:`str`]
The index of the first playlist to return. Default: 0 (the first object). Maximum offset: 100,000.
"""
route = self.route("GET", "/me/playlists")
return self.request(route, params={"limit": limit, "offset": offset})
def get_playlists(
self, user_id: str, *, limit: Optional[int] = 20, offset: Optional[int] = 0
) -> Awaitable:
"""Get a list of the playlists owned or followed by a Spotify user.
Parameters
----------
user_id : :class:`str`
The user’s Spotify user ID.
limit : Optional[:class:`str`]
The maximum number of playlists to return. Default: 20. Minimum: 1. Maximum: 50.
offset : Optional[:class:`str`]
The index of the first playlist to return. Default: 0 (the first object). Maximum offset: 100,000.
"""
route = self.route("GET", "/users/{user_id}/playlists", user_id=user_id)
return self.request(route, params={"limit": limit, "offset": offset})
def get_playlist_cover_image(self, playlist_id: str) -> Awaitable:
"""Get the current image associated with a specific playlist.
Parameters
----------
playlist_id : :class:`str`
The Spotify ID for the playlist.
"""
route = self.route(
"GET", "/playlists/{playlist_id}/images", playlist_id=playlist_id
)
return self.request(route)
def get_playlist(
self,
playlist_id: str,
*,
fields: Optional[str] = None,
market: Optional[str] = None,
) -> Awaitable:
"""Get a playlist owned by a Spotify user.
Parameters
----------
playlist_id : :class:`str`
The Spotify ID for the playlist.
fields: Optional[:class:`str`]
Filters for the query: a comma-separated list of the fields to return.
If omitted, all fields are returned. For example, to get just the total number of tracks and the request limit: `fields=total,limit`
A dot separator can be used to specify non-reoccurring fields, while parentheses can be used to specify reoccurring fields within objects.
For example, to get just the added date and user ID of the adder: `fields=items(added_at,added_by.id)`
Use multiple parentheses to drill down into nested objects, for example: `fields=items(track(name,href,album(name,href)))`
Fields can be excluded by prefixing them with an exclamation mark, for example: `fields=items.track.album(!external_urls,images)`
market : Optional[:class:`str`]
An ISO 3166-1 alpha-2 country code or the string "from_token".
Provide this parameter if you want to apply Track Relinking.
"""
route = self.route("GET", "/playlists/{playlist_id}", playlist_id=playlist_id)
payload: Dict[str, Any] = {}
if fields:
payload["fields"] = fields
if market:
payload["market"] = market
return self.request(route, params=payload)
def get_playlist_tracks(
self,
playlist_id: str,
*,
fields: Optional[str] = None,
market: Optional[str] = None,
limit: Optional[int] = 20,
offset: Optional[int] = 0,
) -> Awaitable:
"""Get full details of the tracks of a playlist owned by a Spotify user.
Parameters
----------
playlist_id : :class:`str`
The Spotify ID for the playlist.
fields: Optional[:class:`str`]
Filters for the query: a comma-separated list of the fields to return.
If omitted, all fields are returned. For example, to get just the total number of tracks and the request limit: `fields=total,limit`
A dot separator can be used to specify non-reoccurring fields, while parentheses can be used to specify reoccurring fields within objects.
For example, to get just the added date and user ID of the adder: `fields=items(added_at,added_by.id)`
Use multiple parentheses to drill down into nested objects, for example: `fields=items(track(name,href,album(name,href)))`
Fields can be excluded by prefixing them with an exclamation mark, for example: `fields=items.track.album(!external_urls,images)`
limit : Optional[:class:`str`]
The maximum number of tracks to return. Default: 20. Minimum: 1. Maximum: 100.
offset : Optional[:class:`str`]
The index of the first track to return. Default: 0 (the first object).
market : Optional[:class:`str`]
An ISO 3166-1 alpha-2 country code or the string "from_token".
Provide this parameter if you want to apply Track Relinking.
"""
route = self.route(
"GET", "/playlists/{playlist_id}/tracks", playlist_id=playlist_id
)
payload: Dict[str, Any] = {"limit": limit, "offset": offset}
if fields:
payload["fields"] = fields
if market:
payload["market"] = market
return self.request(route, params=payload)
def remove_playlist_tracks(
self,
playlist_id: str,
tracks: Sequence[Union[str, Dict[str, Any]]],
*,
snapshot_id: str = None,
) -> Awaitable:
"""Remove one or more tracks from a user’s playlist.
Parameters
----------
playlist_id : str
The id of the playlist to target
tracks : Sequence[Union[str, Dict[str, Union[str, int]]]]
Either a sequence of plain track URIs (every occurrence of those tracks
is removed) or, for targeted removal, dicts of the form
`{'uri': URI, 'positions': POSITIONS}` where `URI` is a track URI and
`POSITIONS` is a list of integers
snapshot_id : Optional[str]
The snapshot to target.
"""
route = self.route(
"DELETE ", "/playlists/{playlist_id}/tracks", playlist_id=playlist_id
)
payload: Dict[str, Any] = {
"tracks": [
({"uri": track} if isinstance(track, str) else track)
for track in tracks
]
}
if snapshot_id:
payload["snapshot_id"] = snapshot_id
return self.request(route, json=payload)
def reorder_playlists_tracks(
self,
playlist_id: str,
range_start: int,
range_length: int,
insert_before: int,
*,
snapshot_id: Optional[str] = None,
) -> Awaitable:
"""Reorder a track or a group of tracks in a playlist.
Visualization of how reordering tracks works
.. image:: /images/visualization-reordering-tracks.png
.. note::
When reordering tracks, the timestamp indicating when they were added and the user who added them will be kept untouched.
In addition, the users following the playlists won’t be notified about changes in the playlists when the tracks are reordered.
Parameters
----------
playlist_id : :class:`str`
The Spotify ID for the playlist.
range_start : :class:`int`
The position of the first track to be reordered.
range_length : :class:`int`
The amount of tracks to be reordered. Defaults to 1 if not set.
The range of tracks to be reordered begins from the range_start position, and includes the range_length subsequent tracks.
insert_before : :class:`int`
The position where the tracks should be inserted.
To reorder the tracks to the end of the playlist, simply set insert_before to the position after the last track.
snapshot_id : Optional[:class:`str`]
The playlist’s snapshot ID against which you want to make the changes.
"""
route = self.route(
"PUT", "/playlists/{playlist_id}/tracks", playlist_id=playlist_id
)
payload: Dict[str, Any] = {
"range_start": range_start,
"range_length": range_length,
"insert_before": insert_before,
}
if snapshot_id:
payload["snapshot_id"] = snapshot_id
return self.request(route, json=payload)
def replace_playlist_tracks(
self, playlist_id: str, tracks: Sequence[str]
) -> Awaitable:
"""Replace all the tracks in a playlist, overwriting its existing tracks.
.. note::
This powerful request can be useful for replacing tracks, re-ordering existing tracks, or clearing the playlist.
Parameters
----------
playlist_id : :class:`str`
The Spotify ID for the playlist.
tracks : Sequence[:class:`str`]
A list of tracks to replace with.
"""
route = self.route(
"PUT", "/playlists/{playlist_id}/tracks", playlist_id=playlist_id
)
payload: Dict[str, Any] = {"uris": tuple(tracks)}
return self.request(route, json=payload)
def upload_playlist_cover_image(
self, playlist_id: str, file: BinaryIO
) -> Awaitable:
"""Replace the image used to represent a specific playlist.
Parameters
----------
playlist_id : :class:`str`
The Spotify ID for the playlist.
file : File-like object
A file-like object that supports reading;
the contents being read should be :class:`bytes`
"""
route = self.route(
"PUT", "/playlists/{playlist_id}/images", playlist_id=playlist_id
)
return self.request(
route, data=b64encode(file.read()), content_type="image/jpeg"
)
def track_audio_analysis(self, track_id: str) -> Awaitable:
"""Get a detailed audio analysis for a single track identified by its unique Spotify ID.
The Audio Analysis endpoint provides low-level audio analysis for all of the tracks in the Spotify catalog.
The Audio Analysis describes the track’s structure and musical content, including rhythm, pitch, and timbre.
All information is precise to the audio sample.
Many elements of analysis include confidence values, a floating-point number ranging from 0.0 to 1.0.
Confidence indicates the reliability of its corresponding attribute.
Elements carrying a small confidence value should be considered speculative.
There may not be sufficient data in the audio to compute the attribute with high certainty.
Parameters
----------
track_id : :class:`str`
The Spotify ID for the track.
"""
route = self.route("GET", "/audio-analysis/{id}", id=track_id)
return self.request(route)
def track_audio_features(self, track_id: str) -> Awaitable:
"""Get audio feature information for a single track identified by its unique Spotify ID.
Parameters
----------
track_id : :class:`str`
The Spotify ID for the track.
"""
route = self.route("GET", "/audio-features/{id}", id=track_id)
return self.request(route)
def audio_features(self, track_ids: List[str]) -> Awaitable:
"""Get audio features for multiple tracks based on their Spotify IDs.
Parameters
----------
track_ids : List[:class:`str`]
A comma-separated list of the Spotify IDs for the tracks. Maximum: 100 IDs.
"""
route = self.route("GET", "/audio-features")
return self.request(route, params={"ids": track_ids})
def track(self, track_id: str, market: Optional[str] = None) -> Awaitable:
"""Get Spotify catalog information for a single track identified by its unique Spotify ID.
Parameters
----------
track_id : :class:`str`
The Spotify ID for the track.
market : Optional[:class:`str`]
An ISO 3166-1 alpha-2 country code or the string "from_token".
Provide this parameter if you want to apply Track Relinking.
"""
route = self.route("GET", "/tracks/{id}", id=track_id)
payload: Dict[str, Any] = {}
if market is not None:
payload["market"] = market
return self.request(route, params=payload)
def tracks(self, track_ids: List[str], market: Optional[str] = None) -> Awaitable:
"""Get Spotify catalog information for multiple tracks based on their Spotify IDs.
Parameters
----------
track_ids : List[:class:`str`]
A comma-separated list of the Spotify IDs for the tracks. Maximum: 50 IDs.
market : Optional[:class:`str`]
An ISO 3166-1 alpha-2 country code or the string "from_token".
Provide this parameter if you want to apply Track Relinking.
"""
route = self.route("GET", "/tracks")
payload: Dict[str, Any] = {"ids": track_ids}
if market is not None:
payload["market"] = market
return self.request(route, params=payload)
def current_user(self) -> Awaitable:
"""Get detailed profile information about the current user (including the current user’s username)."""
route = self.route("GET", "/me")
return self.request(route)
def user(self, user_id: str) -> Awaitable:
"""Get public profile information about a Spotify user.
Parameters
----------
user_id : :class:`str`
The user’s Spotify user ID.
"""
route = self.route("GET", "/users/{user_id}", user_id=user_id)
return self.request(route)
def search( # pylint: disable=invalid-name
self,
q: str,
query_type: str = "track,playlist,artist,album",
market: str = "US",
limit: int = 20,
offset: int = 0,
include_external: Optional[str] = None,
) -> Awaitable:
"""Get Spotify Catalog information about artists, albums, tracks or playlists that match a keyword string.
Parameters
----------
q : :class:`str`
Search query keywords and optional field filters and operators. e.g. `roadhouse blues.`
query_type : Optional[:class:`str`]
A comma-separated list of item types to search across. (default: "track,playlist,artist,album")
Valid types are: album, artist, playlist, and track.
Search results include hits from all the specified item types.
market : Optional[:class:`str`]
An ISO 3166-1 alpha-2 country code or the string "from_token". (default: "US")
If a country code is specified, only artists, albums, and tracks with content playable in that market are returned.
.. note::
- Playlist results are not affected by the market parameter.
- If market is set to "from_token", and a valid access token is specified in the request header, only
content playable in the country associated with the user account is returned.
- Users can view the country that is associated with their account in the account settings. A user must
grant access to the user-read-private scope before the access token is issued.
limit : Optional[:class:`int`]
Maximum number of results to return. (Default: 20, Minimum: 1, Maximum: 50)
.. note::
The limit is applied within each type, not on the total response.
For example, if the limit value is 3 and the type is artist,album, the response contains 3 artists and 3 albums.
offset : Optional[:class:`int`]
The index of the first result to return.
Default: 0 (the first result).
Maximum offset (including limit): 10,000.
Use with limit to get the next page of search results.
include_external : Optional[:class:`str`]
Possible values: `audio`
If `include_external=audio` is specified the response will include any relevant audio content that is hosted externally.
By default external content is filtered out from responses.
"""
route = self.route("GET", "/search")
payload: Dict[str, Any] = {
"q": q,
"type": query_type,
"limit": limit,
"offset": offset,
}
if market:
payload["market"] = market
if include_external is not None:
payload["include_external"] = include_external
return self.request(route, params=payload)
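# Hypothetical usage sketch (not part of the original client): given an already
# authenticated instance of this class, a caller might combine the parameters
# documented above roughly like this. The variable name `client` is an assumption.
#
#     results = await client.search(
#         "roadhouse blues",
#         query_type="track,album",
#         market="US",
#         limit=10,
#         offset=0,
#     )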
# Shows & Episode related endpoints
def save_shows(self, ids: List[str]) -> Awaitable:
"""Save one or more shows to current Spotify user’s library.
Parameters
----------
ids : List[:class:`str`]
A list of the Spotify IDs.
"""
route = self.route("PUT", "/me/shows")
payload: Dict[str, Any] = {"ids": ",".join(ids)}
return self.request(route, params=payload)
def get_saved_shows(self, limit: int = 20, offset: int = 0) -> Awaitable:
"""Get a list of shows saved in the current Spotify user’s library.
Optional parameters can be used to limit the number of shows returned.
Parameters
----------
limit : Optional[int]
The maximum number of items to return. Default: 20. Minimum: 1. Maximum: 50.
offset : Optional[int]
The offset at which Spotify should start returning items.
"""
route = self.route("GET", "/me/shows")
payload: Dict[str, Any] = {"limit": limit, "offset": offset}
return self.request(route, params=payload)
def get_show(self, spotify_id: str, market: Optional[str] = "US") -> Awaitable:
"""Get Spotify catalog information for a single show identified by its unique Spotify ID.
Parameters
----------
spotify_id : str
The Spotify ID for the show.
market : Optional[str]
An ISO 3166-1 alpha-2 country code.
"""
route = self.route("GET", "/shows/{spotify_id}", spotify_id=spotify_id)
payload: Dict[str, Any] = {}
if market:
payload["market"] = market
return self.request(route, params=payload)
def get_multiple_shows(
self, ids: List[str], market: Optional[str] = "US"
) -> Awaitable:
"""Get Spotify catalog information for several shows based on their Spotify IDs.
Parameters
----------
ids : List[:class:`str`]
A list of the Spotify IDs.
market : Optional[str]
An ISO 3166-1 alpha-2 country code.
"""
route = self.route("GET", "/shows")
payload: Dict[str, Any] = {"ids": ",".join(ids)}
if market:
payload["market"] = market
return self.request(route, params=payload)
def get_shows_episodes(
self,
spotify_id: str,
market: Optional[str] = "US",
limit: int = 20,
offset: int = 0,
) -> Awaitable:
"""Get Spotify catalog information about a show’s episodes.
Optional parameters can be used to limit the number of episodes returned.
Parameters
----------
spotify_id : str
The Spotify ID for the show.
market : Optional[str]
An ISO 3166-1 alpha-2 country code.
limit : Optional[int]
The maximum number of items to return. Default: 20. Minimum: 1. Maximum: 50.
offset : Optional[int]
The offset at which Spotify should start returning items.
"""
route = self.route("GET", "/shows/{spotify_id}/episodes", spotify_id=spotify_id)
payload: Dict[str, Any] = {"limit": limit, "offset": offset}
if market:
payload["market"] = market
return self.request(route, params=payload)
def check_saved_shows(self, ids: List[str]) -> Awaitable:
"""Check if one or more shows is already saved in the current Spotify user’s library.
Parameters
----------
ids : List[:class:`str`]
A list of the Spotify IDs.
"""
route = self.route("GET", "/me/shows/contains")
payload: Dict[str, Any] = {"ids": ",".join(ids)}
return self.request(route, params=payload)
def remove_saved_shows(
self, ids: List[str], market: Optional[str] = "US"
) -> Awaitable:
"""Delete one or more shows from current Spotify user’s library.
Parameters
----------
ids : List[:class:`str`]
A list of the Spotify IDs.
market : Optional[str]
An ISO 3166-1 alpha-2 country code.
"""
route = self.route("DELETE", "/me/shows")
payload: Dict[str, Any] = {"ids": ",".join(ids)}
if market:
payload["market"] = market
return self.request(route, params=payload)
def get_episode(self, spotify_id: str, market: Optional[str] = "US") -> Awaitable:
"""Get Spotify catalog information for a single episode identified by its unique Spotify ID.
Parameters
----------
spotify_id : str
The Spotify ID for the episode.
market : Optional[str]
An ISO 3166-1 alpha-2 country code.
"""
route = self.route("GET", "/episodes/{spotify_id}", spotify_id=spotify_id)
payload: Dict[str, Any] = {}
if market:
payload["market"] = market
return self.request(route, params=payload)
def get_multiple_episodes(
self, ids: List[str], market: Optional[str] = "US"
) -> Awaitable:
"""Get Spotify catalog information for several episodes based on their Spotify IDs.
Parameters
----------
ids : List[:class:`str`]
A list of the Spotify IDs.
market : Optional[str]
An ISO 3166-1 alpha-2 country code.
"""
route = self.route("GET", "/episodes")
payload: Dict[str, Any] = {"ids": ",".join(ids)}
if market:
payload["market"] = market
return self.request(route, params=payload)
REFRESH_TOKEN_URL = "https://accounts.spotify.com/api/token?grant_type=refresh_token&refresh_token={refresh_token}"
class HTTPUserClient(HTTPClient):
"""HTTPClient for access to user endpoints."""
def __init__(
self,
client_id: str,
client_secret: str,
token: str = None,
refresh_token: str = None,
loop=None,
):
assert token or refresh_token
super().__init__(client_id, client_secret, loop=loop)
if token:
self.bearer_info = {"access_token": token}
self.refresh_token = refresh_token
async def get_bearer_info(self, *_, **__):
if not self.refresh_token:
# Should only happen if User.from_token didn't receive refresh_token
raise SpotifyException(
"Access token expired and no refresh token was provided"
)
headers = {
"Authorization": f"Basic {b64encode(':'.join((self.client_id, self.client_secret)).encode()).decode()}",
"Content-Type": "application/x-www-form-urlencoded",
}
route = ("POST", REFRESH_TOKEN_URL.format(refresh_token=self.refresh_token))
return await self.request(route, headers=headers)
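# Hypothetical usage sketch (not part of the original module): constructing an
# HTTPUserClient from a stored refresh token so that get_bearer_info() can obtain
# a fresh access token on demand. All credential values below are placeholders.
#
#     client = HTTPUserClient(
#         client_id="<client-id>",
#         client_secret="<client-secret>",
#         refresh_token="<stored-refresh-token>",
#     )
#     bearer_info = await client.get_bearer_info()  # exchanges the refresh token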
|
py | b4010daf56d3adc1e7b1a354fa9f8ad1f31abd4c | # https://leetcode.com/problems/matrix-block-sum/
class Solution:
def matrixBlockSum(self, mat: list[list[int]], k: int) -> list[list[int]]:
rows = len(mat)
cols = len(mat[0])
if rows == cols == 1:
return mat
# Preprocess mat, calculate block sums for columns.
col_block_sums = [[0 for _ in range(cols)] for _ in range(rows)]
for i in range(rows):
for j in range(cols):
if i == 0:
col_block_sums[i][j] = sum([row[j] for row in mat[:k + 1]])
else:
top_sum = col_block_sums[i - 1][j]
subtrahend = mat[i - k - 1][j] if i - k - 1 >= 0 else 0
addend = mat[i + k][j] if i + k < rows else 0
col_block_sums[i][j] = top_sum - subtrahend + addend
# Calculate answer using column block sums.
ans = [[0 for _ in range(cols)] for _ in range(rows)]
for i in range(rows):
for j in range(cols):
if j == 0:
ans[i][j] = sum(col_block_sums[i][:k + 1])
else:
left_sum = ans[i][j - 1]
subtrahend = (
col_block_sums[i][j - k - 1] if j - k - 1 >= 0 else 0)
addend = col_block_sums[i][j + k] if j + k < cols else 0
ans[i][j] = left_sum - subtrahend + addend
# Mission complete!
return ans
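# Quick self-check (not part of the original solution): the demo below uses the
# well-known LeetCode sample for k = 1 and prints the expected block-sum matrix.
if __name__ == "__main__":
    demo = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
    # Expected output: [[12, 21, 16], [27, 45, 33], [24, 39, 28]]
    print(Solution().matrixBlockSum(demo, 1))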
|
py | b4010e1fc31c04926a5b5ad6992bf1c406ce042f | """
Django settings for djangoProject2 project.
Generated by 'django-admin startproject' using Django 3.2.8.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-w8ua_e9hi@((3%%f^y_t)wm+=y-kmdv34hd99ohj--i6z0lzj('
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'project_first_app'
]
AUTH_USER_MODEL = 'project_first_app.OwerUser'
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'djangoProject2.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR / 'templates']
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'djangoProject2.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
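# A minimal sketch (assumption, not part of this project's configuration): the same
# DATABASES setting pointed at PostgreSQL instead of SQLite would look roughly like:
#
#     DATABASES = {
#         'default': {
#             'ENGINE': 'django.db.backends.postgresql',
#             'NAME': 'mydb',
#             'USER': 'myuser',
#             'PASSWORD': 'mypassword',
#             'HOST': 'localhost',
#             'PORT': '5432',
#         }
#     }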
|
py | b4010ebdd6ca12a91be135219120ba4d279e5e63 | #funcao ou metodo em Python, sempre deve iniciar com DEF (definition)
"""
def big_mac():
print("sanduiche big mac")
print("inicio")
big_mac()
print("fim")
"""
def fazer_big_mac(nome):
print(f"sanduiche big mac {nome}")
def fazer_batata(tamanho):
print(f"batata {tamanho}")
def preparar_refri(tipo, tamanho):
print(f"{tipo} {tamanho}")
#fazer_big_mac("Hugo")
#fazer_big_mac("Louyse")
#fazer_big_mac("Juju")
fazer_big_mac("Hugo")
fazer_batata("Grande")
preparar_refri("Coke", "Media")
def fazer_combo(nome, tamanho_batata, tipo_refri, tamanho_refri):
print(f"***Saindo um combo")
fazer_big_mac(nome)
fazer_batata(tamanho_batata)
preparar_refri(tipo_refri, tamanho_refri)
fazer_combo("HUGO", "Gigante", "Fanta", "Enorme")
def maior_num(lista_num):
lista_num.sort()
lista_num.reverse()
maior_num = lista_num[0]
return maior_num
res = maior_num([123,21,45,84984,-1,48,99999])
print(res)
|
py | b4010ed24caaa5b96b1daa4ed3cf162a5857c400 | project_resources = [
{
"urn": "do:loadbalancer:9625f517-75f0-4af8-a336-62374e68dc0d",
"assigned_at": "2022-03-10T16:27:58.728298Z",
"links": {
"self": "https://api.digitalocean.com/v2/load_balancers/9625f517-75f0-4af8-a336-62374e68dc0d"
},
"status": "ok",
},
{
"urn": "do:kubernetes:e1c48631-b382-4001-2168-c47c54795a26",
"assigned_at": "2022-03-10T13:07:00.577221Z",
"links": {
"self": "https://api.digitalocean.com/v2/kubernetes/clusters/e1c48631-b382-4001-2168-c47c54795a26"
},
"status": "ok",
},
{
"urn": "do:dbaas:2848a998-e151-4d5a-9813-0904a44c2397",
"assigned_at": "2022-03-10T11:40:06.461298Z",
"links": {
"self": "https://api.digitalocean.com/v2/databases/2848a998-e151-4d5a-9813-0904a44c2397"
},
"status": "ok",
},
{
"urn": "do:volume:631f81d2-9fc1-11ec-800c-0a58ac14d197",
"assigned_at": "2022-03-09T15:55:55.700167Z",
"links": {
"self": "https://api.digitalocean.com/v2/volumes/631f81d2-9fc1-11ec-800c-0a58ac14d197"
},
"status": "ok",
},
{
"urn": "do:floatingip:127.0.0.1",
"assigned_at": "2022-03-03T22:06:30.0352Z",
"links": {"self": "https://api.digitalocean.com/v2/floating_ips/127.0.0.1"},
"status": "ok",
},
{
"urn": "do:droplet:289110074",
"assigned_at": "2022-03-03T16:26:58.243032Z",
"links": {"self": "https://api.digitalocean.com/v2/droplets/289110074"},
"status": "ok",
},
{
"urn": "do:space:api-test-space.resoto",
"assigned_at": "2022-02-23T13:42:22.636219Z",
"links": {"self": "https://api-test-space.resoto.fra1.digitaloceanspaces.com"},
"status": "ok",
},
]
|
py | b40112d7af7a3343483376a1b27eafa5a3196268 | import io
import pprp
import pprp.config
import pprp.utility
def decrypt_sink(dg, block_size=pprp.config.DEFAULT_BLOCK_SIZE_B):
s = io.BytesIO()
last_block = None
for block in dg:
block_to_send = last_block
last_block = block
if block_to_send is not None:
s.write(block_to_send)
trimmed_last_block = pprp.utility.trim_pkcs7_padding(last_block)
s.write(trimmed_last_block)
return s.getvalue()
def encrypt_sink(eg):
s = io.BytesIO()
for block in eg:
s.write(block)
return s.getvalue()
def decrypt_to_file_sink(f, dg, block_size=pprp.config.DEFAULT_BLOCK_SIZE_B):
last_block = None
for block in dg:
(block_to_send, last_block) = (last_block, block)
if block_to_send is not None:
f.write(block_to_send)
trimmed_last_block = pprp.utility.trim_pkcs7_padding(last_block)
f.write(trimmed_last_block)
f.flush()
def encrypt_to_file_sink(f, eg):
for block in eg:
f.write(block)
f.flush()
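# Minimal sketch (not part of the original module): the sinks only need an iterable
# of byte blocks, so they can be exercised without a real cipher generator. The
# padding block below assumes trim_pkcs7_padding() strips standard PKCS#7 padding.
if __name__ == "__main__":
    block_size = 16
    plaintext_block = b"sixteen byte msg"                  # exactly one full block
    full_padding_block = bytes([block_size]) * block_size  # PKCS#7 full-pad block
    blocks = [plaintext_block, full_padding_block]
    print(encrypt_sink(iter(blocks)))   # concatenates the blocks verbatim
    print(decrypt_sink(iter(blocks)))   # strips the trailing padding block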
|
py | b401130475163cd0f9b43041d3ccd7cc7c78454b | # (c) Copyright IBM Corp. 2021
# (c) Copyright Instana Inc. 2020
from __future__ import absolute_import
import urllib3
from django.apps import apps
from ..apps.app_django import INSTALLED_APPS
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
import os
from instana.singletons import agent, tracer
from ..helpers import fail_with_message_and_span_dump, get_first_span_by_filter, drop_log_spans_from_list
apps.populate(INSTALLED_APPS)
class TestDjango(StaticLiveServerTestCase):
def setUp(self):
""" Clear all spans before a test run """
self.recorder = tracer.recorder
self.recorder.clear_spans()
self.http = urllib3.PoolManager()
def tearDown(self):
""" Do nothing for now """
os.environ["INSTANA_DISABLE_W3C_TRACE_CORRELATION"] = ""
def test_basic_request(self):
with tracer.start_active_span('test'):
response = self.http.request('GET', self.live_server_url + '/', fields={"test": 1})
assert response
self.assertEqual(200, response.status)
spans = self.recorder.queued_spans()
self.assertEqual(3, len(spans))
test_span = spans[2]
urllib3_span = spans[1]
django_span = spans[0]
assert ('X-INSTANA-T' in response.headers)
assert (int(response.headers['X-INSTANA-T'], 16))
self.assertEqual(django_span.t, response.headers['X-INSTANA-T'])
assert ('X-INSTANA-S' in response.headers)
assert (int(response.headers['X-INSTANA-S'], 16))
self.assertEqual(django_span.s, response.headers['X-INSTANA-S'])
assert ('X-INSTANA-L' in response.headers)
self.assertEqual('1', response.headers['X-INSTANA-L'])
server_timing_value = "intid;desc=%s" % django_span.t
assert ('Server-Timing' in response.headers)
self.assertEqual(server_timing_value, response.headers['Server-Timing'])
self.assertEqual("test", test_span.data["sdk"]["name"])
self.assertEqual("urllib3", urllib3_span.n)
self.assertEqual("django", django_span.n)
self.assertEqual(test_span.t, urllib3_span.t)
self.assertEqual(urllib3_span.t, django_span.t)
self.assertEqual(urllib3_span.p, test_span.s)
self.assertEqual(django_span.p, urllib3_span.s)
self.assertIsNone(django_span.sy)
self.assertIsNone(urllib3_span.sy)
self.assertIsNone(test_span.sy)
self.assertEqual(None, django_span.ec)
self.assertEqual('/', django_span.data["http"]["url"])
self.assertEqual('GET', django_span.data["http"]["method"])
self.assertEqual(200, django_span.data["http"]["status"])
self.assertEqual('test=1', django_span.data["http"]["params"])
self.assertEqual('^$', django_span.data["http"]["path_tpl"])
self.assertIsNone(django_span.stack)
def test_synthetic_request(self):
headers = {
'X-INSTANA-SYNTHETIC': '1'
}
with tracer.start_active_span('test'):
response = self.http.request('GET', self.live_server_url + '/', headers=headers)
assert response
self.assertEqual(200, response.status)
spans = self.recorder.queued_spans()
self.assertEqual(3, len(spans))
test_span = spans[2]
urllib3_span = spans[1]
django_span = spans[0]
self.assertEqual('^$', django_span.data["http"]["path_tpl"])
self.assertTrue(django_span.sy)
self.assertIsNone(urllib3_span.sy)
self.assertIsNone(test_span.sy)
def test_request_with_error(self):
with tracer.start_active_span('test'):
response = self.http.request('GET', self.live_server_url + '/cause_error')
assert response
self.assertEqual(500, response.status)
spans = self.recorder.queued_spans()
spans = drop_log_spans_from_list(spans)
span_count = len(spans)
if span_count != 3:
msg = "Expected 3 spans but got %d" % span_count
fail_with_message_and_span_dump(msg, spans)
filter = lambda span: span.n == 'sdk' and span.data['sdk']['name'] == 'test'
test_span = get_first_span_by_filter(spans, filter)
assert (test_span)
filter = lambda span: span.n == 'urllib3'
urllib3_span = get_first_span_by_filter(spans, filter)
assert (urllib3_span)
filter = lambda span: span.n == 'django'
django_span = get_first_span_by_filter(spans, filter)
assert (django_span)
assert ('X-INSTANA-T' in response.headers)
assert (int(response.headers['X-INSTANA-T'], 16))
self.assertEqual(django_span.t, response.headers['X-INSTANA-T'])
assert ('X-INSTANA-S' in response.headers)
assert (int(response.headers['X-INSTANA-S'], 16))
self.assertEqual(django_span.s, response.headers['X-INSTANA-S'])
assert ('X-INSTANA-L' in response.headers)
self.assertEqual('1', response.headers['X-INSTANA-L'])
server_timing_value = "intid;desc=%s" % django_span.t
assert ('Server-Timing' in response.headers)
self.assertEqual(server_timing_value, response.headers['Server-Timing'])
self.assertEqual("test", test_span.data["sdk"]["name"])
self.assertEqual("urllib3", urllib3_span.n)
self.assertEqual("django", django_span.n)
self.assertEqual(test_span.t, urllib3_span.t)
self.assertEqual(urllib3_span.t, django_span.t)
self.assertEqual(urllib3_span.p, test_span.s)
self.assertEqual(django_span.p, urllib3_span.s)
self.assertEqual(1, django_span.ec)
self.assertEqual('/cause_error', django_span.data["http"]["url"])
self.assertEqual('GET', django_span.data["http"]["method"])
self.assertEqual(500, django_span.data["http"]["status"])
self.assertEqual('This is a fake error: /cause-error', django_span.data["http"]["error"])
self.assertEqual('^cause_error$', django_span.data["http"]["path_tpl"])
self.assertIsNone(django_span.stack)
def test_request_with_not_found(self):
with tracer.start_active_span('test'):
response = self.http.request('GET', self.live_server_url + '/not_found')
assert response
self.assertEqual(404, response.status)
spans = self.recorder.queued_spans()
spans = drop_log_spans_from_list(spans)
span_count = len(spans)
if span_count != 3:
msg = "Expected 3 spans but got %d" % span_count
fail_with_message_and_span_dump(msg, spans)
filter = lambda span: span.n == 'django'
django_span = get_first_span_by_filter(spans, filter)
assert (django_span)
self.assertIsNone(django_span.ec)
self.assertEqual(404, django_span.data["http"]["status"])
def test_request_with_not_found_no_route(self):
with tracer.start_active_span('test'):
response = self.http.request('GET', self.live_server_url + '/no_route')
assert response
self.assertEqual(404, response.status)
spans = self.recorder.queued_spans()
spans = drop_log_spans_from_list(spans)
span_count = len(spans)
if span_count != 3:
msg = "Expected 3 spans but got %d" % span_count
fail_with_message_and_span_dump(msg, spans)
filter = lambda span: span.n == 'django'
django_span = get_first_span_by_filter(spans, filter)
assert (django_span)
self.assertIsNone(django_span.data["http"]["path_tpl"])
self.assertIsNone(django_span.ec)
self.assertEqual(404, django_span.data["http"]["status"])
def test_complex_request(self):
with tracer.start_active_span('test'):
response = self.http.request('GET', self.live_server_url + '/complex')
assert response
self.assertEqual(200, response.status)
spans = self.recorder.queued_spans()
self.assertEqual(5, len(spans))
test_span = spans[4]
urllib3_span = spans[3]
django_span = spans[2]
ot_span1 = spans[1]
ot_span2 = spans[0]
assert ('X-INSTANA-T' in response.headers)
assert (int(response.headers['X-INSTANA-T'], 16))
self.assertEqual(django_span.t, response.headers['X-INSTANA-T'])
assert ('X-INSTANA-S' in response.headers)
assert (int(response.headers['X-INSTANA-S'], 16))
self.assertEqual(django_span.s, response.headers['X-INSTANA-S'])
assert ('X-INSTANA-L' in response.headers)
self.assertEqual('1', response.headers['X-INSTANA-L'])
server_timing_value = "intid;desc=%s" % django_span.t
assert ('Server-Timing' in response.headers)
self.assertEqual(server_timing_value, response.headers['Server-Timing'])
self.assertEqual("test", test_span.data["sdk"]["name"])
self.assertEqual("urllib3", urllib3_span.n)
self.assertEqual("django", django_span.n)
self.assertEqual("sdk", ot_span1.n)
self.assertEqual("sdk", ot_span2.n)
self.assertEqual(test_span.t, urllib3_span.t)
self.assertEqual(urllib3_span.t, django_span.t)
self.assertEqual(django_span.t, ot_span1.t)
self.assertEqual(ot_span1.t, ot_span2.t)
self.assertEqual(urllib3_span.p, test_span.s)
self.assertEqual(django_span.p, urllib3_span.s)
self.assertEqual(ot_span1.p, django_span.s)
self.assertEqual(ot_span2.p, ot_span1.s)
self.assertEqual(None, django_span.ec)
self.assertIsNone(django_span.stack)
self.assertEqual('/complex', django_span.data["http"]["url"])
self.assertEqual('GET', django_span.data["http"]["method"])
self.assertEqual(200, django_span.data["http"]["status"])
self.assertEqual('^complex$', django_span.data["http"]["path_tpl"])
def test_custom_header_capture(self):
# Hack together a manual custom headers list
agent.options.extra_http_headers = [u'X-Capture-This', u'X-Capture-That']
request_headers = dict()
request_headers['X-Capture-This'] = 'this'
request_headers['X-Capture-That'] = 'that'
with tracer.start_active_span('test'):
response = self.http.request('GET', self.live_server_url + '/', headers=request_headers)
# response = self.client.get('/')
assert response
self.assertEqual(200, response.status)
spans = self.recorder.queued_spans()
self.assertEqual(3, len(spans))
test_span = spans[2]
urllib3_span = spans[1]
django_span = spans[0]
self.assertEqual("test", test_span.data["sdk"]["name"])
self.assertEqual("urllib3", urllib3_span.n)
self.assertEqual("django", django_span.n)
self.assertEqual(test_span.t, urllib3_span.t)
self.assertEqual(urllib3_span.t, django_span.t)
self.assertEqual(urllib3_span.p, test_span.s)
self.assertEqual(django_span.p, urllib3_span.s)
self.assertEqual(None, django_span.ec)
self.assertIsNone(django_span.stack)
self.assertEqual('/', django_span.data["http"]["url"])
self.assertEqual('GET', django_span.data["http"]["method"])
self.assertEqual(200, django_span.data["http"]["status"])
self.assertEqual('^$', django_span.data["http"]["path_tpl"])
assert "X-Capture-This" in django_span.data["http"]["header"]
self.assertEqual("this", django_span.data["http"]["header"]["X-Capture-This"])
assert "X-Capture-That" in django_span.data["http"]["header"]
self.assertEqual("that", django_span.data["http"]["header"]["X-Capture-That"])
def test_with_incoming_context(self):
request_headers = dict()
request_headers['X-INSTANA-T'] = '1'
request_headers['X-INSTANA-S'] = '1'
request_headers['traceparent'] = '01-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01-788777'
request_headers['tracestate'] = 'rojo=00f067aa0ba902b7,in=a3ce929d0e0e4736;8357ccd9da194656,congo=t61rcWkgMzE'
response = self.http.request('GET', self.live_server_url + '/', headers=request_headers)
assert response
self.assertEqual(200, response.status)
spans = self.recorder.queued_spans()
self.assertEqual(1, len(spans))
django_span = spans[0]
self.assertEqual(django_span.t, '0000000000000001')
self.assertEqual(django_span.p, '0000000000000001')
assert ('X-INSTANA-T' in response.headers)
assert (int(response.headers['X-INSTANA-T'], 16))
self.assertEqual(django_span.t, response.headers['X-INSTANA-T'])
assert ('X-INSTANA-S' in response.headers)
assert (int(response.headers['X-INSTANA-S'], 16))
self.assertEqual(django_span.s, response.headers['X-INSTANA-S'])
assert ('X-INSTANA-L' in response.headers)
self.assertEqual('1', response.headers['X-INSTANA-L'])
assert ('traceparent' in response.headers)
self.assertEqual('01-4bf92f3577b34da6a3ce929d0e0e4736-{}-01'.format(django_span.s),
response.headers['traceparent'])
assert ('tracestate' in response.headers)
self.assertEqual(
'in={};{},rojo=00f067aa0ba902b7,congo=t61rcWkgMzE'.format(
django_span.t, django_span.s), response.headers['tracestate'])
server_timing_value = "intid;desc=%s" % django_span.t
assert ('Server-Timing' in response.headers)
self.assertEqual(server_timing_value, response.headers['Server-Timing'])
def test_with_incoming_context_and_correlation(self):
request_headers = dict()
request_headers['X-INSTANA-T'] = '1'
request_headers['X-INSTANA-S'] = '1'
request_headers['X-INSTANA-L'] = '1, correlationType=web; correlationId=1234567890abcdef'
request_headers['traceparent'] = '00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01'
request_headers['tracestate'] = 'rojo=00f067aa0ba902b7,in=a3ce929d0e0e4736;8357ccd9da194656,congo=t61rcWkgMzE'
response = self.http.request('GET', self.live_server_url + '/', headers=request_headers)
assert response
self.assertEqual(200, response.status)
spans = self.recorder.queued_spans()
self.assertEqual(1, len(spans))
django_span = spans[0]
self.assertEqual(django_span.t, 'a3ce929d0e0e4736')
self.assertEqual(django_span.p, '00f067aa0ba902b7')
self.assertEqual(django_span.ia.t, 'a3ce929d0e0e4736')
self.assertEqual(django_span.ia.p, '8357ccd9da194656')
self.assertEqual(django_span.lt, '4bf92f3577b34da6a3ce929d0e0e4736')
self.assertEqual(django_span.tp, True)
self.assertEqual(django_span.crtp, 'web')
self.assertEqual(django_span.crid, '1234567890abcdef')
assert ('X-INSTANA-T' in response.headers)
assert (int(response.headers['X-INSTANA-T'], 16))
self.assertEqual(django_span.t, response.headers['X-INSTANA-T'])
assert ('X-INSTANA-S' in response.headers)
assert (int(response.headers['X-INSTANA-S'], 16))
self.assertEqual(django_span.s, response.headers['X-INSTANA-S'])
assert ('X-INSTANA-L' in response.headers)
self.assertEqual('1', response.headers['X-INSTANA-L'])
assert ('traceparent' in response.headers)
self.assertEqual('00-4bf92f3577b34da6a3ce929d0e0e4736-{}-01'.format(django_span.s),
response.headers['traceparent'])
assert ('tracestate' in response.headers)
self.assertEqual(
'in={};{},rojo=00f067aa0ba902b7,congo=t61rcWkgMzE'.format(
django_span.t, django_span.s), response.headers['tracestate'])
server_timing_value = "intid;desc=%s" % django_span.t
assert ('Server-Timing' in response.headers)
self.assertEqual(server_timing_value, response.headers['Server-Timing'])
def test_with_incoming_traceparent_tracestate(self):
request_headers = dict()
request_headers['traceparent'] = '00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01'
request_headers['tracestate'] = 'rojo=00f067aa0ba902b7,in=a3ce929d0e0e4736;8357ccd9da194656,congo=t61rcWkgMzE'
response = self.http.request('GET', self.live_server_url + '/', headers=request_headers)
assert response
self.assertEqual(200, response.status)
spans = self.recorder.queued_spans()
self.assertEqual(1, len(spans))
django_span = spans[0]
self.assertEqual(django_span.t, 'a3ce929d0e0e4736') # last 16 chars from traceparent trace_id
self.assertEqual(django_span.p, '00f067aa0ba902b7')
self.assertEqual(django_span.ia.t, 'a3ce929d0e0e4736')
self.assertEqual(django_span.ia.p, '8357ccd9da194656')
self.assertEqual(django_span.lt, '4bf92f3577b34da6a3ce929d0e0e4736')
self.assertEqual(django_span.tp, True)
assert ('X-INSTANA-T' in response.headers)
assert (int(response.headers['X-INSTANA-T'], 16))
self.assertEqual(django_span.t, response.headers['X-INSTANA-T'])
assert ('X-INSTANA-S' in response.headers)
assert (int(response.headers['X-INSTANA-S'], 16))
self.assertEqual(django_span.s, response.headers['X-INSTANA-S'])
assert ('X-INSTANA-L' in response.headers)
self.assertEqual('1', response.headers['X-INSTANA-L'])
assert ('traceparent' in response.headers)
self.assertEqual('00-4bf92f3577b34da6a3ce929d0e0e4736-{}-01'.format(django_span.s),
response.headers['traceparent'])
assert ('tracestate' in response.headers)
self.assertEqual(
'in=a3ce929d0e0e4736;{},rojo=00f067aa0ba902b7,congo=t61rcWkgMzE'.format(
django_span.s), response.headers['tracestate'])
server_timing_value = "intid;desc=%s" % django_span.t
assert ('Server-Timing' in response.headers)
self.assertEqual(server_timing_value, response.headers['Server-Timing'])
def test_with_incoming_traceparent_tracestate_disable_traceparent(self):
os.environ["INSTANA_DISABLE_W3C_TRACE_CORRELATION"] = "1"
request_headers = dict()
request_headers['traceparent'] = '00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01'
request_headers['tracestate'] = 'rojo=00f067aa0ba902b7,in=a3ce929d0e0e4736;8357ccd9da194656,congo=t61rcWkgMzE'
response = self.http.request('GET', self.live_server_url + '/', headers=request_headers)
assert response
self.assertEqual(200, response.status)
spans = self.recorder.queued_spans()
self.assertEqual(1, len(spans))
django_span = spans[0]
self.assertEqual(django_span.t, 'a3ce929d0e0e4736') # last 16 chars from traceparent trace_id
self.assertEqual(django_span.p, '8357ccd9da194656')
assert ('X-INSTANA-T' in response.headers)
assert (int(response.headers['X-INSTANA-T'], 16))
self.assertEqual(django_span.t, response.headers['X-INSTANA-T'])
assert ('X-INSTANA-S' in response.headers)
assert (int(response.headers['X-INSTANA-S'], 16))
self.assertEqual(django_span.s, response.headers['X-INSTANA-S'])
assert ('X-INSTANA-L' in response.headers)
self.assertEqual('1', response.headers['X-INSTANA-L'])
assert ('traceparent' in response.headers)
self.assertEqual('00-4bf92f3577b34da6a3ce929d0e0e4736-{}-01'.format(django_span.s),
response.headers['traceparent'])
assert ('tracestate' in response.headers)
self.assertEqual(
'in=a3ce929d0e0e4736;{},rojo=00f067aa0ba902b7,congo=t61rcWkgMzE'.format(
django_span.s), response.headers['tracestate'])
server_timing_value = "intid;desc=%s" % django_span.t
assert ('Server-Timing' in response.headers)
self.assertEqual(server_timing_value, response.headers['Server-Timing'])
def test_with_incoming_mixed_case_context(self):
request_headers = dict()
request_headers['X-InSTANa-T'] = '0000000000000001'
request_headers['X-instana-S'] = '0000000000000001'
response = self.http.request('GET', self.live_server_url + '/', headers=request_headers)
assert response
self.assertEqual(200, response.status)
spans = self.recorder.queued_spans()
self.assertEqual(1, len(spans))
django_span = spans[0]
self.assertEqual(django_span.t, '0000000000000001')
self.assertEqual(django_span.p, '0000000000000001')
assert ('X-INSTANA-T' in response.headers)
assert (int(response.headers['X-INSTANA-T'], 16))
self.assertEqual(django_span.t, response.headers['X-INSTANA-T'])
assert ('X-INSTANA-S' in response.headers)
assert (int(response.headers['X-INSTANA-S'], 16))
self.assertEqual(django_span.s, response.headers['X-INSTANA-S'])
assert ('X-INSTANA-L' in response.headers)
self.assertEqual('1', response.headers['X-INSTANA-L'])
server_timing_value = "intid;desc=%s" % django_span.t
assert ('Server-Timing' in response.headers)
self.assertEqual(server_timing_value, response.headers['Server-Timing'])
|
py | b401138ef4a644cb2f903daa683c45ee3009fc38 | from urllib.parse import urlencode
from zerver.lib.test_classes import WebhookTestCase
class PapertrailHookTests(WebhookTestCase):
STREAM_NAME = 'papertrail'
URL_TEMPLATE = "/api/v1/external/papertrail?&api_key={api_key}&stream={stream}"
FIXTURE_DIR_NAME = 'papertrail'
def test_short_message(self) -> None:
expected_topic = "logs"
expected_message = """
[Search for "Important stuff"](https://papertrailapp.com/searches/42) found **2** matches:
May 18 20:30:02 - abc - cron OR server1:
``` quote
message body
```
May 18 20:30:02 - server1 - cron OR server1:
``` quote
A short event
```
""".strip()
self.check_webhook(
"short_post",
expected_topic,
expected_message,
content_type="application/x-www-form-urlencoded",
)
def test_long_message(self) -> None:
expected_topic = "logs"
expected_message = """
[Search for "Important stuff"](https://papertrailapp.com/searches/42) found **5** matches:
May 18 20:30:02 - abc - cron OR server1:
``` quote
message body 1
```
May 18 20:30:02 - abc - cron OR server1:
``` quote
message body 2
```
May 18 20:30:02 - abc - cron OR server1:
``` quote
message body 3
```
May 18 20:30:02 - abc - cron OR server1:
``` quote
message body 4
```
[See more](https://papertrailapp.com/searches/42)
""".strip()
self.check_webhook(
"long_post",
expected_topic,
expected_message,
content_type="application/x-www-form-urlencoded",
)
def test_incorrect_message(self) -> None:
with self.assertRaises(AssertionError) as e:
self.check_webhook(
"incorrect_post", "", "", content_type="application/x-www-form-urlencoded"
)
self.assertIn("events key is missing from payload", e.exception.args[0])
def get_body(self, fixture_name: str) -> str:
# Papertrail webhook sends a POST request with payload parameter
# containing the JSON body. Documented here:
# https://help.papertrailapp.com/kb/how-it-works/web-hooks#encoding
body = self.webhook_fixture_data("papertrail", fixture_name, file_type="json")
return urlencode({'payload': body})
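# Illustrative only (not part of the original test): the resulting request body is a
# single form field named "payload" whose value is the fixture's JSON document, e.g.
#
#     urlencode({'payload': '{"events": []}'})  # roughly 'payload=%7B%22events%22...'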
|
py | b40115295f7e208fd524642b0dd2b7016c4ab972 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class AzureFirewallsOperations:
"""AzureFirewallsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
azure_firewall_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-04-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'azureFirewallName': self._serialize.url("azure_firewall_name", azure_firewall_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/azureFirewalls/{azureFirewallName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
azure_firewall_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes the specified Azure Firewall.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param azure_firewall_name: The name of the Azure Firewall.
:type azure_firewall_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
azure_firewall_name=azure_firewall_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'azureFirewallName': self._serialize.url("azure_firewall_name", azure_firewall_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/azureFirewalls/{azureFirewallName}'} # type: ignore
async def get(
self,
resource_group_name: str,
azure_firewall_name: str,
**kwargs
) -> "_models.AzureFirewall":
"""Gets the specified Azure Firewall.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param azure_firewall_name: The name of the Azure Firewall.
:type azure_firewall_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AzureFirewall, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_04_01.models.AzureFirewall
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AzureFirewall"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-04-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'azureFirewallName': self._serialize.url("azure_firewall_name", azure_firewall_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('AzureFirewall', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/azureFirewalls/{azureFirewallName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
azure_firewall_name: str,
parameters: "_models.AzureFirewall",
**kwargs
) -> "_models.AzureFirewall":
cls = kwargs.pop('cls', None) # type: ClsType["_models.AzureFirewall"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'azureFirewallName': self._serialize.url("azure_firewall_name", azure_firewall_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'AzureFirewall')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('AzureFirewall', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('AzureFirewall', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/azureFirewalls/{azureFirewallName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
azure_firewall_name: str,
parameters: "_models.AzureFirewall",
**kwargs
) -> AsyncLROPoller["_models.AzureFirewall"]:
"""Creates or updates the specified Azure Firewall.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param azure_firewall_name: The name of the Azure Firewall.
:type azure_firewall_name: str
:param parameters: Parameters supplied to the create or update Azure Firewall operation.
:type parameters: ~azure.mgmt.network.v2018_04_01.models.AzureFirewall
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either AzureFirewall or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_04_01.models.AzureFirewall]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.AzureFirewall"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
azure_firewall_name=azure_firewall_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('AzureFirewall', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'azureFirewallName': self._serialize.url("azure_firewall_name", azure_firewall_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/azureFirewalls/{azureFirewallName}'} # type: ignore
def list(
self,
resource_group_name: str,
**kwargs
) -> AsyncIterable["_models.AzureFirewallListResult"]:
"""Lists all Azure Firewalls in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AzureFirewallListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_04_01.models.AzureFirewallListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AzureFirewallListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-04-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('AzureFirewallListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/azureFirewalls'} # type: ignore
def list_all(
self,
**kwargs
) -> AsyncIterable["_models.AzureFirewallListResult"]:
"""Gets all the Azure Firewalls in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AzureFirewallListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_04_01.models.AzureFirewallListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AzureFirewallListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-04-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('AzureFirewallListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/azureFirewalls'} # type: ignore
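# Hypothetical usage sketch (assumption, not part of the generated code): these
# operations are normally reached through the aio NetworkManagementClient rather
# than instantiated directly, e.g.
#
#     async for firewall in network_client.azure_firewalls.list_all():
#         print(firewall.name)
#
# where `network_client` is an authenticated azure.mgmt.network.aio client.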
|
py | b40115381b553a6f7bcad269cf8174b7b404199f | # coding:utf-8
import sys
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QApplication, QWidget, QPushButton
class Window(QWidget):
def __init__(self, parent=None):
super().__init__(parent=parent)
self.resize(200, 200)
        self.btn = QPushButton('Click me', parent=self)
        self.btn.move(18, 75)
        self.btn.clicked.connect(lambda: print('Button pressed'))
with open('resource/push_button.qss', encoding='utf-8') as f:
self.setStyleSheet(f.read())
if __name__ == '__main__':
app = QApplication(sys.argv)
w = Window()
w.show()
sys.exit(app.exec_())
|
py | b401158ffde1b2b6e42e3ce7af6b59ba72c54dc1 | from typing import (
Any,
Dict,
List,
)
from sqlalchemy import false
from galaxy import model
from galaxy.app import MinimalManagerApp
from galaxy.exceptions import (
Conflict,
ObjectAttributeMissingException,
ObjectNotFound,
)
from galaxy.managers.base import decode_id
from galaxy.managers.context import ProvidesAppContext
from galaxy.schema.fields import EncodedDatabaseIdField
from galaxy.web import url_for
class GroupsManager:
"""Interface/service object shared by controllers for interacting with groups."""
def __init__(self, app: MinimalManagerApp) -> None:
self._app = app
def index(self, trans: ProvidesAppContext):
"""
Displays a collection (list) of groups.
"""
rval = []
for group in trans.sa_session.query(model.Group).filter(model.Group.deleted == false()):
item = group.to_dict(value_mapper={"id": trans.security.encode_id})
encoded_id = trans.security.encode_id(group.id)
item["url"] = url_for("group", id=encoded_id)
rval.append(item)
return rval
def create(self, trans: ProvidesAppContext, payload: Dict[str, Any]):
"""
Creates a new group.
"""
name = payload.get("name", None)
if name is None:
raise ObjectAttributeMissingException("Missing required name")
self._check_duplicated_group_name(trans, name)
group = model.Group(name=name)
trans.sa_session.add(group)
encoded_user_ids = payload.get("user_ids", [])
users = self._get_users_by_encoded_ids(trans, encoded_user_ids)
encoded_role_ids = payload.get("role_ids", [])
roles = self._get_roles_by_encoded_ids(trans, encoded_role_ids)
trans.app.security_agent.set_entity_group_associations(groups=[group], roles=roles, users=users)
trans.sa_session.flush()
encoded_id = trans.security.encode_id(group.id)
item = group.to_dict(view="element", value_mapper={"id": trans.security.encode_id})
item["url"] = url_for("group", id=encoded_id)
return [item]
def show(self, trans: ProvidesAppContext, encoded_id: EncodedDatabaseIdField):
"""
Displays information about a group.
"""
group = self._get_group(trans, encoded_id)
item = group.to_dict(view="element", value_mapper={"id": trans.security.encode_id})
item["url"] = url_for("group", id=encoded_id)
item["users_url"] = url_for("group_users", group_id=encoded_id)
item["roles_url"] = url_for("group_roles", group_id=encoded_id)
return item
def update(self, trans: ProvidesAppContext, encoded_id: EncodedDatabaseIdField, payload: Dict[str, Any]):
"""
Modifies a group.
"""
group = self._get_group(trans, encoded_id)
name = payload.get("name", None)
if name:
self._check_duplicated_group_name(trans, name)
group.name = name
trans.sa_session.add(group)
encoded_user_ids = payload.get("user_ids", [])
users = self._get_users_by_encoded_ids(trans, encoded_user_ids)
encoded_role_ids = payload.get("role_ids", [])
roles = self._get_roles_by_encoded_ids(trans, encoded_role_ids)
trans.app.security_agent.set_entity_group_associations(
groups=[group], roles=roles, users=users, delete_existing_assocs=False
)
trans.sa_session.flush()
def _decode_id(self, encoded_id: EncodedDatabaseIdField) -> int:
return decode_id(self._app, encoded_id)
def _decode_ids(self, encoded_ids: List[EncodedDatabaseIdField]) -> List[int]:
return [self._decode_id(encoded_id) for encoded_id in encoded_ids]
def _check_duplicated_group_name(self, trans: ProvidesAppContext, group_name: str) -> None:
if trans.sa_session.query(model.Group).filter(model.Group.name == group_name).first():
raise Conflict(f"A group with name '{group_name}' already exists")
def _get_group(self, trans: ProvidesAppContext, encoded_id: EncodedDatabaseIdField) -> model.Group:
decoded_group_id = self._decode_id(encoded_id)
group = trans.sa_session.query(model.Group).get(decoded_group_id)
if group is None:
raise ObjectNotFound(f"Group with id {encoded_id} was not found.")
return group
def _get_users_by_encoded_ids(
self, trans: ProvidesAppContext, encoded_user_ids: List[EncodedDatabaseIdField]
) -> List[model.User]:
decoded_user_ids = self._decode_ids(encoded_user_ids)
users = trans.sa_session.query(model.User).filter(model.User.table.c.id.in_(decoded_user_ids)).all()
return users
def _get_roles_by_encoded_ids(
self, trans: ProvidesAppContext, encoded_role_ids: List[EncodedDatabaseIdField]
) -> List[model.Role]:
decoded_role_ids = self._decode_ids(encoded_role_ids)
roles = trans.sa_session.query(model.Role).filter(model.Role.id.in_(decoded_role_ids)).all()
return roles
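# Usage sketch (illustrative only; assumes an initialized Galaxy `app` and a
# request-bound `trans` context, neither of which is constructed in this module):
#
#   manager = GroupsManager(app)
#   created = manager.create(trans, {"name": "analysts", "user_ids": [], "role_ids": []})
#   details = manager.show(trans, created[0]["id"])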
|
py | b4011764cecf2d6675b5f358f303a638fee41664 | # Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
from ... import opcodes
from ...core import recursive_tile
from ...core.custom_log import redirect_custom_log
from ...serialization.serializables import KeyField, FunctionField, \
TupleField, DictField, BoolField
from ...utils import enter_current_session, has_unknown_shape, quiet_stdio
from ..operands import DataFrameOperand, DataFrameOperandMixin, OutputType
from ..utils import build_df, build_empty_df, build_series, parse_index, \
validate_output_types
class DataFrameMapChunk(DataFrameOperand, DataFrameOperandMixin):
_op_type_ = opcodes.MAP_CHUNK
_input = KeyField('input')
_func = FunctionField('func')
_args = TupleField('args')
_kwargs = DictField('kwargs')
_with_chunk_index = BoolField('with_chunk_index')
def __init__(self, input=None, func=None, args=None, kwargs=None, output_types=None,
with_chunk_index=None, **kw):
super().__init__(_input=input, _func=func, _args=args, _kwargs=kwargs,
_output_types=output_types, _with_chunk_index=with_chunk_index, **kw)
@property
def input(self):
return self._input
@property
def func(self):
return self._func
@property
def args(self):
return self._args
@property
def kwargs(self):
return self._kwargs
@property
def with_chunk_index(self):
return self._with_chunk_index
def _set_inputs(self, inputs):
super()._set_inputs(inputs)
self._input = self._inputs[0]
def __call__(self, df_or_series, index=None, dtypes=None):
test_obj = build_df(df_or_series, size=2) \
if df_or_series.ndim == 2 else \
build_series(df_or_series, size=2, name=df_or_series.name)
output_type = self._output_types[0] if self.output_types else None
# try run to infer meta
try:
kwargs = self.kwargs or dict()
if self.with_chunk_index:
kwargs['chunk_index'] = (0,) * df_or_series.ndim
with np.errstate(all='ignore'), quiet_stdio():
obj = self._func(test_obj, *self._args, **kwargs)
except: # noqa: E722 # nosec
if df_or_series.ndim == 1 or output_type == OutputType.series:
obj = pd.Series([], dtype=np.dtype(object))
elif output_type == OutputType.dataframe and dtypes is not None:
obj = build_empty_df(dtypes)
else:
raise TypeError('Cannot determine `output_type`, '
'you have to specify it as `dataframe` or `series`, '
'for dataframe, `dtypes` is required as well '
'if output_type=\'dataframe\'')
if getattr(obj, 'ndim', 0) == 1 or output_type == OutputType.series:
shape = self._kwargs.pop('shape', None)
if shape is None:
# series
if obj.shape == test_obj.shape:
shape = df_or_series.shape
else:
shape = (np.nan,)
if index is None:
index = obj.index
index_value = parse_index(index, df_or_series,
self._func, self._args, self._kwargs)
return self.new_series([df_or_series], dtype=obj.dtype,
shape=shape, index_value=index_value,
name=obj.name)
else:
dtypes = dtypes if dtypes is not None else obj.dtypes
# dataframe
if obj.shape == test_obj.shape:
shape = (df_or_series.shape[0], len(dtypes))
else:
shape = (np.nan, len(dtypes))
columns_value = parse_index(dtypes.index, store_data=True)
if index is None:
index = obj.index
index_value = parse_index(index, df_or_series,
self._func, self._args, self._kwargs)
return self.new_dataframe([df_or_series], shape=shape,
dtypes=dtypes, index_value=index_value,
columns_value=columns_value)
@classmethod
def tile(cls, op: "DataFrameMapChunk"):
inp = op.input
out = op.outputs[0]
if inp.ndim == 2 and inp.chunk_shape[1] > 1:
if has_unknown_shape(inp):
yield
# if input is a DataFrame, make sure 1 chunk on axis columns
inp = yield from recursive_tile(inp.rechunk({1: inp.shape[1]}))
out_chunks = []
nsplits = [[]] if out.ndim == 1 else [[], [out.shape[1]]]
for chunk in inp.chunks:
chunk_op = op.copy().reset_key()
chunk_op.tileable_op_key = op.key
if op.output_types[0] == OutputType.dataframe:
if np.isnan(out.shape[0]):
shape = (np.nan, out.shape[1])
else:
shape = (chunk.shape[0], out.shape[1])
index_value = parse_index(out.index_value.to_pandas(), chunk,
op.func, op.args, op.kwargs)
out_chunk = chunk_op.new_chunk([chunk], shape=shape,
dtypes=out.dtypes,
index_value=index_value,
columns_value=out.columns_value,
index=(chunk.index[0], 0))
out_chunks.append(out_chunk)
nsplits[0].append(out_chunk.shape[0])
else:
if np.isnan(out.shape[0]):
shape = (np.nan,)
else:
shape = (chunk.shape[0],)
index_value = parse_index(out.index_value.to_pandas(), chunk,
op.func, op.args, op.kwargs)
out_chunk = chunk_op.new_chunk([chunk], shape=shape,
index_value=index_value,
name=out.name,
dtype=out.dtype,
index=(chunk.index[0],))
out_chunks.append(out_chunk)
nsplits[0].append(out_chunk.shape[0])
params = out.params
params['nsplits'] = tuple(tuple(ns) for ns in nsplits)
params['chunks'] = out_chunks
new_op = op.copy()
return new_op.new_tileables(op.inputs, kws=[params])
@classmethod
@redirect_custom_log
@enter_current_session
def execute(cls, ctx, op: "DataFrameMapChunk"):
inp = ctx[op.input.key]
out_chunk = op.outputs[0]
kwargs = op.kwargs or dict()
if op.with_chunk_index:
kwargs['chunk_index'] = out_chunk.index
ctx[out_chunk.key] = op.func(inp, *op.args, **kwargs)
def map_chunk(df_or_series, func, args=(), **kwargs):
"""
Apply function to each chunk.
Parameters
----------
func : function
Function to apply to each chunk.
args : tuple
Positional arguments to pass to func in addition to the array/series.
**kwargs
        Additional keyword arguments to pass as keyword arguments to func.
Returns
-------
Series or DataFrame
Result of applying ``func`` to each chunk of the DataFrame or Series.
See Also
--------
DataFrame.apply : Perform any type of operations.
Examples
--------
>>> import mars.dataframe as md
>>> df = md.DataFrame([[4, 9]] * 3, columns=['A', 'B'])
>>> df.execute()
A B
0 4 9
1 4 9
2 4 9
Output type including Series or DataFrame will be auto inferred.
>>> df.map_chunk(lambda c: c['A'] + c['B']).execute()
0 13
1 13
2 13
dtype: int64
You can specify ``output_type`` by yourself if auto infer failed.
>>> import pandas as pd
>>> import numpy as np
>>> df['c'] = ['s1', 's2', 's3']
>>> df.map_chunk(lambda c: pd.concat([c['A'], c['c'].str.slice(1).astype(int)], axis=1)).execute()
Traceback (most recent call last):
TypeError: Cannot determine `output_type`, you have to specify it as `dataframe` or `series`...
>>> df.map_chunk(lambda c: pd.concat([c['A'], c['c'].str.slice(1).astype(int)], axis=1),
>>> output_type='dataframe', dtypes=pd.Series([np.dtype(object), np.dtype(int)])).execute()
A c
0 4 1
1 4 2
2 4 3
"""
output_type = kwargs.pop('output_type', None)
output_types = kwargs.pop('output_types', None)
object_type = kwargs.pop('object_type', None)
output_types = validate_output_types(
output_type=output_type, output_types=output_types, object_type=object_type)
output_type = output_types[0] if output_types else None
if output_type:
output_types = [output_type]
index = kwargs.pop('index', None)
dtypes = kwargs.pop('dtypes', None)
with_chunk_index = kwargs.pop('with_chunk_index', False)
op = DataFrameMapChunk(input=df_or_series, func=func, args=args, kwargs=kwargs,
output_types=output_types, with_chunk_index=with_chunk_index)
return op(df_or_series, index=index, dtypes=dtypes)
|
py | b4011864caaca67c028eb8766aa710c92ccd51ba | """
The ``train`` subcommand can be used to train a model.
It requires a configuration file and a directory in
which to write the results.
.. code-block:: bash
$ allennlp train --help
usage: allennlp train [-h] -s SERIALIZATION_DIR [-r] [-f] [-o OVERRIDES]
[--file-friendly-logging]
[--include-package INCLUDE_PACKAGE]
param_path
Train the specified model on the specified dataset.
positional arguments:
param_path path to parameter file describing the model to be
trained
optional arguments:
-h, --help show this help message and exit
-s SERIALIZATION_DIR, --serialization-dir SERIALIZATION_DIR
directory in which to save the model and its logs
-r, --recover recover training from the state in serialization_dir
-f, --force overwrite the output directory if it exists
-o OVERRIDES, --overrides OVERRIDES
a JSON structure used to override the experiment
configuration
--file-friendly-logging
outputs tqdm status on separate lines and slows tqdm
refresh rate
--include-package INCLUDE_PACKAGE
additional packages to include
"""
from typing import Dict, Iterable
import argparse
import logging
import os
import re
import shutil
import torch
from allennlp.commands.evaluate import evaluate
from allennlp.commands.subcommand import Subcommand
from allennlp.common.checks import ConfigurationError, check_for_gpu
from allennlp.common import Params
from allennlp.common.util import prepare_environment, prepare_global_logging, \
get_frozen_and_tunable_parameter_names, dump_metrics
from allennlp.data import Vocabulary
from allennlp.data.instance import Instance
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.iterators.data_iterator import DataIterator
from allennlp.models.archival import archive_model, CONFIG_NAME
from allennlp.models.model import Model, _DEFAULT_WEIGHTS
from allennlp.training.trainer import Trainer
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class Train(Subcommand):
def add_subparser(self, name: str, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
# pylint: disable=protected-access
description = '''Train the specified model on the specified dataset.'''
subparser = parser.add_parser(name, description=description, help='Train a model')
subparser.add_argument('param_path',
type=str,
help='path to parameter file describing the model to be trained')
subparser.add_argument('-s', '--serialization-dir',
required=True,
type=str,
help='directory in which to save the model and its logs')
subparser.add_argument('-r', '--recover',
action='store_true',
default=False,
help='recover training from the state in serialization_dir')
subparser.add_argument('-f', '--force',
action='store_true',
required=False,
help='overwrite the output directory if it exists')
subparser.add_argument('-o', '--overrides',
type=str,
default="",
help='a JSON structure used to override the experiment configuration')
subparser.add_argument('--file-friendly-logging',
action='store_true',
default=False,
help='outputs tqdm status on separate lines and slows tqdm refresh rate')
subparser.set_defaults(func=train_model_from_args)
return subparser
def train_model_from_args(args: argparse.Namespace):
"""
Just converts from an ``argparse.Namespace`` object to string paths.
"""
train_model_from_file(args.param_path,
args.serialization_dir,
args.overrides,
args.file_friendly_logging,
args.recover,
args.force)
def train_model_from_file(parameter_filename: str,
serialization_dir: str,
overrides: str = "",
file_friendly_logging: bool = False,
recover: bool = False,
force: bool = False) -> Model:
"""
A wrapper around :func:`train_model` which loads the params from a file.
Parameters
----------
parameter_filename : ``str``
A json parameter file specifying an AllenNLP experiment.
serialization_dir : ``str``
The directory in which to save results and logs. We just pass this along to
:func:`train_model`.
overrides : ``str``
A JSON string that we will use to override values in the input parameter file.
file_friendly_logging : ``bool``, optional (default=False)
If ``True``, we make our output more friendly to saved model files. We just pass this
along to :func:`train_model`.
    recover : ``bool``, optional (default=False)
If ``True``, we will try to recover a training run from an existing serialization
directory. This is only intended for use when something actually crashed during the middle
of a run. For continuing training a model on new data, see the ``fine-tune`` command.
force : ``bool``, optional (default=False)
If ``True``, we will overwrite the serialization directory if it already exists.
"""
# Load the experiment config from a file and pass it to ``train_model``.
params = Params.from_file(parameter_filename, overrides)
return train_model(params, serialization_dir, file_friendly_logging, recover, force)
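# Programmatic usage sketch (illustrative; the paths below are placeholders, not
# files shipped with this module):
#
#   model = train_model_from_file('experiments/my_experiment.json',
#                                 '/tmp/my_serialization_dir',
#                                 force=True)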
def datasets_from_params(params: Params) -> Dict[str, Iterable[Instance]]:
"""
Load all the datasets specified by the config.
"""
dataset_reader = DatasetReader.from_params(params.pop('dataset_reader'))
validation_dataset_reader_params = params.pop("validation_dataset_reader", None)
validation_and_test_dataset_reader: DatasetReader = dataset_reader
if validation_dataset_reader_params is not None:
logger.info("Using a separate dataset reader to load validation and test data.")
validation_and_test_dataset_reader = DatasetReader.from_params(validation_dataset_reader_params)
train_data_path = params.pop('train_data_path')
logger.info("Reading training data from %s", train_data_path)
train_data = dataset_reader.read(train_data_path)
datasets: Dict[str, Iterable[Instance]] = {"train": train_data}
validation_data_path = params.pop('validation_data_path', None)
if validation_data_path is not None:
logger.info("Reading validation data from %s", validation_data_path)
validation_data = validation_and_test_dataset_reader.read(validation_data_path)
datasets["validation"] = validation_data
test_data_path = params.pop("test_data_path", None)
if test_data_path is not None:
logger.info("Reading test data from %s", test_data_path)
test_data = validation_and_test_dataset_reader.read(test_data_path)
datasets["test"] = test_data
return datasets
def create_serialization_dir(
params: Params,
serialization_dir: str,
recover: bool,
force: bool) -> None:
"""
This function creates the serialization directory if it doesn't exist. If it already exists
and is non-empty, then it verifies that we're recovering from a training with an identical configuration.
Parameters
----------
params: ``Params``
A parameter object specifying an AllenNLP Experiment.
serialization_dir: ``str``
The directory in which to save results and logs.
recover: ``bool``
If ``True``, we will try to recover from an existing serialization directory, and crash if
the directory doesn't exist, or doesn't match the configuration we're given.
force: ``bool``
If ``True``, we will overwrite the serialization directory if it already exists.
"""
if recover and force:
raise ConfigurationError("Illegal arguments: both force and recover are true.")
if os.path.exists(serialization_dir) and force:
shutil.rmtree(serialization_dir)
if os.path.exists(serialization_dir) and os.listdir(serialization_dir):
if not recover:
raise ConfigurationError(f"Serialization directory ({serialization_dir}) already exists and is "
f"not empty. Specify --recover to recover training from existing output.")
logger.info(f"Recovering from prior training at {serialization_dir}.")
recovered_config_file = os.path.join(serialization_dir, CONFIG_NAME)
if not os.path.exists(recovered_config_file):
raise ConfigurationError("The serialization directory already exists but doesn't "
"contain a config.json. You probably gave the wrong directory.")
else:
loaded_params = Params.from_file(recovered_config_file)
# Check whether any of the training configuration differs from the configuration we are
# resuming. If so, warn the user that training may fail.
fail = False
flat_params = params.as_flat_dict()
flat_loaded = loaded_params.as_flat_dict()
for key in flat_params.keys() - flat_loaded.keys():
logger.error(f"Key '{key}' found in training configuration but not in the serialization "
f"directory we're recovering from.")
fail = True
for key in flat_loaded.keys() - flat_params.keys():
logger.error(f"Key '{key}' found in the serialization directory we're recovering from "
f"but not in the training config.")
fail = True
for key in flat_params.keys():
if flat_params.get(key, None) != flat_loaded.get(key, None):
logger.error(f"Value for '{key}' in training configuration does not match that the value in "
f"the serialization directory we're recovering from: "
f"{flat_params[key]} != {flat_loaded[key]}")
fail = True
if fail:
raise ConfigurationError("Training configuration does not match the configuration we're "
"recovering from.")
else:
if recover:
raise ConfigurationError(f"--recover specified but serialization_dir ({serialization_dir}) "
"does not exist. There is nothing to recover from.")
os.makedirs(serialization_dir, exist_ok=True)
def train_model(params: Params,
serialization_dir: str,
file_friendly_logging: bool = False,
recover: bool = False,
force: bool = False) -> Model:
"""
Trains the model specified in the given :class:`Params` object, using the data and training
parameters also specified in that object, and saves the results in ``serialization_dir``.
Parameters
----------
params : ``Params``
A parameter object specifying an AllenNLP Experiment.
serialization_dir : ``str``
The directory in which to save results and logs.
file_friendly_logging : ``bool``, optional (default=False)
If ``True``, we add newlines to tqdm output, even on an interactive terminal, and we slow
down tqdm's output to only once every 10 seconds.
recover : ``bool``, optional (default=False)
If ``True``, we will try to recover a training run from an existing serialization
directory. This is only intended for use when something actually crashed during the middle
of a run. For continuing training a model on new data, see the ``fine-tune`` command.
force : ``bool``, optional (default=False)
If ``True``, we will overwrite the serialization directory if it already exists.
Returns
-------
best_model: ``Model``
The model with the best epoch weights.
"""
prepare_environment(params)
create_serialization_dir(params, serialization_dir, recover, force)
prepare_global_logging(serialization_dir, file_friendly_logging)
cuda_device = params.params.get('trainer').get('cuda_device', -1)
if isinstance(cuda_device, list):
for device in cuda_device:
check_for_gpu(device)
else:
check_for_gpu(cuda_device)
params.to_file(os.path.join(serialization_dir, CONFIG_NAME))
all_datasets = datasets_from_params(params)
datasets_for_vocab_creation = set(params.pop("datasets_for_vocab_creation", all_datasets))
for dataset in datasets_for_vocab_creation:
if dataset not in all_datasets:
raise ConfigurationError(f"invalid 'dataset_for_vocab_creation' {dataset}")
logger.info("From dataset instances, %s will be considered for vocabulary creation.",
", ".join(datasets_for_vocab_creation))
if recover and os.path.exists(os.path.join(serialization_dir, "vocabulary")):
vocab = Vocabulary.from_files(os.path.join(serialization_dir, "vocabulary"))
else:
vocab = Vocabulary.from_params(
params.pop("vocabulary", {}),
(instance for key, dataset in all_datasets.items()
for instance in dataset
if key in datasets_for_vocab_creation)
)
model = Model.from_params(vocab=vocab, params=params.pop('model'))
# Initializing the model can have side effect of expanding the vocabulary
vocab.save_to_files(os.path.join(serialization_dir, "vocabulary"))
iterator = DataIterator.from_params(params.pop("iterator"))
iterator.index_with(vocab)
validation_iterator_params = params.pop("validation_iterator", None)
if validation_iterator_params:
validation_iterator = DataIterator.from_params(validation_iterator_params)
validation_iterator.index_with(vocab)
else:
validation_iterator = None
train_data = all_datasets['train']
validation_data = all_datasets.get('validation')
test_data = all_datasets.get('test')
trainer_params = params.pop("trainer")
no_grad_regexes = trainer_params.pop("no_grad", ())
for name, parameter in model.named_parameters():
if any(re.search(regex, name) for regex in no_grad_regexes):
parameter.requires_grad_(False)
frozen_parameter_names, tunable_parameter_names = \
get_frozen_and_tunable_parameter_names(model)
logger.info("Following parameters are Frozen (without gradient):")
for name in frozen_parameter_names:
logger.info(name)
logger.info("Following parameters are Tunable (with gradient):")
for name in tunable_parameter_names:
logger.info(name)
trainer_choice = trainer_params.pop_choice("type",
Trainer.list_available(),
default_to_first_choice=True)
trainer = Trainer.by_name(trainer_choice).from_params(model=model,
serialization_dir=serialization_dir,
iterator=iterator,
train_data=train_data,
validation_data=validation_data,
params=trainer_params,
validation_iterator=validation_iterator)
evaluate_on_test = params.pop_bool("evaluate_on_test", False)
params.assert_empty('base train command')
try:
metrics = trainer.train()
except KeyboardInterrupt:
# if we have completed an epoch, try to create a model archive.
if os.path.exists(os.path.join(serialization_dir, _DEFAULT_WEIGHTS)):
logging.info("Training interrupted by the user. Attempting to create "
"a model archive using the current best epoch weights.")
archive_model(serialization_dir, files_to_archive=params.files_to_archive)
raise
# Now tar up results
archive_model(serialization_dir, files_to_archive=params.files_to_archive)
logger.info("Loading the best epoch weights.")
best_model_state_path = os.path.join(serialization_dir, 'best.th')
best_model_state = torch.load(best_model_state_path)
best_model = model
best_model.load_state_dict(best_model_state)
if test_data and evaluate_on_test:
logger.info("The model will be evaluated using the best epoch weights.")
test_metrics = evaluate(
best_model, test_data, validation_iterator or iterator,
cuda_device=trainer._cuda_devices[0] # pylint: disable=protected-access
)
for key, value in test_metrics.items():
metrics["test_" + key] = value
elif test_data:
logger.info("To evaluate on the test set after training, pass the "
"'evaluate_on_test' flag, or use the 'allennlp evaluate' command.")
dump_metrics(os.path.join(serialization_dir, "metrics.json"), metrics, log=True)
return best_model
|
py | b40119d5c661ad46c03a29848e6de492afee196b | from django.db import models
# Create your models here.
class Photo(models.Model):
image = models.ImageField(upload_to='upload_image')
date = models.DateTimeField(auto_now_add=True)
def __str__(self):
return str(self.image.url) + ' uploaded : ' + str(self.date)
class Meta:
verbose_name_plural = "Uploaded images"
class CnnModelTable(models.Model):
name = models.CharField(max_length=30)
Top1Accuracy = models.CharField(max_length=10)
Top5Accuracy = models.CharField(max_length=10)
Parameters = models.CharField(max_length=20)
Depth = models.CharField(max_length=5)
def __str__(self):
return self.name
class Meta:
verbose_name_plural = "CNN MODELS"
class CNNModels(models.Model):
MODEL_CHOICES = (
('densenet', 'tf.keras.applications.densenet'),
('inception_resnet_v2', 'tf.keras.applications.inception_resnet_v2'),
('inception_v3', 'tf.keras.applications.inception_v3'),
('mobilenet', 'tf.keras.applications.mobilenet'),
('mobilenet_v2', 'tf.keras.applications.mobilenet_v2'),
('nasnet', 'tf.keras.applications.nasnet'),
('resnet50', 'tf.keras.applications.resnet50'),
('vgg16', 'tf.keras.applications.vgg16'),
('vgg19', 'tf.keras.applications.vgg19'),
('xception', 'tf.keras.applications.xception'),
)
CNNModels = models.CharField(max_length=1000, choices=MODEL_CHOICES)
class Meta:
verbose_name_plural = "CNNModelsChoice" |
py | b4011af5c15c563941c3f66b550f2f2b975af77b | from __future__ import absolute_import, division, print_function |
py | b4011c7d8473f509ed124080d3cd128e4316cb35 | """ Django support. """
from __future__ import absolute_import
import datetime
import decimal
from django.db import models
from .. import generators as g, mix_types as t, six
from ..main import (
Field, Relation, NO_VALUE,
TypeMixerMeta as BaseTypeMixerMeta,
TypeMixer as BaseTypeMixer,
GenFactory as BaseFactory,
Mixer as BaseMixer)
class GenFactory(BaseFactory):
""" Map a django classes to simple types. """
types = {
(models.CharField, models.SlugField): str,
models.TextField: t.Text,
models.BooleanField: bool,
models.BigIntegerField: t.BigInteger,
(models.AutoField, models.IntegerField): int,
models.PositiveIntegerField: t.PositiveInteger,
models.PositiveSmallIntegerField: t.PositiveSmallInteger,
models.SmallIntegerField: t.SmallInteger,
models.DateField: datetime.date,
models.DateTimeField: datetime.datetime,
models.TimeField: datetime.time,
models.DecimalField: decimal.Decimal,
models.FloatField: float,
models.EmailField: t.EmailString,
models.IPAddressField: t.IP4String,
models.URLField: t.URL,
}
class TypeMixerMeta(BaseTypeMixerMeta):
""" Load django models from strings. """
def __load_cls(cls, cls_type):
if isinstance(cls_type, six.string_types):
assert '.' in cls_type, ("'model_class' must be either a model"
" or a model name in the format"
" app_label.model_name")
app_label, model_name = cls_type.split(".")
cls_type = models.get_model(app_label, model_name)
return cls_type
class TypeMixer(six.with_metaclass(TypeMixerMeta, BaseTypeMixer)):
""" TypeMixer for Django. """
__metaclass__ = TypeMixerMeta
factory = GenFactory
def set_value(self, target, field_name, field_value, finaly=False):
""" Set value to generated instance.
:return : None or (name, value) for later use
"""
field = self.__fields.get(field_name)
if field and field.scheme in self.__scheme._meta.local_many_to_many:
if not isinstance(field_value, (list, tuple)):
field_value = [field_value]
return field_name, field_value
return super(TypeMixer, self).set_value(
target, field_name, field_value, finaly
)
@staticmethod
def get_default(field, target):
""" Get default value from field.
:return value: A default value or NO_VALUE
"""
if not field.scheme.has_default():
return NO_VALUE
return field.scheme.get_default()
def gen_select(self, target, field_name, field_value):
""" Select exists value from database.
:param target: Target for generate value.
:param field_name: Name of field for generation.
:return : None or (name, value) for later use
"""
field = self.__fields.get(field_name)
if field:
try:
return self.set_value(
target, field_name,
field.scheme.rel.to.objects
.filter(**field_value.kwargs)
.order_by('?')[0]
)
except Exception:
raise Exception(
"Cannot find a value for the field: '{0}'".format(
field_name
))
return super(TypeMixer, self).gen_select(
target, field_name, field_value)
def gen_relation(self, target, field_name, relation, force=False):
""" Generate a related relation by `relation`.
:param target: Target for generate value.
:param field_name: Name of relation for generation.
:param relation: Instance of :class:`Relation`
:return : None or (name, value) for later use
"""
if (
not relation.scheme
or relation.scheme.null
or relation.scheme.blank
or relation.scheme.auto_created
) and not relation.params and not force:
return None
rel = relation.scheme
if not rel:
raise ValueError('Unknown relation: %s' % field_name)
new_scheme = rel.related.parent_model
value = target
if new_scheme != self.__scheme:
value = self.__mixer and self.__mixer.blend(
new_scheme, **relation.params
) or TypeMixer(
new_scheme, factory=self.__factory, fake=self.fake,
).blend(**relation.params)
return self.set_value(target, rel.name, value)
def make_generator(self, field, fname=None, fake=False):
""" Make values generator for field.
:param field: A mixer field
:param field_name: Field name
:param fake: Force fake data
:return generator:
"""
fcls = type(field)
stype = self.__factory.cls_to_simple(fcls)
kwargs = dict()
if fcls is models.CommaSeparatedIntegerField:
return g.gen_choices(
[1, 2, 3, 4, 5, 6, 7, 8, 9, 0], field.max_length)
if field and field.choices:
choices, _ = list(zip(*field.choices))
return g.gen_choice(choices)
if stype is str:
kwargs['length'] = field.max_length
elif stype is decimal.Decimal:
kwargs['i'] = field.max_digits - field.decimal_places
kwargs['d'] = field.decimal_places
gen_maker = self.__factory.gen_maker(fcls, fname, fake)
return gen_maker(**kwargs)
@staticmethod
def is_unique(field):
""" Return True is field's value should be a unique.
:return bool:
"""
return field.scheme.unique
@staticmethod
def is_required(field):
""" Return True is field's value should be defined.
:return bool:
"""
return not (field.scheme.null and field.scheme.blank)
def __load_fields(self):
for field in self.__scheme._meta.fields:
if isinstance(field, models.AutoField)\
and self.__mixer and self.__mixer.commit:
continue
if isinstance(field, models.ForeignKey):
yield field.name, Relation(field, field.name)
continue
yield field.name, Field(field, field.name)
for field in self.__scheme._meta.local_many_to_many:
yield field.name, Relation(field, field.name)
class Mixer(BaseMixer):
""" Integration with Django. """
type_mixer_cls = TypeMixer
def __init__(self, commit=True, **params):
super(Mixer, self).__init__(**params)
self.commit = commit
def post_generate(self, result):
""" Save objects in db.
:return value: A generated value
"""
if self.commit:
result.save()
return result
# Default mixer
mixer = Mixer()
# lint_ignore=W0212,W0201,E1002,F0401
|
py | b4011d28bb7d0f496cf89ea9e807e4b039a19be5 | import pandas as pd
import datetime
import requests
from io import BytesIO, StringIO
import os
import click
import re
# the data we already cache in the package
in_package_data = range(2002, 2017)
DONWLOAD_URL = "http://yield.chinabond.com.cn/cbweb-mn/yc/downYearBzqx?year=%s&&wrjxCBFlag=0&&zblx=txy&ycDefId=%s"
YIELD_MAIN_URL = 'http://yield.chinabond.com.cn/cbweb-mn/yield_main'
def get_data():
"""
    Everybody needs to pull the new data every day;
    if you want to call this function multiple times intra-day, you should cache the result yourself.
    The data is combined from 2 parts:
    1 the xlsx files shipped inside the package
    2 newly fetched data not yet in the package, which we fetch on the fly
"""
cur_year = datetime.datetime.now().year
last_in_package_data = max(in_package_data)
# download new data
to_downloads = range(last_in_package_data + 1, cur_year + 1)
    # first, get ycDefIds params
response = requests.get(YIELD_MAIN_URL)
matchs = re.search(r'\?ycDefIds=(.*?)\&', response.text)
ycdefids = matchs.group(1)
assert (ycdefids is not None) # please contact me on github if fails
fetched_data = []
for year in to_downloads:
print('Downloading from ' + DONWLOAD_URL % (year, ycdefids))
response = requests.get(DONWLOAD_URL % (year, ycdefids))
fetched_data.append(BytesIO(response.content))
# combine all data
dfs = []
basedir = os.path.join(os.path.dirname(__file__), "xlsx")
for i in in_package_data:
dfs.append(pd.read_excel(os.path.join(basedir, "%d.xlsx" % i)))
for memfile in fetched_data:
dfs.append(pd.read_excel(memfile))
df = pd.concat(dfs)
return df
def get_pivot_data():
"""
pivot data
"""
df = get_data()
df.columns = ['date','period1','period','return']
return df.pivot(index='date', columns='period', values='return')
def get_zipline_format():
pivot_data = get_pivot_data()
pivot_data.columns.name = None
pivot_data = pivot_data.reset_index()
all_china_bond = pivot_data[[0.08, 0.25, 0.5, 1.0, 2.0, 3.0, 5.0, 7.0, 10.0, 20.0, 30.0]]
all_china_bond.columns =['1month', '3month','6month', '1year', '2year', '3year', '5year', '7year', '10year', '20year', '30year']
all_china_bond.index = pd.to_datetime(pivot_data['date'])
return all_china_bond
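# Usage sketch (illustrative): build the combined curve in memory before dumping it.
#
#   pivot = get_pivot_data()        # index: date, columns: maturity in years
#   curve = get_zipline_format()    # zipline-style columns ('1month' ... '30year')
#   print(curve['10year'].tail())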
@click.command()
@click.option("-f", '--fileformat', type=str, default='zipline',
help='zipline (default) - zipline style file , all - dump all file')
@click.argument("path_to_save")
def save_zipline_file(fileformat, path_to_save = None):
"""
Save china treasury curve data to file
"""
if path_to_save is None:
raise Exception("please provie path")
if fileformat not in ('zipline', 'all'):
raise Exception("filetype must in ('zipline', 'all') ")
click.echo("saving data")
if fileformat == 'zipline':
get_zipline_format().to_csv(path_to_save)
else:
get_pivot_data().to_csv(path_to_save)
click.echo("done")
if __name__ == '__main__':
save_zipline_file()
|
py | b4011da4afbf5974b75e627a7ae7e118bc061929 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from ... import opcodes as OperandDef
from ...serialization.serializables import KeyField, DataTypeField, StringField
from ..array_utils import as_same_device, device
from ..operands import TensorHasInput, TensorOperandMixin
from ..utils import get_order
class TensorAstype(TensorHasInput, TensorOperandMixin):
_op_type_ = OperandDef.ASTYPE
_input = KeyField('input')
_dtype = DataTypeField('dtype')
_order = StringField('order')
_casting = StringField('casting')
def __init__(self, dtype=None, order=None, casting=None, sparse=False, **kw):
super().__init__(_dtype=dtype, _order=order, _casting=casting,
sparse=sparse, **kw)
@property
def dtype(self):
return self._dtype
@property
def order(self):
return self._order
@property
def casting(self):
return self._casting
def _set_inputs(self, inputs):
super()._set_inputs(inputs)
self._input = self._inputs[0]
def __call__(self, tensor, order=None):
return self.new_tensor([tensor], tensor.shape, order=order)
@classmethod
def tile(cls, op):
in_tensor = op.input
out_tensor = op.outputs[0]
out_chunks = []
for c in in_tensor.chunks:
chunk_op = op.copy().reset_key()
chunk = chunk_op.new_chunk([c], shape=c.shape, index=c.index,
order=out_tensor.order)
out_chunks.append(chunk)
new_op = op.copy()
return new_op.new_tensors(op.inputs, nsplits=in_tensor.nsplits,
chunks=out_chunks, kws=[out_tensor.params])
@classmethod
def execute(cls, ctx, op):
chunk = op.outputs[0]
(x,), device_id, xp = as_same_device(
[ctx[c.key] for c in op.inputs], device=op.device, ret_extra=True)
with device(device_id):
if op.sparse:
ctx[chunk.key] = x.astype(op.dtype)
else:
if xp is np:
ctx[chunk.key] = x.astype(op.dtype, order=op.order,
casting=op.casting)
else: # pragma: no cover
# cupy does not support casting
ctx[chunk.key] = x.astype(op.dtype, order=op.order)
def _astype(tensor, dtype, order='K', casting='unsafe', copy=True):
"""
Copy of the tensor, cast to a specified type.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur. Defaults to 'unsafe'
for backwards compatibility.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout order of the result.
'C' means C order, 'F' means Fortran order, 'A'
means 'F' order if all the arrays are Fortran contiguous,
'C' order otherwise, and 'K' means as close to the
order the array elements appear in memory as possible.
Default is 'K'.
copy : bool, optional
By default, astype always returns a newly allocated array. If this
is set to false, and the `dtype`, `order`, and `subok`
requirements are satisfied, the input array is returned instead
of a copy.
Returns
-------
arr_t : Tensor
Unless `copy` is False and the other conditions for returning the input
array are satisfied (see description for `copy` input parameter), `arr_t`
is a new tensor of the same shape as the input array, with dtype, order
given by `dtype`, `order`.
Notes
-----
astype method returns an error if the string
dtype to cast to is not long enough in 'safe' casting mode to hold the max
value of integer/float array that is being casted. Previously the casting
was allowed even if the result was truncated.
Raises
------
ComplexWarning
When casting from complex to float or int. To avoid this,
one should use ``a.real.astype(t)``.
Examples
--------
>>> import mars.tensor as mt
>>> x = mt.array([1, 2, 2.5])
>>> x.execute()
array([ 1. , 2. , 2.5])
>>> x.astype(int).execute()
array([1, 2, 2])
"""
dtype = np.dtype(dtype)
tensor_order = get_order(order, tensor.order)
if tensor.dtype == dtype and tensor.order == tensor_order:
return tensor if not copy else tensor.copy(order=order)
elif not np.can_cast(tensor.dtype, dtype, casting=casting):
raise TypeError(f'Cannot cast array from {tensor.dtype!r} to {dtype!r} '
f'according to the rule {casting}')
op = TensorAstype(dtype=dtype, order=order, casting=casting, sparse=tensor.issparse())
return op(tensor, order=tensor_order)
|
py | b4011dcc102a4aca04be70a18bf11c2ba52d2a8d | name=input('what is your name?')
print('Hello, ' + name.upper())
print('Hey, ' + name.lower())
print('I wanna write your name one more time ' + name.capitalize())
print('Actually I want 1 more time haha, so hey ' + name.swapcase())
|
py | b4011dec6a5f07f28cf71cf617d68cb7cf918a3a | from django.shortcuts import render, redirect, reverse, render_to_response
from django.http import HttpResponseRedirect,HttpResponse
from .models import TaskForm, Task, UsernameForm, Username
from django.template import RequestContext
# Create your views here.
def tasks(request):
if request.method == 'POST':
        # this is where the POST request is handled
form = TaskForm(request.POST or None)
if form.is_valid():
user = Username.objects.get(username=request.COOKIES.get('username'))
temp = form.save(commit=False)
temp.username = user
temp.save()
form = TaskForm()
tasks = Task.objects.filter(username__username=request.COOKIES.get('username')).order_by('priority')
return render(request, 'tasks.html', {'form': form, 'tasks': tasks, 'user': user})
else:
if 'username' not in request.COOKIES:
from django.utils.crypto import get_random_string
unique_id = get_random_string(length=32)
username = Username()
username.username = unique_id
username.save()
response = redirect(reverse('tasks'))
# 604800s = 1 week
response.set_cookie('username', username, max_age=604800)
return response
        # this is where GET requests are handled
form = TaskForm()
tasks = Task.objects.filter(username__username=request.COOKIES.get('username')).order_by('priority')
user = Username.objects.filter(username=request.COOKIES.get('username'))
return render(request, 'tasks.html', {'form': form, 'tasks': tasks, 'user': user})
def check_user_validity(request):
'''
    Check if such a user exists in the database
'''
try:
return Username.objects.get(username__exact=request.COOKIES["username"])
except Exception:
return False
def delete(request, id):
if 'username' in request.COOKIES and check_user_validity(request):
        # now check if the user trying to access this task actually created it
Task.objects.filter(id=id,username=Username.objects.get(username__exact=request.COOKIES["username"])).delete()
return redirect(reverse('tasks'))
else:
return HttpResponse("You are not allowed to access this resource")
def complete(request, id):
if 'username' in request.COOKIES and check_user_validity(request):
try:
task=Task.objects.get(id=id,username=Username.objects.get(username__exact=request.COOKIES["username"]))
if task.complete:
task.complete = 0
else:
task.complete = 1
task.save()
return redirect('/')
except Exception:
return HttpResponse("Sorry You are not allowed to access This task ")
else:
return HttpResponse("You are not allowed to access this resource")
def clear(request):
Username.objects.filter(username=request.COOKIES['username']).delete()
response = HttpResponseRedirect('/tasks/')
response.delete_cookie('username')
return response
|
py | b4011e470d306e28060b0085f7402e43980e6bfd | import greentest
import gevent
from gevent import pywsgi
import test__server
from test__server import *
from test__server import Settings as server_Settings
def application(self, environ, start_response):
if environ['PATH_INFO'] == '/':
start_response("200 OK", [])
return [b"PONG"]
if environ['PATH_INFO'] == '/ping':
start_response("200 OK", [])
return [b"PONG"]
elif environ['PATH_INFO'] == '/short':
gevent.sleep(0.5)
start_response("200 OK", [])
return []
elif environ['PATH_INFO'] == '/long':
gevent.sleep(10)
start_response("200 OK", [])
return []
else:
start_response("404 pywsgi WTF?", [])
return []
class SimpleWSGIServer(pywsgi.WSGIServer):
application = application
internal_error_start = b'HTTP/1.1 500 Internal Server Error\n'.replace(b'\n', b'\r\n')
internal_error_end = b'\n\nInternal Server Error'.replace(b'\n', b'\r\n')
internal_error503 = b'''HTTP/1.1 503 Service Unavailable
Connection: close
Content-type: text/plain
Content-length: 31
Service Temporarily Unavailable'''.replace(b'\n', b'\r\n')
class Settings:
ServerClass = pywsgi.WSGIServer
ServerSubClass = SimpleWSGIServer
close_socket_detected = True
restartable = False
close_socket_detected = False
@staticmethod
def assert500(self):
conn = self.makefile()
conn.write(b'GET / HTTP/1.0\r\n\r\n')
result = conn.read()
assert result.startswith(internal_error_start), (result, internal_error_start)
assert result.endswith(internal_error_end), (result, internal_error_end)
assertAcceptedConnectionError = assert500
@staticmethod
def assert503(self):
conn = self.makefile()
conn.write(b'GET / HTTP/1.0\r\n\r\n')
result = conn.read()
assert result == internal_error503, (result, internal_error503)
@staticmethod
def assertPoolFull(self):
self.assertRaises(socket.timeout, self.assertRequestSucceeded)
@staticmethod
def assertAcceptedConnectionError(self):
conn = self.makefile()
result = conn.read()
assert not result, repr(result)
test__server.Settings = Settings
del TestNoneSpawn
if __name__ == '__main__':
greentest.main()
|
py | b4011e77529fd1b0ff9d862936bebac7a7841d43 | from typing import List
from pdip.data import EntityBase
from pdi.domain.base.secret.AuthenticationTypeBase import AuthenticationTypeBase
from pdi.domain.base.secret.SecretBase import SecretBase
class SecretTypeBase(EntityBase):
def __init__(self,
Name: str = None,
Secrets: List[SecretBase] = [],
AuthenticationTypes: List[AuthenticationTypeBase] = [],
*args, **kwargs):
super().__init__(*args, **kwargs)
        self.Name: str = Name
self.Secrets = Secrets
self.AuthenticationTypes = AuthenticationTypes
|
py | b4011f17b881564ef90fef936b0a0d755ba9701b | from dataclasses import dataclass
from typing import List, Tuple, Optional
from fffw import encoding
from fffw.encoding import *
from fffw.wrapper import param
@dataclass
class X11Grab(encoding.Input):
"""
X-server grabbing input.
"""
# `skip=True` removes parameter from argument list
# (it is added manually in `as_pairs`).
# This field overwrites `default` from `encoding.Input`.
input_file: str = param(name='i', default=':0.0', skip=True)
# `init=False` excludes parameter from `__init__`.
# Field is initialized with value passed in `default`
# parameter. Exactly as in dataclasses.
format: str = param(name='f', default='x11grab', init=False)
size: str = param(name='video_size')
fps: float = param(name='framerate')
def as_pairs(self) -> List[Tuple[Optional[str], Optional[str]]]:
return super().as_pairs() + [('i', self.input_file)]
ff = FFMPEG()
ff < X11Grab(fps=25, size='cif', input_file=':0.0+10,20')
# As Output is not initialized with any video codec,
# force excluding `-vn` flag.
ff > output_file('out.mpg', no_video=False)
print(ff.get_cmd())
|
py | b4011f53175525b987f47349bb001053913f8e64 | #!/usr/bin/env python3
def createDataRetriever(name=''):
from .DataRetriever import DataRetriever
return DataRetriever(name=name)
def getFactoriesInfo():
"""
Returns a dictionary with information on how to create an object Sensor from its factory
"""
return {'DataRetriever':
{
'factory':'createDataRetriever'
}
}
|
py | b4011f71d38299ca1bd7c74d545dbc50407dd8b2 | #!python
import sys
import wave
import pyaudio
import wavfile
import argparse
import numpy as np
from playsound import playsound
from scipy.signal import kaiserord, lfilter, firwin, freqz
from pylab import figure, plot, xlabel, ylabel, xlim, ylim, title, grid, axes, show
OUTPUT_FOLDER = 'out/'
CHUNK = 1024
CHANNELS = 1
NOISE_A = 500
class Noiser:
"""
    Utility class to generate band-limited noise.
Core extracted from https://stackoverflow.com/questions/33933842/how-to-generate-noise-in-frequency-range-with-numpy
"""
def __init__(self):
pass
def fftnoise(self, f):
f = np.array(f, dtype="complex")
Np = (len(f) - 1) // 2
phases = np.random.rand(Np) * 2 * np.pi
phases = np.cos(phases) + 1j * np.sin(phases)
f[1 : Np + 1] *= phases
f[-1 : -1 - Np : -1] = np.conj(f[1 : Np + 1])
return np.fft.ifft(f).real
def band_limited_noise(self, min_freq, max_freq, samples=1024, samplerate=1):
freqs = np.abs(np.fft.fftfreq(samples, 1 / samplerate))
f = np.zeros(samples)
f[np.logical_and(freqs >= min_freq, freqs <= max_freq)] = 1
return self.fftnoise(f)
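# Example (illustrative): one second of 1950-2050 Hz band-limited noise at 44.1 kHz.
#
#   noise = Noiser().band_limited_noise(min_freq=1950, max_freq=2050,
#                                       samples=44100, samplerate=44100)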
class Recorder:
def __init__(self, duration, sample_rate):
self.duration = duration
self.sample_rate = sample_rate
def __do_record(self, chunk, channels, files_prefix, aformat=pyaudio.paInt16):
p = pyaudio.PyAudio()
stream = p.open(format=aformat, channels=channels, rate=self.sample_rate, input=True, frames_per_buffer=chunk)
print("* recording")
frames = []
for i in range(0, int(self.sample_rate / chunk * self.duration)):
data = stream.read(chunk)
frames.append(data)
print("* done recording")
stream.stop_stream()
stream.close()
p.terminate()
file_output = OUTPUT_FOLDER + files_prefix + "_output.wav"
wf = wave.open(file_output, 'wb')
wf.setnchannels(channels)
wf.setsampwidth(p.get_sample_size(aformat))
wf.setframerate(self.sample_rate)
wf.writeframes(b''.join(frames))
wf.close()
return file_output
def record(self, files_prefix):
return self.__do_record(CHUNK, CHANNELS, files_prefix)
class KaiserFilter:
def __init__(self, sample_rate, cutoff_hz_1, cutoff_hz_2, ripple_db):
self.sample_rate = sample_rate
self.cutoff_hz_1 = cutoff_hz_1
self.cutoff_hz_2 = cutoff_hz_2
self.ripple_db = ripple_db
def add_noise_and_filter(self, x, noise, play_sounds, files_prefix):
t = np.arange(len(x)) / self.sample_rate
# Plot original signal.
figure()
plot(t, x)
title('Original signal')
grid(True)
#------------------------------------------------
# Add noise to original signal
#------------------------------------------------
with_noise = x + noise
# Plot the signal with noise.
figure()
plot(t, with_noise)
title('Signal with noise')
grid(True)
# Save audio with noise.
output_with_noise = ''.join([OUTPUT_FOLDER, files_prefix, '_with_noise.wav'])
wavfile.write(output_with_noise, self.sample_rate, with_noise, normalized=True)
# Override x signal.
x = with_noise
#------------------------------------------------
# Create a FIR filter and apply it to x.
#------------------------------------------------
# The Nyquist rate of the signal.
nyq_rate = self.sample_rate / 2.0
# The desired width of the transition from pass to stop,
# relative to the Nyquist rate.
width = (self.cutoff_hz_2 - self.cutoff_hz_1) / nyq_rate
# Compute the order and Kaiser parameter for the FIR filter.
N, beta = kaiserord(self.ripple_db, width)
N |= 1
# Use firwin with a Kaiser window to create a FIR filter.
taps = firwin(N, [self.cutoff_hz_1 / nyq_rate, self.cutoff_hz_2 / nyq_rate], window=('kaiser', beta), pass_zero=True)
# Use lfilter to filter x with the FIR filter.
filtered_x = lfilter(taps, 1.0, x)
#------------------------------------------------
# Plot the magnitude response of the filter.
#------------------------------------------------
figure()
w, h = freqz(taps, worN=8000)
plot((w / np.pi) * nyq_rate, np.absolute(h))
xlabel('Frequency (Hz)')
ylabel('Gain')
title('Frequency response')
ylim(-0.05, 1.05)
grid(True)
#------------------------------------------------
# Plot the filtered signal.
#------------------------------------------------
# The phase delay of the filtered signal.
delay = 0.5 * (N - 1) / self.sample_rate
# Plot the filtered signal, shifted to compensate for the phase delay.
figure()
# Plot just the "good" part of the filtered signal. The first N-1
# samples are "corrupted" by the initial conditions.
plot(t[N-1:]-delay, filtered_x[N-1:], 'g')
title('Filtered signal')
xlabel('t')
grid(True)
# Save filtered audio
output_filtered = "".join([OUTPUT_FOLDER, files_prefix, '_filtered.wav'])
wavfile.write(output_filtered, self.sample_rate, filtered_x, normalized=True)
if play_sounds:
playsound(output_with_noise)
playsound(output_filtered)
# Show plotted figures
show()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--duration', '-d', default=5, type=int, help='Recording duration in seconds')
parser.add_argument('--rate', '-r', default=44100, type=int, help='Audio sample rate')
parser.add_argument('--window', '-w', choices=['kaiser'], default='kaiser', help='Filter window type')
parser.add_argument('--cutoffhz1', '-wc1', default=1900, type=int, help='The cutoff frequency 1 of the filter')
parser.add_argument('--cutoffhz2', '-wc2', default=2100, type=int, help='The cutoff frequency 2 of the filter')
parser.add_argument('--ripple_db', '-rd', default=60, type=int, help='The desired attenuation in the stop band, in dB')
parser.add_argument('--noise_1', '-n1', default=1950, type=int, help='Noise minimum frequency')
parser.add_argument('--noise_2', '-n2', default=2050, type=int, help='Noise maximum frequency')
parser.add_argument('--play', '-p', default=True, type=bool, help='Play audios with and without noise')
args = vars(parser.parse_args())
files_prefix = '_'.join(['noise', str(args['noise_1']), str(args['noise_2'])\
, str(args['cutoffhz1']), str(args['cutoffhz2']), str(args['ripple_db'])])
audio_path = Recorder(args['duration'], args['rate']).record(files_prefix)
x = wavfile.read(audio_path, normalized=True, forcestereo=False)[1]
noise = Noiser().band_limited_noise(min_freq=args['noise_1'], max_freq=args['noise_2']\
, samples=len(x), samplerate=args['rate']) * NOISE_A
if args['window'] == 'kaiser':
KaiserFilter(args['rate'], args['cutoffhz1'], args['cutoffhz2'], args['ripple_db'])\
.add_noise_and_filter(x, noise, args['play'], files_prefix)
if __name__ == "__main__":
    main()
|
py | b4011fa6b77ebd6c7bd06d228ce60c19fe945524 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.tests.functional.notification_sample_tests \
import notification_sample_base
from nova.tests.unit import fake_notifier
class TestFlavorNotificationSample(
notification_sample_base.NotificationSampleTestBase):
def test_flavor_create(self):
body = {
"flavor": {
"name": "test_flavor",
"ram": 1024,
"vcpus": 2,
"disk": 10,
"id": "a22d5517-147c-4147-a0d1-e698df5cd4e3",
"rxtx_factor": 2.0
}
}
self.admin_api.api_post('flavors', body)
self._verify_notification('flavor-create')
def test_flavor_destroy(self):
body = {
"flavor": {
"name": "test_flavor",
"ram": 1024,
"vcpus": 2,
"disk": 10,
"id": "a22d5517-147c-4147-a0d1-e698df5cd4e3",
"rxtx_factor": 2.0
}
}
# Create a flavor.
self.admin_api.api_post('flavors', body)
self.admin_api.api_delete(
'flavors/a22d5517-147c-4147-a0d1-e698df5cd4e3')
self._verify_notification(
'flavor-delete', actual=fake_notifier.VERSIONED_NOTIFICATIONS[1])
def test_flavor_update(self):
body = {
"flavor": {
"name": "test_flavor",
"ram": 1024,
"vcpus": 2,
"disk": 10,
"id": "a22d5517-147c-4147-a0d1-e698df5cd4e3",
"os-flavor-access:is_public": False,
"rxtx_factor": 2.0
}
}
# Create a flavor.
self.admin_api.api_post('flavors', body)
body = {
"extra_specs": {
"key1": "value1",
"key2": "value2"
}
}
self.admin_api.api_post(
'flavors/a22d5517-147c-4147-a0d1-e698df5cd4e3/os-extra_specs',
body)
body = {
"addTenantAccess": {
"tenant": "fake_tenant"
}
}
self.admin_api.api_post(
'flavors/a22d5517-147c-4147-a0d1-e698df5cd4e3/action',
body)
self._verify_notification(
'flavor-update', actual=fake_notifier.VERSIONED_NOTIFICATIONS[2])
|
py | b401215b62161cc96339fea4b0a68dfd02abb285 | """
List of form-of templates.
Auto-generated with `python -m scripts`.
"""
# START
labels = {
"&": "and",
"+": "with",
"2ch slang": "2channel slang",
"Ante-Classical": "pre-Classical",
"Ante-classical": "pre-Classical",
"Anteclassical": "pre-Classical",
"Classic 1811 Dictionary of the i Tongue": "archaic, slang",
"Hispanicism": "Castilianism",
"International Phonetic Alphabet": "IPA",
"NNES": "non-native speakers' English",
"NNS": "non-native speakers",
"NNSE": "non-native speakers' English",
"Now": "now",
"Post-Classical": "post-Classical",
"Post-classical": "post-Classical",
"Postclassical": "post-Classical",
"Pre-Classical": "pre-Classical",
"Pre-classical": "pre-Classical",
"Preclassical": "pre-Classical",
"UK slang": "British slang",
"_": "",
"active voice": "active",
"ad slang": "advertising slang",
"affectionate": "endearing",
"ambitransitive": "transitive, intransitive",
"ante-Classical": "pre-Classical",
"ante-classical": "pre-Classical",
"anteclassical": "pre-Classical",
"argot": "cant",
"baby talk": "childish",
"back-slang": "back slang",
"backslang": "back slang",
"bookish": "literary",
"child language": "childish",
"chu Nom": "Vietnamese chữ Nôm",
"coarse": "vulgar",
"cognate object": "with cognate object",
"colloquially": "colloquial",
"control": "control verb",
"copular": "copulative",
"cosmo": "advertising slang",
"coster": "costermongers",
"costermonger": "costermongers",
"costermongers back slang": "costermongers",
"costermongers' back slang": "costermongers",
"costers": "costermongers",
"cryptolect": "cant",
"derogative": "derogatory",
"disparaging": "derogatory",
"dual": "in the dual",
"dysphemism": "dysphemistic",
"elevated": "solemn",
"euphemism": "euphemistic",
"except in": "outside",
"expletive": "swear word",
"fandom": "fandom slang",
"female speech": "women's speech",
"figurative": "figuratively",
"hapax legomenon": "hapax",
"hedges": "hedge",
"historic": "historical",
"history": "historical",
"hyperbole": "hyperbolic",
"ideophone": "ideophonic",
"idiom": "idiomatic",
"idiomatically": "idiomatic",
"imperative mood": "imperative",
"in dual": "in the dual",
"in mediopassive": "in the mediopassive",
"in plural": "in the plural",
"in singular": "in the singular",
"in the active": "active",
"in the imperative": "imperative",
"in the indicative": "indicative",
"in the jussive": "jussive",
"in the middle": "middle",
"in the middle voice": "middle",
"in the passive": "passive",
"in the subjunctive": "subjunctive",
"indicative mood": "indicative",
"informally": "informal",
"internet slang": "Internet slang",
"jocular": "humorous",
"jussive mood": "jussive",
"leet": "leetspeak",
"literal": "literally",
"mainly": "chiefly",
"male speech": "men's speech",
"mediopassive": "in the mediopassive",
"metaphor": "figuratively",
"metaphorical": "figuratively",
"metaphorically": "figuratively",
"metonym": "metonymically",
"metonymic": "metonymically",
"metonymy": "metonymically",
"middle voice": "middle",
"mostly": "chiefly",
"narrowly": "strictly",
"negative polarity": "chiefly in the negative",
"negative polarity item": "chiefly in the negative",
"neologistic": "neologism",
"no plural": "singular only",
"non-standard": "nonstandard",
"nonce": "nonce word",
"notcomp": "not comparable",
"nowadays": "now",
"obscene": "vulgar",
"passive voice": "passive",
"pejorative": "derogatory",
"plural": "in the plural",
"plurale tantum": "plural only",
"pluralonly": "plural only",
"possessive pronoun": "possessive",
"post-classical": "post-Classical",
"postclassical": "post-Classical",
"pre-classical": "pre-Classical",
"preclassical": "pre-Classical",
"primarily": "chiefly",
"profane": "vulgar",
"profanity": "swear word",
"pronominal": "takes a reflexive pronoun",
"public school slang": "school slang",
"racial slur": "ethnic slur",
"rare sense": "rare",
"rare term": "rare",
"reflexive pronoun": "reflexive",
"sectarian slur": "religious slur",
"self-deprecating": "self-deprecatory",
"seong-eo": "set phrase from Classical Chinese",
"singular": "in the singular",
"singulare tantum": "singular only",
"stative verb": "stative",
"student slang": "college slang",
"subjunctive mood": "subjunctive",
"texting": "text messaging",
"thieves": "thieves' cant",
"thieves cant": "thieves' cant",
"thieves'": "thieves' cant",
"uds.": "used formally in Spain",
"uncomparable": "not comparable",
"university slang": "college slang",
"usually in plural": "usually in the plural",
"usually in the negative": "chiefly in the negative",
"usually plural": "usually in the plural",
} # 144
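# Per-label rendering flags: omit_preSpace / omit_preComma / omit_postComma
# control whether the space or comma around the label is dropped when several
# labels are joined for display.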
label_syntaxes = {
"&": {
"omit_postComma": True,
"omit_preComma": True,
"omit_preSpace": False,
},
"+": {
"omit_postComma": True,
"omit_preComma": True,
"omit_preSpace": False,
},
";": {
"omit_postComma": True,
"omit_preComma": True,
"omit_preSpace": True,
},
"Homeric epithet": {
"omit_postComma": True,
"omit_preComma": False,
"omit_preSpace": False,
},
"Now": {
"omit_postComma": True,
"omit_preComma": False,
"omit_preSpace": False,
},
"_": {
"omit_postComma": True,
"omit_preComma": True,
"omit_preSpace": False,
},
"also": {
"omit_postComma": True,
"omit_preComma": False,
"omit_preSpace": False,
},
"and": {
"omit_postComma": True,
"omit_preComma": True,
"omit_preSpace": False,
},
"by": {
"omit_postComma": True,
"omit_preComma": True,
"omit_preSpace": False,
},
"chiefly": {
"omit_postComma": True,
"omit_preComma": False,
"omit_preSpace": False,
},
"especially": {
"omit_postComma": True,
"omit_preComma": False,
"omit_preSpace": False,
},
"except": {
"omit_postComma": True,
"omit_preComma": True,
"omit_preSpace": False,
},
"except in": {
"omit_postComma": True,
"omit_preComma": True,
"omit_preSpace": False,
},
"excluding": {
"omit_postComma": True,
"omit_preComma": False,
"omit_preSpace": False,
},
"extremely": {
"omit_postComma": True,
"omit_preComma": False,
"omit_preSpace": False,
},
"frequently": {
"omit_postComma": True,
"omit_preComma": False,
"omit_preSpace": False,
},
"humorously": {
"omit_postComma": True,
"omit_preComma": False,
"omit_preSpace": False,
},
"including": {
"omit_postComma": True,
"omit_preComma": False,
"omit_preSpace": False,
},
"mainly": {
"omit_postComma": True,
"omit_preComma": False,
"omit_preSpace": False,
},
"many": {
"omit_postComma": True,
"omit_preComma": False,
"omit_preSpace": False,
},
"markedly": {
"omit_postComma": True,
"omit_preComma": False,
"omit_preSpace": False,
},
"mildly": {
"omit_postComma": True,
"omit_preComma": False,
"omit_preSpace": False,
},
"mostly": {
"omit_postComma": True,
"omit_preComma": False,
"omit_preSpace": False,
},
"now": {
"omit_postComma": True,
"omit_preComma": False,
"omit_preSpace": False,
},
"nowadays": {
"omit_postComma": True,
"omit_preComma": False,
"omit_preSpace": False,
},
"occasionally": {
"omit_postComma": True,
"omit_preComma": False,
"omit_preSpace": False,
},
"of": {
"omit_postComma": True,
"omit_preComma": False,
"omit_preSpace": False,
},
"of a": {
"omit_postComma": True,
"omit_preComma": False,
"omit_preSpace": False,
},
"of an": {
"omit_postComma": True,
"omit_preComma": False,
"omit_preSpace": False,
},
"often": {
"omit_postComma": True,
"omit_preComma": False,
"omit_preSpace": False,
},
"or": {
"omit_postComma": True,
"omit_preComma": True,
"omit_preSpace": False,
},
"originally": {
"omit_postComma": True,
"omit_preComma": False,
"omit_preSpace": False,
},
"outside": {
"omit_postComma": True,
"omit_preComma": True,
"omit_preSpace": False,
},
"particularly": {
"omit_postComma": True,
"omit_preComma": False,
"omit_preSpace": False,
},
"possibly": {
"omit_postComma": True,
"omit_preComma": False,
"omit_preSpace": False,
},
"primarily": {
"omit_postComma": True,
"omit_preComma": False,
"omit_preSpace": False,
},
"rarely": {
"omit_postComma": True,
"omit_preComma": False,
"omit_preSpace": False,
},
"slightly": {
"omit_postComma": True,
"omit_preComma": False,
"omit_preSpace": False,
},
"sometimes": {
"omit_postComma": True,
"omit_preComma": False,
"omit_preSpace": False,
},
"somewhat": {
"omit_postComma": True,
"omit_preComma": False,
"omit_preSpace": False,
},
"strongly": {
"omit_postComma": True,
"omit_preComma": False,
"omit_preSpace": False,
},
"then": {
"omit_postComma": True,
"omit_preComma": False,
"omit_preSpace": False,
},
"typically": {
"omit_postComma": True,
"omit_preComma": False,
"omit_preSpace": False,
},
"usually": {
"omit_postComma": True,
"omit_preComma": False,
"omit_preSpace": False,
},
"very": {
"omit_postComma": True,
"omit_preComma": False,
"omit_preSpace": False,
},
"with": {
"omit_postComma": True,
"omit_preComma": True,
"omit_preSpace": False,
},
} # 46
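# Topical label aliases -> canonical topic label (subject areas such as
# sciences, games, religion).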
labels_topical = {
"Ancient Hinduism": "Vedic religion",
"Arabian god": "Arabian mythology",
"Arthurian mythology": "Arthurian legend",
"Asatru": "Germanic paganism",
"Bible": "biblical",
"Biblical": "biblical",
"Biblical character": "biblical",
"Biblical figure": "biblical",
"Catholic": "Catholicism",
"Chinese chess": "xiangqi",
"Chinese constellation": "Chinese astronomy",
"Chinese medicine": "traditional Chinese medicine",
"Chinese star": "Chinese astronomy",
"Communism": "communism",
"Daoism": "Taoism",
"E number": "food manufacture",
"Eastern Catholic": "Eastern Catholicism",
"Egyptian god": "Egyptian mythology",
"GUI": "graphical user interface",
"Germanic Paganism": "Germanic paganism",
"Germanic neopaganism": "Germanic paganism",
"Go": "go",
"Greek god": "Greek mythology",
"Halacha": "Jewish law",
"Halachah": "Jewish law",
"Halakha": "Jewish law",
"Halakhah": "Jewish law",
"Heathenry": "Germanic paganism",
"Hindu god": "Hinduism",
"Japanese god": "Japanese mythology",
"Java PL": "Java programming language",
"JavaPL": "Java programming language",
"Jewish Law": "Jewish law",
"LGBTQ": "LGBT",
"MMO": "online gaming",
"MMORPG": "online gaming",
"Mesopotamian god": "Mesopotamian mythology",
"Nazi": "Nazism",
"Norse neopaganism": "Germanic paganism",
"Norse paganism": "Germanic paganism",
"RPG": "role-playing games",
"Rasta": "Rastafari",
"Rastafarian": "Rastafari",
"Rastafarianism": "Rastafari",
"Roman Catholic": "Roman Catholicism",
"Roman god": "Roman mythology",
"Rubik's cubes": "Rubik's Cube",
"Shamanism": "shamanism",
"TCM": "traditional Chinese medicine",
"TV": "television",
"USSR": "Soviet Union",
"Vedic Hinduism": "Vedic religion",
"Vedicism": "Vedic religion",
"Vedism": "Vedic religion",
"WMF": "Wiktionary and WMF jargon",
"WMF jargon": "Wiktionary and WMF jargon",
"Wiktionary": "Wiktionary and WMF jargon",
"Wiktionary jargon": "Wiktionary and WMF jargon",
"alcohol": "alcoholic beverages",
"amino acid": "biochemistry",
"analysis": "mathematical analysis",
"ancient Hinduism": "Vedic religion",
"angling": "fishing",
"architectural element": "architecture",
"architectural elements": "architecture",
"arts": "art",
"asana": "yoga",
"association football": "soccer",
"asterism": "uranography",
"automotives": "automotive",
"bacteria": "bacteriology",
"baraminology": "creationism",
"betting": "gambling",
"biblical character": "biblical",
"biblical figure": "biblical",
"bicycling": "cycling",
"board game": "board games",
"book of the bible": "biblical",
"canid": "zoology",
"carbohydrate": "biochemistry",
"carboxylic acid": "organic chemistry",
"card game": "card games",
"cards": "card games",
"cat": "zoology",
"cell phone": "mobile telephony",
"cell phones": "mobile telephony",
"chemical element": "chemistry",
"christianity": "Christianity",
"cinema": "film",
"coenzyme": "biochemistry",
"communications": "communication",
"comp sci": "computer science",
"compilation": "software compilation",
"comptheory": "computing theory",
"computer": "computing",
"computer game": "computer games",
"computer language": "computer languages",
"computer programming": "programming",
"computers": "computing",
"constellation": "astronomy",
"constructed languages": "conlanging",
"cookery": "cooking",
"cookware": "cooking",
"copyright": "copyright law",
"coral science": "marine biology",
"cosmetology": "cosmetics",
"cryptocurrency": "cryptocurrencies",
"cuisine": "cooking",
"culinary": "cooking",
"currency": "numismatics",
"database": "databases",
"dice": "dice games",
"disease": "medicine",
"diseases": "medicine",
"element symbol": "chemistry",
"emergency": "emergency medicine",
"enzyme": "biochemistry",
"equestrian": "equestrianism",
"fatty acid": "organic chemistry",
"felid": "zoology",
"fictional character": "fiction",
"figure of speech": "rhetoric",
"film genre": "film",
"filmology": "cinematography",
"firearm": "firearms",
"fish": "zoology",
"football": "soccer",
"footwear": "clothing",
"functional group prefix": "organic chemistry",
"functional group suffix": "organic chemistry",
"galaxy": "astronomy",
"game": "games",
"game of go": "game of Go",
"gardening": "horticulture",
"gastronomy": "cooking",
"genetic disorder": "medical genetics",
"gnosticism": "Gnosticism",
"grammatical case": "grammar",
"grammatical mood": "grammar",
"greekmyth": "Greek mythology",
"gun mechanisms": "firearms",
"gynecology": "gynaecology",
"haematology": "hematology",
"halacha": "Jewish law",
"halachah": "Jewish law",
"halakha": "Jewish law",
"halakhah": "Jewish law",
"ham radio": "amateur radio",
"heathenry": "Germanic paganism",
"heraldic charge": "heraldry",
"heraldiccharge": "heraldry",
"hockey": "field hockey or ice hockey",
"hormone": "biochemistry",
"horoscope": "astrology",
"horses": "equestrianism",
"hydrocarbon chain prefix": "organic chemistry",
"hydrocarbon chain suffix": "organic chemistry",
"indo-european studies": "Indo-European studies",
"industry": "manufacturing",
"inorganic compound": "inorganic chemistry",
"internet": "Internet",
"islam": "Islam",
"isotope": "physics",
"jewish law": "Jewish law",
"landforms": "geography",
"legal": "law",
"letterpress": "letterpress typography",
"lipid": "biochemistry",
"logical fallacy": "rhetoric",
"math": "mathematics",
"maths": "mathematics",
"medical": "medicine",
"medical sign": "medicine",
"medieval folklore": "mediaeval folklore",
"metal type": "letterpress typography",
"metal typesetting": "letterpress typography",
"metamaterial": "physics",
"military rank": "military",
"military ranks": "military",
"military unit": "military",
"mineral": "mineralogy",
"mobile phone": "mobile telephony",
"mobile phones": "mobile telephony",
"morphology": "linguistic morphology",
"motor sport": "motor racing",
"motorbike": "motorcycling",
"motorcycle": "motorcycling",
"motorcycles": "motorcycling",
"motorsport": "motor racing",
"muscle": "anatomy",
"mushroom": "mycology",
"mushrooms": "mycology",
"music genre": "music",
"musical instrument": "music",
"musical instruments": "music",
"musici": "music",
"musician": "music",
"mythological creature": "mythology",
"mythological creatures": "mythology",
"nazi": "Nazism",
"nazism": "Nazism",
"neo-Nazi": "Nazism",
"neo-Nazism": "Nazism",
"neo-nazi": "Nazism",
"neo-nazism": "Nazism",
"neo-pagan": "paganism",
"neo-paganism": "paganism",
"neoNazi": "Nazism",
"neoNazism": "Nazism",
"neonazi": "Nazism",
"neonazism": "Nazism",
"neopagan": "paganism",
"neopaganism": "paganism",
"neurotoxin": "neurotoxicology",
"neurotransmitter": "biochemistry",
"object-oriented": "object-oriented programming",
"obstetric": "obstetrics",
"oil drilling": "oil industry",
"online": "Internet",
"online games": "online gaming",
"organic compound": "organic chemistry",
"pagan": "paganism",
"pain": "medicine",
"palaeontology": "paleontology",
"paleography": "palaeography",
"part of speech": "grammar",
"particle": "physics",
"patents": "patent law",
"pesapallo": "pesäpallo",
"pharmaceutical drug": "pharmacology",
"pharmaceutical effect": "pharmacology",
"philology": "linguistics",
"plant": "botany",
"plant disease": "phytopathology",
"playing card": "card games",
"poison": "toxicology",
"police": "law enforcement",
"policing": "law enforcement",
"political": "politics",
"political subdivision": "government",
"pro wrestling": "professional wrestling",
"protein": "biochemistry",
"proteins": "biochemistry",
"quantum": "quantum mechanics",
"quantum physics": "quantum mechanics",
"radiation": "physics",
"rail": "rail transport",
"railroading": "rail transport",
"railroads": "rail transport",
"rasta": "Rastafari",
"rastafarian": "Rastafari",
"regex": "regular expressions",
"rock": "petrology",
"role playing games": "role-playing games",
"schools": "education",
"sci fi": "science fiction",
"science": "sciences",
"scientific": "sciences",
"scifi": "science fiction",
"scouting": "Scouting",
"scuba": "underwater diving",
"scuba diving": "underwater diving",
"skeleton": "anatomy",
"social science": "social sciences",
"software development": "software engineering",
"space": "space science",
"sport": "sports",
"squash": "squash (sport)",
"standard of identity": "standards of identity",
"star": "astronomy",
"steroid": "biochemistry",
"steroid drug": "biochemistry, steroids",
"steroid hormone": "biochemistry, steroids",
"stock symbol": "stock ticker symbol",
"sugar acid": "organic chemistry",
"surface feature": "planetology",
"symptom": "medicine",
"tax": "taxation",
"taxes": "taxation",
"taxonomic name": "taxonomy",
"telecom": "telecommunications",
"telecommunication": "telecommunications",
"telephone": "telephony",
"telephones": "telephony",
"theatre": "theater",
"tincture": "heraldry",
"trading card games": "collectible card games",
"transportation": "transport",
"typesetting": "typography",
"valentinianism": "Valentinianism",
"vector": "linear algebra",
"vector algebra": "linear algebra",
"vegetables": "vegetable",
"vehicle": "vehicles",
"video game genre": "video games",
"video gaming": "video games",
"vitamin": "biochemistry",
"vulcanology": "volcanology",
"watercraft": "nautical",
"weapons": "weaponry",
"white nationalism": "white supremacist ideology",
"white nationalist": "white supremacist ideology",
"white power": "white supremacist ideology",
"white racism": "white supremacist ideology",
"white supremacism": "white supremacist ideology",
"white supremacist": "white supremacist ideology",
"white supremacy": "white supremacist ideology",
"yoga pose": "yoga",
"zodiac": "astrology",
"zodiac constellations": "astronomy",
"Ásatrú": "Germanic paganism",
} # 311
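# Regional label aliases -> canonical place, country, or dialect-region name.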
labels_regional = {
"AU": "Australia",
"Acadia": "Acadian",
"Adrianople": "Edirne",
"African": "Africa",
"Afyon": "Afyonkarahisar",
"Aghin": "Ağın",
"Aghjabadi": "Agjabedi",
"Aghjabedi": "Agjabedi",
"Aghri": "Ağrı",
"Akhalkalak": "Akhalkalaki",
"Akhlkalak": "Akhalkalaki",
"Al-Andalus": "al-Andalus",
"Alaskan": "Alaska",
"Alaškert": "Alashkert",
"Algerian": "Algeria",
"Algherese": "Alghero",
"Ali Bayramli": "Shirvan",
"Alto Adige": "South Tyrol",
"America": "US",
"American": "US",
"American form": "US",
"Andean": "Andes",
"Angolan": "Angola",
"Antarctic": "Antarctica",
"Appalachian": "Appalachia",
"Arabkir": "Arapgir",
"Aragon": "Aragón",
"Aragonese": "Aragón",
"Arapkir": "Arapgir",
"Archesh": "Erciş",
"Ardabil": "Ardebil",
"Ardabīl ": "Ardebil",
"Ardanuji": "Ardanuç",
"Ardebīl": "Ardebil",
"Ardvin": "Artvin",
"Ardvini": "Artvin",
"Argentinian": "Argentina",
"Artanuj": "Ardanuç",
"Artchesh": "Erciş",
"Aslanbek": "Aslanbeg",
"Asturian": "Asturias",
"Atana": "Adana",
"Atlantic Canadian": "Atlantic Canada",
"Australian": "Australia",
"Australian form": "Australian",
"Austrian": "Austria",
"Azorean": "Azores",
"Azorian": "Azores",
"Ağcabədi": "Agjabedi",
"Ağdam": "Agdam",
"Baghesh": "Bitlis",
"Bahaman": "Bahamas",
"Bahian": "Bahia",
"Baiano": "Bahia",
"Baki": "Baku",
"Bakı": "Baku",
"Balakən": "Balakan",
"Baleares": "Balearics",
"Balearic": "Balearics",
"Balearic Islands": "Balearics",
"Balears": "Balearics",
"Balu": "Palu",
"Bantenese": "Banten",
"Barbadian": "Barbados",
"Bardizag": "Partizak",
"Bavarian": "Bavaria",
"Bedfordshire dialect": "Bedfordshire",
"Belgian": "Belgium",
"Belizean": "Belize",
"Berkshire dialect": "Berkshire",
"Bermudan": "Bermuda",
"Bernese": "Bern",
"Bhawnagari": "Kathiyawadi",
"Bohuslan": "Bohuslän",
"Bolivian": "Bolivia",
"Bolognese": "Bologna",
"Borchali": "Borchaly",
"Borchalu": "Borchaly",
"Borçalı": "Borchaly",
"Bosnian": "Bosnia",
"Brabantian": "Brabant",
"Brazilian": "Brazil",
"Brebian": "Brebes",
"Bristolian": "Bristol",
"Brit": "Britain",
"British": "Britain",
"British Columbian": "British Columbia",
"British form": "British",
"Bucovina": "Bukovina",
"Bukovinian": "Bukovina",
"Bukowina": "Bukovina",
"Burma": "Myanmar",
"Burmese": "Myanmar",
"Burundian": "Burundi",
"Bərdə": "Barda",
"Cajun": "Louisiana",
"Californian": "California",
"Canadian": "Canada",
"Canadian form": "Canadian",
"Canarias": "Canary Islands",
"Canaries": "Canary Islands",
"Cape Verdean": "Cape Verde",
"Carinthian": "Carinthia",
"Carioca": "Rio de Janeiro",
"Carpigiano": "Carpi",
"Central American": "Central America",
"Chadian": "Chad",
"Chebsko": "Egerland",
"Chilean": "Chile",
"Chmshkatsag": "Çemişgezek",
"Cirebonese": "Cirebon",
"Colombian": "Colombia",
"Commonwealth": "Commonwealth of Nations",
"Commonwealth form": "British",
"Commonwealth spelling": "British spelling",
"Congo-Brazzaville": "Congo",
"Congo-Kinshasa": "Congo",
"Congolese": "Congo",
"Cornish": "Cornwall",
"Cornish dialect": "Cornwall",
"Costa Rican": "Costa Rica",
"Crimean": "Crimea",
"Croatian": "Croatia",
"Cuban": "Cuba",
"Culfa": "Julfa",
"Cumbrian": "Cumbria",
"Cypriot": "Cyprus",
"Côte d'Ivoire": "Ivory Coast",
"Côte d’Ivoire": "Ivory Coast",
"Cəbrayıl": "Jabrayil",
"Cəlilabad": "Jalilabad",
"D.C.": "District of Columbia",
"DC": "District of Columbia",
"DDR": "East Germany",
"DR Congo": "Congo",
"Dalmatian": "Dalmatia",
"Democratic Republic of Congo": "Congo",
"Democratic Republic of the Congo": "Congo",
"Den Haag": "The Hague",
"Derbyshire dialect": "Derbyshire",
"Devonian": "Devon",
"Devonian dialect": "Devon",
"Devonshire": "Devon",
"Devonshire dialect": "Devon",
"Devrik": "Divriği",
"Devrike": "Divriği",
"Dewrik": "Divriği",
"Dewrike": "Divriği",
"Diyarbakir": "Diyarbakır",
"Diyarbekir": "Diyarbakır",
"Dobrogea": "Dobruja",
"Dobrujan": "Dobruja",
"Dorset dialect": "Dorset",
"Dərbənd": "Derbent",
"East African": "East Africa",
"East Anglian": "East Anglia",
"East Anglian dialect": "East Anglia",
"East German": "East Germany",
"East Midlands dialect": "East Midlands",
"East Prussia": "Prussia",
"East Prussian": "Prussia",
"Ecuadorian": "Ecuador",
"Egerländisch": "Egerland",
"Egin": "Akn",
"Egyptian": "Egypt",
"Elazig": "Elazığ",
"Elazigh": "Elazığ",
"Eleşkirt": "Alashkert",
"English": "England",
"Equatoguinean": "Equatorial Guinea",
"Equatorial Guinean": "Equatorial Guinea",
"Ercis": "Erciş",
"Erdîş": "Erciş",
"Erzinjan": "Erzincan",
"Erznka": "Erzincan",
"Erzrum": "Erzurum",
"Essex dialect": "Essex",
"Estonian": "Estonia",
"Evdokia": "Tokat",
"Extremaduran": "Extremadura",
"Eğin": "Akn",
"Feer": "Föhr-Amrum",
"Fering": "Föhr-Amrum",
"Finnish": "Finland",
"Fizuli": "Fuzuli",
"Fluminense": "Rio de Janeiro",
"French": "France",
"Föhr-Amrum dialect": "Föhr-Amrum",
"Füzuli": "Fuzuli",
"GDR": "East Germany",
"Gakh": "Qakh",
"Galician": "Galicia",
"Gandzak": "Ganja",
"Gaucho": "Rio Grande do Sul",
"Gavar": "Nor Bayazet",
"Gazakh": "Qazakh",
"Gaúcho": "Rio Grande do Sul",
"Georgia (US)": "Georgia",
"German": "Germany",
"Getabek": "Gadabay",
"Ghars": "Kars",
"Glos": "Gloucestershire",
"Gloucestershire dialect": "Gloucestershire",
"Goan": "Goa",
"Gohilwadi": "Kathiyawadi",
"Gotlandic": "Gotland",
"Goygol": "Göygöl",
"Great Britain": "Britain",
"Guatemalan": "Guatemala",
"Guyanese": "Guyana",
"Gyurin": "Gürün",
"Göyçay": "Goychay",
"Gədəbəy": "Gadabay",
"Gəncə": "Ganja",
"Hachn": "Haçin",
"Hadjin": "Haçin",
"Hague": "The Hague",
"Hajin": "Haçin",
"Hajn": "Haçin",
"Hakari": "Hakkari",
"Hakiari": "Hakkari",
"Hakkiari": "Hakkari",
"Hakkâri": "Hakkari",
"Halunder": "Heligoland",
"Harput": "Kharberd",
"Hawaiian": "Hawaii",
"Helenendorf": "Göygöl",
"Helgoland": "Heligoland",
"Helgoland dialect": "Heligoland",
"Heligoland dialect": "Heligoland",
"Heligolandic": "Heligoland",
"Herefordshire dialect": "Herefordshire",
"Hin Jugha": "Julfa",
"Hodiçor": "Khotorjur",
"Holadi": "Kathiyawadi",
"Hollandic": "Holland",
"Honduran": "Honduras",
"Huelvan": "Extremadura",
"Indian": "India",
"Indianan": "Indiana",
"Indianian": "Indiana",
"Indonesian": "Indonesia",
"Insular Scots": "Orkney, Shetland",
"Iranian": "Iran",
"Iraqi": "Iraq",
"Irish": "Ireland",
"Isle of Lewis": "Lewis",
"Isle of Mann": "Isle of Man",
"Isle of Skye": "Skye",
"Israeli": "Israel",
"Ivorian": "Ivory Coast",
"Izmir": "İzmir",
"Izmit": "İzmit",
"Içel": "İçel",
"Jamaican": "Jamaica",
"Javakhk": "Javakheti",
"Javanese": "Java",
"Jeolla": "Jeolla dialect",
"Jhalawadi": "Kathiyawadi",
"Jordanian": "Jordan",
"Jugha": "Julfa",
"Jutlandic": "Jutland",
"Kakhi": "Qakh",
"Kalbar": "West Kalimantan",
"Kaqavaberd": "Kakavaberd",
"Karaköse": "Ağrı",
"Karin": "Erzurum",
"Karvansara": "Karvansara, Gegharkunik",
"Karvansaray": "Karvansara, Gegharkunik",
"Kathiawadi": "Kathiyawadi",
"Kaua'i": "Kauaʻi",
"Kauai": "Kauaʻi",
"Kazakh": "Qazakh",
"Kazakhstani": "Kazakhstan",
"Kelbajar": "Kalbajar",
"Kent dialect": "Kent",
"Kentish": "Kent",
"Kentish dialect": "Kent",
"Kenyan": "Kenya",
"Kesaria": "Kayseri",
"Khanlar": "Göygöl",
"Kharpert": "Kharberd",
"Kharput": "Kharberd",
"Khevsuria": "Khevsureti",
"Khlat": "Ahlat",
"Khodorchur": "Khotorjur",
"Kirklareli": "Kırklareli",
"Kyurin": "Gürün",
"Kärnten": "Carinthia",
"Kürdəmir": "Kurdamir",
"Kəlbəcər": "Kalbajar",
"Lana'i": "Lānaʻi",
"Lanai": "Lānaʻi",
"Lanaʻi": "Lānaʻi",
"Lankon": "Lankaran",
"Latvian": "Latvia",
"Laçîn": "Lachin",
"Laçın": "Lachin",
"Lebanese": "Lebanon",
"Levant": "Levantine",
"Liberian": "Liberia",
"Libyan": "Libya",
"Lithuanian": "Lithuania",
"Liverpool": "Liverpudlian",
"Lower Austrian": "Lower Austria",
"Loṙi": "Lori",
"Lulea": "Luleå",
"Luxembourgish": "Luxembourg",
"Luxemburg": "Luxembourg",
"Luxemburgish": "Luxembourg",
"Luzern": "Lucerne",
"Lvov": "Lviv",
"Lwow": "Lviv",
"Lwów": "Lviv",
"Lāna'i": "Lānaʻi",
"Lənkaran": "Lankaran",
"Lənkəran": "Lankaran",
"Macanese": "Macau",
"Macao": "Macau",
"Madeiran": "Madeira",
"Mahesani": "Mehsani",
"Mainland": "Mainland China",
"Majorca": "Mallorca",
"Malatia": "Malatya",
"Malaysian": "Malaysia",
"Malian": "Mali",
"Mallorcan": "Mallorca",
"Maltese": "Malta",
"Manc": "Manchester",
"Mancunian": "Manchester",
"Mantovano": "Mantua",
"Manx": "Isle of Man",
"Maramures": "Maramureș",
"Masallı": "Masally",
"Mauritanian": "Mauritania",
"Mexican": "Mexico",
"Meğri": "Meghri",
"Midlands dialect": "Midlands",
"Midwest US": "Midwestern US",
"Miks": "Moks",
"Mingəçevir": "Mingachevir",
"Mirandolese": "Mirandola",
"Mississippian": "Mississippi",
"Modenese": "Modena",
"Moldavian": "Moldavia",
"Moldova": "Moldavia",
"Moldovan": "Moldavia",
"Moloka'i": "Molokaʻi",
"Molokai": "Molokaʻi",
"Montenegrin": "Montenegro",
"Moravian": "Moravia",
"Moroccan": "Morocco",
"Mozambican": "Mozambique",
"Muntenian": "Muntenia",
"Murcian": "Murcia",
"Mush": "Muş",
"Myanmarese": "Myanmar",
"Müküs": "Moks",
"NC": "North Carolina",
"NY": "New York",
"NYC": "New York City",
"NZ": "New Zealand",
"Nahçıvan": "Nakhchivan",
"Nakhichevan": "Nakhchivan",
"Nakhichevan-on-Don": "Nor Nakhichevan",
"Nakhijevan": "Nakhchivan",
"Namibian": "Namibia",
"Napoli": "Naples",
"Navarrese": "Navarre",
"Naxçıvan": "Nakhchivan",
"Neapolitan": "Naples",
"Nepalese": "Nepal",
"Nepali": "Nepal",
"New Mexican": "New Mexico",
"New Nakhichevan": "Nor Nakhichevan",
"New Orleans": "Louisiana",
"New York city": "New York City",
"Ni'ihau": "Niʻihau",
"Nicaraguan": "Nicaragua",
"Nicomedia": "İzmit",
"Nidzh": "Nij",
"Niederösterreich": "Lower Austria",
"Niederösterreichisch": "Lower Austria",
"Nigde": "Niğde",
"Nigerian": "Nigeria",
"Nigerien": "Niger",
"Niihau": "Niʻihau",
"Nikomedia": "İzmit",
"Nor Jugha": "New Julfa",
"Nor Nakhijevan": "Nor Nakhichevan",
"Nordeste": "Northeast Brazil",
"Nordestino": "Northeast Brazil",
"North Carolinian": "North Carolina",
"North East England": "Northumbria",
"North England": "Northern England",
"North Indian": "North India",
"North-East England": "Northumbria",
"Northeast Brazilian": "Northeast Brazil",
"Northeast England": "Northumbria",
"Northeastern Brazilian": "Northeast Brazil",
"Northern Irish": "Northern Ireland",
"Northern Isles": "Orkney, Shetland",
"Northumberland": "Northumbria",
"Northumbrian": "Northumbria",
"Nottinghamshire dialect": "Nottinghamshire",
"Nova Scotian": "Nova Scotia",
"Novo-Bayazet": "Nor Bayazet",
"OH": "Ohio",
"Oaxacan": "Oaxaca",
"Odrin": "Edirne",
"Ohioan": "Ohio",
"Old Julfa": "Julfa",
"Oltenian": "Oltenia",
"Omani": "Oman",
"Oomram": "Föhr-Amrum",
"Orcadian": "Orkney",
"Ostrobothnian": "Ostrobothnia",
"Oxfordshire dialect": "Oxfordshire",
"Oğuz": "Oghuz",
"Pakistani": "Pakistan",
"Palestinian": "Palestine",
"Panamanian": "Panama",
"Paraguayan": "Paraguay",
"Paranaense": "Paraná",
"Parmigiano": "Parma",
"Paulista": "São Paulo",
"Pennsylvanian": "Pennsylvania",
"Peruvian": "Peru",
"Philippine": "Philippines",
"Piacentino": "Piacenza",
"Picard": "Picardy",
"Polis": "Istanbul",
"Portuguese": "Portugal",
"Presov": "Prešov",
"Prussian": "Prussia",
"Puerto Rican": "Puerto Rico",
"Punjabi": "Punjab",
"Qax": "Qakh",
"Qazax": "Qazakh",
"Québec": "Quebec",
"Reggiano": "Reggio Emilia",
"Republic of Congo": "Congo",
"Republic of Moldova": "Moldova",
"Republic of the Congo": "Congo",
"Rhodesian": "Rhodesia",
"Ross": "Ross-shire",
"Rotterdams": "Rotterdam",
"Rwandan": "Rwanda",
"SEA": "Southeast Asia",
"Saarlandic": "Saarland",
"Saarlandish": "Saarland",
"Saarländisch": "Saarland",
"Saatlı": "Saatly",
"Saint Gallen": "St. Gallen",
"Saint Ouen": "Saint Ouën",
"Saint Ouënnais": "Saint Ouën",
"Salvadorian": "El Salvador",
"Sandø": "Sandoy",
"Santomean": "São Tomé and Príncipe",
"Sao Paulo": "São Paulo",
"Sao Tomean": "São Tomé and Príncipe",
"Sason": "Sasun",
"Sassoun": "Sasun",
"Scanian": "Scania",
"Scottish": "Scotland",
"Scouse": "Liverpudlian",
"Sebastea": "Sivas",
"Sebastia": "Sivas",
"Sense District": "Sense",
"Sensler": "Sense",
"Senslerdeutsch": "Sense",
"Serbian": "Serbia",
"Shaki": "Sheki",
"Shanghainese": "Shanghai",
"Shetland Islands": "Shetland",
"Shetland islands": "Shetland",
"Shetlandic": "Shetland",
"Shetlands": "Shetland",
"Shurishkar": "Shuryshkar",
"Singaporean": "Singapore",
"Skanian": "Scania",
"Skåne": "Scania",
"Somerset dialect": "Somerset",
"Sorathi": "Kathiyawadi",
"South African": "South Africa",
"South American": "South America",
"South Asian": "South Asia",
"South Brazilian": "South Brazil",
"South England": "Southern England",
"South Indian": "South India",
"South Midlands": "Midlands",
"South Pole": "Antarctica",
"South Tirol": "South Tyrol",
"South Tirolean": "South Tyrol",
"South Tirolese": "South Tyrol",
"South Tyrolean": "South Tyrol",
"South Tyrolese": "South Tyrol",
"Southeast Asian": "Southeast Asia",
"Southern Brazilian": "South Brazil",
"Southwest US": "Southwestern US",
"Spanish": "Spain",
"Spilambertese": "Spilamberto",
"Sri Lankan": "Sri Lanka",
"St Gallen": "St. Gallen",
"St Louis, Missouri": "Missouri",
"St Petersburg": "Saint Petersburg",
"St. Louis, Missouri": "Missouri",
"St. Petersburg": "Saint Petersburg",
"Steiermark": "Styria",
"Steiermärkisch": "Styria",
"Steirisch": "Styria",
"Styrian": "Styria",
"Sudanese": "Sudan",
"Suduroy": "Suðuroy",
"Suffolk dialect": "Suffolk",
"Sumut": "North Sumatra",
"Surinamese": "Suriname",
"Suzhounese": "Suzhou",
"Swedish": "Sweden",
"Swiss": "Switzerland",
"Swiss German": "Switzerland",
"Syrian": "Syria",
"São Tomé": "São Tomé and Príncipe",
"São Toméan": "São Tomé and Príncipe",
"Söl": "Sylt",
"Söl'": "Sylt",
"Söl'ring": "Sylt",
"Sölring": "Sylt",
"Sırakonak": "Khotorjur",
"TX": "Texas",
"Tabrizi": "Tabriz",
"Taiwanese": "Taiwan",
"Tanzanian": "Tanzania",
"Tebriz": "Tabriz",
"Tevrik": "Divriği",
"Tewrik": "Divriği",
"Texan": "Texas",
"Thai": "Thailand",
"Tiflis": "Tbilisi",
"Tigranakert": "Diyarbakır",
"Tobago": "Trinidad and Tobago",
"Tobagonian": "Trinidad and Tobago",
"Transilvania": "Transylvania",
"Transylvanian": "Transylvania",
"Trapizon": "Trabzon",
"Trinidad": "Trinidad and Tobago",
"Trinidadian": "Trinidad and Tobago",
"Trinidadian and Tobagonian": "Trinidad and Tobago",
"Tunisian": "Tunisia",
"Təbriz": "Tabriz",
"Tərtər": "Tartar",
"U.S.": "US",
"UK": "Britain",
"UK form": "British",
"US South": "Southern US",
"US Southern": "Southern US",
"US form": "US",
"USA": "US",
"Ucar": "Ujar",
"Ugandan": "Uganda",
"United Kingdom": "Britain",
"United States": "US",
"United States of America": "US",
"University of Cambridge": "Cambridge University",
"University of Oxford": "Oxford University",
"Upper Midwest US": "Upper Midwestern US",
"Urha": "Urfa",
"Urner": "Uri",
"Urnerdeutsch": "Uri",
"Urseren": "Uri",
"Uruguayan": "Uruguay",
"Vaghaver": "Ağın",
"Vayots dzor": "Vayots Dzor",
"Venetian": "Venice",
"Venezuelan": "Venezuela",
"Viennese": "Vienna",
"Vietnamese": "Vietnam",
"Virginian": "Virginia",
"Viöl": "Fjolde",
"Vozim": "Vozm",
"Wallonian": "Wallonia",
"Washington, D.C.": "District of Columbia",
"Washington, DC": "District of Columbia",
"Welsh": "Wales",
"West African": "West Africa",
"West Cumbrian": "West Cumbria",
"West England": "West Country",
"West Indies": "Caribbean",
"West Prussia": "Prussia",
"West Prussian": "Prussia",
"West of England": "West Country",
"Western England": "West Country",
"Wilts": "Wiltshire",
"Wilts dialect": "Wiltshire",
"Wiltshire dialect": "Wiltshire",
"Xanlar": "Göygöl",
"Xlat": "Ahlat",
"Xocavənd": "Khojavend",
"Xodiçur": "Khotorjur",
"Xodorçur": "Khotorjur",
"Xodrçur": "Khotorjur",
"Yelenino": "Göygöl",
"Yemeni": "Yemen",
"Yemenite": "Yemen",
"Yerznka": "Erzincan",
"Yevlax": "Yevlakh",
"Zagatala": "Zaqatala",
"Zakatala": "Zaqatala",
"Zangelan": "Zangilan",
"Zurich": "Zürich",
"Zəngilan": "Zangilan",
"Zərdab": "Zardab",
"Zәncan": "Zanjan",
"Zәngan": "Zanjan",
"classical": "Classical",
"cypriot": "Cyprus",
"mainland": "Mainland China",
"mainland China": "Mainland China",
"north England": "Northern England",
"northern England": "Northern England",
"northwestern US": "Northwestern US",
"south England": "Southern England",
"southern England": "Southern England",
"southern US": "Southern US",
"southwest US": "Southwestern US",
"southwestern US": "Southwestern US",
"west England": "West Country",
"western England": "West Country",
"western US": "Western US",
"Çemişkezek": "Çemişgezek",
"Österbotten": "Ostrobothnia",
"Özim": "Vozm",
"Öömrang": "Föhr-Amrum",
"Üzim": "Vozm",
"Čmškacag": "Çemişgezek",
"İmişli": "Imishli",
"İrəvan": "Yerevan",
"İstanbul": "Istanbul",
"Şamaxı": "Shamakhi",
"Şanlıurfa": "Urfa",
"Şirvan": "Shirvan",
"Şuşa": "Shusha",
"Şəki": "Sheki",
"Şəmkir": "Shamkir",
"Əli Bayramlı": "Shirvan",
"Ərdəbil": "Ardebil",
} # 646
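# Language-variety label aliases -> canonical variety name (dialects,
# historical stages, orthographies).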
labels_subvarieties = {
"13": "Tredici Comuni",
"13 Communities": "Tredici Comuni",
"7": "Sette Comuni",
"AA": "African-American",
"AAVE": "African-American Vernacular",
"Achterhoek": "Achterhoeks",
"Achterhooks": "Achterhoeks",
"Aeolic Greek": "Aeolic",
"African American": "African-American",
"African American English": "African-American",
"African American Vernacular English": "African-American Vernacular",
"African-American English": "African-American",
"African-American Vernacular English": "African-American Vernacular",
"Akan Kasa": "Twi",
"Alsace": "Alsatian",
"Alsacien": "Alsatian",
"Alsatian German": "Alsatian",
"American English": "US",
"Amoy": "Xiamen",
"Amoy dialect": "Xiamen",
"Ancient Aramaic": "Old Aramaic",
"Ancient Hebrew": "Biblical Hebrew",
"Apulian": "Apulia",
"Arbëreshë": "Arbëresh",
"Arbërisht": "Arbëresh",
"Arcadocypriot Greek": "Arcadocypriot",
"Armenian Kipchak": "Armeno-Kipchak",
"Artsakh": "Karabakh",
"Assiutic": "Lycopolitan",
"Attic Greek": "Attic",
"Australian Aboriginal English": "Australian Aboriginal",
"Australian English": "Australia",
"Auvergnat": "Auvergne",
"Auvernhat": "Auvergne",
"Ayitcha": "Kocheyali",
"Ayticha": "Kocheyali",
"BHS": "Buddhist Hybrid Sanskrit",
"Babylonian Talmudic Aramaic": "Jewish Babylonian Aramaic",
"Badiú": "Santiago",
"Badiú or Santiago": "Santiago",
"Bambaiyya": "Bombay",
"Bashmuric": "Fayyumic",
"Batangas Tagalog": "Batangas",
"Batangueño": "Batangas",
"Batavia": "Batavian Malay",
"Batavian": "Batavian Malay",
"Beijing Mandarin": "Beijing",
"Belarusian": "Belarus",
"Belarusian Classical Orthography": "Taraškievica",
"Betawi": "Batavian Malay",
"Bialystok": "Białystok",
"Binh Dinh": "Bình Định",
"Binh Dinh dialect": "Bình Định",
"Boavista": "Boa Vista",
"Boeotian Greek": "Boeotian",
"Breemsch": "Oldenburg",
"Bremen": "Oldenburg",
"British English": "British",
"British Indian English": "British India",
"British Mediaeval Latin": "British Medieval Latin",
"Brummie": "Birmingham",
"Brummy": "Birmingham",
"Byzantine Greek": "Byzantine",
"Bzyp": "Bzyb",
"Bình Định dialect": "Bình Định",
"CPA": "Christian Palestinian Aramaic",
"Campello Monti": "Rimella and Campello Monti",
"Canadian English": "Canada",
"Central Assamese": "Central Assam",
"Central German": "central Germany",
"Central Germany": "central Germany",
"Central Italian": "central Italy",
"Central Italy": "central Italy",
"Central Scots": "Central",
"Central Vietnamese": "Central Vietnam",
"Cham Albanian": "Cham",
"Changchew": "Zhangzhou",
"Changchew dialect": "Zhangzhou",
"Chinchew": "Quanzhou",
"Chinchew dialect": "Quanzhou",
"Choanchew": "Quanzhou",
"Choanchew dialect": "Quanzhou",
"Choynimni": "Choinimni",
"Choynok": "Choinok",
"Chukaymina": "Chukaimina",
"Church Latin": "Ecclesiastical Latin",
"Cieszyn Silesian": "Cieszyn Silesia",
"Classical Hebrew": "Biblical Hebrew",
"Clay Frisian": "Clay",
"Cois Fhairrge": "Cois Fharraige",
"Colloquial Singapore English": "Singlish",
"Colloquial Singaporean English": "Singlish",
"Cologne": "Kölsch",
"Colognian": "Kölsch",
"Contemporary Anglo-Latin": "British Contemporary Latin",
"Contemporary British": "British Contemporary Latin",
"Cracow": "Kraków",
"Crete": "Cretan",
"Crimean Romani": "Crimea",
"Cuyavia": "Kujawy",
"Côte d'Ivoire": "Ivory Coast",
"Côte d'Ivoire slang": "Ivory Coast slang",
"Côte d’Ivoire": "Ivory Coast",
"Côte d’Ivoire slang": "Ivory Coast slang",
"De'kwana": "De'kwana dialect",
"Digor dialect": "Digor",
"Djidjelli": "Jijel",
"Doric Scots": "Doric",
"Drenthe": "Drents",
"Drèents": "Drents",
"EC Scots": "East Central",
"Earlier ME": "Early Middle English",
"Early ME": "Early Middle English",
"East Anglian English": "East Anglia",
"East Central Scots": "East Central",
"East Midland ME": "East Midlands",
"East Midland Middle English": "East Midlands",
"East Midlands English": "East Midlands",
"East Midlands ME": "East Midlands",
"East Norse": "Old East Norse",
"East Palatine": "Vorderpfälzisch",
"Elsässisch": "Alsatian",
"Elu": "Helu",
"Epic Greek": "Epic",
"European French": "Europe",
"Faiyumic": "Fayyumic",
"Fanti": "Fante",
"Fantse": "Fante",
"Ffima": "Kurima-jima",
"Ffyama": "Kurima-jima",
"Flanders": "East and West Flanders",
"Flemish": "East and West Flanders",
"Galilean Aramaic": "Jewish Palestinian Aramaic",
"Gascon": "Gascony",
"Geordie": "Tyneside",
"Gharabagh": "Karabakh",
"Goral": "Góral",
"Groningen": "Gronings",
"Grunnegs": "Gronings",
"Grönnegs": "Gronings",
"Gungjung": "speech of the royal court",
"Gwenedeg": "Gwened",
"Ha Tinh": "Hà Tĩnh",
"Ha Tinh dialect": "Hà Tĩnh",
"Hachijo": "Hachijō",
"Hai Phong": "Hải Phòng",
"Hai Phong dialect": "Hải Phòng",
"Hainan Min": "Hainanese",
"Hainan Min Chinese": "Hainanese",
"Haiphong": "Hải Phòng",
"Hakitia": "Haketia",
"Hallifreesk": "Halligen",
"Hallig": "Halligen",
"Hallig Frisian": "Halligen",
"Hallig dialect": "Halligen",
"Halligen Dialect": "Halligen",
"Halligen Frisian": "Halligen",
"Halligen dialect": "Halligen",
"Hamburg": "Hamburgisch",
"Hamgyeong": "Hamgyong",
"Hamgyŏng": "Hamgyong",
"Hankou": "Wuhan",
"Hankow": "Wuhan",
"Hanmuntu": "highly formal Literary Chinese-style Korean",
"Haquitía": "Haketia",
"Hariyana": "Hariyāṇvī",
"Hariyanvi": "Hariyāṇvī",
"Hatra": "Hatran",
"Hatran Aramaic": "Hatran",
"Hejazi Arabic": "Hejazi",
"Hellenistic": "Koine",
"Hellenistic Koine": "Koine",
"Hellenizing": "Hellenizing School",
"Hellenophile": "Hellenizing School",
"Hellenophile School": "Hellenizing School",
"Hesse": "Hessian",
"Hessen": "Hessian",
"Hessisch": "Hessian",
"Hinghwa": "Puxian Min",
"Hoisanese": "Taishanese",
"Holsteinisch": "Holstein",
"Homeric": "Epic",
"Homshetsi": "Hamshen",
"Homshetsma": "Hamshen",
"Hue": "Huế",
"Hue dialect": "Huế",
"Huế dialect": "Huế",
"Hà Tĩnh dialect": "Hà Tĩnh",
"Hải Phòng dialect": "Hải Phòng",
"Ikema Island": "Ikema",
"Ikema-jima": "Ikema",
"Ikemajima": "Ikema",
"Ikima": "Ikema",
"Ikyaama": "Ikema",
"Ikyama": "Ikema",
"ImpA": "Imperial Aramaic",
"Indian English": "India",
"Ingiloy": "Ingilo",
"Inner Mbugu": "Ma'a",
"Inuttitut": "Nunavik",
"Ionic Greek": "Ionic",
"Irabu Island": "Irabu",
"Irabu-jima": "Irabu",
"Irabujima": "Irabu",
"Irau": "Irabu",
"Irav": "Irabu",
"Iron dialect": "Iron",
"Ivorian": "Ivory Coast",
"Ivorian slang": "Ivory Coast slang",
"Ivory Coast French": "Ivory Coast",
"Ivory Coast French slang": "Ivory Coast slang",
"JBA": "Jewish Babylonian Aramaic",
"JLA": "Jewish Literary Aramaic",
"JPA": "Jewish Palestinian Aramaic",
"Jamaica": "Jamaican",
"Jamaican English": "Jamaican",
"Jamaican Standard English": "Jamaican",
"Ji-Lu Mandarin": "Jilu Mandarin",
"Jiang-Huai Mandarin": "Jianghuai Mandarin",
"Jiao-Liao Mandarin": "Jiaoliao Mandarin",
"Jijel Arabic": "Jijel",
"Jijeli": "Jijel",
"Judean Aramaic": "Jewish Palestinian Aramaic",
"Kaapse Afrikaans": "Cape Afrikaans",
"Kaliarnta": "Kaliarda",
"Kamruipa": "Kamrupi",
"Kamrup": "Kamrupi",
"Kamrupiya": "Kamrupi",
"Kangwon": "Gangwon",
"Kangwŏn": "Gangwon",
"Kichwa": "Standard Ecuadorian Kichwa",
"Koine Greek": "Koine",
"Krakow": "Kraków",
"Krk": "Vegliot",
"Kromanti": "Kromanti spirit possession language",
"Kurema": "Kurima-jima",
"Kurema Island": "Kurima-jima",
"Kurema-jima": "Kurima-jima",
"Kuremajima": "Kurima-jima",
"Kurima": "Kurima-jima",
"Kurima Island": "Kurima-jima",
"Kurimajima": "Kurima-jima",
"Kuyavia": "Kujawy",
"Köln": "Kölsch",
"LDA": "Lisan ud-Dawat",
"Labradorimiutut": "Inuttut",
"Lan-Yin Mandarin": "Lanyin Mandarin",
"Languedocian": "Languedoc",
"Languedocien": "Languedoc",
"Late Anglo-Latin": "British Late Latin",
"Late British": "British Late Latin",
"Late ME": "Late Middle English",
"Later": "Later Sanskrit",
"Later ME": "Late Middle English",
"Later-Sanskrit": "Later Sanskrit",
"Latin American Spanish": "Latin America",
"Lemosin": "Limousin",
"Lisaan ud-Da'wat": "Lisan ud-Dawat",
"Lisaan ud-Da'wat il-'Alaviyah": "Lisan ud-Dawat",
"Lodz": "Łódź",
"Louisiana French": "Louisiana",
"Lower Yangtze Mandarin": "Jianghuai Mandarin",
"Lowicz": "Łowicz",
"Mamluk": "Mamluk-Kipchak",
"Mazury": "Masuria",
"Mecklenburg": "Mecklenburgisch",
"Mecklenburg Low German": "Mecklenburgisch",
"Mecklenburgisch Low German": "Mecklenburgisch",
"Mecklenburgish": "Mecklenburgisch",
"Mediaeval Anglo-Latin": "British Medieval Latin",
"Mediaeval British": "British Medieval Latin",
"Medieval Anglo-Latin": "British Medieval Latin",
"Medieval British": "British Medieval Latin",
"Medieval Greek": "Byzantine",
"Meerut": "Delhi-Meerut",
"Megarian": "Doric",
"Mekong Delta dialect": "Mekong Delta",
"Melkite Aramaic": "Christian Palestinian Aramaic",
"Memphitic": "Bohairic",
"Mesokemic": "Oxyrhynchite",
"Mfantse": "Fante",
"Midlands English": "Midlands",
"Minangkabau": "Minangkabau Malay",
"Minna Island": "Minna",
"Minna-jima": "Minna",
"Minnajima": "Minna",
"Mishnaic": "Mishnaic Hebrew",
"Mooring dialect": "Mooring",
"Moselfränkisch": "Moselle Franconian",
"Multicultural London English": "MLE",
"Mumbai": "Bombay",
"Münster": "Münsterland",
"Münsterländisch": "Münsterland",
"NEC Scots": "Northeast Central",
"NN Scots": "North Northern",
"NabA": "Nabataean",
"Nabataean Aramaic": "Nabataean",
"Namsadang": "traditional acrobats' cant",
"Neo-Hittite": "New Hittite",
"Neo-Sanskrit": "New Sanskrit",
"New Anglo-Latin": "British New Latin",
"New British": "British New Latin",
"New Zealand English": "New Zealand",
"Nghe An": "Nghệ An",
"Nghe An dialect": "Nghệ An",
"Nghe Tinh": "Nghệ Tĩnh",
"Nghe Tinh dialect": "Nghệ Tĩnh",
"Nghệ An dialect": "Nghệ An",
"Nghệ Tinh dialect": "Nghệ Tĩnh",
"Non-Oxford": "Non-Oxford British English",
"Non-Oxford British spelling": "Non-Oxford British English",
"Nopchinchi": "Noptinte",
"Nopthrinthre": "Noptinte",
"Nopṭinṭe": "Noptinte",
"Normal Mbugu": "Mbugu",
"North America": "Canada, US",
"North American": "Canada, US",
"North German": "Northern Germany",
"North Germany": "Northern Germany",
"North Korean": "North Korea",
"North ME": "Northern",
"North Midland US": "Midland US",
"North Northern Scots": "North Northern",
"North Scots": "Northern",
"North Vietnam": "Northern Vietnam",
"North Vietnamese": "Northern Vietnam",
"North and Central German": "northern and central Germany",
"North and Central Germany": "northern and central Germany",
"North-Central Vietnam": "North Central Vietnam",
"Northeast Central Scots": "Northeast Central",
"Northern Dutch": "Northern",
"Northern German": "Northern Germany",
"Northern Italian": "northern Italy",
"Northern Italy": "northern Italy",
"Northern ME": "Northern",
"Northern Middle English": "Northern",
"Northern Midland US": "Midland US",
"Northern Scots": "Northern",
"Northern Vietnamese": "Northern Vietnam",
"Northern and Central German": "northern and central Germany",
"Northern and Central Germany": "northern and central Germany",
"Nouchi": "Ivory Coast slang",
"Noussi": "Ivory Coast slang",
"Nunatsiavummiutut": "Inuttut",
"Nunavimmiutitut": "Nunavik",
"OffA": "Imperial Aramaic",
"Official Aramaic": "Imperial Aramaic",
"Ogami": "Ōgami",
"Ogami Island": "Ōgami",
"Ogami-jima": "Ōgami",
"Ogamijima": "Ōgami",
"Old": "Old Avestan",
"Old Gotlandic": "Old Gutnish",
"Old Hebrew": "Biblical Hebrew",
"Ollnborg": "Oldenburg",
"Oujiang": "Wenzhou",
"Oxford": "Oxford British English",
"Oxford British spelling": "Oxford British English",
"P'yŏng'an": "Pyongan",
"P'yŏngan": "Pyongan",
"PalA": "Palmyrene",
"Palatinate German": "Palatine",
"Palatine German": "Palatine",
"Palestinian Syriac": "Christian Palestinian Aramaic",
"Palmyrene Aramaic": "Palmyrene",
"Peking": "Beijing",
"Pekingese": "Beijing",
"Pfälzisch": "Palatine",
"Philhellene": "Hellenizing School",
"Philhellene School": "Hellenizing School",
"Phu Yen": "Phú Yên",
"Phu Yen dialect": "Phú Yên",
"Phú Yên dialect": "Phú Yên",
"Polar Eskimo": "Inuktun",
"Pomeranian LG": "Pomeranian",
"Pomeranian Low German": "Pomeranian",
"Potteries dialect": "Potteries",
"Poznan": "Poznań",
"Provencal": "Provençal",
"Psїsara": "Hirara",
"Pu-Xian": "Puxian Min",
"Pu-Xian Min": "Puxian Min",
"Puxian": "Puxian Min",
"Pyeong'an": "Pyongan",
"Pyeong-an": "Pyongan",
"Pyeongan": "Pyongan",
"Pyong'an": "Pyongan",
"Pyŏngan": "Pyongan",
"Pälzisch": "Palatine",
"Pїsara": "Hirara",
"Quang Binh": "Quảng Bình",
"Quang Binh dialect": "Quảng Bình",
"Quang Nam": "Quảng Nam",
"Quang Nam dialect": "Quảng Nam",
"Quang Ngai": "Quảng Ngãi",
"Quang Ngai dialect": "Quảng Ngãi",
"Quang Tri": "Quảng Trị",
"Quang Tri dialect": "Quảng Trị",
"Quanzhou dialect": "Quanzhou",
"Quảng Bình dialect": "Quảng Bình",
"Quảng Nam dialect": "Quảng Nam",
"Quảng Ngãi dialect": "Quảng Ngãi",
"Quảng Trị dialect": "Quảng Trị",
"RCM": "Rimella and Campello Monti",
"RLC": "Revived Late Cornish",
"Ralik": "Rālik",
"Rarha": "Rāṛha",
"Renaissance Anglo-Latin": "British Renaissance Latin",
"Renaissance British": "British Renaissance Latin",
"Rimella": "Rimella and Campello Monti",
"Ripuarian Franconian": "Ripuarian",
"Ripuarisch": "Ripuarian",
"Roman": "Rome",
"Roman Italian": "Rome",
"Romanesco": "Rome",
"Romanesco Italian": "Rome",
"Royal Thai": "royal",
"SEC Scots": "Southeast Central",
"SLDE": "Switzerland and Liechtenstein",
"SN Scots": "South Northern",
"SWC Scots": "Southwest Central",
"SWF": "Standard Written Form",
"Sa'idi": "Upper Egypt",
"Saidi": "Upper Egypt",
"Saigonese": "Saigon",
"Saingilo": "Ingilo",
"Sallaans": "Sallands",
"Salland": "Sallands",
"Salop": "Shropshire",
"Salopian": "Shropshire",
"Santoantão": "Santo Antão",
"Sarkese": "Sark",
"Saʽidi": "Upper Egypt",
"Schleswigsch": "Schleswig",
"Scottish English": "Scotland",
"Sercquiais": "Sark",
"Seven Communities": "Sette Comuni",
"Shrops": "Shropshire",
"Shropshire dialect": "Shropshire",
"Sichuan": "Sichuanese",
"Simmani": "ginseng-harvesters' cant",
"Singapore Hokkien": "Singaporean Hokkien",
"South African English": "South Africa",
"South German": "Southern German",
"South Hesse": "South Hessian",
"South Hessen": "South Hessian",
"South Korean": "South Korea",
"South ME": "Southern",
"South Midland US": "Midland US",
"South Northern Scots": "South Northern",
"South Russia": "Southern Russia",
"South Sweden": "Southern",
"South Swedish": "Southern",
"South Vietnam": "Southern Vietnam",
"South Vietnamese": "Southern Vietnam",
"South of Russia": "Southern Russia",
"Southeast Central Scots": "Southeast Central",
"Southern American English": "Southern US",
"Southern Dutch": "Southern",
"Southern England English": "Southern England",
"Southern English": "Southern England",
"Southern English dialect": "Southern England",
"Southern Italian": "southern Italy",
"Southern Italy": "southern Italy",
"Southern ME": "Southern",
"Southern Middle English": "Southern",
"Southern Midland US": "Midland US",
"Southern Russian": "Southern Russia",
"Southern Scots": "South Scots",
"Southern Sweden": "Southern",
"Southern Swedish": "Southern",
"Southern US English": "Southern US",
"Southern Vietnamese": "Southern Vietnam",
"Southwest Central Scots": "Southwest Central",
"Southwest ME": "Southern",
"Southwest Mandarin": "Southwestern Mandarin",
"Stellingwerf": "Stellingwerfs",
"Subakhmimic": "Lycopolitan",
"Swiss German": "Switzerland",
"Switzerland and Liechtenstein spelling": "Switzerland and Liechtenstein",
"Syropalestinian": "Christian Palestinian Aramaic",
"Syropalestinian Aramaic": "Christian Palestinian Aramaic",
"Südhessisch": "South Hessian",
"TAO": "traditional orthography",
"Taihu": "Northern Wu",
"Taihu Wu": "Northern Wu",
"Taiwan Hakka": "Taiwanese Hakka",
"Taiwan Hakka & Hokkien": "Taiwanese Hokkien and Hakka",
"Taiwan Hakka & Min Nan": "Taiwanese Hokkien and Hakka",
"Taiwan Hakka and Hokkien": "Taiwanese Hokkien and Hakka",
"Taiwan Hakka and Min Nan": "Taiwanese Hokkien and Hakka",
"Taiwan Hokkien": "Taiwanese Hokkien",
"Taiwan Hokkien & Hakka": "Taiwanese Hokkien and Hakka",
"Taiwan Hokkien and Hakka": "Taiwanese Hokkien and Hakka",
"Taiwan Mandarin": "Taiwanese Mandarin",
"Taiwan Min Nan": "Taiwanese Hokkien",
"Taiwan Min Nan & Hakka": "Taiwanese Hokkien and Hakka",
"Taiwanese Hakka & Hokkien": "Taiwanese Hokkien and Hakka",
"Taiwanese Hakka & Min Nan": "Taiwanese Hokkien and Hakka",
"Taiwanese Hakka and Hokkien": "Taiwanese Hokkien and Hakka",
"Taiwanese Hakka and Min Nan": "Taiwanese Hokkien and Hakka",
"Taiwanese Hokkien & Hakka": "Taiwanese Hokkien and Hakka",
"Taiwanese Min Nan": "Taiwanese Hokkien",
"Taiwanese Min Nan & Hakka": "Taiwanese Hokkien and Hakka",
"Taiwanese Min Nan and Hakka": "Taiwanese Hokkien and Hakka",
"Talmudic Aramaic": "Jewish Babylonian Aramaic",
"Tanjore": "Thanjavur",
"Taraskievica": "Taraškievica",
"Targumic Aramaic": "Jewish Literary Aramaic",
"Terengganu": "Terengganu Malay",
"Thanh Hoa": "Thanh Hoá",
"Thanh Hoa dialect": "Thanh Hoá",
"Thanh Hoá dialect": "Thanh Hoá",
"Thebaic": "Sahidic",
"Thirteen Communities": "Tredici Comuni",
"Tianjin Mandarin": "Tianjin",
"Tianjin dialect": "Tianjin",
"Tianjinese": "Tianjin",
"Toishanese": "Taishanese",
"Topra": "Delhi-Topra",
"Tulamne": "Tulamni",
"Tuscan": "Tuscany",
"Tweants": "Twents",
"Twente": "Twents",
"U.S. English": "US",
"UC": "Unified Cornish",
"UCR": "Unified Cornish Revised",
"US English": "US",
"US Spanish": "US",
"Ukrainian": "Ukraine",
"Ullans": "Ulster Scots",
"United States Spanish": "US",
"Upper Egyptian": "Upper Egypt",
"Upper Saxony": "Upper Saxon",
"Upper Yangtze Mandarin": "Southwestern Mandarin",
"Valencian": "Valencia",
"Vanga": "Vaṅga",
"Vannes": "Gwened",
"Vannetais": "Gwened",
"Veluwe": "Veluws",
"Vulgar Anglo-Latin": "British Vulgar Latin",
"Vulgar British": "British Vulgar Latin",
"WC Scots": "West Central",
"Walserdeutsch": "Walser",
"Waser German": "Walser",
"Wenzhou Wu": "Wenzhou",
"Wenzhounese": "Wenzhou",
"West Arabian": "Hejazi",
"West Central Scots": "West Central",
"West Country English": "West Country",
"West Country dialect": "West Country",
"West Midland ME": "West Midlands",
"West Midland Middle English": "West Midlands",
"West Midlands English": "West Midlands",
"West Midlands ME": "West Midlands",
"West Midlands dialect": "West Midlands",
"West Norse": "Old West Norse",
"West Palatine": "Westpfälzisch",
"West Pomeranian LG": "Western Pomeranian",
"West Pomeranian Low German": "Western Pomeranian",
"Western Pomeranian LG": "Western Pomeranian",
"Western Pomeranian Low German": "Western Pomeranian",
"Westphalia": "Westphalian",
"Wielkopolska": "Greater Poland",
"Wikchamni": "Wukchumni",
"Wood Frisian": "Wood",
"Wuhan dialect": "Wuhan",
"Wukchamni": "Wukchumni",
"Xaladitka": "North Russia",
"Xiamen dialect": "Xiamen",
"Xinghua": "Puxian Min",
"Yachikumne": "Chulamni",
"Yachikumni": "Chulamni",
"Ye'kwana": "Ye'kwana dialect",
"Young": "Young Avestan",
"Younger": "Young Avestan",
"Younger Avestan": "Young Avestan",
"Zhangzhou dialect": "Zhangzhou",
"Zhongyuan Mandarin": "Central Plains Mandarin",
"ante-classical": "Old Latin",
"central Assam": "Central Assam",
"central Assamese": "Central Assam",
"central German": "central Germany",
"central Italian": "central Italy",
"central Vietnam": "Central Vietnam",
"central Vietnamese": "Central Vietnam",
"cretan": "Cretan",
"daytshmerish": "Daytshmerish",
"early ME": "Early Middle English",
"ebonics": "African-American Vernacular",
"hbo": "Biblical Hebrew",
"hellenizing": "Hellenizing School",
"hellenizing school": "Hellenizing School",
"hellenophile": "Hellenizing School",
"hellenophile school": "Hellenizing School",
"hunaban": "Hellenizing School",
"inner Mbugu": "Ma'a",
"late ME": "Late Middle English",
"later": "Later Sanskrit",
"later Sanskrit": "Later Sanskrit",
"later-Sanskrit": "Later Sanskrit",
"maniot": "Maniot",
"non-Oxford": "Non-Oxford British English",
"non-Oxford British spelling": "Non-Oxford British English",
"normal Mbugu": "Mbugu",
"north German": "Northern Germany",
"north Germany": "Northern Germany",
"north Vietnam": "Northern Vietnam",
"north Vietnamese": "Northern Vietnam",
"north and central German": "northern and central Germany",
"north and central Germany": "northern and central Germany",
"northeastern Mandarin": "Northeastern Mandarin",
"northern German": "Northern Germany",
"northern Germany": "Northern Germany",
"northern Italian": "northern Italy",
"northern Vietnam": "Northern Vietnam",
"northern Vietnamese": "Northern Vietnam",
"northern and central German": "northern and central Germany",
"nouchi": "Ivory Coast slang",
"noussi": "Ivory Coast slang",
"overseas Vietnamese": "Overseas Vietnamese",
"philhellene": "Hellenizing School",
"philhellene school": "Hellenizing School",
"prs": "Dari",
"south German": "Southern German",
"south Hesse": "South Hessian",
"south Hessen": "South Hessian",
"south Hessian": "South Hessian",
"south Sweden": "Southern",
"south Swedish": "Southern",
"south Vietnam": "Southern Vietnam",
"south Vietnamese": "Southern Vietnam",
"south of Russia": "Southern Russia",
"southern German": "Southern German",
"southern Italian": "southern Italy",
"southern Russia": "Southern Russia",
"southern Russian": "Southern Russia",
"southern Sweden": "Southern",
"southern Swedish": "Southern",
"southern Vietnam": "Southern Vietnam",
"southern Vietnamese": "Southern Vietnam",
"southwestern Mandarin": "Southwestern Mandarin",
"wae": "Walser",
"yunaban": "Hellenizing School",
"Čakavian": "Chakavian",
"Ōgami Island": "Ōgami",
"Ōgami-jima": "Ōgami",
"Ōgamijima": "Ōgami",
} # 648
# END
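
# --- Illustrative usage (not part of the auto-generated data above) ---
# A minimal sketch, assuming these tables are used for alias resolution: map a
# label alias to its canonical form, falling back to the input when no alias
# matches. The helper name `resolve_label` is hypothetical and is not produced
# by the generator.
def resolve_label(label: str) -> str:
    for table in (labels, labels_topical, labels_regional, labels_subvarieties):
        if label in table:
            return table[label]
    return label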
|
py | b401223645cbb552a03f5e3f50ac02b94fb43f19 | #!/usr/bin/env python3

import shutil
import tempfile
import unittest
from collections import Counter, defaultdict
from os import path

from pytorch_translate.research.test import morphology_test_utils as morph_utils
from pytorch_translate.research.unsupervised_morphology.ibm_model1 import IBMModel1


class TestIBMModel1(unittest.TestCase):
    def test_str2int(self):
        ibm_model = IBMModel1()
        # Calling multiple times to make sure we get the same value.
        assert ibm_model.str2int("hello") == 1
        assert ibm_model.str2int("bye") == 2
        assert ibm_model.str2int("hello") == 1
        assert ibm_model.str2int("bye") == 2
        assert len(ibm_model._str2int) == 3
        assert len(ibm_model._int2str) == 3
        assert ibm_model._int2str == [ibm_model.null_str, "hello", "bye"]
        assert ibm_model.int2str(2) == "bye"

    def test_morph_init(self):
        ibm_model = IBMModel1()
        tmp_dir, f1, f2 = morph_utils.get_two_same_tmp_files()
        ibm_model.initialize_translation_probs(f1, f2)
        assert len(ibm_model.translation_prob) == 10
        assert (
            len(ibm_model.translation_prob[ibm_model.str2int(ibm_model.null_str)]) == 9
        )
        assert len(ibm_model.translation_prob[ibm_model.str2int("345")]) == 6
        assert (
            ibm_model.translation_prob[ibm_model.str2int("122")][
                ibm_model.str2int("123")
            ]
            == 1.0 / 4
        )
        shutil.rmtree(tmp_dir)

    def test_expectation_for_one_sentence(self):
        ibm_model = IBMModel1()
        tmp_dir, f1, f2 = morph_utils.get_two_same_tmp_files()
        ibm_model.initialize_translation_probs(f1, f2)
        translation_counts = defaultdict(lambda: defaultdict(float))
        ibm_model.expectation_for_one_sentence(
            Counter(
                ibm_model.str2int(w)
                for w in ["123", "124", "234", "345", ibm_model.null_str]
            ),
            Counter(ibm_model.str2int(w) for w in ["123", "124", "234", "345"]),
            translation_counts,
        )
        assert (
            round(
                translation_counts[ibm_model.str2int("123")][ibm_model.str2int("345")],
                3,
            )
            == 0.176
        )
        shutil.rmtree(tmp_dir)

    def test_ibm_train(self):
        ibm_model = IBMModel1()
        tmp_dir, f1, f2 = morph_utils.get_two_same_tmp_files()
        ibm_model.learn_ibm_parameters(src_path=f1, dst_path=f2, num_iters=3)
        assert (
            ibm_model.translation_prob[ibm_model.str2int("456789")][
                ibm_model.str2int("345")
            ]
            == 0
        )
        assert (
            ibm_model.translation_prob[ibm_model.str2int("456789")][
                ibm_model.str2int("456789")
            ]
            == 0.5
        )
        shutil.rmtree(tmp_dir)
|
py | b4012474126b100d7fed278f4bb9618d7b71bebf | from lin.model import User as LinUser
from lin.model import db, func, manager
class User(LinUser):
def _set_fields(self):
self._exclude = ["delete_time", "create_time", "update_time"]
@classmethod
def count_by_username(cls, username) -> int:
result = db.session.query(func.count(cls.id)).filter(
cls.username == username, cls.delete_time == None
)
count = result.scalar()
return count
@classmethod
def count_by_email(cls, email) -> int:
result = db.session.query(func.count(cls.id)).filter(
cls.email == email, cls.delete_time == None
)
count = result.scalar()
return count
@classmethod
def select_page_by_group_id(cls, group_id, root_group_id) -> list:
"""通过分组id分页获取用户数据"""
query = db.session.query(manager.user_group_model.user_id).filter(
manager.user_group_model.group_id == group_id,
manager.user_group_model.group_id != root_group_id,
)
result = cls.query.filter_by(soft=True).filter(cls.id.in_(query))
users = result.all()
return users
def reset_password(self, new_password):
self.password = new_password
def change_password(self, old_password, new_password):
if self.check_password(old_password):
self.password = new_password
return True
return False
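# --- Hypothetical usage sketch (not part of the original module). It assumes an
# initialised Lin/Flask application context; the username and passwords below
# are placeholders.
#
#     if User.count_by_username("demo_user") == 0:
#         ...  # username is free, proceed with registration
#     user = User.query.filter_by(username="demo_user").first()
#     if user and user.change_password("old-secret", "new-secret"):
#         db.session.commit()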
|
py | b4012493412e07eb1252faa88ad3f47eb9bdd402 | import os
from dataclasses import dataclass
from functools import lru_cache
from pydantic import BaseSettings, Field
# Hostname to fetch STAC information from
STAC_API_URL_ENV_VAR = "STAC_API_URL"
# HREF base to be used when sending responses
STAC_API_HREF_ENV_VAR = "STAC_API_HREF"
DEFAULT_MAX_ITEMS_PER_TILE_ENV_VAR = "DEFAULT_MAX_ITEMS_PER_TILE"
@dataclass
class FeatureFlags:
VRT: bool = True if os.environ.get("FF_VRT") else False
class Settings(BaseSettings):
stac_api_url: str = os.environ[STAC_API_URL_ENV_VAR]
stac_api_href: str = os.environ[STAC_API_HREF_ENV_VAR]
title: str = "Preview of Tile Access Services"
openapi_url: str = "/openapi.json"
item_endpoint_prefix: str = "/item"
mosaic_endpoint_prefix: str = "/mosaic"
legend_endpoint_prefix: str = "/legend"
debug: bool = os.getenv("TILER_DEBUG", "False").lower() == "true"
api_version: str = "1.0"
default_max_items_per_tile: int = Field(
env=DEFAULT_MAX_ITEMS_PER_TILE_ENV_VAR, default=10
)
feature_flags: FeatureFlags = FeatureFlags()
@lru_cache
def get_settings() -> Settings:
return Settings()
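# Minimal usage sketch (not part of the original module). Running it assumes the
# STAC_API_URL and STAC_API_HREF environment variables are set, as the Settings
# definition above requires.
if __name__ == "__main__":
    settings = get_settings()
    # lru_cache means repeated calls return the same cached Settings instance.
    assert get_settings() is settings
    print(settings.stac_api_url, settings.default_max_items_per_tile)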
|
py | b40124aff45c3927c738e8b30286add35c273135 | import pygame
from pygame.sprite import Group
from settings import Settings
from game_stats import GameStats
from scoreboard import Scoreboard
from button import Button
from ship import Ship
import game_functions as gf
def run_game():
# Initialize game
pygame.init()
# Initialize settings
ai_settings = Settings()
# Build the screen
screen = pygame.display.set_mode(
(ai_settings.screen_width, ai_settings.screen_height))
pygame.display.set_caption("Alien Invasion")
# Make Play button
play_button = Button(ai_settings, screen, "PLAY")
# Store game stats, create scoreboard
stats = GameStats(ai_settings)
sb = Scoreboard(ai_settings, screen, stats)
# Make a ship
ship = Ship(ai_settings, screen)
# Make a group to store bullets
bullets = Group()
# Make a group to store aliens
aliens = Group()
# Create the fleet of aliens
gf.create_fleet(ai_settings, screen, ship, aliens)
# Main game loop
while True:
# Check for player input
gf.check_events(ai_settings, screen, stats, sb,
play_button, ship, aliens, bullets)
if stats.game_active:
# Update element positions
ship.update()
gf.update_bullets(ai_settings, screen, stats, sb,
ship, aliens, bullets)
gf.update_aliens(ai_settings, screen, stats, sb,
ship, aliens, bullets)
# Draw screen
gf.update_screen(ai_settings, screen, stats, sb, ship,
aliens, bullets, play_button)
run_game() |
py | b40125d9a4f817e0a6cf52668aacb73b8568e512 | import keras
from utils.query import con, cursor
class Progress(keras.callbacks.Callback):
""" This class serves to keep track of the current training job's progress """
def __init__(self, job_id, steps_per_epoch, num_epochs):
super(keras.callbacks.Callback, self).__init__()
self.steps_per_epoch = steps_per_epoch
self.max_epoch = num_epochs
self.job_id = job_id
self.curr_epoch = 0
self.table_name = 'training_progress'
self.connection = con
self.cursor = cursor
def on_train_begin(self, logs={}):
self.cursor.execute(
f"""UPDATE
{self.table_name}
SET
job_id=%s,
status=1,
curr_epoch=0,
max_epoch=%s,
curr_batch=0,
steps_per_epoch=%s,
stop_flag=False
""",
(self.job_id, self.max_epoch, self.steps_per_epoch))
self.connection.commit()
def on_train_end(self, logs={}):
# Status level 2 tells the frontend that we're done training.
self.cursor.execute(
f"""UPDATE {self.table_name} SET status = 2""")
self.connection.commit()
def on_epoch_begin(self, epoch, logs={}):
self.cursor.execute(f"""UPDATE {self.table_name} SET curr_epoch = %s""",
(epoch,))
self.connection.commit()
def on_epoch_end(self, epoch, logs={}):
self.curr_epoch = epoch
def on_batch_begin(self, batch, logs={}):
self.curr_batch = batch
self.cursor.execute(
f"""UPDATE
{self.table_name}
SET
curr_batch = %s""",
(batch,))
self.connection.commit()
def on_batch_end(self, batch, logs={}):
self.cursor.execute(
f"""SELECT stop_flag FROM {self.table_name}""")
flag = self.cursor.fetchone()[0]
# If the stop flag is set, someone has requested that we end the training early.
if flag:
print("ending training early")
self.model.stop_training = True
return
# This simulates a small training job
if __name__ == '__main__':
steps_per_epoch = 100
num_epochs = 3
progress = Progress(
job_id=1234, steps_per_epoch=steps_per_epoch, num_epochs=num_epochs)
progress.on_train_begin()
for epoch in range(num_epochs):
progress.on_epoch_begin(epoch)
for batch in range(steps_per_epoch):
progress.on_batch_begin(batch)
progress.on_batch_end(batch)
progress.on_epoch_end(epoch)
progress.on_train_end() |
py | b40128c8350f4e5ca8aa1a5cd39a02e4f0ba0de8 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-02-15 21:15
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('web', '0003_word2vecfile'),
]
operations = [
migrations.CreateModel(
name='UDPipeFile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('model', models.FileField(upload_to='')),
],
),
]
|
py | b40128f459b4456437c57a15963fad17a721ea14 | # Synchronization module for flow.polar.com
# (c) 2018 Anton Ashmarin, [email protected]
from tapiriik.settings import WEB_ROOT, POLAR_CLIENT_SECRET, POLAR_CLIENT_ID, POLAR_RATE_LIMITS
from tapiriik.services.service_base import ServiceAuthenticationType, ServiceBase
from tapiriik.services.api import APIException, UserException, UserExceptionType
from tapiriik.services.interchange import UploadedActivity, ActivityType, ActivityStatistic, ActivityStatisticUnit, SourceFile, ActivityFileType
from tapiriik.services.tcx import TCXIO
from datetime import datetime, timedelta
from django.core.urlresolvers import reverse
from urllib.parse import urlencode
from requests.auth import HTTPBasicAuth
from io import StringIO
import uuid
import gzip
import logging
import lxml
import pytz
import requests
import isodate
logger = logging.getLogger(__name__)
class PolarFlowService(ServiceBase):
ID = "polarflow"
DisplayName = "Polar Flow"
DisplayAbbreviation = "PF"
AuthenticationType = ServiceAuthenticationType.OAuth
AuthenticationNoFrame = True # otherwise looks ugly in the small frame
UserProfileURL = "https://flow.polar.com/training/profiles/{0}"
UserActivityURL = "https://flow.polar.com/training/analysis/{1}"
SupportsHR = SupportsCalories = SupportsCadence = SupportsTemp = SupportsPower = True
ReceivesActivities = False # Polar AccessLink does not support modifying Polar data, so uploads are not supported.
GlobalRateLimits = POLAR_RATE_LIMITS
PartialSyncRequiresTrigger = True
PartialSyncTriggerPollInterval = timedelta(minutes=1)
# For mapping common -> Polar Flow (the text values carry no meaning since upload is unsupported)
_activity_type_mappings = {
ActivityType.Cycling: "Ride",
ActivityType.MountainBiking: "Ride",
ActivityType.Hiking: "Hike",
ActivityType.Running: "Run",
ActivityType.Walking: "Walk",
ActivityType.Snowboarding: "Snowboard",
ActivityType.Skating: "IceSkate",
ActivityType.CrossCountrySkiing: "NordicSki",
ActivityType.DownhillSkiing: "AlpineSki",
ActivityType.Swimming: "Swim",
ActivityType.Gym: "Workout",
ActivityType.Rowing: "Rowing",
ActivityType.RollerSkiing: "RollerSki",
ActivityType.StrengthTraining: "WeightTraining",
ActivityType.Climbing: "RockClimbing",
ActivityType.Wheelchair: "Wheelchair",
ActivityType.Other: "Other",
}
# Polar Flow -> common
_reverse_activity_type_mappings = {
"RUNNING": ActivityType.Running,
"JOGGING": ActivityType.Running,
"ROAD_RUNNING": ActivityType.Running,
"TRACK_AND_FIELD_RUNNING": ActivityType.Running,
"TRAIL_RUNNING": ActivityType.Running,
"TREADMILL_RUNNING": ActivityType.Running,
"CYCLING": ActivityType.Cycling,
"ROAD_BIKING": ActivityType.Cycling,
"INDOOR_CYCLING": ActivityType.Cycling,
"MOUNTAIN_BIKING": ActivityType.MountainBiking,
"WALKING": ActivityType.Walking,
"HIKING": ActivityType.Hiking,
"DOWNHILL_SKIING": ActivityType.DownhillSkiing,
"CROSS-COUNTRY_SKIING": ActivityType.CrossCountrySkiing,
"SNOWBOARDING": ActivityType.Snowboarding,
"SKATING": ActivityType.Skating,
"SWIMMING": ActivityType.Swimming,
"OPEN_WATER_SWIMMING": ActivityType.Swimming,
"POOL_SWIMMING": ActivityType.Swimming,
"PARASPORTS_WHEELCHAIR": ActivityType.Wheelchair,
"ROWING": ActivityType.Rowing,
"INDOOR_ROWING": ActivityType.Rowing,
"STRENGTH_TRAINING": ActivityType.StrengthTraining,
"OTHER_INDOOR": ActivityType.Other,
"OTHER_OUTDOOR": ActivityType.Other,
"ROLLER_SKIING_CLASSIC": ActivityType.RollerSkiing,
"ROLLER_SKIING_FREESTYLE": ActivityType.RollerSkiing,
# not supported somehow
#"": ActivityType.Elliptical,
"FUNCTIONAL_TRAINING": ActivityType.Gym,
"CORE": ActivityType.Gym,
"GROUP_EXERCISE": ActivityType.Gym,
"PILATES": ActivityType.Gym,
"YOGA": ActivityType.Gym,
"VERTICALSPORTS_WALLCLIMBING": ActivityType.Climbing,
}
SupportedActivities = list(_activity_type_mappings.keys())
_api_endpoint = "https://www.polaraccesslink.com"
def _register_user(self, access_token):
headers = {
"Content-Type": "application/json",
"Accept": "application/json",
"Authorization": "Bearer {}".format(access_token)
}
res = requests.post(self._api_endpoint + "/v3/users",
json={"member-id": uuid.uuid4().hex},
headers=headers)
return res.status_code == 200
def _delete_user(self, serviceRecord):
res = requests.delete(self._api_endpoint + "/v3/users/{userid}".format(userid=serviceRecord.ExternalID),
headers=self._api_headers(serviceRecord))
def _create_transaction(self, serviceRecord):
# A transaction contains at most 50 items and lasts 10 minutes; after that, the data within its scope is no longer accessible.
# Ideally the worker downloads all data in that window; if it cannot, it is still not a problem - we stop downloading, skip the exception,
# and fetch the missed data later within another transaction.
res = requests.post(self._api_endpoint +
"/v3/users/{userid}/exercise-transactions".format(userid=serviceRecord.ExternalID),
headers=self._api_headers(serviceRecord))
# No new training data status_code=204
if res.status_code == 401:
# TODO: why could this happen?
logger.debug("No authorization to create transaction")
raise APIException("No authorization to create transaction", block=True, user_exception=UserException(UserExceptionType.Authorization, intervention_required=True))
transaction_uri = res.json()["resource-uri"] if res.status_code == 201 else None
serviceRecord.ServiceData = {"Transaction-uri": transaction_uri}
return transaction_uri
def _commit_transaction(self, serviceRecord):
if hasattr(serviceRecord, "ServiceData"):
transaction_uri = serviceRecord.ServiceData["Transaction-uri"]
if transaction_uri:
res = requests.put(transaction_uri, headers=self._api_headers(serviceRecord))
# TODO: should we handle the response code?
# 200 OK Transaction has been committed and data deleted None
# 204 No Content No content when there is no data available None
# 404 Not Found No transaction was found with given transaction id None
def _api_headers(self, serviceRecord, headers={}):
headers.update({"Authorization": "Bearer {}".format(serviceRecord.Authorization["OAuthToken"])})
return headers
def WebInit(self):
params = {'response_type':'code',
'client_id': POLAR_CLIENT_ID,
'redirect_uri': WEB_ROOT + reverse("oauth_return", kwargs={"service": "polarflow"})}
self.UserAuthorizationURL = "https://flow.polar.com/oauth2/authorization?" + urlencode(params)
def RetrieveAuthorizationToken(self, req, level):
code = req.GET.get("code")
params = {"grant_type": "authorization_code",
"code": code,
"redirect_uri": WEB_ROOT + reverse("oauth_return", kwargs={"service": "polarflow"})}
response = requests.post("https://polarremote.com/v2/oauth2/token", data=params, auth=HTTPBasicAuth(POLAR_CLIENT_ID, POLAR_CLIENT_SECRET))
data = response.json()
if response.status_code != 200:
raise APIException(data["error"])
authorizationData = {"OAuthToken": data["access_token"]}
userId = data["x_user_id"]
try:
self._register_user(data["access_token"])
except requests.exceptions.HTTPError as err:
# Error 409 Conflict means that the user has already been registered for this client.
# That error can be ignored
if err.response.status_code != 409:
raise APIException("Unable to link user", block=True, user_exception=UserException(UserExceptionType.Authorization, intervention_required=True))
return (userId, authorizationData)
def RevokeAuthorization(self, serviceRecord):
self._delete_user(serviceRecord)
def SubscribeToPartialSyncTrigger(self, serviceRecord):
# There is no per-user webhook subscription with Polar Flow.
serviceRecord.SetPartialSyncTriggerSubscriptionState(True)
def UnsubscribeFromPartialSyncTrigger(self, serviceRecord):
# As above.
serviceRecord.SetPartialSyncTriggerSubscriptionState(False)
def PollPartialSyncTrigger(self, multiple_index):
response = requests.get(self._api_endpoint + "/v3/notifications", auth=HTTPBasicAuth(POLAR_CLIENT_ID, POLAR_CLIENT_SECRET))
to_sync_ids = []
if response.status_code == 200:
for item in response.json()["available-user-data"]:
if item["data-type"] == "EXERCISE":
to_sync_ids.append(item["user-id"])
return to_sync_ids
def DownloadActivityList(self, serviceRecord, exhaustive=False):
activities = []
exclusions = []
transaction_url = self._create_transaction(serviceRecord)
if transaction_url:
res = requests.get(transaction_url, headers=self._api_headers(serviceRecord))
if res.status_code == 200: # otherwise no new data, skip
for activity_url in res.json()["exercises"]:
data = requests.get(activity_url, headers=self._api_headers(serviceRecord))
if data.status_code == 200:
activity = self._create_activity(data.json())
activities.append(activity)
else:
# It may simply have been deleted; skip it.
logger.debug("Cannot receive training at url: {}".format(activity_url))
return activities, exclusions
def _create_activity(self, activity_data):
activity = UploadedActivity()
activity.GPS = activity_data["has-route"]
if "detailed-sport-info" in activity_data and activity_data["detailed-sport-info"] in self._reverse_activity_type_mappings:
activity.Type = self._reverse_activity_type_mappings[activity_data["detailed-sport-info"]]
else:
activity.Type = ActivityType.Other
activity.StartTime = pytz.utc.localize(isodate.parse_datetime(activity_data["start-time"]))
activity.EndTime = activity.StartTime + isodate.parse_duration(activity_data["duration"])
distance = activity_data["distance"] if "distance" in activity_data else None
activity.Stats.Distance = ActivityStatistic(ActivityStatisticUnit.Meters, value=float(distance) if distance else None)
hr_data = activity_data["heart-rate"] if "heart-rate" in activity_data else None
avg_hr = hr_data["average"] if "average" in hr_data else None
max_hr = hr_data["maximum"] if "maximum" in hr_data else None
activity.Stats.HR.update(ActivityStatistic(ActivityStatisticUnit.BeatsPerMinute, avg=float(avg_hr) if avg_hr else None, max=float(max_hr) if max_hr else None))
calories = activity_data["calories"] if "calories" in activity_data else None
activity.Stats.Energy = ActivityStatistic(ActivityStatisticUnit.Kilocalories, value=int(calories) if calories else None)
activity.ServiceData = {"ActivityID": activity_data["id"]}
logger.debug("\tActivity s/t {}: {}".format(activity.StartTime, activity.Type))
activity.CalculateUID()
return activity
def DownloadActivity(self, serviceRecord, activity):
# NOTE: per the API docs the TCX is supposed to be gzipped, but in practice it is not
# https://www.polar.com/accesslink-api/?python#get-tcx
#tcx_data_raw = requests.get(activity_link + "/tcx", headers=self._api_headers(serviceRecord))
#tcx_data = gzip.GzipFile(fileobj=StringIO(tcx_data_raw)).read()
tcx_url = serviceRecord.ServiceData["Transaction-uri"] + "/exercises/{}/tcx".format(activity.ServiceData["ActivityID"])
response = requests.get(tcx_url, headers=self._api_headers(serviceRecord, {"Accept": "application/vnd.garmin.tcx+xml"}))
if response.status_code == 404:
# The transaction was disbanded; all data linked to it will be returned in the next transaction
raise APIException("Transaction disbanded", user_exception=UserException(UserExceptionType.DownloadError))
try:
tcx_data = response.text
activity = TCXIO.Parse(tcx_data.encode('utf-8'), activity)
activity.SourceFile = SourceFile(tcx_data, ActivityFileType.TCX)
except lxml.etree.XMLSyntaxError:
raise APIException("Cannot recieve training tcx at url: {}".format(tcx_url), user_exception=UserException(UserExceptionType.DownloadError))
return activity
def SynchronizationComplete(self, serviceRecord):
# The transaction must be committed to make the next batch of data accessible
self._commit_transaction(serviceRecord)
def DeleteCachedData(self, serviceRecord):
# Nothing to delete
pass
def DeleteActivity(self, serviceRecord, uploadId):
# Not supported
pass
def UploadActivity(self, serviceRecord, activity):
# Not supported
pass
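# --- Hypothetical lookup sketch (not part of the original service). It mirrors
# the fallback used in _create_activity when mapping Polar's
# "detailed-sport-info" values onto tapiriik activity types; the sample value is
# illustrative only.
#
#     sport = "TRAIL_RUNNING"
#     mapped = PolarFlowService._reverse_activity_type_mappings.get(
#         sport, ActivityType.Other)
#     # mapped is ActivityType.Running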
|
py | b40128f947a5475d04748ee5c29bb3958da86e83 | """
Utilities for working with the local dataset cache.
This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
Copyright by the AllenNLP authors.
"""
import os
import logging
import shutil
import tempfile
import json
from urllib.parse import urlparse
from pathlib import Path
from typing import Optional, Tuple, Union, IO, Callable, Set
from hashlib import sha256
from functools import wraps
from tqdm import tqdm
import boto3
from botocore.exceptions import ClientError
import requests
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
PYTORCH_PRETRAINED_BERT_CACHE = Path(os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',
Path.home() / '.pytorch_pretrained_bert'))
def url_to_filename(url: str, etag: str = None) -> str:
"""
Convert `url` into a hashed filename in a repeatable way.
If `etag` is specified, append its hash to the url's, delimited
by a period.
"""
url_bytes = url.encode('utf-8')
url_hash = sha256(url_bytes)
filename = url_hash.hexdigest()
if etag:
etag_bytes = etag.encode('utf-8')
etag_hash = sha256(etag_bytes)
filename += '.' + etag_hash.hexdigest()
return filename
def filename_to_url(filename: str, cache_dir: Union[str, Path] = None) -> Tuple[str, str]:
"""
Return the url and etag (which may be ``None``) stored for `filename`.
Raise ``FileNotFoundError`` if `filename` or its stored metadata do not exist.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
raise FileNotFoundError("file {} not found".format(cache_path))
meta_path = cache_path + '.json'
if not os.path.exists(meta_path):
raise FileNotFoundError("file {} not found".format(meta_path))
with open(meta_path) as meta_file:
metadata = json.load(meta_file)
url = metadata['url']
etag = metadata['etag']
return url, etag
def cached_path(url_or_filename: Union[str, Path], cache_dir: Union[str, Path] = None) -> str:
"""
Given something that might be a URL (or might be a local path),
determine which. If it's a URL, download the file and cache it, and
return the path to the cached file. If it's already a local path,
make sure the file exists and then return the path.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if isinstance(url_or_filename, Path):
url_or_filename = str(url_or_filename)
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
parsed = urlparse(url_or_filename)
if parsed.scheme in ('http', 'https', 's3'):
# URL, so get it from the cache (downloading if necessary)
return get_from_cache(url_or_filename, cache_dir)
elif os.path.exists(url_or_filename):
# File, and it exists.
return url_or_filename
elif parsed.scheme == '':
# File, but it doesn't exist.
raise FileNotFoundError("file {} not found".format(url_or_filename))
else:
# Something unknown
raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
def split_s3_path(url: str) -> Tuple[str, str]:
"""Split a full s3 path into the bucket name and path."""
parsed = urlparse(url)
if not parsed.netloc or not parsed.path:
raise ValueError("bad s3 path {}".format(url))
bucket_name = parsed.netloc
s3_path = parsed.path
# Remove '/' at beginning of path.
if s3_path.startswith("/"):
s3_path = s3_path[1:]
return bucket_name, s3_path
def s3_request(func: Callable):
"""
Wrapper function for s3 requests in order to create more helpful error
messages.
"""
@wraps(func)
def wrapper(url: str, *args, **kwargs):
try:
return func(url, *args, **kwargs)
except ClientError as exc:
if int(exc.response["Error"]["Code"]) == 404:
raise FileNotFoundError("file {} not found".format(url))
else:
raise
return wrapper
@s3_request
def s3_etag(url: str) -> Optional[str]:
"""Check ETag on S3 object."""
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_object = s3_resource.Object(bucket_name, s3_path)
return s3_object.e_tag
@s3_request
def s3_get(url: str, temp_file: IO) -> None:
"""Pull a file directly from S3."""
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
def http_get(url: str, temp_file: IO) -> None:
req = requests.get(url, stream=True)
content_length = req.headers.get('Content-Length')
total = int(content_length) if content_length is not None else None
progress = tqdm(unit="B", total=total)
for chunk in req.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
progress.update(len(chunk))
temp_file.write(chunk)
progress.close()
def get_from_cache(url: str, cache_dir: Union[str, Path] = None) -> str:
"""
Given a URL, look for the corresponding dataset in the local cache.
If it's not there, download it. Then return the path to the cached file.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
os.makedirs(cache_dir, exist_ok=True)
# Get eTag to add to filename, if it exists.
if url.startswith("s3://"):
etag = s3_etag(url)
else:
response = requests.head(url, allow_redirects=True)
if response.status_code != 200:
raise IOError("HEAD request failed for url {} with status code {}"
.format(url, response.status_code))
etag = response.headers.get("ETag")
filename = url_to_filename(url, etag)
# get cache path to put the file
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with tempfile.NamedTemporaryFile() as temp_file:
logger.info("%s not found in cache, downloading to %s", url, temp_file.name)
# GET file object
if url.startswith("s3://"):
s3_get(url, temp_file)
else:
http_get(url, temp_file)
# we are copying the file before closing it, so flush to avoid truncation
temp_file.flush()
# shutil.copyfileobj() starts at the current position, so go to the start
temp_file.seek(0)
logger.info("copying %s to cache at %s", temp_file.name, cache_path)
with open(cache_path, 'wb') as cache_file:
shutil.copyfileobj(temp_file, cache_file)
logger.info("creating metadata file for %s", cache_path)
meta = {'url': url, 'etag': etag}
meta_path = cache_path + '.json'
with open(meta_path, 'w') as meta_file:
json.dump(meta, meta_file)
logger.info("removing temp file %s", temp_file.name)
return cache_path
def read_set_from_file(filename: str) -> Set[str]:
'''
Extract a de-duped collection (set) of text from a file.
Expected file format is one item per line.
'''
collection = set()
with open(filename, 'r', encoding='utf-8') as file_:
for line in file_:
collection.add(line.rstrip())
return collection
def get_file_extension(path: str, dot=True, lower: bool = True):
ext = os.path.splitext(path)[1]
ext = ext if dot else ext[1:]
return ext.lower() if lower else ext
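# Minimal usage sketch (not part of the original module). A local path that
# already exists is returned unchanged, while an http/https/s3 URL is downloaded
# into PYTORCH_PRETRAINED_BERT_CACHE on first use (the commented URL is
# hypothetical).
if __name__ == "__main__":
    print(cached_path(__file__))
    # print(cached_path("https://example.org/vocab.txt"))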
|
py | b4012977e0c76ee2dd68c441e42af0a3e8db9693 | # -*- coding: utf-8 -*-
# Copyright (C) 2015-2018 by Brendt Wohlberg <[email protected]>
# All rights reserved. BSD 3-clause License.
# This file is part of the SPORCO package. Details of the copyright
# and user license can be found in the 'LICENSE.txt' file distributed
# with the package.
"""ADMM algorithms for the Convolutional Constrained MOD problem with
Mask Decoupling"""
from __future__ import division
from __future__ import absolute_import
import copy
import numpy as np
from sporco.admm import admm
from sporco.admm import ccmod
import sporco.cnvrep as cr
import sporco.linalg as sl
from sporco.common import _fix_dynamic_class_lookup
__author__ = """Brendt Wohlberg <[email protected]>"""
class ConvCnstrMODMaskDcplBase(admm.ADMMTwoBlockCnstrnt):
r"""
Base class for ADMM algorithms for Convolutional Constrained MOD
with Mask Decoupling :cite:`heide-2015-fast`.
|
.. inheritance-diagram:: ConvCnstrMODMaskDcplBase
:parts: 2
|
Solve the optimisation problem
.. math::
\mathrm{argmin}_\mathbf{d} \;
(1/2) \left\| W \left(\sum_m \mathbf{d}_m * \mathbf{x}_m -
\mathbf{s}\right) \right\|_2^2 \quad \text{such that} \quad
\mathbf{d}_m \in C \;\; \forall m
where :math:`C` is the feasible set consisting of filters with unit
norm and constrained support, and :math:`W` is a mask array, via the
ADMM problem
.. math::
\mathrm{argmin}_{\mathbf{d},\mathbf{g}_0,\mathbf{g}_1} \;
(1/2) \| W \mathbf{g}_0 \|_2^2 + \iota_C(\mathbf{g}_1)
\;\text{such that}\;
\left( \begin{array}{c} X \\ I \end{array} \right) \mathbf{d}
- \left( \begin{array}{c} \mathbf{g}_0 \\ \mathbf{g}_1 \end{array}
\right) = \left( \begin{array}{c} \mathbf{s} \\
\mathbf{0} \end{array} \right) \;\;,
where :math:`\iota_C(\cdot)` is the indicator function of feasible
set :math:`C`, and :math:`X \mathbf{d} = \sum_m \mathbf{x}_m *
\mathbf{d}_m`.
|
The implementation of this class is substantially complicated by the
support of multi-channel signals. In the following, the number of
channels in the signal and dictionary are denoted by ``C`` and ``Cd``
respectively, the number of signals and the number of filters are
denoted by ``K`` and ``M`` respectively, ``X``, ``Z``, and ``S`` denote
the dictionary, coefficient map, and signal arrays respectively, and
``Y0`` and ``Y1`` denote blocks 0 and 1 of the auxiliary (split)
variable of the ADMM problem. We need to consider three different cases:
1. Single channel signal and dictionary (``C`` = ``Cd`` = 1)
2. Multi-channel signal, single channel dictionary (``C`` > 1,
``Cd`` = 1)
3. Multi-channel signal and dictionary (``C`` = ``Cd`` > 1)
The final three (non-spatial) dimensions of the main variables in each
of these cases are as in the following table:
====== ================== ===================== ==================
Var. ``C`` = ``Cd`` = 1 ``C`` > 1, ``Cd`` = 1 ``C`` = ``Cd`` > 1
====== ================== ===================== ==================
``X`` 1 x 1 x ``M`` 1 x 1 x ``M`` ``Cd`` x 1 x ``M``
``Z`` 1 x ``K`` x ``M`` ``C`` x ``K`` x ``M`` 1 x ``K`` x ``M``
``S`` 1 x ``K`` x 1 ``C`` x ``K`` x 1 ``C`` x ``K`` x 1
``Y0`` 1 x ``K`` x 1 ``C`` x ``K`` x 1 ``C`` x ``K`` x 1
``Y1`` 1 x 1 x ``M`` 1 x 1 x ``M`` ``C`` x 1 x ``M``
====== ================== ===================== ==================
In order to combine the block components ``Y0`` and ``Y1`` of
variable ``Y`` into a single array, we need to be able to
concatenate the two component arrays on one of the axes, but the shapes
``Y0`` and ``Y1`` are not compatible for concatenation. The solution for
cases 1. and 3. is to swap the ``K`` and ``M`` axes of ``Y0`` before
concatenating, as well as after extracting the ``Y0`` component from the
concatenated ``Y`` variable. In case 2., since the ``C`` and ``K``
indices have the same behaviour in the dictionary update equation, we
combine these axes in :meth:`.__init__`, so that the case 2. array
shapes become
====== =====================
Var. ``C`` > 1, ``Cd`` = 1
====== =====================
``X`` 1 x 1 x ``M``
``Z`` 1 x ``C`` ``K`` x ``M``
``S`` 1 x ``C`` ``K`` x 1
``Y0`` 1 x ``C`` ``K`` x 1
``Y1`` 1 x 1 x ``M``
====== =====================
making it possible to concatenate ``Y0`` and ``Y1`` using the same
axis swapping strategy as in the other cases. See :meth:`.block_sep0`
and :meth:`block_cat` for additional details.
|
After termination of the :meth:`solve` method, attribute :attr:`itstat`
is a list of tuples representing statistics of each iteration. The
fields of the named tuple ``IterationStats`` are:
``Iter`` : Iteration number
``DFid`` : Value of data fidelity term :math:`(1/2) \sum_k \|
W (\sum_m \mathbf{d}_m * \mathbf{x}_{k,m} - \mathbf{s}_k) \|_2^2`
``Cnstr`` : Constraint violation measure
``PrimalRsdl`` : Norm of primal residual
``DualRsdl`` : Norm of dual residual
``EpsPrimal`` : Primal residual stopping tolerance
:math:`\epsilon_{\mathrm{pri}}`
``EpsDual`` : Dual residual stopping tolerance
:math:`\epsilon_{\mathrm{dua}}`
``Rho`` : Penalty parameter
``XSlvRelRes`` : Relative residual of X step solver
``Time`` : Cumulative run time
"""
class Options(admm.ADMMTwoBlockCnstrnt.Options):
r"""ConvCnstrMODMaskDcplBase algorithm options
Options include all of those defined in
:class:`.ADMMTwoBlockCnstrnt.Options`, together with
additional options:
``LinSolveCheck`` : Flag indicating whether to compute
relative residual of X step solver.
``ZeroMean`` : Flag indicating whether the solution
dictionary :math:`\{\mathbf{d}_m\}` should have zero-mean
components.
"""
defaults = copy.deepcopy(admm.ADMMEqual.Options.defaults)
defaults.update({'AuxVarObj': False, 'fEvalX': True,
'gEvalY': False, 'LinSolveCheck': False,
'ZeroMean': False, 'RelaxParam': 1.8,
'rho': 1.0, 'ReturnVar': 'Y1'})
def __init__(self, opt=None):
"""
Parameters
----------
opt : dict or None, optional (default None)
ConvCnstrMODMaskDcpl algorithm options
"""
if opt is None:
opt = {}
admm.ADMMTwoBlockCnstrnt.Options.__init__(self, opt)
def __setitem__(self, key, value):
"""Set options 'fEvalX' and 'gEvalY' appropriately when option
'AuxVarObj' is set.
"""
admm.ADMMTwoBlockCnstrnt.Options.__setitem__(self, key, value)
if key == 'AuxVarObj':
if value is True:
self['fEvalX'] = False
self['gEvalY'] = True
else:
self['fEvalX'] = True
self['gEvalY'] = False
itstat_fields_objfn = ('DFid', 'Cnstr')
itstat_fields_extra = ('XSlvRelRes',)
hdrtxt_objfn = ('DFid', 'Cnstr')
hdrval_objfun = {'DFid': 'DFid', 'Cnstr': 'Cnstr'}
def __init__(self, Z, S, W, dsz, opt=None, dimK=None, dimN=2):
"""
Parameters
----------
Z : array_like
Coefficient map array
S : array_like
Signal array
W : array_like
Mask array. The array shape must be such that the array is
compatible for multiplication with input array S (see
:func:`.cnvrep.mskWshape` for more details).
dsz : tuple
Filter support size(s)
opt : :class:`ConvCnstrMODMaskDcplBase.Options` object
Algorithm options
dimK : 0, 1, or None, optional (default None)
Number of dimensions in input signal corresponding to multiple
independent signals
dimN : int, optional (default 2)
Number of spatial dimensions
"""
# Set default options if none specified
if opt is None:
opt = ConvCnstrMODMaskDcplBase.Options()
# Infer problem dimensions and set relevant attributes of self
self.cri = cr.CDU_ConvRepIndexing(dsz, S, dimK=dimK, dimN=dimN)
# Convert W to internal shape
W = np.asarray(W.reshape(cr.mskWshape(W, self.cri)),
dtype=S.dtype)
# Reshape W if necessary (see discussion of reshape of S below)
if self.cri.Cd == 1 and self.cri.C > 1:
# In most cases broadcasting rules make it possible for W
# to have a singleton dimension corresponding to a non-singleton
# dimension in S. However, when S is reshaped to interleave axisC
# and axisK on the same axis, broadcasting is no longer sufficient
# unless axisC and axisK of W are either both singleton or both
# of the same size as the corresponding axes of S. If neither of
# these cases holds, it is necessary to replicate the axis of W
# (axisC or axisK) that does not have the same size as the
# corresponding axis of S.
shpw = list(W.shape)
swck = shpw[self.cri.axisC] * shpw[self.cri.axisK]
if swck > 1 and swck < self.cri.C * self.cri.K:
if W.shape[self.cri.axisK] == 1 and self.cri.K > 1:
shpw[self.cri.axisK] = self.cri.K
else:
shpw[self.cri.axisC] = self.cri.C
W = np.broadcast_to(W, shpw)
self.W = W.reshape(
W.shape[0:self.cri.dimN] +
(1, W.shape[self.cri.axisC] * W.shape[self.cri.axisK], 1))
else:
self.W = W
# Call parent class __init__
Nx = self.cri.N * self.cri.Cd * self.cri.M
CK = (self.cri.C if self.cri.Cd == 1 else 1) * self.cri.K
shpY = list(self.cri.shpX)
shpY[self.cri.axisC] = self.cri.Cd
shpY[self.cri.axisK] = 1
shpY[self.cri.axisM] += CK
super(ConvCnstrMODMaskDcplBase, self).__init__(
Nx, shpY, self.cri.axisM, CK, S.dtype, opt)
# Reshape S to standard layout (Z, i.e. X in cbpdn, is assumed
# to be taken from cbpdn, and therefore already in standard
# form). If the dictionary has a single channel but the input
# (and therefore also the coefficient map array) has multiple
# channels, the channel index and multiple image index have
# the same behaviour in the dictionary update equation: the
# simplest way to handle this is to just reshape so that the
# channels also appear on the multiple image index.
if self.cri.Cd == 1 and self.cri.C > 1:
self.S = S.reshape(self.cri.Nv + (1, self.cri.C*self.cri.K, 1))
else:
self.S = S.reshape(self.cri.shpS)
self.S = np.asarray(self.S, dtype=self.dtype)
# Create constraint set projection function
self.Pcn = cr.getPcn(dsz, self.cri.Nv, self.cri.dimN, self.cri.dimCd,
zm=opt['ZeroMean'])
# Initialise byte-aligned arrays for pyfftw
self.YU = sl.pyfftw_empty_aligned(self.Y.shape, dtype=self.dtype)
xfshp = list(self.cri.Nv + (self.cri.Cd, 1, self.cri.M))
self.Xf = sl.pyfftw_rfftn_empty_aligned(xfshp, self.cri.axisN,
self.dtype)
if Z is not None:
self.setcoef(Z)
def uinit(self, ushape):
"""Return initialiser for working variable U"""
if self.opt['Y0'] is None:
return np.zeros(ushape, dtype=self.dtype)
else:
# If initial Y is non-zero, initial U is chosen so that
# the relevant dual optimality criterion (see (3.10) in
# boyd-2010-distributed) is satisfied.
Ub0 = (self.W**2) * self.block_sep0(self.Y) / self.rho
Ub1 = self.block_sep1(self.Y)
return self.block_cat(Ub0, Ub1)
def setcoef(self, Z):
"""Set coefficient array."""
# If the dictionary has a single channel but the input (and
# therefore also the coefficient map array) has multiple
# channels, the channel index and multiple image index have
# the same behaviour in the dictionary update equation: the
# simplest way to handle this is to just reshape so that the
# channels also appear on the multiple image index.
if self.cri.Cd == 1 and self.cri.C > 1:
Z = Z.reshape(self.cri.Nv + (1, self.cri.Cx*self.cri.K,
self.cri.M,))
self.Z = np.asarray(Z, dtype=self.dtype)
self.Zf = sl.rfftn(self.Z, self.cri.Nv, self.cri.axisN)
def getdict(self, crop=True):
"""Get final dictionary. If ``crop`` is ``True``, apply
:func:`.cnvrep.bcrop` to returned array.
"""
D = self.block_sep1(self.Y)
if crop:
D = cr.bcrop(D, self.cri.dsz, self.cri.dimN)
return D
def xstep_check(self, b):
r"""Check the minimisation of the Augmented Lagrangian with
respect to :math:`\mathbf{x}` by method `xstep` defined in
derived classes. This method should be called at the end of any
`xstep` method.
"""
if self.opt['LinSolveCheck']:
Zop = lambda x: sl.inner(self.Zf, x, axis=self.cri.axisM)
ZHop = lambda x: sl.inner(np.conj(self.Zf), x,
axis=self.cri.axisK)
ax = ZHop(Zop(self.Xf)) + self.Xf
self.xrrs = sl.rrs(ax, b)
else:
self.xrrs = None
def ystep(self):
r"""Minimise Augmented Lagrangian with respect to
:math:`\mathbf{y}`.
"""
AXU = self.AX + self.U
Y0 = (self.rho*(self.block_sep0(AXU) - self.S)) / (self.W**2 +
self.rho)
Y1 = self.Pcn(self.block_sep1(AXU))
self.Y = self.block_cat(Y0, Y1)
def relax_AX(self):
"""Implement relaxation if option ``RelaxParam`` != 1.0."""
self.AXnr = self.cnst_A(self.X, self.Xf)
if self.rlx == 1.0:
self.AX = self.AXnr
else:
alpha = self.rlx
self.AX = alpha*self.AXnr + (1-alpha)*self.block_cat(
self.var_y0() + self.S, self.var_y1())
def block_sep0(self, Y):
r"""Separate variable into component corresponding to
:math:`\mathbf{y}_0` in :math:`\mathbf{y}\;\;`. The method from
parent class :class:`.ADMMTwoBlockCnstrnt` is overridden here to
allow swapping of K (multi-image) and M (filter) axes in block 0
so that it can be concatenated on axis M with block 1. This is
necessary because block 0 has the dimensions of S while block 1
has the dimensions of D. Handling of multi-channel signals
substantially complicate this issue. There are two multi-channel
cases: multi-channel dictionary and signal (Cd = C > 1), and
single-channel dictionary with multi-channel signal (Cd = 1, C >
1). In the former case, S and D shapes are (N x C x K x 1) and
(N x C x 1 x M) respectively. In the latter case,
:meth:`.__init__` has already taken care of combining C
(multi-channel) and K (multi-image) axes in S, so the S and D
shapes are (N x 1 x C K x 1) and (N x 1 x 1 x M) respectively.
"""
return np.swapaxes(
Y[(slice(None),)*self.blkaxis + (slice(0, self.blkidx),)],
self.cri.axisK, self.cri.axisM)
def block_cat(self, Y0, Y1):
r"""Concatenate components corresponding to :math:`\mathbf{y}_0`
and :math:`\mathbf{y}_1` to form :math:`\mathbf{y}\;\;`. The
method from parent class :class:`.ADMMTwoBlockCnstrnt` is
overridden here to allow swapping of K (multi-image) and M
(filter) axes in block 0 so that it can be concatenated on axis
M with block 1. This is necessary because block 0 has the
dimensions of S while block 1 has the dimensions of D. Handling
of multi-channel signals substantially complicates this
issue. There are two multi-channel cases: multi-channel
dictionary and signal (Cd = C > 1), and single-channel
dictionary with multi-channel signal (Cd = 1, C > 1). In the
former case, S and D shapes are (N x C x K x 1) and (N x C x 1 x
M) respectively. In the latter case, :meth:`.__init__` has
already taken care of combining C (multi-channel) and K
(multi-image) axes in S, so the S and D shapes are (N x 1 x C K
x 1) and (N x 1 x 1 x M) respectively.
"""
return np.concatenate((np.swapaxes(Y0, self.cri.axisK,
self.cri.axisM), Y1),
axis=self.blkaxis)
def cnst_A(self, X, Xf=None):
r"""Compute :math:`A \mathbf{x}` component of ADMM problem
constraint.
"""
return self.block_cat(self.cnst_A0(X, Xf), self.cnst_A1(X))
def obfn_g0var(self):
"""Variable to be evaluated in computing
:meth:`.ADMMTwoBlockCnstrnt.obfn_g0`, depending on the ``AuxVarObj``
option value.
"""
return self.var_y0() if self.opt['AuxVarObj'] else \
self.cnst_A0(None, self.Xf) - self.cnst_c0()
def cnst_A0(self, X, Xf=None):
r"""Compute :math:`A_0 \mathbf{x}` component of ADMM problem
constraint.
"""
# This calculation involves non-negligible computational cost
# when Xf is None (i.e. the function is not being applied to
# self.X).
if Xf is None:
Xf = sl.rfftn(X, None, self.cri.axisN)
return sl.irfftn(sl.inner(self.Zf, Xf, axis=self.cri.axisM),
self.cri.Nv, self.cri.axisN)
def cnst_A0T(self, Y0):
r"""Compute :math:`A_0^T \mathbf{y}_0` component of
:math:`A^T \mathbf{y}` (see :meth:`.ADMMTwoBlockCnstrnt.cnst_AT`).
"""
# This calculation involves non-negligible computational cost. It
# should be possible to disable relevant diagnostic information
# (dual residual) to avoid this cost.
Y0f = sl.rfftn(Y0, None, self.cri.axisN)
return sl.irfftn(sl.inner(np.conj(self.Zf), Y0f,
axis=self.cri.axisK), self.cri.Nv,
self.cri.axisN)
def cnst_c0(self):
r"""Compute constant component :math:`\mathbf{c}_0` of
:math:`\mathbf{c}` in the ADMM problem constraint.
"""
return self.S
def eval_objfn(self):
"""Compute components of regularisation function as well as total
contribution to objective function.
"""
dfd = self.obfn_g0(self.obfn_g0var())
cns = self.obfn_g1(self.obfn_g1var())
return (dfd, cns)
def obfn_g0(self, Y0):
r"""Compute :math:`g_0(\mathbf{y}_0)` component of ADMM objective
function.
"""
return (np.linalg.norm(self.W * Y0)**2) / 2.0
def obfn_g1(self, Y1):
r"""Compute :math:`g_1(\mathbf{y_1})` component of ADMM objective
function.
"""
return np.linalg.norm((self.Pcn(Y1) - Y1))
def itstat_extra(self):
"""Non-standard entries for the iteration stats record tuple."""
return (self.xrrs,)
def reconstruct(self, D=None):
"""Reconstruct representation."""
if D is None:
Df = self.Xf
else:
Df = sl.rfftn(D, None, self.cri.axisN)
Sf = np.sum(self.Zf * Df, axis=self.cri.axisM)
return sl.irfftn(Sf, self.cri.Nv, self.cri.axisN)
def rsdl_s(self, Yprev, Y):
"""Compute dual residual vector."""
return self.rho*np.linalg.norm(self.cnst_AT(self.U))
def rsdl_sn(self, U):
"""Compute dual residual normalisation term."""
return self.rho*np.linalg.norm(U)
class ConvCnstrMODMaskDcpl_IterSM(ConvCnstrMODMaskDcplBase):
r"""
ADMM algorithm for Convolutional Constrained MOD with Mask Decoupling
:cite:`heide-2015-fast` with the :math:`\mathbf{x}` step solved via
iterated application of the Sherman-Morrison equation
:cite:`wohlberg-2016-efficient`.
|
.. inheritance-diagram:: ConvCnstrMODMaskDcpl_IterSM
:parts: 2
|
Multi-channel signals/images are supported
:cite:`wohlberg-2016-convolutional`. See
:class:`.ConvCnstrMODMaskDcplBase` for interface details.
"""
class Options(ConvCnstrMODMaskDcplBase.Options):
"""ConvCnstrMODMaskDcpl_IterSM algorithm options
Options are the same as those defined in
:class:`.ConvCnstrMODMaskDcplBase.Options`.
"""
defaults = copy.deepcopy(ConvCnstrMODMaskDcplBase.Options.defaults)
def __init__(self, opt=None):
"""
Parameters
----------
opt : dict or None, optional (default None)
ConvCnstrMODMaskDcpl_IterSM algorithm options
"""
if opt is None:
opt = {}
ConvCnstrMODMaskDcplBase.Options.__init__(self, opt)
def __init__(self, Z, S, W, dsz, opt=None, dimK=1, dimN=2):
"""
|
**Call graph**
.. image:: ../_static/jonga/ccmodmdism_init.svg
:width: 20%
:target: ../_static/jonga/ccmodmdism_init.svg
"""
# Set default options if none specified
if opt is None:
opt = ConvCnstrMODMaskDcpl_IterSM.Options()
super(ConvCnstrMODMaskDcpl_IterSM, self).__init__(Z, S, W, dsz,
opt, dimK, dimN)
def xstep(self):
r"""Minimise Augmented Lagrangian with respect to
:math:`\mathbf{x}`.
"""
self.YU[:] = self.Y - self.U
self.block_sep0(self.YU)[:] += self.S
YUf = sl.rfftn(self.YU, None, self.cri.axisN)
b = sl.inner(np.conj(self.Zf), self.block_sep0(YUf),
axis=self.cri.axisK) + self.block_sep1(YUf)
self.Xf[:] = sl.solvemdbi_ism(self.Zf, 1.0, b, self.cri.axisM,
self.cri.axisK)
self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN)
self.xstep_check(b)
class ConvCnstrMODMaskDcpl_CG(ConvCnstrMODMaskDcplBase):
r"""
ADMM algorithm for Convolutional Constrained MOD with Mask Decoupling
:cite:`heide-2015-fast` with the :math:`\mathbf{x}` step solved via
Conjugate Gradient (CG) :cite:`wohlberg-2016-efficient`.
|
.. inheritance-diagram:: ConvCnstrMODMaskDcpl_CG
:parts: 2
|
Multi-channel signals/images are supported
:cite:`wohlberg-2016-convolutional`. See
:class:`.ConvCnstrMODMaskDcplBase` for interface details.
"""
class Options(ConvCnstrMODMaskDcplBase.Options):
"""ConvCnstrMODMaskDcpl_CG algorithm options
Options include all of those defined in
:class:`.ConvCnstrMODMaskDcplBase.Options`, together with
additional options:
``CG`` : CG solver options
``MaxIter`` : Maximum CG iterations.
``StopTol`` : CG stopping tolerance.
"""
defaults = copy.deepcopy(ConvCnstrMODMaskDcplBase.Options.defaults)
defaults.update({'CG': {'MaxIter': 1000, 'StopTol': 1e-3}})
def __init__(self, opt=None):
"""
Parameters
----------
opt : dict or None, optional (default None)
ConvCnstrMODMaskDcpl_CG algorithm options
"""
if opt is None:
opt = {}
ConvCnstrMODMaskDcplBase.Options.__init__(self, opt)
itstat_fields_extra = ('XSlvRelRes', 'XSlvCGIt')
def __init__(self, Z, S, W, dsz, opt=None, dimK=1, dimN=2):
"""
|
**Call graph**
.. image:: ../_static/jonga/ccmodmdcg_init.svg
:width: 20%
:target: ../_static/jonga/ccmodmdcg_init.svg
"""
# Set default options if none specified
if opt is None:
opt = ConvCnstrMODMaskDcpl_CG.Options()
super(ConvCnstrMODMaskDcpl_CG, self).__init__(Z, S, W, dsz, opt,
dimK, dimN)
self.Xf[:] = 0.0
def xstep(self):
r"""Minimise Augmented Lagrangian with respect to
:math:`\mathbf{x}`.
"""
self.cgit = None
self.YU[:] = self.Y - self.U
self.block_sep0(self.YU)[:] += self.S
YUf = sl.rfftn(self.YU, None, self.cri.axisN)
b = sl.inner(np.conj(self.Zf), self.block_sep0(YUf),
axis=self.cri.axisK) + self.block_sep1(YUf)
self.Xf[:], cgit = sl.solvemdbi_cg(
self.Zf, 1.0, b, self.cri.axisM, self.cri.axisK,
self.opt['CG', 'StopTol'], self.opt['CG', 'MaxIter'], self.Xf)
self.cgit = cgit
self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN)
self.xstep_check(b)
def itstat_extra(self):
"""Non-standard entries for the iteration stats record tuple."""
return (self.xrrs, self.cgit)
class ConvCnstrMODMaskDcpl_Consensus(ccmod.ConvCnstrMOD_Consensus):
r"""
Hybrid ADMM Consensus algorithm for Convolutional Constrained MOD with
Mask Decoupling :cite:`garcia-2018-convolutional1`.
|
.. inheritance-diagram:: ConvCnstrMODMaskDcpl_Consensus
:parts: 2
|
Solve the optimisation problem
.. math::
\mathrm{argmin}_\mathbf{d} \;
(1/2) \left\| W \left(\sum_m \mathbf{d}_m * \mathbf{x}_m -
\mathbf{s} \right) \right\|_2^2 \quad \text{such that} \quad
\mathbf{d}_m \in C \;\; \forall m
where :math:`C` is the feasible set consisting of filters with unit
norm and constrained support, and :math:`W` is a mask array, via a
hybrid ADMM Consensus problem.
See the documentation of :class:`.ConvCnstrMODMaskDcplBase` for a
detailed discussion of the implementational complications resulting
from the support of multi-channel signals.
"""
def __init__(self, Z, S, W, dsz, opt=None, dimK=None, dimN=2):
"""
|
**Call graph**
.. image:: ../_static/jonga/ccmodmdcnsns_init.svg
:width: 20%
:target: ../_static/jonga/ccmodmdcnsns_init.svg
|
Parameters
----------
Z : array_like
Coefficient map array
S : array_like
Signal array
W : array_like
Mask array. The array shape must be such that the array is
compatible for multiplication with input array S (see
:func:`.cnvrep.mskWshape` for more details).
dsz : tuple
Filter support size(s)
opt : :class:`.ConvCnstrMOD_Consensus.Options` object
Algorithm options
dimK : 0, 1, or None, optional (default None)
Number of dimensions in input signal corresponding to multiple
independent signals
dimN : int, optional (default 2)
Number of spatial dimensions
"""
# Set default options if none specified
if opt is None:
opt = ccmod.ConvCnstrMOD_Consensus.Options()
super(ConvCnstrMODMaskDcpl_Consensus, self).__init__(
Z, S, dsz, opt=opt, dimK=dimK, dimN=dimN)
# Convert W to internal shape
if W is None:
W = np.array([1.0], dtype=self.dtype)
W = np.asarray(W.reshape(cr.mskWshape(W, self.cri)),
dtype=S.dtype)
# Reshape W if necessary (see discussion of reshape of S in
# ccmod.ConvCnstrMOD_Consensus.__init__)
if self.cri.Cd == 1 and self.cri.C > 1:
# In most cases broadcasting rules make it possible for W
# to have a singleton dimension corresponding to a non-singleton
# dimension in S. However, when S is reshaped to interleave axisC
# and axisK on the same axis, broadcasting is no longer sufficient
# unless axisC and axisK of W are either both singleton or both
# of the same size as the corresponding axes of S. If neither of
# these cases holds, it is necessary to replicate the axis of W
# (axisC or axisK) that does not have the same size as the
# corresponding axis of S.
shpw = list(W.shape)
swck = shpw[self.cri.axisC] * shpw[self.cri.axisK]
if swck > 1 and swck < self.cri.C * self.cri.K:
if W.shape[self.cri.axisK] == 1 and self.cri.K > 1:
shpw[self.cri.axisK] = self.cri.K
else:
shpw[self.cri.axisC] = self.cri.C
W = np.broadcast_to(W, shpw)
self.W = W.reshape(
W.shape[0:self.cri.dimN] +
(1, W.shape[self.cri.axisC] * W.shape[self.cri.axisK], 1))
else:
self.W = W
# Initialise additional variables required for the different
# splitting used in combining the consensus solution with mask
# decoupling
self.Y1 = np.zeros(self.S.shape, dtype=self.dtype)
self.U1 = np.zeros(self.S.shape, dtype=self.dtype)
self.YU1 = sl.pyfftw_empty_aligned(self.S.shape, dtype=self.dtype)
def setcoef(self, Z):
"""Set coefficient array."""
# This method largely replicates the method from parent class
# ConvCnstrMOD_Consensus that it overrides. The inherited
# method is overridden to avoid the superfluous computation of
# self.ZSf in that method, which is not required for the
# modified algorithm with mask decoupling
if self.cri.Cd == 1 and self.cri.C > 1:
Z = Z.reshape(self.cri.Nv + (1,) + (self.cri.Cx*self.cri.K,) +
(self.cri.M,))
self.Z = np.asarray(Z, dtype=self.dtype)
self.Zf = sl.rfftn(self.Z, self.cri.Nv, self.cri.axisN)
def var_y1(self):
"""Get the auxiliary variable that is constrained to be equal to
the dictionary. The method is named for compatibility with the
method of the same name in :class:`.ConvCnstrMODMaskDcpl_IterSM`
and :class:`.ConvCnstrMODMaskDcpl_CG` (it is *not* variable `Y1`
in this class).
"""
return self.Y
def relax_AX(self):
"""The parent class method that this method overrides only
implements the relaxation step for the variables of the baseline
consensus algorithm. This method calls the overridden method and
then implements the relaxation step for the additional variables
required for the mask decoupling modification to the baseline
algorithm.
"""
super(ConvCnstrMODMaskDcpl_Consensus, self).relax_AX()
self.AX1nr = sl.irfftn(sl.inner(self.Zf, self.swapaxes(self.Xf),
axis=self.cri.axisM),
self.cri.Nv, self.cri.axisN)
if self.rlx == 1.0:
self.AX1 = self.AX1nr
else:
alpha = self.rlx
self.AX1 = alpha*self.AX1nr + (1-alpha)*(self.Y1 + self.S)
def xstep(self):
"""The xstep of the baseline consensus class from which this
class is derived is re-used to implement the xstep of the
modified algorithm by replacing ``self.ZSf``, which is constant
in the baseline algorithm, with a quantity derived from the
additional variables ``self.Y1`` and ``self.U1``. It is also
necessary to set the penalty parameter to unity for the duration
of the x step.
"""
self.YU1[:] = self.Y1 - self.U1
self.ZSf = np.conj(self.Zf) * (self.Sf + sl.rfftn(
self.YU1, None, self.cri.axisN))
rho = self.rho
self.rho = 1.0
super(ConvCnstrMODMaskDcpl_Consensus, self).xstep()
self.rho = rho
def ystep(self):
"""The parent class ystep method is overridden to allow also
performing the ystep for the additional variables introduced in
the modification to the baseline algorithm.
"""
super(ConvCnstrMODMaskDcpl_Consensus, self).ystep()
AXU1 = self.AX1 + self.U1
self.Y1 = self.rho*(AXU1 - self.S) / (self.W**2 + self.rho)
def ustep(self):
"""The parent class ystep method is overridden to allow also
performing the ystep for the additional variables introduced in
the modification to the baseline algorithm.
"""
super(ConvCnstrMODMaskDcpl_Consensus, self).ustep()
self.U1 += self.AX1 - self.Y1 - self.S
def obfn_dfd(self):
r"""Compute data fidelity term :math:`(1/2) \| W \left( \sum_m
\mathbf{d}_m * \mathbf{x}_m - \mathbf{s} \right) \|_2^2`.
"""
Ef = sl.inner(self.Zf, self.obfn_fvarf(), axis=self.cri.axisM) \
- self.Sf
return (np.linalg.norm(self.W * sl.irfftn(Ef, self.cri.Nv,
self.cri.axisN))**2) / 2.0
def compute_residuals(self):
"""Compute residuals and stopping thresholds. The parent class
method is overridden to ensure that the residual calculations
include the additional variables introduced in the modification
to the baseline algorithm.
"""
# The full primary residual is straightforward to compute from
# the primary residuals for the baseline algorithm and for the
# additional variables
r0 = self.rsdl_r(self.AXnr, self.Y)
r1 = self.AX1nr - self.Y1 - self.S
r = np.sqrt(np.sum(r0**2) + np.sum(r1**2))
# The full dual residual is more complicated to compute than the
# full primary residual
ATU = self.swapaxes(self.U) + sl.irfftn(
np.conj(self.Zf) * sl.rfftn(self.U1, self.cri.Nv, self.cri.axisN),
self.cri.Nv, self.cri.axisN)
s = self.rho * np.linalg.norm(ATU)
# The normalisation factor for the full primal residual is also not
# straightforward
nAX = np.sqrt(np.linalg.norm(self.AXnr)**2 +
np.linalg.norm(self.AX1nr)**2)
nY = np.sqrt(np.linalg.norm(self.Y)**2 +
np.linalg.norm(self.Y1)**2)
rn = max(nAX, nY, np.linalg.norm(self.S))
# The normalisation factor for the full dual residual is
# straightforward to compute
sn = self.rho * np.sqrt(np.linalg.norm(self.U)**2 +
np.linalg.norm(self.U1)**2)
# Final residual values and stopping tolerances depend on
# whether standard or normalised residuals are specified via the
# options object
if self.opt['AutoRho', 'StdResiduals']:
epri = np.sqrt(self.Nc)*self.opt['AbsStopTol'] + \
rn*self.opt['RelStopTol']
edua = np.sqrt(self.Nx)*self.opt['AbsStopTol'] + \
sn*self.opt['RelStopTol']
else:
if rn == 0.0:
rn = 1.0
if sn == 0.0:
sn = 1.0
r /= rn
s /= sn
epri = np.sqrt(self.Nc)*self.opt['AbsStopTol']/rn + \
self.opt['RelStopTol']
edua = np.sqrt(self.Nx)*self.opt['AbsStopTol']/sn + \
self.opt['RelStopTol']
return r, s, epri, edua
def ConvCnstrMODMaskDcpl(*args, **kwargs):
"""A wrapper function that dynamically defines a class derived from
one of the implementations of the Convolutional Constrained MOD
with Mask Decoupling problems, and returns an object instantiated
with the provided parameters. The wrapper is designed to allow the
appropriate object to be created by calling this function using the
same syntax as would be used if it were a class. The specific
implementation is selected by use of an additional keyword
argument 'method'. Valid values are:
- ``'ism'`` :
Use the implementation defined in :class:`.ConvCnstrMODMaskDcpl_IterSM`.
This method works well for a small number of training images, but is
very slow for larger training sets.
- ``'cg'`` :
Use the implementation defined in :class:`.ConvCnstrMODMaskDcpl_CG`.
This method is slower than ``'ism'`` for small training sets, but has
better run time scaling as the training set grows.
- ``'cns'`` :
Use the implementation defined in
:class:`.ConvCnstrMODMaskDcpl_Consensus`. This method is the best choice
for large training sets.
The default value is ``'cns'``.
"""
# Extract method selection argument or set default
if 'method' in kwargs:
method = kwargs['method']
del kwargs['method']
else:
method = 'cns'
# Assign base class depending on method selection argument
if method == 'ism':
base = ConvCnstrMODMaskDcpl_IterSM
elif method == 'cg':
base = ConvCnstrMODMaskDcpl_CG
elif method == 'cns':
base = ConvCnstrMODMaskDcpl_Consensus
else:
raise ValueError('Unknown ConvCnstrMODMaskDcpl solver method %s'
% method)
# Nested class with dynamically determined inheritance
class ConvCnstrMODMaskDcpl(base):
def __init__(self, *args, **kwargs):
super(ConvCnstrMODMaskDcpl, self).__init__(*args, **kwargs)
# Allow pickling of objects of type ConvCnstrMODMaskDcpl
_fix_dynamic_class_lookup(ConvCnstrMODMaskDcpl, method)
# Return object of the nested class type
return ConvCnstrMODMaskDcpl(*args, **kwargs)
def ConvCnstrMODMaskDcplOptions(opt=None, method='cns'):
"""A wrapper function that dynamically defines a class derived from
the Options class associated with one of the implementations of
the Convolutional Constrained MOD with Mask Decoupling problem,
and returns an object instantiated with the provided parameters.
The wrapper is designed to allow the appropriate object to be
created by calling this function using the same syntax as would be
used if it were a class. The specific implementation is selected
by use of an additional keyword argument 'method'. Valid values are
as specified in the documentation for :func:`ConvCnstrMODMaskDcpl`.
"""
# Assign base class depending on method selection argument
if method == 'ism':
base = ConvCnstrMODMaskDcpl_IterSM.Options
elif method == 'cg':
base = ConvCnstrMODMaskDcpl_CG.Options
elif method == 'cns':
base = ConvCnstrMODMaskDcpl_Consensus.Options
else:
raise ValueError('Unknown ConvCnstrMODMaskDcpl solver method %s'
% method)
# Nested class with dynamically determined inheritance
class ConvCnstrMODMaskDcplOptions(base):
def __init__(self, opt):
super(ConvCnstrMODMaskDcplOptions, self).__init__(opt)
# Allow pickling of objects of type ConvCnstrMODMaskDcplOptions
_fix_dynamic_class_lookup(ConvCnstrMODMaskDcplOptions, method)
# Return object of the nested class type
return ConvCnstrMODMaskDcplOptions(opt)
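# --- Hypothetical usage sketch (not part of the original module). Array shapes
# follow the conventions described in the ConvCnstrMODMaskDcplBase docstring;
# Z, S, W and the filter support size dsz are placeholders supplied by the
# caller.
#
#     opt = ConvCnstrMODMaskDcplOptions({'Verbose': True, 'MaxMainIter': 50},
#                                       method='cns')
#     d = ConvCnstrMODMaskDcpl(Z, S, W, dsz, opt=opt, method='cns')
#     d.solve()
#     D = d.getdict().squeeze()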
|
py | b4012983ed28675ee8c653b98fb745b267b2312a | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'comp-match'
copyright = '2018, franklingu'
author = 'franklingu'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '0.0.0.dev'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.ifconfig',
'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'comp-matchdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'comp-match.tex', 'comp-match Documentation',
'franklingu', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'comp-match', 'comp-match Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'comp-match', 'comp-match Documentation',
author, 'comp-match', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True |
py | b40129e2c39a030b2b194182d7523b7069757617 | #!/usr/bin/env python3
from mylib.ext import http_headers
ROOT_DOMAIN = 'bilibili.com'
class BilibiliSplinterBrowserWrapper:
def __init__(self, splinter_browser, cookies_dict=None, cookies_source=None):
b = self.browser = splinter_browser
b.visit('https://' + ROOT_DOMAIN)
if cookies_dict:
self.add_cookies(cookies_dict)
elif cookies_source:
self.add_cookies_from(cookies_source)
def add_cookies_from(self, x):
self.add_cookies(http_headers.get_cookies_dict_from(x))
def add_cookies(self, cookies: dict):
self.browser.cookies.add(cookies)
for cookie in self.list_cookies():
cookie['domain'] = ROOT_DOMAIN
self.add_single_cookie_dict(cookie)
self.browser.reload()
def add_single_cookie_dict(self, single_cookie_dict):
self.browser.driver.add_cookie(single_cookie_dict)
def get_cookies(self):
return self.browser.cookies.all()
def list_cookies(self):
return self.browser.driver.get_cookies()
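# Hedged usage sketch: the cookie string is a made-up placeholder, and the
# splinter Browser import is assumed to be available in the environment.
#
#   from splinter import Browser
#   wrapper = BilibiliSplinterBrowserWrapper(
#       Browser('chrome'), cookies_source='SESSDATA=xxxx; bili_jct=yyyy')
#   print(wrapper.list_cookies())
#
# The wrapper first visits the bilibili.com root so cookies can be attached
# to the correct domain, then reloads the page with the injected cookies.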
|
py | b4012b0b21e8e1418e19e144f1f9e57f60365499 | from django.apps import AppConfig
class StaffConfig(AppConfig):
name = 'staff'
verbose_name = 'Staff'
|
py | b4012be860f9ec9259d12906e4da2147e8685482 | import abc
import re
from typing import (
Any,
Optional,
Union,
)
import litecore.validation.base as base
import litecore.validation.length as length
import litecore.validation.specified as specified
import litecore.validation.exceptions as exc
class RegEx(base.Validator):
__slots__ = base.get_slots(base.Validator) + (
'pattern',
'flags',
'_compiled',
)
def __init__(
self,
*,
pattern: Optional[Union[str, bytes, re.Pattern]] = None,
flags: int = 0,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.pattern = pattern
self.flags = flags
if isinstance(pattern, re.Pattern):
self._compiled = pattern
else:
self._compiled = re.compile(pattern, flags)
def _validate(self, value: Any) -> Any:
if not re.match(self._compiled, value):
raise exc.PatternError(value, self)
return super()._validate(value)
@base.abstractslots(('regex',))
class HasRegEx(base.Validator):
__slots__ = ()
def __init__(
self,
*,
regex: Optional[RegEx] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.regex = regex
@abc.abstractmethod
def _validate(self, value: Any) -> Any:
if self.regex is not None:
value = self.regex(value)
return super()._validate(value)
class String(HasRegEx, length.HasLength, specified.SimpleChoices):
"""
Examples:
"""
__slots__ = ('encoding',) + base.combine_slots(
HasRegEx,
length.HasLength,
specified.SimpleChoices,
)
default_coerce_type = str
def __init__(
self,
*,
encoding: Optional[str] = 'utf-8',
**kwargs,
) -> None:
super().__init__(**kwargs)
self.encoding = encoding
def _validate(self, value: Any) -> Any:
if isinstance(value, bytes) and self.encoding is not None:
try:
value = value.decode(self.encoding)
except UnicodeDecodeError as err:
args = (value, self, str, err)
raise exc.ValidationTypeError(*args) from err
return super()._validate(value)
|
py | b4012c4378e508ce63325920dec3916fc3ec12bc | """
created by nikos at 4/26/21
"""
import datetime
from ..base import MLBStatsAPIEndpointModel
from mlb_statsapi.utils.stats_api_object import configure_api
YMDTHMS = '%Y-%m-%dT%H:%M:%SZ'
YYYYMMDD_HHMMSS = '%Y%m%d_%H%M%S'
MMDDYYYY_HHMMSS = '%m%d%Y_%H%M%S'
class GameModel(MLBStatsAPIEndpointModel):
date_formats = {
'updatedSince': YMDTHMS,
'timecode': YYYYMMDD_HHMMSS,
'startTimecode': MMDDYYYY_HHMMSS,
'endTimecode': MMDDYYYY_HHMMSS
}
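    # date_formats (above) pairs each query-parameter name with the strftime
    # pattern used when formatting that parameter for the Stats API:
    # 'updatedSince' uses an ISO-8601-style timestamp, while the timecode
    # parameters use the compact YYYYMMDD_HHMMSS / MMDDYYYY_HHMMSS forms.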
@configure_api
def liveGameV1(self, **kwargs):
return self.get_api_file_object(**kwargs)
@configure_api
def liveGameDiffPatchV1(self, **kwargs):
return self.get_api_file_object(**kwargs)
@configure_api
def liveTimestampv11(self, **kwargs):
return self.get_api_file_object(**kwargs)
@configure_api
def currentGameStats(self, **kwargs):
return self.get_api_file_object(**kwargs)
@configure_api
def getGameContextMetrics(self, **kwargs):
return self.get_api_file_object(**kwargs)
@configure_api
def getWinProbability(self, **kwargs):
return self.get_api_file_object(**kwargs)
@configure_api
def boxscore(self, **kwargs):
return self.get_api_file_object(**kwargs)
@configure_api
def content(self, **kwargs):
return self.get_api_file_object(**kwargs)
@configure_api
def colorFeed(self, **kwargs):
return self.get_api_file_object(**kwargs)
@configure_api
def colorTimestamps(self, **kwargs):
return self.get_api_file_object(**kwargs)
@configure_api
def linescore(self, **kwargs):
return self.get_api_file_object(**kwargs)
@configure_api
def playByPlay(self, **kwargs):
return self.get_api_file_object(**kwargs)
@property
def _methods(self) -> dict: return {m.__name__: m for m in (
self.liveGameV1,
self.liveGameDiffPatchV1,
self.liveTimestampv11,
self.currentGameStats,
self.getGameContextMetrics,
self.getWinProbability,
self.boxscore,
self.content,
self.colorFeed,
self.colorTimestamps,
self.linescore,
self.playByPlay
)}
@property
def now_timestamp(self):
return datetime.datetime.now().strftime(YYYYMMDD_HHMMSS)
|
py | b4012c685e436a9facb9b1b0c915099848241775 | # Copyright (c) 2013, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
def execute(filters=None):
columns, data = [], []
columns = [
{
"fieldname": "order_no",
"label": _("Order No"),
"fieldtype": "Link",
"options": "Order Tracking",
"width": 150
},
{
"fieldname": "project",
"label" : _("Project"),
"fieldtype": "Link",
"options": "Project"
},
{
"fieldname": "supplier",
"label": _("Supplier"),
"fieldtype": "Link",
"options": "Supplier",
},
{
"fieldname": "mode_of_transport",
"label": _("Mode of Transport"),
"fieldtype": "Data",
"width": 150
},
{
"fieldname": "shipped_date",
"label": _("Shipping Date"),
"fieldtype": "Date",
"width": 150
},
{
"fieldname": "expected_arrival_date",
"label": _("Expected Arrival Date"),
"fieldtype": "Date",
"width": 150
},
{
"fieldname": "arrival_date",
"label": _("Arrival Date"),
"fieldtype": "Date",
},
{
"fieldname": "order_status",
"label": _("Status"),
"fieldtype": "Data",
"width": 200
},
{
"fieldname": "bl_number",
"label": _("Bl No"),
"fieldtype": "Data",
"width": 150
},
{
"fieldname": "container_no",
"label": _("Container"),
"fieldtype": "Data",
"width": 150
},
{
"fieldname": "container_size",
"label": _("Container Size"),
"fieldtype": "Data",
},
{
"fieldname": "no_of_packages",
"label": _("No of Packages"),
"fieldtype": "Data",
},
{
"fieldname": "clearing_completion_date",
"label": _("Clearing Completion Date"),
"fieldtype": "Date",
},
{
"fieldname": "delivered_date",
"label": _("Delivered Date"),
"fieldtype": "Date",
},
{
"fieldname": "offloading_date",
"label": _("Off-Loading Date"),
"fieldtype": "Date",
},
]
if filters.from_date > filters.to_date:
		frappe.throw(_("From Date {0} must be before To Date {1}").format(filters.from_date, filters.to_date))
where_filter = {"from_date": filters.from_date,"to_date": filters.to_date,}
where = ""
if filters.order:
where += ' AND tot.name = %(order)s '
where_filter.update({"order": filters.order})
if filters.supplier:
where += ' AND tot.supplier = %(supplier)s '
where_filter.update({"supplier": filters.supplier})
data = frappe.db.sql('''SELECT
tot.name AS order_no,
tot.supplier,
tot.project,
tot.shipped_date,
tot.expected_arrival_date,
tot.mode_of_transport,
tot.bl_number,
tot.arrival_date,
tot.clearing_completion_date,
tot.delivered_date,
tot.offloading_date,
/*tc.container_no,
tc.size,
tc.no_of_packages,*/
(SELECT
CONCAT(op.date, " : ", op.current_location, ":", op.status)
FROM
`tabOrder Progress` AS op
WHERE
tot.name = op.parent
ORDER BY
op.date DESC
LIMIT 0,1
) AS order_status
FROM
(`tabOrder Tracking` tot)
/*LEFT JOIN
(`tabContainer` tc)
ON (tot.name = tc.parent)*/
Where
tot.expected_arrival_date BETWEEN %(from_date)s AND %(to_date)s
'''+ where,
where_filter, as_dict=1)
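	# For every order row returned above, fetch its container rows separately
	# and flatten container number, size and package count into
	# comma-separated strings for display in the report.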
for order in data:
#
# For container info
#
container_info = frappe.db.sql('''SELECT
container_no, size,no_of_packages
FROM
(`tabContainer` tc)
LEFT JOIN
(`tabOrder Tracking` tot)
ON (tot.name = tc.parent)
WHERE
tot.name = %(parent)s ''',
{"parent": order.order_no,}, as_dict=1)
		order.container_no = ','.join(str(co.container_no) for co in container_info)
		order.container_size = ','.join(str(co.size) for co in container_info)
		order.no_of_packages = ','.join(str(co.no_of_packages) for co in container_info)
return columns, data
|
py | b4012c6a842a6403178b64678bbab37934daa9d7 | from functools import partial
import numpy as np
import pytest
from pandas.core.dtypes.common import is_categorical_dtype
from pandas.core.dtypes.dtypes import IntervalDtype
from pandas import (
Categorical,
CategoricalIndex,
Float64Index,
Index,
Int64Index,
Interval,
IntervalIndex,
date_range,
notna,
period_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.core.arrays import IntervalArray
import pandas.core.common as com
@pytest.fixture(params=[None, "foo"])
def name(request):
return request.param
class ConstructorTests:
"""
Common tests for all variations of IntervalIndex construction. Input data
to be supplied in breaks format, then converted by the subclass method
get_kwargs_from_breaks to the expected format.
"""
@pytest.mark.filterwarnings("ignore:Passing keywords other:FutureWarning")
@pytest.mark.parametrize(
"breaks",
[
[3, 14, 15, 92, 653],
np.arange(10, dtype="int64"),
Int64Index(range(-10, 11)),
Float64Index(np.arange(20, 30, 0.5)),
date_range("20180101", periods=10),
date_range("20180101", periods=10, tz="US/Eastern"),
timedelta_range("1 day", periods=10),
],
)
def test_constructor(self, constructor, breaks, closed, name):
result_kwargs = self.get_kwargs_from_breaks(breaks, closed)
result = constructor(closed=closed, name=name, **result_kwargs)
assert result.closed == closed
assert result.name == name
assert result.dtype.subtype == getattr(breaks, "dtype", "int64")
tm.assert_index_equal(result.left, Index(breaks[:-1]))
tm.assert_index_equal(result.right, Index(breaks[1:]))
@pytest.mark.parametrize(
"breaks, subtype",
[
(Int64Index([0, 1, 2, 3, 4]), "float64"),
(Int64Index([0, 1, 2, 3, 4]), "datetime64[ns]"),
(Int64Index([0, 1, 2, 3, 4]), "timedelta64[ns]"),
(Float64Index([0, 1, 2, 3, 4]), "int64"),
(date_range("2017-01-01", periods=5), "int64"),
(timedelta_range("1 day", periods=5), "int64"),
],
)
def test_constructor_dtype(self, constructor, breaks, subtype):
# GH 19262: conversion via dtype parameter
warn = None
if subtype == "int64" and breaks.dtype.kind in ["M", "m"]:
# astype(int64) deprecated
warn = FutureWarning
with tm.assert_produces_warning(warn):
expected_kwargs = self.get_kwargs_from_breaks(breaks.astype(subtype))
expected = constructor(**expected_kwargs)
result_kwargs = self.get_kwargs_from_breaks(breaks)
iv_dtype = IntervalDtype(subtype, "right")
for dtype in (iv_dtype, str(iv_dtype)):
with tm.assert_produces_warning(warn):
result = constructor(dtype=dtype, **result_kwargs)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"breaks",
[
Int64Index([0, 1, 2, 3, 4]),
Int64Index([0, 1, 2, 3, 4]),
Int64Index([0, 1, 2, 3, 4]),
Float64Index([0, 1, 2, 3, 4]),
date_range("2017-01-01", periods=5),
timedelta_range("1 day", periods=5),
],
)
def test_constructor_pass_closed(self, constructor, breaks):
# not passing closed to IntervalDtype, but to IntervalArray constructor
warn = None
if isinstance(constructor, partial) and constructor.func is Index:
# passing kwargs to Index is deprecated
warn = FutureWarning
iv_dtype = IntervalDtype(breaks.dtype)
result_kwargs = self.get_kwargs_from_breaks(breaks)
for dtype in (iv_dtype, str(iv_dtype)):
with tm.assert_produces_warning(warn):
result = constructor(dtype=dtype, closed="left", **result_kwargs)
assert result.dtype.closed == "left"
@pytest.mark.filterwarnings("ignore:Passing keywords other:FutureWarning")
@pytest.mark.parametrize("breaks", [[np.nan] * 2, [np.nan] * 4, [np.nan] * 50])
def test_constructor_nan(self, constructor, breaks, closed):
# GH 18421
result_kwargs = self.get_kwargs_from_breaks(breaks)
result = constructor(closed=closed, **result_kwargs)
expected_subtype = np.float64
expected_values = np.array(breaks[:-1], dtype=object)
assert result.closed == closed
assert result.dtype.subtype == expected_subtype
tm.assert_numpy_array_equal(np.array(result), expected_values)
@pytest.mark.filterwarnings("ignore:Passing keywords other:FutureWarning")
@pytest.mark.parametrize(
"breaks",
[
[],
np.array([], dtype="int64"),
np.array([], dtype="float64"),
np.array([], dtype="datetime64[ns]"),
np.array([], dtype="timedelta64[ns]"),
],
)
def test_constructor_empty(self, constructor, breaks, closed):
# GH 18421
result_kwargs = self.get_kwargs_from_breaks(breaks)
result = constructor(closed=closed, **result_kwargs)
expected_values = np.array([], dtype=object)
expected_subtype = getattr(breaks, "dtype", np.int64)
assert result.empty
assert result.closed == closed
assert result.dtype.subtype == expected_subtype
tm.assert_numpy_array_equal(np.array(result), expected_values)
@pytest.mark.parametrize(
"breaks",
[
tuple("0123456789"),
list("abcdefghij"),
np.array(list("abcdefghij"), dtype=object),
np.array(list("abcdefghij"), dtype="<U1"),
],
)
def test_constructor_string(self, constructor, breaks):
# GH 19016
msg = (
"category, object, and string subtypes are not supported "
"for IntervalIndex"
)
with pytest.raises(TypeError, match=msg):
constructor(**self.get_kwargs_from_breaks(breaks))
@pytest.mark.parametrize("cat_constructor", [Categorical, CategoricalIndex])
def test_constructor_categorical_valid(self, constructor, cat_constructor):
# GH 21243/21253
if isinstance(constructor, partial) and constructor.func is Index:
# Index is defined to create CategoricalIndex from categorical data
pytest.skip()
breaks = np.arange(10, dtype="int64")
expected = IntervalIndex.from_breaks(breaks)
cat_breaks = cat_constructor(breaks)
result_kwargs = self.get_kwargs_from_breaks(cat_breaks)
result = constructor(**result_kwargs)
tm.assert_index_equal(result, expected)
def test_generic_errors(self, constructor):
# filler input data to be used when supplying invalid kwargs
filler = self.get_kwargs_from_breaks(range(10))
# invalid closed
msg = "closed must be one of 'right', 'left', 'both', 'neither'"
with pytest.raises(ValueError, match=msg):
constructor(closed="invalid", **filler)
# unsupported dtype
msg = "dtype must be an IntervalDtype, got int64"
with pytest.raises(TypeError, match=msg):
constructor(dtype="int64", **filler)
# invalid dtype
msg = "data type [\"']invalid[\"'] not understood"
with pytest.raises(TypeError, match=msg):
constructor(dtype="invalid", **filler)
# no point in nesting periods in an IntervalIndex
periods = period_range("2000-01-01", periods=10)
periods_kwargs = self.get_kwargs_from_breaks(periods)
msg = "Period dtypes are not supported, use a PeriodIndex instead"
with pytest.raises(ValueError, match=msg):
constructor(**periods_kwargs)
# decreasing values
decreasing_kwargs = self.get_kwargs_from_breaks(range(10, -1, -1))
msg = "left side of interval must be <= right side"
with pytest.raises(ValueError, match=msg):
constructor(**decreasing_kwargs)
class TestFromArrays(ConstructorTests):
"""Tests specific to IntervalIndex.from_arrays"""
@pytest.fixture
def constructor(self):
return IntervalIndex.from_arrays
def get_kwargs_from_breaks(self, breaks, closed="right"):
"""
        converts intervals in breaks format to a dictionary of kwargs
        specific to the format expected by IntervalIndex.from_arrays
"""
return {"left": breaks[:-1], "right": breaks[1:]}
def test_constructor_errors(self):
# GH 19016: categorical data
data = Categorical(list("01234abcde"), ordered=True)
msg = (
"category, object, and string subtypes are not supported "
"for IntervalIndex"
)
with pytest.raises(TypeError, match=msg):
IntervalIndex.from_arrays(data[:-1], data[1:])
# unequal length
left = [0, 1, 2]
right = [2, 3]
msg = "left and right must have the same length"
with pytest.raises(ValueError, match=msg):
IntervalIndex.from_arrays(left, right)
@pytest.mark.parametrize(
"left_subtype, right_subtype", [(np.int64, np.float64), (np.float64, np.int64)]
)
def test_mixed_float_int(self, left_subtype, right_subtype):
"""mixed int/float left/right results in float for both sides"""
left = np.arange(9, dtype=left_subtype)
right = np.arange(1, 10, dtype=right_subtype)
result = IntervalIndex.from_arrays(left, right)
expected_left = Float64Index(left)
expected_right = Float64Index(right)
expected_subtype = np.float64
tm.assert_index_equal(result.left, expected_left)
tm.assert_index_equal(result.right, expected_right)
assert result.dtype.subtype == expected_subtype
class TestFromBreaks(ConstructorTests):
"""Tests specific to IntervalIndex.from_breaks"""
@pytest.fixture
def constructor(self):
return IntervalIndex.from_breaks
def get_kwargs_from_breaks(self, breaks, closed="right"):
"""
        converts intervals in breaks format to a dictionary of kwargs
        specific to the format expected by IntervalIndex.from_breaks
"""
return {"breaks": breaks}
def test_constructor_errors(self):
# GH 19016: categorical data
data = Categorical(list("01234abcde"), ordered=True)
msg = (
"category, object, and string subtypes are not supported "
"for IntervalIndex"
)
with pytest.raises(TypeError, match=msg):
IntervalIndex.from_breaks(data)
def test_length_one(self):
"""breaks of length one produce an empty IntervalIndex"""
breaks = [0]
result = IntervalIndex.from_breaks(breaks)
expected = IntervalIndex.from_breaks([])
tm.assert_index_equal(result, expected)
def test_left_right_dont_share_data(self):
# GH#36310
breaks = np.arange(5)
result = IntervalIndex.from_breaks(breaks)._data
assert result._left.base is None or result._left.base is not result._right.base
class TestFromTuples(ConstructorTests):
"""Tests specific to IntervalIndex.from_tuples"""
@pytest.fixture
def constructor(self):
return IntervalIndex.from_tuples
def get_kwargs_from_breaks(self, breaks, closed="right"):
"""
        converts intervals in breaks format to a dictionary of kwargs
        specific to the format expected by IntervalIndex.from_tuples
"""
if len(breaks) == 0:
return {"data": breaks}
tuples = list(zip(breaks[:-1], breaks[1:]))
if isinstance(breaks, (list, tuple)):
return {"data": tuples}
elif is_categorical_dtype(breaks):
return {"data": breaks._constructor(tuples)}
return {"data": com.asarray_tuplesafe(tuples)}
def test_constructor_errors(self):
# non-tuple
tuples = [(0, 1), 2, (3, 4)]
msg = "IntervalIndex.from_tuples received an invalid item, 2"
with pytest.raises(TypeError, match=msg.format(t=tuples)):
IntervalIndex.from_tuples(tuples)
# too few/many items
tuples = [(0, 1), (2,), (3, 4)]
msg = "IntervalIndex.from_tuples requires tuples of length 2, got {t}"
with pytest.raises(ValueError, match=msg.format(t=tuples)):
IntervalIndex.from_tuples(tuples)
tuples = [(0, 1), (2, 3, 4), (5, 6)]
with pytest.raises(ValueError, match=msg.format(t=tuples)):
IntervalIndex.from_tuples(tuples)
def test_na_tuples(self):
# tuple (NA, NA) evaluates the same as NA as an element
na_tuple = [(0, 1), (np.nan, np.nan), (2, 3)]
idx_na_tuple = IntervalIndex.from_tuples(na_tuple)
idx_na_element = IntervalIndex.from_tuples([(0, 1), np.nan, (2, 3)])
tm.assert_index_equal(idx_na_tuple, idx_na_element)
class TestClassConstructors(ConstructorTests):
"""Tests specific to the IntervalIndex/Index constructors"""
@pytest.fixture(
params=[IntervalIndex, partial(Index, dtype="interval")],
ids=["IntervalIndex", "Index"],
)
def constructor(self, request):
return request.param
def get_kwargs_from_breaks(self, breaks, closed="right"):
"""
        converts intervals in breaks format to a dictionary of kwargs
        specific to the format expected by the IntervalIndex/Index constructors
"""
if len(breaks) == 0:
return {"data": breaks}
ivs = [
Interval(left, right, closed) if notna(left) else left
for left, right in zip(breaks[:-1], breaks[1:])
]
if isinstance(breaks, list):
return {"data": ivs}
elif is_categorical_dtype(breaks):
return {"data": breaks._constructor(ivs)}
return {"data": np.array(ivs, dtype=object)}
def test_generic_errors(self, constructor):
"""
override the base class implementation since errors are handled
differently; checks unnecessary since caught at the Interval level
"""
pass
def test_constructor_string(self):
# GH23013
# When forming the interval from breaks,
# the interval of strings is already forbidden.
pass
def test_constructor_errors(self, constructor):
# mismatched closed within intervals with no constructor override
ivs = [Interval(0, 1, closed="right"), Interval(2, 3, closed="left")]
msg = "intervals must all be closed on the same side"
with pytest.raises(ValueError, match=msg):
constructor(ivs)
# scalar
msg = (
r"IntervalIndex\(...\) must be called with a collection of "
"some kind, 5 was passed"
)
with pytest.raises(TypeError, match=msg):
constructor(5)
# not an interval; dtype depends on 32bit/windows builds
msg = "type <class 'numpy.int(32|64)'> with value 0 is not an interval"
with pytest.raises(TypeError, match=msg):
constructor([0, 1])
@pytest.mark.filterwarnings("ignore:Passing keywords other:FutureWarning")
@pytest.mark.parametrize(
"data, closed",
[
([], "both"),
([np.nan, np.nan], "neither"),
(
[Interval(0, 3, closed="neither"), Interval(2, 5, closed="neither")],
"left",
),
(
[Interval(0, 3, closed="left"), Interval(2, 5, closed="right")],
"neither",
),
(IntervalIndex.from_breaks(range(5), closed="both"), "right"),
],
)
def test_override_inferred_closed(self, constructor, data, closed):
# GH 19370
if isinstance(data, IntervalIndex):
tuples = data.to_tuples()
else:
tuples = [(iv.left, iv.right) if notna(iv) else iv for iv in data]
expected = IntervalIndex.from_tuples(tuples, closed=closed)
result = constructor(data, closed=closed)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"values_constructor", [list, np.array, IntervalIndex, IntervalArray]
)
def test_index_object_dtype(self, values_constructor):
# Index(intervals, dtype=object) is an Index (not an IntervalIndex)
intervals = [Interval(0, 1), Interval(1, 2), Interval(2, 3)]
values = values_constructor(intervals)
result = Index(values, dtype=object)
assert type(result) is Index
tm.assert_numpy_array_equal(result.values, np.array(values))
def test_index_mixed_closed(self):
# GH27172
intervals = [
Interval(0, 1, closed="left"),
Interval(1, 2, closed="right"),
Interval(2, 3, closed="neither"),
Interval(3, 4, closed="both"),
]
result = Index(intervals)
expected = Index(intervals, dtype=object)
tm.assert_index_equal(result, expected)
def test_dtype_closed_mismatch():
# GH#38394 closed specified in both dtype and IntervalIndex constructor
dtype = IntervalDtype(np.int64, "left")
msg = "closed keyword does not match dtype.closed"
with pytest.raises(ValueError, match=msg):
IntervalIndex([], dtype=dtype, closed="neither")
with pytest.raises(ValueError, match=msg):
IntervalArray([], dtype=dtype, closed="neither")
|
py | b4012cd0f1d3867c4172b548eed51ea3414d0cc5 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class NetworkInterfaceIPConfigurationsOperations:
"""NetworkInterfaceIPConfigurationsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
network_interface_name: str,
**kwargs
) -> AsyncIterable["models.NetworkInterfaceIPConfigurationListResult"]:
"""Get all ip configurations in a network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkInterfaceIPConfigurationListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_04_01.models.NetworkInterfaceIPConfigurationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.NetworkInterfaceIPConfigurationListResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-04-01"
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceIPConfigurationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/ipConfigurations'} # type: ignore
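    # Hedged usage sketch (client construction omitted; it is assumed that
    # this operations group is attached to the generated management client
    # as ``network_interface_ip_configurations``):
    #
    #   async for ip_config in client.network_interface_ip_configurations.list(
    #           "my-resource-group", "my-nic"):
    #       print(ip_config.name)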
async def get(
self,
resource_group_name: str,
network_interface_name: str,
ip_configuration_name: str,
**kwargs
) -> "models.NetworkInterfaceIPConfiguration":
"""Gets the specified network interface ip configuration.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param ip_configuration_name: The name of the ip configuration name.
:type ip_configuration_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkInterfaceIPConfiguration, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_04_01.models.NetworkInterfaceIPConfiguration
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.NetworkInterfaceIPConfiguration"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-04-01"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'ipConfigurationName': self._serialize.url("ip_configuration_name", ip_configuration_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkInterfaceIPConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/ipConfigurations/{ipConfigurationName}'} # type: ignore
|
py | b4012d078fc04c25d32a2b498740385a6e44ee98 | import tensorflow as tf
import math
import numpy as np
import sys
sys.path.append('../../../')
import tensornet
NUM_CLASSES = 10
opts = {}
opts['use_dropout'] = True
opts['initial_learning_rate'] = 0.1
opts['num_epochs_per_decay'] = 30.0
opts['learning_rate_decay_factor'] = 0.1
def aug_train(image, aux):
aug_image = tf.pad(image, [[4, 4], [4, 4], [0, 0]])
aug_image = tf.random_crop(aug_image, [32, 32, 3])
aug_image = tf.image.random_flip_left_right(aug_image)
aug_image = tf.image.random_contrast(aug_image, 0.75, 1.25)
aug_image = (aug_image - aux['mean']) / aux['std']
return aug_image
def aug_eval(image, aux):
aug_image = (image - aux['mean']) / aux['std']
return aug_image
def inference(images, train_phase, reuse=None, cpu_variables=False):
"""Build the model up to where it may be used for inference.
Args:
images: Images placeholder.
train_phase: Train phase placeholder
Returns:
logits: Output tensor with the computed logits.
"""
tn_init = lambda dev: lambda shape: tf.truncated_normal(shape, stddev=dev)
tu_init = lambda bound: lambda shape: tf.random_uniform(shape, minval = -bound, maxval = bound)
dropout_rate = lambda p: (opts['use_dropout'] * (p - 1.0)) * tf.to_float(train_phase) + 1.0
layers = []
layers.append(images)
layers.append(tensornet.layers.conv(layers[-1],
64,
[3, 3],
cpu_variables=cpu_variables,
biases_initializer=tf.zeros_initializer(),
scope='conv1.1'))
layers.append(tensornet.layers.batch_normalization(layers[-1],
train_phase,
cpu_variables=cpu_variables,
scope='bn1.1'))
layers.append(tf.nn.relu(layers[-1],
name='relu1.1'))
layers.append(tensornet.layers.conv(layers[-1],
64,
[3, 3],
cpu_variables=cpu_variables,
biases_initializer=tf.zeros_initializer(),
scope='conv1.2'))
layers.append(tensornet.layers.batch_normalization(layers[-1],
train_phase,
cpu_variables=cpu_variables,
scope='bn1.2'))
layers.append(tf.nn.relu(layers[-1],
name='relu1.2'))
layers.append(tf.nn.max_pool(layers[-1],
[1, 3, 3, 1],
[1, 2, 2, 1],
'SAME',
name='max_pool1'))
layers.append(tensornet.layers.conv(layers[-1],
128,
[3, 3],
cpu_variables=cpu_variables,
biases_initializer=tf.zeros_initializer(),
scope='conv2.1'))
layers.append(tensornet.layers.batch_normalization(layers[-1],
train_phase,
cpu_variables=cpu_variables,
scope='bn2.1'))
layers.append(tf.nn.relu(layers[-1],
name='relu2.1'))
layers.append(tensornet.layers.conv(layers[-1],
128,
[3, 3],
cpu_variables=cpu_variables,
biases_initializer=tf.zeros_initializer(),
scope='conv2.2'))
layers.append(tensornet.layers.batch_normalization(layers[-1],
train_phase,
cpu_variables=cpu_variables,
scope='bn2.2'))
layers.append(tf.nn.relu(layers[-1],
name='relu2.2'))
layers.append(tf.nn.max_pool(layers[-1],
[1, 3, 3, 1],
[1, 2, 2, 1],
'SAME',
name='max_pool2'))
layers.append(tensornet.layers.conv(layers[-1],
128,
[3, 3],
padding='VALID',
cpu_variables=cpu_variables,
biases_initializer=tf.zeros_initializer(),
scope='conv3.1'))
layers.append(tensornet.layers.batch_normalization(layers[-1],
train_phase,
cpu_variables=cpu_variables,
scope='bn3.1'))
layers.append(tf.nn.relu(layers[-1],
name='relu3.1'))
layers.append(tensornet.layers.conv(layers[-1],
128,
[3, 3],
padding='VALID',
cpu_variables=cpu_variables,
biases_initializer=tf.zeros_initializer(),
scope='conv3.2'))
layers.append(tensornet.layers.batch_normalization(layers[-1],
train_phase,
cpu_variables=cpu_variables,
scope='bn3.2'))
layers.append(tf.nn.relu(layers[-1],
name='relu3.2'))
layers.append(tf.nn.avg_pool(layers[-1],
[1,4,4,1],
[1,4,4,1],
'SAME',
name='avg_pool_full'))
sz = np.prod(layers[-1].get_shape().as_list()[1:])
layers.append(tensornet.layers.linear(tf.reshape(layers[-1], [-1, sz]),
NUM_CLASSES,
cpu_variables=cpu_variables,
biases_initializer=None,
scope='linear4.1'))
return layers[-1]
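# The network assembled above is a small VGG-style CNN:
#   2 x (3x3 conv, 64)  + BN + ReLU -> 3x3/2 max pool
#   2 x (3x3 conv, 128) + BN + ReLU -> 3x3/2 max pool
#   2 x (3x3 conv, 128, VALID) + BN + ReLU -> 4x4 average pool
#   -> flatten -> linear layer producing NUM_CLASSES logits
# Note that dropout_rate() is defined but not applied to any layer here.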
def losses(logits, labels):
"""Calculates losses from the logits and the labels.
Args:
logits: input tensor, float - [batch_size, NUM_CLASSES].
labels: Labels tensor, int32 - [batch_size].
Returns:
losses: list of loss tensors of type float.
"""
xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels, name='xentropy')
loss = tf.reduce_mean(xentropy, name='loss')
return [loss]
def evaluation(logits, labels):
"""Evaluate the quality of the logits at predicting the label.
Args:
logits: Logits tensor, float - [batch_size, NUM_CLASSES].
labels: Labels tensor, int32 - [batch_size], with values in the
range [0, NUM_CLASSES).
Returns:
A scalar int32 tensor with the number of examples (out of batch_size)
that were predicted correctly.
"""
# For a classifier model, we can use the in_top_k Op.
# It returns a bool tensor with shape [batch_size] that is true for
    # the examples where the label is in the top k (here k=1)
# of all logits for that example.
correct_flags = tf.nn.in_top_k(logits, labels, 1)
# Return the number of true entries.
return tf.cast(correct_flags, tf.int32)
|
py | b4012d334130d88bb0ae18961d87cb03641af1d2 | """
ID: kumar.g1
LANG: PYTHON2
TASK: Longest Common Subsequence
"""
def longestCommonSubsequence(a, b):
len_a = len(a)
len_b = len(b)
if not len_a or not len_b:
return 0
L = [[0 for _ in range(len_b+1)] for _ in range(len_a+1)]
for i in range(1, len_a+1):
for j in range(1, len_b+1):
if a[i-1] == b[j-1]:
L[i][j] = 1 + L[i-1][j-1]
else:
L[i][j] = max(L[i-1][j], L[i][j-1])
max_len = L[len_a][len_b]
i = len_a
j = len_b
seq = []
while max_len > 0:
if L[i][j] == L[i-1][j-1]+1 and a[i-1] == b[j-1]:
seq.append(a[i-1])
max_len -= 1
i = i-1
j = j-1
elif i > 0 and L[i-1][j] == L[i][j]:
i = i-1
else:
j = j-1
return list(reversed(seq))
# assert longestCommonSubsequence([1, 2, 3, 4, 1], [3, 4, 1, 2, 1, 3]) == [1, 2, 3]
print longestCommonSubsequence([1, 2, 3, 4, 1], [3, 4, 1, 2, 1, 3])
|
py | b4012de84a9068e346c9c53746bbf91bda08dace | """
This module contains some tests for the `PlatformReportView` that require a different set of
fixtures, which clash with those used by other tests of this view.
"""
import pytest
from django.urls import reverse
from django.utils.timezone import now
from api.models import OrganizationAPIKey
from sushi.models import CounterReportsToCredentials
from sushi.tests.conftest import counter_report_type, credentials, platforms, organizations # noqa
from test_fixtures.entities.scheduler import FetchIntentionFactory
@pytest.mark.django_db
class TestPlatformReportApiView:
def test_platform_report_view_no_data_with_sushi_no_attempts(
self, client, counter_report_type, organizations, credentials
):
"""
Report has no data for the requested period but there is SUSHI active for this
combination of platform, organization and report
"""
api_key, key_val = OrganizationAPIKey.objects.create_key(
organization=credentials.organization, name='test'
)
credentials.counter_reports.add(counter_report_type)
resp = client.get(
reverse(
'api_platform_report_data',
kwargs={
'platform_id': credentials.platform.pk,
'report_type': counter_report_type.report_type.short_name,
},
),
{'month': '2020-01', 'dims': ''},
HTTP_AUTHORIZATION=f'Api-Key {key_val}',
)
assert resp.status_code == 200
data = resp.json()
assert data['complete_data'] is False
assert data['status'] == 'Data not yet harvested'
def test_platform_report_view_no_data_with_inactive_sushi(
self, client, counter_report_type, organizations, credentials
):
"""
Report has no data for the requested period and there is SUSHI which is inactive for this
combination of platform, organization and report
"""
api_key, key_val = OrganizationAPIKey.objects.create_key(
organization=credentials.organization, name='test'
)
credentials.counter_reports.add(counter_report_type)
credentials.enabled = False
credentials.save()
resp = client.get(
reverse(
'api_platform_report_data',
kwargs={
'platform_id': credentials.platform.pk,
'report_type': counter_report_type.report_type.short_name,
},
),
{'month': '2020-01', 'dims': ''},
HTTP_AUTHORIZATION=f'Api-Key {key_val}',
)
assert resp.status_code == 200
data = resp.json()
assert data['complete_data'] is False
assert data['status'] == 'SUSHI credentials are not automatically harvested'
def test_platform_report_view_no_data_with_broken_sushi(
self, client, counter_report_type, organizations, credentials
):
"""
Report has no data for the requested period and there is SUSHI which is broken for this
combination of platform, organization and report
"""
api_key, key_val = OrganizationAPIKey.objects.create_key(
organization=credentials.organization, name='test'
)
credentials.counter_reports.add(counter_report_type)
credentials.broken = True
credentials.save()
resp = client.get(
reverse(
'api_platform_report_data',
kwargs={
'platform_id': credentials.platform.pk,
'report_type': counter_report_type.report_type.short_name,
},
),
{'month': '2020-01', 'dims': ''},
HTTP_AUTHORIZATION=f'Api-Key {key_val}',
)
assert resp.status_code == 200
data = resp.json()
assert data['complete_data'] is False
assert data['status'] == 'SUSHI credentials are incorrect'
def test_platform_report_view_no_data_with_sushi_with_queued_attempt(
self, client, counter_report_type, organizations, credentials
):
"""
Report has no data for the requested period but there is SUSHI active for this
combination of platform, organization and report.
There is a fetch attempt which is still running.
"""
api_key, key_val = OrganizationAPIKey.objects.create_key(
organization=credentials.organization, name='test'
)
credentials.counter_reports.add(counter_report_type)
fi = FetchIntentionFactory(
credentials=credentials,
counter_report=counter_report_type,
start_date='2020-01-01',
end_date='2020-01-31',
when_processed=now(),
)
FetchIntentionFactory(
credentials=credentials,
counter_report=counter_report_type,
start_date='2020-01-01',
end_date='2020-01-31',
queue=fi.queue,
attempt=None,
)
resp = client.get(
reverse(
'api_platform_report_data',
kwargs={
'platform_id': credentials.platform.pk,
'report_type': counter_report_type.report_type.short_name,
},
),
{'month': '2020-01', 'dims': ''},
HTTP_AUTHORIZATION=f'Api-Key {key_val}',
)
assert resp.status_code == 200
data = resp.json()
assert data['complete_data'] is False
assert data['status'] == 'Harvesting ongoing'
def test_platform_report_view_no_data_with_sushi_with_3030_attempt(
self, client, counter_report_type, organizations, credentials
):
"""
Report has no data for the requested period but there is SUSHI active for this
combination of platform, organization and report.
        The last attempt ended with error code 3030, which indicates a valid but empty result.
"""
api_key, key_val = OrganizationAPIKey.objects.create_key(
organization=credentials.organization, name='test'
)
credentials.counter_reports.add(counter_report_type)
FetchIntentionFactory(
credentials=credentials,
counter_report=counter_report_type,
start_date='2020-01-01',
end_date='2020-01-31',
attempt__error_code='3030',
)
resp = client.get(
reverse(
'api_platform_report_data',
kwargs={
'platform_id': credentials.platform.pk,
'report_type': counter_report_type.report_type.short_name,
},
),
{'month': '2020-01', 'dims': ''},
HTTP_AUTHORIZATION=f'Api-Key {key_val}',
)
assert resp.status_code == 200
data = resp.json()
assert data['complete_data'] is True
assert data['status'] == 'Empty data'
def test_platform_report_view_no_data_with_sushi_with_unsuccessfull_attempt(
self, client, counter_report_type, organizations, credentials
):
"""
Report has no data for the requested period but there is SUSHI active for this
combination of platform, organization and report.
        The last attempt ended without any indication of empty data, so the harvesting was not
        successful.
"""
api_key, key_val = OrganizationAPIKey.objects.create_key(
organization=credentials.organization, name='test'
)
credentials.counter_reports.add(counter_report_type)
FetchIntentionFactory(
credentials=credentials,
counter_report=counter_report_type,
start_date='2020-01-01',
end_date='2020-01-31',
)
resp = client.get(
reverse(
'api_platform_report_data',
kwargs={
'platform_id': credentials.platform.pk,
'report_type': counter_report_type.report_type.short_name,
},
),
{'month': '2020-01', 'dims': ''},
HTTP_AUTHORIZATION=f'Api-Key {key_val}',
)
assert resp.status_code == 200
data = resp.json()
assert data['complete_data'] is False
assert data['status'] == 'Harvesting error'
def test_platform_report_view_no_data_with_sushi_with_broken_report(
self, client, counter_report_type, organizations, credentials
):
"""
Report has no data for the requested period but there is SUSHI active for this
combination of platform, organization and report, but the report is marked as broken
for these credentials.
"""
api_key, key_val = OrganizationAPIKey.objects.create_key(
organization=credentials.organization, name='test'
)
CounterReportsToCredentials.objects.create(
credentials=credentials, counter_report=counter_report_type, broken=True
)
resp = client.get(
reverse(
'api_platform_report_data',
kwargs={
'platform_id': credentials.platform.pk,
'report_type': counter_report_type.report_type.short_name,
},
),
{'month': '2020-01', 'dims': ''},
HTTP_AUTHORIZATION=f'Api-Key {key_val}',
)
assert resp.status_code == 200
data = resp.json()
assert data['complete_data'] is False
assert data['status'] == 'Report marked as broken for existing credentials'
|
py | b4012df3d012ebaf2be9f676eafca5a4bdee4f07 | # coding: utf-8
"""
Provides datastore model implementations as well as validator factories for them
"""
from .base import Base, BaseValidator
from .config_auth import ConfigAuth
from .config import Config
from .user import User, UserValidator
from .route import Route, RouteRefStructure, RouteDrawingStructure, RouteValidator
from .waypoint import WayPoint, WayPointValidator
from .tag import TagValidator, TagStructure, Tag, TagRelation, Taggable
from .icon import Iconize, Icon, IconValidator
from .collection import Collection, CollectionUser, AddCollection
from .collection import CollectionValidator
|
py | b4012e18bf9385616fe59f195cdf2e0ef16d4cc0 | # SPDX-License-Identifier: Apache-2.0
#
# The OpenSearch Contributors require contributions made to
# this file be licensed under the Apache-2.0 license or a
# compatible open source license.
import os
import unittest
import yaml
from manifests.input_manifest import InputManifest
class TestInputManifest(unittest.TestCase):
def setUp(self):
self.maxDiff = None
self.manifests_path = os.path.realpath(os.path.join(os.path.dirname(__file__), "..", "..", "manifests"))
def test_1_0(self):
path = os.path.join(self.manifests_path, "1.0.0", "opensearch-1.0.0.yml")
manifest = InputManifest.from_path(path)
self.assertEqual(manifest.version, "1.0")
self.assertEqual(manifest.build.name, "OpenSearch")
self.assertEqual(manifest.build.version, "1.0.0")
self.assertEqual(len(list(manifest.components.select(focus="common-utils"))), 1)
opensearch_component = manifest.components["OpenSearch"]
self.assertEqual(opensearch_component.name, "OpenSearch")
self.assertEqual(
opensearch_component.repository,
"https://github.com/opensearch-project/OpenSearch.git",
)
self.assertEqual(opensearch_component.ref, "1.0")
for component in manifest.components.values():
self.assertIsInstance(component.ref, str)
def test_1_1(self):
path = os.path.join(self.manifests_path, "1.1.0", "opensearch-1.1.0.yml")
manifest = InputManifest.from_path(path)
self.assertEqual(manifest.version, "1.0")
self.assertEqual(manifest.build.name, "OpenSearch")
self.assertEqual(manifest.build.version, "1.1.0")
self.assertEqual(len(list(manifest.components.select(focus="common-utils"))), 1)
# opensearch component
opensearch_component = manifest.components["OpenSearch"]
self.assertEqual(opensearch_component.name, "OpenSearch")
self.assertEqual(
opensearch_component.repository,
"https://github.com/opensearch-project/OpenSearch.git",
)
self.assertEqual(opensearch_component.ref, "1.1.0")
# components
for component in manifest.components.values():
self.assertIsInstance(component.ref, str)
# alerting component checks
alerting_component = manifest.components["alerting"]
self.assertIsNotNone(alerting_component)
self.assertEqual(len(alerting_component.checks), 2)
for check in alerting_component.checks:
self.assertIsInstance(check, InputManifest.Check)
self.assertIsNone(alerting_component.checks[0].args)
self.assertEqual(alerting_component.checks[1].args, "alerting")
def test_1_2(self):
path = os.path.join(self.manifests_path, "1.2.0/opensearch-1.2.0.yml")
manifest = InputManifest.from_path(path)
self.assertEqual(manifest.version, "1.0")
self.assertEqual(manifest.build.name, "OpenSearch")
self.assertEqual(manifest.build.version, "1.2.0")
self.assertEqual(manifest.ci.image.name, "opensearchstaging/ci-runner:centos7-x64-arm64-jdkmulti-node10.24.1-cypress6.9.1-20211028")
self.assertEqual(manifest.ci.image.args, "-e JAVA_HOME=/usr/lib/jvm/adoptopenjdk-14-hotspot")
self.assertNotEqual(len(manifest.components), 0)
self.assertEqual(len(list(manifest.components.select(focus="common-utils"))), 1)
# opensearch component
opensearch_component = manifest.components["OpenSearch"]
self.assertEqual(opensearch_component.name, "OpenSearch")
self.assertEqual(
opensearch_component.repository,
"https://github.com/opensearch-project/OpenSearch.git",
)
self.assertEqual(opensearch_component.ref, "1.2")
# components
for component in manifest.components.values():
self.assertIsInstance(component.ref, str)
# alerting component checks
alerting_component = manifest.components["alerting"]
self.assertIsNotNone(alerting_component)
self.assertEqual(len(alerting_component.checks), 2)
for check in alerting_component.checks:
self.assertIsInstance(check, InputManifest.Check)
self.assertIsNone(alerting_component.checks[0].args)
self.assertEqual(alerting_component.checks[1].args, "alerting")
def test_to_dict(self):
path = os.path.join(self.manifests_path, "1.1.0", "opensearch-1.1.0.yml")
manifest = InputManifest.from_path(path)
data = manifest.to_dict()
with open(path) as f:
self.assertEqual(yaml.safe_load(f), data)
def test_invalid_ref(self):
data_path = os.path.join(os.path.dirname(__file__), "data")
manifest_path = os.path.join(data_path, "invalid-ref.yml")
with self.assertRaises(Exception) as context:
InputManifest.from_path(manifest_path)
self.assertTrue(str(context.exception).startswith("Invalid manifest schema: {'components': "))
def test_select(self):
path = os.path.join(self.manifests_path, "1.1.0", "opensearch-1.1.0.yml")
manifest = InputManifest.from_path(path)
self.assertEqual(len(list(manifest.components.select(focus="common-utils"))), 1)
self.assertNotEqual(len(list(manifest.components.select(platform="windows"))), 0)
self.assertEqual(len(list(manifest.components.select(focus="k-NN", platform="linux"))), 1)
def test_select_none(self):
path = os.path.join(self.manifests_path, "1.1.0", "opensearch-1.1.0.yml")
manifest = InputManifest.from_path(path)
with self.assertRaises(ValueError) as ctx:
self.assertEqual(len(list(manifest.components.select(focus="k-NN", platform="windows"))), 0)
self.assertEqual(str(ctx.exception), "No components matched focus=k-NN, platform=windows.")
def test_component___matches__(self):
self.assertTrue(InputManifest.Component({"name": "x", "repository": "", "ref": ""}).__matches__())
def test_component___matches_platform__(self):
data = {"name": "x", "repository": "", "ref": ""}
self.assertTrue(InputManifest.Component(data).__matches__(platform=None))
self.assertTrue(InputManifest.Component(data).__matches__(platform="x"))
self.assertTrue(InputManifest.Component({**data, "platforms": ["linux"]}).__matches__(platform="linux"))
self.assertTrue(InputManifest.Component({**data, "platforms": ["linux", "windows"]}).__matches__(platform="linux"))
self.assertFalse(InputManifest.Component({**data, "platforms": ["linux"]}).__matches__(platform="x"))
def test_component___matches_focus__(self):
component = InputManifest.Component({"name": "x", "repository": "", "ref": ""})
self.assertTrue(component.__matches__(focus=None))
self.assertTrue(component.__matches__(focus="x"))
self.assertFalse(component.__matches__(focus="y"))
|
py | b4012fd8701c82f864a1f56d7632479ad5215b97 | # Generated by Django 3.0.7 on 2021-05-01 05:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('App', '0011_shopsdelhi'),
]
operations = [
migrations.CreateModel(
name='ShopsChennai',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('img', models.ImageField(upload_to='Images/Chennai/Events')),
('location', models.TextField()),
('hreftag', models.TextField()),
],
),
migrations.CreateModel(
name='ShopsKolkata',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('img', models.ImageField(upload_to='Images/Kolkata/Events')),
('location', models.TextField()),
('hreftag', models.TextField()),
],
),
migrations.CreateModel(
name='ShopsMumbai',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('img', models.ImageField(upload_to='Images/Mumbai/Events')),
('location', models.TextField()),
('hreftag', models.TextField()),
],
),
]
|
py | b40130991d295cd4df01b08ec60780dd9393bb63 | # Copyright (c) 2021, Intel Corporation
#
# SPDX-License-Identifier: BSD-3-Clause
from .message_passing_protocol import MPPSocket
|
py | b401313adbb9b3b79b3ca1a9ed961168921cfa2c | """Credential exchange admin routes."""
from aiohttp import web
from aiohttp_apispec import docs, request_schema, response_schema
from marshmallow import fields, Schema
from ....connections.models.connection_record import ConnectionRecord
from ....holder.base import BaseHolder
from ....messaging.credential_definitions.util import CRED_DEF_TAGS
from ....messaging.valid import (
INDY_CRED_DEF_ID,
INDY_DID,
INDY_SCHEMA_ID,
INDY_VERSION,
UUIDFour,
)
from ....storage.error import StorageNotFoundError
from ...problem_report.message import ProblemReport
from .manager import CredentialManager
from .messages.credential_proposal import CredentialProposal
from .messages.inner.credential_preview import (
CredentialPreview,
CredentialPreviewSchema,
)
from .models.credential_exchange import (
V10CredentialExchange,
V10CredentialExchangeSchema,
)
class V10AttributeMimeTypesResultSchema(Schema):
"""Result schema for credential attribute MIME types by credential definition."""
class V10CredentialExchangeListResultSchema(Schema):
"""Result schema for Aries#0036 v1.0 credential exchange query."""
results = fields.List(
fields.Nested(V10CredentialExchangeSchema),
description="Aries#0036 v1.0 credential exchange records",
)
class V10CredentialProposalRequestSchema(Schema):
"""Request schema for sending credential proposal admin message."""
connection_id = fields.UUID(
description="Connection identifier",
required=True,
example=UUIDFour.EXAMPLE, # typically but not necessarily a UUID4
)
cred_def_id = fields.Str(
description="Credential definition identifier",
required=False,
**INDY_CRED_DEF_ID,
)
schema_id = fields.Str(
description="Schema identifier",
required=False,
**INDY_SCHEMA_ID,
)
schema_issuer_did = fields.Str(
description="Schema issuer DID",
required=False,
**INDY_DID,
)
schema_name = fields.Str(
description="Schema name",
required=False,
example="preferences",
)
schema_version = fields.Str(
description="Schema version",
required=False,
**INDY_VERSION,
)
issuer_did = fields.Str(
description="Credential issuer DID",
required=False,
**INDY_DID,
)
comment = fields.Str(description="Human-readable comment", required=False)
credential_proposal = fields.Nested(CredentialPreviewSchema, required=True)
class V10CredentialOfferRequestSchema(Schema):
"""Request schema for sending credential offer admin message."""
connection_id = fields.UUID(
description="Connection identifier",
required=True,
example=UUIDFour.EXAMPLE, # typically but not necessarily a UUID4
)
cred_def_id = fields.Str(
description="Credential definition identifier",
required=True,
**INDY_CRED_DEF_ID,
)
auto_issue = fields.Bool(
description=(
"Whether to respond automatically to credential requests, creating "
"and issuing requested credentials"
),
required=False,
default=False,
)
comment = fields.Str(description="Human-readable comment", required=False)
credential_preview = fields.Nested(CredentialPreviewSchema, required=True)
class V10CredentialIssueRequestSchema(Schema):
"""Request schema for sending credential issue admin message."""
comment = fields.Str(description="Human-readable comment", required=False)
credential_preview = fields.Nested(CredentialPreviewSchema, required=True)
class V10CredentialProblemReportRequestSchema(Schema):
"""Request schema for sending problem report."""
explain_ltxt = fields.Str(required=True)
@docs(tags=["issue-credential"], summary="Get attribute MIME types from wallet")
@response_schema(V10AttributeMimeTypesResultSchema(), 200)
async def attribute_mime_types_get(request: web.BaseRequest):
"""
Request handler for getting credential attribute MIME types.
Args:
request: aiohttp request object
Returns:
The MIME types response
"""
context = request.app["request_context"]
credential_id = request.match_info["credential_id"]
holder: BaseHolder = await context.inject(BaseHolder)
return web.json_response(await holder.get_mime_type(credential_id))
@docs(tags=["issue-credential"], summary="Fetch all credential exchange records")
@response_schema(V10CredentialExchangeListResultSchema(), 200)
async def credential_exchange_list(request: web.BaseRequest):
"""
Request handler for searching connection records.
Args:
request: aiohttp request object
Returns:
The connection list response
"""
context = request.app["request_context"]
tag_filter = {}
if "thread_id" in request.query and request.query["thread_id"] != "":
tag_filter["thread_id"] = request.query["thread_id"]
post_filter = {}
for param_name in ("connection_id", "role", "state"):
if param_name in request.query and request.query[param_name] != "":
post_filter[param_name] = request.query[param_name]
records = await V10CredentialExchange.query(context, tag_filter, post_filter)
return web.json_response({"results": [record.serialize() for record in records]})
@docs(tags=["issue-credential"], summary="Fetch a single credential exchange record")
@response_schema(V10CredentialExchangeSchema(), 200)
async def credential_exchange_retrieve(request: web.BaseRequest):
"""
Request handler for fetching single connection record.
Args:
request: aiohttp request object
Returns:
The credential exchange record
"""
context = request.app["request_context"]
credential_exchange_id = request.match_info["cred_ex_id"]
try:
record = await V10CredentialExchange.retrieve_by_id(
context, credential_exchange_id
)
except StorageNotFoundError:
raise web.HTTPNotFound()
return web.json_response(record.serialize())
@docs(tags=["issue-credential"], summary="Send credential, automating entire flow")
@request_schema(V10CredentialProposalRequestSchema())
@response_schema(V10CredentialExchangeSchema(), 200)
async def credential_exchange_send(request: web.BaseRequest):
"""
Request handler for sending credential from issuer to holder from attr values.
If both issuer and holder are configured for automatic responses, the operation
ultimately results in credential issue; otherwise, the result waits on the first
response not automated; the credential exchange record retains state regardless.
Args:
request: aiohttp request object
Returns:
The credential exchange record
"""
context = request.app["request_context"]
outbound_handler = request.app["outbound_message_router"]
body = await request.json()
comment = body.get("comment")
connection_id = body.get("connection_id")
preview_spec = body.get("credential_proposal")
if not preview_spec:
raise web.HTTPBadRequest(reason="credential_proposal must be provided.")
try:
connection_record = await ConnectionRecord.retrieve_by_id(
context, connection_id
)
except StorageNotFoundError:
raise web.HTTPBadRequest()
if not connection_record.is_ready:
raise web.HTTPForbidden()
credential_proposal = CredentialProposal(
comment=comment,
credential_proposal=CredentialPreview.deserialize(preview_spec),
**{t: body.get(t) for t in CRED_DEF_TAGS if body.get(t)},
)
credential_manager = CredentialManager(context)
(
credential_exchange_record,
credential_offer_message,
) = await credential_manager.prepare_send(
connection_id, credential_proposal=credential_proposal
)
await outbound_handler(
credential_offer_message, connection_id=credential_exchange_record.connection_id
)
return web.json_response(credential_exchange_record.serialize())
@docs(tags=["issue-credential"], summary="Send issuer a credential proposal")
@request_schema(V10CredentialProposalRequestSchema())
@response_schema(V10CredentialExchangeSchema(), 200)
async def credential_exchange_send_proposal(request: web.BaseRequest):
"""
Request handler for sending credential proposal.
Args:
request: aiohttp request object
Returns:
The credential exchange record
"""
context = request.app["request_context"]
outbound_handler = request.app["outbound_message_router"]
body = await request.json()
connection_id = body.get("connection_id")
comment = body.get("comment")
preview_spec = body.get("credential_proposal")
if not preview_spec:
raise web.HTTPBadRequest(reason="credential_proposal must be provided.")
try:
connection_record = await ConnectionRecord.retrieve_by_id(
context, connection_id
)
except StorageNotFoundError:
raise web.HTTPBadRequest()
if not connection_record.is_ready:
raise web.HTTPForbidden()
credential_preview = CredentialPreview.deserialize(preview_spec)
credential_manager = CredentialManager(context)
credential_exchange_record = await credential_manager.create_proposal(
connection_id,
comment=comment,
credential_preview=credential_preview,
**{t: body.get(t) for t in CRED_DEF_TAGS if body.get(t)},
)
await outbound_handler(
CredentialProposal.deserialize(
credential_exchange_record.credential_proposal_dict
),
connection_id=connection_id,
)
return web.json_response(credential_exchange_record.serialize())
@docs(
tags=["issue-credential"],
summary="Send holder a credential offer, free from reference to any proposal",
)
@request_schema(V10CredentialOfferRequestSchema())
@response_schema(V10CredentialExchangeSchema(), 200)
async def credential_exchange_send_free_offer(request: web.BaseRequest):
"""
Request handler for sending free credential offer.
    An issuer initiates such a credential offer, which is free of reference to
    any holder-initiated corresponding proposal.
Args:
request: aiohttp request object
Returns:
The credential exchange record
"""
context = request.app["request_context"]
outbound_handler = request.app["outbound_message_router"]
body = await request.json()
connection_id = body.get("connection_id")
cred_def_id = body.get("cred_def_id")
auto_issue = body.get(
"auto_issue", context.settings.get("debug.auto_respond_credential_request")
)
comment = body.get("comment")
preview_spec = body.get("credential_preview")
if not cred_def_id:
raise web.HTTPBadRequest(reason="cred_def_id is required")
if auto_issue and not preview_spec:
raise web.HTTPBadRequest(
reason="If auto_issue is set to"
+ " true then credential_preview must also be provided."
)
try:
connection_record = await ConnectionRecord.retrieve_by_id(
context, connection_id
)
except StorageNotFoundError:
raise web.HTTPBadRequest()
if not connection_record.is_ready:
raise web.HTTPForbidden()
if preview_spec:
credential_preview = CredentialPreview.deserialize(preview_spec)
credential_proposal = CredentialProposal(
comment=comment,
credential_proposal=credential_preview,
cred_def_id=cred_def_id,
)
credential_proposal_dict = credential_proposal.serialize()
else:
credential_proposal_dict = None
credential_exchange_record = V10CredentialExchange(
connection_id=connection_id,
initiator=V10CredentialExchange.INITIATOR_SELF,
credential_definition_id=cred_def_id,
credential_proposal_dict=credential_proposal_dict,
auto_issue=auto_issue,
)
credential_manager = CredentialManager(context)
(
credential_exchange_record,
credential_offer_message,
) = await credential_manager.create_offer(
credential_exchange_record, comment=comment
)
await outbound_handler(credential_offer_message, connection_id=connection_id)
return web.json_response(credential_exchange_record.serialize())
@docs(
tags=["issue-credential"],
summary="Send holder a credential offer in reference to a proposal",
)
@response_schema(V10CredentialExchangeSchema(), 200)
async def credential_exchange_send_bound_offer(request: web.BaseRequest):
"""
Request handler for sending bound credential offer.
A holder initiates this sequence with a credential proposal; this message
responds with an offer bound to the proposal.
Args:
request: aiohttp request object
Returns:
The credential exchange record
"""
context = request.app["request_context"]
outbound_handler = request.app["outbound_message_router"]
credential_exchange_id = request.match_info["cred_ex_id"]
credential_exchange_record = await V10CredentialExchange.retrieve_by_id(
context, credential_exchange_id
)
assert credential_exchange_record.state == (
V10CredentialExchange.STATE_PROPOSAL_RECEIVED
)
connection_id = credential_exchange_record.connection_id
try:
connection_record = await ConnectionRecord.retrieve_by_id(
context, connection_id
)
except StorageNotFoundError:
raise web.HTTPBadRequest()
if not connection_record.is_ready:
raise web.HTTPForbidden()
credential_manager = CredentialManager(context)
(
credential_exchange_record,
credential_offer_message,
) = await credential_manager.create_offer(credential_exchange_record, comment=None)
await outbound_handler(credential_offer_message, connection_id=connection_id)
return web.json_response(credential_exchange_record.serialize())
@docs(tags=["issue-credential"], summary="Send a credential request")
@response_schema(V10CredentialExchangeSchema(), 200)
async def credential_exchange_send_request(request: web.BaseRequest):
"""
Request handler for sending credential request.
Args:
request: aiohttp request object
Returns:
The credential exchange record
"""
context = request.app["request_context"]
outbound_handler = request.app["outbound_message_router"]
credential_exchange_id = request.match_info["cred_ex_id"]
credential_exchange_record = await V10CredentialExchange.retrieve_by_id(
context, credential_exchange_id
)
connection_id = credential_exchange_record.connection_id
assert credential_exchange_record.state == (
V10CredentialExchange.STATE_OFFER_RECEIVED
)
try:
connection_record = await ConnectionRecord.retrieve_by_id(
context, connection_id
)
except StorageNotFoundError:
raise web.HTTPBadRequest()
if not connection_record.is_ready:
raise web.HTTPForbidden()
credential_manager = CredentialManager(context)
(
credential_exchange_record,
credential_request_message,
) = await credential_manager.create_request(
credential_exchange_record, connection_record.my_did
)
await outbound_handler(credential_request_message, connection_id=connection_id)
return web.json_response(credential_exchange_record.serialize())
@docs(tags=["issue-credential"], summary="Send a credential")
@request_schema(V10CredentialIssueRequestSchema())
@response_schema(V10CredentialExchangeSchema(), 200)
async def credential_exchange_issue(request: web.BaseRequest):
"""
Request handler for sending credential.
Args:
request: aiohttp request object
Returns:
The credential exchange record
"""
context = request.app["request_context"]
outbound_handler = request.app["outbound_message_router"]
body = await request.json()
comment = body.get("comment")
preview_spec = body.get("credential_preview")
if not preview_spec:
raise web.HTTPBadRequest(reason="credential_preview must be provided.")
credential_exchange_id = request.match_info["cred_ex_id"]
cred_exch_record = await V10CredentialExchange.retrieve_by_id(
context, credential_exchange_id
)
connection_id = cred_exch_record.connection_id
assert cred_exch_record.state == V10CredentialExchange.STATE_REQUEST_RECEIVED
try:
connection_record = await ConnectionRecord.retrieve_by_id(
context, connection_id
)
except StorageNotFoundError:
raise web.HTTPBadRequest()
if not connection_record.is_ready:
raise web.HTTPForbidden()
credential_preview = CredentialPreview.deserialize(preview_spec)
credential_manager = CredentialManager(context)
(
cred_exch_record,
credential_issue_message,
) = await credential_manager.issue_credential(
cred_exch_record,
comment=comment,
credential_values=credential_preview.attr_dict(decode=False),
)
await outbound_handler(credential_issue_message, connection_id=connection_id)
return web.json_response(cred_exch_record.serialize())
@docs(tags=["issue-credential"], summary="Store a received credential")
@response_schema(V10CredentialExchangeSchema(), 200)
async def credential_exchange_store(request: web.BaseRequest):
"""
Request handler for storing credential.
Args:
request: aiohttp request object
Returns:
The credential exchange record
"""
context = request.app["request_context"]
outbound_handler = request.app["outbound_message_router"]
credential_exchange_id = request.match_info["cred_ex_id"]
credential_exchange_record = await V10CredentialExchange.retrieve_by_id(
context, credential_exchange_id
)
connection_id = credential_exchange_record.connection_id
assert credential_exchange_record.state == (
V10CredentialExchange.STATE_CREDENTIAL_RECEIVED
)
try:
connection_record = await ConnectionRecord.retrieve_by_id(
context, connection_id
)
except StorageNotFoundError:
raise web.HTTPBadRequest()
if not connection_record.is_ready:
raise web.HTTPForbidden()
credential_manager = CredentialManager(context)
(
credential_exchange_record,
credential_stored_message,
) = await credential_manager.store_credential(credential_exchange_record)
await outbound_handler(credential_stored_message, connection_id=connection_id)
return web.json_response(credential_exchange_record.serialize())
@docs(
tags=["issue-credential"], summary="Send a problem report for credential exchange"
)
@request_schema(V10CredentialProblemReportRequestSchema())
async def credential_exchange_problem_report(request: web.BaseRequest):
"""
Request handler for sending problem report.
Args:
request: aiohttp request object
"""
context = request.app["request_context"]
outbound_handler = request.app["outbound_message_router"]
credential_exchange_id = request.match_info["cred_ex_id"]
body = await request.json()
try:
credential_exchange_record = await V10CredentialExchange.retrieve_by_id(
context, credential_exchange_id
)
except StorageNotFoundError:
raise web.HTTPNotFound()
error_result = ProblemReport(explain_ltxt=body["explain_ltxt"])
error_result.assign_thread_id(credential_exchange_record.thread_id)
await outbound_handler(
error_result, connection_id=credential_exchange_record.connection_id
)
return web.json_response({})
@docs(
tags=["issue-credential"], summary="Remove an existing credential exchange record"
)
async def credential_exchange_remove(request: web.BaseRequest):
"""
Request handler for removing a credential exchange record.
Args:
request: aiohttp request object
"""
context = request.app["request_context"]
credential_exchange_id = request.match_info["cred_ex_id"]
try:
credential_exchange_record = await V10CredentialExchange.retrieve_by_id(
context, credential_exchange_id
)
except StorageNotFoundError:
raise web.HTTPNotFound()
await credential_exchange_record.delete_record(context)
return web.json_response({})
async def register(app: web.Application):
"""Register routes."""
app.add_routes(
[
web.get(
"/issue-credential/mime-types/{credential_id}", attribute_mime_types_get
),
web.get("/issue-credential/records", credential_exchange_list),
web.get(
"/issue-credential/records/{cred_ex_id}", credential_exchange_retrieve
),
web.post("/issue-credential/send", credential_exchange_send),
web.post(
"/issue-credential/send-proposal", credential_exchange_send_proposal
),
web.post(
"/issue-credential/send-offer", credential_exchange_send_free_offer
),
web.post(
"/issue-credential/records/{cred_ex_id}/send-offer",
credential_exchange_send_bound_offer,
),
web.post(
"/issue-credential/records/{cred_ex_id}/send-request",
credential_exchange_send_request,
),
web.post(
"/issue-credential/records/{cred_ex_id}/issue",
credential_exchange_issue,
),
web.post(
"/issue-credential/records/{cred_ex_id}/store",
credential_exchange_store,
),
web.post(
"/issue-credential/records/{cred_ex_id}/problem-report",
credential_exchange_problem_report,
),
web.post(
"/issue-credential/records/{cred_ex_id}/remove",
credential_exchange_remove,
),
]
)
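# Hedged usage sketch (not part of the original module): the JSON body that the
# /issue-credential/send handler above expects, assembled from the
# V10CredentialProposalRequestSchema fields. The connection id, cred def id and
# attribute values are illustrative placeholders, not values from this module.
_EXAMPLE_SEND_BODY = {
    "connection_id": "3fa85f64-5717-4562-b3fc-2c963f66afa6",
    "cred_def_id": "WgWxqztrNooG92RXvxSTWv:3:CL:20:tag",
    "comment": "issue a degree credential",
    "credential_proposal": {
        # shape follows CredentialPreviewSchema: a list of attribute name/value pairs
        "attributes": [{"name": "degree", "value": "Maths"}],
    },
}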
|
py | b4013163cb3c275283ae2662931fefcdffad1fda | # -*- coding: utf-8 -*-
"""Parallel workflow execution via SLURM
"""
from __future__ import (print_function, division, unicode_literals,
absolute_import)
from builtins import open
import os
import sys
from ...interfaces.base import CommandLine
from .base import (GraphPluginBase, logger)
def node_completed_status(checknode):
"""
    A function to determine if a node has previously completed its work
:param checknode: The node to check the run status
:return: boolean value True indicates that the node does not need to be run.
"""
""" TODO: place this in the base.py file and refactor """
node_state_does_not_require_overwrite = (
checknode.overwrite is False or
(checknode.overwrite is None and not checknode._interface.always_run))
hash_exists = False
try:
hash_exists, _, _, _ = checknode.hash_exists()
except Exception:
hash_exists = False
return (hash_exists and node_state_does_not_require_overwrite)
class SLURMGraphPlugin(GraphPluginBase):
"""Execute using SLURM
    The plugin_args input to run can be used to control the SLURM execution.
    Currently supported options are:
    - template : template to use for batch job submission
    - sbatch_args : arguments to be prepended to the job execution script in the
      sbatch call
"""
_template = "#!/bin/bash"
def __init__(self, **kwargs):
self._sbatch_args = ''
if 'plugin_args' in kwargs and kwargs['plugin_args']:
if 'retry_timeout' in kwargs['plugin_args']:
self._retry_timeout = kwargs['plugin_args']['retry_timeout']
if 'max_tries' in kwargs['plugin_args']:
self._max_tries = kwargs['plugin_args']['max_tries']
if 'template' in kwargs['plugin_args']:
self._template = kwargs['plugin_args']['template']
if os.path.isfile(self._template):
self._template = open(self._template).read()
if 'sbatch_args' in kwargs['plugin_args']:
self._sbatch_args = kwargs['plugin_args']['sbatch_args']
if 'dont_resubmit_completed_jobs' in kwargs['plugin_args']:
self._dont_resubmit_completed_jobs = kwargs['plugin_args'][
'dont_resubmit_completed_jobs']
else:
self._dont_resubmit_completed_jobs = False
super(SLURMGraphPlugin, self).__init__(**kwargs)
def _submit_graph(self, pyfiles, dependencies, nodes):
def make_job_name(jobnumber, nodeslist):
"""
- jobnumber: The index number of the job to create
            - nodeslist: The list of nodes being processed
- return: A string representing this job to be displayed by SLURM
"""
job_name = 'j{0}_{1}'.format(jobnumber, nodeslist[jobnumber]._id)
# Condition job_name to be a valid bash identifier (i.e. - is invalid)
job_name = job_name.replace('-', '_').replace('.', '_').replace(
':', '_')
return job_name
batch_dir, _ = os.path.split(pyfiles[0])
submitjobsfile = os.path.join(batch_dir, 'submit_jobs.sh')
cache_doneness_per_node = dict()
if self._dont_resubmit_completed_jobs: # A future parameter for controlling this behavior could be added here
for idx, pyscript in enumerate(pyfiles):
node = nodes[idx]
node_status_done = node_completed_status(node)
# if the node itself claims done, then check to ensure all
                # dependencies are also done
if node_status_done and idx in dependencies:
for child_idx in dependencies[idx]:
if child_idx in cache_doneness_per_node:
child_status_done = cache_doneness_per_node[
child_idx]
else:
child_status_done = node_completed_status(
nodes[child_idx])
node_status_done = node_status_done and child_status_done
cache_doneness_per_node[idx] = node_status_done
with open(submitjobsfile, 'wt') as fp:
fp.writelines('#!/usr/bin/env bash\n')
fp.writelines('# Condense format attempted\n')
for idx, pyscript in enumerate(pyfiles):
node = nodes[idx]
if cache_doneness_per_node.get(idx, False):
continue
else:
template, sbatch_args = self._get_args(
node, ["template", "sbatch_args"])
batch_dir, name = os.path.split(pyscript)
name = '.'.join(name.split('.')[:-1])
batchscript = '\n'.join(
(template, '%s %s' % (sys.executable, pyscript)))
batchscriptfile = os.path.join(batch_dir,
'batchscript_%s.sh' % name)
batchscriptoutfile = batchscriptfile + '.o'
batchscripterrfile = batchscriptfile + '.e'
with open(batchscriptfile, 'wt') as batchfp:
batchfp.writelines(batchscript)
batchfp.close()
deps = ''
if idx in dependencies:
values = ''
for jobid in dependencies[idx]:
                            # Avoid dependencies of done jobs
if not self._dont_resubmit_completed_jobs or not cache_doneness_per_node[jobid]:
values += "${{{0}}}:".format(
make_job_name(jobid, nodes))
if values != '': # i.e. if some jobs were added to dependency list
values = values.rstrip(':')
deps = '--dependency=afterok:%s' % values
jobname = make_job_name(idx, nodes)
# Do not use default output locations if they are set in self._sbatch_args
stderrFile = ''
if self._sbatch_args.count('-e ') == 0:
stderrFile = '-e {errFile}'.format(
errFile=batchscripterrfile)
stdoutFile = ''
if self._sbatch_args.count('-o ') == 0:
stdoutFile = '-o {outFile}'.format(
outFile=batchscriptoutfile)
full_line = '{jobNm}=$(sbatch {outFileOption} {errFileOption} {extraSBatchArgs} {dependantIndex} -J {jobNm} {batchscript} | awk \'/^Submitted/ {{print $4}}\')\n'.format(
jobNm=jobname,
outFileOption=stdoutFile,
errFileOption=stderrFile,
extraSBatchArgs=sbatch_args,
dependantIndex=deps,
batchscript=batchscriptfile)
fp.writelines(full_line)
cmd = CommandLine(
'bash',
environ=dict(os.environ),
resource_monitor=False,
terminal_output='allatonce')
cmd.inputs.args = '%s' % submitjobsfile
cmd.run()
logger.info('submitted all jobs to queue')
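# Hedged usage sketch (not part of the original module): how the plugin_args
# documented on SLURMGraphPlugin are typically supplied from a nipype workflow,
# assuming nipype resolves the 'SLURMGraph' plugin name to the class above. The
# partition/time values and the workflow object `wf` are illustrative
# assumptions, not values taken from this file.
def _example_run_with_slurmgraph(wf):
    return wf.run(
        plugin='SLURMGraph',
        plugin_args={
            'sbatch_args': '--partition=normal --time=01:00:00',
            'template': '#!/bin/bash\n#SBATCH --mem=4G',
            'dont_resubmit_completed_jobs': True,  # handled in __init__ above
        })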
|
py | b40131cc0f7760cd11fcf2ba0595a918a3c845c6 | from .Mercs import Mercs
|
py | b40132517b8804560c85bcf3230ee5b60fed3aba | #!/usr/bin/env python3
# Copyright (c) 2017-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test various fingerprinting protections.
If a stale block more than a month old or its header are requested by a peer,
the node should pretend that it does not have it to avoid fingerprinting.
"""
import time
from test_framework.blocktools import (create_block, create_coinbase)
from test_framework.messages import CInv, MSG_BLOCK
from test_framework.p2p import (
P2PInterface,
msg_headers,
msg_block,
msg_getdata,
msg_getheaders,
p2p_lock,
)
from test_framework.test_framework import BlinkhashTestFramework
from test_framework.util import (
assert_equal,
)
class P2PFingerprintTest(BlinkhashTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
# Build a chain of blocks on top of given one
def build_chain(self, nblocks, prev_hash, prev_height, prev_median_time):
blocks = []
for _ in range(nblocks):
coinbase = create_coinbase(prev_height + 1)
block_time = prev_median_time + 1
block = create_block(int(prev_hash, 16), coinbase, block_time)
block.solve()
blocks.append(block)
prev_hash = block.hash
prev_height += 1
prev_median_time = block_time
return blocks
# Send a getdata request for a given block hash
def send_block_request(self, block_hash, node):
msg = msg_getdata()
msg.inv.append(CInv(MSG_BLOCK, block_hash))
node.send_message(msg)
# Send a getheaders request for a given single block hash
def send_header_request(self, block_hash, node):
msg = msg_getheaders()
msg.hashstop = block_hash
node.send_message(msg)
# Checks that stale blocks timestamped more than a month ago are not served
# by the node while recent stale blocks and old active chain blocks are.
# This does not currently test that stale blocks timestamped within the
# last month but that have over a month's worth of work are also withheld.
def run_test(self):
node0 = self.nodes[0].add_p2p_connection(P2PInterface())
# Set node time to 60 days ago
self.nodes[0].setmocktime(int(time.time()) - 60 * 24 * 60 * 60)
# Generating a chain of 10 blocks
block_hashes = self.nodes[0].generatetoaddress(10, self.nodes[0].get_deterministic_priv_key().address)
# Create longer chain starting 2 blocks before current tip
height = len(block_hashes) - 2
block_hash = block_hashes[height - 1]
block_time = self.nodes[0].getblockheader(block_hash)["mediantime"] + 1
new_blocks = self.build_chain(5, block_hash, height, block_time)
# Force reorg to a longer chain
node0.send_message(msg_headers(new_blocks))
node0.wait_for_getdata([x.sha256 for x in new_blocks])
for block in new_blocks:
node0.send_and_ping(msg_block(block))
# Check that reorg succeeded
assert_equal(self.nodes[0].getblockcount(), 13)
stale_hash = int(block_hashes[-1], 16)
# Check that getdata request for stale block succeeds
self.send_block_request(stale_hash, node0)
node0.wait_for_block(stale_hash, timeout=3)
# Check that getheader request for stale block header succeeds
self.send_header_request(stale_hash, node0)
node0.wait_for_header(hex(stale_hash), timeout=3)
# Longest chain is extended so stale is much older than chain tip
self.nodes[0].setmocktime(0)
block_hash = int(self.nodes[0].generatetoaddress(1, self.nodes[0].get_deterministic_priv_key().address)[-1], 16)
assert_equal(self.nodes[0].getblockcount(), 14)
node0.wait_for_block(block_hash, timeout=3)
# Request for very old stale block should now fail
with p2p_lock:
node0.last_message.pop("block", None)
self.send_block_request(stale_hash, node0)
node0.sync_with_ping()
assert "block" not in node0.last_message
# Request for very old stale block header should now fail
with p2p_lock:
node0.last_message.pop("headers", None)
self.send_header_request(stale_hash, node0)
node0.sync_with_ping()
assert "headers" not in node0.last_message
# Verify we can fetch very old blocks and headers on the active chain
block_hash = int(block_hashes[2], 16)
self.send_block_request(block_hash, node0)
self.send_header_request(block_hash, node0)
node0.sync_with_ping()
self.send_block_request(block_hash, node0)
node0.wait_for_block(block_hash, timeout=3)
self.send_header_request(block_hash, node0)
node0.wait_for_header(hex(block_hash), timeout=3)
if __name__ == '__main__':
P2PFingerprintTest().main()
|
py | b40132bda48230e9c38fbc8889a54fa61dde2946 | # Copyright (c) 2017-2021, Mudita Sp. z.o.o. All rights reserved.
# For licensing, see https://github.com/mudita/MuditaOS/LICENSE.md
import pytest
import time
from harness import log
from harness.utils import Timeout
from harness.interface.error import Error, TestError
from harness.api.filesystem import get_log_file_with_path
from harness.api.device_info import GetDeviceInfo
from harness.api.backup import BackupInit, BackupGetState
@pytest.mark.service_desktop_test
@pytest.mark.rt1051
@pytest.mark.usefixtures("phone_unlocked")
@pytest.mark.backup
def test_backup(harness):
log.debug("Requesting backup");
resp = BackupInit().run(harness)
assert resp.taskId != ""
taskId = resp.taskId
# in response we get a task ID and status 200
log.debug("Backup started, waiting for results")
time.sleep(1) # wait for the endpoint to be ready
# start polling for backup status and wait for it to end
i = 0
try:
with Timeout.limit(seconds=30):
while True:
i = i+1
                # now that we know the task ID we can poll for its status
# body = { "id": response.taskId }
resp = BackupGetState(taskId).run(harness)
# Backup is still running
if resp.state == "running":
log.debug("Backup is running...")
# Backup has stopped, should be OK and finished, status is 303
# and redirects to a location as per the documentation
elif resp.state == "finished":
log.debug("Backup ended, checking results")
resp = GetDeviceInfo().run(harness)
bkpPath = resp.diag_info["backupLocation"]
# Retrieving backup file from phone
p = bkpPath + "/" + taskId
log.debug(f'Backup file path: {p}')
get_log_file_with_path(harness, p, "./")
break
# Backup ended with error
elif resp.state == "error":
log.debug(f'Backup failed: {resp.reason}')
raise TestError(Error.TEST_FAILED)
# wait for a moment
log.debug("Sleeping 1s")
time.sleep(1)
except Timeout as e:
log.error("Backup timeout reached")
raise TestError(Error.OtherError)
|
py | b401331f4f320dbb0674b58ad9e4bec4352081af | from os import path
from distutils import dir_util, file_util
if path.exists('package'):
    dir_util.remove_tree('package')
bin_path = path.join('package', 'bin')
dir_util.mkpath(bin_path)
file_util.copy_file('EIFGENs/classic/F_code/el_server', bin_path)
dir_util.copy_tree('graphics/icons', 'package/icons')
|
py | b4013338fb2130c3e5268dc8adf2441c4ae8ce6a | # -*- coding: utf-8 -*-
from collections import OrderedDict
from typing import Callable, Union
import attr
import yaml
# I'm really surprised this is not already provided by the attrs library
@attr.s(repr=False, slots=True, hash=True)
class _LengthValidator(object):
_repr = "<instance_of validator for length {length!r}>"
length = attr.ib()
_err_gt = (
"'{name}' must be longer than {length!r} (got {value!r} with length "
"{actual!r}).")
_err_gte = (
"'{name}' must be longer or equal to {length!r} "
"(got {value!r} with length "
"{actual!r}).")
_err_lt = (
"length of '{name}' must be less than {length!r} "
"(got {value!r} with length "
"{actual!r}).")
_err_lte = (
"length of '{name}' must be less than or equal to {length!r} "
"(got {value!r} with length "
"{actual!r}).")
_err_eq = (
"length of '{name}' must be equal to {length!r} "
"(got {value!r} with length "
"{actual!r}).")
def __call__(self, inst, attr, value) -> None:
if str(self.length).startswith('>='):
self._gte(inst, attr, value)
elif str(self.length).startswith('<='):
self._lte(inst, attr, value)
elif str(self.length).startswith('>'):
self._gt(inst, attr, value)
elif str(self.length).startswith('<'):
self._lt(inst, attr, value)
else:
self._eq(inst, attr, value)
def __repr__(self) -> str:
return self._repr.format(length=self.length)
# attr: p.c.attribs.ValidatingAttribs
def _type_error(
self,
msg: str,
attr,
value: Union[dict, list, str, tuple, OrderedDict],
length: int) -> TypeError:
return TypeError(
msg.format(
name=attr.name,
length=length,
actual=len(value),
value=value),
attr,
self.length,
value)
def _gt(self, inst, attr, value) -> None:
if not len(value) > int(self.length.strip('>')):
raise self._type_error(
self._err_gt,
attr,
value,
self.length.strip('>'))
def _gte(self, inst, attr, value) -> None:
if not len(value) >= int(self.length.strip('>=')):
raise self._type_error(
self._err_gte,
attr,
value,
self.length.strip('>='))
def _lt(self, inst, attr, value) -> None:
if not len(value) < int(self.length.strip('<')):
raise self._type_error(
self._err_lt,
attr,
value,
self.length.strip('<'))
def _lte(self, inst, attr, value) -> None:
if not len(value) <= int(self.length.strip('<=')):
raise self._type_error(
self._err_lte,
attr,
value,
self.length.strip('<='))
def _eq(self, inst, attr, value) -> None:
if not len(value) == int(self.length):
raise self._type_error(
self._err_eq,
attr,
value,
self.length)
def has_length(length: Union[int, str]) -> _LengthValidator:
return _LengthValidator(length)
# this may not be useful - may be due to lack of understanding of attrs
# seems useful to me, and gets things moving for now
@attr.s(repr=False, slots=True, hash=True)
class _AllMembersValidator(object):
_repr = "<instance_of validator for membership test {members!r}>"
members = attr.ib()
def __call__(self, inst, attr, value) -> None:
"""
We use a callable class to be able to change the ``__repr__``.
"""
_iter = value
if isinstance(value, (dict, OrderedDict)):
_iter = value.items()
for member in _iter:
if not self.members(member):
raise TypeError(
"'{name}' member did not match requirements "
"(got {value!r})".format(
name=attr.name,
value=member,
),
attr,
member)
def __repr__(self):
return self._repr.format(members=self.members)
def all_members(membertest: Callable):
return _AllMembersValidator(membertest)
@attr.s(repr=False, slots=True, hash=True)
class _WellFormedStringValidator(object):
_repr = "<instance_of validator for well-formed string {string_type!r}>"
string_type = attr.ib()
def __call__(self, inst, attr, value) -> None:
return getattr(self, f'valid_{self.string_type}')(inst, attr, value)
def valid_yaml(self, inst, attr, value) -> None:
try:
yaml.safe_load(value)
except yaml.parser.ParserError:
raise TypeError(
"'{name}' Unable to parse as {string_type}".format(
name=attr.name,
string_type=self.string_type),
attr,
self.string_type,
value)
def __repr__(self) -> str:
return self._repr.format(string_type=self.string_type)
def is_well_formed(format_type: str) -> _WellFormedStringValidator:
return _WellFormedStringValidator(format_type)
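# Hedged usage sketch (not part of the original module): how the three public
# helpers above combine with attrs. The _ExampleConfig class and its field
# names are invented for illustration only.
@attr.s
class _ExampleConfig(object):
    # exactly three tags, each a non-empty string
    tags = attr.ib(validator=[has_length(3),
                              all_members(lambda m: isinstance(m, str) and m)])
    # at least one option
    options = attr.ib(validator=has_length('>=1'))
    # must parse as YAML
    raw_yaml = attr.ib(validator=is_well_formed('yaml'))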
|
py | b401336c5f9e3848a2d00983234edc7b0619fc47 | import redis
# connexion à la base de données
DBSession = redis.Redis(unix_socket_path='/var/run/redis/redis.sock')
|
py | b401337b2b38b122e92b36f5734599c59c7051da | """The Sphinx awesome theme as a Python package.
:copyright: Copyright Kai Welke.
:license: MIT, see LICENSE_ for details
.. _LICENSE: https://github.com/kai687/sphinxawesome-theme/blob/master/LICENSE
"""
try:
from importlib.metadata import PackageNotFoundError, version # type: ignore
except ImportError: # pragma: no cover
from importlib_metadata import version, PackageNotFoundError # type: ignore
from os import environ, path
from typing import Any, Dict
from sphinx.application import Sphinx
try:
# obtain version from `pyproject.toml` via `importlib.metadata.version()`
__version__ = version(__name__)
except PackageNotFoundError: # pragma: no cover
__version__ = "unknown"
def setup(app: "Sphinx") -> Dict[str, Any]:
"""Register the theme and its extensions wih Sphinx.
The setup function of this theme accomplishes the following:
- add the HTML theme
- activate the ``sphinxawesome.sampdirective`` extension
- activate the ``sphinxawesome_theme.highlighting`` extension
- activate the ``sphinxawesome_theme.html_translator`` extension
- add the ``AdmonitionID`` as post-transform
- execute the ``post_process_html`` code when the build has finished
"""
app.add_html_theme("sphinxawesome_theme", path.abspath(path.dirname(__file__)))
app.setup_extension("sphinxawesome.sampdirective")
app.setup_extension("sphinxawesome_theme.highlighting")
app.setup_extension("sphinxawesome_theme.html_translator")
app.setup_extension("sphinxawesome_theme.admonition_ids")
app.setup_extension("sphinxawesome_theme.jinja_filters")
app.setup_extension("sphinxawesome_theme.permalinks_backport")
# if this environment variable is defined, skip the postprocessing
if "SPHINX_AWESOME_THEME_NO_POSTPROCESSING" not in environ:
app.setup_extension("sphinxawesome_theme.postprocess")
return {
"version": __version__,
"parallel_read_safe": True,
"parallel_write_safe": True,
}
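# Hedged usage sketch (not part of the original package): minimal conf.py
# settings a Sphinx project might use to activate this theme. The theme and
# extension name match the strings registered in setup() above; whether the
# extensions entry is strictly required depends on how Sphinx discovers the
# theme and is an assumption here.
_EXAMPLE_CONF_PY = """
extensions = ["sphinxawesome_theme"]
html_theme = "sphinxawesome_theme"
"""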
|
py | b40133dadb147e2e620b1cd57500932e395dd5bb | # -*- coding: utf-8 -*-
from __future__ import print_function
import caffe
from caffe import layers as L
from caffe import params as P
from google.protobuf import text_format
import math
import os
import shutil
import stat
import subprocess
import sys
import time
sys.path.append('../')
from username import USERNAME
sys.dont_write_bytecode = True
from solverParam import *
from reid_data_layer import *
# we need to define a reidNet to train
from PyLib.Utils.path import *
from PyLib.NetLib.ReIDNet import *
def reid_train():
time_postfix = time.strftime("%m-%d_%H-%M-%S",time.localtime())
################################################################################
os.chdir(caffe_root)
################################################################################
# work dir
ProjectName = "{}_{}_{}".format(BaseNet,Models,Ver)
work_dir = "{}/{}/{}".format(Results_dir,Project,ProjectName)
make_if_not_exist(work_dir)
################################################################################
# work and model dirs
proto_dir = "{}/Proto".format(work_dir)
log_dir = "{}/Logs".format(work_dir)
model_dir = "{}/Models".format(work_dir)
pic_dir = "{}/Pics".format(work_dir)
job_dir = "{}/Job".format(work_dir)
make_if_not_exist(proto_dir)
make_if_not_exist(log_dir)
make_if_not_exist(model_dir)
make_if_not_exist(pic_dir)
make_if_not_exist(job_dir)
################################################################################
# work file
log_file = "{}/{}.log".format(log_dir,time_postfix)
train_net_file = "{}/train.prototxt".format(proto_dir)
test_net_file = "{}/test.prototxt".format(proto_dir)
solver_file = "{}/solver.prototxt".format(proto_dir)
snapshot_prefix = "{}/{}".format(model_dir,ProjectName)
job_file = "{}/train.sh".format(job_dir)
################################################################################
# TRAIN
net = caffe.NetSpec()
net = get_reidDataLayer(net,batch_size=get_train_batchsize())
# we need to define a ReIDNet
net = ReIDNet(net, data_layer="data", label_layer="label", net_input_width=net_input_width, net_input_height=net_input_height)
with open(train_net_file, 'w') as f:
print('name: "{}_train"'.format(ProjectName), file=f)
print(net.to_proto(), file=f)
################################################################################
# TEST
net = caffe.NetSpec()
net = get_reidDataLayer(net,batch_size=get_test_batchsize())
net = ReIDNet_Test(net, data_layer="data", label_layer="label", net_input_width=net_input_width, net_input_height=net_input_height)
with open(test_net_file, 'w') as f:
print('name: "{}_test"'.format(ProjectName), file=f)
print(net.to_proto(), file=f)
################################################################################
# Solver
solver_param = get_solver_param()
solver = caffe_pb2.SolverParameter(train_net=train_net_file, \
test_net=[test_net_file], \
snapshot_prefix=snapshot_prefix, \
**solver_param)
with open(solver_file, 'w') as f:
print(solver, file=f)
################################################################################
# CaffeModel & Snapshot
max_iter = 0
for file in os.listdir(model_dir):
if file.endswith(".solverstate"):
basename = os.path.splitext(file)[0]
iter = int(basename.split("{}_iter_".format(ProjectName))[1])
if iter > max_iter:
max_iter = iter
train_param = '--weights="{}" \\\n'.format(Pretrained_Model)
if resume_training:
if max_iter > 0:
train_param = '--snapshot="{}_iter_{}.solverstate" \\\n'.format(snapshot_prefix, max_iter)
################################################################################
# scripts
with open(job_file, 'w') as f:
f.write('cd {}\n'.format(caffe_root))
f.write('./build/tools/caffe train \\\n')
f.write('--solver="{}" \\\n'.format(solver_file))
f.write(train_param)
if solver_param['solver_mode'] == P.Solver.GPU:
f.write('--gpu {} 2>&1 | tee {}\n'.format(get_gpus(), log_file))
else:
f.write('2>&1 | tee {}.log\n'.format(log_file))
os.chmod(job_file, stat.S_IRWXU)
# ==========================================================================
# Training
subprocess.call(job_file, shell=True)
if __name__ == "__main__":
reid_train()
|
py | b40134b95268b10067f4efc057329a2a98648066 | # -*- coding: utf-8 -*-
'''
Gives SaltStack access to Windows event log
Charles McMarrow <[email protected]>
'''
from __future__ import absolute_import
# Import Python libs
import logging
import collections
# Import Salt Libs
import salt.utils.platform
# Import Third Party Libs
try:
import win32evtlog
import win32evtlogutil
import winerror
import pywintypes
IMPORT_STATUS = True
except ImportError:
IMPORT_STATUS = False
# keys of all the parts of a Event supported by the API
EVENT_PARTS = ('closingRecordNumber',
'computerName',
'data',
'eventCategory',
'eventID',
'eventType',
'recordNumber',
'reserved',
'reservedFlags',
'sid',
'sourceName',
'stringInserts',
'timeGenerated',
'timeWritten',
)
# keys time
TIME_PARTS = ('year',
'month',
'day',
'hour',
'minute',
'second',
)
TimeTuple = collections.namedtuple('TimeTuple', 'year, month, day, hour, minute, second')
log = logging.getLogger(__name__)
__virtualname__ = 'win_event_viewer'
def __virtual__():
'''
Load only on minions running on Windows.
'''
if not salt.utils.platform.is_windows() or not IMPORT_STATUS:
        return False, 'win_event_viewer: must be on Windows'
return __virtualname__
def _change_str_to_bytes(data, encoding='utf-8', encode_keys=False):
'''
Convert string objects to byte objects.
This function will destroy the data object and objects that data links to.
data
object
encoding
str
encode_keys
bool
if false key strings will not be turned into bytes
return
new object
'''
if isinstance(data, dict):
new_dict = {}
# recursively check every item in dict
for key in data:
item = _change_str_to_bytes(data[key], encoding)
if encode_keys:
# keys that are strings most be made into bytes
key = _change_str_to_bytes(key, encoding)
new_dict[key] = item
data = new_dict
elif isinstance(data, list):
new_list = []
# recursively check every item in list
for item in data:
new_list.append(_change_str_to_bytes(item, encoding))
data = new_list
elif isinstance(data, tuple):
new_list = []
# recursively check every item in list
for item in data:
new_list.append(_change_str_to_bytes(item, encoding))
data = tuple(new_list)
elif isinstance(data, str):
# data is turning into bytes because if was a string
data = data.encode(encoding)
return data
def _get_raw_time(time):
'''
Will make a pywintypes.datetime into a TimeTuple.
time
pywintypes.datetime
return
TimeTuple
'''
return TimeTuple._make((time.year, time.month, time.day, time.hour, time.minute, time.second))
def make_event_dict(event):
'''
Will make a PyEventLogRecord into a dict.
event
PyEventLogRecord
return
dict
'''
event_dict = {}
for event_part in EVENT_PARTS:
# get object value and add it to the event dict
event_dict[event_part] = getattr(event, event_part[0].upper() + event_part[1:], None)
# format items
event_dict['eventID'] = winerror.HRESULT_CODE(event_dict['eventID'])
if event_dict['sid'] is not None:
event_dict['sid'] = event_dict['sid'].GetSidIdentifierAuthority()
event_dict['timeGenerated'] = _get_raw_time(event_dict['timeGenerated'])
event_dict['timeWritten'] = _get_raw_time(event_dict['timeWritten'])
return _change_str_to_bytes(event_dict)
def _get_event_handler(log_name, target_computer=None):
'''
Will try to open a PyHANDLE.
log_name
str
target_computer
None or str
return
PyHANDLE
'''
# TODO: upgrade windows token
# 'log close' can fail if this is not done
try:
return win32evtlog.OpenEventLog(target_computer, log_name)
except pywintypes.error:
raise FileNotFoundError('Log "{0}" of "{1}" can not be found or access was denied!'.format(log_name,
target_computer))
def _close_event_handler(handler):
'''
Will close the event handler.
handler
PyHANDLE
'''
# TODO: downgrade windows token
win32evtlog.CloseEventLog(handler)
def get_event_generator(log_name, target_computer=None, raw=False):
'''
Will get all log events one by one.
Events are not in exact order.
log_name
str
target_computer
None or str
raw
bool
True: PyEventLogRecord
False: dict
return
PyEventLogRecord or dict
'''
handler = _get_event_handler(log_name, target_computer)
flags = win32evtlog.EVENTLOG_BACKWARDS_READ | win32evtlog.EVENTLOG_SEQUENTIAL_READ
event_count = 0
while win32evtlog.GetNumberOfEventLogRecords(handler) > event_count:
# get list of some of the events
events = win32evtlog.ReadEventLog(handler, flags, 0)
if not events:
# event log was updated and events are not ready to be given yet
# rather than wait just return
break
for event in events:
event_count += 1
if raw:
yield event
else:
yield make_event_dict(event)
_close_event_handler(handler)
def get_events(log_name, target_computer=None, raw=False):
'''
    Will get all events from a log and return them as a tuple.
log_name
str
target_computer
None or str
raw
bool
True: PyEventLogRecord
False: dict
return
tuple
'''
return tuple(get_event_generator(log_name, target_computer, raw))
def get_event_sorted_by_info_generator(log_name, target_computer=None):
'''
    Will yield each event together with a dict of its event parts and time parts.
log_name
str
target_computer
None or str
return
dict
'''
for event in get_event_generator(log_name, target_computer):
event_info = {}
for part in event:
event_info[part] = event[part]
for spot, key in enumerate(TIME_PARTS):
event_info[key] = event['timeGenerated'][spot]
yield event, event_info
def get_events_sorted_by_info(log_name, target_computer=None):
'''
    Will build a dict grouping events by each event part and time part value.
log_name
str
target_computer
None or str
return
dict
'''
event_info = {event_part: collections.defaultdict(list) for event_part in EVENT_PARTS + TIME_PARTS}
for event, info in get_event_sorted_by_info_generator(log_name, target_computer):
for part in info:
event_info[part][info.get(part)].append(event)
return event_info
def get_event_filter_generator(log_name, target_computer=None, all_requirements=True, **kwargs):
'''
Will find events that meet the requirements
log_name
str
target_computer
None or str
all_requirements
bool
        True: all requirements must be met
        False: only a single requirement must be met
kwargs
requirements for the events
return
dict
'''
for event, info in get_event_sorted_by_info_generator(log_name, target_computer):
if all_requirements:
            # all requested keys need to match the event info
for key in kwargs:
if kwargs[key] != info[key]:
break
else:
yield event
else:
# just a single key par needs to match
if any([kwargs[key] == info[key] for key in kwargs]):
yield event
def get_events_filter(log_name, target_computer=None, all_requirements=True, **kwargs):
'''
Find events that meet the requirements.
log_name
str
target_computer
None or str
all_requirements
bool
        True: all requirements must be met
        False: only a single requirement must be met
kwargs
requirements for the events
return
list
'''
return tuple(get_event_filter_generator(log_name, target_computer, all_requirements, **kwargs))
def log_event(application_name, event_id, **kwargs):
'''
Adds event to application log.
application_name
str
event_id
int
kwargs
parts of event
'''
win32evtlogutil.ReportEvent(application_name, event_id, **kwargs)
def clear_log(log_name, target_computer=None):
'''
Clears event log.
    A "log cleared" event will be added after the log is cleared.
log_name
str
target_computer
None or str
'''
handler = _get_event_handler(log_name, target_computer)
win32evtlog.ClearEventLog(handler, log_name)
_close_event_handler(handler)
def get_number_of_events(log_name, target_computer=None):
'''
Gets the number of events in a log.
log_name
str
target_computer
None or str
return
int
'''
handler = _get_event_handler(log_name, target_computer)
number_of_events = win32evtlog.GetNumberOfEventLogRecords(handler)
_close_event_handler(handler)
return number_of_events
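# Hedged usage sketch (not part of the original module): once Salt loads this as
# the `win_event_viewer` execution module, the functions above can be driven from
# Python roughly as below (or via the usual `salt '*' win_event_viewer.<func> ...`
# CLI form). The "Application" log name and the error-type filter are illustrative.
def _example_queries():
    total = get_number_of_events('Application')
    errors = get_events_filter('Application',
                               eventType=win32evtlog.EVENTLOG_ERROR_TYPE)
    return total, len(errors)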
|
py | b40134e198cd9637567056f0ef741163e03cb246 | import numpy
import scipy.special
class NeuralNet(object):
def __init__(self, n_input, n_hidden, n_output, learning_rate):
self.n_input = n_input
self.n_hidden = n_hidden
self.n_output = n_output
self.learning_rate = learning_rate
self.wih = numpy.random.normal(0.0, pow(self.n_hidden, -0.5), (self.n_hidden, self.n_input))
self.who = numpy.random.normal(0.0, pow(self.n_output, -0.5), (self.n_output, self.n_hidden))
def train(self, inputs_list, targets_list):
        p = 0.5  # keep probability for (inverted) dropout
inputs = numpy.array(inputs_list, ndmin=2).T
targets = numpy.array(targets_list, ndmin=2).T
hidden_inputs = numpy.dot(self.wih, inputs)
d1 = (numpy.random.rand(*hidden_inputs.shape) < p) / p
hidden_inputs *= d1
hidden_outputs = self.activation_function(hidden_inputs)
final_inputs = numpy.dot(self.who, hidden_outputs)
d2 = (numpy.random.rand(*final_inputs.shape) < p) / p
final_inputs *= d2
final_outputs = self.activation_function(final_inputs)
output_errors = targets - final_outputs
hidden_errors = numpy.dot(self.who.T, output_errors)
self.who += self.learning_rate * numpy.dot(output_errors * final_outputs * (1.0 - final_outputs), hidden_outputs.T)
self.wih += self.learning_rate * numpy.dot(hidden_errors * hidden_outputs * (1.0 - hidden_outputs), inputs.T)
def query(self, inputs_list):
inputs = numpy.array(inputs_list, ndmin=2).T
hidden_inputs = numpy.dot(self.wih, inputs)
hidden_outputs = self.activation_function(hidden_inputs)
final_inputs = numpy.dot(self.who, hidden_outputs)
final_outputs = self.activation_function(final_inputs)
return final_outputs
def activation_function(self, x):
return scipy.special.expit(x)
"""
Example usage of NeuralNetwork to solve the MNIST data set.
"""
import sys, os
import numpy
# Sloppily add neural_network to our path so we can import it
sys.path.insert(0, os.path.abspath('../neural_network'))
from neural_networkD import NeuralNet
def train_the_neural_net(neural_net, epochs=1):
print 'Training the neural network.'
training_data_file = open('mnist_train.csv', 'r')
training_data_list = training_data_file.readlines()
training_data_file.close()
epochs = epochs
for i in range(epochs):
print 'Training epoch {}/{}.'.format(i+1, epochs)
for record in training_data_list:
all_values = record.split(',')
inputs = (numpy.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01
targets = numpy.zeros(output_nodes) + 0.01
targets[int(all_values[0])] = 0.99
neural_net.train(inputs, targets)
print 'complete.'
def test_the_neural_net(neural_net):
print 'Testing the neural network.'
test_data_file = open('mnist_test.csv', 'r')
test_data_list = test_data_file.readlines()
test_data_file.close()
scorecard = []
for i, record in enumerate(test_data_list):
all_values = record.split(',')
correct_label = int(all_values[0])
inputs = (numpy.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01
outputs = neural_net.query(inputs)
label = numpy.argmax(outputs)
if label == correct_label:
scorecard.append(1)
else:
scorecard.append(0)
print 'complete.'
return scorecard
if __name__ == '__main__':
print 'Starting neural network to recognize handwritten digits.'
input_nodes = 784
hidden_nodes = 200
output_nodes = 10
learning_rate = 0.1
nn = NeuralNet(input_nodes, hidden_nodes, output_nodes, learning_rate)
# Train
train_the_neural_net(nn, epochs=1)
# Test
test_results = numpy.asarray(test_the_neural_net(nn))
# Print results
print('Neural network is {}% accurate at predicting handwritten digits.'
.format(test_results.sum() / float(test_results.size) * 100.0))
|
py | b40134febd67cd63cc62ef82a30bb7493fffe2fb | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This is very different to AboutModules in Ruby Koans
# Our AboutMultipleInheritance class is a little more comparable
#
from runner.koan import *
#
# Package hierarchy of Python Koans project:
#
# contemplate_koans.py
# koans/
# __init__.py
# about_asserts.py
# about_attribute_access.py
# about_class_attributes.py
# about_classes.py
# ...
# a_package_folder/
# __init__.py
# a_module.py
class AboutPackages(Koan):
def test_subfolders_can_form_part_of_a_module_package(self):
# Import ./a_package_folder/a_module.py
from .a_package_folder.a_module import Duck
duck = Duck()
self.assertEqual('Donald', duck.name)
def test_subfolders_become_modules_if_they_have_an_init_module(self):
# Import ./a_package_folder/__init__.py
from .a_package_folder import an_attribute
self.assertEqual(1984, an_attribute)
# ------------------------------------------------------------------
def test_use_absolute_imports_to_import_upper_level_modules(self):
# Import /contemplate_koans.py
import contemplate_koans
self.assertEqual('contemplate_koans', contemplate_koans.__name__)
# contemplate_koans.py is the root module in this package because it's
# the first python module called in koans.
#
# If contemplate_koans.py was based in a_package_folder that would be
# the root folder, which would make reaching the koans folder
# almost impossible. So always leave the starting python script in
# a folder which can reach everything else.
def test_import_a_module_in_a_subfolder_folder_using_an_absolute_path(self):
# Import contemplate_koans.py/koans/a_package_folder/a_module.py
from koans.a_package_folder.a_module import Duck
self.assertEqual('koans.a_package_folder.a_module', Duck.__module__)
|
py | b401362a9c2b530e1c659830c3dfdd3832ea3489 | #! /usr/bin/python
__author__ = 'xudshen'
from BaseFeatures import BaseFeatures
import math
class MorphologicalFeatures(BaseFeatures):
def __init__(self, sample_path, species_name):
BaseFeatures.__init__(self, sample_path, species_name)
def process(self):
D = float(self.features.get('Diameter'))
Lp = float(self.features.get('Physiological Length'))
Wp = float(self.features.get('Physiological Width'))
A = float(self.features.get('Leaf Area'))
P = float(self.features.get('Leaf Perimeter'))
Ah = float(self.features.get('Convex Hull Area'))
Ph = float(self.features.get('Convex Hull Perimeter'))
self.features.update({
'Aspect ratio': Lp / Wp,
'Form factor': 4 * math.pi * A / P**2,
'Rectangularity': Lp * Wp / A,
'Narrow factor': D / Lp,
'Perimeter radio of diameter': P / D,
'Perimeter ratio of physiological': P / (Lp + Wp),
'Roundness': 4 * A / (math.pi * D**2),
'Roughness': Ph / P,
'Convex area radio': Ah / A,
}) |
py | b40136730bb8901b1d375542f0cf16b658887daa | # test mod_md acme terms-of-service handling
import pytest
from TestEnv import TestEnv
def setup_module(module):
print("setup_module: %s" % module.__name__)
TestEnv.init()
def teardown_module(module):
print("teardown_module: %s" % module.__name__)
class TestRegAdd:
def setup_method(self, method):
print("setup_method: %s" % method.__name__)
TestEnv.clear_store()
def teardown_method(self, method):
print("teardown_method: %s" % method.__name__)
# test case: add a single dns managed domain
def test_100_000(self):
dns = "greenbytes.de"
jout1 = TestEnv.a2md(["add", dns])['jout']
TestEnv.check_json_contains(jout1['output'][0], {
"name": dns,
"domains": [dns],
"contacts": [],
"ca": {
"url": TestEnv.ACME_URL,
"proto": "ACME"
},
"state": TestEnv.MD_S_INCOMPLETE
})
assert TestEnv.a2md(["list"])['jout'] == jout1
# test case: add > 1 dns managed domain
def test_100_001(self):
dns = ["greenbytes2.de", "www.greenbytes2.de", "mail.greenbytes2.de"]
jout1 = TestEnv.a2md(["add"] + dns)['jout']
TestEnv.check_json_contains(jout1['output'][0], {
"name": dns[0],
"domains": dns,
"contacts": [],
"ca": {
"url": TestEnv.ACME_URL,
"proto": "ACME"
},
"state": TestEnv.MD_S_INCOMPLETE
})
assert TestEnv.a2md(["list"])['jout'] == jout1
# test case: add second managed domain
def test_100_002(self):
dns1 = ["test100-002.com", "test100-002a.com", "test100-002b.com"]
TestEnv.a2md(["add"] + dns1)
# add second managed domain
dns2 = ["greenbytes2.de", "www.greenbytes2.de", "mail.greenbytes2.de"]
jout = TestEnv.a2md(["add"] + dns2)['jout']
# assert: output covers only changed md
assert len(jout['output']) == 1
TestEnv.check_json_contains(jout['output'][0], {
"name": dns2[0],
"domains": dns2,
"contacts": [],
"ca": {
"url": TestEnv.ACME_URL,
"proto": "ACME"
},
"state": TestEnv.MD_S_INCOMPLETE
})
assert len(TestEnv.a2md(["list"])['jout']['output']) == 2
# test case: add existing domain
def test_100_003(self):
dns = "greenbytes.de"
assert TestEnv.a2md(["add", dns])['rv'] == 0
assert TestEnv.a2md(["add", dns])['rv'] == 1
# test case: add without CA URL
def test_100_004(self):
dns = "greenbytes.de"
jout1 = TestEnv.run([TestEnv.A2MD, "-d", TestEnv.STORE_DIR, "-j", "add", dns])['jout']
assert len(jout1['output']) == 1
TestEnv.check_json_contains(jout1['output'][0], {
"name": dns,
"domains": [dns],
"contacts": [],
"ca": {
"proto": "ACME"
},
"state": TestEnv.MD_S_INCOMPLETE
})
assert TestEnv.a2md(["list"])['jout'] == jout1
# test case: add with invalid DNS
@pytest.mark.parametrize("invalid_dns", [
"tld", "white sp.ace", "invalid.*.wildcard.com", "k\xc3ller.idn.com"
])
def test_100_005(self, invalid_dns):
assert TestEnv.a2md(["add", invalid_dns])["rv"] == 1
assert TestEnv.a2md(["add", "test-100.de", invalid_dns])["rv"] == 1
# test case: add with invalid ACME URL
@pytest.mark.parametrize("invalid_url", [
"no.schema/path", "http://white space/path", "http://bad.port:-1/path"])
def test_100_006(self, invalid_url):
args = [TestEnv.A2MD, "-a", invalid_url, "-d", TestEnv.STORE_DIR, "-j"]
dns = "greenbytes.de"
args.extend(["add", dns])
assert TestEnv.run(args)["rv"] == 1
# test case: add overlapping dns names
def test_100_007(self):
assert TestEnv.a2md(["add", "test-100.com", "test-101.com"])['rv'] == 0
# 1: alternate DNS exists as primary name
assert TestEnv.a2md(["add", "greenbytes2.de", "test-100.com"])['rv'] == 1
# 2: alternate DNS exists as alternate DNS
assert TestEnv.a2md(["add", "greenbytes2.de", "test-101.com"])['rv'] == 1
# 3: primary name exists as alternate DNS
assert TestEnv.a2md(["add", "test-101.com"])['rv'] == 1
# test case: add subdomains as separate managed domain
def test_100_008(self):
assert TestEnv.a2md(["add", "test-100.com"])['rv'] == 0
assert TestEnv.a2md(["add", "sub.test-100.com"])['rv'] == 0
# test case: add duplicate domain
def test_100_009(self):
dns1 = "test-100.com"
dns2 = "test-101.com"
jout = TestEnv.a2md(["add", dns1, dns2, dns1, dns2])['jout']
# DNS is only listed once
assert len(jout['output']) == 1
md = jout['output'][0]
assert md['domains'] == [dns1, dns2]
    # test case: add punycode name
def test_100_010(self):
assert TestEnv.a2md(["add", "xn--kller-jua.punycode.de"])['rv'] == 0
# test case: don't sort alternate names
def test_100_011(self):
dns = ["test-100.com", "test-xxx.com", "test-aaa.com"]
jout = TestEnv.a2md(["add"] + dns)['jout']
# DNS is only listed as specified
assert len(jout['output']) == 1
md = jout['output'][0]
assert md['domains'] == dns
# test case: add DNS wildcard
@pytest.mark.parametrize("wild_dns", [
"*.wildcard.com"
])
def test_100_012(self, wild_dns):
assert TestEnv.a2md(["add", wild_dns])['rv'] == 0
|
py | b4013815a3e802958b8202b0096e7fd77b78f268 | # coding: utf-8
import pprint
import re
import six
class CommonTask:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'task_id': 'str',
'status': 'str',
'create_time': 'str',
'start_time': 'str',
'end_time': 'str',
'description': 'str',
'input': 'ObsObjInfo',
'output': 'ObsObjInfo',
'user_data': 'str'
}
attribute_map = {
'task_id': 'task_id',
'status': 'status',
'create_time': 'create_time',
'start_time': 'start_time',
'end_time': 'end_time',
'description': 'description',
'input': 'input',
'output': 'output',
'user_data': 'user_data'
}
def __init__(self, task_id=None, status=None, create_time=None, start_time=None, end_time=None, description=None, input=None, output=None, user_data=None):
"""CommonTask - a model defined in huaweicloud sdk"""
self._task_id = None
self._status = None
self._create_time = None
self._start_time = None
self._end_time = None
self._description = None
self._input = None
self._output = None
self._user_data = None
self.discriminator = None
if task_id is not None:
self.task_id = task_id
if status is not None:
self.status = status
if create_time is not None:
self.create_time = create_time
if start_time is not None:
self.start_time = start_time
if end_time is not None:
self.end_time = end_time
if description is not None:
self.description = description
if input is not None:
self.input = input
if output is not None:
self.output = output
if user_data is not None:
self.user_data = user_data
@property
def task_id(self):
"""Gets the task_id of this CommonTask.
        Task ID
:return: The task_id of this CommonTask.
:rtype: str
"""
return self._task_id
@task_id.setter
def task_id(self, task_id):
"""Sets the task_id of this CommonTask.
        Task ID
:param task_id: The task_id of this CommonTask.
:type: str
"""
self._task_id = task_id
@property
def status(self):
"""Gets the status of this CommonTask.
        Task status. Possible values: - INIT: initial state. - WAITING: waiting to start. - PROCESSING: in progress. - SUCCEED: processed successfully. - FAILED: processing failed. - CANCELED: canceled.
:return: The status of this CommonTask.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this CommonTask.
        Task status. Possible values: - INIT: initial state. - WAITING: waiting to start. - PROCESSING: in progress. - SUCCEED: processed successfully. - FAILED: processing failed. - CANCELED: canceled.
:param status: The status of this CommonTask.
:type: str
"""
self._status = status
@property
def create_time(self):
"""Gets the create_time of this CommonTask.
        Task creation time
:return: The create_time of this CommonTask.
:rtype: str
"""
return self._create_time
@create_time.setter
def create_time(self, create_time):
"""Sets the create_time of this CommonTask.
        Task creation time
:param create_time: The create_time of this CommonTask.
:type: str
"""
self._create_time = create_time
@property
def start_time(self):
"""Gets the start_time of this CommonTask.
        Task start time
:return: The start_time of this CommonTask.
:rtype: str
"""
return self._start_time
@start_time.setter
def start_time(self, start_time):
"""Sets the start_time of this CommonTask.
        Task start time
:param start_time: The start_time of this CommonTask.
:type: str
"""
self._start_time = start_time
@property
def end_time(self):
"""Gets the end_time of this CommonTask.
        Task end time
:return: The end_time of this CommonTask.
:rtype: str
"""
return self._end_time
@end_time.setter
def end_time(self, end_time):
"""Sets the end_time of this CommonTask.
        Task end time
:param end_time: The end_time of this CommonTask.
:type: str
"""
self._end_time = end_time
@property
def description(self):
"""Gets the description of this CommonTask.
        Error description
:return: The description of this CommonTask.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this CommonTask.
        Error description
:param description: The description of this CommonTask.
:type: str
"""
self._description = description
@property
def input(self):
"""Gets the input of this CommonTask.
:return: The input of this CommonTask.
:rtype: ObsObjInfo
"""
return self._input
@input.setter
def input(self, input):
"""Sets the input of this CommonTask.
:param input: The input of this CommonTask.
:type: ObsObjInfo
"""
self._input = input
@property
def output(self):
"""Gets the output of this CommonTask.
:return: The output of this CommonTask.
:rtype: ObsObjInfo
"""
return self._output
@output.setter
def output(self, output):
"""Sets the output of this CommonTask.
:param output: The output of this CommonTask.
:type: ObsObjInfo
"""
self._output = output
@property
def user_data(self):
"""Gets the user_data of this CommonTask.
        User data.
:return: The user_data of this CommonTask.
:rtype: str
"""
return self._user_data
@user_data.setter
def user_data(self, user_data):
"""Sets the user_data of this CommonTask.
        User data.
:param user_data: The user_data of this CommonTask.
:type: str
"""
self._user_data = user_data
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CommonTask):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
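# Example usage (illustrative sketch; the field values below are made up):
#
#     task = CommonTask(task_id="0123", status="SUCCEED",
#                       create_time="20240101T000000Z")
#     task.to_dict()   # returns a plain dict, with unset attributes left as None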
|
py | b4013956a96dfac33b8c8a895ab208c3b8fcfbef | """
Here the vertex is defined
The following attributes are supported:
* position
* normal
* color
* texcoord
"""
from ..math import *
class Vertex(object):
def __init__(self, **kwargs):
self.position = Vec3(0, 0, 0)
self.normal = None
self.color = Vec3(1.,1.,1.)
self.texcoord = None
if len(kwargs) == 0:
raise ValueError("Vertex must specify attributes, for example: position, normal, color, texcoord")
else:
if "position" in kwargs:
if type(kwargs["position"]) == Vec3:
self.position = kwargs["position"].copy()
elif type(kwargs["position"]) == tuple or type(kwargs["position"]) == list:
if len(kwargs["position"]) != 3:
raise ValueError("position must be specified as Vec3 or a list/tuple")
else:
self.position = Vec3(kwargs["position"][0], kwargs["position"][1], kwargs["position"][2])
else:
raise ValueError("position must be specified as Vec3 or a list/tuple")
if "normal" in kwargs:
if type(kwargs["normal"]) == Vec3: # TODO: a list/tuple of values is ok too...
self.normal = kwargs["normal"].copy()
elif type(kwargs["normal"]) == tuple or type(kwargs["normal"]) == list:
if len(kwargs["normal"]) != 3:
raise ValueError("normal must be specified as Vec3 or a list/tuple with 3 floats")
else:
self.normal = Vec3(kwargs["position"][0], kwargs["position"][1], kwargs["position"][2])
else:
raise ValueError("normal must be specified as Vec3 or a list/tuple")
if "color" in kwargs:
if type(kwargs["color"]) == Vec4:
raise ValueError("Color must be Vec3")
elif type(kwargs["color"]) == Vec3:
color = kwargs["color"]
self.color = Vec3(color.x, color.y, color.z)
elif type(kwargs["color"]) == tuple or type(kwargs["color"]) == list:
color = kwargs["color"]
if len(color) == 3:
self.color = Vec3(color[0], color[1], color[2])
else:
raise ValueError("Wrong number of components for color")
else:
raise ValueError("color must be specified as Vec3 or list/tuple with 3 components")
if "texcoord" in kwargs:
if type(kwargs["texcoord"]) == Vec2:
self.texcoord = kwargs["texcoord"].copy()
elif type(kwargs["texcoord"]) == tuple or type(kwargs["texcoord"]) == list:
if len(kwargs["texcoord"]) == 2:
self.texcoord = Vec2(kwargs["texcoord"][0], kwargs["texcoord"][1])
else:
raise ValueError("Wrong number of components for texcoord")
else:
raise ValueError("position must be specified as Vec2 or list/tuple")
|
py | b4013a2e84723809c03d67f1684fae63a6dcdc87 | import subprocess as sub
import sys
from pathlib import Path
from unittest.mock import patch
import graphviz
import networkx as nx
import pytest
import yaml
from networkx.algorithms import isomorphism
import wic.ast
import wic.cli
import wic.compiler
import wic.main
import wic.utils
from wic import auto_gen_header
from wic.schemas import wic_schema
from wic.wic_types import GraphData, GraphReps, NodeData, Yaml, YamlTree
@pytest.mark.slow
def test_examples() -> None:
"""Runs all of the examples in the examples/ directory. Note that some of
the yml files lack inputs and cannot be run independently, and are excluded.
"""
testargs = ['wic', '--yaml', '', '--cwl_output_intermediate_files', 'True'] # ignore --yaml
# For now, we need to enable --cwl_output_intermediate_files. See comment in compiler.py
with patch.object(sys, 'argv', testargs):
args = wic.cli.parser.parse_args()
tools_cwl = wic.main.get_tools_cwl(Path('.'))
yml_paths = wic.main.get_yml_paths(Path('examples/'))
# Generate schemas for validation
validator = wic_schema.get_validator(tools_cwl, list(yml_paths))
# First compile all of the workflows.
for yml_path_str, yml_path in yml_paths.items():
# Load the high-level yaml workflow file.
with open(yml_path, mode='r', encoding='utf-8') as y:
root_yaml_tree: Yaml = yaml.safe_load(y.read())
Path('autogenerated/').mkdir(parents=True, exist_ok=True)
y_t = YamlTree(yml_path_str, root_yaml_tree)
yaml_tree_raw = wic.ast.read_ast_from_disk(y_t, yml_paths, tools_cwl, validator)
with open(f'autogenerated/{Path(yml_path).stem}_tree_raw.yml', mode='w', encoding='utf-8') as f:
f.write(yaml.dump(yaml_tree_raw.yml))
yaml_tree = wic.ast.merge_yml_trees(yaml_tree_raw, {}, tools_cwl)
with open(f'autogenerated/{Path(yml_path).stem}_tree_merged.yml', mode='w', encoding='utf-8') as f:
f.write(yaml.dump(yaml_tree.yml))
graph_gv = graphviz.Digraph(name=f'cluster_{yml_path}')
graph_gv.attr(newrank='True')
graph_nx = nx.DiGraph()
graphdata = GraphData(str(yml_path))
graph = GraphReps(graph_gv, graph_nx, graphdata)
compiler_info = wic.compiler.compile_workflow(yaml_tree, args, [], [graph], {}, {},
tools_cwl, True, relative_run_path=True)
rose_tree = compiler_info.rose
sub_node_data: NodeData = rose_tree.data
yaml_stem = sub_node_data.name
wic.utils.write_to_disk(rose_tree, Path('autogenerated/'), relative_run_path=True)
# Now blindly run all workflows and (if all inputs are present) check for return code 0.
# Workflows are first validated before runtime, so this also checks for validity.
# NOTE: Do not use --cachedir; we want to actually test everything.
cmd = ['cwltool', '--outdir', f'outdir/{yaml_stem}',
f'autogenerated/{yaml_stem}.cwl',
f'autogenerated/{yaml_stem}_inputs.yml']
proc = sub.run(cmd, stdout=sub.PIPE, stderr=sub.STDOUT, check=False) # Capture the output
if not proc.returncode == 0:
# Since some of the workflows will be subworkflows
# (i.e. will not have all inputs), we need to check for
# "Missing required input parameter" and only fail the
# workflows which should have succeeded.
missing_input = "Missing required input parameter"
output = proc.stdout.decode("utf-8")
if not missing_input in output:
print(f"Error! {yml_path} failed!")
print(output)
assert proc.returncode == 0
@pytest.mark.fast
def test_cwl_embedding_independence() -> None:
"""Tests that compiling a subworkflow is independent of how it is embedded
into a parent workflow. Specifically, this compiles the root workflow and
re-compiles every subworkflow (individually) as if it were a root workflow,
then checks that the CWL for each subworkflow remains identical and checks
that the embedded subworkflow DAGs and the re-compiled DAGs are isomorphic.
"""
testargs = ['wic', '--yaml', ''] # ignore --yaml
with patch.object(sys, 'argv', testargs):
args = wic.cli.parser.parse_args()
tools_cwl = wic.main.get_tools_cwl(Path('.'))
yml_paths = wic.main.get_yml_paths(Path('examples'))
# Generate schemas for validation
validator = wic_schema.get_validator(tools_cwl, list(yml_paths))
# First compile all of the workflows once.
for yml_path_str, yml_path in yml_paths.items():
# Load the high-level yaml workflow file.
with open(yml_path, mode='r', encoding='utf-8') as y:
root_yaml_tree: Yaml = yaml.safe_load(y.read())
# Write the combined workflow (with all subworkflows as children) to disk.
Path('autogenerated/').mkdir(parents=True, exist_ok=True)
y_t = YamlTree(yml_path_str + '.yml', root_yaml_tree)
yaml_tree_raw = wic.ast.read_ast_from_disk(y_t, yml_paths, tools_cwl, validator)
with open(f'autogenerated/{yml_path.stem}_tree_raw.yml', mode='w', encoding='utf-8') as f:
f.write(yaml.dump(yaml_tree_raw.yml))
yaml_tree = wic.ast.merge_yml_trees(yaml_tree_raw, {}, tools_cwl)
with open(f'autogenerated/{yml_path.stem}_tree_merged.yml', mode='w', encoding='utf-8') as f:
f.write(yaml.dump(yaml_tree.yml))
# NOTE: The entire purpose of parsing an entire yaml forest is so we
# can easily access the subtrees here. (i.e. without re-walking the AST)
yaml_forest = wic.ast.tree_to_forest(yaml_tree, tools_cwl)
yaml_forest_lst = wic.utils.flatten_forest(yaml_forest)
graph_gv = graphviz.Digraph(name=f'cluster_{yml_path}')
graph_gv.attr(newrank='True')
graph_nx = nx.DiGraph()
graphdata = GraphData(str(yml_path))
graph = GraphReps(graph_gv, graph_nx, graphdata)
is_root = True
compiler_info = wic.compiler.compile_workflow(yaml_tree, args, [], [graph], {}, {},
tools_cwl, is_root, relative_run_path=False)
rose_tree = compiler_info.rose
node_data_lst = wic.utils.flatten_rose_tree(rose_tree)
# This test doesn't need to write to disk, but useful for debugging.
wic.utils.write_to_disk(rose_tree, Path('autogenerated/'), relative_run_path=False)
# Now, for each subworkflow of the given root workflow, compile the
# subworkflow again from scratch, as if it were the root workflow,
# and check that the generated CWL is identical. In other words,
# check that the generated CWL of a subworkflow is independent of its
# embedding into a parent workflow.
for sub_node_data, sub_yaml_forest in zip(node_data_lst[1:], yaml_forest_lst):
sub_name = sub_node_data.name
assert sub_yaml_forest.yaml_tree.name == sub_name + '.yml'
# NOTE: Do we want to also test embedding independence with args.graph_inline_depth?
# If so, we will need to patch testargs depending on len(sub_node_data.namespaces)
# (due to the various instances of `if len(namespaces) < args.graph_inline_depth`)
graph_fakeroot_gv = graphviz.Digraph(name=f'cluster_{sub_name}')
graph_fakeroot_gv.attr(newrank='True')
graph_fakeroot_nx = nx.DiGraph()
graphdata_fakeroot = GraphData(str(sub_name))
graph_fakeroot = GraphReps(graph_fakeroot_gv, graph_fakeroot_nx, graphdata_fakeroot)
fake_root = True
compiler_info_fakeroot = wic.compiler.compile_workflow(sub_yaml_forest.yaml_tree,
args, [], [graph_fakeroot], {}, {}, tools_cwl, fake_root, relative_run_path=False)
sub_node_data_fakeroot: NodeData = compiler_info_fakeroot.rose.data
sub_cwl_fakeroot = sub_node_data_fakeroot.compiled_cwl
# NOTE: Relative run: paths cause this test to fail, so remove them.
# Using namespaced filenames in a single flat directory also
# doesn't work because the namespaces will be of different lengths.
sub_cwl_embedded = wic.utils.recursively_delete_dict_key('run', sub_node_data.compiled_cwl)
sub_cwl_fakeroot = wic.utils.recursively_delete_dict_key('run', sub_cwl_fakeroot)
if sub_cwl_embedded != sub_cwl_fakeroot:
# Before we crash and burn, write out files for debugging.
with open(f'{sub_name}_forest_embedded.yml', mode='w', encoding='utf-8') as w:
w.write(yaml.dump(yaml_forest))
with open(f'{sub_name}_forest_fakeroot.yml', mode='w', encoding='utf-8') as w:
w.write(yaml.dump(sub_yaml_forest))
# NOTE: Use _dot_cwl so we don't glob these files in get_tools_cwl()
yaml_content = yaml.dump(sub_cwl_embedded, sort_keys=False, line_break='\n', indent=2)
filename_emb = f'{sub_name}_embedded_dot_cwl'
with open(filename_emb, mode='w', encoding='utf-8') as w:
w.write('#!/usr/bin/env cwl-runner\n')
w.write(auto_gen_header)
w.write(''.join(yaml_content))
yaml_content = yaml.dump(sub_cwl_fakeroot, sort_keys=False, line_break='\n', indent=2)
filename_fake = f'{sub_name}_fakeroot_dot_cwl'
with open(filename_fake, mode='w', encoding='utf-8') as w:
w.write('#!/usr/bin/env cwl-runner\n')
w.write(auto_gen_header)
w.write(''.join(yaml_content))
cmd = f'diff {filename_emb} {filename_fake} > {sub_name}.diff'
sub.run(cmd, shell=True, check=False)
print(f'Error! Check {filename_emb} and {filename_fake} and {sub_name}.diff')
assert sub_cwl_embedded == sub_cwl_fakeroot
# Check that the subgraphs are isomorphic.
sub_graph_nx = sub_node_data.graph.networkx
sub_graph_fakeroot_nx = sub_node_data_fakeroot.graph.networkx
g_m = isomorphism.GraphMatcher(sub_graph_nx, sub_graph_fakeroot_nx)
assert g_m.is_isomorphic()
def test_inline_subworkflows() -> None:
"""Tests that compiling a workflow is independent of how subworkflows are inlined.
Specifically, this inlines every subworkflow (individually) and checks that
the original DAG and the inlined DAGs are isomorphic.
"""
testargs = ['wic', '--yaml', '', '--cwl_output_intermediate_files', 'True'] # ignore --yaml
# For now, we need to enable --cwl_output_intermediate_files. See comment in compiler.py
with patch.object(sys, 'argv', testargs):
args = wic.cli.parser.parse_args()
tools_cwl = wic.main.get_tools_cwl(Path('.'))
yml_paths = wic.main.get_yml_paths(Path('examples/'))
# Generate schemas for validation
validator = wic_schema.get_validator(tools_cwl, list(yml_paths))
for yml_path_str, yml_path in yml_paths.items():
# Load the high-level yaml workflow file.
with open(yml_path, mode='r', encoding='utf-8') as y:
root_yaml_tree: Yaml = yaml.safe_load(y.read())
Path('autogenerated/').mkdir(parents=True, exist_ok=True)
y_t = YamlTree(yml_path_str, root_yaml_tree)
yaml_tree_raw = wic.ast.read_ast_from_disk(y_t, yml_paths, tools_cwl, validator)
with open(f'autogenerated/{Path(yml_path).stem}_tree_raw.yml', mode='w', encoding='utf-8') as f:
f.write(yaml.dump(yaml_tree_raw.yml))
yaml_tree = wic.ast.merge_yml_trees(yaml_tree_raw, {}, tools_cwl)
with open(f'autogenerated/{Path(yml_path).stem}_tree_merged.yml', mode='w', encoding='utf-8') as f:
f.write(yaml.dump(yaml_tree.yml))
graph_gv = graphviz.Digraph(name=f'cluster_{yml_path}')
graph_gv.attr(newrank='True')
graph_nx = nx.DiGraph()
graphdata = GraphData(str(yml_path))
graph = GraphReps(graph_gv, graph_nx, graphdata)
compiler_info = wic.compiler.compile_workflow(yaml_tree, args, [], [graph], {}, {},
tools_cwl, True, relative_run_path=True)
rose_tree = compiler_info.rose
sub_node_data: NodeData = rose_tree.data
wic.utils.write_to_disk(rose_tree, Path('autogenerated/'), relative_run_path=True)
# Inline each subworkflow individually and check that the graphs are isomorphic.
namespaces_list = wic.ast.get_inlineable_subworkflows(yaml_tree, tools_cwl, [])
for namespaces in namespaces_list:
inline_yaml_tree = wic.ast.inline_subworkflow(yaml_tree, tools_cwl, namespaces)
inline_graph_gv = graphviz.Digraph(name=f'cluster_{yml_path}')
inline_graph_gv.attr(newrank='True')
inline_graph_nx = nx.DiGraph()
inline_graphdata = GraphData(str(yml_path))
inline_graph = GraphReps(inline_graph_gv, inline_graph_nx, inline_graphdata)
inline_compiler_info = wic.compiler.compile_workflow(inline_yaml_tree,
args, [], [inline_graph], {}, {}, tools_cwl, True, relative_run_path=True)
inline_rose_tree = inline_compiler_info.rose
inline_sub_node_data: NodeData = inline_rose_tree.data
# Check that the subgraphs are isomorphic.
sub_graph_nx = sub_node_data.graph.networkx
sub_graph_fakeroot_nx = inline_sub_node_data.graph.networkx
g_m = isomorphism.GraphMatcher(sub_graph_nx, sub_graph_fakeroot_nx)
assert g_m.is_isomorphic()
|
py | b4013bf5371cdb1f2083a57d735f29e230666293 | import time
import os
import datetime
import codecs
import cv2
import urllib2
from bs4 import BeautifulSoup
import requests
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.keys import Keys
baseurl = "http://academicscc.vit.ac.in/student/stud_login.asp"
regno = raw_input("Registration Number: ")
passwd = raw_input("Password: ")
xpaths = { 'usernameTxtBox' : "html/body/table[3]/tbody/tr/td/form/table/tbody/tr/td/table/tbody/tr[2]/td[2]/input[@name='regno']",
'passwordTxtBox' : "html/body/table[3]/tbody/tr/td/form/table/tbody/tr/td/table/tbody/tr[3]/td[2]/input[@name='passwd']",
'captchaTxtBox' : "html/body/table[3]/tbody/tr/td/form/table/tbody/tr/td/table/tbody/tr[5]/td/input[@name='vrfcd']",
'submitButton' : "html/body/table[3]/tbody/tr/td/form/table/tbody/tr/td/table/tbody/tr[6]/td/input[1]"
}
mydriver = webdriver.Firefox()
test = mydriver.get(baseurl)
mydriver.maximize_window()
mydriver.execute_script('document.getElementById("imgCaptcha").oncontextmenu = "return true"')
#taking screenshot to save the captcha
mydriver.save_screenshot("screenshot.png")
img = cv2.imread("screenshot.png")
crop_img = img[280:320, 650:800] # Crop rows 280:320 (y) and columns 650:800 (x) to isolate the captcha region
# NOTE: its img[y: y + h, x: x + w] and *not* img[x: x + w, y: y + h]
#cv2.imshow("cropped", crop_img)
#cv2.waitKey(0)
cv2.imwrite('captcha.png',crop_img)
gray_image = cv2.cvtColor(crop_img, cv2.COLOR_BGR2GRAY)
cv2.imwrite('gray_captcha.png',gray_image)
#Clear Username TextBox if already allowed "Remember Me"
mydriver.find_element_by_xpath(xpaths['usernameTxtBox']).clear()
#Write Username in Username TextBox
mydriver.find_element_by_xpath(xpaths['usernameTxtBox']).send_keys(regno)
#Clear Password TextBox if already allowed "Remember Me"
mydriver.find_element_by_xpath(xpaths['passwordTxtBox']).clear()
#Write Password in password TextBox
mydriver.find_element_by_xpath(xpaths['passwordTxtBox']).send_keys(passwd)
#Get Captcha from the user
vrfcd = raw_input("Please enter the captcha: ")
#Clear Captcha TextBox if already allowed "Remember Me"
mydriver.find_element_by_xpath(xpaths['captchaTxtBox']).clear()
#Write Captcha in captcha TextBox
mydriver.find_element_by_xpath(xpaths['captchaTxtBox']).send_keys(vrfcd)
#Click Login button
mydriver.find_element_by_xpath(xpaths['submitButton']).click()
time.sleep(1)
fromdate = "01-Jan-2017"
todate = datetime.date.today().strftime ("%d-%b-%Y")
attendanceurl = "https://academicscc.vit.ac.in/student/attn_report.asp?sem=WS&fmdt="+fromdate+"&todt="+todate
mydriver.find_element_by_tag_name('body').send_keys(Keys.CONTROL + 't')
mydriver.get(attendanceurl)
html_source = mydriver.page_source
text_file = codecs.open("attendance.html", "w", 'utf-8')
text_file.write(html_source)
text_file.close()
|
py | b4013bf7aab37882b4e0a21183f243ba68a0c498 | # Copyright 2019, The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dp_optimizer_keras.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow_privacy.privacy.optimizers import dp_optimizer_keras
from tensorflow_privacy.privacy.optimizers import dp_optimizer_keras_vectorized
class DPOptimizerComputeGradientsTest(tf.test.TestCase, parameterized.TestCase):
"""Tests for _compute_gradients method."""
def _loss(self, val0, val1):
"""Loss function whose derivative w.r.t val1 is val1 - val0."""
return 0.5 * tf.reduce_sum(
input_tensor=tf.math.squared_difference(val0, val1), axis=1)
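  # A quick arithmetic check of the derivative claim above, using the data in
  # the tests below: d(loss_i)/d(val1) = val1 - val0_i, so with var1 = [3.0]
  # and data1 rows [8, 2, 3, 1] the per-example gradients are [-5, 1, 0, 2],
  # whose average over the four microbatches is the expected_grad1 value -0.5.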
# Parameters for testing: optimizer, num_microbatches, expected gradient for
# var0, expected gradient for var1.
@parameterized.named_parameters(
('DPGradientDescent 1', dp_optimizer_keras.DPKerasSGDOptimizer, 1,
[-2.5, -2.5], [-0.5]),
('DPAdam 2', dp_optimizer_keras.DPKerasAdamOptimizer, 2, [-2.5, -2.5
], [-0.5]),
('DPAdagrad 4', dp_optimizer_keras.DPKerasAdagradOptimizer, 4,
[-2.5, -2.5], [-0.5]),
('DPGradientDescentVectorized 1',
dp_optimizer_keras_vectorized.VectorizedDPKerasSGDOptimizer, 1,
[-2.5, -2.5], [-0.5]),
('DPAdamVectorized 2',
dp_optimizer_keras_vectorized.VectorizedDPKerasAdamOptimizer, 2,
[-2.5, -2.5], [-0.5]),
('DPAdagradVectorized 4',
dp_optimizer_keras_vectorized.VectorizedDPKerasAdagradOptimizer, 4,
[-2.5, -2.5], [-0.5]),
('DPAdagradVectorized None',
dp_optimizer_keras_vectorized.VectorizedDPKerasAdagradOptimizer, None,
[-2.5, -2.5], [-0.5]),
)
def testBaselineWithCallableLoss(self, cls, num_microbatches, expected_grad0,
expected_grad1):
var0 = tf.Variable([1.0, 2.0])
var1 = tf.Variable([3.0])
data0 = tf.Variable([[3.0, 4.0], [5.0, 6.0], [7.0, 8.0], [-1.0, 0.0]])
data1 = tf.Variable([[8.0], [2.0], [3.0], [1.0]])
opt = cls(
l2_norm_clip=100.0,
noise_multiplier=0.0,
num_microbatches=num_microbatches,
learning_rate=2.0)
loss = lambda: self._loss(data0, var0) + self._loss(data1, var1)
grads_and_vars = opt._compute_gradients(loss, [var0, var1])
self.assertAllCloseAccordingToType(expected_grad0, grads_and_vars[0][0])
self.assertAllCloseAccordingToType(expected_grad1, grads_and_vars[1][0])
# Parameters for testing: optimizer, num_microbatches, expected gradient for
# var0, expected gradient for var1.
@parameterized.named_parameters(
('DPGradientDescent 1', dp_optimizer_keras.DPKerasSGDOptimizer, 1,
[-2.5, -2.5], [-0.5]),
('DPAdam 2', dp_optimizer_keras.DPKerasAdamOptimizer, 2, [-2.5, -2.5
], [-0.5]),
('DPAdagrad 4', dp_optimizer_keras.DPKerasAdagradOptimizer, 4,
[-2.5, -2.5], [-0.5]),
('DPGradientDescentVectorized 1',
dp_optimizer_keras_vectorized.VectorizedDPKerasSGDOptimizer, 1,
[-2.5, -2.5], [-0.5]),
('DPAdamVectorized 2',
dp_optimizer_keras_vectorized.VectorizedDPKerasAdamOptimizer, 2,
[-2.5, -2.5], [-0.5]),
('DPAdagradVectorized 4',
dp_optimizer_keras_vectorized.VectorizedDPKerasAdagradOptimizer, 4,
[-2.5, -2.5], [-0.5]),
('DPAdagradVectorized None',
dp_optimizer_keras_vectorized.VectorizedDPKerasAdagradOptimizer, None,
[-2.5, -2.5], [-0.5]),
)
def testBaselineWithTensorLoss(self, cls, num_microbatches, expected_grad0,
expected_grad1):
var0 = tf.Variable([1.0, 2.0])
var1 = tf.Variable([3.0])
data0 = tf.Variable([[3.0, 4.0], [5.0, 6.0], [7.0, 8.0], [-1.0, 0.0]])
data1 = tf.Variable([[8.0], [2.0], [3.0], [1.0]])
opt = cls(
l2_norm_clip=100.0,
noise_multiplier=0.0,
num_microbatches=num_microbatches,
learning_rate=2.0)
tape = tf.GradientTape()
with tape:
loss = self._loss(data0, var0) + self._loss(data1, var1)
grads_and_vars = opt._compute_gradients(
loss, [var0, var1], tape=tape)
self.assertAllCloseAccordingToType(expected_grad0, grads_and_vars[0][0])
self.assertAllCloseAccordingToType(expected_grad1, grads_and_vars[1][0])
@parameterized.named_parameters(
('DPGradientDescent', dp_optimizer_keras.DPKerasSGDOptimizer),
('DPGradientDescentVectorized',
dp_optimizer_keras_vectorized.VectorizedDPKerasSGDOptimizer),
)
def testClippingNorm(self, cls):
var0 = tf.Variable([0.0, 0.0])
data0 = tf.Variable([[3.0, 4.0], [6.0, 8.0]])
opt = cls(
l2_norm_clip=1.0,
noise_multiplier=0.0,
num_microbatches=1,
learning_rate=2.0)
loss = lambda: self._loss(data0, var0)
# Expected gradient is sum of differences.
grads_and_vars = opt._compute_gradients(loss, [var0])
self.assertAllCloseAccordingToType([-0.6, -0.8], grads_and_vars[0][0])
@parameterized.named_parameters(
('DPGradientDescent 2 4 1', dp_optimizer_keras.DPKerasSGDOptimizer, 2.0,
4.0, 1),
('DPGradientDescent 4 1 4', dp_optimizer_keras.DPKerasSGDOptimizer, 4.0,
1.0, 4),
('DPGradientDescentVectorized 2 4 1',
dp_optimizer_keras_vectorized.VectorizedDPKerasSGDOptimizer, 2.0, 4.0,
1),
('DPGradientDescentVectorized 4 1 4',
dp_optimizer_keras_vectorized.VectorizedDPKerasSGDOptimizer, 4.0, 1.0,
4),
)
def testNoiseMultiplier(self, cls, l2_norm_clip, noise_multiplier,
num_microbatches):
var0 = tf.Variable(tf.zeros([1000], dtype=tf.float32))
data0 = tf.Variable(tf.zeros([16, 1000], dtype=tf.float32))
opt = cls(
l2_norm_clip=l2_norm_clip,
noise_multiplier=noise_multiplier,
num_microbatches=num_microbatches,
learning_rate=2.0)
loss = lambda: self._loss(data0, var0)
grads_and_vars = opt._compute_gradients(loss, [var0])
grads = grads_and_vars[0][0].numpy()
    # Test standard deviation is close to l2_norm_clip * noise_multiplier / num_microbatches.
self.assertNear(
np.std(grads), l2_norm_clip * noise_multiplier / num_microbatches, 0.5)
@parameterized.named_parameters(
('DPGradientDescent', dp_optimizer_keras.DPKerasSGDOptimizer),
('DPAdagrad', dp_optimizer_keras.DPKerasAdagradOptimizer),
('DPAdam', dp_optimizer_keras.DPKerasAdamOptimizer),
('DPGradientDescentVectorized',
dp_optimizer_keras_vectorized.VectorizedDPKerasSGDOptimizer),
('DPAdagradVectorized',
dp_optimizer_keras_vectorized.VectorizedDPKerasAdagradOptimizer),
('DPAdamVectorized',
dp_optimizer_keras_vectorized.VectorizedDPKerasAdamOptimizer),
)
def testAssertOnNoCallOfComputeGradients(self, cls):
"""Tests that assertion fails when DP gradients are not computed."""
opt = cls(
l2_norm_clip=100.0,
noise_multiplier=0.0,
num_microbatches=1,
learning_rate=2.0)
with self.assertRaises(AssertionError):
grads_and_vars = tf.Variable([0.0])
opt.apply_gradients(grads_and_vars)
# Expect no exception if _compute_gradients is called.
var0 = tf.Variable([0.0])
data0 = tf.Variable([[0.0]])
loss = lambda: self._loss(data0, var0)
grads_and_vars = opt._compute_gradients(loss, [var0])
opt.apply_gradients(grads_and_vars)
class DPOptimizerGetGradientsTest(tf.test.TestCase, parameterized.TestCase):
"""Tests for get_gradient method.
Since get_gradients must run in graph mode, the method is tested within
the Estimator framework.
"""
def _make_linear_model_fn(self, opt_cls, l2_norm_clip, noise_multiplier,
num_microbatches, learning_rate):
"""Returns a model function for a linear regressor."""
def linear_model_fn(features, labels, mode):
layer = tf.keras.layers.Dense(
1,
activation='linear',
name='dense',
kernel_initializer='zeros',
bias_initializer='zeros')
preds = layer.apply(features)
vector_loss = 0.5 * tf.math.squared_difference(labels, preds)
scalar_loss = tf.reduce_mean(input_tensor=vector_loss)
optimizer = opt_cls(
l2_norm_clip=l2_norm_clip,
noise_multiplier=noise_multiplier,
num_microbatches=num_microbatches,
learning_rate=learning_rate)
params = layer.trainable_weights
global_step = tf.compat.v1.train.get_global_step()
train_op = tf.group(
optimizer.get_updates(loss=vector_loss, params=params),
[tf.compat.v1.assign_add(global_step, 1)])
return tf.estimator.EstimatorSpec(
mode=mode, loss=scalar_loss, train_op=train_op)
return linear_model_fn
# Parameters for testing: optimizer, num_microbatches.
@parameterized.named_parameters(
('DPGradientDescent 1', dp_optimizer_keras.DPKerasSGDOptimizer, 1),
('DPGradientDescent 2', dp_optimizer_keras.DPKerasSGDOptimizer, 2),
('DPGradientDescent 4', dp_optimizer_keras.DPKerasSGDOptimizer, 4),
('DPGradientDescentVectorized 1',
dp_optimizer_keras_vectorized.VectorizedDPKerasSGDOptimizer, 1),
('DPGradientDescentVectorized 2',
dp_optimizer_keras_vectorized.VectorizedDPKerasSGDOptimizer, 2),
('DPGradientDescentVectorized 4',
dp_optimizer_keras_vectorized.VectorizedDPKerasSGDOptimizer, 4),
('DPGradientDescentVectorized None',
dp_optimizer_keras_vectorized.VectorizedDPKerasSGDOptimizer, None),
)
def testBaseline(self, cls, num_microbatches):
"""Tests that DP optimizers work with tf.estimator."""
linear_regressor = tf.estimator.Estimator(
model_fn=self._make_linear_model_fn(cls, 100.0, 0.0, num_microbatches,
0.05))
true_weights = np.array([[-5], [4], [3], [2]]).astype(np.float32)
true_bias = np.array([6.0]).astype(np.float32)
train_data = np.random.normal(scale=3.0, size=(1000, 4)).astype(np.float32)
train_labels = np.matmul(train_data,
true_weights) + true_bias + np.random.normal(
scale=0.0, size=(1000, 1)).astype(np.float32)
def train_input_fn():
return tf.data.Dataset.from_tensor_slices(
(train_data, train_labels)).batch(8)
linear_regressor.train(input_fn=train_input_fn, steps=125)
self.assertAllClose(
linear_regressor.get_variable_value('dense/kernel'),
true_weights,
atol=0.05)
self.assertAllClose(
linear_regressor.get_variable_value('dense/bias'), true_bias, atol=0.05)
# Parameters for testing: optimizer, num_microbatches.
@parameterized.named_parameters(
('DPGradientDescent 1', dp_optimizer_keras.DPKerasSGDOptimizer, 1),
('DPGradientDescentVectorized 1',
dp_optimizer_keras_vectorized.VectorizedDPKerasSGDOptimizer, 1),
)
def testClippingNorm(self, cls, num_microbatches):
"""Tests that DP optimizers work with tf.estimator."""
true_weights = np.array([[6.0], [0.0], [0], [0]]).astype(np.float32)
true_bias = np.array([0]).astype(np.float32)
train_data = np.array([[1.0, 0.0, 0.0, 0.0]]).astype(np.float32)
train_labels = np.matmul(train_data, true_weights) + true_bias
def train_input_fn():
return tf.data.Dataset.from_tensor_slices(
(train_data, train_labels)).batch(1)
unclipped_linear_regressor = tf.estimator.Estimator(
model_fn=self._make_linear_model_fn(cls, 1.0e9, 0.0, num_microbatches,
1.0))
unclipped_linear_regressor.train(input_fn=train_input_fn, steps=1)
kernel_value = unclipped_linear_regressor.get_variable_value('dense/kernel')
bias_value = unclipped_linear_regressor.get_variable_value('dense/bias')
global_norm = np.linalg.norm(np.concatenate((kernel_value, [bias_value])))
clipped_linear_regressor = tf.estimator.Estimator(
model_fn=self._make_linear_model_fn(cls, 1.0, 0.0, num_microbatches,
1.0))
clipped_linear_regressor.train(input_fn=train_input_fn, steps=1)
self.assertAllClose(
clipped_linear_regressor.get_variable_value('dense/kernel'),
kernel_value / global_norm,
atol=0.001)
self.assertAllClose(
clipped_linear_regressor.get_variable_value('dense/bias'),
bias_value / global_norm,
atol=0.001)
# Parameters for testing: optimizer, l2_norm_clip, noise_multiplier,
# num_microbatches.
@parameterized.named_parameters(
('DPGradientDescent 2 4 1', dp_optimizer_keras.DPKerasSGDOptimizer, 2.0,
4.0, 1),
('DPGradientDescent 3 2 4', dp_optimizer_keras.DPKerasSGDOptimizer, 3.0,
2.0, 4),
('DPGradientDescent 8 6 8', dp_optimizer_keras.DPKerasSGDOptimizer, 8.0,
6.0, 8),
('DPGradientDescentVectorized 2 4 1',
dp_optimizer_keras_vectorized.VectorizedDPKerasSGDOptimizer, 2.0, 4.0,
1),
('DPGradientDescentVectorized 3 2 4',
dp_optimizer_keras_vectorized.VectorizedDPKerasSGDOptimizer, 3.0, 2.0,
4),
('DPGradientDescentVectorized 8 6 8',
dp_optimizer_keras_vectorized.VectorizedDPKerasSGDOptimizer, 8.0, 6.0,
8),
)
def testNoiseMultiplier(self, cls, l2_norm_clip, noise_multiplier,
num_microbatches):
"""Tests that DP optimizers work with tf.estimator."""
linear_regressor = tf.estimator.Estimator(
model_fn=self._make_linear_model_fn(
cls,
l2_norm_clip,
noise_multiplier,
num_microbatches,
learning_rate=1.0))
true_weights = np.zeros((1000, 1), dtype=np.float32)
true_bias = np.array([0.0]).astype(np.float32)
train_data = np.zeros((16, 1000), dtype=np.float32)
train_labels = np.matmul(train_data, true_weights) + true_bias
def train_input_fn():
return tf.data.Dataset.from_tensor_slices(
(train_data, train_labels)).batch(16)
linear_regressor.train(input_fn=train_input_fn, steps=1)
kernel_value = linear_regressor.get_variable_value('dense/kernel')
self.assertNear(
np.std(kernel_value),
l2_norm_clip * noise_multiplier / num_microbatches, 0.5)
@parameterized.named_parameters(
('DPGradientDescent', dp_optimizer_keras.DPKerasSGDOptimizer),
('DPAdagrad', dp_optimizer_keras.DPKerasAdagradOptimizer),
('DPAdam', dp_optimizer_keras.DPKerasAdamOptimizer),
('DPGradientDescentVectorized',
dp_optimizer_keras_vectorized.VectorizedDPKerasSGDOptimizer),
('DPAdagradVectorized',
dp_optimizer_keras_vectorized.VectorizedDPKerasAdagradOptimizer),
('DPAdamVectorized',
dp_optimizer_keras_vectorized.VectorizedDPKerasAdamOptimizer),
)
def testAssertOnNoCallOfGetGradients(self, cls):
"""Tests that assertion fails when DP gradients are not computed."""
opt = cls(
l2_norm_clip=100.0,
noise_multiplier=0.0,
num_microbatches=1,
learning_rate=2.0)
with self.assertRaises(AssertionError):
grads_and_vars = tf.Variable([0.0])
opt.apply_gradients(grads_and_vars)
def testLargeBatchEmulationNoNoise(self):
# Test for emulation of large batch training.
# It tests that updates are only done every gradient_accumulation_steps
# steps.
# In this test we set noise multiplier to zero and clipping norm to high
# value, such that optimizer essentially behave as non-DP optimizer.
# This makes easier to check how values of variables are changing.
#
# This test optimizes loss var0*x + var1
# Gradients of this loss are computed as:
# d(loss)/d(var0) = x
# d(loss)/d(var1) = 1
var0 = tf.Variable([[1.0, 2.0]], dtype=tf.float32)
var1 = tf.Variable([3.0], dtype=tf.float32)
x1 = tf.constant([[2.0, 0.0], [0.0, 1.0]], dtype=tf.float32)
loss1 = lambda: tf.matmul(var0, x1, transpose_b=True) + var1
x2 = tf.constant([[4.0, 2.0], [2.0, 1.0]], dtype=tf.float32)
loss2 = lambda: tf.matmul(var0, x2, transpose_b=True) + var1
opt = dp_optimizer_keras.DPKerasSGDOptimizer(
l2_norm_clip=100.0,
noise_multiplier=0.0,
gradient_accumulation_steps=2,
learning_rate=1.0)
# before any call to optimizer
self.assertAllCloseAccordingToType([[1.0, 2.0]], var0)
self.assertAllCloseAccordingToType([3.0], var1)
opt.minimize(loss1, [var0, var1])
# After first call to optimizer values didn't change
self.assertAllCloseAccordingToType([[1.0, 2.0]], var0)
self.assertAllCloseAccordingToType([3.0], var1)
opt.minimize(loss2, [var0, var1])
# After second call to optimizer updates were applied
self.assertAllCloseAccordingToType([[-1.0, 1.0]], var0)
self.assertAllCloseAccordingToType([2.0], var1)
opt.minimize(loss2, [var0, var1])
# After third call to optimizer values didn't change
self.assertAllCloseAccordingToType([[-1.0, 1.0]], var0)
self.assertAllCloseAccordingToType([2.0], var1)
opt.minimize(loss2, [var0, var1])
# After fourth call to optimizer updates were applied again
self.assertAllCloseAccordingToType([[-4.0, -0.5]], var0)
self.assertAllCloseAccordingToType([1.0], var1)
@parameterized.named_parameters(
('DPKerasSGDOptimizer 1', dp_optimizer_keras.DPKerasSGDOptimizer, 1),
('DPKerasSGDOptimizer 2', dp_optimizer_keras.DPKerasSGDOptimizer, 2),
('DPKerasSGDOptimizer 4', dp_optimizer_keras.DPKerasSGDOptimizer, 4),
('DPKerasAdamOptimizer 2',
dp_optimizer_keras.DPKerasAdamOptimizer, 1),
('DPKerasAdagradOptimizer 2',
dp_optimizer_keras.DPKerasAdagradOptimizer, 2),
)
def testLargeBatchEmulation(self, cls, gradient_accumulation_steps):
# Tests various optimizers with large batch emulation.
# Uses clipping and noise, thus does not test specific values
# of the variables and only tests how often variables are updated.
var0 = tf.Variable([[1.0, 2.0]], dtype=tf.float32)
var1 = tf.Variable([3.0], dtype=tf.float32)
x = tf.constant([[2.0, 0.0], [0.0, 1.0]], dtype=tf.float32)
loss = lambda: tf.matmul(var0, x, transpose_b=True) + var1
opt = cls(
l2_norm_clip=100.0,
noise_multiplier=0.0,
gradient_accumulation_steps=gradient_accumulation_steps,
learning_rate=1.0)
for _ in range(gradient_accumulation_steps):
self.assertAllCloseAccordingToType([[1.0, 2.0]], var0)
self.assertAllCloseAccordingToType([3.0], var1)
opt.minimize(loss, [var0, var1])
self.assertNotAllClose([[1.0, 2.0]], var0)
self.assertNotAllClose([3.0], var1)
if __name__ == '__main__':
tf.test.main()
|
py | b4013c38ac7370666ecaad01f9addbf5834f1d48 | # GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Component:
DESCRIPTION = "Add a GitHub Issue to a ZenHub Epic"
class Input:
EPIC_ID = "epic_id"
ISSUE = "issue"
REPO_ID = "repo_id"
class Output:
ISSUE = "issue"
STATUS_CODE = "status_code"
class AddIssueToEpicInput(komand.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"epic_id": {
"type": "integer",
"title": "Epic ID",
"description": "GitHub Issue Number of the ZenHub Epic",
"order": 2
},
"issue": {
"$ref": "#/definitions/issue_reference",
"title": "Issue",
"description": "A GitHub Issue to add to the ZenHub Epic",
"order": 3
},
"repo_id": {
"type": "integer",
"title": "Repository ID",
"description": "GitHub Repository ID e.g. 24237263",
"order": 1
}
},
"required": [
"epic_id",
"issue",
"repo_id"
],
"definitions": {
"issue_reference": {
"type": "object",
"title": "issue_reference",
"properties": {
"issue_number": {
"type": "integer",
"title": "Issue Number",
"description": "Issue number e.g. 43",
"order": 2
},
"issue_url": {
"type": "string",
"title": "Issue URL",
"description": "Issue URL e.g. https://github.com/jonschipp/ISLET/issues/79",
"order": 3
},
"repo_id": {
"type": "integer",
"title": "Repo ID",
"description": "Repo ID e.g. 24237263",
"order": 1
}
}
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
class AddIssueToEpicOutput(komand.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"issue": {
"$ref": "#/definitions/issue_reference",
"title": "Issue",
"description": "The GitHub Issue added to the ZenHub Epic",
"order": 2
},
"status_code": {
"type": "integer",
"title": "Status",
"description": "HTTP status code",
"order": 1
}
},
"definitions": {
"issue_reference": {
"type": "object",
"title": "issue_reference",
"properties": {
"issue_number": {
"type": "integer",
"title": "Issue Number",
"description": "Issue number e.g. 43",
"order": 2
},
"issue_url": {
"type": "string",
"title": "Issue URL",
"description": "Issue URL e.g. https://github.com/jonschipp/ISLET/issues/79",
"order": 3
},
"repo_id": {
"type": "integer",
"title": "Repo ID",
"description": "Repo ID e.g. 24237263",
"order": 1
}
}
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
|
py | b4013d6def7d17b2cd7cb91bd7b8f54751ffb739 |
import numpy as np
from skimage.morphology import erosion
from pyVideoDatasets.BackgroundSubtraction import *
try:
import cv2 as vv
except:
from pyKinectTools.utils.VideoViewer import VideoViewer
vv = VideoViewer()
class BasePlayer(object):
depthIm = None
colorIm = None
users = None
backgroundModel = None
foregroundMask = None
prevcolorIm = None
def __init__(self, base_dir='./', get_depth=True, get_color=False,
get_skeleton=False, bg_subtraction=False, fill_images=False):
self.base_dir = base_dir
self.deviceID = '[]'
self.get_depth = get_depth
self.get_color = get_color
        self.get_skeleton = get_skeleton
self.enable_bg_subtraction = bg_subtraction
self.fill_images = fill_images
def update_background(self):
try:
self.background_model.update(self.depthIm)
self.mask = self.background_model.get_foreground()
except:
self.mask = -1
def set_background(self, im):
self.background_model.backgroundModel = im
def set_bg_model(self, bg_type='box', param=None):
'''
Types:
'box'[param=max_depth]
'static'[param=background]
'mean'
'median'
'adaptive_mog'
'''
if bg_type == 'box':
self.bg_subtraction = BoxModel(param)
elif bg_type == 'static':
            if param is None:
param = self.depthIm
self.bg_subtraction = StaticModel(depthIm=param)
elif bg_type == 'mean':
self.bg_subtraction = MeanModel(depthIm=self.depthIm)
elif bg_type == 'median':
self.bg_subtraction = MedianModel(depthIm=self.depthIm)
elif bg_type == 'adaptive_mog':
self.bg_subtraction = AdaptiveMixtureOfGaussians(self.depthIm, maxGaussians=5, learningRate=0.01, decayRate=0.001, variance=300**2)
else:
print "No background model added"
self.backgroundModel = self.bg_subtraction.get_model()
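    # Example usage (illustrative sketch; parameter values are arbitrary):
    #   player.set_bg_model('box', param=3000)      # fixed max-depth cutoff
    #   player.set_bg_model('adaptive_mog')         # adaptive mixture of Gaussians
    #   player.set_bg_model('static')               # uses the current depthIm as background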
def next(self, frames=1):
pass
def get_person(self, edge_thresh=200):
mask, _, _, _ = extract_people(self.foregroundMask, minPersonPixThresh=5000, gradThresh=edge_thresh)
self.mask = erosion(mask, np.ones([3,3], np.uint8))
return self.mask
def visualize(self, color=True, depth=True, skel=False, text=False):
# ''' Find people '''
if skel:
plotUsers(self.depthIm, self.users)
if self.get_depth and depth:
vv.imshow("Depth", (self.depthIm-1000)/float(self.depthIm.max()))
# vv.imshow("Depth", self.depthIm/6000.)
if self.get_color and color:
vv.imshow("Color "+self.deviceID, self.colorIm)
# vv.putText("Color "+self.deviceID, self.colorIm, "Day "+self.day_dir+" Time "+self.hour_dir+":"+self.minute_dir+" Dev#"+str(self.dev), (10,220))
# vv.imshow("Color", self.colorIm)
vv.waitKey(10)
def run(self):
pass
|
py | b4013e7cd53fb07c353b20294692f9b0bba320f3 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from heat.common import exception
from heat.engine import properties
from heat.engine import resource
class SecurityGroup(resource.Resource):
PROPERTIES = (
GROUP_DESCRIPTION, VPC_ID, SECURITY_GROUP_INGRESS,
SECURITY_GROUP_EGRESS,
) = (
'GroupDescription', 'VpcId', 'SecurityGroupIngress',
'SecurityGroupEgress',
)
_RULE_KEYS = (
RULE_CIDR_IP, RULE_FROM_PORT, RULE_TO_PORT, RULE_IP_PROTOCOL,
RULE_SOURCE_SECURITY_GROUP_ID, RULE_SOURCE_SECURITY_GROUP_NAME,
RULE_SOURCE_SECURITY_GROUP_OWNER_ID,
) = (
'CidrIp', 'FromPort', 'ToPort', 'IpProtocol',
'SourceSecurityGroupId', 'SourceSecurityGroupName',
'SourceSecurityGroupOwnerId',
)
_rule_schema = {
RULE_CIDR_IP: properties.Schema(
properties.Schema.STRING
),
RULE_FROM_PORT: properties.Schema(
properties.Schema.STRING
),
RULE_TO_PORT: properties.Schema(
properties.Schema.STRING
),
RULE_IP_PROTOCOL: properties.Schema(
properties.Schema.STRING
),
RULE_SOURCE_SECURITY_GROUP_ID: properties.Schema(
properties.Schema.STRING
),
RULE_SOURCE_SECURITY_GROUP_NAME: properties.Schema(
properties.Schema.STRING
),
RULE_SOURCE_SECURITY_GROUP_OWNER_ID: properties.Schema(
properties.Schema.STRING,
implemented=False
),
}
properties_schema = {
GROUP_DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('Description of the security group.'),
required=True
),
VPC_ID: properties.Schema(
properties.Schema.STRING,
_('Physical ID of the VPC. Not implemented.')
),
SECURITY_GROUP_INGRESS: properties.Schema(
properties.Schema.LIST,
schema=properties.Schema(
properties.Schema.MAP,
_('List of security group ingress rules.'),
schema=_rule_schema,
)
),
SECURITY_GROUP_EGRESS: properties.Schema(
properties.Schema.LIST,
schema=properties.Schema(
properties.Schema.MAP,
_('List of security group egress rules.'),
schema=_rule_schema,
)
),
}
def handle_create(self):
if self.is_using_neutron():
self._handle_create_neutron()
else:
self._handle_create_nova()
def _convert_to_neutron_rule(self, direction, sg_rule):
return {
'direction': direction,
'ethertype': 'IPv4',
'remote_ip_prefix': sg_rule.get(self.RULE_CIDR_IP),
'port_range_min': sg_rule.get(self.RULE_FROM_PORT),
'port_range_max': sg_rule.get(self.RULE_TO_PORT),
'protocol': sg_rule.get(self.RULE_IP_PROTOCOL),
# Neutron understands both names and ids
'remote_group_id': sg_rule.get(self.RULE_SOURCE_SECURITY_GROUP_ID)
or sg_rule.get(self.RULE_SOURCE_SECURITY_GROUP_NAME),
'security_group_id': self.resource_id
}
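    # Illustrative example (values hypothetical): an ingress rule of the form
    #   {'IpProtocol': 'tcp', 'FromPort': '22', 'ToPort': '22',
    #    'CidrIp': '10.0.0.0/8'}
    # maps to
    #   {'direction': 'ingress', 'ethertype': 'IPv4', 'protocol': 'tcp',
    #    'port_range_min': '22', 'port_range_max': '22',
    #    'remote_ip_prefix': '10.0.0.0/8', 'remote_group_id': None,
    #    'security_group_id': <this resource's id>}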
def _handle_create_neutron(self):
client = self.neutron()
sec = client.create_security_group({'security_group': {
'name': self.physical_resource_name(),
'description': self.properties[self.GROUP_DESCRIPTION]}
})['security_group']
def sanitize_security_group(i):
# Neutron only accepts positive ints
if (i.get(self.RULE_FROM_PORT) is not None and
int(i[self.RULE_FROM_PORT]) < 0):
i[self.RULE_FROM_PORT] = None
if (i.get(self.RULE_TO_PORT) is not None and
int(i[self.RULE_TO_PORT]) < 0):
i[self.RULE_TO_PORT] = None
if (i.get(self.RULE_FROM_PORT) is None and
i.get(self.RULE_TO_PORT) is None):
i[self.RULE_CIDR_IP] = None
self.resource_id_set(sec['id'])
if self.properties[self.SECURITY_GROUP_INGRESS]:
for i in self.properties[self.SECURITY_GROUP_INGRESS]:
sanitize_security_group(i)
try:
rule = client.create_security_group_rule({
'security_group_rule':
self._convert_to_neutron_rule('ingress', i)
})
except Exception as ex:
if self.client_plugin('neutron').is_conflict(ex):
# no worries, the rule is already there
pass
else:
# unexpected error
raise
if self.properties[self.SECURITY_GROUP_EGRESS]:
# Delete the default rules which allow all egress traffic
for rule in sec['security_group_rules']:
if rule['direction'] == 'egress':
client.delete_security_group_rule(rule['id'])
for i in self.properties[self.SECURITY_GROUP_EGRESS]:
sanitize_security_group(i)
try:
rule = client.create_security_group_rule({
'security_group_rule':
self._convert_to_neutron_rule('egress', i)
})
except Exception as ex:
if self.client_plugin('neutron').is_conflict(ex):
# no worries, the rule is already there
pass
else:
# unexpected error
raise
def _handle_create_nova(self):
sec = None
groups = self.nova().security_groups.list()
for group in groups:
if group.name == self.physical_resource_name():
sec = group
break
if not sec:
sec = self.nova().security_groups.create(
self.physical_resource_name(),
self.properties[self.GROUP_DESCRIPTION])
self.resource_id_set(sec.id)
if self.properties[self.SECURITY_GROUP_INGRESS]:
rules_client = self.nova().security_group_rules
for i in self.properties[self.SECURITY_GROUP_INGRESS]:
source_group_id = None
if i.get(self.RULE_SOURCE_SECURITY_GROUP_ID) is not None:
source_group_id = i[self.RULE_SOURCE_SECURITY_GROUP_ID]
elif i.get(self.RULE_SOURCE_SECURITY_GROUP_NAME) is not None:
rule_name = i[self.RULE_SOURCE_SECURITY_GROUP_NAME]
for group in groups:
if group.name == rule_name:
source_group_id = group.id
break
else:
raise SecurityGroupNotFound(group_name=rule_name)
try:
rules_client.create(
sec.id,
i.get(self.RULE_IP_PROTOCOL),
i.get(self.RULE_FROM_PORT),
i.get(self.RULE_TO_PORT),
i.get(self.RULE_CIDR_IP),
source_group_id)
except Exception as ex:
if self.client_plugin('nova').is_bad_request(ex) and \
six.text_type(ex).find('already exists') >= 0:
# no worries, the rule is already there
pass
else:
# unexpected error
raise
def handle_delete(self):
if self.is_using_neutron():
self._handle_delete_neutron()
else:
self._handle_delete_nova()
def _handle_delete_nova(self):
if self.resource_id is not None:
try:
sec = self.nova().security_groups.get(self.resource_id)
except Exception as e:
self.client_plugin('nova').ignore_not_found(e)
else:
for rule in sec.rules:
try:
self.nova().security_group_rules.delete(rule['id'])
except Exception as e:
self.client_plugin('nova').ignore_not_found(e)
self.nova().security_groups.delete(self.resource_id)
def _handle_delete_neutron(self):
client = self.neutron()
if self.resource_id is not None:
try:
sec = client.show_security_group(
self.resource_id)['security_group']
except Exception as ex:
self.client_plugin('neutron').ignore_not_found(ex)
else:
for rule in sec['security_group_rules']:
try:
client.delete_security_group_rule(rule['id'])
except Exception as ex:
self.client_plugin('neutron').ignore_not_found(ex)
try:
client.delete_security_group(self.resource_id)
except Exception as ex:
self.client_plugin('neutron').ignore_not_found(ex)
def FnGetRefId(self):
if self.is_using_neutron():
return super(SecurityGroup, self).FnGetRefId()
else:
return self.physical_resource_name()
def validate(self):
res = super(SecurityGroup, self).validate()
if res:
return res
if (self.properties[self.SECURITY_GROUP_EGRESS] and
not self.is_using_neutron()):
raise exception.EgressRuleNotAllowed()
class SecurityGroupNotFound(exception.HeatException):
msg_fmt = _('Security Group "%(group_name)s" not found')
def resource_mapping():
return {
'AWS::EC2::SecurityGroup': SecurityGroup,
}
|
py | b4013f515ae6a1723a14c71d94ff757e8abd2fab | """Setup script for object_detection."""
from setuptools import find_packages
from setuptools import setup
REQUIRED_PACKAGES = ['Pillow>=1.0']
setup(
name='object_detection',
version='0.1',
install_requires=REQUIRED_PACKAGES,
include_package_data=True,
packages=[p for p in find_packages() if p.startswith('object_detection')],
description='Tensorflow Object Detection Library for Mensural Detection',
)
|
py | b4013f6a2f657d29a1165c4cc37349d7bb6a8c1f | import os
from io import StringIO
from pprint import pprint
from typing import Any
from typing import cast
from typing import Dict
from typing import Iterable
from typing import Iterator
from typing import List
from typing import Optional
from typing import Tuple
from typing import Type
from typing import TYPE_CHECKING
from typing import TypeVar
from typing import Union
import attr
from _pytest._code.code import ExceptionChainRepr
from _pytest._code.code import ExceptionInfo
from _pytest._code.code import ExceptionRepr
from _pytest._code.code import ReprEntry
from _pytest._code.code import ReprEntryNative
from _pytest._code.code import ReprExceptionInfo
from _pytest._code.code import ReprFileLocation
from _pytest._code.code import ReprFuncArgs
from _pytest._code.code import ReprLocals
from _pytest._code.code import ReprTraceback
from _pytest._code.code import TerminalRepr
from _pytest._io import TerminalWriter
from _pytest.compat import final
from _pytest.config import Config
from _pytest.nodes import Collector
from _pytest.nodes import Item
from _pytest.outcomes import skip
if TYPE_CHECKING:
from typing import NoReturn
from typing_extensions import Literal
from _pytest.runner import CallInfo
def getworkerinfoline(node):
try:
return node._workerinfocache
except AttributeError:
d = node.workerinfo
ver = "%s.%s.%s" % d["version_info"][:3]
node._workerinfocache = s = "[{}] {} -- Python {} {}".format(
d["id"], d["sysplatform"], ver, d["executable"]
)
return s
_R = TypeVar("_R", bound="BaseReport")
class BaseReport:
when: Optional[str]
location: Optional[Tuple[str, Optional[int], str]]
longrepr: Union[
None, ExceptionInfo[BaseException], Tuple[str, int, str], str, TerminalRepr
]
sections: List[Tuple[str, str]]
nodeid: str
outcome: "Literal['passed', 'failed', 'skipped']"
def __init__(self, **kw: Any) -> None:
self.__dict__.update(kw)
if TYPE_CHECKING:
# Can have arbitrary fields given to __init__().
def __getattr__(self, key: str) -> Any:
...
def toterminal(self, out: TerminalWriter) -> None:
if hasattr(self, "node"):
out.line(getworkerinfoline(self.node))
longrepr = self.longrepr
if longrepr is None:
return
if hasattr(longrepr, "toterminal"):
longrepr_terminal = cast(TerminalRepr, longrepr)
longrepr_terminal.toterminal(out)
else:
try:
s = str(longrepr)
except UnicodeEncodeError:
s = "<unprintable longrepr>"
out.line(s)
def get_sections(self, prefix: str) -> Iterator[Tuple[str, str]]:
for name, content in self.sections:
if name.startswith(prefix):
yield prefix, content
@property
def longreprtext(self) -> str:
"""Read-only property that returns the full string representation of
``longrepr``.
.. versionadded:: 3.0
"""
file = StringIO()
tw = TerminalWriter(file)
tw.hasmarkup = False
self.toterminal(tw)
exc = file.getvalue()
return exc.strip()
@property
def caplog(self) -> str:
"""Return captured log lines, if log capturing is enabled.
.. versionadded:: 3.5
"""
return "\n".join(
content for (prefix, content) in self.get_sections("Captured log")
)
@property
def capstdout(self) -> str:
"""Return captured text from stdout, if capturing is enabled.
.. versionadded:: 3.0
"""
return "".join(
content for (prefix, content) in self.get_sections("Captured stdout")
)
@property
def capstderr(self) -> str:
"""Return captured text from stderr, if capturing is enabled.
.. versionadded:: 3.0
"""
return "".join(
content for (prefix, content) in self.get_sections("Captured stderr")
)
@property
def passed(self) -> bool:
return self.outcome == "passed"
@property
def failed(self) -> bool:
return self.outcome == "failed"
@property
def skipped(self) -> bool:
return self.outcome == "skipped"
@property
def fspath(self) -> str:
return self.nodeid.split("::")[0]
@property
def count_towards_summary(self) -> bool:
"""**Experimental** Whether this report should be counted towards the
totals shown at the end of the test session: "1 passed, 1 failure, etc".
.. note::
This function is considered **experimental**, so beware that it is subject to changes
even in patch releases.
"""
return True
@property
def head_line(self) -> Optional[str]:
"""**Experimental** The head line shown with longrepr output for this
report, more commonly during traceback representation during
failures::
________ Test.foo ________
In the example above, the head_line is "Test.foo".
.. note::
This function is considered **experimental**, so beware that it is subject to changes
even in patch releases.
"""
if self.location is not None:
fspath, lineno, domain = self.location
return domain
return None
def _get_verbose_word(self, config: Config):
_category, _short, verbose = config.hook.pytest_report_teststatus(
report=self, config=config
)
return verbose
def _to_json(self) -> Dict[str, Any]:
"""Return the contents of this report as a dict of builtin entries,
suitable for serialization.
This was originally the serialize_report() function from xdist (ca03269).
Experimental method.
"""
return _report_to_json(self)
@classmethod
def _from_json(cls: Type[_R], reportdict: Dict[str, object]) -> _R:
"""Create either a TestReport or CollectReport, depending on the calling class.
        It is the caller's responsibility to know which class to pass here.
This was originally the serialize_report() function from xdist (ca03269).
Experimental method.
"""
kwargs = _report_kwargs_from_json(reportdict)
return cls(**kwargs)
def _report_unserialization_failure(
type_name: str, report_class: Type[BaseReport], reportdict
) -> "NoReturn":
url = "https://github.com/pytest-dev/pytest/issues"
stream = StringIO()
pprint("-" * 100, stream=stream)
pprint("INTERNALERROR: Unknown entry type returned: %s" % type_name, stream=stream)
pprint("report_name: %s" % report_class, stream=stream)
pprint(reportdict, stream=stream)
pprint("Please report this bug at %s" % url, stream=stream)
pprint("-" * 100, stream=stream)
raise RuntimeError(stream.getvalue())
@final
class TestReport(BaseReport):
"""Basic test report object (also used for setup and teardown calls if
they fail)."""
__test__ = False
def __init__(
self,
nodeid: str,
location: Tuple[str, Optional[int], str],
keywords,
outcome: "Literal['passed', 'failed', 'skipped']",
longrepr: Union[
None, ExceptionInfo[BaseException], Tuple[str, int, str], str, TerminalRepr
],
when: "Literal['setup', 'call', 'teardown']",
sections: Iterable[Tuple[str, str]] = (),
duration: float = 0,
user_properties: Optional[Iterable[Tuple[str, object]]] = None,
**extra,
) -> None:
#: Normalized collection nodeid.
self.nodeid = nodeid
#: A (filesystempath, lineno, domaininfo) tuple indicating the
#: actual location of a test item - it might be different from the
#: collected one e.g. if a method is inherited from a different module.
self.location: Tuple[str, Optional[int], str] = location
#: A name -> value dictionary containing all keywords and
#: markers associated with a test invocation.
self.keywords = keywords
#: Test outcome, always one of "passed", "failed", "skipped".
self.outcome = outcome
#: None or a failure representation.
self.longrepr = longrepr
#: One of 'setup', 'call', 'teardown' to indicate runtest phase.
self.when = when
#: User properties is a list of tuples (name, value) that holds user
#: defined properties of the test.
self.user_properties = list(user_properties or [])
        #: List of pairs ``(str, str)`` of extra information which needs to be
        #: marshallable. Used by pytest to add captured text
#: from ``stdout`` and ``stderr``, but may be used by other plugins
#: to add arbitrary information to reports.
self.sections = list(sections)
#: Time it took to run just the test.
self.duration = duration
self.__dict__.update(extra)
def __repr__(self) -> str:
return "<{} {!r} when={!r} outcome={!r}>".format(
self.__class__.__name__, self.nodeid, self.when, self.outcome
)
@classmethod
def from_item_and_call(cls, item: Item, call: "CallInfo[None]") -> "TestReport":
"""Create and fill a TestReport with standard item and call info."""
when = call.when
# Remove "collect" from the Literal type -- only for collection calls.
assert when != "collect"
duration = call.duration
keywords = {x: 1 for x in item.keywords}
excinfo = call.excinfo
sections = []
if not call.excinfo:
outcome: Literal["passed", "failed", "skipped"] = "passed"
longrepr: Union[
None,
ExceptionInfo[BaseException],
Tuple[str, int, str],
str,
TerminalRepr,
] = None
else:
if not isinstance(excinfo, ExceptionInfo):
outcome = "failed"
longrepr = excinfo
elif isinstance(excinfo.value, skip.Exception):
outcome = "skipped"
r = excinfo._getreprcrash()
if excinfo.value._use_item_location:
filename, line = item.reportinfo()[:2]
assert line is not None
longrepr = str(filename), line + 1, r.message
else:
longrepr = (str(r.path), r.lineno, r.message)
else:
outcome = "failed"
if call.when == "call":
longrepr = item.repr_failure(excinfo)
else: # exception in setup or teardown
longrepr = item._repr_failure_py(
excinfo, style=item.config.getoption("tbstyle", "auto")
)
for rwhen, key, content in item._report_sections:
sections.append((f"Captured {key} {rwhen}", content))
return cls(
item.nodeid,
item.location,
keywords,
outcome,
longrepr,
when,
sections,
duration,
user_properties=item.user_properties,
)
@final
class CollectReport(BaseReport):
"""Collection report object."""
when = "collect"
def __init__(
self,
nodeid: str,
outcome: "Literal['passed', 'failed', 'skipped']",
longrepr: Union[
None, ExceptionInfo[BaseException], Tuple[str, int, str], str, TerminalRepr
],
result: Optional[List[Union[Item, Collector]]],
sections: Iterable[Tuple[str, str]] = (),
**extra,
) -> None:
#: Normalized collection nodeid.
self.nodeid = nodeid
#: Test outcome, always one of "passed", "failed", "skipped".
self.outcome = outcome
#: None or a failure representation.
self.longrepr = longrepr
#: The collected items and collection nodes.
self.result = result or []
        #: List of pairs ``(str, str)`` of extra information which needs to be
        #: marshallable.
        # Used by pytest to add captured text from ``stdout`` and ``stderr``,
        # but may be used by other plugins to add arbitrary information to
        # reports.
self.sections = list(sections)
self.__dict__.update(extra)
@property
def location(self):
return (self.fspath, None, self.fspath)
def __repr__(self) -> str:
return "<CollectReport {!r} lenresult={} outcome={!r}>".format(
self.nodeid, len(self.result), self.outcome
)
class CollectErrorRepr(TerminalRepr):
def __init__(self, msg: str) -> None:
self.longrepr = msg
def toterminal(self, out: TerminalWriter) -> None:
out.line(self.longrepr, red=True)
def pytest_report_to_serializable(
report: Union[CollectReport, TestReport]
) -> Optional[Dict[str, Any]]:
if isinstance(report, (TestReport, CollectReport)):
data = report._to_json()
data["$report_type"] = report.__class__.__name__
return data
# TODO: Check if this is actually reachable.
return None # type: ignore[unreachable]
def pytest_report_from_serializable(
data: Dict[str, Any],
) -> Optional[Union[CollectReport, TestReport]]:
if "$report_type" in data:
if data["$report_type"] == "TestReport":
return TestReport._from_json(data)
elif data["$report_type"] == "CollectReport":
return CollectReport._from_json(data)
assert False, "Unknown report_type unserialize data: {}".format(
data["$report_type"]
)
return None
def _report_to_json(report: BaseReport) -> Dict[str, Any]:
"""Return the contents of this report as a dict of builtin entries,
suitable for serialization.
This was originally the serialize_report() function from xdist (ca03269).
"""
def serialize_repr_entry(
entry: Union[ReprEntry, ReprEntryNative]
) -> Dict[str, Any]:
data = attr.asdict(entry)
for key, value in data.items():
if hasattr(value, "__dict__"):
data[key] = attr.asdict(value)
entry_data = {"type": type(entry).__name__, "data": data}
return entry_data
def serialize_repr_traceback(reprtraceback: ReprTraceback) -> Dict[str, Any]:
result = attr.asdict(reprtraceback)
result["reprentries"] = [
serialize_repr_entry(x) for x in reprtraceback.reprentries
]
return result
def serialize_repr_crash(
reprcrash: Optional[ReprFileLocation],
) -> Optional[Dict[str, Any]]:
if reprcrash is not None:
return attr.asdict(reprcrash)
else:
return None
def serialize_exception_longrepr(rep: BaseReport) -> Dict[str, Any]:
assert rep.longrepr is not None
# TODO: Investigate whether the duck typing is really necessary here.
longrepr = cast(ExceptionRepr, rep.longrepr)
result: Dict[str, Any] = {
"reprcrash": serialize_repr_crash(longrepr.reprcrash),
"reprtraceback": serialize_repr_traceback(longrepr.reprtraceback),
"sections": longrepr.sections,
}
if isinstance(longrepr, ExceptionChainRepr):
result["chain"] = []
for repr_traceback, repr_crash, description in longrepr.chain:
result["chain"].append(
(
serialize_repr_traceback(repr_traceback),
serialize_repr_crash(repr_crash),
description,
)
)
else:
result["chain"] = None
return result
d = report.__dict__.copy()
if hasattr(report.longrepr, "toterminal"):
if hasattr(report.longrepr, "reprtraceback") and hasattr(
report.longrepr, "reprcrash"
):
d["longrepr"] = serialize_exception_longrepr(report)
else:
d["longrepr"] = str(report.longrepr)
else:
d["longrepr"] = report.longrepr
for name in d:
if isinstance(d[name], os.PathLike):
d[name] = os.fspath(d[name])
elif name == "result":
d[name] = None # for now
return d
def _report_kwargs_from_json(reportdict: Dict[str, Any]) -> Dict[str, Any]:
"""Return **kwargs that can be used to construct a TestReport or
CollectReport instance.
This was originally the serialize_report() function from xdist (ca03269).
"""
def deserialize_repr_entry(entry_data):
data = entry_data["data"]
entry_type = entry_data["type"]
if entry_type == "ReprEntry":
reprfuncargs = None
reprfileloc = None
reprlocals = None
if data["reprfuncargs"]:
reprfuncargs = ReprFuncArgs(**data["reprfuncargs"])
if data["reprfileloc"]:
reprfileloc = ReprFileLocation(**data["reprfileloc"])
if data["reprlocals"]:
reprlocals = ReprLocals(data["reprlocals"]["lines"])
reprentry: Union[ReprEntry, ReprEntryNative] = ReprEntry(
lines=data["lines"],
reprfuncargs=reprfuncargs,
reprlocals=reprlocals,
reprfileloc=reprfileloc,
style=data["style"],
)
elif entry_type == "ReprEntryNative":
reprentry = ReprEntryNative(data["lines"])
else:
_report_unserialization_failure(entry_type, TestReport, reportdict)
return reprentry
def deserialize_repr_traceback(repr_traceback_dict):
repr_traceback_dict["reprentries"] = [
deserialize_repr_entry(x) for x in repr_traceback_dict["reprentries"]
]
return ReprTraceback(**repr_traceback_dict)
def deserialize_repr_crash(repr_crash_dict: Optional[Dict[str, Any]]):
if repr_crash_dict is not None:
return ReprFileLocation(**repr_crash_dict)
else:
return None
if (
reportdict["longrepr"]
and "reprcrash" in reportdict["longrepr"]
and "reprtraceback" in reportdict["longrepr"]
):
reprtraceback = deserialize_repr_traceback(
reportdict["longrepr"]["reprtraceback"]
)
reprcrash = deserialize_repr_crash(reportdict["longrepr"]["reprcrash"])
if reportdict["longrepr"]["chain"]:
chain = []
for repr_traceback_data, repr_crash_data, description in reportdict[
"longrepr"
]["chain"]:
chain.append(
(
deserialize_repr_traceback(repr_traceback_data),
deserialize_repr_crash(repr_crash_data),
description,
)
)
exception_info: Union[
ExceptionChainRepr, ReprExceptionInfo
] = ExceptionChainRepr(chain)
else:
exception_info = ReprExceptionInfo(reprtraceback, reprcrash)
for section in reportdict["longrepr"]["sections"]:
exception_info.addsection(*section)
reportdict["longrepr"] = exception_info
return reportdict
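# Illustrative round-trip sketch (not part of the original module; the nodeid and
# location values are made-up placeholders): serialize a minimal passing
# TestReport to builtin types and rebuild an equivalent report from them.
if __name__ == "__main__":
    _report = TestReport(
        nodeid="test_sample.py::test_ok",
        location=("test_sample.py", 1, "test_ok"),
        keywords={},
        outcome="passed",
        longrepr=None,
        when="call",
    )
    _data = pytest_report_to_serializable(_report)
    _rebuilt = pytest_report_from_serializable(_data)
    assert _rebuilt is not None and _rebuilt.outcome == "passed"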
|
py | b4013fa6ce9d722ed308bb2312738080b3301d58 | from django.core.management.base import BaseCommand
from human_lambdas.user_handler.models import Organization, User
class Command(BaseCommand):
    help = "Updates each user's current organization id"
def handle(self, *args, **options):
users = User.objects.all()
for user in users:
org = Organization.objects.filter(user=user).first()
if not org:
self.stdout.write(
self.style.SUCCESS(
'No organization was found for user "%s"' % user.email
)
)
else:
user.current_organization_id = org.pk
user.save()
self.stdout.write(
self.style.SUCCESS(
'Successfully updated current organization id for user "%s"'
% user.pk
)
)
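# Usage note (assumption based on standard Django conventions, not stated in this
# file): once this module lives under <app>/management/commands/<command_name>.py,
# it is invoked with `python manage.py <command_name>`.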
|
py | b4014028461ea749cba9c7fae64c5d617337c55e | from django.test import TestCase, Client
from django.contrib.auth import get_user_model
from django.urls import reverse
class AdminSiteTests(TestCase):
def setUp(self):
self.client = Client()
self.admin_user = get_user_model().objects.create_superuser(
email='[email protected]',
password='password123'
)
self.client.force_login(self.admin_user)
self.user = get_user_model().objects.create_user(
email='[email protected]',
password='password123',
name='Test user full name'
)
    def test_users_listed(self):
        """Test that users are listed on the user page"""
url = reverse('admin:core_user_changelist')
res = self.client.get(url)
self.assertContains(res, self.user.name)
self.assertContains(res, self.user.email)
def test_user_change_page(self):
"""Test that the user edit page works"""
url = reverse('admin:core_user_change', args=[self.user.id])
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
    def test_create_user_page(self):
        """Test that the create user page works"""
url = reverse('admin:core_user_add')
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
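# Usage note (assumption based on standard Django conventions): these tests run
# under the project's test runner, e.g. `python manage.py test`.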
|
py | b401411f2ea01d56ca83e8e2a75867971714f3f0 | # encoding: UTF-8
# Default empty values
EMPTY_STRING = ''
EMPTY_UNICODE = u''
EMPTY_INT = 0
EMPTY_FLOAT = 0.0
# Direction constants
DIRECTION_NONE = u'无方向'
DIRECTION_LONG = u'多'
DIRECTION_SHORT = u'空'
DIRECTION_UNKNOWN = u'未知'
DIRECTION_NET = u'净'
DIRECTION_SELL = u'卖出' # IB interface
# Offset (open/close) constants
OFFSET_NONE = u'无开平'
OFFSET_OPEN = u'开仓'
OFFSET_CLOSE = u'平仓'
OFFSET_CLOSETODAY = u'平今'
OFFSET_CLOSEYESTERDAY = u'平昨'
OFFSET_UNKNOWN = u'未知'
# Order status constants
STATUS_NOTTRADED = u'未成交'
STATUS_PARTTRADED = u'部分成交'
STATUS_ALLTRADED = u'全部成交'
STATUS_CANCELLED = u'已撤销'
STATUS_UNKNOWN = u'未知'
# Product type constants
PRODUCT_EQUITY = u'股票'
PRODUCT_FUTURES = u'期货'
PRODUCT_OPTION = u'期权'
PRODUCT_INDEX = u'指数'
PRODUCT_COMBINATION = u'组合'
PRODUCT_FOREX = u'外汇'
PRODUCT_UNKNOWN = u'未知'
PRODUCT_SPOT = u'现货'
PRODUCT_DEFER = u'延期'
PRODUCT_NONE = ''
# Price type constants
PRICETYPE_LIMITPRICE = u'限价'
PRICETYPE_MARKETPRICE = u'市价'
PRICETYPE_FAK = u'FAK'
PRICETYPE_FOK = u'FOK'
# Option type
OPTION_CALL = u'看涨期权'
OPTION_PUT = u'看跌期权'
# Exchange type
EXCHANGE_SSE = 'SSE' # Shanghai Stock Exchange
EXCHANGE_SZSE = 'SZSE' # Shenzhen Stock Exchange
EXCHANGE_CFFEX = 'CFFEX' # China Financial Futures Exchange
EXCHANGE_SHFE = 'SHFE' # Shanghai Futures Exchange
EXCHANGE_CZCE = 'CZCE' # Zhengzhou Commodity Exchange
EXCHANGE_DCE = 'DCE' # Dalian Commodity Exchange
EXCHANGE_SGE = 'SGE' # Shanghai Gold Exchange
EXCHANGE_UNKNOWN = 'UNKNOWN' # Unknown exchange
EXCHANGE_NONE = '' # Empty exchange
EXCHANGE_HKEX = 'HKEX' # Hong Kong Stock Exchange
EXCHANGE_SMART = 'SMART' # IB smart routing (stocks, options)
EXCHANGE_NYMEX = 'NYMEX' # IB futures
EXCHANGE_GLOBEX = 'GLOBEX' # CME Globex electronic trading platform
EXCHANGE_IDEALPRO = 'IDEALPRO' # IB forex ECN
EXCHANGE_CME = 'CME' # CME exchange
EXCHANGE_ICE = 'ICE' # ICE exchange
EXCHANGE_OANDA = 'OANDA' # OANDA forex market maker
EXCHANGE_OKCOIN = 'OKCOIN' # OKCoin bitcoin exchange
# Currency type
CURRENCY_USD = 'USD' # US dollar
CURRENCY_CNY = 'CNY' # Chinese yuan (RMB)
CURRENCY_UNKNOWN = 'UNKNOWN' # Unknown currency
CURRENCY_NONE = '' # Empty currency
|
py | b401413e97a2c7138f7abb997a5a784684b0174e | import scipy.sparse
def symmetrize(M):
return (M + M.T)/2
def almost_degenerate(size, density, epsilon, symmetric = False):
N = size
D = scipy.sparse.diags([range(N)], [0], shape = (N,N))
M = D + epsilon*scipy.sparse.rand(N, N, density=density)
if symmetric:
H = symmetrize(M)
else:
H = M
if density >= 0.5:
H = H.toarray()
else:
        H = H.tocsr()  # tocsr() returns a new matrix; keep the converted copy
return(H)
def perturbative_matrix(size, density, epsilon, symmetric = False):
N = size
D = scipy.sparse.diags([range(1, N+1)], [0], shape = (N,N))
M = D + epsilon*scipy.sparse.rand(N, N, density=density)
if symmetric:
H = symmetrize(M)
else:
H = M
if density >= 0.5:
H = H.toarray()
else:
        H = H.tocsr()  # tocsr() returns a new matrix; keep the converted copy
return(H)
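# Illustrative usage sketch (not part of the original module; the size, density
# and epsilon values are arbitrary choices for demonstration):
if __name__ == "__main__":
    H = almost_degenerate(size=50, density=0.1, epsilon=1e-3, symmetric=True)
    P = perturbative_matrix(size=50, density=0.1, epsilon=1e-3)
    print(type(H), type(P))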
|
py | b40141aeb37b7e1ffe17fdda9c022e255d4a73b1 | __author__ = 'Chris Joakim'
__email__ = "[email protected]"
__license__ = "MIT"
__version__ = "2020.06.23"
import json
import os
import azure.cosmos.cosmos_client as cosmos_client
import azure.cosmos.errors as errors
import azure.cosmos.http_constants as http_constants
import azure.cosmos.diagnostics as diagnostics
import azure.cosmos.documents as documents
import azure.cosmos.exceptions as exceptions
import azure.cosmos.partition_key as partition_key
REQUEST_CHARGE_HEADER = 'x-ms-request-charge'
ACTIVITY_ID_HEADER = 'x-ms-activity-id'
class Cosmos(object):
def __init__(self, opts):
self._opts = opts
self._dbname = None
self._dbproxy = None
self._ctrproxy = None
self._cname = None
self._query_metrics = True
self.reset_record_diagnostics()
print(self._opts)
url = opts['url']
key = opts['key']
self._client = cosmos_client.CosmosClient(url, {'masterKey': key})
# <class 'azure.cosmos.cosmos_client.CosmosClient'>
def list_databases(self):
self.reset_record_diagnostics()
return list(self._client.list_databases())
def set_db(self, dbname):
try:
self.reset_record_diagnostics()
self._dbname = dbname
self._dbproxy = self._client.create_database(
id=dbname,
populate_query_metrics=self._query_metrics,
response_hook=self._record_diagnostics)
except:
self._dbproxy = self._client.get_database_client(database=dbname)
return self._dbproxy
# <class 'azure.cosmos.database.DatabaseProxy'>
def list_containers(self, proxy=None):
self.reset_record_diagnostics()
return list(self._dbproxy.list_containers())
def create_container(self, cname, pk, throughput):
try:
self.reset_record_diagnostics()
self._ctrproxy = self._dbproxy.create_container(
id=cname,
partition_key=partition_key.PartitionKey(path=pk),
offer_throughput=throughput,
populate_query_metrics=self._query_metrics,
response_hook=self._record_diagnostics)
return self._ctrproxy
# <class 'azure.cosmos.container.ContainerProxy'>
except exceptions.CosmosResourceExistsError:
return self.set_container(cname)
except:
return None
def set_container(self, cname):
self.reset_record_diagnostics()
self._ctrproxy = self._dbproxy.get_container_client(cname)
# <class 'azure.cosmos.container.ContainerProxy'>
return self._ctrproxy
def update_container_throughput(self, cname, throughput):
self.reset_record_diagnostics()
self.set_container(cname)
offer = self._ctrproxy.replace_throughput(
throughput=int(throughput),
response_hook=self._record_diagnostics)
# <class 'azure.cosmos.offer.Offer'>
return offer
def get_container_offer(self, cname):
self.reset_record_diagnostics()
self.set_container(cname)
offer = self._ctrproxy.read_offer(
response_hook=self._record_diagnostics)
# <class 'azure.cosmos.offer.Offer'>
return offer
def delete_container(self, cname):
try:
self.reset_record_diagnostics()
return self._dbproxy.delete_container(
cname,
populate_query_metrics=self._query_metrics,
response_hook=self._record_diagnostics)
except:
return None
def upsert_doc(self, doc):
try:
self.reset_record_diagnostics()
return self._ctrproxy.upsert_item(
doc,
populate_query_metrics=self._query_metrics,
response_hook=self._record_diagnostics)
except:
return None
def delete_doc(self, doc, doc_pk):
try:
self.reset_record_diagnostics()
return self._ctrproxy.delete_item(
doc,
partition_key=doc_pk,
populate_query_metrics=self._query_metrics,
response_hook=self._record_diagnostics)
except:
return None
def read_doc(self, cname, doc_id, doc_pk):
try:
self.set_container(cname)
self.reset_record_diagnostics()
return self._ctrproxy.read_item(
doc_id,
partition_key=doc_pk,
populate_query_metrics=self._query_metrics,
response_hook=self._record_diagnostics)
except:
return None
def query_container(self, cname, sql, xpartition, max_count):
try:
self.set_container(cname)
self.reset_record_diagnostics()
return self._ctrproxy.query_items(
query=sql,
enable_cross_partition_query=xpartition,
max_item_count=max_count,
populate_query_metrics=self._query_metrics,
response_hook=self._record_diagnostics)
except:
return None
# Metrics and Diagnostics
def enable_query_metrics(self):
self._query_metrics = True
def disable_query_metrics(self):
self._query_metrics = False
def reset_record_diagnostics(self):
self._record_diagnostics = diagnostics.RecordDiagnostics()
def print_record_diagnostics(self):
# <class 'azure.cosmos.diagnostics.RecordDiagnostics'>
# headers is an instance of <class 'requests.structures.CaseInsensitiveDict'>
# and is not JSON serializable
# headers.keys() is an instance of <class 'collections.abc.KeysView'>
print('record_diagnostics: {}'.format(self._record_diagnostics.headers))
print(str(type(self._record_diagnostics.headers)))
keys = self._record_diagnostics.headers.keys()
print(str(type(keys)))
print(keys)
for header in self._record_diagnostics.headers.items():
print(header)
print(str(type(header)))
def record_diagnostics_headers_dict(self):
data = dict()
for header in self._record_diagnostics.headers.items():
key, val = header # unpack the header 2-tuple
data[key] = val
return data
def print_last_request_charge(self):
print('last_request_charge: {} activity: {}'.format(
self.last_request_charge(),
self.last_activity_id()))
def last_request_charge(self):
if REQUEST_CHARGE_HEADER in self._record_diagnostics.headers:
return self._record_diagnostics.headers[REQUEST_CHARGE_HEADER]
else:
return -1
def last_activity_id(self):
if ACTIVITY_ID_HEADER in self._record_diagnostics.headers:
return self._record_diagnostics.headers[ACTIVITY_ID_HEADER]
else:
return None
# Example dict from method record_diagnostics_headers_dict:
# {
# "Cache-Control": "no-store, no-cache",
# "Content-Location": "https://cjoakimcosmossql-eastus.documents.azure.com/dbs/dev/colls/test/docs/ba5f31cf-7f74-4534-964f-8b36b4dc26cd/",
# "Content-Type": "application/json",
# "Date": "Thu, 04 Jun 2020 16:25:28 GMT",
# "Pragma": "no-cache",
# "Server": "Microsoft-HTTPAPI/2.0",
# "Strict-Transport-Security": "max-age=31536000",
# "Transfer-Encoding": "chunked",
# "etag": "\"7d00094d-0000-0100-0000-5ed920780000\"",
# "lsn": "14",
# "x-ms-activity-id": "19ae0802-6d2e-4cba-b534-5a8b1a1ec502",
# "x-ms-alt-content-path": "dbs/dev/colls/test",
# "x-ms-content-path": "YtUbAOWiNQE=",
# "x-ms-cosmos-item-llsn": "14",
# "x-ms-cosmos-llsn": "14",
# "x-ms-gatewayversion": "version=2.11.0",
# "x-ms-global-Committed-lsn": "14",
# "x-ms-item-lsn": "14",
# "x-ms-last-state-change-utc": "Thu, 04 Jun 2020 11:35:30.048 GMT",
# "x-ms-number-of-read-regions": "0",
# "x-ms-request-charge": "1",
# "x-ms-resource-quota": "documentSize=10240;documentsSize=10485760;documentsCount=-1;collectionSize=10485760;",
# "x-ms-resource-usage": "documentSize=0;documentsSize=2;documentsCount=3;collectionSize=3;",
# "x-ms-schemaversion": "1.9",
# "x-ms-serviceversion": "version=2.11.0.0",
# "x-ms-session-token": "0:-1#14",
# "x-ms-transport-request-id": "2",
# "x-ms-xp-role": "1"
# }
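# Illustrative usage sketch (not part of the original module): the account URL,
# key, database and container names below are placeholders, not real resources.
if __name__ == "__main__":
    opts = {"url": "https://<account>.documents.azure.com:443/", "key": "<key>"}
    c = Cosmos(opts)
    c.set_db("dev")
    c.set_container("test")
    c.upsert_doc({"id": "doc1", "pk": "doc1", "note": "hello"})
    c.print_last_request_charge()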
|
py | b40142c70def891c810bef69e74e4f781d7671b2 | import peewee
# Create the database connection
db = peewee.SqliteDatabase('database/dictionary.db')
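# Usage note (illustrative sketch, not part of the original module): once the
# model classes below are defined, the tables can be created and queried with,
# for example:
#   db.connect()
#   db.create_tables([Any, Verb, Noun, Adjective, Adverb])
#   Noun.get_or_create(text="casa",
#                      defaults={"normalized_text": "casa", "meaning": "house"})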
class BaseModel(peewee.Model):
"""Base model class, all models will inherit this class"""
text = peewee.CharField(unique=True)
normalized_text = peewee.CharField()
meaning = peewee.CharField()
class Meta:
# Indicates where the model should be stored
database = db
class Any(BaseModel):
"""This class represents the 'Any' table"""
class Verb(BaseModel):
"""This class represents the 'Verb' table"""
class Noun(BaseModel):
"""This class represents the 'Noun' table"""
class Adjective(BaseModel):
"""This class represents the 'Adjective' table"""
class Adverb(BaseModel):
"""This class represents the 'Adverb' table"""
|
py | b40142e44c7ecdbfa99c07b889038a2fb9dd1e29 | # Copyright (C) 2019 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
import inspect
import sqlalchemy as sa
from ggrc.models import all_models
from ggrc.models import inflector
from ggrc.models import reflection
from ggrc.models.all_models import * # noqa
from ggrc.models.custom_attribute_definition import init_cad_listeners
from ggrc.utils import html_cleaner
from ggrc.utils import benchmark
"""All GGRC model objects and associated utilities."""
def init_models(app):
"""Init models."""
for model in all_models.all_models:
inflector.register_inflections(model._inflector)
def init_hooks():
"""Initialize main and extensions related SQLAlchemy hooks."""
from ggrc.extensions import get_extension_modules
from ggrc.models import hooks
hooks.init_hooks()
for extension_module in get_extension_modules():
ext_init_hooks = getattr(extension_module, 'init_hooks', None)
if ext_init_hooks:
ext_init_hooks()
def init_all_models(app):
"""Register all GGRC models services with the Flask application ``app``."""
from ggrc.extensions import get_extension_modules
# Usually importing the module is enough, but just in case, also invoke
# ``init_models``
init_models(app)
for extension_module in get_extension_modules():
ext_init_models = getattr(extension_module, 'init_models', None)
if ext_init_models:
ext_init_models(app)
init_hooks()
def init_lazy_mixins():
"""Lazy mixins initialisation
Mixins with `__lazy__init__` property set to True will wait with their
initialization until after the models have been fully initialized. This is
useful in cases where we need full model class, e.g. to hook up signaling
logic.
"""
for model in all_models.all_models:
# MRO chain includes base model that we don't want to include here
mixins = (mixin for mixin in inspect.getmro(model) if mixin != model)
for mixin in mixins:
if getattr(mixin, '__lazy_init__', False):
mixin.init(model)
def init_session_monitor_cache():
"""Init session cache monitor."""
from sqlalchemy.orm.session import Session
from sqlalchemy import event
from ggrc.models.cache import Cache
def update_cache_before_flush(session, flush_context, objects):
"""Updates cache before flush."""
with benchmark("update cache before flush"):
cache = Cache.get_cache(create=True)
if cache:
cache.update_before_flush(session, flush_context)
def update_cache_after_flush(session, flush_context):
"""Updates cache after flush."""
with benchmark("update cache after flush"):
cache = Cache.get_cache(create=False)
if cache:
cache.update_after_flush(session, flush_context)
def clear_cache(session):
"""Clear cache."""
cache = Cache.get_cache()
if cache:
cache.clear()
event.listen(Session, 'before_flush', update_cache_before_flush)
event.listen(Session, 'after_flush', update_cache_after_flush)
event.listen(Session, 'after_commit', clear_cache)
event.listen(Session, 'after_rollback', clear_cache)
def init_sanitization_hooks():
"""Registers event listener on String/Text attributes."""
for model in all_models.all_models: # noqa
attr_names = reflection.AttributeInfo.gather_attrs(model, "_sanitize_html")
for attr_name in attr_names:
attr = getattr(model, attr_name)
sa.event.listen(attr, 'set', html_cleaner.cleaner, retval=True)
def init_app(app):
"""Init apps."""
init_all_models(app)
init_lazy_mixins()
init_session_monitor_cache()
init_sanitization_hooks()
init_cad_listeners()
from ggrc.models.inflector import get_model # noqa
|
py | b401432a3fc2a42a67f73624df1e8bab1a6f5940 | import itertools
import random
from uuid import uuid4
from moto.packages.boto.ec2.blockdevicemapping import (
BlockDeviceType,
BlockDeviceMapping,
)
from moto.ec2.exceptions import InvalidInstanceIdError
from collections import OrderedDict
from moto.core import ACCOUNT_ID, BaseBackend, BaseModel, CloudFormationModel
from moto.core.utils import camelcase_to_underscores, BackendDict
from moto.ec2 import ec2_backends
from moto.elb import elb_backends
from moto.elbv2 import elbv2_backends
from moto.elb.exceptions import LoadBalancerNotFoundError
from .exceptions import (
AutoscalingClientError,
ResourceContentionError,
InvalidInstanceError,
ValidationError,
)
# http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AS_Concepts.html#Cooldown
DEFAULT_COOLDOWN = 300
ASG_NAME_TAG = "aws:autoscaling:groupName"
class InstanceState(object):
def __init__(
self,
instance,
lifecycle_state="InService",
health_status="Healthy",
protected_from_scale_in=False,
autoscaling_group=None,
):
self.instance = instance
self.lifecycle_state = lifecycle_state
self.health_status = health_status
self.protected_from_scale_in = protected_from_scale_in
if not hasattr(self.instance, "autoscaling_group"):
self.instance.autoscaling_group = autoscaling_group
class FakeLifeCycleHook(BaseModel):
def __init__(self, name, as_name, transition, timeout, result):
self.name = name
self.as_name = as_name
if transition:
self.transition = transition
if timeout:
self.timeout = timeout
else:
self.timeout = 3600
if result:
self.result = result
else:
self.result = "ABANDON"
class FakeScalingPolicy(BaseModel):
def __init__(
self,
name,
policy_type,
metric_aggregation_type,
adjustment_type,
as_name,
min_adjustment_magnitude,
scaling_adjustment,
cooldown,
target_tracking_config,
step_adjustments,
estimated_instance_warmup,
predictive_scaling_configuration,
autoscaling_backend,
):
self.name = name
self.policy_type = policy_type
self.metric_aggregation_type = metric_aggregation_type
self.adjustment_type = adjustment_type
self.as_name = as_name
self.min_adjustment_magnitude = min_adjustment_magnitude
self.scaling_adjustment = scaling_adjustment
if cooldown is not None:
self.cooldown = cooldown
else:
self.cooldown = DEFAULT_COOLDOWN
self.target_tracking_config = target_tracking_config
self.step_adjustments = step_adjustments
self.estimated_instance_warmup = estimated_instance_warmup
self.predictive_scaling_configuration = predictive_scaling_configuration
self.autoscaling_backend = autoscaling_backend
@property
def arn(self):
return f"arn:aws:autoscaling:{self.autoscaling_backend.region}:{ACCOUNT_ID}:scalingPolicy:c322761b-3172-4d56-9a21-0ed9d6161d67:autoScalingGroupName/{self.as_name}:policyName/{self.name}"
def execute(self):
if self.adjustment_type == "ExactCapacity":
self.autoscaling_backend.set_desired_capacity(
self.as_name, self.scaling_adjustment
)
elif self.adjustment_type == "ChangeInCapacity":
self.autoscaling_backend.change_capacity(
self.as_name, self.scaling_adjustment
)
elif self.adjustment_type == "PercentChangeInCapacity":
self.autoscaling_backend.change_capacity_percent(
self.as_name, self.scaling_adjustment
)
class FakeLaunchConfiguration(CloudFormationModel):
def __init__(
self,
name,
image_id,
key_name,
ramdisk_id,
kernel_id,
security_groups,
user_data,
instance_type,
instance_monitoring,
instance_profile_name,
spot_price,
ebs_optimized,
associate_public_ip_address,
block_device_mapping_dict,
):
self.name = name
self.image_id = image_id
self.key_name = key_name
self.ramdisk_id = ramdisk_id
self.kernel_id = kernel_id
self.security_groups = security_groups if security_groups else []
self.user_data = user_data
self.instance_type = instance_type
self.instance_monitoring = instance_monitoring
self.instance_profile_name = instance_profile_name
self.spot_price = spot_price
self.ebs_optimized = ebs_optimized
self.associate_public_ip_address = associate_public_ip_address
self.block_device_mapping_dict = block_device_mapping_dict
@classmethod
def create_from_instance(cls, name, instance, backend):
config = backend.create_launch_configuration(
name=name,
image_id=instance.image_id,
kernel_id="",
ramdisk_id="",
key_name=instance.key_name,
security_groups=instance.security_groups,
user_data=instance.user_data,
instance_type=instance.instance_type,
instance_monitoring=False,
instance_profile_name=None,
spot_price=None,
ebs_optimized=instance.ebs_optimized,
associate_public_ip_address=instance.associate_public_ip,
block_device_mappings=instance.block_device_mapping,
)
return config
@staticmethod
def cloudformation_name_type():
return "LaunchConfigurationName"
@staticmethod
def cloudformation_type():
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-autoscaling-launchconfiguration.html
return "AWS::AutoScaling::LaunchConfiguration"
@classmethod
def create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name, **kwargs
):
properties = cloudformation_json["Properties"]
instance_profile_name = properties.get("IamInstanceProfile")
backend = autoscaling_backends[region_name]
config = backend.create_launch_configuration(
name=resource_name,
image_id=properties.get("ImageId"),
kernel_id=properties.get("KernelId"),
ramdisk_id=properties.get("RamdiskId"),
key_name=properties.get("KeyName"),
security_groups=properties.get("SecurityGroups"),
user_data=properties.get("UserData"),
instance_type=properties.get("InstanceType"),
instance_monitoring=properties.get("InstanceMonitoring"),
instance_profile_name=instance_profile_name,
spot_price=properties.get("SpotPrice"),
ebs_optimized=properties.get("EbsOptimized"),
associate_public_ip_address=properties.get("AssociatePublicIpAddress"),
block_device_mappings=properties.get("BlockDeviceMapping.member"),
)
return config
@classmethod
def update_from_cloudformation_json(
cls, original_resource, new_resource_name, cloudformation_json, region_name
):
cls.delete_from_cloudformation_json(
original_resource.name, cloudformation_json, region_name
)
return cls.create_from_cloudformation_json(
new_resource_name, cloudformation_json, region_name
)
@classmethod
def delete_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
backend = autoscaling_backends[region_name]
try:
backend.delete_launch_configuration(resource_name)
except KeyError:
pass
def delete(self, region_name):
backend = autoscaling_backends[region_name]
backend.delete_launch_configuration(self.name)
@property
def physical_resource_id(self):
return self.name
@property
def block_device_mappings(self):
if not self.block_device_mapping_dict:
return None
else:
return self._parse_block_device_mappings()
@property
def instance_monitoring_enabled(self):
if self.instance_monitoring:
return "true"
return "false"
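    # Illustrative shape of block_device_mapping_dict as parsed below (keys are the
    # flattened parameter names this backend receives; the values are example
    # strings, not taken from this file):
    #   [{"device_name": "/dev/xvdb", "ebs._volume_size": "20",
    #     "ebs._volume_type": "gp2", "ebs._delete_on_termination": "true"}]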
def _parse_block_device_mappings(self):
block_device_map = BlockDeviceMapping()
for mapping in self.block_device_mapping_dict:
block_type = BlockDeviceType()
mount_point = mapping.get("device_name")
if "ephemeral" in mapping.get("virtual_name", ""):
block_type.ephemeral_name = mapping.get("virtual_name")
else:
block_type.volume_type = mapping.get("ebs._volume_type")
block_type.snapshot_id = mapping.get("ebs._snapshot_id")
block_type.delete_on_termination = mapping.get(
"ebs._delete_on_termination"
)
block_type.size = mapping.get("ebs._volume_size")
block_type.iops = mapping.get("ebs._iops")
block_device_map[mount_point] = block_type
return block_device_map
class FakeAutoScalingGroup(CloudFormationModel):
def __init__(
self,
name,
availability_zones,
desired_capacity,
max_size,
min_size,
launch_config_name,
launch_template,
vpc_zone_identifier,
default_cooldown,
health_check_period,
health_check_type,
load_balancers,
target_group_arns,
placement_group,
termination_policies,
autoscaling_backend,
ec2_backend,
tags,
new_instances_protected_from_scale_in=False,
):
self.autoscaling_backend = autoscaling_backend
self.ec2_backend = ec2_backend
self.name = name
self._id = str(uuid4())
self.region = self.autoscaling_backend.region
self._set_azs_and_vpcs(availability_zones, vpc_zone_identifier)
self.max_size = max_size
self.min_size = min_size
self.launch_template = None
self.launch_config = None
self._set_launch_configuration(launch_config_name, launch_template)
self.default_cooldown = (
default_cooldown if default_cooldown else DEFAULT_COOLDOWN
)
self.health_check_period = health_check_period
self.health_check_type = health_check_type if health_check_type else "EC2"
self.load_balancers = load_balancers
self.target_group_arns = target_group_arns
self.placement_group = placement_group
self.termination_policies = termination_policies
self.new_instances_protected_from_scale_in = (
new_instances_protected_from_scale_in
)
self.suspended_processes = []
self.instance_states = []
self.tags = tags or []
self.set_desired_capacity(desired_capacity)
@property
def tags(self):
return self._tags
@tags.setter
def tags(self, tags):
for tag in tags:
if "resource_id" not in tag or not tag["resource_id"]:
tag["resource_id"] = self.name
if "resource_type" not in tag or not tag["resource_type"]:
tag["resource_type"] = "auto-scaling-group"
self._tags = tags
@property
def arn(self):
return f"arn:aws:autoscaling:{self.region}:{ACCOUNT_ID}:autoScalingGroup:{self._id}:autoScalingGroupName/{self.name}"
def active_instances(self):
return [x for x in self.instance_states if x.lifecycle_state == "InService"]
def _set_azs_and_vpcs(self, availability_zones, vpc_zone_identifier, update=False):
# for updates, if only AZs are provided, they must not clash with
# the AZs of existing VPCs
if update and availability_zones and not vpc_zone_identifier:
vpc_zone_identifier = self.vpc_zone_identifier
if vpc_zone_identifier:
# extract azs for vpcs
subnet_ids = vpc_zone_identifier.split(",")
subnets = self.autoscaling_backend.ec2_backend.get_all_subnets(
subnet_ids=subnet_ids
)
vpc_zones = [subnet.availability_zone for subnet in subnets]
if availability_zones and set(availability_zones) != set(vpc_zones):
raise AutoscalingClientError(
"ValidationError",
"The availability zones of the specified subnets and the Auto Scaling group do not match",
)
availability_zones = vpc_zones
elif not availability_zones:
if not update:
raise AutoscalingClientError(
"ValidationError",
"At least one Availability Zone or VPC Subnet is required.",
)
return
self.availability_zones = availability_zones
self.vpc_zone_identifier = vpc_zone_identifier
def _set_launch_configuration(self, launch_config_name, launch_template):
if launch_config_name:
self.launch_config = self.autoscaling_backend.launch_configurations[
launch_config_name
]
self.launch_config_name = launch_config_name
if launch_template:
launch_template_id = launch_template.get("launch_template_id")
launch_template_name = launch_template.get("launch_template_name")
if not (launch_template_id or launch_template_name) or (
launch_template_id and launch_template_name
):
raise ValidationError(
"Valid requests must contain either launchTemplateId or LaunchTemplateName"
)
if launch_template_id:
self.launch_template = self.ec2_backend.get_launch_template(
launch_template_id
)
elif launch_template_name:
self.launch_template = self.ec2_backend.get_launch_template_by_name(
launch_template_name
)
self.launch_template_version = launch_template["version"]
@staticmethod
def __set_string_propagate_at_launch_booleans_on_tags(tags):
bool_to_string = {True: "true", False: "false"}
for tag in tags:
if "PropagateAtLaunch" in tag:
tag["PropagateAtLaunch"] = bool_to_string[tag["PropagateAtLaunch"]]
return tags
@staticmethod
def cloudformation_name_type():
return "AutoScalingGroupName"
@staticmethod
def cloudformation_type():
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-autoscaling-autoscalinggroup.html
return "AWS::AutoScaling::AutoScalingGroup"
@classmethod
def create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name, **kwargs
):
properties = cloudformation_json["Properties"]
launch_config_name = properties.get("LaunchConfigurationName")
launch_template = {
camelcase_to_underscores(k): v
for k, v in properties.get("LaunchTemplate", {}).items()
}
load_balancer_names = properties.get("LoadBalancerNames", [])
target_group_arns = properties.get("TargetGroupARNs", [])
backend = autoscaling_backends[region_name]
group = backend.create_auto_scaling_group(
name=resource_name,
availability_zones=properties.get("AvailabilityZones", []),
desired_capacity=properties.get("DesiredCapacity"),
max_size=properties.get("MaxSize"),
min_size=properties.get("MinSize"),
launch_config_name=launch_config_name,
launch_template=launch_template,
vpc_zone_identifier=(
",".join(properties.get("VPCZoneIdentifier", [])) or None
),
default_cooldown=properties.get("Cooldown"),
health_check_period=properties.get("HealthCheckGracePeriod"),
health_check_type=properties.get("HealthCheckType"),
load_balancers=load_balancer_names,
target_group_arns=target_group_arns,
placement_group=None,
termination_policies=properties.get("TerminationPolicies", []),
tags=cls.__set_string_propagate_at_launch_booleans_on_tags(
properties.get("Tags", [])
),
new_instances_protected_from_scale_in=properties.get(
"NewInstancesProtectedFromScaleIn", False
),
)
return group
@classmethod
def update_from_cloudformation_json(
cls, original_resource, new_resource_name, cloudformation_json, region_name
):
cls.delete_from_cloudformation_json(
original_resource.name, cloudformation_json, region_name
)
return cls.create_from_cloudformation_json(
new_resource_name, cloudformation_json, region_name
)
@classmethod
def delete_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
backend = autoscaling_backends[region_name]
try:
backend.delete_auto_scaling_group(resource_name)
except KeyError:
pass
def delete(self, region_name):
backend = autoscaling_backends[region_name]
backend.delete_auto_scaling_group(self.name)
@property
def physical_resource_id(self):
return self.name
@property
def image_id(self):
if self.launch_template:
version = self.launch_template.get_version(self.launch_template_version)
return version.image_id
return self.launch_config.image_id
@property
def instance_type(self):
if self.launch_template:
version = self.launch_template.get_version(self.launch_template_version)
return version.instance_type
return self.launch_config.instance_type
@property
def user_data(self):
if self.launch_template:
version = self.launch_template.get_version(self.launch_template_version)
return version.user_data
return self.launch_config.user_data
@property
def security_groups(self):
if self.launch_template:
version = self.launch_template.get_version(self.launch_template_version)
return version.security_groups
return self.launch_config.security_groups
def update(
self,
availability_zones,
desired_capacity,
max_size,
min_size,
launch_config_name,
launch_template,
vpc_zone_identifier,
health_check_period,
health_check_type,
new_instances_protected_from_scale_in=None,
):
self._set_azs_and_vpcs(availability_zones, vpc_zone_identifier, update=True)
if max_size is not None:
self.max_size = max_size
if min_size is not None:
self.min_size = min_size
if desired_capacity is None:
if min_size is not None and min_size > len(self.instance_states):
desired_capacity = min_size
if max_size is not None and max_size < len(self.instance_states):
desired_capacity = max_size
self._set_launch_configuration(launch_config_name, launch_template)
if health_check_period is not None:
self.health_check_period = health_check_period
if health_check_type is not None:
self.health_check_type = health_check_type
if new_instances_protected_from_scale_in is not None:
self.new_instances_protected_from_scale_in = (
new_instances_protected_from_scale_in
)
if desired_capacity is not None:
self.set_desired_capacity(desired_capacity)
def set_desired_capacity(self, new_capacity):
if new_capacity is None:
self.desired_capacity = self.min_size
else:
self.desired_capacity = new_capacity
curr_instance_count = len(self.active_instances())
if self.desired_capacity == curr_instance_count:
pass # Nothing to do here
elif self.desired_capacity > curr_instance_count:
# Need more instances
count_needed = int(self.desired_capacity) - int(curr_instance_count)
propagated_tags = self.get_propagated_tags()
self.replace_autoscaling_group_instances(count_needed, propagated_tags)
else:
# Need to remove some instances
count_to_remove = curr_instance_count - self.desired_capacity
instances_to_remove = [ # only remove unprotected
state
for state in self.instance_states
if not state.protected_from_scale_in
][:count_to_remove]
if instances_to_remove: # just in case not instances to remove
instance_ids_to_remove = [
instance.instance.id for instance in instances_to_remove
]
self.autoscaling_backend.ec2_backend.terminate_instances(
instance_ids_to_remove
)
self.instance_states = list(
set(self.instance_states) - set(instances_to_remove)
)
if self.name in self.autoscaling_backend.autoscaling_groups:
self.autoscaling_backend.update_attached_elbs(self.name)
self.autoscaling_backend.update_attached_target_groups(self.name)
def get_propagated_tags(self):
propagated_tags = {}
for tag in self.tags:
            # boto uses 'propagate_at_launch'
# boto3 and cloudformation use PropagateAtLaunch
if "propagate_at_launch" in tag and tag["propagate_at_launch"] == "true":
propagated_tags[tag["key"]] = tag["value"]
if "PropagateAtLaunch" in tag and tag["PropagateAtLaunch"] == "true":
propagated_tags[tag["Key"]] = tag["Value"]
return propagated_tags
def replace_autoscaling_group_instances(self, count_needed, propagated_tags):
propagated_tags[ASG_NAME_TAG] = self.name
reservation = self.autoscaling_backend.ec2_backend.add_instances(
self.image_id,
count_needed,
self.user_data,
self.security_groups,
instance_type=self.instance_type,
tags={"instance": propagated_tags},
placement=random.choice(self.availability_zones),
)
for instance in reservation.instances:
instance.autoscaling_group = self
self.instance_states.append(
InstanceState(
instance,
protected_from_scale_in=self.new_instances_protected_from_scale_in,
)
)
def append_target_groups(self, target_group_arns):
append = [x for x in target_group_arns if x not in self.target_group_arns]
self.target_group_arns.extend(append)
class AutoScalingBackend(BaseBackend):
def __init__(self, region_name):
self.autoscaling_groups = OrderedDict()
self.launch_configurations = OrderedDict()
self.policies = {}
self.lifecycle_hooks = {}
self.ec2_backend = ec2_backends[region_name]
self.elb_backend = elb_backends[region_name]
self.elbv2_backend = elbv2_backends[region_name]
self.region = region_name
def reset(self):
region = self.region
self.__dict__ = {}
self.__init__(region)
@staticmethod
def default_vpc_endpoint_service(service_region, zones):
"""Default VPC endpoint service."""
return BaseBackend.default_vpc_endpoint_service_factory(
service_region, zones, "autoscaling"
) + BaseBackend.default_vpc_endpoint_service_factory(
service_region, zones, "autoscaling-plans"
)
def create_launch_configuration(
self,
name,
image_id,
key_name,
kernel_id,
ramdisk_id,
security_groups,
user_data,
instance_type,
instance_monitoring,
instance_profile_name,
spot_price,
ebs_optimized,
associate_public_ip_address,
block_device_mappings,
instance_id=None,
):
valid_requests = [
instance_id is not None,
image_id is not None and instance_type is not None,
]
if not any(valid_requests):
raise ValidationError(
"Valid requests must contain either the InstanceID parameter or both the ImageId and InstanceType parameters."
)
if instance_id is not None:
# TODO: https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-lc-with-instanceID.html
pass
launch_configuration = FakeLaunchConfiguration(
name=name,
image_id=image_id,
key_name=key_name,
kernel_id=kernel_id,
ramdisk_id=ramdisk_id,
security_groups=security_groups,
user_data=user_data,
instance_type=instance_type,
instance_monitoring=instance_monitoring,
instance_profile_name=instance_profile_name,
spot_price=spot_price,
ebs_optimized=ebs_optimized,
associate_public_ip_address=associate_public_ip_address,
block_device_mapping_dict=block_device_mappings,
)
self.launch_configurations[name] = launch_configuration
return launch_configuration
def describe_launch_configurations(self, names):
configurations = self.launch_configurations.values()
if names:
return [
configuration
for configuration in configurations
if configuration.name in names
]
else:
return list(configurations)
def delete_launch_configuration(self, launch_configuration_name):
self.launch_configurations.pop(launch_configuration_name, None)
def create_auto_scaling_group(
self,
name,
availability_zones,
desired_capacity,
max_size,
min_size,
launch_config_name,
launch_template,
vpc_zone_identifier,
default_cooldown,
health_check_period,
health_check_type,
load_balancers,
target_group_arns,
placement_group,
termination_policies,
tags,
new_instances_protected_from_scale_in=False,
instance_id=None,
):
def make_int(value):
return int(value) if value is not None else value
max_size = make_int(max_size)
min_size = make_int(min_size)
desired_capacity = make_int(desired_capacity)
default_cooldown = make_int(default_cooldown)
if health_check_period is None:
health_check_period = 300
else:
health_check_period = make_int(health_check_period)
# TODO: Add MixedInstancesPolicy once implemented.
# Verify only a single launch config-like parameter is provided.
params = [launch_config_name, launch_template, instance_id]
num_params = sum([1 for param in params if param])
if num_params != 1:
raise ValidationError(
"Valid requests must contain either LaunchTemplate, LaunchConfigurationName, "
"InstanceId or MixedInstancesPolicy parameter."
)
if instance_id:
try:
instance = self.ec2_backend.get_instance(instance_id)
launch_config_name = name
FakeLaunchConfiguration.create_from_instance(
launch_config_name, instance, self
)
except InvalidInstanceIdError:
raise InvalidInstanceError(instance_id)
group = FakeAutoScalingGroup(
name=name,
availability_zones=availability_zones,
desired_capacity=desired_capacity,
max_size=max_size,
min_size=min_size,
launch_config_name=launch_config_name,
launch_template=launch_template,
vpc_zone_identifier=vpc_zone_identifier,
default_cooldown=default_cooldown,
health_check_period=health_check_period,
health_check_type=health_check_type,
load_balancers=load_balancers,
target_group_arns=target_group_arns,
placement_group=placement_group,
termination_policies=termination_policies,
autoscaling_backend=self,
ec2_backend=self.ec2_backend,
tags=tags,
new_instances_protected_from_scale_in=new_instances_protected_from_scale_in,
)
self.autoscaling_groups[name] = group
self.update_attached_elbs(group.name)
self.update_attached_target_groups(group.name)
return group
def update_auto_scaling_group(
self,
name,
availability_zones,
desired_capacity,
max_size,
min_size,
launch_config_name,
launch_template,
vpc_zone_identifier,
health_check_period,
health_check_type,
new_instances_protected_from_scale_in=None,
):
"""
        The parameters DefaultCooldown, PlacementGroup and TerminationPolicies are not yet implemented.
"""
# TODO: Add MixedInstancesPolicy once implemented.
# Verify only a single launch config-like parameter is provided.
if launch_config_name and launch_template:
raise ValidationError(
"Valid requests must contain either LaunchTemplate, LaunchConfigurationName "
"or MixedInstancesPolicy parameter."
)
group = self.autoscaling_groups[name]
group.update(
availability_zones=availability_zones,
desired_capacity=desired_capacity,
max_size=max_size,
min_size=min_size,
launch_config_name=launch_config_name,
launch_template=launch_template,
vpc_zone_identifier=vpc_zone_identifier,
health_check_period=health_check_period,
health_check_type=health_check_type,
new_instances_protected_from_scale_in=new_instances_protected_from_scale_in,
)
return group
def describe_auto_scaling_groups(self, names):
groups = self.autoscaling_groups.values()
if names:
return [group for group in groups if group.name in names]
else:
return list(groups)
def delete_auto_scaling_group(self, group_name):
self.set_desired_capacity(group_name, 0)
self.autoscaling_groups.pop(group_name, None)
def describe_auto_scaling_instances(self, instance_ids):
instance_states = []
for group in self.autoscaling_groups.values():
instance_states.extend(
[
x
for x in group.instance_states
if not instance_ids or x.instance.id in instance_ids
]
)
return instance_states
def attach_instances(self, group_name, instance_ids):
group = self.autoscaling_groups[group_name]
original_size = len(group.instance_states)
if (original_size + len(instance_ids)) > group.max_size:
raise ResourceContentionError
else:
group.desired_capacity = original_size + len(instance_ids)
new_instances = [
InstanceState(
self.ec2_backend.get_instance(x),
protected_from_scale_in=group.new_instances_protected_from_scale_in,
autoscaling_group=group,
)
for x in instance_ids
]
for instance in new_instances:
self.ec2_backend.create_tags(
[instance.instance.id], {ASG_NAME_TAG: group.name}
)
group.instance_states.extend(new_instances)
self.update_attached_elbs(group.name)
self.update_attached_target_groups(group.name)
def set_instance_health(self, instance_id, health_status):
"""
        The ShouldRespectGracePeriod parameter is not yet implemented.
"""
instance = self.ec2_backend.get_instance(instance_id)
instance_state = next(
instance_state
for group in self.autoscaling_groups.values()
for instance_state in group.instance_states
if instance_state.instance.id == instance.id
)
instance_state.health_status = health_status
def detach_instances(self, group_name, instance_ids, should_decrement):
group = self.autoscaling_groups[group_name]
original_size = group.desired_capacity
detached_instances = [
x for x in group.instance_states if x.instance.id in instance_ids
]
for instance in detached_instances:
self.ec2_backend.delete_tags(
[instance.instance.id], {ASG_NAME_TAG: group.name}
)
new_instance_state = [
x for x in group.instance_states if x.instance.id not in instance_ids
]
group.instance_states = new_instance_state
if should_decrement:
group.desired_capacity = original_size - len(instance_ids)
group.set_desired_capacity(group.desired_capacity)
return detached_instances
def set_desired_capacity(self, group_name, desired_capacity):
group = self.autoscaling_groups[group_name]
group.set_desired_capacity(desired_capacity)
self.update_attached_elbs(group_name)
def change_capacity(self, group_name, scaling_adjustment):
group = self.autoscaling_groups[group_name]
desired_capacity = group.desired_capacity + scaling_adjustment
self.set_desired_capacity(group_name, desired_capacity)
def change_capacity_percent(self, group_name, scaling_adjustment):
"""http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/as-scale-based-on-demand.html
If PercentChangeInCapacity returns a value between 0 and 1,
Auto Scaling will round it off to 1. If the PercentChangeInCapacity
returns a value greater than 1, Auto Scaling will round it off to the
lower value. For example, if PercentChangeInCapacity returns 12.5,
then Auto Scaling will round it off to 12."""
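        # Worked example of the rule above: with desired_capacity=10 and
        # scaling_adjustment=25, 10 * 1.25 = 12.5 and int() truncates it to 12;
        # with scaling_adjustment=5, 10 * 1.05 = 10.5 lies strictly between 10
        # and 11, so the branch below rounds it up to 11.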
group = self.autoscaling_groups[group_name]
percent_change = 1 + (scaling_adjustment / 100.0)
desired_capacity = group.desired_capacity * percent_change
if group.desired_capacity < desired_capacity < group.desired_capacity + 1:
desired_capacity = group.desired_capacity + 1
else:
desired_capacity = int(desired_capacity)
self.set_desired_capacity(group_name, desired_capacity)
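# Worked example of the rounding rule above (numbers are illustrative):
# with desired_capacity = 10 and scaling_adjustment = 12, percent_change = 1.12
# and 10 * 1.12 = 11.2, which is truncated to 11; with desired_capacity = 4,
# 4 * 1.12 = 4.48 falls strictly between 4 and 5, so the increase is rounded
# up to one whole instance and the new desired capacity becomes 5.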
def create_lifecycle_hook(self, name, as_name, transition, timeout, result):
lifecycle_hook = FakeLifeCycleHook(name, as_name, transition, timeout, result)
self.lifecycle_hooks["%s_%s" % (as_name, name)] = lifecycle_hook
return lifecycle_hook
def describe_lifecycle_hooks(self, as_name, lifecycle_hook_names=None):
return [
lifecycle_hook
for lifecycle_hook in self.lifecycle_hooks.values()
if (lifecycle_hook.as_name == as_name)
and (
not lifecycle_hook_names or lifecycle_hook.name in lifecycle_hook_names
)
]
def delete_lifecycle_hook(self, as_name, name):
self.lifecycle_hooks.pop("%s_%s" % (as_name, name), None)
def put_scaling_policy(
self,
name,
policy_type,
metric_aggregation_type,
adjustment_type,
as_name,
min_adjustment_magnitude,
scaling_adjustment,
cooldown,
target_tracking_config,
step_adjustments,
estimated_instance_warmup,
predictive_scaling_configuration,
):
policy = FakeScalingPolicy(
name,
policy_type,
metric_aggregation_type,
adjustment_type=adjustment_type,
as_name=as_name,
min_adjustment_magnitude=min_adjustment_magnitude,
scaling_adjustment=scaling_adjustment,
cooldown=cooldown,
target_tracking_config=target_tracking_config,
step_adjustments=step_adjustments,
estimated_instance_warmup=estimated_instance_warmup,
predictive_scaling_configuration=predictive_scaling_configuration,
autoscaling_backend=self,
)
self.policies[name] = policy
return policy
def describe_policies(
self, autoscaling_group_name=None, policy_names=None, policy_types=None
):
return [
policy
for policy in self.policies.values()
if (not autoscaling_group_name or policy.as_name == autoscaling_group_name)
and (not policy_names or policy.name in policy_names)
and (not policy_types or policy.policy_type in policy_types)
]
def delete_policy(self, group_name):
self.policies.pop(group_name, None)
def execute_policy(self, group_name):
policy = self.policies[group_name]
policy.execute()
def update_attached_elbs(self, group_name):
group = self.autoscaling_groups[group_name]
group_instance_ids = set(
state.instance.id for state in group.active_instances()
)
# skip this if group.load_balancers is empty
# otherwise elb_backend.describe_load_balancers returns all available load balancers
if not group.load_balancers:
return
try:
elbs = self.elb_backend.describe_load_balancers(names=group.load_balancers)
except LoadBalancerNotFoundError:
# ELBs can be deleted before their autoscaling group
return
for elb in elbs:
elb_instance_ids = set(elb.instance_ids)
self.elb_backend.register_instances(
elb.name, group_instance_ids - elb_instance_ids, from_autoscaling=True
)
self.elb_backend.deregister_instances(
elb.name, elb_instance_ids - group_instance_ids, from_autoscaling=True
)
def update_attached_target_groups(self, group_name):
group = self.autoscaling_groups[group_name]
group_instance_ids = set(state.instance.id for state in group.instance_states)
# no action necessary if target_group_arns is empty
if not group.target_group_arns:
return
target_groups = self.elbv2_backend.describe_target_groups(
target_group_arns=group.target_group_arns,
load_balancer_arn=None,
names=None,
)
for target_group in target_groups:
asg_targets = [
{"id": x, "port": target_group.port} for x in group_instance_ids
]
self.elbv2_backend.register_targets(target_group.arn, (asg_targets))
def create_or_update_tags(self, tags):
for tag in tags:
group_name = tag["resource_id"]
group = self.autoscaling_groups[group_name]
old_tags = group.tags
new_tags = []
# if key was in old_tags, update old tag
for old_tag in old_tags:
if old_tag["key"] == tag["key"]:
new_tags.append(tag)
else:
new_tags.append(old_tag)
# if the key was not in old_tags, add it (create the tag)
if not any(new_tag["key"] == tag["key"] for new_tag in new_tags):
new_tags.append(tag)
group.tags = new_tags
def delete_tags(self, tags):
for tag_to_delete in tags:
group_name = tag_to_delete["resource_id"]
key_to_delete = tag_to_delete["key"]
group = self.autoscaling_groups[group_name]
old_tags = group.tags
group.tags = [x for x in old_tags if x["key"] != key_to_delete]
def attach_load_balancers(self, group_name, load_balancer_names):
group = self.autoscaling_groups[group_name]
group.load_balancers.extend(
[x for x in load_balancer_names if x not in group.load_balancers]
)
self.update_attached_elbs(group_name)
def describe_load_balancers(self, group_name):
return self.autoscaling_groups[group_name].load_balancers
def detach_load_balancers(self, group_name, load_balancer_names):
group = self.autoscaling_groups[group_name]
group_instance_ids = set(state.instance.id for state in group.instance_states)
elbs = self.elb_backend.describe_load_balancers(names=group.load_balancers)
for elb in elbs:
self.elb_backend.deregister_instances(
elb.name, group_instance_ids, from_autoscaling=True
)
group.load_balancers = [
x for x in group.load_balancers if x not in load_balancer_names
]
def attach_load_balancer_target_groups(self, group_name, target_group_arns):
group = self.autoscaling_groups[group_name]
group.append_target_groups(target_group_arns)
self.update_attached_target_groups(group_name)
def describe_load_balancer_target_groups(self, group_name):
return self.autoscaling_groups[group_name].target_group_arns
def detach_load_balancer_target_groups(self, group_name, target_group_arns):
group = self.autoscaling_groups[group_name]
group.target_group_arns = [
x for x in group.target_group_arns if x not in target_group_arns
]
for target_group in target_group_arns:
asg_targets = [{"id": x.instance.id} for x in group.instance_states]
self.elbv2_backend.deregister_targets(target_group, (asg_targets))
def suspend_processes(self, group_name, scaling_processes):
all_proc_names = [
"Launch",
"Terminate",
"AddToLoadBalancer",
"AlarmNotification",
"AZRebalance",
"HealthCheck",
"InstanceRefresh",
"ReplaceUnhealthy",
"ScheduledActions",
]
group = self.autoscaling_groups[group_name]
set_to_add = set(scaling_processes or all_proc_names)
group.suspended_processes = list(
set(group.suspended_processes).union(set_to_add)
)
def resume_processes(self, group_name, scaling_processes):
group = self.autoscaling_groups[group_name]
if scaling_processes:
group.suspended_processes = list(
set(group.suspended_processes).difference(set(scaling_processes))
)
else:
group.suspended_processes = []
def set_instance_protection(
self, group_name, instance_ids, protected_from_scale_in
):
group = self.autoscaling_groups[group_name]
protected_instances = [
x for x in group.instance_states if x.instance.id in instance_ids
]
for instance in protected_instances:
instance.protected_from_scale_in = protected_from_scale_in
def notify_terminate_instances(self, instance_ids):
for (
autoscaling_group_name,
autoscaling_group,
) in self.autoscaling_groups.items():
original_active_instance_count = len(autoscaling_group.active_instances())
autoscaling_group.instance_states = list(
filter(
lambda i_state: i_state.instance.id not in instance_ids,
autoscaling_group.instance_states,
)
)
difference = original_active_instance_count - len(
autoscaling_group.active_instances()
)
if difference > 0:
autoscaling_group.replace_autoscaling_group_instances(
difference, autoscaling_group.get_propagated_tags()
)
self.update_attached_elbs(autoscaling_group_name)
def enter_standby_instances(self, group_name, instance_ids, should_decrement):
group = self.autoscaling_groups[group_name]
original_size = group.desired_capacity
standby_instances = []
for instance_state in group.instance_states:
if instance_state.instance.id in instance_ids:
instance_state.lifecycle_state = "Standby"
standby_instances.append(instance_state)
if should_decrement:
group.desired_capacity = group.desired_capacity - len(instance_ids)
group.set_desired_capacity(group.desired_capacity)
return standby_instances, original_size, group.desired_capacity
def exit_standby_instances(self, group_name, instance_ids):
group = self.autoscaling_groups[group_name]
original_size = group.desired_capacity
standby_instances = []
for instance_state in group.instance_states:
if instance_state.instance.id in instance_ids:
instance_state.lifecycle_state = "InService"
standby_instances.append(instance_state)
group.desired_capacity = group.desired_capacity + len(instance_ids)
group.set_desired_capacity(group.desired_capacity)
return standby_instances, original_size, group.desired_capacity
def terminate_instance(self, instance_id, should_decrement):
instance = self.ec2_backend.get_instance(instance_id)
instance_state = next(
instance_state
for group in self.autoscaling_groups.values()
for instance_state in group.instance_states
if instance_state.instance.id == instance.id
)
group = instance.autoscaling_group
original_size = group.desired_capacity
self.detach_instances(group.name, [instance.id], should_decrement)
self.ec2_backend.terminate_instances([instance.id])
return instance_state, original_size, group.desired_capacity
def describe_tags(self, filters):
"""
Pagination is not yet implemented.
Only the `auto-scaling-group` and `propagate-at-launch` filters are implemented.
"""
resources = self.autoscaling_groups.values()
tags = list(itertools.chain(*[r.tags for r in resources]))
for f in filters:
if f["Name"] == "auto-scaling-group":
tags = [t for t in tags if t["resource_id"] in f["Values"]]
if f["Name"] == "propagate-at-launch":
values = [v.lower() for v in f["Values"]]
tags = [
t
for t in tags
if t.get("propagate_at_launch", "").lower() in values
]
return tags
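# A minimal sketch of the filter structure describe_tags expects (the group
# name is illustrative); each filter is the usual AWS Name/Values pair:
#
#   backend.describe_tags(filters=[
#       {"Name": "auto-scaling-group", "Values": ["my-asg"]},
#       {"Name": "propagate-at-launch", "Values": ["true"]},
#   ])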
autoscaling_backends = BackendDict(AutoScalingBackend, "ec2")
|
py | b401442ef5eb3bc058ed1290403af040198ac568 | # mininode.py - Deuscoin P2P network half-a-node
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# This python code was modified from ArtForz' public domain half-a-node, as
# found in the mini-node branch of http://github.com/jgarzik/pynode.
#
# NodeConn: an object which manages p2p connectivity to a deuscoin node
# NodeConnCB: a base class that describes the interface for receiving
# callbacks with network messages from a NodeConn
# CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....:
# data structures that should map to corresponding structures in
# deuscoin/primitives
# msg_block, msg_tx, msg_headers, etc.:
# data structures that represent network messages
# ser_*, deser_*: functions that handle serialization/deserialization
import struct
import socket
import asyncore
import binascii
import time
import sys
import random
import cStringIO
import hashlib
from threading import RLock
from threading import Thread
import logging
import copy
BIP0031_VERSION = 60000
MY_VERSION = 60001 # past bip-31 for ping/pong
MY_SUBVERSION = "/python-mininode-tester:0.0.1/"
MAX_INV_SZ = 50000
MAX_BLOCK_SIZE = 1000000
# Keep our own socket map for asyncore, so that we can track disconnects
# ourselves (to workaround an issue with closing an asyncore socket when
# using select)
mininode_socket_map = dict()
# One lock for synchronizing all data access between the networking thread (see
# NetworkThread below) and the thread running the test logic. For simplicity,
# NodeConn acquires this lock whenever delivering a message to a NodeConnCB,
# and whenever adding anything to the send buffer (in send_message()). This
# lock should be acquired in the thread running the test logic to synchronize
# access to any data shared with the NodeConnCB or NodeConn.
mininode_lock = RLock()
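# A minimal sketch of the convention described above (the attribute name is
# illustrative): test-thread code takes mininode_lock around any state that a
# NodeConnCB callback may also touch.
#
#   with mininode_lock:
#       blocks_seen = list(test_node.received_blocks)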
# Serialization/deserialization tools
def sha256(s):
return hashlib.new('sha256', s).digest()
def hash256(s):
return sha256(sha256(s))
def deser_string(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
return f.read(nit)
def ser_string(s):
if len(s) < 253:
return chr(len(s)) + s
elif len(s) < 0x10000:
return chr(253) + struct.pack("<H", len(s)) + s
elif len(s) < 0x100000000L:
return chr(254) + struct.pack("<I", len(s)) + s
return chr(255) + struct.pack("<Q", len(s)) + s
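# A small round-trip sketch of the two helpers above: serializing a string and
# then deserializing it from a file-like object yields the original payload.
#
#   buf = cStringIO.StringIO(ser_string("hello"))
#   assert deser_string(buf) == "hello"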
def deser_uint256(f):
r = 0L
for i in xrange(8):
t = struct.unpack("<I", f.read(4))[0]
r += t << (i * 32)
return r
def ser_uint256(u):
rs = ""
for i in xrange(8):
rs += struct.pack("<I", u & 0xFFFFFFFFL)
u >>= 32
return rs
def uint256_from_str(s):
r = 0L
t = struct.unpack("<IIIIIIII", s[:32])
for i in xrange(8):
r += t[i] << (i * 32)
return r
def uint256_from_compact(c):
nbytes = (c >> 24) & 0xFF
v = (c & 0xFFFFFFL) << (8 * (nbytes - 3))
return v
def deser_vector(f, c):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
r = []
for i in xrange(nit):
t = c()
t.deserialize(f)
r.append(t)
return r
def ser_vector(l):
r = ""
if len(l) < 253:
r = chr(len(l))
elif len(l) < 0x10000:
r = chr(253) + struct.pack("<H", len(l))
elif len(l) < 0x100000000L:
r = chr(254) + struct.pack("<I", len(l))
else:
r = chr(255) + struct.pack("<Q", len(l))
for i in l:
r += i.serialize()
return r
def deser_uint256_vector(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
r = []
for i in xrange(nit):
t = deser_uint256(f)
r.append(t)
return r
def ser_uint256_vector(l):
r = ""
if len(l) < 253:
r = chr(len(l))
elif len(l) < 0x10000:
r = chr(253) + struct.pack("<H", len(l))
elif len(l) < 0x100000000L:
r = chr(254) + struct.pack("<I", len(l))
else:
r = chr(255) + struct.pack("<Q", len(l))
for i in l:
r += ser_uint256(i)
return r
def deser_string_vector(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
r = []
for i in xrange(nit):
t = deser_string(f)
r.append(t)
return r
def ser_string_vector(l):
r = ""
if len(l) < 253:
r = chr(len(l))
elif len(l) < 0x10000:
r = chr(253) + struct.pack("<H", len(l))
elif len(l) < 0x100000000L:
r = chr(254) + struct.pack("<I", len(l))
else:
r = chr(255) + struct.pack("<Q", len(l))
for sv in l:
r += ser_string(sv)
return r
def deser_int_vector(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
r = []
for i in xrange(nit):
t = struct.unpack("<i", f.read(4))[0]
r.append(t)
return r
def ser_int_vector(l):
r = ""
if len(l) < 253:
r = chr(len(l))
elif len(l) < 0x10000:
r = chr(253) + struct.pack("<H", len(l))
elif len(l) < 0x100000000L:
r = chr(254) + struct.pack("<I", len(l))
else:
r = chr(255) + struct.pack("<Q", len(l))
for i in l:
r += struct.pack("<i", i)
return r
# Deserialize from a hex string representation (eg from RPC)
def FromHex(obj, hex_string):
obj.deserialize(cStringIO.StringIO(binascii.unhexlify(hex_string)))
return obj
# Convert a binary-serializable object to hex (eg for submission via RPC)
def ToHex(obj):
return binascii.hexlify(obj.serialize()).decode('utf-8')
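# A minimal sketch of the two helpers above (the variable holding the raw hex
# is a placeholder): a transaction fetched as hex over RPC can be decoded into
# a CTransaction and re-encoded for submission.
#
#   tx = FromHex(CTransaction(), raw_hex_from_rpc)
#   raw_hex_again = ToHex(tx)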
# Objects that map to deuscoind objects, which can be serialized/deserialized
class CAddress(object):
def __init__(self):
self.nServices = 1
self.pchReserved = "\x00" * 10 + "\xff" * 2
self.ip = "0.0.0.0"
self.port = 0
def deserialize(self, f):
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.pchReserved = f.read(12)
self.ip = socket.inet_ntoa(f.read(4))
self.port = struct.unpack(">H", f.read(2))[0]
def serialize(self):
r = ""
r += struct.pack("<Q", self.nServices)
r += self.pchReserved
r += socket.inet_aton(self.ip)
r += struct.pack(">H", self.port)
return r
def __repr__(self):
return "CAddress(nServices=%i ip=%s port=%i)" % (self.nServices,
self.ip, self.port)
class CInv(object):
typemap = {
0: "Error",
1: "TX",
2: "Block"}
def __init__(self, t=0, h=0L):
self.type = t
self.hash = h
def deserialize(self, f):
self.type = struct.unpack("<i", f.read(4))[0]
self.hash = deser_uint256(f)
def serialize(self):
r = ""
r += struct.pack("<i", self.type)
r += ser_uint256(self.hash)
return r
def __repr__(self):
return "CInv(type=%s hash=%064x)" \
% (self.typemap[self.type], self.hash)
class CBlockLocator(object):
def __init__(self):
self.nVersion = MY_VERSION
self.vHave = []
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vHave = deser_uint256_vector(f)
def serialize(self):
r = ""
r += struct.pack("<i", self.nVersion)
r += ser_uint256_vector(self.vHave)
return r
def __repr__(self):
return "CBlockLocator(nVersion=%i vHave=%s)" \
% (self.nVersion, repr(self.vHave))
class COutPoint(object):
def __init__(self, hash=0, n=0):
self.hash = hash
self.n = n
def deserialize(self, f):
self.hash = deser_uint256(f)
self.n = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = ""
r += ser_uint256(self.hash)
r += struct.pack("<I", self.n)
return r
def __repr__(self):
return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n)
class CTxIn(object):
def __init__(self, outpoint=None, scriptSig="", nSequence=0):
if outpoint is None:
self.prevout = COutPoint()
else:
self.prevout = outpoint
self.scriptSig = scriptSig
self.nSequence = nSequence
def deserialize(self, f):
self.prevout = COutPoint()
self.prevout.deserialize(f)
self.scriptSig = deser_string(f)
self.nSequence = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = ""
r += self.prevout.serialize()
r += ser_string(self.scriptSig)
r += struct.pack("<I", self.nSequence)
return r
def __repr__(self):
return "CTxIn(prevout=%s scriptSig=%s nSequence=%i)" \
% (repr(self.prevout), binascii.hexlify(self.scriptSig),
self.nSequence)
class CTxOut(object):
def __init__(self, nValue=0, scriptPubKey=""):
self.nValue = nValue
self.scriptPubKey = scriptPubKey
def deserialize(self, f):
self.nValue = struct.unpack("<q", f.read(8))[0]
self.scriptPubKey = deser_string(f)
def serialize(self):
r = ""
r += struct.pack("<q", self.nValue)
r += ser_string(self.scriptPubKey)
return r
def __repr__(self):
return "CTxOut(nValue=%i.%08i scriptPubKey=%s)" \
% (self.nValue // 100000000, self.nValue % 100000000,
binascii.hexlify(self.scriptPubKey))
class CTransaction(object):
def __init__(self, tx=None):
if tx is None:
self.nVersion = 1
self.vin = []
self.vout = []
self.nLockTime = 0
self.sha256 = None
self.hash = None
else:
self.nVersion = tx.nVersion
self.vin = copy.deepcopy(tx.vin)
self.vout = copy.deepcopy(tx.vout)
self.nLockTime = tx.nLockTime
self.sha256 = None
self.hash = None
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vin = deser_vector(f, CTxIn)
self.vout = deser_vector(f, CTxOut)
self.nLockTime = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
def serialize(self):
r = ""
r += struct.pack("<i", self.nVersion)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
r += struct.pack("<I", self.nLockTime)
return r
def rehash(self):
self.sha256 = None
self.calc_sha256()
def calc_sha256(self):
if self.sha256 is None:
self.sha256 = uint256_from_str(hash256(self.serialize()))
self.hash = hash256(self.serialize())[::-1].encode('hex_codec')
def is_valid(self):
self.calc_sha256()
for tout in self.vout:
if tout.nValue < 0 or tout.nValue > 21000000L * 100000000L:
return False
return True
def __repr__(self):
return "CTransaction(nVersion=%i vin=%s vout=%s nLockTime=%i)" \
% (self.nVersion, repr(self.vin), repr(self.vout), self.nLockTime)
class CBlockHeader(object):
def __init__(self, header=None):
if header is None:
self.set_null()
else:
self.nVersion = header.nVersion
self.hashPrevBlock = header.hashPrevBlock
self.hashMerkleRoot = header.hashMerkleRoot
self.nTime = header.nTime
self.nBits = header.nBits
self.nNonce = header.nNonce
self.sha256 = header.sha256
self.hash = header.hash
self.calc_sha256()
def set_null(self):
self.nVersion = 1
self.hashPrevBlock = 0
self.hashMerkleRoot = 0
self.nTime = 0
self.nBits = 0
self.nNonce = 0
self.sha256 = None
self.hash = None
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.hashPrevBlock = deser_uint256(f)
self.hashMerkleRoot = deser_uint256(f)
self.nTime = struct.unpack("<I", f.read(4))[0]
self.nBits = struct.unpack("<I", f.read(4))[0]
self.nNonce = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
def serialize(self):
r = ""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
return r
def calc_sha256(self):
if self.sha256 is None:
r = ""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
self.sha256 = uint256_from_str(hash256(r))
self.hash = hash256(r)[::-1].encode('hex_codec')
def rehash(self):
self.sha256 = None
self.calc_sha256()
return self.sha256
def __repr__(self):
return "CBlockHeader(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce)
class CBlock(CBlockHeader):
def __init__(self, header=None):
super(CBlock, self).__init__(header)
self.vtx = []
def deserialize(self, f):
super(CBlock, self).deserialize(f)
self.vtx = deser_vector(f, CTransaction)
def serialize(self):
r = ""
r += super(CBlock, self).serialize()
r += ser_vector(self.vtx)
return r
def calc_merkle_root(self):
hashes = []
for tx in self.vtx:
tx.calc_sha256()
hashes.append(ser_uint256(tx.sha256))
while len(hashes) > 1:
newhashes = []
for i in xrange(0, len(hashes), 2):
i2 = min(i+1, len(hashes)-1)
newhashes.append(hash256(hashes[i] + hashes[i2]))
hashes = newhashes
return uint256_from_str(hashes[0])
def is_valid(self):
self.calc_sha256()
target = uint256_from_compact(self.nBits)
if self.sha256 > target:
return False
for tx in self.vtx:
if not tx.is_valid():
return False
if self.calc_merkle_root() != self.hashMerkleRoot:
return False
return True
def solve(self):
self.rehash()
target = uint256_from_compact(self.nBits)
while self.sha256 > target:
self.nNonce += 1
self.rehash()
def __repr__(self):
return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vtx=%s)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx))
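# A minimal sketch (all field values are illustrative) of how tests typically
# assemble a block with the class above: fill in the header, attach
# transactions, recompute the merkle root, then grind the nonce with solve().
#
#   block = CBlock()
#   block.nVersion = 1
#   block.hashPrevBlock = tip_sha256          # integer sha256 of the current tip
#   block.nTime = int(time.time())
#   block.nBits = 0x207fffff                  # very easy target, as used on regtest
#   block.vtx.append(coinbase_tx)             # a CTransaction built elsewhere
#   block.hashMerkleRoot = block.calc_merkle_root()
#   block.solve()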
class CUnsignedAlert(object):
def __init__(self):
self.nVersion = 1
self.nRelayUntil = 0
self.nExpiration = 0
self.nID = 0
self.nCancel = 0
self.setCancel = []
self.nMinVer = 0
self.nMaxVer = 0
self.setSubVer = []
self.nPriority = 0
self.strComment = ""
self.strStatusBar = ""
self.strReserved = ""
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.nRelayUntil = struct.unpack("<q", f.read(8))[0]
self.nExpiration = struct.unpack("<q", f.read(8))[0]
self.nID = struct.unpack("<i", f.read(4))[0]
self.nCancel = struct.unpack("<i", f.read(4))[0]
self.setCancel = deser_int_vector(f)
self.nMinVer = struct.unpack("<i", f.read(4))[0]
self.nMaxVer = struct.unpack("<i", f.read(4))[0]
self.setSubVer = deser_string_vector(f)
self.nPriority = struct.unpack("<i", f.read(4))[0]
self.strComment = deser_string(f)
self.strStatusBar = deser_string(f)
self.strReserved = deser_string(f)
def serialize(self):
r = ""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<q", self.nRelayUntil)
r += struct.pack("<q", self.nExpiration)
r += struct.pack("<i", self.nID)
r += struct.pack("<i", self.nCancel)
r += ser_int_vector(self.setCancel)
r += struct.pack("<i", self.nMinVer)
r += struct.pack("<i", self.nMaxVer)
r += ser_string_vector(self.setSubVer)
r += struct.pack("<i", self.nPriority)
r += ser_string(self.strComment)
r += ser_string(self.strStatusBar)
r += ser_string(self.strReserved)
return r
def __repr__(self):
return "CUnsignedAlert(nVersion %d, nRelayUntil %d, nExpiration %d, nID %d, nCancel %d, nMinVer %d, nMaxVer %d, nPriority %d, strComment %s, strStatusBar %s, strReserved %s)" \
% (self.nVersion, self.nRelayUntil, self.nExpiration, self.nID,
self.nCancel, self.nMinVer, self.nMaxVer, self.nPriority,
self.strComment, self.strStatusBar, self.strReserved)
class CAlert(object):
def __init__(self):
self.vchMsg = ""
self.vchSig = ""
def deserialize(self, f):
self.vchMsg = deser_string(f)
self.vchSig = deser_string(f)
def serialize(self):
r = ""
r += ser_string(self.vchMsg)
r += ser_string(self.vchSig)
return r
def __repr__(self):
return "CAlert(vchMsg.sz %d, vchSig.sz %d)" \
% (len(self.vchMsg), len(self.vchSig))
# Objects that correspond to messages on the wire
class msg_version(object):
command = "version"
def __init__(self):
self.nVersion = MY_VERSION
self.nServices = 1
self.nTime = time.time()
self.addrTo = CAddress()
self.addrFrom = CAddress()
self.nNonce = random.getrandbits(64)
self.strSubVer = MY_SUBVERSION
self.nStartingHeight = -1
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
if self.nVersion == 10300:
self.nVersion = 300
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.nTime = struct.unpack("<q", f.read(8))[0]
self.addrTo = CAddress()
self.addrTo.deserialize(f)
if self.nVersion >= 106:
self.addrFrom = CAddress()
self.addrFrom.deserialize(f)
self.nNonce = struct.unpack("<Q", f.read(8))[0]
self.strSubVer = deser_string(f)
if self.nVersion >= 209:
self.nStartingHeight = struct.unpack("<i", f.read(4))[0]
else:
self.nStartingHeight = None
else:
self.addrFrom = None
self.nNonce = None
self.strSubVer = None
self.nStartingHeight = None
def serialize(self):
r = ""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<Q", self.nServices)
r += struct.pack("<q", self.nTime)
r += self.addrTo.serialize()
r += self.addrFrom.serialize()
r += struct.pack("<Q", self.nNonce)
r += ser_string(self.strSubVer)
r += struct.pack("<i", self.nStartingHeight)
return r
def __repr__(self):
return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i)' \
% (self.nVersion, self.nServices, time.ctime(self.nTime),
repr(self.addrTo), repr(self.addrFrom), self.nNonce,
self.strSubVer, self.nStartingHeight)
class msg_verack(object):
command = "verack"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return ""
def __repr__(self):
return "msg_verack()"
class msg_addr(object):
command = "addr"
def __init__(self):
self.addrs = []
def deserialize(self, f):
self.addrs = deser_vector(f, CAddress)
def serialize(self):
return ser_vector(self.addrs)
def __repr__(self):
return "msg_addr(addrs=%s)" % (repr(self.addrs))
class msg_alert(object):
command = "alert"
def __init__(self):
self.alert = CAlert()
def deserialize(self, f):
self.alert = CAlert()
self.alert.deserialize(f)
def serialize(self):
r = ""
r += self.alert.serialize()
return r
def __repr__(self):
return "msg_alert(alert=%s)" % (repr(self.alert), )
class msg_inv(object):
command = "inv"
def __init__(self, inv=None):
if inv is None:
self.inv = []
else:
self.inv = inv
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_inv(inv=%s)" % (repr(self.inv))
class msg_getdata(object):
command = "getdata"
def __init__(self, inv=None):
self.inv = inv if inv != None else []
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_getdata(inv=%s)" % (repr(self.inv))
class msg_getblocks(object):
command = "getblocks"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0L
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = ""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getblocks(locator=%s hashstop=%064x)" \
% (repr(self.locator), self.hashstop)
class msg_tx(object):
command = "tx"
def __init__(self, tx=CTransaction()):
self.tx = tx
def deserialize(self, f):
self.tx.deserialize(f)
def serialize(self):
return self.tx.serialize()
def __repr__(self):
return "msg_tx(tx=%s)" % (repr(self.tx))
class msg_block(object):
command = "block"
def __init__(self, block=None):
if block is None:
self.block = CBlock()
else:
self.block = block
def deserialize(self, f):
self.block.deserialize(f)
def serialize(self):
return self.block.serialize()
def __repr__(self):
return "msg_block(block=%s)" % (repr(self.block))
class msg_getaddr(object):
command = "getaddr"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return ""
def __repr__(self):
return "msg_getaddr()"
class msg_ping_prebip31(object):
command = "ping"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return ""
def __repr__(self):
return "msg_ping() (pre-bip31)"
class msg_ping(object):
command = "ping"
def __init__(self, nonce=0L):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = ""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_ping(nonce=%08x)" % self.nonce
class msg_pong(object):
command = "pong"
def __init__(self, nonce=0L):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = ""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_pong(nonce=%08x)" % self.nonce
class msg_mempool(object):
command = "mempool"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return ""
def __repr__(self):
return "msg_mempool()"
class msg_sendheaders(object):
command = "sendheaders"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return ""
def __repr__(self):
return "msg_sendheaders()"
# getheaders message has
# number of entries
# vector of hashes
# hash_stop (hash of last desired block header, 0 to get as many as possible)
class msg_getheaders(object):
command = "getheaders"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0L
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = ""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getheaders(locator=%s, stop=%064x)" \
% (repr(self.locator), self.hashstop)
# headers message has
# <count> <vector of block headers>
class msg_headers(object):
command = "headers"
def __init__(self):
self.headers = []
def deserialize(self, f):
# comment in deuscoind indicates these should be deserialized as blocks
blocks = deser_vector(f, CBlock)
for x in blocks:
self.headers.append(CBlockHeader(x))
def serialize(self):
blocks = [CBlock(x) for x in self.headers]
return ser_vector(blocks)
def __repr__(self):
return "msg_headers(headers=%s)" % repr(self.headers)
class msg_reject(object):
command = "reject"
def __init__(self):
self.message = ""
self.code = ""
self.reason = ""
self.data = 0L
def deserialize(self, f):
self.message = deser_string(f)
self.code = struct.unpack("<B", f.read(1))[0]
self.reason = deser_string(f)
if (self.message == "block" or self.message == "tx"):
self.data = deser_uint256(f)
def serialize(self):
r = ser_string(self.message)
r += struct.pack("<B", self.code)
r += ser_string(self.reason)
if (self.message == "block" or self.message == "tx"):
r += ser_uint256(self.data)
return r
def __repr__(self):
return "msg_reject: %s %d %s [%064x]" \
% (self.message, self.code, self.reason, self.data)
# This is what a callback should look like for NodeConn
# Reimplement the on_* functions to provide handling for events
class NodeConnCB(object):
def __init__(self):
self.verack_received = False
# deliver_sleep_time is helpful for debugging race conditions in p2p
# tests; it causes message delivery to sleep for the specified time
# before acquiring the global lock and delivering the next message.
self.deliver_sleep_time = None
def set_deliver_sleep_time(self, value):
with mininode_lock:
self.deliver_sleep_time = value
def get_deliver_sleep_time(self):
with mininode_lock:
return self.deliver_sleep_time
# Spin until verack message is received from the node.
# Tests may want to use this as a signal that the test can begin.
# This can be called from the testing thread, so it needs to acquire the
# global lock.
def wait_for_verack(self):
while True:
with mininode_lock:
if self.verack_received:
return
time.sleep(0.05)
def deliver(self, conn, message):
deliver_sleep = self.get_deliver_sleep_time()
if deliver_sleep is not None:
time.sleep(deliver_sleep)
with mininode_lock:
try:
getattr(self, 'on_' + message.command)(conn, message)
except:
print "ERROR delivering %s (%s)" % (repr(message),
sys.exc_info()[0])
def on_version(self, conn, message):
if message.nVersion >= 209:
conn.send_message(msg_verack())
conn.ver_send = min(MY_VERSION, message.nVersion)
if message.nVersion < 209:
conn.ver_recv = conn.ver_send
def on_verack(self, conn, message):
conn.ver_recv = conn.ver_send
self.verack_received = True
def on_inv(self, conn, message):
want = msg_getdata()
for i in message.inv:
if i.type != 0:
want.inv.append(i)
if len(want.inv):
conn.send_message(want)
def on_addr(self, conn, message): pass
def on_alert(self, conn, message): pass
def on_getdata(self, conn, message): pass
def on_getblocks(self, conn, message): pass
def on_tx(self, conn, message): pass
def on_block(self, conn, message): pass
def on_getaddr(self, conn, message): pass
def on_headers(self, conn, message): pass
def on_getheaders(self, conn, message): pass
def on_ping(self, conn, message):
if conn.ver_send > BIP0031_VERSION:
conn.send_message(msg_pong(message.nonce))
def on_reject(self, conn, message): pass
def on_close(self, conn): pass
def on_mempool(self, conn): pass
def on_pong(self, conn, message): pass
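# A minimal sketch of a concrete callback: subclass NodeConnCB and override the
# on_* handlers of interest (the bookkeeping attribute below is illustrative).
#
#   class BlockRecordingNode(NodeConnCB):
#       def __init__(self):
#           NodeConnCB.__init__(self)
#           self.blocks_received = []
#
#       def on_block(self, conn, message):
#           self.blocks_received.append(message.block)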
# The actual NodeConn class
# This class provides an interface for a p2p connection to a specified node
class NodeConn(asyncore.dispatcher):
messagemap = {
"version": msg_version,
"verack": msg_verack,
"addr": msg_addr,
"alert": msg_alert,
"inv": msg_inv,
"getdata": msg_getdata,
"getblocks": msg_getblocks,
"tx": msg_tx,
"block": msg_block,
"getaddr": msg_getaddr,
"ping": msg_ping,
"pong": msg_pong,
"headers": msg_headers,
"getheaders": msg_getheaders,
"reject": msg_reject,
"mempool": msg_mempool
}
MAGIC_BYTES = {
"mainnet": "\xf9\xbe\xb4\xd9", # mainnet
"testnet3": "\x0b\x11\x09\x07", # testnet3
"regtest": "\xfa\xbf\xb5\xda" # regtest
}
def __init__(self, dstaddr, dstport, rpc, callback, net="regtest", services=1):
asyncore.dispatcher.__init__(self, map=mininode_socket_map)
self.log = logging.getLogger("NodeConn(%s:%d)" % (dstaddr, dstport))
self.dstaddr = dstaddr
self.dstport = dstport
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.sendbuf = ""
self.recvbuf = ""
self.ver_send = 209
self.ver_recv = 209
self.last_sent = 0
self.state = "connecting"
self.network = net
self.cb = callback
self.disconnect = False
# stuff version msg into sendbuf
vt = msg_version()
vt.nServices = services
vt.addrTo.ip = self.dstaddr
vt.addrTo.port = self.dstport
vt.addrFrom.ip = "0.0.0.0"
vt.addrFrom.port = 0
self.send_message(vt, True)
print 'MiniNode: Connecting to Deuscoin Node IP # ' + dstaddr + ':' \
+ str(dstport)
try:
self.connect((dstaddr, dstport))
except:
self.handle_close()
self.rpc = rpc
def show_debug_msg(self, msg):
self.log.debug(msg)
def handle_connect(self):
self.show_debug_msg("MiniNode: Connected & Listening: \n")
self.state = "connected"
def handle_close(self):
self.show_debug_msg("MiniNode: Closing Connection to %s:%d... "
% (self.dstaddr, self.dstport))
self.state = "closed"
self.recvbuf = ""
self.sendbuf = ""
try:
self.close()
except:
pass
self.cb.on_close(self)
def handle_read(self):
try:
t = self.recv(8192)
if len(t) > 0:
self.recvbuf += t
self.got_data()
except:
pass
def readable(self):
return True
def writable(self):
with mininode_lock:
length = len(self.sendbuf)
return (length > 0)
def handle_write(self):
with mininode_lock:
try:
sent = self.send(self.sendbuf)
except:
self.handle_close()
return
self.sendbuf = self.sendbuf[sent:]
def got_data(self):
while True:
if len(self.recvbuf) < 4:
return
if self.recvbuf[:4] != self.MAGIC_BYTES[self.network]:
raise ValueError("got garbage %s" % repr(self.recvbuf))
if self.ver_recv < 209:
if len(self.recvbuf) < 4 + 12 + 4:
return
command = self.recvbuf[4:4+12].split("\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
checksum = None
if len(self.recvbuf) < 4 + 12 + 4 + msglen:
return
msg = self.recvbuf[4+12+4:4+12+4+msglen]
self.recvbuf = self.recvbuf[4+12+4+msglen:]
else:
if len(self.recvbuf) < 4 + 12 + 4 + 4:
return
command = self.recvbuf[4:4+12].split("\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
checksum = self.recvbuf[4+12+4:4+12+4+4]
if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen:
return
msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen]
th = sha256(msg)
h = sha256(th)
if checksum != h[:4]:
raise ValueError("got bad checksum " + repr(self.recvbuf))
self.recvbuf = self.recvbuf[4+12+4+4+msglen:]
if command in self.messagemap:
f = cStringIO.StringIO(msg)
t = self.messagemap[command]()
t.deserialize(f)
self.got_message(t)
else:
self.show_debug_msg("Unknown command: '" + command + "' " +
repr(msg))
def send_message(self, message, pushbuf=False):
if self.state != "connected" and not pushbuf:
return
self.show_debug_msg("Send %s" % repr(message))
command = message.command
data = message.serialize()
tmsg = self.MAGIC_BYTES[self.network]
tmsg += command
tmsg += "\x00" * (12 - len(command))
tmsg += struct.pack("<I", len(data))
if self.ver_send >= 209:
th = sha256(data)
h = sha256(th)
tmsg += h[:4]
tmsg += data
with mininode_lock:
self.sendbuf += tmsg
self.last_sent = time.time()
def got_message(self, message):
if message.command == "version":
if message.nVersion <= BIP0031_VERSION:
self.messagemap['ping'] = msg_ping_prebip31
if self.last_sent + 30 * 60 < time.time():
self.send_message(self.messagemap['ping']())
self.show_debug_msg("Recv %s" % repr(message))
self.cb.deliver(self, message)
def disconnect_node(self):
self.disconnect = True
class NetworkThread(Thread):
def run(self):
while mininode_socket_map:
# We check for whether to disconnect outside of the asyncore
# loop to work around the behavior of asyncore when using
# select
disconnected = []
for fd, obj in mininode_socket_map.items():
if obj.disconnect:
disconnected.append(obj)
[ obj.handle_close() for obj in disconnected ]
asyncore.loop(0.1, use_poll=True, map=mininode_socket_map, count=1)
# An exception we can raise if we detect a potential disconnect
# (p2p or rpc) before the test is complete
class EarlyDisconnectError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
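# A minimal end-to-end sketch of how the pieces in this module fit together
# (address and port are illustrative; rpc may be None when a test does not use it):
#
#   cb = NodeConnCB()
#   conn = NodeConn('127.0.0.1', 18444, rpc=None, callback=cb)   # registers itself in mininode_socket_map
#   NetworkThread().start()          # drives asyncore for all registered NodeConns
#   cb.wait_for_verack()             # block until the version handshake completes
#   conn.send_message(msg_ping(nonce=1))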
|
py | b401452d9943cefd023db86bfd1b49d133091afc | import torch
import numpy as np
import reward.utils as U
from reward.batcher import BaseBatcher
class RolloutBatcher(BaseBatcher):
def get_batch(self, act_fn):
if self.s is None:
self.s = self.transform_s(self.runner.reset())
self.s = U.to_tensor(self.s)
horizon = self.batch_size // self.runner.num_envs
batch = U.Batch(keys=["s_and_tp1", "ac", "r", "d"])
for i in range(horizon):
ac = act_fn(self.s, self.num_steps)
sn, r, d, info = self.runner.act(ac)
sn = U.to_tensor(self.transform_s(sn))
batch.s_and_tp1.append(self.s)
batch.ac.append(ac)
batch.r.append(r)
batch.d.append(d)
# batch.info.append(info)
self.s = sn
batch.s_and_tp1.append(self.s)
batch.s_and_tp1 = torch.stack(batch.s_and_tp1)
batch.s = batch.s_and_tp1[:-1]
batch.sn = batch.s_and_tp1[1:]
batch.ac = U.to_np(batch.ac)
batch.r = U.to_np(batch.r)
batch.d = U.to_np(batch.d)
batch = self.transform_batch(batch)
return batch
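# A rough usage sketch; the constructor arguments are assumed to come from
# BaseBatcher (their names may differ there), and `policy` stands in for
# whatever maps a state to an action.
#
#   batcher = RolloutBatcher(runner=runner, batch_size=2048)
#   batch = batcher.get_batch(act_fn=lambda s, step: policy(s))
#   # batch.s, batch.sn, batch.ac, batch.r and batch.d are then ready for an update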
|
py | b40147346280c612ad8dbd4b24a9c6eb3c889c84 | import argparse
import os
import random
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torch.autograd import Variable
from dataset.dataset_nocs import Dataset
from libs.network import KeyNet
from libs.loss import Loss
cate_list = ['bottle', 'bowl', 'camera', 'can', 'laptop', 'mug']
parser = argparse.ArgumentParser()
parser.add_argument('--dataset_root', type=str, default='img_dataset', help='dataset root dir')
parser.add_argument('--img_num', type=int, default=500, help='number of training images')
parser.add_argument('--resume', type=str, default='', help='resume model')
parser.add_argument('--num_points', type=int, default=500, help='points')
parser.add_argument('--workers', type=int, default=5, help='number of data loading workers')
parser.add_argument('--num_kp', type=int, default=8, help='number of kp')
parser.add_argument('--outf', type=str, default='models/', help='save dir')
parser.add_argument('--lr', type=float, default=0.0001, help='learning rate')
# num_cates and category are referenced below (model construction, checkpoint
# names via cate_list[opt.category - 1]); the defaults here are assumed
parser.add_argument('--num_cates', type=int, default=6, help='number of object categories')
parser.add_argument('--category', type=int, default=1, help='1-based index into cate_list of the category to train')
opt = parser.parse_args()
model = KeyNet(num_points=opt.num_points, num_key=opt.num_kp, num_cates=opt.num_cates)
model.cuda()
if opt.resume != '':
model.load_state_dict(torch.load('{0}/{1}'.format(opt.outf, opt.resume)))
dataset = Dataset(opt.dataset_root, opt.num_points, opt.img_num)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=opt.workers)
# validation data loader (required by the evaluation loop at the end of each epoch)
test_dataset = Dataset('val', opt.dataset_root, False, opt.num_points, opt.num_cates, 1000, opt.category)
testdataloader = torch.utils.data.DataLoader(test_dataset, batch_size=1, shuffle=True, num_workers=opt.workers)
criterion = Loss(opt.num_kp, opt.num_cates)
best_test = np.Inf
optimizer = optim.Adam(model.parameters(), lr=opt.lr)
for epoch in range(0, 500):
model.train()
train_dis_avg = 0.0
train_count = 0
optimizer.zero_grad()
for i, data in enumerate(dataloader, 0):
# the loader yields the category id (cate) as well, matching the validation loop below
img_fr, choose_fr, cloud_fr, r_fr, t_fr, img_to, choose_to, cloud_to, r_to, t_to, mesh, anchor, scale, cate = data
img_fr, choose_fr, cloud_fr, r_fr, t_fr, img_to, choose_to, cloud_to, r_to, t_to, mesh, anchor, scale, cate = \
    Variable(img_fr).cuda(), Variable(choose_fr).cuda(), Variable(cloud_fr).cuda(), \
    Variable(r_fr).cuda(), Variable(t_fr).cuda(), Variable(img_to).cuda(), \
    Variable(choose_to).cuda(), Variable(cloud_to).cuda(), Variable(r_to).cuda(), \
    Variable(t_to).cuda(), Variable(mesh).cuda(), Variable(anchor).cuda(), \
    Variable(scale).cuda(), Variable(cate).cuda()
Kp_fr, anc_fr, att_fr = model(img_fr, choose_fr, cloud_fr, anchor, scale, cate, t_fr)
Kp_to, anc_to, att_to = model(img_to, choose_to, cloud_to, anchor, scale, cate, t_to)
loss, _ = criterion(Kp_fr, Kp_to, anc_fr, anc_to, att_fr, att_to, r_fr, t_fr, r_to, t_to, mesh, scale, cate)
loss.backward()
train_dis_avg += loss.item()
train_count += 1
if train_count != 0 and train_count % 8 == 0:
optimizer.step()
optimizer.zero_grad()
print(train_count, float(train_dis_avg) / 8.0)
train_dis_avg = 0.0
if train_count != 0 and train_count % 100 == 0:
torch.save(model.state_dict(), '{0}/model_current_{1}.pth'.format(opt.outf, cate_list[opt.category - 1]))
optimizer.zero_grad()
model.eval()
score = []
for j, data in enumerate(testdataloader, 0):
img_fr, choose_fr, cloud_fr, r_fr, t_fr, img_to, choose_to, cloud_to, r_to, t_to, mesh, anchor, scale, cate = data
img_fr, choose_fr, cloud_fr, r_fr, t_fr, img_to, choose_to, cloud_to, r_to, t_to, mesh, anchor, scale, cate = \
    Variable(img_fr).cuda(), Variable(choose_fr).cuda(), Variable(cloud_fr).cuda(), \
    Variable(r_fr).cuda(), Variable(t_fr).cuda(), Variable(img_to).cuda(), \
    Variable(choose_to).cuda(), Variable(cloud_to).cuda(), Variable(r_to).cuda(), \
    Variable(t_to).cuda(), Variable(mesh).cuda(), Variable(anchor).cuda(), \
    Variable(scale).cuda(), Variable(cate).cuda()
Kp_fr, anc_fr, att_fr = model(img_fr, choose_fr, cloud_fr, anchor, scale, cate, t_fr)
Kp_to, anc_to, att_to = model(img_to, choose_to, cloud_to, anchor, scale, cate, t_to)
_, item_score = criterion(Kp_fr, Kp_to, anc_fr, anc_to, att_fr, att_to, r_fr, t_fr, r_to, t_to, mesh, scale,
cate)
print(item_score)
score.append(item_score)
test_dis = np.mean(np.array(score))
if test_dis < best_test:
best_test = test_dis
torch.save(model.state_dict(),
'{0}/model_{1}_{2}_{3}.pth'.format(opt.outf, epoch, test_dis, cate_list[opt.category - 1]))
print(epoch, '>>>>>>>>----------BEST TEST MODEL SAVED---------<<<<<<<<')
|
py | b401486dfa029314aa0b0dacf2fefd214cc2de47 | # -*- coding: utf-8 -*-
import pendulum
from flexmock import flexmock, flexmock_teardown
from ... import OratorTestCase
from orator.query.builder import QueryBuilder
from orator.query.grammars import QueryGrammar
from orator.query.expression import QueryExpression
from orator.orm.builder import Builder
from orator.orm.model import Model
from orator.orm.relations import HasMany
from orator.orm.collection import Collection
class OrmHasManyTestCase(OratorTestCase):
def tearDown(self):
flexmock_teardown()
def test_create_properly_creates_new_model(self):
relation = self._get_relation()
created = flexmock(Model(), save=lambda: True, set_attribute=lambda: None)
created.should_receive('save').once().and_return(True)
relation.get_related().should_receive('new_instance').once().with_args({'name': 'john'}).and_return(created)
created.should_receive('set_attribute').with_args('foreign_key', 1)
self.assertEqual(created, relation.create(name='john'))
def test_find_or_new_finds_model(self):
relation = self._get_relation()
model = flexmock()
model.foo = 'bar'
relation.get_query().should_receive('find').once().with_args('foo', ['*']).and_return(model)
model.should_receive('set_attribute').never()
self.assertEqual('bar', relation.find_or_new('foo').foo)
def test_find_or_new_returns_new_model_with_foreign_key_set(self):
relation = self._get_relation()
relation.get_query().should_receive('find').once().with_args('foo', ['*']).and_return(None)
model = flexmock()
model.foo = 'bar'
relation.get_related().should_receive('new_instance').once().with_args().and_return(model)
model.should_receive('set_attribute').once().with_args('foreign_key', 1)
self.assertEqual('bar', relation.find_or_new('foo').foo)
def test_first_or_new_finds_first_model(self):
relation = self._get_relation()
relation.get_query().should_receive('where').once().with_args({'foo': 'bar'}).and_return(relation.get_query())
model = flexmock()
model.foo = 'bar'
relation.get_query().should_receive('first').once().with_args().and_return(model)
model.should_receive('set_attribute').never()
self.assertEqual('bar', relation.first_or_new(foo='bar').foo)
def test_first_or_new_returns_new_model_with_foreign_key_set(self):
relation = self._get_relation()
relation.get_query().should_receive('where').once().with_args({'foo': 'bar'}).and_return(relation.get_query())
relation.get_query().should_receive('first').once().with_args().and_return(None)
model = flexmock()
model.foo = 'bar'
relation.get_related().should_receive('new_instance').once().with_args().and_return(model)
model.should_receive('set_attribute').once().with_args('foreign_key', 1)
self.assertEqual('bar', relation.first_or_new(foo='bar').foo)
def test_first_or_create_finds_first_model(self):
relation = self._get_relation()
relation.get_query().should_receive('where').once().with_args({'foo': 'bar'}).and_return(relation.get_query())
model = flexmock()
model.foo = 'bar'
relation.get_query().should_receive('first').once().with_args().and_return(model)
model.should_receive('set_attribute').never()
self.assertEqual('bar', relation.first_or_create(foo='bar').foo)
def test_first_or_create_returns_new_model_with_foreign_key_set(self):
relation = self._get_relation()
relation.get_query().should_receive('where').once().with_args({'foo': 'bar'}).and_return(relation.get_query())
relation.get_query().should_receive('first').once().with_args().and_return(None)
model = flexmock()
model.foo = 'bar'
relation.get_related().should_receive('new_instance').once().with_args({'foo': 'bar'}).and_return(model)
model.should_receive('save').once().and_return(True)
model.should_receive('set_attribute').once().with_args('foreign_key', 1)
self.assertEqual('bar', relation.first_or_create(foo='bar').foo)
def test_update_or_create_finds_first_model_and_updates(self):
relation = self._get_relation()
relation.get_query().should_receive('where').once().with_args({'foo': 'bar'}).and_return(relation.get_query())
model = flexmock()
model.foo = 'bar'
relation.get_query().should_receive('first').once().with_args().and_return(model)
relation.get_related().should_receive('new_instance').never()
model.should_receive('fill').once().with_args({'foo': 'baz'})
model.should_receive('save').once()
self.assertEqual('bar', relation.update_or_create({'foo': 'bar'}, {'foo': 'baz'}).foo)
def test_update_or_create_creates_new_model_with_foreign_key_set(self):
relation = self._get_relation()
relation.get_query().should_receive('where').once().with_args({'foo': 'bar'}).and_return(relation.get_query())
relation.get_query().should_receive('first').once().with_args().and_return(None)
model = flexmock()
model.foo = 'bar'
relation.get_related().should_receive('new_instance').once().and_return(model)
model.should_receive('fill').once().with_args({'foo': 'baz'})
model.should_receive('save').once()
model.should_receive('set_attribute').once().with_args('foreign_key', 1)
self.assertEqual('bar', relation.update_or_create({'foo': 'bar'}, {'foo': 'baz'}).foo)
def test_update_updates_models_with_timestamps(self):
relation = self._get_relation()
relation.get_related().should_receive('uses_timestamps').once().and_return(True)
now = pendulum.now()
relation.get_related().should_receive('fresh_timestamp').once().and_return(now)
relation.get_query().should_receive('update').once().with_args({'foo': 'bar', 'updated_at': now}).and_return('results')
self.assertEqual('results', relation.update(foo='bar'))
def test_relation_is_properly_initialized(self):
relation = self._get_relation()
model = flexmock(Model())
model.should_receive('set_relation').once().with_args('foo', Collection)
models = relation.init_relation([model], 'foo')
self.assertEqual([model], models)
def test_eager_constraints_are_properly_added(self):
relation = self._get_relation()
relation.get_query().get_query().should_receive('where_in').once().with_args('table.foreign_key', [1, 2])
model1 = OrmHasOneModelStub()
model1.id = 1
model2 = OrmHasOneModelStub()
model2.id = 2
relation.add_eager_constraints([model1, model2])
def test_save_many_returns_list_of_models(self):
relation = self._get_relation()
model1 = flexmock()
model1.foo = 'foo'
model1.should_receive('save').once().and_return(True)
model1.should_receive('set_attribute').once().with_args('foreign_key', 1)
model2 = flexmock()
model2.foo = 'bar'
model2.should_receive('save').once().and_return(True)
model2.should_receive('set_attribute').once().with_args('foreign_key', 1)
self.assertEqual([model1, model2], relation.save_many([model1, model2]))
def test_models_are_properly_matched_to_parents(self):
relation = self._get_relation()
result1 = OrmHasOneModelStub()
result1.foreign_key = 1
result2 = OrmHasOneModelStub()
result2.foreign_key = 2
result3 = OrmHasOneModelStub()
result3.foreign_key = 2
model1 = OrmHasOneModelStub()
model1.id = 1
model2 = OrmHasOneModelStub()
model2.id = 2
model3 = OrmHasOneModelStub()
model3.id = 3
relation.get_related().should_receive('new_collection').replace_with(lambda l=None: Collection(l))
relation.get_query().should_receive('where').with_args('table.foreign_key', '=', 2)
relation.get_query().should_receive('where').with_args('table.foreign_key', '=', 3)
models = relation.match([model1, model2, model3], Collection([result1, result2, result3]), 'foo')
self.assertEqual(1, models[0].foo[0].foreign_key)
self.assertEqual(1, len(models[0].foo))
self.assertEqual(2, models[1].foo[0].foreign_key)
self.assertEqual(2, models[1].foo[1].foreign_key)
self.assertEqual(2, len(models[1].foo))
self.assertTrue(models[2].foo.is_empty())
def test_relation_count_query_can_be_built(self):
relation = self._get_relation()
query = flexmock(QueryBuilder(None, QueryGrammar(), None))
builder = Builder(query)
builder.get_query().should_receive('select').once()
relation.get_parent().should_receive('get_table').and_return('table')
builder.should_receive('where').once().with_args('table.foreign_key', '=', QueryExpression)
parent_query = flexmock(QueryBuilder(None, None, None))
relation.get_query().should_receive('get_query').and_return(parent_query)
grammar = flexmock()
parent_query.should_receive('get_grammar').once().and_return(grammar)
grammar.should_receive('wrap').once().with_args('table.id')
relation.get_relation_count_query(builder, builder)
def _get_relation(self):
flexmock(Builder)
query = flexmock(QueryBuilder(None, QueryGrammar(), None))
builder = Builder(query)
builder.should_receive('where').with_args('table.foreign_key', '=', 1)
related = flexmock(Model())
related.should_receive('new_query').and_return(builder)
builder.should_receive('get_model').and_return(related)
parent = flexmock(Model())
parent.should_receive('get_attribute').with_args('id').and_return(1)
parent.should_receive('get_created_at_column').and_return('created_at')
parent.should_receive('get_updated_at_column').and_return('updated_at')
parent.should_receive('new_query').and_return(builder)
return HasMany(builder, parent, 'table.foreign_key', 'id')
class OrmHasOneModelStub(Model):
pass
|
py | b4014a275b29db55cea136361b7986c1d76f3f71 | """engine.SCons.Variables.PackageVariable
This file defines the option type for SCons implementing 'package
activation'.
To be used whenever a 'package' may be enabled/disabled and the
package path may be specified.
Usage example:
Examples:
x11=no (disables X11 support)
x11=yes (will search for the package installation dir)
x11=/usr/local/X11 (will check this path for existence)
To replace autoconf's --with-xxx=yyy
opts = Variables()
opts.Add(PackageVariable('x11',
'use X11 installed here (yes = search some places)',
'yes'))
...
if env['x11'] == True:
dir = ... search X11 in some standard places ...
env['x11'] = dir
if env['x11']:
... build with x11 ...
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Variables/PackageVariable.py 5023 2010/06/14 22:05:46 scons"
__all__ = ['PackageVariable',]
import SCons.Errors
__enable_strings = ('1', 'yes', 'true', 'on', 'enable', 'search')
__disable_strings = ('0', 'no', 'false', 'off', 'disable')
def _converter(val):
"""
"""
lval = val.lower()
if lval in __enable_strings: return True
if lval in __disable_strings: return False
#raise ValueError("Invalid value for boolean option: %s" % val)
return val
def _validator(key, val, env, searchfunc):
# NB: searchfunc is currently undocumented and unsupported
"""
"""
# todo: write validator, check for path
import os
if env[key] is True:
if searchfunc:
env[key] = searchfunc(key, val)
elif env[key] and not os.path.exists(val):
raise SCons.Errors.UserError(
'Path does not exist for option %s: %s' % (key, val))
def PackageVariable(key, help, default, searchfunc=None):
# NB: searchfunc is currently undocumented and unsupported
"""
The input parameters describe a 'package' option; they are returned
with the correct converter and validator appended. The result is
usable as input to opts.Add().
A 'package' option may be set to one of the recognized enable/disable
strings (e.g. 'yes' or 'no') to turn the package on or off, or to a
path to the package installation, which is checked for existence.
"""
help = '\n '.join(
(help, '( yes | no | /path/to/%s )' % key))
return (key, help, default,
lambda k, v, e: _validator(k,v,e,searchfunc),
_converter)
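# Minimal usage sketch (illustrative, not part of the original module): the
# tuple returned above plugs straight into Variables.Add(), and _converter
# maps enable/disable strings to booleans while leaving paths untouched.
#
#   key, help_text, default, validator, converter = PackageVariable(
#       'x11', 'use X11 installed here (yes = search some places)', 'yes')
#   assert converter('no') is False
#   assert converter('/usr/local/X11') == '/usr/local/X11'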
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
py | b4014b8ee2cb25bf233f46f383d4cf3809248cf6 | from enum import IntEnum
class EntityLabelCatalog(IntEnum):
"""Enumerated named entity labels."""
# spaCy default labels
PERSON = 0 # People, including fictional
NORP = 1 # Nationalities or religious or political groups
FAC = 2 # Buildings, airports, highways, bridges, etc.
ORG = 3 # Companies, agencies, institutions, etc.
    GPE = 4 # Countries, cities, states.
LOC = 5 # Non-GPE locations, mountain ranges, bodies of water
PRODUCT = 6 # Objects, vehicles, foods, etc. (Not services.)
EVENT = 7 # Named hurricanes, battles, wars, sports events, etc.
WORK_OF_ART = 8 # Titles of books, songs, etc.
LAW = 9 # Named documents made into laws.
LANGUAGE = 10 # Any named language.
DATE = 11 # Absolute or relative dates or periods.
TIME = 12 # Times smaller than a day.
PERCENT = 13 # Percentage, including ”%“.
MONEY = 14 # Monetary values, including unit.
QUANTITY = 15 # Measurements, as of weight or distance.
ORDINAL = 16 # “first”, “second”, etc.
CARDINAL = 17 # Numerals that do not fall under another type.
# Additional VK labels
SSN = 18 # Social security number
EMAIL_ADDRESS = 19 # Email address
PHYSICAL_ADDRESS = 20 # Physical address (any format)
PHONE_NUMBER = 21 # Phone number (any format)
@classmethod
def get_names(cls):
"""Return names of all classes."""
return [label.name for label in cls]
@classmethod
def get_spacy_labels(cls):
"""Return names of all spaCy labels."""
return [label.name for i, label in enumerate(cls) if i <= 17]
@classmethod
def get_vk_labels(cls):
"""Return names of all VK custom labels."""
return [label.name for i, label in enumerate(cls) if i > 17]
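# Minimal usage sketch (illustrative, not part of the original module): the
# class methods above partition the catalog into spaCy's 18 built-in labels
# and the 4 custom VK labels.
if __name__ == "__main__":
    print(EntityLabelCatalog.get_spacy_labels())  # ['PERSON', ..., 'CARDINAL']
    print(EntityLabelCatalog.get_vk_labels())     # ['SSN', ..., 'PHONE_NUMBER']
    assert len(EntityLabelCatalog.get_names()) == 22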
|
py | b4014df3f81c0aead1c6fd9bf4a3dbf13d905b0a | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# My imports
import numpy as np
from astropy import coordinates as coord
from astropy import units as u
import pandas as pd
from clint.textui import puts, colored
import time
from ParallaxSpec import parallax
from astroquery.simbad import Simbad
import warnings
warnings.filterwarnings('ignore')
from astroquery.irsa_dust import IrsaDust
from astroquery.vizier import Vizier
def GAIAplx(ra, de):
v = Vizier(columns=["*", "+_r"], catalog='I/345/gaia2')
pos=coord.SkyCoord(ra=ra, dec=de,unit=(u.hourangle,u.deg),frame='icrs',obstime='J2000')
result=v.query_region(pos, radius="10s", catalog='I/345/gaia2')
# Moving the positions to 2000
try:
nlines=len(result[0]['RA_ICRS'])
deltat=-15.5
sep=[]
for ig,name in enumerate(result[0]['Source']):
raold=result[0]['RA_ICRS'].data[ig]+(result[0]['pmRA'].data[ig] *deltat)/3600000.
deold=result[0]['DE_ICRS'].data[ig]+(result[0]['pmDE'].data[ig] *deltat)/3600000.
posold = coord.ICRS(ra=raold * u.deg, dec=deold * u.deg)
sep.append(pos.separation(posold).arcsecond)
indG=np.argmin(sep)
if sep[indG]<1.5 and result[0]['Plx'].data[indG]>0:
return str(round(result[0]['Plx'].data[indG],2)), str(round(result[0]['e_Plx'].data[indG],2))
except:
return 'NULL','NULL'
return 'NULL','NULL'
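# Illustrative call (hedged: needs network access to VizieR through astroquery,
# and the coordinates below are made up): GAIAplx takes sexagesimal RA/Dec
# strings and returns the Gaia DR2 parallax and its error as strings, or
# ('NULL', 'NULL') when no counterpart with a positive parallax is found
# within ~1.5 arcsec.
#
#   plx, eplx = GAIAplx('14 29 42.95', '-62 40 46.2')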
def torres(name, teff=False, logg=False, feh=False):
"""
Calculates the mass and error from Torres. See source for more information
"""
from TorresMass import massTorres
T, Terr = teff
L, Lerr = logg
F, Ferr = feh
try:
Terr, Lerr, Ferr = float(Terr), float(Lerr), float(Ferr)
T, L, F = float(T), float(L), float(F)
except ValueError:
puts(colored.red('No mass derived for this star...'))
return 'NULL', 'NULL'
M, Merr = massTorres(T, Terr, L, Lerr, F, Ferr)
puts(colored.green('Done'))
return round(M, 2), round(Merr, 2)
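# Illustrative call (hedged: massTorres lives in the local TorresMass module,
# which is not shown here, and the numbers below are arbitrary): each
# parameter is a (value, error) pair and the result is (mass, error) in solar
# masses, rounded to two decimals, or ('NULL', 'NULL') on unparsable input.
#
#   M, Merr = torres('some star', teff=(5777, 50), logg=(4.44, 0.05), feh=(0.0, 0.05))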
def variable_assignment(digits):
try:
if digits > 0:
x = '%.2f' % round(eval(input('> ')), digits)
else:
x = '%d' % round(eval(input('> ')), digits)
except SyntaxError as e:
x = 'NULL'
return x
if __name__ == '__main__':
nasa_str = input('\nExoplanet from NASA database? [True/False]: ')
if nasa_str == 'True':
nasa = True
print('Adding exoplanets from the NASA database')
elif nasa_str == 'False':
nasa = False
print('Adding exoplanets from the EU database')
else:
print('Answer different from True or False\nBye...')
        exit()
    # stars = np.loadtxt('names.txt', dtype='S', delimiter='\t', usecols=(0,))  # fails when the file has only one line
with open('names.txt') as f:
stars = f.readlines()
f.close()
var = 'Y'
# Read the data from exoplanet.eu
fields = ['star_name', 'ra', 'dec', 'mag_v',
'star_metallicity', 'star_metallicity_error_min',
'star_metallicity_error_max',
'star_teff', 'star_teff_error_min', 'star_teff_error_max']
# Read data from NASA exoplanet archive
fields_nasa = ['pl_hostname', 'hd_name', 'ra', 'dec', 'ra_str', 'dec_str',
'st_vj', 'st_vjerr',
'st_metfe', 'st_metfeerr1', 'st_metfeerr2',
'st_teff', 'st_tefferr1', 'st_tefferr2',
'st_plx', 'st_plxerr1', 'st_plxerr2',
'st_logg', 'st_loggerr1', 'st_loggerr2',
'st_mass', 'st_masserr1', 'st_masserr2',
'st_spstr']
if nasa:
# Loading NASA exoplanet archive
exo_all = pd.read_csv('nasaexo.csv',
skipinitialspace=True, usecols=fields_nasa)
# Changing some column names to match exoplanet.EU
exo_all = exo_all.rename(columns={"pl_hostname": "star_name",
"st_vj": "mag_v",
"st_vjerr": "mag_v_err",
"st_teff": "star_teff",
"st_tefferr1": "star_teff_error_max",
"st_tefferr2": "star_teff_error_min",
"st_metfe": "star_metallicity",
"st_metfeerr1": "star_metallicity_error_max",
"st_metfeerr2": "star_metallicity_error_min"})
else:
        # Loading exoplanet.EU
exo_all = pd.read_csv('exo.csv', skipinitialspace=True, usecols=fields)
# Remove trailing whitespaces
exo_all.star_name = exo_all.star_name.str.strip()
print(exo_all.star_name)
output = 'WEBSITE_online_EU-NASA_to_ADD.rdb'
for i, star in enumerate(stars):
star = star.strip('\n').replace('K0I', 'KOI')
print(star)
exo = exo_all[exo_all.star_name == star]
next = True
print('')
print('Star: ' + colored.green(star))
try:
name = exo.star_name.values[0]
except IndexError as e:
print('')
puts(colored.red(star) + ' not found. Star added in the file manual.list.')
print('')
manual = open('manual.list', "a")
manual.write(star+'\n')
manual.close()
next = False
# Update the list of new hosts
with open('names.txt', 'w') as names:
# if the last star was added so no star is updated
if i+1==len(stars):
names.write('')
else:
for j in stars[i+1:]:
names.write(j)
names.close()
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
# if the star is found in the exoplanet.eu
# or if found in NASA exoplanet archive
if next:
print('')
var = input('Continue? [Y/N]: ')
if var.upper().strip() == 'Y':
# Get RA and dec (can be passed for NASA exoplanets)
ra, dec = float(exo.ra.values[0]), float(exo.dec.values[0])
c = coord.SkyCoord(ra, dec, unit=(u.degree, u.degree),
frame='icrs')
RA = list(c.ra.hms)
RA[0] = str(int(RA[0])).zfill(2)
RA[1] = str(int(RA[1])).zfill(2)
RA[2] = str(round(RA[2], 2)).zfill(4)
if len(RA[2]) == 4:
RA[2] += '0'
RA = "{0} {1} {2}".format(*RA)
DEC = list(c.dec.dms)
DEC[0] = str(int(DEC[0])).zfill(2)
DEC[1] = str(abs(int(DEC[1]))).zfill(2)
DEC[2] = str(abs(round(DEC[2], 2))).zfill(4)
if int(DEC[0]) > 0:
DEC[0] = '+'+DEC[0]
if len(DEC[2]) == 4:
DEC[2] += '0'
DEC = "{0} {1} {2}".format(*DEC)
# Search in Simbad the parallax, Vmag and spectral type
customSimbad = Simbad()
# customSimbad.add_votable_fields('plx','plx_error','flux(V)','flux_error(V)','sptype','otype','ids','dist')
customSimbad.add_votable_fields('plx', 'plx_error', 'flux(V)',
'flux_error(V)', 'sptype',
'otype', 'ids')
result = customSimbad.query_region(coord.SkyCoord(ra=c.ra,
dec=c.dec,
frame='icrs'),
radius='15s')
empty = 'NULL'
# Here comes the user interface part...
puts(colored.black('\nStandard parameters\n'))
# The metallicity error
if ~np.isnan(exo.star_metallicity_error_min.values[0]) and ~np.isnan(exo.star_metallicity_error_max.values[0]):
errFeH_exo = (abs(exo.star_metallicity_error_min.values[0]) +
abs(exo.star_metallicity_error_max.values[0])) / 2.0
elif ~np.isnan(exo.star_metallicity_error_min.values[0]):
errFeH_exo = abs(exo.star_metallicity_error_min.values[0])
elif ~np.isnan(exo.star_metallicity_error_max.values[0]):
errFeH_exo = abs(exo.star_metallicity_error_max.values[0])
else:
errFeH_exo = np.nan
# The metallicity
FeH_exo = exo.star_metallicity.values[0]
if np.isnan(FeH_exo):
puts('The ' + colored.yellow('[Fe/H]'))
FeH = variable_assignment(2)
puts('The error on ' + colored.yellow('[Fe/H]'))
Ferr = variable_assignment(2)
else:
FeH = round(float(FeH_exo), 2)
if np.isnan(errFeH_exo):
puts('The error on ' + colored.yellow('[Fe/H]'))
Ferr = variable_assignment(2)
else:
Ferr = round(errFeH_exo, 2)
print('Fe/H: ', FeH, '+-', Ferr)
# The effective temperature error
if ~np.isnan(exo.star_teff_error_min.values[0]) and ~np.isnan(exo.star_teff_error_max.values[0]):
errTeff_exo = (abs(exo.star_teff_error_min.values[0]) +
abs(exo.star_teff_error_max.values[0])) / 2.0
elif ~np.isnan(exo.star_teff_error_min.values[0]):
errTeff_exo = abs(exo.star_teff_error_min.values[0])
elif ~np.isnan(exo.star_teff_error_max.values[0]):
errTeff_exo = abs(exo.star_teff_error_max.values[0])
else:
errTeff_exo = np.nan
# The effective temperature
Teff_exo = exo.star_teff.values[0]
if np.isnan(Teff_exo):
puts('The ' + colored.yellow('Teff'))
Teff = variable_assignment(0)
puts('The error on ' + colored.yellow('Teff'))
Tefferr = variable_assignment(0)
else:
                    # Teff is stored as an integer, not a float
Teff = int(Teff_exo)
if ~np.isnan(errTeff_exo):
Tefferr = int(errTeff_exo)
else:
puts('The error on ' + colored.yellow('Teff'))
Tefferr = variable_assignment(0)
print('Teff: ', Teff, '+-', Tefferr)
                # The NASA database provides logg values
# The logg error and logg
if nasa:
if ~np.isnan(exo.st_loggerr2.values[0]) and ~np.isnan(exo.st_loggerr1.values[0]):
errlogg_exo = (abs(exo.st_loggerr2.values[0]) +
abs(exo.st_loggerr1.values[0])) / 2.0
elif ~np.isnan(exo.st_loggerr2.values[0]):
errlogg_exo = abs(exo.st_loggerr2.values[0])
elif ~np.isnan(exo.st_loggerr1.values[0]):
errlogg_exo = abs(exo.st_loggerr1.values[0])
else:
errlogg_exo = np.nan
logg_exo = exo.st_logg.values[0]
if np.isnan(logg_exo):
puts('The ' + colored.yellow('logg'))
logg = variable_assignment(2)
puts('The error on ' + colored.yellow('logg'))
loggerr = variable_assignment(2)
else:
# logg is a float
logg = round(float(logg_exo), 2)
if ~np.isnan(errlogg_exo):
loggerr = round(errlogg_exo, 2)
else:
puts('The error on ' + colored.yellow('logg'))
loggerr = variable_assignment(0)
else:
# The log g and log g error for EU database
puts('The ' + colored.yellow('logg'))
logg = variable_assignment(2)
puts('The error on ' + colored.yellow('logg'))
loggerr = variable_assignment(2)
print('logg: ', logg, '+-', loggerr)
# The mass
puts(colored.magenta('Calculating the mass...'))
M, Merr = torres(name, [Teff, Tefferr],
[logg, loggerr], feh=[FeH, Ferr])
# The microturbulence number
puts('The '+colored.yellow('microturbulence'))
vt = variable_assignment(2)
puts('The error on '+colored.yellow('microturbulence'))
vterr = variable_assignment(2)
# Author and link to ADS
puts('Who is the '+colored.yellow('author?'))
author = input('> ').strip()
if author == '':
author = empty
puts('Link to article ('+colored.yellow('ADS')+')')
link = input('> ').strip()
if link == '':
link = empty
# Source flag
puts(colored.yellow('Source flag'))
source = input('(0/1) > ')
if source == '':
source = '0'
V_exo = exo.mag_v.values[0]
try:
# select the star and not the planet,
# they have the same coordinates
if len(result) > 1:
                        indr = np.where((result['OTYPE'] != 'Planet') &
                                        (result['OTYPE'] != 'Planet?') &
                                        (result['OTYPE'] != 'brownD*'))[0][0]
else:
indr = 0
RA = str(result['RA'][indr])[:11]
DEC = str(result['DEC'][indr])[:12]
# The HD number
HD=empty
for iname in result['IDS'][indr].split('|'):
if iname[:2]=='HD':
HD=iname.replace('HD ','')
# The V magnitude
if type(result['FLUX_V'][indr])!=np.ma.core.MaskedConstant:
V=round(float(result['FLUX_V'][indr]), 2)
if type(result['FLUX_ERROR_V'][indr])!=np.ma.core.MaskedConstant:
Verr=round(float(result['FLUX_ERROR_V'][indr]), 2)
else:
print('\nV magnitude = '+str(V))
puts('The error on ' + colored.yellow('V magnitude'))
Verr = variable_assignment(2)
if Verr == '':
Verr = 'NULL'
else:
if ~np.isnan(V_exo):
V = round(float(V_exo), 2)
else:
puts('The ' + colored.yellow('V magnitude'))
V = variable_assignment(2)
if V == '':
V = 'NULL'
print('\nV magnitude = '+str(V))
puts('The error on ' + colored.yellow('V magnitude'))
Verr = variable_assignment(2)
if Verr == '':
Verr = 'NULL'
# The parallax
plx, eplx = GAIAplx(RA, DEC)
if plx != 'NULL':
p = plx
perr = eplx
pflag = 'GAIADR2'
elif type(result['PLX_VALUE'][indr]) != np.ma.core.MaskedConstant:
p = round(float(result['PLX_VALUE'][indr]), 2)
                        if type(result['PLX_ERROR'][indr]) != np.ma.core.MaskedConstant:
perr = round(float(result['PLX_ERROR'][indr]), 2)
else:
perr = empty
pflag = 'Simbad'
else:
try:
pos = coord.SkyCoord(ra=ra, dec=dec,
unit=(u.hourangle,u.deg),
frame='icrs')
#AvSF = Schlafly & Finkbeiner 2011 (ApJ 737, 103)
tableAv = IrsaDust.get_query_table(pos,
radius='02d',
section='ebv',
timeout=60)
Av = tableAv['ext SandF mean'].data[0]
Averr = tableAv['ext SandF std'].data[0]
except:
Av = 0
Averr = 0
try:
p, perr = [round(x, 2) for x in parallax(Teff,
Tefferr,
float(logg),
float(loggerr),
V, Verr, M, Merr, Av, Averr)]
pflag = 'Spec'
except:
p = 'NULL'
perr = 'NULL'
pflag = 'NULL'
# Comments
if result['SP_TYPE'][indr] != '' and result['SP_TYPE'][indr][0] == 'M':
comment = result['SP_TYPE'][indr]
else:
puts('Any '+colored.yellow('comments'))
puts('E.g. if we have a M dwarf...')
comment = input('> ')
if comment == '':
comment = 'NULL'
# Exoplanet database
puts('From which exoplanet database:' + colored.yellow('EU or NASA or EU,NASA'))
database = input('> ')
if database == '':
database = 'NULL'
except:
# The HD number
puts('The '+colored.yellow('HD number'))
HD = input('> ')
if HD == '':
HD = 'NULL'
# The V magnitude
if ~np.isnan(V_exo):
V = round(float(V_exo), 2)
else:
puts('The ' + colored.yellow('V magnitude'))
V = variable_assignment(2)
print('\nV magnitude = '+str(V))
puts('The error on ' + colored.yellow('V magnitude'))
Verr = variable_assignment(2)
# The parallax
plx, eplx = GAIAplx(RA, DEC)
if plx != 'NULL':
p = plx
perr = eplx
pflag = 'GAIADR2'
else:
try:
pos = coord.SkyCoord(ra=RA, dec=DEC,
unit=(u.hourangle, u.deg),
frame='icrs')
#AvSF = Schlafly & Finkbeiner 2011 (ApJ 737, 103)
tableAv = IrsaDust.get_query_table(pos,
radius='02d',
section='ebv',
timeout=60)
Av = tableAv['ext SandF mean'].data[0]
Averr = tableAv['ext SandF std'].data[0]
except:
Av = 0
Averr = 0
try:
p, perr = [round(x, 2) for x in parallax(Teff,
Tefferr,
logg, loggerr,
V, Verr, M, Merr, Av, Averr)]
pflag = 'Spec'
# print p,perr
except:
p = 'NULL'
perr = 'NULL'
pflag = 'NULL'
# Comments
puts('Any '+colored.yellow('comments'))
puts('E.g. if we have a M dwarf...')
comment = input('> ')
if comment == '':
comment = 'NULL'
# Exoplanet database
puts('From which exoplanet database: ' + colored.yellow('EU or NASA or EU,NASA'))
database = input('> ')
if database == '':
database = 'NULL'
# Last update
update = str(time.strftime("%Y-%m-%d"))
params = [name, HD, RA, DEC, V, Verr, p, perr, pflag,
Teff, Tefferr, logg, loggerr, 'NULL', 'NULL',
vt, vterr, FeH, Ferr, M, Merr,
author, link, source, update, comment, database]
params = list(map(str, params))
# New host information
with open(output, 'a') as f:
f.write('\n'+'\t'.join(params) + '\tNULL')
f.close()
# Update the list of new hosts
with open('names.txt', 'w') as names:
# if the last star was added so no star is updated
if i+1 == len(stars):
names.write('')
else:
for j in stars[i+1:]:
names.write(j)
names.close()
print('')
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
else:
print('Bye...')
break
|
py | b4014e3387647b2270d3e186b0448a74121bda50 | "Test the functionality of Python classes implementing operators."
import sys
import unittest
testmeths = [
# Binary operations
"add",
"radd",
"sub",
"rsub",
"mul",
"rmul",
"matmul",
"rmatmul",
"truediv",
"rtruediv",
"floordiv",
"rfloordiv",
"mod",
"rmod",
"divmod",
"rdivmod",
"pow",
"rpow",
"rshift",
"rrshift",
"lshift",
"rlshift",
"and",
"rand",
"or",
"ror",
"xor",
"rxor",
# List/dict operations
"contains",
"getitem",
"setitem",
"delitem",
# Unary operations
"neg",
"pos",
"abs",
# generic operations
"init",
]
# These need to return something other than None
# "hash",
# "str",
# "repr",
# "int",
# "float",
# These are separate because they can influence the test of other methods.
# "getattr",
# "setattr",
# "delattr",
callLst = []
def trackCall(f):
def track(*args, **kwargs):
callLst.append((f.__name__, args))
return f(*args, **kwargs)
return track
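# Illustrative aside (not part of the original test): trackCall wraps each
# special method so that invoking one, e.g. __add__ via AllTests() + 1,
# appends ("__add__", (instance, 1)) to callLst, which assertCallStack later
# compares against the expected call list.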
statictests = """
@trackCall
def __hash__(self, *args):
return hash(id(self))
@trackCall
def __str__(self, *args):
return "AllTests"
@trackCall
def __repr__(self, *args):
return "AllTests"
@trackCall
def __int__(self, *args):
return 1
@trackCall
def __index__(self, *args):
return 1
@trackCall
def __float__(self, *args):
return 1.0
@trackCall
def __eq__(self, *args):
return True
@trackCall
def __ne__(self, *args):
return False
@trackCall
def __lt__(self, *args):
return False
@trackCall
def __le__(self, *args):
return True
@trackCall
def __gt__(self, *args):
return False
@trackCall
def __ge__(self, *args):
return True
"""
# Synthesize all the other AllTests methods from the names in testmeths.
method_template = """\
@trackCall
def __%s__(self, *args):
pass
"""
d = {}
exec(statictests, globals(), d)
for method in testmeths:
exec(method_template % method, globals(), d)
AllTests = type("AllTests", (object,), d)
del d, statictests, method, method_template
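# Illustrative aside (not part of the original test): the three-argument form
# of type() used above builds a class from a namespace dict at runtime, e.g.
#
#   Point = type("Point", (object,), {"x": 0})
#   assert Point().x == 0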
class ClassTests(unittest.TestCase):
def setUp(self):
callLst[:] = []
def assertCallStack(self, expected_calls):
actualCallList = callLst[:] # need to copy because the comparison below will add
# additional calls to callLst
if expected_calls != actualCallList:
self.fail("Expected call list:\n %s\ndoes not match actual call list\n %s" %
(expected_calls, actualCallList))
def testInit(self):
foo = AllTests()
self.assertCallStack([("__init__", (foo,))])
def testBinaryOps(self):
testme = AllTests()
# Binary operations
callLst[:] = []
testme + 1
self.assertCallStack([("__add__", (testme, 1))])
callLst[:] = []
1 + testme
self.assertCallStack([("__radd__", (testme, 1))])
callLst[:] = []
testme - 1
self.assertCallStack([("__sub__", (testme, 1))])
callLst[:] = []
1 - testme
self.assertCallStack([("__rsub__", (testme, 1))])
callLst[:] = []
testme * 1
self.assertCallStack([("__mul__", (testme, 1))])
callLst[:] = []
1 * testme
self.assertCallStack([("__rmul__", (testme, 1))])
callLst[:] = []
testme @ 1
self.assertCallStack([("__matmul__", (testme, 1))])
callLst[:] = []
1 @ testme
self.assertCallStack([("__rmatmul__", (testme, 1))])
callLst[:] = []
testme / 1
self.assertCallStack([("__truediv__", (testme, 1))])
callLst[:] = []
1 / testme
self.assertCallStack([("__rtruediv__", (testme, 1))])
callLst[:] = []
testme // 1
self.assertCallStack([("__floordiv__", (testme, 1))])
callLst[:] = []
1 // testme
self.assertCallStack([("__rfloordiv__", (testme, 1))])
callLst[:] = []
testme % 1
self.assertCallStack([("__mod__", (testme, 1))])
callLst[:] = []
1 % testme
self.assertCallStack([("__rmod__", (testme, 1))])
callLst[:] = []
divmod(testme,1)
self.assertCallStack([("__divmod__", (testme, 1))])
callLst[:] = []
divmod(1, testme)
self.assertCallStack([("__rdivmod__", (testme, 1))])
callLst[:] = []
testme ** 1
self.assertCallStack([("__pow__", (testme, 1))])
callLst[:] = []
1 ** testme
self.assertCallStack([("__rpow__", (testme, 1))])
callLst[:] = []
testme >> 1
self.assertCallStack([("__rshift__", (testme, 1))])
callLst[:] = []
1 >> testme
self.assertCallStack([("__rrshift__", (testme, 1))])
callLst[:] = []
testme << 1
self.assertCallStack([("__lshift__", (testme, 1))])
callLst[:] = []
1 << testme
self.assertCallStack([("__rlshift__", (testme, 1))])
callLst[:] = []
testme & 1
self.assertCallStack([("__and__", (testme, 1))])
callLst[:] = []
1 & testme
self.assertCallStack([("__rand__", (testme, 1))])
callLst[:] = []
testme | 1
self.assertCallStack([("__or__", (testme, 1))])
callLst[:] = []
1 | testme
self.assertCallStack([("__ror__", (testme, 1))])
callLst[:] = []
testme ^ 1
self.assertCallStack([("__xor__", (testme, 1))])
callLst[:] = []
1 ^ testme
self.assertCallStack([("__rxor__", (testme, 1))])
def testListAndDictOps(self):
testme = AllTests()
# List/dict operations
class Empty: pass
try:
1 in Empty()
self.fail('failed, should have raised TypeError')
except TypeError:
pass
callLst[:] = []
1 in testme
self.assertCallStack([('__contains__', (testme, 1))])
callLst[:] = []
testme[1]
self.assertCallStack([('__getitem__', (testme, 1))])
callLst[:] = []
testme[1] = 1
self.assertCallStack([('__setitem__', (testme, 1, 1))])
callLst[:] = []
del testme[1]
self.assertCallStack([('__delitem__', (testme, 1))])
callLst[:] = []
testme[:42]
self.assertCallStack([('__getitem__', (testme, slice(None, 42)))])
callLst[:] = []
testme[:42] = "The Answer"
self.assertCallStack([('__setitem__', (testme, slice(None, 42),
"The Answer"))])
callLst[:] = []
del testme[:42]
self.assertCallStack([('__delitem__', (testme, slice(None, 42)))])
callLst[:] = []
testme[2:1024:10]
self.assertCallStack([('__getitem__', (testme, slice(2, 1024, 10)))])
callLst[:] = []
testme[2:1024:10] = "A lot"
self.assertCallStack([('__setitem__', (testme, slice(2, 1024, 10),
"A lot"))])
callLst[:] = []
del testme[2:1024:10]
self.assertCallStack([('__delitem__', (testme, slice(2, 1024, 10)))])
callLst[:] = []
testme[:42, ..., :24:, 24, 100]
self.assertCallStack([('__getitem__', (testme, (slice(None, 42, None),
Ellipsis,
slice(None, 24, None),
24, 100)))])
callLst[:] = []
testme[:42, ..., :24:, 24, 100] = "Strange"
self.assertCallStack([('__setitem__', (testme, (slice(None, 42, None),
Ellipsis,
slice(None, 24, None),
24, 100), "Strange"))])
callLst[:] = []
del testme[:42, ..., :24:, 24, 100]
self.assertCallStack([('__delitem__', (testme, (slice(None, 42, None),
Ellipsis,
slice(None, 24, None),
24, 100)))])
def testUnaryOps(self):
testme = AllTests()
callLst[:] = []
-testme
self.assertCallStack([('__neg__', (testme,))])
callLst[:] = []
+testme
self.assertCallStack([('__pos__', (testme,))])
callLst[:] = []
abs(testme)
self.assertCallStack([('__abs__', (testme,))])
callLst[:] = []
int(testme)
self.assertCallStack([('__int__', (testme,))])
callLst[:] = []
float(testme)
self.assertCallStack([('__float__', (testme,))])
callLst[:] = []
oct(testme)
self.assertCallStack([('__index__', (testme,))])
callLst[:] = []
hex(testme)
self.assertCallStack([('__index__', (testme,))])
def testMisc(self):
testme = AllTests()
callLst[:] = []
hash(testme)
self.assertCallStack([('__hash__', (testme,))])
callLst[:] = []
repr(testme)
self.assertCallStack([('__repr__', (testme,))])
callLst[:] = []
str(testme)
self.assertCallStack([('__str__', (testme,))])
callLst[:] = []
testme == 1
self.assertCallStack([('__eq__', (testme, 1))])
callLst[:] = []
testme < 1
self.assertCallStack([('__lt__', (testme, 1))])
callLst[:] = []
testme > 1
self.assertCallStack([('__gt__', (testme, 1))])
callLst[:] = []
testme != 1
self.assertCallStack([('__ne__', (testme, 1))])
callLst[:] = []
1 == testme
self.assertCallStack([('__eq__', (1, testme))])
callLst[:] = []
1 < testme
self.assertCallStack([('__gt__', (1, testme))])
callLst[:] = []
1 > testme
self.assertCallStack([('__lt__', (1, testme))])
callLst[:] = []
1 != testme
self.assertCallStack([('__ne__', (1, testme))])
def testGetSetAndDel(self):
# Interfering tests
class ExtraTests(AllTests):
@trackCall
def __getattr__(self, *args):
return "SomeVal"
@trackCall
def __setattr__(self, *args):
pass
@trackCall
def __delattr__(self, *args):
pass
testme = ExtraTests()
callLst[:] = []
testme.spam
self.assertCallStack([('__getattr__', (testme, "spam"))])
callLst[:] = []
testme.eggs = "spam, spam, spam and ham"
self.assertCallStack([('__setattr__', (testme, "eggs",
"spam, spam, spam and ham"))])
callLst[:] = []
del testme.cardinal
self.assertCallStack([('__delattr__', (testme, "cardinal"))])
def testDel(self):
x = []
class DelTest:
def __del__(self):
x.append("crab people, crab people")
testme = DelTest()
del testme
import gc
gc.collect()
self.assertEqual(["crab people, crab people"], x)
def testBadTypeReturned(self):
# return values of some method are type-checked
class BadTypeClass:
def __int__(self):
return None
__float__ = __int__
__complex__ = __int__
__str__ = __int__
__repr__ = __int__
__bytes__ = __int__
__bool__ = __int__
__index__ = __int__
def index(x):
return [][x]
for f in [float, complex, str, repr, bytes, bin, oct, hex, bool, index]:
self.assertRaises(TypeError, f, BadTypeClass())
def testHashStuff(self):
# Test correct errors from hash() on objects with comparisons but
# no __hash__
class C0:
pass
hash(C0()) # This should work; the next two should raise TypeError
class C2:
def __eq__(self, other): return 1
self.assertRaises(TypeError, hash, C2())
@unittest.skipIf(hasattr(sys, "pyston_version_info"), "Pyston disables recursion checking")
def testSFBug532646(self):
# Test for SF bug 532646
class A:
pass
A.__call__ = A()
a = A()
try:
a() # This should not segfault
except RecursionError:
pass
else:
self.fail("Failed to raise RecursionError")
def testForExceptionsRaisedInInstanceGetattr2(self):
# Tests for exceptions raised in instance_getattr2().
def booh(self):
raise AttributeError("booh")
class A:
a = property(booh)
try:
A().a # Raised AttributeError: A instance has no attribute 'a'
except AttributeError as x:
if str(x) != "booh":
self.fail("attribute error for A().a got masked: %s" % x)
class E:
__eq__ = property(booh)
E() == E() # In debug mode, caused a C-level assert() to fail
class I:
__init__ = property(booh)
try:
# In debug mode, printed XXX undetected error and
# raises AttributeError
I()
except AttributeError as x:
pass
else:
self.fail("attribute error for I.__init__ got masked")
def assertNotOrderable(self, a, b):
with self.assertRaises(TypeError):
a < b
with self.assertRaises(TypeError):
a > b
with self.assertRaises(TypeError):
a <= b
with self.assertRaises(TypeError):
a >= b
def testHashComparisonOfMethods(self):
# Test comparison and hash of methods
class A:
def __init__(self, x):
self.x = x
def f(self):
pass
def g(self):
pass
def __eq__(self, other):
return True
def __hash__(self):
raise TypeError
class B(A):
pass
a1 = A(1)
a2 = A(1)
self.assertTrue(a1.f == a1.f)
self.assertFalse(a1.f != a1.f)
self.assertFalse(a1.f == a2.f)
self.assertTrue(a1.f != a2.f)
self.assertFalse(a1.f == a1.g)
self.assertTrue(a1.f != a1.g)
self.assertNotOrderable(a1.f, a1.f)
self.assertEqual(hash(a1.f), hash(a1.f))
self.assertFalse(A.f == a1.f)
self.assertTrue(A.f != a1.f)
self.assertFalse(A.f == A.g)
self.assertTrue(A.f != A.g)
self.assertTrue(B.f == A.f)
self.assertFalse(B.f != A.f)
self.assertNotOrderable(A.f, A.f)
self.assertEqual(hash(B.f), hash(A.f))
# the following triggers a SystemError in 2.4
a = A(hash(A.f)^(-1))
hash(a.f)
def testSetattrWrapperNameIntern(self):
# Issue #25794: __setattr__ should intern the attribute name
class A:
pass
def add(self, other):
return 'summa'
name = str(b'__add__', 'ascii') # shouldn't be optimized
self.assertIsNot(name, '__add__') # not interned
type.__setattr__(A, name, add)
self.assertEqual(A() + 1, 'summa')
name2 = str(b'__add__', 'ascii')
self.assertIsNot(name2, '__add__')
self.assertIsNot(name2, name)
type.__delattr__(A, name2)
with self.assertRaises(TypeError):
A() + 1
def testSetattrNonStringName(self):
class A:
pass
with self.assertRaises(TypeError):
type.__setattr__(A, b'x', None)
def testConstructorErrorMessages(self):
# bpo-31506: Improves the error message logic for object_new & object_init
# Class without any method overrides
class C:
pass
error_msg = r'C.__init__\(\) takes exactly one argument \(the instance to initialize\)'
with self.assertRaisesRegex(TypeError, r'C\(\) takes no arguments'):
C(42)
with self.assertRaisesRegex(TypeError, r'C\(\) takes no arguments'):
C.__new__(C, 42)
with self.assertRaisesRegex(TypeError, error_msg):
C().__init__(42)
with self.assertRaisesRegex(TypeError, r'C\(\) takes no arguments'):
object.__new__(C, 42)
with self.assertRaisesRegex(TypeError, error_msg):
object.__init__(C(), 42)
# Class with both `__init__` & `__new__` method overridden
class D:
def __new__(cls, *args, **kwargs):
super().__new__(cls, *args, **kwargs)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
error_msg = r'object.__new__\(\) takes exactly one argument \(the type to instantiate\)'
with self.assertRaisesRegex(TypeError, error_msg):
D(42)
with self.assertRaisesRegex(TypeError, error_msg):
D.__new__(D, 42)
with self.assertRaisesRegex(TypeError, error_msg):
object.__new__(D, 42)
# Class that only overrides __init__
class E:
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
error_msg = r'object.__init__\(\) takes exactly one argument \(the instance to initialize\)'
with self.assertRaisesRegex(TypeError, error_msg):
E().__init__(42)
with self.assertRaisesRegex(TypeError, error_msg):
object.__init__(E(), 42)
if __name__ == '__main__':
unittest.main()
|