ext (stringclasses, 9 values) | sha (stringlengths, 40 to 40) | content (stringlengths, 3 to 1.04M) |
---|---|---|
py | b40798c0960553f287213fb82863d3bd03c0197e | from django.conf.urls.defaults import *
from tastypie.api import Api
from validation.api.resources import NoteResource, UserResource
api = Api(api_name='v1')
api.register(NoteResource(), canonical=True)
api.register(UserResource(), canonical=True)
urlpatterns = patterns('',
url(r'^api/', include(api.urls)),
)
|
py | b40798c415c51805081f1dbf467ecd6374e6fb2d | f = open("romanTest.txt", 'w')
numeralArr = [
(1000, "M"),
(500, "D"),
(100, "C"),
(50, "L"),
(10, "X"),
(5, "V"),
(1, "I")]
out = ""
def convert(num, nums, iters, halfs):
global out
if num >= nums[0] - iters[0]:
out += iters[1] + nums[1]
num -= nums[0] - iters[0]
elif num < nums[0] - iters[0]:
if halfs[0]: out += halfs[2]; num -= halfs[1]
out += iters[1] * (num // iters[0])
num -= iters[0] * (num // iters[0])
elif num == nums[0]:
out += nums[1]
num -= nums[0]
return num
for userNum in range(1, 5000):
preserveNum = userNum
out = ""
for x in range(0, len(numeralArr) - 2, 2):
number, numeral = numeralArr[x]
halfNumber, halfNumeral = numeralArr[x + 1]
iterNumber, iterNumeral = numeralArr[x + 2]
out += numeral * (userNum // number)
userNum -= number * (userNum // number)
userNum = convert(userNum, (number, numeral) if userNum >= halfNumber else (halfNumber, halfNumeral), (iterNumber, iterNumeral), [userNum >= halfNumber, halfNumber, halfNumeral])
f.write(str(preserveNum) + " " + out + "\n")
f.close()
numerals = open("numerals.txt")
romans = open("romanTest.txt")
nums = numerals.readlines()
roms = romans.readlines()
for x in range(len(nums)):
num = nums[x]
rom = roms[x]
if(num != rom): print(num, rom, sep="") |
py | b4079b1200881a59ab5d3758bbbab8861002170e | """Implementation of Rule L007."""
from sqlfluff.core.rules.base import BaseRule, LintResult, RuleContext
after_description = "Operators near newlines should be after, not before the newline"
before_description = "Operators near newlines should be before, not after the newline"
class Rule_L007(BaseRule):
"""Operators should follow a standard for being before/after newlines.
| **Anti-pattern**
| The • character represents a space.
| If ``operator_new_lines = after`` (or unspecified, as this is the default)
| In this example, the operator '+' should not be at the end of the second line.
.. code-block:: sql
SELECT
a +
b
FROM foo
| **Best practice**
| If ``operator_new_lines = after`` (or unspecified, as this is the default)
| Place the operator after the newline.
.. code-block:: sql
SELECT
a
+ b
FROM foo
| If ``operator_new_lines = before``
| Place the operator before the newline.
.. code-block:: sql
SELECT
a +
b
FROM foo
"""
config_keywords = ["operator_new_lines"]
def _eval(self, context: RuleContext) -> LintResult:
"""Operators should follow a standard for being before/after newlines.
We use the memory to keep track of whitespace up to now, and
whether the last code segment was an operator or not.
Anchor is our signal as to whether there's a problem.
We only trigger if we have an operator FOLLOWED BY a newline
before the next meaningful code segment.
"""
anchor = None
memory = context.memory
description = after_description
if self.operator_new_lines == "before": # type: ignore
description = before_description
# The parent stack tells us whether we're in an expression or not.
if context.parent_stack and context.parent_stack[-1].is_type("expression"):
if context.segment.is_code:
# This is code, what kind?
if context.segment.is_type("binary_operator", "comparison_operator"):
# If it's an operator, then check if in "before" mode
if self.operator_new_lines == "before": # type: ignore
# If we're in "before" mode, then check if newline since last
# code
for s in memory["since_code"]:
if s.name == "newline":
# Had a newline - so mark this operator as a fail
anchor = context.segment
# TODO: Work out a nice fix for this failure.
elif memory["last_code"] and memory["last_code"].is_type(
"binary_operator", "comparison_operator"
):
# It's not an operator, but the last code was.
if self.operator_new_lines != "before": # type: ignore
# If in "after" mode, then check to see
# there is a newline between us and the last operator.
for s in memory["since_code"]:
if s.name == "newline":
# Had a newline - so mark last operator as a fail
anchor = memory["last_code"]
# TODO: Work out a nice fix for this failure.
# Prepare memory for later
memory["last_code"] = context.segment
memory["since_code"] = []
else:
# This isn't a code segment...
# Prepare memory for later
memory["since_code"].append(context.segment)
else:
# Reset the memory if we're not in an expression
memory = {"last_code": None, "since_code": []}
# Anchor is our signal as to whether there's a problem
if anchor:
return LintResult(anchor=anchor, memory=memory, description=description)
else:
return LintResult(memory=memory)
|
py | b4079b38a58b4c95b606b5b0d7f4ecc9fae0b7c8 | import pytest
from s3parq.session_helper import SessionHelper
class Test:
def test_no_ec2(self):
'''Verify that False is returned if not run on an ec2 server'''
is_ec2 = SessionHelper._is_ec2(self)
assert is_ec2 is False
'''Verify that ec2 flag evaluates to false if not run on an ec2 server'''
sh = SessionHelper(
region='cheeseburger',
cluster_id='cheeseburger_id',
host='cheeseburger_host',
port='cheeseburger_port',
db_name='cheeseburger_params',
ec2_user='cheeseburger_params')
assert sh.is_ec2_flag is False
def test_iam_user(self):
'''Verify that iam_user will assume the value of ec2_user when run on an ec2 server'''
sh = SessionHelper(
region='cheeseburger',
cluster_id='cheeseburger_id',
host='cheeseburger_host',
port='cheeseburger_port',
db_name='cheeseburger_params',
ec2_user='cheeseburger_user')
# Fake is_ec2_flag to be True
sh.is_ec2_flag = True
sh.set_iam_user()
assert sh.iam_user == 'cheeseburger_user'
def test_no_ec2_user(self):
'''Verify that SessionHelper can accept None as an argument for ec2 user'''
sh = SessionHelper(
region='cheeseburger',
cluster_id='cheeseburger_id',
host='cheeseburger_host',
port='cheeseburger_port',
db_name='cheeseburger_params',
ec2_user=None)
assert sh.ec2_user is None
def test_ec2_user(self):
'''Verify that SessionHelper can accept a string as an argument for ec2 user'''
sh = SessionHelper(
region='cheeseburger',
cluster_id='cheeseburger_id',
host='cheeseburger_host',
port='cheeseburger_port',
db_name='cheeseburger_params',
ec2_user='cheeseburger_user')
assert isinstance(sh.ec2_user, str)
|
py | b4079bb06a8429c298c0d14f3fc11e19f5e90f63 |
import numpy as np
import torch
from typing import Any, Dict, Optional, Tuple
from .search_tree import SearchTree
from .typing import SearchableEnv
from .utils import import_and_get
def create_network(network_type, board_size, num_blocks, base_chans):
# backward compat
if network_type in ('ChessNetwork', 'HexNetwork'):
network_type = 'azalea.network.' + network_type
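# import_and_get presumably resolves the dotted "module.ClassName" path and
# returns the class object, so the network architecture can be chosen by name
# in the config.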
Net = import_and_get(network_type)
return Net(board_size=board_size,
num_blocks=num_blocks,
base_chans=base_chans)
class Policy:
"""Game playing policy, combination of MCTS and network
"""
def __init__(self):
"""Construct new policy"""
# do greedy & deterministic inference by default
self.settings = {
'move_sampling': False,
'move_exploration': False,
}
self.rng = np.random.RandomState()
self.seed()
def initialize(self, config):
"""Initialize policy for training"""
device = torch.device(config['device'])
if device.type == 'cuda':
# enable cudnn auto-tuner
torch.backends.cudnn.benchmark = True
self.net = create_network(config['network'],
config['board_size'],
config['num_blocks'],
config['base_chans'])
self.net.to(device)
# don't train anything by default
self.net.eval()
# network params
self.network_type = config['network']
self.board_size = config['board_size']
self.num_blocks = config['num_blocks']
self.base_chans = config['base_chans']
# search params
self.simulations = config['simulations']
self.search_batch_size = config['search_batch_size']
self.exploration_coef = config['exploration_coef']
self.exploration_depth = config['exploration_depth']
self.exploration_noise_alpha = config['exploration_noise_alpha']
self.exploration_noise_scale = config['exploration_noise_scale']
self.exploration_temperature = config['exploration_temperature']
if 'seed' in config:
self.seed(config['seed'])
@property
def net(self):
try:
return self._net
except AttributeError:
raise RuntimeError('Policy must be initialized or loaded before use')
@net.setter
def net(self, net):
self._net = net
def reset(self):
"""Start new game
"""
self.tree = SearchTree()
self.ply = 0
def seed(self, seed: Optional[int] = None) -> None:
self.rng.seed(seed)
def load_state_dict(self, state):
"""Load model state
"""
# load network architecture and params
self.network_type = state['network_type']
self.board_size = state['board_size']
self.num_blocks = state['num_blocks']
self.base_chans = state['base_chans']
self.net = create_network(self.network_type,
self.board_size,
self.num_blocks,
self.base_chans)
self.net.load_state_dict(state['net'])
# load search params
self.simulations = state['simulations']
self.search_batch_size = state['search_batch_size']
self.exploration_coef = state['exploration_coef']
self.exploration_depth = state['exploration_depth']
self.exploration_noise_alpha = state['exploration_noise_alpha']
self.exploration_noise_scale = state['exploration_noise_scale']
self.exploration_temperature = state['exploration_temperature']
# load random number generator state
if 'rng' in state:
self.rng.__setstate__(state['rng'])
def state_dict(self):
"""Return model state
Only serializes the (hyper)parameters, not ongoing game state (search tree etc)
"""
return {
'net': self.net.state_dict(),
'network_type': self.network_type,
'board_size': self.board_size,
'num_blocks': self.num_blocks,
'base_chans': self.base_chans,
'simulations': self.simulations,
'search_batch_size': self.search_batch_size,
'exploration_coef': self.exploration_coef,
'exploration_depth': self.exploration_depth,
'exploration_noise_alpha': self.exploration_noise_alpha,
'exploration_noise_scale': self.exploration_noise_scale,
'exploration_temperature': self.exploration_temperature,
'rng': self.rng.__getstate__(),
}
def choose_action(self, game: SearchableEnv) \
-> Tuple[int, Dict[str, Any]]:
"""Choose next move.
can raise SearchTreeFull
:param game: Current game environment
:returns: move - chosen move
info - auxiliary information
"""
assert not game.state.result
temperature = 0.0
noise_scale = 0.0
if self.settings['move_sampling']:
temperature = self.exploration_temperature
if self.settings['move_exploration']:
noise_scale = self.exploration_noise_scale
if self.ply >= self.exploration_depth:
temperature = 0.
probs, value, metrics = self.tree.search(
game, self.net,
temperature=temperature,
exploration_noise_scale=noise_scale,
num_simulations=self.simulations,
batch_size=self.search_batch_size,
exploration_coef=self.exploration_coef,
exploration_noise_alpha=self.exploration_noise_alpha,
rng=self.rng)
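# Sample one move from the returned search probabilities; when the temperature
# is 0 the distribution should be effectively one-hot, so this reduces to
# picking the most-visited move.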
move_id = np.argmax(self.rng.multinomial(1, probs))
move = game.state.legal_moves[move_id]
info = dict(prob=probs[move_id],
value=value,
moves=game.state.legal_moves,
moves_prob=probs,
move_id=move_id,
metrics=metrics)
return move, info
def execute_action(self, move: int, legal_moves: np.ndarray) -> None:
"""Update search tree with own or opponent action.
can raise SearchTreeFull
"""
move_id = legal_moves.tolist().index(move)
self.tree.move(move_id)
self.ply += 1
def tree_metrics(self):
return self.tree.metrics()
@classmethod
def load(cls, path: str, device: Optional[str] = None) -> 'Policy':
"""Create policy and load weights from checkpoint
Paths can be local filenames or s3://... URL's (please install
smart_open library for S3 support).
Loads tensors according to device
:param path: Either local or S3 path of policy file
"""
policy = cls()
if device:
device = torch.device(device)
location = device.type
if location == 'cuda':
location += f':{device.index or 0}'
else:
location = None
if path.startswith('s3://'):
# smart_open is optional dependency
import smart_open
with smart_open.smart_open(path) as f:
state = torch.load(f, map_location=location)
else:
state = torch.load(path, map_location=location)
policy.load_state_dict(state['policy'])
policy.net.eval()
if device:
policy.net.to(device)
return policy
|
py | b4079bd0a25f1e8dbe2a575d0ff99905229beb1a | #!/usr/local/bin/python3.6
#-*- coding: utf-8 -*-
#Author WangJiang@2019 15810438848 [email protected]
#All rights reserved
################################################################################################################
import pymysql
from lib.mysql import Cls_Mysql
from lib.usage import Cls_Usage
from lib.file import Cls_File
from lib.xlsx import Cls_Xlsx
from lib.util import Cls_Util
from lib.logging import Cls_Logging
from lib.out import Cls_Out
import conf.config as config
################################################################################################################
"""
Scope: supports exporting MySQL data to csv, txt, xlsx, etc. (just change the output file suffix), but only supports < 65535 rows (a hard Excel limit)
Note: use with caution. You bear all responsibility for the consequences!
"""
### MySQL-to-Excel export class
class Cls_Mysql_to_Excel:
### Initialization
def __init__(self):
self.usage = Cls_Usage().mysql_to_excel()
self.util = Cls_Util()
self.logging = Cls_Logging()
self.out = Cls_Out()
self.__config = config
self.__mysql_desc = []
self.__mysql_data = []
self.__mysql_rowcount = 0
### Export function
def __export(self):
try:
### Format the output and add the title row
self.out.add_title(["IP", "Port", "Result"])
### Get source MySQL connection info
ip_fd_value = str(self.usage["source_mysql"]).split(',')
### Read the list of SQL statements
sql_file = Cls_File(self.usage["sql_file"])
sql_file.read_all_data()
### Get the connection info and connect to the database
self.mysql_db = Cls_Mysql(ip_fd_value[0], ip_fd_value[1], ip_fd_value[2], ip_fd_value[3], \
ip_fd_value[4], cursorclass=pymysql.cursors.SSCursor)
self.mysql_db.execute('SET NET_READ_TIMEOUT='+self.__config.net_read_timeout)
self.__mysql_result = self.mysql_db.query(sql_file.file_all_data)
### Check the execution result
if self.__mysql_result["code"] == 0:
file_name = ip_fd_value[0] + '_'+ ip_fd_value[1] + '_' + self.util.timestamp() + \
'_' +self.usage["export_file"]
### On success, create the xlsx file and add its contents
self.workbook = Cls_Xlsx(file_name)
self.workbook.add_worksheet()
### Add the sheet title row
sheet_title = self.mysql_db.get_description()
sheet_content = self.__mysql_result["content"]
self.workbook.add_title('A1', sheet_title)
### Add the sheet contents
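### convertToTitle(d+1) presumably maps a 1-based column index to its Excel
### column letter (1 -> A, 2 -> B, ...); data rows start at row 2, below the title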
for d in range(0, len(sheet_title)):
self.workbook.add_content(self.workbook.convertToTitle(d+1)+'2', \
self.util.get_single_index_tuple(sheet_content, d))
self.workbook.close()
### Add a result row
self.out.add_row([ip_fd_value[0], ip_fd_value[1], file_name])
else:
### Add an error row
self.out.add_row([ip_fd_value[0], ip_fd_value[1], "ERROR:"+self.__mysql_result["content"]])
### Release database resources
self.mysql_db.close()
except Exception as e:
### Log the error
self.logging.loggingError(str(e))
finally:
### Print the output
self.out.print()
### Main function
def main(self):
self.__export()
if __name__ == "__main__":
Cls_Mysql_to_Excel().main()
|
py | b4079c75d82b64409f218cb02b5ba4a3dff21870 | # coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
try:
from onshape_client.oas.models import btfs_value1888
except ImportError:
btfs_value1888 = sys.modules["onshape_client.oas.models.btfs_value1888"]
try:
from onshape_client.oas.models import btfs_value_with_units1817_all_of
except ImportError:
btfs_value_with_units1817_all_of = sys.modules[
"onshape_client.oas.models.btfs_value_with_units1817_all_of"
]
class BTFSValueWithUnits1817(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
("quantity_type",): {
"UNKNOWN": "UNKNOWN",
"INTEGER": "INTEGER",
"REAL": "REAL",
"LENGTH": "LENGTH",
"ANGLE": "ANGLE",
"MASS": "MASS",
"TIME": "TIME",
"TEMPERATURE": "TEMPERATURE",
"CURRENT": "CURRENT",
"ANYTHING": "ANYTHING",
},
}
validations = {}
additional_properties_type = None
@staticmethod
def openapi_types():
"""
This must be a class method so a model may have properties that are
of type self, this ensures that we don't create a cyclic import
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
"bt_type": (str,), # noqa: E501
"quantity_type": (str,), # noqa: E501
"unit_to_power": ({str: (int,)},), # noqa: E501
"value": (float,), # noqa: E501
"value_object": (float,), # noqa: E501
"configuration_value_string": (str,), # noqa: E501
"standard_type_name": (str,), # noqa: E501
"type_tag": (str,), # noqa: E501
}
@staticmethod
def discriminator():
return None
attribute_map = {
"bt_type": "btType", # noqa: E501
"quantity_type": "quantityType", # noqa: E501
"unit_to_power": "unitToPower", # noqa: E501
"value": "value", # noqa: E501
"value_object": "valueObject", # noqa: E501
"configuration_value_string": "configurationValueString", # noqa: E501
"standard_type_name": "standardTypeName", # noqa: E501
"type_tag": "typeTag", # noqa: E501
}
required_properties = set(
[
"_data_store",
"_check_type",
"_from_server",
"_path_to_item",
"_configuration",
"_composed_instances",
"_var_name_to_model_instances",
"_additional_properties_model_instances",
]
)
def __init__(
self,
_check_type=True,
_from_server=False,
_path_to_item=(),
_configuration=None,
**kwargs
): # noqa: E501
"""btfs_value_with_units1817.BTFSValueWithUnits1817 - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_from_server (bool): True if the data is from the server
False if the data is from the client (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
bt_type (str): [optional] # noqa: E501
quantity_type (str): [optional] # noqa: E501
unit_to_power ({str: (int,)}): [optional] # noqa: E501
value (float): [optional] # noqa: E501
value_object (float): [optional] # noqa: E501
configuration_value_string (str): [optional] # noqa: E501
standard_type_name (str): [optional] # noqa: E501
type_tag (str): [optional] # noqa: E501
"""
self._data_store = {}
self._check_type = _check_type
self._from_server = _from_server
self._path_to_item = _path_to_item
self._configuration = _configuration
constant_args = {
"_check_type": _check_type,
"_path_to_item": _path_to_item,
"_from_server": _from_server,
"_configuration": _configuration,
}
required_args = {}
# remove args whose value is Null because they are unset
required_arg_names = list(required_args.keys())
for required_arg_name in required_arg_names:
if required_args[required_arg_name] is nulltype.Null:
del required_args[required_arg_name]
model_args = {}
model_args.update(required_args)
model_args.update(kwargs)
composed_info = validate_get_composed_info(constant_args, model_args, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
unused_args = composed_info[3]
for var_name, var_value in required_args.items():
setattr(self, var_name, var_value)
for var_name, var_value in six.iteritems(kwargs):
if (
var_name in unused_args
and self._configuration is not None
and self._configuration.discard_unknown_keys
and not self._additional_properties_model_instances
):
# discard variable.
continue
setattr(self, var_name, var_value)
@staticmethod
def _composed_schemas():
# we need this here to make our import statements work
# we must store _composed_schemas in here so the code is only run
# when we invoke this method. If we kept this at the class
# level we would get an error beause the class level
# code would be run when this module is imported, and these composed
# classes don't exist yet because their module has not finished
# loading
return {
"anyOf": [],
"allOf": [
btfs_value1888.BTFSValue1888,
btfs_value_with_units1817_all_of.BTFSValueWithUnits1817AllOf,
],
"oneOf": [],
}
|
py | b4079c7aa0b3cff4ac650afbcd57f9286920c765 | # Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
"""Multithreading in pipeline parallelism."""
from contextlib import contextmanager
from queue import Queue
import sys
from threading import Thread
from types import TracebackType
from typing import TYPE_CHECKING, Callable, Dict, Generator, List, Optional, Tuple, Type, Union, cast
import torch
from .microbatch import Batch
from .stream import AbstractStream, use_device, use_stream
__all__: List[str] = []
ExcInfo = Tuple[Type[BaseException], BaseException, TracebackType]
# Queue is generic only in stubs.
# https://mypy.readthedocs.io/en/latest/common_issues.html#using-classes-that-are-generic-in-stubs-but-not-at-runtime
if TYPE_CHECKING:
InQueue = Queue[Optional["Task"]]
OutQueue = Queue[Tuple[bool, Union[Tuple["Task", Batch], ExcInfo, None]]]
else:
InQueue = Queue
OutQueue = Queue
class Task:
"""A task represents how to compute a micro-batch on a partition.
It consists of two parts: :meth:`compute` and :meth:`finalize`.
:meth:`compute` should be executed in worker threads concurrently.
:meth:`finalize` should be executed after when worker threads complete to
execute :meth:`compute`.
:meth:`compute` might be boosted by worker threads. Because it produces
several CUDA API calls by user code. In PyTorch, parallel CUDA API calls
are not serialized through GIL. So more than one CUDA API call can be
produced at the same time.
"""
def __init__(
self, stream: AbstractStream, *, compute: Callable[[], Batch], finalize: Optional[Callable[[Batch], None]],
) -> None:
self.stream = stream
self._compute = compute
self._finalize = finalize
self._grad_enabled = torch.is_grad_enabled()
def compute(self) -> Batch:
with use_stream(self.stream), torch.set_grad_enabled(self._grad_enabled):
return self._compute()
def finalize(self, batch: Batch) -> None:
if self._finalize is None:
return
with use_stream(self.stream), torch.set_grad_enabled(self._grad_enabled):
self._finalize(batch)
def worker(in_queue: InQueue, out_queue: OutQueue, device: torch.device) -> None:
"""The main loop of a worker thread."""
with use_device(device):
while True:
task = in_queue.get()
if task is None:
break
try:
batch = task.compute()
except Exception:
exc_info = cast(ExcInfo, sys.exc_info())
out_queue.put((False, exc_info))
continue
out_queue.put((True, (task, batch)))
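# The loop exits when a None task is received; a (False, None) sentinel tells
# the consumer this is a normal shutdown rather than an exception tuple.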
done = (False, None)
out_queue.put(done)
def create_workers(devices: List[torch.device],) -> Tuple[List[InQueue], List[OutQueue]]:
"""Spawns worker threads. A worker thread is bound to a device."""
in_queues: List[InQueue] = []
out_queues: List[OutQueue] = []
# Spawn workers.
workers: Dict[torch.device, Tuple[InQueue, OutQueue]] = {}
def normalize_device(device: torch.device) -> torch.device:
if device.type == "cuda" and device.index is None:
return torch.device("cuda", index=torch.cuda.current_device())
if device.type == "cpu" and device.index is not None:
return torch.device("cpu")
return device
for device in devices:
device = normalize_device(device)
try:
in_queue, out_queue = workers[device]
except KeyError:
in_queue = Queue()
out_queue = Queue()
workers[device] = (in_queue, out_queue)
t = Thread(target=worker, args=(in_queue, out_queue, device), daemon=True,)
t.start()
in_queues.append(in_queue)
out_queues.append(out_queue)
return (in_queues, out_queues)
@contextmanager
def spawn_workers(devices: List[torch.device],) -> Generator[Tuple[List[InQueue], List[OutQueue]], None, None]:
try:
(in_queues, out_queues) = create_workers(devices)
yield (in_queues, out_queues)
finally:
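# Nothing to tear down here: the worker threads are daemon threads, so they
# do not need to be joined and exit together with the interpreter.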
pass
|
py | b4079cb3b4ea86aba17af291d9e72af516aab510 | import requests
from validando_dados.cep import BuscaEndereco
cep = "05783170"
cep_instance = BuscaEndereco(cep)
print()
|
py | b4079ddf9574dc872f6b4c16f9e66b59b17ae28a | from .discovery import discover_webmention_endpoint
from .send import SendWebmentionResponse, send_webmention
from .validate import validate_webmention
__all__ = ["send_webmention", "validate_webmention", "discover_webmention_endpoint", "SendWebmentionResponse"]
|
py | b4079e2468a7c945f3c3ebaa671d044cddc0d30b | # Generated by Django 3.1 on 2021-04-22 17:08
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import re
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('fms_core', '0019_v3_1_2'),
]
operations = [
migrations.AlterField(
model_name='container',
name='barcode',
field=models.CharField(help_text='Unique container barcode.', max_length=200, unique=True, validators=[django.core.validators.RegexValidator(re.compile('^.{1,200}$'))]),
),
migrations.AlterField(
model_name='container',
name='name',
field=models.CharField(help_text='Unique name for the container.', max_length=200, unique=True, validators=[django.core.validators.RegexValidator(re.compile('^[a-zA-Z0-9.\\-_]{1,200}$'))]),
),
migrations.AlterField(
model_name='sample',
name='name',
field=models.CharField(help_text='Sample name.', max_length=200, validators=[django.core.validators.RegexValidator(re.compile('^[a-zA-Z0-9.\\-_]{1,200}$'))]),
),
migrations.AlterField(
model_name='sample',
name='individual',
field=models.ForeignKey(help_text='Individual associated with the sample.',
on_delete=django.db.models.deletion.PROTECT, related_name='samples',
to='fms_core.individual'),
),
]
|
py | b4079eddfd45ac100bf7714ab570d1accf2664bd | from processors.transformers import numerical_transformer, categorical_transformer
from sklearn.compose import ColumnTransformer
from instances import config
preprocessor = ColumnTransformer(
transformers=[
('numerical', numerical_transformer, config.NUMERICAL_FEATURES),
('categorical', categorical_transformer, config.CATEGORICAL_FEATURES)])
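# A minimal usage sketch (assuming X is a pandas DataFrame that contains the
# configured numerical and categorical feature columns):
#   X_transformed = preprocessor.fit_transform(X)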
|
py | b4079f169b354166d4184ef62617c1626622a045 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetPredictionModelStatusResult',
'AwaitableGetPredictionModelStatusResult',
'get_prediction_model_status',
]
warnings.warn("""The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-native:customerinsights:getPredictionModelStatus'.""", DeprecationWarning)
@pulumi.output_type
class GetPredictionModelStatusResult:
"""
The prediction model status.
"""
def __init__(__self__, message=None, model_version=None, prediction_guid_id=None, prediction_name=None, signals_used=None, status=None, tenant_id=None, test_set_count=None, training_accuracy=None, training_set_count=None, validation_set_count=None):
if message and not isinstance(message, str):
raise TypeError("Expected argument 'message' to be a str")
pulumi.set(__self__, "message", message)
if model_version and not isinstance(model_version, str):
raise TypeError("Expected argument 'model_version' to be a str")
pulumi.set(__self__, "model_version", model_version)
if prediction_guid_id and not isinstance(prediction_guid_id, str):
raise TypeError("Expected argument 'prediction_guid_id' to be a str")
pulumi.set(__self__, "prediction_guid_id", prediction_guid_id)
if prediction_name and not isinstance(prediction_name, str):
raise TypeError("Expected argument 'prediction_name' to be a str")
pulumi.set(__self__, "prediction_name", prediction_name)
if signals_used and not isinstance(signals_used, int):
raise TypeError("Expected argument 'signals_used' to be a int")
pulumi.set(__self__, "signals_used", signals_used)
if status and not isinstance(status, str):
raise TypeError("Expected argument 'status' to be a str")
pulumi.set(__self__, "status", status)
if tenant_id and not isinstance(tenant_id, str):
raise TypeError("Expected argument 'tenant_id' to be a str")
pulumi.set(__self__, "tenant_id", tenant_id)
if test_set_count and not isinstance(test_set_count, int):
raise TypeError("Expected argument 'test_set_count' to be a int")
pulumi.set(__self__, "test_set_count", test_set_count)
if training_accuracy and not isinstance(training_accuracy, int):
raise TypeError("Expected argument 'training_accuracy' to be a int")
pulumi.set(__self__, "training_accuracy", training_accuracy)
if training_set_count and not isinstance(training_set_count, int):
raise TypeError("Expected argument 'training_set_count' to be a int")
pulumi.set(__self__, "training_set_count", training_set_count)
if validation_set_count and not isinstance(validation_set_count, int):
raise TypeError("Expected argument 'validation_set_count' to be a int")
pulumi.set(__self__, "validation_set_count", validation_set_count)
@property
@pulumi.getter
def message(self) -> str:
"""
The model status message.
"""
return pulumi.get(self, "message")
@property
@pulumi.getter(name="modelVersion")
def model_version(self) -> str:
"""
Version of the model.
"""
return pulumi.get(self, "model_version")
@property
@pulumi.getter(name="predictionGuidId")
def prediction_guid_id(self) -> str:
"""
The prediction GUID ID.
"""
return pulumi.get(self, "prediction_guid_id")
@property
@pulumi.getter(name="predictionName")
def prediction_name(self) -> str:
"""
The prediction name.
"""
return pulumi.get(self, "prediction_name")
@property
@pulumi.getter(name="signalsUsed")
def signals_used(self) -> int:
"""
The signals used.
"""
return pulumi.get(self, "signals_used")
@property
@pulumi.getter
def status(self) -> str:
"""
Prediction model life cycle. When prediction is in PendingModelConfirmation status, it is allowed to update the status to PendingFeaturing or Active through API.
"""
return pulumi.get(self, "status")
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> str:
"""
The hub name.
"""
return pulumi.get(self, "tenant_id")
@property
@pulumi.getter(name="testSetCount")
def test_set_count(self) -> int:
"""
Count of the test set.
"""
return pulumi.get(self, "test_set_count")
@property
@pulumi.getter(name="trainingAccuracy")
def training_accuracy(self) -> int:
"""
The training accuracy.
"""
return pulumi.get(self, "training_accuracy")
@property
@pulumi.getter(name="trainingSetCount")
def training_set_count(self) -> int:
"""
Count of the training set.
"""
return pulumi.get(self, "training_set_count")
@property
@pulumi.getter(name="validationSetCount")
def validation_set_count(self) -> int:
"""
Count of the validation set.
"""
return pulumi.get(self, "validation_set_count")
class AwaitableGetPredictionModelStatusResult(GetPredictionModelStatusResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetPredictionModelStatusResult(
message=self.message,
model_version=self.model_version,
prediction_guid_id=self.prediction_guid_id,
prediction_name=self.prediction_name,
signals_used=self.signals_used,
status=self.status,
tenant_id=self.tenant_id,
test_set_count=self.test_set_count,
training_accuracy=self.training_accuracy,
training_set_count=self.training_set_count,
validation_set_count=self.validation_set_count)
def get_prediction_model_status(hub_name: Optional[str] = None,
prediction_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPredictionModelStatusResult:
"""
The prediction model status.
Latest API Version: 2017-04-26.
:param str hub_name: The name of the hub.
:param str prediction_name: The name of the Prediction.
:param str resource_group_name: The name of the resource group.
"""
pulumi.log.warn("""get_prediction_model_status is deprecated: The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-native:customerinsights:getPredictionModelStatus'.""")
__args__ = dict()
__args__['hubName'] = hub_name
__args__['predictionName'] = prediction_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:customerinsights/latest:getPredictionModelStatus', __args__, opts=opts, typ=GetPredictionModelStatusResult).value
return AwaitableGetPredictionModelStatusResult(
message=__ret__.message,
model_version=__ret__.model_version,
prediction_guid_id=__ret__.prediction_guid_id,
prediction_name=__ret__.prediction_name,
signals_used=__ret__.signals_used,
status=__ret__.status,
tenant_id=__ret__.tenant_id,
test_set_count=__ret__.test_set_count,
training_accuracy=__ret__.training_accuracy,
training_set_count=__ret__.training_set_count,
validation_set_count=__ret__.validation_set_count)
|
py | b4079f242a9cd1143d1d07281afb6c2a68f292b9 | #!/usr/bin/env python3 -u
# -*- coding: utf-8 -*-
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
__author__ = ["Markus Löning"]
__all__ = ["ESTIMATOR_TEST_PARAMS", "EXCLUDE_ESTIMATORS", "EXCLUDED_TESTS"]
import numpy as np
from hcrystalball.wrappers import HoltSmoothingWrapper
from pyod.models.knn import KNN
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer
from sklearn.preprocessing import StandardScaler
from sktime.annotation.adapters import PyODAnnotator
from sktime.base import BaseEstimator
from sktime.classification.compose import ColumnEnsembleClassifier
from sktime.classification.compose import ComposableTimeSeriesForestClassifier
from sktime.classification.dictionary_based import ContractableBOSS
from sktime.classification.dictionary_based import TemporalDictionaryEnsemble
from sktime.classification.feature_based import (
Catch22Classifier,
MatrixProfileClassifier,
TSFreshClassifier,
SignatureClassifier,
)
from sktime.classification.hybrid import HIVECOTEV1
from sktime.classification.interval_based import CanonicalIntervalForest
from sktime.classification.interval_based import DrCIF
from sktime.classification.interval_based import RandomIntervalSpectralForest
from sktime.classification.interval_based import SupervisedTimeSeriesForest
from sktime.classification.interval_based import TimeSeriesForestClassifier as TSFC
from sktime.classification.kernel_based import Arsenal
from sktime.classification.kernel_based import ROCKETClassifier
from sktime.classification.shapelet_based import ShapeletTransformClassifier
from sktime.dists_kernels.compose_tab_to_panel import AggrDist
from sktime.dists_kernels.scipy_dist import ScipyDist
from sktime.forecasting.arima import AutoARIMA
from sktime.forecasting.bats import BATS
from sktime.forecasting.compose import ColumnEnsembleForecaster
from sktime.forecasting.compose import DirRecTabularRegressionForecaster
from sktime.forecasting.compose import DirRecTimeSeriesRegressionForecaster
from sktime.forecasting.compose import DirectTabularRegressionForecaster
from sktime.forecasting.compose import DirectTimeSeriesRegressionForecaster
from sktime.forecasting.compose import EnsembleForecaster
from sktime.forecasting.compose import ForecastingPipeline
from sktime.forecasting.compose import MultioutputTabularRegressionForecaster
from sktime.forecasting.compose import MultioutputTimeSeriesRegressionForecaster
from sktime.forecasting.compose import MultiplexForecaster
from sktime.forecasting.compose import RecursiveTabularRegressionForecaster
from sktime.forecasting.compose import RecursiveTimeSeriesRegressionForecaster
from sktime.forecasting.compose import StackingForecaster
from sktime.forecasting.compose import AutoEnsembleForecaster
from sktime.forecasting.compose import TransformedTargetForecaster
from sktime.forecasting.exp_smoothing import ExponentialSmoothing
from sktime.forecasting.fbprophet import Prophet
from sktime.forecasting.hcrystalball import HCrystalBallForecaster
from sktime.forecasting.model_selection import ForecastingGridSearchCV
from sktime.forecasting.model_selection import ForecastingRandomizedSearchCV
from sktime.forecasting.model_selection import SingleWindowSplitter
from sktime.forecasting.naive import NaiveForecaster
from sktime.forecasting.online_learning import OnlineEnsembleForecaster
from sktime.forecasting.tbats import TBATS
from sktime.forecasting.theta import ThetaForecaster
from sktime.performance_metrics.forecasting import MeanAbsolutePercentageError
from sktime.registry import (
ESTIMATOR_TAG_LIST,
BASE_CLASS_LIST,
BASE_CLASS_LOOKUP,
TRANSFORMER_MIXIN_LIST,
)
from sktime.regression.compose import ComposableTimeSeriesForestRegressor
from sktime.series_as_features.compose import FeatureUnion
from sktime.transformations.panel.compose import ColumnTransformer
from sktime.transformations.panel.compose import (
SeriesToPrimitivesRowTransformer,
)
from sktime.transformations.panel.compose import SeriesToSeriesRowTransformer
from sktime.transformations.panel.dictionary_based import SFA
from sktime.transformations.panel.interpolate import TSInterpolator
from sktime.transformations.panel.reduce import Tabularizer
from sktime.transformations.panel.shapelets import ContractedShapeletTransform
from sktime.transformations.panel.shapelets import ShapeletTransform
from sktime.transformations.panel.signature_based import SignatureTransformer
from sktime.transformations.panel.summarize import FittedParamExtractor
from sktime.transformations.panel.tsfresh import TSFreshFeatureExtractor
from sktime.transformations.panel.tsfresh import (
TSFreshRelevantFeatureExtractor,
)
from sktime.transformations.series.acf import AutoCorrelationTransformer
from sktime.transformations.series.acf import PartialAutoCorrelationTransformer
from sktime.transformations.series.adapt import TabularToSeriesAdaptor
from sktime.transformations.series.boxcox import BoxCoxTransformer
from sktime.transformations.series.compose import OptionalPassthrough
from sktime.transformations.series.compose import ColumnwiseTransformer
from sktime.transformations.series.detrend import Detrender
from sktime.transformations.series.impute import Imputer
from sktime.transformations.series.outlier_detection import HampelFilter
from sktime.transformations.series.feature_selection import FeatureSelection
# The following estimators currently do not pass all unit tests
# What do they fail? ShapeDTW fails on 3d_numpy_input test, not set up for that
EXCLUDE_ESTIMATORS = [
"ShapeDTW",
"ElasticEnsemble",
"ProximityForest",
"ProximityStump",
"ProximityTree",
]
# This is temporary until BaseObject is implemented
DIST_KERNELS_IGNORE_TESTS = [
"check_fit_updates_state",
"_make_fit_args",
"check_fit_returns_self",
"check_raises_not_fitted_error",
"check_fit_idempotent",
"check_fit_does_not_overwrite_hyper_params",
"check_methods_do_not_change_state",
"check_persistence_via_pickle",
]
EXCLUDED_TESTS = {
"ShapeletTransformClassifier": ["check_fit_idempotent"],
"ContractedShapeletTransform": ["check_fit_idempotent"],
"HIVECOTEV1": ["check_fit_idempotent", "check_multiprocessing_idempotent"],
"ScipyDist": DIST_KERNELS_IGNORE_TESTS,
"AggrDist": DIST_KERNELS_IGNORE_TESTS,
}
# We here configure estimators for basic unit testing, including setting of
# required hyper-parameters and setting of hyper-parameters for faster training.
SERIES_TO_SERIES_TRANSFORMER = StandardScaler()
SERIES_TO_PRIMITIVES_TRANSFORMER = FunctionTransformer(
np.mean, kw_args={"axis": 0}, check_inverse=False
)
TRANSFORMERS = [
(
"transformer1",
SeriesToSeriesRowTransformer(
SERIES_TO_SERIES_TRANSFORMER, check_transformer=False
),
),
(
"transformer2",
SeriesToSeriesRowTransformer(
SERIES_TO_SERIES_TRANSFORMER, check_transformer=False
),
),
]
REGRESSOR = LinearRegression()
ANOMALY_DETECTOR = KNN()
TIME_SERIES_CLASSIFIER = TSFC(n_estimators=3)
TIME_SERIES_CLASSIFIERS = [
("tsf1", TIME_SERIES_CLASSIFIER),
("tsf2", TIME_SERIES_CLASSIFIER),
]
FORECASTER = ExponentialSmoothing()
FORECASTERS = [("ses1", FORECASTER), ("ses2", FORECASTER)]
STEPS_y = [
("transformer", Detrender(ThetaForecaster())),
("forecaster", NaiveForecaster()),
]
STEPS_X = [
("transformer", TabularToSeriesAdaptor(StandardScaler())),
("forecaster", NaiveForecaster()),
]
ESTIMATOR_TEST_PARAMS = {
ColumnEnsembleForecaster: {"forecasters": FORECASTER},
OnlineEnsembleForecaster: {"forecasters": FORECASTERS},
FeatureUnion: {"transformer_list": TRANSFORMERS},
DirectTabularRegressionForecaster: {"estimator": REGRESSOR},
MultioutputTabularRegressionForecaster: {"estimator": REGRESSOR},
RecursiveTabularRegressionForecaster: {"estimator": REGRESSOR},
DirRecTabularRegressionForecaster: {"estimator": REGRESSOR},
DirectTimeSeriesRegressionForecaster: {
"estimator": make_pipeline(Tabularizer(), REGRESSOR)
},
RecursiveTimeSeriesRegressionForecaster: {
"estimator": make_pipeline(Tabularizer(), REGRESSOR)
},
MultioutputTimeSeriesRegressionForecaster: {
"estimator": make_pipeline(Tabularizer(), REGRESSOR)
},
DirRecTimeSeriesRegressionForecaster: {
"estimator": make_pipeline(Tabularizer(), REGRESSOR)
},
TransformedTargetForecaster: {"steps": STEPS_y},
ForecastingPipeline: {"steps": STEPS_X},
EnsembleForecaster: {"forecasters": FORECASTERS},
StackingForecaster: {"forecasters": FORECASTERS},
AutoEnsembleForecaster: {"forecasters": FORECASTERS},
Detrender: {"forecaster": FORECASTER},
ForecastingGridSearchCV: {
"forecaster": NaiveForecaster(strategy="mean"),
"cv": SingleWindowSplitter(fh=1),
"param_grid": {"window_length": [2, 5]},
"scoring": MeanAbsolutePercentageError(symmetric=True),
},
ForecastingRandomizedSearchCV: {
"forecaster": NaiveForecaster(strategy="mean"),
"cv": SingleWindowSplitter(fh=1),
"param_distributions": {"window_length": [2, 5]},
"scoring": MeanAbsolutePercentageError(symmetric=True),
},
TabularToSeriesAdaptor: {"transformer": StandardScaler()},
ColumnEnsembleClassifier: {
"estimators": [
(name, estimator, 0) for (name, estimator) in TIME_SERIES_CLASSIFIERS
]
},
FittedParamExtractor: {
"forecaster": FORECASTER,
"param_names": ["initial_level"],
},
SeriesToPrimitivesRowTransformer: {
"transformer": SERIES_TO_PRIMITIVES_TRANSFORMER,
"check_transformer": False,
},
SeriesToSeriesRowTransformer: {
"transformer": SERIES_TO_SERIES_TRANSFORMER,
"check_transformer": False,
},
ColumnTransformer: {
"transformers": [(name, estimator, [0]) for name, estimator in TRANSFORMERS]
},
AutoARIMA: {
"d": 0,
"suppress_warnings": True,
"max_p": 2,
"max_q": 2,
"seasonal": False,
},
MultiplexForecaster: {
"forecasters": [
("Naive_mean", NaiveForecaster(strategy="mean")),
("Naive_last", NaiveForecaster(strategy="last")),
("Naive_drift", NaiveForecaster(strategy="drift")),
],
"selected_forecaster": "Naive_mean",
},
ShapeletTransformClassifier: {
"n_estimators": 3,
"transform_contract_in_mins": 0.075,
},
ContractedShapeletTransform: {"time_contract_in_mins": 0.075},
ShapeletTransform: {
"max_shapelets_to_store_per_class": 1,
"min_shapelet_length": 3,
"max_shapelet_length": 4,
},
SignatureTransformer: {
"augmentation_list": ("basepoint", "addtime"),
"depth": 3,
"window_name": "global",
},
SignatureClassifier: {
"augmentation_list": ("basepoint", "addtime"),
"depth": 3,
"window_name": "global",
},
Catch22Classifier: {
"estimator": RandomForestClassifier(n_estimators=3),
},
MatrixProfileClassifier: {
"subsequence_length": 4,
},
TSFreshClassifier: {
"estimator": RandomForestClassifier(n_estimators=3),
"default_fc_parameters": "minimal",
},
ROCKETClassifier: {"num_kernels": 100},
Arsenal: {"num_kernels": 50, "n_estimators": 3},
HIVECOTEV1: {
"stc_params": {"n_estimators": 2, "transform_contract_in_mins": 0.02},
"tsf_params": {"n_estimators": 2},
"rise_params": {"n_estimators": 2},
"cboss_params": {"n_parameter_samples": 4, "max_ensemble_size": 2},
},
TSFreshFeatureExtractor: {"disable_progressbar": True, "show_warnings": False},
TSFreshRelevantFeatureExtractor: {
"disable_progressbar": True,
"show_warnings": False,
"fdr_level": 0.01,
},
TSInterpolator: {"length": 10},
RandomIntervalSpectralForest: {"n_estimators": 3, "acf_lag": 10, "min_interval": 5},
SFA: {"return_pandas_data_series": True},
ContractableBOSS: {"n_parameter_samples": 10, "max_ensemble_size": 5},
TemporalDictionaryEnsemble: {
"n_parameter_samples": 10,
"max_ensemble_size": 5,
"randomly_selected_params": 5,
},
TSFC: {"n_estimators": 3},
ComposableTimeSeriesForestClassifier: {"n_estimators": 3},
ComposableTimeSeriesForestRegressor: {"n_estimators": 3},
SupervisedTimeSeriesForest: {"n_estimators": 3},
CanonicalIntervalForest: {"n_estimators": 3},
DrCIF: {"n_estimators": 3},
HCrystalBallForecaster: {"model": HoltSmoothingWrapper()},
BATS: {
"use_box_cox": False,
"use_trend": False,
"use_damped_trend": False,
"sp": [],
"use_arma_errors": False,
"n_jobs": 1,
},
TBATS: {
"use_box_cox": False,
"use_trend": False,
"use_damped_trend": False,
"sp": [],
"use_arma_errors": False,
"n_jobs": 1,
},
Prophet: {
"n_changepoints": 0,
"yearly_seasonality": False,
"weekly_seasonality": False,
"daily_seasonality": False,
"uncertainty_samples": 1000,
"verbose": False,
},
PartialAutoCorrelationTransformer: {"n_lags": 1},
AutoCorrelationTransformer: {"n_lags": 1},
Imputer: {"method": "mean"},
HampelFilter: {"window_length": 3},
OptionalPassthrough: {"transformer": BoxCoxTransformer(), "passthrough": True},
FeatureSelection: {"method": "all"},
ColumnwiseTransformer: {"transformer": Detrender()},
AggrDist: {"transformer": ScipyDist()},
PyODAnnotator: {"estimator": ANOMALY_DETECTOR},
}
# We use estimator tags in addition to class hierarchies to further distinguish
# estimators into different categories. This is useful for defining and running
# common tests for estimators with the same tags.
VALID_ESTIMATOR_TAGS = tuple(ESTIMATOR_TAG_LIST)
# These methods should not change the state of the estimator, that is, they should
# not change fitted parameters or hyper-parameters. They are also the methods that
# "apply" the fitted estimator to data and useful for checking results.
NON_STATE_CHANGING_METHODS = (
"predict",
"predict_proba",
"decision_function",
"transform",
"inverse_transform",
)
# The following gives a list of valid estimator base classes.
VALID_TRANSFORMER_TYPES = tuple(TRANSFORMER_MIXIN_LIST)
VALID_ESTIMATOR_BASE_TYPES = tuple(BASE_CLASS_LIST)
VALID_ESTIMATOR_TYPES = (
BaseEstimator,
*VALID_ESTIMATOR_BASE_TYPES,
*VALID_TRANSFORMER_TYPES,
)
VALID_ESTIMATOR_BASE_TYPE_LOOKUP = BASE_CLASS_LOOKUP
|
py | b407a0b3ccf7e852ec6ea712ed1299b224a37830 | import sys
import os
import glob
import argparse
import numpy as np
from os.path import expanduser
import h5py
import pickle
from time import time
import subprocess
velocity_bound=10000.
def retrieve_commit_hash(path_to_repo):
""" Return the commit hash of the git branch currently live in the input path.
Parameters
----------
path_to_repo : string
Returns
-------
commit_hash : string
"""
cmd = 'cd {0} && git rev-parse HEAD'.format(path_to_repo)
return subprocess.check_output(cmd, shell=True).strip()
def velocity_bug_fix(output_snapshot, scalefactor=1.0):
"""
output_snapshot: dict containing mock data to be modified
scalefactor: correction factor for velocities
"""
#overwrite with corrected values
for d in ['x', 'y', 'z']:
print(".....Correcting v{}'s".format(d))
corrected_halo_velocity = output_snapshot['target_halo_v{}'.format(d)]*scalefactor
host_centric_v = output_snapshot['host_centric_v{}'.format(d)]
corrected_galaxy_velocity = corrected_halo_velocity + host_centric_v
output_snapshot['target_halo_v{}'.format(d)] = corrected_halo_velocity
output_snapshot['v{}'.format(d)] = corrected_galaxy_velocity
return output_snapshot
def mask_large_velocities(output_snapshot, max_value=velocity_bound):
total = len(output_snapshot['target_halo_vx'])
mask = np.ones(total, dtype=bool)
for d in ['x', 'y', 'z']:
mask &= np.abs(output_snapshot['target_halo_v{}'.format(d)]) < max_value
nbad = np.sum(~mask)
print('.....Masking {} galaxy(ies); total = {}; fraction = {}'.format(nbad, total, nbad/float(total)))
return mask
def apply_mask(output_snapshot, mask):
for k in output_snapshot.keys():
output_snapshot[k] = output_snapshot[k][mask]
return output_snapshot
def healpix_mock_modify(healpix_filename, commit_hash, functions=None, correction_data=None):
output_mock = {}
masks_used = {}
print('Starting correction of {}'.format(os.path.basename(healpix_filename)))
with h5py.File(healpix_filename, 'r') as fh:
print('...copying input to output_mock')
#copy input mock to output mock
for (k, v) in fh.items():
if k.isdigit():
output_snapshot = {}
for kk, vv in v.items():
output_snapshot[kk] = vv.value
output_mock[k] = output_snapshot
print('...Keys copied to output mock: {}'.format(', '.join(output_mock.keys())))
for f in functions:
corrections = correction_data.get(str(f),{})
for (k, v) in output_mock.items():
if k.isdigit():
if len(v) == 0:
print('Skipping empty snap {}'.format(k))
continue
print('...Processing snap {} with {} and data-correction value(s) {}'.format(k, str(f), corrections[int(k)]))
output_mock[k] = f(v, corrections[int(k)])
#apply masks
mask = mask_large_velocities(output_mock[k], max_value=velocity_bound)
output_mock[k] = apply_mask(output_mock[k], mask)
print('...Masked length of arrays in snapshot {} = {}'.format(k, len(output_mock[k]['galaxy_id'])))
masks_used['large_velocities'] = mask_large_velocities
del mask
# copy and correct metaData
k = 'metaData'
output_mock[k] = {}
for tk, v in fh[k].items():
output_mock[k][tk] = v.value
output_mock[k]['versionMinorMinor'] += 1
for n, f in enumerate(functions):
ckey = 'comment_'+str(n)
output_mock[k][ckey] = ' '.join(['Corrected with', str(f)])
print('...Adding metaData comment: {}'.format(output_mock[k][ckey]))
for tk, v in masks_used.items():
ckey = 'mask_'+tk
output_mock[k][ckey] = ' '.join(['Corrected with', str(v)])
print('...Adding metaData comment: {}'.format(output_mock[k][ckey]))
output_mock[k]['current_commit_hash'] = commit_hash
return output_mock
def write_output_mock(output_mock, output_healpix_file):
hdfFile = h5py.File(output_healpix_file, 'w')
for k, v in output_mock.items():
gGroup = hdfFile.create_group(k)
for tk in v.keys():
gGroup[tk] = v[tk]
hdfFile.close()
print('...Wrote {} to disk'.format(output_healpix_file))
return
home = expanduser("~")
path_to_cosmodc2 = os.path.join(home, 'cosmology/cosmodc2')
if 'mira-home' in home:
sys.path.insert(0, '/gpfs/mira-home/ekovacs/.local/lib/python2.7/site-packages')
sys.path.insert(0, path_to_cosmodc2)
parser = argparse.ArgumentParser()
parser.add_argument("-cutout",
help="healpix cutout number to modify",
default='*')
parser.add_argument("-healpix_fname_template",
help="Template filename of healpix cutout to modify",
default='baseDC2*cutout_{}.hdf5')
parser.add_argument("-input_master_dirname",
help="Directory name (relative to home) storing input and output healpix file directories",
default='cosmology/DC2/OR_Production')
parser.add_argument("-output_mock_dirname",
help="Directory name (relative to input_master_dirname) storing output mock healpix files",
default='baseDC2_9.8C_v1.1_velocity_bug_fixes')
# default='baseDC2_min_9.8_centrals_v0.4.7_velocity_bug_fixes')
parser.add_argument("-input_mock_dirname",
help="Directory name (relative to input_master_dirname) storing input mock healpix files",
default='baseDC2_9.8C_v1.1')
# default='baseDC2_min_9.8_centrals_v0.4.5')
# default='baseDC2_min_9.8_centrals_v0.4.5_test')
parser.add_argument("-modify_functions",
help="Functions applied to modify input -> output",
nargs='+', choices=[velocity_bug_fix],
default=[velocity_bug_fix])
args = parser.parse_args()
#setup directory names
input_master_dirname = os.path.join(home, args.input_master_dirname)
input_mock_dirname = os.path.join(input_master_dirname, args.input_mock_dirname)
output_mock_dirname = os.path.join(input_master_dirname, args.output_mock_dirname)
healpix_filename = args.healpix_fname_template.format(args.cutout)
function_list = args.modify_functions
print('Reading input from {}\n'.format(input_mock_dirname))
print('Writing output to {}\n'.format(output_mock_dirname))
print('Modifying healpix files matching {} with function(s) {}\n'.format(healpix_filename, function_list))
current_commit_hash = retrieve_commit_hash(path_to_cosmodc2)[0:7]
print('Using current commit hash {}'.format(current_commit_hash))
#load additional data needed for corrections
correction_data = {}
function_names = map(str, function_list)
for f in map(str, function_list):
if 'velocity_bug_fix' in f:
datfile = os.path.join(path_to_cosmodc2, 'scripts/velocity_correction_factors.pkl')
#text file option
#datfile = os.path.join(path_to_cosmodc2, 'scripts/velocity_correction_factors.txt')
#ts, sf = np.loadtxt(datfile, unpack=True, usecols=[0, 1])
#correction_data[f] =dict(zip(ts.astype(int), sf))
with open(datfile, 'rb') as handle:
correction_data[f] = pickle.load(handle)
print('Using correction data input from {}\n'.format(datfile))
healpix_files = sorted(glob.glob(os.path.join(input_mock_dirname, healpix_filename)))
start_time = time()
for hpx in healpix_files:
start_file = time()
output_mock = healpix_mock_modify(hpx, current_commit_hash, functions=function_list, correction_data=correction_data)
output_healpix_file = os.path.join(output_mock_dirname, os.path.basename(hpx))
write_output_mock(output_mock, output_healpix_file)
end_file = time()
print('Processed {} in {:.2f} minutes\n'.format(os.path.basename(output_healpix_file), (end_file - start_file)/60.))
time_stamp = time()
msg = "End-to-end runtime = {0:.2f} minutes\n"
print(msg.format((time_stamp-start_time)/60.))
|
py | b407a0e0e8d58ca00acc178ecd4fc7c7389ce850 | ######## Video Object Detection Using Tensorflow-trained Classifier #########
#
# Author: Evan Juras
# Date: 1/16/18
# Description:
# This program uses a TensorFlow-trained classifier to perform object detection.
# It loads the classifier and uses it to perform object detection on a video.
# It draws boxes and scores around the objects of interest in each frame
# of the video.
# Some of the code is copied from Google's example at
# https://github.com/tensorflow/models/blob/master/research/object_detection/object_detection_tutorial.ipynb
# and some is copied from Dat Tran's example at
# https://github.com/datitran/object_detector_app/blob/master/object_detection_app.py
# but I changed it to make it more understandable to me.
# Import packages
from image_processing_utils import find_turn_angle
from utils import visualization_utils as vis_util
from utils import label_map_util
import os
import cv2
import numpy as np
import tensorflow as tf
import sys
# Enable the model
MODEL_ACTIVE = True
# KEEP = True
SHOW = True
SHOW_ORIGINAL = True
# Name of the directory containing the object detection module we're using
MODEL_DIR = 'model'
VIDEO_PATH = r'test/test.mp4'
# Grab path to current working directory
CWD_PATH = os.getcwd()
# Path to frozen detection graph .pb file, which contains the model that is used
# for object detection.
PATH_TO_CKPT = os.path.join(CWD_PATH, MODEL_DIR, 'ali_model.pb')
# Path to label map file
PATH_TO_LABELS = os.path.join(CWD_PATH, MODEL_DIR, 'labelmap.pbtxt')
# Path to video
PATH_TO_VIDEO = os.path.join(CWD_PATH, VIDEO_PATH)
# Number of classes the object detector can identify
NUM_CLASSES = 14
# Minimum detection threshold (probability %)
MIN_SCORE_THRESH = 0.60
# OUT = cv2.VideoWriter(
# 'OUTput.avi',
# cv2.VideoWriter_fourcc(*'XVID'),
# 5.0,
# (640, 400)
# ) if KEEP else None
if MODEL_ACTIVE:
# Load the label map.
# Label maps map indices to category names, so that when our convolution
# network predicts `5`, we know that this corresponds to `king`.
# Here we use internal utility functions, but anything that returns a
# dictionary mapping integers to appropriate string labels would be fine
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(
label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# Load the Tensorflow model into memory.
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
sess = tf.Session(graph=detection_graph)
# Define input and output tensors (i.e. data) for the object detection classifier
# Input tensor is the image
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Output tensors are the detection boxes, scores, and classes
# Each box represents a part of the image where a particular object was detected
detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represents level of confidence for each of the objects.
# The score is shown on the result image, together with the class label.
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name(
'detection_classes:0')
# Number of objects detected
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
# Open video file
video = cv2.VideoCapture(PATH_TO_VIDEO)
while(video.isOpened()):
# Acquire frame and expand frame dimensions to have shape: [1, None, None, 3]
ret, frame = video.read()
    # Only run the processing when a frame was actually read
    if frame is not None:
        # Show the original video frame
        cv2.imshow('Orjinal Video', frame) if SHOW_ORIGINAL else None
if MODEL_ACTIVE:
frame_expanded = np.expand_dims(frame, axis=0)
# Perform the actual detection by running the model with the image as input
(boxes, scores, classes, num) = sess.run(
[detection_boxes, detection_scores,
detection_classes, num_detections],
feed_dict={image_tensor: frame_expanded})
# Draw the results of the detection (aka 'visulaize the results')
vis_util.visualize_boxes_and_labels_on_image_array(
frame,
np.squeeze(boxes),
np.squeeze(classes).astype(np.int32),
np.squeeze(scores),
category_index,
use_normalized_coordinates=True,
line_thickness=8,
min_score_thresh=MIN_SCORE_THRESH)
        try:
            data = find_turn_angle(frame)
            if data is not None:
                frame, data = data
                print("Turn values:",
                      "Left" if data[0] < 0 else "Right",
                      "Pixel length:", data[1])
        except Exception:
            print("A problem occurred during image processing")
        # Show the processed result
        cv2.imshow('Islenmis Video', frame) if SHOW else None
        # Save the result to the output video
        # OUT.write(frame) if KEEP else None
    else:
        print("Frame came back as None; closing the video.")
break
# Press 'q' to quit
if cv2.waitKey(1) == ord('q'):
break
# Clean up
video.release()
cv2.destroyAllWindows()
# OUT.release() if KEEP else None
|
py | b407a4062b7232381c5e1ede5b8e8a78e1188e85 | import concurrent.futures as futures
import os
import pathlib
import re
from collections import OrderedDict
import numpy as np
from skimage import io
def area(boxes, add1=False):
"""Computes area of boxes.
Args:
boxes: Numpy array with shape [N, 4] holding N boxes
Returns:
a numpy array with shape [N*1] representing box areas
"""
if add1:
return (boxes[:, 2] - boxes[:, 0] + 1.0) * (
boxes[:, 3] - boxes[:, 1] + 1.0)
else:
return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
def intersection(boxes1, boxes2, add1=False):
"""Compute pairwise intersection areas between boxes.
Args:
boxes1: a numpy array with shape [N, 4] holding N boxes
boxes2: a numpy array with shape [M, 4] holding M boxes
Returns:
a numpy array with shape [N*M] representing pairwise intersection area
"""
[y_min1, x_min1, y_max1, x_max1] = np.split(boxes1, 4, axis=1)
[y_min2, x_min2, y_max2, x_max2] = np.split(boxes2, 4, axis=1)
all_pairs_min_ymax = np.minimum(y_max1, np.transpose(y_max2))
all_pairs_max_ymin = np.maximum(y_min1, np.transpose(y_min2))
if add1:
all_pairs_min_ymax += 1.0
intersect_heights = np.maximum(
np.zeros(all_pairs_max_ymin.shape),
all_pairs_min_ymax - all_pairs_max_ymin)
all_pairs_min_xmax = np.minimum(x_max1, np.transpose(x_max2))
all_pairs_max_xmin = np.maximum(x_min1, np.transpose(x_min2))
if add1:
all_pairs_min_xmax += 1.0
intersect_widths = np.maximum(
np.zeros(all_pairs_max_xmin.shape),
all_pairs_min_xmax - all_pairs_max_xmin)
return intersect_heights * intersect_widths
def iou(boxes1, boxes2, add1=False):
"""Computes pairwise intersection-over-union between box collections.
Args:
boxes1: a numpy array with shape [N, 4] holding N boxes.
        boxes2: a numpy array with shape [M, 4] holding M boxes.
Returns:
a numpy array with shape [N, M] representing pairwise iou scores.
"""
intersect = intersection(boxes1, boxes2, add1)
area1 = area(boxes1, add1)
area2 = area(boxes2, add1)
union = np.expand_dims(
area1, axis=1) + np.expand_dims(
area2, axis=0) - intersect
return intersect / union
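# Illustrative example (added comment): with boxes given in
# [y_min, x_min, y_max, x_max] order as documented above,
#   boxes_a = np.array([[0., 0., 10., 10.]])
#   boxes_b = np.array([[0., 0., 10., 10.], [5., 5., 15., 15.]])
#   iou(boxes_a, boxes_b)  # -> shape [1, 2], roughly [[1.0, 0.143]]
# since the second pair overlaps in a 5x5 region: 25 / (100 + 100 - 25).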
def get_image_index_str(img_idx):
return "{:06d}".format(img_idx)
def get_kitti_info_path(idx,
prefix,
info_type='image_2',
file_tail='.png',
training=True,
relative_path=True,
exist_check=True):
img_idx_str = get_image_index_str(idx)
img_idx_str += file_tail
prefix = pathlib.Path(prefix)
if training:
file_path = pathlib.Path('training') / info_type / img_idx_str
else:
file_path = pathlib.Path('testing') / info_type / img_idx_str
if exist_check and not (prefix / file_path).exists():
raise ValueError("file not exist: {}".format(file_path))
if relative_path:
return str(file_path)
else:
return str(prefix / file_path)
def get_image_path(idx, prefix, training=True, relative_path=True, exist_check=True):
return get_kitti_info_path(idx, prefix, 'image_2', '.png', training,
relative_path, exist_check)
def get_label_path(idx, prefix, training=True, relative_path=True, exist_check=True):
return get_kitti_info_path(idx, prefix, 'label_2', '.txt', training,
relative_path, exist_check)
def get_velodyne_path(idx, prefix, training=True, relative_path=True, exist_check=True):
return get_kitti_info_path(idx, prefix, 'velodyne', '.bin', training,
relative_path, exist_check)
def get_calib_path(idx, prefix, training=True, relative_path=True, exist_check=True):
return get_kitti_info_path(idx, prefix, 'calib', '.txt', training,
relative_path, exist_check)
def _extend_matrix(mat):
mat = np.concatenate([mat, np.array([[0., 0., 0., 1.]])], axis=0)
return mat
def get_kitti_image_info(path,
training=True,
label_info=True,
velodyne=False,
calib=False,
image_ids=7481,
extend_matrix=True,
num_worker=8,
relative_path=True,
with_imageshape=True):
# image_infos = []
root_path = pathlib.Path(path)
if not isinstance(image_ids, list):
image_ids = list(range(image_ids))
def map_func(idx):
image_info = {'image_idx': idx, 'pointcloud_num_features': 4}
annotations = None
if velodyne:
image_info['velodyne_path'] = get_velodyne_path(
idx, path, training, relative_path)
image_info['img_path'] = get_image_path(idx, path, training,
relative_path)
if with_imageshape:
img_path = image_info['img_path']
if relative_path:
img_path = str(root_path / img_path)
image_info['img_shape'] = np.array(
io.imread(img_path).shape[:2], dtype=np.int32)
if label_info:
label_path = get_label_path(idx, path, training, relative_path)
if relative_path:
label_path = str(root_path / label_path)
annotations = get_label_anno(label_path)
if calib:
calib_path = get_calib_path(
idx, path, training, relative_path=False)
with open(calib_path, 'r') as f:
lines = f.readlines()
P0 = np.array(
[float(info) for info in lines[0].split(' ')[1:13]]).reshape(
[3, 4])
P1 = np.array(
[float(info) for info in lines[1].split(' ')[1:13]]).reshape(
[3, 4])
P2 = np.array(
[float(info) for info in lines[2].split(' ')[1:13]]).reshape(
[3, 4])
P3 = np.array(
[float(info) for info in lines[3].split(' ')[1:13]]).reshape(
[3, 4])
if extend_matrix:
P0 = _extend_matrix(P0)
P1 = _extend_matrix(P1)
P2 = _extend_matrix(P2)
P3 = _extend_matrix(P3)
image_info['calib/P0'] = P0
image_info['calib/P1'] = P1
image_info['calib/P2'] = P2
image_info['calib/P3'] = P3
R0_rect = np.array([
float(info) for info in lines[4].split(' ')[1:10]
]).reshape([3, 3])
if extend_matrix:
rect_4x4 = np.zeros([4, 4], dtype=R0_rect.dtype)
rect_4x4[3, 3] = 1.
rect_4x4[:3, :3] = R0_rect
else:
rect_4x4 = R0_rect
image_info['calib/R0_rect'] = rect_4x4
Tr_velo_to_cam = np.array([
float(info) for info in lines[5].split(' ')[1:13]
]).reshape([3, 4])
Tr_imu_to_velo = np.array([
float(info) for info in lines[6].split(' ')[1:13]
]).reshape([3, 4])
if extend_matrix:
Tr_velo_to_cam = _extend_matrix(Tr_velo_to_cam)
Tr_imu_to_velo = _extend_matrix(Tr_imu_to_velo)
image_info['calib/Tr_velo_to_cam'] = Tr_velo_to_cam
image_info['calib/Tr_imu_to_velo'] = Tr_imu_to_velo
if annotations is not None:
image_info['annos'] = annotations
add_difficulty_to_annos(image_info)
return image_info
with futures.ThreadPoolExecutor(num_worker) as executor:
image_infos = executor.map(map_func, image_ids)
return list(image_infos)
def label_str_to_int(labels, remove_dontcare=True, dtype=np.int32):
class_to_label = get_class_to_label_map()
ret = np.array([class_to_label[l] for l in labels], dtype=dtype)
if remove_dontcare:
ret = ret[ret > 0]
return ret
def get_class_to_label_map():
class_to_label = {
'Car': 0,
'Pedestrian': 1,
'Cyclist': 2,
'Van': 3,
'Person_sitting': 4,
'Truck': 5,
'Tram': 6,
'Misc': 7,
'DontCare': -1,
}
return class_to_label
def get_classes():
return get_class_to_label_map().keys()
def filter_gt_boxes(gt_boxes, gt_labels, used_classes):
    mask = np.array([l in used_classes for l in gt_labels], dtype=bool)
return mask
def filter_anno_by_mask(image_anno, mask):
img_filtered_annotations = {}
for key in image_anno.keys():
img_filtered_annotations[key] = (
image_anno[key][mask])
return img_filtered_annotations
def filter_infos_by_used_classes(infos, used_classes):
new_infos = []
for info in infos:
annos = info["annos"]
name_in_info = False
for name in used_classes:
if name in annos["name"]:
name_in_info = True
break
if name_in_info:
new_infos.append(info)
return new_infos
def remove_dontcare(image_anno):
img_filtered_annotations = {}
relevant_annotation_indices = [
i for i, x in enumerate(image_anno['name']) if x != "DontCare"
]
for key in image_anno.keys():
img_filtered_annotations[key] = (
image_anno[key][relevant_annotation_indices])
return img_filtered_annotations
def remove_low_height(image_anno, thresh):
img_filtered_annotations = {}
relevant_annotation_indices = [
i for i, s in enumerate(image_anno['bbox']) if (s[3] - s[1]) >= thresh
]
for key in image_anno.keys():
img_filtered_annotations[key] = (
image_anno[key][relevant_annotation_indices])
return img_filtered_annotations
def remove_low_score(image_anno, thresh):
img_filtered_annotations = {}
relevant_annotation_indices = [
i for i, s in enumerate(image_anno['score']) if s >= thresh
]
for key in image_anno.keys():
img_filtered_annotations[key] = (
image_anno[key][relevant_annotation_indices])
return img_filtered_annotations
def keep_arrays_by_name(gt_names, used_classes):
inds = [
i for i, x in enumerate(gt_names) if x in used_classes
]
inds = np.array(inds, dtype=np.int64)
return inds
def drop_arrays_by_name(gt_names, used_classes):
inds = [
i for i, x in enumerate(gt_names) if x not in used_classes
]
inds = np.array(inds, dtype=np.int64)
return inds
def apply_mask_(array_dict):
pass
def filter_kitti_anno(image_anno,
used_classes,
used_difficulty=None,
dontcare_iou=None):
if not isinstance(used_classes, (list, tuple, np.ndarray)):
used_classes = [used_classes]
img_filtered_annotations = {}
relevant_annotation_indices = [
i for i, x in enumerate(image_anno['name']) if x in used_classes
]
for key in image_anno.keys():
img_filtered_annotations[key] = (
image_anno[key][relevant_annotation_indices])
if used_difficulty is not None:
relevant_annotation_indices = [
i for i, x in enumerate(img_filtered_annotations['difficulty'])
if x in used_difficulty
]
for key in image_anno.keys():
img_filtered_annotations[key] = (
img_filtered_annotations[key][relevant_annotation_indices])
if 'DontCare' in used_classes and dontcare_iou is not None:
dont_care_indices = [
i for i, x in enumerate(img_filtered_annotations['name'])
if x == 'DontCare'
]
# bounding box format [y_min, x_min, y_max, x_max]
all_boxes = img_filtered_annotations['bbox']
ious = iou(all_boxes, all_boxes[dont_care_indices])
# Remove all bounding boxes that overlap with a dontcare region.
if ious.size > 0:
boxes_to_remove = np.amax(ious, axis=1) > dontcare_iou
for key in image_anno.keys():
img_filtered_annotations[key] = (img_filtered_annotations[key][
np.logical_not(boxes_to_remove)])
return img_filtered_annotations
def filter_annos_class(image_annos, used_class):
new_image_annos = []
for anno in image_annos:
img_filtered_annotations = {}
relevant_annotation_indices = [
i for i, x in enumerate(anno['name']) if x in used_class
]
for key in anno.keys():
img_filtered_annotations[key] = (
anno[key][relevant_annotation_indices])
new_image_annos.append(img_filtered_annotations)
return new_image_annos
def filter_annos_low_score(image_annos, thresh):
new_image_annos = []
for anno in image_annos:
img_filtered_annotations = {}
relevant_annotation_indices = [
i for i, s in enumerate(anno['score']) if s >= thresh
]
for key in anno.keys():
img_filtered_annotations[key] = (
anno[key][relevant_annotation_indices])
new_image_annos.append(img_filtered_annotations)
return new_image_annos
def filter_annos_difficulty(image_annos, used_difficulty):
new_image_annos = []
for anno in image_annos:
img_filtered_annotations = {}
relevant_annotation_indices = [
i for i, x in enumerate(anno['difficulty']) if x in used_difficulty
]
for key in anno.keys():
img_filtered_annotations[key] = (
anno[key][relevant_annotation_indices])
new_image_annos.append(img_filtered_annotations)
return new_image_annos
def filter_annos_low_height(image_annos, thresh):
new_image_annos = []
for anno in image_annos:
img_filtered_annotations = {}
relevant_annotation_indices = [
i for i, s in enumerate(anno['bbox']) if (s[3] - s[1]) >= thresh
]
for key in anno.keys():
img_filtered_annotations[key] = (
anno[key][relevant_annotation_indices])
new_image_annos.append(img_filtered_annotations)
return new_image_annos
def filter_empty_annos(image_annos):
new_image_annos = []
for anno in image_annos:
if anno["name"].shape[0] > 0:
new_image_annos.append(anno.copy())
return new_image_annos
def kitti_result_line(result_dict, precision=4):
prec_float = "{" + ":.{}f".format(precision) + "}"
res_line = []
all_field_default = OrderedDict([
('name', None),
('truncated', -1),
('occluded', -1),
('alpha', -10),
('bbox', None),
('dimensions', [-1, -1, -1]),
('location', [-1000, -1000, -1000]),
('rotation_y', -10),
('score', 0.0),
])
res_dict = [(key, None) for key, val in all_field_default.items()]
res_dict = OrderedDict(res_dict)
for key, val in result_dict.items():
if all_field_default[key] is None and val is None:
raise ValueError("you must specify a value for {}".format(key))
res_dict[key] = val
for key, val in res_dict.items():
if key == 'name':
res_line.append(val)
elif key in ['truncated', 'alpha', 'rotation_y', 'score']:
if val is None:
res_line.append(str(all_field_default[key]))
else:
res_line.append(prec_float.format(val))
elif key == 'occluded':
if val is None:
res_line.append(str(all_field_default[key]))
else:
res_line.append('{}'.format(val))
elif key in ['bbox', 'dimensions', 'location']:
if val is None:
res_line += [str(v) for v in all_field_default[key]]
else:
res_line += [prec_float.format(v) for v in val]
else:
raise ValueError("unknown key. supported key:{}".format(
res_dict.keys()))
return ' '.join(res_line)
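# Worked example (added comment):
#   kitti_result_line({'name': 'Car', 'bbox': [0.0, 0.0, 50.0, 50.0]})
# fills every other field with its default from all_field_default and yields
#   'Car -1 -1 -10 0.0000 0.0000 50.0000 50.0000 -1 -1 -1 -1000 -1000 -1000 -10 0.0'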
def annos_to_kitti_label(annos):
num_instance = len(annos["name"])
result_lines = []
for i in range(num_instance):
result_dict = {
'name': annos["name"][i],
'truncated': annos["truncated"][i],
'occluded': annos["occluded"][i],
'alpha':annos["alpha"][i],
'bbox': annos["bbox"][i],
'dimensions': annos["dimensions"][i],
'location': annos["location"][i],
'rotation_y': annos["rotation_y"][i],
'score': annos['score'][i]
}
line = kitti_result_line(result_dict)
result_lines.append(line)
return result_lines
def add_difficulty_to_annos(info):
min_height = [40, 25,
25] # minimum height for evaluated groundtruth/detections
max_occlusion = [
0, 1, 2
] # maximum occlusion level of the groundtruth used for evaluation
max_trunc = [
0.15, 0.3, 0.5
] # maximum truncation level of the groundtruth used for evaluation
annos = info['annos']
dims = annos['dimensions'] # lhw format
bbox = annos['bbox']
height = bbox[:, 3] - bbox[:, 1]
occlusion = annos['occluded']
truncation = annos['truncated']
diff = []
    easy_mask = np.ones((len(dims), ), dtype=bool)
    moderate_mask = np.ones((len(dims), ), dtype=bool)
    hard_mask = np.ones((len(dims), ), dtype=bool)
i = 0
for h, o, t in zip(height, occlusion, truncation):
if o > max_occlusion[0] or h <= min_height[0] or t > max_trunc[0]:
easy_mask[i] = False
if o > max_occlusion[1] or h <= min_height[1] or t > max_trunc[1]:
moderate_mask[i] = False
if o > max_occlusion[2] or h <= min_height[2] or t > max_trunc[2]:
hard_mask[i] = False
i += 1
is_easy = easy_mask
is_moderate = np.logical_xor(easy_mask, moderate_mask)
is_hard = np.logical_xor(hard_mask, moderate_mask)
for i in range(len(dims)):
if is_easy[i]:
diff.append(0)
elif is_moderate[i]:
diff.append(1)
elif is_hard[i]:
diff.append(2)
else:
diff.append(-1)
annos["difficulty"] = np.array(diff, np.int32)
return diff
def add_difficulty_to_annos_v2(info):
min_height = [40, 25,
25] # minimum height for evaluated groundtruth/detections
max_occlusion = [
0, 1, 2
] # maximum occlusion level of the groundtruth used for evaluation
max_trunc = [
0.15, 0.3, 0.5
] # maximum truncation level of the groundtruth used for evaluation
annos = info['annos']
dims = annos['dimensions'] # lhw format
bbox = annos['bbox']
height = bbox[:, 3] - bbox[:, 1]
occlusion = annos['occluded']
truncation = annos['truncated']
diff = []
    # Use element-wise numpy operators here; Python's `not`/`or` on arrays
    # would raise "the truth value of an array is ambiguous".
    easy_mask = ~((occlusion > max_occlusion[0]) | (height < min_height[0])
                  | (truncation > max_trunc[0]))
    moderate_mask = ~((occlusion > max_occlusion[1]) | (height < min_height[1])
                      | (truncation > max_trunc[1]))
    hard_mask = ~((occlusion > max_occlusion[2]) | (height < min_height[2])
                  | (truncation > max_trunc[2]))
is_easy = easy_mask
is_moderate = np.logical_xor(easy_mask, moderate_mask)
is_hard = np.logical_xor(hard_mask, moderate_mask)
for i in range(len(dims)):
if is_easy[i]:
diff.append(0)
elif is_moderate[i]:
diff.append(1)
elif is_hard[i]:
diff.append(2)
else:
diff.append(-1)
annos["difficulty"] = np.array(diff, np.int32)
return diff
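# Note (added comment): get_label_anno below expects standard KITTI label
# lines with 15 space-separated fields per object -- type, truncated,
# occluded, alpha, 2D bbox (4 values), dimensions h w l, location x y z,
# rotation_y -- plus an optional 16th score field for detection results.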
def get_label_anno(label_path):
annotations = {}
annotations.update({
'name': [],
'truncated': [],
'occluded': [],
'alpha': [],
'bbox': [],
'dimensions': [],
'location': [],
'rotation_y': []
})
with open(label_path, 'r') as f:
lines = f.readlines()
# if len(lines) == 0 or len(lines[0]) < 15:
# content = []
# else:
content = [line.strip().split(' ') for line in lines]
num_objects = len([x[0] for x in content if x[0] != 'DontCare'])
annotations['name'] = np.array([x[0] for x in content])
num_gt = len(annotations['name'])
annotations['truncated'] = np.array([float(x[1]) for x in content])
annotations['occluded'] = np.array([int(x[2]) for x in content])
annotations['alpha'] = np.array([float(x[3]) for x in content])
annotations['bbox'] = np.array(
[[float(info) for info in x[4:8]] for x in content]).reshape(-1, 4)
# dimensions will convert hwl format to standard lhw(camera) format.
annotations['dimensions'] = np.array(
[[float(info) for info in x[8:11]] for x in content]).reshape(
-1, 3)[:, [2, 0, 1]]
annotations['location'] = np.array(
[[float(info) for info in x[11:14]] for x in content]).reshape(-1, 3)
annotations['rotation_y'] = np.array(
[float(x[14]) for x in content]).reshape(-1)
if len(content) != 0 and len(content[0]) == 16: # have score
annotations['score'] = np.array([float(x[15]) for x in content])
else:
annotations['score'] = np.zeros((annotations['bbox'].shape[0], ))
index = list(range(num_objects)) + [-1] * (num_gt - num_objects)
annotations['index'] = np.array(index, dtype=np.int32)
annotations['group_ids'] = np.arange(num_gt, dtype=np.int32)
return annotations
def get_pseudo_label_anno():
annotations = {}
annotations.update({
'name': np.array(['Car']),
'truncated': np.array([0.0]),
'occluded': np.array([0]),
'alpha': np.array([0.0]),
'bbox': np.array([[0.1, 0.1, 15.0, 15.0]]),
'dimensions': np.array([[0.1, 0.1, 15.0, 15.0]]),
'location': np.array([[0.1, 0.1, 15.0]]),
'rotation_y': np.array([[0.1, 0.1, 15.0]])
})
return annotations
def get_start_result_anno():
annotations = {}
annotations.update({
'name': [],
'truncated': [],
'occluded': [],
'alpha': [],
'bbox': [],
'dimensions': [],
'location': [],
'rotation_y': [],
'score': [],
})
return annotations
def empty_result_anno():
annotations = {}
annotations.update({
'name': np.array([]),
'truncated': np.array([]),
'occluded': np.array([]),
'alpha': np.array([]),
'bbox': np.zeros([0, 4]),
'dimensions': np.zeros([0, 3]),
'location': np.zeros([0, 3]),
'rotation_y': np.array([]),
'score': np.array([]),
})
return annotations
def get_label_annos(label_folder, image_ids=None):
if image_ids is None:
filepaths = pathlib.Path(label_folder).glob('*.txt')
        prog = re.compile(r'^\d{6}\.txt$')
filepaths = filter(lambda f: prog.match(f.name), filepaths)
image_ids = [int(p.stem) for p in filepaths]
image_ids = sorted(image_ids)
if not isinstance(image_ids, list):
image_ids = list(range(image_ids))
annos = []
label_folder = pathlib.Path(label_folder)
for idx in image_ids:
image_idx_str = get_image_index_str(idx)
label_filename = label_folder / (image_idx_str + '.txt')
anno = get_label_anno(label_filename)
num_example = anno["name"].shape[0]
anno["image_idx"] = np.array([idx] * num_example, dtype=np.int64)
annos.append(anno)
return annos
def anno_to_rbboxes(anno):
loc = anno["location"]
dims = anno["dimensions"]
rots = anno["rotation_y"]
rbboxes = np.concatenate([loc, dims, rots[..., np.newaxis]], axis=1)
return rbboxes
|
py | b407a4aac79d4dd34eb63371a9c19006b82232dc | import json
from django.test.testcases import TestCase
from django.test.client import RequestFactory
from django.test.testcases import SimpleTestCase
from fakecouch import FakeCouchDb
from corehq.apps.users.models import WebUser
from corehq.apps.domain.models import Domain
from casexml.apps.case.models import CommCareCase
from corehq.apps.userreports.expressions import ExpressionFactory
from corehq.apps.userreports.filters.factory import FilterFactory
from corehq.apps.userreports.models import DataSourceConfiguration
from corehq.apps.userreports.specs import FactoryContext
from corehq.apps.users.models import CommCareUser
from couchforms.models import XFormInstance
import os
class ChampTestCase(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.factory = RequestFactory()
# gets created + removed in package level setup / teardown
domain = Domain.get_or_create_with_name('champ-cameroon')
domain.is_active = True
domain.save()
cls.domain = domain
cls.user = WebUser.create(domain.name, 'test', 'passwordtest')
        cls.user.is_authenticated = True
        cls.user.is_superuser = True
        cls.user.is_active = True
@classmethod
def tearDownClass(cls):
cls.user.delete()
super().tearDownClass()
class TestDataSourceExpressions(SimpleTestCase):
data_source_name = None
def get_expression(self, column_id, column_type):
column = self.get_column(column_id)
if column['type'] == 'boolean':
return FilterFactory.from_spec(
column['filter'],
context=FactoryContext(self.named_expressions, {})
)
else:
self.assertEqual(column['datatype'], column_type)
return ExpressionFactory.from_spec(
column['expression'],
context=FactoryContext(self.named_expressions, {})
)
@classmethod
def setUpClass(cls):
super(TestDataSourceExpressions, cls).setUpClass()
data_source_file = os.path.join(
os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir)),
'ucr_data_sources',
cls.data_source_name
)
with open(data_source_file, encoding='utf-8') as f:
cls.data_source = DataSourceConfiguration.wrap(json.loads(f.read())['config'])
cls.named_expressions = cls.data_source.named_expression_objects
def setUp(self):
self.database = FakeCouchDb()
self.case_orig_db = CommCareCase.get_db()
self.form_orig_db = XFormInstance.get_db()
self.user_orig_db = CommCareUser.get_db()
CommCareCase.set_db(self.database)
XFormInstance.set_db(self.database)
CommCareUser.set_db(self.database)
def tearDown(self):
CommCareCase.set_db(self.case_orig_db)
XFormInstance.set_db(self.form_orig_db)
CommCareUser.set_db(self.user_orig_db)
def get_column(self, column_id):
return [
ind
for ind in self.data_source.configured_indicators
if ind['column_id'] == column_id
][0]
|
py | b407a5857494e86a4da6f6cb21124dab19d3f4fc | MAX_PROFILES = 1e2
MIN_PROFILES = 1e1
TEMP_FOLDER = "s3_data/"
|
py | b407a69089feb50539bb5aca09ba31124f716f9d | import argparse
import time
# Initiate the parser
parser = argparse.ArgumentParser()
parser.add_argument("--model", "-m", help="show model", action="store_true")
parser.add_argument("--version", "-v", help="show program version", action="store_true")
parser.add_argument("--qwe", "-qwe", help="show program version", action="store_true")
# Read arguments from the command line
args = parser.parse_args()
# Check for --version or -v
if args.version:
time.sleep(3.4)
print("This is myprogram version 0.1")
# Check for --model or -m
if args.model:
print("This is model")
# Check for --qwe or -qwe
if args.qwe:
print("This is qwe") |
py | b407a793043a8a3e880d1925749a21ce13491cf9 | from userbot import CMD_LIST
from userbot.utils import admin_cmd
import io
import sys
from telethon import events, functions, __version__
@command(pattern="^.help ?(.*)")
#@borg.on(admin_cmd(pattern=r"help ?(.*)"))
async def cmd_list(event):
if not event.text[0].isalpha() and event.text[0] not in ("/", "#", "@", "!"):
tgbotusername = Var.TG_BOT_USER_NAME_BF_HER
input_str = event.pattern_match.group(1)
if tgbotusername is None or input_str == "text":
string = ""
for i in CMD_LIST:
string += "📌 " + i + "\n"
for iter_list in CMD_LIST[i]:
string += " `" + str(iter_list) + "`"
string += "\n"
string += "\n"
if len(string) > 4095:
with io.BytesIO(str.encode(string)) as out_file:
out_file.name = "cmd.txt"
await bot.send_file(
event.chat_id,
out_file,
force_document=True,
allow_cache=False,
caption="**COMMANDS**",
                        reply_to=event.reply_to_msg_id  # reply_to_id was undefined in this snippet
)
await event.delete()
else:
await event.edit(string)
elif input_str:
if input_str in CMD_LIST:
string = "Commands found in {}:".format(input_str)
for i in CMD_LIST[input_str]:
string += " " + i
string += "\n"
await event.edit(string)
else:
await event.edit(input_str + " is not a valid plugin!")
else:
help_string = """Userbot Helper.. \nProvided by [FRIDAY](https://github.com/leobrownlee/FRIDAY)\n`Userbot Helper to reveal all the commands`"""
results = await bot.inline_query( # pylint:disable=E0602
tgbotusername,
help_string
)
await results[0].click(
event.chat_id,
reply_to=event.reply_to_msg_id,
hide_via=True
)
await event.delete()
@borg.on(admin_cmd(pattern="syntax (.*)"))
async def _(event):
if event.fwd_from:
return
plugin_name = event.pattern_match.group(1)
if plugin_name in borg._plugins:
help_string = borg._plugins[plugin_name].__doc__
unload_string = f"Use `.unload {plugin_name}` to remove this plugin.\n © @UniBorg"
if help_string:
plugin_syntax = f"Syntax for plugin **{plugin_name}**:\n\n{help_string}\n{unload_string}"
else:
plugin_syntax = f"No DOCSTRING has been setup for {plugin_name} plugin."
else:
plugin_syntax = "Enter valid **Plugin** name.\nDo `.exec ls stdplugins` or `.helpme` to get list of valid plugin names."
await event.edit(plugin_syntax)
|
py | b407a85744b463de74474e9c779411722f6d77a2 | import unittest
import sys
import os
try:
from unittest import mock
except ImportError:
import mock
import voltverine.plugins
class TestNoShutdownFile(unittest.TestCase):
def test_no_shutdownfile_provided(self):
voltverine_plugin = voltverine.plugins.NoShutdownFile()
(action, info) = voltverine_plugin.analyze()
self.assertTrue(action == voltverine.plugins.DUNNO)
self.assertTrue(info == {})
def test_no_shutdownfile(self):
with mock.patch('os.path.exists', return_value=False):
voltverine_plugin = voltverine.plugins.NoShutdownFile('/nonexistant/path')
(action, info) = voltverine_plugin.analyze()
self.assertTrue(action == voltverine.plugins.OK)
self.assertTrue(info == {})
def test_shutdownfile(self):
with mock.patch('os.path.exists', return_value=True):
voltverine_plugin = voltverine.plugins.NoShutdownFile('/existant/path')
(action, info) = voltverine_plugin.analyze()
self.assertTrue(action == voltverine.plugins.NOT_OK)
self.assertTrue(info == {})
if __name__ == '__main__':
# avoid writing to stderr
unittest.main(testRunner=unittest.TextTestRunner(stream=sys.stdout, verbosity=2))
|
py | b407a8a643be7c047d93c21405cb7ae0295ca10f | #!/usr/bin/env python3
"""
Convert the X11 locale.alias file into a mapping dictionary suitable
for locale.py.
Written by Marc-Andre Lemburg <[email protected]>, 2004-12-10.
"""
import locale
import sys
_locale = locale
# Location of the X11 alias file.
LOCALE_ALIAS = '/usr/share/X11/locale/locale.alias'
# Location of the glibc SUPPORTED locales file.
SUPPORTED = '/usr/share/i18n/SUPPORTED'
def parse(filename):
with open(filename, encoding='latin1') as f:
lines = list(f)
# Remove mojibake in /usr/share/X11/locale/locale.alias.
# b'\xef\xbf\xbd' == '\ufffd'.encode('utf-8')
lines = [line for line in lines if '\xef\xbf\xbd' not in line]
data = {}
for line in lines:
line = line.strip()
if not line:
continue
if line[:1] == '#':
continue
locale, alias = line.split()
# Fix non-standard locale names, e.g. [email protected]
if '@' in alias:
alias_lang, _, alias_mod = alias.partition('@')
if '.' in alias_mod:
alias_mod, _, alias_enc = alias_mod.partition('.')
alias = alias_lang + '.' + alias_enc + '@' + alias_mod
# Strip ':'
if locale[-1] == ':':
locale = locale[:-1]
# Lower-case locale
locale = locale.lower()
# Ignore one letter locale mappings (except for 'c')
if len(locale) == 1 and locale != 'c':
continue
# Normalize encoding, if given
if '.' in locale:
lang, encoding = locale.split('.')[:2]
encoding = encoding.replace('-', '')
encoding = encoding.replace('_', '')
locale = lang + '.' + encoding
data[locale] = alias
return data
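# Illustrative example (added comment): an alias line such as
#   "en_US.utf8:  en_US.UTF-8"
# is parsed into the mapping 'en_us.utf8' -> 'en_US.UTF-8' (trailing ':'
# stripped, locale key lower-cased, encoding normalized).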
def parse_glibc_supported(filename):
with open(filename, encoding='latin1') as f:
lines = list(f)
data = {}
for line in lines:
line = line.strip()
if not line:
continue
if line[:1] == '#':
continue
line = line.replace('/', ' ').strip()
line = line.rstrip('\\').rstrip()
words = line.split()
if len(words) != 2:
continue
alias, alias_encoding = words
# Lower-case locale
locale = alias.lower()
# Normalize encoding, if given
if '.' in locale:
lang, encoding = locale.split('.')[:2]
encoding = encoding.replace('-', '')
encoding = encoding.replace('_', '')
locale = lang + '.' + encoding
# Add an encoding to alias
alias, _, modifier = alias.partition('@')
alias = _locale._replace_encoding(alias, alias_encoding)
if modifier and not (modifier == 'euro' and alias_encoding == 'ISO-8859-15'):
alias += '@' + modifier
data[locale] = alias
return data
def pprint(data):
items = sorted(data.items())
for k, v in items:
print(' %-40s%a,' % ('%a:' % k, v))
def print_differences(data, olddata):
items = sorted(olddata.items())
for k, v in items:
if k not in data:
print('# removed %a' % k)
elif olddata[k] != data[k]:
print('# updated %a -> %a to %a' % \
(k, olddata[k], data[k]))
# Additions are not mentioned
def optimize(data):
locale_alias = locale.locale_alias
locale.locale_alias = data.copy()
for k, v in data.items():
del locale.locale_alias[k]
if locale.normalize(k) != v:
locale.locale_alias[k] = v
newdata = locale.locale_alias
errors = check(data)
locale.locale_alias = locale_alias
if errors:
sys.exit(1)
return newdata
def check(data):
# Check that all alias definitions from the X11 file
# are actually mapped to the correct alias locales.
errors = 0
for k, v in data.items():
if locale.normalize(k) != v:
print('ERROR: %a -> %a != %a' % (k, locale.normalize(k), v),
file=sys.stderr)
errors += 1
return errors
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--locale-alias', default=LOCALE_ALIAS,
help='location of the X11 alias file '
'(default: %a)' % LOCALE_ALIAS)
parser.add_argument('--glibc-supported', default=SUPPORTED,
help='location of the glibc SUPPORTED locales file '
'(default: %a)' % SUPPORTED)
args = parser.parse_args()
data = locale.locale_alias.copy()
data.update(parse_glibc_supported(args.glibc_supported))
data.update(parse(args.locale_alias))
while True:
# Repeat optimization while the size is decreased.
n = len(data)
data = optimize(data)
if len(data) == n:
break
print_differences(data, locale.locale_alias)
print()
print('locale_alias = {')
pprint(data)
print('}')
|
py | b407a902de414b0ee78ffd91533817509a5d82c3 | #!/usr/bin/python3
__version__ = '0.0.11' # Time-stamp: <2021-10-26T07:53:12Z>
## Language: Japanese/UTF-8
"""Simulation Buddhism Prototype No.3 - Adultery
Adultery-related routines.
"""
##
## Author:
##
## JRF ( http://jrf.cocolog-nifty.com/statuses/ (in Japanese))
##
## License:
##
## The author is a Japanese.
##
## I intended this program to be public-domain, but you can treat
## this program under the (new) BSD-License or under the Artistic
## License, if it is convenient for you.
##
## Within three months after the release of this program, I
## especially admit responsibility of efforts for rational requests
## of correction to this program.
##
## I often have bouts of schizophrenia, but I believe that my
## intention is legitimately fulfilled.
##
import itertools
import math
import random
import numpy as np
import simbdp3.base as base
from simbdp3.base import ARGS, Person0, Economy0, EconomyPlot0
from simbdp3.common import np_clip, np_random_choice, Adultery, Marriage
class PersonAD (Person0):
def adultery_charm (self):
p = self
if p.marriage is None:
ma = 0
else:
no_child_years = None
if not p.marriage.children:
no_child_years = (p.economy.term - p.marriage.begin) / 12
if no_child_years is None:
ma = - 0.2
elif no_child_years < 5:
x = np_clip(no_child_years, 3, 5)
ma = ((-0.2 - 0) / (3 - 5)) \
* (x - 5) + 0
else:
x = np_clip(no_child_years, 5, 8)
ma = ((0.1 - 0) / (8 - 5)) \
* (x - 5) + 0
ma += - 0.1 * len(p.adulteries)
if p.sex == 'M':
suit = 0.2 * math.exp(- ((p.age - 24) / 5) ** 2)
else:
suit = 0.2 * math.exp(- ((p.age - 20) / 5) ** 2)
if p.sex == 'M':
pa = 0.1 * p.adult_success
else:
pa = 0.05 * p.adult_success
if p.sex == 'M':
ast = 0.3 * p.tmp_asset_rank
else:
if p.marriage is None and not p.adulteries:
ast = 0
else:
if p.marriage is not None:
x = p.relative_spouse_asset(p.marriage)
else:
x = max(list(map(lambda a: p.relative_spouse_asset(a),
p.adulteries)))
if x >= 1.1:
x = np_clip(x, 1.1, 3)
ast = ((- 0.1 - 0) / (3 - 1.1)) * (x - 1.1) + 0
else:
x = np_clip(x, 1/3, 1.1)
ast = ((0.1 - 0) / (1/3 - 1.1)) * (x - 1.1) + 0
ed = -0.3 * p.education
pr = -0.05 if p.in_priesthood() else 0
ij = -0.1 * p.injured
return np_clip(ma + suit + pa + ast + ed + pr + ij, 0.0, 1.0)
def adultery_favor (self, q):
p = self
if p.sex == 'M':
ast = 1.5 * q.tmp_asset_rank * (2 * abs(p.education - 0.5)
+ (1 - p.tmp_asset_rank)) / 2
ed = 0.5 * q.education \
+ 0.25 * math.exp(- ((q.education - 0.2 - p.education)
/ 0.2) ** 2)
x = np_clip(p.age, 12, 60)
t1 = ((5 - 2) / (60 - 12)) * (x - 12) + 2
t2 = ((10 - 2) / (60 - 12)) * (x - 12) + 2
t3 = ((7 - 2) / (60 - 12)) * (x - 12) + 2
same = math.exp(- ((q.age + t1 - p.age) / t2) ** 2)
suit = math.exp(- ((q.age - 24) / t3) ** 2)
if q.in_priesthood():
suit *= 0.8
ed2 = 1 if p.education < 0.5 else ((2 - 1) / 0.5)\
* (p.education - 0.5) + 1
age = max([ed2 * same, 2.5 * suit])
mar = -0.5 if p.marriage is None \
and q.marriage is not None else 0
ht = -2.0 * p.hating[q.id] if q.id in p.hating else 0
jl = -1.0 if q.in_jail() else 0
ij = -0.5 * q.injured
else:
ed1 = 0 if p.education > 0.5 else (0.5 - p.education) / 0.5
ast = 3 * q.tmp_asset_rank * (ed1 + (1 - p.tmp_asset_rank)) / 2
ed = 1 * q.education \
+ 0.25 * math.exp(- ((q.education + 0.2 - p.education)
/ 0.2) ** 2)
x = np_clip(p.age, 12, 60)
t1 = ((5 - 2) / (60 - 12)) * (x - 12) + 2
t2 = ((10 - 2) / (60 - 12)) * (x - 12) + 2
t3 = ((7 - 2) / (60 - 12)) * (x - 12) + 2
same = math.exp(- ((q.age - t1 - p.age) / t2) ** 2)
suit = math.exp(- ((q.age - 20) / t3) ** 2)
if q.in_priesthood():
suit *= 0.8
ed2 = 1.5 if p.education < 0.5 else ((2.5 - 1.5) / 0.5)\
* (p.education - 0.5) + 1.5
age = max([ed2 * same, 2 * suit])
mar = -1 if p.marriage is None and q.marriage is not None else 0
ht = -2.0 * p.hating[q.id] if q.id in p.hating else 0
jl = -1.0 if q.in_jail() else 0
ij = -0.5 * q.injured
return ed + ast + age + mar + ht + jl + ij + 4 * q.tmp_luck
def adultery_separability (self, adultery):
p = self
a = adultery
economy = p.economy
years = (economy.term - a.begin) / 12
x = np_clip(years, 0, 3)
q = ((0.1 - 1) / (3 - 0)) * (x - 0) + 1
hating = 0
rel_favor = 0
if a.spouse != '' and economy.is_living(a.spouse):
s = economy.people[a.spouse]
if p.id in s.hating:
hating = s.hating[p.id]
rel_favor = p.adultery_favor(s) - a.init_favor
rel_favor = np_clip(rel_favor, -5, 5)
ch = 0.5 if a.children else 1
ht = 1 + hating
x = rel_favor
if x > 0:
fv = ((0.5 - 1) / (5 - 0)) * (x - 0) + 1
else:
fv = ((2 - 1) / (-5 - 0)) * (x - 0) + 1
q = np_clip(q * ch * ht * fv, 0.05, 1)
return q ** ARGS.adultery_separability_mag
class EconomyPlotAD (EconomyPlot0):
def __init__ (self):
super().__init__()
self.options.update({
'adulteries': ('Adulteries', self.view_adulteries),
'adultery-separability':
('Ad Separability', self.view_adultery_separability),
'adultery-age-vs-years':
('Adultery age vs years', self.view_adultery_age_vs_years)
})
def view_adultery_age_vs_years (self, ax, economy):
m1 = []
m2 = []
for p in economy.people.values():
for a in p.adulteries:
m1.append(p.age - ((economy.term
- (a.true_begin or a.begin)) / 12))
m2.append((economy.term - (a.true_begin or a.begin)) / 12)
ax.scatter(m1, m2, c="pink", alpha=0.5)
def view_adulteries (self, ax, economy):
m = []
f = []
for p in economy.people.values():
if p.adulteries:
m.append(len(p.adulteries))
if p.sex == 'F':
f.append(len(p.adulteries))
ax.hist(m, bins=ARGS.bins)
print("Adulteries: %d %d" % (len(m), sum(m)))
#print("Female Adulteries: %d %d" % (len(f), sum(f)))
def view_adultery_separability (self, ax, economy):
x = []
l = []
for p in economy.people.values():
for a in p.adulteries:
x.append((economy.term - (a.true_begin or a.begin)) / 12)
l.append(p.adultery_separability(a))
ax.scatter(x, l, c="pink", alpha=0.5)
def choose_from_districts (m_district, f_district, m_choice_nums,
f_choice_nums,
external_rate_m, external_rate_f, duplicate=True):
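    """Pick adultery candidates for every district (added description).

    m_district/f_district are per-district lists of male/female candidates,
    m_choice_nums/f_choice_nums the number to draw for each district, and
    external_rate_m/f the fraction of each quota filled from other
    districts, split proportionally to their population.  Candidates are
    drawn without replacement, weighted by tmp_score; with duplicate=True a
    person is entered several times with decaying weight and so may be
    picked more than once.  Returns one (males, females) pool per district.
    """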
districts = len(m_district)
assert districts == len(f_district)
assert districts == len(m_choice_nums)
assert districts == len(f_choice_nums)
len_m_district = list(map(len, m_district))
len_f_district = list(map(len, f_district))
am = m_choice_nums
af = f_choice_nums
qm = [[0] * districts for i in range(districts)]
qf = [[0] * districts for i in range(districts)]
for district in range(districts):
aem = int(math.ceil(am[district] * external_rate_m))
aef = int(math.ceil(af[district] * external_rate_f))
lm1 = len_m_district[0:district] \
+ len_m_district[district + 1:districts]
s_lm1 = sum(lm1)
lf1 = len_f_district[0:district] \
+ len_f_district[district + 1:districts]
s_lf1 = sum(lf1)
for i in range(districts):
if i != district:
qm[district][i] = int(math.floor(aem * len_m_district[i]
/ s_lm1))
qf[district][i] = int(math.floor(aef * len_f_district[i]
/ s_lf1))
for i in range(districts):
qm[i][i] = am[i] - sum(qm[i])
qf[i][i] = af[i] - sum(qf[i])
qmt = np.array(qm).T
qft = np.array(qf).T
rm = [[[] for j in range(districts)] for i in range(districts)]
rf = [[[] for j in range(districts)] for i in range(districts)]
for district in range(districts):
l1 = []
l2 = []
for p in m_district[district]:
q = p.tmp_score
if q < 0.02:
q = 0.02
while q > 0:
l1.append(p)
l2.append(q)
if duplicate:
q = q - 0.1
else:
q = 0
l2 = np.array(l2).astype(np.longdouble)
l3 = np_random_choice(l1, size=sum(qmt[district]), replace=False,
p=l2/np.sum(l2))
random.shuffle(l3)
x = 0
for i in range(districts):
rm[i][district] = l3[x:x + qmt[district][i]]
x += qmt[district][i]
l1 = []
l2 = []
for p in f_district[district]:
q = p.tmp_score
if q < 0.02:
q = 0.02
while q > 0:
l1.append(p)
l2.append(q)
if duplicate:
q = q - 0.1
else:
q = 0
l2 = np.array(l2).astype(np.longdouble)
l3 = np_random_choice(l1, size=sum(qft[district]), replace=False,
p=l2/np.sum(l2))
random.shuffle(l3)
x = 0
for i in range(districts):
rf[i][district] = l3[x:x + qft[district][i]]
x += qft[district][i]
r = []
for i in range(districts):
m = []
f = []
for j in range(districts):
m += list(rm[i][j])
f += list(rf[i][j])
r.append((m, f))
return r
def choose_adulterers (economy):
districts = len(ARGS.population)
m_district = [[] for i in range(districts)]
f_district = [[] for i in range(districts)]
m_adulterers = [0] * districts
f_adulterers = [0] * districts
for p in economy.people.values():
if not p.is_dead():
if p.age >= 12 and (p.pregnancy is None
or economy.term - p.pregnancy.begin < 8) \
and not p.in_jail():
p.tmp_score = p.adultery_charm()
if p.sex == 'M':
m_district[p.district].append(p)
else:
f_district[p.district].append(p)
if p.adulteries:
if p.sex == 'M':
m_adulterers[p.district] += len(p.adulteries)
else:
f_adulterers[p.district] += len(p.adulteries)
am = [0] * districts
af = [0] * districts
for district in range(districts):
lm = len(m_district[district])
lf = len(f_district[district])
q = math.ceil((lf + lm) * ARGS.new_adultery_rate) \
- m_adulterers[district] - f_adulterers[district]
if q < 0:
q = 0
am[district] = int(q / 2)
af[district] = int(q / 2)
return choose_from_districts(m_district, f_district, am, af,
ARGS.external_adultery_rate_male,
ARGS.external_adultery_rate_female,
duplicate=True)
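# Note (added comment): match_favor below is a simple greedy matcher --
# every (male, female) pair is scored by the sum of their mutual favor
# values, pairs below the optional threshold are dropped, and the remaining
# pairs are accepted in descending order as long as both members are unused.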
def match_favor (male, female, favor_func, threshold=None):
l = [(m, f,
favor_func(male[m], female[f]), favor_func(female[f], male[m]))
for m, f in itertools.product(range(len(male)), range(len(female)))]
if threshold is not None:
l = [(m, f, fm, ff) for m, f, fm, ff in l
if fm >= threshold and ff >= threshold]
l = sorted(l, key=lambda x: x[2] + x[3], reverse=True)
n_m = 0
n_f = 0
mdone = [False] * len(male)
fdone = [False] * len(female)
i = 0
matches = []
for m, f, fm, ff in l:
if not (n_m < len(male) and n_f < len(female)):
break
if (not mdone[m]) and (not fdone[f]):
mdone[m] = True
fdone[f] = True
n_m += 1
n_f += 1
matches.append((male[m], female[f]))
return matches
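# Note (added comment): update_adultery_hating below adjusts hatred between
# the partners and their spouses when an adultery starts or ends (and, in
# the rape case, may raise the male partner's karma), then increments or
# decrements adult_success depending on whether new resentment arose.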
def update_adultery_hating (economy, person, adultery, rape=False):
p = person
a = adultery
success = True
if p.sex == 'M':
if a.spouse == '' or not economy.is_living(a.spouse):
if a.begin == a.end:
hating = random.random() < 0.1
else:
hating = random.random() < 0.5
if hating:
success = False
p.add_hating('', 0.5)
else:
ex_hating = 0.0
if rape:
ex_hating = 0.25
s = economy.people[a.spouse]
if a.begin == a.end:
hating = random.random() < 0.1
else:
hating = random.random() < 0.5
if hating:
success = False
p.add_hating(s.id, 0.5)
if s.marriage is not None \
and economy.is_living(s.marriage.spouse):
ss = economy.people[s.marriage.spouse]
if s.pregnancy is not None:
hating = 0.8
elif a.children:
hating = 0.7
else:
hating = 0.5
hated = random.random() < 0.3
if hated:
known = random.random() < 0.3
if known:
success = False
ss.add_hating(p.id, hating + ex_hating)
else:
ss.add_hating('', hating + ex_hating)
for a in s.adulteries:
if economy.is_living(a.spouse):
ss = economy.people[a.spouse]
hating = 0.4
hated = random.random() < 0.15
if hated:
known = random.random() < 0.3
if known:
success = False
ss.add_hating(p.id, hating + ex_hating)
else:
ss.add_hating('', hating + ex_hating)
else: # p.sex == 'F':
ex_hating = 0.0
if rape:
ex_hating = 0.25
if a.spouse == '' or not economy.is_living(a.spouse):
if a.begin == a.end:
hating = rape or random.random() < 0.2
else:
hating = random.random() < 0.5
if hating:
success = False
p.add_hating('', 0.5 + ex_hating)
else:
s = economy.people[a.spouse]
if a.begin == a.end:
hating = rape or random.random() < 0.2
if rape and random.random() < 0.5:
s.karma = max([s.karma, 0.25]) \
+ 0.1 * min([s.karma, 0.25])
s.karma = np_clip(s.karma, 0.0, 1.0)
else:
hating = random.random() < 0.5
if hating:
success = False
p.add_hating(s.id, 0.5 + ex_hating)
if s.marriage is not None \
and economy.is_living(s.marriage.spouse):
ss = economy.people[s.marriage.spouse]
if p.pregnancy:
hating = 0.6
elif a.children:
hating = 0.5
else:
hating = 0.5
hated = random.random() < 0.5
if hated:
known = random.random() < 0.7
if known:
success = False
ss.add_hating(p.id, hating)
else:
ss.add_hating('', hating)
for a in s.adulteries:
if economy.is_living(a.spouse):
ss = economy.people[a.spouse]
hating = 0.4
hated = random.random() < 0.15
if hated:
known = random.random() < 0.7
if known:
success = False
ss.add_hating(p.id, hating)
else:
ss.add_hating('', hating)
if success:
p.adult_success += 1
else:
p.adult_success -= 1
if p.adult_success < 0:
p.adult_success = 0
def remove_some_new_adulteries (economy, matches):
n_p = 0
l1 = list(range(len(matches)))
l2 = map(lambda m: m[0].adultery_favor(m[1])
+ m[1].adultery_favor(m[0]) + 5, matches)
l2 = list(map(lambda m: m if m > 1 else 1, l2))
n = int(len(matches) * (1 - ARGS.new_adultery_reduce))
l2 = np.array(l2).astype(np.longdouble)
l3 = np_random_choice(l1, size=n, replace=False,
p=l2/np.sum(l2))
s3 = set(l3)
for i in l1:
m = matches[i][0]
f = matches[i][1]
ex = False
for a in m.adulteries:
if a.spouse == f.id:
ex = True
break
if ex:
continue
am = Adultery()
af = Adultery()
am.spouse = f.id
am.init_favor = m.adultery_favor(f)
am.begin = economy.term
af.spouse = m.id
af.init_favor = f.adultery_favor(m)
af.begin = economy.term
        # # The following check is skipped because it is time-consuming.
# amt = []
# aft = []
# for x in m.trash:
# if isinstance(x, Marriage) or isinstance(x, Adultery):
# if x.spouse == f.id:
# amt.append(x.end - x.begin)
# for x in f.trash:
# if isinstance(x, Marriage) or isinstance(x, Adultery):
# if x.spouse == m.id:
# aft.append(x.end - x.begin)
# if amt:
# am.true_begin = am.begin
# am.begin -= math.floor(max(amt) / 2)
# if aft:
# af.true_begin = af.begin
# af.begin -= math.floor(max(aft) / 2)
if i in s3:
m.adulteries.append(am)
f.adulteries.append(af)
else:
am.end = economy.term
af.end = economy.term
m.trash.append(am)
f.trash.append(af)
rape = random.random() < 0.01
update_adultery_hating(economy, m, am, rape)
update_adultery_hating(economy, f, af, rape)
if m.fertility != 0 and f.fertility != 0 and f.pregnancy is None:
ft = (m.fertility + f.fertility) / 2
if random.random() < ARGS.new_adulteries_pregnant_rate \
* (ft ** ARGS.new_adulteries_pregnant_mag):
f.get_pregnant(af)
n_p += 1
print("Adultery Pregnancy 1:", n_p)
def reboot_some_adulteries (economy):
rebooting = 0
for p in economy.people.values():
if p.is_dead():
continue
reboot_rate = ARGS.adultery_reboot_rate
if p.marriage is not None or p.adulteries:
reboot_rate = ARGS.with_spouse_adultery_reboot_rate
if random.random() < reboot_rate:
rellist = [x for x in p.trash
if (isinstance(x, Marriage) or isinstance(x, Adultery))]
if not rellist:
continue
l2 = [0.1 + math.log(1 + (x.end - x.begin) / 12) \
* np_clip(x.init_favor, 0, 10) for x in rellist]
l2 = np.array(l2).astype(np.longdouble)
y = np_random_choice(rellist, 1, replace=False,
p=l2/np.sum(l2))[0]
if y.spouse == '' or not economy.is_living(y.spouse):
continue
s = economy.people[y.spouse]
if s.marriage is not None or s.adulteries:
ex = False
for x1 in [s.marriage] + s.adulteries:
if x1 is None:
continue
if x1.spouse == p.id:
ex = True
break
if ex:
continue
if random.random() < 0.5:
continue
rebooting += 1
a1 = Adultery()
a2 = Adultery()
p.adulteries.append(a1)
s.adulteries.append(a2)
a1.spouse = s.id
a1.init_favor = p.adultery_favor(s)
a1.begin = economy.term
a2.spouse = p.id
a2.init_favor = s.adultery_favor(p)
a2.begin = economy.term
a1t = []
a2t = []
for x in p.trash:
if isinstance(x, Marriage) or isinstance(x, Adultery):
if x.spouse == s.id:
a1t.append(x.end - x.begin)
for x in s.trash:
if isinstance(x, Marriage) or isinstance(x, Adultery):
if x.spouse == p.id:
a2t.append(x.end - x.begin)
if a1t:
a1.true_begin = a1.begin
a1.begin -= math.floor(max(a1t) / 2)
if a2t:
a2.true_begin = a2.begin
a2.begin -= math.floor(max(a2t) / 2)
print("Reboot:", rebooting)
def get_pregnant_adulteries (economy):
n_u = 0
n_i = 0
for p in economy.people.values():
if not p.is_dead() and p.sex == 'F' and p.pregnancy is None:
for a in p.adulteries:
wc = p.want_child(a)
if a.spouse == '' or not economy.is_living(a.spouse):
ft = random.random()
if ft < 0.1:
ft = 0
else:
ft = economy.people[a.spouse].fertility
if p.fertility != 0 and ft != 0 and p.pregnancy is None:
ft = (p.fertility + ft) / 2
if wc and p.pregnancy_wait is None:
if random.random() < ARGS.intended_pregnant_rate \
* (ft ** ARGS.intended_pregnant_mag):
p.get_pregnant(a)
n_i += 1
break
else:
if random.random() < ARGS.unintended_pregnant_rate \
* (ft ** ARGS.unintended_pregnant_mag):
p.get_pregnant(a)
n_u += 1
break
print("Adultery Pregnancy 2:", n_i, n_u)
def remove_some_adulteries (economy):
    lamu = []  # list of adulteries of men whose partner is unknown
    laf = []   # list of adulteries of women, whether or not the partner is unknown
n_m = 0
n_f = 0
for p in economy.people.values():
if not p.is_dead():
if p.age >= 12 and (p.pregnancy is None
or economy.term - p.pregnancy.begin < 8):
if p.sex == 'M':
n_m += 1
else:
n_f += 1
if p.adulteries:
if p.sex == 'F':
laf.extend([(p, a) for a in p.adulteries])
else:
lamu.extend([(p, a) for a in p.adulteries
if a.spouse == ''])
l1 = list(range(len(laf)))
l2 = list(map(lambda x: x[0].adultery_separability(x[1]), laf))
n = math.floor(n_f * ARGS.adultery_rate)
if n > len(l1):
n = len(l1)
l2 = np.array(l2).astype(np.longdouble)
l3 = np_random_choice(l1, len(l1) - n, replace=False,
p=l2/np.sum(l2))
n_u = 0
for i in l3:
p, a = laf[i]
a.end = economy.term
p.adulteries.remove(a)
p.trash.append(a)
update_adultery_hating(economy, p, a)
if a.spouse == '' or not economy.is_living(a.spouse):
n_u += 1
else:
s = economy.people[a.spouse]
sa = [a for a in s.adulteries if a.spouse == p.id][0]
sa.end = economy.term
s.adulteries.remove(sa)
s.trash.append(sa)
update_adultery_hating(economy, s, sa)
l1 = list(range(len(lamu)))
l2 = list(map(lambda x: x[0].adultery_separability(x[1]), lamu))
if n_u > len(l1):
n_u = len(l1)
l2 = np.array(l2).astype(np.longdouble)
l3 = np_random_choice(l1, n_u, replace=False,
p=l2/np.sum(l2))
for i in l3:
p, a = lamu[i]
a.end = economy.term
p.adulteries.remove(a)
p.trash.append(a)
update_adultery_hating(economy, p, a)
def update_adulteries (economy):
print("\nAdulteries:...", flush=True)
print("Choosing...", flush=True)
    # Compute tmp_asset_rank used for adultery matching
domrank = {
None: 0,
'cavalier': 1 - 0.30,
'vassal': 1 - 0.10,
'governor': 1 - 0.10,
'king': 1 - 0.05
}
l = sorted(economy.people.values(), key=lambda p: p.consumption,
reverse=True)
s = len(l)
for i in range(len(l)):
p = l[i]
p.tmp_asset_rank = (s - i) / s
if p.dominator_position is not None:
p.tmp_asset_rank = max([p.tmp_asset_rank,
domrank[p.dominator_position]])
    # Compute the luck factor used for adultery matching
for p in economy.people.values():
p.tmp_luck = random.random()
adulterers = choose_adulterers(economy)
print("Matching...", flush=True)
matches = []
for m, f in adulterers:
matches.append(match_favor(m, f, lambda p, q: p.adultery_favor(q)))
print("...", flush=True)
print("Matches:", [len(l) for l in matches], flush=True)
m0 = matches[0]
matches = sum(matches, [])
# if len(m0) >= 10:
# print("Match Samples:", flush=True)
# for i in range(0, 10):
# print(m0[i][0], m0[i][1],
# m0[i][0].adultery_favor(m0[i][1]),
# m0[i][1].adultery_favor(m0[i][0]))
# print("...")
# for i in range(len(m0) - 10, len(m0)):
# print(m0[i][0], m0[i][1],
# m0[i][0].adultery_favor(m0[i][1]),
# m0[i][1].adultery_favor(m0[i][0]))
for p in economy.people.values():
p.tmp_luck = 0
print("Updating...", flush=True)
remove_some_new_adulteries(economy, matches)
reboot_some_adulteries(economy)
get_pregnant_adulteries(economy)
remove_some_adulteries(economy)
|
py | b407aaa7f6075c9935c9f6f8a5ee64ec7c347ee9 | #=============================================================================
# Copyright (c) 2017 Paul Fultz II
# conf.py
# Distributed under the Boost Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#==============================================================================
# -*- coding: utf-8 -*-
#
# Boost.HigherOrderFunctions documentation build configuration file, created by
# sphinx-quickstart on Thu Jun 2 00:33:55 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
from recommonmark.parser import CommonMarkParser
from recommonmark.transform import AutoStructify
import sphinx_boost
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
# 'sphinx.ext.autodoc',
# 'sphinx.ext.githubpages',
'sphinx.ext.autosectionlabel',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
source_parsers = {
'.md': CommonMarkParser,
'.hpp': CommonMarkParser
}
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = ['.rst', '.md', '.hpp']
# source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'doc/index'
# General information about the project.
project = u'Boost.HigherOrderFunctions'
copyright = u'2016, Paul Fultz II'
author = u'Paul Fultz II'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
def parse_version():
lines = open('../include/boost/hof/version.hpp').readlines()
defines = ['BOOST_HOF_VERSION_MAJOR', 'BOOST_HOF_VERSION_MINOR', 'BOOST_HOF_VERSION_PATCH']
versions = ['0','0','0']
for line in lines:
for di, define in enumerate(defines):
needle = '#define ' + define
i = line.find(needle)
if i >= 0:
versions[di] = line[i+len(needle):].strip()
final_version = versions[0] + '.' + versions[1]
if versions[2] != '0': final_version = final_version + '.' + versions[2]
return final_version
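# Illustrative example (added comment): if version.hpp defines
#   BOOST_HOF_VERSION_MAJOR 1, BOOST_HOF_VERSION_MINOR 2 and
#   BOOST_HOF_VERSION_PATCH 0, parse_version() returns '1.2'; a non-zero
# patch value would be appended as a third component.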
# The short X.Y version.
version = parse_version()
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = [
'_build',
'Thumbs.db',
'.DS_Store',
'README.md',
'**/alias.hpp',
'**/config.hpp',
'**/hof.hpp',
'**/static_def.hpp',
'**/test.hpp',
'**/detail/*.hpp',
]
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
highlight_language = 'cpp'
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'tango'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = 'boost'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_boost.get_html_theme_path()]
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'Boost.HigherOrderFunctions v1.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = 'boost-proposed.png'
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
html_additional_pages = {
'index': 'redirect.html',
}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
html_show_sourcelink = True
html_copy_source = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Boost.HigherOrderFunctionsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Boost.HigherOrderFunctions.tex', u'Boost.HigherOrderFunctions Documentation',
u'Paul Fultz II', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'Boost.HigherOrderFunctions', u'Boost.HigherOrderFunctions Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Boost.HigherOrderFunctions', u'Boost.HigherOrderFunctions Documentation',
author, 'Boost.HigherOrderFunctions', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
nitpicky = True
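# Generator used when extracting docs from headers: passes lines through
# unchanged and, right after the title underline ('==...'), injects a
# "Header" section showing the #include path of the file being documented.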
def insert_header(lines, f):
for line in lines:
yield line
if line.startswith('=='):
yield ""
yield "Header"
yield "------"
yield ""
yield " #include <{0}>".format(f)
yield ""
extract_prefix = '/// '
include_dir = os.path.abspath('../include/')
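# Sphinx 'source-read' hook: for .hpp sources, keep only the lines carrying
# the '/// ' documentation prefix (with the prefix stripped) and prepend the
# Header section via insert_header, using the path relative to include/.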
def extract_doc(app, docname, source):
path = app.env.doc2path(docname)
if path.endswith('.hpp'):
lines = source[0].split('\n')
md = [line[len(extract_prefix):] for line in lines if line.startswith(extract_prefix)]
source[0] = '\n'.join(insert_header(md, os.path.relpath(path, include_dir)))
# app setup hook
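# Moves srcdir one level up so headers under include/ are found, registers the
# recommonmark configuration and AutoStructify transform, and connects the
# extract_doc hook to the 'source-read' event.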
def setup(app):
app.srcdir = os.path.abspath(os.path.join(app.srcdir, os.pardir))
app.add_config_value('recommonmark_config', {
'enable_eval_rst': True,
# 'enable_auto_doc_ref': True,
'commonmark_suffixes': ['.md', '.hpp'],
}, True)
app.add_transform(AutoStructify)
app.connect('source-read', extract_doc)
|
py | b407aaf0557999b5e984968ef01edd69f6bdf379 | import pytest
import numpy as np
from numpy.testing import assert_allclose
from bidu import backend as K
from bidu.models import Sequential
from bidu.layers import convolutional_recurrent
from bidu.utils.test_utils import layer_test
from bidu import regularizers
def test_recurrent_convolutional():
nb_row = 3
nb_col = 3
nb_filter = 5
nb_samples = 2
input_channel = 2
input_nb_row = 5
input_nb_col = 5
sequence_len = 2
for dim_ordering in ['th', 'tf']:
if dim_ordering == 'th':
input = np.random.rand(nb_samples, sequence_len,
input_channel,
input_nb_row, input_nb_col)
else: # tf
input = np.random.rand(nb_samples, sequence_len,
input_nb_row, input_nb_col,
input_channel)
for return_sequences in [True, False]:
            # test for output shape:
output = layer_test(convolutional_recurrent.ConvLSTM2D,
kwargs={'dim_ordering': dim_ordering,
'return_sequences': return_sequences,
'nb_filter': nb_filter,
'nb_row': nb_row,
'nb_col': nb_col,
'border_mode': "same"},
input_shape=input.shape)
output_shape = [nb_samples, input_nb_row, input_nb_col]
if dim_ordering == 'th':
output_shape.insert(1, nb_filter)
else:
output_shape.insert(3, nb_filter)
if return_sequences:
output_shape.insert(1, sequence_len)
assert output.shape == tuple(output_shape)
# No need to check statefulness for both
if dim_ordering == 'th' or return_sequences:
continue
# Tests for statefulness
model = Sequential()
kwargs = {'dim_ordering': dim_ordering,
'return_sequences': return_sequences,
'nb_filter': nb_filter,
'nb_row': nb_row,
'nb_col': nb_col,
'stateful': True,
'batch_input_shape': input.shape,
'border_mode': "same"}
layer = convolutional_recurrent.ConvLSTM2D(**kwargs)
model.add(layer)
model.compile(optimizer='sgd', loss='mse')
out1 = model.predict(np.ones_like(input))
assert(out1.shape == tuple(output_shape))
# train once so that the states change
model.train_on_batch(np.ones_like(input),
np.ones_like(output))
out2 = model.predict(np.ones_like(input))
# if the state is not reset, output should be different
assert(out1.max() != out2.max())
# check that output changes after states are reset
# (even though the model itself didn't change)
layer.reset_states()
out3 = model.predict(np.ones_like(input))
assert(out2.max() != out3.max())
# check that container-level reset_states() works
model.reset_states()
out4 = model.predict(np.ones_like(input))
assert_allclose(out3, out4, atol=1e-5)
# check that the call to `predict` updated the states
out5 = model.predict(np.ones_like(input))
assert(out4.max() != out5.max())
# check regularizers
kwargs = {'dim_ordering': dim_ordering,
'return_sequences': return_sequences,
'nb_filter': nb_filter,
'nb_row': nb_row,
'nb_col': nb_col,
'stateful': True,
'batch_input_shape': input.shape,
'W_regularizer': regularizers.WeightRegularizer(l1=0.01),
'U_regularizer': regularizers.WeightRegularizer(l1=0.01),
'b_regularizer': 'l2',
'border_mode': "same"}
layer = convolutional_recurrent.ConvLSTM2D(**kwargs)
layer.build(input.shape)
output = layer(K.variable(np.ones(input.shape)))
K.eval(output)
# check dropout
layer_test(convolutional_recurrent.ConvLSTM2D,
kwargs={'dim_ordering': dim_ordering,
'return_sequences': return_sequences,
'nb_filter': nb_filter,
'nb_row': nb_row,
'nb_col': nb_col,
'border_mode': "same",
'dropout_W': 0.1,
'dropout_U': 0.1},
input_shape=input.shape)
if __name__ == '__main__':
pytest.main([__file__])
|
py | b407ab1dfb587340426ea668331cbed77f42c3a3 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import json
import requests
PROTOCOL = 'http' # or https
RVISION = '127.0.0.1' # ip or hostname
USERNAME = 'admin' # any user login
PASSWORD = 'admin'
requests.packages.urllib3.disable_warnings()
s = requests.Session()
# Log in
login = s.post(
PROTOCOL + '://' + RVISION + '/login',
data={
'username': USERNAME,
'password': PASSWORD
},
verify=False
)
loginResult = login.json()
# Apply pagination and a limit
orgParams = {
    'page': 1, # Pagination: page number
    'start': 0, # Pagination: index of the element to start the search from
    'limit': 50 # How many items to return in total
}
# Next page
# orgParams = {
# 'page': 2,
# 'start': 10,
# 'limit': 10
# }
# Return everything (may take a very long time)
# orgParams = {
# 'page': 1,
# 'start': 0,
# 'limit': 999999 # The final count may be limited by the license
# }
org = s.get(
PROTOCOL + '://' + RVISION + '/api/v1/am/organization',
params=orgParams,
verify=False
)
orgResult = org.json()
print json.dumps(orgResult, indent=2, sort_keys=True, ensure_ascii=False)
|
py | b407ab4d90c605d514bc4a1785152b244c2d6b22 | import os
stream_framework_DEFAULT_KEYSPACE = 'test_stream_framework'
if os.environ.get('TEST_CASSANDRA_HOST'):
stream_framework_CASSANDRA_HOSTS = [os.environ['TEST_CASSANDRA_HOST']]
SECRET_KEY = 'ib_^kc#v536)v$x!h3*#xs6&l8&7#4cqi^rjhczu85l9txbz+w'
stream_framework_DISCOVER_CASSANDRA_NODES = False
stream_framework_CASSANDRA_CONSITENCY_LEVEL = 'ONE'
stream_framework_REDIS_CONFIG = {
'default': {
'host': '127.0.0.1',
'port': 6379,
'db': 0,
'password': None
},
}
|
py | b407abef30a56b734f4e01d8484ab72d27e1cfa4 | #!/usr/local/bin/python
# -*- coding: iso-8859-1 -*-
# $Id: rfc3454.py,v 1.1.1.1 2011/06/10 09:34:42 andrew Exp $
# Copyright (c) 2004 Kungliga Tekniska Högskolan
# (Royal Institute of Technology, Stockholm, Sweden).
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the Institute nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
import re
import string
def read(filename):
"""return a dict of tables from rfc3454"""
f = open(filename, 'r')
inTable = False
ret = {}
while True:
l = f.readline()
if not l:
break
if inTable:
            m = re.search(r'^ *----- End Table ([A-Z0-9\.]+) ----- *$', l)
if m:
ret[m.group(1)] = t
inTable = False
else:
t.append(l)
        if re.search(r'^ *----- Start Table ([A-Z0-9\.]+) ----- *$', l):
inTable = True
t = []
f.close()
return ret
|
py | b407ac22df5587d433156621b9b8ce46a12c5c4e | from string import printable
from sys import stderr
from fileinput import input
from argparse import ArgumentParser
from pathlib import Path
from time import sleep
from urllib.request import urlopen, Request
from urllib.error import URLError
from urllib.parse import urlparse, quote, ParseResult
from typing import Union, Any
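# Issue a HEAD request with a browser-like User-Agent and return the HTTP
# status code as a string; returns "0" when the request fails without an
# HTTP status (e.g. connection errors).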
def process(url: str) -> str:
req = Request(url=quote(url, safe=printable), method="HEAD")
req.add_header(key="User-Agent", val="Mozilla/5.0 (Windows NT 10.0; rv:91.0) Gecko/20100101 Firefox/91.0")
code = 0
try:
with urlopen(url=req, timeout=4) as response:
code = response.status
except URLError as e:
if hasattr(e, 'code'):
code = e.code
elif hasattr(e, 'reason'):
print("Processing error: " + url + " -> " + str(e.reason), file=stderr)
return str(code)
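# Return the parsed URL when the scheme is http or https, otherwise False.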
def check_url(url: str) -> Union[ParseResult, bool]:
u = urlparse(url)
return u if u.scheme in ["http", "https"] else False
def check_code(code: str, codes: set[str], exclude: bool) -> bool:
return (code not in codes) if exclude \
else (code in codes)
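# Read URLs line by line from a file (or stdin), issue HEAD requests with the
# configured delay, and print the URLs whose status code passes the
# include/exclude filter; processing stops at the first line that is not an
# http(s) URL.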
def process_file(file: str, codes: set[str], exclude: bool, delay: int):
try:
with input(file) as f:
for line in f:
url = check_url(line.strip())
if not url:
break
sleep(delay)
code = process(url.geturl())
if check_code(code, codes, exclude):
print(url.geturl())
except PermissionError as e:
print("Permission denied: " + e.filename, file=stderr)
def process_files(params: dict[str, Any]):
for file in params['files']:
if Path(file).is_file() or file == '-':
process_file(file, params['codes'], params['exclude'], params['delay'])
if __name__ == '__main__':
parser = ArgumentParser(description="Performs HEAD HTTP requests to URLs from a list "
"and filters URLs by specified HTTP status code(s). "
"A file should contain one URL per line.")
parser.add_argument("files", metavar="file", default="-", nargs="+", help="Files to process or '-' for stdin.")
parser.add_argument("-d", "--delay", metavar="delay", dest="delay", default=0, type=int,
help="Delay between requests in seconds. Default is 0.")
cgroup = parser.add_mutually_exclusive_group()
cgroup.add_argument("-c", "--http-codes", metavar="codes", dest="http_codes", default="200",
help="HTTP status codes to filter. Default is 200. Several codes can be separated by commas.")
cgroup.add_argument("-nc", "--no-http-codes", metavar="codes", dest="no_http_codes",
help="HTTP status codes to exclude. Several codes can be separated by commas.")
args = parser.parse_args()
if args.no_http_codes is None:
config = {"codes": set(args.http_codes.split(sep=',')), "exclude": False,
"delay": args.delay, "files": set(args.files)}
else:
config = {"codes": set(args.no_http_codes.split(sep=',')), "exclude": True,
"delay": args.delay, "files": set(args.files)}
process_files(config)
|
py | b407ac56ed27a0299ca3bd463df144b5e4eb87b2 | # Generated by Django 2.0.2 on 2018-06-06 23:09
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('seating', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='seating',
name='table',
),
]
|
py | b407ac8c55113c27b8af25f02fe43500bcfbe4ce | from __future__ import absolute_import, unicode_literals
import datetime
from django import forms
from django.db.models.fields import BLANK_CHOICE_DASH
from django.template.loader import render_to_string
from django.utils import six
from django.utils.encoding import force_text
from django.utils.dateparse import parse_date, parse_time, parse_datetime
from django.utils.functional import cached_property
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from wagtail.wagtailcore.rich_text import RichText
from .base import Block
class FieldBlock(Block):
"""A block that wraps a Django form field"""
class Meta:
default = None
def id_for_label(self, prefix):
return self.field.widget.id_for_label(prefix)
def render_form(self, value, prefix='', errors=None):
widget = self.field.widget
widget_attrs = {'id': prefix, 'placeholder': self.label}
field_value = self.value_for_form(value)
if hasattr(widget, 'render_with_errors'):
widget_html = widget.render_with_errors(prefix, field_value, attrs=widget_attrs, errors=errors)
widget_has_rendered_errors = True
else:
widget_html = widget.render(prefix, field_value, attrs=widget_attrs)
widget_has_rendered_errors = False
return render_to_string('wagtailadmin/block_forms/field.html', {
'name': self.name,
'classes': self.meta.classname,
'widget': widget_html,
'field': self.field,
'errors': errors if (not widget_has_rendered_errors) else None
})
def value_from_form(self, value):
"""
The value that we get back from the form field might not be the type
that this block works with natively; for example, the block may want to
wrap a simple value such as a string in an object that provides a fancy
HTML rendering (e.g. EmbedBlock).
We therefore provide this method to perform any necessary conversion
from the form field value to the block's native value. As standard,
this returns the form field value unchanged.
"""
return value
def value_for_form(self, value):
"""
Reverse of value_from_form; convert a value of this block's native value type
to one that can be rendered by the form field
"""
return value
def value_from_datadict(self, data, files, prefix):
return self.value_from_form(self.field.widget.value_from_datadict(data, files, prefix))
def clean(self, value):
# We need an annoying value_for_form -> value_from_form round trip here to account for
# the possibility that the form field is set up to validate a different value type to
# the one this block works with natively
return self.value_from_form(self.field.clean(self.value_for_form(value)))
class CharBlock(FieldBlock):
def __init__(self, required=True, help_text=None, max_length=None, min_length=None, **kwargs):
# CharField's 'label' and 'initial' parameters are not exposed, as Block handles that functionality natively
# (via 'label' and 'default')
self.field = forms.CharField(
required=required,
help_text=help_text,
max_length=max_length,
min_length=min_length
)
super(CharBlock, self).__init__(**kwargs)
def get_searchable_content(self, value):
return [force_text(value)]
class TextBlock(FieldBlock):
def __init__(self, required=True, help_text=None, rows=1, max_length=None, min_length=None, **kwargs):
self.field_options = {
'required': required,
'help_text': help_text,
'max_length': max_length,
'min_length': min_length
}
self.rows = rows
super(TextBlock, self).__init__(**kwargs)
@cached_property
def field(self):
from wagtail.wagtailadmin.widgets import AdminAutoHeightTextInput
field_kwargs = {'widget': AdminAutoHeightTextInput(attrs={'rows': self.rows})}
field_kwargs.update(self.field_options)
return forms.CharField(**field_kwargs)
def get_searchable_content(self, value):
return [force_text(value)]
class URLBlock(FieldBlock):
def __init__(self, required=True, help_text=None, max_length=None, min_length=None, **kwargs):
self.field = forms.URLField(
required=required,
help_text=help_text,
max_length=max_length,
min_length=min_length
)
super(URLBlock, self).__init__(**kwargs)
class BooleanBlock(FieldBlock):
def __init__(self, required=True, help_text=None, **kwargs):
# NOTE: As with forms.BooleanField, the default of required=True means that the checkbox
# must be ticked to pass validation (i.e. it's equivalent to an "I agree to the terms and
# conditions" box). To get the conventional yes/no behaviour, you must explicitly pass
# required=False.
self.field = forms.BooleanField(required=required, help_text=help_text)
super(BooleanBlock, self).__init__(**kwargs)
class DateBlock(FieldBlock):
def __init__(self, required=True, help_text=None, **kwargs):
self.field_options = {'required': required, 'help_text': help_text}
super(DateBlock, self).__init__(**kwargs)
@cached_property
def field(self):
from wagtail.wagtailadmin.widgets import AdminDateInput
field_kwargs = {'widget': AdminDateInput}
field_kwargs.update(self.field_options)
return forms.DateField(**field_kwargs)
def to_python(self, value):
# Serialising to JSON uses DjangoJSONEncoder, which converts date/time objects to strings.
# The reverse does not happen on decoding, because there's no way to know which strings
# should be decoded; we have to convert strings back to dates here instead.
if value is None or isinstance(value, datetime.date):
return value
else:
return parse_date(value)
class TimeBlock(FieldBlock):
def __init__(self, required=True, help_text=None, **kwargs):
self.field_options = {'required': required, 'help_text': help_text}
super(TimeBlock, self).__init__(**kwargs)
@cached_property
def field(self):
from wagtail.wagtailadmin.widgets import AdminTimeInput
field_kwargs = {'widget': AdminTimeInput}
field_kwargs.update(self.field_options)
return forms.TimeField(**field_kwargs)
def to_python(self, value):
if value is None or isinstance(value, datetime.time):
return value
else:
return parse_time(value)
class DateTimeBlock(FieldBlock):
def __init__(self, required=True, help_text=None, **kwargs):
self.field_options = {'required': required, 'help_text': help_text}
super(DateTimeBlock, self).__init__(**kwargs)
@cached_property
def field(self):
from wagtail.wagtailadmin.widgets import AdminDateTimeInput
field_kwargs = {'widget': AdminDateTimeInput}
field_kwargs.update(self.field_options)
return forms.DateTimeField(**field_kwargs)
def to_python(self, value):
if value is None or isinstance(value, datetime.datetime):
return value
else:
return parse_datetime(value)
class ChoiceBlock(FieldBlock):
choices = ()
def __init__(self, choices=None, required=True, help_text=None, **kwargs):
if choices is None:
# no choices specified, so pick up the choice list defined at the class level
choices = list(self.choices)
else:
choices = list(choices)
# keep a copy of all kwargs (including our normalised choices list) for deconstruct()
self._constructor_kwargs = kwargs.copy()
self._constructor_kwargs['choices'] = choices
if required is not True:
self._constructor_kwargs['required'] = required
if help_text is not None:
self._constructor_kwargs['help_text'] = help_text
# If choices does not already contain a blank option, insert one
# (to match Django's own behaviour for modelfields:
# https://github.com/django/django/blob/1.7.5/django/db/models/fields/__init__.py#L732-744)
has_blank_choice = False
for v1, v2 in choices:
if isinstance(v2, (list, tuple)):
# this is a named group, and v2 is the value list
has_blank_choice = any([value in ('', None) for value, label in v2])
if has_blank_choice:
break
else:
# this is an individual choice; v1 is the value
if v1 in ('', None):
has_blank_choice = True
break
if not has_blank_choice:
choices = BLANK_CHOICE_DASH + choices
self.field = forms.ChoiceField(choices=choices, required=required, help_text=help_text)
super(ChoiceBlock, self).__init__(**kwargs)
def deconstruct(self):
"""
Always deconstruct ChoiceBlock instances as if they were plain ChoiceBlocks with their
choice list passed in the constructor, even if they are actually subclasses. This allows
users to define subclasses of ChoiceBlock in their models.py, with specific choice lists
passed in, without references to those classes ending up frozen into migrations.
"""
return ('wagtail.wagtailcore.blocks.ChoiceBlock', [], self._constructor_kwargs)
def get_searchable_content(self, value):
# Return the display value as the searchable value
text_value = force_text(value)
for k, v in self.field.choices:
if isinstance(v, (list, tuple)):
# This is an optgroup, so look inside the group for options
for k2, v2 in v:
if value == k2 or text_value == force_text(k2):
return [k, v2]
else:
if value == k or text_value == force_text(k):
return [v]
return [] # Value was not found in the list of choices
class RichTextBlock(FieldBlock):
def __init__(self, required=True, help_text=None, **kwargs):
self.field_options = {'required': required, 'help_text': help_text}
super(RichTextBlock, self).__init__(**kwargs)
def get_default(self):
if isinstance(self.meta.default, RichText):
return self.meta.default
else:
return RichText(self.meta.default)
def to_python(self, value):
# convert a source-HTML string from the JSONish representation
# to a RichText object
return RichText(value)
def get_prep_value(self, value):
# convert a RichText object back to a source-HTML string to go into
# the JSONish representation
return value.source
@cached_property
def field(self):
from wagtail.wagtailcore.fields import RichTextArea
return forms.CharField(widget=RichTextArea, **self.field_options)
def value_for_form(self, value):
# RichTextArea takes the source-HTML string as input (and takes care
# of expanding it for the purposes of the editor)
return value.source
def value_from_form(self, value):
        # RichTextArea returns a source-HTML string; convert to a RichText object
return RichText(value)
def get_searchable_content(self, value):
return [force_text(value.source)]
class RawHTMLBlock(FieldBlock):
def __init__(self, required=True, help_text=None, max_length=None, min_length=None, **kwargs):
self.field = forms.CharField(
required=required, help_text=help_text, max_length=max_length, min_length=min_length,
widget=forms.Textarea)
super(RawHTMLBlock, self).__init__(**kwargs)
def get_default(self):
return mark_safe(self.meta.default or '')
def to_python(self, value):
return mark_safe(value)
def get_prep_value(self, value):
# explicitly convert to a plain string, just in case we're using some serialisation method
# that doesn't cope with SafeText values correctly
return six.text_type(value)
def value_for_form(self, value):
# need to explicitly mark as unsafe, or it'll output unescaped HTML in the textarea
return six.text_type(value)
def value_from_form(self, value):
return mark_safe(value)
class Meta:
icon = 'code'
class ChooserBlock(FieldBlock):
    """Abstract superclass for fields that implement a chooser interface (page, image, snippet etc)"""
    def __init__(self, required=True, help_text=None, **kwargs):
        self.required = required
        self.help_text = help_text
        super(ChooserBlock, self).__init__(**kwargs)
@cached_property
def field(self):
return forms.ModelChoiceField(
queryset=self.target_model.objects.all(), widget=self.widget, required=self.required,
help_text=self.help_text)
def to_python(self, value):
# the incoming serialised value should be None or an ID
if value is None:
return value
else:
try:
return self.target_model.objects.get(pk=value)
except self.target_model.DoesNotExist:
return None
def get_prep_value(self, value):
# the native value (a model instance or None) should serialise to an ID or None
if value is None:
return None
else:
return value.id
def value_from_form(self, value):
# ModelChoiceField sometimes returns an ID, and sometimes an instance; we want the instance
if value is None or isinstance(value, self.target_model):
return value
else:
try:
return self.target_model.objects.get(pk=value)
except self.target_model.DoesNotExist:
return None
def clean(self, value):
# ChooserBlock works natively with model instances as its 'value' type (because that's what you
# want to work with when doing front-end templating), but ModelChoiceField.clean expects an ID
# as the input value (and returns a model instance as the result). We don't want to bypass
# ModelChoiceField.clean entirely (it might be doing relevant validation, such as checking page
# type) so we convert our instance back to an ID here. It means we have a wasted round-trip to
# the database when ModelChoiceField.clean promptly does its own lookup, but there's no easy way
# around that...
if isinstance(value, self.target_model):
value = value.pk
return super(ChooserBlock, self).clean(value)
class PageChooserBlock(ChooserBlock):
def __init__(self, can_choose_root=False, **kwargs):
self.can_choose_root = can_choose_root
super(PageChooserBlock, self).__init__(**kwargs)
@cached_property
def target_model(self):
from wagtail.wagtailcore.models import Page # TODO: allow limiting to specific page types
return Page
@cached_property
def widget(self):
from wagtail.wagtailadmin.widgets import AdminPageChooser
return AdminPageChooser(can_choose_root=self.can_choose_root)
def render_basic(self, value):
if value:
return format_html('<a href="{0}">{1}</a>', value.url, value.title)
else:
return ''
# Ensure that the blocks defined here get deconstructed as wagtailcore.blocks.FooBlock
# rather than wagtailcore.blocks.field.FooBlock
block_classes = [
FieldBlock, CharBlock, URLBlock, RichTextBlock, RawHTMLBlock, ChooserBlock, PageChooserBlock,
TextBlock, BooleanBlock, DateBlock, TimeBlock, DateTimeBlock, ChoiceBlock,
]
DECONSTRUCT_ALIASES = {
cls: 'wagtail.wagtailcore.blocks.%s' % cls.__name__
for cls in block_classes
}
__all__ = [cls.__name__ for cls in block_classes]
|
py | b407ad2406685770092f93d5ce390b45b161424b | import pathlib
from setuptools import setup
HERE = pathlib.Path(__file__).parent
README = (HERE / "README.md").read_text()
setup(
name="hmmpy",
version="0.1.0",
description="Hidden Markov models in Python",
long_description=README,
long_description_content_type="text/markdown",
url="https://github.com/klaapbakken/hmmpy",
author="Øyvind Klåpbakken",
author_email="[email protected]",
license="MIT",
packages=["hmmpy"],
include_package_data=True,
install_requires=["numpy", "scipy", "tqdm"]
) |
py | b407ae7cc6e2fbc7e3f5ef4c3427b74f5b0b623d | """
Baltrad to S3 porting
"""
import sys
from creds import URL, LOGIN, PASSWORD
import datamover as dm
def main():
"""Run data transfer from Baltrad to S3"""
# ------------------
# DATA TRANSFER
# ------------------
# Setup the connection of the Baltrad and S3
btos = dm.BaltradToS3(URL, LOGIN, PASSWORD, "lw-enram")
# Execute the transfer
btos.transfer(name_match="_vp_", overwrite=False, limit=None)
btos.report(reset_file=False, transfertype="Baltrad to S3")
# ---------------------------------------------
# UPDATE COVERAGE AND MOST RECENT FILE DATETIME
# ---------------------------------------------
# Connect to S3 client
s3client = dm.S3EnramHandler("lw-enram")
# Rerun file list overview to extract the current coverage
coverage_count, most_recent = s3client.count_enram_coverage(level='day')
# Save the coverage information on S3
with open("coverage.csv", 'w') as outfile:
dm.coverage_to_csv(outfile, coverage_count)
s3client.upload_file("coverage.csv", "coverage.csv")
# Save the last provided radar file information on S3
with open("radars.csv", 'w') as outfile:
dm.most_recent_to_csv(outfile, most_recent)
s3client.upload_file("radars.csv", "radars.csv")
# ----------------------------
# UPDATE ZIP FILE AVAILABILITY
# ----------------------------
# Rerun ZIP handling of S3 for the transferred files, given by report
s3client.create_zip_version(btos.transferred)
if __name__ == "__main__":
sys.exit(main())
|
py | b407af242e4de71bdd9a978053e63d28cd72c988 | #
# Copyright (c) 2013, Digium, Inc.
#
"""Async ARI client library.
"""
import json
import urllib.parse
import aiohttp
import aioswagger11.client
import asyncio
import logging
from aioari.model import *
log = logging.getLogger(__name__)
class Client(object):
"""Async ARI Client object.
:param base_url: Base URL for accessing Asterisk.
:param http_client: HTTP client interface.
"""
def __init__(self, base_url, http_client):
self.base_url = base_url
self.http_client = http_client
self.app = None
self.websockets = None
url = urllib.parse.urljoin(base_url, "ari/api-docs/resources.json")
self.swagger = aioswagger11.client.SwaggerClient(
http_client=http_client, url=url)
async def init(self):
await self.swagger.init()
# Extract models out of the events resource
events = [api['api_declaration']
for api in self.swagger.api_docs['apis']
if api['name'] == 'events']
if events:
self.event_models = events[0]['models']
else:
self.event_models = {}
self.repositories = {
name: Repository(self, name, api)
for (name, api) in self.swagger.resources.items()}
self.websockets = set()
self.event_listeners = {}
self.exception_handler = \
lambda ex: log.exception("Event listener threw exception")
def __getattr__(self, item):
"""Exposes repositories as fields of the client.
:param item: Field name
"""
repo = self.get_repo(item)
if not repo:
raise AttributeError(
"'%r' object has no attribute '%s'" % (self, item))
return repo
async def close(self):
"""Close this ARI client.
This method will close any currently open WebSockets, and close the
underlying Swaggerclient.
"""
unsubscribe = {
'channel': '__AST_CHANNEL_ALL_TOPIC',
'bridge': '__AST_BRIDGE_ALL_TOPIC',
'endpoint': '__AST_ENDPOINT_ALL_TOPIC',
'deviceState': '__AST_DEVICE_STATE_ALL_TOPIC'
}
unsubscribe_str = ','.join([('%s:%s' % (key, value)) for (key, value) in unsubscribe.items()])
try:
full_url = '%sari/applications/%s/subscription?eventSource=%s' % (self.base_url, self.app, unsubscribe_str)
await self.http_client.request('delete', full_url)
except Exception as ex:
pass
for ws in list(self.websockets): # changes during processing
try:
host, port = ws.get_extra_info('peername')
except TypeError:
# host, port = 'unknown', 'unknown'
self.websockets.remove(ws)
await ws.close()
continue
log.info('Successfully disconnected from ws://%s:%s, app: %s' % (host, port, self.app))
self.websockets.remove(ws)
await ws.close()
await self.swagger.close()
def get_repo(self, name):
"""Get a specific repo by name.
:param name: Name of the repo to get
:return: Repository, or None if not found.
:rtype: aioari.model.Repository
"""
return self.repositories.get(name)
async def __run(self, ws):
"""Drains all messages from a WebSocket, sending them to the client's
listeners.
:param ws: WebSocket to drain.
"""
# TypeChecker false positive on iter(callable, sentinel) -> iterator
# Fixed in plugin v3.0.1
# noinspection PyTypeChecker
while True:
msg = await ws.receive()
if msg is None:
return ## EOF
elif msg.type in {aiohttp.WSMsgType.CLOSED, aiohttp.WSMsgType.CLOSING}:
break
elif msg.type != aiohttp.WSMsgType.TEXT:
log.warning("Unknown JSON message type: %s", repr(msg))
continue # ignore
msg_json = json.loads(msg.data)
if not isinstance(msg_json, dict) or 'type' not in msg_json:
log.error("Invalid event: %s" % msg)
continue
listeners = list(self.event_listeners.get(msg_json['type'], [])) \
+ list(self.event_listeners.get('*', []))
for listener in listeners:
# noinspection PyBroadException
try:
callback, args, kwargs = listener
log.debug("cb_type=%s" % type(callback))
args = args or ()
kwargs = kwargs or {}
cb = callback(msg_json, *args, **kwargs)
# The callback may or may not be an async function
if hasattr(cb,'__await__'):
await cb
except Exception as e:
self.exception_handler(e)
async def run(self, apps, *, _test_msgs=[]):
"""Connect to the WebSocket and begin processing messages.
This method will block until all messages have been received from the
WebSocket, or until this client has been closed.
:param apps: Application (or list of applications) to connect for
:type apps: str or list of str
"""
self.app = apps.split('&')[0]
while True:
if isinstance(apps, list):
apps = ','.join(apps)
try:
ws = await self.swagger.events.eventWebsocket(app=apps)
except (OSError, aiohttp.ClientConnectionError, aiohttp.WSServerHandshakeError) as ex:
log.error(ex)
await asyncio.sleep(1)
continue
host, port = ws.get_extra_info('peername')
log.info('Successfully connected to ws://%s:%s, app: %s' % (host, port, self.app))
self.websockets.add(ws)
# For tests
for m in _test_msgs:
ws.push(m)
await self.__run(ws)
def on_event(self, event_type, event_cb, *args, **kwargs):
"""Register callback for events with given type.
:param event_type: String name of the event to register for.
:param event_cb: Callback function
:type event_cb: (dict) -> None
:param args: Arguments to pass to event_cb
:param kwargs: Keyword arguments to pass to event_cb
"""
listeners = self.event_listeners.setdefault(event_type, list())
for cb in listeners:
if event_cb == cb[0]:
listeners.remove(cb)
callback_obj = (event_cb, args, kwargs)
log.debug("event_cb=%s" % event_cb)
listeners.append(callback_obj)
client = self
class EventUnsubscriber(object):
"""Class to allow events to be unsubscribed.
"""
def close(self):
"""Unsubscribe the associated event callback.
"""
if callback_obj in client.event_listeners[event_type]:
client.event_listeners[event_type].remove(callback_obj)
return EventUnsubscriber()
def on_object_event(self, event_type, event_cb, factory_fn, model_id,
*args, **kwargs):
"""Register callback for events with the given type. Event fields of
the given model_id type are passed along to event_cb.
If multiple fields of the event have the type model_id, a dict is
passed mapping the field name to the model object.
:param event_type: String name of the event to register for.
:param event_cb: Callback function
        :type event_cb: (Obj, dict) -> None or (dict[str, Obj], dict) -> None
:param factory_fn: Function for creating Obj from JSON
:param model_id: String id for Obj from Swagger models.
:param args: Arguments to pass to event_cb
:param kwargs: Keyword arguments to pass to event_cb
"""
# Find the associated model from the Swagger declaration
log.debug("On object event %s %s %s %s"%(event_type, event_cb, factory_fn, model_id))
event_model = self.event_models.get(event_type)
if not event_model:
raise ValueError("Cannot find event model '%s'" % event_type)
# Extract the fields that are of the expected type
obj_fields = [k for (k, v) in event_model['properties'].items()
if v['type'] == model_id]
if not obj_fields:
raise ValueError("Event model '%s' has no fields of type %s"
% (event_type, model_id))
def extract_objects(event, *args, **kwargs):
"""Extract objects of a given type from an event.
:param event: Event
:param args: Arguments to pass to the event callback
:param kwargs: Keyword arguments to pass to the event
callback
"""
# Extract the fields which are of the expected type
obj = {obj_field: factory_fn(self, event[obj_field])
for obj_field in obj_fields
if event.get(obj_field)}
# If there's only one field in the schema, just pass that along
if len(obj_fields) == 1:
if obj:
vals = list(obj.values())
obj = vals[0]
else:
obj = None
return event_cb(obj, event, *args, **kwargs)
return self.on_event(event_type, extract_objects,
*args,
**kwargs)
def on_channel_event(self, event_type, fn, *args, **kwargs):
"""Register callback for Channel related events
:param event_type: String name of the event to register for.
:param fn: Callback function
:type fn: (Channel, dict) -> None or (list[Channel], dict) -> None
:param args: Arguments to pass to fn
:param kwargs: Keyword arguments to pass to fn
"""
return self.on_object_event(event_type, fn, Channel, 'Channel',
*args, **kwargs)
def on_bridge_event(self, event_type, fn, *args, **kwargs):
"""Register callback for Bridge related events
:param event_type: String name of the event to register for.
:param fn: Callback function
:type fn: (Bridge, dict) -> None or (list[Bridge], dict) -> None
:param args: Arguments to pass to fn
:param kwargs: Keyword arguments to pass to fn
"""
return self.on_object_event(event_type, fn, Bridge, 'Bridge',
*args, **kwargs)
def on_playback_event(self, event_type, fn, *args, **kwargs):
"""Register callback for Playback related events
:param event_type: String name of the event to register for.
:param fn: Callback function
:type fn: (Playback, dict) -> None or (list[Playback], dict) -> None
:param args: Arguments to pass to fn
:param kwargs: Keyword arguments to pass to fn
"""
return self.on_object_event(event_type, fn, Playback, 'Playback',
*args, **kwargs)
def on_live_recording_event(self, event_type, fn, *args, **kwargs):
"""Register callback for LiveRecording related events
:param event_type: String name of the event to register for.
:param fn: Callback function
:type fn: (LiveRecording, dict) -> None or (list[LiveRecording], dict) -> None
:param args: Arguments to pass to fn
:param kwargs: Keyword arguments to pass to fn
"""
return self.on_object_event(event_type, fn, LiveRecording,
'LiveRecording', *args, **kwargs)
def on_stored_recording_event(self, event_type, fn, *args, **kwargs):
"""Register callback for StoredRecording related events
:param event_type: String name of the event to register for.
:param fn: Callback function
:type fn: (StoredRecording, dict) -> None or (list[StoredRecording], dict) -> None
:param args: Arguments to pass to fn
:param kwargs: Keyword arguments to pass to fn
"""
return self.on_object_event(event_type, fn, StoredRecording,
'StoredRecording', *args, **kwargs)
def on_endpoint_event(self, event_type, fn, *args, **kwargs):
"""Register callback for Endpoint related events
:param event_type: String name of the event to register for.
:param fn: Callback function
:type fn: (Endpoint, dict) -> None or (list[Endpoint], dict) -> None
:param args: Arguments to pass to fn
:param kwargs: Keyword arguments to pass to fn
"""
return self.on_object_event(event_type, fn, Endpoint, 'Endpoint',
*args, **kwargs)
def on_device_state_event(self, event_type, fn, *args, **kwargs):
"""Register callback for DeviceState related events
:param event_type: String name of the event to register for.
:param fn: Callback function
:type fn: (DeviceState, dict) -> None or (list[DeviceState], dict) -> None
:param args: Arguments to pass to fn
:param kwargs: Keyword arguments to pass to fn
"""
return self.on_object_event(event_type, fn, DeviceState, 'DeviceState',
*args, **kwargs)
def on_sound_event(self, event_type, fn, *args, **kwargs):
"""Register callback for Sound related events
:param event_type: String name of the event to register for.
:param fn: Sound function
:type fn: (Sound, dict) -> None or (list[Sound], dict) -> None
:param args: Arguments to pass to fn
:param kwargs: Keyword arguments to pass to fn
"""
return self.on_object_event(event_type, fn, Sound, 'Sound',
*args, **kwargs)
|
py | b407af95f3d355d737348f65f63816f58dc35d91 | import os
import pickle as pk
import itertools as it
import numpy as np
from pattern_database import PatternDatabase
from algorithm import run
from utils import softmax
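# A candidate pattern database: rule patterns, their wildcard masks and macro
# action sequences, plus running sums used to average evaluation metrics.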
class Candidate:
def __init__(self, patterns, wildcards, macros):
self.patterns = patterns
self.wildcards = wildcards
self.macros = macros
self.solved_sum = 0
self.godliness_sum = 0
self.evaluation_count = 0
def check_domination_of(x, by):
# assumes one or both arguments are 2d
return (by >= x).all(axis=1) & (by > x).any(axis=1)
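# Screen a candidate against one (state, path) training example: disable
# wildcards on rules whose macros would misfire on this state and, if no rule
# triggers within the safe neighborhood of the state, add a new rule triggered
# by it. Returns the upgraded (patterns, wildcards, macros), or None if the
# candidate was left unchanged.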
def screen(cand, state, path, rng, tree, max_depth, tree_depth, use_safe_depth):
patterns = cand.patterns
wildcards = cand.wildcards
macros = cand.macros
upgraded = False # becomes True if candidate gets upgraded
# restrict any rules needed so that state will not trigger bad macros
wr, wc = [], [] # rows and columns where wildcards are disabled
triggered = ((state == patterns) | wildcards).all(axis=1)
for r in np.flatnonzero(triggered):
goodmacro = (len(macros[r]) <= len(path)) and macros[r] == path[:len(macros[r])]
if not goodmacro:
wr.append(r)
wc.append(rng.choice(np.flatnonzero(state != patterns[r])))
if len(wr) > 0:
wildcards = wildcards.copy()
wildcards[wr, wc] = False
upgraded = True
# check if state is in neighborhood of a trigger
# due to incomplete tree it must also be triggered within distance to tree_depth
# otherwise macro_search could exit set where pdb is correct
safe_depth = max_depth
if use_safe_depth: safe_depth = min(max_depth, tree_depth - len(path))
triggered = False # until proven otherwise
for _, neighbor in tree.rooted_at(state, up_to_depth=safe_depth):
triggered = ((neighbor == patterns) | wildcards).all(axis=1).any()
if triggered: break
# if not, create a new rule triggered by state
if not triggered:
upgraded = True
# if this code is reached, path is longer than max depth
macro = path[:rng.integers(max_depth, len(path))+1] # random macro
pattern = state
# wildcard = np.ones(pattern.shape, dtype=bool) # start with all wildcards which will gradually be disabled
wildcard = (np.random.rand(*pattern.shape) < (len(path) / domain.god_number())) # more wildcards in deeper states
# add to pdb
patterns = np.append(patterns, pattern[np.newaxis,:], axis=0)
wildcards = np.append(wildcards, wildcard[np.newaxis,:], axis=0)
macros = macros + [macro]
if upgraded:
return patterns, wildcards, macros
else:
return None
if __name__ == "__main__":
# config
# larger exploration is important for larger state spaces, at least with uniform state sampling
# larger state spaces need a few rules to start getting any godliness
# otherwise the initial candidate dominates its offspring and keeps getting selected
# tree_depth = 11
# use_safe_depth = False
tree_depth = 8
use_safe_depth = True
exploration = 1
state_sampling = "bfs"
# state_sampling = "uniform"
max_depth = 1
cube_size = 2
# valid_actions = None
valid_actions = tuple(it.product((0,1,2), (1,), (1, 2, 3))) # only spinning one plane on each axis for 2cube
max_actions = 30
orientation_neutral=False
selection_policy = "hucb"
# selection_policy = "sucb"
# selection_policy = "uniform"
obj_names = ("godliness", "folkliness")
num_search_iters = 2**16
# candidate_buffer_size = num_search_iters
candidate_buffer_size = 64
num_instances = 32
num_reps = 1
# break_seconds = 30 * 60
break_seconds = 0
dump_dir = "psearch"
save_period = 1000
config = {
name: value for (name, value) in globals().items()
if type(value) in [bool, int, str, tuple] and name[:2] != "__"}
animate_tree = False
verbose = True
do_dump = True
do_search = False
show_results = True
post_mortem = False
# set up descriptive dump name
dump_base = "N%d_D%d_M%d_C%d_%s_%s%s" % (
cube_size, tree_depth, max_depth, candidate_buffer_size, state_sampling, selection_policy, exploration)
# Set up domain and state-space
from cube import CubeDomain
domain = CubeDomain(cube_size, valid_actions)
init = domain.solved_state()
from tree import SearchTree
tree = SearchTree(domain, tree_depth, orientation_neutral)
paths, states = zip(*tree.rooted_at(init))
states = np.array(states)
paths = list(map(tuple, map(domain.reverse, paths))) # from state to solved
dists = np.array(list(map(len, paths)))
# if verbose:
# print("tree layer sizes:")
# for dep in range(tree_depth): print(len(tree._layers[dep]))
# random number generation
rng = np.random.default_rng()
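    # Evaluate a candidate on a batch of (state, distance) instances: godliness
    # is the running average of distance / solution length (unsolved instances
    # count as zero), folkliness is minus the number of macros, so smaller
    # databases score better.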
def evaluate(cand, instances):
# wrap candidate in pattern database
pdb = PatternDatabase(cand.patterns, cand.wildcards, cand.macros, domain, orientation_neutral)
# run algorithm on problem instances
for state, distance in instances:
solved, plan = run(state, domain, tree, pdb, max_depth, max_actions, orientation_neutral)
if solved:
soln_len = sum([len(actions) + len(macro) for (actions, _, macro) in plan])
if soln_len == 0: godliness = int(distance == 0)
else: godliness = distance / soln_len
cand.godliness_sum += godliness
cand.solved_sum += 1
cand.evaluation_count += 1
# return evaluation metrics
godliness = cand.godliness_sum / cand.evaluation_count
folkliness = -len(cand.macros) # larger pdb is worse
return godliness, folkliness
def instance_minibatch():
index = rng.choice(len(states), num_instances)
return tuple((states[i], dists[i]) for i in index)
if do_search:
from time import sleep
for rep in range(num_reps):
# set up candidate pool
candidate = {} # saves the best candidates found so far
selection_count = np.zeros(num_search_iters, dtype=int)
objective = np.empty((num_search_iters, len(obj_names)))
parent = -np.ones(num_search_iters, dtype=int)
ranking = np.empty(num_search_iters, dtype=int) # rank = number of candidates that dominate (frontier is rank 0)
state_counter = np.zeros(num_search_iters, dtype=int) # for ordered state sampling
# initialize candidate with one rule for solved state
candidate[0] = Candidate(
patterns = states[:1,:].copy(),
wildcards = np.zeros((1, domain.state_size()), dtype=bool),
macros = [()])
# maintain invariant that every candidate is ranked and evaluated at least once
godliness, folkliness = evaluate(candidate[0], instance_minibatch())
objective[0] = godliness, folkliness
ranking[0] = 0 # no other candidates yet
leaves = set([0])
num_cand = 1
for n in range(1, num_search_iters):
# backup leaf rankings
leaf_index = list(leaves)
legacy = np.ones(num_cand) * ranking[:num_cand].max()
legacy[leaf_index] = ranking[leaf_index]
for c in reversed(range(num_cand)):
legacy[parent[c]] = min(legacy[parent[c]], legacy[c])
# enumerate currently buffered candidates
keys = list(candidate.keys())
# discard candidate with worst legacy if buffer size reached
if len(candidate) == candidate_buffer_size:
worst = keys[legacy[keys].argmax()]
keys.remove(worst)
candidate.pop(worst)
# upper confidence bounds
N = selection_count[keys]
# Q = -ranking[keys].copy() # ranking closer to 0 is better
Q = -legacy[keys].copy() # descendent ranking closer to 0 is better
ucb_logits = Q + exploration * np.sqrt(np.log(n) / (N+1))
## select a candidate still saved in memory
if selection_policy == "hucb": c = keys[ucb_logits.argmax()]
if selection_policy == "sucb": c = rng.choice(keys, p = softmax(ucb_logits))
if selection_policy == "uniform": c = rng.choice(keys)
selection_count[c] += 1
# check whether each neighbor is dominated by current candidate before evaluation
# was_dominated = check_domination_of(objective[:num_cand], by=objective[c])
if c in leaves:
was_dominated = check_domination_of(objective[leaf_index], by=objective[c])
# sample a state
if state_sampling == "uniform": s = rng.choice(len(states))
if state_sampling == "bfs": s = state_counter[c] % len(states)
state_counter[c] += 1
state, path = states[s], paths[s]
# evaluate and update objectives
godliness, folkliness = evaluate(candidate[c], instance_minibatch())
objective[c] = godliness, folkliness
# update dominated status of neighbors after evaluation
# is_dominated = check_domination_of(objective[:num_cand], by=objective[c])
if c in leaves:
is_dominated = check_domination_of(objective[leaf_index], by=objective[c])
# # update rankings
# ranking[:num_cand] += (is_dominated.astype(int) - was_dominated.astype(int))
# ranking[c] = check_domination_of(objective[c], by=objective[:num_cand]).sum()
if c in leaves:
ranking[leaf_index] += (is_dominated.astype(int) - was_dominated.astype(int))
ranking[c] = check_domination_of(objective[c], by=objective[leaf_index]).sum()
# upgrade selected candidate if needed
upgrade = screen(candidate[c], state, path, rng, tree, max_depth, tree_depth, use_safe_depth)
if upgrade is not None:
# update candidate set
patterns, wildcards, macros = upgrade
candidate[num_cand] = Candidate(patterns, wildcards, macros)
parent[num_cand] = c
selection_count[num_cand] = 0
state_counter[num_cand] = state_counter[c]
# # carry forward parent metrics
# candidate[num_cand].evaluation_count = candidate[c].evaluation_count
# candidate[num_cand].godliness_sum = candidate[c].godliness_sum
# candidate[num_cand].solved_sum = candidate[c].solved_sum
# godliness = candidate[num_cand].godliness_sum / candidate[num_cand].evaluation_count
# folkliness = -len(candidate[num_cand].macros)
# objective[num_cand] = godliness, folkliness
# first child candidate evaluation
godliness, folkliness = evaluate(candidate[num_cand], instance_minibatch())
objective[num_cand] = godliness, folkliness
# update rankings
# ranking[:num_cand] += check_domination_of(objective[:num_cand], by = objective[num_cand])
# ranking[num_cand] = check_domination_of(objective[num_cand], by = objective[:num_cand]).sum()
ranking[leaf_index] += check_domination_of(objective[leaf_index], by = objective[num_cand])
ranking[num_cand] = check_domination_of(objective[num_cand], by = objective[leaf_index]).sum()
# update leaf set
leaves.add(num_cand)
leaves.discard(c)
# # discard most dominated candidate if buffer size reached
# if len(candidate) > candidate_buffer_size:
# worst = keys[ranking[keys].argmax()]
# candidate.pop(worst)
# update num candidates
num_cand += 1
# save results periodically
if do_dump and (n % save_period == 0 or n + 1 == num_search_iters):
metrics = tuple(metric[:num_cand]
for metric in [selection_count, parent, objective, ranking, state_counter])
dump_name = "%s_r%d" % (dump_base, rep)
with open(dump_name + ".pkl", "wb") as df: pk.dump((config, candidate, leaves, metrics), df)
if verbose: print(" saving to disk")
# if verbose and n % (10**int(np.log10(n))) == 0:
if verbose:
# bests = ["%s: %s" % (obj_names[i], objective[(selection_count > 0), i].max()) for i in range(objective.shape[1])]
bests = ["%s: %s" % (obj_names[i], objective[:num_cand, i].max()) for i in range(objective.shape[1])]
print("%d/%d: selected %d~%.1f~%d | counter <= %d | %d leaves | %d cands | bests: %s" %
(n, num_search_iters,
selection_count[:num_cand].min(), selection_count[:num_cand].mean(), selection_count[:num_cand].max(),
state_counter.max(), len(leaves), len(candidate), ", ".join(bests)))
# print("%d | %d in frontier | %d spawns | counts <=%d | bests: %s" % (c, frontier.size, num_spawns, count[:c+1].max(), ", ".join(bests)))
# print("iter %d: %d <= %d rules, %f wildcard, done=%s (k=%d)" % (epoch, len(macros), len(states), wildcards.sum() / wildcards.size, done, k))
# archive results
if do_dump:
dump_name = "%s_r%d" % (dump_base, rep)
os.system("mv %s.pkl %s/%s.pkl" % (dump_name, dump_dir, dump_name))
if verbose: print("Breaking for %s seconds..." % str(break_seconds))
sleep(break_seconds)
if show_results:
import matplotlib.pyplot as pt
# load results
rep_results = []
rep_leaves = []
for rep in range(num_reps):
# dump_name = "%s/rep_%d.pkl" % (dump_dir, rep)
# dump_name = "%s/rep_%d_N2_D11_bfs_d1_hucb_x1.pkl" % (dump_dir, rep)
dump_name = "%s/%s_r%d" % (dump_dir, dump_base, rep)
with open(dump_name + ".pkl", "rb") as df:
config, candidate, leaves, results = pk.load(df)
rep_results.append(results)
rep_leaves.append(leaves)
# # overwrite config with loaded values
# for name, value in config.items(): eval("%s = %s" % (name, str(value)))
selection_count, parent, objective, ranking, state_counter = rep_results[0]
leaves = list(rep_leaves[0])
nc = len(ranking)
# elites = np.flatnonzero(ranking[1:] < 20) + 1
# elites = np.arange(1, nc)
elites = leaves
if animate_tree:
pt.ion()
pt.figure()
else:
pt.figure(figsize=(20,15))
pt.xlabel("folkliness")
pt.ylabel("godliness")
# for c in range(1,nc):
for c in elites:
pt.plot(
[objective[c,obj_names.index("folkliness")], objective[parent[c],obj_names.index("folkliness")]],
[objective[c,obj_names.index("godliness")], objective[parent[c],obj_names.index("godliness")]],
'-ko')
if animate_tree: pt.pause(0.01)
pt.plot(
objective[leaves, obj_names.index("folkliness")],
objective[leaves, obj_names.index("godliness")],
'go')
if not animate_tree:
pt.savefig("%s/%s_r0_ptree.png" % (dump_dir, dump_base))
pt.show()
# pt.figure(figsize=(15, 5))
# for rep, results in enumerate(rep_results):
# selection_count, parent, objective, ranking = results
# # sc = np.flatnonzero(selection_count > 0)
# sc = np.arange(len(ranking))
# nc = len(ranking)
# num_plots = 5
# pt.subplot(1, num_plots, 1)
# pt.bar(np.arange(len(sc)), objective[sc, obj_names.index("godliness")], label=str(rep))
# pt.xlabel("candidate")
# pt.ylabel("godliness")
# pt.subplot(1, num_plots, 2)
# pt.bar(np.arange(nc), selection_count, label=str(rep))
# pt.xlabel("candidate")
# pt.ylabel("selection count")
# pt.subplot(1, num_plots, 3)
# pt.scatter(selection_count[sc], ranking[sc], label=str(rep))
# pt.xlabel("selection count")
# pt.ylabel("ranking")
# pt.subplot(1, num_plots, 4)
# pt.scatter(selection_count[sc], objective[sc, obj_names.index("godliness")], label=str(rep))
# pt.xlabel("selection count")
# pt.ylabel("godliness")
# pt.subplot(1, num_plots, 5)
# pt.scatter(objective[sc, obj_names.index("folkliness")], objective[sc, obj_names.index("godliness")], label=str(rep))
# pt.xlabel("folkliness")
# pt.ylabel("godliness")
# pt.legend()
# pt.savefig("%s/%s.png" % (dump_dir, dump_base))
# pt.show()
if post_mortem:
godly_metric, godly_so_far, godly_uniform = [], [], []
solved_metric, solved_so_far, solved_uniform = [], [], []
for rep in range(num_reps):
dump_name = "%s/%s_r%d" % (dump_dir, dump_base, rep)
with open(dump_name + ".pkl", "rb") as df: config, candidate, leaves, results = pk.load(df)
selection_count, parent, objective, ranking, state_counter = results
for c, cand in candidate.items():
# if selection_count[c] < 1: continue
godly_metric.append(cand.godliness_sum / cand.evaluation_count)
solved_metric.append(cand.solved_sum / cand.evaluation_count)
cand.godliness_sum = cand.solved_sum = cand.evaluation_count = 0
evaluate(cand, [(states[s], dists[s])
for s in rng.choice(state_counter[c], size=32) % len(states)])
godly_so_far.append(cand.godliness_sum / cand.evaluation_count)
solved_so_far.append(cand.solved_sum / cand.evaluation_count)
cand.godliness_sum = cand.solved_sum = cand.evaluation_count = 0
evaluate(cand, [(states[s], dists[s]) for s in rng.choice(len(states), size=32)])
godly_uniform.append(cand.godliness_sum / cand.evaluation_count)
solved_uniform.append(cand.solved_sum / cand.evaluation_count)
import matplotlib.pyplot as pt
pt.subplot(2,2,1)
pt.scatter(godly_metric, godly_so_far)
pt.xlabel("godly metric")
pt.ylabel("so_far")
pt.subplot(2,2,2)
pt.scatter(godly_metric, godly_uniform)
pt.xlabel("godly metric")
pt.ylabel("uniform")
pt.subplot(2,2,3)
pt.scatter(solved_metric, solved_so_far)
pt.xlabel("solved metric")
pt.ylabel("so_far")
pt.subplot(2,2,4)
pt.scatter(solved_metric, solved_uniform)
pt.xlabel("solved metric")
pt.ylabel("uniform")
pt.tight_layout()
pt.show()
|
py | b407afcf370f3725fdfc167f87bf0052e7b72904 | """StoneVilla URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path ,include
from django.conf.urls.static import static
from django.conf import settings
urlpatterns = [
path('admin/', admin.site.urls),
path('Stone_app/', include('Stone_app.urls')),
]
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
|
py | b407b1ebd3a84f1acea533fa698be1ac6ed52e72 | # Generated by Django 2.2.9 on 2020-05-21 14:42
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('posts', '0004_auto_20200521_1725'),
]
operations = [
migrations.AddField(
model_name='group',
name='pub_date_group',
field=models.DateTimeField(
auto_now_add=True,
default=django.utils.timezone.now,
verbose_name='date published_group'),
preserve_default=False,
),
]
|
py | b407b2635563a722ee413b223e7aaafc0ee32188 | from tqdm import tqdm
import numpy as np
from PIL import Image
from math import log, sqrt, pi
import argparse
import torch
from torch import nn, optim
from torch.autograd import Variable, grad
from torch.utils.data import DataLoader
from torchvision import datasets, transforms, utils
from model import Glow
#device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
device = torch.device('cuda')
parser = argparse.ArgumentParser(description='Glow trainer')
parser.add_argument('--batch', default=16, type=int, help='batch size')
parser.add_argument('--iter', default=200000, type=int, help='maximum iterations')
parser.add_argument(
'--n_flow', default=32, type=int, help='number of flows in each block'
)
parser.add_argument('--n_block', default=4, type=int, help='number of blocks')
parser.add_argument(
'--no_lu',
action='store_true',
help='use plain convolution instead of LU decomposed version',
)
parser.add_argument(
'--affine', action='store_true', help='use affine coupling instead of additive'
)
parser.add_argument('--n_bits', default=4, type=int, help='number of bits')
parser.add_argument('--lr', default=1e-4, type=float, help='learning rate')
parser.add_argument('--img_size', default=64, type=int, help='image size')
parser.add_argument('--temp', default=0.7, type=float, help='temperature of sampling')
parser.add_argument('--n_sample', default=20, type=int, help='number of samples')
parser.add_argument('path', metavar='PATH', type=str, help='Path to image directory')
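# sample_data below is an infinite generator: it keeps yielding (image, label)
# batches and transparently re-creates the DataLoader whenever the underlying
# dataset is exhausted.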
def sample_data(path, batch_size, image_size):
transform = transforms.Compose(
[
transforms.Resize(image_size),
transforms.CenterCrop(image_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (1, 1, 1)),
]
)
dataset = datasets.ImageFolder(path, transform=transform)
loader = DataLoader(dataset, shuffle=True, batch_size=batch_size, num_workers=4)
loader = iter(loader)
while True:
try:
yield next(loader)
except StopIteration:
loader = DataLoader(
dataset, shuffle=True, batch_size=batch_size, num_workers=4
)
loader = iter(loader)
yield next(loader)
def calc_z_shapes(n_channel, input_size, n_flow, n_block):
z_shapes = []
for i in range(n_block - 1):
input_size //= 2
n_channel *= 2
z_shapes.append((n_channel, input_size, input_size))
input_size //= 2
z_shapes.append((n_channel * 4, input_size, input_size))
return z_shapes
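# For example, with the defaults used below (3-channel input, img_size=64, n_block=4):
#   calc_z_shapes(3, 64, 32, 4) -> [(6, 32, 32), (12, 16, 16), (24, 8, 8), (96, 4, 4)]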
def calc_loss(log_p, logdet, image_size, n_bins):
# log_p = calc_log_p([z_list])
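# The first returned value follows the usual bits-per-dimension objective:
# the -log(n_bins) * n_pixel term corrects for quantizing each pixel into
# n_bins levels (matching the dequantization noise added in train()), and
# dividing by log(2) * n_pixel converts nats per image into bits per
# pixel-channel.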
n_pixel = image_size * image_size * 3
loss = -log(n_bins) * n_pixel
loss = loss + logdet + log_p
return (
(-loss / (log(2) * n_pixel)).mean(),
(log_p / (log(2) * n_pixel)).mean(),
(logdet / (log(2) * n_pixel)).mean(),
)
def train(args, model, optimizer):
dataset = iter(sample_data(args.path, args.batch, args.img_size))
n_bins = 2. ** args.n_bits
z_sample = []
z_shapes = calc_z_shapes(3, args.img_size, args.n_flow, args.n_block)
for z in z_shapes:
z_new = torch.randn(args.n_sample, *z) * args.temp
z_sample.append(z_new.to(device))
with tqdm(range(args.iter)) as pbar:
for i in pbar:
image, _ = next(dataset)
image = image.to(device)
if i == 0:
with torch.no_grad():
log_p, logdet, _ = model.module(image + torch.rand_like(image) / n_bins)
continue
else:
log_p, logdet, _ = model(image + torch.rand_like(image) / n_bins)
logdet = logdet.mean()
loss, log_p, log_det = calc_loss(log_p, logdet, args.img_size, n_bins)
model.zero_grad()
loss.backward()
# warmup_lr = args.lr * min(1, i * batch_size / (50000 * 10))
warmup_lr = args.lr
optimizer.param_groups[0]['lr'] = warmup_lr
optimizer.step()
pbar.set_description(
f'Loss: {loss.item():.5f}; logP: {log_p.item():.5f}; logdet: {log_det.item():.5f}; lr: {warmup_lr:.7f}'
)
if i % 500 == 0:
with torch.no_grad():
utils.save_image(
model_single.reverse(z_sample).cpu().data,
f'sample/{str(i + 1).zfill(6)}.png',
normalize=True,
nrow=10,
range=(-0.5, 0.5),
)
if i % 2000 == 0:
torch.save(
model.state_dict(), f'checkpoint/model.pt'
)
torch.save(
optimizer.state_dict(), f'checkpoint/optim.pt'
)
if __name__ == '__main__':
args = parser.parse_args()
print(args)
model_single = Glow(
3, args.n_flow, args.n_block, affine=args.affine, conv_lu=not args.no_lu
)
model = nn.DataParallel(model_single)
# model = model_single
model = model.to(device)
#model.load_state_dict(torch.load('checkpoint/model.pt', map_location=lambda storage, loc: storage))
optimizer = optim.Adam(model.parameters(), lr=args.lr)
torch.cuda.set_device(0) # adding this line fixed it
#optimizer.load_state_dict(torch.load('checkpoint/optim.pt', map_location=lambda storage, loc: storage))
train(args, model, optimizer)
|
py | b407b355bd980aab57fa70a1f8be627dde21e441 | """
Request
=======
When a page is requested, a :class:`Request` object containing metadata about
the request is created automatically.
Since this object is global within the thread,
you can freely import it from anywhere and retrieve request information.
"""
import base64
import fnmatch
import hashlib
import hmac
import threading
import cgi
import json
import pickle
from urllib.parse import SplitResult
from http.cookies import SimpleCookie
##################################################################################
# Request Object #################################################################
##################################################################################
class Request:
""" A wrapper for WSGI environment dictionaries.
"""
__slots__ = ('environ', '_body', '_forms')
def __init__(self, environ=None):
self.environ = {} if environ is None else environ
self.environ['kobin.request'] = self
self._body = None
self._forms = None
def get(self, value, default=None):
return self.environ.get(value, default)
@property
def path(self):
""" The value of ``PATH_INFO`` with exactly one prefixed slash (to fix
broken clients and avoid the "empty path" edge case). """
return '/' + self.environ.get('PATH_INFO', '').lstrip('/')
@property
def method(self):
""" The ``REQUEST_METHOD`` value as an uppercase string. """
return self.environ.get('REQUEST_METHOD', 'GET').upper()
@property
def headers(self):
return {k[len('HTTP_'):]: v
for k, v in self.environ.items()
if k.startswith('HTTP_')}
@property
def query(self):
params = cgi.FieldStorage(
environ=self.environ,
keep_blank_values=True,
)
p = {k: params[k].value for k in params}
return p
@property
def forms(self):
if self._forms is None:
form = cgi.FieldStorage(
fp=self.environ['wsgi.input'],
environ=self.environ,
keep_blank_values=True,
)
self._forms = {k: form[k].value for k in form}
return self._forms
@property
def raw_body(self):
if self._body is not None:
return self._body
length = self.environ.get('CONTENT_LENGTH')
if length:
self._body = self.environ['wsgi.input'].read(int(length))
else:
self._body = b''
return self._body
@property
def body(self):
return self.raw_body.decode('utf-8')
@property
def json(self):
return json.loads(self.body)
@property
def url(self):
protocol = self.get('HTTP_X_FORWARDED_PROTO') or self.get('wsgi.url_scheme', 'http')
host = self.get('HTTP_X_FORWARDED_HOST') or self.get('HTTP_HOST')
query_params = self.get("QUERY_STRING")
url_split_result = SplitResult(protocol, host, self.path, query_params, '')
return url_split_result.geturl()
@property
def cookies(self):
cookies = SimpleCookie(self.environ.get('HTTP_COOKIE', '')).values()
return {c.key: c.value for c in cookies}
def get_cookie(self, key, default=None, secret=None, digestmod=hashlib.sha256):
from kobin.app import current_config
if secret is None:
secret = current_config('SECRET_KEY')
value = self.cookies.get(key)
if secret and value and value.startswith('!') and '?' in value:
# See BaseResponse.set_cookie for details.
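# A signed cookie value has the form
#   "!" + base64(hmac_signature) + "?" + base64(pickle((key, value)))
# so the signature is recomputed over the message part and compared below.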
if isinstance(secret, str):
secret = secret.encode('utf-8')
sig, msg = map(lambda x: x.encode('utf-8'), value[1:].split('?', 1))
hash_string = hmac.new(secret, msg, digestmod=digestmod).digest()
if sig == base64.b64encode(hash_string):
key_and_value = pickle.loads(base64.b64decode(msg))
if key_and_value and key_and_value[0] == key:
return key_and_value[1]
return value or default
def __getitem__(self, key):
return self.environ[key]
def __delitem__(self, key):
self[key] = ""
del (self.environ[key])
def __setitem__(self, key, value):
""" Change an environ value and clear all caches that depend on it. """
self.environ[key] = value
todelete = ()
if key == 'wsgi.input':
todelete = ('body', 'forms', 'files', 'params', 'post', 'json')
elif key == 'QUERY_STRING':
todelete = ('query', 'params')
elif key.startswith('HTTP_'):
todelete = ('headers', 'cookies')
for key in todelete:
self.environ.pop('kobin.request.' + key, None)
def __len__(self):
return len(self.environ)
def __repr__(self):
return '<{cls}: {method} {url}>'.format(
cls=self.__class__.__name__, method=self.method, url=self.path
)
# for Accept header.
def _split_into_mimetype_and_priority(x):
"""Split an accept header item into mimetype and priority.
>>> _split_into_mimetype_and_priority('text/*')
('text/*', 1.0)
>>> _split_into_mimetype_and_priority('application/json;q=0.5')
('application/json', 0.5)
"""
if ';' in x:
content_type, priority = x.split(';')
casted_priority = float(priority.split('=')[1])
else:
content_type, casted_priority = x, 1.0
content_type = content_type.strip() # e.g. ' text/html' -> 'text/html'
return content_type, casted_priority
def _parse_and_sort_accept_header(accept_header):
"""Parse and sort the accept header items.
>>> _parse_and_sort_accept_header('application/json;q=0.5, text/*')
[('text/*', 1.0), ('application/json', 0.5)]
"""
return sorted([_split_into_mimetype_and_priority(x) for x in accept_header.split(',')],
key=lambda x: x[1], reverse=True)
def accept_best_match(accept_header, mimetypes):
"""Return a mimetype best matched the accept headers.
>>> accept_best_match('application/json, text/html', ['application/json', 'text/plain'])
'application/json'
>>> accept_best_match('application/json;q=0.5, text/*', ['application/json', 'text/plain'])
'text/plain'
"""
for mimetype_pattern, _ in _parse_and_sort_accept_header(accept_header):
matched_types = fnmatch.filter(mimetypes, mimetype_pattern)
if matched_types:
return matched_types[0]
return mimetypes[0]
def _local_property():
ls = threading.local()
def fget(_):
try:
return ls.var
except AttributeError:
raise RuntimeError("Request context not initialized.")
def fset(_, value):
ls.var = value
def fdel(_):
del ls.var
return property(fget, fset, fdel, 'Thread-local property')
class LocalRequest(Request):
bind = Request.__init__
environ = _local_property()
_body = _local_property()
_forms = _local_property()
request = LocalRequest()
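# Minimal usage sketch (hypothetical WSGI handler, not part of the framework
# API): the thread-local ``request`` is bound to the WSGI environ once per
# request and can then be read from anywhere in the handling code.
#
#     def app(environ, start_response):
#         request.bind(environ)   # LocalRequest.bind is Request.__init__
#         body = '{} {}'.format(request.method, request.path).encode('utf-8')
#         start_response('200 OK', [('Content-Type', 'text/plain')])
#         return [body]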
|
py | b407b3bc0feaf040ebcaef9bcda9b5872602c1dc | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('sbirez', '0031_merge'),
]
operations = [
migrations.AddField(
model_name='element',
name='report_question_number',
field=models.TextField(blank=True, null=True),
),
]
|
py | b407b3d70947178289be5103e1a06cdb915865fb | price = float(input('Digite o preço do pão R$: '))
print('Panificadora Pão de Ontem - Tabela de Preços')
for x in range(1, 52):
quantity = x
print(f'{quantity} - R$ {quantity * price:.2f}')
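# For example, with an input price of 0.50 the table runs from
# "1 - R$ 0.50" up to "51 - R$ 25.50".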
|
py | b407b41e6b794c3c1ed311584e809f40510f5d73 | # terrascript/provider/brightbox.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:13:43 UTC)
#
# For imports without namespace, e.g.
#
# >>> import terrascript.provider.brightbox
#
# instead of
#
# >>> import terrascript.provider.brightbox.brightbox
#
# This is only available for 'official' and 'partner' providers.
from terrascript.provider.brightbox.brightbox import *
|
py | b407b4926024235ce5c0704b1c66a506b4ba4e0f | import base64
import json
import urllib.request, urllib.parse, urllib.error
from urllib3._collections import HTTPHeaderDict
from . import httplib2
import logger
import traceback
import socket
import time
import re
import uuid
from copy import deepcopy
from threading import Thread
from TestInput import TestInputSingleton
from TestInput import TestInputServer
from testconstants import MIN_KV_QUOTA, INDEX_QUOTA, FTS_QUOTA, CBAS_QUOTA
from testconstants import COUCHBASE_FROM_VERSION_4, IS_CONTAINER, CLUSTER_QUOTA_RATIO
from lib.Cb_constants.CBServer import CbServer
try:
from couchbase_helper.document import DesignDocument, View
except ImportError:
from lib.couchbase_helper.document import DesignDocument, View
from memcached.helper.kvstore import KVStore
from .exception import ServerAlreadyJoinedException, ServerUnavailableException, InvalidArgumentException
from membase.api.exception import BucketCreationException, ServerSelfJoinException, ClusterRemoteException, \
RebalanceFailedException, FailoverFailedException, DesignDocCreationException, QueryViewException, \
ReadDocumentException, GetBucketInfoFailed, CompactViewFailed, SetViewInfoNotFound, AddNodeException, \
BucketFlushFailed, CBRecoveryFailedException, XDCRException, SetRecoveryTypeFailed, BucketCompactionException
log = logger.Logger.get_logger()
# helper library methods built on top of RestConnection interface
class RestHelper(object):
def __init__(self, rest_connection):
self.rest = rest_connection
def is_ns_server_running(self, timeout_in_seconds=360):
log.info("-->is_ns_server_running?")
end_time = time.time() + timeout_in_seconds
while time.time() <= end_time:
try:
status = self.rest.get_nodes_self(5)
if status is not None and status.status == 'healthy':
return True
else:
if status is not None:
log.warn("server {0}:{1} status is {2}"\
.format(self.rest.ip, self.rest.port, status.status))
else:
log.warn("server {0}:{1} status is down"\
.format(self.rest.ip, self.rest.port))
except ServerUnavailableException:
log.error("server {0}:{1} is unavailable"\
.format(self.rest.ip, self.rest.port))
time.sleep(5)
msg = 'unable to connect to the node {0} even after waiting {1} seconds'
log.error(msg.format(self.rest.ip, timeout_in_seconds))
return False
def is_cluster_healthy(self, timeout=120):
# get the nodes and verify that all the nodes.status are healthy
nodes = self.rest.node_statuses(timeout)
return all(node.status == 'healthy' for node in nodes)
def rebalance_reached(self, percentage=100,retry_count=40):
start = time.time()
progress = 0
previous_progress = 0
retry = 0
while progress != -1 and progress < percentage and retry < retry_count:
# -1 is error , -100 means could not retrieve progress
progress = self.rest._rebalance_progress()
if progress == -100:
log.error("unable to retrieve rebalanceProgress.try again in 2 seconds")
retry += 1
else:
if previous_progress == progress:
retry += 0.5
else:
retry = 0
previous_progress = progress
# sleep for 3 seconds before polling progress again
time.sleep(3)
if progress <= 0:
log.error("rebalance progress code : {0}".format(progress))
return False
elif retry >= retry_count:
log.error("rebalance stuck on {0}%".format(progress))
return False
else:
duration = time.time() - start
log.info('rebalance reached >{0}% in {1} seconds '.format(progress, duration))
return True
# return true if cluster balanced, false if it needs rebalance
def is_cluster_rebalanced(self):
command = "ns_orchestrator:needs_rebalance()"
status, content = self.rest.diag_eval(command)
if status:
return content.lower() == "false"
log.error("can't define if cluster balanced")
return None
# this method will rebalance the cluster by passing the remote_node as
# ejected node
def remove_nodes(self, knownNodes, ejectedNodes, wait_for_rebalance=True):
if len(ejectedNodes) == 0:
return False
self.rest.rebalance(knownNodes, ejectedNodes)
if wait_for_rebalance:
return self.rest.monitorRebalance()
else:
return False
def vbucket_map_ready(self, bucket, timeout_in_seconds=360):
end_time = time.time() + timeout_in_seconds
while time.time() <= end_time:
vBuckets = self.rest.get_vbuckets(bucket)
if vBuckets:
return True
else:
time.sleep(0.5)
msg = 'vbucket map is not ready for bucket {0} after waiting {1} seconds'
log.info(msg.format(bucket, timeout_in_seconds))
return False
def bucket_exists(self, bucket):
try:
buckets = self.rest.get_buckets()
names = [item.name for item in buckets]
log.info("node {1} existing buckets : {0}" \
.format(names, self.rest.ip))
for item in buckets:
if item.name == bucket:
log.info("node {1} found bucket {0}" \
.format(bucket, self.rest.ip))
return True
return False
except Exception:
return False
def wait_for_node_status(self, node, expected_status, timeout_in_seconds):
status_reached = False
end_time = time.time() + timeout_in_seconds
while time.time() <= end_time and not status_reached:
nodes = self.rest.node_statuses()
for n in nodes:
if node.id == n.id:
log.info('node {0} status : {1}'.format(node.id, n.status))
if n.status.lower() == expected_status.lower():
status_reached = True
break
if not status_reached:
log.info("sleep for 5 seconds before reading the node.status again")
time.sleep(5)
log.info('node {0} status_reached : {1}'.format(node.id, status_reached))
return status_reached
def _wait_for_task_pid(self, pid, end_time, ddoc_name):
while (time.time() < end_time):
new_pid, _ = self.rest._get_indexer_task_pid(ddoc_name)
if pid == new_pid:
time.sleep(5)
continue
else:
return
def _wait_for_indexer_ddoc(self, servers, ddoc_name, timeout=300):
nodes = self.rest.get_nodes()
servers_to_check = []
for node in nodes:
for server in servers:
if node.ip == server.ip and str(node.port) == str(server.port):
servers_to_check.append(server)
for server in servers_to_check:
try:
rest = RestConnection(server)
log.info('Check index for ddoc %s , server %s' % (ddoc_name, server.ip))
end_time = time.time() + timeout
log.info('Start getting index for ddoc %s , server %s' % (ddoc_name, server.ip))
old_pid, is_pid_blocked = rest._get_indexer_task_pid(ddoc_name)
if not old_pid:
log.info('Index for ddoc %s is not going on, server %s' % (ddoc_name, server.ip))
continue
while is_pid_blocked:
log.info('Index for ddoc %s is blocked, server %s' % (ddoc_name, server.ip))
self._wait_for_task_pid(old_pid, end_time, ddoc_name)
old_pid, is_pid_blocked = rest._get_indexer_task_pid(ddoc_name)
if time.time() > end_time:
log.error("INDEX IS STILL BLOKED node %s ddoc % pid %" % (server, ddoc_name, old_pid))
break
if old_pid:
log.info('Index for ddoc %s is running, server %s' % (ddoc_name, server.ip))
self._wait_for_task_pid(old_pid, end_time, ddoc_name)
except Exception as ex:
log.error('unable to check index on server %s because of %s' % (server.ip, str(ex)))
def _get_vbuckets(self, servers, bucket_name='default'):
vbuckets_servers = {}
for server in servers:
buckets = RestConnection(server).get_buckets()
if not buckets:
return vbuckets_servers
if bucket_name:
bucket_to_check = [bucket for bucket in buckets
if bucket.name == bucket_name][0]
else:
bucket_to_check = [bucket for bucket in buckets][0]
vbuckets_servers[server] = {}
vbs_active = [vb.id for vb in bucket_to_check.vbuckets
if vb.master.startswith(str(server.ip))]
vbs_replica = []
for replica_num in range(0, bucket_to_check.numReplicas):
vbs_replica.extend([vb.id for vb in bucket_to_check.vbuckets
if replica_num in vb.replica
and vb.replica[replica_num].startswith(str(server.ip))])
vbuckets_servers[server]['active_vb'] = vbs_active
vbuckets_servers[server]['replica_vb'] = vbs_replica
return vbuckets_servers
class RestConnection(object):
def __new__(cls, serverInfo={}):
# allow port to determine
# behavior of restconnection
port = None
if isinstance(serverInfo, dict):
if 'port' in serverInfo:
port = serverInfo['port']
else:
port = serverInfo.port
if not port:
port = CbServer.port
if CbServer.use_https:
port = CbServer.ssl_port
if int(port) in range(9091, 9100):
# return elastic search rest connection
from membase.api.esrest_client import EsRestConnection
obj = super(EsRestConnection,cls).__new__(cls)
else:
# default
obj = object.__new__(cls)
return obj
def __init__(self, serverInfo):
# serverInfo can be a json object/dictionary
if isinstance(serverInfo, dict):
self.ip = serverInfo["ip"]
self.username = serverInfo["username"]
self.password = serverInfo["password"]
self.port = serverInfo["port"]
self.index_port = CbServer.index_port
self.fts_port = CbServer.fts_port
self.query_port = CbServer.n1ql_port
self.eventing_port = CbServer.eventing_port
self.capi_port = CbServer.capi_port
if "index_port" in list(serverInfo.keys()):
self.index_port = serverInfo["index_port"]
if "fts_port" in list(serverInfo.keys()):
if serverInfo['fts_port']:
self.fts_port = serverInfo["fts_port"]
if "eventing_port" in list(serverInfo.keys()):
if serverInfo['eventing_port']:
self.eventing_port = serverInfo["eventing_port"]
self.hostname = ''
self.services = ''
if "hostname" in serverInfo:
self.hostname = serverInfo["hostname"]
if "services" in serverInfo:
self.services = serverInfo["services"]
else:
self.ip = serverInfo.ip
self.username = serverInfo.rest_username
self.password = serverInfo.rest_password
self.port = serverInfo.port
self.hostname = ''
self.index_port = CbServer.index_port
self.fts_port = CbServer.fts_port
self.query_port = CbServer.n1ql_port
self.eventing_port = CbServer.eventing_port
self.capi_port = CbServer.capi_port
self.services = "kv"
self.debug_logs = False
if hasattr(serverInfo, "services"):
self.services = serverInfo.services
if hasattr(serverInfo, 'index_port'):
self.index_port = serverInfo.index_port
if hasattr(serverInfo, 'query_port'):
self.query_port = serverInfo.query_port
if hasattr(serverInfo, 'fts_port'):
if serverInfo.fts_port:
self.fts_port = serverInfo.fts_port
if hasattr(serverInfo, 'eventing_port'):
if serverInfo.eventing_port:
self.eventing_port = serverInfo.eventing_port
if hasattr(serverInfo, 'hostname') and serverInfo.hostname and\
serverInfo.hostname.find(self.ip) == -1:
self.hostname = serverInfo.hostname
if hasattr(serverInfo, 'services'):
self.services = serverInfo.services
self.input = TestInputSingleton.input
if self.input is not None:
""" from watson, services param order and format:
new_services=fts-kv-index-n1ql """
self.services_node_init = self.input.param("new_services", None)
self.debug_logs = self.input.param("debug-logs", False)
self.eventing_role = self.input.param('eventing_role', False)
if CbServer.use_https:
self.port = CbServer.ssl_port_map.get(str(self.port),
str(self.port))
self.index_port = CbServer.ssl_port_map.get(str(self.index_port),
str(self.index_port))
self.query_port = CbServer.ssl_port_map.get(str(self.query_port),
str(self.query_port))
self.fts_port = CbServer.ssl_port_map.get(str(self.fts_port),
str(self.fts_port))
self.eventing_port = CbServer.ssl_port_map.get(str(self.eventing_port),
str(self.eventing_port))
self.capi_port = CbServer.ssl_port_map.get(str(self.capi_port), str(self.capi_port))
http_url = "http://%s:%s/"
https_url = "https://%s:%s/"
generic_url = http_url
if CbServer.use_https:
generic_url = https_url
url_host = "%s" % self.ip
if self.hostname:
url_host = "%s" % self.hostname
self.baseUrl = generic_url % (url_host, self.port)
self.fts_baseUrl = generic_url % (url_host, self.fts_port)
self.index_baseUrl = generic_url % (url_host, self.index_port)
self.query_baseUrl = generic_url % (url_host, self.query_port)
self.capiBaseUrl = generic_url % (url_host, self.capi_port)
self.eventing_baseUrl = generic_url % (url_host, self.eventing_port)
# Initialization of CBAS related params
self.cbas_ip = self.ip
self.cbas_port = CbServer.cbas_port
if hasattr(self.input, 'cbas'):
if self.input.cbas:
self.cbas_node = self.input.cbas
if hasattr(self.cbas_node, 'port'):
self.cbas_port = self.cbas_node.port
if hasattr(self.cbas_node, 'ip'):
self.cbas_ip = self.cbas_node.ip
if CbServer.use_https:
self.cbas_port = CbServer.ssl_cbas_port
self.cbas_base_url = generic_url % (self.cbas_ip, self.cbas_port)
self.cbas_base_url = self.cbas_base_url[:-1]
# for Node is unknown to this cluster error
for iteration in range(5):
http_res, success = self.init_http_request(api=self.baseUrl + "nodes/self")
if not success and isinstance(http_res, str) and\
(http_res.find('Node is unknown to this cluster') > -1 or \
http_res.find('Unexpected server error, request logged') > -1):
log.error("Error {0} was gotten, 5 seconds sleep before retry"\
.format(http_res))
time.sleep(5)
if iteration == 2:
log.error("node {0}:{1} is in a broken state!"\
.format(self.ip, self.port))
raise ServerUnavailableException(self.ip)
continue
else:
break
# determine the real couchApiBase for cluster_run
# couchApiBase appeared in version 2.*
if isinstance(http_res, dict):
if not http_res or http_res["version"][0:2] == "1.":
self.capiBaseUrl = self.baseUrl + "/couchBase"
else:
for iteration in range(5):
if "couchApiBase" not in http_res.keys():
if self.is_cluster_mixed():
self.capiBaseUrl = self.baseUrl + "/couchBase"
return
time.sleep(0.2)
http_res, success = self.init_http_request(self.baseUrl + 'nodes/self')
else:
if CbServer.use_https:
self.capiBaseUrl = http_res["couchApiBaseHTTPS"]
else:
self.capiBaseUrl = http_res["couchApiBase"]
return
raise ServerUnavailableException("couchApiBase doesn't exist in nodes/self: %s " % http_res)
def sasl_streaming_rq(self, bucket, timeout=120,
disable_ssl_certificate_validation=True):
api = self.baseUrl + 'pools/default/bucketsStreaming/{0}'.format(bucket)
if isinstance(bucket, Bucket):
api = self.baseUrl + 'pools/default/bucketsStreaming/{0}'.format(bucket.name)
try:
httplib2.Http(timeout=timeout, disable_ssl_certificate_validation=disable_ssl_certificate_validation).\
request(api, 'GET', '', headers=self._create_capi_headers())
except Exception as ex:
log.warn('Exception while streaming: %s' % str(ex))
def open_sasl_streaming_connection(self, bucket, timeout=1000):
if self.debug_logs:
log.info("Opening sasl streaming connection for bucket {0}"\
.format((bucket, bucket.name)[isinstance(bucket, Bucket)]))
t = Thread(target=self.sasl_streaming_rq,
name="streaming_" + str(uuid.uuid4())[:4],
args=(bucket, timeout))
try:
t.start()
except:
log.warn("thread is not started")
return None
return t
def is_cluster_mixed(self, timeout=120):
http_res, success = self.init_http_request(self.baseUrl + 'pools/default', timeout=timeout)
if http_res == 'unknown pool':
return False
try:
versions = list({node["version"][:1] for node in http_res["nodes"]})
except:
log.error('Error while processing cluster info {0}'.format(http_res))
# not really clear what to return but False see to be a good start until we figure what is happening
return False
if '1' in versions and '2' in versions:
return True
return False
def is_cluster_compat_mode_greater_than(self, version):
"""
curl -v -X POST -u Administrator:welcome http://10.3.4.186:8091/diag/eval
-d 'cluster_compat_mode:get_compat_version().'
Returns : [3,2] if version = 3.2.0
"""
status, content = self.diag_eval('cluster_compat_mode:get_compat_version().')
if status:
json_parsed = json.loads(content)
cluster_ver = float("%s.%s" % (json_parsed[0], json_parsed[1]))
if cluster_ver > version:
return True
return False
def is_enterprise_edition(self):
http_res, success = self.init_http_request(self.baseUrl + 'pools/default')
if http_res == 'unknown pool':
return False
editions = []
community_nodes = []
""" get the last word in node["version"] as in "version": "2.5.1-1073-rel-enterprise" """
for node in http_res["nodes"]:
editions.extend(node["version"].split("-")[-1:])
if "community" in node["version"].split("-")[-1:]:
community_nodes.extend(node["hostname"].split(":")[:1])
if "community" in editions:
log.error("IP(s) for node(s) with community edition {0}".format(community_nodes))
return False
return True
def init_http_request(self, api, timeout=120):
content = None
try:
headers = self._create_capi_headers()
status, content, header = self._http_request(api, 'GET', headers=headers, timeout=timeout)
json_parsed = json.loads(content)
if status:
return json_parsed, True
else:
print("{0} with status {1}: {2}".format(api, status, json_parsed))
return json_parsed, False
except ValueError as e:
if content is not None:
print("{0}: {1}".format(api, content))
else:
print(e)
return content, False
def rename_node(self, hostname, username='Administrator', password='password'):
params = urllib.parse.urlencode({'username': username,
'password': password,
'hostname': hostname})
api = "%snode/controller/rename" % self.baseUrl
status, content, header = self._http_request(api, 'POST', params)
return status, content
def active_tasks(self):
api = self.baseUrl + "pools/default/tasks"
try:
status, content, header = self._http_request(api, 'GET',
headers=self._create_capi_headers())
json_parsed = json.loads(content)
except ValueError as e:
print(e)
return ""
return json_parsed
def ns_server_tasks(self):
api = self.baseUrl + 'pools/default/tasks'
retries = 3
while retries:
try:
status, content, header = self._http_request(api, 'GET', headers=self._create_headers())
return json.loads(content)
except ValueError:
time.sleep(10)
retries -= 1
return ""
# DEPRECATED: use create_ddoc() instead.
def create_view(self, design_doc_name, bucket_name, views, options=None):
return self.create_ddoc(design_doc_name, bucket_name, views, options)
def create_ddoc(self, design_doc_name, bucket, views, options=None):
design_doc = DesignDocument(design_doc_name, views, options=options)
if design_doc.name.find('/') != -1:
design_doc.name = design_doc.name.replace('/', '%2f')
design_doc.id = '_design/{0}'.format(design_doc.name)
return self.create_design_document(bucket, design_doc)
def create_design_document(self, bucket, design_doc):
log.info("-->create_design_document")
try:
design_doc_name = design_doc.id
api = '%s/%s/%s' % (self.capiBaseUrl, bucket, design_doc_name)
if isinstance(bucket, Bucket):
api = '%s/%s/%s' % (self.capiBaseUrl, bucket.name, design_doc_name)
status, content, header = self._http_request(api, 'PUT', str(design_doc),
headers=self._create_capi_headers())
except Exception as e:
traceback.print_exc()
if not status:
raise DesignDocCreationException(design_doc_name, content)
return json.loads(content.decode())
def is_index_triggered(self, ddoc_name, index_type='main'):
run, block = self._get_indexer_task_pid(ddoc_name, index_type=index_type)
if run or block:
return True
else:
return False
def _get_indexer_task_pid(self, ddoc_name, index_type='main'):
active_tasks = self.active_tasks()
if 'error' in active_tasks:
return None
if active_tasks:
for task in active_tasks:
if task['type'] == 'indexer' and task['indexer_type'] == index_type:
for ddoc in task['design_documents']:
if ddoc == ('_design/%s' % ddoc_name):
return task['pid'], False
if task['type'] == 'blocked_indexer' and task['indexer_type'] == index_type:
for ddoc in task['design_documents']:
if ddoc == ('_design/%s' % ddoc_name):
return task['pid'], True
return None, None
def query_view(self, design_doc_name, view_name, bucket, query, timeout=120, invalid_query=False, type="view"):
status, content, header = self._query(design_doc_name, view_name, bucket, type, query, timeout)
if not status and not invalid_query:
stat = 0
if 'status' in header:
stat = int(header['status'])
raise QueryViewException(view_name, content, status=stat)
return json.loads(content)
def _query(self, design_doc_name, view_name, bucket, view_type, query, timeout):
if design_doc_name.find('/') != -1:
design_doc_name = design_doc_name.replace('/', '%2f')
if view_name.find('/') != -1:
view_name = view_name.replace('/', '%2f')
api = self.capiBaseUrl + '%s/_design/%s/_%s/%s?%s' % (bucket,
design_doc_name, view_type,
view_name,
urllib.parse.urlencode(query))
if isinstance(bucket, Bucket):
api = self.capiBaseUrl + '%s/_design/%s/_%s/%s?%s' % (bucket.name,
design_doc_name, view_type,
view_name,
urllib.parse.urlencode(query))
log.info("index query url: {0}".format(api))
status, content, header = self._http_request(api, headers=self._create_capi_headers(),
timeout=timeout)
return status, content, header
def view_results(self, bucket, ddoc_name, params, limit=100, timeout=120,
view_name=None):
status, json = self._index_results(bucket, "view", ddoc_name, params, limit, timeout=timeout, view_name=view_name)
if not status:
raise Exception("unable to obtain view results")
return json
# DEPRECATED: Incorrectly named function kept for backwards compatibility.
def get_view(self, bucket, view):
log.info("DEPRECATED function get_view(" + view + "). use get_ddoc()")
return self.get_ddoc(bucket, view)
def get_data_path(self):
node_info = self.get_nodes_self()
data_path = node_info.storage[0].get_data_path()
return data_path
def get_index_path(self):
node_info = self.get_nodes_self()
data_path = node_info.storage[0].get_index_path()
return data_path
def get_memcached_port(self):
node_info = self.get_nodes_self()
return node_info.memcached
def get_ddoc(self, bucket, ddoc_name):
status, json, meta = self._get_design_doc(bucket, ddoc_name)
if not status:
raise ReadDocumentException(ddoc_name, json)
return json, meta
# the same as Preview a Random Document on UI
def get_random_key(self, bucket):
api = self.baseUrl + 'pools/default/buckets/%s/localRandomKey' % bucket
status, content, header = self._http_request(api, headers=self._create_capi_headers())
json_parsed = json.loads(content)
if not status:
raise Exception("unable to get random document/key for bucket %s" % bucket)
return json_parsed
def create_scope(self, bucket, scope, params=None, num_retries=3):
api = self.baseUrl + 'pools/default/buckets/%s/scopes' % bucket
body = {'name': scope}
if params:
body.update(params)
params = urllib.parse.urlencode(body)
headers = self._create_headers()
while num_retries > 0:
status, content, header = self._http_request(api, 'POST', params=params, headers=headers)
log.info("{0} with params: {1}".format(api, params))
if status:
json_parsed = json.loads(content)
log.info("Scope created {}->{} {}".format(bucket, scope, json_parsed))
break
elif header["status"] == "400":
log.info("Scope already exists. Skipping create {}->{}".format(bucket, scope))
break
else:
time.sleep(10)
num_retries -= 1
else:
raise Exception("Create scope failed : status:{0},content:{1}".format(status, content))
return status
def _create_single_collection(self, bucket, scope, collection, params=None):
api = self.baseUrl + 'pools/default/buckets/%s/scopes/%s/collections' % (bucket, scope)
body = {'name': collection}
if params:
body.update(params)
params = urllib.parse.urlencode(body)
headers = self._create_headers()
status, content, header = self._http_request(api, 'POST', params=params, headers=headers)
log.info("{0} with params: {1}".format(api, params))
return status,content,header
def create_collection(self, bucket, scope, collection, params=None, num_retries=3):
if not isinstance(collection, list):
collection = [collection]
for c in collection:
while num_retries > 0:
status, content, header = self._create_single_collection(bucket, scope, c, params)
if status:
json_parsed = json.loads(content)
log.info("Collection created {}->{}->{} manifest:{}".format(bucket, scope, c, json_parsed))
break
elif header["status"] == "400":
log.info("Collection already exists. Skipping create {}->{}-{}".format(bucket, scope, c))
break
else:
time.sleep(10)
num_retries -= 1
else:
raise Exception("Create collection failed : status:{0},content:{1}".format(status, content))
return status
def put_collection_scope_manifest(self, bucket, manifest, ensure_manifest=True):
""" Put collection scope manifest to bulk update collection/scopes
Args:
ensure_manifest (bool): If set, blocks until the manifest has been applied to all nodes as
the endpoint is asynchronous.
"""
if isinstance(bucket, Bucket):
bucket = bucket.name
params, headers = json.dumps(manifest), self._create_capi_headers()
status, content, _ = self._http_request(f"{self.baseUrl}pools/default/buckets/{bucket}/scopes", 'PUT',
params=params, headers=headers)
if ensure_manifest:
uid = json.loads(content)['uid']
ensure_manifest_status, manifest_content, _ = self._http_request(
f"{self.baseUrl}pools/default/buckets/{bucket}/scopes/@ensureManifest/{uid}", 'POST',
headers=headers)
return status
def get_bucket_manifest(self, bucket):
if isinstance(bucket, Bucket):
bucket = bucket.name
api = '{0}{1}{2}{3}'.format(self.baseUrl, 'pools/default/buckets/', bucket, '/scopes')
status, content, header = self._http_request(api)
if status:
return json.loads(content)
else:
raise Exception(
"Cannot get manifest for bucket {}: status:{}, content:{}".format(bucket, status, content))
def _parse_manifest(self, bucket, extract=None):
try:
manifest = self.get_bucket_manifest(bucket)
scopes = []
collections = []
for scope in manifest["scopes"]:
scopes.append(scope["name"])
for collection in scope["collections"]:
collections.append(collection["name"])
if extract == "scopes":
return scopes
elif extract == "collections":
return collections
except Exception as e:
raise Exception("Cannot extract {} for bucket {} from manifest {}".format(extract, bucket, e.message))
def get_bucket_scopes(self, bucket):
return self._parse_manifest(bucket, "scopes")
def get_bucket_collections(self, bucket):
return self._parse_manifest(bucket, "collections")
def get_scope_collections(self, bucket, scope):
try:
manifest = self.get_bucket_manifest(bucket)
scope_found = False
collections_in_scope = []
for scopes in manifest["scopes"]:
if scopes['name'] == scope:
scope_found = True
for collection in scopes['collections']:
collections_in_scope.append(collection['name'])
if not scope_found:
log.error("Cannot get collections for scope {} because it does not exist".format(scope))
return collections_in_scope
except Exception as e:
raise Exception("Cannot get collections for bucket {}-> scope{} {}".format(bucket, scope, e.message))
def delete_scope(self, bucket, scope):
api = self.baseUrl + 'pools/default/buckets/%s/scopes/%s' % (bucket, scope)
headers = self._create_headers()
status, content, header = self._http_request(api, 'DELETE', headers=headers)
log.info("{0}".format(api))
return status
def get_rest_endpoint_data(self, endpoint=None, ip=None, port=None):
protocol = "http"
if CbServer.use_https:
port = CbServer.ssl_port_map.get(str(port), str(port))
protocol = "https"
endpoint_base_url = "{0}://{1}:{2}/".format(protocol, ip, port)
api = str(endpoint_base_url) + str(endpoint)
print(f'Executing GET on: {api}')
headers = self._create_headers()
status, content, header = self._http_request(api, 'GET', headers=headers)
return status, content
def delete_collection(self, bucket, scope, collection):
api = self.baseUrl + 'pools/default/buckets/%s/scopes/%s/collections/%s' % (bucket, scope, collection)
headers = self._create_headers()
status, content, header = self._http_request(api, 'DELETE', headers=headers)
return status
def get_collection(self, bucket):
api = self.baseUrl + 'pools/default/buckets/%s/scopes' % bucket
headers = self._create_headers()
status, content, header = self._http_request(api, 'GET', headers=headers)
return status, content
def get_collection_uid(self, bucket, scope, collection):
try:
manifest = self.get_bucket_manifest(bucket)
for scopes in manifest["scopes"]:
if scopes['name'] == scope:
for col in scopes['collections']:
if col['name'] == collection:
return col['uid']
log.error("Cannot get collection uid because {0}.{1}.{2} does not exist"
.format(bucket, scope, collection))
except Exception as e:
raise Exception("Exception thrown while getting collection uid {}"
.format(e.message))
def run_view(self, bucket, view, name):
api = self.capiBaseUrl + '/%s/_design/%s/_view/%s' % (bucket, view, name)
status, content, header = self._http_request(api, headers=self._create_capi_headers())
json_parsed = json.loads(content)
if not status:
raise Exception("unable to create view")
return json_parsed
def delete_view(self, bucket, view):
status, json = self._delete_design_doc(bucket, view)
if not status:
raise Exception("unable to delete the view")
return json
def spatial_results(self, bucket, spatial, params, limit=100):
status, json = self._index_results(bucket, "spatial", spatial,
params, limit)
if not status:
raise Exception("unable to obtain spatial view results")
return json
def create_spatial(self, bucket, spatial, function):
status, json = self._create_design_doc(bucket, spatial, function)
if not status:
raise Exception("unable to create spatial view")
return json
def get_spatial(self, bucket, spatial):
status, json, meta = self._get_design_doc(bucket, spatial)
if not status:
raise Exception("unable to get the spatial view definition")
return json, meta
def delete_spatial(self, bucket, spatial):
status, json = self._delete_design_doc(bucket, spatial)
if not status:
raise Exception("unable to delete the spatial view")
return json
# type_ is "view" or "spatial"
def _index_results(self, bucket, type_, ddoc_name, params, limit, timeout=120,
view_name=None):
if view_name is None:
view_name = ddoc_name
query = '/{0}/_design/{1}/_{2}/{3}'
api = self.capiBaseUrl + query.format(bucket, ddoc_name, type_, view_name)
num_params = 0
if limit != None:
num_params = 1
api += "?limit={0}".format(limit)
for param in params:
if num_params > 0:
api += "&"
else:
api += "?"
num_params += 1
if param in ["key", "startkey", "endkey", "start_range",
"end_range"] or isinstance(params[param], bool):
api += "{0}={1}".format(param,
json.dumps(params[param],
separators=(',', ':')))
else:
api += "{0}={1}".format(param, params[param])
log.info("index query url: {0}".format(api))
status, content, header = self._http_request(api, headers=self._create_capi_headers(), timeout=timeout)
json_parsed = json.loads(content)
return status, json_parsed
def get_couch_doc(self, doc_id, bucket="default", timeout=120):
""" use couchBase uri to retrieve document from a bucket """
api = self.capiBaseUrl + '/%s/%s' % (bucket, doc_id)
status, content, header = self._http_request(api, headers=self._create_capi_headers(),
timeout=timeout)
if not status:
raise ReadDocumentException(doc_id, content)
return json.loads(content)
def _create_design_doc(self, bucket, name, function):
api = self.capiBaseUrl + '/%s/_design/%s' % (bucket, name)
status, content, header = self._http_request(
api, 'PUT', function, headers=self._create_capi_headers())
json_parsed = json.loads(content)
return status, json_parsed
def _get_design_doc(self, bucket, name):
api = self.capiBaseUrl + '/%s/_design/%s' % (bucket, name)
if isinstance(bucket, Bucket):
api = self.capiBaseUrl + '/%s/_design/%s' % (bucket.name, name)
status, content, header = self._http_request(api, headers=self._create_capi_headers())
json_parsed = json.loads(content.decode())
meta_parsed = ""
if status:
# in dp4 builds meta data is in content, not in header
if 'X-Couchbase-Meta' in header:
meta = header['X-Couchbase-Meta']
meta_parsed = json.loads(meta)
elif 'x-couchbase-meta' in header:
meta = header['x-couchbase-meta']
meta_parsed = json.loads(meta)
else:
meta_parsed = {}
try:
meta_parsed["_rev"] = json_parsed["_rev"]
meta_parsed["_id"] = json_parsed["_id"]
except KeyError:
pass
return status, json_parsed, meta_parsed
def _delete_design_doc(self, bucket, name):
status, design_doc, meta = self._get_design_doc(bucket, name)
if not status:
raise Exception("unable to find for deletion design document")
api = self.capiBaseUrl + '/%s/_design/%s' % (bucket, name)
if isinstance(bucket, Bucket):
api = self.capiBaseUrl + '/%s/_design/%s' % (bucket.name, name)
status, content, header = self._http_request(api, 'DELETE',
headers=self._create_capi_headers())
json_parsed = json.loads(content)
return status, json_parsed
def spatial_compaction(self, bucket, design_name):
api = self.capiBaseUrl + '/%s/_design/%s/_spatial/_compact' % (bucket, design_name)
if isinstance(bucket, Bucket):
api = self.capiBaseUrl + \
'/%s/_design/%s/_spatial/_compact' % (bucket.name, design_name)
status, content, header = self._http_request(api, 'POST',
headers=self._create_capi_headers())
json_parsed = json.loads(content)
return status, json_parsed
# Make a _design/_info request
def set_view_info(self, bucket, design_name):
"""Get view diagnostic info (node specific)"""
api = self.capiBaseUrl
if isinstance(bucket, Bucket):
api += '/_set_view/{0}/_design/{1}/_info'.format(bucket.name, design_name)
else:
api += '_set_view/{0}/_design/{1}/_info'.format(bucket, design_name)
status, content, header = self._http_request(api, 'GET',
headers=self._create_capi_headers())
if not status:
raise SetViewInfoNotFound(design_name, content)
json_parsed = json.loads(content)
return status, json_parsed
# Make a _spatial/_info request
def spatial_info(self, bucket, design_name):
api = self.capiBaseUrl + \
'/%s/_design/%s/_spatial/_info' % (bucket, design_name)
status, content, header = self._http_request(
api, 'GET', headers=self._create_capi_headers())
json_parsed = json.loads(content)
return status, json_parsed
def _create_capi_headers(self):
authorization = self.get_authorization(self.username, self.password)
return {'Content-Type': 'application/json',
'Authorization': 'Basic %s' % authorization,
'Accept': '*/*'}
def _create_capi_headers_with_auth(self, username, password):
authorization = self.get_authorization(username, password)
return {'Content-Type': 'application/json',
'Authorization': 'Basic %s' % authorization,
'Accept': '*/*'}
def _create_headers_with_auth(self, username, password):
authorization = self.get_authorization(username, password)
return {'Authorization': 'Basic %s' % authorization}
# authorization must be a base64 string of username:password
def _create_headers(self):
authorization = self.get_authorization(self.username, self.password)
return {'Content-Type': 'application/x-www-form-urlencoded',
'Authorization': 'Basic %s' % authorization,
'Accept': '*/*'}
# authorization must be a base64 string of username:password
def _create_headers_encoded_prepared(self):
authorization = self.get_authorization(self.username, self.password)
return {'Content-Type': 'application/json',
'Authorization': 'Basic %s' % authorization}
def _get_auth(self, headers):
key = 'Authorization'
if key in headers:
val = headers[key]
if val.startswith("Basic "):
try:
val = val.encode()
return str("auth: " + base64.decodebytes(val[6:]).decode())
except Exception as e:
print(e)
return ""
def _http_request(self, api, method='GET', params='',
headers=None, timeout=120, disable_ssl_certificate_validation=True):
if not headers:
headers = self._create_headers()
end_time = time.time() + timeout
log.debug("Executing {0} request for following api {1} with Params: {2} and Headers: {3}"\
.format(method, api, params, headers))
count = 1
t1 = 3
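# t1 is the retry back-off delay in seconds; it doubles after every failed attempt (3, 6, 12, ...)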
while True:
try:
try:
if TestInputSingleton.input.param("debug.api.calls", False):
log.info("--->Start calling httplib2.Http({}).request({},{},{},{})".format(timeout,api,headers,method,params))
except AttributeError:
pass
response, content = httplib2.Http(timeout=timeout,
disable_ssl_certificate_validation=disable_ssl_certificate_validation).\
request(api, method, params, headers)
try:
if TestInputSingleton.input.param("debug.api.calls", False):
log.info(
"--->End calling httplib2.Http({}).request({},{},{},{})".format(timeout, api, headers,
method, params))
except AttributeError:
pass
if response['status'] in ['200', '201', '202']:
return True, content, response
else:
try:
json_parsed = json.loads(content)
except ValueError as e:
json_parsed = {}
json_parsed["error"] = "status: {0}, content: {1}"\
.format(response['status'], content)
reason = "unknown"
if "error" in json_parsed:
reason = json_parsed["error"]
message = '{0} {1} body: {2} headers: {3} error: {4} reason: {5} {6} {7}'.\
format(method, api, params, headers, response['status'], reason,
str(str(content).rstrip('\n')), self._get_auth(headers))
log.error(message)
log.debug(''.join(traceback.format_stack()))
return False, content, response
except socket.error as e:
if count < 4:
log.error("socket error while connecting to {0} error {1} ".format(api, e))
if time.time() > end_time:
log.error("Giving up due to {2}! Tried {0} connect {1} times.".format(
api, count, e))
raise ServerUnavailableException(ip=self.ip)
except (AttributeError, httplib2.ServerNotFoundError) as e:
if count < 4:
log.error("ServerNotFoundError error while connecting to {0} error {1} "\
.format(api, e))
if time.time() > end_time:
log.error("Giving up due to {2}! Tried {0} connect {1} times.".\
format(api, count, e))
raise ServerUnavailableException(ip=self.ip)
time.sleep(t1)
count += 1
t1 *= 2
def init_cluster(self, username='Administrator', password='password', port='8091'):
log.info("--> in init_cluster...{},{},{}".format(username,password,port))
api = self.baseUrl + 'settings/web'
params = urllib.parse.urlencode({'port': port,
'username': username,
'password': password})
log.info('settings/web params on {0}:{1}:{2}'.format(self.ip, self.port, params))
status, content, header = self._http_request(api, 'POST', params=params)
log.info("--> status:{}".format(status))
return status
def init_node(self, set_node_services=None):
""" need a standalone method to initialize a node that could call
anywhere with quota from testconstant """
self.node_services = []
if set_node_services is None:
set_node_services = self.services_node_init
if set_node_services is None and self.services == "":
self.node_services = ["kv"]
elif set_node_services is None and self.services != "":
self.node_services = self.services.split(",")
elif set_node_services is not None:
if "-" in set_node_services:
self.node_services = set_node_services.split("-")
if "," in set_node_services:
self.node_services = set_node_services.split(",")
kv_quota = 0
while kv_quota == 0:
time.sleep(1)
kv_quota = int(self.get_nodes_self().mcdMemoryReserved)
info = self.get_nodes_self()
kv_quota = int(info.mcdMemoryReserved * CLUSTER_QUOTA_RATIO)
cb_version = info.version[:5]
if cb_version in COUCHBASE_FROM_VERSION_4:
if "index" in self.node_services:
log.info("quota for index service will be %s MB" % (INDEX_QUOTA))
kv_quota -= INDEX_QUOTA
log.info("set index quota to node %s " % self.ip)
self.set_service_memoryQuota(service='indexMemoryQuota', memoryQuota=INDEX_QUOTA)
if "fts" in self.node_services:
log.info("quota for fts service will be %s MB" % (FTS_QUOTA))
kv_quota -= FTS_QUOTA
log.info("set both index and fts quota at node %s "% self.ip)
self.set_service_memoryQuota(service='ftsMemoryQuota', memoryQuota=FTS_QUOTA)
if "cbas" in self.node_services:
log.info("quota for cbas service will be %s MB" % (CBAS_QUOTA))
kv_quota -= CBAS_QUOTA
self.set_service_memoryQuota(service = "cbasMemoryQuota", memoryQuota=CBAS_QUOTA)
kv_quota -= 1
if kv_quota < MIN_KV_QUOTA:
raise Exception("KV RAM needs to be more than %s MB"
" at node %s" % (MIN_KV_QUOTA, self.ip))
log.info("quota for kv: %s MB" % kv_quota)
self.init_cluster_memoryQuota(self.username, self.password, kv_quota)
if cb_version in COUCHBASE_FROM_VERSION_4:
self.init_node_services(username=self.username, password=self.password,
services=self.node_services)
self.init_cluster(username=self.username, password=self.password)
return kv_quota
def init_node_services(self, username='Administrator', password='password', hostname='127.0.0.1', port='8091', services=None):
if CbServer.use_https:
port = CbServer.ssl_port_map.get(str(port), str(port))
log.info("--> init_node_services({},{},{},{},{})".format(username,password,hostname,port,services))
api = self.baseUrl + '/node/controller/setupServices'
if services == None:
log.info(" services are marked as None, will not work")
return False
params_dict = {'user': username,
'password': password,
'services': ",".join(services)}
if hostname == "127.0.0.1":
hostname = "{0}:{1}".format(hostname, port)
params = urllib.parse.urlencode({ 'hostname': hostname,
'user': username,
'password': password,
'services': ",".join(services)})
log.info('/node/controller/setupServices params on {0}: {1}:{2}'.format(self.ip, self.port, params))
status, content, header = self._http_request(api, 'POST', params)
error_message = "cannot change node services after cluster is provisioned"
if not status and error_message in str(content):
status = True
log.info("This node is already provisioned with services, we do not consider this as failure for test case")
return status
def get_cluster_settings(self):
settings = {}
api = self.baseUrl + 'settings/web'
status, content, header = self._http_request(api, 'GET')
if status:
settings = json.loads(content)
log.info('settings/web params on {0}:{1}:{2}'.format(self.ip, self.port, settings))
return settings
def init_cluster_memoryQuota(self, username='Administrator',
password='password',
memoryQuota=256):
api = self.baseUrl + 'pools/default'
params = urllib.parse.urlencode({'memoryQuota': memoryQuota})
log.info('pools/default params : {0}'.format(params))
status, content, header = self._http_request(api, 'POST', params)
return status
def set_service_memoryQuota(self, service, username='Administrator',
password='password',
memoryQuota=256):
''' cbasMemoryQuota for cbas service.
ftsMemoryQuota for fts service.
indexMemoryQuota for index service.'''
api = self.baseUrl + 'pools/default'
params = urllib.parse.urlencode({service: memoryQuota})
log.info('pools/default params : {0}'.format(params))
status, content, header = self._http_request(api, 'POST', params)
return status
def set_cluster_name(self, name):
api = self.baseUrl + 'pools/default'
if name is None:
name = ""
params = urllib.parse.urlencode({'clusterName': name})
log.info('pools/default params : {0}'.format(params))
status, content, header = self._http_request(api, 'POST', params)
return status
def set_indexer_storage_mode(self, username='Administrator',
password='password',
storageMode='plasma'):
"""
StorageMode could be plasma or memopt
From spock, we replace forestdb with plasma
"""
api = self.baseUrl + 'settings/indexes'
params = urllib.parse.urlencode({'storageMode': storageMode})
error_message = "storageMode must be one of plasma, memory_optimized"
log.info('settings/indexes params : {0}'.format(params))
status, content, header = self._http_request(api, 'POST', params)
if not status and error_message in content.decode():
#TODO: Currently it just acknowledges if there is an error.
#And proceeds with further initialization.
log.info(content)
return status
def set_indexer_num_replica(self,
num_replica=0):
api = self.index_baseUrl + 'settings'
params = {'indexer.settings.num_replica': num_replica}
params = json.dumps(params)
log.info('settings params : {0}'.format(params))
status, content, header = self._http_request(api, 'POST',
params=params,
timeout=60)
if not status:
# TODO: Currently it just acknowledges if there is an error.
# And proceeds with further initialization.
log.info(content)
return status
def cleanup_indexer_rebalance(self, server):
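"""Ask the indexer (or this node when no server is given) to clean up its
rebalance state via the cleanupRebalance endpoint."""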
protocol = "http"
if CbServer.use_https:
protocol = "https"
if server:
api = "{0}://{1}:{2}/".format(protocol, server.ip, self.index_port) + 'cleanupRebalance'
else:
api = self.baseUrl + 'cleanupRebalance'
status, content, _ = self._http_request(api, 'GET')
if status:
return content
else:
log.error("cleanupRebalance:{0},content:{1}".format(status, content))
raise Exception("indexer rebalance cleanup failed")
def list_indexer_rebalance_tokens(self, server):
protocol = "http"
if CbServer.use_https:
protocol = "https"
if server:
api = "{0}://{1}:{2}/".format(protocol, server.ip, self.index_port) + 'listRebalanceTokens'
else:
api = self.baseUrl + 'listRebalanceTokens'
print(api)
status, content, _ = self._http_request(api, 'GET')
if status:
return content.decode('utf-8')
else:
log.error("listRebalanceTokens:{0},content:{1}".format(status, content))
raise Exception("list rebalance tokens failed")
def wait_until_cbas_is_ready(self, timeout):
""" Wait until a http request can be made to the analytics service """
timeout = time.time() + timeout
while time.time() < timeout:
try:
self.execute_statement_on_cbas("SELECT 'hello' as message", None)
return True
except ServerUnavailableException:
self.sleep(1, "Waiting for analytics server to be ready")
return False
def execute_statement_on_cbas(self, statement, mode, pretty=True,
timeout=70, client_context_id=None,
username=None, password=None):
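"""Run an analytics statement against /analytics/service and return the raw
response content.
Illustrative usage (mirroring wait_until_cbas_is_ready above):
self.execute_statement_on_cbas("SELECT 'hello' as message", None)"""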
if not username:
username = self.username
if not password:
password = self.password
api = self.cbas_base_url + "/analytics/service"
headers = self._create_capi_headers_with_auth(username, password)
params = {'statement': statement, 'pretty': pretty, 'client_context_id': client_context_id}
if mode is not None:
params['mode'] = mode
params = json.dumps(params)
status, content, header = self._http_request(api, 'POST',
headers=headers,
params=params,
timeout=timeout)
if status:
return content
elif str(header['status']) == '503':
log.info("Request Rejected")
raise Exception("Request Rejected")
elif str(header['status']) in ['500', '400']:
json_content = json.loads(content)
msg = json_content['errors'][0]['msg']
if "Job requirement" in msg and "exceeds capacity" in msg:
raise Exception("Capacity cannot meet job requirement")
else:
return content
else:
log.error("/analytics/service status:{0},content:{1}".format(
status, content))
raise Exception("Analytics Service API failed")
def delete_active_request_on_cbas(self, client_context_id, username=None, password=None):
if not username:
username = self.username
if not password:
password = self.password
api = self.cbas_base_url + "/analytics/admin/active_requests?client_context_id={0}".format(
client_context_id)
headers = self._create_capi_headers_with_auth(username, password)
status, content, header = self._http_request(api, 'DELETE',
headers=headers,
timeout=60)
if status:
return header['status']
elif str(header['status']) == '404':
log.info("Request Not Found")
return header['status']
else:
log.error(
"/analytics/admin/active_requests status:{0},content:{1}".format(
status, content))
raise Exception("Analytics Admin API failed")
def get_cluster_ceritificate(self):
api = self.baseUrl + 'pools/default/certificate'
status, content, _ = self._http_request(api, 'GET')
if status:
return content.decode("utf-8")
else:
log.error("/pools/default/certificate status:{0},content:{1}".format(status, content))
raise Exception("certificate API failed")
def regenerate_cluster_certificate(self):
api = self.baseUrl + 'controller/regenerateCertificate'
status, content, _ = self._http_request(api, 'POST')
if status:
return content
else:
log.error("controller/regenerateCertificate status:{0},content:{1}".format(status, content))
raise Exception("regenerateCertificate API failed")
def __remote_clusters(self, api, op, remoteIp, remotePort, username, password, name, demandEncryption=0,
certificate='', encryptionType="half"):
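"""Shared helper for the add/modify remote cluster (XDCR) calls: POSTs the remote
cluster reference parameters to the given api, picking the encryption parameter
name based on the cluster version, and retries up to 5 times."""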
param_map = {'hostname': "{0}:{1}".format(remoteIp, remotePort),
'username': username,
'password': password,
'name':name}
from TestInput import TestInputServer
remote = TestInputServer()
remote.ip = remoteIp
remote.rest_username = username
remote.rest_password = password
remote.port = remotePort
if demandEncryption:
param_map ['demandEncryption'] = 'on'
if certificate != '':
param_map['certificate'] = certificate
if self.check_node_versions("5.5") and RestConnection(remote).check_node_versions("5.5"):
# 5.5.0 and above
param_map['secureType'] = encryptionType
elif self.check_node_versions("5.0") and RestConnection(remote).check_node_versions("5.0"):
param_map['encryptionType'] = encryptionType
params = urllib.parse.urlencode(param_map)
retries = 5
while retries:
status, content, _ = self._http_request(api, 'POST', params)
# sample response :
# [{"name":"two","uri":"/pools/default/remoteClusters/two","validateURI":"/pools/default/remoteClusters/two?just_validate=1","hostname":"127.0.0.1:9002","username":"Administrator"}]
remoteCluster = json.loads(content)
if status or "Duplicate cluster" in remoteCluster["_"]:
return remoteCluster
retries -= 1
raise Exception("remoteCluster API '{0} remote cluster' failed".format(op))
def add_remote_cluster(self, remoteIp, remotePort, username, password, name, demandEncryption=0, certificate='',
encryptionType="full"):
# example : password:password username:Administrator hostname:127.0.0.1:9002 name:two
msg = "adding remote cluster hostname:{0}:{1} with username:password {2}:{3} name:{4} to source node: {5}:{6}"
log.info(msg.format(remoteIp, remotePort, username, password, name, self.ip, self.port))
api = self.baseUrl + 'pools/default/remoteClusters'
return self.__remote_clusters(api, 'add', remoteIp, remotePort, username, password, name, demandEncryption, certificate, encryptionType)
def add_remote_cluster_new(self, remoteIp, remotePort, username, password, name, demandEncryption=0, certificate=''):
# example : password:password username:Administrator hostname:127.0.0.1:9002 name:two
msg = "adding remote cluster hostname:{0}:{1} with username:password {2}:{3} name:{4} to source node: {5}:{6}"
log.info(msg.format(remoteIp, remotePort, username, password, name, self.ip, self.port))
api = self.baseUrl + 'pools/default/remoteClusters'
return self.__remote_clusters(api, 'add', remoteIp, remotePort, username, password, name, demandEncryption, certificate)
def modify_remote_cluster(self, remoteIp, remotePort, username, password, name, demandEncryption=0, certificate='', encryptionType="half"):
log.info("modifying remote cluster name:{0}".format(name))
api = self.baseUrl + 'pools/default/remoteClusters/' + urllib.parse.quote(name)
return self.__remote_clusters(api, 'modify', remoteIp, remotePort, username, password, name, demandEncryption, certificate, encryptionType)
def get_remote_clusters(self):
remote_clusters = []
api = self.baseUrl + 'pools/default/remoteClusters/'
params = urllib.parse.urlencode({})
status, content, header = self._http_request(api, 'GET', params)
if status:
remote_clusters = json.loads(content)
return remote_clusters
def remove_all_remote_clusters(self):
remote_clusters = self.get_remote_clusters()
for remote_cluster in remote_clusters:
try:
if remote_cluster["deleted"] == False:
self.remove_remote_cluster(remote_cluster["name"])
except KeyError:
# goxdcr cluster references will not contain "deleted" field
self.remove_remote_cluster(remote_cluster["name"])
def remove_remote_cluster(self, name):
# example : name:two
msg = "removing remote cluster name:{0}".format(urllib.parse.quote(name))
log.info(msg)
api = self.baseUrl + 'pools/default/remoteClusters/{0}?'.format(urllib.parse.quote(name))
params = urllib.parse.urlencode({})
status, content, header = self._http_request(api, 'DELETE', params)
#sample response : "ok"
if not status:
log.error("failed to remove remote cluster: status:{0},content:{1}".format(status, content))
raise Exception("remoteCluster API 'remove cluster' failed")
# replicationType:continuous toBucket:default toCluster:two fromBucket:default
# defaults at https://github.com/couchbase/goxdcr/metadata/replication_settings.go#L20-L33
def start_replication(self, replicationType, fromBucket, toCluster, rep_type="xmem", toBucket=None, xdcr_params={}):
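"""Create an XDCR replication via controller/createReplication and return the
replication id reported by the server."""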
toBucket = toBucket or fromBucket
msg = "starting {0} replication type:{1} from {2} to {3} in the remote" \
" cluster {4} with settings {5}"
log.info(msg.format(replicationType, rep_type, fromBucket, toBucket,
toCluster, xdcr_params))
api = self.baseUrl + 'controller/createReplication'
param_map = {'replicationType': replicationType,
'toBucket': toBucket,
'fromBucket': fromBucket,
'toCluster': toCluster,
'type': rep_type}
param_map.update(xdcr_params)
params = urllib.parse.urlencode(param_map)
retries = 3
while retries:
try:
status, content, header = self._http_request(api, 'POST', params)
# response : {"id": "replication_id"}
json_parsed = json.loads(content)
log.info("Replication created with id: {0}".format(json_parsed['id']))
return json_parsed['id']
except ValueError:
time.sleep(10)
retries -= 1
except:
raise Exception("create replication failed: status:{0},content:{1}".format(status, content))
def get_replications(self):
replications = []
content = self.ns_server_tasks()
for item in content:
if not isinstance(item, dict):
log.error("Unexpected error while retrieving pools/default/tasks : {0}".format(content))
raise Exception("Unexpected error while retrieving pools/default/tasks : {0}".format(content))
if item["type"] == "xdcr":
replications.append(item)
return replications
def remove_all_replications(self):
replications = self.get_replications()
for replication in replications:
self.stop_replication(replication["cancelURI"])
def stop_replication(self, uri):
log.info("Deleting replication {0}".format(uri))
api = self.baseUrl[:-1] + uri
retries = 3
while retries:
status, content, header = self._http_request(api, 'DELETE')
if status:
log.info("Replication deleted successfully")
return
else:
retries -= 1
time.sleep(10)
raise Exception("delete replication failed: status:{0}, content:{1}".format(status, content))
def remove_all_recoveries(self):
recoveries = []
content = self.ns_server_tasks()
for item in content:
if item["type"] == "recovery":
recoveries.append(item)
for recovery in recoveries:
api = self.baseUrl + recovery["stopURI"]
status, content, header = self._http_request(api, 'POST')
if not status:
raise CBRecoveryFailedException("impossible to stop cbrecovery by {0}".format(api))
log.info("recovery stopped by {0}".format(api))
# params serverIp : the server to add to this cluster
# raises exceptions when
# unauthorized user
# server unreachable
# can't add the node to itself ( TODO )
# server already added
# returns otpNode
def add_node(self, user='', password='', remoteIp='', port='8091', zone_name='', services=None):
otpNode = None
protocol = "http"
if CbServer.use_https:
port = CbServer.ssl_port
protocol = "https"
# if ip format is ipv6 and enclosing brackets are not found,
# enclose self.ip and remoteIp
if self.ip.count(':') and self.ip[0] != '[':
self.ip = '[' + self.ip + ']'
if remoteIp.count(':') and remoteIp[0] != '[':
remoteIp = '[' + remoteIp + ']'
log.info('adding remote node @{0}:{1} to this cluster @{2}:{3}'\
.format(remoteIp, port, self.ip, self.port))
if zone_name == '':
api = self.baseUrl + 'controller/addNode'
else:
api = self.baseUrl + 'pools/default/serverGroups'
if self.is_zone_exist(zone_name):
zones = self.get_zone_names()
api = "/".join((api, zones[zone_name], "addNode"))
log.info("node {0} will be added to zone {1}".format(remoteIp, zone_name))
else:
raise Exception("There is not zone with name: %s in cluster" % zone_name)
params = urllib.parse.urlencode({'hostname': "{0}://{1}:{2}".format(protocol, remoteIp, port),
'user': user,
'password': password})
if services != None:
services = ','.join(services)
params = urllib.parse.urlencode({'hostname': "{0}://{1}:{2}".format(protocol, remoteIp, port),
'user': user,
'password': password,
'services': services})
if self.monitorRebalance():
status, content, header = self._http_request(api, 'POST', params)
if status:
json_parsed = json.loads(content)
otpNodeId = json_parsed['otpNode']
otpNode = OtpNode(otpNodeId)
if otpNode.ip == '127.0.0.1':
otpNode.ip = self.ip
else:
self.print_UI_logs()
try:
# print logs from node that we want to add
wanted_node = deepcopy(self)
wanted_node.ip = remoteIp
wanted_node.print_UI_logs()
except Exception as ex:
log.error(ex)
if content.find(b'Prepare join failed. Node is already part of cluster') >= 0:
raise ServerAlreadyJoinedException(nodeIp=self.ip,
remoteIp=remoteIp)
elif content.find(b'Prepare join failed. Joining node to itself is not allowed') >= 0:
raise ServerSelfJoinException(nodeIp=self.ip,
remoteIp=remoteIp)
else:
log.error('add_node error : {0}'.format(content))
raise AddNodeException(nodeIp=self.ip,
remoteIp=remoteIp,
reason=content)
else:
raise AddNodeException(nodeIp=self.ip,
remoteIp=remoteIp,
reason="Rebalance error, cannot add node")
return otpNode
# params serverIp : the server to add to this cluster
# raises exceptions when
# unauthorized user
# server unreachable
# can't add the node to itself ( TODO )
# server already added
# returns otpNode
def do_join_cluster(self, user='', password='', remoteIp='', port='8091', zone_name='', services=None):
otpNode = None
if CbServer.use_https:
port = CbServer.ssl_port
log.info('adding remote node @{0}:{1} to this cluster @{2}:{3}'\
.format(remoteIp, port, self.ip, self.port))
api = self.baseUrl + '/node/controller/doJoinCluster'
params = urllib.parse.urlencode({'hostname': "{0}:{1}".format(remoteIp, port),
'user': user,
'password': password})
if services != None:
services = ','.join(services)
params = urllib.parse.urlencode({'hostname': "{0}:{1}".format(remoteIp, port),
'user': user,
'password': password,
'services': services})
status, content, header = self._http_request(api, 'POST', params)
if status:
json_parsed = json.loads(content)
otpNodeId = json_parsed['otpNode']
otpNode = OtpNode(otpNodeId)
if otpNode.ip == '127.0.0.1':
otpNode.ip = self.ip
else:
self.print_UI_logs()
try:
# print logs from node that we want to add
wanted_node = deepcopy(self)
wanted_node.ip = remoteIp
wanted_node.print_UI_logs()
except Exception as ex:
log.error(ex)
if content.find(b'Prepare join failed. Node is already part of cluster') >= 0:
raise ServerAlreadyJoinedException(nodeIp=self.ip,
remoteIp=remoteIp)
elif content.find(b'Prepare join failed. Joining node to itself is not allowed') >= 0:
raise ServerSelfJoinException(nodeIp=self.ip,
remoteIp=remoteIp)
else:
log.error('add_node error : {0}'.format(content))
raise AddNodeException(nodeIp=self.ip,
remoteIp=remoteIp,
reason=content)
return otpNode
def eject_node(self, user='', password='', otpNode=None):
if not otpNode:
log.error('otpNode parameter required')
return False
api = self.baseUrl + 'controller/ejectNode'
params = urllib.parse.urlencode({'otpNode': otpNode,
'user': user,
'password': password})
status, content, header = self._http_request(api, 'POST', params)
if status:
log.info('ejectNode successful')
else:
if content.find(b'Prepare join failed. Node is already part of cluster') >= 0:
raise ServerAlreadyJoinedException(nodeIp=self.ip,
remoteIp=otpNode)
else:
# TODO : raise an exception here
log.error('eject_node error {0}'.format(content))
return True
def force_eject_node(self):
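# Force this node to leave its cluster (resetting it) via diag/eval, then wait
# for couchbase server to go down and come back up.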
self.diag_eval("gen_server:cast(ns_cluster, leave).")
self.check_delay_restart_coucbase_server()
""" when we do reset couchbase server by force reject, couchbase server will not
down right away but delay few seconds to be down depend on server spec.
This fx will detect that delay and return true when couchbase server down and
up again after force reject """
def check_delay_restart_coucbase_server(self):
api = self.baseUrl + 'nodes/self'
headers = self._create_headers()
break_out = 0
count_cbserver_up = 0
while break_out < 60 and count_cbserver_up < 2:
try:
response, content = httplib2.Http(timeout=120).request(api, 'GET', '', headers)
if response['status'] in ['200', '201', '202'] and count_cbserver_up == 0:
log.info("couchbase server is up but down soon.")
time.sleep(1)
break_out += 1 # time needed for couchbase server reload after reset config
if break_out == 7:
log.info("couchbase server may be up already")
count_cbserver_up = 1
elif response['status'] in ['200', '201', '202']:
count_cbserver_up = 2
log.info("couchbase server is up again in few seconds")
time.sleep(7)
except (socket.error, AttributeError) as e:
log.info("couchbase server is down. Waiting for couchbase server up")
time.sleep(2)
break_out += 1
count_cbserver_up = 1
pass
if break_out >= 60:
raise Exception("Couchbase server did not start after 60 seconds")
def fail_over(self, otpNode=None, graceful=False):
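"""Fail over the given otpNode: uses controller/startGracefulFailover when
graceful=True, otherwise controller/failOver."""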
if otpNode is None:
log.error('otpNode parameter required')
return False
api = self.baseUrl + 'controller/failOver'
if graceful:
api = self.baseUrl + 'controller/startGracefulFailover'
params = urllib.parse.urlencode({'otpNode': otpNode})
status, content, header = self._http_request(api, 'POST', params)
if status:
log.info('fail_over node {0} successful'.format(otpNode))
else:
log.error('fail_over node {0} error : {1}'.format(otpNode, content))
raise FailoverFailedException(content)
return status
def set_recovery_type(self, otpNode=None, recoveryType=None):
log.info("Going to set recoveryType={0} for node :: {1}".format(recoveryType, otpNode))
if otpNode is None:
log.error('otpNode parameter required')
return False
if recoveryType is None:
log.error('recoveryType is not set')
return False
api = self.baseUrl + 'controller/setRecoveryType'
params = urllib.parse.urlencode({'otpNode': otpNode,
'recoveryType': recoveryType})
status, content, header = self._http_request(api, 'POST', params)
if status:
log.info('recoveryType for node {0} set successful'.format(otpNode))
else:
log.error('recoveryType node {0} not set with error : {1}'.format(otpNode, content))
raise SetRecoveryTypeFailed(content)
return status
def add_back_node(self, otpNode=None):
if otpNode is None:
log.error('otpNode parameter required')
return False
api = self.baseUrl + 'controller/reAddNode'
params = urllib.parse.urlencode({'otpNode': otpNode})
status, content, header = self._http_request(api, 'POST', params)
if status:
log.info('add_back_node {0} successful'.format(otpNode))
else:
log.error('add_back_node {0} error : {1}'.format(otpNode, content))
raise InvalidArgumentException('controller/reAddNode',
parameters=params)
return status
def rebalance(self, otpNodes=[], ejectedNodes=[], deltaRecoveryBuckets=None):
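"""Start a rebalance with the given known and ejected otpNodes (and optional
delta-recovery buckets) via controller/rebalance."""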
knownNodes = ','.join(otpNodes)
ejectedNodesString = ','.join(ejectedNodes)
if deltaRecoveryBuckets == None:
params = {'knownNodes': knownNodes,
'ejectedNodes': ejectedNodesString,
'user': self.username,
'password': self.password}
else:
deltaRecoveryBuckets = ",".join(deltaRecoveryBuckets)
params = {'knownNodes': knownNodes,
'ejectedNodes': ejectedNodesString,
'deltaRecoveryBuckets': deltaRecoveryBuckets,
'user': self.username,
'password': self.password}
log.info('rebalance params : {0}'.format(params))
params = urllib.parse.urlencode(params)
api = self.baseUrl + "controller/rebalance"
status, content, header = self._http_request(api, 'POST', params)
if status:
log.info('rebalance operation started')
else:
log.error('rebalance operation failed: {0}'.format(content))
# extract the error
raise InvalidArgumentException('controller/rebalance with error message {0}'.format(content),
parameters=params)
return status
def diag_eval(self, code, print_log=True):
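"""Evaluate an Erlang expression on this node via /diag/eval and return
(status, content), e.g. self.diag_eval("gen_server:cast(ns_cluster, leave).")
as used by force_eject_node above."""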
api = '{0}{1}'.format(self.baseUrl, 'diag/eval/')
status, content, header = self._http_request(api, "POST", code)
if content:
try:
content = content.decode('utf-8')
except (UnicodeDecodeError, AttributeError):
pass
if print_log:
log.info("/diag/eval status on {0}:{1}: {2} content: {3} command: {4}".
format(self.ip, self.port, status, content, code))
return status, content
def set_chk_max_items(self, max_items):
status, content = self.diag_eval("ns_config:set(chk_max_items, " + str(max_items) + ")")
return status, content
def set_chk_period(self, period):
status, content = self.diag_eval("ns_config:set(chk_period, " + str(period) + ")")
return status, content
def set_enable_flow_control(self, flow=True, bucket='default'):
flow_control = "false"
if flow:
flow_control = "true"
code = "ns_bucket:update_bucket_props(\"" + bucket + "\", [{extra_config_string, \"upr_enable_flow_control=" + flow_control + "\"}])"
status, content = self.diag_eval(code)
return status, content
def change_flusher_total_batch_limit(self, flusher_total_batch_limit=3,
bucket='default'):
code = "ns_bucket:update_bucket_props(\"" + bucket \
+ "\", [{extra_config_string, " \
+ "\"flusher_total_batch_limit=" \
+ str(flusher_total_batch_limit) + "\"}])."
status, content = self.diag_eval(code)
return status, content
def diag_master_events(self):
api = '{0}{1}'.format(self.baseUrl, 'diag/masterEvents?o=1')
status, content, header = self._http_request(api, "GET")
log.info("diag/masterEvents?o=1 status: {0} content: {1}".format(status, content))
return status, content
def get_admin_credentials(self):
code = 'ns_config:search_node_prop(node(), ns_config:latest(), memcached, admin_user)'
status, id = self.diag_eval(code)
code = 'ns_config:search_node_prop(node(), ns_config:latest(), memcached, admin_pass)'
status, password = self.diag_eval(code)
return id.strip('"'), password.strip('"')
def monitorRebalance(self, stop_if_loop=True):
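"""Poll rebalance progress until it finishes, fails, or (with stop_if_loop)
appears to be stuck; returns True on success, False otherwise."""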
start = time.time()
progress = 0
retry = 0
same_progress_count = 0
previous_progress = 0
while progress != -1 and (progress != 100 or \
self._rebalance_progress_status() == 'running') and retry < 20:
# -1 is error , -100 means could not retrieve progress
progress = self._rebalance_progress()
if progress == -100:
log.error("unable to retrieve rebalanceProgress.try again in 1 second")
retry += 1
else:
retry = 0
if stop_if_loop:
# reset same_progress_count if we get a different result,
# or progress is still 0
# (it may take a long time until the results are different from 0)
if previous_progress != progress or progress == 0:
previous_progress = progress
same_progress_count = 0
else:
same_progress_count += 1
if same_progress_count > 50:
log.error("apparently rebalance progress code in infinite loop:"
" {0}".format(progress))
return False
# sleep 10 seconds to printout less log
time.sleep(10)
if progress < 0:
log.error("rebalance progress code : {0}".format(progress))
return False
else:
duration = time.time() - start
if duration > 10:
sleep = 10
else:
sleep = duration
log.info('rebalance progress took {:.02f} seconds '.format(duration))
log.info("sleep for {0} seconds after rebalance...".format(sleep))
time.sleep(sleep)
return True
def _rebalance_progress_status(self):
api = self.baseUrl + "pools/default/rebalanceProgress"
status, content, header = self._http_request(api)
json_parsed = json.loads(content)
if status:
if "status" in json_parsed:
return json_parsed['status']
else:
return None
def _rebalance_status_and_progress(self):
"""
Returns a 2-tuple capturing the rebalance status and progress, as follows:
('running', progress) - if rebalance is running
('none', 100) - if rebalance is not running (i.e. assumed done)
(None, -100) - if there's an error getting the rebalance progress
from the server
(None, -1) - if the server responds but there's no information on
what the status of rebalance is
The progress is computed as an average of the progress of each node,
rounded to 2 decimal places.
Throws RebalanceFailedException if rebalance progress returns an error message
"""
avg_percentage = -1
rebalance_status = None
api = self.baseUrl + "pools/default/rebalanceProgress"
try:
status, content, header = self._http_request(api)
except ServerUnavailableException as e:
log.error(e)
return None, -100
json_parsed = json.loads(content)
if status:
if "status" in json_parsed:
rebalance_status = json_parsed["status"]
if "errorMessage" in json_parsed:
msg = '{0} - rebalance failed'.format(json_parsed)
log.error(msg)
self.print_UI_logs()
raise RebalanceFailedException(msg)
elif rebalance_status == "running":
total_percentage = 0
count = 0
for key in json_parsed:
if key.find('@') >= 0:
ns_1_dictionary = json_parsed[key]
percentage = ns_1_dictionary['progress'] * 100
count += 1
total_percentage += percentage
if count:
avg_percentage = (total_percentage // count)
else:
avg_percentage = 0
log.info('rebalance percentage : {0:.02f} %'.
format(round(avg_percentage, 2)))
else:
avg_percentage = 100
else:
avg_percentage = -100
return rebalance_status, avg_percentage
def _rebalance_progress(self):
return self._rebalance_status_and_progress()[1]
def log_client_error(self, post):
api = self.baseUrl + 'logClientError'
status, content, header = self._http_request(api, 'POST', post)
if not status:
log.error('unable to logClientError')
return status, content, header
def trigger_index_compaction(self, timeout=120):
node = None
api = self.index_baseUrl + 'triggerCompaction'
status, content, header = self._http_request(api, timeout=timeout)
if not status:
raise Exception(content)
def set_index_settings(self, setting_json, timeout=120):
api = self.index_baseUrl + 'settings'
status, content, header = self._http_request(api, 'POST', json.dumps(setting_json))
if not status:
raise Exception(content)
log.info("{0} set".format(setting_json))
def set_index_settings_internal(self, setting_json, timeout=120):
api = self.index_baseUrl + 'internal/settings'
status, content, header = self._http_request(api, 'POST',
json.dumps(setting_json))
if not status:
if header['status']=='404':
log.info("This endpoint is introduced only in 5.5.0, hence not found. Redirecting the request to the old endpoint")
self.set_index_settings(setting_json, timeout)
else:
raise Exception(content)
log.info("{0} set".format(setting_json))
def get_index_settings(self, timeout=120):
node = None
api = self.index_baseUrl + 'settings'
status, content, header = self._http_request(api, timeout=timeout)
if not status:
raise Exception(content)
return json.loads(content)
def get_index_storage_mode(self, timeout=120):
api = self.index_baseUrl + 'settings'
status, content, header = self._http_request(api, timeout=timeout)
if not status:
raise Exception(content)
return json.loads(content)["indexer.settings.storage_mode"]
def set_index_planner_settings(self, setting, timeout=120):
api = self.index_baseUrl + 'settings/planner?{0}'.format(setting)
status, content, header = self._http_request(api, timeout=timeout)
if not status:
raise Exception(content)
return json.loads(content)
def get_index_stats(self, timeout=120, index_map=None):
api = self.index_baseUrl + 'stats'
status, content, header = self._http_request(api, timeout=timeout)
if status:
json_parsed = json.loads(content)
index_map = RestParser().parse_index_stats_response(json_parsed, index_map=index_map)
return index_map
def get_index_stats_collections(self, timeout=120, index_map=None):
api = self.index_baseUrl + 'stats'
status, content, header = self._http_request(api, timeout=timeout)
if status:
json_parsed = json.loads(content)
index_map = RestParser().parse_index_stats_response_collections(json_parsed, index_map=index_map)
return index_map
def get_all_index_stats(self, timeout=120, inst_id_filter=[], consumer_filter=None, text=False):
"""return: json object or text response of :9102/stats"""
api = self.index_baseUrl + 'stats'
all_index_stats = {}
if inst_id_filter:
inst_id_filter = json.dumps(inst_id_filter)
elif consumer_filter:
api += f"?consumerFilter={consumer_filter}"
else:
inst_id_filter = ""
status, content, _ = self._http_request(api, timeout=timeout, params=inst_id_filter)
if status:
if text:
all_index_stats = content.decode("utf8").replace('":', '": ').replace(",", ", ")
else:
all_index_stats = json.loads(content)
return all_index_stats
def get_index_official_stats(self, timeout=120, index_map=None, bucket="", scope="", collection=""):
api = self.index_baseUrl + 'api/v1/stats'
if bucket:
api += f'/`{bucket.replace("%", "%25")}`'
if scope:
api += f'.{scope}'
if collection:
api += f'.{collection}'
status, content, header = self._http_request(api, timeout=timeout)
if status:
json_parsed = json.loads(content)
return json_parsed
def get_indexes_count(self):
indexes_count = {}
index_map = self.get_index_storage_stats()
for bucket, indexes in index_map.items():
for index, stats in indexes.items():
indexes_count[index] = stats["MainStore"]["count"]
return indexes_count
def get_index_storage_stats(self, timeout=120, index_map=None):
api = self.index_baseUrl + 'stats/storage'
status, content, header = self._http_request(api, timeout=timeout)
if not status:
raise Exception(content)
json_parsed = json.loads(content)
index_storage_stats = {}
for index_stats in json_parsed:
bucket = index_stats["Index"].split(":")[0]
index_name = index_stats["Index"].split(":")[-1]
if bucket not in list(index_storage_stats.keys()):
index_storage_stats[bucket] = {}
index_storage_stats[bucket][index_name] = index_stats["Stats"]
return index_storage_stats
def get_indexer_stats(self, timeout=120, index_map=None, baseUrl=None):
if baseUrl is None:
api = self.index_baseUrl + 'stats'
else:
api = baseUrl + 'stats'
index_map = {}
status, content, header = self._http_request(api, timeout=timeout)
if status:
json_parsed = json.loads(content)
for key in list(json_parsed.keys()):
tokens = key.split(":")
val = json_parsed[key]
if len(tokens) == 1:
field = tokens[0]
index_map[field] = val
return index_map
def get_indexer_metadata(self, timeout=120, index_map=None):
api = self.index_baseUrl + 'getIndexStatus'
index_map = {}
status, content, header = self._http_request(api, timeout=timeout)
if status:
json_parsed = json.loads(content)
for key in list(json_parsed.keys()):
tokens = key.split(":")
val = json_parsed[key]
if len(tokens) == 1:
field = tokens[0]
index_map[field] = val
return index_map
def get_indexer_internal_stats(self, timeout=120, index_map=None):
api = self.index_baseUrl + 'settings?internal=ok'
index_map = {}
status, content, header = self._http_request(api, timeout=timeout)
if status:
json_parsed = json.loads(content)
for key in list(json_parsed.keys()):
tokens = key.split(":")
val = json_parsed[key]
if len(tokens) == 1:
field = tokens[0]
index_map[field] = val
return index_map
def trigger_compaction(self, timeout=120):
api = self.index_baseUrl + 'plasmaDiag'
command = {'Cmd': 'listDBs'}
status, content, header = self._http_request(api, 'POST', json.dumps(command), timeout=timeout)
for line in str(content, 'utf-8').splitlines():
try:
x, id = line.split(" : ")
if id:
log.info(f'Triggering compaction for instance id {id}')
compact_command = {'Cmd': 'compactAll', 'Args': [int(id)]}
status, content, header = self._http_request(api, 'POST', json.dumps(compact_command))
if not status:
log.error(f'Failed to trigger compaction : {content}')
except ValueError:
pass
def get_index_status(self, timeout=120, index_map=None):
api = self.baseUrl + 'indexStatus'
index_map = {}
status, content, header = self._http_request(api, timeout=timeout)
if status:
json_parsed = json.loads(content)
index_map = RestParser().parse_index_status_response(json_parsed)
return index_map
def get_index_id_map(self, timeout=120):
api = self.baseUrl + 'indexStatus'
index_map = {}
status, content, header = self._http_request(api, timeout=timeout)
if status:
json_parsed = json.loads(content)
for map in json_parsed["indexes"]:
bucket_name = map['bucket']
if bucket_name not in list(index_map.keys()):
index_map[bucket_name] = {}
index_name = map['index']
index_map[bucket_name][index_name] = {}
index_map[bucket_name][index_name]['id'] = map['id']
return index_map
def get_index_statements(self, timeout=120):
api = self.index_baseUrl + 'getIndexStatement'
index_map = {}
status, content, header = self._http_request(api, timeout=timeout)
if status:
json_parsed = json.loads(content)
return json_parsed
# returns node data for this host
def get_nodes_self(self, timeout=120):
node = None
api = self.baseUrl + 'nodes/self'
status, content, header = self._http_request(api, timeout=timeout)
if status:
json_parsed = json.loads(content)
node = RestParser().parse_get_nodes_response(json_parsed)
return node
def get_ip_from_ini_file(self):
""" in alternate address, we need to get hostname from ini file """
return self.ip
def node_statuses(self, timeout=120):
nodes = []
api = self.baseUrl + 'nodeStatuses'
status, content, header = self._http_request(api, timeout=timeout)
json_parsed = json.loads(content)
if status:
for key in json_parsed:
# each key contain node info
value = json_parsed[key]
# Create an OtpNode object given the id and status.
# Note the OtpNode object grabs the ip address from the id.
node = OtpNode(id=value['otpNode'],
status=value['status'])
if node.ip == 'cb.local':
node.ip = self.ip
node.id = node.id.replace('cb.local',
self.ip.__str__())
# The ip address grabbed from the id is '127.0.0.1' or '::1'
# when the node is not part of a cluster. This can be amended
# to the ip address in the TestInputServer object that is
# provided.
if node.ip in ['127.0.0.1', '[::1]']:
node.ip = self.ip
node.port = int(key[key.rfind(":") + 1:])
node.replication = value['replication']
if 'gracefulFailoverPossible' in list(value.keys()):
node.gracefulFailoverPossible = value['gracefulFailoverPossible']
else:
node.gracefulFailoverPossible = False
nodes.append(node)
return nodes
def cluster_status(self):
parsed = {}
api = self.baseUrl + 'pools/default'
status, content, header = self._http_request(api)
if status:
parsed = json.loads(content)
return parsed
def fetch_vbucket_map(self, bucket="default"):
"""Return vbucket map for bucket
Keyword argument:
bucket -- bucket name
"""
api = self.baseUrl + 'pools/default/buckets/' + bucket
status, content, header = self._http_request(api)
_stats = json.loads(content)
return _stats['vBucketServerMap']['vBucketMap']
def get_vbucket_map_and_server_list(self, bucket="default"):
""" Return server list, replica and vbuckets map
that matches to server list """
vbucket_map = self.fetch_vbucket_map(bucket)
api = self.baseUrl + 'pools/default/buckets/' + bucket
status, content, header = self._http_request(api)
_stats = json.loads(content)
num_replica = _stats['vBucketServerMap']['numReplicas']
vbucket_map = _stats['vBucketServerMap']['vBucketMap']
servers = _stats['vBucketServerMap']['serverList']
server_list = []
for node in servers:
node = node.split(":")
server_list.append(node[0])
return vbucket_map, server_list, num_replica
def get_pools_info(self):
parsed = {}
api = self.baseUrl + 'pools'
status, content, header = self._http_request(api)
json_parsed = json.loads(content)
if status:
parsed = json_parsed
return parsed
def get_pools_default(self, query='', timeout=30):
parsed = {}
api = self.baseUrl + 'pools/default'
if query:
api += "?" + query
status, content, header = self._http_request(api, timeout=timeout)
json_parsed = json.loads(content)
if status:
parsed = json_parsed
return parsed
def get_cluster_stats(self):
"""
Reads cluster nodes statistics using `pools/default` rest GET method
:return stat_dict - Dictionary of CPU & Memory status each cluster node:
"""
stat_dict = dict()
json_output = self.get_pools_default()
if 'nodes' in json_output:
for node_stat in json_output['nodes']:
stat_dict[node_stat['hostname']] = dict()
stat_dict[node_stat['hostname']]['services'] = node_stat['services']
stat_dict[node_stat['hostname']]['cpu_utilization'] = node_stat['systemStats']['cpu_utilization_rate']
stat_dict[node_stat['hostname']]['mem_free'] = node_stat['systemStats']['mem_free']
stat_dict[node_stat['hostname']]['mem_total'] = node_stat['systemStats']['mem_total']
stat_dict[node_stat['hostname']]['swap_mem_used'] = node_stat['systemStats']['swap_used']
stat_dict[node_stat['hostname']]['swap_mem_total'] = node_stat['systemStats']['swap_total']
return stat_dict
def get_pools(self):
version = None
api = self.baseUrl + 'pools'
status, content, header = self._http_request(api)
json_parsed = json.loads(content)
if status:
version = MembaseServerVersion(json_parsed['implementationVersion'], json_parsed['componentsVersion'])
return version
def get_buckets(self, num_retries=3, poll_interval=15):
buckets = []
api = '{0}{1}'.format(self.baseUrl, 'pools/default/buckets?basic_stats=true')
buckets_are_received = False
status = ""
content = ""
while num_retries > 0:
try:
# get all the buckets
status, content, header = self._http_request(api)
json_parsed = json.loads(content)
if status:
for item in json_parsed:
bucketInfo = RestParser().parse_get_bucket_json(item)
buckets.append(bucketInfo)
buckets_are_received = True
break
else:
log.error("Response status is: False, response content is: {0}".format(content))
num_retries -= 1
time.sleep(poll_interval)
except Exception as e:
num_retries -= 1
log.error(e)
log.error('{0} seconds sleep before calling get_buckets again...'.format(poll_interval))
time.sleep(poll_interval)
if not buckets_are_received:
log.error("Could not get buckets list from the following api: {0}".format(api))
log.error("Last response status is: {0}".format(status))
log.error("Last response content is: {0}".format(content))
return buckets
def get_bucket_by_name(self,bucket_name):
# get all the buckets
buckets = []
api = '{0}{1}'.format(self.baseUrl, 'pools/default/buckets?basic_stats=true')
status, content, header = self._http_request(api)
json_parsed = json.loads(content)
if status:
for item in json_parsed:
bucketInfo = RestParser().parse_get_bucket_json(item)
if bucketInfo.name == bucket_name:
buckets.append(bucketInfo)
return buckets
def get_buckets_itemCount(self):
# get all the buckets
bucket_map = {}
api = '{0}{1}'.format(self.baseUrl, 'pools/default/buckets?basic_stats=true')
status, content, header = self._http_request(api)
json_parsed = json.loads(content)
if status:
for item in json_parsed:
bucketInfo = RestParser().parse_get_bucket_json(item)
bucket_map[bucketInfo.name] = bucketInfo.stats.itemCount
return bucket_map
def get_bucket_stats_for_node(self, bucket='default', node=None):
if not node:
log.error('node_ip not specified')
return None
stats = {}
api = "{0}{1}{2}{3}{4}:{5}{6}".format(self.baseUrl, 'pools/default/buckets/',
bucket, "/nodes/", node.ip, node.port, "/stats")
status, content, header = self._http_request(api)
if status:
json_parsed = json.loads(content)
op = json_parsed["op"]
samples = op["samples"]
for stat_name in samples:
if stat_name not in stats:
if len(samples[stat_name]) == 0:
stats[stat_name] = []
else:
stats[stat_name] = samples[stat_name][-1]
else:
raise Exception("Duplicate entry in the stats command {0}".format(stat_name))
return stats
def get_node_settings(self, setting_name=None):
api = "{0}{1}".format(self.fts_baseUrl, 'api/manager')
status, content, header = self._http_request(api)
json_parsed = json.loads(content)
options_vals = json_parsed['mgr']['options']
if setting_name in options_vals.keys():
return options_vals[setting_name]
log.error("Setting {0} not available".format(setting_name))
def get_bucket_status(self, bucket):
if not bucket:
log.error("Bucket Name not Specified")
return None
api = self.baseUrl + 'pools/default/buckets'
status, content, header = self._http_request(api)
if status:
json_parsed = json.loads(content)
for item in json_parsed:
if item["name"] == bucket:
return item["nodes"][0]["status"]
log.error("Bucket {0} doesn't exist".format(bucket))
return None
def fetch_bucket_stats(self, bucket='default', zoom='minute'):
"""Return deserialized buckets stats.
Keyword argument:
bucket -- bucket name
zoom -- stats zoom level (minute | hour | day | week | month | year)
"""
api = self.baseUrl + 'pools/default/buckets/{0}/stats?zoom={1}'.format(bucket, zoom)
log.info(api)
status, content, header = self._http_request(api)
return json.loads(content)
def set_query_index_api_mode(self, index_api_mode=3):
api = self.query_baseUrl + 'admin/settings'
query_api_setting = {"max-index-api": index_api_mode}
status, content, header = self._http_request(api, 'POST', json.dumps(query_api_setting))
if not status:
raise Exception(content)
log.info("{0} set".format(query_api_setting))
def fetch_bucket_xdcr_stats(self, bucket='default', zoom='minute'):
"""Return deserialized bucket xdcr stats.
Keyword argument:
bucket -- bucket name
zoom -- stats zoom level (minute | hour | day | week | month | year)
"""
api = self.baseUrl + 'pools/default/buckets/@xdcr-{0}/stats?zoom={1}'.format(bucket, zoom)
status, content, header = self._http_request(api)
return json.loads(content)
def fetch_system_stats(self):
"""Return deserialized system stats."""
api = self.baseUrl + 'pools/default/'
status, content, header = self._http_request(api)
return json.loads(content)
def get_xdc_queue_size(self, bucket):
"""Fetch bucket stats and return the latest value of XDC replication
queue size"""
bucket_stats = self.fetch_bucket_xdcr_stats(bucket)
return bucket_stats['op']['samples']['replication_changes_left'][-1]
def get_dcp_queue_size(self, bucket):
"""Fetch bucket stats and return the latest value of DCP
queue size"""
bucket_stats = self.fetch_bucket_stats(bucket)
return bucket_stats['op']['samples']['ep_dcp_xdcr_items_remaining'][-1]
def get_active_key_count(self, bucket):
"""Fetch bucket stats and return the bucket's curr_items count"""
bucket_stats = self.fetch_bucket_stats(bucket)
ret_val = -1
retries = 10
while retries > 0:
try:
ret_val = bucket_stats['op']['samples']['curr_items'][-1]
return ret_val
except KeyError as err:
log.error(f"get_active_key_count() function for bucket {bucket} reported an error {err}")
log.error(f"Corresponding bucket stats JSON is {bucket_stats}")
time.sleep(2)
retries = retries - 1
return ret_val
def get_replica_key_count(self, bucket):
"""Fetch bucket stats and return the bucket's replica count"""
bucket_stats = self.fetch_bucket_stats(bucket)
return bucket_stats['op']['samples']['vb_replica_curr_items'][-1]
def get_nodes(self, get_all_nodes=False):
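"""Return the nodes reported by pools/default; by default only nodes whose
clusterMembership is 'active', or every node when get_all_nodes=True."""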
nodes = []
api = self.baseUrl + 'pools/default'
status, content, header = self._http_request(api)
count = 0
while not content and count < 7:
log.info("sleep 5 seconds and retry")
time.sleep(5)
status, content, header = self._http_request(api)
count += 1
if count == 7:
raise Exception("could not get node info after 30 seconds")
json_parsed = json.loads(content)
if status:
if "nodes" in json_parsed:
for json_node in json_parsed["nodes"]:
node = RestParser().parse_get_nodes_response(json_node)
node.rest_username = self.username
node.rest_password = self.password
if node.ip == "127.0.0.1":
node.ip = self.ip
# Only add nodes which are active on cluster
if get_all_nodes or node.clusterMembership == 'active':
nodes.append(node)
else:
log.info("Node {0} not part of cluster {1}".format(node.ip, node.clusterMembership))
return nodes
# this method returns the number of nodes in the cluster
def get_cluster_size(self):
nodes = self.get_nodes()
node_ip = []
for node in nodes:
node_ip.append(node.ip)
log.info("Number of node(s) in cluster is {0} node(s)".format(len(node_ip)))
return len(node_ip)
""" this medthod return version on node that is not initialized yet """
def get_nodes_version(self):
node = self.get_nodes_self()
version = node.version
log.info("Node version in cluster {0}".format(version))
return version
# this method returns the versions of nodes in cluster
def get_nodes_versions(self, logging=True):
nodes = self.get_nodes()
versions = []
for node in nodes:
versions.append(node.version)
if logging:
log.info("Node versions in cluster {0}".format(versions))
return versions
def get_major_version(self):
""" Returns the major version of the node (e.g. 6.5) """
return self.get_nodes_self().major_version
def check_cluster_compatibility(self, version):
"""
Check if all nodes in cluster are of versions equal or above the version required.
:param version: Version to check the cluster compatibility for. Should be of format major_ver.minor_ver.
For example: 5.0, 4.5, 5.1
:return: True if cluster is compatible with the version specified, False otherwise. Return None if cluster is
uninitialized.
"""
nodes = self.get_nodes()
if not nodes:
# If nodes returned is None, it means that the cluster is not initialized yet and hence cluster
# compatibility cannot be found. Return None
return None
major_ver, minor_ver = version.split(".")
compatibility = int(major_ver) * 65536 + int(minor_ver)
is_compatible = True
for node in nodes:
clusterCompatibility = int(node.clusterCompatibility)
if clusterCompatibility < compatibility:
is_compatible = False
return is_compatible
# this method returns the services of nodes in cluster - implemented for Sherlock
def get_nodes_services(self):
nodes = self.get_nodes()
map = {}
for node in nodes:
key = "{0}:{1}".format(node.ip, node.port)
map[key] = node.services
return map
# Check node version
def check_node_versions(self, check_version="4.0"):
versions = self.get_nodes_versions()
if versions[0] < check_version:
return False
return True
def get_bucket_stats(self, bucket='default'):
stats = {}
status, json_parsed = self.get_bucket_stats_json(bucket)
if status:
op = json_parsed["op"]
samples = op["samples"]
for stat_name in samples:
if samples[stat_name]:
last_sample = len(samples[stat_name]) - 1
if last_sample:
stats[stat_name] = samples[stat_name][last_sample]
return stats
def get_fts_stats(self, index_name=None, bucket_name=None, stat_name=None):
"""
List of fts stats available as of 03/16/2017 -
default:default_idx3:avg_queries_latency: 0,
default:default_idx3:batch_merge_count: 0,
default:default_idx3:doc_count: 0,
default:default_idx3:iterator_next_count: 0,
default:default_idx3:iterator_seek_count: 0,
default:default_idx3:num_bytes_live_data: 0,
default:default_idx3:num_bytes_used_disk: 0,
default:default_idx3:num_mutations_to_index: 0,
default:default_idx3:num_pindexes: 0,
default:default_idx3:num_pindexes_actual: 0,
default:default_idx3:num_pindexes_target: 0,
default:default_idx3:num_recs_to_persist: 0,
default:default_idx3:reader_get_count: 0,
default:default_idx3:reader_multi_get_count: 0,
default:default_idx3:reader_prefix_iterator_count: 0,
default:default_idx3:reader_range_iterator_count: 0,
default:default_idx3:timer_batch_store_count: 0,
default:default_idx3:timer_data_delete_count: 0,
default:default_idx3:timer_data_update_count: 0,
default:default_idx3:timer_opaque_get_count: 0,
default:default_idx3:timer_opaque_set_count: 0,
default:default_idx3:timer_rollback_count: 0,
default:default_idx3:timer_snapshot_start_count: 0,
default:default_idx3:total_bytes_indexed: 0,
default:default_idx3:total_bytes_query_results: 0,
default:default_idx3:total_compactions: 0,
default:default_idx3:total_queries: 0,
default:default_idx3:total_queries_error: 0,
default:default_idx3:total_queries_slow: 0,
default:default_idx3:total_queries_timeout: 0,
default:default_idx3:total_request_time: 0,
default:default_idx3:total_term_searchers: 0,
default:default_idx3:writer_execute_batch_count: 0,
:param index_name: name of the index
:param bucket_name: source bucket
:param stat_name: any of the above
:return:
"""
api = "{0}{1}".format(self.fts_baseUrl, 'api/nsstats')
attempts = 0
while attempts < 5:
status, content, header = self._http_request(api)
json_parsed = json.loads(content)
if bucket_name is None and index_name is None and stat_name is None:
return status, content
if bucket_name is None and index_name is None:
key = stat_name
else:
key = bucket_name+':'+index_name+':'+stat_name
if key in json_parsed:
return status, json_parsed[key]
attempts += 1
log.info("Stat {0} not available yet".format(stat_name))
time.sleep(1)
log.error("ERROR: Stat {0} error on {1} on bucket {2}".
format(stat_name, index_name, bucket_name))
def start_fts_index_compaction(self, index_name):
api = "{0}{1}".format(self.fts_baseUrl, f'api/index/{index_name}/tasks')
params = {"op": "merge"}
status, content, header = self._http_request(api,
method='POST',
params=json.dumps(params, ensure_ascii=False),
headers=self._create_capi_headers(),
timeout=30)
json_parsed = json.loads(content)
return status, json_parsed
def get_fts_index_compactions(self, index_name):
api = "{0}{1}".format(self.fts_baseUrl, f'api/index/{index_name}/tasks')
params = {"op": "get"}
status, content, header = self._http_request(api,
method='POST',
params=json.dumps(params, ensure_ascii=False),
headers=self._create_capi_headers(),
timeout=30)
json_parsed = json.loads(content)
return status, json_parsed
def cancel_fts_index_compaction(self, index_name=None, uuid=None):
api = "{0}{1}".format(self.fts_baseUrl, f'api/index/{index_name}/tasks')
params = {"op": "cancel", "uuid": uuid}
status, content, header = self._http_request(api,
method='POST',
params=json.dumps(params, ensure_ascii=False),
headers=self._create_capi_headers(),
timeout=30)
json_parsed = json.loads(content)
return status, json_parsed
def get_bucket_stats_json(self, bucket='default'):
stats = {}
api = "{0}{1}{2}{3}".format(self.baseUrl, 'pools/default/buckets/', bucket, "/stats")
if isinstance(bucket, Bucket):
api = '{0}{1}{2}{3}'.format(self.baseUrl, 'pools/default/buckets/', bucket.name, "/stats")
status, content, header = self._http_request(api)
json_parsed = json.loads(content)
return status, json_parsed
def get_bucket_json(self, bucket='default'):
api = '{0}{1}{2}'.format(self.baseUrl, 'pools/default/buckets/', bucket)
if isinstance(bucket, Bucket):
api = '{0}{1}{2}'.format(self.baseUrl, 'pools/default/buckets/', bucket.name)
status, content, header = self._http_request(api)
if not status:
raise GetBucketInfoFailed(bucket, content)
return json.loads(content)
def get_bucket_maxTTL(self, bucket='default'):
bucket_info = self.get_bucket_json(bucket=bucket)
return bucket_info['maxTTL']
def get_bucket_compressionMode(self, bucket='default'):
bucket_info = self.get_bucket_json(bucket=bucket)
info = self.get_nodes_self()
if 5.5 > float(info.version[:3]):
bucket_info['compressionMode'] = "off"
return bucket_info['compressionMode']
def is_lww_enabled(self, bucket='default'):
bucket_info = self.get_bucket_json(bucket=bucket)
try:
if bucket_info['conflictResolutionType'] == 'lww':
return True
except KeyError:
return False
def get_bucket(self, bucket='default', num_attempt=1, timeout=1):
bucketInfo = None
try:
bucket = bucket.decode()
except AttributeError:
pass
api = '%s%s%s?basic_stats=true' % (self.baseUrl, 'pools/default/buckets/', bucket)
if isinstance(bucket, Bucket):
api = '%s%s%s?basic_stats=true' % (self.baseUrl, 'pools/default/buckets/', bucket.name)
status, content, header = self._http_request(api)
num = 1
while not status and num_attempt > num:
log.error("try to get {0} again after {1} sec".format(api, timeout))
time.sleep(timeout)
status, content, header = self._http_request(api)
num += 1
if status:
bucketInfo = RestParser().parse_get_bucket_response(content)
return bucketInfo
def get_vbuckets(self, bucket='default'):
b = self.get_bucket(bucket)
return None if not b else b.vbuckets
def delete_bucket(self, bucket='default', num_retries=3, poll_interval=5):
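"""Delete the bucket, retrying while the cluster answers 500 (bucket not yet
removed on all nodes within 30 secs); returns the final status."""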
api = '%s%s%s' % (self.baseUrl, 'pools/default/buckets/', bucket)
if isinstance(bucket, Bucket):
api = '%s%s%s' % (self.baseUrl, 'pools/default/buckets/', bucket.name)
status = False
while num_retries > 0:
try:
status, content, header = self._http_request(api, 'DELETE')
if int(header['status']) == 500:
# According to http://docs.couchbase.com/couchbase-manual-2.5/cb-rest-api/#deleting-buckets
# the cluster will return with 500 if it failed to nuke
# the bucket on all of the nodes within 30 secs
log.warning("Bucket deletion timed out waiting for all nodes, retrying...")
num_retries -= 1
time.sleep(poll_interval)
else:
break
except Exception as e:
num_retries -= 1
log.error(e)
log.error('{0} seconds sleep before calling delete_bucket again...'.format(poll_interval))
time.sleep(poll_interval)
return status
def delete_all_buckets(self):
buckets = self.get_buckets()
for bucket in buckets:
if isinstance(bucket, Bucket):
api = '%s%s%s' % (self.baseUrl, 'pools/default/buckets/', bucket.name)
self._http_request(api, 'DELETE')
'''Load any of the three sample buckets'''
def load_sample(self, sample_name, poll_interval=3, max_wait_time=1200, max_error_retries=3):
api = '{0}{1}'.format(self.baseUrl, "sampleBuckets/install")
data = '["{0}"]'.format(sample_name)
status, content, header = self._http_request(api, 'POST', data)
# Allow the sample bucket to be loaded
self.wait_until_bucket_loaded(sample_name, poll_interval, max_wait_time, max_error_retries)
return status
def wait_until_bucket_loaded(self, bucket_name, poll_interval=3, max_wait_time=1200, max_error_retries=3):
max_time = time.time() + float(max_wait_time)
is_bucket_loaded = False
response = ""
api = '{0}{1}'.format(self.baseUrl, "pools/default/buckets/{}".format(bucket_name))
previous_doc_count = 0
while time.time() < max_time and max_error_retries > 0:
time.sleep(poll_interval)
status, content, response = self._http_request(api, method='GET')
data = json.loads(content)
current_doc_count = int(data["basicStats"]["itemCount"])
if status:
if current_doc_count == previous_doc_count:
is_bucket_loaded = True
break
else:
previous_doc_count = current_doc_count
else:
max_error_retries -= 1
log.warning("Something wrong happened while getting bucket {0} items count, retrying.".format(bucket_name))
log.warning("Server response is {0}".format(str(response)))
if not is_bucket_loaded:
log.error("Bucket {0} was not loaded completely")
log.error("Last response is: {0}".format(str(response)))
# figure out the proxy port
def create_bucket(self, bucket='',
ramQuotaMB=1,
replicaNumber=1,
proxyPort=11211,
bucketType='membase',
replica_index=1,
threadsNumber=3,
flushEnabled=1,
evictionPolicy='valueOnly',
lww=False,
maxTTL=None,
compressionMode='passive',
storageBackend='couchstore'):
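"""Create a bucket via pools/default/buckets, retrying for up to 60 seconds
while a bucket with the same name still exists (e.g. right after deletion);
raises BucketCreationException on failure."""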
api = '{0}{1}'.format(self.baseUrl, 'pools/default/buckets')
params = urllib.parse.urlencode({})
init_params = {'name': bucket,
'ramQuotaMB': ramQuotaMB,
'replicaNumber': replicaNumber,
# 'proxyPort': proxyPort,
'bucketType': bucketType,
'replicaIndex': replica_index,
'threadsNumber': threadsNumber,
'flushEnabled': flushEnabled,
'evictionPolicy': evictionPolicy}
if bucketType == "memcached":
log.info("Create memcached bucket")
# 'replicaNumber' is not valid for memcached buckets
init_params.pop("replicaNumber", None)
if lww:
init_params['conflictResolutionType'] = 'lww'
if maxTTL:
init_params['maxTTL'] = maxTTL
if compressionMode and self.is_enterprise_edition():
init_params['compressionMode'] = compressionMode
if bucketType == 'ephemeral':
del init_params['replicaIndex'] # does not apply to ephemeral buckets, and is even rejected
# bucket storage is applicable only for membase bucket
if bucketType == "membase":
init_params['storageBackend'] = storageBackend
pre_spock = not self.check_cluster_compatibility("5.0")
if pre_spock:
init_params['proxyPort'] = proxyPort
params = urllib.parse.urlencode(init_params)
log.info("{0} with param: {1}".format(api, params))
create_start_time = time.time()
maxwait = 60
for numsleep in range(maxwait):
status, content, header = self._http_request(api, 'POST', params)
if status:
break
elif (int(header['status']) == 503 and
'{"_":"Bucket with given name still exists"}'.encode('utf-8') in content):
log.info("The bucket still exists, sleep 1 sec and retry")
time.sleep(1)
else:
raise BucketCreationException(ip=self.ip, bucket_name=bucket)
if (numsleep + 1) == maxwait:
log.error("Tried to create the bucket for {0} secs.. giving up".
format(maxwait))
raise BucketCreationException(ip=self.ip, bucket_name=bucket)
create_time = time.time() - create_start_time
log.info("{0:.02f} seconds to create bucket {1}".
format(round(create_time, 2), bucket))
return status
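# Illustrative usage (a sketch; `rest` and the parameter values are assumptions):
#   rest.create_bucket(bucket='default', ramQuotaMB=256, replicaNumber=1,
#                      bucketType='membase', storageBackend='couchstore')
# Note: per the pre_spock check above, proxyPort is only included for clusters older than 5.0.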
def change_bucket_props(self, bucket,
ramQuotaMB=None,
replicaNumber=None,
proxyPort=None,
replicaIndex=None,
flushEnabled=None,
timeSynchronization=None,
maxTTL=None,
compressionMode=None):
api = '{0}{1}{2}'.format(self.baseUrl, 'pools/default/buckets/', bucket)
if isinstance(bucket, Bucket):
api = '{0}{1}{2}'.format(self.baseUrl, 'pools/default/buckets/', bucket.name)
params = urllib.parse.urlencode({})
params_dict = {}
existing_bucket = self.get_bucket_json(bucket)
if ramQuotaMB:
params_dict["ramQuotaMB"] = ramQuotaMB
if replicaNumber:
params_dict["replicaNumber"] = replicaNumber
#if proxyPort:
# params_dict["proxyPort"] = proxyPort
if replicaIndex:
params_dict["replicaIndex"] = replicaIndex
if flushEnabled:
params_dict["flushEnabled"] = flushEnabled
if timeSynchronization:
params_dict["timeSynchronization"] = timeSynchronization
if maxTTL:
params_dict["maxTTL"] = maxTTL
if compressionMode and self.is_enterprise_edition():
params_dict["compressionMode"] = compressionMode
params = urllib.parse.urlencode(params_dict)
log.info("%s with param: %s" % (api, params))
status, content, header = self._http_request(api, 'POST', params)
if timeSynchronization:
if status:
raise Exception("Erroneously able to set bucket settings %s for bucket on time-sync" % (params, bucket))
return status, content
if not status:
raise Exception("Unable to set bucket settings %s for bucket" % (params, bucket))
log.info("bucket %s updated" % bucket)
return status
# return AutoFailoverSettings
def get_autofailover_settings(self):
settings = None
api = self.baseUrl + 'settings/autoFailover'
status, content, header = self._http_request(api)
json_parsed = json.loads(content)
if status:
settings = AutoFailoverSettings()
settings.enabled = json_parsed["enabled"]
settings.count = json_parsed["count"]
settings.timeout = json_parsed["timeout"]
settings.failoverOnDataDiskIssuesEnabled = json_parsed["failoverOnDataDiskIssues"]["enabled"]
settings.failoverOnDataDiskIssuesTimeout = json_parsed["failoverOnDataDiskIssues"]["timePeriod"]
settings.maxCount = json_parsed["maxCount"]
settings.failoverServerGroup = json_parsed["failoverServerGroup"]
if json_parsed["canAbortRebalance"]:
settings.can_abort_rebalance = json_parsed["canAbortRebalance"]
return settings
def update_autofailover_settings(self, enabled, timeout, canAbortRebalance=False, enable_disk_failure=False,
disk_timeout=120, maxCount=1, enableServerGroup=False):
params_dict = {}
params_dict['timeout'] = timeout
if enabled:
params_dict['enabled'] = 'true'
else:
params_dict['enabled'] = 'false'
if canAbortRebalance:
params_dict['canAbortRebalance'] = 'true'
if enable_disk_failure:
params_dict['failoverOnDataDiskIssues[enabled]'] = 'true'
params_dict['failoverOnDataDiskIssues[timePeriod]'] = disk_timeout
else:
params_dict['failoverOnDataDiskIssues[enabled]'] = 'false'
params_dict['maxCount'] = maxCount
if enableServerGroup:
params_dict['failoverServerGroup'] = 'true'
else:
params_dict['failoverServerGroup'] = 'false'
params = urllib.parse.urlencode(params_dict)
api = self.baseUrl + 'settings/autoFailover'
log.info('settings/autoFailover params : {0}'.format(params))
status, content, header = self._http_request(api, 'POST', params)
if not status:
log.warning('''failed to change autofailover_settings!
See MB-7282. Workaround:
wget --user=Administrator --password=asdasd --post-data='rpc:call(mb_master:master_node(), erlang, apply ,[fun () -> erlang:exit(erlang:whereis(mb_master), kill) end, []]).' http://localhost:8091/diag/eval''')
return status
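# Illustrative usage (a sketch; timeout and disk_timeout values are example assumptions):
#   rest.update_autofailover_settings(enabled=True, timeout=30,
#                                     enable_disk_failure=True, disk_timeout=120)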
# return AutoReprovisionSettings
def get_autoreprovision_settings(self):
settings = None
api = self.baseUrl + 'settings/autoReprovision'
status, content, header = self._http_request(api)
json_parsed = json.loads(content)
if status:
settings = AutoReprovisionSettings()
settings.enabled = json_parsed["enabled"]
settings.count = json_parsed["count"]
settings.max_nodes = json_parsed["max_nodes"]
return settings
def update_autoreprovision_settings(self, enabled, maxNodes=1):
if enabled:
params = urllib.parse.urlencode({'enabled': 'true',
'maxNodes': maxNodes})
else:
params = urllib.parse.urlencode({'enabled': 'false',
'maxNodes': maxNodes})
api = self.baseUrl + 'settings/autoReprovision'
log.info('settings/autoReprovision params : {0}'.format(params))
status, content, header = self._http_request(api, 'POST', params)
if not status:
log.error('failed to change autoReprovision_settings!')
return status
def reset_autofailover(self):
api = self.baseUrl + 'settings/autoFailover/resetCount'
status, content, header = self._http_request(api, 'POST', '')
return status
def reset_autoreprovision(self):
api = self.baseUrl + 'settings/autoReprovision/resetCount'
status, content, header = self._http_request(api, 'POST', '')
return status
def set_alerts_settings(self, recipients, sender, email_username, email_password, email_host='localhost', email_port=25, email_encrypt='false', alerts='auto_failover_node,auto_failover_maximum_reached'):
api = self.baseUrl + 'settings/alerts'
params = urllib.parse.urlencode({'enabled': 'true',
'recipients': recipients,
'sender': sender,
'emailUser': email_username,
'emailPass': email_password,
'emailHost': email_host,
'emailPort': email_port,
'emailEncrypt': email_encrypt,
'alerts': alerts})
log.info('settings/alerts params : {0}'.format(params))
status, content, header = self._http_request(api, 'POST', params)
return status
def get_alerts_settings(self):
api = self.baseUrl + 'settings/alerts'
status, content, header = self._http_request(api)
json_parsed = json.loads(content)
if not status:
raise Exception("unable to get autofailover alerts settings")
return json_parsed
def disable_alerts(self):
api = self.baseUrl + 'settings/alerts'
params = urllib.parse.urlencode({'enabled': 'false'})
log.info('settings/alerts params : {0}'.format(params))
status, content, header = self._http_request(api, 'POST', params)
return status
def set_cas_drift_threshold(self, bucket, ahead_threshold_in_millisecond, behind_threshold_in_millisecond):
api = self.baseUrl + 'pools/default/buckets/{0}'.format(bucket)
params_dict = {'driftAheadThresholdMs': ahead_threshold_in_millisecond,
'driftBehindThresholdMs': behind_threshold_in_millisecond}
params = urllib.parse.urlencode(params_dict)
log.info("%s with param: %s" % (api, params))
status, content, header = self._http_request(api, 'POST', params)
return status
def stop_rebalance(self, wait_timeout=10):
api = self.baseUrl + '/controller/stopRebalance'
status, content, header = self._http_request(api, 'POST')
if status:
for i in range(int(wait_timeout)):
if self._rebalance_progress_status() == 'running':
log.warning("rebalance is not stopped yet after {0} sec".format(i + 1))
time.sleep(1)
status = False
else:
log.info("rebalance was stopped")
status = True
break
else:
log.error("Rebalance is not stopped due to {0}".format(content))
return status
def set_data_path(self, data_path=None, index_path=None, cbas_path=None):
end_point = '/nodes/self/controller/settings'
api = self.baseUrl + end_point
paths = HTTPHeaderDict()
set_path = False
if data_path:
set_path = True
paths.add('path', data_path)
if index_path:
set_path = True
paths.add('index_path', index_path)
if cbas_path:
set_path = True
import ast
for cbas in ast.literal_eval(cbas_path):
paths.add('cbas_path', cbas)
if set_path:
params = urllib.parse.urlencode(paths)
log.info('%s : %s' % (end_point, params))
status, content, header = self._http_request(api, 'POST', params)
if status:
log.info("Setting data_path: {0}: status {1}".format(data_path, status))
else:
log.error("Unable to set data_path {0} : {1}".format(data_path, content))
return status
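# Illustrative usage (paths are hypothetical; note cbas_path expects a string that
# ast.literal_eval can parse into a list, as handled above):
#   rest.set_data_path(data_path="/data", index_path="/index", cbas_path="['/cbas1', '/cbas2']")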
def get_database_disk_size(self, bucket='default'):
api = self.baseUrl + "pools/{0}/buckets".format(bucket)
status, content, header = self._http_request(api)
json_parsed = json.loads(content)
# disk_size in MB
disk_size = (json_parsed[0]["basicStats"]["diskUsed"]) // (1024 * 1024)
return status, disk_size
def ddoc_compaction(self, design_doc_id, bucket="default"):
api = self.baseUrl + "pools/default/buckets/%s/ddocs/%s/controller/compactView" % \
(bucket, design_doc_id)
status, content, header = self._http_request(api, 'POST')
if not status:
raise CompactViewFailed(design_doc_id, content)
log.info("compaction for ddoc '%s' was triggered" % design_doc_id)
def check_compaction_status(self, bucket_name):
tasks = self.active_tasks()
if "error" in tasks:
raise Exception(tasks)
for task in tasks:
log.info("Task is {0}".format(task))
if task["type"] == "bucket_compaction":
if task["bucket"] == bucket_name:
return True, task["progress"]
return False, None
def change_memcached_t_option(self, value):
cmd = '[ns_config:update_key({node, N, memcached}, fun (PList)' + \
' -> lists:keystore(verbosity, 1, PList, {verbosity, \'-t ' + str(value) + '\'}) end)' + \
' || N <- ns_node_disco:nodes_wanted()].'
return self.diag_eval(cmd)
def set_ensure_full_commit(self, value):
"""Dynamic settings changes"""
# the boolean parameter is used to turn on/off ensure_full_commit(). In XDCR,
# issuing checkpoint in this function is expensive and not necessary in some
# test, turning off this function would speed up some test. The default value
# is ON.
cmd = 'ns_config:set(ensure_full_commit_enabled, {0}).'.format(value)
return self.diag_eval(cmd)
def get_internalSettings(self, param):
"""allows to get internalSettings values for:
indexAwareRebalanceDisabled, rebalanceIndexWaitingDisabled,
rebalanceIndexPausingDisabled, maxParallelIndexers,
maxParallelReplicaIndexers, maxBucketCount"""
api = self.baseUrl + "internalSettings"
status, content, header = self._http_request(api)
json_parsed = json.loads(content)
param = json_parsed[param]
return param
def set_internalSetting(self, param, value):
"Set any internal setting"
api = self.baseUrl + "internalSettings"
if isinstance(value, bool):
value = str(value).lower()
params = urllib.parse.urlencode({param : value})
status, content, header = self._http_request(api, "POST", params)
log.info('Update internal setting {0}={1}'.format(param, value))
return status
def get_replication_for_buckets(self, src_bucket_name, dest_bucket_name):
replications = self.get_replications()
for replication in replications:
if src_bucket_name in replication['source'] and \
replication['target'].endswith(dest_bucket_name):
return replication
raise XDCRException("Replication with Src bucket: {0} and Target bucket: {1} not found".
format(src_bucket_name, dest_bucket_name))
""" By default, these are the global replication settings -
{ optimisticReplicationThreshold:256,
workerBatchSize:500,
failureRestartInterval:1,
docBatchSizeKb":2048,
checkpointInterval":1800,
maxConcurrentReps":32}
You can override these using set_xdcr_param()
"""
def set_xdcr_param(self, src_bucket_name,
dest_bucket_name, param, value):
replication = self.get_replication_for_buckets(src_bucket_name, dest_bucket_name)
api = self.baseUrl[:-1] + replication['settingsURI']
value = str(value).lower()
params = urllib.parse.urlencode({param: value})
status, content, header = self._http_request(api, "POST", params)
if not status:
raise XDCRException("Unable to set replication setting {0}={1} on bucket {2} on node {3}".
format(param, value, src_bucket_name, self.ip))
else:
log.info("Updated {0}={1} on bucket '{2}' on {3}".format(param, value, src_bucket_name, self.ip))
def set_xdcr_params(self, src_bucket_name,
dest_bucket_name, param_value_map):
replication = self.get_replication_for_buckets(src_bucket_name, dest_bucket_name)
api = self.baseUrl[:-1] + replication['settingsURI']
params = urllib.parse.urlencode(param_value_map)
status, content, header = self._http_request(api, "POST", params)
if not status:
raise XDCRException("{0} \n Unable to set replication settings {1} on bucket {2} on node {3}".
format(content, param_value_map, src_bucket_name, self.ip))
else:
log.info("Updated {0} on bucket '{1}' on {2}".format(param_value_map, src_bucket_name, self.ip))
def set_global_xdcr_param(self, param, value):
api = self.baseUrl[:-1] + "/settings/replications"
value = str(value).lower()
params = urllib.parse.urlencode({param: value})
status, _, _ = self._http_request(api, "POST", params)
if not status:
raise XDCRException("Unable to set replication setting {0}={1} on node {2}".
format(param, value, self.ip))
log.info("Updated {0}={1} on {2}".format(param, value, self.ip))
# Gets per-replication setting value
def get_xdcr_param(self, src_bucket_name,
dest_bucket_name, param):
replication = self.get_replication_for_buckets(src_bucket_name, dest_bucket_name)
api = self.baseUrl[:-1] + replication['settingsURI']
status, content, _ = self._http_request(api)
if not status:
raise XDCRException("Unable to get replication setting {0} on bucket {1} on node {2}".
format(param, src_bucket_name, self.ip))
json_parsed = json.loads(content)
# when per-replication settings match global(internal) settings,
# the param is not returned by rest API
# in such cases, return internalSetting value for the param
try:
return json_parsed[param]
except KeyError:
if param == 'pauseRequested':
return False
else:
param = 'xdcr' + param[0].upper() + param[1:]
log.info("Trying to fetch xdcr param:{0} from global settings".
format(param))
return self.get_internalSettings(param)
# Returns a boolean value on whether replication is paused
def is_replication_paused(self, src_bucket_name, dest_bucket_name):
return self.get_xdcr_param(src_bucket_name, dest_bucket_name, 'pauseRequested')
def is_replication_paused_by_id(self, repl_id):
repl_id = repl_id.replace('/', '%2F')
api = self.baseUrl + 'settings/replications/' + repl_id
status, content, header = self._http_request(api)
if not status:
raise XDCRException("Unable to retrieve pause resume status for replication {0}".
format(repl_id))
repl_stats = json.loads(content)
return repl_stats['pauseRequested']
def pause_resume_repl_by_id(self, repl_id, param, value):
repl_id = repl_id.replace('/', '%2F')
api = self.baseUrl + 'settings/replications/' + repl_id
params = urllib.parse.urlencode({param: value})
status, _, _ = self._http_request(api, "POST", params)
if not status:
raise XDCRException("Unable to update {0}={1} setting for replication {2}".
format(param, value, repl_id))
log.info("Updated {0}={1} on {2}".format(param, value, repl_id))
def get_recent_xdcr_vb_ckpt(self, repl_id):
command = 'ns_server_testrunner_api:grab_all_goxdcr_checkpoints().'
status, content = self.diag_eval(command, print_log=False)
if not status:
raise Exception("Unable to get recent XDCR checkpoint information")
repl_ckpt_list = json.loads(content)
# a single decoding will only return checkpoint record as string
# convert string to dict using json
chkpt_doc_string = repl_ckpt_list['/ckpt/%s/0' % repl_id].replace('"', '\"')
chkpt_dict = json.loads(chkpt_doc_string)
return chkpt_dict['checkpoints'][0]
def get_repl_stat(self, repl_id, src_bkt="default", stat="data_replicated", timestamp=None):
repl_id = repl_id.replace('/', '%2F')
api = self.baseUrl + "pools/default/buckets/" + src_bkt + "/stats/replications%2F" \
+ repl_id + "%2F" + stat
if timestamp:
api += "?haveTStamp=" + timestamp
status, content, header = self._http_request(api)
if not status:
raise XDCRException("Unable to retrieve {0} stat for replication {1}".
format(stat, repl_id))
repl_stat = json.loads(content)
samples = []
for node in self.get_nodes():
items = repl_stat["nodeStats"]["{0}:8091".format(node.ip)]
samples.append(items)
return samples
""" Start of FTS rest apis"""
def set_fts_ram_quota(self, value):
"""set fts ram quota"""
api = self.baseUrl + "pools/default"
params = urllib.parse.urlencode({"ftsMemoryQuota": value})
status, content, _ = self._http_request(api, "POST", params)
if status:
log.info("SUCCESS: FTS RAM quota set to {0}mb".format(value))
else:
raise Exception("Error setting fts ram quota: {0}".format(content))
return status
def set_maxConcurrentPartitionMovesPerNode(self, value):
api = self.fts_baseUrl + "api/managerOptions"
params = {"maxConcurrentPartitionMovesPerNode": str(value)}
status, content, _ = self._http_request(api, "PUT", params=json.dumps(params, ensure_ascii=False), headers=self._create_capi_headers())
if status:
log.info("SUCCESS: FTS maxConcurrentPartitionMovesPerNode set to {0}".format(value))
return status
def set_disableFileTransferRebalance(self, value):
api = self.fts_baseUrl + "api/managerOptions"
params = {"disableFileTransferRebalance": str(value)}
status, content, _ = self._http_request(api, "PUT", params=json.dumps(params, ensure_ascii=False), headers=self._create_capi_headers())
if status:
log.info("SUCCESS: FTS disableFileTransferRebalance set to {0}".format(value))
return status
def set_maxFeedsPerDCPAgent(self, value):
api = self.fts_baseUrl + "api/managerOptions"
params = {"maxFeedsPerDCPAgent": str(value)}
status, content, _ = self._http_request(api, "PUT", params=json.dumps(params, ensure_ascii=False),
headers=self._create_capi_headers())
if status:
log.info("SUCCESS: FTS maxFeedsPerDCPAgent set to {0}".format(value))
return status
def set_maxDCPAgents(self, value):
api = self.fts_baseUrl + "api/managerOptions"
params = {"maxDCPAgents": str(value)}
status, content, _ = self._http_request(api, "PUT", params=json.dumps(params, ensure_ascii=False),
headers=self._create_capi_headers())
if status:
log.info("SUCCESS: FTS maxDCPAgents set to {0}".format(value))
return status
def create_fts_index(self, index_name, params):
"""create or edit fts index , returns {"status":"ok"} on success"""
api = self.fts_baseUrl + "api/index/{0}".format(index_name)
log.info(json.dumps(params))
status, content, header = self._http_request(api,
'PUT',
json.dumps(params, ensure_ascii=False),
headers=self._create_capi_headers(),
timeout=30)
if status:
log.info("Index {0} created".format(index_name))
else:
raise Exception("Error creating index: {0}".format(content))
return status
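# Illustrative usage (a minimal index-definition sketch; names and fields are assumptions):
#   index_params = {"type": "fulltext-index", "name": "idx_default",
#                   "sourceType": "couchbase", "sourceName": "default", "params": {}}
#   rest.create_fts_index("idx_default", index_params)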
def update_fts_index(self, index_name, index_def):
api = self.fts_baseUrl + "api/index/{0}".format(index_name)
log.info(json.dumps(index_def, indent=3))
status, content, header = self._http_request(api,
'PUT',
json.dumps(index_def, ensure_ascii=False),
headers=self._create_capi_headers(),
timeout=30)
if status:
log.info("Index/alias {0} updated".format(index_name))
else:
raise Exception("Error updating index: {0}".format(content))
return status
def get_fts_index_definition(self, name, timeout=30):
""" get fts index/alias definition """
json_parsed = {}
api = self.fts_baseUrl + "api/index/{0}".format(name)
status, content, header = self._http_request(
api,
headers=self._create_capi_headers(),
timeout=timeout)
if status:
json_parsed = json.loads(content)
return status, json_parsed
def get_fts_index_doc_count(self, name, timeout=30):
""" get number of docs indexed"""
json_parsed = {}
api = self.fts_baseUrl + "api/index/{0}/count".format(name)
status, content, header = self._http_request(
api,
headers=self._create_capi_headers(),
timeout=timeout)
if status:
json_parsed = json.loads(content)
return json_parsed['count']
def get_fts_index_uuid(self, name, timeout=30):
""" Returns uuid of index/alias """
json_parsed = {}
api = self.fts_baseUrl + "api/index/{0}".format(name)
status, content, header = self._http_request(
api,
headers=self._create_capi_headers(),
timeout=timeout)
if status:
json_parsed = json.loads(content)
return json_parsed['indexDef']['uuid']
def get_fts_pindex_stats(self, timeout=30):
""" Returns uuid of index/alias """
json_parsed = {}
api = self.fts_baseUrl + "api/stats"
status, content, header = self._http_request(
api,
headers=self._create_capi_headers(),
timeout=timeout)
if status:
json_parsed = json.loads(content)
return json_parsed['pindexes']
def delete_fts_index(self, name):
""" delete fts index/alias """
api = self.fts_baseUrl + "api/index/{0}".format(name)
status, content, header = self._http_request(
api,
'DELETE',
headers=self._create_capi_headers())
return status
def delete_fts_index_extended_output(self, name):
""" delete fts index/alias """
api = self.fts_baseUrl + "api/index/{0}".format(name)
status, content, header = self._http_request(
api,
'DELETE',
headers=self._create_capi_headers())
return status, content, header
def stop_fts_index_update(self, name):
""" method to stop fts index from updating"""
api = self.fts_baseUrl + "api/index/{0}/ingestControl/pause".format(name)
log.info('calling api : {0}'.format(api))
status, content, header = self._http_request(
api,
'POST',
'',
headers=self._create_capi_headers())
return status
def resume_fts_index_update(self, name):
""" method to stop fts index from updating"""
api = self.fts_baseUrl + "api/index/{0}/ingestControl/resume".format(name)
log.info('calling api : {0}'.format(api))
status, content, header = self._http_request(
api,
'POST',
'',
headers=self._create_capi_headers())
return status
def freeze_fts_index_partitions(self, name):
""" method to freeze index partitions asignment"""
api = self.fts_baseUrl+ "api/index/{0}/planFreezeControl/freeze".format(name)
log.info('calling api : {0}'.format(api))
status, content, header = self._http_request(
api,
'POST',
'',
headers=self._create_capi_headers())
return status
def set_bleve_max_result_window(self, bmrw_value):
"""create or edit fts index , returns {"status":"ok"} on success"""
api = self.fts_baseUrl + "api/managerOptions"
params = {"bleveMaxResultWindow": str(bmrw_value)}
log.info(json.dumps(params))
status, content, header = self._http_request(api,
'PUT',
json.dumps(params, ensure_ascii=False),
headers=self._create_capi_headers(),
timeout=30)
if status:
log.info("Updated bleveMaxResultWindow")
else:
raise Exception("Error Updating bleveMaxResultWindow: {0}".format(content))
return status
def set_node_setting(self, setting_name, value):
"""create or edit fts index , returns {"status":"ok"} on success"""
api = self.fts_baseUrl + "api/managerOptions"
params = {str(setting_name): str(value)}
log.info(json.dumps(params))
status, content, header = self._http_request(api,
'PUT',
json.dumps(params, ensure_ascii=False),
headers=self._create_capi_headers(),
timeout=30)
if status:
log.info("Updated {0}".format(setting_name))
else:
raise Exception("Error Updating {0}: {1}".format(setting_name, content))
return status
def unfreeze_fts_index_partitions(self, name):
""" method to freeze index partitions asignment"""
api = self.fts_baseUrl+ "api/index/{0}/planFreezeControl/unfreeze".format(name)
log.info('calling api : {0}'.format(api))
status, content, header = self._http_request(
api,
'POST',
'',
headers=self._create_capi_headers())
return status
def disable_querying_on_fts_index(self, name):
""" method to disable querying on index"""
api = self.fts_baseUrl + "api/index/{0}/queryControl/disallow".format(name)
log.info('calling api : {0}'.format(api))
status, content, header = self._http_request(
api,
'POST',
'',
headers=self._create_capi_headers())
return status
def enable_querying_on_fts_index(self, name):
""" method to enable querying on index"""
api = self.fts_baseUrl + "api/index/{0}/queryControl/allow".format(name)
log.info('calling api : {0}'.format(api))
status, content, header = self._http_request(
api,
'POST',
'',
headers=self._create_capi_headers())
return status
def run_fts_query(self, index_name, query_json, timeout=70):
"""Method run an FTS query through rest api"""
api = self.fts_baseUrl + "api/index/{0}/query".format(index_name)
headers = self._create_capi_headers()
status, content, header = self._http_request(
api,
"POST",
json.dumps(query_json, ensure_ascii=False).encode('utf8'),
headers,
timeout=timeout)
content = json.loads(content)
if status:
return content['total_hits'], content['hits'], content['took'], \
content['status']
else:
return -1, content['error'], -1, content['status']
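# Illustrative usage (the query body is a sketch of the FTS query JSON format; field/value are assumptions):
#   hits, matches, took, status = rest.run_fts_query(
#       "idx_default", {"query": {"field": "type", "match": "hotel"}, "size": 10})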
def run_fts_query_generalized(self, index_name, query_json, timeout=70):
"""Method run an FTS query through rest api"""
api = self.fts_baseUrl + "api/index/{0}/query".format(index_name)
headers = self._create_capi_headers()
status, content, header = self._http_request(
api,
"POST",
json.dumps(query_json, ensure_ascii=False).encode('utf8'),
headers,
timeout=timeout)
content = json.loads(content)
return content
def run_fts_query_with_facets(self, index_name, query_json):
"""Method run an FTS query through rest api"""
api = self.fts_baseUrl + "api/index/{0}/query".format(index_name)
headers = self._create_capi_headers()
status, content, header = self._http_request(
api,
"POST",
json.dumps(query_json, ensure_ascii=False).encode('utf8'),
headers,
timeout=70)
if status:
content = json.loads(content)
return content['total_hits'], content['hits'], content['took'], \
content['status'], content['facets']
""" End of FTS rest APIs """
def set_reb_cons_view(self, disable):
"""Enable/disable consistent view for rebalance tasks"""
api = self.baseUrl + "internalSettings"
params = {"indexAwareRebalanceDisabled": str(disable).lower()}
params = urllib.parse.urlencode(params)
status, content, header = self._http_request(api, "POST", params)
log.info('Consistent-views during rebalance was set as indexAwareRebalanceDisabled={0}'\
.format(str(disable).lower()))
return status
def set_reb_index_waiting(self, disable):
"""Enable/disable rebalance index waiting"""
api = self.baseUrl + "internalSettings"
params = {"rebalanceIndexWaitingDisabled": str(disable).lower()}
params = urllib.parse.urlencode(params)
status, content, header = self._http_request(api, "POST", params)
log.info('rebalance index waiting was set as rebalanceIndexWaitingDisabled={0}'\
.format(str(disable).lower()))
return status
def set_rebalance_index_pausing(self, disable):
"""Enable/disable index pausing during rebalance"""
api = self.baseUrl + "internalSettings"
params = {"rebalanceIndexPausingDisabled": str(disable).lower()}
params = urllib.parse.urlencode(params)
status, content, header = self._http_request(api, "POST", params)
log.info('index pausing during rebalance was set as rebalanceIndexPausingDisabled={0}'\
.format(str(disable).lower()))
return status
def set_max_parallel_indexers(self, count):
"""set max parallel indexer threads"""
api = self.baseUrl + "internalSettings"
params = {"maxParallelIndexers": count}
params = urllib.parse.urlencode(params)
status, content, header = self._http_request(api, "POST", params)
log.info('max parallel indexer threads was set as maxParallelIndexers={0}'.\
format(count))
return status
def set_max_parallel_replica_indexers(self, count):
"""set max parallel replica indexers threads"""
api = self.baseUrl + "internalSettings"
params = {"maxParallelReplicaIndexers": count}
params = urllib.parse.urlencode(params)
status, content, header = self._http_request(api, "POST", params)
log.info('max parallel replica indexers threads was set as maxParallelReplicaIndexers={0}'.\
format(count))
return status
def get_internal_replication_type(self):
buckets = self.get_buckets()
cmd = "\'{ok, BC} = ns_bucket:get_bucket(%s), ns_bucket:replication_type(BC).\'" % buckets[0].name
return self.diag_eval(cmd)
def set_mc_threads(self, mc_threads=4):
"""
Change number of memcached threads and restart the cluster
"""
cmd = "[ns_config:update_key({node, N, memcached}, " \
"fun (PList) -> lists:keystore(verbosity, 1, PList," \
" {verbosity, \"-t %s\"}) end) " \
"|| N <- ns_node_disco:nodes_wanted()]." % mc_threads
return self.diag_eval(cmd)
def get_auto_compaction_settings(self):
api = self.baseUrl + "settings/autoCompaction"
status, content, header = self._http_request(api)
return json.loads(content)
def set_auto_compaction(self, parallelDBAndVC="false",
dbFragmentThreshold=None,
viewFragmntThreshold=None,
dbFragmentThresholdPercentage=None,
viewFragmntThresholdPercentage=None,
allowedTimePeriodFromHour=None,
allowedTimePeriodFromMin=None,
allowedTimePeriodToHour=None,
allowedTimePeriodToMin=None,
allowedTimePeriodAbort=None,
bucket=None):
"""Reset compaction values to default, try with old fields (dp4 build)
and then try with newer fields"""
params = {}
api = self.baseUrl
if bucket is None:
# setting is cluster wide
api = api + "controller/setAutoCompaction"
else:
# overriding per/bucket compaction setting
api = api + "pools/default/buckets/" + bucket
params["autoCompactionDefined"] = "true"
# reuse current ram quota in mb per node
num_nodes = len(self.node_statuses())
bucket_info = self.get_bucket_json(bucket)
quota = bucket_info["quota"]["ram"] // (1048576 * num_nodes)
params["ramQuotaMB"] = quota
params["parallelDBAndViewCompaction"] = parallelDBAndVC
# Need to verify None because the value could be = 0
if dbFragmentThreshold is not None:
params["databaseFragmentationThreshold[size]"] = dbFragmentThreshold
if viewFragmntThreshold is not None:
params["viewFragmentationThreshold[size]"] = viewFragmntThreshold
if dbFragmentThresholdPercentage is not None:
params["databaseFragmentationThreshold[percentage]"] = dbFragmentThresholdPercentage
if viewFragmntThresholdPercentage is not None:
params["viewFragmentationThreshold[percentage]"] = viewFragmntThresholdPercentage
if allowedTimePeriodFromHour is not None:
params["allowedTimePeriod[fromHour]"] = allowedTimePeriodFromHour
if allowedTimePeriodFromMin is not None:
params["allowedTimePeriod[fromMinute]"] = allowedTimePeriodFromMin
if allowedTimePeriodToHour is not None:
params["allowedTimePeriod[toHour]"] = allowedTimePeriodToHour
if allowedTimePeriodToMin is not None:
params["allowedTimePeriod[toMinute]"] = allowedTimePeriodToMin
if allowedTimePeriodAbort is not None:
params["allowedTimePeriod[abortOutside]"] = allowedTimePeriodAbort
params = urllib.parse.urlencode(params)
log.info("'%s' bucket's settings will be changed with parameters: %s" % (bucket, params))
return self._http_request(api, "POST", params)
def disable_auto_compaction(self):
"""
Cluster-wide Setting
Disable autocompaction on doc and view
"""
api = self.baseUrl + "controller/setAutoCompaction"
log.info("Disable autocompaction in cluster-wide setting")
status, content, header = self._http_request(api, "POST",
"parallelDBAndViewCompaction=false")
return status
def set_purge_interval_and_parallel_compaction(self, interval=3, parallel="false"):
"""
Cluster-wide setting.
Set purge interval
Set parallel db and view compaction
Return: status
"""
api = self.baseUrl + "controller/setAutoCompaction"
log.info("Set purgeInterval to %s and parallel DB and view compaction to %s"\
% (interval, parallel))
params = {}
params["purgeInterval"] = interval
params["parallelDBAndViewCompaction"] = parallel
params = urllib.parse.urlencode(params)
status, content, header = self._http_request(api, "POST", params)
return status, content
def set_indexer_compaction(self, mode="circular", indexDayOfWeek=None, indexFromHour=0,
indexFromMinute=0, abortOutside=False,
indexToHour=0, indexToMinute=0, fragmentation=30):
"""Reset compaction values to default, try with old fields (dp4 build)
and then try with newer fields"""
params = {}
api = self.baseUrl + "controller/setAutoCompaction"
params["indexCompactionMode"] = mode
params["indexCircularCompaction[interval][fromHour]"] = indexFromHour
params["indexCircularCompaction[interval][fromMinute]"] = indexFromMinute
params["indexCircularCompaction[interval][toHour]"] = indexToHour
params["indexCircularCompaction[interval][toMinute]"] = indexToMinute
if indexDayOfWeek:
params["indexCircularCompaction[daysOfWeek]"] = indexDayOfWeek
params["indexCircularCompaction[interval][abortOutside]"] = str(abortOutside).lower()
params["parallelDBAndViewCompaction"] = "false"
if mode == "full":
params["indexFragmentationThreshold[percentage]"] = fragmentation
log.info("Indexer Compaction Settings: %s" % (params))
params = urllib.parse.urlencode(params)
return self._http_request(api, "POST", params)
def set_global_loglevel(self, loglevel='error'):
"""Set cluster-wide logging level for core components
Possible loglevel:
-- debug
-- info
-- warn
-- error
"""
api = self.baseUrl + 'diag/eval'
request_body = 'rpc:eval_everywhere(erlang, apply, [fun () -> \
[ale:set_loglevel(L, {0}) || L <- \
[ns_server, couchdb, user, menelaus, ns_doctor, stats, \
rebalance, cluster, views, stderr]] end, []]).'.format(loglevel)
return self._http_request(api=api, method='POST', params=request_body,
headers=self._create_headers())
def set_indexer_params(self, parameter, val):
"""
:Possible parameters:
-- indexerThreads
-- memorySnapshotInterval
-- stableSnapshotInterval
-- maxRollbackPoints
-- logLevel
"""
params = {}
api = self.baseUrl + 'settings/indexes'
params[parameter] = val
params = urllib.parse.urlencode(params)
status, content, header = self._http_request(api, "POST", params)
log.info('Indexer {0} set to {1}'.format(parameter, val))
return status
def get_global_index_settings(self):
api = self.baseUrl + "settings/indexes"
status, content, header = self._http_request(api)
if status:
return json.loads(content)
return None
def set_couchdb_option(self, section, option, value):
"""Dynamic settings changes"""
cmd = 'ns_config:set({{couchdb, {{{0}, {1}}}}}, {2}).'.format(section,
option,
value)
return self.diag_eval(cmd)
def get_alerts(self):
api = self.baseUrl + "pools/default/"
status, content, header = self._http_request(api)
json_parsed = json.loads(content)
if status:
if "alerts" in json_parsed:
return json_parsed['alerts']
else:
return None
def get_nodes_data_from_cluster(self, param="nodes"):
api = self.baseUrl + "pools/default/"
status, content, header = self._http_request(api)
json_parsed = json.loads(content)
if status:
if param in json_parsed:
return json_parsed[param]
else:
return None
def flush_bucket(self, bucket="default"):
if isinstance(bucket, Bucket):
bucket_name = bucket.name
else:
bucket_name = bucket
api = self.baseUrl + "pools/default/buckets/%s/controller/doFlush" % (bucket_name)
status, content, header = self._http_request(api, 'POST')
if not status:
raise BucketFlushFailed(self.ip, bucket_name)
log.info("Flush for bucket '%s' was triggered" % bucket_name)
return True
def update_notifications(self, enable):
api = self.baseUrl + 'settings/stats'
params = urllib.parse.urlencode({'sendStats' : enable})
log.info('settings/stats params : {0}'.format(params))
status, content, header = self._http_request(api, 'POST', params)
return status
def get_notifications(self):
api = self.baseUrl + 'settings/stats'
status, content, header = self._http_request(api)
json_parsed = json.loads(content)
if status:
return json_parsed["sendStats"]
return None
def get_num_rollback_stat(self, bucket):
api = self.index_baseUrl + 'stats'
status, content, header = self._http_request(api)
json_parsed = json.loads(content)
num_rollback = json_parsed["MAINT_STREAM:{}:num_rollbacks".format(bucket)]
return num_rollback
def get_num_rollback_to_zero_stat(self, bucket):
api = self.index_baseUrl + 'stats'
status, content, header = self._http_request(api)
json_parsed = json.loads(content)
num_rollback = json_parsed["MAINT_STREAM:{}:num_rollbacks_to_zero".format(bucket)]
return num_rollback
def get_logs(self, last_n=10, contains_text=None):
api = self.baseUrl + 'logs'
status, content, header = self._http_request(api)
json_parsed = json.loads(content.decode("utf-8","ignore"))
logs = json_parsed['list']
logs.reverse()
result = []
for i in range(min(last_n, len(logs))):
result.append(logs[i])
if contains_text is not None and contains_text in logs[i]["text"]:
break
return result
def print_UI_logs(self, last_n=10, contains_text=None):
logs = self.get_logs(last_n, contains_text)
log.info("Latest logs from UI on {0}:".format(self.ip))
for lg in logs: log.error(lg)
def get_ro_user(self):
api = self.baseUrl + 'settings/readOnlyAdminName'
status, content, header = self._http_request(api, 'GET', '')
return content, status
def delete_ro_user(self):
api = self.baseUrl + 'settings/readOnlyUser'
status, content, header = self._http_request(api, 'DELETE', '')
return status
def create_ro_user(self, username, password):
api = self.baseUrl + 'settings/readOnlyUser'
params = urllib.parse.urlencode({'username' : username, 'password' : password})
log.info('settings/readOnlyUser params : {0}'.format(params))
status, content, header = self._http_request(api, 'POST', params)
return status
# Change password for readonly user
def changePass_ro_user(self, username, password):
api = self.baseUrl + 'settings/readOnlyUser'
params = urllib.parse.urlencode({'username' : username, 'password' : password})
log.info('settings/readOnlyUser params : {0}'.format(params))
status, content, header = self._http_request(api, 'PUT', params)
return status
'''Start Monitoring/Profiling Rest Calls'''
def set_completed_requests_collection_duration(self, server, min_time):
http = httplib2.Http(disable_ssl_certificate_validation=True)
n1ql_port = CbServer.n1ql_port
protocol = "http"
if CbServer.use_https:
n1ql_port = str(CbServer.ssl_port_map.get(str(n1ql_port), str(n1ql_port)))
protocol = "https"
api = "%s://%s:%s/" % (protocol,server.ip, n1ql_port) + "admin/settings"
body = {"completed-threshold": min_time}
headers = self._create_headers_with_auth('Administrator', 'password')
response, content = http.request(api, "POST", headers=headers, body=json.dumps(body))
return response, content
def set_completed_requests_max_entries(self, server, no_entries):
http = httplib2.Http(disable_ssl_certificate_validation=True)
n1ql_port = CbServer.n1ql_port
protocol = "http"
if CbServer.use_https:
n1ql_port = str(CbServer.ssl_port_map.get(str(n1ql_port), str(n1ql_port)))
protocol = "https"
api = "%s://%s:%s/" % (protocol, server.ip, n1ql_port) + "admin/settings"
body = {"completed-limit": no_entries}
headers = self._create_headers_with_auth('Administrator', 'password')
response, content = http.request(api, "POST", headers=headers, body=json.dumps(body))
return response, content
def set_profiling(self, server, setting):
http = httplib2.Http(disable_ssl_certificate_validation=True)
n1ql_port = CbServer.n1ql_port
protocol = "http"
if CbServer.use_https:
n1ql_port = str(CbServer.ssl_port_map.get(str(n1ql_port), str(n1ql_port)))
protocol = "https"
api = "%s://%s:%s/" % (protocol, server.ip, n1ql_port) + "admin/settings"
body = {"profile": setting}
headers = self._create_headers_with_auth('Administrator', 'password')
response, content = http.request(api, "POST", headers=headers, body=json.dumps(body))
return response, content
def set_profiling_controls(self, server, setting):
http = httplib2.Http(disable_ssl_certificate_validation=True)
n1ql_port = CbServer.n1ql_port
protocol = "http"
if CbServer.use_https:
n1ql_port = str(CbServer.ssl_port_map.get(str(n1ql_port), str(n1ql_port)))
protocol = "https"
api = "%s://%s:%s/" % (protocol, server.ip, n1ql_port) + "admin/settings"
body = {"controls": setting}
headers = self._create_headers_with_auth('Administrator', 'password')
response, content = http.request(api, "POST", headers=headers, body=json.dumps(body))
return response, content
def get_query_admin_settings(self, server):
http = httplib2.Http(disable_ssl_certificate_validation=True)
n1ql_port = CbServer.n1ql_port
protocol = "http"
if CbServer.use_https:
n1ql_port = str(CbServer.ssl_port_map.get(str(n1ql_port), str(n1ql_port)))
protocol = "https"
api = "%s://%s:%s/" % (protocol, server.ip, n1ql_port) + "admin/settings"
headers = self._create_headers_with_auth('Administrator', 'password')
response, content = http.request(api, "GET", headers=headers)
result = json.loads(content)
return result
def get_query_vitals(self, server):
http = httplib2.Http(disable_ssl_certificate_validation=True)
n1ql_port = CbServer.n1ql_port
protocol = "http"
if CbServer.use_https:
n1ql_port = str(CbServer.ssl_port_map.get(str(n1ql_port), str(n1ql_port)))
protocol = "https"
api = "%s://%s:%s/" % (protocol,server.ip, n1ql_port) + "admin/vitals"
headers = self._create_headers_with_auth('Administrator', 'password')
response, content = http.request(api, "GET", headers=headers)
return response, content
'''End Monitoring/Profiling Rest Calls'''
def create_whitelist(self, server, whitelist):
http = httplib2.Http(disable_ssl_certificate_validation=True)
protocol = "http"
if CbServer.use_https:
protocol = "https"
api = "%s://%s:%s/" % (protocol, server.ip, server.port) + "settings/querySettings/curlWhitelist"
headers = self._create_headers_with_auth('Administrator', 'password')
response, content = http.request(api, "POST", headers=headers, body=json.dumps(whitelist))
return response, content
def query_tool(self, query, port=8093, timeout=1300, query_params={}, is_prepared=False, named_prepare=None,
verbose = True, encoded_plan=None, servers=None):
if timeout is None:
timeout = 1300
protocol = "http"
if CbServer.use_https:
port = str(CbServer.ssl_port_map.get(str(port), str(port)))
protocol = "https"
key = 'prepared' if is_prepared else 'statement'
headers = None
prepared = json.dumps(query)
if is_prepared:
if named_prepare and encoded_plan:
http = httplib2.Http(disable_ssl_certificate_validation=True)
if len(servers)>1:
url = "%s://%s:%s/query/service" % (protocol, servers[1].ip, port)
else:
url = "%s://%s:%s/query/service" % (protocol, self.ip, port)
headers = self._create_headers_encoded_prepared()
body = {'prepared': named_prepare, 'encoded_plan':encoded_plan}
response, content = http.request(url, 'POST', headers=headers, body=json.dumps(body))
return eval(content)
elif named_prepare and not encoded_plan:
params = 'prepared="%s"' % named_prepare
else:
if isinstance(query, dict):
prepared = json.dumps(query['name'])
else:
prepared = json.dumps(query)
prepared = str(prepared)
params = 'prepared=' + urllib.parse.quote(prepared, '~()')
if 'creds' in query_params and query_params['creds']:
headers = self._create_headers_with_auth(query_params['creds'][0]['user'],
query_params['creds'][0]['pass'])
api = "%s://%s:%s/query/service?%s" % (protocol, self.ip, port, params)
log.info("%s"%api)
else:
params = {key : query}
try:
if 'creds' in query_params and query_params['creds']:
headers = self._create_headers_with_auth(query_params['creds'][0]['user'],
query_params['creds'][0]['pass'])
del query_params['creds']
except Exception:
traceback.print_exc()
params.update(query_params)
params = urllib.parse.urlencode(params)
if verbose:
log.info('query params : {0}'.format(params))
api = "%s://%s:%s/query?%s" % (protocol, self.ip, port, params)
if 'query_context' in query_params and query_params['query_context']:
log.info(f"Running Query with query_context: {query_params['query_context']}")
try:
status, content, header = self._http_request(api, 'POST', timeout=timeout, headers=headers)
except Exception as ex:
print("\nException error: ", str(ex))
print("\napi: ", api)
print("\nheaders: ", headers)
raise
try:
return json.loads(content)
except ValueError:
return content
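# Illustrative usage (the statement and extra query params are assumptions):
#   result = rest.query_tool("SELECT COUNT(*) FROM `default`",
#                            query_params={"timeout": "120s"})
#   rows = result.get("results", [])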
def analytics_tool(self, query, port=8095, timeout=650, query_params={}, is_prepared=False, named_prepare=None,
verbose = True, encoded_plan=None, servers=None):
protocol = "http"
if CbServer.use_https:
port = str(CbServer.ssl_port_map.get(str(port), str(port)))
protocol = "https"
key = 'prepared' if is_prepared else 'statement'
headers = None
content=""
prepared = json.dumps(query)
if is_prepared:
if named_prepare and encoded_plan:
http = httplib2.Http(disable_ssl_certificate_validation=True)
if len(servers)>1:
url = "%s://%s:%s/query/service" % (protocol, servers[1].ip, port)
else:
url = "%s://%s:%s/query/service" % (protocol, self.ip, port)
headers = {'Content-type': 'application/json'}
body = {'prepared': named_prepare, 'encoded_plan':encoded_plan}
response, content = http.request(url, 'POST', headers=headers, body=json.dumps(body))
return eval(content)
elif named_prepare and not encoded_plan:
params = 'prepared="%s"' % named_prepare
else:
prepared = json.dumps(query)
prepared = str(prepared.encode('utf-8'))
params = 'prepared=' + urllib.parse.quote(prepared, '~()')
if 'creds' in query_params and query_params['creds']:
headers = self._create_headers_with_auth(query_params['creds'][0]['user'],
query_params['creds'][0]['pass'])
api = "%s/analytics/service?%s" % (self.cbas_base_url, params)
log.info("%s"%api)
else:
params = {key : query}
if 'creds' in query_params and query_params['creds']:
headers = self._create_headers_with_auth(query_params['creds'][0]['user'],
query_params['creds'][0]['pass'])
del query_params['creds']
params.update(query_params)
params = urllib.parse.urlencode(params)
if verbose:
log.info('query params : {0}'.format(params))
api = "%s/analytics/service?%s" % (self.cbas_base_url, params)
status, content, header = self._http_request(api, 'POST', timeout=timeout, headers=headers)
try:
return json.loads(content)
except ValueError:
return content
def query_tool_stats(self):
n1ql_port = CbServer.n1ql_port
protocol = "http"
if CbServer.use_https:
n1ql_port = CbServer.ssl_n1ql_port
protocol = "https"
log.info('query n1ql stats')
api = "%s://%s:%s/admin/stats" % (protocol, str(n1ql_port), self.ip)
status, content, header = self._http_request(api, 'GET')
log.info(content)
try:
return json.loads(content)
except ValueError:
return content
def index_tool_stats(self, show_index_stats=True):
log.info('index n1ql stats')
port = CbServer.port
protocol = "http"
if CbServer.use_https:
port = CbServer.ssl_port
protocol = "https"
api = "%s://%s:%s/indexStatus" % (protocol, self.ip, port)
params = ""
status, content, header = self._http_request(api, 'GET', params)
if show_index_stats:
log.info(content)
try:
return json.loads(content)
except ValueError:
return content
# return all rack/zone info
def get_all_zones_info(self, timeout=120):
zones = {}
api = self.baseUrl + 'pools/default/serverGroups'
status, content, header = self._http_request(api, timeout=timeout)
if status:
zones = json.loads(content)
else:
raise Exception("Failed to get all zones info.\n \
Zone only supports from couchbase server version 2.5 and up.")
return zones
# return group name and unique uuid
def get_zone_names(self):
zone_names = {}
zone_info = self.get_all_zones_info()
if zone_info and len(zone_info["groups"]) >= 1:
for i in range(0, len(zone_info["groups"])):
# strip the "/pools/default/serverGroups/" prefix (28 chars) to keep only the group uuid
zone_names[zone_info["groups"][i]["name"]] = zone_info["groups"][i]["uri"][28:]
return zone_names
def add_zone(self, zone_name):
api = self.baseUrl + 'pools/default/serverGroups'
request_name = "name={0}".format(zone_name)
status, content, header = self._http_request(api, "POST", \
params=request_name)
if status:
log.info("zone {0} is added".format(zone_name))
return True
else:
raise Exception("Failed to add zone with name: %s " % zone_name)
def delete_zone(self, zone_name):
api = self.baseUrl + 'pools/default/serverGroups/'
# check if zone exist
found = False
zones = self.get_zone_names()
for zone in zones:
if zone_name == zone:
api += zones[zone_name]
found = True
break
if not found:
raise Exception("There is not zone with name: %s in cluster" % zone_name)
status, content, header = self._http_request(api, "DELETE")
if status:
log.info("zone {0} is deleted".format(zone_name))
else:
raise Exception("Failed to delete zone with name: %s " % zone_name)
def rename_zone(self, old_name, new_name):
api = self.baseUrl + 'pools/default/serverGroups/'
# check if zone exist
found = False
zones = self.get_zone_names()
for zone in zones:
if old_name == zone:
api += zones[old_name]
request_name = "name={0}".format(new_name)
found = True
break
if not found:
raise Exception("There is not zone with name: %s in cluster" % old_name)
status, content, header = self._http_request(api, "PUT", params=request_name)
if status:
log.info("zone {0} is renamed to {1}".format(old_name, new_name))
else:
raise Exception("Failed to rename zone with name: %s " % old_name)
# get all nodes info in one zone/rack/group
def get_nodes_in_zone(self, zone_name):
nodes = {}
tmp = {}
zone_info = self.get_all_zones_info()
if zone_name != "":
found = False
if len(zone_info["groups"]) >= 1:
for i in range(0, len(zone_info["groups"])):
if zone_info["groups"][i]["name"] == zone_name:
tmp = zone_info["groups"][i]["nodes"]
if not tmp:
log.info("zone {0} is existed but no node in it".format(zone_name))
# remove port
for node in tmp:
node["hostname"] = node["hostname"].split(":")
node["hostname"] = node["hostname"][0]
nodes[node["hostname"]] = node
found = True
break
if not found:
raise Exception("There is not zone with name: %s in cluster" % zone_name)
return nodes
def get_zone_and_nodes(self):
""" only return zones with node in its """
zones = {}
tmp = {}
zone_info = self.get_all_zones_info()
if len(zone_info["groups"]) >= 1:
for i in range(0, len(zone_info["groups"])):
tmp = zone_info["groups"][i]["nodes"]
if not tmp:
log.info("zone {0} is existed but no node in it".format(tmp))
# remove port
else:
nodes = []
for node in tmp:
node["hostname"] = node["hostname"].split(":")
node["hostname"] = node["hostname"][0]
print(node["hostname"][0])
nodes.append(node["hostname"])
zones[zone_info["groups"][i]["name"]] = nodes
return zones
def get_zone_uri(self):
zone_uri = {}
zone_info = self.get_all_zones_info()
if zone_info and len(zone_info["groups"]) >= 1:
for i in range(0, len(zone_info["groups"])):
zone_uri[zone_info["groups"][i]["name"]] = zone_info["groups"][i]["uri"]
return zone_uri
def shuffle_nodes_in_zones(self, moved_nodes, source_zone, target_zone):
# moved_nodes should be a IP list like
# ["192.168.171.144", "192.168.171.145"]
request = ""
for i in range(0, len(moved_nodes)):
moved_nodes[i] = "ns_1@" + moved_nodes[i]
all_zones = self.get_all_zones_info()
api = self.baseUrl + all_zones["uri"][1:]
moved_node_json = []
for i in range(0, len(all_zones["groups"])):
for node in all_zones["groups"][i]["nodes"]:
if all_zones["groups"][i]["name"] == source_zone:
for n in moved_nodes:
if n == node["otpNode"]:
moved_node_json.append({"otpNode": node["otpNode"]})
zone_json = {}
group_json = []
for i in range(0, len(all_zones["groups"])):
node_j = []
zone_json["uri"] = all_zones["groups"][i]["uri"]
zone_json["name"] = all_zones["groups"][i]["name"]
zone_json["nodes"] = node_j
if not all_zones["groups"][i]["nodes"]:
if all_zones["groups"][i]["name"] == target_zone:
for i in range(0, len(moved_node_json)):
zone_json["nodes"].append(moved_node_json[i])
else:
zone_json["nodes"] = []
else:
for node in all_zones["groups"][i]["nodes"]:
if all_zones["groups"][i]["name"] == source_zone and \
node["otpNode"] in moved_nodes:
pass
else:
node_j.append({"otpNode": node["otpNode"]})
if all_zones["groups"][i]["name"] == target_zone:
for k in range(0, len(moved_node_json)):
node_j.append(moved_node_json[k])
zone_json["nodes"] = node_j
group_json.append({"name": zone_json["name"], "uri": zone_json["uri"], "nodes": zone_json["nodes"]})
request = '{{"groups": {0} }}'.format(json.dumps(group_json))
status, content, header = self._http_request(api, "PUT", params=request)
# sample request format
# request = ' {"groups":[{"uri":"/pools/default/serverGroups/0","nodes": [] },\
# {"uri":"/pools/default/serverGroups/c8275b7a88e6745c02815dde4a505e70","nodes": [] },\
# {"uri":"/pools/default/serverGroups/1acd9810a027068bd14a1ddd43db414f","nodes": \
# [{"otpNode":"[email protected]"},{"otpNode":"[email protected]"}]} ]} '
return status
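# Illustrative usage (IPs and group names are hypothetical):
#   rest.shuffle_nodes_in_zones(["192.168.171.144"], "Group 1", "Group 2")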
def is_zone_exist(self, zone_name):
found = False
zones = self.get_zone_names()
if zones:
for zone in zones:
if zone_name == zone:
found = True
return True
if not found:
log.error("There is not zone with name: {0} in cluster.".format(zone_name))
return False
def get_items_info(self, keys, bucket='default'):
items_info = {}
for key in keys:
api = '{0}{1}{2}/docs/{3}'.format(self.baseUrl, 'pools/default/buckets/', bucket, key)
status, content, header = self._http_request(api)
if status:
items_info[key] = json.loads(content)
return items_info
def start_cluster_logs_collection(self, nodes="*", upload=False, \
uploadHost=None, customer="", ticket=""):
if not upload:
params = urllib.parse.urlencode({"nodes":nodes})
else:
params = urllib.parse.urlencode({"nodes":nodes, "uploadHost":uploadHost, \
"customer":customer, "ticket":ticket})
api = self.baseUrl + "controller/startLogsCollection"
status, content, header = self._http_request(api, "POST", params)
return status, content
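# Illustrative usage (upload host and customer are placeholders):
#   rest.start_cluster_logs_collection(nodes="*", upload=True,
#                                      uploadHost="uploads.example.com", customer="acme")
#   progress, status, per_node = rest.get_cluster_logs_collection_status()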
def get_cluster_logs_collection_info(self):
api = self.baseUrl + "pools/default/tasks/"
status, content, header = self._http_request(api, "GET")
if status:
tmp = json.loads(content)
for k in tmp:
if k["type"] == "clusterLogsCollection":
content = k
return content
return None
""" result["progress"]: progress logs collected at cluster level
result["status]: status logs collected at cluster level
result["perNode"]: all information logs collected at each node """
def get_cluster_logs_collection_status(self):
result = self.get_cluster_logs_collection_info()
if result:
return result["progress"], result["status"], result["perNode"]
return None, None, None
def cancel_cluster_logs_collection(self):
api = self.baseUrl + "controller/cancelLogsCollection"
status, content, header = self._http_request(api, "POST")
return status, content
def set_log_redaction_level(self, redaction_level="none"):
api = self.baseUrl + "settings/logRedaction"
params = urllib.parse.urlencode({"logRedactionLevel":redaction_level})
status, content, header = self._http_request(api, "POST", params)
if status:
result = json.loads(content)
if result["logRedactionLevel"] == redaction_level:
return True
else:
return False
return False
def get_bucket_CCCP(self, bucket):
log.info("Getting CCCP config ")
api = '%spools/default/b/%s' % (self.baseUrl, bucket)
if isinstance(bucket, Bucket):
api = '%spools/default/b/%s' % (self.baseUrl, bucket.name)
status, content, header = self._http_request(api)
if status:
return json.loads(content)
return None
def get_recovery_task(self):
content = self.ns_server_tasks()
for item in content:
if item["type"] == "recovery":
return item
return None
def get_recovery_progress(self, recoveryStatusURI):
api = '%s%s' % (self.baseUrl, recoveryStatusURI)
status, content, header = self._http_request(api)
if status:
return json.loads(content)
return None
def get_warming_up_tasks(self):
tasks = self.ns_server_tasks()
tasks_warmup = []
for task in tasks:
if task["type"] == "warming_up":
tasks_warmup.append(task)
return tasks_warmup
def compact_bucket(self, bucket="default"):
api = self.baseUrl + 'pools/default/buckets/{0}/controller/compactBucket'.format(bucket)
status, content, header = self._http_request(api, 'POST')
if status:
log.info('bucket compaction successful')
else:
raise BucketCompactionException(bucket)
return True
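# Illustrative usage:
#   rest.compact_bucket("default")            # raises BucketCompactionException on failure
#   rest.cancel_bucket_compaction("default")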
def cancel_bucket_compaction(self, bucket="default"):
api = self.baseUrl + 'pools/default/buckets/{0}/controller/cancelBucketCompaction'.format(bucket)
if isinstance(bucket, Bucket):
api = self.baseUrl + 'pools/default/buckets/{0}/controller/cancelBucketCompaction'.format(bucket.name)
status, content, header = self._http_request(api, 'POST')
log.info("Status is {0}".format(status))
if status:
log.info('Cancel bucket compaction successful')
else:
raise BucketCompactionException(bucket)
return True
def set_bucket_compressionMode(self, bucket="default", mode="passive"):
api = self.baseUrl + "pools/default/buckets/" + bucket
body = {'compressionMode': mode}
params = urllib.parse.urlencode(body)
headers = self._create_headers()
status, content, header = self._http_request(api, 'POST', params=params, headers=headers)
log.info("{0} with params: {1}".format(api, params))
if not status:
raise Exception("Unable to set compressionMode {0} for bucket {1}".format(mode, bucket))
'''LDAP Rest API '''
'''
clearLDAPSettings - Function to clear LDAP settings
Parameter - None
Returns -
status of LDAPAuth clear command
'''
def clearLDAPSettings (self):
api = self.baseUrl + 'settings/saslauthdAuth'
params = urllib.parse.urlencode({'enabled':'false'})
status, content, header = self._http_request(api, 'POST', params)
return status, content, header
'''
ldapUserRestOperation - Execute LDAP REST API
Input Parameter -
authOperation - whether LDAP auth needs to be enabled or disabled - True or False
adminUser - a list of usernames to add to full admin matching with ldap
ROadminUser - a list of usernames to add to RO Admin
Returns - status, content and header for the command executed
'''
def ldapUserRestOperation(self, authOperation, adminUser='', ROadminUser='', exclude=None):
if (authOperation):
authOperation = 'true'
else:
authOperation = 'false'
currAdmins = ''
currROAdmins = ''
if (adminUser != ''):
for user in adminUser:
currAdmins = user[0] + "\n\r" + currAdmins
if (ROadminUser != ''):
for user in ROadminUser:
currROAdmins = user[0] + "\n\r" + currROAdmins
content = self.executeLDAPCommand(authOperation, currAdmins, currROAdmins, exclude)
'''
executeLDAPCommand - Execute LDAP REST API
Input Parameter -
authOperation - whether LDAP auth needs to be enabled or disabled - True or False
currAdmins - a list of usernames to add to full admin matching with ldap
currROAdmins - a list of usernames to add to RO Admin
Returns - status, content and header for the command executed
'''
def executeLDAPCommand(self, authOperation, currAdmins, currROAdmins, exclude=None):
api = self.baseUrl + "settings/saslauthdAuth"
if (exclude is None):
log.info ("into exclude is None")
params = urllib.parse.urlencode({
'enabled': authOperation,
'admins': '{0}'.format(currAdmins),
'roAdmins': '{0}'.format(currROAdmins),
})
else:
log.info ("Into exclude for value of fullAdmin {0}".format(exclude))
if (exclude == 'fullAdmin'):
params = urllib.parse.urlencode({
'enabled': authOperation,
'roAdmins': '{0}'.format(currROAdmins),
})
else:
log.info ("Into exclude for value of fullAdmin {0}".format(exclude))
params = urllib.parse.urlencode({
'enabled': authOperation,
'admins': '{0}'.format(currAdmins),
})
status, content, header = self._http_request(api, 'POST', params)
return content
'''
validateLogin - Validate whether a user can log in using the REST API
Input Parameters - user and password to check for login, plus a boolean `login`
indicating whether the login is expected to succeed (HTTP 200) or
fail (HTTP 400); any other status counts as a mismatch
Returns - True or False depending on whether the observed result matches the expected one
'''
def validateLogin(self, user, password, login, getContent=False):
api = self.baseUrl + "uilogin"
header = {'Content-type': 'application/x-www-form-urlencoded'}
params = urllib.parse.urlencode({'user':'{0}'.format(user), 'password':'{0}'.format(password)})
log.info ("value of param is {0}".format(params))
http = httplib2.Http()
status, content = http.request(api, 'POST', headers=header, body=params)
log.info ("Status of login command - {0}".format(status))
if (getContent):
return status, content
if ((status['status'] == "200" and login == True) or (status ['status'] == "400" and login == False)):
return True
else:
return False
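# Hedged example (assumption: `rest` is an instance of the REST client class in
# this module). validateLogin() POSTs to /uilogin and compares the HTTP status
# against the expected outcome passed in `login`; the credentials below are
# placeholders.
#
#     ok = rest.validateLogin("ro_user", "wrong_password", login=False)
#     # ok is True when the login fails as expected (HTTP 400)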
'''
ldapRestOperationGetResponse - Get the current LDAPAuth settings
Returns - list of Admins, ROAdmins and whether LDAPAuth is enabled
'''
def ldapRestOperationGetResponse(self):
log.info ("GET command for LDAP Auth")
api = self.baseUrl + "settings/saslauthdAuth"
status, content, header = self._http_request(api, 'GET')
return json.loads(content)
'''
executeValidateCredentials - API to check credentials of users
Input - user and password that needs validation
Returns -
[role]:<currentrole>
[source]:<saslauthd,builtin>
'''
def executeValidateCredentials(self, user, password):
api = self.baseUrl + "validateCredentials"
params = urllib.parse.urlencode({
'user':'{0}'.format(user),
'password':'{0}'.format(password)
})
status, content, header = self._http_request(api, 'POST', params)
log.info ("Status of executeValidateCredentials command - {0}".format(status))
return status, json.loads(content)
'''MadHatter LDAP Group Support'''
'''
Assign group roles
'''
def add_group_role(self,group_name,description,roles,ldap_group_ref=None):
api = self.baseUrl + "/settings/rbac/groups/" + group_name
if ldap_group_ref is not None:
params = urllib.parse.urlencode({
'description':'{0}'.format(description),
'roles':'{0}'.format(roles),
'ldap_group_ref':'{0}'.format(ldap_group_ref)
})
else:
params = urllib.parse.urlencode({
'description':'{0}'.format(description),
'roles':'{0}'.format(roles)
})
status, content, header = self._http_request(api, 'PUT', params)
log.info ("Status of Adding role to group command is {0}".format(status))
return status, json.loads(content)
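# Hedged sketch of the group-role helpers above and below; the group name,
# description and role string are illustrative only, and `rest` is assumed to
# be an instance of the REST client class defined in this module.
#
#     status, resp = rest.add_group_role("qa_group", "QA read-only group",
#                                        "ro_admin", ldap_group_ref=None)
#     status, groups = rest.get_group_list()
#     rest.delete_group("qa_group")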
def delete_group(self,group_name):
api = self.baseUrl + "/settings/rbac/groups/" + group_name
status, content, header = self._http_request(api, 'DELETE')
log.info ("Status of Delete role from CB is {0}".format(status))
return status, json.loads(content)
def get_group_list(self):
api = self.baseUrl + "/settings/rbac/groups/"
status, content, header = self._http_request(api, 'GET')
return status, json.loads(content)
def get_group_details(self, group_name):
api = self.baseUrl + "/settings/rbac/groups/" + group_name
status, content, header = self._http_request(api, 'GET')
return status, json.loads(content)
def add_user_group(self,group_name,user_name):
api = self.baseUrl + "/settings/rbac/users/local/" + user_name
params = urllib.parse.urlencode({
'groups':'{0}'.format(group_name)
})
status, content, header = self._http_request(api, 'PUT', params)
log.info ("Status of Adding role to group command is {0}".format(status))
return status, json.loads(content)
def get_user_group(self,user_name):
api = self.baseUrl + "/settings/rbac/users/local/" + user_name
status, content, header = self._http_request(api, 'GET')
log.info ("Status of Adding role to group command is {0}".format(status))
return status, json.loads(content)
def grp_invalidate_cache(self):
api = self.baseUrl + "/settings/invalidateLDAPCache/"
status, content, header = self._http_request(api, 'POST')
log.info("Status of Adding role to group command is {0}".format(status))
return status, json.loads(content)
def invalidate_ldap_cache(self):
api = self.baseUrl + '/settings/invalidateLDAPCache'
status, content, header = self._http_request(api, 'POST')
log.info("Status of Invalidate LDAP Cached is {0}".format(status))
return status, json.loads(content)
def ldap_validate_conn(self):
api = self.baseUrl + "/settings/ldap/validate/connectivity"
status, content, header = self._http_request(api, 'POST')
log.info("Status of Adding role to group command is {0}".format(status))
return status, json.loads(content)
def ldap_validate_authen(self, user_name, password='password'):
api = self.baseUrl + "/settings/ldap/validate/authentication"
params = urllib.parse.urlencode({
'auth_user': '{0}'.format(user_name),
'auth_pass': '{0}'.format(password)
})
status, content, header = self._http_request(api, 'POST', params)
log.info("Status of Adding role to group command is {0}".format(status))
return status, json.loads(content)
def ldap_validate_grp_query(self, user):
api = self.baseUrl + "/settings/ldap/validate/groups_query"
params = urllib.parse.urlencode({
'groups_query_user':'{0}'.format(user)
})
status, content, header = self._http_request(api, 'POST',params)
log.info ("Status of Adding role to group command is {0}".format(status))
return status, json.loads(content)
def setup_ldap(self, data, extraparam):
api = self.baseUrl + '/settings/ldap/'
params = urllib.parse.urlencode(data)
params = params + "&" + extraparam
status, content, header = self._http_request(api, 'POST',params)
log.info ("Status of Setting up LDAP command is {0}".format(status))
return status, json.loads(content)
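# Hedged example of driving the LDAP settings endpoint above. The host, DN and
# extra parameter are placeholders, not values from this repo; `rest` is
# assumed to be an instance of the REST client class defined in this module.
#
#     data = {'hosts': 'ldap.example.com',
#             'port': '389',
#             'bindDN': 'cn=admin,dc=example,dc=com',
#             'bindPass': 'password'}
#     status, resp = rest.setup_ldap(data, 'authenticationEnabled=true')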
'''
Audit Commands
'''
'''
getAuditSettings - API returns audit settings for Audit
Input - None
Returns -
[archive_path]:<path for archive>
[auditd_enabled]:<enabled disabled status for auditd>
[log_path]:<path for logs>
[rotate_interval]:<log rotate interval>
'''
def getAuditSettings(self):
api = self.baseUrl + "settings/audit"
status, content, header = self._http_request(api, 'GET')
return json.loads(content)
'''
setAuditSettings - API to set audit settings for Audit
Input -
[log_path]:<path for audit logs>
[auditd_enabled]:<enabled/disabled status for auditd>
[rotate_interval]:<log rotate interval in seconds>
'''
def setAuditSettings(self, enabled='true', rotateInterval=86400, logPath='/opt/couchbase/var/lib/couchbase/logs', services_to_disable=None):
api = self.baseUrl + "settings/audit"
params = {'rotateInterval':'{0}'.format(rotateInterval),
'auditdEnabled':'{0}'.format(enabled),
'logPath':'{0}'.format(logPath)}
if services_to_disable:
params['disabled'] = ",".join(services_to_disable)
params = urllib.parse.urlencode(params)
status, content, header = self._http_request(api, 'POST', params)
log.info ("Value os status is {0}".format(status))
log.info ("Value of content is {0}".format(content))
if status:
return status
else:
return status, json.loads(content)
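# Hedged usage sketch for the audit helpers above (`rest` is assumed to be an
# instance of the REST client class defined in this module; the rotate interval
# is only an example).
#
#     rest.setAuditSettings(enabled='true', rotateInterval=86400)
#     settings = rest.getAuditSettings()   # inspect the returned dict for the enabled flag and log path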
def get_audit_descriptors(self):
api = self.baseUrl + "/settings/audit/descriptors"
status, content, header = self._http_request(api, 'GET', headers=self._create_capi_headers())
return json.loads(content) if status else None
def _set_secrets_password(self, new_password):
api = self.baseUrl + "/node/controller/changeMasterPassword"
params = urllib.parse.urlencode({
'newPassword': '{0}'.format(new_password.strip())
})
log.info("Params getting set is ---- {0}".format(params))
params = params.replace('%24', '$')
params = params.replace('%3D', '=')
log.info("Params getting set is ---- {0}".format(params))
status, content, header = self._http_request(api, 'POST', params)
log.info("Status of set password command - {0}".format(status))
log.info("Content of the response is {0}".format(content))
log.info ("Header of the response is {0}".format(header))
return status
def set_downgrade_storage_mode_with_rest(self, downgrade=True, username="Administrator",
password="password"):
authorization = self.get_authorization(username, password)
if downgrade:
api = self.index_baseUrl + 'settings/storageMode?downgrade=true'
else:
api = self.index_baseUrl + 'settings/storageMode?downgrade=false'
headers = {'Content-type': 'application/json','Authorization': 'Basic %s'
% authorization}
status, content, header = self._http_request(api, 'POST', headers=headers)
if not status:
raise Exception(content)
return json.loads(content)
def create_index_with_rest(self, create_info, username="Administrator", password="password"):
log.info("CREATE INDEX USING REST WITH PARAMETERS: " + str(create_info))
authorization = self.get_authorization(username, password)
api = self.index_baseUrl + 'internal/indexes?create=true'
headers = {'Content-type': 'application/json','Authorization': 'Basic %s' % authorization}
params = json.loads("{0}".format(create_info).replace('\'', '"').replace('True', 'true').replace('False', 'false'))
status, content, header = self._http_request(api, 'POST', headers=headers,
params=json.dumps(params).encode("ascii", "ignore"))
if not status:
raise Exception(content)
return json.loads(content)
def build_index_with_rest(self, id, username="Administrator", password="password"):
credentials = '{}:{}'.format(self.username, self.password)
authorization = base64.encodebytes(credentials.encode('utf-8'))
authorization = authorization.decode('utf-8').rstrip('\n')
api = self.index_baseUrl + 'internal/indexes?build=true'
build_info = {'ids': [id]}
headers = {'Content-type': 'application/json','Authorization': 'Basic %s' % authorization}
status, content, header = self._http_request(api, 'PUT', headers=headers,
params=json.dumps(build_info))
if not status:
raise Exception(content)
return json.loads(content)
def drop_index_with_rest(self, id, username="Administrator", password="password"):
authorization = self.get_authorization(username, password)
url = 'internal/index/{0}'.format(id)
api = self.index_baseUrl + url
headers = {'Content-type': 'application/json','Authorization': 'Basic %s' % authorization}
status, content, header = self._http_request(api, 'DELETE', headers=headers)
if not status:
raise Exception(content)
def get_all_indexes_with_rest(self, username="Administrator", password="password"):
credentials = '{}:{}'.format(self.username, self.password)
authorization = base64.encodebytes(credentials.encode('utf-8'))
authorization = authorization.decode('utf-8').rstrip('\n')
url = 'internal/indexes'
api = self.index_baseUrl + url
headers = {'Content-type': 'application/json','Authorization': 'Basic %s' % authorization}
status, content, header = self._http_request(api, 'GET', headers=headers)
if not status:
raise Exception(content)
return json.loads(content)
def lookup_gsi_index_with_rest(self, id, body, username="Administrator", password="password"):
authorization = self.get_authorization(username, password)
url = 'internal/index/{0}?lookup=true'.format(id)
api = self.index_baseUrl + url
headers = {'Content-type': 'application/json','Authorization': 'Basic %s' % authorization}
params = json.loads("{0}".format(body).replace('\'', '"').replace('True', 'true').replace('False', 'false'))
status, content, header = self._http_request(api, 'GET', headers=headers,
params=json.dumps(params).encode("ascii", "ignore"))
if not status:
raise Exception(content)
return json.loads(content)
def full_table_scan_gsi_index_with_rest(self, id, body, username="Administrator", password="password"):
if "limit" not in list(body.keys()):
body["limit"] = 900000
authorization = self.get_authorization(username, password)
url = 'internal/index/{0}?scanall=true'.format(id)
api = self.index_baseUrl + url
headers = {'Content-type': 'application/json','Authorization': 'Basic %s' % authorization}
params = json.loads("{0}".format(body).replace('\'', '"').replace('True', 'true').replace('False', 'false'))
status, content, header = self._http_request(
api, 'GET', headers=headers,
params=json.dumps(params).encode("ascii", "ignore"))
if not status:
raise Exception(content)
# Following line is added since the content uses chunked encoding
chunkless_content = content.decode().replace("][", ", \n")
return json.loads(chunkless_content)
def range_scan_gsi_index_with_rest(self, id, body, username="Administrator", password="password"):
if "limit" not in list(body.keys()):
body["limit"] = 300000
authorization = self.get_authorization(username, password)
url = 'internal/index/{0}?range=true'.format(id)
api = self.index_baseUrl + url
headers = {'Content-type': 'application/json',
'Authorization': 'Basic %s' % authorization}
params = json.loads("{0}".format(body).replace(
'\'', '"').replace('True', 'true').replace('False', 'false'))
status, content, header = self._http_request(
api, 'GET', headers=headers,
params=json.dumps(params).encode("ascii", "ignore"))
if not status:
raise Exception(content)
#Below line is there because of MB-20758
content = content.split(b'[]')[0].decode()
# Following line is added since the content uses chunked encoding
chunkless_content = content.replace("][", ", \n")
return json.loads(chunkless_content)
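# Hedged sketch of a GSI range scan through the internal index REST API above.
# The index id and scan body are illustrative (the exact field names expected by
# the indexer are an assumption here); `rest` is assumed to be an instance of
# the REST client class defined in this module. Note that the helpers strip the
# chunked-encoding "][" separators before parsing the JSON.
#
#     scan_body = {"low": ["A"], "high": ["Z"], "inclusion": 3, "limit": 100}
#     rows = rest.range_scan_gsi_index_with_rest(index_id, scan_body)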
def multiscan_for_gsi_index_with_rest(self, id, body, username="Administrator", password="password"):
authorization = self.get_authorization(username, password)
url = 'internal/index/{0}?multiscan=true'.format(id)
api = self.index_baseUrl + url
headers = {'Accept': 'application/json','Authorization': 'Basic %s' % authorization}
params = json.loads("{0}".format(body).replace('\'', '"').replace(
'True', 'true').replace('False', 'false').replace(
"~[]{}UnboundedtruenilNA~", "~[]{}UnboundedTruenilNA~"))
params = json.dumps(params).encode("ascii", "ignore").decode().replace("\\\\", "\\")
log.info(json.dumps(params).encode("ascii", "ignore"))
status, content, header = self._http_request(api, 'GET', headers=headers,
params=params)
if not status:
raise Exception(content)
#Below line is there because of MB-20758
content = content.split(b'[]')[0].decode()
# Following line is added since the content uses chunked encoding
chunkless_content = content.replace("][", ", \n")
if chunkless_content:
return json.loads(chunkless_content)
else:
return content
def multiscan_count_for_gsi_index_with_rest(self, id, body, username="Administrator", password="password"):
authorization = self.get_authorization(username, password)
url = 'internal/index/{0}?multiscancount=true'.format(id)
api = self.index_baseUrl + url
headers = {'Accept': 'application/json','Authorization': 'Basic %s' % authorization}
count_cmd_body = body.replace('\'', '"').replace('True', 'true').replace('False', 'false')
count_cmd_body = count_cmd_body.replace("~[]{}UnboundedtruenilNA~", "~[]{}UnboundedTruenilNA~")
params = json.loads(count_cmd_body)
params = json.dumps(params).encode("ascii", "ignore").decode().replace("\\\\", "\\")
log.info(json.dumps(params).encode("ascii", "ignore"))
status, content, header = self._http_request(api, 'GET', headers=headers,
params=params)
if not status:
raise Exception(content)
#Below line is there because of MB-20758
content = content.split(b'[]')[0].decode()
# Following line is added since the content uses chunked encoding
chunkless_content = content.replace("][", ", \n")
if chunkless_content:
return json.loads(chunkless_content)
else:
return content
'Get list of all roles that exist in the system'
def retrive_all_user_role(self):
url = "/settings/rbac/roles"
api = self.baseUrl + url
status, content, header = self._http_request(api, 'GET')
if not status:
raise Exception(content)
return json.loads(content)
'Get list of current users and roles assigned to them'
def retrieve_user_roles(self):
url = "/settings/rbac/users"
api = self.baseUrl + url
status, content, header = self._http_request(api, 'GET')
if not status:
raise Exception(content)
return json.loads(content)
'''
Add/Update user role assignment
user_id=userid of the user to act on
payload=name=<nameofuser>&roles=admin,cluster_admin'''
def set_user_roles(self, user_id, payload):
url = "settings/rbac/users/" + user_id
api = self.baseUrl + url
status, content, header = self._http_request(api, 'PUT', payload)
if not status:
raise Exception(content)
return json.loads(content)
'''
Delete user from couchbase role assignment
user_id=userid of user to act on'''
def delete_user_roles(self, user_id):
url = "settings/rbac/users/local/" + user_id
api = self.baseUrl + url
status, content, header = self._http_request(api, 'DELETE')
if not status:
raise Exception(content)
return json.loads(content)
'''
Returns base64 string of username:password
'''
def get_authorization(self, username, password):
credentials = '{}:{}'.format(username, password)
authorization = base64.encodebytes(credentials.encode('utf-8'))
return authorization.decode('utf-8').rstrip('\n')
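# Minimal sketch (assumption: `rest` is an instance of the REST client class in
# this module): get_authorization() simply base64-encodes "user:password" so it
# can be placed in a Basic auth header, as the eventing/index helpers below do.
#
#     auth = rest.get_authorization("Administrator", "password")
#     headers = {'Authorization': 'Basic %s' % auth}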
'''
Return a list of permissions with True/False indicating whether the user has each one
user_id = userid for checking permission
password = password for userid
permission_set=cluster.bucket[default].stats!read,cluster.bucket[default]!write
'''
def check_user_permission(self, user_id, password, permission_set):
url = "pools/default/checkPermissions/"
api = self.baseUrl + url
authorization = self.get_authorization(user_id, password)
header = {'Content-Type': 'application/x-www-form-urlencoded',
'Authorization': 'Basic %s' % authorization,
'Accept': '*/*'}
status, content, header = self._http_request(api, 'POST', params=permission_set, headers=header)
if not status:
raise Exception(content)
return json.loads(content)
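# Hedged example of the permission check above; the user and permission set are
# placeholders and `rest` is assumed to be an instance of the REST client class
# defined in this module.
#
#     perms = "cluster.bucket[default].stats!read,cluster.bucket[default]!write"
#     result = rest.check_user_permission("ro_user", "password", perms)
#     # result maps each permission string to True/False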
'''
Add/Update user role assignment
user_id=userid of the user to act on
payload=name=<nameofuser>&roles=admin,cluster_admin&password=<password>
if roles=<empty> user will be created with no roles'''
def add_set_builtin_user(self, user_id, payload):
url = "settings/rbac/users/local/" + user_id
api = self.baseUrl + url
status, content, header = self._http_request(api, 'PUT', payload)
if not status:
raise Exception(content)
return json.loads(content)
'''
Add External User
'''
def add_external_user(self,user_id,payload):
url = "settings/rbac/users/external/" + user_id
api = self.baseUrl + url
status, content, header = self._http_request(api, 'PUT', payload)
if not status:
raise Exception(content)
return json.loads(content)
'''
Delete External User
'''
def delete_external_user(self,user_id):
url = "settings/rbac/users/external/" + user_id
api = self.baseUrl + url
status, content, header = self._http_request(api, 'DELETE')
if not status:
raise Exception(content)
return json.loads(content)
'''
Delete built-in user
'''
def delete_builtin_user(self, user_id):
url = "settings/rbac/users/local/" + user_id
api = self.baseUrl + url
status, content, header = self._http_request(api, 'DELETE')
if not status:
raise Exception(content)
return json.loads(content)
'''
Add/Update user role assignment
user_id=userid of the user to act on
password=<new password>'''
def change_password_builtin_user(self, user_id, password):
url = "controller/changePassword/" + user_id
api = self.baseUrl + url
status, content, header = self._http_request(api, 'POST', password)
if not status:
raise Exception(content)
return json.loads(content)
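# Hedged sketch of a local (built-in) RBAC user lifecycle using the helpers
# above; the user name, roles and passwords are examples only, and `rest` is
# assumed to be an instance of the REST client class defined in this module.
#
#     payload = "name=Test User&roles=ro_admin&password=password1"
#     rest.add_set_builtin_user("test_user", payload)
#     rest.change_password_builtin_user("test_user", "password=password2")
#     rest.delete_builtin_user("test_user")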
# Applicable to eventing service
'''
Eventing lifecycle operation
'''
def lifecycle_operation(self, name, operation,body=None):
if self.eventing_role:
authorization = self.get_authorization("eventing_admin", "password")
else:
authorization = self.get_authorization(self.username, self.password)
url = "api/v1/functions/" + name +"/"+ operation
api = self.eventing_baseUrl + url
headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization}
if body != None:
status, content, header = self._http_request(api, 'POST', headers=headers,
params=json.dumps(body).encode("ascii", "ignore"))
else:
status, content, header = self._http_request(api, 'POST', headers=headers)
if not status:
raise Exception(content)
return content
'''
Save the Function so that it is visible in UI
'''
def save_function(self, name, body):
authorization = self.get_authorization(self.username, self.password)
url = "_p/event/saveAppTempStore/?name=" + name
api = self.baseUrl + url
headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization}
status, content, header = self._http_request(api, 'POST', headers=headers,
params=json.dumps(body).encode("ascii", "ignore"))
if not status:
raise Exception(content)
return content
'''
Deploy the Function
'''
def deploy_function(self, name, body):
authorization = self.get_authorization(self.username, self.password)
url = "_p/event/setApplication/?name=" + name
api = self.baseUrl + url
headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization}
status, content, header = self._http_request(api, 'POST', headers=headers,
params=json.dumps(body).encode("ascii", "ignore"))
if not status:
raise Exception(content)
return content
'''
GET all the Functions
'''
def get_all_functions(self):
if self.eventing_role:
authorization = self.get_authorization("eventing_admin", "password")
else:
authorization = self.get_authorization(self.username, self.password)
url = "api/v1/functions"
api = self.eventing_baseUrl + url
headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization}
status, content, header = self._http_request(api, 'GET', headers=headers)
if not status:
raise Exception(content)
return content
'''
Set settings for the Function
'''
def set_settings_for_function(self, name, body):
if self.eventing_role:
authorization = self.get_authorization("eventing_admin", "password")
else:
authorization = self.get_authorization(self.username, self.password)
url = "api/v1/functions/" + name +"/settings"
api = self.eventing_baseUrl + url
headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization}
status, content, header = self._http_request(api, 'POST', headers=headers,
params=json.dumps(body).encode("ascii", "ignore"))
if not status:
raise Exception(content)
return content
'''
deploy the Function
'''
def deploy_function_by_name(self, name):
if self.eventing_role:
authorization = self.get_authorization("eventing_admin", "password")
else:
authorization = self.get_authorization(self.username, self.password)
url = "api/v1/functions/" + name + "/settings"
body = {"deployment_status": True, "processing_status": True}
api = self.eventing_baseUrl + url
headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization}
status, content, header = self._http_request(api, 'POST', headers=headers,
params=json.dumps(body).encode("ascii", "ignore"))
if not status:
raise Exception(content)
return content
'''
pause the Function
'''
def pause_function_by_name(self, name):
if self.eventing_role:
authorization = self.get_authorization("eventing_admin", "password")
else:
authorization = self.get_authorization(self.username, self.password)
url = "api/v1/functions/" + name + "/settings"
body = {"deployment_status": True, "processing_status": False}
api = self.eventing_baseUrl + url
headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization}
status, content, header = self._http_request(api, 'POST', headers=headers,
params=json.dumps(body).encode("ascii", "ignore"))
if not status:
raise Exception(content)
return content
'''
undeploy the Function
'''
def undeploy_function(self, name):
if self.eventing_role:
authorization = self.get_authorization("eventing_admin", "password")
else:
authorization = self.get_authorization(self.username, self.password)
url = "api/v1/functions/" + name +"/settings"
body= {"deployment_status": False, "processing_status": False}
api = self.eventing_baseUrl + url
headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization}
status, content, header = self._http_request(api, 'POST', headers=headers,
params=json.dumps(body).encode("ascii", "ignore"))
if not status:
raise Exception(content)
return content
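# Hedged sketch of an eventing handler lifecycle via the settings endpoints
# above ("Function_1" is a placeholder handler name; `rest` is assumed to be an
# instance of the REST client class defined in this module).
#
#     rest.deploy_function_by_name("Function_1")   # deployment_status/processing_status -> True/True
#     rest.pause_function_by_name("Function_1")    # True/False
#     rest.undeploy_function("Function_1")         # False/False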
'''
Delete all the functions
'''
def delete_all_function(self):
if self.eventing_role:
authorization = self.get_authorization("eventing_admin", "password")
else:
authorization = self.get_authorization(self.username, self.password)
url = "api/v1/functions"
api = self.eventing_baseUrl + url
headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization}
status, content, header = self._http_request(api, 'DELETE', headers=headers)
if not status:
raise Exception(content)
return content
'''
Delete single function
'''
def delete_single_function(self, name):
if self.eventing_role:
authorization = self.get_authorization("eventing_admin", "password")
else:
authorization = self.get_authorization(self.username, self.password)
url = "api/v1/functions/" + name
api = self.eventing_baseUrl + url
headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization}
status, content, header = self._http_request(api, 'DELETE', headers=headers)
if not status:
raise Exception(content)
return content
'''
Delete the Function from UI
'''
def delete_function_from_temp_store(self, name):
authorization = self.get_authorization(self.username, self.password)
url = "_p/event/deleteAppTempStore/?name=" + name
api = self.baseUrl + url
headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization}
status, content, header = self._http_request(api, 'DELETE', headers=headers)
if not status:
raise Exception(content)
return content
'''
Delete the Function
'''
def delete_function(self, name):
authorization = self.get_authorization(self.username, self.password)
url = "_p/event/deleteApplication/?name=" + name
api = self.baseUrl + url
headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization}
status, content, header = self._http_request(api, 'DELETE', headers=headers)
if not status:
raise Exception(content)
return content
'''
Export the Function
'''
def export_function(self, name):
export_map = {}
if self.eventing_role:
authorization = self.get_authorization("eventing_admin", "password")
else:
authorization = self.get_authorization(self.username, self.password)
url = "api/v1/export/" + name
api = self.eventing_baseUrl + url
headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization}
status, content, header = self._http_request(api, 'GET', headers=headers)
if not status:
raise Exception(content)
if status:
json_parsed = json.loads(content)
for key in list(json_parsed[0].keys()): # returns an array
tokens = key.split(":")
val = json_parsed[0][key]
if len(tokens) == 1:
field = tokens[0]
export_map[field] = val
return export_map
'''
Import the Function
'''
def import_function(self, body):
if self.eventing_role:
authorization = self.get_authorization("eventing_admin", "password")
else:
authorization = self.get_authorization(self.username, self.password)
url = "api/v1/import"
api = self.eventing_baseUrl + url
headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization}
status, content, header = self._http_request(api, 'POST', headers=headers,
params=body)
if not status:
raise Exception(content)
return content
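# Hedged round-trip example for the export/import endpoints above (`rest` is
# assumed to be an instance of the REST client class defined in this module,
# and "Function_1" is a placeholder handler name). The exact JSON shape the
# import endpoint expects is an assumption; this only illustrates the call
# signatures.
#
#     exported = rest.export_function("Function_1")        # flattened dict of the exported app
#     rest.import_function(json.dumps([app_definition]))   # body is the JSON of the app definition(s)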
'''
Get the deployed eventing apps (used to ensure eventing nodes are out of bootstrap)
'''
def get_deployed_eventing_apps(self):
if self.eventing_role:
authorization = self.get_authorization("eventing_admin", "password")
else:
authorization = self.get_authorization(self.username, self.password)
url = "getDeployedApps"
api = self.eventing_baseUrl + url
headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization}
status, content, header = self._http_request(api, 'GET', headers=headers)
if not status:
raise Exception(content)
return json.loads(content)
'''
Get the running eventing apps (used to ensure eventing nodes are out of bootstrap)
'''
def get_running_eventing_apps(self):
if self.eventing_role:
authorization = self.get_authorization("eventing_admin", "password")
else:
authorization = self.get_authorization(self.username, self.password)
url = "getRunningApps"
api = self.eventing_baseUrl + url
headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization}
status, content, header = self._http_request(api, 'GET', headers=headers)
if not status:
raise Exception(content)
return json.loads(content)
'''
composite status of a handler
'''
def get_composite_eventing_status(self):
if self.eventing_role:
authorization = self.get_authorization("eventing_admin", "password")
else:
authorization = self.get_authorization(self.username, self.password)
url = "api/v1/status"
api = self.eventing_baseUrl + url
headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization}
status, content, header = self._http_request(api, 'GET', headers=headers)
if not status:
raise Exception(content)
return json.loads(content)
'''
Get Eventing processing stats
'''
def get_event_processing_stats(self, name, eventing_map=None):
if eventing_map is None:
eventing_map = {}
if self.eventing_role:
authorization = self.get_authorization("eventing_admin", "password")
else:
authorization = self.get_authorization(self.username, self.password)
url = "getEventProcessingStats?name=" + name
api = self.eventing_baseUrl + url
headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization}
status, content, header = self._http_request(api, 'GET', headers=headers)
if status:
json_parsed = json.loads(content)
for key in list(json_parsed.keys()):
tokens = key.split(":")
val = json_parsed[key]
if len(tokens) == 1:
field = tokens[0]
eventing_map[field] = val
return eventing_map
'''
Get Aggregate Eventing processing stats
'''
def get_aggregate_event_processing_stats(self, name, eventing_map=None):
if eventing_map is None:
eventing_map = {}
if self.eventing_role:
authorization = self.get_authorization("eventing_admin", "password")
else:
authorization = self.get_authorization(self.username, self.password)
url = "getAggEventProcessingStats?name=" + name
api = self.eventing_baseUrl + url
headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization}
status, content, header = self._http_request(api, 'GET', headers=headers)
if status:
json_parsed = json.loads(content)
for key in list(json_parsed.keys()):
tokens = key.split(":")
val = json_parsed[key]
if len(tokens) == 1:
field = tokens[0]
eventing_map[field] = val
return eventing_map
'''
Get Eventing execution stats
'''
def get_event_execution_stats(self, name, eventing_map=None):
if eventing_map is None:
eventing_map = {}
if self.eventing_role:
authorization = self.get_authorization("eventing_admin", "password")
else:
authorization = self.get_authorization(self.username, self.password)
url = "getExecutionStats?name=" + name
api = self.eventing_baseUrl + url
headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization}
status, content, header = self._http_request(api, 'GET', headers=headers)
if status:
json_parsed = json.loads(content)
for key in list(json_parsed.keys()):
tokens = key.split(":")
val = json_parsed[key]
if len(tokens) == 1:
field = tokens[0]
eventing_map[field] = val
return eventing_map
'''
Get Eventing failure stats
'''
def get_event_failure_stats(self, name, eventing_map=None):
if eventing_map is None:
eventing_map = {}
if self.eventing_role:
authorization = self.get_authorization("eventing_admin", "password")
else:
authorization = self.get_authorization(self.username, self.password)
url = "getFailureStats?name=" + name
api = self.eventing_baseUrl + url
headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization}
status, content, header = self._http_request(api, 'GET', headers=headers)
if status:
json_parsed = json.loads(content)
for key in list(json_parsed.keys()):
tokens = key.split(":")
val = json_parsed[key]
if len(tokens) == 1:
field = tokens[0]
eventing_map[field] = val
return eventing_map
'''
Get all eventing stats
'''
def get_all_eventing_stats(self, seqs_processed=False, eventing_map=None):
if eventing_map is None:
eventing_map = {}
if self.eventing_role:
authorization = self.get_authorization("eventing_admin", "password")
else:
authorization = self.get_authorization(self.username, self.password)
if seqs_processed:
url = "api/v1/stats?type=full"
else:
url = "api/v1/stats"
api = self.eventing_baseUrl + url
headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization}
status, content, header = self._http_request(api, 'GET', headers=headers)
if not status:
raise Exception(content)
return json.loads(content)
'''
Cleanup eventing
'''
def cleanup_eventing(self):
if self.eventing_role:
authorization = self.get_authorization("eventing_admin", "password")
else:
authorization = self.get_authorization(self.username, self.password)
url = "cleanupEventing"
api = self.eventing_baseUrl + url
headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization}
status, content, header = self._http_request(api, 'GET', headers=headers)
if not status:
raise Exception(content)
return content
'''
enable debugger
'''
def enable_eventing_debugger(self):
authorization = self.get_authorization(self.username, self.password)
url = "_p/event/api/v1/config"
api = self.baseUrl + url
headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization}
body="{\"enable_debugger\": true}"
status, content, header = self._http_request(api, 'POST', headers=headers, params=body)
if not status:
raise Exception(content)
return content
'''
disable debugger
'''
def disable_eventing_debugger(self):
authorization = self.get_authorization(self.username, self.password)
url = "_p/event/api/v1/config"
api = self.baseUrl + url
headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization}
body = "{\"enable_debugger\": false}"
status, content, header = self._http_request(api, 'POST', headers=headers, params=body)
if not status:
raise Exception(content)
return content
'''
Start debugger
'''
def start_eventing_debugger(self, name):
authorization = self.get_authorization(self.username, self.password)
url="/pools/default"
api = self.baseUrl + url
headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization}
status, content, header = self._http_request(api, 'GET', headers=headers)
url = "_p/event/startDebugger/?name=" + name
api = self.baseUrl + url
headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization}
status, content, header = self._http_request(api, 'POST', headers=headers, params=content)
if not status:
raise Exception(content)
return content
'''
Stop debugger
'''
def stop_eventing_debugger(self, name):
authorization = self.get_authorization(self.username, self.password)
url = "_p/event/stopDebugger/?name=" + name
api = self.baseUrl + url
headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization}
status, content, header = self._http_request(api, 'POST', headers=headers)
if not status:
raise Exception(content)
return content
'''
Get debugger url
'''
def get_eventing_debugger_url(self, name):
authorization = self.get_authorization(self.username, self.password)
url = "_p/event/getDebuggerUrl/?name=" + name
api = self.baseUrl + url
headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization}
status, content, header = self._http_request(api, 'POST', headers=headers)
if not status:
raise Exception(content)
return content
'''
allow inter bucket recursion
'''
def allow_interbucket_recursion(self):
authorization = self.get_authorization(self.username, self.password)
url = "_p/event/api/v1/config"
api = self.baseUrl + url
headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization}
body = "{\"allow_interbucket_recursion\": true}"
status, content, header = self._http_request(api, 'POST', headers=headers, params=body)
if not status:
raise Exception(content)
return content
'''
update eventing config
'''
def update_eventing_config(self,body):
authorization = self.get_authorization(self.username, self.password)
url = "_p/event/api/v1/config"
api = self.baseUrl + url
headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization}
status, content, header = self._http_request(api, 'POST', headers=headers, params=body)
if not status:
raise Exception(content)
return content
'''
GET eventing config
'''
def get_eventing_config(self):
authorization = self.get_authorization(self.username, self.password)
url = "_p/event/api/v1/config"
api = self.baseUrl + url
headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization}
status, content, header = self._http_request(api, 'GET', headers=headers, params='')
if not status:
raise Exception(content)
return content
'''
update eventing config function wise
'''
def update_eventing_config_per_function(self, body, name):
authorization = self.get_authorization(self.username, self.password)
url = "api/v1/functions/" + name + "/config"
api = self.eventing_baseUrl + url
headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization}
status, content, header = self._http_request(api, 'POST', headers=headers,
params=json.dumps(body).encode("ascii", "ignore"))
if not status:
raise Exception(content)
return content
'''
GET eventing config for single function
'''
def get_eventing_config_per_function(self, name):
authorization = self.get_authorization(self.username, self.password)
url = "api/v1/functions/" + name + "/config"
api = self.eventing_baseUrl + url
headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization}
status, content, header = self._http_request(api, 'GET', headers=headers, params='')
if not status:
raise Exception(content)
return content
'''
Update function appcode
'''
def update_function_appcode(self, body, name):
authorization = self.get_authorization(self.username, self.password)
url = "api/v1/functions/" + name + "/appcode"
api = self.eventing_baseUrl + url
headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization}
status, content, header = self._http_request(api, 'POST', headers=headers, params=body)
if not status:
raise Exception(content)
return content
'''
Get function appcode
'''
def get_function_appcode(self, name):
authorization = self.get_authorization(self.username, self.password)
url = "api/v1/functions/" + name + "/appcode"
api = self.eventing_baseUrl + url
headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization}
status, content, header = self._http_request(api, 'GET', headers=headers, params='')
if not status:
raise Exception(content)
return content
'''
Get eventing rebalance status
'''
def get_eventing_rebalance_status(self):
if self.eventing_role:
authorization = self.get_authorization("eventing_admin", "password")
else:
authorization = self.get_authorization(self.username, self.password)
url = "getAggRebalanceStatus"
api = self.eventing_baseUrl + url
headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization}
status, content, header = self._http_request(api, 'GET', headers=headers)
if status:
return content
'''
Get application logs
'''
def get_app_logs(self,handler_name):
if self.eventing_role:
authorization = self.get_authorization("eventing_admin", "password")
else:
authorization = self.get_authorization(self.username, self.password)
url = "getAppLog?aggregate=true&name="+handler_name
api = self.eventing_baseUrl + url
headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization}
status, content, header = self._http_request(api, 'GET', headers=headers)
if status:
return content
def create_function(self, name, body):
if self.eventing_role:
authorization = self.get_authorization("eventing_admin", "password")
else:
authorization = self.get_authorization(self.username, self.password)
url = "api/v1/functions/" + name
api = self.eventing_baseUrl + url
headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization}
status, content, header = self._http_request(api, 'POST', headers=headers,
params=json.dumps(body).encode("ascii", "ignore"))
if not status:
raise Exception(content)
return content
def update_function(self, name, body):
if self.eventing_role:
authorization = self.get_authorization("eventing_admin", "password")
else:
authorization = self.get_authorization(self.username, self.password)
url = "api/v1/functions/" + name
api = self.eventing_baseUrl + url
headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization}
body['appname']=name
status, content, header = self._http_request(api, 'POST', headers=headers,
params=json.dumps(body).encode("ascii", "ignore"))
if not status:
raise Exception(content)
return content
def get_function_details(self, name):
if self.eventing_role:
authorization = self.get_authorization("eventing_admin", "password")
else:
authorization = self.get_authorization(self.username, self.password)
url = "api/v1/functions/" + name
api = self.eventing_baseUrl + url
headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization}
status, content, header = self._http_request(api, 'GET', headers=headers)
if not status:
raise Exception(content)
return content
def get_eventing_go_routine_dumps(self):
if self.eventing_role:
authorization = self.get_authorization("eventing_admin", "password")
else:
authorization = self.get_authorization(self.username, self.password)
url = "debug/pprof/goroutine?debug=1"
api = self.eventing_baseUrl + url
headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization}
status, content, header = self._http_request(api, 'GET', headers=headers)
if not status:
raise Exception(content)
return content
def set_eventing_retry(self, name, body):
if self.eventing_role:
authorization = self.get_authorization("eventing_admin", "password")
else:
authorization = self.get_authorization(self.username, self.password)
url = "api/v1/functions/" + name + "/retry"
api = self.eventing_baseUrl + url
headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization}
status, content, header = self._http_request(api, 'POST', headers=headers,
params=json.dumps(body).encode("ascii", "ignore"))
if not status:
raise Exception(content)
return content
def get_user(self, user_id):
url = "settings/rbac/users/"
api = self.baseUrl + url
status, content, header = self._http_request(api, "GET")
if content is not None:
content_json = json.loads(content)
for i in range(len(content_json)):
user = content_json[i]
if user.get('id') == user_id:
return user
return {}
""" From 6.5.0, enable IPv6 on cluster/node needs 2 settings
default is set to IPv6
We need to disable auto failover first, then set network version
Then enable autofaiover again. """
def enable_ip_version(self, afamily='ipv6', afamilyOnly='false'):
log.info("Start enable {0} on this node {1}".format(afamily, self.baseUrl))
self.update_autofailover_settings(False, 60)
params = urllib.parse.urlencode({'afamily': afamily,
'afamilyOnly': afamilyOnly,
'nodeEncryption': 'off'})
api = "{0}node/controller/enableExternalListener".format(self.baseUrl)
status, content, header = self._http_request(api, 'POST', params)
if status:
params = urllib.parse.urlencode({'afamily': afamily,
'afamilyOnly': afamilyOnly,
'nodeEncryption': 'off'})
api = "{0}node/controller/setupNetConfig".format(self.baseUrl)
status, content, header = self._http_request(api, 'POST', params)
if status:
log.info("Done enable {0} on this node {1}".format(afamily, self.baseUrl))
else:
log.error("Failed to set 'setupNetConfig' on this node {0}"
.format(self.baseUrl))
raise Exception(content)
else:
log.error("Failed to set 'enableExternalListener' on this node {0}"
.format(self.baseUrl))
raise Exception(content)
if afamilyOnly == 'true':
api = "{0}node/controller/disableUnusedExternalListeners".format(self.baseUrl)
status, _, _ = self._http_request(api, 'POST', params)
if not status:
log.error("Failed to set 'disableUnusedExternalListeners' on this node {0}"
.format(self.baseUrl))
self.update_autofailover_settings(True, 60)
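# Hedged example (assumption: `rest` is an instance of the REST client class in
# this module): switching a node to IPv6-only uses the helper above, which
# wraps enableExternalListener + setupNetConfig and toggles auto-failover
# around the change.
#
#     rest.enable_ip_version(afamily='ipv6', afamilyOnly='true')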
# These methods are added for Auto-Rebalance On Failure tests
def set_retry_rebalance_settings(self, body):
url = "settings/retryRebalance"
api = self.baseUrl + url
params = urllib.parse.urlencode(body)
headers = self._create_headers()
status, content, header = self._http_request(api, 'POST', headers=headers, params=params)
if not status:
raise Exception(content)
return content
def get_retry_rebalance_settings(self):
authorization = self.get_authorization(self.username, self.password)
url = "settings/retryRebalance"
api = self.baseUrl + url
headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization}
status, content, header = self._http_request(api, 'GET', headers=headers)
if not status:
raise Exception(content)
return content
def get_pending_rebalance_info(self):
authorization = self.get_authorization(self.username, self.password)
url = "pools/default/pendingRetryRebalance"
api = self.baseUrl + url
headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization}
status, content, header = self._http_request(api, 'GET', headers=headers)
if not status:
raise Exception(content)
return content
def cancel_pending_rebalance(self, id):
authorization = self.get_authorization(self.username, self.password)
url = "controller/cancelRebalanceRetry/" + str(id)
api = self.baseUrl + url
headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization}
status, content, header = self._http_request(api, 'POST', headers=headers)
if not status:
raise Exception(content)
return content
# Upload a root certificate
def upload_cluster_ca(self, certificate):
""" Upload a certificate the cluster
This can be a root certificate or an intermediate certificate.
"""
headers = self._create_capi_headers()
headers['Content-Type'] = 'application/octet-stream'
status, content, header = self._http_request(self.baseUrl + "controller/uploadClusterCA", 'POST', headers=headers, params=certificate)
return status, content
def reload_certificate(self):
""" Reload certificate
Call this function after uploading a certificate to the cluster to activate the new certificate.
"""
headers = self._create_capi_headers()
status, content, header = self._http_request(self.baseUrl + "node/controller/reloadCertificate", 'POST', headers=headers)
return status, content
def client_cert_auth(self, state, prefixes):
"""
Args:
state (str): Either 'enable', 'mandatory' or 'disable'.
prefixes (list(dict)): A list of dicts of containing the keys 'path', 'prefix' and 'delimiter'
e.g. {"path": .., "prefix": .., "delimiter", ..}
"""
headers = self._create_capi_headers()
params = json.dumps({'state': state, 'prefixes': prefixes})
status, content, header = self._http_request(self.baseUrl + "settings/clientCertAuth", 'POST', headers=headers, params=params)
return status, content
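# Hedged sketch of the certificate helpers above; the certificate file and
# prefix mapping are placeholders, and `rest` is assumed to be an instance of
# the REST client class defined in this module.
#
#     with open("root_ca.pem") as f:
#         rest.upload_cluster_ca(f.read())
#     rest.reload_certificate()
#     rest.client_cert_auth("enable", [{"path": "subject.cn", "prefix": "", "delimiter": ""}])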
class MembaseServerVersion:
def __init__(self, implementationVersion='', componentsVersion=''):
self.implementationVersion = implementationVersion
self.componentsVersion = componentsVersion
# this class will also contain more node related info
class OtpNode(object):
def __init__(self, id='', status=''):
self.id = id
self.ip = ''
self.replication = ''
self.port = CbServer.port
if CbServer.use_https:
self.port = CbServer.ssl_port
self.gracefulFailoverPossible = 'true'
# extract ns ip from the otpNode string
# it's normally of the form ns_1@<ip address>
if id.find('@') >= 0:
self.ip = id[id.index('@') + 1:]
if self.ip.count(':') > 0:
# raw ipv6? enclose in square brackets
self.ip = '[' + self.ip + ']'
self.status = status
class NodeInfo(object):
def __init__(self):
self.availableStorage = None # list
self.memoryQuota = None
class NodeDataStorage(object):
def __init__(self):
self.type = '' # hdd or ssd
self.path = ''
self.index_path = ''
self.quotaMb = ''
self.state = '' # ok
def __str__(self):
return '{0}'.format({'type': self.type,
'path': self.path,
'index_path' : self.index_path,
'quotaMb': self.quotaMb,
'state': self.state})
def get_data_path(self):
return self.path
def get_index_path(self):
return self.index_path
class NodeDiskStorage(object):
def __init__(self):
self.type = 0
self.path = ''
self.sizeKBytes = 0
self.usagePercent = 0
class Bucket(object):
def __init__(self, bucket_size='', name="", num_replicas=0, port=11211, master_id=None,
type='', eviction_policy="valueOnly", bucket_priority=None, uuid="", lww=False, maxttl=None, bucket_storage=None):
self.name = name
self.port = port
self.type = type
self.nodes = None
self.stats = None
self.servers = []
self.vbuckets = []
self.forward_map = []
self.numReplicas = num_replicas
self.bucket_size = bucket_size
self.kvs = {1:KVStore()}
self.master_id = master_id
self.eviction_policy = eviction_policy
self.bucket_priority = bucket_priority
self.uuid = uuid
self.lww = lww
self.maxttl = maxttl
self.bucket_storage = bucket_storage
def __str__(self):
return self.name
class Node(object):
def __init__(self):
self.uptime = 0
self.memoryTotal = 0
self.memoryFree = 0
self.mcdMemoryReserved = 0
self.mcdMemoryAllocated = 0
self.status = ""
self.hostname = ""
self.clusterCompatibility = ""
self.clusterMembership = ""
self.recoveryType = ""
self.version = ""
self.os = ""
self.ports = []
self.availableStorage = []
self.storage = []
self.memoryQuota = 0
self.moxi = 11211
self.memcached = 11210
self.id = ""
self.ip = ""
self.rest_username = ""
self.rest_password = ""
self.port = 8091
if CbServer.use_https:
self.port = CbServer.ssl_port
self.services = []
self.storageTotalRam = 0
@property
def failed_over_state_a(self):
""" The state in which a node is failed-over and is requesting a recovery type from the user
"""
return self.clusterMembership == "inactiveFailed"
@property
def failed_over_state_b(self):
""" The state in which a node is failed-over and the user has selected a recovery type
"""
return self.clusterMembership == "inactiveAdded" and self.recoveryType
@property
def has_failed_over(self):
""" Returns tree if a node is in the failed-over state
"""
return self.failed_over_state_a or self.failed_over_state_b
@property
def complete_version(self):
""" Returns the complete version of the node (e.g. 6.5.0)
"""
return self.version.split('-')[0]
@property
def major_version(self):
""" Returns the major version of the node (e.g. 6.5)
"""
return self.complete_version.rsplit('.', 1)[0]
@property
def minor_version(self):
""" Returns the minor version of the node (e.g. 0)
"""
return self.complete_version.rsplit('.', 1)[1]
class AutoFailoverSettings(object):
def __init__(self):
self.enabled = True
self.timeout = 0
self.count = 0
self.failoverOnDataDiskIssuesEnabled = False
self.failoverOnDataDiskIssuesTimeout = 0
self.maxCount = 1
self.failoverServerGroup = False
self.can_abort_rebalance = False
class AutoReprovisionSettings(object):
def __init__(self):
self.enabled = True
self.max_nodes = 0
self.count = 0
class NodePort(object):
def __init__(self):
self.proxy = 0
self.direct = 0
class BucketStats(object):
def __init__(self):
self.opsPerSec = 0
self.itemCount = 0
self.diskUsed = 0
self.memUsed = 0
self.ram = 0
class vBucket(object):
def __init__(self):
self.master = ''
self.replica = []
self.id = -1
class RestParser(object):
def parse_index_status_response(self, parsed):
index_map = {}
for map in parsed["indexes"]:
bucket_name = map['bucket']
if bucket_name not in list(index_map.keys()):
index_map[bucket_name] = {}
index_name = map['index']
index_map[bucket_name][index_name] = {}
index_map[bucket_name][index_name]['status'] = map['status']
index_map[bucket_name][index_name]['progress'] = str(map['progress'])
index_map[bucket_name][index_name]['definition'] = map['definition']
if len(map['hosts']) == 1:
index_map[bucket_name][index_name]['hosts'] = map['hosts'][0]
else:
index_map[bucket_name][index_name]['hosts'] = map['hosts']
index_map[bucket_name][index_name]['id'] = map['id']
return index_map
def parse_index_stats_response(self, parsed, index_map=None):
if index_map == None:
index_map = {}
for key in list(parsed.keys()):
tokens = key.split(":")
val = parsed[key]
if len(tokens) == 3 and 'MAINT_STREAM' not in tokens[0] and 'INIT_STREAM' not in tokens[0]:
bucket = tokens[0]
index_name = tokens[1]
stats_name = tokens[2]
if bucket not in list(index_map.keys()):
index_map[bucket] = {}
if index_name not in list(index_map[bucket].keys()):
index_map[bucket][index_name] = {}
index_map[bucket][index_name][stats_name] = val
return index_map
def parse_index_stats_response_collections(self, parsed, index_map=None):
if index_map == None:
index_map = {}
for key in list(parsed.keys()):
tokens = key.split(":")
val = parsed[key]
if len(tokens) == 3 and 'MAINT_STREAM' not in tokens[0] and 'INIT_STREAM' not in tokens[0]:
bucket = tokens[0]
index_name = tokens[1]
stats_name = tokens[2]
if bucket not in list(index_map.keys()):
index_map[bucket] = {}
if index_name not in list(index_map[bucket].keys()):
index_map[bucket][index_name] = {}
index_map[bucket][index_name][stats_name] = val
elif len(tokens) == 5 and 'MAINT_STREAM' not in tokens[0] and 'INIT_STREAM' not in tokens[0]:
bucket = tokens[0]
scope_name = tokens[1]
collection_name = tokens[2]
index_name = tokens[3]
stats_name = tokens[4]
keyspace = f'default:{bucket}.{scope_name}.{collection_name}'
if keyspace not in list(index_map.keys()):
index_map[keyspace] = {}
if index_name not in list(index_map[keyspace].keys()):
index_map[keyspace][index_name] = {}
index_map[keyspace][index_name][stats_name] = val
return index_map
def parse_get_nodes_response(self, parsed):
node = Node()
node.uptime = parsed['uptime']
node.memoryFree = parsed['memoryFree']
node.memoryTotal = parsed['memoryTotal']
node.mcdMemoryAllocated = parsed['mcdMemoryAllocated']
node.mcdMemoryReserved = parsed['mcdMemoryReserved']
node.status = parsed['status']
node.hostname = parsed['hostname']
node.clusterCompatibility = parsed['clusterCompatibility']
node.clusterMembership = parsed['clusterMembership']
if 'recoveryType' in parsed:
node.recoveryType = parsed['recoveryType']
node.version = parsed['version']
node.curr_items = 0
if 'interestingStats' in parsed and 'curr_items' in parsed['interestingStats']:
node.curr_items = parsed['interestingStats']['curr_items']
node.port = parsed["hostname"][parsed["hostname"].rfind(":") + 1:]
if CbServer.use_https:
str_node_port = CbServer.ssl_port_map.get(str(node.port), str(node.port))
if type(node.port) == int:
node.port = int(str_node_port)
node.os = parsed['os']
if "services" in parsed:
node.services = parsed["services"]
if "otpNode" in parsed:
node.id = parsed["otpNode"]
if "hostname" in parsed:
# should work for both: ipv4 and ipv6
node.ip = parsed["hostname"].rsplit(":", 1)[0]
# memoryQuota
if 'memoryQuota' in parsed:
node.memoryQuota = parsed['memoryQuota']
if 'availableStorage' in parsed:
availableStorage = parsed['availableStorage']
for key in availableStorage:
# let's assume there is only one disk in each node
dict_parsed = parsed['availableStorage']
if 'path' in dict_parsed and 'sizeKBytes' in dict_parsed and 'usagePercent' in dict_parsed:
diskStorage = NodeDiskStorage()
diskStorage.path = dict_parsed['path']
diskStorage.sizeKBytes = dict_parsed['sizeKBytes']
diskStorage.type = key
diskStorage.usagePercent = dict_parsed['usagePercent']
node.availableStorage.append(diskStorage)
log.info(diskStorage)
if 'storage' in parsed:
storage = parsed['storage']
for key in storage:
disk_storage_list = storage[key]
for dict_parsed in disk_storage_list:
if 'path' in dict_parsed and 'state' in dict_parsed and 'quotaMb' in dict_parsed:
dataStorage = NodeDataStorage()
dataStorage.path = dict_parsed['path']
dataStorage.index_path = dict_parsed.get('index_path', '')
dataStorage.quotaMb = dict_parsed['quotaMb']
dataStorage.state = dict_parsed['state']
dataStorage.type = key
node.storage.append(dataStorage)
# ports: {"proxy": 11211, "direct": 11210}
if "ports" in parsed:
ports = parsed["ports"]
if "proxy" in ports:
node.moxi = ports["proxy"]
if "direct" in ports:
node.memcached = ports["direct"]
if CbServer.use_https:
node.memcached = int(CbServer.ssl_port_map.get(str(node.memcached), str(node.memcached)))
if "storageTotals" in parsed:
storageTotals = parsed["storageTotals"]
if storageTotals.get("ram"):
if storageTotals["ram"].get("total"):
ramKB = storageTotals["ram"]["total"]
node.storageTotalRam = ramKB//(1024*1024)
if node.mcdMemoryReserved == 0:
node.mcdMemoryReserved = node.storageTotalRam
if IS_CONTAINER:
# the storage total values are more accurate than
# mcdMemoryReserved - which is container host memory
node.mcdMemoryReserved = node.storageTotalRam * 0.70
return node
def parse_get_bucket_response(self, response):
parsed = json.loads(response)
return self.parse_get_bucket_json(parsed)
def parse_get_bucket_json(self, parsed):
bucket = Bucket()
bucket.name = parsed['name']
bucket.uuid = parsed['uuid']
bucket.type = parsed['bucketType']
if 'proxyPort' in parsed:
bucket.port = parsed['proxyPort']
bucket.nodes = list()
if 'vBucketServerMap' in parsed:
vBucketServerMap = parsed['vBucketServerMap']
serverList = vBucketServerMap['serverList']
bucket.servers.extend(serverList)
if "numReplicas" in vBucketServerMap:
bucket.numReplicas = vBucketServerMap["numReplicas"]
# vBucketMapForward
if 'vBucketMapForward' in vBucketServerMap:
# let's gather the forward map
vBucketMapForward = vBucketServerMap['vBucketMapForward']
counter = 0
for vbucket in vBucketMapForward:
# there will be n number of replicas
vbucketInfo = vBucket()
vbucketInfo.master = serverList[vbucket[0]]
if vbucket:
for i in range(1, len(vbucket)):
if vbucket[i] != -1:
vbucketInfo.replica.append(serverList[vbucket[i]])
vbucketInfo.id = counter
counter += 1
bucket.forward_map.append(vbucketInfo)
vBucketMap = vBucketServerMap['vBucketMap']
counter = 0
for vbucket in vBucketMap:
# there will be n number of replicas
vbucketInfo = vBucket()
vbucketInfo.master = serverList[vbucket[0]]
if vbucket:
for i in range(1, len(vbucket)):
if vbucket[i] != -1:
vbucketInfo.replica.append(serverList[vbucket[i]])
vbucketInfo.id = counter
counter += 1
bucket.vbuckets.append(vbucketInfo)
# now go through each vbucket and populate the info
# who is master , who is replica
# get the 'storageTotals'
log.debug('read {0} vbuckets'.format(len(bucket.vbuckets)))
stats = parsed['basicStats']
# vBucketServerMap
bucketStats = BucketStats()
log.debug('stats:{0}'.format(stats))
bucketStats.opsPerSec = stats['opsPerSec']
bucketStats.itemCount = stats['itemCount']
if bucket.type != "memcached":
bucketStats.diskUsed = stats['diskUsed']
bucketStats.memUsed = stats['memUsed']
quota = parsed['quota']
bucketStats.ram = quota['ram']
bucket.stats = bucketStats
nodes = parsed['nodes']
for nodeDictionary in nodes:
node = Node()
node.uptime = nodeDictionary['uptime']
node.memoryFree = nodeDictionary['memoryFree']
node.memoryTotal = nodeDictionary['memoryTotal']
node.mcdMemoryAllocated = nodeDictionary['mcdMemoryAllocated']
node.mcdMemoryReserved = nodeDictionary['mcdMemoryReserved']
node.status = nodeDictionary['status']
node.hostname = nodeDictionary['hostname']
if 'clusterCompatibility' in nodeDictionary:
node.clusterCompatibility = nodeDictionary['clusterCompatibility']
if 'clusterMembership' in nodeDictionary:
node.clusterMembership = nodeDictionary['clusterMembership']
node.version = nodeDictionary['version']
node.os = nodeDictionary['os']
if "ports" in nodeDictionary:
ports = nodeDictionary["ports"]
if "proxy" in ports:
node.moxi = ports["proxy"]
if "direct" in ports:
node.memcached = ports["direct"]
if CbServer.use_https:
node.memcached = int(CbServer.ssl_port_map.get(str(node.memcached), str(node.memcached)))
if "hostname" in nodeDictionary:
value = str(nodeDictionary["hostname"])
node.ip = value[:value.rfind(":")]
node.port = int(value[value.rfind(":") + 1:])
if CbServer.use_https:
node.port = int(CbServer.ssl_port_map.get(str(node.port), str(node.port)))
if "otpNode" in nodeDictionary:
node.id = nodeDictionary["otpNode"]
bucket.nodes.append(node)
return bucket
|
py | b407b5156f7f17517ba81e349e8f93b5d2fc18b6 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import psycopg2
import sys
import os
sys.path.insert(0, f"{os.path.abspath(os.path.join(os.path.abspath(__file__), '../../../'))}")
from configparser import ConfigParser
path_root = os.path.abspath(os.path.join(os.path.abspath(__file__), "../../../"))
def pg_config(filename='database.ini', section='pg_afolu_fe'):
filename = f'{path_root}/config/{filename}'
# create a parser
parser = ConfigParser()
# read config file
parser.read(filename)
# get section, default to postgresql
db = {}
if parser.has_section(section):
params = parser.items(section)
for param in params:
db[param[0]] = param[1]
return db
else:
raise Exception('Section {0} not found in the {1} file'.format(section, filename))
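# For reference, pg_config() above expects an INI file shaped roughly like the
# sketch below, with a section matching its `section` argument. The values are
# placeholders for illustration, not real configuration from this project:
#
# [pg_afolu_fe]
# host=localhost
# port=5432
# dbname=afolu
# user=postgres
# password=secret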
def pg_connection_str(**kwargs):
"""
Create a connection string for use with pandas read_sql
:param kwargs: optional connection parameters (user, password, host, port, dbname); read from database.ini via pg_config() when omitted
:return: PostgreSQL connection URL string
"""
# read connection parameters
if not kwargs:
params = pg_config(**kwargs)
else:
params = kwargs
conn = 'postgresql://{user}:{password}@{host}:{port}/{dbname}'.format(**params)
return conn
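# Minimal usage sketch (illustrative only): the URL built by pg_connection_str()
# can be passed straight to pandas.read_sql, as the docstring above suggests.
# pandas (and SQLAlchemy) are assumed to be installed, and the table name below
# is a placeholder, not something defined by this project.
def _demo_read_table(table_name='some_table'):
    import pandas as pd  # assumed dependency, not imported elsewhere in this module
    return pd.read_sql('SELECT * FROM {0}'.format(table_name), pg_connection_str())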
def pg_connection(**kwargs):
""" Connect to the PostgreSQL database server """
conn = None
try:
# read connection parameters
if not kwargs:
params = pg_config(**kwargs)
else:
params = kwargs
# connect to the PostgreSQL server
# print('Connecting to the PostgreSQL database...')
conn = psycopg2.connect(**params)
except (Exception, psycopg2.DatabaseError) as error:
print(error)
return conn
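# Minimal usage sketch (illustrative): open a connection with the defaults from
# database.ini and run a trivial query. The query text is a placeholder; note
# that pg_connection() returns None when the connection attempt fails.
def _demo_server_version():
    conn = pg_connection()
    if conn is None:
        return None
    with conn as connection:
        cur = connection.cursor()
        cur.execute('SELECT version()')
        res = cur.fetchone()[0]
    conn.close()
    return res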
def get_from_ca_table(ca_id):
"""
Get data from categoria animal table
:return: a1, tc, rcms, bi
"""
conn = pg_connection()
with conn as connection:
query = """
SELECT coe_cat_animal, temp_conf, rcms, ib
FROM categoria_animal
WHERE id_categoria_animal = {0}
""".format(ca_id)
cur = connection.cursor()
cur.execute(query)
res = cur.fetchall()
a1 = res[0][0]
tc = res[0][1]
rcms = res[0][2]
bi = res[0][3]
conn.close()
return a1, tc, rcms, bi
def get_from_cp_table(cp_id):
"""
Get data from coeficiente de preñez table
:return: cp
"""
conn = pg_connection()
with conn as connection:
query = """
SELECT coe_prenez
FROM coeficiente_prenez
WHERE id_coe_prenez = {0}
""".format(cp_id)
cur = connection.cursor()
cur.execute(query)
res = cur.fetchall()
cp = res[0][0]
conn.close()
return cp
def get_from_grass_type(id_):
conn = pg_connection()
with conn as connection:
query = """
SELECT ed_rumiantes, energia_bruta_pasto, fdn_dieta, fda, enm_rumiantes,
ceniza_dieta, pc_dieta
FROM variedad_pasto
WHERE id_variedad = {0}
""".format(id_)
cur = connection.cursor()
cur.execute(query)
res = cur.fetchall()
edr: float = res[0][0]
ebp: float = res[0][1]
fdn: float = res[0][2]
fda: float = res[0][3]
enm: float = res[0][4]
cen: float = res[0][5]
pcd: float = res[0][6]
conn.close()
return edr, ebp, fdn, fda, enm, cen, pcd
def get_from_suplement_type(id_):
conn = pg_connection()
with conn as connection:
query = """
SELECT edt_rumiantes, energia_bruta_pasto, fdn_dieta, fda,
enm_rumiantes, ceniza_dieta, pc_dieta
FROM suplemento
WHERE id_suplemento = {0}
""".format(id_)
cur = connection.cursor()
cur.execute(query)
res = cur.fetchall()
edr: float = res[0][0]
ebp: float = res[0][1]
fdn: float = res[0][2]
fda: float = res[0][3]
enm: float = res[0][4]
cen: float = res[0][5]
pcd: float = res[0][6]
conn.close()
return edr, ebp, fdn, fda, enm, cen, pcd
def get_from_ac_table(ac_id):
"""
Get data from coeficiente de actividad table
:return: ca
"""
conn = pg_connection()
with conn as connection:
query = """
SELECT coe_actividad
FROM coeficiente_actividad
WHERE id_coe_actividad = {0}
""".format(ac_id)
cur = connection.cursor()
cur.execute(query)
res = cur.fetchall()
ca = res[0][0]
conn.close()
return ca
def get_from_cs_table(id_cs):
"""
Get data from condición sexual table
:return: fcs
"""
conn = pg_connection()
with conn as connection:
query = """
SELECT coe_cond_sexual
FROM condicion_sexual
WHERE id_cond_sexual = {0}
""".format(id_cs)
cur = connection.cursor()
cur.execute(query)
res = cur.fetchall()
fcs = res[0][0]
conn.close()
return fcs
def get_from_pm_table(pm_id):
"""
Get data from produccion de metano table
:return: ap, bp
"""
conn = pg_connection()
with conn as connection:
query = """
SELECT bovino_alta_prod, bovino_otras
FROM produccion_metano
WHERE id = '{0}'
""".format(pm_id)
cur = connection.cursor()
cur.execute(query)
res = cur.fetchall()
ap: float = res[0][0]
bp: float = res[0][1]
conn.close()
return ap, bp
def get_from_awms_table(awms_id):
"""
Get data from gestion de residuos table
:return: ap, bp
"""
conn = pg_connection()
with conn as connection:
query = """
SELECT alta_prod, otras
FROM gestion_residuos
WHERE id = '{0}'
""".format(awms_id)
cur = connection.cursor()
cur.execute(query)
res = cur.fetchall()
ap = res[0][0]
bp = res[0][1]
conn.close()
return ap, bp
def main():
db_parameters = pg_config()
db_str = pg_connection_str()
if __name__ == '__main__':
main()
|
py | b407b56a85f90ec27bb3aabfc5675b9b6d726238 | #!/usr/bin/env python3
#
# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
'''
This module implements Python access to the VPP statistics segment. It
accesses the data structures directly in shared memory.
VPP uses optimistic locking, so data structures may change underneath
us while we are reading. Data is copied out and it's important to
spend as little time as possible "holding the lock".
Counters are stored in VPP as a two dimensional array.
Index by thread and index (typically sw_if_index).
Simple counters count only packets, Combined counters count packets
and octets.
Counters can be accessed in either dimension.
stat['/if/rx'] - returns 2D lists
stat['/if/rx'][0] - returns counters for all interfaces for thread 0
stat['/if/rx'][0][1] - returns counter for interface 1 on thread 0
stat['/if/rx'][0][1]['packets'] - returns the packet counter
for interface 1 on thread 0
stat['/if/rx'][:, 1] - returns the counters for interface 1 on all threads
stat['/if/rx'][:, 1].packets() - returns the packet counters for
interface 1 on all threads
stat['/if/rx'][:, 1].sum_packets() - returns the sum of packet counters for
interface 1 on all threads
stat['/if/rx-miss'][:, 1].sum() - returns the sum of packet counters for
interface 1 on all threads for simple counters
'''
import os
import socket
import array
import mmap
from struct import Struct
import time
import unittest
import re
def recv_fd(sock):
'''Get file descriptor for memory map'''
fds = array.array("i") # Array of ints
_, ancdata, _, _ = sock.recvmsg(0, socket.CMSG_LEN(4))
for cmsg_level, cmsg_type, cmsg_data in ancdata:
if cmsg_level == socket.SOL_SOCKET and cmsg_type == socket.SCM_RIGHTS:
fds.frombytes(cmsg_data[:len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
return list(fds)[0]
VEC_LEN_FMT = Struct('I')
def get_vec_len(stats, vector_offset):
'''Equivalent to VPP vec_len()'''
return VEC_LEN_FMT.unpack_from(stats.statseg, vector_offset - 8)[0]
def get_string(stats, ptr):
'''Get a string from a VPP vector'''
namevector = ptr - stats.base
namevectorlen = get_vec_len(stats, namevector)
if namevector + namevectorlen >= stats.size:
raise IOError('String overruns stats segment')
return stats.statseg[namevector:namevector+namevectorlen-1].decode('ascii')
class StatsVector:
'''A class representing a VPP vector'''
def __init__(self, stats, ptr, fmt):
self.vec_start = ptr - stats.base
self.vec_len = get_vec_len(stats, ptr - stats.base)
self.struct = Struct(fmt)
self.fmtlen = len(fmt)
self.elementsize = self.struct.size
self.statseg = stats.statseg
self.stats = stats
if self.vec_start + self.vec_len * self.elementsize >= stats.size:
raise IOError('Vector overruns stats segment')
def __iter__(self):
with self.stats.lock:
return self.struct.iter_unpack(self.statseg[self.vec_start:self.vec_start +
self.elementsize*self.vec_len])
def __getitem__(self, index):
if index > self.vec_len:
raise IOError('Index beyond end of vector')
with self.stats.lock:
if self.fmtlen == 1:
return self.struct.unpack_from(self.statseg, self.vec_start +
(index * self.elementsize))[0]
return self.struct.unpack_from(self.statseg, self.vec_start +
(index * self.elementsize))
class VPPStats():
'''Main class implementing Python access to the VPP statistics segment'''
# pylint: disable=too-many-instance-attributes
shared_headerfmt = Struct('QPQQPP')
default_socketname = '/run/vpp/stats.sock'
def __init__(self, socketname=default_socketname, timeout=10):
self.socketname = socketname
self.timeout = timeout
self.directory = {}
self.lock = StatsLock(self)
self.connected = False
self.size = 0
self.last_epoch = 0
self.error_vectors = 0
self.statseg = 0
def connect(self):
'''Connect to stats segment'''
if self.connected:
return
sock = socket.socket(socket.AF_UNIX, socket.SOCK_SEQPACKET)
sock.connect(self.socketname)
mfd = recv_fd(sock)
sock.close()
stat_result = os.fstat(mfd)
self.statseg = mmap.mmap(mfd, stat_result.st_size, mmap.PROT_READ, mmap.MAP_SHARED)
os.close(mfd)
self.size = stat_result.st_size
if self.version != 2:
raise Exception('Incompatible stat segment version {}'
.format(self.version))
self.refresh()
self.connected = True
def disconnect(self):
'''Disconnect from stats segment'''
if self.connected:
self.statseg.close()
self.connected = False
@property
def version(self):
'''Get version of stats segment'''
return self.shared_headerfmt.unpack_from(self.statseg)[0]
@property
def base(self):
'''Get base pointer of stats segment'''
return self.shared_headerfmt.unpack_from(self.statseg)[1]
@property
def epoch(self):
'''Get current epoch value from stats segment'''
return self.shared_headerfmt.unpack_from(self.statseg)[2]
@property
def in_progress(self):
'''Get value of in_progress from stats segment'''
return self.shared_headerfmt.unpack_from(self.statseg)[3]
@property
def directory_vector(self):
'''Get pointer of directory vector'''
return self.shared_headerfmt.unpack_from(self.statseg)[4]
@property
def error_vector(self):
'''Get pointer of error vector'''
return self.shared_headerfmt.unpack_from(self.statseg)[5]
elementfmt = 'IQ128s'
def refresh(self, blocking=True):
'''Refresh directory vector cache (epoch changed)'''
directory = {}
directory_by_idx = {}
while True:
try:
with self.lock:
self.last_epoch = self.epoch
for i, direntry in enumerate(StatsVector(self, self.directory_vector, self.elementfmt)):
path_raw = direntry[2].find(b'\x00')
path = direntry[2][:path_raw].decode('ascii')
directory[path] = StatsEntry(direntry[0], direntry[1])
directory_by_idx[i] = path
self.directory = directory
self.directory_by_idx = directory_by_idx
# Cache the error index vectors
self.error_vectors = []
for threads in StatsVector(self, self.error_vector, 'P'):
self.error_vectors.append(StatsVector(self, threads[0], 'Q'))
# Return statement must be outside the lock block to be sure
# lock.release is executed
return
except IOError:
if not blocking:
raise
def __getitem__(self, item, blocking=True):
if not self.connected:
self.connect()
while True:
try:
if self.last_epoch != self.epoch:
self.refresh(blocking)
with self.lock:
result = self.directory[item].get_counter(self)
# Return statement must be outside the lock block to be sure
# lock.release is executed
return result
except IOError:
if not blocking:
raise
def __iter__(self):
return iter(self.directory.items())
def set_errors(self, blocking=True):
'''Return dictionary of error counters > 0'''
if not self.connected:
self.connect()
errors = {k:v for k, v in self.directory.items() if k.startswith("/err/")}
result = {}
while True:
try:
if self.last_epoch != self.epoch:
self.refresh(blocking)
with self.lock:
for k, entry in errors.items():
total = 0
i = entry.value
for per_thread in self.error_vectors:
total += per_thread[i]
if total:
result[k] = total
return result
except IOError:
if not blocking:
raise
def set_errors_str(self, blocking=True):
'''Return all errors counters > 0 pretty printed'''
error_string = ['ERRORS:']
error_counters = self.set_errors(blocking)
for k in sorted(error_counters):
error_string.append('{:<60}{:>10}'.format(k, error_counters[k]))
return '%s\n' % '\n'.join(error_string)
def get_counter(self, name, blocking=True):
'''Alternative call to __getitem__'''
return self.__getitem__(name, blocking)
def get_err_counter(self, name, blocking=True):
'''Return a single value (sum of all threads)'''
if not self.connected:
self.connect()
if name.startswith("/err/"):
while True:
try:
if self.last_epoch != self.epoch:
self.refresh(blocking)
with self.lock:
result = sum(self.directory[name].get_counter(self))
# Return statement must be outside the lock block to be sure
# lock.release is executed
return result
except IOError:
if not blocking:
raise
def ls(self, patterns):
'''Returns list of counters matching pattern'''
# pylint: disable=invalid-name
if not self.connected:
self.connect()
if not isinstance(patterns, list):
patterns = [patterns]
regex = [re.compile(i) for i in patterns]
return [k for k, v in self.directory.items()
if any(re.match(pattern, k) for pattern in regex)]
def dump(self, counters, blocking=True):
'''Given a list of counters return a dictionary of results'''
if not self.connected:
self.connect()
result = {}
for cnt in counters:
result[cnt] = self.__getitem__(cnt,blocking)
return result
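# Minimal usage sketch (illustrative): connect over the default stats socket,
# list the interface rx counter and dump it, following the access patterns in
# the module docstring. A running VPP with the stats segment enabled is assumed.
def _demo_dump_interface_rx():
    stat = VPPStats()                            # defaults to /run/vpp/stats.sock
    stat.connect()
    try:
        return stat.dump(stat.ls(['^/if/rx$']))  # {counter name: per-thread counters}
    finally:
        stat.disconnect()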
class StatsLock():
'''Stat segment optimistic locking'''
def __init__(self, stats):
self.stats = stats
self.epoch = 0
def __enter__(self):
acquired = self.acquire(blocking=True)
assert acquired, "Lock wasn't acquired, but blocking=True"
return self
def __exit__(self, exc_type=None, exc_value=None, traceback=None):
self.release()
def acquire(self, blocking=True, timeout=-1):
'''Acquire the lock. Await in progress to go false. Record epoch.'''
self.epoch = self.stats.epoch
if timeout > 0:
start = time.monotonic()
while self.stats.in_progress:
if not blocking:
time.sleep(0.01)
if timeout > 0:
if time.monotonic() - start > timeout:
return False
return True
def release(self):
'''Check if data read while locked is valid'''
if self.stats.in_progress or self.stats.epoch != self.epoch:
raise IOError('Optimistic lock failed, retry')
def locked(self):
'''Not used'''
class StatsCombinedList(list):
'''Column slicing for Combined counters list'''
def __getitem__(self, item):
'''Supports partial numpy style 2d support. Slice by column [:,1]'''
if isinstance(item, int):
return list.__getitem__(self, item)
return CombinedList([row[item[1]] for row in self])
class CombinedList(list):
'''Combined Counters 2-dimensional by thread by index of packets/octets'''
def packets(self):
'''Return column (2nd dimension). Packets for all threads'''
return [pair[0] for pair in self]
def octets(self):
'''Return column (2nd dimension). Octets for all threads'''
return [pair[1] for pair in self]
def sum_packets(self):
'''Return column (2nd dimension). Sum of all packets for all threads'''
return sum(self.packets())
def sum_octets(self):
'''Return column (2nd dimension). Sum of all octets for all threads'''
return sum(self.octets())
class StatsTuple(tuple):
'''A Combined vector tuple (packets, octets)'''
def __init__(self, data):
self.dictionary = {'packets': data[0], 'bytes': data[1]}
super().__init__()
def __repr__(self):
return dict.__repr__(self.dictionary)
def __getitem__(self, item):
if isinstance(item, int):
return tuple.__getitem__(self, item)
if item == 'packets':
return tuple.__getitem__(self, 0)
return tuple.__getitem__(self, 1)
class StatsSimpleList(list):
'''Simple Counters 2-dimensional by thread by index of packets'''
def __getitem__(self, item):
'''Supports partial numpy style 2d support. Slice by column [:,1]'''
if isinstance(item, int):
return list.__getitem__(self, item)
return SimpleList([row[item[1]] for row in self])
class SimpleList(list):
'''Simple counter'''
def sum(self):
'''Sum the vector'''
return sum(self)
class StatsEntry():
'''An individual stats entry'''
# pylint: disable=unused-argument,no-self-use
def __init__(self, stattype, statvalue):
self.type = stattype
self.value = statvalue
if stattype == 1:
self.function = self.scalar
elif stattype == 2:
self.function = self.simple
elif stattype == 3:
self.function = self.combined
elif stattype == 4:
self.function = self.error
elif stattype == 5:
self.function = self.name
elif stattype == 7:
self.function = self.symlink
else:
self.function = self.illegal
def illegal(self, stats):
'''Invalid or unknown counter type'''
return None
def scalar(self, stats):
'''Scalar counter'''
return self.value
def simple(self, stats):
'''Simple counter'''
counter = StatsSimpleList()
for threads in StatsVector(stats, self.value, 'P'):
clist = [v[0] for v in StatsVector(stats, threads[0], 'Q')]
counter.append(clist)
return counter
def combined(self, stats):
'''Combined counter'''
counter = StatsCombinedList()
for threads in StatsVector(stats, self.value, 'P'):
clist = [StatsTuple(cnt) for cnt in StatsVector(stats, threads[0], 'QQ')]
counter.append(clist)
return counter
def error(self, stats):
'''Error counter'''
counter = SimpleList()
for clist in stats.error_vectors:
counter.append(clist[self.value])
return counter
def name(self, stats):
'''Name counter'''
counter = []
for name in StatsVector(stats, self.value, 'P'):
if name[0]:
counter.append(get_string(stats, name[0]))
return counter
SYMLINK_FMT1 = Struct('II')
SYMLINK_FMT2 = Struct('Q')
def symlink(self, stats):
'''Symlink counter'''
b = self.SYMLINK_FMT2.pack(self.value)
index1, index2 = self.SYMLINK_FMT1.unpack(b)
name = stats.directory_by_idx[index1]
return stats[name][:,index2]
def get_counter(self, stats):
'''Return a list of counters'''
if stats:
return self.function(stats)
class TestStats(unittest.TestCase):
'''Basic statseg tests'''
def setUp(self):
'''Connect to statseg'''
self.stat = VPPStats()
self.stat.connect()
self.profile = cProfile.Profile()
self.profile.enable()
def tearDown(self):
'''Disconnect from statseg'''
self.stat.disconnect()
profile = Stats(self.profile)
profile.strip_dirs()
profile.sort_stats('cumtime')
profile.print_stats()
print("\n--->>>")
def test_counters(self):
'''Test access to statseg'''
print('/err/abf-input-ip4/missed', self.stat['/err/abf-input-ip4/missed'])
print('/sys/heartbeat', self.stat['/sys/heartbeat'])
print('/if/names', self.stat['/if/names'])
print('/if/rx-miss', self.stat['/if/rx-miss'])
print('/if/rx-miss', self.stat['/if/rx-miss'][1])
print('/nat44-ed/out2in/slowpath/drops', self.stat['/nat44-ed/out2in/slowpath/drops'])
print('Set Errors', self.stat.set_errors())
with self.assertRaises(KeyError):
print('NO SUCH COUNTER', self.stat['foobar'])
print('/if/rx', self.stat.get_counter('/if/rx'))
print('/err/ethernet-input/no error',
self.stat.get_err_counter('/err/ethernet-input/no error'))
def test_column(self):
'''Test column slicing'''
print('/if/rx-miss', self.stat['/if/rx-miss'])
print('/if/rx', self.stat['/if/rx']) # All interfaces for thread #1
print('/if/rx thread #1', self.stat['/if/rx'][0]) # All interfaces for thread #1
print('/if/rx thread #1, interface #1',
self.stat['/if/rx'][0][1]) # All interfaces for thread #1
print('/if/rx if_index #1', self.stat['/if/rx'][:, 1])
print('/if/rx if_index #1 packets', self.stat['/if/rx'][:, 1].packets())
print('/if/rx if_index #1 packets', self.stat['/if/rx'][:, 1].sum_packets())
print('/if/rx if_index #1 packets', self.stat['/if/rx'][:, 1].octets())
print('/if/rx-miss', self.stat['/if/rx-miss'])
print('/if/rx-miss if_index #1 packets', self.stat['/if/rx-miss'][:, 1].sum())
print('/if/rx if_index #1 packets', self.stat['/if/rx'][0][1]['packets'])
def test_error(self):
'''Test the error vector'''
print('/err/ethernet-input', self.stat['/err/ethernet-input/no error'])
print('/err/nat44-ei-ha/pkts-processed', self.stat['/err/nat44-ei-ha/pkts-processed'])
print('/err/ethernet-input', self.stat.get_err_counter('/err/ethernet-input/no error'))
print('/err/ethernet-input', self.stat['/err/ethernet-input/no error'].sum())
def test_nat44(self):
'''Test the nat counters'''
print('/nat44-ei/ha/del-event-recv', self.stat['/nat44-ei/ha/del-event-recv'])
print('/err/nat44-ei-ha/pkts-processed', self.stat['/err/nat44-ei-ha/pkts-processed'].sum())
def test_legacy(self):
'''Legacy interface'''
directory = self.stat.ls(["^/if", "/err/ip4-input", "/sys/node/ip4-input"])
data = self.stat.dump(directory)
print(data)
print('Looking up sys node')
directory = self.stat.ls(["^/sys/node"])
print('Dumping sys node')
data = self.stat.dump(directory)
print(data)
directory = self.stat.ls(["^/foobar"])
data = self.stat.dump(directory)
print(data)
def test_sys_nodes(self):
'''Test /sys/nodes'''
counters = self.stat.ls('^/sys/node')
print('COUNTERS:', counters)
print('/sys/node', self.stat.dump(counters))
print('/net/route/to', self.stat['/net/route/to'])
def test_symlink(self):
'''Symbolic links'''
print('/interface/local0/rx', self.stat['/interfaces/local0/rx'])
print('/sys/nodes/unix-epoll-input', self.stat['/nodes/unix-epoll-input/calls'])
if __name__ == '__main__':
import cProfile
from pstats import Stats
unittest.main()
|
py | b407b6caddf969f173fa9b997d00329a53944f72 |
import sys
import os
import time
import pdb
import random
import argparse
import cProfile, pstats
import scipy.io
import pickle
import configparser
import torch
import numpy as np
sys.path.append("../src")
import stats.kernels
import stats.svGPFA.svGPFAModelFactory
import stats.svGPFA.svEM
import plot.svGPFA.plotUtils
import utils.svGPFA.initUtils
# import utils.svGPFA.miscUtils
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument("mEstNumber", help="Matlab's estimation number", type=int)
parser.add_argument("--deviceName", help="name of device (cpu or cuda)", default="cpu")
parser.add_argument("--profile", help="perform profiling", action="store_true")
args = parser.parse_args()
if args.profile:
profile = True
else:
profile = False
mEstNumber = args.mEstNumber
deviceName = args.deviceName
if not torch.cuda.is_available():
deviceName = "cpu"
device = torch.device(deviceName)
print("Using {:s}".format(deviceName))
mEstConfig = configparser.ConfigParser()
mEstConfig.read("../../matlabCode/scripts/results/{:08d}-pointProcessEstimationParams.ini".format(mEstNumber))
mSimNumber = int(mEstConfig["data"]["simulationNumber"])
indPointsLocsKMSEpsilon = float(mEstConfig["control_variables"]["epsilon"])
ppSimulationFilename = os.path.join(os.path.dirname(__file__), "../../matlabCode/scripts/results/{:08d}-pointProcessSimulation.mat".format(mSimNumber))
initDataFilename = os.path.join(os.path.dirname(__file__), "../../matlabCode/scripts/results/{:08d}-pointProcessInitialConditions.mat".format(mEstNumber))
# save estimated values
estimationPrefixUsed = True
while estimationPrefixUsed:
pEstNumber = random.randint(0, 10**8)
estimMetaDataFilename = \
"results/{:08d}_leasSimulation_estimationChol_metaData_{:s}.ini".format(pEstNumber, deviceName)
if not os.path.exists(estimMetaDataFilename):
estimationPrefixUsed = False
modelSaveFilename = \
"results/{:08d}_leasSimulation_estimatedModelChol_{:s}.pickle".format(pEstNumber, deviceName)
profilerFilenamePattern = \
"results/{:08d}_leaseSimulation_estimatedModelChol_{:s}.pstats".format(pEstNumber, deviceName)
lowerBoundHistFigFilename = \
"figures/{:08d}_leasSimulation_lowerBoundHistChol_{:s}.png".format(pEstNumber, deviceName)
mat = scipy.io.loadmat(initDataFilename)
nLatents = len(mat['Z0'])
nTrials = mat['Z0'][0,0].shape[2]
qMu0 = [torch.from_numpy(mat['q_mu0'][(0,k)]).type(torch.DoubleTensor).permute(2,0,1).to(device) for k in range(nLatents)]
qSVec0 = [torch.from_numpy(mat['q_sqrt0'][(0,k)]).type(torch.DoubleTensor).permute(2,0,1).to(device) for k in range(nLatents)]
qSDiag0 = [torch.from_numpy(mat['q_diag0'][(0,k)]).type(torch.DoubleTensor).permute(2,0,1).to(device) for k in range(nLatents)]
Z0 = [torch.from_numpy(mat['Z0'][(k,0)]).type(torch.DoubleTensor).permute(2,0,1).to(device) for k in range(nLatents)]
C0 = torch.from_numpy(mat["C0"]).type(torch.DoubleTensor).to(device)
b0 = torch.from_numpy(mat["b0"]).type(torch.DoubleTensor).squeeze().to(device)
legQuadPoints = torch.from_numpy(mat['ttQuad']).type(torch.DoubleTensor).permute(2, 0, 1).to(device)
legQuadWeights = torch.from_numpy(mat['wwQuad']).type(torch.DoubleTensor).permute(2, 0, 1).to(device)
# qSigma0[k] \in nTrials x nInd[k] x nInd[k]
qSigma0 = utils.svGPFA.initUtils.buildQSigmaFromQSVecAndQSDiag(qSVec=qSVec0, qSDiag=qSDiag0)
qSRSigma0 = [[None] for k in range(nLatents)]
for k in range(nLatents):
nIndPointsK = qSigma0[k].shape[1]
qSRSigma0[k] = torch.empty((nTrials, nIndPointsK, nIndPointsK), dtype=torch.double)
for r in range(nTrials):
qSRSigma0[k][r,:,:] = torch.cholesky(qSigma0[k][r,:,:])
yMat = scipy.io.loadmat(ppSimulationFilename)
YNonStacked_tmp = yMat['Y']
nNeurons = YNonStacked_tmp[0,0].shape[0]
YNonStacked = [[[] for n in range(nNeurons)] for r in range(nTrials)]
for r in range(nTrials):
for n in range(nNeurons):
spikesTrialNeuron = YNonStacked_tmp[r,0][n,0]
if len(spikesTrialNeuron)>0:
YNonStacked[r][n] = torch.from_numpy(spikesTrialNeuron[:,0]).type(torch.DoubleTensor).to(device)
else:
YNonStacked[r][n] = []
kernelNames = mat["kernelNames"]
hprs0 = mat["hprs0"]
# create kernels
kernels = [[None] for k in range(nLatents)]
for k in range(nLatents):
if np.char.equal(kernelNames[0,k][0], "PeriodicKernel"):
kernels[k] = stats.kernels.PeriodicKernel(scale=1.0)
elif np.char.equal(kernelNames[0,k][0], "rbfKernel"):
kernels[k] = stats.kernels.ExponentialQuadraticKernel(scale=1.0)
else:
raise ValueError("Invalid kernel name: %s"%(kernelNames[k]))
# create initial parameters
kernelsParams0 = [[None] for k in range(nLatents)]
for k in range(nLatents):
if np.char.equal(kernelNames[0,k][0], "PeriodicKernel"):
kernelsParams0[k] = torch.tensor([float(hprs0[k,0][0]),
float(hprs0[k,0][1])],
dtype=torch.double).to(device)
elif np.char.equal(kernelNames[0,k][0], "rbfKernel"):
kernelsParams0[k] = torch.tensor([float(hprs0[k,0][0])],
dtype=torch.double).to(device)
else:
raise ValueError("Invalid kernel name: %s"%(kernelNames[k]))
qUParams0 = {"qMu0": qMu0, "qSRSigma0": qSRSigma0}
kmsParams0 = {"kernelsParams0": kernelsParams0,
"inducingPointsLocs0": Z0}
qKParams0 = {"svPosteriorOnIndPoints": qUParams0,
"kernelsMatricesStore": kmsParams0}
qHParams0 = {"C0": C0, "d0": b0}
initialParams = {"svPosteriorOnLatents": qKParams0,
"svEmbedding": qHParams0}
quadParams = {"legQuadPoints": legQuadPoints,
"legQuadWeights": legQuadWeights}
optimParams = {"emMaxIter":50,
#
"eStepEstimate":True,
"eStepMaxIter":100,
"eStepTol":1e-3,
"eStepLR":1e-3,
"eStepLineSearchFn":"strong_wolfe",
# "eStepLineSearchFn":"None",
"eStepNIterDisplay":1,
#
"mStepEmbeddingEstimate":True,
"mStepEmbeddingMaxIter":100,
"mStepEmbeddingTol":1e-3,
"mStepEmbeddingLR":1e-3,
"mStepEmbeddingLineSearchFn":"strong_wolfe",
# "mStepEmbeddingLineSearchFn":"None",
"mStepEmbeddingNIterDisplay":1,
#
"mStepKernelsEstimate":True,
"mStepKernelsMaxIter":10,
"mStepKernelsTol":1e-3,
"mStepKernelsLR":1e-3,
"mStepKernelsLineSearchFn":"strong_wolfe",
# "mStepKernelsLineSearchFn":"None",
"mStepKernelsNIterDisplay":1,
#
"mStepIndPointsEstimate":True,
"mStepIndPointsMaxIter":20,
"mStepIndPointsTol":1e-3,
"mStepIndPointsLR":1e-4,
"mStepIndPointsLineSearchFn":"strong_wolfe",
# "mStepIndPointsLineSearchFn":"None",
"mStepIndPointsNIterDisplay":1,
#
"verbose":True
}
estimConfig = configparser.ConfigParser()
estimConfig["data"] = {"mEstNumber": mEstNumber}
estimConfig["optim_params"] = optimParams
estimConfig["control_params"] = {"indPointsLocsKMSEpsilon": indPointsLocsKMSEpsilon}
with open(estimMetaDataFilename, "w") as f: estimConfig.write(f)
trialsLengths = yMat["trLen"].astype(np.float64).flatten().tolist()
kernelsTypes = [type(kernels[k]).__name__ for k in range(len(kernels))]
estimationDataForMatlabFilename = "results/{:08d}_estimationDataForMatlab.mat".format(0)
# estimationDataForMatlabFilename = "results/{:08d}_estimationDataForMatlab.mat".format(estResNumber)
# utils.svGPFA.miscUtils.saveDataForMatlabEstimations(
# qMu0=qMu0, qSVec0=qSVec0, qSDiag0=qSDiag0,
# C0=C0, d0=b0,
# indPointsLocs0=Z0,
# legQuadPoints=legQuadPoints,
# legQuadWeights=legQuadWeights,
# kernelsTypes=kernelsTypes,
# kernelsParams0=kernelsParams0,
# spikesTimes=YNonStacked,
# indPointsLocsKMSEpsilon=indPointsLocsKMSEpsilon,
# trialsLengths=np.array(trialsLengths).reshape(-1,1),
# emMaxIter=optimParams["emMaxIter"],
# eStepMaxIter=optimParams["eStepMaxIter"],
# mStepEmbeddingMaxIter=optimParams["mStepEmbeddingMaxIter"],
# mStepKernelsMaxIter=optimParams["mStepKernelsMaxIter"],
# mStepIndPointsMaxIter=optimParams["mStepIndPointsMaxIter"],
# saveFilename=estimationDataForMatlabFilename)
model = stats.svGPFA.svGPFAModelFactory.SVGPFAModelFactory.buildModel(
conditionalDist=stats.svGPFA.svGPFAModelFactory.PointProcess,
linkFunction=stats.svGPFA.svGPFAModelFactory.ExponentialLink,
embeddingType=stats.svGPFA.svGPFAModelFactory.LinearEmbedding,
kernels=kernels,
)
# start debug code
# parametersList = []
# i = 0
# for parameter in model.parameters():
# print("Inside for loop")
# print(i, parameter)
# parametersList.append(parameter)
# print("Outside for loop")
# pdb.set_trace()
# end debug code
# model.to(device)
# maximize lower bound
svEM = stats.svGPFA.svEM.SVEM()
if profile:
pr = cProfile.Profile()
pr.enable()
tStart = time.time()
lowerBoundHist, elapsedTimeHist = \
svEM.maximize(model=model,
measurements=YNonStacked,
initialParams=initialParams,
quadParams=quadParams,
optimParams=optimParams,
indPointsLocsKMSEpsilon=indPointsLocsKMSEpsilon,
)
tElapsed = time.time()-tStart
print("Completed maximize in {:.2f} seconds".format(tElapsed))
# start debug code
# parametersList = []
# i = 0
# for parameter in model.parameters():
# print("Inside for loop")
# print(i, parameter)
# parametersList.append(parameter)
# i += 1
# print("Outside for loop")
# pdb.set_trace()
# end debug code
if profile:
pr.disable()
profilerFilename = profilerFilenamePattern.format(optimParams["emMaxIter"])
s = open(profilerFilename, "w")
sortby = "cumulative"
ps = pstats.Stats(pr, stream=s)
ps.strip_dirs().sort_stats(sortby).print_stats()
s.close()
resultsToSave = {"lowerBoundHist": lowerBoundHist, "elapsedTimeHist": elapsedTimeHist, "model": model}
with open(modelSaveFilename, "wb") as f: pickle.dump(resultsToSave, f)
# plot lower bound history
plot.svGPFA.plotUtils.plotLowerBoundHist(lowerBoundHist=lowerBoundHist, elapsedTimeHist=elapsedTimeHist, figFilename=lowerBoundHistFigFilename)
pdb.set_trace()
if __name__=="__main__":
main(sys.argv)
|
py | b407b7fa901a4f0b10153591dc2dae3101b500c4 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.api_core.gapic_v1 import client_info
import mock
import pytest
from google.cloud.bigquery_storage_v1 import types
PROJECT = "my-project"
SERVICE_ACCOUNT_PROJECT = "project-from-credentials"
@pytest.fixture()
def mock_transport(monkeypatch):
from google.cloud.bigquery_storage_v1.gapic.transports import (
big_query_read_grpc_transport,
)
transport = mock.create_autospec(
big_query_read_grpc_transport.BigQueryReadGrpcTransport
)
return transport
@pytest.fixture()
def client_under_test(mock_transport):
from google.cloud.bigquery_storage_v1 import client
# The mock is detected as a callable. By creating a real callable here, the
# mock can still be used to verify RPCs.
def transport_callable(credentials=None, default_class=None, address=None):
return mock_transport
return client.BigQueryReadClient(transport=transport_callable)
def test_constructor_w_client_info(mock_transport):
from google.cloud.bigquery_storage_v1 import client
def transport_callable(credentials=None, default_class=None, address=None):
return mock_transport
client_under_test = client.BigQueryReadClient(
transport=transport_callable,
client_info=client_info.ClientInfo(
client_library_version="test-client-version"
),
)
user_agent = client_under_test._client_info.to_user_agent()
assert "test-client-version" in user_agent
def test_create_read_session(mock_transport, client_under_test):
table = "projects/{}/datasets/{}/tables/{}".format(
"data-project-id", "dataset_id", "table_id"
)
read_session = types.ReadSession()
read_session.table = table
client_under_test.create_read_session("projects/other-project", read_session)
expected_request = types.CreateReadSessionRequest(
parent="projects/other-project", read_session=read_session
)
mock_transport.create_read_session.assert_called_once_with(
expected_request, metadata=mock.ANY, timeout=mock.ANY
)
def test_read_rows(mock_transport, client_under_test):
stream_name = "teststream"
offset = 0
client_under_test.read_rows(stream_name)
expected_request = types.ReadRowsRequest(read_stream=stream_name, offset=offset)
mock_transport.read_rows.assert_called_once_with(
expected_request, metadata=mock.ANY, timeout=mock.ANY
)
|
py | b407b89e9bef56bee4a01a7e386a27ca9a818741 | import pdf_to_json as p2j
import json
url = "file:data/multilingual/Latn.DAG/Sans_12/udhr_Latn.DAG_Sans_12.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
|
py | b407ba2be571a3fd58e67d147841d27d0f0e53e9 | from .bubblesort import bubble_sort
from .insertionsort import insertion_sort
from .mergesort import merge_sort
from .quicksort import quick_sort, test_quick_sort
from .selectionsort import selection_sort, test_selection_sort
from .doubleselectionsort import double_selection_sort, test_double_selection_sort
from .heapsort import heap_sort
functions = {
"bubble_sort": bubble_sort,
"insertion_sort": insertion_sort,
"merge_sort": merge_sort,
"quick_sort": quick_sort,
"selection_sort": selection_sort,
"double_selection_sort": double_selection_sort,
"heap_sort": heap_sort,
} |
py | b407bb64c9ed1b87bb22fbd0e73f758333c6512f | #!/usr/bin/python3
from pyrob.api import *
@task
def task_2_2():
move_down()
for i in range(5):
move_right()
fill_cell()
move_right()
move_down()
fill_cell()
move_down()
move_left()
fill_cell()
move_up()
fill_cell()
move_left()
fill_cell()
move_up()
if i != 4:
move_right(4)
if __name__ == '__main__':
run_tasks()
|
py | b407bc15dd42850061d41c0f8e22a7bffb84fcc2 | _base_ = [
'../../_base_/models/slowfast_r50.py', '../../_base_/default_runtime.py'
]
dataset_type = 'RawframeDataset'
data_root = 'data/kinetics400/rawframes_train'
data_root_val = 'data/kinetics400/rawframes_val'
ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 256)),
dict(type='RandomResizedCrop'),
dict(type='Resize', scale=(224, 224), keep_ratio=False),
dict(type='Flip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
dict(
type='SampleFrames',
clip_len=32,
frame_interval=2,
num_clips=1,
test_mode=True),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 256)),
dict(type='CenterCrop', crop_size=224),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
dict(
type='SampleFrames',
clip_len=32,
frame_interval=2,
num_clips=10,
test_mode=True),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 256)),
dict(type='ThreeCrop', crop_size=256),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs'])
]
data = dict(
videos_per_gpu=8,
workers_per_gpu=4,
train=dict(
type=dataset_type,
ann_file=ann_file_train,
data_prefix=data_root,
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=ann_file_val,
data_prefix=data_root_val,
pipeline=val_pipeline),
test=dict(
type=dataset_type,
ann_file=ann_file_test,
data_prefix=data_root_val,
pipeline=test_pipeline))
evaluation = dict(
interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
# optimizer
optimizer = dict(
type='SGD', lr=0.1, momentum=0.9,
weight_decay=0.0001) # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(
policy='CosineAnnealing',
min_lr=0,
warmup='linear',
warmup_by_epoch=True,
warmup_iters=34)
total_epochs = 256
# precise bn
precise_bn = dict(num_iters=200, interval=1)
# runtime settings
checkpoint_config = dict(interval=4)
work_dir = './work_dirs/slowfast_prebn_r50_4x16x1_256e_kinetics400_rgb'
find_unused_parameters = False
|
py | b407bd2431ce89ef09eb6447c2871efde891a33f | '''
Provides functionality for converting a given list of tokens (words) into
numbers, according to the given vocabulary.
'''
import numbers
import numpy as np
from .create_vocab import extend_vocab, VocabBuilder
from .word_generator import WordGenerator
from .global_variables import SPECIAL_TOKENS
from sklearn.model_selection import train_test_split
from copy import deepcopy
class SentenceTokenizer():
""" Create numpy array of tokens corresponding to input sentences.
The vocabulary can include Unicode tokens.
"""
def __init__(self, vocabulary, fixed_length, custom_wordgen=None,
ignore_sentences_with_only_custom=False, masking_value=0,
unknown_value=1):
""" Needs a dictionary as input for the vocabulary.
"""
if len(vocabulary) > np.iinfo('uint16').max:
raise ValueError('Dictionary is too big ({} tokens) for the numpy '
'datatypes used (max limit={}). Reduce vocabulary'
' or adjust code accordingly!'
.format(len(vocabulary), np.iinfo('uint16').max))
# Shouldn't be able to modify the given vocabulary
self.vocabulary = deepcopy(vocabulary)
self.fixed_length = fixed_length
self.ignore_sentences_with_only_custom = ignore_sentences_with_only_custom
self.masking_value = masking_value
self.unknown_value = unknown_value
# Initialized with an empty stream of sentences that must then be fed
# to the generator at a later point for reusability.
# A custom word generator can be used for domain-specific filtering etc
if custom_wordgen is not None:
assert custom_wordgen.stream is None
self.wordgen = custom_wordgen
self.uses_custom_wordgen = True
else:
self.wordgen = WordGenerator(None, allow_unicode_text=True,
ignore_emojis=False,
remove_variation_selectors=True,
break_replacement=True)
self.uses_custom_wordgen = False
def tokenize_sentences(self, sentences, reset_stats=True, max_sentences=None):
""" Converts a given list of sentences into a numpy array according to
its vocabulary.
# Arguments:
sentences: List of sentences to be tokenized.
reset_stats: Whether the word generator's stats should be reset.
max_sentences: Maximum length of sentences. Must be set if the
length cannot be inferred from the input.
# Returns:
Numpy array of the tokenization sentences with masking,
infos,
stats
# Raises:
ValueError: When maximum length is not set and cannot be inferred.
"""
if max_sentences is None and not hasattr(sentences, '__len__'):
raise ValueError('Either you must provide an array with a length'
'attribute (e.g. a list) or specify the maximum '
'length yourself using `max_sentences`!')
n_sentences = (max_sentences if max_sentences is not None
else len(sentences))
if self.masking_value == 0:
tokens = np.zeros((n_sentences, self.fixed_length), dtype='uint16')
else:
tokens = (np.ones((n_sentences, self.fixed_length), dtype='uint16') *
self.masking_value)
if reset_stats:
self.wordgen.reset_stats()
# With a custom word generator info can be extracted from each
# sentence (e.g. labels)
infos = []
# Returns words as strings and then map them to vocabulary
self.wordgen.stream = sentences
next_insert = 0
n_ignored_unknowns = 0
for s_words, s_info in self.wordgen:
s_tokens = self.find_tokens(s_words)
if (self.ignore_sentences_with_only_custom and
np.all([t < len(SPECIAL_TOKENS) for t in s_tokens])):
n_ignored_unknowns += 1
continue
if len(s_tokens) > self.fixed_length:
s_tokens = s_tokens[:self.fixed_length]
tokens[next_insert, :len(s_tokens)] = s_tokens
infos.append(s_info)
next_insert += 1
# For standard word generators all sentences should be tokenized
# this is not necessarily the case for custom wordgenerators as they
# may filter the sentences etc.
if not self.uses_custom_wordgen and not self.ignore_sentences_with_only_custom:
assert len(sentences) == next_insert
else:
# adjust based on actual tokens received
tokens = tokens[:next_insert]
infos = infos[:next_insert]
return tokens, infos, self.wordgen.stats
def find_tokens(self, words):
assert len(words) > 0
tokens = []
for w in words:
try:
tokens.append(self.vocabulary[w])
except KeyError:
tokens.append(self.unknown_value)
return tokens
def split_train_val_test(self, sentences, info_dicts,
split_parameter=[0.7, 0.1, 0.2], extend_with=0):
""" Splits given sentences into three different datasets: training,
validation and testing.
# Arguments:
sentences: The sentences to be tokenized.
info_dicts: A list of dicts that contain information about each
sentence (e.g. a label).
split_parameter: A parameter for deciding the splits between the
three different datasets. If instead of being passed three
values, three lists are passed, then these will be used to
specify which observation belong to which dataset.
extend_with: An optional parameter. If > 0 then this is the number
of tokens added to the vocabulary from this dataset. The
expanded vocab will be generated using only the training set,
but is applied to all three sets.
# Returns:
List of three lists of tokenized sentences,
List of three corresponding dictionaries with information,
How many tokens have been added to the vocab. Make sure to extend
the embedding layer of the model accordingly.
"""
# If passed three lists, use those directly
if isinstance(split_parameter, list) and \
all(isinstance(x, list) for x in split_parameter) and \
len(split_parameter) == 3:
# Helper function to verify provided indices are numbers in range
def verify_indices(inds):
return list([i for i in inds if isinstance(i, numbers.Number) and
i < len(sentences)])
ind_train = verify_indices(split_parameter[0])
ind_val = verify_indices(split_parameter[1])
ind_test = verify_indices(split_parameter[2])
else:
# Split sentences and dicts
ind = list(range(len(sentences)))
ind_train, ind_test = train_test_split(ind, test_size=split_parameter[2])
ind_train, ind_val = train_test_split(ind_train, test_size=split_parameter[1])
# Map indices to data
train = np.array([sentences[x] for x in ind_train])
test = np.array([sentences[x] for x in ind_test])
val = np.array([sentences[x] for x in ind_val])
info_train = np.array([info_dicts[x] for x in ind_train])
info_test = np.array([info_dicts[x] for x in ind_test])
info_val = np.array([info_dicts[x] for x in ind_val])
added = 0
# Extend vocabulary with training set tokens
if extend_with > 0:
wg = WordGenerator(train)
vb = VocabBuilder(wg)
vb.count_all_words()
added = extend_vocab(self.vocabulary, vb, max_tokens=extend_with)
# Wrap results
result = [self.tokenize_sentences(s)[0] for s in [train, val, test]]
result_infos = [info_train, info_val, info_test]
return result, result_infos, added
def to_sentence(self, sentence_idx):
""" Converts a tokenized sentence back to a list of words.
# Arguments:
sentence_idx: List of numbers, representing a tokenized sentence
given the current vocabulary.
# Returns:
String created by converting all numbers back to words and joined
together with spaces.
"""
# Have to recalculate the mappings in case the vocab was extended.
ind_to_word = {ind: word for word, ind in self.vocabulary.items()}
sentence_as_list = [ind_to_word[x] for x in sentence_idx]
cleaned_list = [x for x in sentence_as_list if x != 'CUSTOM_MASK']
return " ".join(cleaned_list)
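# Minimal usage sketch (illustrative): tokenize two short sentences with a toy
# vocabulary. Indices 0 and 1 are reserved for the masking and unknown values,
# matching the class defaults above; the vocabulary and sentences below are
# assumptions for demonstration, not data shipped with this module.
def _demo_tokenize():
    vocab = {'CUSTOM_MASK': 0, 'CUSTOM_UNKNOWN': 1, 'i': 2, 'like': 3, 'cats': 4}
    st = SentenceTokenizer(vocab, fixed_length=5)
    tokens, infos, stats = st.tokenize_sentences(['i like cats', 'i like dogs'])
    return tokens  # 'dogs' is not in the toy vocabulary, so it maps to the unknown value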
def coverage(dataset, verbose=False):
""" Computes the percentage of words in a given dataset that are unknown.
# Arguments:
dataset: Tokenized dataset to be checked.
verbose: Verbosity flag.
# Returns:
Percentage of unknown tokens.
"""
n_total = np.count_nonzero(dataset)
n_unknown = np.sum(dataset == 1)
coverage = 1.0 - float(n_unknown) / n_total
if verbose:
print("Unknown words: {}".format(n_unknown))
print("Total words: {}".format(n_total))
print("Coverage: {}".format(coverage))
return coverage
|
py | b407bdb29f086c017ac36aa1f9a89e9057649946 | """
This file offers the methods to automatically retrieve the graph Candidatus Falkowbacteria bacterium CG1_02_37_44.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def CandidatusFalkowbacteriaBacteriumCg1023744(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Candidatus Falkowbacteria bacterium CG1_02_37_44 graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
Wether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
Wether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.5
- physical.links.v11.5
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
Instace of Candidatus Falkowbacteria bacterium CG1_02_37_44 graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="CandidatusFalkowbacteriaBacteriumCg1023744",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
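# Minimal usage sketch (illustrative): retrieve the undirected graph with the
# default STRING version. The first call downloads and preprocesses the data,
# so network access and this package's graph dependencies are assumed.
def _demo_load_graph():
    return CandidatusFalkowbacteriaBacteriumCg1023744(directed=False)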
|
py | b407bdcd60485365a9c9a7fcbdd3d986d1c09d7f | from scinet3.model import Document
class Recommender(object):
"""
Recommendation engine that handles the recommending stuff
"""
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
if not Document.all_docs_loaded:
print "loading docs from db..."
Document.load_all_from_db()
def recommend_keywords(self, *args, **kwargs):
raise NotImplementedError
def recommend_documents(self, *args, **kwargs):
raise NotImplementedError
def associated_keywords_from_docs(self, docs, exclude_kws = None):
"""
get associated keywords from documents.
Param:
docs: list of Document
exclude_kws: set of Keyword
Return:
list of Keyword
"""
if not exclude_kws:
exclude_kws = set()
else:
exclude_kws = set(exclude_kws)
return list(set([kw
for doc in docs
for kw in doc['keywords']
if kw not in exclude_kws]))
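# Minimal illustrative sketch (not part of the original module): a concrete
# recommender that implements recommend_keywords by simply returning the
# keywords associated with the documents it is given. The `docs`/`exclude_kws`
# argument names are assumptions for demonstration only.
class AssociatedKeywordRecommender(Recommender):
    def recommend_keywords(self, docs, exclude_kws=None, **kwargs):
        return self.associated_keywords_from_docs(docs, exclude_kws=exclude_kws)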
|
py | b407bdf6a55fa00bde95c3ae77e3aa19daf10f35 | #!/usr/bin/env python
import unittest
from framework import VppTestCase, VppTestRunner
from util import ppp
from scapy.packet import Raw
from scapy.layers.inet import IP, UDP
from vpp_papi_provider import SYSLOG_SEVERITY
from syslog_rfc5424_parser import SyslogMessage, ParseError
from syslog_rfc5424_parser.constants import SyslogFacility, SyslogSeverity
class TestSyslog(VppTestCase):
""" Syslog Protocol Test Cases """
@classmethod
def setUpClass(cls):
super(TestSyslog, cls).setUpClass()
try:
cls.create_pg_interfaces(range(1))
cls.pg0.admin_up()
cls.pg0.config_ip4()
cls.pg0.resolve_arp()
except Exception:
super(TestSyslog, cls).tearDownClass()
raise
def syslog_generate(self, facility, severity, appname, msgid, sd=None,
msg=None):
"""
Generate syslog message
:param facility: facility value
:param severity: severity level
:param appname: application name that originates the message
:param msgid: message identifier
:param sd: structured data (optional)
:param msg: free-form message (optional)
"""
facility_str = ['kernel', 'user-level', 'mail-system',
'system-daemons', 'security-authorization', 'syslogd',
'line-printer', 'network-news', 'uucp', 'clock-daemon',
'', 'ftp-daemon', 'ntp-subsystem', 'log-audit',
'log-alert', '', 'local0', 'local1', 'local2',
'local3', 'local4', 'local5', 'local6', 'local7']
severity_str = ['emergency', 'alert', 'critical', 'error', 'warning',
'notice', 'informational', 'debug']
cli_str = "test syslog %s %s %s %s" % (facility_str[facility],
severity_str[severity],
appname,
msgid)
if sd is not None:
for sd_id, sd_params in sd.items():
cli_str += " sd-id %s" % (sd_id)
for name, value in sd_params.items():
cli_str += " sd-param %s %s" % (name, value)
if msg is not None:
cli_str += " %s" % (msg)
self.vapi.cli(cli_str)
def syslog_verify(self, data, facility, severity, appname, msgid, sd=None,
msg=None):
"""
Verify syslog message
:param data: syslog message
:param facility: facility value
:param severity: severity level
        :param appname: application name that originates the message
        :param msgid: message identifier
:param sd: structured data (optional)
:param msg: free-form message (optional)
"""
message = data.decode('utf-8')
if sd is None:
sd = {}
try:
message = SyslogMessage.parse(message)
self.assertEqual(message.facility, facility)
self.assertEqual(message.severity, severity)
self.assertEqual(message.appname, appname)
self.assertEqual(message.msgid, msgid)
self.assertEqual(message.msg, msg)
self.assertEqual(message.sd, sd)
self.assertEqual(message.version, 1)
self.assertEqual(message.hostname, self.pg0.local_ip4)
except ParseError as e:
self.logger.error(e)
def test_syslog(self):
""" Syslog Protocol test """
self.vapi.syslog_set_sender(self.pg0.remote_ip4n, self.pg0.local_ip4n)
config = self.vapi.syslog_get_sender()
self.assertEqual(str(config.collector_address),
self.pg0.remote_ip4)
self.assertEqual(config.collector_port, 514)
self.assertEqual(str(config.src_address), self.pg0.local_ip4)
self.assertEqual(config.vrf_id, 0)
self.assertEqual(config.max_msg_size, 480)
appname = 'test'
msgid = 'testMsg'
msg = 'this is message'
sd1 = {'exampleSDID@32473': {'iut': '3',
'eventSource': 'App',
'eventID': '1011'}}
sd2 = {'exampleSDID@32473': {'iut': '3',
'eventSource': 'App',
'eventID': '1011'},
'examplePriority@32473': {'class': 'high'}}
self.pg_enable_capture(self.pg_interfaces)
self.syslog_generate(SyslogFacility.local7,
SyslogSeverity.info,
appname,
msgid,
None,
msg)
capture = self.pg0.get_capture(1)
try:
self.assertEqual(capture[0][IP].src, self.pg0.local_ip4)
self.assertEqual(capture[0][IP].dst, self.pg0.remote_ip4)
self.assertEqual(capture[0][UDP].dport, 514)
self.assert_packet_checksums_valid(capture[0], False)
except:
self.logger.error(ppp("invalid packet:", capture[0]))
raise
self.syslog_verify(capture[0][Raw].load,
SyslogFacility.local7,
SyslogSeverity.info,
appname,
msgid,
None,
msg)
self.pg_enable_capture(self.pg_interfaces)
self.vapi.syslog_set_filter(SYSLOG_SEVERITY.WARN)
filter = self.vapi.syslog_get_filter()
self.assertEqual(filter.severity, SYSLOG_SEVERITY.WARN)
self.syslog_generate(SyslogFacility.local7,
SyslogSeverity.info,
appname,
msgid,
None,
msg)
self.pg0.assert_nothing_captured()
self.pg_enable_capture(self.pg_interfaces)
self.syslog_generate(SyslogFacility.local6,
SyslogSeverity.warning,
appname,
msgid,
sd1,
msg)
capture = self.pg0.get_capture(1)
self.syslog_verify(capture[0][Raw].load,
SyslogFacility.local6,
SyslogSeverity.warning,
appname,
msgid,
sd1,
msg)
self.vapi.syslog_set_sender(self.pg0.remote_ip4n,
self.pg0.local_ip4n,
collector_port=12345)
config = self.vapi.syslog_get_sender()
self.assertEqual(config.collector_port, 12345)
self.pg_enable_capture(self.pg_interfaces)
self.syslog_generate(SyslogFacility.local5,
SyslogSeverity.err,
appname,
msgid,
sd2,
None)
capture = self.pg0.get_capture(1)
try:
self.assertEqual(capture[0][UDP].dport, 12345)
except:
self.logger.error(ppp("invalid packet:", capture[0]))
raise
self.syslog_verify(capture[0][Raw].load,
SyslogFacility.local5,
SyslogSeverity.err,
appname,
msgid,
sd2,
None)
if __name__ == '__main__':
unittest.main(testRunner=VppTestRunner)
|
py | b407be651f0a4b6e242f938d0dc1f8e32d9e82dd | class Data():
def shuffle_split_data(self, X, y, train_size=0.7): # X: input data, y:labels, 70% of data for training
import numpy as np
arr_rand = np.random.rand(X.shape[0]) # shuffles data
split = arr_rand < np.percentile(arr_rand, train_size * 100) # takes 70% of data
X_train = np.array(X[split]).T # to reshape the data as required for model
y_train = np.array(y[split]).ravel() # ( m , ) instead of ( m, 1 )
X_test = np.array(X[~split]).T # takes the remaining 30% for testing
y_test = np.array(y[~split]).ravel()
return X_train, y_train, X_test, y_test
def load_data(self, path, label):
import pandas as pd
data = pd.read_csv(path) # read the csv data file
Y = data[[label]] # getting data labels
data.drop([label], inplace=True, axis=1) # dropping the label column
X = data
return self.shuffle_split_data(X, Y) |
py | b407bec18d0384bfb588ea6a45f6c8a385ed9fa2 | """
The MIT License (MIT)
Copyright (c) 2018 Victor Axelsson
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
OR OTHER DEALINGS IN THE SOFTWARE.
"""
import numpy as np
class Wordlist:
def getWordlist(self, data):
items = set()
for row in data:
for col in row:
items.add(col)
wordlist = dict()
reversedList = []
counter = 0
for item in items:
wordlist[item] = counter
reversedList.append(item)
counter += 1
return items, wordlist, reversedList
def getWordlistFromGraph(self, data):
allItems = {}
for k in data:
allItems[k] = 1
for innerKey in data[k]:
allItems[innerKey] = 1
items = np.array(list(allItems.keys()))
reversedList = []
itemsKeys = {}
for i in range(len(items)):
itemsKeys[items[i]] = i
reversedList.append(items[i])
return items, itemsKeys, reversedList |
py | b407becba193d46cbc6e48754b5d98f36922a3ec | import numpy
import rospy
import time
from openai_ros import robot_gazebo_env
from std_msgs.msg import Float64
from sensor_msgs.msg import JointState
from sensor_msgs.msg import Image
from sensor_msgs.msg import LaserScan
from sensor_msgs.msg import PointCloud2
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Twist
from openai_ros.openai_ros_common import ROSLauncher
class TurtleBot2Env(robot_gazebo_env.RobotGazeboEnv):
"""Superclass for all CubeSingleDisk environments.
"""
def __init__(self, ros_ws_abspath):
"""
Initializes a new TurtleBot2Env environment.
        TurtleBot2 doesn't use controller_manager, therefore we won't reset the
        controllers in the standard fashion. For the moment we won't reset them.
        To check any topic we need to have the simulation running; we need to do two things:
        1) Unpause the simulation: without that the stream of data doesn't flow. This is for simulations
        that are paused for whatever reason.
        2) If the simulation was already running for some reason, we need to reset the controllers.
        This has to do with the fact that some plugins with tf don't understand the reset of the simulation
        and need to be reset to work properly.
        The Sensors: The sensors accessible are the ones considered useful for AI learning.
Sensor Topic List:
* /odom : Odometry readings of the Base of the Robot
* /camera/depth/image_raw: 2d Depth image of the depth sensor.
* /camera/depth/points: Pointcloud sensor readings
* /camera/rgb/image_raw: RGB camera
* /kobuki/laser/scan: Laser Readings
Actuators Topic List: /cmd_vel,
Args:
"""
rospy.logdebug("Start TurtleBot2Env INIT...")
# Variables that we give through the constructor.
# None in this case
# We launch the ROSlaunch that spawns the robot into the world
ROSLauncher(rospackage_name="turtlebot_gazebo",
launch_file_name="put_robot_in_world.launch",
ros_ws_abspath=ros_ws_abspath)
# Internal Vars
        # Doesn't have any accessible controllers
        self.controllers_list = []
        # It doesn't use a namespace
self.robot_name_space = ""
# We launch the init function of the Parent Class robot_gazebo_env.RobotGazeboEnv
super(TurtleBot2Env, self).__init__(controllers_list=self.controllers_list,
robot_name_space=self.robot_name_space,
reset_controls=False,
start_init_physics_parameters=False,
reset_world_or_sim="WORLD")
self.gazebo.unpauseSim()
#self.controllers_object.reset_controllers()
self._check_all_sensors_ready()
# We Start all the ROS related Subscribers and publishers
rospy.Subscriber("/odom", Odometry, self._odom_callback)
#rospy.Subscriber("/camera/depth/image_raw", Image, self._camera_depth_image_raw_callback)
#rospy.Subscriber("/camera/depth/points", PointCloud2, self._camera_depth_points_callback)
#rospy.Subscriber("/camera/rgb/image_raw", Image, self._camera_rgb_image_raw_callback)
rospy.Subscriber("/kobuki/laser/scan", LaserScan, self._laser_scan_callback)
self._cmd_vel_pub = rospy.Publisher('/cmd_vel', Twist, queue_size=1)
self._check_publishers_connection()
self.gazebo.pauseSim()
rospy.logdebug("Finished TurtleBot2Env INIT...")
# Methods needed by the RobotGazeboEnv
# ----------------------------
def _check_all_systems_ready(self):
"""
Checks that all the sensors, publishers and other simulation systems are
operational.
"""
self._check_all_sensors_ready()
return True
    # TurtleBot2Env virtual methods
# ----------------------------
def _check_all_sensors_ready(self):
rospy.logdebug("START ALL SENSORS READY")
self._check_odom_ready()
# We dont need to check for the moment, takes too long
#self._check_camera_depth_image_raw_ready()
#self._check_camera_depth_points_ready()
#self._check_camera_rgb_image_raw_ready()
self._check_laser_scan_ready()
rospy.logdebug("ALL SENSORS READY")
def _check_odom_ready(self):
self.odom = None
rospy.logdebug("Waiting for /odom to be READY...")
while self.odom is None and not rospy.is_shutdown():
try:
self.odom = rospy.wait_for_message("/odom", Odometry, timeout=5.0)
rospy.logdebug("Current /odom READY=>")
except:
rospy.logerr("Current /odom not ready yet, retrying for getting odom")
return self.odom
def _check_camera_depth_image_raw_ready(self):
self.camera_depth_image_raw = None
rospy.logdebug("Waiting for /camera/depth/image_raw to be READY...")
while self.camera_depth_image_raw is None and not rospy.is_shutdown():
try:
self.camera_depth_image_raw = rospy.wait_for_message("/camera/depth/image_raw", Image, timeout=5.0)
rospy.logdebug("Current /camera/depth/image_raw READY=>")
except:
rospy.logerr("Current /camera/depth/image_raw not ready yet, retrying for getting camera_depth_image_raw")
return self.camera_depth_image_raw
def _check_camera_depth_points_ready(self):
self.camera_depth_points = None
rospy.logdebug("Waiting for /camera/depth/points to be READY...")
while self.camera_depth_points is None and not rospy.is_shutdown():
try:
self.camera_depth_points = rospy.wait_for_message("/camera/depth/points", PointCloud2, timeout=10.0)
rospy.logdebug("Current /camera/depth/points READY=>")
except:
rospy.logerr("Current /camera/depth/points not ready yet, retrying for getting camera_depth_points")
return self.camera_depth_points
def _check_camera_rgb_image_raw_ready(self):
self.camera_rgb_image_raw = None
rospy.logdebug("Waiting for /camera/rgb/image_raw to be READY...")
while self.camera_rgb_image_raw is None and not rospy.is_shutdown():
try:
self.camera_rgb_image_raw = rospy.wait_for_message("/camera/rgb/image_raw", Image, timeout=5.0)
rospy.logdebug("Current /camera/rgb/image_raw READY=>")
except:
rospy.logerr("Current /camera/rgb/image_raw not ready yet, retrying for getting camera_rgb_image_raw")
return self.camera_rgb_image_raw
def _check_laser_scan_ready(self):
self.laser_scan = None
rospy.logdebug("Waiting for /kobuki/laser/scan to be READY...")
while self.laser_scan is None and not rospy.is_shutdown():
try:
self.laser_scan = rospy.wait_for_message("/kobuki/laser/scan", LaserScan, timeout=5.0)
rospy.logdebug("Current /kobuki/laser/scan READY=>")
except:
rospy.logerr("Current /kobuki/laser/scan not ready yet, retrying for getting laser_scan")
return self.laser_scan
def _odom_callback(self, data):
self.odom = data
def _camera_depth_image_raw_callback(self, data):
self.camera_depth_image_raw = data
def _camera_depth_points_callback(self, data):
self.camera_depth_points = data
def _camera_rgb_image_raw_callback(self, data):
self.camera_rgb_image_raw = data
def _laser_scan_callback(self, data):
self.laser_scan = data
def _check_publishers_connection(self):
"""
Checks that all the publishers are working
:return:
"""
rate = rospy.Rate(10) # 10hz
while self._cmd_vel_pub.get_num_connections() == 0 and not rospy.is_shutdown():
rospy.logdebug("No susbribers to _cmd_vel_pub yet so we wait and try again")
try:
rate.sleep()
except rospy.ROSInterruptException:
                # This is to avoid errors when the world is reset and time goes backwards.
pass
rospy.logdebug("_cmd_vel_pub Publisher Connected")
rospy.logdebug("All Publishers READY")
# Methods that the TrainingEnvironment will need to define here as virtual
# because they will be used in RobotGazeboEnv GrandParentClass and defined in the
# TrainingEnvironment.
# ----------------------------
def _set_init_pose(self):
"""Sets the Robot in its init pose
"""
raise NotImplementedError()
def _init_env_variables(self):
"""Inits variables needed to be initialised each time we reset at the start
of an episode.
"""
raise NotImplementedError()
def _compute_reward(self, observations, done):
"""Calculates the reward to give based on the observations given.
"""
raise NotImplementedError()
def _set_action(self, action):
"""Applies the given action to the simulation.
"""
raise NotImplementedError()
def _get_obs(self):
raise NotImplementedError()
def _is_done(self, observations):
"""Checks if episode done based on observations given.
"""
raise NotImplementedError()
# Methods that the TrainingEnvironment will need.
# ----------------------------
def move_base(self, linear_speed, angular_speed, epsilon=0.05, update_rate=10, min_laser_distance=-1):
"""
It will move the base based on the linear and angular speeds given.
        It will wait until those twists are achieved, reading from the odometry topic.
:param linear_speed: Speed in the X axis of the robot base frame
:param angular_speed: Speed of the angular turning of the robot base frame
:param epsilon: Acceptable difference between the speed asked and the odometry readings
:param update_rate: Rate at which we check the odometry.
:return:
"""
cmd_vel_value = Twist()
cmd_vel_value.linear.x = linear_speed
cmd_vel_value.angular.z = angular_speed
rospy.logdebug("TurtleBot2 Base Twist Cmd>>" + str(cmd_vel_value))
self._check_publishers_connection()
self._cmd_vel_pub.publish(cmd_vel_value)
time.sleep(0.2)
#time.sleep(0.02)
"""
self.wait_until_twist_achieved(cmd_vel_value,
epsilon,
update_rate,
min_laser_distance)
"""
def wait_until_twist_achieved(self, cmd_vel_value, epsilon, update_rate, min_laser_distance=-1):
"""
We wait for the cmd_vel twist given to be reached by the robot reading
from the odometry.
:param cmd_vel_value: Twist we want to wait to reach.
:param epsilon: Error acceptable in odometry readings.
:param update_rate: Rate at which we check the odometry.
:return:
"""
rospy.logwarn("START wait_until_twist_achieved...")
rate = rospy.Rate(update_rate)
start_wait_time = rospy.get_rostime().to_sec()
end_wait_time = 0.0
epsilon = 0.05
rospy.logdebug("Desired Twist Cmd>>" + str(cmd_vel_value))
rospy.logdebug("epsilon>>" + str(epsilon))
linear_speed = cmd_vel_value.linear.x
angular_speed = cmd_vel_value.angular.z
linear_speed_plus = linear_speed + epsilon
linear_speed_minus = linear_speed - epsilon
angular_speed_plus = angular_speed + epsilon
angular_speed_minus = angular_speed - epsilon
while not rospy.is_shutdown():
crashed_into_something = self.has_crashed(min_laser_distance)
current_odometry = self._check_odom_ready()
odom_linear_vel = current_odometry.twist.twist.linear.x
odom_angular_vel = current_odometry.twist.twist.angular.z
rospy.logdebug("Linear VEL=" + str(odom_linear_vel) + ", ?RANGE=[" + str(linear_speed_minus) + "," + str(linear_speed_plus)+"]")
rospy.logdebug("Angular VEL=" + str(odom_angular_vel) + ", ?RANGE=[" + str(angular_speed_minus) + "," + str(angular_speed_plus)+"]")
linear_vel_are_close = (odom_linear_vel <= linear_speed_plus) and (odom_linear_vel > linear_speed_minus)
angular_vel_are_close = (odom_angular_vel <= angular_speed_plus) and (odom_angular_vel > angular_speed_minus)
if linear_vel_are_close and angular_vel_are_close:
rospy.logwarn("Reached Velocity!")
end_wait_time = rospy.get_rostime().to_sec()
break
if crashed_into_something:
rospy.logerr("TurtleBot has crashed, stopping movement!")
break
rospy.logwarn("Not there yet, keep waiting...")
rate.sleep()
        delta_time = end_wait_time - start_wait_time
rospy.logdebug("[Wait Time=" + str(delta_time)+"]")
rospy.logwarn("END wait_until_twist_achieved...")
return delta_time
def has_crashed(self, min_laser_distance):
"""
It states based on the laser scan if the robot has crashed or not.
Crashed means that the minimum laser reading is lower than the
min_laser_distance value given.
        If min_laser_distance == -1, it always returns False, because it's the way
to deactivate this check.
"""
robot_has_crashed = False
if min_laser_distance != -1:
laser_data = self.get_laser_scan()
for i, item in enumerate(laser_data.ranges):
if item == float ('Inf') or numpy.isinf(item):
pass
elif numpy.isnan(item):
pass
else:
# Has a Non Infinite or Nan Value
if (item < min_laser_distance):
rospy.logerr("TurtleBot HAS CRASHED >>> item=" + str(item) + "< " + str(min_laser_distance))
robot_has_crashed = True
break
return robot_has_crashed
def get_odom(self):
return self.odom
def get_camera_depth_image_raw(self):
return self.camera_depth_image_raw
def get_camera_depth_points(self):
return self.camera_depth_points
def get_camera_rgb_image_raw(self):
return self.camera_rgb_image_raw
def get_laser_scan(self):
return self.laser_scan
def reinit_sensors(self):
"""
        This method is for the tasks so that when resetting the episode
        the sensor values are forced to be updated with the real data.
"""
|
py | b407bef0e724709308b6eb2a4e53ef955d42ed6d | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright European Organization for Nuclear Research (CERN) since 2012
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Run this once to set up the database.
# PYTHONPATH=/opt/rucio/.venv/lib/python2.7/site-packages/rucio python tools/bootstrap.py
#
# Verify for default SQLite:
# for i in `sqlite3 /tmp/rucio.db ".tables"`; do echo $i:; sqlite3 /tmp/rucio.db "select * from $i"; echo; done
import sys
import os.path
base_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(base_path)
os.chdir(base_path)
from rucio.db.sqla.util import (build_database, create_base_vo, create_root_account) # noqa: E402
if __name__ == '__main__':
build_database()
create_base_vo()
create_root_account()
|
py | b407bf060f4432cbb32dcfc79308b9572be80a1b | """
Asynchronous Shared-Memory Scheduler for Dask Graphs.
This scheduler coordinates several workers to execute tasks in a dask graph in
parallel. It depends on a ``concurrent.futures.Executor``
and a corresponding Queue for worker-to-scheduler communication.
It tries to execute tasks in an order which maintains a small memory footprint
throughout execution. It does this by running tasks that allow us to release
data resources.
Task Selection Policy
=====================
When we complete a task we add more data in to our set of available data; this
new data makes new tasks available. We preferentially choose tasks that were
just made available in a last-in-first-out fashion. We implement this as a
simple stack. This results in more depth-first rather than breadth first
behavior which encourages us to process batches of data to completion before
starting in on new data when possible.
When the addition of new data readies multiple tasks simultaneously we add
tasks to the stack in sorted order so that tasks with greater keynames are run
first. This can be handy to break ties in a predictable fashion.
State
=====
Many functions pass around a ``state`` variable that holds the current state of
the computation. This variable consists of several other dictionaries and
sets, explained below.
Constant state
--------------
1. dependencies: {x: [a, b ,c]} a,b,c, must be run before x
2. dependents: {a: [x, y]} a must run before x or y
Changing state
--------------
### Data
1. cache: available concrete data. {key: actual-data}
2. released: data that we've seen, used, and released because it is no longer
needed
### Jobs
1. ready: A fifo stack of ready-to-run tasks
2. running: A set of tasks currently in execution
3. finished: A set of finished tasks
4. waiting: which tasks are still waiting on others :: {key: {keys}}
Real-time equivalent of dependencies
5. waiting_data: available data to yet-to-be-run-tasks :: {key: {keys}}
Real-time equivalent of dependents
Examples
--------
>>> import pprint # doctest: +SKIP
>>> dsk = {'x': 1, 'y': 2, 'z': (inc, 'x'), 'w': (add, 'z', 'y')} # doctest: +SKIP
>>> pprint.pprint(start_state_from_dask(dsk)) # doctest: +SKIP
{'cache': {'x': 1, 'y': 2},
'dependencies': {'w': {'z', 'y'}, 'x': set(), 'y': set(), 'z': {'x'}},
'dependents': defaultdict(None, {'w': set(), 'x': {'z'}, 'y': {'w'}, 'z': {'w'}}),
'finished': set(),
'ready': ['z'],
'released': set(),
'running': set(),
'waiting': {'w': {'z'}},
'waiting_data': {'x': {'z'}, 'y': {'w'}, 'z': {'w'}}}
Optimizations
=============
We build this scheduler with out-of-core array operations in mind. To this end
we have encoded some particular optimizations.
Compute to release data
-----------------------
When we choose a new task to execute we often have many options. Policies at
this stage are cheap and can significantly impact performance. One could
imagine policies that expose parallelism, drive towards a particular output,
etc..
Our current policy is to run tasks that were most recently made available.
Inlining computations
---------------------
We hold on to intermediate computations either in memory or on disk.
For very cheap computations that may emit new copies of the data, like
``np.transpose`` or possibly even ``x + 1`` we choose not to store these as
separate pieces of data / tasks. Instead we combine them with the computations
that require them. This may result in repeated computation but saves
significantly on space and computation complexity.
See the function ``inline_functions`` for more information.
"""
import os
from concurrent.futures import Executor, Future
from functools import partial
from queue import Empty, Queue
from . import config
from .callbacks import local_callbacks, unpack_callbacks
from .core import _execute_task, flatten, get_dependencies, has_tasks, reverse_dict
from .order import order
from .utils_test import add, inc # noqa: F401
if os.name == "nt":
# Python 3 windows Queue.get doesn't handle interrupts properly. To
# workaround this we poll at a sufficiently large interval that it
# shouldn't affect performance, but small enough that users trying to kill
# an application shouldn't care.
def queue_get(q):
while True:
try:
return q.get(block=True, timeout=0.1)
except Empty:
pass
else:
def queue_get(q):
return q.get()
DEBUG = False
def start_state_from_dask(dsk, cache=None, sortkey=None):
"""Start state from a dask
Examples
--------
>>> dsk = {'x': 1, 'y': 2, 'z': (inc, 'x'), 'w': (add, 'z', 'y')} # doctest: +SKIP
>>> from pprint import pprint # doctest: +SKIP
>>> pprint(start_state_from_dask(dsk)) # doctest: +SKIP
{'cache': {'x': 1, 'y': 2},
'dependencies': {'w': {'z', 'y'}, 'x': set(), 'y': set(), 'z': {'x'}},
'dependents': defaultdict(None, {'w': set(), 'x': {'z'}, 'y': {'w'}, 'z': {'w'}}),
'finished': set(),
'ready': ['z'],
'released': set(),
'running': set(),
'waiting': {'w': {'z'}},
'waiting_data': {'x': {'z'}, 'y': {'w'}, 'z': {'w'}}}
"""
if sortkey is None:
sortkey = order(dsk).get
if cache is None:
cache = config.get("cache", None)
if cache is None:
cache = dict()
data_keys = set()
for k, v in dsk.items():
if not has_tasks(dsk, v):
cache[k] = v
data_keys.add(k)
dsk2 = dsk.copy()
dsk2.update(cache)
dependencies = {k: get_dependencies(dsk2, k) for k in dsk}
waiting = {k: v.copy() for k, v in dependencies.items() if k not in data_keys}
dependents = reverse_dict(dependencies)
for a in cache:
for b in dependents.get(a, ()):
waiting[b].remove(a)
waiting_data = {k: v.copy() for k, v in dependents.items() if v}
ready_set = {k for k, v in waiting.items() if not v}
ready = sorted(ready_set, key=sortkey, reverse=True)
waiting = {k: v for k, v in waiting.items() if v}
state = {
"dependencies": dependencies,
"dependents": dependents,
"waiting": waiting,
"waiting_data": waiting_data,
"cache": cache,
"ready": ready,
"running": set(),
"finished": set(),
"released": set(),
}
return state
"""
Running tasks
-------------
When we execute tasks we both
1. Perform the actual work of collecting the appropriate data and calling the function
2. Manage administrative state to coordinate with the scheduler
"""
def execute_task(key, task_info, dumps, loads, get_id, pack_exception):
"""
Compute task and handle all administration
See Also
--------
_execute_task : actually execute task
"""
try:
task, data = loads(task_info)
result = _execute_task(task, data)
id = get_id()
result = dumps((result, id))
failed = False
except BaseException as e:
result = pack_exception(e, dumps)
failed = True
return key, result, failed
def batch_execute_tasks(it):
"""
Batch computing of multiple tasks with `execute_task`
"""
return [execute_task(*a) for a in it]
def release_data(key, state, delete=True):
"""Remove data from temporary storage
See Also
--------
finish_task
"""
if key in state["waiting_data"]:
assert not state["waiting_data"][key]
del state["waiting_data"][key]
state["released"].add(key)
if delete:
del state["cache"][key]
def finish_task(
dsk, key, state, results, sortkey, delete=True, release_data=release_data
):
"""
Update execution state after a task finishes
Mutates. This should run atomically (with a lock).
"""
for dep in sorted(state["dependents"][key], key=sortkey, reverse=True):
s = state["waiting"][dep]
s.remove(key)
if not s:
del state["waiting"][dep]
state["ready"].append(dep)
for dep in state["dependencies"][key]:
if dep in state["waiting_data"]:
s = state["waiting_data"][dep]
s.remove(key)
if not s and dep not in results:
if DEBUG:
from chest.core import nbytes
print(
"Key: %s\tDep: %s\t NBytes: %.2f\t Release"
                        % (key, dep, sum(map(nbytes, state["cache"].values())) / 1e6)
)
release_data(dep, state, delete=delete)
elif delete and dep not in results:
release_data(dep, state, delete=delete)
state["finished"].add(key)
state["running"].remove(key)
return state
def nested_get(ind, coll):
"""Get nested index from collection
Examples
--------
>>> nested_get(1, 'abc')
'b'
>>> nested_get([1, 0], 'abc')
('b', 'a')
>>> nested_get([[1, 0], [0, 1]], 'abc')
(('b', 'a'), ('a', 'b'))
"""
if isinstance(ind, list):
return tuple(nested_get(i, coll) for i in ind)
else:
return coll[ind]
def default_get_id():
"""Default get_id"""
return None
def default_pack_exception(e, dumps):
raise
def reraise(exc, tb=None):
if exc.__traceback__ is not tb:
raise exc.with_traceback(tb)
raise exc
def identity(x):
"""Identity function. Returns x.
>>> identity(3)
3
"""
return x
"""
Task Selection
--------------
We often have a choice among many tasks to run next. This choice is both
cheap and can significantly impact performance.
We currently select tasks that have most recently been made ready. We hope that
this last-in-first-out policy reduces memory footprint.
"""
"""
`get`
-----
The main function of the scheduler. Get is the main entry point.
"""
def get_async(
submit,
num_workers,
dsk,
result,
cache=None,
get_id=default_get_id,
rerun_exceptions_locally=None,
pack_exception=default_pack_exception,
raise_exception=reraise,
callbacks=None,
dumps=identity,
loads=identity,
chunksize=None,
**kwargs,
):
"""Asynchronous get function
This is a general version of various asynchronous schedulers for dask. It
takes a ``concurrent.futures.Executor.submit`` function to form a more
specific ``get`` method that walks through the dask array with parallel
workers, avoiding repeat computation and minimizing memory use.
Parameters
----------
submit : function
A ``concurrent.futures.Executor.submit`` function
num_workers : int
The number of workers that task submissions can be spread over
dsk : dict
A dask dictionary specifying a workflow
result : key or list of keys
Keys corresponding to desired data
cache : dict-like, optional
Temporary storage of results
get_id : callable, optional
Function to return the worker id, takes no arguments. Examples are
`threading.current_thread` and `multiprocessing.current_process`.
rerun_exceptions_locally : bool, optional
Whether to rerun failing tasks in local process to enable debugging
(False by default)
pack_exception : callable, optional
Function to take an exception and ``dumps`` method, and return a
serialized tuple of ``(exception, traceback)`` to send back to the
scheduler. Default is to just raise the exception.
raise_exception : callable, optional
Function that takes an exception and a traceback, and raises an error.
callbacks : tuple or list of tuples, optional
Callbacks are passed in as tuples of length 5. Multiple sets of
callbacks may be passed in as a list of tuples. For more information,
see the dask.diagnostics documentation.
dumps: callable, optional
Function to serialize task data and results to communicate between
worker and parent. Defaults to identity.
loads: callable, optional
Inverse function of `dumps`. Defaults to identity.
chunksize: int, optional
Size of chunks to use when dispatching work. Defaults to 1.
If -1, will be computed to evenly divide ready work across workers.
See Also
--------
threaded.get
"""
chunksize = chunksize or config.get("chunksize", 1)
queue = Queue()
if isinstance(result, list):
result_flat = set(flatten(result))
else:
result_flat = {result}
results = set(result_flat)
dsk = dict(dsk)
with local_callbacks(callbacks) as callbacks:
_, _, pretask_cbs, posttask_cbs, _ = unpack_callbacks(callbacks)
started_cbs = []
succeeded = False
# if start_state_from_dask fails, we will have something
# to pass to the final block.
state = {}
try:
for cb in callbacks:
if cb[0]:
cb[0](dsk)
started_cbs.append(cb)
keyorder = order(dsk)
state = start_state_from_dask(dsk, cache=cache, sortkey=keyorder.get)
for _, start_state, _, _, _ in callbacks:
if start_state:
start_state(dsk, state)
if rerun_exceptions_locally is None:
rerun_exceptions_locally = config.get("rerun_exceptions_locally", False)
if state["waiting"] and not state["ready"]:
raise ValueError("Found no accessible jobs in dask")
def fire_tasks(chunksize):
"""Fire off a task to the thread pool"""
# Determine chunksize and/or number of tasks to submit
nready = len(state["ready"])
if chunksize == -1:
ntasks = nready
chunksize = -(ntasks // -num_workers)
else:
used_workers = -(len(state["running"]) // -chunksize)
avail_workers = max(num_workers - used_workers, 0)
ntasks = min(nready, chunksize * avail_workers)
# Prep all ready tasks for submission
args = []
for _ in range(ntasks):
# Get the next task to compute (most recently added)
key = state["ready"].pop()
# Notify task is running
state["running"].add(key)
for f in pretask_cbs:
f(key, dsk, state)
# Prep args to send
data = {
dep: state["cache"][dep] for dep in get_dependencies(dsk, key)
}
args.append(
(
key,
dumps((dsk[key], data)),
dumps,
loads,
get_id,
pack_exception,
)
)
# Batch submit
for i in range(-(len(args) // -chunksize)):
each_args = args[i * chunksize : (i + 1) * chunksize]
if not each_args:
break
fut = submit(batch_execute_tasks, each_args)
fut.add_done_callback(queue.put)
# Main loop, wait on tasks to finish, insert new ones
while state["waiting"] or state["ready"] or state["running"]:
fire_tasks(chunksize)
for key, res_info, failed in queue_get(queue).result():
if failed:
exc, tb = loads(res_info)
if rerun_exceptions_locally:
data = {
dep: state["cache"][dep]
for dep in get_dependencies(dsk, key)
}
task = dsk[key]
_execute_task(task, data) # Re-execute locally
else:
raise_exception(exc, tb)
res, worker_id = loads(res_info)
state["cache"][key] = res
finish_task(dsk, key, state, results, keyorder.get)
for f in posttask_cbs:
f(key, res, dsk, state, worker_id)
succeeded = True
finally:
for _, _, _, _, finish in started_cbs:
if finish:
finish(dsk, state, not succeeded)
return nested_get(result, state["cache"])
""" Synchronous concrete version of get_async
Usually we supply a ``concurrent.futures.Executor``. Here we provide a
sequential one. This is useful for debugging and for code dominated by the
GIL
"""
class SynchronousExecutor(Executor):
_max_workers = 1
def submit(self, fn, *args, **kwargs):
fut = Future()
try:
fut.set_result(fn(*args, **kwargs))
except BaseException as e:
fut.set_exception(e)
return fut
synchronous_executor = SynchronousExecutor()
def get_sync(dsk, keys, **kwargs):
"""A naive synchronous version of get_async
Can be useful for debugging.
"""
kwargs.pop("num_workers", None) # if num_workers present, remove it
return get_async(
synchronous_executor.submit,
synchronous_executor._max_workers,
dsk,
keys,
**kwargs,
)
""" Adaptor for ``multiprocessing.Pool`` instances
Usually we supply a ``concurrent.futures.Executor``. Here we provide a wrapper
class for ``multiprocessing.Pool`` instances so we can treat them like
``concurrent.futures.Executor`` instances instead.
This is mainly useful for legacy use cases or users that prefer
``multiprocessing.Pool``.
"""
class MultiprocessingPoolExecutor(Executor):
def __init__(self, pool):
self.pool = pool
self._max_workers = len(pool._pool)
def submit(self, fn, *args, **kwargs):
return submit_apply_async(self.pool.apply_async, fn, *args, **kwargs)
def submit_apply_async(apply_async, fn, *args, **kwargs):
fut = Future()
apply_async(fn, args, kwargs, fut.set_result, fut.set_exception)
return fut
def get_apply_async(apply_async, num_workers, *args, **kwargs):
return get_async(
partial(submit_apply_async, apply_async), num_workers, *args, **kwargs
)
def sortkey(item):
"""Sorting key function that is robust to different types
Both strings and tuples are common key types in dask graphs.
However In Python 3 one can not compare strings with tuples directly.
This function maps many types to a form where they can be compared
Examples
--------
>>> sortkey('Hello')
('str', 'Hello')
>>> sortkey(('x', 1))
('tuple', ('x', 1))
"""
return (type(item).__name__, item)
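# Illustrative usage sketch (not part of the original module): a tiny graph run
# through the synchronous scheduler defined above. `inc` and `add` are the
# helpers already imported from utils_test at the top of this file; note the
# module is normally imported rather than executed directly.
if __name__ == "__main__":
    example_dsk = {"x": 1, "y": (inc, "x"), "z": (add, "y", 10)}
    print(get_sync(example_dsk, "z"))  # inc(1) = 2, then add(2, 10) -> 12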
|
py | b407bf56fd6c6fb34801296c7eab14a9426424cf | import re
from typing import Any, Dict, List, Optional, Tuple
from django.conf import settings
from django.utils import timezone
from ee.clickhouse.client import sync_execute
from ee.clickhouse.models.cohort import format_filter_query
from ee.clickhouse.models.util import is_int, is_json
from ee.clickhouse.sql.events import SELECT_PROP_VALUES_SQL, SELECT_PROP_VALUES_SQL_WITH_FILTER
from ee.clickhouse.sql.person import GET_DISTINCT_IDS_BY_PROPERTY_SQL
from posthog.models.cohort import Cohort
from posthog.models.event import Selector
from posthog.models.property import Property
from posthog.models.team import Team
from posthog.utils import is_valid_regex, relative_date_parse
def parse_prop_clauses(
filters: List[Property],
team_id: Optional[int],
prepend: str = "global",
table_name: str = "",
allow_denormalized_props: bool = False,
filter_test_accounts=False,
is_person_query=False,
) -> Tuple[str, Dict]:
final = []
params: Dict[str, Any] = {}
if team_id is not None:
params["team_id"] = team_id
if table_name != "":
table_name += "."
if filter_test_accounts:
test_account_filters = Team.objects.only("test_account_filters").get(id=team_id).test_account_filters
filters.extend([Property(**prop) for prop in test_account_filters])
for idx, prop in enumerate(filters):
if prop.type == "cohort":
cohort = Cohort.objects.get(pk=prop.value, team_id=team_id)
person_id_query, cohort_filter_params = format_filter_query(cohort)
params = {**params, **cohort_filter_params}
final.append(
"AND {table_name}distinct_id IN ({clause})".format(table_name=table_name, clause=person_id_query)
)
elif prop.type == "person":
filter_query, filter_params = prop_filter_json_extract(
prop, idx, "{}person".format(prepend), allow_denormalized_props=allow_denormalized_props
)
if is_person_query:
final.append(filter_query)
params.update(filter_params)
else:
final.append(
"AND {table_name}distinct_id IN ({filter_query})".format(
filter_query=GET_DISTINCT_IDS_BY_PROPERTY_SQL.format(filters=filter_query),
table_name=table_name,
)
)
params.update(filter_params)
elif prop.type == "element":
query, filter_params = filter_element({prop.key: prop.value}, prepend="{}_".format(idx))
final.append("AND {}".format(query[0]))
params.update(filter_params)
else:
filter_query, filter_params = prop_filter_json_extract(
prop,
idx,
prepend,
prop_var="{}properties".format(table_name),
allow_denormalized_props=allow_denormalized_props,
)
final.append(f"{filter_query} AND {table_name}team_id = %(team_id)s" if team_id else filter_query)
params.update(filter_params)
return " ".join(final), params
def prop_filter_json_extract(
prop: Property, idx: int, prepend: str = "", prop_var: str = "properties", allow_denormalized_props: bool = False
) -> Tuple[str, Dict[str, Any]]:
# Once all queries are migrated over we can get rid of allow_denormalized_props
is_denormalized = prop.key.lower() in settings.CLICKHOUSE_DENORMALIZED_PROPERTIES and allow_denormalized_props
json_extract = "trim(BOTH '\"' FROM JSONExtractRaw({prop_var}, %(k{prepend}_{idx})s))".format(
idx=idx, prepend=prepend, prop_var=prop_var
)
denormalized = "properties_{}".format(prop.key.lower())
operator = prop.operator
params: Dict[str, Any] = {}
if operator == "is_not":
params = {"k{}_{}".format(prepend, idx): prop.key, "v{}_{}".format(prepend, idx): box_value(prop.value)}
return (
"AND NOT has(%(v{prepend}_{idx})s, {left})".format(
idx=idx, prepend=prepend, left=denormalized if is_denormalized else json_extract
),
params,
)
elif operator == "icontains":
value = "%{}%".format(prop.value)
params = {"k{}_{}".format(prepend, idx): prop.key, "v{}_{}".format(prepend, idx): value}
return (
"AND {left} LIKE %(v{prepend}_{idx})s".format(
idx=idx, prepend=prepend, left=denormalized if is_denormalized else json_extract
),
params,
)
elif operator == "not_icontains":
value = "%{}%".format(prop.value)
params = {"k{}_{}".format(prepend, idx): prop.key, "v{}_{}".format(prepend, idx): value}
return (
"AND NOT ({left} LIKE %(v{prepend}_{idx})s)".format(
idx=idx, prepend=prepend, left=denormalized if is_denormalized else json_extract
),
params,
)
elif operator in ("regex", "not_regex"):
if not is_valid_regex(prop.value):
return "AND 1 = 2", {}
params = {"k{}_{}".format(prepend, idx): prop.key, "v{}_{}".format(prepend, idx): prop.value}
return (
"AND {regex_function}({left}, %(v{prepend}_{idx})s)".format(
regex_function="match" if operator == "regex" else "NOT match",
idx=idx,
prepend=prepend,
left=denormalized if is_denormalized else json_extract,
),
params,
)
elif operator == "is_set":
params = {"k{}_{}".format(prepend, idx): prop.key, "v{}_{}".format(prepend, idx): prop.value}
if is_denormalized:
return (
"AND NOT isNull({left})".format(left=denormalized),
params,
)
return (
"AND JSONHas({prop_var}, %(k{prepend}_{idx})s)".format(idx=idx, prepend=prepend, prop_var=prop_var),
params,
)
elif operator == "is_not_set":
params = {"k{}_{}".format(prepend, idx): prop.key, "v{}_{}".format(prepend, idx): prop.value}
if is_denormalized:
return (
"AND isNull({left})".format(left=denormalized),
params,
)
return (
"AND (isNull({left}) OR NOT JSONHas({prop_var}, %(k{prepend}_{idx})s))".format(
idx=idx, prepend=prepend, prop_var=prop_var, left=json_extract
),
params,
)
elif operator == "gt":
params = {"k{}_{}".format(prepend, idx): prop.key, "v{}_{}".format(prepend, idx): prop.value}
return (
"AND toInt64OrNull(trim(BOTH '\"' FROM replaceRegexpAll({left}, ' ', ''))) > %(v{prepend}_{idx})s".format(
idx=idx,
prepend=prepend,
left=denormalized
if is_denormalized
else "visitParamExtractRaw({prop_var}, %(k{prepend}_{idx})s)".format(
idx=idx, prepend=prepend, prop_var=prop_var,
),
),
params,
)
elif operator == "lt":
params = {"k{}_{}".format(prepend, idx): prop.key, "v{}_{}".format(prepend, idx): prop.value}
return (
"AND toInt64OrNull(trim(BOTH '\"' FROM replaceRegexpAll({left}, ' ', ''))) < %(v{prepend}_{idx})s".format(
idx=idx,
prepend=prepend,
left=denormalized
if is_denormalized
else "visitParamExtractRaw({prop_var}, %(k{prepend}_{idx})s)".format(
idx=idx, prepend=prepend, prop_var=prop_var,
),
),
params,
)
else:
if is_json(prop.value) and not is_denormalized:
clause = "AND has(%(v{prepend}_{idx})s, replaceRegexpAll(visitParamExtractRaw({prop_var}, %(k{prepend}_{idx})s),' ', ''))"
params = {
"k{}_{}".format(prepend, idx): prop.key,
"v{}_{}".format(prepend, idx): box_value(prop.value, remove_spaces=True),
}
else:
clause = "AND has(%(v{prepend}_{idx})s, {left})"
params = {"k{}_{}".format(prepend, idx): prop.key, "v{}_{}".format(prepend, idx): box_value(prop.value)}
return (
clause.format(
left=denormalized if is_denormalized else json_extract, idx=idx, prepend=prepend, prop_var=prop_var
),
params,
)
def box_value(value: Any, remove_spaces=False) -> List[Any]:
if not isinstance(value, List):
value = [value]
return [str(value).replace(" ", "") if remove_spaces else str(value) for value in value]
def get_property_values_for_key(key: str, team: Team, value: Optional[str] = None):
parsed_date_from = "AND timestamp >= '{}'".format(relative_date_parse("-7d").strftime("%Y-%m-%d 00:00:00"))
parsed_date_to = "AND timestamp <= '{}'".format(timezone.now().strftime("%Y-%m-%d 23:59:59"))
if value:
return sync_execute(
SELECT_PROP_VALUES_SQL_WITH_FILTER.format(parsed_date_from=parsed_date_from, parsed_date_to=parsed_date_to),
{"team_id": team.pk, "key": key, "value": "%{}%".format(value)},
)
return sync_execute(
SELECT_PROP_VALUES_SQL.format(parsed_date_from=parsed_date_from, parsed_date_to=parsed_date_to),
{"team_id": team.pk, "key": key},
)
def filter_element(filters: Dict, prepend: str = "") -> Tuple[List[str], Dict]:
params = {}
conditions = []
if filters.get("selector"):
or_conditions = []
selectors = filters["selector"] if isinstance(filters["selector"], list) else [filters["selector"]]
for idx, query in enumerate(selectors):
selector = Selector(query, escape_slashes=False)
key = "{}_{}_selector_regex".format(prepend, idx)
params[key] = _create_regex(selector)
or_conditions.append("match(elements_chain, %({})s)".format(key))
if len(or_conditions) > 0:
conditions.append("(" + (" OR ".join(or_conditions)) + ")")
if filters.get("tag_name"):
or_conditions = []
tag_names = filters["tag_name"] if isinstance(filters["tag_name"], list) else [filters["tag_name"]]
for idx, tag_name in enumerate(tag_names):
key = "{}_{}_tag_name_regex".format(prepend, idx)
params[key] = r"(^|;){}(\.|$|;|:)".format(tag_name)
or_conditions.append("match(elements_chain, %({})s)".format(key))
if len(or_conditions) > 0:
conditions.append("(" + (" OR ".join(or_conditions)) + ")")
attributes: Dict[str, List] = {}
for key in ["href", "text"]:
vals = filters.get(key)
        if vals:
attributes[key] = [re.escape(vals)] if isinstance(vals, str) else [re.escape(text) for text in filters[key]]
if len(attributes.keys()) > 0:
or_conditions = []
for key, value_list in attributes.items():
for idx, value in enumerate(value_list):
params["{}_{}_{}_attributes_regex".format(prepend, key, idx)] = ".*?({}).*?".format(
".*?".join(['{}="{}"'.format(key, value)])
)
or_conditions.append("match(elements_chain, %({}_{}_{}_attributes_regex)s)".format(prepend, key, idx))
if len(or_conditions) > 0:
conditions.append("(" + (" OR ".join(or_conditions)) + ")")
return (conditions, params)
def _create_regex(selector: Selector) -> str:
regex = r""
for idx, tag in enumerate(selector.parts):
if tag.data.get("tag_name") and isinstance(tag.data["tag_name"], str):
if tag.data["tag_name"] == "*":
regex += ".+"
else:
regex += tag.data["tag_name"]
if tag.data.get("attr_class__contains"):
regex += r".*?\.{}".format(r"\..*?".join(sorted(tag.data["attr_class__contains"])))
if tag.ch_attributes:
regex += ".*?"
for key, value in sorted(tag.ch_attributes.items()):
regex += '{}="{}".*?'.format(key, value)
regex += r"([-_a-zA-Z0-9\.]*?)?($|;|:([^;^\s]*(;|$|\s)))"
if tag.direct_descendant:
regex += ".*"
return regex
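# Illustrative sketch (not part of the original module): filter_element builds
# regex-based clauses over the serialized elements_chain without touching the
# database. The filter values below are made-up examples and assume the posthog
# Django environment is importable.
if __name__ == "__main__":
    example_conditions, example_params = filter_element({"tag_name": "a", "href": "https://example.com"})
    print(example_conditions)  # e.g. ['(match(elements_chain, %(_0_tag_name_regex)s))', ...]
    print(example_params)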
|
py | b407bf6fdc5ef5bcb237dae467dcff27bb31075d | import os
from quorumtoolbox.constellation import Constellation
from quorumtoolbox.geth import Geth
from quorumtoolbox.ibft import Ibft
from quorumtoolbox.raft import Raft
from quorumtoolbox.utils import enode_utils, bash_utils, node_utils
class QuorumNode:
blockchain_dir_name = 'blockchain'
quorum_node_config_file_name = 'quorum_node_config.sh'
def __init__(self,
context,
address,
rpcaddr,
networkid,
node_state,
private_manager='constellation',
geth_params=None,
consensus_params=None,
private_manager_params=None):
self.base_dir = os.path.join(context, self.blockchain_dir_name)
self.quorum_node_config_file = os.path.join(self.base_dir, self.quorum_node_config_file_name)
self.node_state = node_state
geth_params = {} if geth_params is None else geth_params
consensus_params = {} if consensus_params is None else consensus_params
private_manager_params = {} if private_manager_params is None else private_manager_params
self.geth = Geth(context, address, rpcaddr, networkid, **geth_params)
if node_utils.is_raft_node(self.node_state):
self.consensus = Raft(context, self.geth.enode_id_geth, node_state, **consensus_params)
else:
self.consensus = Ibft(context, self.geth.enode_id_geth, node_state, **consensus_params)
if private_manager.lower() == 'constellation':
self.private_manager = Constellation(context, address, **private_manager_params)
# make the node's enode_id depending on consensus
self._enode_id = self.make_enode_id_from_geth() if node_utils.is_raft_node(self.node_state) else \
self.geth.enode_id_geth
self._ibft_address, self._nodekey = self.geth.ibft_address, self.geth.nodekey
# get launch params from components, combine to launch this node
self.launch_params = bash_utils.make_quorum_node_launch_params([self.geth.launch_parameters,
self.consensus.launch_parameters,
self.private_manager.launch_parameters
])
bash_utils.write_quorum_node_launch_config(self.launch_params, self.quorum_node_config_file)
self.build_config = {
'local': {
'config_file': self.quorum_node_config_file
},
'geth': self.geth.build_configuration,
'consensus': self.consensus.build_configuration,
'private_manager': self.private_manager.build_configuration
}
def make_enode_id_from_geth(self):
return enode_utils.make_enode_id2(self.geth.enode_id_geth,
self.consensus.build_configuration['network']['port'])
@property
def launch_parameters(self):
        return self.launch_params
@property
def build_configuration(self):
return self.build_config
@property
def ibft_address(self):
return self._ibft_address
@property
def enode_id(self):
return self._enode_id
@property
def nodekey(self):
return self._nodekey
@property
def accounts(self):
return self.geth.accounts
# TODO Formalize interface for Consensus (and all other components)
@property
def consensus_id(self):
return self.consensus.joining_id
# TODO Formalize interface for PTM (and all other components)
@property
def ptm_peers(self):
return self.private_manager.ptm_peers
@property
def ptm_url(self):
return self.private_manager.ptm_url
@property
def ptm_address(self):
return self.private_manager.ptm_address
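# Illustrative sketch (not part of the original module): wiring a node together.
# Every value below is a made-up placeholder (including node_state) and the
# constructor performs real file I/O through quorumtoolbox, so this only makes
# sense inside a prepared quorumtoolbox workspace.
if __name__ == '__main__':
    example_node = QuorumNode(context='node1_dir',
                              address='10.0.0.1',
                              rpcaddr='10.0.0.1',
                              networkid=1981,
                              node_state='initial')
    print(example_node.enode_id)
    print(example_node.build_configuration)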
|
py | b407c0039e5960171ab5cef85721b9b7fc22a1f3 | #!/usr/bin/env python
# Hellish Tech
# No commercial usage without authorization
# deluge.password.py <password> <salt>
#
#
import hashlib
import sys
password = sys.argv[1]
salt = sys.argv[2]
s = hashlib.sha1()
s.update(salt.encode('utf-8'))
s.update(password.encode('utf-8'))
print(s.hexdigest())
|
py | b407c04ec93415574f1174879b07d3426fdb25d3 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Label map utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
from six import string_types
from six.moves import range
import tensorflow as tf
from google.protobuf import text_format
# from object_detection.protos import string_int_label_map_pb2
from . import string_int_label_map_pb2
def _validate_label_map(label_map):
"""Checks if a label map is valid.
Args:
label_map: StringIntLabelMap to validate.
Raises:
ValueError: if label map is invalid.
"""
for item in label_map.item:
if item.id < 0:
raise ValueError('Label map ids should be >= 0.')
if (item.id == 0 and item.name != 'background' and
item.display_name != 'background'):
raise ValueError('Label map id 0 is reserved for the background label')
def create_category_index(categories):
"""Creates dictionary of COCO compatible categories keyed by category id.
Args:
categories: a list of dicts, each of which has the following keys:
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name
e.g., 'cat', 'dog', 'pizza'.
Returns:
category_index: a dict containing the same entries as categories, but keyed
by the 'id' field of each category.
"""
category_index = {}
for cat in categories:
category_index[cat['id']] = cat
return category_index
def get_max_label_map_index(label_map):
"""Get maximum index in label map.
Args:
label_map: a StringIntLabelMapProto
Returns:
an integer
"""
return max([item.id for item in label_map.item])
def convert_label_map_to_categories(label_map,
max_num_classes,
use_display_name=True):
"""Given label map proto returns categories list compatible with eval.
This function converts label map proto and returns a list of dicts, each of
which has the following keys:
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name
e.g., 'cat', 'dog', 'pizza'.
We only allow class into the list if its id-label_id_offset is
between 0 (inclusive) and max_num_classes (exclusive).
If there are several items mapping to the same id in the label map,
we will only keep the first one in the categories list.
Args:
label_map: a StringIntLabelMapProto or None. If None, a default categories
list is created with max_num_classes categories.
max_num_classes: maximum number of (consecutive) label indices to include.
use_display_name: (boolean) choose whether to load 'display_name' field as
category name. If False or if the display_name field does not exist, uses
'name' field as category names instead.
Returns:
categories: a list of dictionaries representing all possible categories.
"""
categories = []
list_of_ids_already_added = []
if not label_map:
label_id_offset = 1
for class_id in range(max_num_classes):
categories.append({
'id': class_id + label_id_offset,
'name': 'category_{}'.format(class_id + label_id_offset)
})
return categories
for item in label_map.item:
if not 0 < item.id <= max_num_classes:
logging.info(
'Ignore item %d since it falls outside of requested '
'label range.', item.id)
continue
if use_display_name and item.HasField('display_name'):
name = item.display_name
else:
name = item.name
if item.id not in list_of_ids_already_added:
list_of_ids_already_added.append(item.id)
categories.append({'id': item.id, 'name': name})
return categories
def load_labelmap(path):
"""Loads label map proto.
Args:
path: path to StringIntLabelMap proto text file.
Returns:
a StringIntLabelMapProto
"""
with tf.io.gfile.GFile(path, 'r') as fid:
label_map_string = fid.read()
label_map = string_int_label_map_pb2.StringIntLabelMap()
try:
text_format.Merge(label_map_string, label_map)
except text_format.ParseError:
label_map.ParseFromString(label_map_string)
_validate_label_map(label_map)
return label_map
def get_label_map_dict(label_map_path_or_proto,
use_display_name=False,
fill_in_gaps_and_background=False):
"""Reads a label map and returns a dictionary of label names to id.
Args:
label_map_path_or_proto: path to StringIntLabelMap proto text file or the
proto itself.
use_display_name: whether to use the label map items' display names as keys.
fill_in_gaps_and_background: whether to fill in gaps and background with
respect to the id field in the proto. The id: 0 is reserved for the
'background' class and will be added if it is missing. All other missing
ids in range(1, max(id)) will be added with a dummy class name
("class_<id>") if they are missing.
Returns:
A dictionary mapping label names to id.
Raises:
ValueError: if fill_in_gaps_and_background and label_map has non-integer or
negative values.
"""
if isinstance(label_map_path_or_proto, string_types):
label_map = load_labelmap(label_map_path_or_proto)
else:
_validate_label_map(label_map_path_or_proto)
label_map = label_map_path_or_proto
label_map_dict = {}
for item in label_map.item:
if use_display_name:
label_map_dict[item.display_name] = item.id
else:
label_map_dict[item.name] = item.id
if fill_in_gaps_and_background:
values = set(label_map_dict.values())
if 0 not in values:
label_map_dict['background'] = 0
if not all(isinstance(value, int) for value in values):
      raise ValueError('The values in label map must be integers in order to '
                       'fill_in_gaps_and_background.')
if not all(value >= 0 for value in values):
raise ValueError('The values in the label map must be positive.')
if len(values) != max(values) + 1:
# there are gaps in the labels, fill in gaps.
for value in range(1, max(values)):
if value not in values:
# TODO(rathodv): Add a prefix 'class_' here once the tool to generate
# teacher annotation adds this prefix in the data.
label_map_dict[str(value)] = value
return label_map_dict
def create_categories_from_labelmap(label_map_path, use_display_name=True):
"""Reads a label map and returns categories list compatible with eval.
This function converts label map proto and returns a list of dicts, each of
which has the following keys:
'id': an integer id uniquely identifying this category.
'name': string representing category name e.g., 'cat', 'dog'.
Args:
label_map_path: Path to `StringIntLabelMap` proto text file.
use_display_name: (boolean) choose whether to load 'display_name' field
as category name. If False or if the display_name field does not exist,
uses 'name' field as category names instead.
Returns:
categories: a list of dictionaries representing all possible categories.
"""
label_map = load_labelmap(label_map_path)
max_num_classes = max(item.id for item in label_map.item)
return convert_label_map_to_categories(label_map, max_num_classes,
use_display_name)
def create_category_index_from_labelmap(label_map_path, use_display_name=True):
"""Reads a label map and returns a category index.
Args:
label_map_path: Path to `StringIntLabelMap` proto text file.
use_display_name: (boolean) choose whether to load 'display_name' field
as category name. If False or if the display_name field does not exist,
uses 'name' field as category names instead.
Returns:
A category index, which is a dictionary that maps integer ids to dicts
containing categories, e.g.
{1: {'id': 1, 'name': 'dog'}, 2: {'id': 2, 'name': 'cat'}, ...}
"""
categories = create_categories_from_labelmap(label_map_path, use_display_name)
return create_category_index(categories)
def create_class_agnostic_category_index():
"""Creates a category index with a single `object` class."""
return {1: {'id': 1, 'name': 'object'}}
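# Illustrative sketch (not part of the original module): when no label map is
# supplied, convert_label_map_to_categories falls back to numbered dummy
# categories, which is handy for quick smoke tests.
if __name__ == '__main__':
  print(convert_label_map_to_categories(None, max_num_classes=3))
  # expected: [{'id': 1, 'name': 'category_1'}, {'id': 2, 'name': 'category_2'},
  #            {'id': 3, 'name': 'category_3'}]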
|
py | b407c26162a486d257f15357d632d91504a823bd | import os
import re
#define file path
filePath = os.getcwd()
fileList = []
#define line match for forward && backward
forwardLine = r"^.+layer\d+ (.+) forward cost time: (.+)$"
backwardLine = r"^.+layer\d+ (.+) Backward cost time: (.+)$"
def init(filePath):
recentFileList = os.listdir(filePath)
return recentFileList
if __name__ == '__main__' :
forwardResults = []
backwardResults = []
fileList = init(filePath)
for file in fileList :
wholeFileName = filePath + "/" + file
suffix = ".*log$"
if not re.match(suffix, file):
continue
readFile = open(wholeFileName, "r")
while True:
ln = readFile.readline()
if not ln:
break
forwardMatch = re.match(forwardLine, ln)
if forwardMatch:
forwardResult = [forwardMatch.group(1), forwardMatch.group(2)]
forwardResults.append(forwardResult)
backwardMatch = re.match(backwardLine, ln)
if backwardMatch:
backwardResult = [backwardMatch.group(1), backwardMatch.group(2)]
backwardResults.append(backwardResult)
backwardResults.reverse()
#print forwardResults
#print backwardResults
if not os.path.exists(filePath + "/result") :
os.mkdir(filePath + "/result")
forwardFile = filePath + "/result/" + file.split('.')[0] + "Forward.txt"
backwardFile = filePath + "/result/" + file.split('.')[0] + "Backward.txt"
writeForwardFile = open(forwardFile, "w+")
for forwardResult in forwardResults :
writeForwardFile.write(forwardResult[0].ljust(80,' ') + " " + forwardResult[1] + '\n')
writeForwardFile.close()
writeBackwardFile = open(backwardFile, "w+")
for backwardResult in backwardResults :
writeBackwardFile.write(backwardResult[0].ljust(80,' ') + " " + backwardResult[1] + '\n')
writeBackwardFile.close()
readFile.close()
|
py | b407c2c50f2aecaefcd23efdef75be6b99cdd81f | """
An example call script that runs convergence tests with VASP.
Performs self-consistent runs on the standard silicon structure to converge the
plane-wave cutoff (and, since no k-point mesh is given below, the k-point density).
"""
# pylint: disable=too-many-arguments
import numpy as np
from aiida.common.extendeddicts import AttributeDict
from aiida.orm import Code, Bool, Str
from aiida.plugins import DataFactory, WorkflowFactory
from aiida.engine import submit
from aiida import load_profile
load_profile()
def get_structure():
"""
Set up Si primitive cell
Si
5.431
0.0000000000000000 0.5000000000000000 0.5000000000000000
0.5000000000000000 0.0000000000000000 0.5000000000000000
0.5000000000000000 0.5000000000000000 0.0000000000000000
Si
2
Direct
0.8750000000000000 0.8750000000000000 0.8750000000000000
0.1250000000000000 0.1250000000000000 0.1250000000000000
"""
structure_data = DataFactory('structure')
alat = 5.431
lattice = np.array([[.5, 0, .5], [.5, .5, 0], [0, .5, .5]]) * alat
structure = structure_data(cell=lattice)
for pos_direct in ([0.875, 0.875, 0.875], [0.125, 0.125, 0.125]):
pos_cartesian = np.dot(pos_direct, lattice)
structure.append_atom(position=pos_cartesian, symbols='Si')
return structure
def main(code_string, incar, kmesh, structure, potential_family, potential_mapping, options):
"""Main method to setup the calculation."""
# First, we need to fetch the AiiDA datatypes which will
# house the inputs to our calculation
dict_data = DataFactory('dict')
kpoints_data = DataFactory('array.kpoints')
# Then, we set the workchain you would like to call
workchain = WorkflowFactory('vasp.converge')
# And finally, we declare the options, settings and input containers
settings = AttributeDict()
inputs = AttributeDict()
# Organize settings
settings.parser_settings = {'output_params': ['total_energies', 'maximum_force']}
# Set inputs for the following WorkChain execution
# Set code
inputs.code = Code.get_from_string(code_string)
# Set structure
inputs.structure = structure
# Set k-points grid density
if kmesh:
# Only set it if kmesh is supplied, otherwise run convergence
# tests
kpoints = kpoints_data()
kpoints.set_kpoints_mesh(kmesh)
inputs.kpoints = kpoints
# Set parameters
inputs.parameters = dict_data(dict=incar)
# Set potentials and their mapping
inputs.potential_family = Str(potential_family)
inputs.potential_mapping = dict_data(dict=potential_mapping)
# Set options
inputs.options = dict_data(dict=options)
# Set settings
inputs.settings = dict_data(dict=settings)
    # Set workchain related inputs, in this case, give more explicit output to report
inputs.verbose = Bool(True)
# Convergence and relaxation related parameters that is passed to the convergence
# and relaxation workchain, respectively
    # Turn off final relaxation (after convergence tests)
relax = AttributeDict()
relax.perform = Bool(False)
inputs.relax = relax
# Submit the requested workchain with the supplied inputs
submit(workchain, **inputs)
if __name__ == '__main__':
# Code_string is chosen among the list given by 'verdi code list'
CODE_STRING = 'vasp@mycluster'
# POSCAR equivalent
# Set the silicon structure
STRUCTURE = get_structure()
# INCAR equivalent
# Set input parameters (make sure we do not set ENCUT in order to run convergence tests
# on the plane wave cutoff)
INCAR = {'incar': {'prec': 'NORMAL', 'ediff': 1E-4, 'ialgo': 38, 'ismear': -5, 'sigma': 0.1}}
# KPOINTS equivalent
# Set kpoint mesh
KMESH = []
# POTCAR equivalent
# Potential_family is chosen among the list given by
# 'verdi data vasp-potcar listfamilies'
POTENTIAL_FAMILY = 'pbe'
# The potential mapping selects which potential to use, here we use the standard
# for silicon, this could for instance be {'Si': 'Si_GW'} to use the GW ready
# potential instead
POTENTIAL_MAPPING = {'Si': 'Si'}
# Jobfile equivalent
# In options, we typically set scheduler options.
# See https://aiida.readthedocs.io/projects/aiida-core/en/latest/scheduler/index.html
# AttributeDict is just a special dictionary with the extra benefit that
# you can set and get the key contents with mydict.mykey, instead of mydict['mykey']
OPTIONS = AttributeDict()
OPTIONS.account = ''
OPTIONS.qos = ''
OPTIONS.resources = {'num_machines': 1, 'num_mpiprocs_per_machine': 1}
OPTIONS.queue_name = ''
OPTIONS.max_wallclock_seconds = 86400
OPTIONS.max_memory_kb = 9000000
main(CODE_STRING, INCAR, KMESH, STRUCTURE, POTENTIAL_FAMILY, POTENTIAL_MAPPING, OPTIONS)
|
py | b407c2f1bab07f13e3d63109f63dceb0506e96cc | import asyncio
import logging
import sys
logging.basicConfig(format="{message}",
style='{',
datefmt="%H:%M:%S",
level=logging.DEBUG)
async def handle_connection(reader, writer):
addr = writer.get_extra_info('peername')[0]
logging.debug(f"[{addr}] Connected.")
while True:
try:
data = await reader.readline()
message = data.decode()
if not message:
logging.warning(f"[{addr}] Client sent an empty message, assuming disconnect.")
break
logging.info(f"[{addr}] {message}".strip())
except ConnectionError:
logging.warning(f"[{addr}] Disconnected.")
break
writer.close()
async def main(port: str = "5123"):
server = await asyncio.start_server(handle_connection, '0.0.0.0', int(port))
addrs = ', '.join((lambda x: f"{x[0]}:{x[1]}")(sock.getsockname()) for sock in server.sockets)
logging.debug(f'Serving on {addrs}')
async with server:
await server.serve_forever()
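# --- Editor's note: hedged companion sketch, not part of the original script. ---
# A minimal client that could be used to exercise this log server; it is defined
# but never called here, and the default host/port only mirror the server above.
async def example_send_line(message: str, host: str = "127.0.0.1", port: int = 5123) -> None:
    reader, writer = await asyncio.open_connection(host, port)
    writer.write((message + "\n").encode())  # the server reads one line at a time
    await writer.drain()
    writer.close()
    await writer.wait_closed()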
if __name__ == "__main__":
asyncio.run(main(*sys.argv[1:]))
|
py | b407c31e63095f43f6729d810a97ac6115d1492e | # Python3
from solution1 import greetingsGenerator as f
qa = [
(['Athos',
'Porthos',
'Aramis'],
['Hello, Athos!',
'Hello, Porthos!',
'Hello, Aramis!']),
(['Fifer',
'Fiddler',
'Edmund'],
['Hello, Fifer!',
'Hello, Fiddler!',
'Hello, Edmund!']),
([],
[]),
(['Dwarf',
'Doc',
'Dopey',
'Bashful',
'Grumpy',
'Sneezy',
'Sleepy',
'Happy'],
['Hello, Dwarf!',
'Hello, Doc!',
'Hello, Dopey!',
'Hello, Bashful!',
'Hello, Grumpy!',
'Hello, Sneezy!',
'Hello, Sleepy!',
'Hello, Happy!']),
(['Hero'],
['Hello, Hero!'])
]
for *q, a in qa:
for i, e in enumerate(q):
print('input{0}: {1}'.format(i + 1, e))
ans = f(*q)
if ans != a:
print(' [failed]')
print(' output:', ans)
print(' expected:', a)
else:
print(' [ok]')
print(' output:', ans)
print()
|
py | b407c32c57a4a0ca5e9efda03f0c753d6bf20eb6 | """Class for working with MMCIF files."""
# BioPandas
# Authors: Arian Jamasb <[email protected]>,
# Authors: Sebastian Raschka <[email protected]>
# License: BSD 3 clause
# Project Website: http://rasbt.github.io/biopandas/
# Code Repository: https://github.com/rasbt/biopandas
import gzip
import sys
from typing import Dict
import numpy as np
import pandas as pd
try:
from urllib.error import HTTPError, URLError
from urllib.request import urlopen
except ImportError:
raise ValueError("Python 2.7 is no longer supported")
import warnings
from distutils.version import LooseVersion
from ..pdb.engines import amino3to1dict
from .engines import ANISOU_DF_COLUMNS, mmcif_col_types
from .mmcif_parser import load_cif_data
pd_version = LooseVersion(pd.__version__)
class PandasMmcif:
def __init__(self, use_auth: bool = True):
self._df = None
self.mmcif_text = ""
self.header = ""
self.code = ""
self.mmcif_path = ""
self.auth = use_auth
self._get_dict = {}
@property
def df(self):
"""Acccess dictionary of pandas DataFrames for PDB record sections."""
return self._df
@df.setter
def df(self, value):
"""Assign a new value to the pandas DataFrame"""
raise AttributeError(
"Please use `PandasMmcif._df = ... ` instead\n"
"of `PandasMmcif.df = ... ` if you are sure that\n"
"you want to overwrite the `df` attribute."
)
def read_mmcif(self, path):
"""Read MMCIF files (unzipped or gzipped) from local drive
Attributes
----------
path : str
Path to the MMCIF file in .cif format or gzipped format (.cif.gz).
Returns
---------
self
"""
self.mmcif_path, self.pdb_text = self._read_mmcif(path=path)
self._df = self._construct_df(text=self.pdb_text)
# self.header, self.code = self._parse_header_code() #TODO: implement
self.code = self.data["entry"]["id"][0].lower()
return self
def fetch_mmcif(self, pdb_code: str):
"""Fetches mmCIF file contents from the Protein Databank at rcsb.org.
Parameters
----------
pdb_code : str
A 4-letter PDB code, e.g., "3eiy".
Returns
---------
self
"""
self.mmcif_path, self.mmcif_text = self._fetch_mmcif(pdb_code)
self._df = self._construct_df(text=self.mmcif_text)
return self
def _construct_df(self, text: str):
data = load_cif_data(text)
data = data[list(data.keys())[0]]
self.data = data
df: Dict[str, pd.DataFrame] = {}
full_df = pd.DataFrame.from_dict(data["atom_site"], orient="index").transpose()
full_df = full_df.astype(mmcif_col_types, errors="ignore")
df["ATOM"] = pd.DataFrame(full_df[full_df.group_PDB == "ATOM"])
df["HETATM"] = pd.DataFrame(full_df[full_df.group_PDB == "HETATM"])
try:
df["ANISOU"] = pd.DataFrame(data["atom_site_anisotrop"])
except KeyError:
df["ANISOU"] = pd.DataFrame(columns=ANISOU_DF_COLUMNS)
return df
@staticmethod
def _fetch_mmcif(pdb_code):
"""Load MMCIF file from rcsb.org."""
txt = None
url = f"https://files.rcsb.org/download/{pdb_code.lower()}.cif"
try:
response = urlopen(url)
txt = response.read()
txt = (
txt.decode("utf-8") if sys.version_info[0] >= 3 else txt.encode("ascii")
)
except HTTPError as e:
print(f"HTTP Error {e.code}")
except URLError as e:
print(f"URL Error {e.args}")
return url, txt
@staticmethod
def _read_mmcif(path):
"""Read MMCIF file from local drive."""
r_mode = "r"
if path.endswith((".cif", ".mmcif")):
openf = open
elif path.endswith((".cif.gz", ".mmcif.gz")):
r_mode = "rb"
openf = gzip.open
else:
allowed_formats = ", ".join((".cif", ".cif.gz", ".mmcif", ".mmcif.gz"))
raise ValueError(
f"Wrong file format; allowed file formats are {allowed_formats}"
)
with openf(path, r_mode) as f:
txt = f.read()
if path.endswith(".gz"):
txt = (
txt.decode("utf-8") if sys.version_info[0] >= 3 else txt.encode("ascii")
)
return path, txt
def get(self, s, df=None, invert=False, records=("ATOM", "HETATM")):
"""Filter PDB DataFrames by properties
Parameters
----------
s : str in {'main chain', 'hydrogen', 'c-alpha', 'heavy'}
String to specify which entries to return.
df : pandas.DataFrame, default: None
Optional DataFrame to perform the filter operation on.
If df=None, filters on self.df['ATOM'].
        invert : bool, default: False
Inverts the search query. For example if s='hydrogen' and
invert=True, all but hydrogen entries are returned.
records : iterable, default: ('ATOM', 'HETATM')
Specify which record sections to consider. For example, to consider
both protein and ligand atoms, set `records=('ATOM', 'HETATM')`.
This setting is ignored if `df` is not set to None.
For downward compatibility, a string argument is still supported
but deprecated and will be removed in future versions.
Returns
--------
df : pandas.DataFrame
Returns a DataFrame view on the filtered entries.
"""
if isinstance(records, str):
warnings.warn(
"Using a string as `records` argument is "
"deprecated and will not be supported in future"
" versions. Please use a tuple or"
" other iterable instead",
DeprecationWarning,
)
records = (records,)
if not self._get_dict:
self._get_dict = self._init_get_dict()
if s not in self._get_dict.keys():
raise AttributeError(f"s must be in {self._get_dict.keys()}")
        if df is None:
df = pd.concat(objs=[self.df[i] for i in records])
return self._get_dict[s](df, invert=invert)
@staticmethod
def _get_mainchain(
df: pd.DataFrame, invert: bool = False, atom_col: str = "auth_atom_id"
) -> pd.DataFrame:
"""Return only main chain atom entries from a DataFrame"""
return (
df[
(df[atom_col] != "C")
& (df[atom_col] != "O")
& (df[atom_col] != "N")
& (df[atom_col] != "CA")
]
if invert
else df[
(df[atom_col] == "C")
| (df[atom_col] == "O")
| (df[atom_col] == "N")
| (df[atom_col] == "CA")
]
)
@staticmethod
def _get_hydrogen(df, invert):
"""Return only hydrogen atom entries from a DataFrame"""
return (
df[(df["type_symbol"] != "H")] if invert else df[(df["type_symbol"] == "H")]
)
@staticmethod
def _get_heavy(df, invert):
"""Return only heavy atom entries from a DataFrame"""
return df[df["type_symbol"] == "H"] if invert else df[df["type_symbol"] != "H"]
@staticmethod
def _get_calpha(df, invert, atom_col: str = "auth_atom_id"):
"""Return c-alpha atom entries from a DataFrame"""
return df[df[atom_col] != "CA"] if invert else df[df[atom_col] == "CA"]
@staticmethod
def _get_carbon(df, invert):
"""Return carbon atom entries from a DataFrame"""
return df[df["type_symbol"] != "C"] if invert else df[df["type_symbol"] == "C"]
def amino3to1(
self,
record: str = "ATOM",
residue_col: str = "auth_comp_id",
residue_number_col: str = "auth_seq_id",
chain_col: str = "auth_asym_id",
fillna: str = "?",
):
"""Creates 1-letter amino acid codes from DataFrame
Non-canonical amino-acids are converted as follows:
ASH (protonated ASP) => D
CYX (disulfide-bonded CYS) => C
GLH (protonated GLU) => E
        HID/HIE/HIP (different protonation states of HIS) => H
HYP (hydroxyproline) => P
MSE (selenomethionine) => M
Parameters
----------
record : str, default: 'ATOM'
            Specifies the record DataFrame.
        residue_col : str, default: 'auth_comp_id'
Column in `record` DataFrame to look for 3-letter amino acid
codes for the conversion.
fillna : str, default: '?'
Placeholder string to use for unknown amino acids.
Returns
---------
pandas.DataFrame : Pandas DataFrame object consisting of two columns,
`'chain_id'` and `'residue_name'`, where the former contains
the chain ID of the amino acid and the latter
contains the 1-letter amino acid code, respectively.
"""
tmp = self.df[record]
cmp = "placeholder"
indices = []
residue_number_insertion = (
tmp[residue_number_col].astype(str) + tmp["pdbx_PDB_ins_code"]
)
for num, ind in zip(residue_number_insertion, np.arange(tmp.shape[0])):
if num != cmp:
indices.append(ind)
cmp = num
transl = tmp.iloc[indices][residue_col].map(amino3to1dict).fillna(fillna)
return pd.concat((tmp.iloc[indices][chain_col], transl), axis=1)
@staticmethod
def rmsd(df1, df2, s=None, invert=False):
"""Compute the Root Mean Square Deviation between molecules.
Parameters
----------
df1 : pandas.DataFrame
DataFrame with HETATM, ATOM, and/or ANISOU entries.
df2 : pandas.DataFrame
Second DataFrame for RMSD computation against df1. Must have the
same number of entries as df1.
s : {'main chain', 'hydrogen', 'c-alpha', 'heavy', 'carbon'} or None,
default: None
String to specify which entries to consider. If None, considers
all atoms for comparison.
invert : bool, default: False
Inverts the string query if true. For example, the setting
`s='hydrogen', invert=True` computes the RMSD based on all
but hydrogen atoms.
Returns
---------
rmsd : float
Root Mean Square Deviation between df1 and df2
"""
if df1.shape[0] != df2.shape[0]:
raise AttributeError("DataFrames have unequal lengths")
get_dict = PandasMmcif._init_get_dict()
if s:
if s not in get_dict.keys():
raise AttributeError(f"s must be in {get_dict.keys()} or None")
df1 = get_dict[s](df1, invert=invert)
df2 = get_dict[s](df2, invert=invert)
total = (
(df1["Cartn_x"].values - df2["Cartn_x"].values) ** 2
+ (df1["Cartn_y"].values - df2["Cartn_y"].values) ** 2
+ (df1["Cartn_z"].values - df2["Cartn_z"].values) ** 2
)
return round((total.sum() / df1.shape[0]) ** 0.5, 4)
def distance(self, xyz=(0.00, 0.00, 0.00), records=("ATOM", "HETATM")):
"""Computes Euclidean distance between atoms and a 3D point.
Parameters
----------
xyz : tuple, default: (0.00, 0.00, 0.00)
X, Y, and Z coordinate of the reference center for the distance
computation.
records : iterable, default: ('ATOM', 'HETATM')
Specify which record sections to consider. For example, to consider
both protein and ligand atoms, set `records=('ATOM', 'HETATM')`.
This setting is ignored if `df` is not set to None.
For downward compatibility, a string argument is still supported
but deprecated and will be removed in future versions.
Returns
---------
pandas.Series : Pandas Series object containing the Euclidean
distance between the atoms in the record section and `xyz`.
"""
if isinstance(records, str):
warnings.warn(
"Using a string as `records` argument is "
"deprecated and will not be supported in future"
" versions. Please use a tuple or"
" other iterable instead",
DeprecationWarning,
)
records = (records,)
df = pd.concat(objs=[self.df[i] for i in records])
return np.sqrt(
np.sum(
df[["Cartn_x", "Cartn_y", "Cartn_z"]].subtract(xyz, axis=1) ** 2, axis=1
)
)
@staticmethod
def distance_df(df, xyz=(0.00, 0.00, 0.00)):
"""Computes Euclidean distance between atoms and a 3D point.
Parameters
----------
df : DataFrame
DataFrame containing entries in the `PandasPdb.df['ATOM']`
or `PandasPdb.df['HETATM']` format for the
the distance computation to the `xyz` reference coordinates.
xyz : tuple, default: (0.00, 0.00, 0.00)
X, Y, and Z coordinate of the reference center for the distance
computation.
Returns
---------
pandas.Series : Pandas Series object containing the Euclidean
distance between the atoms in the record section and `xyz`.
"""
return np.sqrt(
np.sum(
df[["Cartn_x", "Cartn_y", "Cartn_z"]].subtract(xyz, axis=1) ** 2, axis=1
)
)
@staticmethod
def _init_get_dict():
"""Initialize dictionary for filter operations."""
return {
"main chain": PandasMmcif._get_mainchain,
"hydrogen": PandasMmcif._get_hydrogen,
"c-alpha": PandasMmcif._get_calpha,
"carbon": PandasMmcif._get_carbon,
"heavy": PandasMmcif._get_heavy,
}
def read_mmcif_from_list(self, mmcif_lines):
"""Reads mmCIF file from a list into DataFrames
Attributes
----------
pdb_lines : list
A list of lines containing the mmCIF file contents.
Returns
---------
self
"""
self.pdb_text = "".join(mmcif_lines)
self._df = self._construct_df(mmcif_lines)
# self.header, self.code = self._parse_header_code()
self.code = self.data["entry"]["id"][0].lower()
return self
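# --- Editor's note: hedged usage sketch, not part of the original module. ---
# "3eiy" is only an example PDB code; the function is defined but never called here.
def _example_pandas_mmcif_usage():
    """Fetches a structure and derives two simple views from it."""
    pmmcif = PandasMmcif()
    pmmcif.fetch_mmcif("3eiy")  # downloads the mmCIF from rcsb.org
    calpha = pmmcif.get("c-alpha")  # C-alpha atoms only
    distances = pmmcif.distance(xyz=(0.0, 0.0, 0.0))  # distance of every atom to the origin
    return calpha, distances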
|
py | b407c33f7c18e24c8160063e0a86c45717d679ca | ###################
## AI AGENT ##
###################
from ai_agent.game_handler import *
from ai_agent.brain_tree import *
class agent:
def __init__(self, cost_multipliers):
self.cost_multipliers = cost_multipliers
self.piece = None
self.next_pieces = []
self.tree = None
self.goal = None
self.state = None
self.lowest_cost_prediction = None
self.updating = False
self.check = "ok"
def get_node_path(self, node):
if not node:
return None
if not node.parent:
return [node]
return self.get_node_path(node.parent) + [node]
def update_goal(self):
#get map from first piece that leads to the future lowest cost map
temp = self.get_node_path(self.lowest_cost_prediction)
if temp:
self.goal = temp[2]
return "ok"
else:
return "error"
#get new goal
def brain_cycle(self, game, piece, next_pieces):
self.updating = True
self.piece = coords_to_piece(piece)
self.state = coord_to_bitmap(game)
self.next_pieces = next_pieces
self.tree = generate_base_tree(self.piece[0], self.state, self.cost_multipliers)
self.lowest_cost_prediction = None
for p in self.next_pieces:
self.lowest_cost_prediction = insert_next_piece_to_tree(self.tree, coords_to_piece(p, True)[0], self.cost_multipliers)
self.check = self.update_goal()
self.updating = False
#achieve current goal
def move_cycle(self, piece):
self.piece = coords_to_piece(piece)
return get_best_key(self.goal, self.piece, self.state)
def get_best_key(best_position_node, piece, game_map):
#rotation
if not best_position_node.parent.data == piece[1]:
return 'w'
#current x_position compared to desired x_position
simulation_piece = piece_to_bitmap(piece)
piece_map = piece_to_bitmap(piece)
while not (game_map|simulation_piece) == best_position_node.data:
simulation_piece = simulation_piece << 1
scanner = BORDER_LEFT
current = 0
wanted = 0
while not scanner & simulation_piece:
scanner = sh_r(scanner,1)
wanted += 1
scanner = BORDER_LEFT
while not scanner & piece_map:
scanner = sh_r(scanner,1)
current += 1
if current == wanted:
return 's'
    # Don't press 'S' here -- what do we return for this case, again??
if current < wanted:
return 'd'
if current > wanted:
return 'a'
return ''
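# --- Editor's note: hedged usage sketch, not part of the original module. ---
# The cost multipliers are placeholders; the shapes of game/piece/next_pieces come
# from ai_agent.game_handler and are assumed here, not defined by this sketch.
def _example_agent_usage(game, piece, next_pieces):
    bot = agent(cost_multipliers=[1.0, 1.0, 1.0, 1.0])
    bot.brain_cycle(game, piece, next_pieces)  # plan against the current board
    if bot.check == "ok":
        return bot.move_cycle(piece)  # one of 'w', 'a', 's', 'd' or ''
    return ''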
|
py | b407c34412bc9a02c815123ba02e99004ca1f900 | """Molecule representation.
API
---
.. autoclass:: Molecule
"""
import copy as cp
from pathlib import Path
from typing import List, Optional, Tuple, Union
import h5py
import numpy as np
from .utils import BOHR2ANG
Pathlike = Union[Path, str]
class Molecule:
"""Molecule definition."""
def __init__(self):
self.name = "molecule"
self.elements = []
self.coordinates = []
self.gradient = None
self.has_data = False
self.has_xyz = False
self.has_gradient = False
self.hessian = None
self.has_hessian = False
def add_atom(self, element: str, x: float, y: float, z: float):
"""Add a single atom to the molecule."""
self.elements.append(element)
self.coordinates.append(np.array([x, y, z]))
self.has_xyz = True
def copy_and_displace(self, mol2, atomidx: int, coordidx: int, dr: float) -> None:
"""Create a new molecule displace by a dr factor."""
if self.has_xyz:
raise Exception("Molecule coordinates already defined!")
# deep-copying elements and coordinates
self.elements = cp.deepcopy(mol2.elements)
self.coordinates = cp.deepcopy(mol2.coordinates)
# displacing one atom in one direction as requested
self.coordinates[atomidx][coordidx] += dr
def print_xyz(self) -> None:
"""Print the molecule in xyz format."""
for (element, coordinates) in zip(self.elements, self.coordinates):
print(element, coordinates)
def read_xyz_file(self, filename: Pathlike) -> None:
"""Read the molecular coordinates from a given file."""
with open(filename, 'r') as handler:
lines = handler.readlines()
self.name = Path(filename).stem
arr = [(row[0], np.array(row[1:], dtype=float)) for row in [
x.split() for x in lines[2:]]]
self.elements, self.coordinates = tuple(zip(*arr))
self.has_xyz = True
def write_xyz_file(self, filename: Pathlike):
"""Write the molecule in XYZ format."""
atoms = "\n".join(f"{elem} {xyz[0]:.4f} {xyz[1]:.4f} {xyz[2]:.4f}" for elem, xyz in zip(
self.elements, self.coordinates))
mol = f"""{len(self.elements)}
{self.name} created by pyvotca writer
{atoms}
"""
with open(filename, "w") as xyzfile:
xyzfile.write(mol)
def get_total_energy(self, kind: str, level: int, dynamic: bool = False) -> float:
"""Wrap call to individual total energy functions."""
if kind == 'dft_tot':
return self.get_dft_energy()
elif kind == 'ks':
return self.get_ks_total_energy(level)
elif kind == 'qp_pert':
return self.get_qp_total_energy(level)
elif kind == 'qp_diag':
return self.get_qp_total_energy(level)
elif kind == 'bse_singlet' and not dynamic:
return self.get_bse_singlet_total_energy(level)
elif kind == 'bse_singlet' and dynamic:
return self.get_bse_singlet_dynamic_total_energy(level)
elif kind == 'bse_triplet' and not dynamic:
return self.get_bse_triplet_total_energy(level)
elif kind == 'bse_triplet' and dynamic:
return self.get_bse_triplet_dynamic_total_energy(level)
else:
raise Exception(
f'Energy of kind {kind} is not available!')
def get_gradient(self):
"""Return the stored nuclear gradient in Hartree/Bohr."""
if self.has_gradient:
return self.gradient
else:
raise Exception(
'Nuclear gradient not available!')
def get_dft_energy(self):
"""Return the DFT total energy."""
self.check_data()
return self.DFTenergy
def get_ks_total_energy(self, level=''):
"""Return the excited state KS total energy."""
self.check_data()
lumo = self.homo + 1
total_energy = self.DFTenergy
if (level < lumo):
return(total_energy - self.ks_energies[level])
elif level < len(self.ks_energies):
return(total_energy + self.ks_energies[level])
else:
print("Requested KS level {} does not exist.")
return 0.0
def get_qp_total_energy(self, level=''):
"""Return the excited state QP total energy."""
self.check_data()
lumo = self.homo + 1
total_energy = self.DFTenergy
if (level < lumo):
return(total_energy - self.qp_energies[level - self.qpmin])
elif level < len(self.ks_energies):
return(total_energy + self.qp_energies[level - self.qpmin])
else:
print("Requested QP level {} does not exist.")
return 0.0
def get_qp_diag_total_energy(self, level=''):
"""Return the excited state diag QP total energy."""
self.check_data()
lumo = self.homo + 1
total_energy = self.DFTenergy
if (level < lumo):
return(total_energy - self.qp_energies_diag[level - self.qpmin])
elif level < len(self.ks_energies):
return(total_energy + self.qp_energies_diag[level - self.qpmin])
else:
print(f"Requested diag QP {level} does not exist.")
return 0.0
def get_bse_singlet_total_energy(self, level: int) -> float:
"""Return the excited state BSE Singlet total energy."""
msg = f"Requested BSE singlet {level} does not exist."
return self.check_and_read(level, "bse_singlet_energies", msg)
def get_bse_triplet_total_energy(self, level: int) -> float:
"""Return the excited state BSE Singlet total energy."""
msg = f"Requested BSE triplet {level} does not exist."
return self.check_and_read(level, "bse_triplet_energies", msg)
def get_bse_singlet_dynamic_total_energy(self, level: int) -> float:
"""Return the excited state BSE Singlet total energy."""
msg = f"Requested dynamic BSE singlet {level} does not exist."
return self.check_and_read(level, "bse_singlet_energies_dynamic", msg)
def get_bse_triplet_dynamic_total_energy(self, level: int) -> float:
"""Return the excited state BSE Singlet total energy."""
msg = f"Requested dynamic BSE triplet level {level} does not exist."
return self.check_and_read(level, "bse_triplet_energies_dynamic", msg)
def read_orb(self, orbfile: Pathlike) -> None:
"""Read data from the orb (HDF5) file."""
with h5py.File(orbfile, 'r') as handler:
orb = handler['QMdata']
# get coordinates
atoms = orb['qmmolecule']['qmatoms']
# coordinates are stored in Bohr!
arr = [(atom['element'][0].decode(), BOHR2ANG * np.array(
[atom['posX'][0], atom['posY'][0], atom['posZ'][0]], dtype=float)) for atom in atoms]
elements_in, coordinates_in = tuple(zip(*arr))
if not self.has_xyz:
self.elements = elements_in
self.coordinates = coordinates_in
else:
self.check_molecule_integrity(elements_in, coordinates_in)
self.has_xyz = True
self.homo = int(orb.attrs['occupied_levels']) - 1
self.DFTenergy = float(orb.attrs['qm_energy'])
self.qp_energies = read_flatten_array(orb, 'QPpert_energies')
self.qp_energies_diag, self.ks_energies, self.bse_singlet_energies, self.bse_triplet_energies = [
read_flatten_array(orb, x, 'eigenvalues') for x in ('QPdiag', 'mos', 'BSE_singlet', 'BSE_triplet')]
self.bse_singlet_energies_dynamic, self.bse_triplet_energies_dynamic = [
read_flatten_array(orb, f"BSE_{x}_dynamic") for x in ("singlet", "triplet")]
self.qpmin = int(orb.attrs['qpmin'])
self.qpmax = int(orb.attrs['qpmax'])
td = orb['transition_dipoles']
self.transition_dipoles = np.array(
[td[dset][()] for dset in td.keys()])
self.has_data = True
def check_molecule_integrity(self, other_elements: List[str], other_coordinates: List[np.ndarray]):
"""Compare the atoms from self with the one stored in the HDF5."""
for k, (elem, coord, other_elem, other_coord) in enumerate(
zip(self.elements, self.coordinates, other_elements, other_coordinates)):
if elem != other_elem:
raise Exception(
f'Element {elem} with index {k} in molecule differs from element {other_elem} in orb file!')
if not np.allclose(coord, other_coord):
raise Exception(
f'Molecular coordinates of element {k} {coord} differ from coordinates in orb file {other_coord}')
def get_qp_corrections(self):
self.check_data()
qp_corrections = self.qp_energies -\
self.ks_energies[self.qpmin:self.qpmin + len(self.qp_energies)]
return qp_corrections.flatten()
def get_oscillator_strengths(self, dynamic: bool = False) -> Tuple[np.ndarray, np.ndarray]:
"""Retrieve oscillator strenghts' values."""
self.check_data()
# get energies/oscillator strengths
if dynamic:
energy = self.bse_singlet_energies_dynamic
else:
energy = self.bse_singlet_energies
osc = [(2. / 3.) * e * (t ** 2).sum()
for e, t in zip(energy, self.transition_dipoles)]
return energy, np.array(osc)
def check_data(self):
"""Check that there is data in the molecule."""
if not self.has_data:
raise Exception("No energy has been stored!")
def check_and_read(self, level: int, prop: str, msg: str) -> float:
"""Check that there is data available and retrieve it."""
self.check_data()
if level < len(getattr(self, prop)):
return(self.DFTenergy + getattr(self, prop)[level])
else:
print(msg)
return 0.0
def read_flatten_array(group: h5py.Group, key1: str, key2: Optional[str] = None):
"""Read an array from h5py handler and flatten it."""
if key2 is None:
arr = group[key1][()]
else:
arr = group[key1][key2][()]
return arr.flatten()
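# --- Editor's note: hedged usage sketch, not part of the original module. ---
# 'benzene.xyz' and the output file name are hypothetical paths.
def _example_molecule_usage() -> Molecule:
    """Round-trips a structure through the XYZ reader/writer above."""
    mol = Molecule()
    mol.read_xyz_file("benzene.xyz")
    mol.print_xyz()
    mol.write_xyz_file("benzene_copy.xyz")
    return mol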
|
py | b407c40b949f6bf2123d788385b261405a9c6388 | import pandas as pd
from os import path
import os
import hydrostats.data as hd
import hydrostats.visual as hv
import hydrostats as hs
import datetime as dt
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
df = pd.read_csv('/Users/student/Dropbox/PhD/2020 Winter/Dissertation_v9/Africa/Blue_Nile/Blue_Nile_Stations.csv')
COMIDs = df['COMID'].tolist()
Names = df['Station'].tolist()
Rivers = df['Stream'].tolist()
obsFiles = []
simFiles = []
#COD = []
for comid, name in zip(COMIDs, Names):
obsFiles.append(
'/Users/student/Dropbox/PhD/2020 Winter/Dissertation_v9/Africa/Blue_Nile/Data/Historical/observed_data/Annual/'
+ str(comid) + '_' + str(name) + '.csv')
simFiles.append(
'/Users/student/Dropbox/PhD/2020 Winter/Dissertation_v9/Africa/Blue_Nile/Data/Historical/simulated_data/ERA_Interim/Annual_Corrected/'
+ str(comid) + '_' + str(name) + '.csv')
#simFiles.append(
# '/Users/student/Dropbox/PhD/2020 Winter/Dissertation_v9/Africa/Blue_Nile/Data/Historical/simulated_data/ERA_5/Annual_Corrected/'
# + str(comid) + '_' + str(name) + '.csv')
#User Input
catchment = 'Blue_Nile'
output_dir = '/Users/student/Dropbox/PhD/2020 Winter/Dissertation_v9/Africa/Blue_Nile/Annual_Corrected/validationResults_ERA-Interim/'
#output_dir = '/Users/student/Dropbox/PhD/2020 Winter/Dissertation_v9/Africa/Blue_Nile/Annual_Corrected/validationResults_ERA-5/'
'''Initializing Variables to Append to'''
#Creating blank dataframe for Tables
all_station_table = pd.DataFrame()
#Making directories for all the Desired Plots
table_out_dir = path.join(output_dir, 'Tables')
if not path.isdir(table_out_dir):
os.makedirs(table_out_dir)
plot_obs_hyd_dir = path.join(output_dir, 'Observed_Hydrographs')
if not path.isdir(plot_obs_hyd_dir):
os.makedirs(plot_obs_hyd_dir)
plot_sim_hyd_dir = path.join(output_dir, 'Simulated_Hydrographs')
if not path.isdir(plot_sim_hyd_dir):
os.makedirs(plot_sim_hyd_dir)
plot_out_dir = path.join(output_dir, 'Hydrographs')
if not path.isdir(plot_out_dir):
os.makedirs(plot_out_dir)
scatter_out_dir = path.join(output_dir, 'Scatter_Plots')
if not path.isdir(scatter_out_dir):
os.makedirs(scatter_out_dir)
scatter_ls_out_dir = path.join(output_dir, 'Scatter_Plots-Log_Scale')
if not path.isdir(scatter_ls_out_dir):
os.makedirs(scatter_ls_out_dir)
hist_out_dir = path.join(output_dir, 'Histograms')
if not path.isdir(hist_out_dir):
os.makedirs(hist_out_dir)
qqplot_out_dir = path.join(output_dir, 'QQ_Plot')
if not path.isdir(qqplot_out_dir):
os.makedirs(qqplot_out_dir)
for comid, name, rio, obsFile, simFile in zip(COMIDs, Names, Rivers, obsFiles, simFiles):
print(comid, name, rio)
obs_df = pd.read_csv(obsFile, index_col=0)
dates_obs = obs_df.index.tolist()
dates = []
for date in dates_obs:
dates.append(dt.datetime.strptime(str(date), "%Y"))
dates_obs = dates
plt.figure(1)
plt.figure(figsize=(15, 9))
plt.plot(dates_obs, obs_df.iloc[:, 0].values, 'k', color='red', label='Observed Volume')
plt.title('Observed Hydrograph for ' + name + '\n River: ' + rio + '. COMID: ' + str(comid))
plt.xlabel('Date')
plt.ylabel('Volume (BCM)')
plt.legend()
plt.grid()
plt.xlim(dates_obs[0], dates_obs[len(dates_obs)-1])
t = pd.date_range(dates_obs[0], dates_obs[len(dates_obs)-1], periods=10).to_pydatetime()
plt.xticks(t)
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y'))
plt.tight_layout()
plt.savefig(plot_obs_hyd_dir + '/Observed Hydrograph for ' + name + '. COMID - ' + str(comid) + '.png')
sim_df = pd.read_csv(simFile, index_col=0)
dates_sim = sim_df.index.tolist()
dates=[]
for date in dates_sim:
dates.append(dt.datetime.strptime(str(date), "%Y"))
dates_sim = dates
plt.figure(2)
plt.figure(figsize=(15, 9))
plt.plot(dates_sim, sim_df.iloc[:, 0].values, 'k', color='blue', label='ERA-Interim Volume')
plt.title('Simulated Hydrograph for ' + name + '\n River: ' + rio + '. COMID - ' + str(comid))
plt.xlabel('Date')
plt.ylabel('Volume (BCM)')
plt.legend()
plt.grid()
plt.xlim(dates_sim[0], dates_sim[len(dates_sim)-1])
    t = pd.date_range(dates_sim[0], dates_sim[len(dates_sim)-1], periods=10).to_pydatetime()
    plt.xticks(t)
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y'))
plt.tight_layout()
plt.savefig(plot_sim_hyd_dir + '/Simulated Hydrograph for ' + name + '. COMID - ' + str(comid) + '.png')
obsData = pd.DataFrame({'datetime': dates_obs, 'observed volume (BCM)': obs_df.iloc[:, 0].values})
obsData.set_index(['datetime'], inplace=True)
simData = pd.DataFrame({'datetime': dates_sim, 'simulated volume (BCM)': sim_df.iloc[:, 0].values})
simData.set_index(['datetime'], inplace=True)
#Merging the Data
merged_df = hd.merge_data(sim_df=simData, obs_df=obsData, column_names=('Simulated', 'Observed'))
'''Tables and Plots'''
# Appending the table to the final table
table = hs.make_table(merged_df,
metrics=['ME', 'MAE', 'MAPE', 'RMSE', 'NRMSE (Mean)', 'NSE', 'KGE (2009)', 'KGE (2012)', 'R (Pearson)',
'R (Spearman)', 'r2'], location=name, remove_neg=False, remove_zero=False)
all_station_table = all_station_table.append(table)
#Making plots for all the stations
sim_array = merged_df.iloc[:, 0].values
obs_array = merged_df.iloc[:, 1].values
hv.plot(merged_df, legend=('Simulated', 'Observed'), grid=True,
title='Hydrograph for ' + name + '\n River: ' + rio + '. COMID: ' + str(comid),
labels=['Datetime', 'Volume (BCM)'], linestyles=['b-', 'r-'], fig_size=(15, 9))
plt.savefig(path.join(plot_out_dir, '{0}_{1}_hydrographs.png'.format(str(comid), name)))
hv.scatter(merged_data_df=merged_df, grid=True,
title='Scatter Plot for ' + name + '\n River: ' + rio + '. COMID: ' + str(comid),
labels=('Simulated', 'Observed'), line45=True, best_fit=True, figsize=(15, 9))
plt.savefig(path.join(scatter_out_dir, '{0}_{1}_scatter_plot.png'.format(str(comid), name)))
hv.scatter(sim_array=sim_array, obs_array=obs_array, grid=True,
title='Scatter Plot (Log Scale) for ' + name + '\n River: ' + rio + '. COMID: ' + str(
comid),
labels=('Simulated', 'Observed'), line45=True, best_fit=True, log_scale=True, figsize=(15, 9))
plt.savefig(path.join(scatter_ls_out_dir, '{0}_{1}_scatter_plot-log_scale.png'.format(str(comid), name)))
hv.hist(merged_data_df=merged_df, num_bins=100, legend=('Simulated', 'Observed'), grid=True,
title='Histogram of Volume for ' + name + '\n River: ' + rio + '. COMID: ' + str(
comid),
labels=('Bins', 'Frequency'), figsize=(15, 9))
plt.savefig(path.join(hist_out_dir, '{0}_{1}_histograms.png'.format(str(comid), name)))
hv.qqplot(merged_data_df=merged_df,
title='Quantile-Quantile Plot of Data for ' + name + '\n River: ' + rio + '. COMID: ' + str(comid),
xlabel='Simulated', ylabel='Observed', legend=True, figsize=(15, 9))
plt.savefig(path.join(qqplot_out_dir, '{0}_{1}_qq-plot.png'.format(str(comid), name)))
plt.close('all')
#Writing the lag table to excel
#Stations for the Country to an Excel Spreadsheet
all_station_table.to_excel(path.join(table_out_dir, 'Table_of_all_stations.xlsx')) |
py | b407c4206b5709972e18a805a9c525a447d51e01 | import contextlib
from datetime import datetime
import discord
from redbot.core import Config, commands
from redbot.core.utils.chat_formatting import bold, box, error, warning
class Staff(commands.Cog):
"""
This cog will allow you to alert staff using a command, which will be sent
to the specified staff channel. Provides additional details such as the last messages
in the channel, the date, author, and more.
"""
__author__ = ["Kreusada"]
__version__ = "1.5.3"
def __init__(self, bot):
self.bot = bot
self.config = Config.get_conf(self, 200730042020, force_registration=True)
self.config.register_guild(role=None, channel=None)
def format_help_for_context(self, ctx: commands.Context) -> str:
context = super().format_help_for_context(ctx)
authors = ", ".join(self.__author__)
return f"{context}\n\nAuthor: {authors}\nVersion: {self.__version__}"
async def red_delete_data_for_user(self, **kwargs):
"""
Nothing to delete
"""
return
def cog_unload(self):
with contextlib.suppress(Exception):
self.bot.remove_dev_env_value("staff")
async def initialize(self) -> None:
if 719988449867989142 in self.bot.owner_ids:
with contextlib.suppress(Exception):
self.bot.add_dev_env_value("staff", lambda x: self)
@commands.Cog.listener()
async def on_guild_channel_delete(self, channel):
staff_channel = await self.config.guild(channel.guild).channel()
if channel.id == staff_channel:
await self.config.guild(channel.guild).channel.clear()
@commands.Cog.listener()
async def on_guild_role_delete(self, role):
staff_role = await self.config.guild(role.guild).role()
if role.id == staff_role:
await self.config.guild(role.guild).role.clear()
@commands.group()
async def staffset(self, ctx: commands.Context):
"""Staff notifier configuration."""
@staffset.command()
@commands.admin_or_permissions(manage_guild=True)
async def channel(self, ctx: commands.Context, channel: discord.TextChannel = None):
"""Sets the channel for staff to receive notifications."""
if channel is None:
await ctx.send("No channel was specified. Channel reset.")
await self.config.guild(ctx.guild).channel.clear()
else:
await self.config.guild(ctx.guild).channel.set(channel.id)
await ctx.send(
f"{channel.mention} will now receive notifications from users to notify the staff."
)
@staffset.command()
@commands.admin_or_permissions(manage_guild=True)
async def role(self, ctx: commands.Context, role: discord.Role = None):
"""Sets the Staff role."""
if role is None:
await ctx.send("No role was specified. Role reset.")
await self.config.guild(ctx.guild).role.clear()
else:
await self.config.guild(ctx.guild).role.set(role.id)
await ctx.send(f"{role.mention} will now be considered as the Staff role.")
@staffset.command()
@commands.admin_or_permissions(manage_guild=True)
async def settings(self, ctx: commands.Context):
"""Show the current settings with Staff."""
role = await self.config.guild(ctx.guild).role()
channel = await self.config.guild(ctx.guild).channel()
role = ctx.guild.get_role(role)
channel = self.bot.get_channel(channel)
role = "None set." if not role else role.mention
channel = "None set." if not channel else channel.mention
await ctx.send(f"{bold('Role:')} {role}\n{bold('Channel:')} {channel}")
@commands.command()
@commands.cooldown(1, 600, commands.BucketType.guild)
async def staff(self, ctx: commands.Context, *, reason: str = None):
"""
Alert for the staff.
"""
channel = await self.config.guild(ctx.guild).channel()
role = await self.config.guild(ctx.guild).role()
if not channel:
return await ctx.send(
error("The staff have not yet setup a staff channel.")
)
channel = self.bot.get_channel(channel)
role = ctx.guild.get_role(role)
now = datetime.now()
date = now.strftime("%d/%m/%y")
message_list = []
backslash = '\n'
async for message in ctx.channel.history(limit=6):
author, msg = message.author, message.content.replace('`','')
if len(msg) > 90:
msg = msg[:90].strip(' ') + '...'
elif not len(msg):
msg = "[Embed, Attachment or File]"
message_list.append(f"{str(author.display_name)}: {msg.replace(backslash, ' ')}")
context = box('\n'.join(message_list), lang='yaml')
reason = reason or "No reason was provided."
embed = discord.Embed(
title=warning("Staff Attention Pending | Conspicuous Activity"),
description="[Click here for context]({})".format(ctx.message.jump_url),
color=await ctx.embed_colour(),
)
embed.add_field(name="Member", value=ctx.author.mention, inline=True)
embed.add_field(name="Channel", value=ctx.channel.mention, inline=True)
embed.add_field(name="Date", value=date, inline=True)
embed.add_field(name="Reason", value=reason, inline=False)
embed.add_field(name="Context", value=context, inline=False)
if await ctx.embed_requested():
try:
await channel.send(
allowed_mentions=discord.AllowedMentions(roles=True),
content=role.mention if role else None,
embed=embed,
)
await ctx.send("I have alerted the authorities, please remain calm.")
except discord.Forbidden:
return await ctx.send("I do not have permissions to alert the staff.")
else:
return await ctx.send("I do not have permissions to send embeds in the staff's channel.")
|
py | b407c4c55c874d62bf38bcd0599fa0ab2df464ac | """Testing for the GaloisPy library"""
import unittest
import copy
from Galois import GF
GF2 = GF(2)
GF3 = GF(3)
GF4 = GF(4)
GF5 = GF(5)
GF7 = GF(7)
GF11 = GF(11)
a = "a"
b = "b"
class TestArithMethods(unittest.TestCase):
def setUp(self):
GF2 = GF(2)
GF3 = GF(3)
GF4 = GF(4)
GF5 = GF(5)
GF7 = GF(7)
GF11 = GF(11)
def test_add(self):
self.assertEqual(GF11.add_scalar(0, 0), 0)
self.assertEqual(GF2.add(1, 1), 0)
self.assertEqual(GF2.add(0, 1), 1)
self.assertEqual(GF3.add(1, 2), 0)
self.assertEqual(GF3.add(5, 30), 2)
self.assertEqual(GF3.add(-1, 0), 2)
self.assertEqual(GF7.add(3, 4), 0)
self.assertEqual(GF7.add(49, 6), 6)
self.assertEqual(GF4.add(0, 0), 0)
self.assertEqual(GF4.add(a, a), 0)
self.assertEqual(GF4.add(1, 1), 0)
self.assertEqual(GF4.add(b, b), 0)
self.assertEqual(GF4.add(a, 1), b)
self.assertEqual(GF4.add(1, a), b)
self.assertEqual(GF4.add(0, a), a)
def test_multiply(self):
self.assertEqual(GF2.mult_scalar(0, 0), 0)
self.assertEqual(GF4.mult_scalar(0, 0), 0)
self.assertEqual(GF4.mult_scalar(0, a), 0)
self.assertEqual(GF4.mult_scalar(b, 0), 0)
self.assertEqual(GF4.mult_scalar(1, 0), 0)
self.assertEqual(GF7.mult_scalar(1, 7), 0)
self.assertEqual(GF7.mult_scalar(2, 5), 3)
self.assertEqual(GF7.mult_scalar(346, 55), 4)
self.assertEqual(GF4.mult_scalar(1, a), a)
self.assertEqual(GF4.mult_scalar(a, a), b)
self.assertEqual(GF4.mult_scalar(b, b), a)
self.assertEqual(GF4.mult_scalar(b, a), 1)
with self.assertRaises(ValueError):
GF4.mult_scalar(a, 10)
def test_mult_inv(self):
self.assertEqual(GF2.mult_inverse(1), 1)
self.assertEqual(GF4.mult_scalar(a, GF4.mult_inverse(a)), 1)
self.assertEqual(GF4.mult_scalar(b, GF4.mult_inverse(b)), 1)
self.assertEqual(GF4.mult_inverse(1), 1)
self.assertEqual(GF7.mult_scalar(6, GF7.mult_inverse(6)), 1)
self.assertEqual(GF7.mult_scalar(5, GF7.mult_inverse(5)), 1)
self.assertEqual(GF7.mult_scalar(2, GF7.mult_inverse(2)), 1)
self.assertEqual(GF7.mult_inverse(1), 1)
self.assertEqual(GF11.mult_scalar(9, GF11.mult_inverse(9)), 1)
self.assertEqual(GF11.mult_scalar(20, GF11.mult_inverse(9)), 1)
self.assertEqual(GF11.mult_scalar(9, GF11.mult_inverse(20)), 1)
with self.assertRaises(ZeroDivisionError):
GF5.mult_inverse(0)
def test_add_inv(self):
self.assertEqual(GF2.add_inverse(1), 1)
self.assertEqual(GF4.add_inverse(1), 1)
self.assertEqual(GF4.add_inverse(a), a)
self.assertEqual(GF4.add_inverse(b), b)
self.assertEqual(GF7.add(5, GF7.add_inverse(5)), 0)
self.assertEqual(GF7.add([1, 4, 2, 6, 9, 120, -1],
GF7.add_inverse([1, 4, 2, 6, 9, 120, -1])),
[0, 0, 0, 0, 0, 0, 0])
def test_vectors(self):
# Fermat's Little Theorem
self.assertEqual(GF5.exp_scalar(0, 4), 0)
self.assertEqual(GF5.exp_scalar(2, 4), 1)
self.assertEqual(GF5.exp_scalar(3, 4), 1)
self.assertEqual(GF5.exp_scalar(4, 4), 1)
self.assertEqual(GF11.exp_scalar(8, 10), 1)
self.assertEqual(GF4.exp_scalar(a, 2), b)
self.assertEqual(GF4.exp_scalar(a, 3), 1)
self.assertEqual(GF4.exp_scalar(a, 4), a)
self.assertEqual(GF4.exp_scalar(b, 3), 1)
class TestRREF(unittest.TestCase):
def setUp(self):
GF2 = GF(2)
GF3 = GF(3)
GF4 = GF(4)
GF5 = GF(5)
GF7 = GF(7)
GF11 = GF(11)
def test_binary_field(self):
GF2.verbose = False
# Inputs
M_0 = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
M_1 = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
M_2 = [[0]]
M_3 = [[1]]
M_4 = [[1, 0, 1, 0], [0, 1, 0, 0], [1, 0, 1, 0]]
M_5 = [[1, 1, 0, 0], [1, 0, 1, 0], [1, 0, 0, 1], [0, 1, 0, 1]]
# Outputs
M_0o = GF2.rref(M_0)
M_1o = GF2.rref(M_1)
M_2o = GF2.rref(M_2)
M_3o = GF2.rref(M_3)
M_4o = GF2.rref(M_4)
M_5o = GF2.rref(M_5)
self.assertEqual(M_0o, M_0)
self.assertEqual(M_1o, M_1)
self.assertEqual(M_2o, M_2)
self.assertEqual(M_3o, M_3)
self.assertEqual(M_4o, [[1, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 0]])
def test_prime_field(self):
M_1 = [[1, 1, 2, 1, 2],
[1, 0, 1, 1, 0],
[1, 2, 0, 1, 1],
[1, 1, 2, 0, 2],
[2, 2, 1, 2, 1]]
M_2 = [[9, -2],
[0, 11]]
M_3 = [[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 2, 0]]
M_1o = GF3.rref(M_1)
M_2o = GF7.rref(M_2)
M_3o = GF3.rref(M_3)
self.assertEqual(M_1o, [[1, 0, 1, 0, 0],
[0, 1, 1, 0, 2],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]])
self.assertEqual(M_2o, [[1, 0], [0, 1]])
self.assertEqual(M_3o, [[0, 0, 0, 1, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]])
def test_GF4(self):
M_1 = [[a, b],
[b, a]]
M_2 = [[0, 0, b, 0],
[0, 0, 0, 0],
[a, 0, b, 1],
[1, 0, a, b]]
M_1o = GF4.rref(M_1)
M_2o = GF4.rref(M_2)
self.assertEqual(M_1o, [[1, 0], [0, 1]])
self.assertEqual(M_2o, [[1, 0, 0, b],
[0, 0, 1, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]])
class TestCodingMethods(unittest.TestCase):
def setUp(self):
GF2 = GF(2)
GF3 = GF(3)
GF4 = GF(4)
GF5 = GF(5)
GF7 = GF(7)
GF11 = GF(11)
def test_rank(self):
self.assertEqual(GF5.rank([[0]]), 0)
self.assertEqual(GF5.rank([[0, 0, 0], [0, 0, 0]]), 0)
self.assertEqual(GF2.rank([[1, 0], [0, 1]]), 2)
self.assertEqual(GF2.rank([[1, 0], [0, 0]]), 1)
self.assertEqual(GF3.rank([[0, 0], [1, 0]]), 1)
self.assertEqual(GF2.rank([[0, 0], [0, 1]]), 1)
self.assertEqual(GF5.rank([[0, 0, 0], [0, 3, 0]]), 1)
self.assertEqual(GF3.rank([[1, 1, 2, 1, 2],
[1, 0, 1, 1, 0],
[1, 2, 0, 1, 1],
[1, 1, 2, 0, 2],
[2, 2, 1, 2, 1]]), 3)
def test_lin_dep(self):
self.assertTrue(GF2.is_lin_indep([[1, 0], [0, 1]]))
self.assertTrue(GF3.is_lin_indep([[-1, 0], [0, 1]]))
self.assertFalse(GF7.is_lin_indep([[0, 0, 0], [0, 0, 0]]))
self.assertFalse(GF7.is_lin_indep([[0]]))
self.assertFalse(GF5.is_lin_indep([[0, 4], [0, 3]]))
def test_encode(self):
self.assertEqual(GF2.encode([[0]], [0]), [0])
self.assertEqual(GF2.encode([[0]], [1]), [0])
self.assertEqual(GF2.encode([[1, 0, 0, 1]], [1]),
[1, 0, 0, 1])
self.assertEqual(GF7.encode([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]], [4, 5, 2]),
[4, 5, 2])
self.assertEqual(GF4.encode([[a, a, 1, b],
[0, b, a, 1],
[a, 1, 0, a]], [a, 1, b]),
[a, b, 0, 1])
self.assertEqual(GF4.encode([[a, a, 1, b],
[0, b, a, 1],
[a, 1, 0, a]], [a, 0, 1]),
[1, a, a, b])
def test_dot(self):
self.assertEqual(GF2.dot_vec([0], [0]), 0)
self.assertEqual(GF2.dot_vec([0], [1]), 0)
self.assertEqual(GF2.dot_vec([1], [0]), 0)
self.assertEqual(GF2.dot_vec([1], [1]), 1)
self.assertEqual(GF3.dot_vec([0, 0, 1, 1], [1, 1, 0, 0]), 0)
self.assertEqual(GF5.dot_vec([2, 4, 1, 3], [3, 1, 4, 2]), 0)
self.assertEqual(GF7.dot_vec([4, 3, 2, 1], [2, 3, 4, 5]), 2)
self.assertEqual(GF4.dot_vec([a, b, 1], [1, b, a]), a)
with self.assertRaises(ValueError):
GF4.dot_vec([1, a], [0, a, b])
def test_pc(self):
self.assertTrue(GF3.is_pc_matrix([[2, 0, 0, 0], [0, 1, 0, 0]],
[[0, 0, 0, 1], [0, 0, 2, 0]]))
self.assertTrue(GF4.is_pc_matrix([[b, 0, 0, 0],
[0, a, 0, 1],
[0, 0, 1, 0]],
[[0, b, 0, 1]]))
self.assertFalse(GF4.is_pc_matrix([[b, 0, 0, 0],
[0, a, 0, 1],
[0, 0, 1, 0]],
[[0, b, 0, 1],
[a, 0, 1, 0]]))
self.assertTrue(GF4.is_pc_matrix(
[[b, 0, 0, 0],
[0, a, 0, 1],
[0, 0, 1, 0]],
GF4.create_pc_matrix([[b, 0, 0, 0],
[0, a, 0, 1],
[0, 0, 1, 0]])
))
self.assertTrue(GF11.is_pc_matrix(
[[10, 3, 7, 5]],
GF11.create_pc_matrix([[10, 3, 7, 5]])
))
self.assertTrue(GF11.is_pc_matrix(
[[10, 3, 7, 5],
[1, 2, 3, 0]],
GF11.create_pc_matrix([[10, 3, 7, 5],
[1, 2, 3, 0]])
))
def test_standard_form(self):
        self.assertTrue(GF2.is_standard_form([[1]], 'g'))
        self.assertTrue(GF2.is_standard_form([[1]], 'p'))
        self.assertTrue(GF4.is_standard_form([[1]], 'g'))
        self.assertTrue(GF4.is_standard_form([[1]], 'p'))
        self.assertTrue(GF11.is_standard_form([[1]], 'g'))
        self.assertTrue(GF11.is_standard_form([[1]], 'p'))
        self.assertFalse(GF3.is_standard_form([[0]], 'g'))
        self.assertFalse(GF3.is_standard_form([[0]], 'p'))
        self.assertFalse(GF4.is_standard_form([[0]], 'g'))
        self.assertTrue(GF5.is_standard_form([[1, 0, 4, 0],
                                              [0, 1, 2, 3]], 'g'))
        self.assertTrue(GF7.is_standard_form([[1, 0, 0, 6, 6, 1, 0],
                                              [0, 1, 0, 3, 2, 3, 0],
                                              [0, 0, 1, 0, 0, 5, 4]], 'g'))
        self.assertFalse(GF7.is_standard_form([[1, 0, 3, 6, 6, 1, 0],
                                               [0, 1, 0, 3, 2, 3, 0],
                                               [0, 0, 1, 0, 0, 5, 4]], 'g'))
        self.assertFalse(GF7.is_standard_form([[1, 0, 0, 6, 6, 1, 0],
                                               [0, 2, 0, 3, 2, 3, 0],
                                               [0, 0, 1, 0, 0, 5, 4]], 'g'))
        self.assertFalse(GF7.is_standard_form([[1, 0, 0, 6, 6, 1, 0],
                                               [0, 1, 0, 3, 2, 3, 0],
                                               [0, 0, 0, 0, 0, 5, 4]], 'g'))
        self.assertFalse(GF11.is_standard_form([[1, 0, 0, 6, 6, 1, 0],
                                                [0, 1, 0, 3, 2, 3, 0],
                                                [0, 0, 1, 0, 0, 5, 4]], 'p'))
        self.assertTrue(GF2.is_standard_form([[1, 0], [0, 1]], 'g'))
        self.assertTrue(GF2.is_standard_form([[1, 0], [0, 1]], 'p'))
self.assertTrue(GF11.is_standard_form([[2, 1, 0, 0],
[4, 0, 1, 0],
[10, 0, 0, 1]], 'p'))
self.assertFalse(GF11.is_standard_form([[2, 1, 0, 0],
[4, 0, 1, 0],
[10, 0, 0, 2]], 'p'))
self.assertFalse(GF11.is_standard_form([[2, 1, 0, 0],
[4, 0, 1, 0],
[10, 0, 0, 0]], 'p'))
self.assertFalse(GF11.is_standard_form([[2, 8, 0, 0],
[4, 0, 1, 0],
[10, 0, 0, 1]], 'p'))
self.assertFalse(GF7.is_standard_form([[1], [2, 3]], 'g'))
self.assertFalse(GF7.is_standard_form([[1], [2, 3]], 'p'))
self.assertFalse(GF7.is_standard_form([[1, 0],
[0, 1],
[0, 0]], 'g'))
self.assertFalse(GF7.is_standard_form([[1, 0],
[0, 1],
[0, 0]], 'p'))
class TestVerbose(unittest.TestCase):
def setUp(self):
GF4 = GF(4)
GF5 = GF(5)
GF11 = GF(11)
def test_mult_inverse(self):
pass
# GF103 = GF(103, verbose=True)
# GF103.mult_inverse(63)
# GF103 = GF(103)
# GF103.mult_inverse(63, verbose=True)
# GF983 = GF(983)
# GF983.mult_inverse(444, verbose=True)
def test_rref(self):
pass
M_2 = [[0, 0, b, 0],
[0, 0, 0, 0],
[a, 0, b, 1],
[1, 0, a, b]]
GF4.rref(M_2, verbose=True)
if __name__ == '__main__':
unittest.main()
|
py | b407c7b840b8afbf622f975cb8da1301d29c32d1 | # -*- coding: utf-8 -*-
"""
Template module for Rooms
Copy this module up one level and name it as you like, then
use it as a template to create your own Objects.
To make the default commands (such as @dig) default to creating rooms
of your new type, change settings.BASE_ROOM_TYPECLASS to point to
your new class, e.g.
settings.BASE_ROOM_TYPECLASS = "game.gamesrc.objects.myroom.MyRoom"
Note that objects already created in the database will not notice
this change, you have to convert them manually e.g. with the
@typeclass command.
"""
from ev import Room as DefaultRoom
from object_common import ObjectCommon as Object
class Room(Object, DefaultRoom):
"""
Rooms are like any Object, except their location is None
(which is default). They also use basetype_setup() to
add locks so they cannot be puppeted or picked up.
(to change that, use at_object_creation instead)
See examples/object.py for a list of
properties and methods available on all Objects.
"""
pass |
py | b407ca6ee17d441d963169019d807736236e57c2 | # To delete events from Google Calendar
# Deletes events having summary `Class of*`
from __future__ import print_function
import httplib2
import os
from dates import END_TERM_BEGIN, MID_TERM_BEGIN, SEM_BEGIN, GYFT_RECUR_STRS
from apiclient import discovery
import oauth2client
from oauth2client import client
from oauth2client import tools
from oauth2client import file
import datetime
try:
import argparse
flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
except ImportError:
flags = None
SCOPES = 'https://www.googleapis.com/auth/calendar'
CLIENT_SECRET_FILE = 'client_secret.json'
APPLICATION_NAME = 'gyft'
def get_credentials():
"""Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
"""
home_dir = os.path.expanduser('~')
credential_dir = os.path.join(home_dir, '.credentials')
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(credential_dir,
'calendar-python-quickstart.json')
store = file.Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
flow.user_agent = APPLICATION_NAME
if flags:
credentials = tools.run_flow(flow, store, flags)
else: # Needed only for compatibility with Python 2.6
credentials = tools.run(flow, store)
print('Storing credentials to ' + credential_path)
return credentials
def main():
credentials = get_credentials()
http = credentials.authorize(httplib2.Http())
service = discovery.build('calendar', 'v3', http=http)
now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time
print('Getting the events')
eventsResult = service.events().list(
calendarId='primary', timeMin=SEM_BEGIN.strftime('%Y-%m-%dT%H:%M:%S.%fZ'), singleEvents=False, timeMax=END_TERM_BEGIN.strftime('%Y-%m-%dT%H:%M:%S.%fZ'), maxResults=2500).execute()
events = eventsResult.get('items', [])
# print(events)
if not events:
print('No upcoming events found.')
for event in events:
# print(event.get('recurrence'))
if event.get('recurrence', 'NoRecur') in GYFT_RECUR_STRS:
service.events().delete(calendarId='primary',
eventId=event["id"]).execute()
print("Deleted: ", event["summary"], event["start"])
print("Deletion done!")
if __name__ == '__main__':
main()
|
py | b407cb67f129a98a4ac4672421962425761ecd30 | import pytest
from flake8_plugin_utils.utils import assert_error, assert_not_error
from flake8_pytest_style.errors import CompositeAssertion
from flake8_pytest_style.visitors.assertion import AssertionVisitor
def test_ok():
code = """
def test_xxx():
assert something
assert something or something_else
assert something or something_else and something_third
assert not (something and something_else)
"""
assert_not_error(AssertionVisitor, code)
@pytest.mark.parametrize(
'condition',
[
'something and something_else',
'something and something_else and something_third',
'something and not something_else',
'something and (something_else or something_third)',
'not (something or something_else)',
'not (something or something_else or something_third)',
'not (something or something_else and something_third)',
],
)
def test_error(condition):
code = f"""
def test_xxx():
assert {condition}
"""
assert_error(AssertionVisitor, code, CompositeAssertion)
|
py | b407cb694e5c2fb396dd683bc32b36a6dd08f03c | import re
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import r2_score, mean_squared_error
def to_snake_case(name):
'''
Converts a Pascal case string to snake case
INPUT
name - a string to be converted to snake case
OUTPUT
name - the string converted to snake case
'''
name = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
name = re.sub('__([A-Z])', r'_\1', name)
name = re.sub('([a-z0-9])([A-Z])', r'\1_\2', name)
return name.lower()
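# --- Editor's note: hedged examples, not part of the original module. ---
#   to_snake_case("JobSatisfaction") -> "job_satisfaction"
#   to_snake_case("NEWDevOps")       -> "new_dev_ops"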
def count_percentage(x, y, data):
'''
    Counts how often each value of y occurs within each group of x and expresses
    the counts as a percentage of that group's total
    INPUT:
        x - the column name to group by
        y - the column name whose values are counted within each group
data - the dataframe to be used
OUTPUT:
df - a dataframe with the value and its percentage of the total
'''
return data \
.groupby(x)[y] \
.value_counts(normalize=True) \
.mul(100) \
.rename('percent') \
.reset_index()
def plot_cat(x, y, data, title, label, aspect=2):
'''
Creates a barplot of the value counts of a categorical variable
INPUT:
        x - the column name for the categories on the x-axis
        y - the column name of the (possibly semicolon-separated) variable to be plotted
        data - the dataframe to be used
        title - the title of the plot
        label - the title of the legend
        aspect - the aspect ratio of the figure (default 2)
'''
df = data[[x, y]].dropna()
df[y] = df[y].str.split(";")
df_exp = df.explode(y)
df_g = count_percentage(x, y, df_exp)
g = sns.catplot(x = x, y = "percent", hue = y, kind="bar", aspect = aspect, data = df_g)
g.set(xlabel = "Job Satisfaction", ylabel = "Percentage", title = title)
g._legend.set_title(label)
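# Usage sketch (column names, title and label are illustrative assumptions):
#   plot_cat('job_sat', 'language_worked_with', df,
#            title='Languages by job satisfaction', label='Language')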
def clean_data(data):
'''
Cleans the dataframe by removing null values and converting categorical variables to dummies
INPUT:
data - the dataframe to be cleaned
OUTPUT:
X - the predictor variables (dummy-encoded features)
y - the response variable (1 if satisfied, 0 otherwise)
'''
# simplify job_sat
data.loc[data.job_sat.notnull(), "is_satisfied"] = data.job_sat.apply(lambda s: True if s in ["Very satisfied", "Slightly satisfied"] else False)
# drop null job_sat
satisfy = data.dropna(subset=["job_sat"])
# create response vars
y = satisfy.is_satisfied.astype(int)
# select the relevant columns
cols = ["hobbyist" ,"age1st_code" ,"comp_freq" ,\
"country" ,"dev_type" ,"ed_level" ,"employment" ,"ethnicity" ,"gender" ,"new_dev_ops" ,\
"new_dev_ops_impt" ,"new_ed_impt" ,"new_learn" ,"new_onboard_good" ,"new_other_comms" ,\
"new_overtime" ,"op_sys" ,"org_size" ,"purchase_what" ,"sexuality" ,"undergrad_major" ,\
"work_week_hrs" ,"years_code" ,"years_code_pro"]
satisfy = satisfy.drop(satisfy.columns.difference(cols), axis=1)
# for each numeric, fill with mean
num_vars = satisfy.select_dtypes(include=['float', 'int']).columns
for col in num_vars:
satisfy[col].fillna((satisfy[col].mean()), inplace=True)
# create dummies
cat_vars = satisfy.select_dtypes(include=['object']).copy().columns
for var in cat_vars:
# for each cat add dummy var, drop original column
satisfy = pd.concat([satisfy.drop(var, axis=1), pd.get_dummies(satisfy[var], prefix=var, prefix_sep='_', drop_first=True)], axis=1)
X = satisfy
return X, y
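# Usage sketch (assumes `df` is the raw survey DataFrame with snake_case columns):
#   X, y = clean_data(df)
#   # X: dummy-encoded predictors, y: 1 if satisfied else 0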
def find_optimal_mod(X, y, cutoffs, test_size = .30, random_state=42, plot=True):
'''
Finds the optimal logistic regression model for the given data
INPUT
X - pandas dataframe, X matrix
y - pandas dataframe, response variable
cutoffs - list of ints, cutoff for number of non-zero values in dummy categorical vars
test_size - float between 0 and 1, default 0.3, determines the proportion of data as test data
random_state - int, default 42, controls random state for train_test_split
plot - boolean, default True, True to plot result
OUTPUT
r2_scores_test - list of floats of r2 scores on the test data
r2_scores_train - list of floats of r2 scores on the train data
lm_model - model object from sklearn
X_train, X_test, y_train, y_test - output from sklearn train test split used for optimal model
'''
r2_scores_test, r2_scores_train, num_feats, results = [], [], [], dict()
for cutoff in cutoffs:
#reduce X matrix
reduce_X = X.iloc[:, np.where((X.sum() > cutoff) == True)[0]]
num_feats.append(reduce_X.shape[1])
#split the data into train and test
X_train, X_test, y_train, y_test = train_test_split(reduce_X, y, test_size = test_size, random_state=random_state)
#fit the model and obtain pred response
lm_model = LogisticRegression(max_iter=1000)
lm_model.fit(X_train, y_train)
y_test_preds = lm_model.predict(X_test)
y_train_preds = lm_model.predict(X_train)
#append the r2 value from the test set
r2_scores_test.append(r2_score(y_test, y_test_preds))
r2_scores_train.append(r2_score(y_train, y_train_preds))
results[str(cutoff)] = r2_score(y_test, y_test_preds)
if plot:
plt.plot(num_feats, r2_scores_test, label="Test", alpha=.5)
plt.plot(num_feats, r2_scores_train, label="Train", alpha=.5)
plt.xlabel('Number of Features')
plt.ylabel('Rsquared')
plt.title('Rsquared by Number of Features')
plt.legend(loc=1)
plt.show()
best_cutoff = max(results, key=results.get)
#reduce X matrix
reduce_X = X.iloc[:, np.where((X.sum() > int(best_cutoff)) == True)[0]]
num_feats.append(reduce_X.shape[1])
#split the data into train and test
X_train, X_test, y_train, y_test = train_test_split(reduce_X, y, test_size = test_size, random_state=random_state)
#fit the model
lm_model = LogisticRegression(max_iter=1000)
lm_model.fit(X_train, y_train)
return r2_scores_test, r2_scores_train, lm_model, X_train, X_test, y_train, y_test
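# Usage sketch (the cutoff values are arbitrary assumptions):
#   cutoffs = [5000, 3500, 2500, 1000, 100, 50]
#   r2_test, r2_train, model, X_train, X_test, y_train, y_test = \
#       find_optimal_mod(X, y, cutoffs, plot=False)
#   # `model` is the LogisticRegression refit at the best-scoring cutoff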
def coef_weights(coefficients, X_train):
'''
Creates a dataframe of the coefficients and the variables they correspond to
INPUT:
coefficients - the coefficients of the linear model
X_train - the training data, so the column names can be used
OUTPUT:
coefs_df - a dataframe holding the coefficient, estimate, and abs(estimate)
Provides a dataframe that can be used to understand the most influential coefficients
in a linear model by providing the coefficient estimates along with the name of the
variable attached to the coefficient.
'''
coefs_df = pd.DataFrame()
coefs_df['est_int'] = X_train.columns
coefs_df['coefs'] = coefficients
coefs_df['abs_coefs'] = np.abs(coefficients)
coefs_df = coefs_df.sort_values('abs_coefs', ascending=False)
return coefs_df |
py | b407cc2ce198fe7f47bed1b34cdaa25ab0ede73e | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-14 16:20
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import wagtail.wagtailcore.fields
class Migration(migrations.Migration):
dependencies = [
('pages', '0004_auto_20170303_1045'),
('flis_metadata', '0001_initial'),
('wagtailcore', '0032_add_bulk_delete_page_permission'),
('flis_horison_scanning', '0003_auto_20170303_1045'),
]
operations = [
migrations.CreateModel(
name='EEAIndicator',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=512)),
('url', models.URLField(max_length=512)),
],
),
migrations.CreateModel(
name='OriginOfSignal',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=64)),
('description', models.TextField(blank=True, null=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='OverallImpact',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=64)),
('description', models.TextField(blank=True, null=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Signal',
fields=[
('page_ptr',
models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True,
primary_key=True, serialize=False, to='wagtailcore.Page')),
('short_title', models.CharField(max_length=256)),
('type_of_signal', models.CharField(
choices=[('megatrend', 'Megatrend'), ('trend', 'Trend'), ('weak_signal', 'Weak Signal'),
('wild_card', 'Wild Card'), ('other', 'Other')], max_length=64)),
('headline', models.TextField(max_length=256)),
('description', wagtail.wagtailcore.fields.RichTextField()),
('impact_description', wagtail.wagtailcore.fields.RichTextField()),
('implications', wagtail.wagtailcore.fields.RichTextField()),
('cover_image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL,
related_name='+', to='pages.FlisImage')),
('geographical_scope',
models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL,
to='flis_metadata.GeographicalScope')),
('overall_impact',
models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL,
to='flis_horison_scanning.OverallImpact')),
('time_horizon', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL,
to='flis_horison_scanning.TimeHorizon')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='SignalSource',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('source', wagtail.wagtailcore.fields.RichTextField()),
('signal', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='signal_sources',
to='flis_horison_scanning.Signal')),
],
),
migrations.AlterModelOptions(
name='driverofchange',
options={'verbose_name': 'Signal of Change', 'verbose_name_plural': 'Signals of Change'},
),
migrations.AddField(
model_name='eeaindicator',
name='signal',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='eea_indicators',
to='flis_horison_scanning.Signal'),
),
]
|
py | b407cc315eec29b46ddbf8c0a1ebfd0f5a20189b | """ $lic$
Copyright (C) 2016-2020 by Tsinghua University and The Board of Trustees of
Stanford University
This program is free software: you can redistribute it and/or modify it under
the terms of the Modified BSD-3 License as published by the Open Source
Initiative.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the BSD-3 License for more details.
You should have received a copy of the Modified BSD-3 License along with this
program. If not, see <https://opensource.org/licenses/BSD-3-Clause>.
"""
import unittest
from collections import OrderedDict
from nn_dataflow.core import ConvLayer, FCLayer, LocalRegionLayer, PoolingLayer
from nn_dataflow.core import NodeRegion
from nn_dataflow.core import PhyDim2
from nn_dataflow.core import Resource
class TestMapStrategyFixture(unittest.TestCase):
''' Base fixture class for MapStrategy tests. '''
def setUp(self):
# AlexNet.
self.convlayers = OrderedDict()
self.convlayers['conv1'] = ConvLayer(3, 96, 55, 11, 4)
self.convlayers['conv2'] = ConvLayer(48, 256, 27, 5)
self.convlayers['conv3'] = ConvLayer(256, 384, 13, 3)
self.convlayers['conv4'] = ConvLayer(192, 384, 13, 3)
self.convlayers['conv5'] = ConvLayer(192, 256, 13, 3)
self.fclayers = {}
self.fclayers['fc1'] = FCLayer(256, 4096, 6)
self.fclayers['fc2'] = FCLayer(4096, 4096)
self.fclayers['fc3'] = FCLayer(4096, 1000)
# LocalRegionLayer.
self.lrlayers = {}
self.lrlayers['pool1'] = PoolingLayer(64, 7, 2)
self.lrlayers['pool2'] = PoolingLayer(29, 13, 3)
self.lrlayers['pool3'] = PoolingLayer(32, 7, 2, strd=3)
self.lrlayers['lr1'] = LocalRegionLayer(32, 7, nreg=5, sreg=1)
self.lrlayers['lr2'] = LocalRegionLayer(32, 7, nreg=5, sreg=1, strd=2)
# Fake layers.
self.fake_layers = {}
# With irregular nifm/nofm.
self.fake_layers['IRR'] = ConvLayer(255, 383, 13, 3)
# With small numbers of fmaps.
self.fake_layers['SM'] = ConvLayer(5, 3, 13, 3)
# With large FIL height.
self.fake_layers['LGFIL'] = ConvLayer(64, 64, 13, 22)
# Resource.
self.resource = {}
proc_region = NodeRegion(origin=PhyDim2(0, 0), dim=PhyDim2(1, 1),
type=NodeRegion.PROC)
data_region = NodeRegion(origin=PhyDim2(0, 0), dim=PhyDim2(1, 1),
type=NodeRegion.DRAM)
# Eyeriss, ISSCC'16, JSSC'17.
self.resource['BASE'] = Resource(
proc_region=proc_region, dram_region=data_region,
src_data_region=data_region, dst_data_region=data_region,
dim_array=PhyDim2(12, 14), size_gbuf=108*1024, size_regf=520,
array_bus_width=float('inf'), dram_bandwidth=float('inf'),
no_time_mux=False)
|
py | b407cfa4b68e30c0ca905bef63251a74be71f444 | from common.items.cache_item import CommonCacheItem
class BangumiCacheItem(CommonCacheItem):
use_fail = False
|
py | b407d008524621cd7fd5aefec9cd55d60a8b7f87 | from setuptools import setup, find_packages
# from numpy.distutils.core import build_ext, build_src, Extension
import os
'''
with open('expres/VERSION', 'r') as f:
version = f.readline()
ext_modules = [
Extension(name='rv',
sources=['expres/rv/ccf/rv.f90',
'expres/rv/ccf/hermiteccflib.f90',
'expres/rv/ccf/rv.pyf']
)
]
cmdclass = {'build_ext': build_ext.build_ext, 'build_src': build_src.build_src}
'''
if __name__ == '__main__':
setup(name='famed',
version="0.0.1",
author='Enrico Corsaro, Jean McKeever, James Kuslewicz',
author_email='[email protected]',
description='Python wrapper for DIAMONDS tooling',
license='MIT',
url='https://github.com/EnricoCorsaro/FAMED',
packages=find_packages(),
# cmdclass=cmdclass,
# ext_modules=ext_modules,
install_requires=['numpy', 'scipy', 'statistics', 'matplotlib']
)
|
py | b407d1dc3f94567861c1908c0618682fc7308328 | import numpy as np
from numpy.lib.recfunctions import append_fields
import matplotlib.pyplot as plt
from tabulate import tabulate
from heuslertools.tools.data_handling import load_data
from scipy.interpolate import interp1d
import copy
class Measurement(object):
"""Object representing a Measurement
Parameters
----------
file : str
path of file
identifier : str
identifier for data start
delimiter : str, optional
delimiter of data, by default `None`
"""
def __init__(self, file, identifier, delimiter=None, start_row=0, end_row=None, names=True, encoding=None):
self.file = file
"""Path of the data file"""
self._identifier = identifier
self._delimiter = delimiter
self._start_row = start_row
self._end_row = end_row
self._names = names
self._encoding = encoding
self.data = self._load_data()
"""Numpy ndarray containing the data."""
self.names = {}
"""Dict containing the names, short names and units of the data columns"""
self._generate_names()
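# Usage sketch (file name, identifier and column names are assumptions, not a
# documented data format; columns follow the 'Name_Unit' convention):
#   m = Measurement('sample.dat', '[Data]', delimiter='\t')
#   m.print_names()                      # list available columns
#   m.plot('Field_T', 'Resistance_Ohm')  # plot with automatic axis labels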
def _load_data(self):
return load_data(self.file, self._identifier, delimiter=self._delimiter,
start_row=self._start_row, end_row=self._end_row,
names=self._names, encoding=self._encoding)
def _generate_names(self):
for name in self.data.dtype.names:
self.names[name] = {"short_name": ' '.join(
name.split("_")[0:-1]), "unit": name.split("_")[-1]}
def add_data_column(self, name, data):
"""Add column to data.
Parameters
----------
name : str
name of data column, format: `name_name_unit`
data : array
data
"""
self.data = append_fields(self.data, name, data, float)
self._generate_names()
def append_measurement(self, file, start_row=0, end_row=None):
"""Append data from another file.
Parameters
----------
file : str
path of file to append
start_row : int, optional
row to start reading the appended data from, by default 0
end_row : int, optional
row to stop reading the appended data at, by default `None`
"""
self.data = np.append(self.data, load_data(file, self._identifier, delimiter=self._delimiter, start_row=start_row, end_row=end_row, names=self._names, encoding=self._encoding))
def append_measurement_from_measurement(self, measurement):
self.data = np.append(self.data, measurement.data)
def plot(self, x, y, *args, show=True, label=True, **kwargs):
"""Plot data
Parameters
----------
x : str
name of x data column
y : str
name of y data column
show : bool, optional
if `true` the plot will be shown immediately, by default `true`
"""
if show:
plt.figure()
plt.plot(self.data[x], self.data[y], *args, **kwargs)
if label:
plt.xlabel(self.get_axis_label(x))
plt.ylabel(self.get_axis_label(y))
if show:
plt.show()
def get_unit(self, name):
"""
Get unit of data column by column name.
Arguments:
name (str): Column name
Returns:
str: unit of data column
"""
return self.names[name]["unit"]
def get_short_name(self, name):
"""Get short name of data column by column name.
Parameters
----------
name : str
Column name
Returns
-------
str
short name of data column
"""
return self.names[name]["short_name"]
def get_axis_label(self, name):
"""Get axis label of data column by column name.
Parameters
----------
name : str
Column name
Returns
-------
str
axis label of data column
"""
return self.get_short_name(name) + ' (' + self.get_unit(name) + ')'
def interpolation(self, x, y, kind='linear'):
"""Interpolate data
Parameters
----------
x : str
name of x data column
y : str
name of y data column
kind : str, optional
kind of interpolation (see scipy.interpolate.interp1d), by default
'linear'
Returns
-------
callable
call the returned callable with an x value to evaluate the
interpolation at this position
"""
return interp1d(self.data[x], self.data[y], bounds_error=False, kind=kind)
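# Usage sketch (column names are assumptions following the 'Name_Unit' convention):
#   r_of_t = m.interpolation('Temperature_K', 'Resistance_Ohm')
#   r_of_t(4.2)  # interpolated resistance at 4.2 K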
def print_names(self):
"""
Print table of availiable data columns that can be used to access the data.
"""
headers = ["name", "short_name", "unit"]
table = [[name, self.names[name]["short_name"], self.names[name]["unit"]]
for name in self.names]
print("Availiable names:")
print(tabulate(table, headers))
def substract_linear_baseline(self, x, y, x_min, x_max, mean=False, symmetric_zero=False):
"""Substract linear baseline from x-y-data and add substracted data
column to data.
Parameters
----------
x : str
name of x data column
y : str
name of y data column
x_min : float
lower bound of the x range from which the linear baseline is extracted
x_max : float
upper bound of the x range from which the linear baseline is extracted
mean: bool, optional
if `true` the mean of the corrected data is subtracted, centering it on the x-axis
symmetric_zero: bool, optional
if `true` only the fitted slope is removed, keeping the baseline offset
"""
data_name = y.split('_')
data_name.insert(-1, 'LinearBaselineSubstracted')
data_name = "_".join(data_name)
indices = np.where(np.logical_and(self.data[x] >= x_min, self.data[x] <= x_max))
fit = np.poly1d(np.polyfit(self.data[x][indices], self.data[y][indices], 1))
if symmetric_zero:
data = self.data[y] - (self.data[x]*fit[1])
else:
data = self.data[y]-fit(self.data[x])
if mean:
data = data - np.mean(data)
self.add_data_column(data_name, data)
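# Usage sketch (column names and bounds are assumptions): removes a straight
# line fitted between x = 2 and x = 5 and stores the result as a new column
# named 'Moment_LinearBaselineSubstracted_emu'.
#   m.substract_linear_baseline('Field_T', 'Moment_emu', 2.0, 5.0)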
def filter_data(self, column, expression, filter_type='keep', return_new_measurement=False):
filter_arr = []
for value in self.data[column]:
filter_arr.append(eval(expression.replace('x', str(value))))
if filter_type == 'delete':
filter_arr = [not x for x in filter_arr]
if return_new_measurement:
measurement = copy.copy(self)
else:
measurement = self
measurement.data = measurement.data[filter_arr]
return measurement
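# Usage sketch (column name and expression are assumptions): keeps only rows
# where the expression, with 'x' standing for each value, evaluates to True.
#   m.filter_data('Temperature_K', 'x < 300')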
def save(self, filename):
names = []
for name in self.names:
names.append(name)
header = self._identifier + '\n' + ','.join(names)
np.savetxt(filename, self.data,
delimiter=self._delimiter,
header=header,
comments='')
|
py | b407d2151cb11ffdf723198ff8a164e3e02e8ea4 | # coding: utf-8
"""
Marketing API
<p>The <i>Marketing API </i> offers two platforms that sellers can use to promote and advertise their products:</p> <ul><li><b>Promoted Listings</b> is an eBay ad service that lets sellers set up <i>ad campaigns </i> for the products they want to promote. eBay displays the ads in search results and in other marketing modules as <b>SPONSORED</b> listings. If an item in a Promoted Listings campaign sells, the seller is assessed a Promoted Listings fee, which is a seller-specified percentage applied to the sales price. For complete details, see <a href=\"/api-docs/sell/static/marketing/promoted-listings.html\">Promoted Listings</a>.</li> <li><b>Promotions Manager</b> gives sellers a way to offer discounts on specific items as a way to attract buyers to their inventory. Sellers can set up discounts (such as \"20% off\" and other types of offers) on specific items or on an entire customer order. To further attract buyers, eBay prominently displays promotion <i>teasers</i> throughout buyer flows. For complete details, see <a href=\"/api-docs/sell/static/marketing/promotions-manager.html\">Promotions Manager</a>.</li></ul> <p><b>Marketing reports</b>, on both the Promoted Listings and Promotions Manager platforms, give sellers information that shows the effectiveness of their marketing strategies. The data gives sellers the ability to review and fine tune their marketing efforts.</p> <p class=\"tablenote\"><b>Important!</b> Sellers must have an active eBay Store subscription, and they must accept the <b>Terms and Conditions</b> before they can make requests to these APIs in the Production environment. There are also site-specific listings requirements and restrictions associated with these marketing tools, as listed in the \"requirements and restrictions\" sections for <a href=\"/api-docs/sell/marketing/static/overview.html#PL-requirements\">Promoted Listings</a> and <a href=\"/api-docs/sell/marketing/static/overview.html#PM-requirements\">Promotions Manager</a>.</p> <p>The table below lists all the Marketing API calls grouped by resource.</p> # noqa: E501
OpenAPI spec version: v1.10.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class CampaignPagedCollection(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'campaigns': 'list[Campaign]',
'href': 'str',
'limit': 'int',
'next': 'str',
'offset': 'int',
'prev': 'str',
'total': 'int'
}
attribute_map = {
'campaigns': 'campaigns',
'href': 'href',
'limit': 'limit',
'next': 'next',
'offset': 'offset',
'prev': 'prev',
'total': 'total'
}
def __init__(self, campaigns=None, href=None, limit=None, next=None, offset=None, prev=None, total=None): # noqa: E501
"""CampaignPagedCollection - a model defined in Swagger""" # noqa: E501
self._campaigns = None
self._href = None
self._limit = None
self._next = None
self._offset = None
self._prev = None
self._total = None
self.discriminator = None
if campaigns is not None:
self.campaigns = campaigns
if href is not None:
self.href = href
if limit is not None:
self.limit = limit
if next is not None:
self.next = next
if offset is not None:
self.offset = offset
if prev is not None:
self.prev = prev
if total is not None:
self.total = total
@property
def campaigns(self):
"""Gets the campaigns of this CampaignPagedCollection. # noqa: E501
A list of campaigns contained on this page from the paginated response. # noqa: E501
:return: The campaigns of this CampaignPagedCollection. # noqa: E501
:rtype: list[Campaign]
"""
return self._campaigns
@campaigns.setter
def campaigns(self, campaigns):
"""Sets the campaigns of this CampaignPagedCollection.
A list of campaigns contained on this page from the paginated response. # noqa: E501
:param campaigns: The campaigns of this CampaignPagedCollection. # noqa: E501
:type: list[Campaign]
"""
self._campaigns = campaigns
@property
def href(self):
"""Gets the href of this CampaignPagedCollection. # noqa: E501
The URI of the current page of results from the result set. # noqa: E501
:return: The href of this CampaignPagedCollection. # noqa: E501
:rtype: str
"""
return self._href
@href.setter
def href(self, href):
"""Sets the href of this CampaignPagedCollection.
The URI of the current page of results from the result set. # noqa: E501
:param href: The href of this CampaignPagedCollection. # noqa: E501
:type: str
"""
self._href = href
@property
def limit(self):
"""Gets the limit of this CampaignPagedCollection. # noqa: E501
The number of items returned on a single page from the result set. This value can be set in the request with the limit query parameter. # noqa: E501
:return: The limit of this CampaignPagedCollection. # noqa: E501
:rtype: int
"""
return self._limit
@limit.setter
def limit(self, limit):
"""Sets the limit of this CampaignPagedCollection.
The number of items returned on a single page from the result set. This value can be set in the request with the limit query parameter. # noqa: E501
:param limit: The limit of this CampaignPagedCollection. # noqa: E501
:type: int
"""
self._limit = limit
@property
def next(self):
"""Gets the next of this CampaignPagedCollection. # noqa: E501
The URI for the following page of results. This value is returned only if there is an additional page of results to display from the result set. Max length: 2048 # noqa: E501
:return: The next of this CampaignPagedCollection. # noqa: E501
:rtype: str
"""
return self._next
@next.setter
def next(self, next):
"""Sets the next of this CampaignPagedCollection.
The URI for the following page of results. This value is returned only if there is an additional page of results to display from the result set. Max length: 2048 # noqa: E501
:param next: The next of this CampaignPagedCollection. # noqa: E501
:type: str
"""
self._next = next
@property
def offset(self):
"""Gets the offset of this CampaignPagedCollection. # noqa: E501
The number of results skipped in the result set before listing the first returned result. This value can be set in the request with the offset query parameter. Note: The items in a paginated result set use a zero-based list where the first item in the list has an offset of 0. # noqa: E501
:return: The offset of this CampaignPagedCollection. # noqa: E501
:rtype: int
"""
return self._offset
@offset.setter
def offset(self, offset):
"""Sets the offset of this CampaignPagedCollection.
The number of results skipped in the result set before listing the first returned result. This value can be set in the request with the offset query parameter. Note: The items in a paginated result set use a zero-based list where the first item in the list has an offset of 0. # noqa: E501
:param offset: The offset of this CampaignPagedCollection. # noqa: E501
:type: int
"""
self._offset = offset
@property
def prev(self):
"""Gets the prev of this CampaignPagedCollection. # noqa: E501
The URI for the preceding page of results. This value is returned only if there is a previous page of results to display from the result set. Max length: 2048 # noqa: E501
:return: The prev of this CampaignPagedCollection. # noqa: E501
:rtype: str
"""
return self._prev
@prev.setter
def prev(self, prev):
"""Sets the prev of this CampaignPagedCollection.
The URI for the preceding page of results. This value is returned only if there is a previous page of results to display from the result set. Max length: 2048 # noqa: E501
:param prev: The prev of this CampaignPagedCollection. # noqa: E501
:type: str
"""
self._prev = prev
@property
def total(self):
"""Gets the total of this CampaignPagedCollection. # noqa: E501
The total number of items retrieved in the result set. If no items are found, this field is returned with a value of 0. # noqa: E501
:return: The total of this CampaignPagedCollection. # noqa: E501
:rtype: int
"""
return self._total
@total.setter
def total(self, total):
"""Sets the total of this CampaignPagedCollection.
The total number of items retrieved in the result set. If no items are found, this field is returned with a value of 0. # noqa: E501
:param total: The total of this CampaignPagedCollection. # noqa: E501
:type: int
"""
self._total = total
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(CampaignPagedCollection, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CampaignPagedCollection):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
py | b407d295c4f6211786ea450a41b70aac7db3d4a9 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.26 on 2020-05-09 07:17
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('api', '0004_apptoken'),
]
operations = [
migrations.CreateModel(
name='EditRequestLog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('access', models.DateTimeField(auto_now_add=True)),
('resource', models.CharField(max_length=100)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['access'],
},
),
]
|
py | b407d2ec984e4df33aaf5a0edf8d43dce1cb41f8 | # CTK: Cherokee Toolkit
#
# Authors:
# Alvaro Lopez Ortega <[email protected]>
#
# Copyright (C) 2009 Alvaro Lopez Ortega
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of version 2 of the GNU General Public
# License as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
from Widget import Widget
from Server import cfg
from util import props_to_str
class Combobox (Widget):
"""
Widget for drop-down combo elements.
Arguments:
props: dictionary with properties for the HTML element,
such as {'name': 'foo', 'id': 'bar', 'class': 'noauto'}.
options: set of tuples in the form (value,
description), (v2, d2), ...
Examples:
combo = CTK.Combobox({'name': 'language'},
[('en', 'English'), ('es', 'Spanish')])
"""
def __init__ (self, props, options):
Widget.__init__ (self)
self.props = props.copy()
self._options = options
if 'id' not in self.props:
self.props['id'] = 'Combobox_%s' %(self.uniq_id)
self.id = self.props['id']
def Render (self):
selected = self.props.get('selected')
def render_str (o):
if len(o) == 2:
name, label = o
props = {}
elif len(o) == 3:
name, label, props = o
props_str = props_to_str(props)
if selected and str(selected) == str(name):
return '<option value="%s" selected="true" %s>%s</option>' % (name, props_str, label)
else:
return '<option value="%s" %s>%s</option>' % (name, props_str, label)
def render_list (o):
if len(o) == 2:
name, options = o
props = {}
elif len(o) == 3:
name, options, props = o
props_str = props_to_str(props)
txt = '<optgroup label="%s" %s>' %(name, props_str)
for o in options:
txt += render_str (o)
txt += '</optgroup>'
return txt
# Render entries
content = ''
for o in self._options:
if type(o[1]) == str:
content += render_str (o)
elif type(o[1]) == list:
content += render_list (o)
else:
raise ValueError
# Render the container
header = ''
for p in filter(lambda x: x!='selected', self.props):
if self.props[p]:
header += ' %s="%s"' %(p, self.props[p])
else:
header += ' %s' %(p)
html = '<select%s>%s</select>' %(header, content)
render = Widget.Render (self)
render.html += html
return render
class ComboCfg (Combobox):
"""
Configuration-Tree based Combobox widget. Pre-selects the
combo-entry corresponding to the value of the configuration tree
given by key argument if it exists. Everything else is like the
Combobox widget.
"""
def __init__ (self, key, options, _props={}):
props = _props.copy()
# Read the key value
val = cfg.get_val(key)
sel = None
# Look for the selected entry
for v,k in options:
if v == val:
sel = val
if sel:
props['selected'] = sel
# Other properties
props['name'] = key
# Init parent
Combobox.__init__ (self, props, options)
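# Usage sketch (the configuration key and options are hypothetical):
#   combo = ComboCfg('server!timeout', [('1', '1 second'), ('5', '5 seconds')])
# pre-selects the entry whose value matches cfg.get_val('server!timeout'), if any.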
|
py | b407d500778622616abd438e118a110150683014 | # -*- coding: utf-8 -*-
"""
“Commons Clause” License Condition v1.0
Copyright Oli 2019-2020
The Software is provided to you by the Licensor under the
License, as defined below, subject to the following condition.
Without limiting other conditions in the License, the grant
of rights under the License will not include, and the License
does not grant to you, the right to Sell the Software.
For purposes of the foregoing, “Sell” means practicing any or
all of the rights granted to you under the License to provide
to third parties, for a fee or other consideration (including
without limitation fees for hosting or consulting/ support
services related to the Software), a product or service whose
value derives, entirely or substantially, from the functionality
of the Software. Any license notice or attribution required by
the License must also include this Commons Clause License
Condition notice.
Software: PartyBot (fortnitepy-bot)
License: Apache 2.0
"""
try:
# System imports.
from typing import Tuple, Any, Union
import asyncio
import sys
import datetime
import json
import functools
import os
import random as py_random
import logging
import uuid
import json
# Third party imports.
from fortnitepy.ext import commands
import crayons
import fortnitepy
import BenBotAsync
import aiohttp
import pypresence
import psutil
except ModuleNotFoundError as e:
print(e)
print('Failed to import 1 or more modules, running "INSTALL PACKAGES.bat"'
' might fix the issue, if not please create an issue or join'
' the support server.')
sys.exit()
# Imports uvloop and uses it if installed (Unix only).
try:
import uvloop
except ImportError:
pass
else:
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
if sys.platform == 'win32':
asyncio.set_event_loop(asyncio.ProactorEventLoop())
def time() -> str:
return datetime.datetime.now().strftime('%H:%M:%S')
def get_device_auth_details() -> dict:
if os.path.isfile('device_auths.json'):
with open('device_auths.json', 'r') as fp:
return json.load(fp)
else:
with open('device_auths.json', 'w+') as fp:
json.dump({}, fp, sort_keys=False, indent=4)
return {}
def store_device_auth_details(email: str, details: dict) -> None:
existing = get_device_auth_details()
existing[email] = details
with open('device_auths.json', 'w') as fp:
json.dump(existing, fp, sort_keys=False, indent=4)
def check_if_process_running(name: str) -> bool:
for process in psutil.process_iter():
try:
if name.lower() in process.name().lower():
return True
except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
pass
return False
async def set_vtid(variant_token: str) -> Tuple[str, str, int]:
async with aiohttp.ClientSession() as session:
request = await session.request(
method='GET',
url='https://benbotfn.tk/api/v1/assetProperties',
params={
'path': 'FortniteGame/Content/Athena/'
f'Items/CosmeticVariantTokens/{variant_token}.uasset'
})
response = await request.json()
file_location = response['export_properties'][0]
skin_cid = file_location['cosmetic_item']
variant_channel_tag = file_location['VariantChanelTag']['TagName']
variant_name_tag = file_location['VariantNameTag']['TagName']
variant_type = variant_channel_tag.split(
'Cosmetics.Variant.Channel.'
)[1].split('.')[0]
variant_int = int("".join(filter(
lambda x: x.isnumeric(), variant_name_tag
)))
return skin_cid, variant_type if variant_type != 'ClothingColor' else 'clothing_color', variant_int
async def get_playlist(display_name: str) -> str:
async with aiohttp.ClientSession() as session:
request = await session.request(
method='GET',
url='http://scuffedapi.xyz/api/playlists/search',
params={
'displayName': display_name
})
response = await request.json()
return response['id'] if 'error' not in response else None
async def set_and_update_member_prop(schema_key: str, new_value: Any) -> None:
prop = {schema_key: client.party.me.meta.set_prop(schema_key, new_value)}
await client.party.me.patch(updated=prop)
async def set_and_update_party_prop(schema_key: str, new_value: Any) -> None:
prop = {schema_key: client.party.me.meta.set_prop(schema_key, new_value)}
await client.party.patch(updated=prop)
async def start_discord_rich_presence() -> None:
rpc = pypresence.AioPresence(
client_id='717610574837710919',
loop=client.loop
)
try:
await rpc.connect()
except Exception as discord_error:
print(f'There was an error: {discord_error}.')
start_time = datetime.datetime.now().timestamp()
while True:
try:
outfit = (await BenBotAsync.get_cosmetic_from_id(
cosmetic_id=client.party.me.outfit
)).name
except BenBotAsync.exceptions.NotFound:
outfit = client.party.me.outfit
await rpc.update(
details=f"Logged in as {client.user.display_name}.",
state=f"{client.party.leader.display_name}'s party.",
large_image="skull_trooper",
large_text="discord.gg/fnpy",
small_image="outfit",
small_text=outfit,
start=int(start_time),
party_id=client.party.id,
party_size=[client.party.member_count, 16],
join=uuid.uuid4().hex
)
await asyncio.sleep(20)
print(crayons.cyan(f'[PartyBot] [{time()}] PartyBot made by xMistt. '
'Massive credit to Terbau for creating the library.'))
print(crayons.cyan(f'[PartyBot] [{time()}] Discord server: https://discord.gg/fnpy - For support, questions, etc.'))
with open('config.json') as f:
data = json.load(f)
if data['debug']:
logger = logging.getLogger('fortnitepy.http')
logger.setLevel(level=logging.DEBUG)
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(logging.Formatter('\u001b[36m %(asctime)s:%(levelname)s:%(name)s: %(message)s \u001b[0m'))
logger.addHandler(handler)
logger = logging.getLogger('fortnitepy.xmpp')
logger.setLevel(level=logging.DEBUG)
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(logging.Formatter('\u001b[35m %(asctime)s:%(levelname)s:%(name)s: %(message)s \u001b[0m'))
logger.addHandler(handler)
device_auth_details = get_device_auth_details().get(data['email'], {})
client = commands.Bot(
command_prefix='!',
auth=fortnitepy.AdvancedAuth(
email=data['email'],
password=data['password'],
prompt_authorization_code=True,
delete_existing_device_auths=True,
**device_auth_details
),
status=data['status'],
platform=fortnitepy.Platform(data['platform']),
avatar=fortnitepy.Avatar(
asset="cid_028_ff2b06cf446376144ba408d3482f5c982bf2584cf0f508ee3e4ba4a0fd461a38",
background_colors=fortnitepy.KairosBackgroundColorPreset.PINK.value
)
)
@client.event
async def event_device_auth_generate(details: dict, email: str) -> None:
store_device_auth_details(email, details)
@client.event
async def event_ready() -> None:
print(crayons.green(f'[PartyBot] [{time()}] Client ready as {client.user.display_name}.'))
discord_exists = await client.loop.run_in_executor(None, check_if_process_running, 'Discord')
if discord_exists:
client.loop.create_task(start_discord_rich_presence())
for pending in list(client.pending_friends.values()):
if pending.direction == 'INBOUND':
try:
epic_friend = await pending.accept() if data["friend_accept"] else await pending.decline()
if isinstance(epic_friend, fortnitepy.Friend):
print(f"[PartyBot] [{time()}] Accepted friend request from: {epic_friend.display_name}.")
else:
print(f"[PartyBot] [{time()}] Declined friend request from: {pending.display_name}.")
except fortnitepy.HTTPException as epic_error:
if epic_error.message_code != 'errors.com.epicgames.common.throttled':
raise
await asyncio.sleep(int(epic_error.message_vars[0] + 1))
await pending.accept() if data["friend_accept"] else await pending.decline()
@client.event
async def event_party_invite(invite: fortnitepy.ReceivedPartyInvitation) -> None:
await invite.accept()
print(f'[PartyBot] [{time()}] Accepted party invite from {invite.sender.display_name}.')
@client.event
async def event_friend_request(request: fortnitepy.PendingFriend) -> None:
print(f"[PartyBot] [{time()}] Received friend request from: {request.display_name}.")
if data['friend_accept']:
await request.accept()
print(f"[PartyBot] [{time()}] Accepted friend request from: {request.display_name}.")
else:
await request.decline()
print(f"[PartyBot] [{time()}] Declined friend request from: {request.display_name}.")
@client.event
async def event_party_member_join(member: fortnitepy.PartyMember) -> None:
await BenBotAsync.set_default_loadout(client, data, member)
@client.event
async def event_friend_message(message: fortnitepy.FriendMessage) -> None:
print(crayons.magenta(f'[PartyBot] [{time()}] {message.author.display_name}: {message.content}'))
@client.event
async def event_party_message(message: fortnitepy.FriendMessage) -> None:
print(crayons.green(f'[PartyBot] [{time()}] {message.author.display_name}: {message.content}'))
@commands.dm_only()
@client.command()
async def skin(ctx: fortnitepy.ext.commands.Context, *, content: str) -> None:
try:
cosmetic = await BenBotAsync.get_cosmetic(
lang="en",
searchLang="en",
matchMethod="contains",
name=content,
backendType="AthenaCharacter"
)
await ctx.send(f'Skin set to {cosmetic.id}.')
print(f"[PartyBot] [{time()}] Set skin to: {cosmetic.id}.")
await client.party.me.set_outfit(asset=cosmetic.id)
except BenBotAsync.exceptions.NotFound:
await ctx.send(f"Failed to find a skin with the name: {content}.")
print(f"[PartyBot] [{time()}] Failed to find a skin with the name: {content}.")
@commands.dm_only()
@client.command()
async def backpack(ctx: fortnitepy.ext.commands.Context, *, content: str) -> None:
try:
cosmetic = await BenBotAsync.get_cosmetic(
lang="en",
searchLang="en",
matchMethod="contains",
name=content,
backendType="AthenaBackpack"
)
await ctx.send(f'Backpack set to {cosmetic.id}.')
print(f"[PartyBot] [{time()}] Set backpack to: {cosmetic.id}.")
await client.party.me.set_backpack(asset=cosmetic.id)
except BenBotAsync.exceptions.NotFound:
await ctx.send(f"Failed to find a backpack with the name: {content}.")
print(f"[PartyBot] [{time()}] Failed to find a backpack with the name: {content}.")
@commands.dm_only()
@client.command()
async def emote(ctx: fortnitepy.ext.commands.Context, *, content: str) -> None:
try:
cosmetic = await BenBotAsync.get_cosmetic(
lang="en",
searchLang="en",
matchMethod="contains",
name=content,
backendType="AthenaDance"
)
await ctx.send(f'Emote set to {cosmetic.id}.')
print(f"[PartyBot] [{time()}] Set emote to: {cosmetic.id}.")
await client.party.me.clear_emote()
await client.party.me.set_emote(asset=cosmetic.id)
except BenBotAsync.exceptions.NotFound:
await ctx.send(f"Failed to find an emote with the name: {content}.")
print(f"[PartyBot] [{time()}] Failed to find an emote with the name: {content}.")
@commands.dm_only()
@client.command()
async def pickaxe(ctx: fortnitepy.ext.commands.Context, *, content: str) -> None:
try:
cosmetic = await BenBotAsync.get_cosmetic(
lang="en",
searchLang="en",
matchMethod="contains",
name=content,
backendType="AthenaPickaxe"
)
await ctx.send(f'Pickaxe set to {cosmetic.id}.')
print(f"[PartyBot] [{time()}] Set pickaxe to: {cosmetic.id}.")
await client.party.me.set_pickaxe(asset=cosmetic.id)
except BenBotAsync.exceptions.NotFound:
await ctx.send(f"Failed to find a pickaxe with the name: {content}.")
print(f"[PartyBot] [{time()}] Failed to find a pickaxe with the name: {content}.")
@commands.dm_only()
@client.command()
async def pet(ctx: fortnitepy.ext.commands.Context, *, content: str) -> None:
try:
cosmetic = await BenBotAsync.get_cosmetic(
lang="en",
searchLang="en",
matchMethod="contains",
name=content,
backendType="AthenaPetCarrier"
)
await ctx.send(f'Pet set to {cosmetic.id}.')
print(f"[PartyBot] [{time()}] Set pet to: {cosmetic.id}.")
await client.party.me.set_pet(asset=cosmetic.id)
except BenBotAsync.exceptions.NotFound:
await ctx.send(f"Failed to find a pet with the name: {content}.")
print(f"[PartyBot] [{time()}] Failed to find a pet with the name: {content}.")
@commands.dm_only()
@client.command()
async def emoji(ctx: fortnitepy.ext.commands.Context, *, content: str) -> None:
try:
cosmetic = await BenBotAsync.get_cosmetic(
lang="en",
searchLang="en",
matchMethod="contains",
name=content,
backendType="AthenaEmoji"
)
await ctx.send(f'Emoji set to {cosmetic.id}.')
print(f"[PartyBot] [{time()}] Set emoji to: {cosmetic.id}.")
await client.party.me.set_emoji(asset=cosmetic.id)
except BenBotAsync.exceptions.NotFound:
await ctx.send(f"Failed to find an emoji with the name: {content}.")
print(f"[PartyBot] [{time()}] Failed to find an emoji with the name: {content}.")
@commands.dm_only()
@client.command()
async def contrail(ctx: fortnitepy.ext.commands.Context, *, content: str) -> None:
try:
cosmetic = await BenBotAsync.get_cosmetic(
lang="en",
searchLang="en",
matchMethod="contains",
name=content,
backendType="AthenaSkyDiveContrail"
)
await ctx.send(f'Contrail set to {cosmetic.id}.')
print(f"[PartyBot] [{time()}] Set contrail to: {cosmetic.id}.")
await client.party.me.set_contrail(asset=cosmetic.id)
except BenBotAsync.exceptions.NotFound:
await ctx.send(f"Failed to find a contrail with the name: {content}.")
print(f"[PartyBot] [{time()}] Failed to find an contrail with the name: {content}.")
@commands.dm_only()
@client.command()
async def purpleskull(ctx: fortnitepy.ext.commands.Context) -> None:
skin_variants = client.party.me.create_variants(
clothing_color=1
)
await client.party.me.set_outfit(
asset='CID_030_Athena_Commando_M_Halloween',
variants=skin_variants
)
await ctx.send('Skin set to Purple Skull Trooper!')
print(f"[PartyBot] [{time()}] Skin set to Purple Skull Trooper.")
@commands.dm_only()
@client.command()
async def pinkghoul(ctx: fortnitepy.ext.commands.Context) -> None:
skin_variants = client.party.me.create_variants(
material=3
)
await client.party.me.set_outfit(
asset='CID_029_Athena_Commando_F_Halloween',
variants=skin_variants
)
await ctx.send('Skin set to Pink Ghoul Trooper!')
print(f"[PartyBot] [{time()}] Skin set to Pink Ghoul Trooper.")
@commands.dm_only()
@client.command()
async def purpleportal(ctx: fortnitepy.ext.commands.Context) -> None:
skin_variants = client.party.me.create_variants(
item='AthenaBackpack',
particle_config='Particle',
particle=1
)
await client.party.me.set_backpack(
asset='BID_105_GhostPortal',
variants=skin_variants
)
await ctx.send('Backpack set to Purple Ghost Portal!')
print(f"[PartyBot] [{time()}] Backpack set to Purple Ghost Portal.")
@commands.dm_only()
@client.command()
async def banner(ctx: fortnitepy.ext.commands.Context, icon: str, colour: str, banner_level: int) -> None:
await client.party.me.set_banner(icon=icon, color=colour, season_level=banner_level)
await ctx.send(f'Banner set to: {icon}, {colour}, {banner_level}.')
print(f"[PartyBot] [{time()}] Banner set to: {icon}, {colour}, {banner_level}.")
@commands.dm_only()
@client.command()
async def cid(ctx: fortnitepy.ext.commands.Context, character_id: str) -> None:
await client.party.me.set_outfit(
asset=character_id,
variants=client.party.me.create_variants(profile_banner='ProfileBanner')
)
await ctx.send(f'Skin set to {character_id}')
print(f'[PartyBot] [{time()}] Skin set to {character_id}')
@commands.dm_only()
@client.command()
async def vtid(ctx: fortnitepy.ext.commands.Context, variant_token: str) -> None:
variant_id = await set_vtid(variant_token)
if variant_id[1].lower() == 'particle':
skin_variants = client.party.me.create_variants(particle_config='Particle', particle=1)
else:
skin_variants = client.party.me.create_variants(**{variant_id[1].lower(): int(variant_id[2])})
await client.party.me.set_outfit(asset=variant_id[0], variants=skin_variants)
print(f'[PartyBot] [{time()}] Set variants of {variant_id[0]} to {variant_id[1]} {variant_id[2]}.')
await ctx.send(f'Variants set to {variant_token}.\n'
'(Warning: This feature is not supported, please use !variants)')
@commands.dm_only()
@client.command()
async def variants(ctx: fortnitepy.ext.commands.Context, cosmetic_id: str, variant_type: str, variant_int: str) -> None:
if 'cid' in cosmetic_id.lower() and 'jersey_color' not in variant_type.lower():
skin_variants = client.party.me.create_variants(
**{variant_type: int(variant_int) if variant_int.isdigit() else variant_int}
)
await client.party.me.set_outfit(
asset=cosmetic_id,
variants=skin_variants
)
elif 'cid' in cosmetic_id.lower() and 'jersey_color' in variant_type.lower():
cosmetic_variants = client.party.me.create_variants(
pattern=0,
numeric=69,
**{variant_type: int(variant_int) if variant_int.isdigit() else variant_int}
)
await client.party.me.set_outfit(
asset=cosmetic_id,
variants=cosmetic_variants
)
elif 'bid' in cosmetic_id.lower():
cosmetic_variants = client.party.me.create_variants(
item='AthenaBackpack',
**{variant_type: int(variant_int) if variant_int.isdigit() else variant_int}
)
await client.party.me.set_backpack(
asset=cosmetic_id,
variants=cosmetic_variants
)
elif 'pickaxe_id' in cosmetic_id.lower():
cosmetic_variants = client.party.me.create_variants(
item='AthenaPickaxe',
**{variant_type: int(variant_int) if variant_int.isdigit() else variant_int}
)
await client.party.me.set_pickaxe(
asset=cosmetic_id,
variants=cosmetic_variants
)
await ctx.send(f'Set variants of {cosmetic_id} to {variant_type} {variant_int}.')
print(f'[PartyBot] [{time()}] Set variants of {cosmetic_id} to {variant_type} {variant_int}.')
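# Usage sketch (values mirror the !purpleskull command above): sending
# "!variants CID_030_Athena_Commando_M_Halloween clothing_color 1" in a DM
# applies the purple style to the Skull Trooper outfit.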
@commands.dm_only()
@client.command()
async def checkeredrenegade(ctx: fortnitepy.ext.commands.Context) -> None:
skin_variants = client.party.me.create_variants(
material=2
)
await client.party.me.set_outfit(
asset='CID_028_Athena_Commando_F',
variants=skin_variants
)
await ctx.send('Skin set to Checkered Renegade!')
print(f'[PartyBot] [{time()}] Skin set to Checkered Renegade.')
@commands.dm_only()
@client.command()
async def mintyelf(ctx: fortnitepy.ext.commands.Context) -> None:
skin_variants = client.party.me.create_variants(
material=2
)
await client.party.me.set_outfit(
asset='CID_051_Athena_Commando_M_HolidayElf',
variants=skin_variants
)
await ctx.send('Skin set to Minty Elf!')
print(f'[PartyBot] [{time()}] Skin set to Minty Elf.')
@commands.dm_only()
@client.command()
async def eid(ctx: fortnitepy.ext.commands.Context, emote_id: str) -> None:
await client.party.me.clear_emote()
await client.party.me.set_emote(
asset=emote_id
)
await ctx.send(f'Emote set to {emote_id}!')
@commands.dm_only()
@client.command()
async def stop(ctx: fortnitepy.ext.commands.Context) -> None:
await client.party.me.clear_emote()
await ctx.send('Stopped emoting.')
@commands.dm_only()
@client.command()
async def bid(ctx: fortnitepy.ext.commands.Context, backpack_id: str) -> None:
await client.party.me.set_backpack(
asset=backpack_id
)
await ctx.send(f'Backbling set to {backpack_id}!')
@commands.dm_only()
@client.command()
async def _help(ctx: fortnitepy.ext.commands.Context) -> None:
await ctx.send('For a list of commands, go to: https://github.com/xMistt/fortnitepy-bot/wiki/Commands')
@commands.dm_only()
@client.command(aliases=['legacypickaxe'])
async def pickaxe_id(ctx: fortnitepy.ext.commands.Context, pickaxe_id_: str) -> None:
await client.party.me.set_pickaxe(
asset=pickaxe_id_
)
await ctx.send(f'Pickaxe set to {pickaxe_id_}')
@commands.dm_only()
@client.command()
async def pet_carrier(ctx: fortnitepy.ext.commands.Context, pet_carrier_id: str) -> None:
await client.party.me.set_pet(
asset=pet_carrier_id
)
await ctx.send(f'Pet set to {pet_carrier_id}!')
@commands.dm_only()
@client.command()
async def emoji_id(ctx: fortnitepy.ext.commands.Context, emoji_: str) -> None:
await client.party.me.clear_emote()
await client.party.me.set_emoji(
asset=emoji_
)
await ctx.send(f'Emoji set to {emoji_}!')
@commands.dm_only()
@client.command()
async def trails(ctx: fortnitepy.ext.commands.Context, trails_: str) -> None:
await client.party.me.set_contrail(
asset=trails_
)
await ctx.send(f'Contrail set to {trails_}!')
@commands.dm_only()
@client.command()
async def point(ctx: fortnitepy.ext.commands.Context, *, content: Union[str, None] = None) -> None:
if content is None:
await client.party.me.set_emote(asset='EID_IceKing')
await ctx.send(f'Point it Out played.')
elif 'pickaxe_id' in content.lower():
await client.party.me.set_pickaxe(asset=content)
await client.party.me.set_emote(asset='EID_IceKing')
await ctx.send(f'Pickaxe set to {content} & Point it Out played.')
else:
try:
cosmetic = await BenBotAsync.get_cosmetic(
lang="en",
searchLang="en",
matchMethod="contains",
name=content,
backendType="AthenaPickaxe"
)
await client.party.me.set_pickaxe(asset=cosmetic.id)
await client.party.me.clear_emote()
await client.party.me.set_emote(asset='EID_IceKing')
await ctx.send(f'Pickaxe set to {content} & Point it Out played.')
except BenBotAsync.exceptions.NotFound:
await ctx.send(f"Failed to find a pickaxe with the name: {content}")
@commands.dm_only()
@client.command()
async def ready(ctx: fortnitepy.ext.commands.Context) -> None:
await client.party.me.set_ready(fortnitepy.ReadyState.READY)
await ctx.send('Ready!')
@commands.dm_only()
@client.command(aliases=['sitin'])
async def unready(ctx: fortnitepy.ext.commands.Context) -> None:
await client.party.me.set_ready(fortnitepy.ReadyState.NOT_READY)
await ctx.send('Unready!')
@commands.dm_only()
@client.command()
async def sitout(ctx: fortnitepy.ext.commands.Context) -> None:
await client.party.me.set_ready(fortnitepy.ReadyState.SITTING_OUT)
await ctx.send('Sitting Out!')
@commands.dm_only()
@client.command()
async def bp(ctx: fortnitepy.ext.commands.Context, tier: int) -> None:
await client.party.me.set_battlepass_info(
has_purchased=True,
level=tier,
)
await ctx.send(f'Set battle pass tier to {tier}.')
@commands.dm_only()
@client.command()
async def level(ctx: fortnitepy.ext.commands.Context, banner_level: int) -> None:
await client.party.me.set_banner(
season_level=banner_level
)
await ctx.send(f'Set level to {banner_level}.')
@commands.dm_only()
@client.command()
async def echo(ctx: fortnitepy.ext.commands.Context, *, content: str) -> None:
await client.party.send(content)
await ctx.send('Sent message to party chat.')
@commands.dm_only()
@client.command()
async def status(ctx: fortnitepy.ext.commands.Context, *, content: str) -> None:
await client.set_status(content)
await ctx.send(f'Status set to {content}')
print(f'[PartyBot] [{time()}] Status set to {content}.')
@commands.dm_only()
@client.command()
async def leave(ctx: fortnitepy.ext.commands.Context) -> None:
await client.party.me.set_emote('EID_Wave')
await asyncio.sleep(2)
await client.party.me.leave()
await ctx.send('Bye!')
print(f'[PartyBot] [{time()}] Left the party as I was requested.')
@commands.dm_only()
@client.command()
async def kick(ctx: fortnitepy.ext.commands.Context, *, epic_username: str) -> None:
user = await client.fetch_profile(epic_username)
member = client.party.members.get(user.id)
if member is None:
await ctx.send("Failed to find that user, are you sure they're in the party?")
else:
try:
await member.kick()
await ctx.send(f"Kicked user: {member.display_name}.")
print(f"[PartyBot] [{time()}] Kicked user: {member.display_name}")
except fortnitepy.errors.Forbidden:
await ctx.send(f"Failed to kick {member.display_name}, as I'm not party leader.")
print(crayons.red(f"[PartyBot] [{time()}] [ERROR] "
"Failed to kick member as I don't have the required permissions."))
@commands.dm_only()
@client.command(aliases=['unhide'])
async def promote(ctx: fortnitepy.ext.commands.Context, *, epic_username: Union[str, None] = None) -> None:
if epic_username is None:
user = await client.fetch_profile(ctx.author.display_name)
member = client.party.members.get(user.id)
else:
user = await client.fetch_profile(epic_username)
member = client.party.members.get(user.id)
if member is None:
await ctx.send("Failed to find that user, are you sure they're in the party?")
else:
try:
await member.promote()
await ctx.send(f"Promoted user: {member.display_name}.")
print(f"[PartyBot] [{time()}] Promoted user: {member.display_name}")
except fortnitepy.errors.Forbidden:
await ctx.send(f"Failed topromote {member.display_name}, as I'm not party leader.")
print(crayons.red(f"[PartyBot] [{time()}] [ERROR] "
"Failed to kick member as I don't have the required permissions."))
@commands.dm_only()
@client.command()
async def playlist_id(ctx: fortnitepy.ext.commands.Context, playlist_: str) -> None:
try:
await client.party.set_playlist(playlist=playlist_)
await ctx.send(f'Gamemode set to {playlist_}')
except fortnitepy.errors.Forbidden:
await ctx.send(f"Failed to set gamemode to {playlist_}, as I'm not party leader.")
print(crayons.red(f"[PartyBot] [{time()}] [ERROR] "
"Failed to set gamemode as I don't have the required permissions."))
@commands.dm_only()
@client.command()
async def privacy(ctx: fortnitepy.ext.commands.Context, privacy_type: str) -> None:
try:
if privacy_type.lower() == 'public':
await client.party.set_privacy(fortnitepy.PartyPrivacy.PUBLIC)
elif privacy_type.lower() == 'private':
await client.party.set_privacy(fortnitepy.PartyPrivacy.PRIVATE)
elif privacy_type.lower() == 'friends':
await client.party.set_privacy(fortnitepy.PartyPrivacy.FRIENDS)
elif privacy_type.lower() == 'friends_allow_friends_of_friends':
await client.party.set_privacy(fortnitepy.PartyPrivacy.FRIENDS_ALLOW_FRIENDS_OF_FRIENDS)
elif privacy_type.lower() == 'private_allow_friends_of_friends':
await client.party.set_privacy(fortnitepy.PartyPrivacy.PRIVATE_ALLOW_FRIENDS_OF_FRIENDS)
await ctx.send(f'Party privacy set to {client.party.privacy}.')
print(f'[PartyBot] [{time()}] Party privacy set to {client.party.privacy}.')
except fortnitepy.errors.Forbidden:
await ctx.send(f"Failed to set party privacy to {privacy_type}, as I'm not party leader.")
print(crayons.red(f"[PartyBot] [{time()}] [ERROR] "
"Failed to set party privacy as I don't have the required permissions."))
@commands.dm_only()
@client.command()
async def copy(ctx: fortnitepy.ext.commands.Context, *, epic_username: Union[str, None] = None) -> None:
if epic_username is None:
member = client.party.members.get(ctx.author.id)
else:
user = await client.fetch_profile(epic_username)
member = client.party.members.get(user.id)
await client.party.me.edit(
functools.partial(
fortnitepy.ClientPartyMember.set_outfit,
asset=member.outfit,
variants=member.outfit_variants
),
functools.partial(
fortnitepy.ClientPartyMember.set_backpack,
asset=member.backpack,
variants=member.backpack_variants
),
functools.partial(
fortnitepy.ClientPartyMember.set_pickaxe,
asset=member.pickaxe,
variants=member.pickaxe_variants
),
functools.partial(
fortnitepy.ClientPartyMember.set_banner,
icon=member.banner[0],
color=member.banner[1],
season_level=member.banner[2]
),
functools.partial(
fortnitepy.ClientPartyMember.set_battlepass_info,
has_purchased=True,
level=member.battlepass_info[1]
)
)
await client.party.me.set_emote(asset=member.emote)
await ctx.send(f'Copied the loadout of {member.display_name}.')
@commands.dm_only()
@client.command()
async def hologram(ctx: fortnitepy.ext.commands.Context) -> None:
await client.party.me.set_outfit(
asset='CID_VIP_Athena_Commando_M_GalileoGondola_SG'
)
await ctx.send('Skin set to Star Wars Hologram!')
print(f'[PartyBot] [{time()}] Skin set to Star Wars Hologram.')
@commands.dm_only()
@client.command()
async def gift(ctx: fortnitepy.ext.commands.Context) -> None:
await client.party.me.clear_emote()
await client.party.me.set_emote(
asset='EID_NeverGonna'
)
await ctx.send('What did you think would happen?')
@commands.dm_only()
@client.command()
async def matchmakingcode(ctx: fortnitepy.ext.commands.Context, *, custom_matchmaking_key: str) -> None:
await client.party.set_custom_key(
key=custom_matchmaking_key
)
await ctx.send(f'Custom matchmaking code set to: {custom_matchmaking_key}')
@commands.dm_only()
@client.command()
async def ponpon(ctx: fortnitepy.ext.commands.Context) -> None:
await client.party.me.set_emote(
asset='EID_TourBus'
)
await ctx.send('Emote set to Ninja Style!')
@commands.dm_only()
@client.command()
async def enlightened(ctx: fortnitepy.ext.commands.Context, cosmetic_id: str, br_season: int, skin_level: int) -> None:
variant_types = {
1: client.party.me.create_variants(progressive=4),
2: client.party.me.create_variants(progressive=4),
3: client.party.me.create_variants(material=2)
}
if 'cid' in cosmetic_id.lower():
await client.party.me.set_outfit(
asset=cosmetic_id,
variants=variant_types[br_season] if br_season in variant_types else variant_types[2],
            enlightenment=(br_season, skin_level)
        )
        await ctx.send(f'Skin set to {cosmetic_id} at level {skin_level} (for Season 1{br_season}).')
elif 'bid' in cosmetic_id.lower():
await client.party.me.set_backpack(
asset=cosmetic_id,
variants=client.party.me.create_variants(progressive=2),
            enlightenment=(br_season, skin_level)
        )
        await ctx.send(f'Backpack set to {cosmetic_id} at level {skin_level} (for Season 1{br_season}).')
print(f'[PartyBot] [{time()}] Enlightenment for {cosmetic_id} set to level {skin_level} (for Season 1{br_season}).')
@commands.dm_only()
@client.command()
async def ninja(ctx: fortnitepy.ext.commands.Context) -> None:
await client.party.me.set_outfit(
asset='CID_605_Athena_Commando_M_TourBus'
)
await ctx.send('Skin set to Ninja!')
print(f'[PartyBot] [{time()}] Skin set to Ninja.')
@commands.dm_only()
@client.command()
async def rareskins(ctx: fortnitepy.ext.commands.Context) -> None:
await ctx.send('Showing all rare skins now.')
await client.party.me.set_outfit(
asset='CID_030_Athena_Commando_M_Halloween',
variants=client.party.me.create_variants(clothing_color=1)
)
await ctx.send('Skin set to Purple Skull Trooper!')
print(f"[PartyBot] [{time()}] Skin set to Purple Skull Trooper.")
await asyncio.sleep(2)
await client.party.me.set_outfit(
asset='CID_029_Athena_Commando_F_Halloween',
variants=client.party.me.create_variants(material=3)
)
await ctx.send('Skin set to Pink Ghoul Trooper!')
print(f"[PartyBot] [{time()}] Skin set to Pink Ghoul Trooper.")
await asyncio.sleep(2)
for rare_skin in ('CID_028_Athena_Commando_F', 'CID_017_Athena_Commando_M'):
await client.party.me.set_outfit(
asset=rare_skin
)
await ctx.send(f'Skin set to {rare_skin}!')
print(f"[PartyBot] [{time()}] Skin set to: {rare_skin}!")
await asyncio.sleep(2)
@commands.dm_only()
@client.command()
async def goldenpeely(ctx: fortnitepy.ext.commands.Context) -> None:
await client.party.me.set_outfit(
asset='CID_701_Athena_Commando_M_BananaAgent',
variants=client.party.me.create_variants(progressive=4),
enlightenment=(2, 350)
)
await ctx.send(f'Skin set to Golden Peely.')
@commands.dm_only()
@client.command()
async def random(ctx: fortnitepy.ext.commands.Context, cosmetic_type: str = 'skin') -> None:
if cosmetic_type == 'skin':
all_outfits = await BenBotAsync.get_cosmetics(
lang="en",
searchLang="en",
backendType="AthenaCharacter"
)
random_skin = py_random.choice(all_outfits).id
await client.party.me.set_outfit(
asset=random_skin,
variants=client.party.me.create_variants(profile_banner='ProfileBanner')
)
        await ctx.send(f'Skin randomly set to {random_skin}.')
elif cosmetic_type == 'backpack':
all_backpacks = await BenBotAsync.get_cosmetics(
lang="en",
searchLang="en",
backendType="AthenaBackpack"
)
random_backpack = py_random.choice(all_backpacks).id
await client.party.me.set_backpack(
asset=random_backpack,
variants=client.party.me.create_variants(profile_banner='ProfileBanner')
)
        await ctx.send(f'Backpack randomly set to {random_backpack}.')
elif cosmetic_type == 'emote':
all_emotes = await BenBotAsync.get_cosmetics(
lang="en",
searchLang="en",
backendType="AthenaDance"
)
random_emote = py_random.choice(all_emotes).id
await client.party.me.set_emote(
asset=random_emote
)
        await ctx.send(f'Emote randomly set to {random_emote}.')
elif cosmetic_type == 'all':
all_outfits = await BenBotAsync.get_cosmetics(
lang="en",
searchLang="en",
backendType="AthenaCharacter"
)
all_backpacks = await BenBotAsync.get_cosmetics(
lang="en",
searchLang="en",
backendType="AthenaBackpack"
)
all_emotes = await BenBotAsync.get_cosmetics(
lang="en",
searchLang="en",
backendType="AthenaDance"
)
random_outfit = py_random.choice(all_outfits).id
random_backpack = py_random.choice(all_backpacks).id
random_emote = py_random.choice(all_emotes).id
await client.party.me.set_outfit(
asset=random_outfit
)
await ctx.send(f'Skin randomly set to {random_outfit}.')
await client.party.me.set_backpack(
asset=random_backpack
)
await ctx.send(f'Backpack randomly set to {random_backpack}.')
await client.party.me.set_emote(
asset=random_emote
)
await ctx.send(f'Emote randomly set to {random_emote}.')
@commands.dm_only()
@client.command()
async def nobackpack(ctx: fortnitepy.ext.commands.Context) -> None:
await client.party.me.clear_backpack()
await ctx.send('Removed backpack.')
@commands.dm_only()
@client.command()
async def nopet(ctx: fortnitepy.ext.commands.Context) -> None:
await client.party.me.clear_pet()
await ctx.send('Removed pet.')
@commands.dm_only()
@client.command()
async def nocontrail(ctx: fortnitepy.ext.commands.Context) -> None:
await client.party.me.clear_contrail()
await ctx.send('Removed contrail.')
@commands.dm_only()
@client.command()
async def match(ctx: fortnitepy.ext.commands.Context, players: Union[str, int] = 0, match_time: int = 0) -> None:
if players == 'progressive':
match_time = datetime.datetime.utcnow()
await client.party.me.set_in_match(
players_left=100,
started_at=match_time
)
while (100 >= client.party.me.match_players_left > 0
and client.party.me.in_match()):
await client.party.me.set_in_match(
players_left=client.party.me.match_players_left - py_random.randint(3, 6),
started_at=match_time
)
await asyncio.sleep(py_random.randint(45, 65))
else:
await client.party.me.set_in_match(
players_left=int(players),
started_at=datetime.datetime.utcnow() - datetime.timedelta(minutes=match_time)
)
await ctx.send(f'Set state to in-game in a match with {players} players.'
'\nUse the command: !lobby to revert back to normal.')
@commands.dm_only()
@client.command()
async def lobby(ctx: fortnitepy.ext.commands.Context) -> None:
if client.default_party_member_config.cls == fortnitepy.JustChattingClientPartyMember:
client.default_party_member_config.cls = fortnitepy.ClientPartyMember
party_id = client.party.id
await client.party.me.leave()
await ctx.send('Removed state of Just Chattin\'. Now attempting to rejoin party.')
try:
await client.join_to_party(party_id)
except fortnitepy.errors.Forbidden:
await ctx.send('Failed to join back as party is set to private.')
except fortnitepy.errors.NotFound:
await ctx.send('Party not found, are you sure Fortnite is open?')
await client.party.me.clear_in_match()
await ctx.send('Set state to the pre-game lobby.')
@commands.dm_only()
@client.command()
async def join(ctx: fortnitepy.ext.commands.Context, *, epic_username: Union[str, None] = None) -> None:
if epic_username is None:
epic_friend = client.get_friend(ctx.author.id)
else:
user = await client.fetch_profile(epic_username)
if user is not None:
epic_friend = client.get_friend(user.id)
else:
epic_friend = None
await ctx.send(f'Failed to find user with the name: {epic_username}.')
if isinstance(epic_friend, fortnitepy.Friend):
try:
await epic_friend.join_party()
await ctx.send(f'Joined the party of {epic_friend.display_name}.')
except fortnitepy.errors.Forbidden:
await ctx.send('Failed to join party since it is private.')
except fortnitepy.errors.PartyError:
await ctx.send('Party not found, are you sure Fortnite is open?')
else:
await ctx.send('Cannot join party as the friend is not found.')
@commands.dm_only()
@client.command()
async def friend(ctx: fortnitepy.ext.commands.Context, *, epic_username: str) -> None:
if data['friend_accept']:
await ctx.send('All friend requests will be accepted so there is no need to add manually.')
print(f'[PartyBot] [{time()}] !friend command ignored as friend requests will be accepted '
'so there is no need to add manually.')
else:
user = await client.fetch_profile(epic_username)
if user is not None:
await client.add_friend(user.id)
await ctx.send(f'Sent/accepted friend request to/from {user.display_name}.')
print(f'[PartyBot] [{time()}] Sent/accepted friend request to/from {user.display_name}.')
else:
await ctx.send(f'Failed to find user with the name: {epic_username}.')
print(crayons.red(f"[PartyBot] [{time()}] [ERROR] Failed to find a user with the name {epic_username}."))
@commands.dm_only()
@client.command()
async def playlist(ctx: fortnitepy.ext.commands.Context, *, playlist_name: str) -> None:
try:
scuffedapi_playlist_id = await get_playlist(playlist_name)
if scuffedapi_playlist_id is not None:
await client.party.set_playlist(playlist=scuffedapi_playlist_id)
await ctx.send(f'Playlist set to {scuffedapi_playlist_id}.')
print(f'[PartyBot] [{time()}] Playlist set to {scuffedapi_playlist_id}.')
else:
await ctx.send(f'Failed to find a playlist with the name: {playlist_name}.')
print(crayons.red(f"[PartyBot] [{time()}] [ERROR] "
f"Failed to find a playlist with the name: {playlist_name}."))
except fortnitepy.errors.Forbidden:
await ctx.send(f"Failed to set playlist to {playlist_name}, as I'm not party leader.")
print(crayons.red(f"[PartyBot] [{time()}] [ERROR] "
"Failed to set playlist as I don't have the required permissions."))
@commands.dm_only()
@client.command()
async def invite(ctx: fortnitepy.ext.commands.Context, *, epic_username: Union[str, None] = None) -> None:
if epic_username is None:
epic_friend = client.get_friend(ctx.author.id)
else:
user = await client.fetch_profile(epic_username)
if user is not None:
epic_friend = client.get_friend(user.id)
else:
epic_friend = None
await ctx.send(f'Failed to find user with the name: {epic_username}.')
print(crayons.red(f"[PartyBot] [{time()}] [ERROR] "
f"Failed to find user with the name: {epic_username}."))
if isinstance(epic_friend, fortnitepy.Friend):
try:
await epic_friend.invite()
await ctx.send(f'Invited {epic_friend.display_name} to the party.')
            print(f"[PartyBot] [{time()}] Invited {epic_friend.display_name} to the party.")
except fortnitepy.errors.PartyError:
await ctx.send('Failed to invite friend as they are either already in the party or it is full.')
print(crayons.red(f"[PartyBot] [{time()}] [ERROR] "
"Failed to invite to party as friend is already either in party or it is full."))
else:
await ctx.send('Cannot invite to party as the friend is not found.')
print(crayons.red(f"[PartyBot] [{time()}] [ERROR] "
"Failed to invite to party as the friend is not found."))
@commands.dm_only()
@client.command()
async def hide(ctx: fortnitepy.ext.commands.Context, party_member: Union[str, None] = None) -> None:
if client.party.me.leader:
if party_member is not None:
user = await client.fetch_profile(party_member)
member = client.party.members.get(user.id)
if member is not None:
raw_squad_assignments = client.party.meta.get_prop('Default:RawSquadAssignments_j')["RawSquadAssignments"]
for player in raw_squad_assignments:
if player['memberId'] == member.id:
raw_squad_assignments.remove(player)
await set_and_update_party_prop(
'Default:RawSquadAssignments_j', {
'RawSquadAssignments': raw_squad_assignments
}
)
else:
await ctx.send(f'Failed to find user with the name: {party_member}.')
print(crayons.red(f"[PartyBot] [{time()}] [ERROR] "
f"Failed to find user with the name: {party_member}."))
else:
await set_and_update_party_prop(
'Default:RawSquadAssignments_j', {
'RawSquadAssignments': [{'memberId': client.user.id, 'absoluteMemberIdx': 1}]
}
)
await ctx.send('Hid everyone in the party. Use !unhide if you want to unhide everyone.')
print(f'[PartyBot] [{time()}] Hid everyone in the party.')
else:
await ctx.send("Failed to hide everyone, as I'm not party leader")
print(crayons.red(f"[PartyBot] [{time()}] [ERROR] "
"Failed to hide everyone as I don't have the required permissions."))
@commands.dm_only()
@client.command()
async def ghost(ctx: fortnitepy.ext.commands.Context, *, content: str) -> None:
try:
skin_variants = client.party.me.create_variants(
progressive=2
)
cosmetic = await BenBotAsync.get_cosmetic(
lang="en",
searchLang="en",
matchMethod="contains",
name=content,
backendType="AthenaCharacter"
)
await client.party.me.set_outfit(
asset=cosmetic.id,
variants=skin_variants
)
await ctx.send(f'Skin set to Ghost {cosmetic.name}!')
print(f'[PartyBot] [{time()}] Skin set to Ghost {cosmetic.name}.')
except BenBotAsync.exceptions.NotFound:
await ctx.send(f"Failed to find a skin with the name: {content}.")
print(f"[PartyBot] [{time()}] Failed to find a skin with the name: {content}.")
@commands.dm_only()
@client.command()
async def shadow(ctx: fortnitepy.ext.commands.Context, *, content: str) -> None:
try:
skin_variants = client.party.me.create_variants(
progressive=3
)
cosmetic = await BenBotAsync.get_cosmetic(
lang="en",
searchLang="en",
matchMethod="contains",
name=content,
backendType="AthenaCharacter"
)
await client.party.me.set_outfit(
asset=cosmetic.id,
variants=skin_variants
)
await ctx.send(f'Skin set to Shadow {cosmetic.name}!')
        print(f'[PartyBot] [{time()}] Skin set to Shadow {cosmetic.name}.')
except BenBotAsync.exceptions.NotFound:
await ctx.send(f"Failed to find a skin with the name: {content}.")
print(f"[PartyBot] [{time()}] Failed to find a skin with the name: {content}.")
@commands.dm_only()
@client.command()
async def avatar(ctx: fortnitepy.ext.commands.Context, kairos_cid: str) -> None:
kairos_avatar = fortnitepy.Avatar(
asset=kairos_cid
)
client.set_avatar(kairos_avatar)
await ctx.send(f'Kairos avatar set to {kairos_cid}.')
print(f'[PartyBot] [{time()}] Kairos avatar set to {kairos_cid}.')
@commands.dm_only()
@client.command(aliases=['clear'])
async def clean(ctx: fortnitepy.ext.commands.Context) -> None:
    os.system('cls' if sys.platform.startswith('win') else 'clear')
print(crayons.cyan(f'[PartyBot] [{time()}] PartyBot made by xMistt. '
'Massive credit to Terbau for creating the library.'))
print(crayons.cyan(f'[PartyBot] [{time()}] Discord server: https://discord.gg/fnpy - For support, questions, etc.'))
await ctx.send('Command prompt/terminal cleared.')
print(f'[PartyBot] [{time()}] Command prompt/terminal cleared.')
@commands.dm_only()
@client.command()
async def set(ctx: fortnitepy.ext.commands.Context, *, content: str) -> None:
cosmetic_types = {
"AthenaBackpack": client.party.me.set_backpack,
"AthenaCharacter": client.party.me.set_outfit,
"AthenaEmoji": client.party.me.set_emoji,
"AthenaDance": client.party.me.set_emote
}
set_items = await BenBotAsync.get_cosmetics(
lang="en",
searchLang="en",
matchMethod="contains",
set=content
)
await ctx.send(f'Equipping all cosmetics from the {set_items[0].set} set.')
print(f'[PartyBot] [{time()}] Equipping all cosmetics from the {set_items[0].set} set.')
for cosmetic in set_items:
if cosmetic.backend_type.value in cosmetic_types:
await cosmetic_types[cosmetic.backend_type.value](asset=cosmetic.id)
await ctx.send(f'{cosmetic.short_description} set to {cosmetic.name}!')
print(f'[PartyBot] [{time()}] {cosmetic.short_description} set to {cosmetic.name}.')
await asyncio.sleep(3)
await ctx.send(f'Finished equipping all cosmetics from the {set_items[0].set} set.')
    print(f'[PartyBot] [{time()}] Finished equipping all cosmetics from the {set_items[0].set} set.')
@commands.dm_only()
@client.command()
async def style(ctx: fortnitepy.ext.commands.Context, cosmetic_name: str, variant_type: str, variant_int: str) -> None:
# cosmetic_types = {
# "AthenaCharacter": client.party.me.set_outfit,
# "AthenaBackpack": client.party.me.set_backpack,
# "AthenaPickaxe": client.party.me.set_pickaxe
# }
cosmetic = await BenBotAsync.get_cosmetic(
lang="en",
searchLang="en",
matchMethod="contains",
name=cosmetic_name,
backendType="AthenaCharacter"
)
cosmetic_variants = client.party.me.create_variants(
# item=cosmetic.backend_type.value,
**{variant_type: int(variant_int) if variant_int.isdigit() else variant_int}
)
# await cosmetic_types[cosmetic.backend_type.value](
await client.party.me.set_outfit(
asset=cosmetic.id,
variants=cosmetic_variants
)
await ctx.send(f'Set variants of {cosmetic.id} to {variant_type} {variant_int}.')
print(f'[PartyBot] [{time()}] Set variants of {cosmetic.id} to {variant_type} {variant_int}.')
@commands.dm_only()
@client.command()
async def new(ctx: fortnitepy.ext.commands.Context) -> None:
async with aiohttp.ClientSession() as session:
request = await session.request(
method='GET',
url='https://benbotfn.tk/api/v1/files/added',
)
response = await request.json()
for new_skin in [new_cid for new_cid in response if new_cid.split('/')[-1].lower().startswith('cid_')]:
await client.party.me.set_outfit(
asset=new_skin.split('/')[-1].split('.uasset')[0]
)
await ctx.send(f"Skin set to {new_skin.split('/')[-1].split('.uasset')[0]}!")
print(f"[PartyBot] [{time()}] Skin set to: {new_skin.split('/')[-1].split('.uasset')[0]}!")
await asyncio.sleep(3)
await ctx.send(f'Finished equipping all new unencrypted skins.')
print(f'[PartyBot] [{time()}] Finished equipping all new unencrypted skins.')
for new_emote in [new_eid for new_eid in response if new_eid.split('/')[-1].lower().startswith('eid_')]:
await client.party.me.set_emote(
            asset=new_emote.split('/')[-1].split('.uasset')[0]
        )
        await ctx.send(f"Emote set to {new_emote.split('/')[-1].split('.uasset')[0]}!")
        print(f"[PartyBot] [{time()}] Emote set to: {new_emote.split('/')[-1].split('.uasset')[0]}!")
        await asyncio.sleep(3)
    await ctx.send(f'Finished equipping all new unencrypted emotes.')
    print(f'[PartyBot] [{time()}] Finished equipping all new unencrypted emotes.')
@commands.dm_only()
@client.command()
async def justchattin(ctx: fortnitepy.ext.commands.Context) -> None:
client.default_party_member_config.cls = fortnitepy.JustChattingClientPartyMember
party_id = client.party.id
await client.party.me.leave()
await ctx.send('Set state to Just Chattin\'. Now attempting to rejoin party.'
'\nUse the command: !lobby to revert back to normal.')
try:
await client.join_to_party(party_id)
except fortnitepy.errors.Forbidden:
await ctx.send('Failed to join back as party is set to private.')
except fortnitepy.errors.NotFound:
await ctx.send('Party not found, are you sure Fortnite is open?')
@commands.dm_only()
@client.command()
async def shop(ctx: fortnitepy.ext.commands.Context) -> None:
store = await client.fetch_item_shop()
await ctx.send(f"Equipping all skins in today's item shop.")
print(f"[PartyBot] [{time()}] Equipping all skins in today's item shop.")
for item in store.special_featured_items + store.special_daily_items:
for grant in item.grants:
if grant['type'] == 'AthenaCharacter':
await client.party.me.set_outfit(
asset=grant['asset']
)
await ctx.send(f"Skin set to {item.display_names[0]}!")
print(f"[PartyBot] [{time()}] Skin set to: {item.display_names[0]}!")
await asyncio.sleep(3)
await ctx.send(f'Finished equipping all skins in the item shop.')
print(f'[PartyBot] [{time()}] Finished equipping all skins in the item shop.')
@commands.dm_only()
@client.command()
async def olddefault(ctx: fortnitepy.ext.commands.Context) -> None:
random_default = py_random.choice(
[cid_ for cid_ in dir(fortnitepy.DefaultCharactersChapter1) if not cid_.startswith('_')]
)
await client.party.me.set_outfit(
asset=random_default
)
await ctx.send(f'Skin set to {random_default}!')
print(f"[PartyBot] [{time()}] Skin set to {random_default}.")
@commands.dm_only()
@client.command()
async def hatlessrecon(ctx: fortnitepy.ext.commands.Context) -> None:
skin_variants = client.party.me.create_variants(
parts=2
)
await client.party.me.set_outfit(
asset='CID_022_Athena_Commando_F',
variants=skin_variants
)
await ctx.send('Skin set to Hatless Recon Expert!')
print(f'[PartyBot] [{time()}] Skin set to Hatless Recon Expert.')
@commands.dm_only()
@client.command()
async def season(ctx: fortnitepy.ext.commands.Context, br_season: int) -> None:
max_tier_skins = {
1: "CID_028_Athena_Commando_F",
2: "CID_035_Athena_Commando_M_Medieval",
3: "CID_084_Athena_Commando_M_Assassin",
4: "CID_116_Athena_Commando_M_CarbideBlack",
5: "CID_165_Athena_Commando_M_DarkViking",
6: "CID_230_Athena_Commando_M_Werewolf",
7: "CID_288_Athena_Commando_M_IceKing",
8: "CID_352_Athena_Commando_F_Shiny",
9: "CID_407_Athena_Commando_M_BattleSuit",
10: "CID_484_Athena_Commando_M_KnightRemix",
11: "CID_572_Athena_Commando_M_Viper",
12: "CID_694_Athena_Commando_M_CatBurglar",
13: "CID_767_Athena_Commando_F_BlackKnight"
}
await client.party.me.set_outfit(asset=max_tier_skins[br_season])
await ctx.send(f'Skin set to {max_tier_skins[br_season]}!')
print(f"[PartyBot] [{time()}] Skin set to {max_tier_skins[br_season]}.")
@commands.dm_only()
@client.command()
async def henchman(ctx: fortnitepy.ext.commands.Context) -> None:
    random_henchman = py_random.choice([
"CID_794_Athena_Commando_M_HenchmanBadShorts_D",
"CID_NPC_Athena_Commando_F_HenchmanSpyDark",
"CID_791_Athena_Commando_M_HenchmanGoodShorts_D",
"CID_780_Athena_Commando_M_HenchmanBadShorts",
"CID_NPC_Athena_Commando_M_HenchmanGood",
"CID_692_Athena_Commando_M_HenchmanTough",
"CID_707_Athena_Commando_M_HenchmanGood",
"CID_792_Athena_Commando_M_HenchmanBadShorts_B",
"CID_793_Athena_Commando_M_HenchmanBadShorts_C",
"CID_NPC_Athena_Commando_M_HenchmanBad",
"CID_790_Athena_Commando_M_HenchmanGoodShorts_C",
"CID_779_Athena_Commando_M_HenchmanGoodShorts",
"CID_NPC_Athena_Commando_F_RebirthDefault_Henchman",
"CID_NPC_Athena_Commando_F_HenchmanSpyGood",
"CID_706_Athena_Commando_M_HenchmanBad",
"CID_789_Athena_Commando_M_HenchmanGoodShorts_B"
    ])
await client.party.me.set_outfit(
asset=random_henchman
)
await ctx.send(f'Skin set to {random_henchman}!')
print(f"[PartyBot] [{time()}] Skin set to {random_henchman}.")
@commands.dm_only()
@client.command()
async def meta(ctx: fortnitepy.ext.commands.Context) -> None:
print(json.dumps(client.party.meta.schema, sort_keys=False, indent=4))
if (data['email'] and data['password']) and (data['email'] != '[email protected]' and data['password'] != 'password1'):
try:
client.run()
except fortnitepy.errors.AuthException as e:
print(crayons.red(f"[PartyBot] [{time()}] [ERROR] {e}"))
else:
print(crayons.red(f"[PartyBot] [{time()}] [ERROR] Failed to login as no (or default) account details provided."))
|
py | b407d565c1f989204e739dc70079cdb7c13134fe | import numpy as np
import nnfs
from nnfs.datasets import spiral_data # See for code: https://gist.github.com/Sentdex/454cb20ec5acf0e76ee8ab8448e6266c
nnfs.init()
X, y = spiral_data(100, 3)
class Layer_Dense:
def __init__(self, n_inputs, n_neurons):
self.weights = 0.10 * np.random.randn(n_inputs, n_neurons)
self.biases = np.zeros((1, n_neurons))
def forward(self, inputs):
self.output = np.dot(inputs, self.weights) + self.biases
class Activation_ReLU:
def forward(self, inputs):
self.output = np.maximum(0, inputs)
layer1 = Layer_Dense(2,5)
activation1 = Activation_ReLU()
layer1.forward(X)
activation1.forward(layer1.output)
print(activation1.output) |
py | b407d64edc4e4ff40dd2a85ff179284aef0fddf8 | # Copyright 2020,2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import nnabla as nn
import nnabla.functions as F
def softmax_cross_entropy_loss_vlabel(pred, vlabel):
# The shape of vlabel is supposed to be (batch_size, n_class)
logp = F.log_softmax(pred)
loss = -1.0 * F.mean(F.sum(vlabel * logp, axis=1))
return loss
# Abstract class for learning with mixed data
class MixedDataLearning(object):
def __init__(self):
# Set params for mixing data
return NotImplemented
    def set_mix_ratio(self):
return NotImplemented
def mix_data(self, x, y):
# Mix data given x and y, and return mix_x and mix_y
# x, y, mix_x, and mix_y are supposed to be nn.Variable
return NotImplemented
def loss(self, pred, mix_y):
# Calculate a classification loss given mix_y and prediction results of mix_x.
# Both pred and mix_y are supposed to be a nn.Variable
return NotImplemented
# Mixup
class MixupLearning(MixedDataLearning):
def __init__(self, batch_size, alpha=0.5):
# Set params for mixing data
# For mixup, set alpha for the beta distribution that generates interpolation ratios.
self._batch_size = batch_size
self._alpha = alpha
self._lam = nn.Variable((batch_size, 1))
def set_mix_ratio(self):
if self._alpha > 0.0:
self._lam.d = np.random.beta(
self._alpha, self._alpha, self._batch_size).reshape((self._batch_size, 1))
else:
self._lam.d = np.ones((self._batch_size, 1))
def mix_data(self, x, y):
# Mix data given x and y, and return mix_x and mix_y
# Both y and mix_y are supposed to be nn.Variable((batch_size, n_class))
batch_size = x.shape[0]
ind = np.random.permutation(batch_size)
x0 = x
y0 = y
x1 = x0[ind]
y1 = y0[ind]
mix_x = self._lam.reshape((-1, 1, 1, 1)) * x0 + \
(1.0-self._lam.reshape((-1, 1, 1, 1))) * x1
mix_y = self._lam * y0 + (1.0-self._lam) * y1
return mix_x, mix_y
def loss(self, pred, mix_y):
# Calculate a classification loss given mix_y and prediction results of mix_x.
# Both pred and mix_y are supposed to be a nn.Variable
return softmax_cross_entropy_loss_vlabel(pred, mix_y)
# Cutmix
class CutmixLearning(MixedDataLearning):
def __init__(self, shape_of_batch, alpha=0.5, cutmix_prob=0.5):
# Set params for mixing data
# For cutmix, set alpha for the beta distribution that generates cutting area ratio.
# cutmix_prob controls a probablity to conduct cutmix for each batch
# shape_of_batch should be (batch_size, n_channels, height, width)
self._batch_size = shape_of_batch[0]
self._alpha = alpha
self._cutmix_prob = cutmix_prob
self._mask = nn.Variable(shape_of_batch)
self._lam = nn.Variable((shape_of_batch[0], 1))
def set_mix_ratio(self):
# How to get a random bounding box
def rand_bbox(shape_of_x, lam):
width = shape_of_x[3]
height = shape_of_x[2]
cut_ratio = np.sqrt(1.0 - lam)
cut_w = np.int(width * cut_ratio)
cut_h = np.int(height * cut_ratio)
cx = np.random.randint(width)
cy = np.random.randint(height)
bbx0 = np.clip(cx - cut_w//2, 0, width)
bby0 = np.clip(cy - cut_h//2, 0, height)
bbx1 = np.clip(cx + cut_w//2, 0, width)
bby1 = np.clip(cy + cut_h//2, 0, height)
return bbx0, bby0, bbx1, bby1
def get_mask(shape_of_x, bbx0, bby0, bbx1, bby1):
mask = np.zeros(shape_of_x)
mask[:, :, bby0:bby1, bbx0:bbx1] = 1.0
return mask
if self._alpha > 0.0 and np.random.rand() <= self._cutmix_prob:
lam_tmp = np.random.beta(self._alpha, self._alpha)
bbx0, bby0, bbx1, bby1 = rand_bbox(self._mask.shape, lam_tmp)
self._mask.d = get_mask(self._mask.shape, bbx0, bby0, bbx1, bby1)
self._lam.d = (1.0 - ((bbx1-bbx0)*(bby1-bby0)/(
self._mask.shape[2]*self._mask.shape[3]))) * np.ones((self._batch_size, 1))
else:
self._mask.d = np.zeros(self._mask.shape)
self._lam.d = np.ones((self._batch_size, 1))
def mix_data(self, x, y):
# Mix data given x and y, and return mix_x and mix_y
# Both y and mix_y are supposed to be nn.Variable((batch_size, n_class))
batch_size = x.shape[0]
ind = np.random.permutation(batch_size)
x0 = x
y0 = y
x1 = x0[ind]
y1 = y0[ind]
mix_x = (1.0 - self._mask) * x0 + self._mask * x1
mix_y = self._lam * y0 + (1.0 - self._lam) * y1
return mix_x, mix_y
def loss(self, pred, mix_y):
# Calculate a classification loss given mix_y and prediction results of mix_x.
# Both pred and mix_y are supposed to be a nn.Variable
return softmax_cross_entropy_loss_vlabel(pred, mix_y)
# VH-Mixup
class VHMixupLearning(MixedDataLearning):
def __init__(self, shape_of_batch, alpha=0.5):
# Set params for mixing data
# For vh-mixup, set alpha for the beta distribution that generates interpolation ratios.
# shape_of_batch should be (batch_size, n_channels, height, width)
self._batch_size = shape_of_batch[0]
self._maskv = nn.Variable(shape_of_batch)
self._maskh = nn.Variable(shape_of_batch)
self._lamx = nn.Variable([shape_of_batch[0], 1])
self._lamy = nn.Variable([shape_of_batch[0], 1])
self._alpha = alpha
def set_mix_ratio(self):
# How to concatenate images
def get_maskv(alpha):
if alpha <= 0.0:
return np.ones(self._maskv.shape), 1.0
mask = np.zeros(self._maskv.shape)
lam = np.random.beta(self._alpha, self._alpha)
lh = np.int(lam * self._maskv.shape[2])
mask[:, :, 0:lh, :] = 1.0
return mask, lam
def get_maskh(alpha):
if alpha <= 0.0:
return np.ones(self._maskh.shape), 1.0
mask = np.zeros(self._maskh.shape)
lam = np.random.beta(self._alpha, self._alpha)
lw = np.int(lam * self._maskh.shape[3])
mask[:, :, :, 0:lw] = 1.0
return mask, lam
self._maskv.d, lam1 = get_maskv(self._alpha)
self._maskh.d, lam2 = get_maskh(self._alpha)
if self._alpha > 0.0:
self._lamx.d = np.random.beta(
self._alpha, self._alpha, self._batch_size).reshape((self._batch_size, 1))
else:
self._lamx.d = np.ones((self._batch_size, 1))
self._lamy.d = lam1 * self._lamx.d + lam2 * (1.0 - self._lamx.d)
def mix_data(self, x, y):
# Mix data given x and y, and return mix_x and mix_y
# Both y and mix_y are supposed to be nn.Variable((batch_size, n_class))
batch_size = x.shape[0]
ind = np.random.permutation(batch_size)
x0 = x
y0 = y
x1 = x0[ind]
y1 = y0[ind]
x_hcat = self._maskh * x0 + (1.0 - self._maskh) * x1
x_vcat = self._maskv * x0 + (1.0 - self._maskv) * x1
mix_x = self._lamx.reshape(
(-1, 1, 1, 1)) * x_hcat + (1.0-self._lamx.reshape((-1, 1, 1, 1))) * x_vcat
mix_y = self._lamy * y0 + (1.0-self._lamy) * y1
return mix_x, mix_y
def loss(self, pred, mix_y):
# Calculate a classification loss given mix_y and prediction results of mix_x.
# Both pred and mix_y are supposed to be a nn.Variable
return softmax_cross_entropy_loss_vlabel(pred, mix_y)
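# ---------------------------------------------------------------------------
# Minimal usage sketch: wiring MixupLearning into a toy nnabla graph. The
# single affine layer only stands in for a real CNN, and the
# (batch_size, 3, 32, 32) input shape and one-hot labels are illustrative
# assumptions.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import nnabla.parametric_functions as PF
    batch_size, n_class = 32, 10
    x = nn.Variable((batch_size, 3, 32, 32))
    y = nn.Variable((batch_size, n_class))  # one-hot labels
    mixer = MixupLearning(batch_size, alpha=0.5)
    mix_x, mix_y = mixer.mix_data(x, y)
    pred = PF.affine(mix_x, n_class)  # stand-in for a real network
    loss = mixer.loss(pred, mix_y)
    # One "iteration": feed data, draw new mixing ratios, evaluate the loss.
    x.d = np.random.rand(*x.shape)
    y.d = np.eye(n_class)[np.random.randint(0, n_class, batch_size)]
    mixer.set_mix_ratio()
    loss.forward()
    print("mixup loss:", loss.d)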
|
py | b407d73fe1585a52e27bdc774479012ee2cb3cf0 | # Created by [email protected] at 2022/1/20 10:01
|
py | b407d74600c393066953e4007bfee2407aeb1417 | from ._abstract import AbstractScraper
from ._utils import get_minutes, get_yields, normalize_string
class HEB(AbstractScraper):
@classmethod
    def host(cls, domain="com"):
return f"www.heb.{domain}"
def title(self):
return self.soup.find("h1", {"class": "title"}).get_text()
def total_time(self):
minutes_tag = self.soup.find("div", {"itemprop": "totalTime"})
return get_minutes(minutes_tag.parent.get_text())
def yields(self):
yields_tag = self.soup.find("div", {"itemprop": "recipeYield"})
return get_yields(yields_tag.parent.get_text())
def ingredients(self):
ingredients_container = self.soup.find(class_="ingredientswrapper")
ingredients = ingredients_container.findAll("div", {"class": "recipestepstxt"})
return [normalize_string(ingredient.get_text()) for ingredient in ingredients]
def _instructions_list(self):
instructions_container = self.soup.find("div", {"class": "instructions"})
instructions = instructions_container.findAll(
"span", {"class": "instructiontxt"}
)
return [
normalize_string(instruction.get_text()) for instruction in instructions
]
def instructions(self):
data = self._instructions_list()
return "\n".join(data) if data else None
def image(self):
container = self.soup.find("div", {"class": "recipeimage"})
if not container:
return None
image = container.find("img", {"src": True})
return image["src"] if image else None
|
py | b407d8b0afef24fe8bc6f3cbdb566227cffd5041 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .client import load_json, ingest_data, write_parquet, convert_json, write_parquet_dataset
__title__ = 'json2parquet'
__version__ = '0.0.28'
__all__ = ['load_json', 'ingest_data', 'write_parquet', 'convert_json', 'write_parquet_dataset']
|
py | b407d8ffcebbbfd87888115bab85fa401aed61ea | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
def make_summary(value_dict):
return tf.Summary(value=[tf.Summary.Value(tag=k, simple_value=v) for k, v in value_dict.items()])
def flatten(l):
return [item for sublist in l for item in sublist]
def divide(x, y):
return 0 if y == 0 else x / float(y)
def projection(inputs, output_size, initializer=None):
return ffnn(inputs, 0, -1, output_size, dropout=None, output_weights_initializer=initializer)
def highway(inputs, num_layers, dropout):
for i in range(num_layers):
with tf.variable_scope("highway_{}".format(i)):
j, f = tf.split(projection(inputs, 2 * shape(inputs, -1)), 2, -1)
f = tf.sigmoid(f)
j = tf.nn.relu(j)
if dropout is not None:
j = tf.nn.dropout(j, dropout)
inputs = f * j + (1 - f) * inputs
return inputs
def shape(x, dim):
return x.get_shape()[dim].value or tf.shape(x)[dim]
# return x.get_shape().as_list()[dim] or x.shape.as_list()[dim]
def ffnn(inputs, num_hidden_layers, hidden_size, output_size, dropout, output_weights_initializer=None):
if len(inputs.get_shape()) > 3:
raise ValueError("FFNN with rank {} not supported".format(len(inputs.get_shape())))
if len(inputs.get_shape()) == 3:
batch_size = shape(inputs, 0)
seqlen = shape(inputs, 1)
emb_size = shape(inputs, 2)
current_inputs = tf.reshape(inputs, [batch_size * seqlen, emb_size])
else:
current_inputs = inputs
for i in range(num_hidden_layers):
hidden_weights = tf.get_variable("hidden_weights_{}".format(i), [shape(current_inputs, 1), hidden_size])
hidden_bias = tf.get_variable("hidden_bias_{}".format(i), [hidden_size])
current_outputs = tf.nn.relu(tf.nn.xw_plus_b(current_inputs, hidden_weights, hidden_bias))
if dropout is not None:
current_outputs = tf.nn.dropout(current_outputs, dropout)
current_inputs = current_outputs
output_weights = tf.get_variable("output_weights", [shape(current_inputs, 1), output_size],
initializer=output_weights_initializer)
output_bias = tf.get_variable("output_bias", [output_size])
outputs = tf.nn.xw_plus_b(current_inputs, output_weights, output_bias)
if len(inputs.get_shape()) == 3:
outputs = tf.reshape(outputs, [batch_size, seqlen, output_size])
return outputs
def cnn(inputs, filter_sizes, num_filters):
num_words = shape(inputs, 0)
num_chars = shape(inputs, 1)
input_size = shape(inputs, 2)
outputs = []
for i, filter_size in enumerate(filter_sizes):
with tf.variable_scope("conv_{}".format(i)):
w = tf.get_variable("w", [filter_size, input_size, num_filters])
b = tf.get_variable("b", [num_filters])
conv = tf.nn.conv1d(inputs, w, stride=1, padding="VALID") # [num_words, num_chars - filter_size, num_filters]
h = tf.nn.relu(tf.nn.bias_add(conv, b)) # [num_words, num_chars - filter_size, num_filters]
pooled = tf.reduce_max(h, 1) # [num_words, num_filters]
outputs.append(pooled)
return tf.concat(outputs, 1) # [num_words, num_filters * len(filter_sizes)]
def batch_gather(emb, indices):
batch_size = shape(emb, 0)
seqlen = shape(emb, 1)
if len(emb.get_shape()) > 2:
emb_size = shape(emb, 2)
else:
emb_size = 1
flattened_emb = tf.reshape(emb, [batch_size * seqlen, emb_size]) # [batch_size * seqlen, emb]
offset = tf.expand_dims(tf.range(batch_size) * seqlen, 1) # [batch_size, 1]
gathered = tf.gather(flattened_emb, indices + offset) # [batch_size, num_indices, emb]
if len(emb.get_shape()) == 2:
gathered = tf.squeeze(gathered, 2) # [batch_size, num_indices]
return gathered
class CustomLSTMCell(tf.contrib.rnn.RNNCell):
def __init__(self, num_units, batch_size, dropout):
self._num_units = num_units
self._dropout = dropout
self._dropout_mask = tf.nn.dropout(tf.ones([batch_size, self.output_size]), dropout)
self._initializer = self._block_orthonormal_initializer([self.output_size] * 3)
initial_cell_state = tf.get_variable("lstm_initial_cell_state", [1, self.output_size])
initial_hidden_state = tf.get_variable("lstm_initial_hidden_state", [1, self.output_size])
self._initial_state = tf.contrib.rnn.LSTMStateTuple(initial_cell_state, initial_hidden_state)
@property
def state_size(self):
return tf.contrib.rnn.LSTMStateTuple(self.output_size, self.output_size)
@property
def output_size(self):
return self._num_units
@property
def initial_state(self):
return self._initial_state
def __call__(self, inputs, state, scope=None):
"""Long short-term memory cell (LSTM)."""
with tf.variable_scope(scope or type(self).__name__): # "CustomLSTMCell"
c, h = state
h *= self._dropout_mask
concat = projection(tf.concat([inputs, h], 1), 3 * self.output_size, initializer=self._initializer)
i, j, o = tf.split(concat, num_or_size_splits=3, axis=1)
i = tf.sigmoid(i)
new_c = (1 - i) * c + i * tf.tanh(j)
new_h = tf.tanh(new_c) * tf.sigmoid(o)
new_state = tf.contrib.rnn.LSTMStateTuple(new_c, new_h)
return new_h, new_state
def _orthonormal_initializer(self, scale=1.0):
def _initializer(shape, dtype=tf.float32, partition_info=None):
M1 = np.random.randn(shape[0], shape[0]).astype(np.float32)
M2 = np.random.randn(shape[1], shape[1]).astype(np.float32)
Q1, R1 = np.linalg.qr(M1)
Q2, R2 = np.linalg.qr(M2)
Q1 = Q1 * np.sign(np.diag(R1))
Q2 = Q2 * np.sign(np.diag(R2))
n_min = min(shape[0], shape[1])
params = np.dot(Q1[:, :n_min], Q2[:n_min, :]) * scale
return params
return _initializer
def _block_orthonormal_initializer(self, output_sizes):
def _initializer(shape, dtype=np.float32, partition_info=None):
assert len(shape) == 2
assert sum(output_sizes) == shape[1]
initializer = self._orthonormal_initializer()
params = np.concatenate([initializer([shape[0], o], dtype, partition_info) for o in output_sizes], 1)
return params
return _initializer
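# ---------------------------------------------------------------------------
# Minimal usage sketch, assuming a TensorFlow 1.x graph-mode setup to match
# the tf.contrib/get_variable style above: builds a small FFNN scorer over a
# batch of 128-d embeddings and runs it once on random data. The layer sizes
# are arbitrary illustration values.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    tf.reset_default_graph()
    emb = tf.placeholder(tf.float32, [None, 128])  # [batch, emb]
    scores = ffnn(emb, num_hidden_layers=2, hidden_size=64,
                  output_size=1, dropout=None)  # [batch, 1]
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        out = sess.run(scores, {emb: np.random.randn(8, 128)})
        print(out.shape)  # (8, 1)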
|
py | b407d9e09d3e45e85f0df41960edc6eb0b509536 | """
Utility functions for candidate_cnn_builder.py
"""
from __future__ import division, print_function
import math
import os.path
from random import random, randrange
from itertools import permutations
import numpy as np
import torch
from torch.utils.data import Dataset
import torchvision.transforms.functional as TF
import pyodbc
from tqdm import tqdm
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
if torch.cuda.is_available():
torch.cuda.set_device(0)
#container for custom errors
class CustomError(Exception):
"""
Dummy container for raising errors.
"""
class ImageTensorCreator:
"""
utility class for translating binary column data into "image" tensor
"""
def __init__(self, image_depth, data_columns):
"""
set class variables for use in create_image_tensor
args:
image_depth: image tensor depth. must be between 1 (black and white)
and 4 (RGBA) for generation of actual images
data_columns: tuple or list of names for binary data columns
"""
self.image_depth = image_depth
self.columns = data_columns
self.col_count = len(data_columns)
feature_count = 1.0
#using image_depth - 1 to account for 'age' of patient in final dimension
for i in range(self.image_depth - 1):
#model will perform image_depth - 1 permutations for each column
feature_count *= (self.col_count - i)
self.image_height_width = math.ceil(feature_count**0.5)
self.norm_tuple = list()
for i in range(self.image_depth):
self.norm_tuple.extend([0.5])
self.norm_tuple = tuple(self.norm_tuple)
print('total features: %i; image size: %i x %i x %i'
% (feature_count, self.image_height_width, self.image_height_width, self.image_depth))
def create_image_tensor(self, row):
"""
Converts binary column data and patient age into an image tensor.
INPUT: row: Expected format is [bindata,age] where bindata is a string of 1s and 0s
supplied by the DB to indicate patient conditions
OUTPUT: Normalized image tensor
"""
if row:
icol = 0
irow = 0
if len(list(row[0])) != self.col_count:
raise CustomError('Invalid Input Size: Expected %i, Received %i'
% (self.col_count, len(list(row[0]))))
if len(row) < 2:
raise CustomError('Invalid Data: Expected row format is [bindata,age]')
image_data = np.zeros((self.image_height_width, self.image_height_width, self.image_depth),
dtype=np.float32)
for pixel in permutations(list(row[0]), self.image_depth - 1):
this_pixel = list()
for channel in pixel:
this_pixel.extend([float(channel)])
this_pixel.extend([float(row[1])/100.])
image_data[icol, irow, :] = this_pixel
irow += 1
if irow > self.image_height_width - 1:
icol += 1
irow = 0
return TF.normalize(TF.to_tensor(image_data), self.norm_tuple, self.norm_tuple)
else:
raise CustomError('Invalid Data: No row data supplied.')
def create_fake_data(self, age, conditions):
"""
Creates a "fake" patient for use in model testing. Patient age and condition data may be
specified, provided as a range, or left blank if a random patient is desired.
INPUTS:
age: if an integer, the value is used explicitly.
if a tuple of integers, a random age between the two values is assigned.
if None, a random age between 15 and 90 is assigned.
conditions:
if a list or string, assign all condtions in the list or string.
if integer, assign a random set of conditions but do not apply more than this value.
if None, assign a random set of conditions.
Integer version is biased to early conditions in COLUMNS. NEEDS IMPROVEMENT.
"""
if age is None:
age = randrange(15, 90)
        elif isinstance(age, tuple):
            age = randrange(age[0], age[1])
bindata = ''
if isinstance(conditions, list) or isinstance(conditions, str):
for col in self.columns:
bindata += ('1' if col in conditions else '0')
return self.create_image_tensor([bindata, age]).unsqueeze(0)
total_conditions = 0
if isinstance(conditions, int) and conditions < self.col_count:
total_conditions = conditions
else:
total_conditions = math.floor(self.col_count*random())
condition_count = 0
for i,col in enumerate(self.columns):
if condition_count < total_conditions:
if random() >= 0.5:
if i == 1 and (age < 12 or age > 55 or bindata[0] == '1'):
bindata += '0'
else:
bindata += '1'
condition_count += 1
else:
bindata += '0'
else:
bindata += '0'
return self.create_image_tensor([bindata, age]).unsqueeze(0)
def interpret_image_tensor(self, image_tensor):
'''
translate tensor to text description of patient
args:
image_tensor
'''
pts_data = list()
        for (bindata, age) in self.get_bindata_from_image_tensor(image_tensor):
pt_data = str(int(age)) + ' yo, '
for i, val in enumerate(list(bindata)):
if i == 0:
if val == '1':
pt_data += 'Male, '
else:
pt_data += 'Female, '
elif val == '1':
pt_data += self.columns[i] + ', '
pt_data = pt_data[:-2]
pts_data.append(pt_data)
return pts_data
def get_bindata_from_image_tensor(self, image_tensor):
'''
extract age and patient condition binary digit string from image tensor
'''
pts_binary_data = list()
for case in image_tensor:
case = case / 2 + 0.5
case = case.numpy()
case = np.transpose(case, (1, 2, 0))
bindata = ''
for i in range(self.image_depth - 1):
bindata += str(int(case[0, 0, i]))
for i in range(1, len(self.columns) - self.image_depth + 2):
bindata += str(int(case[0, i, self.image_depth - 2]))
age = float(round(float(case[0, 0, self.image_depth - 1])*100.))
pts_binary_data.append((bindata, age))
return pts_binary_data
class CovidCnnDataset(Dataset):
'''
download data from mexican covid database, reformat into image tensors
and split into training and validation sets
'''
def __init__(self, import_export_filename, image_tensor_creator, **kwargs):
'''
download data from mexican covid database, reformat into image tensors
and split into training and validation sets
args:
import_export_filename: file from which data is loaded if it exists and where data will
be saved for future recall after the dataset is built
image_tensor_creator: instance of class ImageTensorCreator
kwargs:
pyodbc_conn_string: ODBC connection string for database
query: stored procedure or query used to create the cursor for the
remote data
force_rebuild: force the dataset to rebuild even if import_export_filename
mini_batch_size: number of images to process before backpropagation
and loss calculation
truncate_data: optionally trim dataset to a small size for testing/debugging
on CPU. default is True for CPU, False for cuda.
truncate_train_size: size of training dataset if truncation is enabled. 100
approx_dataset_size: approximate size of full dataset (train + validation)
validation_ratio: ratio of dataset to be used for validation, default 0.4
'''
super(CovidCnnDataset).__init__()
self.import_export_filename = import_export_filename
self.itc = image_tensor_creator
self.conn_string = kwargs.get('pyodbc_conn_string', 'connection string not supplied')
self.query = kwargs.get('query', 'query not supplied')
self.force_rebuild = kwargs.get('force_rebuild', False)
self.truncate_data = kwargs.get('truncate_data', False if DEVICE == 'cuda' else True)
self.truncate_train_size = kwargs.get('truncate_train_size', 50)
self.train_data = list()
self.validation_images = list()
self.validation_labels = list()
if self.force_rebuild or not(os.path.exists(self.import_export_filename)):
self.mini_batch_size = kwargs.get('mini_batch_size', 4)
self.dataset_size = kwargs.get('approx_dataset_size',
int(50000/self.mini_batch_size))
self.validation_ratio = kwargs.get('validation_ratio', 0.4)
self.validation_count = int(self.validation_ratio*self.dataset_size)
self.train_count = self.dataset_size - self.validation_count
rowgen = self.next_row()
if self.truncate_data:
self.train_count = self.truncate_train_size
pbar = tqdm(enumerate(rowgen), total=self.train_count)
for i, data in pbar:
self.train_data.append(data)
self.save_to_disk()
else:
self.reload_from_disk()
self.dataset_size = len(self.validation_images) + len(self.train_data)
self.train_count = len(self.train_data)
self.validation_count = len(self.validation_images)
self.validation_ratio = round(float(len(self.validation_images))
/float(self.dataset_size),3)
def __len__(self):
'''
required overload
'''
return self.train_count
def __getitem__(self, idx):
'''
required overload. returns 'idx'th training sample in the form:
a tensor of [self.mini_batch_size] image tensors,
a tensor of [self.mini_batch_size] label tensors
'''
        return self.train_data[idx]
def save_to_disk(self):
'''
save dataset to disk
'''
torch.save({'train_data' : self.train_data,
'validation_images' : self.validation_images,
'validation_labels': self.validation_labels,
'mini_batch_size': self.mini_batch_size,
}, self.import_export_filename)
def reload_from_disk(self):
'''
save any manual edits performed in external subroutines to disk
'''
datasets = torch.load(self.import_export_filename)
self.train_data = datasets['train_data']
self.validation_images = datasets['validation_images']
self.validation_labels = datasets['validation_labels']
self.mini_batch_size = datasets['mini_batch_size']
def next_row(self):
"""
Generator function for retrieving patient data.
OUTPUT: tuple of form (tensor of [BATCH_SIZE] images, tensor of [BATCH_SIZE] labels)
"""
conn = pyodbc.connect(self.conn_string)
crsr = conn.cursor()
crsr.execute(self.query)
rowcnt = 0
imgs = list()
labels = list()
row = [0, 1, 2]
idx = 0
while row and (not self.truncate_data or len(self.train_data) < self.truncate_train_size):
while rowcnt < self.mini_batch_size:
try:
row = crsr.fetchone()
except:
conn = pyodbc.connect(self.conn_string)
crsr = conn.cursor()
crsr.execute(self.query)
crsr.skip(idx*self.mini_batch_size + rowcnt)
row = crsr.fetchone()
rowcnt += 1
if row:
imgtensor = self.itc.create_image_tensor(row)
labels.append(np.array(list(row[2]), dtype=np.float32))
imgs.append(imgtensor)
else:
break
if row:
if random() < self.validation_ratio:
self.validation_images.append(torch.stack(imgs))
self.validation_labels.append(torch.tensor(labels))
else:
yield torch.stack(imgs), torch.tensor(labels)
rowcnt = 0
idx += 1
imgs = list()
labels = list()
else:
break
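# ---------------------------------------------------------------------------
# Minimal usage sketch. The column names, connection string and stored
# procedure are hypothetical placeholders; the query is expected to return
# rows of (bindata, age, label_bits) as consumed by next_row() above.
#
#     columns = ('male', 'pregnant', 'diabetes', 'asthma', 'hypertension')
#     itc = ImageTensorCreator(image_depth=4, data_columns=columns)
#     dataset = CovidCnnDataset(
#         'covid_dataset.pt', itc,
#         pyodbc_conn_string='DSN=covid_db;UID=user;PWD=secret',
#         query='EXEC dbo.GetPatientBinaryData',
#         mini_batch_size=4)
#     print(len(dataset), 'training mini-batches /',
#           dataset.validation_count, 'validation mini-batches')
# ---------------------------------------------------------------------------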
|
py | b407da8ca870131f9ff401a0f16c47324863379a | from django.forms import *
from responses.models import EnoughResponse
class ResponseForm(ModelForm):
class Meta:
model = EnoughResponse
widgets = {
"text": TextInput,
} |
py | b407db2cb8280a264179f671a060aa599f8df870 | # -*- coding: utf-8 -*-
"""
Created on 2019/8/27
@author: LoyeLee
"""
|
py | b407dbcbd0b6338a06cb26bb81244654d78327da | import pytest
from collections import OrderedDict
from jina import Document, DocumentArray, Executor, Flow, requests
from docarray.array.chunk import ChunkArray
class DummyExecutor(Executor):
def __init__(self, mode=None, *args, **kwargs):
super().__init__(*args, **kwargs)
if mode:
self._mode = str(mode)
@requests
def do_something(self, docs, **kwargs):
for doc in docs:
if len(doc.chunks) > 0:
chunks = ChunkArray(
(d for d in doc.chunks if d.modality == self._mode), doc
)
assert chunks[0].content == self._mode
assert len(chunks) == 1
doc.chunks = chunks
class MatchMerger(Executor):
@requests
def merge(self, docs_matrix, **kwargs):
results = OrderedDict()
for docs in docs_matrix:
for doc in docs:
if doc.id in results:
results[doc.id].matches.extend(doc.matches)
else:
results[doc.id] = doc
return DocumentArray(list(results.values()))
class ChunkMerger(Executor):
@requests
def merge(self, docs_matrix, **kwargs):
results = OrderedDict()
for docs in docs_matrix:
for doc in docs:
if doc.id in results:
results[doc.id].chunks.extend(doc.chunks)
else:
results[doc.id] = doc
return DocumentArray(list(results.values()))
@pytest.mark.timeout(60)
@pytest.mark.parametrize('num_replicas, num_shards', [(1, 1), (2, 2)])
def test_sharding_tail_pea(num_replicas, num_shards):
"""TODO(Maximilian): Make (1, 2) and (2, 1) also workable"""
f = Flow().add(
uses=DummyExecutor,
replicas=num_replicas,
shards=num_shards,
uses_after=MatchMerger,
)
with f:
results = f.post(
on='/search',
inputs=Document(matches=[Document()]),
return_results=True,
)
assert len(results[0].docs[0].matches) == num_shards
def test_merging_head_pea():
def multimodal_generator():
for i in range(0, 5):
document = Document()
document.chunks.append(Document(modality='1', content='1'))
document.chunks.append(Document(modality='2', content='2'))
yield document
f = (
Flow()
.add(uses={'jtype': 'DummyExecutor', 'with': {'mode': '1'}}, name='executor1')
.add(
uses={'jtype': 'DummyExecutor', 'with': {'mode': '2'}},
name='executor2',
needs='gateway',
)
.add(
uses_before=ChunkMerger, name='executor3', needs=['executor1', 'executor2']
)
)
with f:
results = f.post(
on='/search',
inputs=multimodal_generator(),
return_results=True,
)
assert len(results[0].docs[0].chunks) == 2
assert len(results[0].docs) == 5
|
py | b407dc6fbc10109f91aae71e229e86bd41619365 | import time
from datadog import DogStatsd
import time
import sys
statsd = DogStatsd(host="statsd", port=9125)
REQUEST_LATENCY_METRIC_NAME = 'request_latency_seconds'
REQUEST_COUNT_METRIC_NAME = 'request_count'
class StatsdReporter():
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
request.start_time = time.time()
response = self.get_response(request)
#FIXME: https://docs.djangoproject.com/en/2.2/ref/request-response/
print("Statsd middleware: request {0} {1}".format(request.path_info, request.method))
if response:
resp_time = time.time() - request.start_time
statsd.histogram(REQUEST_LATENCY_METRIC_NAME,
resp_time,
tags=[
'service:webapp',
'endpoint: %s' % request.path_info,
]
)
statsd.increment(REQUEST_COUNT_METRIC_NAME,
tags=[
'service: webapp',
'method: %s' % request.method,
'endpoint: %s' % request.path_info,
'status: %s' % str(response.status_code)
]
)
return response
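# ---------------------------------------------------------------------------
# Minimal wiring sketch: this class follows Django's new-style middleware
# protocol (init with get_response, __call__), so it is enabled from
# settings.py. The dotted path below is a hypothetical placeholder that
# depends on where this module lives in the project.
#
#     # settings.py
#     MIDDLEWARE = [
#         # ... Django's default middleware ...
#         'webapp.middleware.StatsdReporter',
#     ]
# ---------------------------------------------------------------------------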
|
py | b407dca9307e331a43f1493253c6ad55da17884e | # Copyright 2001 by Gavin E. Crooks. All rights reserved.
# Revisions copyright 2010 Jeffrey Finkelstein. All rights reserved.
#
# This file is part of the Biopython distribution and governed by your
# choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
# Please see the LICENSE file that should have been included as part of this
# package.
"""Handle the SCOP CLAssification file, which describes SCOP domains.
The file format is described in the scop
"release notes.":http://scop.mrc-lmb.cam.ac.uk/scop/release-notes.html
The latest CLA file can be found
"elsewhere at SCOP.":http://scop.mrc-lmb.cam.ac.uk/scop/parse/
"Release 1.73": http://scop.mrc-lmb.cam.ac.uk/scop/parse/dir.cla.scop.txt_1.73
(July 2008)
"""
from . import Residues
class Record(object):
"""Holds information for one SCOP domain.
Attributes:
- sid - SCOP identifier. e.g. d1danl2
- residues - The domain definition as a Residues object
- sccs - SCOP concise classification strings. e.g. b.1.2.1
- sunid - SCOP unique identifier for this domain
- hierarchy - A dictionary, keys are nodetype, values are sunid,
describing the location of this domain in the SCOP hierarchy. See
the Scop module for a description of nodetypes. This used to be a
list of (key,value) tuples in older versions of Biopython (see
Bug 3109).
"""
def __init__(self, line=None):
"""Initialize the class."""
self.sid = ""
self.residues = None
self.sccs = ""
self.sunid = ""
self.hierarchy = {}
if line:
self._process(line)
def _process(self, line):
line = line.rstrip() # no trailing whitespace
columns = line.split("\t") # separate the tab-delineated cols
if len(columns) != 6:
raise ValueError("I don't understand the format of %s" % line)
self.sid, pdbid, residues, self.sccs, self.sunid, hierarchy = columns
self.residues = Residues.Residues(residues)
self.residues.pdbid = pdbid
self.sunid = int(self.sunid)
for ht in hierarchy.split(","):
key, value = ht.split("=")
self.hierarchy[key] = int(value)
def __str__(self):
"""Represent the SCOP classification record as a tab-separated string."""
s = []
s.append(self.sid)
s += str(self.residues).split(" ")
s.append(self.sccs)
s.append(self.sunid)
s.append(
",".join(
"=".join((key, str(value))) for key, value in self.hierarchy.items()
)
)
return "\t".join(map(str, s)) + "\n"
def parse(handle):
"""Iterate over a CLA file as Cla records for each line.
Arguments:
- handle - file-like object.
"""
for line in handle:
if line.startswith("#"):
continue
yield Record(line)
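# Minimal usage sketch; the CLA filename is a hypothetical placeholder -
# substitute any downloaded SCOP dir.cla release file:
#
#     with open("dir.cla.scop.txt_1.75") as handle:
#         for record in parse(handle):
#             print(record.sid, record.sccs, record.hierarchy["cl"])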
class Index(dict):
"""A CLA file indexed by SCOP identifiers for rapid random access."""
def __init__(self, filename):
"""Create CLA index.
Arguments:
- filename - The file to index
"""
dict.__init__(self)
self.filename = filename
with open(self.filename, "rU") as f:
position = 0
while True:
line = f.readline()
if not line:
break
if line.startswith("#"):
continue
record = Record(line)
key = record.sid
if key is not None:
self[key] = position
position = f.tell()
def __getitem__(self, key):
"""Return an item from the indexed file."""
position = dict.__getitem__(self, key)
with open(self.filename, "rU") as f:
f.seek(position)
line = f.readline()
record = Record(line)
return record
|